| blob_id (string, 40) | directory_id (string, 40) | path (string, 7-139) | content_id (string, 40) | detected_licenses (list, 0-16) | license_type (2 classes) | repo_name (string, 7-55) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (6 classes) | visit_date (int64, 1,471B-1,694B) | revision_date (int64, 1,378B-1,694B) | committer_date (int64, 1,378B-1,694B) | github_id (float64, 1.33M-604M, nullable) | star_events_count (int64, 0-43.5k) | fork_events_count (int64, 0-1.5k) | gha_license_id (6 classes) | gha_event_created_at (int64, 1,402B-1,695B, nullable) | gha_created_at (int64, 1,359B-1,637B, nullable) | gha_language (19 classes) | src_encoding (2 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3-6.4M) | extension (4 classes) | content (string, 3-6.12M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 77d54b344660a48cf1628133fce36d8d08e5278a | fa02ed5a3c9c0adee3c26887a16855e7841c668b | /src/analysis/complex/isometry.lean | de4b047e7f3afb976cc28f6c9122e8ab3ad773a8 | ["Apache-2.0"] | permissive | jjgarzella/mathlib | 96a345378c4e0bf26cf604aed84f90329e4896a2 | 395d8716c3ad03747059d482090e2bb97db612c8 | refs/heads/master | 1,686,480,124,379 | 1,625,163,323,000 | 1,625,163,323,000 | 281,190,421 | 2 | 0 | Apache-2.0 | 1,595,268,170,000 | 1,595,268,169,000 | null | UTF-8 | Lean | false | false | 5,906 | lean |
/-
Copyright (c) 2021 François Sunatori. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: François Sunatori
-/
import analysis.complex.basic
import data.complex.exponential
import data.real.sqrt
import analysis.normed_space.linear_isometry
import algebra.group.units
/-!
# Isometries of the Complex Plane
The lemma `linear_isometry_complex` states the classification of isometries in the complex plane.
Specifically, every linear isometry of `ℂ` is a rotation, possibly composed with complex
conjugation; translations are excluded since the maps considered are linear.
The proof involves:
1. creating a linear isometry `g` with two fixed points, `g(0) = 0`, `g(1) = 1`
2. applying `linear_isometry_complex_aux` to `g`
The proof of `linear_isometry_complex_aux` is split into the following parts:
1. show that the real parts match up: `linear_isometry.re_apply_eq_re`
2. show that `I` maps to either `I` or `-I`
3. conclude by writing every `z` as the linear combination `a + b * I` of `1` and `I`
## References
* [Isometries of the Complex Plane](http://helmut.knaust.info/mediawiki/images/b/b5/Iso.pdf)
-/
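/- Added illustrative note (not part of the original source): unpacking the statement of
`linear_isometry_complex` below, every `f : ℂ →ₗᵢ[ℝ] ℂ` satisfies either `f z = a * z` for all
`z` (a rotation by the unit complex number `a = f 1`) or `f z = a * conj z` for all `z`
(a rotation composed with complex conjugation, i.e. a reflection). -/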
noncomputable theory
open complex
local notation `|` x `|` := complex.abs x
lemma linear_isometry.re_apply_eq_re_of_add_conj_eq (f : ℂ →ₗᵢ[ℝ] ℂ)
(h₃ : ∀ z, z + conj z = f z + conj (f z)) (z : ℂ) : (f z).re = z.re :=
by simpa [ext_iff, add_re, add_im, conj_re, conj_im, ←two_mul,
(show (2 : ℝ) ≠ 0, by simp [two_ne_zero'])] using (h₃ z).symm
lemma linear_isometry.im_apply_eq_im_or_neg_of_re_apply_eq_re {f : ℂ →ₗᵢ[ℝ] ℂ}
(h₁ : ∀ z, |f z| = |z|) (h₂ : ∀ z, (f z).re = z.re) (z : ℂ) :
(f z).im = z.im ∨ (f z).im = -z.im :=
begin
specialize h₁ z,
simp only [complex.abs] at h₁,
rwa [real.sqrt_inj (norm_sq_nonneg _) (norm_sq_nonneg _), norm_sq_apply (f z), norm_sq_apply z,
h₂, add_left_cancel_iff, mul_self_eq_mul_self_iff] at h₁,
end
lemma linear_isometry.abs_apply_sub_one_eq_abs_sub_one {f : ℂ →ₗᵢ[ℝ] ℂ} (h : f 1 = 1) (z : ℂ) :
∥f z - 1∥ = ∥z - 1∥ :=
by rw [←linear_isometry.norm_map f (z - 1), linear_isometry.map_sub, h]
lemma linear_isometry.im_apply_eq_im {f : ℂ →ₗᵢ[ℝ] ℂ} (h : f 1 = 1) (z : ℂ) :
z + conj z = f z + conj (f z) :=
begin
have := linear_isometry.abs_apply_sub_one_eq_abs_sub_one h z,
apply_fun λ x, x ^ 2 at this,
simp only [norm_eq_abs, ←norm_sq_eq_abs] at this,
rw [←of_real_inj, ←mul_conj, ←mul_conj] at this,
rw [conj.map_sub, conj.map_sub] at this,
simp only [sub_mul, mul_sub, one_mul, mul_one] at this,
rw [mul_conj, norm_sq_eq_abs, ←norm_eq_abs, linear_isometry.norm_map] at this,
rw [mul_conj, norm_sq_eq_abs, ←norm_eq_abs] at this,
simp only [sub_sub, sub_right_inj, mul_one, of_real_pow, ring_hom.map_one, norm_eq_abs] at this,
simp only [add_sub, sub_left_inj] at this,
rw [add_comm, ←this, add_comm],
end
lemma linear_isometry.re_apply_eq_re {f : ℂ →ₗᵢ[ℝ] ℂ} (h : f 1 = 1) (z : ℂ) : (f z).re = z.re :=
begin
apply linear_isometry.re_apply_eq_re_of_add_conj_eq,
intro z,
apply linear_isometry.im_apply_eq_im h,
end
lemma linear_isometry_complex_aux (f : ℂ →ₗᵢ[ℝ] ℂ) (h : f 1 = 1) :
(∀ z, f z = z) ∨ (∀ z, f z = conj z) :=
begin
have h0 : f I = I ∨ f I = -I,
{ have : |f I| = 1,
{ rw [←norm_eq_abs, linear_isometry.norm_map, norm_eq_abs, abs_I] },
simp only [ext_iff, ←and_or_distrib_left, neg_re, I_re, neg_im, neg_zero],
split,
{ rw ←I_re,
rw linear_isometry.re_apply_eq_re h },
{ apply linear_isometry.im_apply_eq_im_or_neg_of_re_apply_eq_re,
{ intro z, rw [←norm_eq_abs, ←norm_eq_abs, linear_isometry.norm_map] },
{ intro z, rw linear_isometry.re_apply_eq_re h } } },
refine or.imp (λ h1, _) (λ h1 z, _) h0,
{ suffices : f.to_linear_map = linear_isometry.id.to_linear_map,
{ simp [this, ←linear_isometry.coe_to_linear_map, linear_map.id_apply] },
apply basis.ext basis_one_I,
intro i,
fin_cases i,
{ simp [h] },
{ simp only [matrix.head_cons, linear_isometry.coe_to_linear_map,
linear_map.id_coe, id.def, matrix.cons_val_one], simpa } },
{ suffices : f.to_linear_map = conj_lie.to_linear_equiv,
{ rw [←linear_isometry.coe_to_linear_map, this],
simp only [linear_isometry.coe_to_linear_map], refl },
apply basis.ext basis_one_I,
intro i,
fin_cases i,
{ simp only [h, linear_isometry.coe_to_linear_map, matrix.cons_val_zero], simpa },
{ simp only [matrix.head_cons, linear_isometry.coe_to_linear_map,
linear_map.id_coe, id.def, matrix.cons_val_one], simpa } },
end
lemma linear_isometry_complex (f : ℂ →ₗᵢ[ℝ] ℂ) :
∃ a : ℂ, |a| = 1 ∧ ((∀ z, f z = a * z) ∨ (∀ z, f z = a * conj z)) :=
begin
let a := f 1,
use a,
split,
{ simp only [← norm_eq_abs, a, linear_isometry.norm_map, norm_one] },
{ let g : ℂ →ₗᵢ[ℝ] ℂ :=
{ to_fun := λ z, a⁻¹ * f z,
map_add' := by {
intros x y,
rw linear_isometry.map_add,
rw mul_add },
map_smul' := by {
intros m x,
rw linear_isometry.map_smul,
rw algebra.mul_smul_comm },
norm_map' := by {
intros x,
simp,
suffices : ∥f 1∥⁻¹ * ∥f x∥ = ∥x∥, { simpa },
iterate 2 { rw linear_isometry.norm_map },
simp } },
have hg1 : g 1 = 1 := by {
change a⁻¹ * a = 1,
rw inv_mul_cancel,
rw ← norm_sq_pos,
rw norm_sq_eq_abs,
change 0 < ∥a∥ ^ 2,
rw linear_isometry.norm_map,
simp },
have h : (∀ z, g z = z) ∨ (∀ z, g z = conj z) := linear_isometry_complex_aux g hg1,
change (∀ z, a⁻¹ * f z = z) ∨ (∀ z, a⁻¹ * f z = conj z) at h,
have ha : a ≠ 0 := by {
rw ← norm_sq_pos,
rw norm_sq_eq_abs,
change 0 < ∥a∥ ^ 2,
rw linear_isometry.norm_map,
simp },
simpa only [← inv_mul_eq_iff_eq_mul' ha] }
end
|
| f78cc4a4845b9ebbdec3b259b0521f96f6238ecd | 82e44445c70db0f03e30d7be725775f122d72f3e | /src/analysis/calculus/conformal.lean | 081bf02093680761103c75ad8222b037e9c873a8 | ["Apache-2.0"] | permissive | stjordanis/mathlib | 51e286d19140e3788ef2c470bc7b953e4991f0c9 | 2568d41bca08f5d6bf39d915434c8447e21f42ee | refs/heads/master | 1,631,748,053,501 | 1,627,938,886,000 | 1,627,938,886,000 | 228,728,358 | 0 | 0 | Apache-2.0 | 1,576,630,588,000 | 1,576,630,587,000 | null | UTF-8 | Lean | false | false | 6,998 | lean |
/-
Copyright (c) 2021 Yourong Zang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yourong Zang
-/
import analysis.normed_space.conformal_linear_map
/-!
# Conformal Maps
A map between real normed spaces `X` and `Y` is `conformal_at` some point `x`
if it is real differentiable at that point and its differential at that point is a conformal
linear map (`is_conformal_map`).
## Main definitions
* `conformal_at`: the main definition of conformal maps
* `conformal`: maps that are conformal at every point
* `conformal_factor_at`: the conformal factor of a conformal map at some point
## Main results
* The conformality of the composition of two conformal maps, the identity map
and multiplications by nonzero constants
* `conformal_at_iff_is_conformal_map_fderiv`: an equivalent definition of the conformality of a map
* `conformal_at_iff`: an equivalent definition of the conformality of a map
* `conformal_at.preserves_angle`: if a map is conformal at `x`, then its differential
preserves all angles at `x`
## Tags
conformal
## Warning
The definition of conformality in this file does NOT require the maps to be orientation-preserving.
Maps such as the complex conjugate are considered to be conformal.
-/
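/- Added illustrative note (not part of the original source): two basic instances established
below are `conformal_at_id`, saying the identity map is conformal at every point, and
`conformal_at_const_smul`, saying `x ↦ c • x` is conformal at every point whenever `c ≠ 0`;
conformality is preserved by composition (`conformal_at.comp`). -/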
noncomputable theory
variables {E F : Type*} [inner_product_space ℝ E] [inner_product_space ℝ F]
{X Y Z : Type*} [normed_group X] [normed_group Y] [normed_group Z]
[normed_space ℝ X] [normed_space ℝ Y] [normed_space ℝ Z]
section loc_conformality
open linear_isometry continuous_linear_map
open_locale real_inner_product_space
/-- A map `f` is said to be conformal if it has a conformal differential `f'`. -/
def conformal_at (f : X → Y) (x : X) :=
∃ (f' : X →L[ℝ] Y), has_fderiv_at f f' x ∧ is_conformal_map f'
lemma conformal_at_id (x : X) : conformal_at id x :=
⟨id ℝ X, has_fderiv_at_id _, is_conformal_map_id⟩
lemma conformal_at_const_smul {c : ℝ} (h : c ≠ 0) (x : X) :
conformal_at (λ (x': X), c • x') x :=
⟨c • continuous_linear_map.id ℝ X,
(has_fderiv_at_id x).const_smul c, is_conformal_map_const_smul h⟩
/-- A function is conformal at a point if and only if its differential at that point is a
conformal linear map. -/
lemma conformal_at_iff_is_conformal_map_fderiv {f : X → Y} {x : X} :
conformal_at f x ↔ is_conformal_map (fderiv ℝ f x) :=
begin
split,
{ rintros ⟨c, hf, hf'⟩,
rw hf.fderiv,
exact hf' },
{ intros H,
by_cases h : differentiable_at ℝ f x,
{ exact ⟨fderiv ℝ f x, h.has_fderiv_at, H⟩, },
{ cases subsingleton_or_nontrivial X with w w; resetI,
{ exact ⟨(0 : X →L[ℝ] Y), has_fderiv_at_of_subsingleton f x,
is_conformal_map_of_subsingleton 0⟩, },
{ exfalso,
rcases nontrivial_iff.mp w with ⟨a, b, hab⟩,
rw [fderiv_zero_of_not_differentiable_at h] at H,
exact hab (H.injective rfl), }, }, },
end
/-- A real differentiable map `f` is conformal at point `x` if and only if its
differential `fderiv ℝ f x` at that point scales every inner product by a positive scalar. -/
lemma conformal_at_iff' {f : E → F} {x : E} :
conformal_at f x ↔
∃ (c : ℝ), 0 < c ∧ ∀ (u v : E), ⟪fderiv ℝ f x u, fderiv ℝ f x v⟫ = c * ⟪u, v⟫ :=
by rw [conformal_at_iff_is_conformal_map_fderiv, is_conformal_map_iff]
/-- A real differentiable map `f` is conformal at point `x` if and only if its
differential `f'` at that point scales every inner product by a positive scalar. -/
lemma conformal_at_iff {f : E → F} {x : E} {f' : E →L[ℝ] F} (h : has_fderiv_at f f' x) :
conformal_at f x ↔ ∃ (c : ℝ), 0 < c ∧ ∀ (u v : E), ⟪f' u, f' v⟫ = c * ⟪u, v⟫ :=
by simp only [conformal_at_iff', h.fderiv]
namespace conformal_at
lemma differentiable_at {f : X → Y} {x : X} (h : conformal_at f x) :
differentiable_at ℝ f x :=
let ⟨_, h₁, _⟩ := h in h₁.differentiable_at
lemma congr {f g : X → Y} {x : X} {u : set X} (hx : x ∈ u) (hu : is_open u)
(hf : conformal_at f x) (h : ∀ (x : X), x ∈ u → g x = f x) :
conformal_at g x :=
let ⟨f', hfderiv, hf'⟩ := hf in
⟨f', hfderiv.congr_of_eventually_eq ((hu.eventually_mem hx).mono h), hf'⟩
lemma comp {f : X → Y} {g : Y → Z} (x : X)
(hg : conformal_at g (f x)) (hf : conformal_at f x) : conformal_at (g ∘ f) x :=
begin
rcases hf with ⟨f', hf₁, cf⟩,
rcases hg with ⟨g', hg₁, cg⟩,
exact ⟨g'.comp f', hg₁.comp x hf₁, cf.comp cg⟩,
end
lemma const_smul {f : X → Y} {x : X} {c : ℝ} (hc : c ≠ 0) (hf : conformal_at f x) :
conformal_at (c • f) x :=
(conformal_at_const_smul hc $ f x).comp x hf
/-- The conformal factor of a conformal map at some point `x`. Some authors refer to this function
as the characteristic function of the conformal map. -/
def conformal_factor_at {f : E → F} {x : E} (h : conformal_at f x) : ℝ :=
classical.some (conformal_at_iff'.mp h)
lemma conformal_factor_at_pos {f : E → F} {x : E} (h : conformal_at f x) :
0 < conformal_factor_at h :=
(classical.some_spec $ conformal_at_iff'.mp h).1
lemma conformal_factor_at_inner_eq_mul_inner' {f : E → F} {x : E}
(h : conformal_at f x) (u v : E) :
⟪(fderiv ℝ f x) u, (fderiv ℝ f x) v⟫ = (conformal_factor_at h : ℝ) * ⟪u, v⟫ :=
(classical.some_spec $ conformal_at_iff'.mp h).2 u v
lemma conformal_factor_at_inner_eq_mul_inner {f : E → F} {x : E} {f' : E →L[ℝ] F}
(h : has_fderiv_at f f' x) (H : conformal_at f x) (u v : E) :
⟪f' u, f' v⟫ = (conformal_factor_at H : ℝ) * ⟪u, v⟫ :=
(H.differentiable_at.has_fderiv_at.unique h) ▸ conformal_factor_at_inner_eq_mul_inner' H u v
/-- If a real differentiable map `f` is conformal at a point `x`,
then it preserves the angles at that point. -/
lemma preserves_angle {f : E → F} {x : E} {f' : E →L[ℝ] F}
(h : has_fderiv_at f f' x) (H : conformal_at f x) (u v : E) :
inner_product_geometry.angle (f' u) (f' v) = inner_product_geometry.angle u v :=
let ⟨f₁, h₁, c⟩ := H in h₁.unique h ▸ c.preserves_angle u v
end conformal_at
end loc_conformality
section global_conformality
/-- A map `f` is conformal if it's conformal at every point. -/
def conformal (f : X → Y) :=
∀ (x : X), conformal_at f x
lemma conformal_id : conformal (id : X → X) := λ x, conformal_at_id x
lemma conformal_const_smul {c : ℝ} (h : c ≠ 0) : conformal (λ (x : X), c • x) :=
λ x, conformal_at_const_smul h x
namespace conformal
lemma conformal_at {f : X → Y} (h : conformal f) (x : X) : conformal_at f x := h x
lemma differentiable {f : X → Y} (h : conformal f) : differentiable ℝ f :=
λ x, (h x).differentiable_at
lemma comp {f : X → Y} {g : Y → Z} (hf : conformal f) (hg : conformal g) : conformal (g ∘ f) :=
λ x, (hg $ f x).comp x (hf x)
lemma const_smul {f : X → Y} (hf : conformal f) {c : ℝ} (hc : c ≠ 0) : conformal (c • f) :=
λ x, (hf x).const_smul hc
end conformal
end global_conformality
|
| 250b8e21c2aa5d99ecd5376f9f46901c843ae564 | b9a81ebb9de684db509231c4469a7d2c88915808 | /src/super/datatypes.lean | bcf1b283cbead2d54f6233462888602c1417e657 | [] | no_license | leanprover/super | 3dd81ce8d9ac3cba20bce55e84833fadb2f5716e | 47b107b4cec8f3b41d72daba9cbda2f9d54025de | refs/heads/master | 1,678,482,996,979 | 1,676,526,367,000 | 1,676,526,367,000 | 92,215,900 | 12 | 6 | null | 1,513,327,539,000 | 1,495,570,640,000 | Lean | UTF-8 | Lean | false | false | 1,506 | lean |
/-
Copyright (c) 2017 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner
-/
import .clause_ops .prover_state
open expr tactic monad
namespace super
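-- Added descriptive comment (not part of the original source): `has_diff_constr_eq_l` checks
-- whether a clause contains a negative equality literal whose two sides are applications of
-- different constructors of an inductive type; the preprocessing rule `diff_constr_eq_l_pre`
-- below keeps only the clauses for which this check returns `ff`.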
meta def has_diff_constr_eq_l (c : clause) : tactic bool := do
env ← get_env,
return $ list.bor $ do
l ← c.get_lits,
guard l.is_neg,
return $ match is_eq l.formula with
| some (lhs, rhs) :=
if env.is_constructor_app lhs ∧ env.is_constructor_app rhs ∧
lhs.get_app_fn.const_name ≠ rhs.get_app_fn.const_name then
tt
else
ff
| _ := ff
end
meta def diff_constr_eq_l_pre := preprocessing_rule $
filter (λc, bnot <$> has_diff_constr_eq_l c.c)
meta def try_no_confusion_eq_r (c : clause) (i : ℕ) : tactic (list clause) :=
on_right_at' c i $ λhyp,
match is_eq hyp.local_type with
| some (lhs, rhs) := do
env ← get_env,
lhs ← whnf lhs, rhs ← whnf rhs,
guard $ env.is_constructor_app lhs ∧ env.is_constructor_app rhs,
pr ← mk_app (lhs.get_app_fn.const_name.get_prefix <.> "no_confusion") [`(false), lhs, rhs, hyp],
-- FIXME: change to local false ^^
ty ← infer_type pr, ty ← whnf ty,
pr ← to_expr ``(@eq.mpr _ %%ty rfl %%pr), -- FIXME
return [([], pr)]
| _ := failed
end
@[super.inf]
meta def datatype_infs : inf_decl := inf_decl.mk 10 $ assume given, do
sequence' (do i ← list.range given.c.num_lits, [inf_if_successful 0 given $ try_no_confusion_eq_r given.c i])
end super
|
| e2f88655dc5dfa35b9da12656752d59f6a6bdf77 | 624f6f2ae8b3b1adc5f8f67a365c51d5126be45a | /src/Init/Lean/Compiler/IR/SimpCase.lean | bf7b018b5f79aa3878f48d3a26e7bf75de6a04c3 | ["Apache-2.0"] | permissive | mhuisi/lean4 | 28d35a4febc2e251c7f05492e13f3b05d6f9b7af | dda44bc47f3e5d024508060dac2bcb59fd12e4c0 | refs/heads/master | 1,621,225,489,283 | 1,585,142,689,000 | 1,585,142,689,000 | 250,590,438 | 0 | 2 | Apache-2.0 | 1,602,443,220,000 | 1,585,327,814,000 | C | UTF-8 | Lean | false | false | 2,108 | lean |
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Lean.Compiler.IR.Basic
import Init.Lean.Compiler.IR.Format
namespace Lean
namespace IR
def ensureHasDefault (alts : Array Alt) : Array Alt :=
if alts.any Alt.isDefault then alts
else if alts.size < 2 then alts
else
let last := alts.back;
let alts := alts.pop;
alts.push (Alt.default last.body)
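/- Added illustrative note (not part of the original source): when `alts` has at least two
entries and none of them is a default alternative, `ensureHasDefault` replaces the last
alternative `Alt.ctor info b` with `Alt.default b`, so later passes can rely on a default
branch being present; otherwise the array is returned unchanged. -/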
private def getOccsOf (alts : Array Alt) (i : Nat) : Nat :=
let aBody := (alts.get! i).body;
alts.iterateFrom 1 (i + 1) $ fun _ a' n =>
if a'.body == aBody then n+1 else n
private def maxOccs (alts : Array Alt) : Alt × Nat :=
alts.iterateFrom (alts.get! 0, getOccsOf alts 0) 1 $ fun i a p =>
let noccs := getOccsOf alts i.val;
if noccs > p.2 then (alts.get i, noccs) else p
private def addDefault (alts : Array Alt) : Array Alt :=
if alts.size <= 1 || alts.any Alt.isDefault then alts
else
let (max, noccs) := maxOccs alts;
if noccs == 1 then alts
else
let alts := alts.filter $ (fun alt => alt.body != max.body);
alts.push (Alt.default max.body)
private def mkSimpCase (tid : Name) (x : VarId) (xType : IRType) (alts : Array Alt) : FnBody :=
let alts := alts.filter (fun alt => alt.body != FnBody.unreachable);
let alts := addDefault alts;
if alts.size == 0 then
FnBody.unreachable
else if alts.size == 1 then
(alts.get! 0).body
else
FnBody.case tid x xType alts
partial def FnBody.simpCase : FnBody → FnBody
| b =>
let (bs, term) := b.flatten;
let bs := modifyJPs bs FnBody.simpCase;
match term with
| FnBody.case tid x xType alts =>
let alts := alts.map $ fun alt => alt.modifyBody FnBody.simpCase;
reshape bs (mkSimpCase tid x xType alts)
| other => reshape bs term
/-- Simplify `case`
- Remove unreachable branches.
- Remove `case` if there is only one branch.
- Merge most common branches using `Alt.default`. -/
def Decl.simpCase : Decl → Decl
| Decl.fdecl f xs t b => Decl.fdecl f xs t b.simpCase
| other => other
end IR
end Lean
|
| af4735e810ed265b34fbf068661a1f87577be612 | 74addaa0e41490cbaf2abd313a764c96df57b05d | /Mathlib/data/set/intervals/unordered_interval.lean | 43f8c39e4197153a9120cc2ee6b844ecc19a381a | [] | no_license | AurelienSaue/Mathlib4_auto | f538cfd0980f65a6361eadea39e6fc639e9dae14 | 590df64109b08190abe22358fabc3eae000943f2 | refs/heads/master | 1,683,906,849,776 | 1,622,564,669,000 | 1,622,564,669,000 | 371,723,747 | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 10,190 | lean |
/-
Copyright (c) 2020 Zhouhang Zhou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Zhouhang Zhou
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.order.bounds
import Mathlib.data.set.intervals.image_preimage
import Mathlib.PostPort
universes u
namespace Mathlib
/-!
# Intervals without endpoints ordering
In any decidable linear order `α`, we define the set of elements lying between two elements `a` and
`b` as `Icc (min a b) (max a b)`.
`Icc a b` requires the assumption `a ≤ b` to be meaningful, which is sometimes inconvenient. The
interval as defined in this file is always the set of things lying between `a` and `b`, regardless
of the relative order of `a` and `b`.
For real numbers, `Icc (min a b) (max a b)` is the same as `segment a b`.
## Notation
We use the localized notation `[a, b]` for `interval a b`. One can open the locale `interval` to
make the notation available.
-/
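/- Added illustrative note (not part of the original source): with this definition the endpoints
may be given in either order; for instance `interval 2 5 = Icc 2 5`, and `interval 5 2` is also
`Icc 2 5` because `min 5 2 = 2` and `max 5 2 = 5` (see `interval_of_le` and `interval_of_ge`
below). -/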
namespace set
/-- `interval a b` is the set of elements lying between `a` and `b`, with `a` and `b` included. -/
def interval {α : Type u} [linear_order α] (a : α) (b : α) : set α :=
Icc (min a b) (max a b)
@[simp] theorem interval_of_le {α : Type u} [linear_order α] {a : α} {b : α} (h : a ≤ b) : interval a b = Icc a b :=
eq.mpr (id (Eq._oldrec (Eq.refl (interval a b = Icc a b)) (interval.equations._eqn_1 a b)))
(eq.mpr (id (Eq._oldrec (Eq.refl (Icc (min a b) (max a b) = Icc a b)) (min_eq_left h)))
(eq.mpr (id (Eq._oldrec (Eq.refl (Icc a (max a b) = Icc a b)) (max_eq_right h))) (Eq.refl (Icc a b))))
@[simp] theorem interval_of_ge {α : Type u} [linear_order α] {a : α} {b : α} (h : b ≤ a) : interval a b = Icc b a :=
eq.mpr (id (Eq._oldrec (Eq.refl (interval a b = Icc b a)) (interval.equations._eqn_1 a b)))
(eq.mpr (id (Eq._oldrec (Eq.refl (Icc (min a b) (max a b) = Icc b a)) (min_eq_right h)))
(eq.mpr (id (Eq._oldrec (Eq.refl (Icc b (max a b) = Icc b a)) (max_eq_left h))) (Eq.refl (Icc b a))))
theorem interval_swap {α : Type u} [linear_order α] (a : α) (b : α) : interval a b = interval b a := sorry
theorem interval_of_lt {α : Type u} [linear_order α] {a : α} {b : α} (h : a < b) : interval a b = Icc a b :=
interval_of_le (le_of_lt h)
theorem interval_of_gt {α : Type u} [linear_order α] {a : α} {b : α} (h : b < a) : interval a b = Icc b a :=
interval_of_ge (le_of_lt h)
theorem interval_of_not_le {α : Type u} [linear_order α] {a : α} {b : α} (h : ¬a ≤ b) : interval a b = Icc b a :=
interval_of_gt (lt_of_not_ge h)
theorem interval_of_not_ge {α : Type u} [linear_order α] {a : α} {b : α} (h : ¬b ≤ a) : interval a b = Icc a b :=
interval_of_lt (lt_of_not_ge h)
@[simp] theorem interval_self {α : Type u} [linear_order α] {a : α} : interval a a = singleton a := sorry
@[simp] theorem nonempty_interval {α : Type u} [linear_order α] {a : α} {b : α} : set.nonempty (interval a b) := sorry
@[simp] theorem left_mem_interval {α : Type u} [linear_order α] {a : α} {b : α} : a ∈ interval a b :=
eq.mpr (id (Eq._oldrec (Eq.refl (a ∈ interval a b)) (interval.equations._eqn_1 a b)))
(eq.mpr (id (Eq._oldrec (Eq.refl (a ∈ Icc (min a b) (max a b))) (propext mem_Icc)))
{ left := min_le_left a b, right := le_max_left a b })
@[simp] theorem right_mem_interval {α : Type u} [linear_order α] {a : α} {b : α} : b ∈ interval a b :=
eq.mpr (id (Eq._oldrec (Eq.refl (b ∈ interval a b)) (interval_swap a b))) left_mem_interval
theorem Icc_subset_interval {α : Type u} [linear_order α] {a : α} {b : α} : Icc a b ⊆ interval a b :=
id
fun (x : α) (h : x ∈ Icc a b) =>
eq.mpr (id (Eq._oldrec (Eq.refl (x ∈ interval a b)) (interval_of_le (le_trans (and.left h) (and.right h))))) h
theorem Icc_subset_interval' {α : Type u} [linear_order α] {a : α} {b : α} : Icc b a ⊆ interval a b :=
eq.mpr (id (Eq._oldrec (Eq.refl (Icc b a ⊆ interval a b)) (interval_swap a b))) Icc_subset_interval
theorem mem_interval_of_le {α : Type u} [linear_order α] {a : α} {b : α} {x : α} (ha : a ≤ x) (hb : x ≤ b) : x ∈ interval a b :=
Icc_subset_interval { left := ha, right := hb }
theorem mem_interval_of_ge {α : Type u} [linear_order α] {a : α} {b : α} {x : α} (hb : b ≤ x) (ha : x ≤ a) : x ∈ interval a b :=
Icc_subset_interval' { left := hb, right := ha }
theorem interval_subset_interval {α : Type u} [linear_order α] {a₁ : α} {a₂ : α} {b₁ : α} {b₂ : α} (h₁ : a₁ ∈ interval a₂ b₂) (h₂ : b₁ ∈ interval a₂ b₂) : interval a₁ b₁ ⊆ interval a₂ b₂ :=
Icc_subset_Icc (le_min (and.left h₁) (and.left h₂)) (max_le (and.right h₁) (and.right h₂))
theorem interval_subset_interval_iff_mem {α : Type u} [linear_order α] {a₁ : α} {a₂ : α} {b₁ : α} {b₂ : α} : interval a₁ b₁ ⊆ interval a₂ b₂ ↔ a₁ ∈ interval a₂ b₂ ∧ b₁ ∈ interval a₂ b₂ :=
{ mp := fun (h : interval a₁ b₁ ⊆ interval a₂ b₂) => { left := h left_mem_interval, right := h right_mem_interval },
mpr := fun (h : a₁ ∈ interval a₂ b₂ ∧ b₁ ∈ interval a₂ b₂) => interval_subset_interval (and.left h) (and.right h) }
theorem interval_subset_interval_iff_le {α : Type u} [linear_order α] {a₁ : α} {a₂ : α} {b₁ : α} {b₂ : α} : interval a₁ b₁ ⊆ interval a₂ b₂ ↔ min a₂ b₂ ≤ min a₁ b₁ ∧ max a₁ b₁ ≤ max a₂ b₂ := sorry
theorem interval_subset_interval_right {α : Type u} [linear_order α] {a : α} {b : α} {x : α} (h : x ∈ interval a b) : interval x b ⊆ interval a b :=
interval_subset_interval h right_mem_interval
theorem interval_subset_interval_left {α : Type u} [linear_order α] {a : α} {b : α} {x : α} (h : x ∈ interval a b) : interval a x ⊆ interval a b :=
interval_subset_interval left_mem_interval h
theorem bdd_below_bdd_above_iff_subset_interval {α : Type u} [linear_order α] (s : set α) : bdd_below s ∧ bdd_above s ↔ ∃ (a : α), ∃ (b : α), s ⊆ interval a b := sorry
@[simp] theorem preimage_const_add_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => a + x) ⁻¹' interval b c = interval (b - a) (c - a) := sorry
@[simp] theorem preimage_add_const_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => x + a) ⁻¹' interval b c = interval (b - a) (c - a) := sorry
@[simp] theorem preimage_neg_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) : -interval a b = interval (-a) (-b) := sorry
@[simp] theorem preimage_sub_const_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => x - a) ⁻¹' interval b c = interval (b + a) (c + a) := sorry
@[simp] theorem preimage_const_sub_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => a - x) ⁻¹' interval b c = interval (a - b) (a - c) := sorry
@[simp] theorem image_const_add_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => a + x) '' interval b c = interval (a + b) (a + c) := sorry
@[simp] theorem image_add_const_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => x + a) '' interval b c = interval (b + a) (c + a) := sorry
@[simp] theorem image_const_sub_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => a - x) '' interval b c = interval (a - b) (a - c) := sorry
@[simp] theorem image_sub_const_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) (c : α) : (fun (x : α) => x - a) '' interval b c = interval (b - a) (c - a) := sorry
theorem image_neg_interval {α : Type u} [linear_ordered_add_comm_group α] (a : α) (b : α) : Neg.neg '' interval a b = interval (-a) (-b) := sorry
/-- If `[x, y]` is a subinterval of `[a, b]`, then the distance between `x` and `y`
is less than or equal to that of `a` and `b` -/
theorem abs_sub_le_of_subinterval {α : Type u} [linear_ordered_add_comm_group α] {a : α} {b : α} {x : α} {y : α} (h : interval x y ⊆ interval a b) : abs (y - x) ≤ abs (b - a) := sorry
/-- If `x ∈ [a, b]`, then the distance between `a` and `x` is less than or equal to
that of `a` and `b` -/
theorem abs_sub_left_of_mem_interval {α : Type u} [linear_ordered_add_comm_group α] {a : α} {b : α} {x : α} (h : x ∈ interval a b) : abs (x - a) ≤ abs (b - a) :=
abs_sub_le_of_subinterval (interval_subset_interval_left h)
/-- If `x ∈ [a, b]`, then the distance between `x` and `b` is less than or equal to
that of `a` and `b` -/
theorem abs_sub_right_of_mem_interval {α : Type u} [linear_ordered_add_comm_group α] {a : α} {b : α} {x : α} (h : x ∈ interval a b) : abs (b - x) ≤ abs (b - a) :=
abs_sub_le_of_subinterval (interval_subset_interval_right h)
@[simp] theorem preimage_mul_const_interval {k : Type u} [linear_ordered_field k] {a : k} (ha : a ≠ 0) (b : k) (c : k) : (fun (x : k) => x * a) ⁻¹' interval b c = interval (b / a) (c / a) := sorry
@[simp] theorem preimage_const_mul_interval {k : Type u} [linear_ordered_field k] {a : k} (ha : a ≠ 0) (b : k) (c : k) : (fun (x : k) => a * x) ⁻¹' interval b c = interval (b / a) (c / a) := sorry
@[simp] theorem preimage_div_const_interval {k : Type u} [linear_ordered_field k] {a : k} (ha : a ≠ 0) (b : k) (c : k) : (fun (x : k) => x / a) ⁻¹' interval b c = interval (b * a) (c * a) := sorry
@[simp] theorem image_mul_const_interval {k : Type u} [linear_ordered_field k] (a : k) (b : k) (c : k) : (fun (x : k) => x * a) '' interval b c = interval (b * a) (c * a) := sorry
@[simp] theorem image_const_mul_interval {k : Type u} [linear_ordered_field k] (a : k) (b : k) (c : k) : (fun (x : k) => a * x) '' interval b c = interval (a * b) (a * c) := sorry
@[simp] theorem image_div_const_interval {k : Type u} [linear_ordered_field k] (a : k) (b : k) (c : k) : (fun (x : k) => x / a) '' interval b c = interval (b / a) (c / a) :=
image_mul_const_interval (a⁻¹) b c
|
| ab78a28031e6dc6f69ea2660923c52ffc785455e | 94e33a31faa76775069b071adea97e86e218a8ee | /src/analysis/special_functions/log/basic.lean | c96d348628638e674d2cf08808e602c6d8ab5d54 | ["Apache-2.0"] | permissive | urkud/mathlib | eab80095e1b9f1513bfb7f25b4fa82fa4fd02989 | 6379d39e6b5b279df9715f8011369a301b634e41 | refs/heads/master | 1,658,425,342,662 | 1,658,078,703,000 | 1,658,078,703,000 | 186,910,338 | 0 | 0 | Apache-2.0 | 1,568,512,083,000 | 1,557,958,709,000 | Lean | UTF-8 | Lean | false | false | 12,086 | lean |
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Abhimanyu Pallavi Sudhir, Jean Lo, Calle Sönne
-/
import analysis.special_functions.exp
/-!
# Real logarithm
In this file we define `real.log` to be the logarithm of a real number. As usual, we extend it from
its domain `(0, +∞)` to a globally defined function. We choose to do it so that `log 0 = 0` and
`log (-x) = log x`.
We prove some basic properties of this function and show that it is continuous.
## Tags
logarithm, continuity
-/
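/- Added illustrative note (not part of the original source): with the junk-value conventions
above, `log 0 = 0` (lemma `log_zero` below) and `log (-x) = log x` (lemma `log_neg_eq_log`), so
for example `log (-1) = log 1 = 0`; the identity `log (x * y) = log x + log y` then holds for
all nonzero `x` and `y` (lemma `log_mul`). -/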
open set filter function
open_locale topological_space
noncomputable theory
namespace real
variables {x y : ℝ}
/-- The real logarithm function, equal to the inverse of the exponential for `x > 0`,
to `log |x|` for `x < 0`, and to `0` for `0`. We use this unconventional extension to
`(-∞, 0]` as it gives the formula `log (x * y) = log x + log y` for all nonzero `x` and `y`, and
the derivative of `log` is `1/x` away from `0`. -/
@[pp_nodot] noncomputable def log (x : ℝ) : ℝ :=
if hx : x = 0 then 0 else exp_order_iso.symm ⟨|x|, abs_pos.2 hx⟩
lemma log_of_ne_zero (hx : x ≠ 0) : log x = exp_order_iso.symm ⟨|x|, abs_pos.2 hx⟩ := dif_neg hx
lemma log_of_pos (hx : 0 < x) : log x = exp_order_iso.symm ⟨x, hx⟩ :=
by { rw [log_of_ne_zero hx.ne'], congr, exact abs_of_pos hx }
lemma exp_log_eq_abs (hx : x ≠ 0) : exp (log x) = |x| :=
by rw [log_of_ne_zero hx, ← coe_exp_order_iso_apply, order_iso.apply_symm_apply, subtype.coe_mk]
lemma exp_log (hx : 0 < x) : exp (log x) = x :=
by { rw exp_log_eq_abs hx.ne', exact abs_of_pos hx }
lemma exp_log_of_neg (hx : x < 0) : exp (log x) = -x :=
by { rw exp_log_eq_abs (ne_of_lt hx), exact abs_of_neg hx }
lemma le_exp_log (x : ℝ) : x ≤ exp (log x) :=
begin
by_cases h_zero : x = 0,
{ rw [h_zero, log, dif_pos rfl, exp_zero], exact zero_le_one, },
{ rw exp_log_eq_abs h_zero, exact le_abs_self _, },
end
@[simp] lemma log_exp (x : ℝ) : log (exp x) = x :=
exp_injective $ exp_log (exp_pos x)
lemma surj_on_log : surj_on log (Ioi 0) univ :=
λ x _, ⟨exp x, exp_pos x, log_exp x⟩
lemma log_surjective : surjective log :=
λ x, ⟨exp x, log_exp x⟩
@[simp] lemma range_log : range log = univ :=
log_surjective.range_eq
@[simp] lemma log_zero : log 0 = 0 := dif_pos rfl
@[simp] lemma log_one : log 1 = 0 :=
exp_injective $ by rw [exp_log zero_lt_one, exp_zero]
@[simp] lemma log_abs (x : ℝ) : log (|x|) = log x :=
begin
by_cases h : x = 0,
{ simp [h] },
{ rw [← exp_eq_exp, exp_log_eq_abs h, exp_log_eq_abs (abs_pos.2 h).ne', abs_abs] }
end
@[simp] lemma log_neg_eq_log (x : ℝ) : log (-x) = log x :=
by rw [← log_abs x, ← log_abs (-x), abs_neg]
lemma sinh_log {x : ℝ} (hx : 0 < x) : sinh (log x) = (x - x⁻¹) / 2 :=
by rw [sinh_eq, exp_neg, exp_log hx]
lemma cosh_log {x : ℝ} (hx : 0 < x) : cosh (log x) = (x + x⁻¹) / 2 :=
by rw [cosh_eq, exp_neg, exp_log hx]
lemma surj_on_log' : surj_on log (Iio 0) univ :=
λ x _, ⟨-exp x, neg_lt_zero.2 $ exp_pos x, by rw [log_neg_eq_log, log_exp]⟩
lemma log_mul (hx : x ≠ 0) (hy : y ≠ 0) : log (x * y) = log x + log y :=
exp_injective $
by rw [exp_log_eq_abs (mul_ne_zero hx hy), exp_add, exp_log_eq_abs hx, exp_log_eq_abs hy, abs_mul]
lemma log_div (hx : x ≠ 0) (hy : y ≠ 0) : log (x / y) = log x - log y :=
exp_injective $
by rw [exp_log_eq_abs (div_ne_zero hx hy), exp_sub, exp_log_eq_abs hx, exp_log_eq_abs hy, abs_div]
@[simp] lemma log_inv (x : ℝ) : log (x⁻¹) = -log x :=
begin
by_cases hx : x = 0, { simp [hx] },
rw [← exp_eq_exp, exp_log_eq_abs (inv_ne_zero hx), exp_neg, exp_log_eq_abs hx, abs_inv]
end
lemma log_le_log (h : 0 < x) (h₁ : 0 < y) : log x ≤ log y ↔ x ≤ y :=
by rw [← exp_le_exp, exp_log h, exp_log h₁]
lemma log_lt_log (hx : 0 < x) : x < y → log x < log y :=
by { intro h, rwa [← exp_lt_exp, exp_log hx, exp_log (lt_trans hx h)] }
lemma log_lt_log_iff (hx : 0 < x) (hy : 0 < y) : log x < log y ↔ x < y :=
by { rw [← exp_lt_exp, exp_log hx, exp_log hy] }
lemma log_le_iff_le_exp (hx : 0 < x) : log x ≤ y ↔ x ≤ exp y := by rw [←exp_le_exp, exp_log hx]
lemma log_lt_iff_lt_exp (hx : 0 < x) : log x < y ↔ x < exp y := by rw [←exp_lt_exp, exp_log hx]
lemma le_log_iff_exp_le (hy : 0 < y) : x ≤ log y ↔ exp x ≤ y := by rw [←exp_le_exp, exp_log hy]
lemma lt_log_iff_exp_lt (hy : 0 < y) : x < log y ↔ exp x < y := by rw [←exp_lt_exp, exp_log hy]
lemma log_pos_iff (hx : 0 < x) : 0 < log x ↔ 1 < x :=
by { rw ← log_one, exact log_lt_log_iff zero_lt_one hx }
lemma log_pos (hx : 1 < x) : 0 < log x :=
(log_pos_iff (lt_trans zero_lt_one hx)).2 hx
lemma log_neg_iff (h : 0 < x) : log x < 0 ↔ x < 1 :=
by { rw ← log_one, exact log_lt_log_iff h zero_lt_one }
lemma log_neg (h0 : 0 < x) (h1 : x < 1) : log x < 0 := (log_neg_iff h0).2 h1
lemma log_nonneg_iff (hx : 0 < x) : 0 ≤ log x ↔ 1 ≤ x :=
by rw [← not_lt, log_neg_iff hx, not_lt]
lemma log_nonneg (hx : 1 ≤ x) : 0 ≤ log x :=
(log_nonneg_iff (zero_lt_one.trans_le hx)).2 hx
lemma log_nonpos_iff (hx : 0 < x) : log x ≤ 0 ↔ x ≤ 1 :=
by rw [← not_lt, log_pos_iff hx, not_lt]
lemma log_nonpos_iff' (hx : 0 ≤ x) : log x ≤ 0 ↔ x ≤ 1 :=
begin
rcases hx.eq_or_lt with (rfl|hx),
{ simp [le_refl, zero_le_one] },
exact log_nonpos_iff hx
end
lemma log_nonpos (hx : 0 ≤ x) (h'x : x ≤ 1) : log x ≤ 0 :=
(log_nonpos_iff' hx).2 h'x
lemma strict_mono_on_log : strict_mono_on log (set.Ioi 0) :=
λ x hx y hy hxy, log_lt_log hx hxy
lemma strict_anti_on_log : strict_anti_on log (set.Iio 0) :=
begin
rintros x (hx : x < 0) y (hy : y < 0) hxy,
rw [← log_abs y, ← log_abs x],
refine log_lt_log (abs_pos.2 hy.ne) _,
rwa [abs_of_neg hy, abs_of_neg hx, neg_lt_neg_iff]
end
lemma log_inj_on_pos : set.inj_on log (set.Ioi 0) :=
strict_mono_on_log.inj_on
lemma eq_one_of_pos_of_log_eq_zero {x : ℝ} (h₁ : 0 < x) (h₂ : log x = 0) : x = 1 :=
log_inj_on_pos (set.mem_Ioi.2 h₁) (set.mem_Ioi.2 zero_lt_one) (h₂.trans real.log_one.symm)
lemma log_ne_zero_of_pos_of_ne_one {x : ℝ} (hx_pos : 0 < x) (hx : x ≠ 1) : log x ≠ 0 :=
mt (eq_one_of_pos_of_log_eq_zero hx_pos) hx
@[simp] lemma log_eq_zero {x : ℝ} : log x = 0 ↔ x = 0 ∨ x = 1 ∨ x = -1 :=
begin
split,
{ intros h,
rcases lt_trichotomy x 0 with x_lt_zero | rfl | x_gt_zero,
{ refine or.inr (or.inr (eq_neg_iff_eq_neg.mp _)),
rw [←log_neg_eq_log x] at h,
exact (eq_one_of_pos_of_log_eq_zero (neg_pos.mpr x_lt_zero) h).symm, },
{ exact or.inl rfl },
{ exact or.inr (or.inl (eq_one_of_pos_of_log_eq_zero x_gt_zero h)), }, },
{ rintro (rfl|rfl|rfl); simp only [log_one, log_zero, log_neg_eq_log], }
end
@[simp] lemma log_pow (x : ℝ) (n : ℕ) : log (x ^ n) = n * log x :=
begin
induction n with n ih,
{ simp },
rcases eq_or_ne x 0 with rfl | hx,
{ simp },
rw [pow_succ', log_mul (pow_ne_zero _ hx) hx, ih, nat.cast_succ, add_mul, one_mul],
end
@[simp] lemma log_zpow (x : ℝ) (n : ℤ) : log (x ^ n) = n * log x :=
begin
induction n,
{ rw [int.of_nat_eq_coe, zpow_coe_nat, log_pow, int.cast_coe_nat] },
rw [zpow_neg_succ_of_nat, log_inv, log_pow, int.cast_neg_succ_of_nat, nat.cast_add_one,
neg_mul_eq_neg_mul],
end
lemma log_sqrt {x : ℝ} (hx : 0 ≤ x) : log (sqrt x) = log x / 2 :=
by { rw [eq_div_iff, mul_comm, ← nat.cast_two, ← log_pow, sq_sqrt hx], exact two_ne_zero }
lemma log_le_sub_one_of_pos {x : ℝ} (hx : 0 < x) : log x ≤ x - 1 :=
begin
rw le_sub_iff_add_le,
convert add_one_le_exp (log x),
rw exp_log hx,
end
/-- Bound for `|log x * x|` in the interval `(0, 1]`. -/
lemma abs_log_mul_self_lt (x: ℝ) (h1 : 0 < x) (h2 : x ≤ 1) : |log x * x| < 1 :=
begin
have : 0 < 1/x := by simpa only [one_div, inv_pos] using h1,
replace := log_le_sub_one_of_pos this,
replace : log (1 / x) < 1/x := by linarith,
rw [log_div one_ne_zero h1.ne', log_one, zero_sub, lt_div_iff h1] at this,
have aux : 0 ≤ -log x * x,
{ refine mul_nonneg _ h1.le, rw ←log_inv, apply log_nonneg,
rw [←(le_inv h1 zero_lt_one), inv_one], exact h2, },
rw [←(abs_of_nonneg aux), neg_mul, abs_neg] at this, exact this,
end
/-- The real logarithm function tends to `+∞` at `+∞`. -/
lemma tendsto_log_at_top : tendsto log at_top at_top :=
tendsto_comp_exp_at_top.1 $ by simpa only [log_exp] using tendsto_id
lemma tendsto_log_nhds_within_zero : tendsto log (𝓝[≠] 0) at_bot :=
begin
rw [← (show _ = log, from funext log_abs)],
refine tendsto.comp _ tendsto_abs_nhds_within_zero,
simpa [← tendsto_comp_exp_at_bot] using tendsto_id
end
lemma continuous_on_log : continuous_on log {0}ᶜ :=
begin
rw [continuous_on_iff_continuous_restrict, restrict],
conv in (log _) { rw [log_of_ne_zero (show (x : ℝ) ≠ 0, from x.2)] },
exact exp_order_iso.symm.continuous.comp (continuous_subtype_mk _ continuous_subtype_coe.norm)
end
@[continuity] lemma continuous_log : continuous (λ x : {x : ℝ // x ≠ 0}, log x) :=
continuous_on_iff_continuous_restrict.1 $ continuous_on_log.mono $ λ x hx, hx
@[continuity] lemma continuous_log' : continuous (λ x : {x : ℝ // 0 < x}, log x) :=
continuous_on_iff_continuous_restrict.1 $ continuous_on_log.mono $ λ x hx, ne_of_gt hx
lemma continuous_at_log (hx : x ≠ 0) : continuous_at log x :=
(continuous_on_log x hx).continuous_at $ is_open.mem_nhds is_open_compl_singleton hx
@[simp] lemma continuous_at_log_iff : continuous_at log x ↔ x ≠ 0 :=
begin
refine ⟨_, continuous_at_log⟩,
rintros h rfl,
exact not_tendsto_nhds_of_tendsto_at_bot tendsto_log_nhds_within_zero _
(h.tendsto.mono_left inf_le_left)
end
open_locale big_operators
lemma log_prod {α : Type*} (s : finset α) (f : α → ℝ) (hf : ∀ x ∈ s, f x ≠ 0):
log (∏ i in s, f i) = ∑ i in s, log (f i) :=
begin
induction s using finset.cons_induction_on with a s ha ih,
{ simp },
{ rw [finset.forall_mem_cons] at hf,
simp [ih hf.2, log_mul hf.1 (finset.prod_ne_zero_iff.2 hf.2)] }
end
lemma log_nat_eq_sum_factorization (n : ℕ) : log n = n.factorization.sum (λ p t, t * log p) :=
begin
rcases eq_or_ne n 0 with rfl | hn,
{ simp },
nth_rewrite 0 [←nat.factorization_prod_pow_eq_self hn],
rw [finsupp.prod, nat.cast_prod, log_prod _ _ (λ p hp, _), finsupp.sum],
{ simp_rw [nat.cast_pow, log_pow] },
{ norm_cast,
exact pow_ne_zero _ (nat.prime_of_mem_factorization hp).ne_zero },
end
lemma tendsto_pow_log_div_mul_add_at_top (a b : ℝ) (n : ℕ) (ha : a ≠ 0) :
tendsto (λ x, log x ^ n / (a * x + b)) at_top (𝓝 0) :=
((tendsto_div_pow_mul_exp_add_at_top a b n ha.symm).comp tendsto_log_at_top).congr'
(by filter_upwards [eventually_gt_at_top (0 : ℝ)] with x hx using by simp [exp_log hx])
lemma is_o_pow_log_id_at_top {n : ℕ} : (λ x, log x ^ n) =o[at_top] id :=
begin
rw asymptotics.is_o_iff_tendsto',
{ simpa using tendsto_pow_log_div_mul_add_at_top 1 0 n one_ne_zero },
filter_upwards [eventually_ne_at_top (0 : ℝ)] with x h₁ h₂ using (h₁ h₂).elim,
end
lemma is_o_log_id_at_top : log =o[at_top] id := is_o_pow_log_id_at_top.congr_left (λ x, pow_one _)
end real
section continuity
open real
variables {α : Type*}
lemma filter.tendsto.log {f : α → ℝ} {l : filter α} {x : ℝ} (h : tendsto f l (𝓝 x)) (hx : x ≠ 0) :
tendsto (λ x, log (f x)) l (𝓝 (log x)) :=
(continuous_at_log hx).tendsto.comp h
variables [topological_space α] {f : α → ℝ} {s : set α} {a : α}
lemma continuous.log (hf : continuous f) (h₀ : ∀ x, f x ≠ 0) : continuous (λ x, log (f x)) :=
continuous_on_log.comp_continuous hf h₀
lemma continuous_at.log (hf : continuous_at f a) (h₀ : f a ≠ 0) :
continuous_at (λ x, log (f x)) a :=
hf.log h₀
lemma continuous_within_at.log (hf : continuous_within_at f s a) (h₀ : f a ≠ 0) :
continuous_within_at (λ x, log (f x)) s a :=
hf.log h₀
lemma continuous_on.log (hf : continuous_on f s) (h₀ : ∀ x ∈ s, f x ≠ 0) :
continuous_on (λ x, log (f x)) s :=
λ x hx, (hf x hx).log (h₀ x hx)
end continuity
|
| 971cbed7a9d9dbf54bab8837b8a3111e9cadae26 | c45b34bfd44d8607a2e8762c926e3cfaa7436201 | /uexp/src/uexp/rules/transitiveInferenceJoin.lean | 230dc105a08e980dafdf987a877439bee6700adc | ["BSD-2-Clause"] | permissive | Shamrock-Frost/Cosette | b477c442c07e45082348a145f19ebb35a7f29392 | 24cbc4adebf627f13f5eac878f04ffa20d1209af | refs/heads/master | 1,619,721,304,969 | 1,526,082,841,000 | 1,526,082,841,000 | 121,695,605 | 1 | 0 | null | 1,518,737,210,000 | 1,518,737,210,000 | null | UTF-8 | Lean | false | false | 2,247 | lean |
import ..sql
import ..tactics
import ..u_semiring
import ..extra_constants
import ..ucongr
import ..TDP
set_option profiler true
open Expr
open Proj
open Pred
open SQL
open tree
notation `int` := datatypes.int
variable integer_1: const datatypes.int
variable integer_7: const datatypes.int
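-- Added descriptive comment (not part of the original source): the rewrite proved below is a
-- transitive-inference rule for joins. On the left-hand side only the second copy of `rel_emp`
-- is filtered by `emp_deptno > 7`; on the right-hand side both copies are filtered. Because the
-- join predicate equates the two `emp_deptno` columns, the extra filter is redundant and both
-- queries have the same u-semiring denotation.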
theorem rule:
forall ( Γ scm_t scm_account scm_bonus scm_dept scm_emp: Schema) (rel_t: relation scm_t) (rel_account: relation scm_account) (rel_bonus: relation scm_bonus) (rel_dept: relation scm_dept) (rel_emp: relation scm_emp) (t_k0 : Column int scm_t) (t_c1 : Column int scm_t) (t_f1_a0 : Column int scm_t) (t_f2_a0 : Column int scm_t) (t_f0_c0 : Column int scm_t) (t_f1_c0 : Column int scm_t) (t_f0_c1 : Column int scm_t) (t_f1_c2 : Column int scm_t) (t_f2_c3 : Column int scm_t) (account_acctno : Column int scm_account) (account_type : Column int scm_account) (account_balance : Column int scm_account) (bonus_ename : Column int scm_bonus) (bonus_job : Column int scm_bonus) (bonus_sal : Column int scm_bonus) (bonus_comm : Column int scm_bonus) (dept_deptno : Column int scm_dept) (dept_name : Column int scm_dept) (emp_empno : Column int scm_emp) (emp_ename : Column int scm_emp) (emp_job : Column int scm_emp) (emp_mgr : Column int scm_emp) (emp_hiredate : Column int scm_emp) (emp_comm : Column int scm_emp) (emp_sal : Column int scm_emp) (emp_deptno : Column int scm_emp) (emp_slacker : Column int scm_emp),
denoteSQL ((SELECT1 (e2p (constantExpr integer_1)) FROM1 (product (table rel_emp) ((SELECT * FROM1 (table rel_emp) WHERE (castPred (combine (right⋅emp_deptno) (e2p (constantExpr integer_7)) ) predicates.gt)))) WHERE (equal (uvariable (right⋅left⋅emp_deptno)) (uvariable (right⋅right⋅emp_deptno)))) :SQL Γ _)
=
denoteSQL ((SELECT1 (e2p (constantExpr integer_1)) FROM1 (product ((SELECT * FROM1 (table rel_emp) WHERE (castPred (combine (right⋅emp_deptno) (e2p (constantExpr integer_7)) ) predicates.gt))) ((SELECT * FROM1 (table rel_emp) WHERE (castPred (combine (right⋅emp_deptno) (e2p (constantExpr integer_7)) ) predicates.gt)))) WHERE (equal (uvariable (right⋅left⋅emp_deptno)) (uvariable (right⋅right⋅emp_deptno)))) :SQL Γ _) :=
begin
intros,
unfold_all_denotations,
funext,
try {simp},
TDP
end
|
| 1c3fddb00898f44f25fc4963ba27eee41048d1d4 | 4727251e0cd73359b15b664c3170e5d754078599 | /src/category_theory/idempotents/functor_categories.lean | 910bdec18751e7d6ad18b6f6e3c7288d9cf5b33f | ["Apache-2.0"] | permissive | Vierkantor/mathlib | 0ea59ac32a3a43c93c44d70f441c4ee810ccceca | 83bc3b9ce9b13910b57bda6b56222495ebd31c2f | refs/heads/master | 1,658,323,012,449 | 1,652,256,003,000 | 1,652,256,003,000 | 209,296,341 | 0 | 1 | Apache-2.0 | 1,568,807,655,000 | 1,568,807,655,000 | null | UTF-8 | Lean | false | false | 5,752 | lean |
/-
Copyright (c) 2022 Joël Riou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joël Riou
-/
import category_theory.idempotents.karoubi
/-!
# Idempotent completeness and functor categories
In this file we define an instance `functor_category_is_idempotent_complete` expressing
that a functor category `J ⥤ C` is idempotent complete when the target category `C` is.
We also provide a fully faithful functor
`karoubi_functor_category_embedding : karoubi (J ⥤ C) ⥤ (J ⥤ karoubi C)` for all categories
`J` and `C`.
-/
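/- Added illustrative note (not part of the original source): idempotent completeness of `J ⥤ C`
is obtained pointwise. Given an idempotent natural transformation `p : F ⟶ F`, the direct factor
is built object-by-object as the equalizer of `𝟙 (F.obj j)` and `p.app j`, which exists in `C`
by `is_idempotent_complete_iff_has_equalizer_of_id_and_idempotent`. -/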
open category_theory
open category_theory.category
open category_theory.idempotents.karoubi
open category_theory.limits
namespace category_theory
namespace idempotents
variables (J C : Type*) [category J] [category C]
instance functor_category_is_idempotent_complete [is_idempotent_complete C] :
is_idempotent_complete (J ⥤ C) :=
begin
refine ⟨_⟩,
intros F p hp,
have hC := (is_idempotent_complete_iff_has_equalizer_of_id_and_idempotent C).mp infer_instance,
haveI : ∀ (j : J), has_equalizer (𝟙 _) (p.app j) := λ j, hC _ _ (congr_app hp j),
/- We construct the direct factor `Y` associated to `p : F ⟶ F` by computing
the equalizer of the identity and `p.app j` on each object `(j : J)`. -/
let Y : J ⥤ C :=
{ obj := λ j, limits.equalizer (𝟙 _) (p.app j),
map := λ j j' φ, equalizer.lift (limits.equalizer.ι (𝟙 _) (p.app j) ≫ F.map φ)
(by rw [comp_id, assoc, p.naturality φ, ← assoc, ← limits.equalizer.condition, comp_id]),
map_id' := λ j, by { ext, simp only [comp_id, functor.map_id, equalizer.lift_ι, id_comp], },
map_comp' := λ j j' j'' φ φ', begin
ext,
simp only [assoc, functor.map_comp, equalizer.lift_ι, equalizer.lift_ι_assoc],
end },
let i : Y ⟶ F :=
{ app := λ j, equalizer.ι _ _,
naturality' := λ j j' φ, by rw [equalizer.lift_ι], },
let e : F ⟶ Y :=
{ app := λ j, equalizer.lift (p.app j)
(by { rw comp_id, exact (congr_app hp j).symm, }),
naturality' := λ j j' φ, begin
ext,
simp only [assoc, equalizer.lift_ι, nat_trans.naturality, equalizer.lift_ι_assoc],
end },
use [Y, i, e],
split; ext j,
{ simp only [nat_trans.comp_app, assoc, equalizer.lift_ι, nat_trans.id_app, id_comp,
← equalizer.condition, comp_id], },
{ simp only [nat_trans.comp_app, equalizer.lift_ι], },
end
namespace karoubi_functor_category_embedding
variables {J C}
/-- On objects, the functor which sends a formal direct factor `P` of a
functor `F : J ⥤ C` to the functor `J ⥤ karoubi C` which sends `(j : J)` to
the corresponding direct factor of `F.obj j`. -/
@[simps]
def obj (P : karoubi (J ⥤ C)) : J ⥤ karoubi C :=
{ obj := λ j, ⟨P.X.obj j, P.p.app j, congr_app P.idem j⟩,
map := λ j j' φ,
{ f := P.p.app j ≫ P.X.map φ,
comm := begin
simp only [nat_trans.naturality, assoc],
have h := congr_app P.idem j,
rw [nat_trans.comp_app] at h,
slice_rhs 1 3 { erw [h, h], },
end },
map_id' := λ j, by { ext, simp only [functor.map_id, comp_id, id_eq], },
map_comp' := λ j j' j'' φ φ', begin
ext,
have h := congr_app P.idem j,
rw [nat_trans.comp_app] at h,
simp only [assoc, nat_trans.naturality_assoc, functor.map_comp, comp],
slice_rhs 1 2 { rw h, },
rw [assoc],
end }
/-- Tautological action on maps of the functor `karoubi (J ⥤ C) ⥤ (J ⥤ karoubi C)`. -/
@[simps]
def map {P Q : karoubi (J ⥤ C)} (f : P ⟶ Q) : obj P ⟶ obj Q :=
{ app := λ j, ⟨f.f.app j, congr_app f.comm j⟩,
naturality' := λ j j' φ, begin
ext,
simp only [comp],
have h := congr_app (comp_p f) j,
have h' := congr_app (p_comp f) j',
dsimp at h h' ⊢,
slice_rhs 1 2 { erw h, },
rw ← P.p.naturality,
slice_lhs 2 3 { erw h', },
rw f.f.naturality,
end }
end karoubi_functor_category_embedding
variables (J C)
/-- The tautological fully faithful functor `karoubi (J ⥤ C) ⥤ (J ⥤ karoubi C)`. -/
@[simps]
def karoubi_functor_category_embedding :
karoubi (J ⥤ C) ⥤ (J ⥤ karoubi C) :=
{ obj := karoubi_functor_category_embedding.obj,
map := λ P Q, karoubi_functor_category_embedding.map,
map_id' := λ P, rfl,
map_comp' := λ P Q R f g, rfl, }
instance : full (karoubi_functor_category_embedding J C) :=
{ preimage := λ P Q f,
{ f :=
{ app := λ j, (f.app j).f,
naturality' := λ j j' φ, begin
slice_rhs 1 1 { rw ← karoubi.comp_p, },
have h := hom_ext.mp (f.naturality φ),
simp only [comp] at h,
dsimp [karoubi_functor_category_embedding] at h ⊢,
erw [assoc, ← h, ← P.p.naturality φ, assoc, p_comp (f.app j')],
end },
comm := by { ext j, exact (f.app j).comm, } },
witness' := λ P Q f, by { ext j, refl, }, }
instance : faithful (karoubi_functor_category_embedding J C) :=
{ map_injective' := λ P Q f f' h, by { ext j, exact hom_ext.mp (congr_app h j), }, }
/-- The composition of `(J ⥤ C) ⥤ karoubi (J ⥤ C)` and `karoubi (J ⥤ C) ⥤ (J ⥤ karoubi C)`
equals the functor `(J ⥤ C) ⥤ (J ⥤ karoubi C)` given by the composition with
`to_karoubi C : C ⥤ karoubi C`. -/
lemma to_karoubi_comp_karoubi_functor_category_embedding :
(to_karoubi _) ⋙ karoubi_functor_category_embedding J C =
(whiskering_right J _ _).obj (to_karoubi C) :=
begin
apply functor.ext,
{ intros X Y f,
ext j,
dsimp [to_karoubi],
simp only [eq_to_hom_app, eq_to_hom_refl, id_comp],
erw [comp_id], },
{ intro X,
apply functor.ext,
{ intros j j' φ,
ext,
dsimp,
simpa only [comp_id, id_comp], },
{ intro j,
refl, }, }
end
end idempotents
end category_theory
|
| 4398031971b0f415bede8b15277411eb3b11ded5 | d29d82a0af640c937e499f6be79fc552eae0aa13 | /src/group_theory/quotient_group.lean | 59f39734f54f29eb18f4e6805742ec2c51171563 | ["Apache-2.0"] | permissive | AbdulMajeedkhurasani/mathlib | 835f8a5c5cf3075b250b3737172043ab4fa1edf6 | 79bc7323b164aebd000524ebafd198eb0e17f956 | refs/heads/master | 1,688,003,895,660 | 1,627,788,521,000 | 1,627,788,521,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 16,578 | lean |
/-
Copyright (c) 2018 Kevin Buzzard, Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard, Patrick Massot
This file is to a certain extent based on `quotient_module.lean` by Johannes Hölzl.
-/
import group_theory.coset
/-!
# Quotients of groups by normal subgroups
This files develops the basic theory of quotients of groups by normal subgroups. In particular it
proves Noether's first and second isomorphism theorems.
## Main definitions
* `mk'`: the canonical group homomorphism `G →* G/N` given a normal subgroup `N` of `G`.
* `lift φ`: the group homomorphism `G/N →* H` given a group homomorphism `φ : G →* H` such that
`N ⊆ ker φ`.
* `map f`: the group homomorphism `G/N →* H/M` given a group homomorphism `f : G →* H` such that
`N ⊆ f⁻¹(M)`.
## Main statements
* `quotient_ker_equiv_range`: Noether's first isomorphism theorem, an explicit isomorphism
`G/ker φ → range φ` for every group homomorphism `φ : G →* H`.
* `quotient_inf_equiv_prod_normal_quotient`: Noether's second isomorphism theorem, an explicit
isomorphism between `H/(H ∩ N)` and `(HN)/N` given a subgroup `H` and a normal subgroup `N` of a
group `G`.
* `quotient_group.quotient_quotient_equiv_quotient`: Noether's third isomorphism theorem,
the canonical isomorphism between `(G / M) / (M / N)` and `G / N`, where `N ≤ M`.
## Tags
isomorphism theorems, quotient groups
-/
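/- Added illustrative note (not part of the original source): a typical use of these definitions
is Noether's first isomorphism theorem below. Given `φ : G →* H`, `lift` descends `φ` to
`G/ker φ`, `range_ker_lift` restricts the codomain to `range φ`, and
`quotient_ker_equiv_range` packages its injectivity and surjectivity into a `mul_equiv`
`G/ker φ ≃* range φ`. -/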
universes u v
namespace quotient_group
variables {G : Type u} [group G] (N : subgroup G) [nN : N.normal] {H : Type v} [group H]
include nN
-- Define the `div_inv_monoid` before the `group` structure,
-- to make sure we have `inv` fully defined before we show `mul_left_inv`.
-- TODO: is there a non-invasive way of defining this in one declaration?
@[to_additive quotient_add_group.div_inv_monoid]
instance : div_inv_monoid (quotient N) :=
{ one := (1 : G),
mul := quotient.map₂' (*)
(λ a₁ b₁ hab₁ a₂ b₂ hab₂,
((N.mul_mem_cancel_right (N.inv_mem hab₂)).1
(by rw [mul_inv_rev, mul_inv_rev, ← mul_assoc (a₂⁻¹ * a₁⁻¹),
mul_assoc _ b₂, ← mul_assoc b₂, mul_inv_self, one_mul, mul_assoc (a₂⁻¹)];
exact nN.conj_mem _ hab₁ _))),
mul_assoc := λ a b c, quotient.induction_on₃' a b c
(λ a b c, congr_arg mk (mul_assoc a b c)),
one_mul := λ a, quotient.induction_on' a
(λ a, congr_arg mk (one_mul a)),
mul_one := λ a, quotient.induction_on' a
(λ a, congr_arg mk (mul_one a)),
inv := λ a, quotient.lift_on' a (λ a, ((a⁻¹ : G) : quotient N))
(λ a b hab, quotient.sound' begin
show a⁻¹⁻¹ * b⁻¹ ∈ N,
rw ← mul_inv_rev,
exact N.inv_mem (nN.mem_comm hab)
end) }
@[to_additive quotient_add_group.add_group]
instance : group (quotient N) :=
{ mul_left_inv := λ a, quotient.induction_on' a
(λ a, congr_arg mk (mul_left_inv a)),
.. quotient.div_inv_monoid _ }
/-- The group homomorphism from `G` to `G/N`. -/
@[to_additive quotient_add_group.mk' "The additive group homomorphism from `G` to `G/N`."]
def mk' : G →* quotient N := monoid_hom.mk' (quotient_group.mk) (λ _ _, rfl)
@[to_additive, simp]
lemma coe_mk' : (mk' N : G → quotient N) = coe := rfl
@[to_additive, simp]
lemma mk'_apply (x : G) : mk' N x = x := rfl
@[simp, to_additive quotient_add_group.eq_zero_iff]
lemma eq_one_iff {N : subgroup G} [nN : N.normal] (x : G) : (x : quotient N) = 1 ↔ x ∈ N :=
begin
refine quotient_group.eq.trans _,
rw [mul_one, subgroup.inv_mem_iff],
end
@[simp, to_additive quotient_add_group.ker_mk]
lemma ker_mk :
monoid_hom.ker (quotient_group.mk' N : G →* quotient_group.quotient N) = N :=
subgroup.ext eq_one_iff
@[to_additive quotient_add_group.eq_iff_sub_mem]
lemma eq_iff_div_mem {N : subgroup G} [nN : N.normal] {x y : G} :
(x : quotient N) = y ↔ x / y ∈ N :=
begin
refine eq_comm.trans (quotient_group.eq.trans _),
rw [nN.mem_comm_iff, div_eq_mul_inv]
end
-- for commutative groups we don't need normality assumption
omit nN
@[to_additive quotient_add_group.add_comm_group]
instance {G : Type*} [comm_group G] (N : subgroup G) : comm_group (quotient N) :=
{ mul_comm := λ a b, quotient.induction_on₂' a b
(λ a b, congr_arg mk (mul_comm a b)),
.. @quotient_group.quotient.group _ _ N N.normal_of_comm }
include nN
local notation ` Q ` := quotient N
@[simp, to_additive quotient_add_group.coe_zero]
lemma coe_one : ((1 : G) : Q) = 1 := rfl
@[simp, to_additive quotient_add_group.coe_add]
lemma coe_mul (a b : G) : ((a * b : G) : Q) = a * b := rfl
@[simp, to_additive quotient_add_group.coe_neg]
lemma coe_inv (a : G) : ((a⁻¹ : G) : Q) = a⁻¹ := rfl
@[simp] lemma coe_pow (a : G) (n : ℕ) : ((a ^ n : G) : Q) = a ^ n :=
(mk' N).map_pow a n
@[simp] lemma coe_gpow (a : G) (n : ℤ) : ((a ^ n : G) : Q) = a ^ n :=
(mk' N).map_gpow a n
/-- A group homomorphism `φ : G →* H` with `N ⊆ ker(φ)` descends (i.e. `lift`s) to a
group homomorphism `G/N →* H`. -/
@[to_additive quotient_add_group.lift "An `add_group` homomorphism `φ : G →+ H` with `N ⊆ ker(φ)`
descends (i.e. `lift`s) to a group homomorphism `G/N →* H`."]
def lift (φ : G →* H) (HN : ∀x∈N, φ x = 1) : Q →* H :=
monoid_hom.mk'
(λ q : Q, q.lift_on' φ $ assume a b (hab : a⁻¹ * b ∈ N),
(calc φ a = φ a * 1 : (mul_one _).symm
... = φ a * φ (a⁻¹ * b) : HN (a⁻¹ * b) hab ▸ rfl
... = φ (a * (a⁻¹ * b)) : (is_mul_hom.map_mul φ a (a⁻¹ * b)).symm
... = φ b : by rw mul_inv_cancel_left))
(λ q r, quotient.induction_on₂' q r $ is_mul_hom.map_mul φ)
@[simp, to_additive quotient_add_group.lift_mk]
lemma lift_mk {φ : G →* H} (HN : ∀x∈N, φ x = 1) (g : G) :
lift N φ HN (g : Q) = φ g := rfl
@[simp, to_additive quotient_add_group.lift_mk']
lemma lift_mk' {φ : G →* H} (HN : ∀x∈N, φ x = 1) (g : G) :
lift N φ HN (mk g : Q) = φ g := rfl
@[simp, to_additive quotient_add_group.lift_quot_mk]
lemma lift_quot_mk {φ : G →* H} (HN : ∀x∈N, φ x = 1) (g : G) :
lift N φ HN (quot.mk _ g : Q) = φ g := rfl
/-- A group homomorphism `f : G →* H` induces a map `G/N →* H/M` if `N ⊆ f⁻¹(M)`. -/
@[to_additive quotient_add_group.map "An `add_group` homomorphism `f : G →+ H` induces a map
`G/N →+ H/M` if `N ⊆ f⁻¹(M)`."]
def map (M : subgroup H) [M.normal] (f : G →* H) (h : N ≤ M.comap f) :
quotient N →* quotient M :=
begin
refine quotient_group.lift N ((mk' M).comp f) _,
assume x hx,
refine quotient_group.eq.2 _,
rw [mul_one, subgroup.inv_mem_iff],
exact h hx,
end
@[simp, to_additive quotient_add_group.map_coe] lemma map_coe
(M : subgroup H) [M.normal] (f : G →* H) (h : N ≤ M.comap f) (x : G) :
map N M f h ↑x = ↑(f x) :=
lift_mk' _ _ x
@[to_additive quotient_add_group.map_mk'] lemma map_mk'
(M : subgroup H) [M.normal] (f : G →* H) (h : N ≤ M.comap f) (x : G) :
map N M f h (mk' _ x) = ↑(f x) :=
quotient_group.lift_mk' _ _ x
omit nN
variables (φ : G →* H)
open function monoid_hom
/-- The induced map from the quotient by the kernel to the codomain. -/
@[to_additive quotient_add_group.ker_lift "The induced map from the quotient by the kernel to the
codomain."]
def ker_lift : quotient (ker φ) →* H :=
lift _ φ $ λ g, φ.mem_ker.mp
@[simp, to_additive quotient_add_group.ker_lift_mk]
lemma ker_lift_mk (g : G) : (ker_lift φ) g = φ g :=
lift_mk _ _ _
@[simp, to_additive quotient_add_group.ker_lift_mk']
lemma ker_lift_mk' (g : G) : (ker_lift φ) (mk g) = φ g :=
lift_mk' _ _ _
@[to_additive quotient_add_group.injective_ker_lift]
lemma ker_lift_injective : injective (ker_lift φ) :=
assume a b, quotient.induction_on₂' a b $
assume a b (h : φ a = φ b), quotient.sound' $
show a⁻¹ * b ∈ ker φ, by rw [mem_ker,
is_mul_hom.map_mul φ, ← h, is_group_hom.map_inv φ, inv_mul_self]
-- Note that `ker φ` isn't definitionally `ker (φ.range_restrict)`
-- so there is a bit of annoying code duplication here
/-- The induced map from the quotient by the kernel to the range. -/
@[to_additive quotient_add_group.range_ker_lift "The induced map from the quotient by the kernel to
the range."]
def range_ker_lift : quotient (ker φ) →* φ.range :=
lift _ φ.range_restrict $ λ g hg, (mem_ker _).mp $ by rwa range_restrict_ker
@[to_additive quotient_add_group.range_ker_lift_injective]
lemma range_ker_lift_injective : injective (range_ker_lift φ) :=
assume a b, quotient.induction_on₂' a b $
assume a b (h : φ.range_restrict a = φ.range_restrict b), quotient.sound' $
show a⁻¹ * b ∈ ker φ, by rw [←range_restrict_ker, mem_ker,
φ.range_restrict.map_mul, ← h, φ.range_restrict.map_inv, inv_mul_self]
@[to_additive quotient_add_group.range_ker_lift_surjective]
lemma range_ker_lift_surjective : surjective (range_ker_lift φ) :=
begin
rintro ⟨_, g, rfl⟩,
use mk g,
refl,
end
/-- **Noether's first isomorphism theorem** (a definition): the canonical isomorphism between
`G/(ker φ)` and `range φ`. -/
@[to_additive quotient_add_group.quotient_ker_equiv_range "**Noether's first isomorphism theorem**
(a definition): the canonical isomorphism between `G/(ker φ)` and `range φ`."]
noncomputable def quotient_ker_equiv_range : (quotient (ker φ)) ≃* range φ :=
mul_equiv.of_bijective (range_ker_lift φ) ⟨range_ker_lift_injective φ, range_ker_lift_surjective φ⟩
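/- A concrete reading (an informal sketch; the specific instances live in other files): for the
natural surjection `ℤ →+ zmod n`, whose kernel is the subgroup of multiples of `n`, the additive
analogue `quotient_add_group.quotient_ker_equiv_range` identifies the quotient of `ℤ` by that
kernel with `zmod n`. -/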
/-- The canonical isomorphism `G/(ker φ) ≃* H` induced by a homomorphism `φ : G →* H`
with a right inverse `ψ : H → G`. -/
@[to_additive quotient_add_group.quotient_ker_equiv_of_right_inverse "The canonical isomorphism
`G/(ker φ) ≃+ H` induced by a homomorphism `φ : G →+ H` with a right inverse `ψ : H → G`.",
simps]
def quotient_ker_equiv_of_right_inverse (ψ : H → G) (hφ : function.right_inverse ψ φ) :
quotient (ker φ) ≃* H :=
{ to_fun := ker_lift φ,
inv_fun := mk ∘ ψ,
left_inv := λ x, ker_lift_injective φ (by rw [function.comp_app, ker_lift_mk', hφ]),
right_inv := hφ,
.. ker_lift φ }
/-- The canonical isomorphism `G/(ker φ) ≃* H` induced by a surjection `φ : G →* H`.
For a `computable` version, see `quotient_group.quotient_ker_equiv_of_right_inverse`.
-/
@[to_additive quotient_add_group.quotient_ker_equiv_of_surjective "The canonical isomorphism
`G/(ker φ) ≃+ H` induced by a surjection `φ : G →+ H`.
For a `computable` version, see `quotient_add_group.quotient_ker_equiv_of_right_inverse`."]
noncomputable def quotient_ker_equiv_of_surjective (hφ : function.surjective φ) :
quotient (ker φ) ≃* H :=
quotient_ker_equiv_of_right_inverse φ _ hφ.has_right_inverse.some_spec
/-- If two normal subgroups `M` and `N` of `G` are the same, their quotient groups are
isomorphic. -/
@[to_additive "If two normal subgroups `M` and `N` of `G` are the same, their quotient groups are
isomorphic."]
def equiv_quotient_of_eq {M N : subgroup G} [M.normal] [N.normal] (h : M = N) :
quotient M ≃* quotient N :=
{ to_fun := (lift M (mk' N) (λ m hm, quotient_group.eq.mpr (by simpa [← h] using M.inv_mem hm))),
inv_fun := (lift N (mk' M) (λ n hn, quotient_group.eq.mpr (by simpa [← h] using N.inv_mem hn))),
left_inv := λ x, x.induction_on' $ by { intro, refl },
right_inv := λ x, x.induction_on' $ by { intro, refl },
map_mul' := λ x y, by rw map_mul }
/-- Let `A', A, B', B` be subgroups of `G`. If `A' ≤ B'` and `A ≤ B`,
then there is a map `A / (A' ⊓ A) →* B / (B' ⊓ B)` induced by the inclusions. -/
@[to_additive "Let `A', A, B', B` be subgroups of `G`. If `A' ≤ B'` and `A ≤ B`,
then there is a map `A / (A' ⊓ A) →+ B / (B' ⊓ B)` induced by the inclusions."]
def quotient_map_subgroup_of_of_le {A' A B' B : subgroup G}
[hAN : (A'.subgroup_of A).normal] [hBN : (B'.subgroup_of B).normal]
(h' : A' ≤ B') (h : A ≤ B) :
quotient (A'.subgroup_of A) →* quotient (B'.subgroup_of B) :=
map _ _ (subgroup.inclusion h) $
by simp [subgroup.subgroup_of, subgroup.comap_comap]; exact subgroup.comap_mono h'
/-- Let `A', A, B', B` be subgroups of `G`.
If `A' = B'` and `A = B`, then the quotients `A / (A' ⊓ A)` and `B / (B' ⊓ B)` are isomorphic.
Applying this equiv is nicer than rewriting along the equalities, since the type of
`(A'.subgroup_of A : subgroup A)` depends on `A`.
-/
@[to_additive "Let `A', A, B', B` be subgroups of `G`.
If `A' = B'` and `A = B`, then the quotients `A / (A' ⊓ A)` and `B / (B' ⊓ B)` are isomorphic.
Applying this equiv is nicer than rewriting along the equalities, since the type of
`(A'.add_subgroup_of A : add_subgroup A)` depends on `A`.
"]
def equiv_quotient_subgroup_of_of_eq {A' A B' B : subgroup G}
[hAN : (A'.subgroup_of A).normal] [hBN : (B'.subgroup_of B).normal]
(h' : A' = B') (h : A = B) :
quotient (A'.subgroup_of A) ≃* quotient (B'.subgroup_of B) :=
by apply monoid_hom.to_mul_equiv
(quotient_map_subgroup_of_of_le h'.le h.le) (quotient_map_subgroup_of_of_le h'.ge h.ge);
{ ext ⟨x⟩, simp [quotient_map_subgroup_of_of_le, map, lift, mk', subgroup.inclusion], refl }
section snd_isomorphism_thm
open subgroup
/-- **Noether's second isomorphism theorem**: given two subgroups `H` and `N` of a group `G`, where
`N` is normal, defines an isomorphism between `H/(H ∩ N)` and `(HN)/N`. -/
@[to_additive "The second isomorphism theorem: given two subgroups `H` and `N` of a group `G`,
where `N` is normal, defines an isomorphism between `H/(H ∩ N)` and `(H + N)/N`"]
noncomputable def quotient_inf_equiv_prod_normal_quotient (H N : subgroup G) [N.normal] :
quotient ((H ⊓ N).comap H.subtype) ≃* quotient (N.comap (H ⊔ N).subtype) :=
/- φ is the natural homomorphism H →* (HN)/N. -/
let φ : H →* quotient (N.comap (H ⊔ N).subtype) :=
(mk' $ N.comap (H ⊔ N).subtype).comp (inclusion le_sup_left) in
have φ_surjective : function.surjective φ := λ x, x.induction_on' $
begin
rintro ⟨y, (hy : y ∈ ↑(H ⊔ N))⟩, rw mul_normal H N at hy,
rcases hy with ⟨h, n, hh, hn, rfl⟩,
use [h, hh], apply quotient.eq.mpr, change h⁻¹ * (h * n) ∈ N,
rwa [←mul_assoc, inv_mul_self, one_mul],
end,
(equiv_quotient_of_eq (by simp [comap_comap, ←comap_ker])).trans
(quotient_ker_equiv_of_surjective φ φ_surjective)
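/- A concrete reading (an informal sketch): inside the symmetric group `S₃`, take `H` a subgroup
of order `2` and `N = A₃`, which is normal. Then `H ⊓ N` is trivial and `H ⊔ N = S₃`, so the
isomorphism above specialises to `H ≃* S₃ / A₃`; indeed both sides have order `2`. -/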
end snd_isomorphism_thm
section third_iso_thm
variables (M : subgroup G) [nM : M.normal]
include nM nN
@[to_additive quotient_add_group.map_normal]
instance map_normal : (M.map (quotient_group.mk' N)).normal :=
{ conj_mem := begin
rintro _ ⟨x, hx, rfl⟩ y,
refine induction_on' y (λ y, ⟨y * x * y⁻¹, subgroup.normal.conj_mem nM x hx y, _⟩),
simp only [mk'_apply, coe_mul, coe_inv]
end }
variables (h : N ≤ M)
/-- The map from the third isomorphism theorem for groups: `(G / N) / (M / N) → G / M`. -/
@[to_additive quotient_add_group.quotient_quotient_equiv_quotient_aux
"The map from the third isomorphism theorem for additive groups: `(A / N) / (M / N) → A / M`."]
def quotient_quotient_equiv_quotient_aux :
quotient (M.map (mk' N)) →* quotient M :=
lift (M.map (mk' N))
(map N M (monoid_hom.id G) h)
(by { rintro _ ⟨x, hx, rfl⟩, rw map_mk' N M _ _ x,
exact (quotient_group.eq_one_iff _).mpr hx })
@[simp, to_additive quotient_add_group.quotient_quotient_equiv_quotient_aux_coe]
lemma quotient_quotient_equiv_quotient_aux_coe (x : quotient_group.quotient N) :
quotient_quotient_equiv_quotient_aux N M h x = quotient_group.map N M (monoid_hom.id G) h x :=
quotient_group.lift_mk' _ _ x
@[to_additive quotient_add_group.quotient_quotient_equiv_quotient_aux_coe_coe]
lemma quotient_quotient_equiv_quotient_aux_coe_coe (x : G) :
quotient_quotient_equiv_quotient_aux N M h (x : quotient_group.quotient N) =
x :=
quotient_group.lift_mk' _ _ x
/-- **Noether's third isomorphism theorem** for groups: `(G / N) / (M / N) ≃ G / M`. -/
@[to_additive quotient_add_group.quotient_quotient_equiv_quotient
"**Noether's third isomorphism theorem** for additive groups: `(A / N) / (M / N) ≃ A / M`."]
def quotient_quotient_equiv_quotient :
quotient_group.quotient (M.map (quotient_group.mk' N)) ≃* quotient_group.quotient M :=
{ to_fun := quotient_quotient_equiv_quotient_aux N M h,
inv_fun := quotient_group.map _ _ (quotient_group.mk' N) (subgroup.le_comap_map _ _),
left_inv := λ x, quotient_group.induction_on' x $ λ x, quotient_group.induction_on' x $
λ x, by simp,
right_inv := λ x, quotient_group.induction_on' x $ λ x, by simp,
map_mul' := monoid_hom.map_mul _ }
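/- A concrete reading (an informal sketch, written additively): take `G = ℤ`, `N` the multiples of
`6` and `M` the multiples of `2`, so that `N ≤ M`. The additive version then identifies
`(ℤ/6ℤ) / (2ℤ/6ℤ)` with `ℤ/2ℤ`. -/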
end third_iso_thm
end quotient_group
| file record | repo: EdAyers/lean4 (refs/heads/master) | path: /src/Init/Data/Float.lean | blob: ae0f6d7ec00eb71f2e4e0d622691b0a4b55de77a |
| license type: permissive (detected: Apache-2.0, LLVM-exception, NCSA, LGPL-3.0-only, LicenseRef-scancode-inner-net-2.0, BSD-3-Clause, LGPL-2.0-or-later, Spencer-94, LGPL-2.1-or-later, HPND, LicenseRef-scancode-pcre, ISC, LGPL-2.1-only, LicenseRef-scancode-other-permissive, SunPro, CMU-Mach) |
| language: Lean | encoding: UTF-8 | size: 4,656 bytes | extension: lean |
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Core
import Init.Data.Int.Basic
import Init.Data.ToString.Basic
structure FloatSpec where
float : Type
val : float
lt : float → float → Prop
le : float → float → Prop
decLt : DecidableRel lt
decLe : DecidableRel le
-- Just show FloatSpec is inhabited.
opaque floatSpec : FloatSpec := {
float := Unit,
val := (),
lt := fun _ _ => True,
le := fun _ _ => True,
decLt := fun _ _ => inferInstanceAs (Decidable True),
decLe := fun _ _ => inferInstanceAs (Decidable True)
}
structure Float where
val : floatSpec.float
instance : Inhabited Float := ⟨{ val := floatSpec.val }⟩
@[extern "lean_float_add"] opaque Float.add : Float → Float → Float
@[extern "lean_float_sub"] opaque Float.sub : Float → Float → Float
@[extern "lean_float_mul"] opaque Float.mul : Float → Float → Float
@[extern "lean_float_div"] opaque Float.div : Float → Float → Float
@[extern "lean_float_negate"] opaque Float.neg : Float → Float
set_option bootstrap.genMatcherCode false
def Float.lt : Float → Float → Prop := fun a b =>
match a, b with
| ⟨a⟩, ⟨b⟩ => floatSpec.lt a b
def Float.le : Float → Float → Prop := fun a b =>
floatSpec.le a.val b.val
instance : Add Float := ⟨Float.add⟩
instance : Sub Float := ⟨Float.sub⟩
instance : Mul Float := ⟨Float.mul⟩
instance : Div Float := ⟨Float.div⟩
instance : Neg Float := ⟨Float.neg⟩
instance : LT Float := ⟨Float.lt⟩
instance : LE Float := ⟨Float.le⟩
@[extern "lean_float_beq"] opaque Float.beq (a b : Float) : Bool
instance : BEq Float := ⟨Float.beq⟩
@[extern "lean_float_decLt"] opaque Float.decLt (a b : Float) : Decidable (a < b) :=
match a, b with
| ⟨a⟩, ⟨b⟩ => floatSpec.decLt a b
@[extern "lean_float_decLe"] opaque Float.decLe (a b : Float) : Decidable (a ≤ b) :=
match a, b with
| ⟨a⟩, ⟨b⟩ => floatSpec.decLe a b
instance floatDecLt (a b : Float) : Decidable (a < b) := Float.decLt a b
instance floatDecLe (a b : Float) : Decidable (a ≤ b) := Float.decLe a b
@[extern "lean_float_to_string"] opaque Float.toString : Float → String
@[extern "lean_float_to_uint8"] opaque Float.toUInt8 : Float → UInt8
@[extern "lean_float_to_uint16"] opaque Float.toUInt16 : Float → UInt16
@[extern "lean_float_to_uint32"] opaque Float.toUInt32 : Float → UInt32
@[extern "lean_float_to_uint64"] opaque Float.toUInt64 : Float → UInt64
@[extern "lean_float_to_usize"] opaque Float.toUSize : Float → USize
@[extern "lean_float_isnan"] opaque Float.isNaN : Float → Bool
@[extern "lean_float_isfinite"] opaque Float.isFinite : Float → Bool
@[extern "lean_float_isinf"] opaque Float.isInf : Float → Bool
@[extern "lean_float_frexp"] opaque Float.frExp : Float → Float × Int
instance : ToString Float where
toString := Float.toString
instance : Repr Float where
reprPrec n _ := Float.toString n
instance : ReprAtom Float := ⟨⟩
@[extern "lean_uint64_to_float"] opaque UInt64.toFloat (n : UInt64) : Float
@[extern "sin"] opaque Float.sin : Float → Float
@[extern "cos"] opaque Float.cos : Float → Float
@[extern "tan"] opaque Float.tan : Float → Float
@[extern "asin"] opaque Float.asin : Float → Float
@[extern "acos"] opaque Float.acos : Float → Float
@[extern "atan"] opaque Float.atan : Float → Float
@[extern "atan2"] opaque Float.atan2 : Float → Float → Float
@[extern "sinh"] opaque Float.sinh : Float → Float
@[extern "cosh"] opaque Float.cosh : Float → Float
@[extern "tanh"] opaque Float.tanh : Float → Float
@[extern "asinh"] opaque Float.asinh : Float → Float
@[extern "acosh"] opaque Float.acosh : Float → Float
@[extern "atanh"] opaque Float.atanh : Float → Float
@[extern "exp"] opaque Float.exp : Float → Float
@[extern "exp2"] opaque Float.exp2 : Float → Float
@[extern "log"] opaque Float.log : Float → Float
@[extern "log2"] opaque Float.log2 : Float → Float
@[extern "log10"] opaque Float.log10 : Float → Float
@[extern "pow"] opaque Float.pow : Float → Float → Float
@[extern "sqrt"] opaque Float.sqrt : Float → Float
@[extern "cbrt"] opaque Float.cbrt : Float → Float
@[extern "ceil"] opaque Float.ceil : Float → Float
@[extern "floor"] opaque Float.floor : Float → Float
@[extern "round"] opaque Float.round : Float → Float
instance : Pow Float Float := ⟨Float.pow⟩
/--
Efficiently computes `x * 2^i`.
-/
@[extern "lean_float_scaleb"]
opaque Float.scaleB (x : Float) (i : @& Int) : Float
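/-
A hedged usage sketch, kept as comments because this prelude file has neither float literals nor
`#eval` available; in an ordinary downstream file one would expect:
  #eval Float.scaleB 1.5 3      -- 1.5 * 2^3  = 12
  #eval Float.scaleB 12.0 (-3)  -- 12 * 2^(-3) = 1.5
-/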
| file record | repo: banksonian/lean4 (refs/heads/master) | path: /stage0/src/Lean/PrettyPrinter/Parenthesizer.lean | blob: 2d6254e361a7c28cc31c62246926dcddd7574fd4 |
| license type: permissive (detected: Apache-2.0) | language: Lean | encoding: UTF-8 | size: 30,115 bytes | extension: lean |
/-
Copyright (c) 2020 Sebastian Ullrich. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
-/
/-!
The parenthesizer inserts parentheses into a `Syntax` object where syntactically necessary, usually as an intermediary
step between the delaborator and the formatter. While the delaborator outputs structurally well-formed syntax trees that
can be re-elaborated without post-processing, this tree structure is lost in the formatter and thus needs to be
preserved by proper insertion of parentheses.
# The abstract problem & solution
The Lean 4 grammar is unstructured and extensible with arbitrary new parsers, so in general it is undecidable whether
parentheses are necessary or even allowed at any point in the syntax tree. Parentheses for different categories, e.g.
terms and levels, might not even have the same structure. In this module, we focus on the correct parenthesization of
parsers defined via `Lean.Parser.prattParser`, which includes both aforementioned built-in categories. Custom
parenthesizers can be added for new node kinds, but the data collected in the implementation below might not be
appropriate for other parenthesization strategies.
Usages of a parser defined via `prattParser` in general have the form `p prec`, where `prec` is the minimum precedence
or binding power. Recall that a Pratt parser greedily runs a leading parser with precedence at least `prec` (otherwise
it fails) followed by zero or more trailing parsers with precedence at least `prec`; the precedence of a parser is
encoded in the call to `leadingNode/trailingNode`, respectively. Thus we should parenthesize a syntax node `stx`
supposedly produced by `p prec` if
1. the leading/any trailing parser involved in `stx` has precedence < `prec` (because without parentheses, `p prec`
would not produce all of `stx`), or
2. the trailing parser parsing the input to *the right of* `stx`, if any, has precedence >= `prec` (because without
parentheses, `p prec` would have parsed it as well and made it a part of `stx`). We also check that the two parsers
are from the same syntax category.
Note that in case 2, it is also sufficient to parenthesize a *parent* node as long as the offending parser is still to
the right of that node. For example, imagine the tree structure of `(f $ fun x => x) y` without parentheses. We need to
insert *some* parentheses between `x` and `y` since the lambda body is parsed with precedence 0, while the identifier
parser for `y` has precedence `maxPrec`. But we need to parenthesize the `$` node anyway since the precedence of its
RHS (0) again is smaller than that of `y`. So it's better to only parenthesize the outer node than ending up with
`(f $ (fun x => x)) y`.
# Implementation
We transform the syntax tree and collect the necessary precedence information for that in a single traversal. The
traversal is right-to-left to cover case 2. More specifically, for every Pratt parser call, we store as monadic state
the precedence of the left-most trailing parser and the minimum precedence of any parser (`contPrec`/`minPrec`) in this
call, if any, and the precedence of the nested trailing Pratt parser call (`trailPrec`), if any. If `stP` is the state
resulting from the traversal of a Pratt parser call `p prec`, and `st` is the state of the surrounding call, we
parenthesize if `prec > stP.minPrec` (case 1) or if `stP.trailPrec <= st.contPrec` (case 2).
The traversal can be customized for each `[*Parser]` parser declaration `c` (more specifically, each `SyntaxNodeKind`
`c`) using the `[parenthesizer c]` attribute. Otherwise, a default parenthesizer will be synthesized from the used
parser combinators by recursively replacing them with declarations tagged as `[combinatorParenthesizer]` for the
respective combinator. If a called function does not have a registered combinator parenthesizer and is not reducible,
the synthesizer fails. This happens mostly at the `Parser.mk` decl, which is irreducible, when some parser primitive has
not been handled yet.
The traversal over the `Syntax` object is complicated by the fact that a parser does not produce exactly one syntax
node, but an arbitrary (but constant, for each parser) amount that it pushes on top of the parser stack. This amount can
even be zero for parsers such as `checkWsBefore`. Thus we cannot simply pass and return a `Syntax` object to and from
`visit`. Instead, we use a `Syntax.Traverser` that allows arbitrary movement and modification inside the syntax tree.
Our traversal invariant is that a parser interpreter should stop at the syntax object to the *left* of all syntax
objects its parser produced, except when it is already at the left-most child. This special case is not an issue in
practice since if there is another parser to the left that produced zero nodes in this case, it should always do so, so
there is no danger of the left-most child being processed multiple times.
Ultimately, most parenthesizers are implemented via three primitives that do all the actual syntax traversal:
`maybeParenthesize mkParen prec x` runs `x` and afterwards transforms it with `mkParen` if the above
condition for `p prec` is fulfilled. `visitToken` advances to the preceding sibling and is used on atoms. `visitArgs x`
executes `x` on the last child of the current node and then advances to the preceding sibling (of the original current
node).
-/
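/-
A worked numeric illustration of the two conditions above (hedged: the quoted numbers assume the
standard `term` precedences of 65 for `+` and 70 for `*`). When printing `a + b * c`, the `b * c`
node sits in a position that demands precedence ≥ 66 (the right-hand side of `+`); its own
precedence 70 is not smaller, so condition 1 does not fire and no parentheses are inserted. When
printing `(a + b) * c`, the `a + b` node sits in a position demanding precedence ≥ 70, its own
precedence 65 is smaller, condition 1 fires, and parentheses are inserted. Condition 2 is what
handles the `(f $ fun x => x) y` example from above, where the offending parser is the application
argument to the *right* of the node being printed.
-/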
import Lean.CoreM
import Lean.KeyedDeclsAttribute
import Lean.Parser.Extension
import Lean.ParserCompiler.Attribute
import Lean.PrettyPrinter.Basic
namespace Lean
namespace PrettyPrinter
namespace Parenthesizer
structure Context :=
-- We need to store this `categoryParser` argument to deal with the implicit Pratt parser call in `trailingNode.parenthesizer`.
(cat : Name := Name.anonymous)
structure State :=
(stxTrav : Syntax.Traverser)
--- precedence and category of the current left-most trailing parser, if any; see module doc for details
(contPrec : Option Nat := none)
(contCat : Name := Name.anonymous)
-- current minimum precedence in this Pratt parser call, if any; see module doc for details
(minPrec : Option Nat := none)
-- precedence and category of the trailing Pratt parser call if any; see module doc for details
(trailPrec : Option Nat := none)
(trailCat : Name := Name.anonymous)
-- true iff we have already visited a token on this parser level; used for detecting trailing parsers
(visitedToken : Bool := false)
end Parenthesizer
abbrev ParenthesizerM := ReaderT Parenthesizer.Context $ StateRefT Parenthesizer.State CoreM
abbrev Parenthesizer := ParenthesizerM Unit
@[inline] def ParenthesizerM.orelse {α} (p₁ p₂ : ParenthesizerM α) : ParenthesizerM α := do
let s ← get
catchInternalId backtrackExceptionId
p₁
(fun _ => do set s; p₂)
instance {α} : OrElse (ParenthesizerM α) := ⟨ParenthesizerM.orelse⟩
unsafe def mkParenthesizerAttribute : IO (KeyedDeclsAttribute Parenthesizer) :=
KeyedDeclsAttribute.init {
builtinName := `builtinParenthesizer,
name := `parenthesizer,
descr := "Register a parenthesizer for a parser.
[parenthesizer k] registers a declaration of type `Lean.PrettyPrinter.Parenthesizer` for the `SyntaxNodeKind` `k`.",
valueTypeName := `Lean.PrettyPrinter.Parenthesizer,
evalKey := fun builtin args => do
let env ← getEnv
match attrParamSyntaxToIdentifier args with
| some id =>
-- `isValidSyntaxNodeKind` is updated only in the next stage for new `[builtin*Parser]`s, but we try to
-- synthesize a parenthesizer for it immediately, so we just check for a declaration in this case
if (builtin && (env.find? id).isSome) || Parser.isValidSyntaxNodeKind env id then pure id
else throwError ("invalid [parenthesizer] argument, unknown syntax kind '" ++ toString id ++ "'")
| none => throwError "invalid [parenthesizer] argument, expected identifier"
} `Lean.PrettyPrinter.parenthesizerAttribute
@[builtinInit mkParenthesizerAttribute] constant parenthesizerAttribute : KeyedDeclsAttribute Parenthesizer
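/-
A hedged usage sketch of the attribute registered above; `myNode` is a hypothetical syntax node
kind, not defined in this file:
  @[parenthesizer myNode] def myNode.parenthesizer : Parenthesizer := do
    ...
Hand-written parenthesizers like this are rarely needed: for parsers built from the standard
combinators, a default parenthesizer is synthesized from the `[combinatorParenthesizer]`
declarations further below.
-/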
abbrev CategoryParenthesizer := forall (prec : Nat), Parenthesizer
unsafe def mkCategoryParenthesizerAttribute : IO (KeyedDeclsAttribute CategoryParenthesizer) :=
KeyedDeclsAttribute.init {
builtinName := `builtinCategoryParenthesizer,
name := `categoryParenthesizer,
descr := "Register a parenthesizer for a syntax category.
[parenthesizer cat] registers a declaration of type `Lean.PrettyPrinter.CategoryParenthesizer` for the category `cat`,
which is used when parenthesizing calls of `categoryParser cat prec`. Implementations should call `maybeParenthesize`
with the precedence and `cat`. If no category parenthesizer is registered, the category will never be parenthesized,
but still be traversed for parenthesizing nested categories.",
valueTypeName := `Lean.PrettyPrinter.CategoryParenthesizer,
evalKey := fun _ args => do
let env ← getEnv
match attrParamSyntaxToIdentifier args with
| some id =>
if Parser.isParserCategory env id then pure id
else throwError ("invalid [parenthesizer] argument, unknown parser category '" ++ toString id ++ "'")
| none => throwError "invalid [parenthesizer] argument, expected identifier"
} `Lean.PrettyPrinter.categoryParenthesizerAttribute
@[builtinInit mkCategoryParenthesizerAttribute] constant categoryParenthesizerAttribute : KeyedDeclsAttribute CategoryParenthesizer
unsafe def mkCombinatorParenthesizerAttribute : IO ParserCompiler.CombinatorAttribute :=
ParserCompiler.registerCombinatorAttribute
`combinatorParenthesizer
"Register a parenthesizer for a parser combinator.
[combinatorParenthesizer c] registers a declaration of type `Lean.PrettyPrinter.Parenthesizer` for the `Parser` declaration `c`.
Note that, unlike with [parenthesizer], this is not a node kind since combinators usually do not introduce their own node kinds.
The tagged declaration may optionally accept parameters corresponding to (a prefix of) those of `c`, where `Parser` is replaced
with `Parenthesizer` in the parameter types."
@[builtinInit mkCombinatorParenthesizerAttribute] constant combinatorParenthesizerAttribute : ParserCompiler.CombinatorAttribute
namespace Parenthesizer
open Lean.Core
open Lean.Format
def throwBacktrack {α} : ParenthesizerM α :=
throw $ Exception.internal backtrackExceptionId
instance : Syntax.MonadTraverser ParenthesizerM := ⟨{
get := State.stxTrav <$> get,
set := fun t => modify (fun st => { st with stxTrav := t }),
modifyGet := fun f => modifyGet (fun st => let (a, t) := f st.stxTrav; (a, { st with stxTrav := t }))
}⟩
open Syntax.MonadTraverser
def addPrecCheck (prec : Nat) : ParenthesizerM Unit :=
modify fun st => { st with contPrec := Nat.min (st.contPrec.getD prec) prec, minPrec := Nat.min (st.minPrec.getD prec) prec }
/-- Execute `x` at the right-most child of the current node, if any, then advance to the left. -/
def visitArgs (x : ParenthesizerM Unit) : ParenthesizerM Unit := do
let stx ← getCur
if stx.getArgs.size > 0 then
goDown (stx.getArgs.size - 1) *> x <* goUp
goLeft
-- Macro scopes in the parenthesizer output are ultimately ignored by the pretty printer,
-- so give a trivial implementation.
instance : MonadQuotation ParenthesizerM := {
getCurrMacroScope := pure $ arbitrary _,
getMainModule := pure $ arbitrary _,
withFreshMacroScope := fun x => x,
}
/--
Run `x` and parenthesize the result using `mkParen` if necessary.
If `canJuxtapose` is false, we assume the category does not have a token-less juxtaposition syntax a la function application and deactivate rule 2. -/
def maybeParenthesize (cat : Name) (canJuxtapose : Bool) (mkParen : Syntax → Syntax) (prec : Nat) (x : ParenthesizerM Unit) : ParenthesizerM Unit := do
let stx ← getCur
let idx ← getIdx
let st ← get
-- reset precs for the recursive call
set { stxTrav := st.stxTrav : State }
trace[PrettyPrinter.parenthesize]! "parenthesizing (cont := {(st.contPrec, st.contCat)}){MessageData.nest 2 (line ++ stx)}"
x
let { minPrec := some minPrec, trailPrec := trailPrec, trailCat := trailCat, .. } ← get
| panic! "maybeParenthesize: visited a syntax tree without precedences?!"
trace[PrettyPrinter.parenthesize]! ("...precedences are {prec} >? {minPrec}" ++ if canJuxtapose then msg!", {(trailPrec, trailCat)} <=? {(st.contPrec, st.contCat)}" else "")
-- Should we parenthesize?
if (prec > minPrec || canJuxtapose && match trailPrec, st.contPrec with some trailPrec, some contPrec => trailCat == st.contCat && trailPrec <= contPrec | _, _ => false) then
-- The recursive `visit` call, by the invariant, has moved to the preceding node. In order to parenthesize
-- the original node, we must first move to the right, except if we already were at the left-most child in the first
-- place.
if idx > 0 then goRight
let stx ← getCur
match stx.getHeadInfo, stx.getTailInfo with
| some hi, some ti =>
-- Move leading/trailing whitespace of `stx` outside of parentheses
let stx := (stx.setHeadInfo { hi with leading := "".toSubstring }).setTailInfo { ti with trailing := "".toSubstring }
let stx := mkParen stx
let stx := (stx.setHeadInfo { hi with trailing := "".toSubstring }).setTailInfo { ti with leading := "".toSubstring }
setCur stx
| _, _ => setCur (mkParen stx)
let stx ← getCur; trace! `PrettyPrinter.parenthesize ("parenthesized: " ++ stx.formatStx none)
goLeft
-- after parenthesization, there is no more trailing parser
modify (fun st => { st with contPrec := Parser.maxPrec, contCat := cat, trailPrec := none })
let { trailPrec := trailPrec, .. } ← get
-- If we already had a token at this level, keep the trailing parser. Otherwise, use the minimum of
-- `prec` and `trailPrec`.
if st.visitedToken then
modify fun stP => { stP with trailPrec := st.trailPrec, trailCat := st.trailCat }
else
let trailPrec := match trailPrec with
| some trailPrec => Nat.min trailPrec prec
| _ => prec
modify fun stP => { stP with trailPrec := trailPrec, trailCat := cat }
modify fun stP => { stP with minPrec := st.minPrec }
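-- For orientation: the category parenthesizers below instantiate `mkParen` with the category's
-- own parenthesis syntax; e.g. `term.parenthesizer` passes `fun stx => Unhygienic.run `(($stx))`
-- and uses `canJuxtapose := true`, since terms do have a token-less juxtaposition syntax
-- (function application).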
/-- Adjust state and advance. -/
def visitToken : Parenthesizer := do
modify fun st => { st with contPrec := none, contCat := Name.anonymous, visitedToken := true }
goLeft
@[combinatorParenthesizer Lean.Parser.orelse] def orelse.parenthesizer (p1 p2 : Parenthesizer) : Parenthesizer := do
let st ← get
-- HACK: We have no (immediate) information on which side of the orelse could have produced the current node, so try
-- them in turn. Uses the syntax traverser non-linearly!
p1 <|> p2
-- `mkAntiquot` is quite complex, so we'd rather have its parenthesizer synthesized below the actual parser definition.
-- Note that there is a mutual recursion
-- `categoryParser -> mkAntiquot -> termParser -> categoryParser`, so we need to introduce an indirection somewhere
-- anyway.
@[extern 8 "lean_mk_antiquot_parenthesizer"]
constant mkAntiquot.parenthesizer' (name : String) (kind : Option SyntaxNodeKind) (anonymous := true) : Parenthesizer
@[inline] def liftCoreM {α} (x : CoreM α) : ParenthesizerM α :=
liftM x
def throwError {α} (msg : MessageData) : ParenthesizerM α :=
liftCoreM $ Lean.throwError msg
-- break up big mutual recursion
@[extern "lean_pretty_printer_parenthesizer_interpret_parser_descr"]
constant interpretParserDescr' : ParserDescr → CoreM Parenthesizer := arbitrary _
unsafe def parenthesizerForKindUnsafe (k : SyntaxNodeKind) : Parenthesizer := do
(← liftM $ runForNodeKind parenthesizerAttribute k interpretParserDescr')
@[implementedBy parenthesizerForKindUnsafe]
constant parenthesizerForKind (k : SyntaxNodeKind) : Parenthesizer := arbitrary _
@[combinatorParenthesizer Lean.Parser.withAntiquot]
def withAntiquot.parenthesizer (antiP p : Parenthesizer) : Parenthesizer :=
-- TODO: could be optimized using `isAntiquot` (which would have to be moved), but I'd rather
-- fix the backtracking hack outright.
orelse.parenthesizer antiP p
def parenthesizeCategoryCore (cat : Name) (prec : Nat) : Parenthesizer :=
withReader (fun ctx => { ctx with cat := cat }) do
let stx ← getCur
if stx.getKind == `choice then
visitArgs $ stx.getArgs.size.forM fun _ => do
let stx ← getCur
parenthesizerForKind stx.getKind
else
withAntiquot.parenthesizer (mkAntiquot.parenthesizer' cat.toString none) (parenthesizerForKind stx.getKind)
modify fun st => { st with contCat := cat }
@[combinatorParenthesizer Lean.Parser.categoryParser]
def categoryParser.parenthesizer (cat : Name) (prec : Nat) : Parenthesizer := do
let env ← getEnv
match categoryParenthesizerAttribute.getValues env cat with
| p::_ => p prec
-- Fall back to the generic parenthesizer.
-- In this case this node will never be parenthesized since we don't know which parentheses to use.
| _ => parenthesizeCategoryCore cat prec
@[combinatorParenthesizer Lean.Parser.categoryParserOfStack]
def categoryParserOfStack.parenthesizer (offset : Nat) (prec : Nat) : Parenthesizer := do
let st ← get
let stx := st.stxTrav.parents.back.getArg (st.stxTrav.idxs.back - offset)
categoryParser.parenthesizer stx.getId prec
@[builtinCategoryParenthesizer term]
def term.parenthesizer : CategoryParenthesizer | prec => do
let stx ← getCur
-- this can happen at `termParser <|> many1 commandParser` in `Term.stxQuot`
if stx.getKind == nullKind then
throwBacktrack
else do
maybeParenthesize `term true (fun stx => Unhygienic.run `(($stx))) prec $
parenthesizeCategoryCore `term prec
@[builtinCategoryParenthesizer tactic]
def tactic.parenthesizer : CategoryParenthesizer | prec => do
maybeParenthesize `tactic false (fun stx => Unhygienic.run `(tactic|($stx))) prec $
parenthesizeCategoryCore `tactic prec
@[builtinCategoryParenthesizer level]
def level.parenthesizer : CategoryParenthesizer | prec => do
maybeParenthesize `level false (fun stx => Unhygienic.run `(level|($stx))) prec $
parenthesizeCategoryCore `level prec
@[combinatorParenthesizer Lean.Parser.error]
def error.parenthesizer (msg : String) : Parenthesizer :=
pure ()
@[combinatorParenthesizer Lean.Parser.errorAtSavedPos]
def errorAtSavedPos.parenthesizer (msg : String) (delta : Bool) : Parenthesizer :=
pure ()
@[combinatorParenthesizer Lean.Parser.try]
def try.parenthesizer (p : Parenthesizer) : Parenthesizer :=
p
@[combinatorParenthesizer Lean.Parser.lookahead]
def lookahead.parenthesizer (p : Parenthesizer) : Parenthesizer :=
pure ()
@[combinatorParenthesizer Lean.Parser.notFollowedBy]
def notFollowedBy.parenthesizer (p : Parenthesizer) : Parenthesizer :=
pure ()
@[combinatorParenthesizer Lean.Parser.andthen]
def andthen.parenthesizer (p1 p2 : Parenthesizer) : Parenthesizer :=
p2 *> p1
@[combinatorParenthesizer Lean.Parser.node]
def node.parenthesizer (k : SyntaxNodeKind) (p : Parenthesizer) : Parenthesizer := do
let stx ← getCur
if k != stx.getKind then
trace[PrettyPrinter.parenthesize.backtrack]! "unexpected node kind '{stx.getKind}', expected '{k}'"
-- HACK; see `orelse.parenthesizer`
throwBacktrack
visitArgs p
@[combinatorParenthesizer Lean.Parser.checkPrec]
def checkPrec.parenthesizer (prec : Nat) : Parenthesizer :=
addPrecCheck prec
@[combinatorParenthesizer Lean.Parser.leadingNode]
def leadingNode.parenthesizer (k : SyntaxNodeKind) (prec : Nat) (p : Parenthesizer) : Parenthesizer := do
node.parenthesizer k p
addPrecCheck prec
-- Limit `cont` precedence to `maxPrec-1`.
-- This is because `maxPrec-1` is the precedence of function application, which is the only way to turn a leading parser
-- into a trailing one.
modify fun st => { st with contPrec := Nat.min (Parser.maxPrec-1) prec }
@[combinatorParenthesizer Lean.Parser.trailingNode]
def trailingNode.parenthesizer (k : SyntaxNodeKind) (prec : Nat) (p : Parenthesizer) : Parenthesizer := do
let stx ← getCur
if k != stx.getKind then
trace[PrettyPrinter.parenthesize.backtrack]! "unexpected node kind '{stx.getKind}', expected '{k}'"
-- HACK; see `orelse.parenthesizer`
throwBacktrack
visitArgs do
p
addPrecCheck prec
let ctx ← read
modify fun st => { st with contCat := ctx.cat }
-- After visiting the nodes actually produced by the parser passed to `trailingNode`, we are positioned on the
-- left-most child, which is the term injected by `trailingNode` in place of the recursion. Left recursion is not an
-- issue for the parenthesizer, so we can think of this child being produced by `termParser 0`, or whichever Pratt
-- parser is calling us.
categoryParser.parenthesizer ctx.cat 0
@[combinatorParenthesizer Lean.Parser.symbol] def symbol.parenthesizer (sym : String) := visitToken
@[combinatorParenthesizer Lean.Parser.unicodeSymbol] def unicodeSymbol.parenthesizer (sym asciiSym : String) := visitToken
@[combinatorParenthesizer Lean.Parser.identNoAntiquot] def identNoAntiquot.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.rawIdent] def rawIdent.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.identEq] def identEq.parenthesizer (id : Name) := visitToken
@[combinatorParenthesizer Lean.Parser.nonReservedSymbol] def nonReservedSymbol.parenthesizer (sym : String) (includeIdent : Bool) := visitToken
@[combinatorParenthesizer Lean.Parser.charLitNoAntiquot] def charLitNoAntiquot.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.strLitNoAntiquot] def strLitNoAntiquot.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.nameLitNoAntiquot] def nameLitNoAntiquot.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.numLitNoAntiquot] def numLitNoAntiquot.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.fieldIdx] def fieldIdx.parenthesizer := visitToken
@[combinatorParenthesizer Lean.Parser.many]
def many.parenthesizer (p : Parenthesizer) : Parenthesizer := do
let stx ← getCur
visitArgs $ stx.getArgs.size.forM fun _ => p
@[combinatorParenthesizer Lean.Parser.many1]
def many1.parenthesizer (p : Parenthesizer) : Parenthesizer := do
many.parenthesizer p
@[combinatorParenthesizer Lean.Parser.many1Unbox]
def many1Unbox.parenthesizer (p : Parenthesizer) : Parenthesizer := do
let stx ← getCur
if stx.getKind == nullKind then
many.parenthesizer p
else
p
@[combinatorParenthesizer Lean.Parser.optional]
def optional.parenthesizer (p : Parenthesizer) : Parenthesizer := do
visitArgs p
@[combinatorParenthesizer Lean.Parser.sepBy]
def sepBy.parenthesizer (p pSep : Parenthesizer) : Parenthesizer := do
let stx ← getCur
visitArgs $ (List.range stx.getArgs.size).reverse.forM $ fun i => if i % 2 == 0 then p else pSep
@[combinatorParenthesizer Lean.Parser.sepBy1] def sepBy1.parenthesizer := sepBy.parenthesizer
@[combinatorParenthesizer Lean.Parser.withPosition] def withPosition.parenthesizer (p : Parenthesizer) : Parenthesizer := do
-- We assume the formatter will indent syntax sufficiently such that parenthesizing a `withPosition` node is never necessary
modify fun st => { st with contPrec := none }
p
@[combinatorParenthesizer Lean.Parser.withoutPosition] def withoutPosition.parenthesizer (p : Parenthesizer) : Parenthesizer := p
@[combinatorParenthesizer Lean.Parser.withForbidden] def withForbidden.parenthesizer (tk : Parser.Token) (p : Parenthesizer) : Parenthesizer := p
@[combinatorParenthesizer Lean.Parser.withoutForbidden] def withoutForbidden.parenthesizer (p : Parenthesizer) : Parenthesizer := p
@[combinatorParenthesizer Lean.Parser.setExpected]
def setExpected.parenthesizer (expected : List String) (p : Parenthesizer) : Parenthesizer := p
@[combinatorParenthesizer Lean.Parser.toggleInsideQuot] def toggleInsideQuot.parenthesizer (p : Parenthesizer) : Parenthesizer := p
@[combinatorParenthesizer Lean.Parser.suppressInsideQuot] def suppressInsideQuot.parenthesizer (p : Parenthesizer) : Parenthesizer := p
@[combinatorParenthesizer Lean.Parser.checkStackTop] def checkStackTop.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkWsBefore] def checkWsBefore.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkNoWsBefore] def checkNoWsBefore.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkTailWs] def checkTailWs.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkColGe] def checkColGe.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkColGt] def checkColGt.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkLineEq] def checkLineEq.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.eoi] def eoi.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.notFollowedByCategoryToken] def notFollowedByCategoryToken.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkNoImmediateColon] def checkNoImmediateColon.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkInsideQuot] def checkInsideQuot.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.checkOutsideQuot] def checkOutsideQuot.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.skip] def skip.parenthesizer : Parenthesizer := pure ()
@[combinatorParenthesizer Lean.Parser.pushNone] def pushNone.parenthesizer : Parenthesizer := goLeft
@[combinatorParenthesizer Lean.Parser.interpolatedStr]
def interpolatedStr.parenthesizer (p : Parenthesizer) : Parenthesizer := do
visitArgs $ (← getCur).getArgs.reverse.forM fun chunk =>
if chunk.isOfKind interpolatedStrLitKind then
goLeft
else
p
@[combinatorParenthesizer ite, macroInline] def ite {α : Type} (c : Prop) [h : Decidable c] (t e : Parenthesizer) : Parenthesizer :=
if c then t else e
@[export lean_pretty_printer_parenthesizer_interpret_parser_descr]
unsafe def interpretParserDescr : ParserDescr → CoreM Parenthesizer
| ParserDescr.andthen d₁ d₂ => andthen.parenthesizer <$> interpretParserDescr d₁ <*> interpretParserDescr d₂
| ParserDescr.orelse d₁ d₂ => orelse.parenthesizer <$> interpretParserDescr d₁ <*> interpretParserDescr d₂
| ParserDescr.optional d => optional.parenthesizer <$> interpretParserDescr d
| ParserDescr.lookahead d => lookahead.parenthesizer <$> interpretParserDescr d
| ParserDescr.try d => try.parenthesizer <$> interpretParserDescr d
| ParserDescr.notFollowedBy d => notFollowedBy.parenthesizer <$> interpretParserDescr d
| ParserDescr.many d => many.parenthesizer <$> interpretParserDescr d
| ParserDescr.many1 d => many1.parenthesizer <$> interpretParserDescr d
| ParserDescr.sepBy d₁ d₂ _ => sepBy.parenthesizer <$> interpretParserDescr d₁ <*> interpretParserDescr d₂
| ParserDescr.sepBy1 d₁ d₂ _ => sepBy1.parenthesizer <$> interpretParserDescr d₁ <*> interpretParserDescr d₂
| ParserDescr.node k prec d => leadingNode.parenthesizer k prec <$> interpretParserDescr d
| ParserDescr.trailingNode k prec d => trailingNode.parenthesizer k prec <$> interpretParserDescr d
| ParserDescr.symbol tk => pure $ symbol.parenthesizer tk
| ParserDescr.numLit => pure $ withAntiquot.parenthesizer (mkAntiquot.parenthesizer' "numLit" `numLit) numLitNoAntiquot.parenthesizer
| ParserDescr.strLit => pure $ withAntiquot.parenthesizer (mkAntiquot.parenthesizer' "strLit" `strLit) strLitNoAntiquot.parenthesizer
| ParserDescr.charLit => pure $ withAntiquot.parenthesizer (mkAntiquot.parenthesizer' "charLit" `charLit) charLitNoAntiquot.parenthesizer
| ParserDescr.nameLit => pure $ withAntiquot.parenthesizer (mkAntiquot.parenthesizer' "nameLit" `nameLit) nameLitNoAntiquot.parenthesizer
| ParserDescr.ident => pure $ withAntiquot.parenthesizer (mkAntiquot.parenthesizer' "ident" `ident) identNoAntiquot.parenthesizer
| ParserDescr.interpolatedStr d => interpolatedStr.parenthesizer <$> interpretParserDescr d
| ParserDescr.nonReservedSymbol tk includeIdent => pure $ nonReservedSymbol.parenthesizer tk includeIdent
| ParserDescr.noWs => pure $ checkNoWsBefore.parenthesizer
| ParserDescr.checkCol strict => pure $ if strict then checkColGt.parenthesizer else checkColGe.parenthesizer
| ParserDescr.withPosition d => withPosition.parenthesizer <$> interpretParserDescr d
| ParserDescr.parser constName => combinatorParenthesizerAttribute.runDeclFor constName
| ParserDescr.cat catName prec => pure $ categoryParser.parenthesizer catName prec
end Parenthesizer
open Parenthesizer
/-- Add necessary parentheses in `stx` parsed by `parser`. -/
def parenthesize (parenthesizer : Parenthesizer) (stx : Syntax) : CoreM Syntax :=
catchInternalId backtrackExceptionId
(do
let (_, st) ← (parenthesizer {}).run { stxTrav := Syntax.Traverser.fromSyntax stx }
pure st.stxTrav.cur)
(fun _ => throwError "parenthesize: uncaught backtrack exception")
def parenthesizeTerm := parenthesize $ categoryParser.parenthesizer `term 0
def parenthesizeCommand := parenthesize $ categoryParser.parenthesizer `command 0
builtin_initialize registerTraceClass `PrettyPrinter.parenthesize
end PrettyPrinter
end Lean
| file record | repo: joehendrix/mathlib (refs/heads/master) | path: /src/logic/basic.lean | blob: b7df46505eab4237f1fd9179a81083064330f3c8 |
| license type: permissive (detected: Apache-2.0) | language: Lean | encoding: UTF-8 | size: 27,426 bytes | extension: lean |
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura
Theorems that require decidability hypotheses are in the namespace "decidable".
Classical versions are in the namespace "classical".
Note: in the presence of automation, this whole file may be unnecessary. On the other hand,
maybe it is useful for writing automation.
-/
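/- Illustration of the convention (an informal note): `not_not` below assumes `[decidable a]`,
while `classical.not_not` later in this file restates it without that hypothesis via
`classical.prop_decidable`; both say `¬¬a ↔ a`. -/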
import data.prod tactic.cache
/-
miscellany
TODO: move elsewhere
-/
section miscellany
variables {α : Type*} {β : Type*}
@[reducible] def hidden {a : α} := a
def empty.elim {C : Sort*} : empty → C.
instance : subsingleton empty := ⟨λa, a.elim⟩
instance : decidable_eq empty := λa, a.elim
@[priority 0] instance decidable_eq_of_subsingleton
{α} [subsingleton α] : decidable_eq α
| a b := is_true (subsingleton.elim a b)
/- Add an instance to "undo" coercion transitivity into a chain of coercions, because
most simp lemmas are stated with respect to simple coercions and will not match when
part of a chain. -/
@[simp] theorem coe_coe {α β γ} [has_coe α β] [has_coe_t β γ]
(a : α) : (a : γ) = (a : β) := rfl
@[simp] theorem coe_fn_coe_trans
{α β γ} [has_coe α β] [has_coe_t_aux β γ] [has_coe_to_fun γ]
(x : α) : @coe_fn α _ x = @coe_fn β _ x := rfl
@[simp] theorem coe_fn_coe_base
{α β} [has_coe α β] [has_coe_to_fun β]
(x : α) : @coe_fn α _ x = @coe_fn β _ x := rfl
@[simp] theorem coe_sort_coe_trans
{α β γ} [has_coe α β] [has_coe_t_aux β γ] [has_coe_to_sort γ]
(x : α) : @coe_sort α _ x = @coe_sort β _ x := rfl
@[simp] theorem coe_sort_coe_base
{α β} [has_coe α β] [has_coe_to_sort β]
(x : α) : @coe_sort α _ x = @coe_sort β _ x := rfl
/-- `pempty` is the universe-polymorphic analogue of `empty`. -/
@[derive decidable_eq]
inductive {u} pempty : Sort u
def pempty.elim {C : Sort*} : pempty → C.
instance subsingleton_pempty : subsingleton pempty := ⟨λa, a.elim⟩
lemma congr_arg_heq {α} {β : α → Sort*} (f : ∀ a, β a) : ∀ {a₁ a₂ : α}, a₁ = a₂ → f a₁ == f a₂
| a _ rfl := heq.rfl
lemma plift.down_inj {α : Sort*} : ∀ (a b : plift α), a.down = b.down → a = b
| ⟨a⟩ ⟨b⟩ rfl := rfl
end miscellany
/-
propositional connectives
-/
@[simp] theorem false_ne_true : false ≠ true
| h := h.symm ▸ trivial
section propositional
variables {a b c d : Prop}
/- implies -/
theorem iff_of_eq (e : a = b) : a ↔ b := e ▸ iff.rfl
theorem iff_iff_eq : (a ↔ b) ↔ a = b := ⟨propext, iff_of_eq⟩
@[simp] theorem imp_self : (a → a) ↔ true := iff_true_intro id
theorem imp_intro {α β} (h : α) (h₂ : β) : α := h
theorem imp_false : (a → false) ↔ ¬ a := iff.rfl
theorem imp_and_distrib {α} : (α → b ∧ c) ↔ (α → b) ∧ (α → c) :=
⟨λ h, ⟨λ ha, (h ha).left, λ ha, (h ha).right⟩,
λ h ha, ⟨h.left ha, h.right ha⟩⟩
@[simp] theorem and_imp : (a ∧ b → c) ↔ (a → b → c) :=
iff.intro (λ h ha hb, h ⟨ha, hb⟩) (λ h ⟨ha, hb⟩, h ha hb)
theorem iff_def : (a ↔ b) ↔ (a → b) ∧ (b → a) :=
iff_iff_implies_and_implies _ _
theorem iff_def' : (a ↔ b) ↔ (b → a) ∧ (a → b) :=
iff_def.trans and.comm
@[simp] theorem imp_true_iff {α : Sort*} : (α → true) ↔ true :=
iff_true_intro $ λ_, trivial
@[simp] theorem imp_iff_right (ha : a) : (a → b) ↔ b :=
⟨λf, f ha, imp_intro⟩
/- not -/
theorem not.elim {α : Sort*} (H1 : ¬a) (H2 : a) : α := absurd H2 H1
@[reducible] theorem not.imp {a b : Prop} (H2 : ¬b) (H1 : a → b) : ¬a := mt H1 H2
theorem not_not_of_not_imp : ¬(a → b) → ¬¬a :=
mt not.elim
theorem not_of_not_imp {α} : ¬(α → b) → ¬b :=
mt imp_intro
theorem dec_em (p : Prop) [decidable p] : p ∨ ¬p := decidable.em p
theorem by_contradiction {p} [decidable p] : (¬p → false) → p :=
decidable.by_contradiction
@[simp] theorem not_not [decidable a] : ¬¬a ↔ a :=
iff.intro by_contradiction not_not_intro
theorem of_not_not [decidable a] : ¬¬a → a :=
by_contradiction
theorem of_not_imp [decidable a] (h : ¬ (a → b)) : a :=
by_contradiction (not_not_of_not_imp h)
theorem not.imp_symm [decidable a] (h : ¬a → b) (hb : ¬b) : a :=
by_contradiction $ hb ∘ h
theorem not_imp_comm [decidable a] [decidable b] : (¬a → b) ↔ (¬b → a) :=
⟨not.imp_symm, not.imp_symm⟩
theorem imp.swap : (a → b → c) ↔ (b → a → c) :=
⟨function.swap, function.swap⟩
theorem imp_not_comm : (a → ¬b) ↔ (b → ¬a) :=
imp.swap
/- and -/
theorem not_and_of_not_left (b : Prop) : ¬a → ¬(a ∧ b) :=
mt and.left
theorem not_and_of_not_right (a : Prop) {b : Prop} : ¬b → ¬(a ∧ b) :=
mt and.right
theorem and.imp_left (h : a → b) : a ∧ c → b ∧ c :=
and.imp h id
theorem and.imp_right (h : a → b) : c ∧ a → c ∧ b :=
and.imp id h
lemma and.right_comm : (a ∧ b) ∧ c ↔ (a ∧ c) ∧ b :=
by simp [and.left_comm, and.comm]
lemma and.rotate : a ∧ b ∧ c ↔ b ∧ c ∧ a :=
by simp [and.left_comm, and.comm]
theorem and_not_self_iff (a : Prop) : a ∧ ¬ a ↔ false :=
iff.intro (assume h, (h.right) (h.left)) (assume h, h.elim)
theorem not_and_self_iff (a : Prop) : ¬ a ∧ a ↔ false :=
iff.intro (assume ⟨hna, ha⟩, hna ha) false.elim
theorem and_iff_left_of_imp {a b : Prop} (h : a → b) : (a ∧ b) ↔ a :=
iff.intro and.left (λ ha, ⟨ha, h ha⟩)
theorem and_iff_right_of_imp {a b : Prop} (h : b → a) : (a ∧ b) ↔ b :=
iff.intro and.right (λ hb, ⟨h hb, hb⟩)
lemma and.congr_right_iff : (a ∧ b ↔ a ∧ c) ↔ (a → (b ↔ c)) :=
⟨λ h ha, by simp [ha] at h; exact h, and_congr_right⟩
/- or -/
theorem or_of_or_of_imp_of_imp (h₁ : a ∨ b) (h₂ : a → c) (h₃ : b → d) : c ∨ d :=
or.imp h₂ h₃ h₁
theorem or_of_or_of_imp_left (h₁ : a ∨ c) (h : a → b) : b ∨ c :=
or.imp_left h h₁
theorem or_of_or_of_imp_right (h₁ : c ∨ a) (h : a → b) : c ∨ b :=
or.imp_right h h₁
theorem or.elim3 (h : a ∨ b ∨ c) (ha : a → d) (hb : b → d) (hc : c → d) : d :=
or.elim h ha (assume h₂, or.elim h₂ hb hc)
theorem or_imp_distrib : (a ∨ b → c) ↔ (a → c) ∧ (b → c) :=
⟨assume h, ⟨assume ha, h (or.inl ha), assume hb, h (or.inr hb)⟩,
assume ⟨ha, hb⟩, or.rec ha hb⟩
theorem or_iff_not_imp_left [decidable a] : a ∨ b ↔ (¬ a → b) :=
⟨or.resolve_left, λ h, dite _ or.inl (or.inr ∘ h)⟩
theorem or_iff_not_imp_right [decidable b] : a ∨ b ↔ (¬ b → a) :=
or.comm.trans or_iff_not_imp_left
theorem not_imp_not [decidable a] : (¬ a → ¬ b) ↔ (b → a) :=
⟨assume h hb, by_contradiction $ assume na, h na hb, mt⟩
/- distributivity -/
theorem and_or_distrib_left : a ∧ (b ∨ c) ↔ (a ∧ b) ∨ (a ∧ c) :=
⟨λ ⟨ha, hbc⟩, hbc.imp (and.intro ha) (and.intro ha),
or.rec (and.imp_right or.inl) (and.imp_right or.inr)⟩
theorem or_and_distrib_right : (a ∨ b) ∧ c ↔ (a ∧ c) ∨ (b ∧ c) :=
(and.comm.trans and_or_distrib_left).trans (or_congr and.comm and.comm)
theorem or_and_distrib_left : a ∨ (b ∧ c) ↔ (a ∨ b) ∧ (a ∨ c) :=
⟨or.rec (λha, and.intro (or.inl ha) (or.inl ha)) (and.imp or.inr or.inr),
and.rec $ or.rec (imp_intro ∘ or.inl) (or.imp_right ∘ and.intro)⟩
theorem and_or_distrib_right : (a ∧ b) ∨ c ↔ (a ∨ c) ∧ (b ∨ c) :=
(or.comm.trans or_and_distrib_left).trans (and_congr or.comm or.comm)
/- iff -/
theorem iff_of_true (ha : a) (hb : b) : a ↔ b :=
⟨λ_, hb, λ _, ha⟩
theorem iff_of_false (ha : ¬a) (hb : ¬b) : a ↔ b :=
⟨ha.elim, hb.elim⟩
theorem iff_true_left (ha : a) : (a ↔ b) ↔ b :=
⟨λ h, h.1 ha, iff_of_true ha⟩
theorem iff_true_right (ha : a) : (b ↔ a) ↔ b :=
iff.comm.trans (iff_true_left ha)
theorem iff_false_left (ha : ¬a) : (a ↔ b) ↔ ¬b :=
⟨λ h, mt h.2 ha, iff_of_false ha⟩
theorem iff_false_right (ha : ¬a) : (b ↔ a) ↔ ¬b :=
iff.comm.trans (iff_false_left ha)
theorem not_or_of_imp [decidable a] (h : a → b) : ¬ a ∨ b :=
if ha : a then or.inr (h ha) else or.inl ha
theorem imp_iff_not_or [decidable a] : (a → b) ↔ (¬ a ∨ b) :=
⟨not_or_of_imp, or.neg_resolve_left⟩
theorem imp_or_distrib [decidable a] : (a → b ∨ c) ↔ (a → b) ∨ (a → c) :=
by simp [imp_iff_not_or, or.comm, or.left_comm]
theorem imp_or_distrib' [decidable b] : (a → b ∨ c) ↔ (a → b) ∨ (a → c) :=
by by_cases b; simp [h, or_iff_right_of_imp ((∘) false.elim)]
theorem not_imp_of_and_not : a ∧ ¬ b → ¬ (a → b)
| ⟨ha, hb⟩ h := hb $ h ha
@[simp] theorem not_imp [decidable a] : ¬(a → b) ↔ a ∧ ¬b :=
⟨λ h, ⟨of_not_imp h, not_of_not_imp h⟩, not_imp_of_and_not⟩
-- for monotonicity
lemma imp_imp_imp
(h₀ : c → a) (h₁ : b → d) :
(a → b) → (c → d) :=
assume (h₂ : a → b),
h₁ ∘ h₂ ∘ h₀
theorem peirce (a b : Prop) [decidable a] : ((a → b) → a) → a :=
if ha : a then λ h, ha else λ h, h ha.elim
theorem peirce' {a : Prop} (H : ∀ b : Prop, (a → b) → a) : a := H _ id
theorem not_iff_not [decidable a] [decidable b] : (¬ a ↔ ¬ b) ↔ (a ↔ b) :=
by rw [@iff_def (¬ a), @iff_def' a]; exact and_congr not_imp_not not_imp_not
theorem not_iff_comm [decidable a] [decidable b] : (¬ a ↔ b) ↔ (¬ b ↔ a) :=
by rw [@iff_def (¬ a), @iff_def (¬ b)]; exact and_congr not_imp_comm imp_not_comm
theorem not_iff [decidable a] [decidable b] : ¬ (a ↔ b) ↔ (¬ a ↔ b) :=
by split; intro h; [split, skip]; intro h'; [by_contradiction,intro,skip];
try { refine h _; simp [*] }; rw [h',not_iff_self] at h; exact h
theorem iff_not_comm [decidable a] [decidable b] : (a ↔ ¬ b) ↔ (b ↔ ¬ a) :=
by rw [@iff_def a, @iff_def b]; exact and_congr imp_not_comm not_imp_comm
theorem iff_iff_and_or_not_and_not [decidable b] : (a ↔ b) ↔ (a ∧ b) ∨ (¬ a ∧ ¬ b) :=
by { split; intro h,
{ rw h; by_cases b; [left,right]; split; assumption },
{ cases h with h h; cases h; split; intro; { contradiction <|> assumption } } }
@[simp] theorem not_and_not_right [decidable b] : ¬(a ∧ ¬b) ↔ (a → b) :=
⟨λ h ha, h.imp_symm $ and.intro ha, λ h ⟨ha, hb⟩, hb $ h ha⟩
@[inline] def decidable_of_iff (a : Prop) (h : a ↔ b) [D : decidable a] : decidable b :=
decidable_of_decidable_of_iff D h
@[inline] def decidable_of_iff' (b : Prop) (h : a ↔ b) [D : decidable b] : decidable a :=
decidable_of_decidable_of_iff D h.symm
def decidable_of_bool : ∀ (b : bool) (h : b ↔ a), decidable a
| tt h := is_true (h.1 rfl)
| ff h := is_false (mt h.2 bool.ff_ne_tt)
/- de morgan's laws -/
theorem not_and_of_not_or_not (h : ¬ a ∨ ¬ b) : ¬ (a ∧ b)
| ⟨ha, hb⟩ := or.elim h (absurd ha) (absurd hb)
theorem not_and_distrib [decidable a] : ¬ (a ∧ b) ↔ ¬a ∨ ¬b :=
⟨λ h, if ha : a then or.inr (λ hb, h ⟨ha, hb⟩) else or.inl ha, not_and_of_not_or_not⟩
theorem not_and_distrib' [decidable b] : ¬ (a ∧ b) ↔ ¬a ∨ ¬b :=
⟨λ h, if hb : b then or.inl (λ ha, h ⟨ha, hb⟩) else or.inr hb, not_and_of_not_or_not⟩
@[simp] theorem not_and : ¬ (a ∧ b) ↔ (a → ¬ b) := and_imp
theorem not_and' : ¬ (a ∧ b) ↔ b → ¬a :=
not_and.trans imp_not_comm
theorem not_or_distrib : ¬ (a ∨ b) ↔ ¬ a ∧ ¬ b :=
⟨λ h, ⟨λ ha, h (or.inl ha), λ hb, h (or.inr hb)⟩,
λ ⟨h₁, h₂⟩ h, or.elim h h₁ h₂⟩
theorem or_iff_not_and_not [decidable a] [decidable b] : a ∨ b ↔ ¬ (¬a ∧ ¬b) :=
by rw [← not_or_distrib, not_not]
theorem and_iff_not_or_not [decidable a] [decidable b] : a ∧ b ↔ ¬ (¬ a ∨ ¬ b) :=
by rw [← not_and_distrib, not_not]
end propositional
/- equality -/
section equality
variables {α : Sort*} {a b : α}
@[simp] theorem heq_iff_eq : a == b ↔ a = b :=
⟨eq_of_heq, heq_of_eq⟩
theorem proof_irrel_heq {p q : Prop} (hp : p) (hq : q) : hp == hq :=
have p = q, from propext ⟨λ _, hq, λ _, hp⟩,
by subst q; refl
theorem ne_of_mem_of_not_mem {α β} [has_mem α β] {s : β} {a b : α}
(h : a ∈ s) : b ∉ s → a ≠ b :=
mt $ λ e, e ▸ h
theorem eq_equivalence : equivalence (@eq α) :=
⟨eq.refl, @eq.symm _, @eq.trans _⟩
lemma heq_of_eq_mp :
∀ {α β : Sort*} {a : α} {a' : β} (e : α = β) (h₂ : (eq.mp e a) = a'), a == a'
| α ._ a a' rfl h := eq.rec_on h (heq.refl _)
lemma rec_heq_of_heq {β} {C : α → Sort*} {x : C a} {y : β} (eq : a = b) (h : x == y) :
@eq.rec α a C x b eq == y :=
by subst eq; exact h
@[simp] lemma {u} eq_mpr_heq {α β : Sort u} (h : β = α) (x : α) : eq.mpr h x == x :=
by subst h; refl
protected lemma eq.congr {x₁ x₂ y₁ y₂ : α} (h₁ : x₁ = y₁) (h₂ : x₂ = y₂) :
(x₁ = x₂) ↔ (y₁ = y₂) :=
by { subst h₁, subst h₂ }
lemma congr_arg2 {α β γ : Type*} (f : α → β → γ) {x x' : α} {y y' : β}
(hx : x = x') (hy : y = y') : f x y = f x' y' :=
by { subst hx, subst hy }
end equality
/-
quantifiers
-/
section quantifiers
variables {α : Sort*} {p q : α → Prop} {b : Prop}
def Exists.imp := @exists_imp_exists
theorem forall_swap {α β} {p : α → β → Prop} : (∀ x y, p x y) ↔ ∀ y x, p x y :=
⟨function.swap, function.swap⟩
theorem exists_swap {α β} {p : α → β → Prop} : (∃ x y, p x y) ↔ ∃ y x, p x y :=
⟨λ ⟨x, y, h⟩, ⟨y, x, h⟩, λ ⟨y, x, h⟩, ⟨x, y, h⟩⟩
@[simp] theorem exists_imp_distrib : ((∃ x, p x) → b) ↔ ∀ x, p x → b :=
⟨λ h x hpx, h ⟨x, hpx⟩, λ h ⟨x, hpx⟩, h x hpx⟩
--theorem forall_not_of_not_exists (h : ¬ ∃ x, p x) : ∀ x, ¬ p x :=
--forall_imp_of_exists_imp h
theorem not_exists_of_forall_not (h : ∀ x, ¬ p x) : ¬ ∃ x, p x :=
exists_imp_distrib.2 h
@[simp] theorem not_exists : (¬ ∃ x, p x) ↔ ∀ x, ¬ p x :=
exists_imp_distrib
theorem not_forall_of_exists_not : (∃ x, ¬ p x) → ¬ ∀ x, p x
| ⟨x, hn⟩ h := hn (h x)
theorem not_forall {p : α → Prop}
[decidable (∃ x, ¬ p x)] [∀ x, decidable (p x)] :
(¬ ∀ x, p x) ↔ ∃ x, ¬ p x :=
⟨not.imp_symm $ λ nx x, nx.imp_symm $ λ h, ⟨x, h⟩,
not_forall_of_exists_not⟩
@[simp] theorem not_forall_not [decidable (∃ x, p x)] :
(¬ ∀ x, ¬ p x) ↔ ∃ x, p x :=
by haveI := decidable_of_iff (¬ ∃ x, p x) not_exists;
exact not_iff_comm.1 not_exists
@[simp] theorem not_exists_not [∀ x, decidable (p x)] :
(¬ ∃ x, ¬ p x) ↔ ∀ x, p x :=
by simp
@[simp] theorem forall_true_iff : (α → true) ↔ true :=
iff_true_intro (λ _, trivial)
-- Unfortunately this causes simp to loop sometimes, so we
-- add the 2 and 3 cases as simp lemmas instead
theorem forall_true_iff' (h : ∀ a, p a ↔ true) : (∀ a, p a) ↔ true :=
iff_true_intro (λ _, of_iff_true (h _))
@[simp] theorem forall_2_true_iff {β : α → Sort*} : (∀ a, β a → true) ↔ true :=
forall_true_iff' $ λ _, forall_true_iff
@[simp] theorem forall_3_true_iff {β : α → Sort*} {γ : Π a, β a → Sort*} :
(∀ a (b : β a), γ a b → true) ↔ true :=
forall_true_iff' $ λ _, forall_2_true_iff
@[simp] theorem forall_const (α : Sort*) [inhabited α] : (α → b) ↔ b :=
⟨λ h, h (arbitrary α), λ hb x, hb⟩
@[simp] theorem exists_const (α : Sort*) [inhabited α] : (∃ x : α, b) ↔ b :=
⟨λ ⟨x, h⟩, h, λ h, ⟨arbitrary α, h⟩⟩
theorem forall_and_distrib : (∀ x, p x ∧ q x) ↔ (∀ x, p x) ∧ (∀ x, q x) :=
⟨λ h, ⟨λ x, (h x).left, λ x, (h x).right⟩, λ ⟨h₁, h₂⟩ x, ⟨h₁ x, h₂ x⟩⟩
theorem exists_or_distrib : (∃ x, p x ∨ q x) ↔ (∃ x, p x) ∨ (∃ x, q x) :=
⟨λ ⟨x, hpq⟩, hpq.elim (λ hpx, or.inl ⟨x, hpx⟩) (λ hqx, or.inr ⟨x, hqx⟩),
λ hepq, hepq.elim (λ ⟨x, hpx⟩, ⟨x, or.inl hpx⟩) (λ ⟨x, hqx⟩, ⟨x, or.inr hqx⟩)⟩
@[simp] theorem exists_and_distrib_left {q : Prop} {p : α → Prop} :
(∃x, q ∧ p x) ↔ q ∧ (∃x, p x) :=
⟨λ ⟨x, hq, hp⟩, ⟨hq, x, hp⟩, λ ⟨hq, x, hp⟩, ⟨x, hq, hp⟩⟩
@[simp] theorem exists_and_distrib_right {q : Prop} {p : α → Prop} :
(∃x, p x ∧ q) ↔ (∃x, p x) ∧ q :=
by simp [and_comm]
@[simp] theorem forall_eq {a' : α} : (∀a, a = a' → p a) ↔ p a' :=
⟨λ h, h a' rfl, λ h a e, e.symm ▸ h⟩
@[simp] theorem exists_eq {a' : α} : ∃ a, a = a' := ⟨_, rfl⟩
@[simp] theorem exists_eq_left {a' : α} : (∃ a, a = a' ∧ p a) ↔ p a' :=
⟨λ ⟨a, e, h⟩, e ▸ h, λ h, ⟨_, rfl, h⟩⟩
@[simp] theorem exists_eq_right {a' : α} : (∃ a, p a ∧ a = a') ↔ p a' :=
(exists_congr $ by exact λ a, and.comm).trans exists_eq_left
@[simp] theorem forall_eq' {a' : α} : (∀a, a' = a → p a) ↔ p a' :=
by simp [@eq_comm _ a']
@[simp] theorem exists_eq_left' {a' : α} : (∃ a, a' = a ∧ p a) ↔ p a' :=
by simp [@eq_comm _ a']
@[simp] theorem exists_eq_right' {a' : α} : (∃ a, p a ∧ a' = a) ↔ p a' :=
by simp [@eq_comm _ a']
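-- Illustrative usage of the `exists_eq_*` family (not part of the original file;
-- `r` is an arbitrary hypothetical predicate): an existential guarded by an equation
-- collapses to a single instance.
example (r : ℕ → Prop) (n : ℕ) : (∃ m, m = n ∧ r m) ↔ r n := exists_eq_left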
theorem forall_or_of_or_forall (h : b ∨ ∀x, p x) (x) : b ∨ p x :=
h.imp_right $ λ h₂, h₂ x
theorem forall_or_distrib_left {q : Prop} {p : α → Prop} [decidable q] :
(∀x, q ∨ p x) ↔ q ∨ (∀x, p x) :=
⟨λ h, if hq : q then or.inl hq else or.inr $ λ x, (h x).resolve_left hq,
forall_or_of_or_forall⟩
@[simp] theorem exists_prop {p q : Prop} : (∃ h : p, q) ↔ p ∧ q :=
⟨λ ⟨h₁, h₂⟩, ⟨h₁, h₂⟩, λ ⟨h₁, h₂⟩, ⟨h₁, h₂⟩⟩
@[simp] theorem exists_false : ¬ (∃a:α, false) := assume ⟨a, h⟩, h
theorem Exists.fst {p : b → Prop} : Exists p → b
| ⟨h, _⟩ := h
theorem Exists.snd {p : b → Prop} : ∀ h : Exists p, p h.fst
| ⟨_, h⟩ := h
@[simp] theorem forall_prop_of_true {p : Prop} {q : p → Prop} (h : p) : (∀ h' : p, q h') ↔ q h :=
@forall_const (q h) p ⟨h⟩
@[simp] theorem exists_prop_of_true {p : Prop} {q : p → Prop} (h : p) : (∃ h' : p, q h') ↔ q h :=
@exists_const (q h) p ⟨h⟩
@[simp] theorem forall_prop_of_false {p : Prop} {q : p → Prop} (hn : ¬ p) : (∀ h' : p, q h') ↔ true :=
iff_true_intro $ λ h, hn.elim h
@[simp] theorem exists_prop_of_false {p : Prop} {q : p → Prop} : ¬ p → ¬ (∃ h' : p, q h') :=
mt Exists.fst
end quantifiers
/- classical versions -/
namespace classical
variables {α : Sort*} {p : α → Prop}
local attribute [instance] prop_decidable
protected theorem not_forall : (¬ ∀ x, p x) ↔ (∃ x, ¬ p x) := not_forall
protected theorem not_exists_not : (¬ ∃ x, ¬ p x) ↔ ∀ x, p x := not_exists_not
protected theorem forall_or_distrib_left {q : Prop} {p : α → Prop} :
(∀x, q ∨ p x) ↔ q ∨ (∀x, p x) :=
forall_or_distrib_left
theorem cases {p : Prop → Prop} (h1 : p true) (h2 : p false) : ∀a, p a :=
assume a, cases_on a h1 h2
theorem or_not {p : Prop} : p ∨ ¬ p :=
by_cases or.inl or.inr
protected theorem or_iff_not_imp_left {p q : Prop} : p ∨ q ↔ (¬ p → q) :=
or_iff_not_imp_left
protected theorem or_iff_not_imp_right {p q : Prop} : q ∨ p ↔ (¬ p → q) :=
or_iff_not_imp_right
protected lemma not_not {p : Prop} : ¬¬p ↔ p := not_not
protected lemma not_and_distrib {p q : Prop}: ¬(p ∧ q) ↔ ¬p ∨ ¬q := not_and_distrib
protected lemma imp_iff_not_or {a b : Prop} : a → b ↔ ¬a ∨ b := imp_iff_not_or
lemma iff_iff_not_or_and_or_not {a b : Prop} : (a ↔ b) ↔ ((¬a ∨ b) ∧ (a ∨ ¬b)) :=
begin
rw [iff_iff_implies_and_implies a b],
simp only [imp_iff_not_or, or.comm]
end
/- use shortened names to avoid conflict when classical namespace is open -/
noncomputable theorem dec (p : Prop) : decidable p := by apply_instance
noncomputable theorem dec_pred (p : α → Prop) : decidable_pred p := by apply_instance
noncomputable theorem dec_rel (p : α → α → Prop) : decidable_rel p := by apply_instance
noncomputable theorem dec_eq (α : Sort*) : decidable_eq α := by apply_instance
@[elab_as_eliminator]
noncomputable def {u} exists_cases {C : Sort u} (H0 : C) (H : ∀ a, p a → C) : C :=
if h : ∃ a, p a then H (classical.some h) (classical.some_spec h) else H0
lemma some_spec2 {α : Type*} {p : α → Prop} {h : ∃a, p a}
(q : α → Prop) (hpq : ∀a, p a → q a) : q (some h) :=
hpq _ $ some_spec _
/-- A version of classical.indefinite_description which is definitionally equal to a pair -/
noncomputable def subtype_of_exists {α : Type*} {P : α → Prop} (h : ∃ x, P x) : {x // P x} :=
⟨classical.some h, classical.some_spec h⟩
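-- A minimal illustrative check (not part of the original file): being definitionally
-- equal to a pair means the first component is literally `classical.some h`.
example {X : Type*} {P : X → Prop} (h : ∃ x, P x) :
  (subtype_of_exists h).val = classical.some h := rfl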
end classical
@[elab_as_eliminator]
noncomputable def {u} exists.classical_rec_on
{α} {p : α → Prop} (h : ∃ a, p a) {C : Sort u} (H : ∀ a, p a → C) : C :=
H (classical.some h) (classical.some_spec h)
/-
bounded quantifiers
-/
section bounded_quantifiers
variables {α : Sort*} {r p q : α → Prop} {P Q : ∀ x, p x → Prop} {b : Prop}
theorem bex_def : (∃ x (h : p x), q x) ↔ ∃ x, p x ∧ q x :=
⟨λ ⟨x, px, qx⟩, ⟨x, px, qx⟩, λ ⟨x, px, qx⟩, ⟨x, px, qx⟩⟩
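-- Illustrative usage (not part of the original file; `f` and `g` are arbitrary
-- hypothetical predicates): a bounded existential is just an existential over a conjunction.
example (f g : ℕ → Prop) : (∃ n (h : f n), g n) ↔ ∃ n, f n ∧ g n := bex_def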
theorem bex.elim {b : Prop} : (∃ x h, P x h) → (∀ a h, P a h → b) → b
| ⟨a, h₁, h₂⟩ h' := h' a h₁ h₂
theorem bex.intro (a : α) (h₁ : p a) (h₂ : P a h₁) : ∃ x (h : p x), P x h :=
⟨a, h₁, h₂⟩
theorem ball_congr (H : ∀ x h, P x h ↔ Q x h) :
(∀ x h, P x h) ↔ (∀ x h, Q x h) :=
forall_congr $ λ x, forall_congr (H x)
theorem bex_congr (H : ∀ x h, P x h ↔ Q x h) :
(∃ x h, P x h) ↔ (∃ x h, Q x h) :=
exists_congr $ λ x, exists_congr (H x)
theorem ball.imp_right (H : ∀ x h, (P x h → Q x h))
(h₁ : ∀ x h, P x h) (x h) : Q x h :=
H _ _ $ h₁ _ _
theorem bex.imp_right (H : ∀ x h, (P x h → Q x h)) :
(∃ x h, P x h) → ∃ x h, Q x h
| ⟨x, h, h'⟩ := ⟨_, _, H _ _ h'⟩
theorem ball.imp_left (H : ∀ x, p x → q x)
(h₁ : ∀ x, q x → r x) (x) (h : p x) : r x :=
h₁ _ $ H _ h
theorem bex.imp_left (H : ∀ x, p x → q x) :
(∃ x (_ : p x), r x) → ∃ x (_ : q x), r x
| ⟨x, hp, hr⟩ := ⟨x, H _ hp, hr⟩
theorem ball_of_forall (h : ∀ x, p x) (x) (_ : q x) : p x :=
h x
theorem forall_of_ball (H : ∀ x, p x) (h : ∀ x, p x → q x) (x) : q x :=
h x $ H x
theorem bex_of_exists (H : ∀ x, p x) : (∃ x, q x) → ∃ x (_ : p x), q x
| ⟨x, hq⟩ := ⟨x, H x, hq⟩
theorem exists_of_bex : (∃ x (_ : p x), q x) → ∃ x, q x
| ⟨x, _, hq⟩ := ⟨x, hq⟩
@[simp] theorem bex_imp_distrib : ((∃ x h, P x h) → b) ↔ (∀ x h, P x h → b) :=
by simp
theorem not_bex : (¬ ∃ x h, P x h) ↔ ∀ x h, ¬ P x h :=
bex_imp_distrib
theorem not_ball_of_bex_not : (∃ x h, ¬ P x h) → ¬ ∀ x h, P x h
| ⟨x, h, hp⟩ al := hp $ al x h
theorem not_ball [decidable (∃ x h, ¬ P x h)] [∀ x h, decidable (P x h)] :
(¬ ∀ x h, P x h) ↔ (∃ x h, ¬ P x h) :=
⟨not.imp_symm $ λ nx x h, nx.imp_symm $ λ h', ⟨x, h, h'⟩,
not_ball_of_bex_not⟩
theorem ball_true_iff (p : α → Prop) : (∀ x, p x → true) ↔ true :=
iff_true_intro (λ h hrx, trivial)
theorem ball_and_distrib : (∀ x h, P x h ∧ Q x h) ↔ (∀ x h, P x h) ∧ (∀ x h, Q x h) :=
iff.trans (forall_congr $ λ x, forall_and_distrib) forall_and_distrib
theorem bex_or_distrib : (∃ x h, P x h ∨ Q x h) ↔ (∃ x h, P x h) ∨ (∃ x h, Q x h) :=
iff.trans (exists_congr $ λ x, exists_or_distrib) exists_or_distrib
end bounded_quantifiers
namespace classical
local attribute [instance] prop_decidable
theorem not_ball {α : Sort*} {p : α → Prop} {P : Π (x : α), p x → Prop} :
(¬ ∀ x h, P x h) ↔ (∃ x h, ¬ P x h) := _root_.not_ball
end classical
section nonempty
universe variables u v w
variables {α : Type u} {β : Type v} {γ : α → Type w}
attribute [simp] nonempty_of_inhabited
lemma exists_true_iff_nonempty {α : Sort*} : (∃a:α, true) ↔ nonempty α :=
iff.intro (λ⟨a, _⟩, ⟨a⟩) (λ⟨a⟩, ⟨a, trivial⟩)
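-- Illustrative usage (not part of the original file): any witness, paired with
-- `trivial`, yields a `nonempty` proof through this equivalence.
example : nonempty ℕ := exists_true_iff_nonempty.mp ⟨0, trivial⟩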
@[simp] lemma nonempty_Prop {p : Prop} : nonempty p ↔ p :=
iff.intro (assume ⟨h⟩, h) (assume h, ⟨h⟩)
lemma not_nonempty_iff_imp_false {p : Prop} : ¬ nonempty α ↔ α → false :=
⟨λ h a, h ⟨a⟩, λ h ⟨a⟩, h a⟩
@[simp] lemma nonempty_sigma : nonempty (Σa:α, γ a) ↔ (∃a:α, nonempty (γ a)) :=
iff.intro (assume ⟨⟨a, c⟩⟩, ⟨a, ⟨c⟩⟩) (assume ⟨a, ⟨c⟩⟩, ⟨⟨a, c⟩⟩)
@[simp] lemma nonempty_subtype {α : Sort u} {p : α → Prop} : nonempty (subtype p) ↔ (∃a:α, p a) :=
iff.intro (assume ⟨⟨a, h⟩⟩, ⟨a, h⟩) (assume ⟨a, h⟩, ⟨⟨a, h⟩⟩)
@[simp] lemma nonempty_prod : nonempty (α × β) ↔ (nonempty α ∧ nonempty β) :=
iff.intro (assume ⟨⟨a, b⟩⟩, ⟨⟨a⟩, ⟨b⟩⟩) (assume ⟨⟨a⟩, ⟨b⟩⟩, ⟨⟨a, b⟩⟩)
@[simp] lemma nonempty_pprod {α : Sort u} {β : Sort v} :
nonempty (pprod α β) ↔ (nonempty α ∧ nonempty β) :=
iff.intro (assume ⟨⟨a, b⟩⟩, ⟨⟨a⟩, ⟨b⟩⟩) (assume ⟨⟨a⟩, ⟨b⟩⟩, ⟨⟨a, b⟩⟩)
@[simp] lemma nonempty_sum : nonempty (α ⊕ β) ↔ (nonempty α ∨ nonempty β) :=
iff.intro
(assume ⟨h⟩, match h with sum.inl a := or.inl ⟨a⟩ | sum.inr b := or.inr ⟨b⟩ end)
(assume h, match h with or.inl ⟨a⟩ := ⟨sum.inl a⟩ | or.inr ⟨b⟩ := ⟨sum.inr b⟩ end)
@[simp] lemma nonempty_psum {α : Sort u} {β : Sort v} :
nonempty (psum α β) ↔ (nonempty α ∨ nonempty β) :=
iff.intro
(assume ⟨h⟩, match h with psum.inl a := or.inl ⟨a⟩ | psum.inr b := or.inr ⟨b⟩ end)
(assume h, match h with or.inl ⟨a⟩ := ⟨psum.inl a⟩ | or.inr ⟨b⟩ := ⟨psum.inr b⟩ end)
@[simp] lemma nonempty_psigma {α : Sort u} {β : α → Sort v} :
nonempty (psigma β) ↔ (∃a:α, nonempty (β a)) :=
iff.intro (assume ⟨⟨a, c⟩⟩, ⟨a, ⟨c⟩⟩) (assume ⟨a, ⟨c⟩⟩, ⟨⟨a, c⟩⟩)
@[simp] lemma nonempty_empty : ¬ nonempty empty :=
assume ⟨h⟩, h.elim
@[simp] lemma nonempty_ulift : nonempty (ulift α) ↔ nonempty α :=
iff.intro (assume ⟨⟨a⟩⟩, ⟨a⟩) (assume ⟨a⟩, ⟨⟨a⟩⟩)
@[simp] lemma nonempty_plift {α : Sort u} : nonempty (plift α) ↔ nonempty α :=
iff.intro (assume ⟨⟨a⟩⟩, ⟨a⟩) (assume ⟨a⟩, ⟨⟨a⟩⟩)
@[simp] lemma nonempty.forall {α : Sort u} {p : nonempty α → Prop} :
(∀h:nonempty α, p h) ↔ (∀a, p ⟨a⟩) :=
iff.intro (assume h a, h _) (assume h ⟨a⟩, h _)
@[simp] lemma nonempty.exists {α : Sort u} {p : nonempty α → Prop} :
(∃h:nonempty α, p h) ↔ (∃a, p ⟨a⟩) :=
iff.intro (assume ⟨⟨a⟩, h⟩, ⟨a, h⟩) (assume ⟨a, h⟩, ⟨⟨a⟩, h⟩)
lemma classical.nonempty_pi {α : Sort u} {β : α → Sort v} :
nonempty (Πa:α, β a) ↔ (∀a:α, nonempty (β a)) :=
iff.intro (assume ⟨f⟩ a, ⟨f a⟩) (assume f, ⟨assume a, classical.choice $ f a⟩)
-- inhabited_of_nonempty already exists, in core/init/classical.lean, but there the
-- `nonempty` assumption is not an instance-implicit argument `[...]`, which makes it
-- unsuitable for some applications
noncomputable def classical.inhabited_of_nonempty' {α : Sort u} [h : nonempty α] : inhabited α :=
⟨classical.choice h⟩
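-- Illustrative usage (not part of the original file): with the instance-implicit
-- version an element can be produced from `[nonempty X]` alone; the statement below is
-- a `Prop`, so the noncomputability of the instance does not get in the way.
example {X : Sort*} [nonempty X] : ∃ x : X, x = x :=
⟨@arbitrary X classical.inhabited_of_nonempty', rfl⟩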
-- `nonempty` cannot be a `functor`, because `functor` is restricted to Types.
lemma nonempty.map {α : Sort u} {β : Sort v} (f : α → β) : nonempty α → nonempty β
| ⟨h⟩ := ⟨f h⟩
protected lemma nonempty.map2 {α β γ : Sort*} (f : α → β → γ) : nonempty α → nonempty β → nonempty γ
| ⟨x⟩ ⟨y⟩ := ⟨f x y⟩
protected lemma nonempty.congr {α : Sort u} {β : Sort v} (f : α → β) (g : β → α) :
nonempty α ↔ nonempty β :=
⟨nonempty.map f, nonempty.map g⟩
end nonempty
|
ce2d4b46db21786218a52528343e8acad50d7258
|
af6139dd14451ab8f69cf181cf3a20f22bd699be
|
/library/tools/super/prover_state.lean
|
ba43d3469984bbd674f4188ac7706597daefdac1
|
[
"Apache-2.0"
] |
permissive
|
gitter-badger/lean-1
|
1cca01252d3113faa45681b6a00e1b5e3a0f6203
|
5c7ade4ee4f1cdf5028eabc5db949479d6737c85
|
refs/heads/master
| 1,611,425,383,521
| 1,487,871,140,000
| 1,487,871,140,000
| 82,995,612
| 0
| 0
| null | 1,487,905,618,000
| 1,487,905,618,000
| null |
UTF-8
|
Lean
| false
| false
| 15,599
|
lean
|
/-
Copyright (c) 2016 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner
-/
import .clause .lpo .cdcl_solver
open tactic monad expr
namespace super
structure score :=
(priority : ℕ)
(in_sos : bool)
(cost : ℕ)
(age : ℕ)
namespace score
def prio.immediate : ℕ := 0
def prio.default : ℕ := 1
def prio.never : ℕ := 2
def sched_default (sc : score) : score := { sc with priority := prio.default }
def sched_now (sc : score) : score := { sc with priority := prio.immediate }
def inc_cost (sc : score) (n : ℕ) : score := { sc with cost := sc^.cost + n }
def min (a b : score) : score :=
{ priority := nat.min a^.priority b^.priority,
in_sos := a^.in_sos && b^.in_sos,
cost := nat.min a^.cost b^.cost,
age := nat.min a^.age b^.age }
def combine (a b : score) : score :=
{ priority := nat.max a^.priority b^.priority,
in_sos := a^.in_sos && b^.in_sos,
cost := a^.cost + b^.cost,
age := nat.max a^.age b^.age }
end score
namespace score
meta instance : has_to_string score :=
⟨λe, "[" ++ to_string e^.priority ++
"," ++ to_string e^.cost ++
"," ++ to_string e^.age ++
",sos=" ++ to_string e^.in_sos ++ "]"⟩
end score
def clause_id := ℕ
namespace clause_id
def to_nat (id : clause_id) : ℕ := id
instance : decidable_eq clause_id := nat.decidable_eq
instance : has_ordering clause_id := nat.has_ordering
end clause_id
meta structure derived_clause :=
(id : clause_id)
(c : clause)
(selected : list ℕ)
(assertions : list expr)
(sc : score)
namespace derived_clause
meta instance : has_to_tactic_format derived_clause :=
⟨λc, do
prf_fmt ← pp c^.c^.proof,
c_fmt ← pp c^.c,
ass_fmt ← pp (c^.assertions^.for (λa, a^.local_type)),
return $
to_string c^.sc ++ " " ++
prf_fmt ++ " " ++
c_fmt ++ " <- " ++ ass_fmt ++
" (selected: " ++ to_fmt c^.selected ++
")"
⟩
meta def clause_with_assertions (ac : derived_clause) : clause :=
ac^.c^.close_constn ac^.assertions
end derived_clause
meta structure locked_clause :=
(dc : derived_clause)
(reasons : list (list expr))
namespace locked_clause
meta instance : has_to_tactic_format locked_clause :=
⟨λc, do
c_fmt ← pp c^.dc,
reasons_fmt ← pp (c^.reasons^.for (λr, r^.for (λa, a^.local_type))),
return $ c_fmt ++ " (locked in case of: " ++ reasons_fmt ++ ")"
⟩
end locked_clause
meta structure prover_state :=
(active : rb_map clause_id derived_clause)
(passive : rb_map clause_id derived_clause)
(newly_derived : list derived_clause)
(prec : list expr)
(locked : list locked_clause)
(local_false : expr)
(sat_solver : cdcl.state)
(current_model : rb_map expr bool)
(sat_hyps : rb_map expr (expr × expr))
(needs_sat_run : bool)
(clause_counter : nat)
open prover_state
private meta def join_with_nl : list format → format :=
list.foldl (λx y, x ++ format.line ++ y) format.nil
private meta def prover_state_tactic_fmt (s : prover_state) : tactic format := do
active_fmts ← mapm pp $ rb_map.values s^.active,
passive_fmts ← mapm pp $ rb_map.values s^.passive,
new_fmts ← mapm pp s^.newly_derived,
locked_fmts ← mapm pp s^.locked,
sat_fmts ← mapm pp s^.sat_solver^.clauses,
sat_model_fmts ← for s^.current_model^.to_list (λx, if x.2 = tt then pp x.1 else pp (not_ x.1)),
prec_fmts ← mapm pp s^.prec,
return (join_with_nl
([to_fmt "active:"] ++ map (append (to_fmt " ")) active_fmts ++
[to_fmt "passive:"] ++ map (append (to_fmt " ")) passive_fmts ++
[to_fmt "new:"] ++ map (append (to_fmt " ")) new_fmts ++
[to_fmt "locked:"] ++ map (append (to_fmt " ")) locked_fmts ++
[to_fmt "sat formulas:"] ++ map (append (to_fmt " ")) sat_fmts ++
[to_fmt "sat model:"] ++ map (append (to_fmt " ")) sat_model_fmts ++
[to_fmt "precedence order: " ++ to_fmt prec_fmts]))
meta instance : has_to_tactic_format prover_state :=
⟨prover_state_tactic_fmt⟩
meta def prover := state_t prover_state tactic
namespace prover
meta instance : monad prover := state_t.monad _ _
meta instance : has_monad_lift tactic prover :=
monad.monad_transformer_lift (state_t prover_state) tactic
meta instance (α : Type) : has_coe (tactic α) (prover α) :=
⟨monad.monad_lift⟩
meta def fail {α β : Type} [has_to_format β] (msg : β) : prover α :=
tactic.fail msg
meta def orelse (A : Type) (p1 p2 : prover A) : prover A :=
take state, p1 state <|> p2 state
meta instance : alternative prover :=
{ monad_is_applicative prover with
failure := λα, fail "failed",
orelse := orelse }
end prover
meta def selection_strategy := derived_clause → prover derived_clause
meta def get_active : prover (rb_map clause_id derived_clause) :=
do state ← state_t.read, return state^.active
meta def add_active (a : derived_clause) : prover unit :=
do state ← state_t.read,
state_t.write { state with active := state^.active^.insert a^.id a }
meta def get_passive : prover (rb_map clause_id derived_clause) :=
lift passive state_t.read
meta def get_precedence : prover (list expr) :=
do state ← state_t.read, return state^.prec
meta def get_term_order : prover (expr → expr → bool) := do
state ← state_t.read,
return $ mk_lpo (map name_of_funsym state^.prec)
private meta def set_precedence (new_prec : list expr) : prover unit :=
do state ← state_t.read, state_t.write { state with prec := new_prec }
meta def register_consts_in_precedence (consts : list expr) := do
p ← get_precedence,
p_set ← return (rb_map.set_of_list (map name_of_funsym p)),
new_syms ← return $ list.filter (λc, ¬p_set^.contains (name_of_funsym c)) consts,
set_precedence (new_syms ++ p)
meta def in_sat_solver {A} (cmd : cdcl.solver A) : prover A := do
state ← state_t.read,
result ← cmd state^.sat_solver,
state_t.write { state with sat_solver := result.2 },
return result.1
meta def collect_ass_hyps (c : clause) : prover (list expr) :=
let lcs := contained_lconsts c^.proof in
do st ← state_t.read,
return (do
hs ← st^.sat_hyps^.values,
h ← [hs.1, hs.2],
guard $ lcs^.contains h^.local_uniq_name,
[h])
meta def get_clause_count : prover ℕ :=
do s ← state_t.read, return s^.clause_counter
meta def get_new_cls_id : prover clause_id := do
state ← state_t.read,
state_t.write { state with clause_counter := state^.clause_counter + 1 },
return state^.clause_counter
meta def mk_derived (c : clause) (sc : score) : prover derived_clause := do
ass ← collect_ass_hyps c,
id ← get_new_cls_id,
return { id := id, c := c, selected := [], assertions := ass, sc := sc }
meta def add_inferred (c : derived_clause) : prover unit := do
c' ← c^.c^.normalize, c' ← return { c with c := c' },
register_consts_in_precedence (contained_funsyms c'^.c^.type)^.values,
state ← state_t.read,
state_t.write { state with newly_derived := c' :: state^.newly_derived }
-- FIXME: what if we've seen the variable before, but with a weaker score?
meta def mk_sat_var (v : expr) (suggested_ph : bool) (suggested_ev : score) : prover unit :=
do st ← state_t.read, if st^.sat_hyps^.contains v then return () else do
hpv ← mk_local_def `h v,
hnv ← mk_local_def `hn $ imp v st^.local_false,
state_t.modify $ λst, { st with sat_hyps := st^.sat_hyps^.insert v (hpv, hnv) },
in_sat_solver $ cdcl.mk_var_core v suggested_ph,
match v with
| (pi _ _ _ _) := do
c ← clause.of_proof st^.local_false hpv,
mk_derived c suggested_ev >>= add_inferred
| _ := do cp ← clause.of_proof st^.local_false hpv, mk_derived cp suggested_ev >>= add_inferred,
cn ← clause.of_proof st^.local_false hnv, mk_derived cn suggested_ev >>= add_inferred
end
meta def get_sat_hyp_core (v : expr) (ph : bool) : prover (option expr) :=
flip monad.lift state_t.read $ λst,
match st^.sat_hyps^.find v with
| some (hp, hn) := some $ if ph then hp else hn
| none := none
end
meta def get_sat_hyp (v : expr) (ph : bool) : prover expr :=
do hyp_opt ← get_sat_hyp_core v ph,
match hyp_opt with
| some hyp := return hyp
| none := fail $ "unknown sat variable: " ++ v^.to_string
end
meta def add_sat_clause (c : clause) (suggested_ev : score) : prover unit := do
c ← c^.distinct,
already_added ← flip monad.lift state_t.read $ λst, decidable.to_bool $
c^.type ∈ st^.sat_solver^.clauses^.for (λd, d^.type),
if already_added then return () else do
for c^.get_lits $ λl, mk_sat_var l^.formula l^.is_neg suggested_ev,
in_sat_solver $ cdcl.mk_clause c,
state_t.modify $ λst, { st with needs_sat_run := tt }
meta def sat_eval_lit (v : expr) (pol : bool) : prover bool :=
do v_st ← flip monad.lift state_t.read $ λst, st^.current_model^.find v,
match v_st with
| some ph := return $ if pol then ph else bnot ph
| none := return tt
end
meta def sat_eval_assertion (assertion : expr) : prover bool :=
do lf ← flip monad.lift state_t.read $ λst, st^.local_false,
match is_local_not lf assertion^.local_type with
| some v := sat_eval_lit v ff
| none := sat_eval_lit assertion^.local_type tt
end
meta def sat_eval_assertions : list expr → prover bool
| (a::ass) := do v_a ← sat_eval_assertion a,
if v_a then
sat_eval_assertions ass
else
return ff
| [] := return tt
private meta def intern_clause (c : derived_clause) : prover derived_clause := do
hyp_name ← get_unused_name (mk_simple_name $ "clause_" ++ to_string c^.id^.to_nat) none,
c' ← return $ c^.c^.close_constn c^.assertions,
assertv hyp_name c'^.type c'^.proof,
proof' ← get_local hyp_name,
type ← infer_type proof', -- FIXME: otherwise ""
return { c with c := { (c^.c : clause) with proof := app_of_list proof' c^.assertions } }
meta def register_as_passive (c : derived_clause) : prover unit := do
c ← intern_clause c,
ass_v ← sat_eval_assertions c^.assertions,
if c^.c^.num_quants = 0 ∧ c^.c^.num_lits = 0 then
add_sat_clause c^.clause_with_assertions c^.sc
else if ¬ass_v then do
state_t.modify $ λst, { st with locked := ⟨c, []⟩ :: st^.locked }
else do
state_t.modify $ λst, { st with passive := st^.passive^.insert c^.id c }
meta def remove_passive (id : clause_id) : prover unit :=
do state ← state_t.read, state_t.write { state with passive := state^.passive^.erase id }
meta def move_locked_to_passive : prover unit := do
locked ← flip monad.lift state_t.read (λst, st^.locked),
new_locked ← flip filter locked (λlc, do
reason_vals ← mapm sat_eval_assertions lc^.reasons,
c_val ← sat_eval_assertions lc^.dc^.assertions,
if reason_vals^.for_all (λr, r = ff) ∧ c_val then do
state_t.modify $ λst, { st with passive := st^.passive^.insert lc^.dc^.id lc^.dc },
return ff
else
return tt
),
state_t.modify $ λst, { st with locked := new_locked }
meta def move_active_to_locked : prover unit :=
do active ← get_active, for' active^.values $ λac, do
c_val ← sat_eval_assertions ac^.assertions,
if ¬c_val then do
state_t.modify $ λst, { st with
active := st^.active^.erase ac^.id,
locked := ⟨ac, []⟩ :: st^.locked
}
else
return ()
meta def move_passive_to_locked : prover unit :=
do passive ← flip monad.lift state_t.read $ λst, st^.passive, for' passive^.to_list $ λpc, do
c_val ← sat_eval_assertions pc.2^.assertions,
if ¬c_val then do
state_t.modify $ λst, { st with
passive := st^.passive^.erase pc.1,
locked := ⟨pc.2, []⟩ :: st^.locked
}
else
return ()
def super_cc_config : cc_config :=
{ em := ff }
meta def do_sat_run : prover (option expr) :=
do sat_result ← in_sat_solver $ cdcl.run (cdcl.theory_solver_of_tactic $ using_smt $ return ()),
state_t.modify $ λst, { st with needs_sat_run := ff },
old_model ← lift prover_state.current_model state_t.read,
match sat_result with
| (cdcl.result.unsat proof) := return (some proof)
| (cdcl.result.sat new_model) := do
state_t.modify $ λst, { st with current_model := new_model },
move_locked_to_passive,
move_active_to_locked,
move_passive_to_locked,
return none
end
meta def take_newly_derived : prover (list derived_clause) := do
state ← state_t.read,
state_t.write { state with newly_derived := [] },
return state^.newly_derived
meta def remove_redundant (id : clause_id) (parents : list derived_clause) : prover unit := do
when (not $ parents^.for_all $ λp, p^.id ≠ id) (fail "clause is redundant because of itself"),
red ← flip monad.lift state_t.read (λst, st^.active^.find id),
match red with
| none := return ()
| some red := do
let reasons := parents^.for (λp, p^.assertions),
assertion := red^.assertions in
if reasons^.for_all $ λr, r^.subset_of assertion then do
state_t.modify $ λst, { st with active := st^.active^.erase id }
else do
state_t.modify $ λst, { st with active := st^.active^.erase id,
locked := ⟨red, reasons⟩ :: st^.locked }
end
meta def inference := derived_clause → prover unit
meta structure inf_decl := (prio : ℕ) (inf : inference)
meta def inf_attr : user_attribute :=
⟨ `super.inf, "inference for the super prover" ⟩
run_command attribute.register `super.inf_attr
meta def seq_inferences : list inference → inference
| [] := λgiven, return ()
| (inf::infs) := λgiven, do
inf given,
now_active ← get_active,
if rb_map.contains now_active given^.id then
seq_inferences infs given
else
return ()
meta def simp_inference (simpl : derived_clause → prover (option clause)) : inference :=
λgiven, do maybe_simpld ← simpl given,
match maybe_simpld with
| some simpld := do
derived_simpld ← mk_derived simpld given^.sc^.sched_now,
add_inferred derived_simpld,
remove_redundant given^.id []
| none := return ()
end
meta def preprocessing_rule (f : list derived_clause → prover (list derived_clause)) : prover unit := do
state ← state_t.read,
newly_derived' ← f state^.newly_derived,
state' ← state_t.read,
state_t.write { state' with newly_derived := newly_derived' }
meta def clause_selection_strategy := ℕ → prover clause_id
namespace prover_state
meta def empty (local_false : expr) : prover_state :=
{ active := rb_map.mk _ _, passive := rb_map.mk _ _,
newly_derived := [], prec := [], clause_counter := 0,
local_false := local_false,
locked := [], sat_solver := cdcl.state.initial local_false,
current_model := rb_map.mk _ _, sat_hyps := rb_map.mk _ _, needs_sat_run := ff }
meta def initial (local_false : expr) (clauses : list clause) : tactic prover_state := do
after_setup ← for' clauses (λc,
let in_sos := ((contained_lconsts c^.proof)^.erase local_false^.local_uniq_name)^.size = 0 in
do mk_derived c { priority := score.prio.immediate, in_sos := in_sos,
age := 0, cost := 0 } >>= add_inferred
) $ empty local_false,
return after_setup.2
end prover_state
meta def inf_score (add_cost : ℕ) (scores : list score) : prover score := do
age ← get_clause_count,
return $ list.foldl score.combine { priority := score.prio.default,
in_sos := tt,
age := age,
cost := add_cost
} scores
meta def inf_if_successful (add_cost : ℕ) (parent : derived_clause) (tac : tactic (list clause)) : prover unit :=
(do inferred ← tac,
for' inferred $ λc,
inf_score add_cost [parent^.sc] >>= mk_derived c >>= add_inferred)
<|> return ()
meta def simp_if_successful (parent : derived_clause) (tac : tactic (list clause)) : prover unit :=
(do inferred ← tac,
for' inferred $ λc,
mk_derived c parent^.sc^.sched_now >>= add_inferred,
remove_redundant parent^.id [])
<|> return ()
end super
|
56a8285757cde2fb26bc905181fdbbc95dadfacc
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/data/fintype/fin.lean
|
ab9824e81ff668dbbe9f70d47870b8323feb1a31
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,169
|
lean
|
/-
Copyright (c) 2021 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen
-/
import data.fin.interval
/-!
# The structure of `fintype (fin n)`
This file contains some basic results about the `fintype` instance for `fin`,
especially properties of `finset.univ : finset (fin n)`.
-/
open finset
open fintype
namespace fin
variables {α β : Type*} {n : ℕ}
@[simp] lemma Ioi_zero_eq_map :
Ioi (0 : fin n.succ) = univ.map (fin.succ_embedding _).to_embedding :=
begin
ext i,
simp only [mem_Ioi, mem_map, mem_univ, function.embedding.coe_fn_mk, exists_true_left],
split,
{ refine cases _ _ i,
{ rintro ⟨⟨⟩⟩ },
{ intros j _, exact ⟨j, rfl⟩ } },
{ rintro ⟨i, _, rfl⟩,
exact succ_pos _ },
end
@[simp] lemma Ioi_succ (i : fin n) :
Ioi i.succ = (Ioi i).map (fin.succ_embedding _).to_embedding :=
begin
ext i,
simp only [mem_filter, mem_Ioi, mem_map, mem_univ, true_and,
function.embedding.coe_fn_mk, exists_true_left],
split,
{ refine cases _ _ i,
{ rintro ⟨⟨⟩⟩ },
{ intros i hi,
refine ⟨i, succ_lt_succ_iff.mp hi, rfl⟩ } },
{ rintro ⟨i, hi, rfl⟩, simpa },
end
lemma card_filter_univ_succ' (p : fin (n + 1) → Prop) [decidable_pred p] :
(univ.filter p).card = (ite (p 0) 1 0) + (univ.filter (p ∘ fin.succ)).card :=
begin
rw [fin.univ_succ, filter_cons, card_disj_union, map_filter, card_map],
split_ifs; simp,
end
lemma card_filter_univ_succ (p : fin (n + 1) → Prop) [decidable_pred p] :
(univ.filter p).card =
if p 0 then (univ.filter (p ∘ fin.succ)).card + 1 else (univ.filter (p ∘ fin.succ)).card :=
(card_filter_univ_succ' p).trans (by split_ifs; simp [add_comm 1])
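-- A small illustrative computation (not part of the original file): in `fin 3`,
-- exactly one element satisfies `i = 0`, and the kernel can verify this by evaluation.
example : (univ.filter (λ i : fin 3, i = 0)).card = 1 := dec_trivial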
lemma card_filter_univ_eq_vector_nth_eq_count [decidable_eq α] (a : α) (v : vector α n) :
(univ.filter $ λ i, a = v.nth i).card = v.to_list.count a :=
begin
induction v using vector.induction_on with n x xs hxs,
{ simp },
{ simp_rw [card_filter_univ_succ', vector.nth_cons_zero, vector.to_list_cons,
function.comp, vector.nth_cons_succ, hxs, list.count_cons', add_comm (ite (a = x) 1 0)] }
end
end fin
|
79203c72d154412f494ef591f93d559e92a853e2
|
a4673261e60b025e2c8c825dfa4ab9108246c32e
|
/stage0/src/Lean/Elab/DeclUtil.lean
|
67b6f16fdb103677e762c6d36bc5f68efe421c4a
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/lean4
|
c02dec0cc32c4bccab009285475f265f17d73228
|
2909313475588cc20ac0436e55548a4502050d0a
|
refs/heads/master
| 1,674,129,550,893
| 1,606,415,348,000
| 1,606,415,348,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 3,846
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Sebastian Ullrich
-/
import Lean.Meta.ExprDefEq
namespace Lean.Meta
def forallTelescopeCompatibleAux {α} (k : Array Expr → Expr → Expr → MetaM α) : Nat → Expr → Expr → Array Expr → MetaM α
| 0, type₁, type₂, xs => k xs type₁ type₂
| i+1, type₁, type₂, xs => do
let type₁ ← whnf type₁
let type₂ ← whnf type₂
match type₁, type₂ with
| Expr.forallE n₁ d₁ b₁ c₁, Expr.forallE n₂ d₂ b₂ c₂ =>
unless n₁ == n₂ do
throwError! "parameter name mismatch '{n₁}', expected '{n₂}'"
unless (← isDefEq d₁ d₂) do
throwError! "type mismatch at parameter '{n₁}'{indentExpr d₁}\nexpected type{indentExpr d₂}"
unless c₁.binderInfo == c₂.binderInfo do
throwError! "binder annotation mismatch at parameter '{n₁}'"
withLocalDecl n₁ c₁.binderInfo d₁ fun x =>
let type₁ := b₁.instantiate1 x
let type₂ := b₂.instantiate1 x
forallTelescopeCompatibleAux k i type₁ type₂ (xs.push x)
| _, _ => throwError "unexpected number of parameters"
/-- Given two forall-expressions `type₁` and `type₂`, ensure the first `numParams` parameters are compatible, and
then execute `k` with the parameters and remaining types. -/
@[inline] def forallTelescopeCompatible {α m} [Monad m] [MonadControlT MetaM m] (type₁ type₂ : Expr) (numParams : Nat) (k : Array Expr → Expr → Expr → m α) : m α :=
controlAt MetaM fun runInBase =>
forallTelescopeCompatibleAux (fun xs type₁ type₂ => runInBase $ k xs type₁ type₂) numParams type₁ type₂ #[]
end Meta
namespace Elab
def expandOptDeclSig (stx : Syntax) : Syntax × Option Syntax :=
-- many Term.bracketedBinder >> Term.optType
let binders := stx[0]
let optType := stx[1] -- optional (parser! " : " >> termParser)
if optType.isNone then
(binders, none)
else
let typeSpec := optType[0]
(binders, some typeSpec[1])
def expandDeclSig (stx : Syntax) : Syntax × Syntax :=
-- many Term.bracketedBinder >> Term.typeSpec
let binders := stx[0]
let typeSpec := stx[1]
(binders, typeSpec[1])
def mkFreshInstanceName (env : Environment) (nextIdx : Nat) : Name :=
(env.mainModule ++ `_instance).appendIndexAfter nextIdx
def isFreshInstanceName (name : Name) : Bool :=
match name with
| Name.str _ s _ => "_instance".isPrefixOf s
| _ => false
/--
Sort the given list of `usedParams` using the following order:
- If it is an explicitly given level (i.e., it occurs in `allUserParams`), then use the user-given order.
- Otherwise, use the lexicographical order.
Remark: `scopeParams` are the universe params introduced using the `universe` command. `allUserParams` contains
the universe params introduced using the `universe` command *and* the `.{...}` notation.
Remark: this function returns an error if some `u` in `allUserParams` is neither in `usedParams` nor in `scopeParams`.
Remark: `explicitParams` are in reverse declaration order. That is, the head is the last declared parameter. -/
def sortDeclLevelParams (scopeParams : List Name) (allUserParams : List Name) (usedParams : Array Name) : Except String (List Name) :=
match allUserParams.find? $ fun u => !usedParams.contains u && !scopeParams.elem u with
| some u => throw s!"unused universe parameter '{u}'"
| none =>
let result := allUserParams.foldl (fun result levelName => if usedParams.elem levelName then levelName :: result else result) []
let remaining := usedParams.filter (fun levelParam => !allUserParams.elem levelParam)
let remaining := remaining.qsort Name.lt
pure $ result ++ remaining.toList
end Lean.Elab
|
518406ef352135d9738a45a207bc0aedd503dea5
|
5749d8999a76f3a8fddceca1f6941981e33aaa96
|
/src/topology/bases.lean
|
27413b094ba0aca119937d055083822c8aafcb44
|
[
"Apache-2.0"
] |
permissive
|
jdsalchow/mathlib
|
13ab43ef0d0515a17e550b16d09bd14b76125276
|
497e692b946d93906900bb33a51fd243e7649406
|
refs/heads/master
| 1,585,819,143,348
| 1,580,072,892,000
| 1,580,072,892,000
| 154,287,128
| 0
| 0
|
Apache-2.0
| 1,540,281,610,000
| 1,540,281,609,000
| null |
UTF-8
|
Lean
| false
| false
| 18,393
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro
Bases of topologies. Countability axioms.
-/
import topology.constructions data.set.countable
open set filter lattice classical
open_locale topological_space
namespace filter
universes u v
variables {α : Type u} {β : Type v}
/--
A filter has a countable basis iff it is generated by a countable collection
of subsets of α. (A filter is generated by a collection of sets iff it is
the infimum of the principal filters.)
Note: we do not require the collection to be closed under finite intersections.
-/
def has_countable_basis (f : filter α) : Prop :=
∃ s : set (set α), countable s ∧ f = ⨅ t ∈ s, principal t
lemma has_countable_basis_of_seq (f : filter α) (x : ℕ → set α) (h : f = ⨅ i, principal (x i)) :
f.has_countable_basis :=
⟨range x, countable_range _, by rwa infi_range⟩
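-- A minimal illustrative example (not part of the original file): a filter presented
-- as the infimum of a sequence of principal filters has a countable basis by definition.
example (x : ℕ → set ℕ) : (⨅ i, principal (x i)).has_countable_basis :=
has_countable_basis_of_seq _ x rfl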
lemma seq_of_has_countable_basis (f : filter α) (cblb : f.has_countable_basis) :
∃ x : ℕ → set α, f = ⨅ i, principal (x i) :=
begin
rcases cblb with ⟨B, Bcbl, gen⟩, subst gen,
classical, by_cases Bnonempty : B = ∅, { use λ n, set.univ, simp [principal_univ, *] },
rw countable_iff_exists_surjective_to_subtype Bnonempty at Bcbl,
rcases Bcbl with ⟨g, gsurj⟩,
rw lattice.infi_subtype',
use (λ n, g n), apply le_antisymm; rw le_infi_iff,
{ intro i, apply infi_le_of_le (g i) _, apply le_refl _ },
{ intros a, rcases gsurj a with i, apply infi_le_of_le i _, subst h, apply le_refl _ }
end
/--
Different characterization of countable basis. A filter has a countable basis
iff it is generated by a sequence of sets.
-/
lemma has_countable_basis_iff_seq (f : filter α) :
f.has_countable_basis ↔
∃ x : ℕ → set α, f = ⨅ i, principal (x i) :=
⟨seq_of_has_countable_basis _, λ ⟨x, xgen⟩, has_countable_basis_of_seq _ x xgen⟩
lemma mono_seq_of_has_countable_basis (f : filter α) (cblb : f.has_countable_basis) :
∃ x : ℕ → set α, (∀ i j, i ≤ j → x j ⊆ x i) ∧ f = ⨅ i, principal (x i) :=
begin
rcases (seq_of_has_countable_basis f cblb) with ⟨x', hx'⟩,
let x := λ n, ⋂ m ≤ n, x' m,
use x, split,
{ intros i j hij a, simp [x], intros h i' hi'i, apply h, transitivity; assumption },
subst hx', apply le_antisymm; rw le_infi_iff; intro i,
{ rw le_principal_iff, apply Inter_mem_sets (finite_le_nat _),
intros j hji, rw ← le_principal_iff, apply infi_le_of_le j _, apply le_refl _ },
{ apply infi_le_of_le i _, rw principal_mono, intro a, simp [x], intro h, apply h, refl },
end
/--
Different characterization of countable basis. A filter has a countable basis
iff it is generated by a monotonically decreasing sequence of sets.
-/
lemma has_countable_basis_iff_mono_seq (f : filter α) :
f.has_countable_basis ↔
∃ x : ℕ → set α, (∀ i j, i ≤ j → x j ⊆ x i) ∧ f = ⨅ i, principal (x i) :=
⟨mono_seq_of_has_countable_basis _, λ ⟨x, _, xgen⟩, has_countable_basis_of_seq _ x xgen⟩
/--
Different characterization of countable basis. A filter has a countable basis
iff there exists a monotonically decreasing sequence of sets `x i`
such that `s ∈ f ↔ ∃ i, x i ⊆ s`. -/
lemma has_countable_basis_iff_mono_seq' (f : filter α) :
f.has_countable_basis ↔
∃ x : ℕ → set α, (∀ i j, i ≤ j → x j ⊆ x i) ∧ (∀ {s}, s ∈ f ↔ ∃ i, x i ⊆ s) :=
begin
refine (has_countable_basis_iff_mono_seq f).trans (exists_congr $ λ x, and_congr_right _),
intro hmono,
have : directed (≥) (λ i, principal (x i)),
from directed_of_mono _ (λ i j hij, principal_mono.2 (hmono _ _ hij)),
simp only [filter.ext_iff, mem_infi this ⟨0⟩, mem_Union, mem_principal_sets]
end
lemma has_countable_basis.comap {l : filter β} (h : has_countable_basis l) (f : α → β) :
has_countable_basis (l.comap f) :=
begin
rcases h with ⟨S, h₁, h₂⟩,
refine ⟨preimage f '' S, countable_image _ h₁, _⟩,
calc comap f l = ⨅ s ∈ S, principal (f ⁻¹' s) : by simp [h₂]
... = ⨅ s ∈ S, ⨅ (t : set α) (H : f ⁻¹' s = t), principal t : by simp
... = ⨅ (t : set α) (s ∈ S) (h₂ : f ⁻¹' s = t), principal t :
by { rw [infi_comm], congr' 1, ext t, rw [infi_comm] }
... = _ : by simp [-infi_infi_eq_right, infi_and]
end
-- TODO : prove this for an encodable type
lemma has_countable_basis_at_top_finset_nat : has_countable_basis (@at_top (finset ℕ) _) :=
begin
refine has_countable_basis_of_seq _ (λN, Ici (finset.range N)) (eq_infi_of_mem_sets_iff_exists_mem _),
assume s,
rw mem_at_top_sets,
refine ⟨_, λ ⟨N, hN⟩, ⟨finset.range N, hN⟩⟩,
rintros ⟨t, ht⟩,
rcases mem_at_top_sets.1 (tendsto_finset_range (mem_at_top t)) with ⟨N, hN⟩,
simp only [preimage, mem_set_of_eq] at hN,
exact ⟨N, mem_principal_sets.2 $ λ t' ht', ht t' $ le_trans (hN _ $ le_refl N) ht'⟩
end
lemma has_countable_basis.tendsto_iff_seq_tendsto {f : α → β} {k : filter α} {l : filter β}
(hcb : k.has_countable_basis) :
tendsto f k l ↔ (∀ x : ℕ → α, tendsto x at_top k → tendsto (f ∘ x) at_top l) :=
suffices (∀ x : ℕ → α, tendsto x at_top k → tendsto (f ∘ x) at_top l) → tendsto f k l,
from ⟨by intros; apply tendsto.comp; assumption, by assumption⟩,
begin
rw filter.has_countable_basis_iff_mono_seq at hcb,
rcases hcb with ⟨g, gmon, gbasis⟩,
have gbasis : ∀ A, A ∈ k ↔ ∃ i, g i ⊆ A,
{ intro A,
subst gbasis,
rw mem_infi,
{ simp only [set.mem_Union, iff_self, filter.mem_principal_sets] },
{ exact directed_of_mono _ (λ i j h, principal_mono.mpr $ gmon _ _ h) },
{ apply_instance } },
classical, contrapose,
simp only [not_forall, not_imp, not_exists, subset_def, @tendsto_def _ _ f, gbasis],
rintro ⟨B, hBl, hfBk⟩,
choose x h using hfBk,
use x, split,
{ simp only [tendsto_at_top', gbasis],
rintros A ⟨i, hgiA⟩,
use i,
refine (λ j hj, hgiA $ gmon _ _ hj _),
simp only [h] },
{ simp only [tendsto_at_top', (∘), not_forall, not_exists],
use [B, hBl],
intro i, use [i, (le_refl _)],
apply (h i).right },
end
lemma has_countable_basis.tendsto_of_seq_tendsto {f : α → β} {k : filter α} {l : filter β}
(hcb : k.has_countable_basis) :
(∀ x : ℕ → α, tendsto x at_top k → tendsto (f ∘ x) at_top l) → tendsto f k l :=
hcb.tendsto_iff_seq_tendsto.2
end filter
namespace topological_space
/- countability axioms
For our applications we are interested in the existence of a countable basis, but we do not need the
concrete basis itself. This allows us to declare these type classes as `Prop` and use them as mixins.
-/
universe u
variables {α : Type u} [t : topological_space α]
include t
/-- A topological basis is one that satisfies the necessary conditions so that
it suffices to take unions of the basis sets to get a topology (without taking
finite intersections as well). -/
def is_topological_basis (s : set (set α)) : Prop :=
(∀t₁∈s, ∀t₂∈s, ∀ x ∈ t₁ ∩ t₂, ∃ t₃∈s, x ∈ t₃ ∧ t₃ ⊆ t₁ ∩ t₂) ∧
(⋃₀ s) = univ ∧
t = generate_from s
lemma is_topological_basis_of_subbasis {s : set (set α)} (hs : t = generate_from s) :
is_topological_basis ((λf, ⋂₀ f) '' {f:set (set α) | finite f ∧ f ⊆ s ∧ ⋂₀ f ≠ ∅}) :=
let b' := (λf, ⋂₀ f) '' {f:set (set α) | finite f ∧ f ⊆ s ∧ ⋂₀ f ≠ ∅} in
⟨assume s₁ ⟨t₁, ⟨hft₁, ht₁b, ht₁⟩, eq₁⟩ s₂ ⟨t₂, ⟨hft₂, ht₂b, ht₂⟩, eq₂⟩,
have ie : ⋂₀(t₁ ∪ t₂) = ⋂₀ t₁ ∩ ⋂₀ t₂, from Inf_union,
eq₁ ▸ eq₂ ▸ assume x h,
⟨_, ⟨t₁ ∪ t₂, ⟨finite_union hft₁ hft₂, union_subset ht₁b ht₂b,
by simpa only [ie] using ne_empty_of_mem h⟩, ie⟩, h, subset.refl _⟩,
eq_univ_iff_forall.2 $ assume a, ⟨univ, ⟨∅, ⟨finite_empty, empty_subset _,
by rw sInter_empty; exact nonempty_iff_univ_ne_empty.1 ⟨a⟩⟩, sInter_empty⟩, mem_univ _⟩,
have generate_from s = generate_from b',
from le_antisymm
(le_generate_from $ assume u ⟨t, ⟨hft, htb, ne⟩, eq⟩,
eq ▸ @is_open_sInter _ (generate_from s) _ hft (assume s hs, generate_open.basic _ $ htb hs))
(le_generate_from $ assume s hs,
by_cases
(assume : s = ∅, by rw [this]; apply @is_open_empty _ _)
(assume : s ≠ ∅, generate_open.basic _ ⟨{s}, ⟨finite_singleton s, singleton_subset_iff.2 hs,
by rwa [sInter_singleton]⟩, sInter_singleton s⟩)),
this ▸ hs⟩
lemma is_topological_basis_of_open_of_nhds {s : set (set α)}
(h_open : ∀ u ∈ s, _root_.is_open u)
(h_nhds : ∀(a:α) (u : set α), a ∈ u → _root_.is_open u → ∃v ∈ s, a ∈ v ∧ v ⊆ u) :
is_topological_basis s :=
⟨assume t₁ ht₁ t₂ ht₂ x ⟨xt₁, xt₂⟩,
h_nhds x (t₁ ∩ t₂) ⟨xt₁, xt₂⟩
(is_open_inter _ _ _ (h_open _ ht₁) (h_open _ ht₂)),
eq_univ_iff_forall.2 $ assume a,
let ⟨u, h₁, h₂, _⟩ := h_nhds a univ trivial (is_open_univ _) in
⟨u, h₁, h₂⟩,
le_antisymm
(le_generate_from h_open)
(assume u hu,
(@is_open_iff_nhds α (generate_from _) _).mpr $ assume a hau,
let ⟨v, hvs, hav, hvu⟩ := h_nhds a u hau hu in
by rw nhds_generate_from; exact infi_le_of_le v (infi_le_of_le ⟨hav, hvs⟩ $ le_principal_iff.2 hvu))⟩
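-- A minimal illustrative example (not part of the original file): the collection of
-- all open sets trivially satisfies the basis criterion just proved.
example : is_topological_basis {s : set α | _root_.is_open s} :=
is_topological_basis_of_open_of_nhds (λ u hu, hu)
  (λ a u hau hu, ⟨u, hu, hau, set.subset.refl u⟩)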
lemma mem_nhds_of_is_topological_basis {a : α} {s : set α} {b : set (set α)}
(hb : is_topological_basis b) : s ∈ 𝓝 a ↔ ∃t∈b, a ∈ t ∧ t ⊆ s :=
begin
change s ∈ (𝓝 a).sets ↔ ∃t∈b, a ∈ t ∧ t ⊆ s,
rw [hb.2.2, nhds_generate_from, binfi_sets_eq],
{ simp only [mem_bUnion_iff, exists_prop, mem_set_of_eq, and_assoc, and.left_comm], refl },
{ exact assume s ⟨hs₁, hs₂⟩ t ⟨ht₁, ht₂⟩,
have a ∈ s ∩ t, from ⟨hs₁, ht₁⟩,
let ⟨u, hu₁, hu₂, hu₃⟩ := hb.1 _ hs₂ _ ht₂ _ this in
⟨u, ⟨hu₂, hu₁⟩, le_principal_iff.2 (subset.trans hu₃ (inter_subset_left _ _)),
le_principal_iff.2 (subset.trans hu₃ (inter_subset_right _ _))⟩ },
{ rcases eq_univ_iff_forall.1 hb.2.1 a with ⟨i, h1, h2⟩,
exact ⟨i, h2, h1⟩ }
end
lemma is_open_of_is_topological_basis {s : set α} {b : set (set α)}
(hb : is_topological_basis b) (hs : s ∈ b) : _root_.is_open s :=
is_open_iff_mem_nhds.2 $ λ a as,
(mem_nhds_of_is_topological_basis hb).2 ⟨s, hs, as, subset.refl _⟩
lemma mem_basis_subset_of_mem_open {b : set (set α)}
(hb : is_topological_basis b) {a:α} {u : set α} (au : a ∈ u)
(ou : _root_.is_open u) : ∃v ∈ b, a ∈ v ∧ v ⊆ u :=
(mem_nhds_of_is_topological_basis hb).1 $ mem_nhds_sets ou au
lemma sUnion_basis_of_is_open {B : set (set α)}
(hB : is_topological_basis B) {u : set α} (ou : _root_.is_open u) :
∃ S ⊆ B, u = ⋃₀ S :=
⟨{s ∈ B | s ⊆ u}, λ s h, h.1, set.ext $ λ a,
⟨λ ha, let ⟨b, hb, ab, bu⟩ := mem_basis_subset_of_mem_open hB ha ou in
⟨b, ⟨hb, bu⟩, ab⟩,
λ ⟨b, ⟨hb, bu⟩, ab⟩, bu ab⟩⟩
lemma Union_basis_of_is_open {B : set (set α)}
(hB : is_topological_basis B) {u : set α} (ou : _root_.is_open u) :
∃ (β : Type u) (f : β → set α), u = (⋃ i, f i) ∧ ∀ i, f i ∈ B :=
let ⟨S, sb, su⟩ := sUnion_basis_of_is_open hB ou in
⟨S, subtype.val, su.trans set.sUnion_eq_Union, λ ⟨b, h⟩, sb h⟩
variables (α)
/-- A separable space is one with a countable dense subset. -/
class separable_space : Prop :=
(exists_countable_closure_eq_univ : ∃s:set α, countable s ∧ closure s = univ)
/-- A first-countable space is one in which every point has a
countable neighborhood basis. -/
class first_countable_topology : Prop :=
(nhds_generated_countable : ∀a:α, (𝓝 a).has_countable_basis)
/-- A second-countable space is one with a countable basis. -/
class second_countable_topology : Prop :=
(is_open_generated_countable : ∃b:set (set α), countable b ∧ t = topological_space.generate_from b)
@[priority 100] -- see Note [lower instance priority]
instance second_countable_topology.to_first_countable_topology
[second_countable_topology α] : first_countable_topology α :=
let ⟨b, hb, eq⟩ := second_countable_topology.is_open_generated_countable α in
⟨assume a, ⟨{s | a ∈ s ∧ s ∈ b},
countable_subset (assume x ⟨_, hx⟩, hx) hb, by rw [eq, nhds_generate_from]⟩⟩
lemma second_countable_topology_induced (β)
[t : topological_space β] [second_countable_topology β] (f : α → β) :
@second_countable_topology α (t.induced f) :=
begin
rcases second_countable_topology.is_open_generated_countable β with ⟨b, hb, eq⟩,
refine { is_open_generated_countable := ⟨preimage f '' b, countable_image _ hb, _⟩ },
rw [eq, induced_generate_from_eq]
end
instance subtype.second_countable_topology
(s : set α) [second_countable_topology α] : second_countable_topology s :=
second_countable_topology_induced s α coe
lemma is_open_generated_countable_inter [second_countable_topology α] :
∃b:set (set α), countable b ∧ ∅ ∉ b ∧ is_topological_basis b :=
let ⟨b, hb₁, hb₂⟩ := second_countable_topology.is_open_generated_countable α in
let b' := (λs, ⋂₀ s) '' {s:set (set α) | finite s ∧ s ⊆ b ∧ ⋂₀ s ≠ ∅} in
⟨b',
countable_image _ $ countable_subset
(by simp only [(and_assoc _ _).symm]; exact inter_subset_left _ _)
(countable_set_of_finite_subset hb₁),
assume ⟨s, ⟨_, _, hn⟩, hp⟩, hn hp,
is_topological_basis_of_subbasis hb₂⟩
/- TODO: more fine-grained instances for first_countable_topology, separable_space, t2_space, ... -/
instance {β : Type*} [topological_space β]
[second_countable_topology α] [second_countable_topology β] : second_countable_topology (α × β) :=
⟨let ⟨a, ha₁, ha₂, ha₃, ha₄, ha₅⟩ := is_open_generated_countable_inter α in
let ⟨b, hb₁, hb₂, hb₃, hb₄, hb₅⟩ := is_open_generated_countable_inter β in
⟨{g | ∃u∈a, ∃v∈b, g = set.prod u v},
have {g | ∃u∈a, ∃v∈b, g = set.prod u v} = (⋃u∈a, ⋃v∈b, {set.prod u v}),
by apply set.ext; simp,
by rw [this]; exact (countable_bUnion ha₁ $ assume u hu, countable_bUnion hb₁ $ by simp),
by rw [ha₅, hb₅, prod_generate_from_generate_from_eq ha₄ hb₄]⟩⟩
instance second_countable_topology_fintype {ι : Type*} {π : ι → Type*}
[fintype ι] [t : ∀a, topological_space (π a)] [sc : ∀a, second_countable_topology (π a)] :
second_countable_topology (∀a, π a) :=
have ∀i, ∃b : set (set (π i)), countable b ∧ ∅ ∉ b ∧ is_topological_basis b, from
assume a, @is_open_generated_countable_inter (π a) _ (sc a),
let ⟨g, hg⟩ := classical.axiom_of_choice this in
have t = (λa, generate_from (g a)), from funext $ assume a, (hg a).2.2.2.2,
begin
constructor,
refine ⟨pi univ '' pi univ g, countable_image _ _, _⟩,
{ suffices : countable {f : Πa, set (π a) | ∀a, f a ∈ g a}, { simpa [pi] },
exact countable_pi (assume i, (hg i).1), },
rw [this, pi_generate_from_eq_fintype],
{ congr' 1, ext f, simp [pi, eq_comm] },
exact assume a, (hg a).2.2.2.1
end
@[priority 100] -- see Note [lower instance priority]
instance second_countable_topology.to_separable_space
[second_countable_topology α] : separable_space α :=
let ⟨b, hb₁, hb₂, hb₃, hb₄, eq⟩ := is_open_generated_countable_inter α in
have nhds_eq : ∀a, 𝓝 a = (⨅ s : {s : set α // a ∈ s ∧ s ∈ b}, principal s.val),
by intro a; rw [eq, nhds_generate_from, infi_subtype]; refl,
have ∀s∈b, ∃a, a ∈ s, from assume s hs, exists_mem_of_ne_empty $ assume eq, hb₂ $ eq ▸ hs,
have ∃f:∀s∈b, α, ∀s h, f s h ∈ s, by simp only [skolem] at this; exact this,
let ⟨f, hf⟩ := this in
⟨⟨(⋃s∈b, ⋃h:s∈b, {f s h}),
countable_bUnion hb₁ (λ _ _, countable_Union_Prop $ λ _, countable_singleton _),
set.ext $ assume a,
have a ∈ (⋃₀ b), by rw [hb₄]; exact trivial,
let ⟨t, ht₁, ht₂⟩ := this in
have w : {s : set α // a ∈ s ∧ s ∈ b}, from ⟨t, ht₂, ht₁⟩,
suffices (⨅ (x : {s // a ∈ s ∧ s ∈ b}), principal (x.val ∩ ⋃s (h₁ h₂ : s ∈ b), {f s h₂})) ≠ ⊥,
by simpa only [closure_eq_nhds, nhds_eq, infi_inf w, inf_principal, mem_set_of_eq, mem_univ, iff_true],
infi_ne_bot_of_directed ⟨a⟩
(assume ⟨s₁, has₁, hs₁⟩ ⟨s₂, has₂, hs₂⟩,
have a ∈ s₁ ∩ s₂, from ⟨has₁, has₂⟩,
let ⟨s₃, hs₃, has₃, hs⟩ := hb₃ _ hs₁ _ hs₂ _ this in
⟨⟨s₃, has₃, hs₃⟩, begin
simp only [le_principal_iff, mem_principal_sets, (≥)],
simp only [subset_inter_iff] at hs, split;
apply inter_subset_inter_left; simp only [hs]
end⟩)
(assume ⟨s, has, hs⟩,
have s ∩ (⋃ (s : set α) (H h : s ∈ b), {f s h}) ≠ ∅,
from ne_empty_of_mem ⟨hf _ hs, mem_bUnion hs $ mem_Union.mpr ⟨hs, mem_singleton _⟩⟩,
mt principal_eq_bot_iff.1 this) ⟩⟩
variables {α}
lemma is_open_Union_countable [second_countable_topology α]
{ι} (s : ι → set α) (H : ∀ i, _root_.is_open (s i)) :
∃ T : set ι, countable T ∧ (⋃ i ∈ T, s i) = ⋃ i, s i :=
let ⟨B, cB, _, bB⟩ := is_open_generated_countable_inter α in
begin
let B' := {b ∈ B | ∃ i, b ⊆ s i},
choose f hf using λ b:B', b.2.2,
haveI : encodable B' := (countable_subset (sep_subset _ _) cB).to_encodable,
refine ⟨_, countable_range f,
subset.antisymm (bUnion_subset_Union _ _) (sUnion_subset _)⟩,
rintro _ ⟨i, rfl⟩ x xs,
rcases mem_basis_subset_of_mem_open bB xs (H _) with ⟨b, hb, xb, bs⟩,
exact ⟨_, ⟨_, rfl⟩, _, ⟨⟨⟨_, hb, _, bs⟩, rfl⟩, rfl⟩, hf _ (by exact xb)⟩
end
lemma is_open_sUnion_countable [second_countable_topology α]
(S : set (set α)) (H : ∀ s ∈ S, _root_.is_open s) :
∃ T : set (set α), countable T ∧ T ⊆ S ∧ ⋃₀ T = ⋃₀ S :=
let ⟨T, cT, hT⟩ := is_open_Union_countable (λ s:S, s.1) (λ s, H s.1 s.2) in
⟨subtype.val '' T, countable_image _ cT,
image_subset_iff.2 $ λ ⟨x, xs⟩ xt, xs,
by rwa [sUnion_image, sUnion_eq_Union]⟩
end topological_space
|
b8fa9931fd310856691de80e4ab4d869c8e00954
|
437dc96105f48409c3981d46fb48e57c9ac3a3e4
|
/src/algebra/group/units.lean
|
b8f83ad34e961c863eb01eca23824125aa233e62
|
[
"Apache-2.0"
] |
permissive
|
dan-c-k/mathlib
|
08efec79bd7481ee6da9cc44c24a653bff4fbe0d
|
96efc220f6225bc7a5ed8349900391a33a38cc56
|
refs/heads/master
| 1,658,082,847,093
| 1,589,013,201,000
| 1,589,013,201,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 7,761
|
lean
|
/-
Copyright (c) 2017 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau, Mario Carneiro, Johannes Hölzl, Chris Hughes
-/
import logic.function
import algebra.group.to_additive
import tactic.norm_cast
/-!
# Units (i.e., invertible elements) of a multiplicative monoid
-/
universe u
variable {α : Type u}
/-- Units of a monoid, bundled version. An element of a `monoid` is a unit if it has a two-sided
inverse. This version bundles the inverse element so that it can be computed. For a predicate
see `is_unit`. -/
structure units (α : Type u) [monoid α] :=
(val : α)
(inv : α)
(val_inv : val * inv = 1)
(inv_val : inv * val = 1)
/-- Units of an add_monoid, bundled version. An element of an add_monoid is a unit if it has a
two-sided additive inverse. This version bundles the inverse element so that it can be
computed. For a predicate see `is_add_unit`. -/
structure add_units (α : Type u) [add_monoid α] :=
(val : α)
(neg : α)
(val_neg : val + neg = 0)
(neg_val : neg + val = 0)
attribute [to_additive add_units] units
namespace units
variables [monoid α]
@[to_additive] instance : has_coe (units α) α := ⟨val⟩
@[simp, to_additive] lemma coe_mk (a : α) (b h₁ h₂) : ↑(units.mk a b h₁ h₂) = a := rfl
@[ext, to_additive] theorem ext :
function.injective (coe : units α → α)
| ⟨v, i₁, vi₁, iv₁⟩ ⟨v', i₂, vi₂, iv₂⟩ e :=
by change v = v' at e; subst v'; congr;
simpa only [iv₂, vi₁, one_mul, mul_one] using mul_assoc i₂ v i₁
@[to_additive] theorem ext_iff {a b : units α} :
a = b ↔ (a : α) = b :=
ext.eq_iff.symm
@[to_additive] instance [decidable_eq α] : decidable_eq (units α) := λ a b, decidable_of_iff' _ ext_iff
/-- Units of a monoid form a group. -/
@[to_additive] instance : group (units α) :=
{ mul := λ u₁ u₂, ⟨u₁.val * u₂.val, u₂.inv * u₁.inv,
by rw [mul_assoc, ← mul_assoc u₂.val, val_inv, one_mul, val_inv],
by rw [mul_assoc, ← mul_assoc u₁.inv, inv_val, one_mul, inv_val]⟩,
one := ⟨1, 1, one_mul 1, one_mul 1⟩,
mul_one := λ u, ext $ mul_one u,
one_mul := λ u, ext $ one_mul u,
mul_assoc := λ u₁ u₂ u₃, ext $ mul_assoc u₁ u₂ u₃,
inv := λ u, ⟨u.2, u.1, u.4, u.3⟩,
mul_left_inv := λ u, ext u.inv_val }
variables (a b : units α) {c : units α}
@[simp, norm_cast, to_additive] lemma coe_mul : (↑(a * b) : α) = a * b := rfl
attribute [norm_cast] add_units.coe_add
@[simp, norm_cast, to_additive] lemma coe_one : ((1 : units α) : α) = 1 := rfl
attribute [norm_cast] add_units.coe_zero
@[to_additive] lemma val_coe : (↑a : α) = a.val := rfl
@[norm_cast, to_additive] lemma coe_inv : ((a⁻¹ : units α) : α) = a.inv := rfl
attribute [norm_cast] add_units.coe_neg
@[simp, to_additive] lemma inv_mul : (↑a⁻¹ * a : α) = 1 := inv_val _
@[simp, to_additive] lemma mul_inv : (a * ↑a⁻¹ : α) = 1 := val_inv _
@[simp, to_additive] lemma mul_inv_cancel_left (a : units α) (b : α) : (a:α) * (↑a⁻¹ * b) = b :=
by rw [← mul_assoc, mul_inv, one_mul]
@[simp, to_additive] lemma inv_mul_cancel_left (a : units α) (b : α) : (↑a⁻¹:α) * (a * b) = b :=
by rw [← mul_assoc, inv_mul, one_mul]
@[simp, to_additive] lemma mul_inv_cancel_right (a : α) (b : units α) : a * b * ↑b⁻¹ = a :=
by rw [mul_assoc, mul_inv, mul_one]
@[simp, to_additive] lemma inv_mul_cancel_right (a : α) (b : units α) : a * ↑b⁻¹ * b = a :=
by rw [mul_assoc, inv_mul, mul_one]
@[to_additive] instance : inhabited (units α) := ⟨1⟩
@[to_additive] instance {α} [comm_monoid α] : comm_group (units α) :=
{ mul_comm := λ u₁ u₂, ext $ mul_comm _ _, ..units.group }
@[to_additive] instance [has_repr α] : has_repr (units α) := ⟨repr ∘ val⟩
@[simp, to_additive] theorem mul_left_inj (a : units α) {b c : α} : (a:α) * b = a * c ↔ b = c :=
⟨λ h, by simpa only [inv_mul_cancel_left] using congr_arg ((*) ↑(a⁻¹ : units α)) h, congr_arg _⟩
@[simp, to_additive] theorem mul_right_inj (a : units α) {b c : α} : b * a = c * a ↔ b = c :=
⟨λ h, by simpa only [mul_inv_cancel_right] using congr_arg (* ↑(a⁻¹ : units α)) h, congr_arg _⟩
@[to_additive] theorem eq_mul_inv_iff_mul_eq {a b : α} : a = b * ↑c⁻¹ ↔ a * c = b :=
⟨λ h, by rw [h, inv_mul_cancel_right], λ h, by rw [← h, mul_inv_cancel_right]⟩
@[to_additive] theorem eq_inv_mul_iff_mul_eq {a c : α} : a = ↑b⁻¹ * c ↔ ↑b * a = c :=
⟨λ h, by rw [h, mul_inv_cancel_left], λ h, by rw [← h, inv_mul_cancel_left]⟩
@[to_additive] theorem inv_mul_eq_iff_eq_mul {b c : α} : ↑a⁻¹ * b = c ↔ b = a * c :=
⟨λ h, by rw [← h, mul_inv_cancel_left], λ h, by rw [h, inv_mul_cancel_left]⟩
@[to_additive] theorem mul_inv_eq_iff_eq_mul {a c : α} : a * ↑b⁻¹ = c ↔ a = c * b :=
⟨λ h, by rw [← h, inv_mul_cancel_right], λ h, by rw [h, mul_inv_cancel_right]⟩
end units
theorem nat.units_eq_one (u : units ℕ) : u = 1 :=
units.ext $ nat.eq_one_of_dvd_one ⟨u.inv, u.val_inv.symm⟩
theorem nat.add_units_eq_zero (u : add_units ℕ) : u = 0 :=
add_units.ext $ (nat.eq_zero_of_add_eq_zero u.val_neg).1
/-- For `a, b` in a `comm_monoid` such that `a * b = 1`, makes a unit out of `a`. -/
@[to_additive "For `a, b` in an `add_comm_monoid` such that `a + b = 0`, makes an add_unit out of `a`."]
def units.mk_of_mul_eq_one [comm_monoid α] (a b : α) (hab : a * b = 1) :
units α :=
⟨a, b, hab, (mul_comm b a).trans hab⟩
@[simp, to_additive] lemma units.coe_mk_of_mul_eq_one [comm_monoid α] {a b : α} (h : a * b = 1) :
(units.mk_of_mul_eq_one a b h : α) = a := rfl
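-- A minimal illustrative example (not part of the original file): packaging `1 : ℕ`
-- as a unit of the multiplicative monoid `ℕ` using the equation `1 * 1 = 1`.
example : (units.mk_of_mul_eq_one (1 : ℕ) 1 (mul_one 1) : ℕ) = 1 := rfl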
section monoid
variables [monoid α] {a b c : α}
/-- Partial division. It is defined when the
second argument is invertible, and unlike the division operator
in `division_ring` it is not totalized at zero. -/
def divp (a : α) (u) : α := a * (u⁻¹ : units α)
infix ` /ₚ `:70 := divp
@[simp] theorem divp_self (u : units α) : (u : α) /ₚ u = 1 := units.mul_inv _
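-- Illustrative usage of the partial division just introduced (not part of the
-- original file): dividing a unit by itself yields `1`.
example (u : units ℕ) : (u : ℕ) /ₚ u = 1 := divp_self u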
@[simp] theorem divp_one (a : α) : a /ₚ 1 = a := mul_one _
theorem divp_assoc (a b : α) (u : units α) : a * b /ₚ u = a * (b /ₚ u) :=
mul_assoc _ _ _
@[simp] theorem divp_inv (u : units α) : a /ₚ u⁻¹ = a * u := rfl
@[simp] theorem divp_mul_cancel (a : α) (u : units α) : a /ₚ u * u = a :=
(mul_assoc _ _ _).trans $ by rw [units.inv_mul, mul_one]
@[simp] theorem mul_divp_cancel (a : α) (u : units α) : (a * u) /ₚ u = a :=
(mul_assoc _ _ _).trans $ by rw [units.mul_inv, mul_one]
@[simp] theorem divp_right_inj (u : units α) {a b : α} : a /ₚ u = b /ₚ u ↔ a = b :=
units.mul_right_inj _
theorem divp_divp_eq_divp_mul (x : α) (u₁ u₂ : units α) : (x /ₚ u₁) /ₚ u₂ = x /ₚ (u₂ * u₁) :=
by simp only [divp, mul_inv_rev, units.coe_mul, mul_assoc]
theorem divp_eq_iff_mul_eq {x : α} {u : units α} {y : α} : x /ₚ u = y ↔ y * u = x :=
u.mul_right_inj.symm.trans $ by rw [divp_mul_cancel]; exact ⟨eq.symm, eq.symm⟩
theorem divp_eq_one_iff_eq {a : α} {u : units α} : a /ₚ u = 1 ↔ a = u :=
(units.mul_right_inj u).symm.trans $ by rw [divp_mul_cancel, one_mul]
@[simp] theorem one_divp (u : units α) : 1 /ₚ u = ↑u⁻¹ :=
one_mul _
end monoid
section comm_monoid
variables [comm_monoid α]
theorem divp_eq_divp_iff {x y : α} {ux uy : units α} :
x /ₚ ux = y /ₚ uy ↔ x * uy = y * ux :=
by rw [divp_eq_iff_mul_eq, mul_comm, ← divp_assoc, divp_eq_iff_mul_eq, mul_comm y ux]
theorem divp_mul_divp (x y : α) (ux uy : units α) :
(x /ₚ ux) * (y /ₚ uy) = (x * y) /ₚ (ux * uy) :=
by rw [← divp_divp_eq_divp_mul, divp_assoc, mul_comm x, divp_assoc, mul_comm]
end comm_monoid
|
b3017463cf5eee177eff33a7f255a2c2c7293b00
|
07c76fbd96ea1786cc6392fa834be62643cea420
|
/hott/types/sigma.hlean
|
f9c8b42637edeb9515e8bdd977c7ce0dc35bc7dc
|
[
"Apache-2.0"
] |
permissive
|
fpvandoorn/lean2
|
5a430a153b570bf70dc8526d06f18fc000a60ad9
|
0889cf65b7b3cebfb8831b8731d89c2453dd1e9f
|
refs/heads/master
| 1,592,036,508,364
| 1,545,093,958,000
| 1,545,093,958,000
| 75,436,854
| 0
| 0
| null | 1,480,718,780,000
| 1,480,718,780,000
| null |
UTF-8
|
Lean
| false
| false
| 29,232
|
hlean
|
/-
Copyright (c) 2014-15 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Floris van Doorn
Partially ported from Coq HoTT
Theorems about sigma-types (dependent sums)
-/
import types.prod
open eq sigma sigma.ops equiv is_equiv function is_trunc sum unit
namespace sigma
variables {A A' : Type} {B : A → Type} {B' : A' → Type} {C : Πa, B a → Type}
{D : Πa b, C a b → Type}
{a a' a'' : A} {b b₁ b₂ : B a} {b' : B a'} {b'' : B a''} {u v w : Σa, B a}
definition destruct := @sigma.cases_on
/- Paths in a sigma-type -/
protected definition eta [unfold 3] : Π (u : Σa, B a), ⟨u.1 , u.2⟩ = u
| eta ⟨u₁, u₂⟩ := idp
definition eta2 : Π (u : Σa b, C a b), ⟨u.1, u.2.1, u.2.2⟩ = u
| eta2 ⟨u₁, u₂, u₃⟩ := idp
definition eta3 : Π (u : Σa b c, D a b c), ⟨u.1, u.2.1, u.2.2.1, u.2.2.2⟩ = u
| eta3 ⟨u₁, u₂, u₃, u₄⟩ := idp
definition dpair_eq_dpair [unfold 8] (p : a = a') (q : b =[p] b') : ⟨a, b⟩ = ⟨a', b'⟩ :=
apd011 sigma.mk p q
definition sigma_eq [unfold 5 6] (p : u.1 = v.1) (q : u.2 =[p] v.2) : u = v :=
by induction u; induction v; exact (dpair_eq_dpair p q)
definition sigma_eq_right [unfold 6] (q : b₁ = b₂) : ⟨a, b₁⟩ = ⟨a, b₂⟩ :=
ap (dpair a) q
definition eq_pr1 [unfold 5] (p : u = v) : u.1 = v.1 :=
ap pr1 p
postfix `..1`:(max+1) := eq_pr1
definition eq_pr2 [unfold 5] (p : u = v) : u.2 =[p..1] v.2 :=
by induction p; exact idpo
postfix `..2`:(max+1) := eq_pr2
definition dpair_sigma_eq (p : u.1 = v.1) (q : u.2 =[p] v.2)
: ⟨(sigma_eq p q)..1, (sigma_eq p q)..2⟩ = ⟨p, q⟩ :=
by induction u; induction v;esimp at *;induction q;esimp
definition sigma_eq_pr1 (p : u.1 = v.1) (q : u.2 =[p] v.2) : (sigma_eq p q)..1 = p :=
(dpair_sigma_eq p q)..1
definition sigma_eq_pr2 (p : u.1 = v.1) (q : u.2 =[p] v.2)
: (sigma_eq p q)..2 =[sigma_eq_pr1 p q] q :=
(dpair_sigma_eq p q)..2
definition sigma_eq_eta (p : u = v) : sigma_eq (p..1) (p..2) = p :=
by induction p; induction u; reflexivity
definition eq2_pr1 {p q : u = v} (r : p = q) : p..1 = q..1 :=
ap eq_pr1 r
definition eq2_pr2 {p q : u = v} (r : p = q) : p..2 =[eq2_pr1 r] q..2 :=
!pathover_ap (apd eq_pr2 r)
definition tr_pr1_sigma_eq {B' : A → Type} (p : u.1 = v.1) (q : u.2 =[p] v.2)
: transport (λx, B' x.1) (sigma_eq p q) = transport B' p :=
by induction u; induction v; esimp at *;induction q; reflexivity
protected definition ap_pr1 (p : u = v) : ap (λx : sigma B, x.1) p = p..1 := idp
/- the uncurried version of sigma_eq. We will prove that this is an equivalence -/
definition sigma_eq_unc [unfold 5] : Π (pq : Σ(p : u.1 = v.1), u.2 =[p] v.2), u = v
| sigma_eq_unc ⟨pq₁, pq₂⟩ := sigma_eq pq₁ pq₂
definition dpair_sigma_eq_unc : Π (pq : Σ(p : u.1 = v.1), u.2 =[p] v.2),
⟨(sigma_eq_unc pq)..1, (sigma_eq_unc pq)..2⟩ = pq
| dpair_sigma_eq_unc ⟨pq₁, pq₂⟩ := dpair_sigma_eq pq₁ pq₂
definition sigma_eq_pr1_unc (pq : Σ(p : u.1 = v.1), u.2 =[p] v.2)
: (sigma_eq_unc pq)..1 = pq.1 :=
(dpair_sigma_eq_unc pq)..1
definition sigma_eq_pr2_unc (pq : Σ(p : u.1 = v.1), u.2 =[p] v.2) :
(sigma_eq_unc pq)..2 =[sigma_eq_pr1_unc pq] pq.2 :=
(dpair_sigma_eq_unc pq)..2
definition sigma_eq_eta_unc (p : u = v) : sigma_eq_unc ⟨p..1, p..2⟩ = p :=
sigma_eq_eta p
definition tr_sigma_eq_pr1_unc {B' : A → Type}
(pq : Σ(p : u.1 = v.1), u.2 =[p] v.2)
: transport (λx, B' x.1) (@sigma_eq_unc A B u v pq) = transport B' pq.1 :=
destruct pq tr_pr1_sigma_eq
definition is_equiv_sigma_eq [instance] [constructor] (u v : Σa, B a)
: is_equiv (@sigma_eq_unc A B u v) :=
adjointify sigma_eq_unc
(λp, ⟨p..1, p..2⟩)
sigma_eq_eta_unc
dpair_sigma_eq_unc
definition sigma_eq_equiv [constructor] (u v : Σa, B a)
: (u = v) ≃ (Σ(p : u.1 = v.1), u.2 =[p] v.2) :=
(equiv.mk sigma_eq_unc _)⁻¹ᵉ
/- induction principle for a path between pairs -/
definition eq.rec_sigma {A : Type} {B : A → Type} {a : A} {b : B a}
(P : Π⦃a'⦄ {b' : B a'}, ⟨a, b⟩ = ⟨a', b'⟩ → Type)
(IH : P idp) ⦃a' : A⦄ {b' : B a'} (p : ⟨a, b⟩ = ⟨a', b'⟩) : P p :=
begin
apply transport (λp, P p) (to_left_inv !sigma_eq_equiv p),
generalize !sigma_eq_equiv p, esimp, intro q,
induction q with q₁ q₂, induction q₂, exact IH
end
definition dpair_eq_dpair_con (p1 : a = a' ) (q1 : b =[p1] b' )
(p2 : a' = a'') (q2 : b' =[p2] b'') :
dpair_eq_dpair (p1 ⬝ p2) (q1 ⬝o q2) = dpair_eq_dpair p1 q1 ⬝ dpair_eq_dpair p2 q2 :=
by induction q1; induction q2; reflexivity
definition sigma_eq_con (p1 : u.1 = v.1) (q1 : u.2 =[p1] v.2)
(p2 : v.1 = w.1) (q2 : v.2 =[p2] w.2) :
sigma_eq (p1 ⬝ p2) (q1 ⬝o q2) = sigma_eq p1 q1 ⬝ sigma_eq p2 q2 :=
by induction u; induction v; induction w; apply dpair_eq_dpair_con
definition sigma_eq_concato_eq {b : B a} {b₁ b₂ : B a'}
(p : a = a') (q : b =[p] b₁) (q' : b₁ = b₂) :
sigma_eq p (q ⬝op q') = sigma_eq p q ⬝ ap (dpair a') q' :=
by induction q'; reflexivity
definition dpair_eq_dpair_inv (p : a = a') (q : b =[p] b') :
(dpair_eq_dpair p q)⁻¹ = dpair_eq_dpair p⁻¹ q⁻¹ᵒ :=
begin induction q, reflexivity end
local attribute dpair_eq_dpair [reducible]
definition dpair_eq_dpair_con_idp (p : a = a') (q : b =[p] b') :
dpair_eq_dpair p q = dpair_eq_dpair p !pathover_tr ⬝
dpair_eq_dpair idp (pathover_idp_of_eq (tr_eq_of_pathover q)) :=
by induction q; reflexivity
definition ap_sigma_pr1 {A B : Type} {C : B → Type} {a₁ a₂ : A} (f : A → B) (g : Πa, C (f a))
(p : a₁ = a₂) : (ap (λa, ⟨f a, g a⟩) p)..1 = ap f p :=
by induction p; reflexivity
definition ap_sigma_pr2 {A B : Type} {C : B → Type} {a₁ a₂ : A} (f : A → B) (g : Πa, C (f a))
(p : a₁ = a₂) : (ap (λa, ⟨f a, g a⟩) p)..2 =
change_path (ap_sigma_pr1 f g p)⁻¹ (pathover_ap C f (apd g p)) :=
by induction p; reflexivity
definition sigma_eq_pr2_constant {A B : Type} {a a' : A} {b b' : B} (p : a = a')
(q : b =[p] b') : ap pr2 (sigma_eq p q) = (eq_of_pathover q) :=
by induction q; reflexivity
definition sigma_eq_pr2_constant2 {A B : Type} {a a' : A} {b b' : B} (p : a = a')
(q : b = b') : ap pr2 (sigma_eq p (pathover_of_eq p q)) = q :=
by induction p; induction q; reflexivity
/- eq_pr1 commutes with the groupoid structure. -/
definition eq_pr1_idp (u : Σa, B a) : (refl u) ..1 = refl (u.1) := idp
definition eq_pr1_con (p : u = v) (q : v = w) : (p ⬝ q) ..1 = (p..1) ⬝ (q..1) := !ap_con
definition eq_pr1_inv (p : u = v) : p⁻¹ ..1 = (p..1)⁻¹ := !ap_inv
/- Applying dpair to one argument is the same as dpair_eq_dpair with reflexivity in the first place. -/
definition ap_dpair (q : b₁ = b₂) :
ap (sigma.mk a) q = dpair_eq_dpair idp (pathover_idp_of_eq q) :=
by induction q; reflexivity
/- Dependent transport is the same as transport along a sigma_eq. -/
definition transportD_eq_transport (p : a = a') (c : C a b) :
p ▸D c = transport (λu, C (u.1) (u.2)) (dpair_eq_dpair p !pathover_tr) c :=
by induction p; reflexivity
definition sigma_eq_eq_sigma_eq {p1 q1 : a = a'} {p2 : b =[p1] b'} {q2 : b =[q1] b'}
(r : p1 = q1) (s : p2 =[r] q2) : sigma_eq p1 p2 = sigma_eq q1 q2 :=
by induction s; reflexivity
/- A path between paths in a total space is commonly shown componentwise. -/
definition sigma_eq2 {p q : u = v} (r : p..1 = q..1) (s : p..2 =[r] q..2)
: p = q :=
begin
induction p, induction u with u1 u2,
transitivity sigma_eq q..1 q..2,
apply sigma_eq_eq_sigma_eq r s,
apply sigma_eq_eta,
end
definition sigma_eq2_unc {p q : u = v} (rs : Σ(r : p..1 = q..1), p..2 =[r] q..2) : p = q :=
destruct rs sigma_eq2
/- two equalities about applying a function to a pair of equalities. These two functions are not
definitionally equal (but they are equal on all pairs) -/
definition ap_dpair_eq_dpair (f : Πa, B a → A') (p : a = a') (q : b =[p] b')
: ap (sigma.rec f) (dpair_eq_dpair p q) = apd011 f p q :=
by induction q; reflexivity
definition ap_dpair_eq_dpair_pr (f : Πa, B a → A') (p : a = a') (q : b =[p] b') :
ap (λx, f x.1 x.2) (dpair_eq_dpair p q) = apd011 f p q :=
by induction q; reflexivity
/- Transport -/
/- The concrete description of transport in sigmas (and also pis) is rather trickier than in the other types. In particular, these cannot be described just in terms of transport in simpler types; they require also the dependent transport [transportD].
In particular, this indicates why `transport` alone cannot be fully defined by induction on the structure of types, although Id-elim/transportD can be (cf. Observational Type Theory). A more thorough set of lemmas, along the lines of the present ones but dealing with Id-elim rather than just transport, might be nice to have eventually? -/
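/- Worked illustration (a comment added for clarity, not in the original file): for `p : a = a'`
   and a pair `⟨b, c⟩` with `b : B a` and `c : C a b`, the lemma below computes
   `p ▸ ⟨b, c⟩ = ⟨p ▸ b, p ▸D c⟩`. The first component is transported in the family `B`;
   the second needs the dependent transport `▸D`, because its type `C a b` depends on both
   `a` and `b`. -/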
definition sigma_transport (p : a = a') (bc : Σ(b : B a), C a b)
: p ▸ bc = ⟨p ▸ bc.1, p ▸D bc.2⟩ :=
by induction p; induction bc; reflexivity
/- The special case when the second variable doesn't depend on the first is simpler. -/
definition sigma_transport_constant {B : Type} {C : A → B → Type} (p : a = a')
(bc : Σ(b : B), C a b) : p ▸ bc = ⟨bc.1, p ▸ bc.2⟩ :=
by induction p; induction bc; reflexivity
/- Or if the second variable contains a first component that doesn't depend on the first. -/
definition sigma_transport2_constant {C : A → Type} {D : Π a:A, B a → C a → Type} (p : a = a')
(bcd : Σ(b : B a) (c : C a), D a b c) : p ▸ bcd = ⟨p ▸ bcd.1, p ▸ bcd.2.1, p ▸D2 bcd.2.2⟩ :=
begin
induction p, induction bcd with b cd, induction cd, reflexivity
end
/- Pathovers -/
definition etao (p : a = a') (bc : Σ(b : B a), C a b)
: bc =[p] ⟨p ▸ bc.1, p ▸D bc.2⟩ :=
by induction p; induction bc; apply idpo
definition sigma_pathover' (p : a = a') (u : Σ(b : B a), C a b) (v : Σ(b : B a'), C a' b)
(r : u.1 =[p] v.1) (s : u.2 =[apd011 C p r] v.2) : u =[p] v :=
begin
induction u, induction v, esimp at *, induction r,
esimp [apd011] at s, induction s using idp_rec_on, apply idpo
end
definition sigma_pathover (p : a = a') (u : Σ(b : B a), C a b) (v : Σ(b : B a'), C a' b)
(r : u.1 =[p] v.1) (s : pathover (λx, C x.1 x.2) u.2 (sigma_eq p r) v.2) : u =[p] v :=
begin
induction u, induction v, esimp at *, induction r,
induction s using idp_rec_on, apply idpo
end
definition sigma_pathover_constant {B : Type} {C : A → B → Type} (p : a = a')
(u : Σ(b : B), C a b) (v : Σ(b : B), C a' b)
(r : u.1 = v.1) (s : pathover (λx, C (prod.pr1 x) (prod.pr2 x)) u.2 (prod.prod_eq p r) v.2) : u =[p] v :=
begin
induction p, induction u, induction v, esimp at *, induction r,
induction s using idp_rec_on, apply idpo
end
definition pathover_pr1 [unfold 9] {A : Type} {B : A → Type} {C : Πa, B a → Type}
{a a' : A} {p : a = a'} {x : Σb, C a b} {x' : Σb', C a' b'}
(q : x =[p] x') : x.1 =[p] x'.1 :=
begin induction q, constructor end
definition sigma_pathover_equiv_of_is_prop {A : Type} {B : A → Type} (C : Πa, B a → Type)
{a a' : A} (p : a = a') (x : Σb, C a b) (x' : Σb', C a' b')
(H : Πa b, is_prop (C a b)) : x =[p] x' ≃ x.1 =[p] x'.1 :=
begin
fapply equiv.MK,
{ exact pathover_pr1 },
{ intro q, induction x with b c, induction x' with b' c', esimp at q, induction q,
apply pathover_idp_of_eq, exact sigma_eq idp !is_prop.elimo },
{ intro q, induction x with b c, induction x' with b' c', esimp at q, induction q,
have c = c', from !is_prop.elim, induction this,
rewrite [▸*, is_prop_elimo_self (C a) c] },
{ intro q, induction q, induction x with b c, rewrite [▸*, is_prop_elimo_self (C a) c] }
end
/-
TODO:
* define the projections from the type u =[p] v
* show that the uncurried version of sigma_pathover is an equivalence
-/
/- Squares in a sigma type are characterized in cubical.squareover (to avoid circular imports) -/
/- Functorial action -/
variables (f : A → A') (g : Πa, B a → B' (f a))
definition sigma_functor [unfold 7] (u : Σa, B a) : Σa', B' a' :=
⟨f u.1, g u.1 u.2⟩
definition total [reducible] [unfold 5] {B' : A → Type} (g : Πa, B a → B' a) : (Σa, B a) → (Σa, B' a) :=
sigma_functor id g
definition sigma_functor_compose {A A' A'' : Type} {B : A → Type} {B' : A' → Type}
{B'' : A'' → Type} {f' : A' → A''} {f : A → A'} (g' : Πa, B' a → B'' (f' a))
(g : Πa, B a → B' (f a)) (x : Σa, B a) :
sigma_functor f' g' (sigma_functor f g x) = sigma_functor (f' ∘ f) (λa, g' (f a) ∘ g a) x :=
begin
reflexivity
end
definition sigma_functor_homotopy {A A' : Type} {B : A → Type} {B' : A' → Type}
{f f' : A → A'} {g : Πa, B a → B' (f a)} {g' : Πa, B a → B' (f' a)} (h : f ~ f')
(k : Πa b, g a b =[h a] g' a b) (x : Σa, B a) : sigma_functor f g x = sigma_functor f' g' x :=
sigma_eq (h x.1) (k x.1 x.2)
section hsquare
variables {A₀₀ A₂₀ A₀₂ A₂₂ : Type}
{B₀₀ : A₀₀ → Type} {B₂₀ : A₂₀ → Type} {B₀₂ : A₀₂ → Type} {B₂₂ : A₂₂ → Type}
{f₁₀ : A₀₀ → A₂₀} {f₁₂ : A₀₂ → A₂₂} {f₀₁ : A₀₀ → A₀₂} {f₂₁ : A₂₀ → A₂₂}
{g₁₀ : Πa, B₀₀ a → B₂₀ (f₁₀ a)} {g₁₂ : Πa, B₀₂ a → B₂₂ (f₁₂ a)}
{g₀₁ : Πa, B₀₀ a → B₀₂ (f₀₁ a)} {g₂₁ : Πa, B₂₀ a → B₂₂ (f₂₁ a)}
definition sigma_functor_hsquare (h : hsquare f₁₀ f₁₂ f₀₁ f₂₁)
(k : Πa (b : B₀₀ a), g₂₁ _ (g₁₀ _ b) =[h a] g₁₂ _ (g₀₁ _ b)) :
hsquare (sigma_functor f₁₀ g₁₀) (sigma_functor f₁₂ g₁₂)
(sigma_functor f₀₁ g₀₁) (sigma_functor f₂₁ g₂₁) :=
λx, sigma_functor_compose g₂₁ g₁₀ x ⬝
sigma_functor_homotopy h k x ⬝
(sigma_functor_compose g₁₂ g₀₁ x)⁻¹
end hsquare
definition sigma_functor2 [constructor] {A₁ A₂ A₃ : Type}
{B₁ : A₁ → Type} {B₂ : A₂ → Type} {B₃ : A₃ → Type}
(f : A₁ → A₂ → A₃) (g : Π⦃a₁ a₂⦄, B₁ a₁ → B₂ a₂ → B₃ (f a₁ a₂))
(x₁ : Σa₁, B₁ a₁) (x₂ : Σa₂, B₂ a₂) : Σa₃, B₃ a₃ :=
⟨f x₁.1 x₂.1, g x₁.2 x₂.2⟩
/- Equivalences -/
definition is_equiv_sigma_functor [constructor] [H1 : is_equiv f] [H2 : Π a, is_equiv (g a)]
: is_equiv (sigma_functor f g) :=
adjointify (sigma_functor f g)
(sigma_functor f⁻¹ (λ(a' : A') (b' : B' a'),
((g (f⁻¹ a'))⁻¹ (transport B' (right_inv f a')⁻¹ b'))))
abstract begin
intro u', induction u' with a' b',
apply sigma_eq (right_inv f a'),
rewrite [▸*,right_inv (g (f⁻¹ a')),▸*],
apply tr_pathover
end end
abstract begin
intro u,
induction u with a b,
apply (sigma_eq (left_inv f a)),
apply pathover_of_tr_eq,
rewrite [▸*,adj f,-(fn_tr_eq_tr_fn (left_inv f a) (λ a, (g a)⁻¹)),
▸*,tr_compose B' f,tr_inv_tr,left_inv]
end end
definition sigma_equiv_sigma_of_is_equiv [constructor]
[H1 : is_equiv f] [H2 : Π a, is_equiv (g a)] : (Σa, B a) ≃ (Σa', B' a') :=
equiv.mk (sigma_functor f g) !is_equiv_sigma_functor
definition sigma_equiv_sigma [constructor] (Hf : A ≃ A') (Hg : Π a, B a ≃ B' (to_fun Hf a)) :
(Σa, B a) ≃ (Σa', B' a') :=
sigma_equiv_sigma_of_is_equiv (to_fun Hf) (λ a, to_fun (Hg a))
definition sigma_equiv_sigma_right [constructor] {B' : A → Type} (Hg : Π a, B a ≃ B' a)
: (Σa, B a) ≃ Σa, B' a :=
sigma_equiv_sigma equiv.rfl Hg
definition sigma_equiv_sigma_left [constructor] (Hf : A ≃ A') :
(Σa, B a) ≃ (Σa', B (to_inv Hf a')) :=
sigma_equiv_sigma Hf (λ a, equiv_ap B !right_inv⁻¹)
definition sigma_equiv_sigma_left' [constructor] (Hf : A' ≃ A) : (Σa, B (Hf a)) ≃ (Σa', B a') :=
sigma_equiv_sigma Hf (λa, erfl)
definition ap_sigma_functor_sigma_eq {A A' : Type} {B : A → Type} {B' : A' → Type} {a a' : A}
{b : B a} {b' : B a'} (f : A → A') (g : Πa, B a → B' (f a)) (p : a = a') (q : b =[p] b') :
ap (sigma_functor f g) (sigma_eq p q) = sigma_eq (ap f p) (pathover_ap B' f (apo g q)) :=
by induction q; reflexivity
definition ap_sigma_functor_id_sigma_eq {A : Type} {B B' : A → Type}
{a a' : A} {b : B a} {b' : B a'} (g : Πa, B a → B' a) (p : a = a') (q : b =[p] b') :
ap (sigma_functor id g) (sigma_eq p q) = sigma_eq p (apo g q) :=
by induction q; reflexivity
definition sigma_ua {A B : Type} (C : A ≃ B → Type) :
(Σ(p : A = B), C (equiv_of_eq p)) ≃ Σ(e : A ≃ B), C e :=
sigma_equiv_sigma_left' !eq_equiv_equiv
-- definition ap_sigma_functor_eq (p : u.1 = v.1) (q : u.2 =[p] v.2)
-- : ap (sigma_functor f g) (sigma_eq p q) =
-- sigma_eq (ap f p)
-- ((tr_compose B' f p (g u.1 u.2))⁻¹ ⬝ (fn_tr_eq_tr_fn p g u.2)⁻¹ ⬝ ap (g v.1) q) :=
-- by induction u; induction v; apply ap_sigma_functor_eq_dpair
/- definition 3.11.9(i): Summing up a contractible family of types does nothing. -/
definition is_equiv_pr1 [instance] [constructor] (B : A → Type) [H : Π a, is_contr (B a)]
: is_equiv (@pr1 A B) :=
adjointify pr1
(λa, ⟨a, !center⟩)
(λa, idp)
(λu, sigma_eq idp (pathover_idp_of_eq !center_eq))
definition sigma_equiv_of_is_contr_right [constructor] (B : A → Type) (H : Π a, is_contr (B a))
: (Σa, B a) ≃ A :=
equiv.mk pr1 _
/- definition 3.11.9(ii): Dually, summing up over a contractible type does nothing. -/
definition sigma_equiv_of_is_contr_left [constructor] (B : A → Type) (H : is_contr A)
: (Σa, B a) ≃ B (center A) :=
equiv.MK
(λu, (center_eq u.1)⁻¹ ▸ u.2)
(λb, ⟨!center, b⟩)
abstract (λb, ap (λx, x ▸ b) !prop_eq_of_is_contr) end
abstract (λu, sigma_eq !center_eq !tr_pathover) end
/- Associativity -/
-- this proof is harder than in Coq because we don't have eta definitionally for sigma
definition sigma_assoc_equiv [constructor] (C : (Σa, B a) → Type)
: (Σu, C u) ≃ (Σa b, C ⟨a, b⟩) :=
equiv.mk _ (adjointify
(λuc, ⟨uc.1.1, uc.1.2, transport C (sigma.eta uc.1)⁻¹ uc.2⟩)
(λav, ⟨⟨av.1, av.2.1⟩, av.2.2⟩)
abstract begin intro av, induction av with a v, induction v, reflexivity end end
abstract begin intro uc, induction uc with u c, induction u, reflexivity end end)
definition sigma_assoc_equiv' [constructor] (C : Πa, B a → Type)
: (Σa b, C a b) ≃ (Σu, C u.1 u.2) :=
!sigma_assoc_equiv⁻¹ᵉ
open prod prod.ops
definition assoc_equiv_prod [constructor] (C : (A × A') → Type) : (Σa a', C (a,a')) ≃ (Σu, C u) :=
equiv.mk _ (adjointify
(λav, ⟨(av.1, av.2.1), av.2.2⟩)
(λuc, ⟨pr₁ (uc.1), pr₂ (uc.1), !prod.eta⁻¹ ▸ uc.2⟩)
abstract proof (λuc, destruct uc (λu, prod.destruct u (λa b c, idp))) qed end
abstract proof (λav, destruct av (λa v, destruct v (λb c, idp))) qed end)
definition sigma_assoc_equiv_left {A D : Type} {B : A → Type} {E : D → Type} (C : Πa, B a → Type)
(f : (Σd, E d) ≃ Σa, B a) : (Σa b, C a b) ≃ Σd e, C (f ⟨d, e⟩).1 (f ⟨d, e⟩).2 :=
begin
refine !sigma_assoc_equiv' ⬝e _,
refine sigma_equiv_sigma_left f⁻¹ᵉ ⬝e _,
exact !sigma_assoc_equiv,
end
/- Symmetry -/
definition comm_equiv_unc (C : A × A' → Type) : (Σa a', C (a, a')) ≃ (Σa' a, C (a, a')) :=
calc
(Σa a', C (a, a')) ≃ Σu, C u : assoc_equiv_prod
... ≃ Σv, C (flip v) : sigma_equiv_sigma !prod_comm_equiv
(λu, prod.destruct u (λa a', equiv.rfl))
... ≃ Σa' a, C (a, a') : assoc_equiv_prod
definition sigma_comm_equiv [constructor] (C : A → A' → Type)
: (Σa a', C a a') ≃ (Σa' a, C a a') :=
comm_equiv_unc (λu, C (prod.pr1 u) (prod.pr2 u))
definition equiv_prod [constructor] (A B : Type) : (Σ(a : A), B) ≃ A × B :=
equiv.mk _ (adjointify
(λs, (s.1, s.2))
(λp, ⟨pr₁ p, pr₂ p⟩)
proof (λp, prod.destruct p (λa b, idp)) qed
proof (λs, destruct s (λa b, idp)) qed)
definition comm_equiv_constant (A B : Type) : (Σ(a : A), B) ≃ Σ(b : B), A :=
calc
(Σ(a : A), B) ≃ A × B : equiv_prod
... ≃ B × A : prod_comm_equiv
... ≃ Σ(b : B), A : equiv_prod
definition sigma_assoc_comm_equiv {A : Type} (B C : A → Type)
: (Σ(v : Σa, B a), C v.1) ≃ (Σ(u : Σa, C a), B u.1) :=
calc (Σ(v : Σa, B a), C v.1)
≃ (Σa (b : B a), C a) : sigma_assoc_equiv
... ≃ (Σa (c : C a), B a) : sigma_equiv_sigma_right (λa, !comm_equiv_constant)
... ≃ (Σ(u : Σa, C a), B u.1) : sigma_assoc_equiv
/- Interaction with other type constructors -/
definition sigma_empty_left [constructor] (B : empty → Type) : (Σx, B x) ≃ empty :=
begin
fapply equiv.MK,
{ intro v, induction v, contradiction},
{ intro x, contradiction},
{ intro x, contradiction},
{ intro v, induction v, contradiction},
end
definition sigma_empty_right [constructor] (A : Type) : (Σ(a : A), empty) ≃ empty :=
begin
fapply equiv.MK,
{ intro v, induction v, contradiction},
{ intro x, contradiction},
{ intro x, contradiction},
{ intro v, induction v, contradiction},
end
definition sigma_unit_left [constructor] (B : unit → Type) : (Σx, B x) ≃ B star :=
sigma_equiv_of_is_contr_left B _
definition sigma_unit_right [constructor] (A : Type) : (Σ(a : A), unit) ≃ A :=
sigma_equiv_of_is_contr_right _ _
definition sigma_sum_left [constructor] (B : A + A' → Type)
: (Σp, B p) ≃ (Σa, B (inl a)) + (Σa, B (inr a)) :=
begin
fapply equiv.MK,
{ intro v,
induction v with p b,
induction p,
{ apply inl, constructor, assumption },
{ apply inr, constructor, assumption }},
{ intro p, induction p with v v: induction v; constructor; assumption},
{ intro p, induction p with v v: induction v; reflexivity},
{ intro v, induction v with p b, induction p: reflexivity},
end
definition sigma_sum_right [constructor] (B C : A → Type)
: (Σa, B a + C a) ≃ (Σa, B a) + (Σa, C a) :=
begin
fapply equiv.MK,
{ intro v,
induction v with a p,
induction p,
{ apply inl, constructor, assumption},
{ apply inr, constructor, assumption}},
{ intro p,
induction p with v v,
{ induction v, constructor, apply inl, assumption },
{ induction v, constructor, apply inr, assumption }},
{ intro p, induction p with v v: induction v; reflexivity},
{ intro v, induction v with a p, induction p: reflexivity},
end
definition sigma_sigma_eq_right (a : A) (P : Π(b : A), a = b → Type)
: (Σ(b : A) (p : a = b), P b p) ≃ P a idp :=
calc
(Σ(b : A) (p : a = b), P b p) ≃ (Σ(v : Σ(b : A), a = b), P v.1 v.2) : sigma_assoc_equiv
... ≃ P a idp : sigma_equiv_of_is_contr_left _ _
definition sigma_sigma_eq_left (a : A) (P : Π(b : A), b = a → Type)
: (Σ(b : A) (p : b = a), P b p) ≃ P a idp :=
calc
(Σ(b : A) (p : b = a), P b p) ≃ (Σ(v : Σ(b : A), b = a), P v.1 v.2) : sigma_assoc_equiv
... ≃ P a idp : sigma_equiv_of_is_contr_left _ _
definition sigma_assoc_equiv_of_is_contr_left (C : (Σa, B a) → Type)
(H : is_contr (Σa, B a)) : (Σa b, C ⟨a, b⟩) ≃ C (@center _ H) :=
(sigma_assoc_equiv C)⁻¹ᵉ ⬝e !sigma_equiv_of_is_contr_left
definition sigma_homotopy_constant_equiv {A B : Type} (a₀ : A) (f : A → B) :
(Σ(b : B), Πa, f a = b) ≃ Σ(h : Πa, f a = f a₀), h a₀ = idp :=
begin
transitivity Σ(b : B) (h : Πa, f a = b) (p : f a₀ = b), h a₀ = p,
{ symmetry, apply sigma_equiv_sigma_right, intro b, apply sigma_equiv_of_is_contr_right,
intro h, apply is_trunc.is_contr_sigma_eq },
{ refine sigma_equiv_sigma_right (λb, !sigma_comm_equiv) ⬝e _,
exact sigma_sigma_eq_right _ (λb p, Σ(h : Πa, f a = b), h a₀ = p) }
end
/- ** Universal mapping properties -/
/- *** The positive universal property. -/
section
definition is_equiv_sigma_rec [instance] (C : (Σa, B a) → Type)
: is_equiv (sigma.rec : (Πa b, C ⟨a, b⟩) → Πab, C ab) :=
adjointify _ (λ g a b, g ⟨a, b⟩)
(λ g, proof eq_of_homotopy (λu, destruct u (λa b, idp)) qed)
(λ f, refl f)
definition equiv_sigma_rec (C : (Σa, B a) → Type)
: (Π(a : A) (b: B a), C ⟨a, b⟩) ≃ (Πxy, C xy) :=
equiv.mk sigma.rec _
/- *** The negative universal property. -/
protected definition coind_unc [constructor] (fg : Σ(f : Πa, B a), Πa, C a (f a)) (a : A)
: Σ(b : B a), C a b :=
⟨fg.1 a, fg.2 a⟩
protected definition coind [constructor] (f : Π a, B a) (g : Π a, C a (f a)) (a : A) : Σ(b : B a), C a b :=
sigma.coind_unc ⟨f, g⟩ a
definition is_equiv_coind [constructor] (C : Πa, B a → Type)
: is_equiv (@sigma.coind_unc _ _ C) :=
adjointify _ (λ h, ⟨λa, (h a).1, λa, (h a).2⟩)
(λ h, proof eq_of_homotopy (λu, !sigma.eta) qed)
(λfg, destruct fg (λ(f : Π (a : A), B a) (g : Π (x : A), C x (f x)), proof idp qed))
definition sigma_pi_equiv_pi_sigma [constructor] : (Σ(f : Πa, B a), Πa, C a (f a)) ≃ (Πa, Σb, C a b) :=
equiv.mk sigma.coind_unc !is_equiv_coind
end
/- Subtypes (sigma types whose second components are props) -/
definition subtype [reducible] {A : Type} (P : A → Type) [H : Πa, is_prop (P a)] :=
Σ(a : A), P a
notation [parsing_only] `{` binder `|` r:(scoped:1 P, subtype P) `}` := r
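-- Illustrative example (not from the original file): since `subtype` is a reducible wrapper
-- around the sigma type, an inhabitant of `{x | P x}` is literally a dependent pair.
example {X : Type} (P : X → Type) [H : Πx, is_prop (P x)] (u : {x | P x}) : Σ(x : X), P x := u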
/- To prove equality in a subtype, we only need equality of the first component. -/
definition subtype_eq [unfold_full] [H : Πa, is_prop (B a)] {u v : {a | B a}} :
u.1 = v.1 → u = v :=
sigma_eq_unc ∘ inv pr1
definition is_equiv_subtype_eq [constructor] [H : Πa, is_prop (B a)] (u v : {a | B a})
: is_equiv (subtype_eq : u.1 = v.1 → u = v) :=
is_equiv_compose _ _ _ _
local attribute is_equiv_subtype_eq [instance]
definition equiv_subtype [constructor] [H : Πa, is_prop (B a)] (u v : {a | B a}) :
(u.1 = v.1) ≃ (u = v) :=
equiv.mk !subtype_eq _
definition subtype_eq_equiv [constructor] [H : Πa, is_prop (B a)] (u v : {a | B a}) :
(u = v) ≃ (u.1 = v.1) :=
(equiv_subtype u v)⁻¹ᵉ
definition subtype_eq_inv {A : Type} {B : A → Type} [H : Πa, is_prop (B a)] (u v : Σa, B a)
: u = v → u.1 = v.1 :=
subtype_eq⁻¹ᶠ
local attribute subtype_eq_inv [reducible]
definition is_equiv_subtype_eq_inv {A : Type} {B : A → Type} [H : Πa, is_prop (B a)]
(u v : Σa, B a) : is_equiv (subtype_eq_inv u v) :=
_
/- truncatedness -/
theorem is_trunc_sigma (B : A → Type) (n : ℕ₋₂)
[HA : is_trunc n A] [HB : Πa, is_trunc n (B a)] : is_trunc n (Σa, B a) :=
begin
revert A B HA HB,
induction n with n IH,
{ intro A B HA HB, exact is_contr_equiv_closed_rev (sigma_equiv_of_is_contr_left _ _) _ },
{ intro A B HA HB, apply is_trunc_succ_intro, intro u v,
exact is_trunc_equiv_closed_rev n !sigma_eq_equiv (IH _ _ _ _) }
end
theorem is_trunc_subtype (B : A → Prop) (n : ℕ₋₂)
[HA : is_trunc (n.+1) A] : is_trunc (n.+1) (Σa, B a) :=
@(is_trunc_sigma B (n.+1)) _ (λa, is_trunc_succ_of_is_prop _ _ _)
/- if the total space is a mere proposition, you can equate two points in the base type by
finding points in their fibers -/
definition eq_base_of_is_prop_sigma {A : Type} (B : A → Type) (H : is_prop (Σa, B a)) {a a' : A}
(b : B a) (b' : B a') : a = a' :=
(is_prop.elim ⟨a, b⟩ ⟨a', b'⟩)..1
end sigma
attribute sigma.is_trunc_sigma [instance] [priority 1490]
attribute sigma.is_trunc_subtype [instance] [priority 1200]
namespace sigma
/- pointed sigma type -/
open pointed
definition pointed_sigma [instance] [constructor] {A : Type} (P : A → Type) [G : pointed A]
[H : pointed (P pt)] : pointed (Σx, P x) :=
pointed.mk ⟨pt,pt⟩
definition psigma [constructor] {A : Type*} (P : A → Type*) : Type* :=
pointed.mk' (Σa, P a)
notation `Σ*` binders `, ` r:(scoped P, psigma P) := r
definition ppr1 [constructor] {A : Type*} {B : A → Type*} : (Σ*(x : A), B x) →* A :=
pmap.mk pr1 idp
definition ppr2 [unfold_full] {A : Type*} {B : A → Type*} (v : (Σ*(x : A), B x)) : B (ppr1 v) :=
pr2 v
definition ptsigma [constructor] {n : ℕ₋₂} {A : n-Type*} (P : A → n-Type*) : n-Type* :=
ptrunctype.mk' n (Σa, P a)
end sigma
|
b356fc0949f141527e82d13565f3e12686658e44
|
4efff1f47634ff19e2f786deadd394270a59ecd2
|
/src/data/finset/nat_antidiagonal.lean
|
b265819cf0df446b23c79ef013a316d5459f3d5b
|
[
"Apache-2.0"
] |
permissive
|
agjftucker/mathlib
|
d634cd0d5256b6325e3c55bb7fb2403548371707
|
87fe50de17b00af533f72a102d0adefe4a2285e8
|
refs/heads/master
| 1,625,378,131,941
| 1,599,166,526,000
| 1,599,166,526,000
| 160,748,509
| 0
| 0
|
Apache-2.0
| 1,544,141,789,000
| 1,544,141,789,000
| null |
UTF-8
|
Lean
| false
| false
| 1,105
|
lean
|
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import data.finset.basic
import data.multiset.nat_antidiagonal
/-!
# The "antidiagonal" {(0,n), (1,n-1), ..., (n,0)} as a finset.
-/
namespace finset
namespace nat
/-- The antidiagonal of a natural number `n` is
the finset of pairs `(i,j)` such that `i+j = n`. -/
def antidiagonal (n : ℕ) : finset (ℕ × ℕ) :=
⟨multiset.nat.antidiagonal n, multiset.nat.nodup_antidiagonal n⟩
/-- A pair (i,j) is contained in the antidiagonal of `n` if and only if `i+j=n`. -/
@[simp] lemma mem_antidiagonal {n : ℕ} {x : ℕ × ℕ} :
x ∈ antidiagonal n ↔ x.1 + x.2 = n :=
by rw [antidiagonal, finset.mem_def, multiset.nat.mem_antidiagonal]
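-- Illustrative check (not part of the original file): `(1, 1)` lies on the antidiagonal of `2`,
-- since `1 + 1 = 2` holds definitionally.
example : ((1, 1) : ℕ × ℕ) ∈ antidiagonal 2 := mem_antidiagonal.2 rfl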
/-- The cardinality of the antidiagonal of `n` is `n+1`. -/
@[simp] lemma card_antidiagonal (n : ℕ) : (antidiagonal n).card = n+1 :=
by simp [antidiagonal]
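-- Another illustrative check (not from the original file): the antidiagonal of `4`
-- consists of the five pairs `(0,4), ..., (4,0)`.
example : (antidiagonal 4).card = 5 := card_antidiagonal 4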
/-- The antidiagonal of `0` is the list `[(0,0)]` -/
@[simp] lemma antidiagonal_zero : antidiagonal 0 = {(0, 0)} :=
rfl
end nat
end finset
|
3dae18a69a0369a5a7a516f8bdd6694b97196b93
|
74924b1fe80b8f61262cefcfc0cd4d96135c8731
|
/src/tactic/linarith.lean
|
0215798aa1238c70e083e34008eda66ce0fa4a8f
|
[
"Apache-2.0"
] |
permissive
|
101damnations/mathlib
|
0938b3806a09032d8716d3642cbab65db7688c23
|
900c53ae6d5e3f8cc47953363479593e8debc4d8
|
refs/heads/master
| 1,593,832,305,164
| 1,565,631,735,000
| 1,565,631,735,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 33,368
|
lean
|
/-
Copyright (c) 2018 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Robert Y. Lewis
A tactic for discharging linear arithmetic goals using Fourier-Motzkin elimination.
`linarith` is (in principle) complete for ℚ and ℝ. It is not complete for non-dense orders, e.g. ℤ.
@TODO: investigate storing comparisons in a list instead of a set, for possible efficiency gains
@TODO: delay proofs of denominator normalization and nat casting until after contradiction is found
-/
import tactic.ring data.nat.gcd data.list.basic meta.rb_map
meta def nat.to_pexpr : ℕ → pexpr
| 0 := ``(0)
| 1 := ``(1)
| n := if n % 2 = 0 then ``(bit0 %%(nat.to_pexpr (n/2))) else ``(bit1 %%(nat.to_pexpr (n/2)))
open native
namespace linarith
section lemmas
lemma int.coe_nat_bit0 (n : ℕ) : (↑(bit0 n : ℕ) : ℤ) = bit0 (↑n : ℤ) := by simp [bit0]
lemma int.coe_nat_bit1 (n : ℕ) : (↑(bit1 n : ℕ) : ℤ) = bit1 (↑n : ℤ) := by simp [bit1, bit0]
lemma int.coe_nat_bit0_mul (n : ℕ) (x : ℕ) : (↑(bit0 n * x) : ℤ) = (↑(bit0 n) : ℤ) * (↑x : ℤ) := by simp
lemma int.coe_nat_bit1_mul (n : ℕ) (x : ℕ) : (↑(bit1 n * x) : ℤ) = (↑(bit1 n) : ℤ) * (↑x : ℤ) := by simp
lemma int.coe_nat_one_mul (x : ℕ) : (↑(1 * x) : ℤ) = 1 * (↑x : ℤ) := by simp
lemma int.coe_nat_zero_mul (x : ℕ) : (↑(0 * x) : ℤ) = 0 * (↑x : ℤ) := by simp
lemma int.coe_nat_mul_bit0 (n : ℕ) (x : ℕ) : (↑(x * bit0 n) : ℤ) = (↑x : ℤ) * (↑(bit0 n) : ℤ) := by simp
lemma int.coe_nat_mul_bit1 (n : ℕ) (x : ℕ) : (↑(x * bit1 n) : ℤ) = (↑x : ℤ) * (↑(bit1 n) : ℤ) := by simp
lemma int.coe_nat_mul_one (x : ℕ) : (↑(x * 1) : ℤ) = (↑x : ℤ) * 1 := by simp
lemma int.coe_nat_mul_zero (x : ℕ) : (↑(x * 0) : ℤ) = (↑x : ℤ) * 0 := by simp
lemma nat_eq_subst {n1 n2 : ℕ} {z1 z2 : ℤ} (hn : n1 = n2) (h1 : ↑n1 = z1) (h2 : ↑n2 = z2) : z1 = z2 :=
by simpa [eq.symm h1, eq.symm h2, int.coe_nat_eq_coe_nat_iff]
lemma nat_le_subst {n1 n2 : ℕ} {z1 z2 : ℤ} (hn : n1 ≤ n2) (h1 : ↑n1 = z1) (h2 : ↑n2 = z2) : z1 ≤ z2 :=
by simpa [eq.symm h1, eq.symm h2, int.coe_nat_le]
lemma nat_lt_subst {n1 n2 : ℕ} {z1 z2 : ℤ} (hn : n1 < n2) (h1 : ↑n1 = z1) (h2 : ↑n2 = z2) : z1 < z2 :=
by simpa [eq.symm h1, eq.symm h2, int.coe_nat_lt]
lemma eq_of_eq_of_eq {α} [ordered_semiring α] {a b : α} (ha : a = 0) (hb : b = 0) : a + b = 0 :=
by simp *
lemma le_of_eq_of_le {α} [ordered_semiring α] {a b : α} (ha : a = 0) (hb : b ≤ 0) : a + b ≤ 0 :=
by simp *
lemma lt_of_eq_of_lt {α} [ordered_semiring α] {a b : α} (ha : a = 0) (hb : b < 0) : a + b < 0 :=
by simp *
lemma le_of_le_of_eq {α} [ordered_semiring α] {a b : α} (ha : a ≤ 0) (hb : b = 0) : a + b ≤ 0 :=
by simp *
lemma lt_of_lt_of_eq {α} [ordered_semiring α] {a b : α} (ha : a < 0) (hb : b = 0) : a + b < 0 :=
by simp *
lemma mul_neg {α} [ordered_ring α] {a b : α} (ha : a < 0) (hb : b > 0) : b * a < 0 :=
have (-b)*a > 0, from mul_pos_of_neg_of_neg (neg_neg_of_pos hb) ha,
neg_of_neg_pos (by simpa)
lemma mul_nonpos {α} [ordered_ring α] {a b : α} (ha : a ≤ 0) (hb : b > 0) : b * a ≤ 0 :=
have (-b)*a ≥ 0, from mul_nonneg_of_nonpos_of_nonpos (le_of_lt (neg_neg_of_pos hb)) ha,
nonpos_of_neg_nonneg (by simp at this; exact this)
lemma mul_eq {α} [ordered_semiring α] {a b : α} (ha : a = 0) (hb : b > 0) : b * a = 0 :=
by simp *
lemma eq_of_not_lt_of_not_gt {α} [linear_order α] (a b : α) (h1 : ¬ a < b) (h2 : ¬ b < a) : a = b :=
le_antisymm (le_of_not_gt h2) (le_of_not_gt h1)
lemma add_subst {α} [ring α] {n e1 e2 t1 t2 : α} (h1 : n * e1 = t1) (h2 : n * e2 = t2) :
n * (e1 + e2) = t1 + t2 := by simp [left_distrib, *]
lemma sub_subst {α} [ring α] {n e1 e2 t1 t2 : α} (h1 : n * e1 = t1) (h2 : n * e2 = t2) :
n * (e1 - e2) = t1 - t2 := by simp [left_distrib, *]
lemma neg_subst {α} [ring α] {n e t : α} (h1 : n * e = t) : n * (-e) = -t := by simp *
private meta def apnn : tactic unit := `[norm_num]
lemma mul_subst {α} [comm_ring α] {n1 n2 k e1 e2 t1 t2 : α} (h1 : n1 * e1 = t1) (h2 : n2 * e2 = t2)
(h3 : n1*n2 = k . apnn) : k * (e1 * e2) = t1 * t2 :=
have h3 : n1 * n2 = k, from h3,
by rw [←h3, mul_comm n1, mul_assoc n2, ←mul_assoc n1, h1, ←mul_assoc n2, mul_comm n2, mul_assoc, h2] -- OUCH
lemma div_subst {α} [field α] {n1 n2 k e1 e2 t1 : α} (h1 : n1 * e1 = t1) (h2 : n2 / e2 = 1) (h3 : n1*n2 = k) :
k * (e1 / e2) = t1 :=
by rw [←h3, mul_assoc, mul_div_comm, h2, ←mul_assoc, h1, mul_comm, one_mul]
end lemmas
section datatypes
@[derive decidable_eq]
inductive ineq
| eq | le | lt
open ineq
def ineq.max : ineq → ineq → ineq
| eq a := a
| le a := a
| lt a := lt
def ineq.is_lt : ineq → ineq → bool
| eq le := tt
| eq lt := tt
| le lt := tt
| _ _ := ff
def ineq.to_string : ineq → string
| eq := "="
| le := "≤"
| lt := "<"
instance : has_to_string ineq := ⟨ineq.to_string⟩
/--
The main datatype for FM elimination.
Variables are represented by natural numbers, each of which has an integer coefficient.
Index 0 is reserved for constants, i.e. `coeffs.find 0` is the coefficient of 1.
The represented term is coeffs.keys.sum (λ i, coeffs.find i * Var[i]).
str determines the direction of the comparison -- is it < 0, ≤ 0, or = 0?
-/
meta structure comp :=
(str : ineq)
(coeffs : rb_map ℕ int)
meta instance : inhabited comp := ⟨⟨ineq.eq, mk_rb_map⟩⟩
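-- Illustration of the encoding (an assumption about intended usage, not from the original
-- file): the comparison `2*x + 3*y - 1 < 0`, with `x` numbered `1` and `y` numbered `2`,
-- corresponds to the following `comp`; index `0` holds the constant term `-1`.
#check (⟨ineq.lt, rb_map.of_list [(0, -1), (1, 2), (2, 3)]⟩ : comp)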
meta inductive comp_source
| assump : ℕ → comp_source
| add : comp_source → comp_source → comp_source
| scale : ℕ → comp_source → comp_source
meta def comp_source.flatten : comp_source → rb_map ℕ ℕ
| (comp_source.assump n) := mk_rb_map.insert n 1
| (comp_source.add c1 c2) := (comp_source.flatten c1).add (comp_source.flatten c2)
| (comp_source.scale n c) := (comp_source.flatten c).map (λ v, v * n)
meta def comp_source.to_string : comp_source → string
| (comp_source.assump e) := to_string e
| (comp_source.add c1 c2) := comp_source.to_string c1 ++ " + " ++ comp_source.to_string c2
| (comp_source.scale n c) := to_string n ++ " * " ++ comp_source.to_string c
meta instance comp_source.has_to_format : has_to_format comp_source :=
⟨λ a, comp_source.to_string a⟩
meta structure pcomp :=
(c : comp)
(src : comp_source)
meta def map_lt (m1 m2 : rb_map ℕ int) : bool :=
list.lex (prod.lex (<) (<)) m1.to_list m2.to_list
-- make more efficient
meta def comp.lt (c1 c2 : comp) : bool :=
(c1.str.is_lt c2.str) || (c1.str = c2.str) && map_lt c1.coeffs c2.coeffs
meta instance comp.has_lt : has_lt comp := ⟨λ a b, comp.lt a b⟩
meta instance pcomp.has_lt : has_lt pcomp := ⟨λ p1 p2, p1.c < p2.c⟩
meta instance pcomp.has_lt_dec : decidable_rel ((<) : pcomp → pcomp → Prop) := by apply_instance
meta def comp.coeff_of (c : comp) (a : ℕ) : ℤ :=
c.coeffs.zfind a
meta def comp.scale (c : comp) (n : ℕ) : comp :=
{ c with coeffs := c.coeffs.map ((*) (n : ℤ)) }
meta def comp.add (c1 c2 : comp) : comp :=
⟨c1.str.max c2.str, c1.coeffs.add c2.coeffs⟩
meta def pcomp.scale (c : pcomp) (n : ℕ) : pcomp :=
⟨c.c.scale n, comp_source.scale n c.src⟩
meta def pcomp.add (c1 c2 : pcomp) : pcomp :=
⟨c1.c.add c2.c, comp_source.add c1.src c2.src⟩
meta instance pcomp.to_format : has_to_format pcomp :=
⟨λ p, to_fmt p.c.coeffs ++ to_string p.c.str ++ "0"⟩
meta instance comp.to_format : has_to_format comp :=
⟨λ p, to_fmt p.coeffs⟩
end datatypes
section fm_elim
/-- If c1 and c2 both contain variable a with coefficients of opposite sign,
produces v1, v2, and c such that a has been cancelled in c := v1*c1 + v2*c2 -/
meta def elim_var (c1 c2 : comp) (a : ℕ) : option (ℕ × ℕ × comp) :=
let v1 := c1.coeff_of a,
v2 := c2.coeff_of a in
if v1 * v2 < 0 then
let vlcm := nat.lcm v1.nat_abs v2.nat_abs,
v1' := vlcm / v1.nat_abs,
v2' := vlcm / v2.nat_abs in
some ⟨v1', v2', comp.add (c1.scale v1') (c2.scale v2')⟩
else none
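-- Worked illustration (not from the original file): eliminating `a` from `2*a + b ≤ 0` and
-- `-3*a + c < 0`: the coefficients `2` and `-3` have opposite signs and `lcm 2 3 = 6`, so the
-- two comparisons are scaled by `3` and `2` respectively and added, yielding `3*b + 2*c < 0`
-- (the strict `<` wins in `ineq.max`).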
meta def pelim_var (p1 p2 : pcomp) (a : ℕ) : option pcomp :=
do (n1, n2, c) ← elim_var p1.c p2.c a,
return ⟨c, comp_source.add (p1.src.scale n1) (p2.src.scale n2)⟩
meta def comp.is_contr (c : comp) : bool := c.coeffs.empty ∧ c.str = ineq.lt
meta def pcomp.is_contr (p : pcomp) : bool := p.c.is_contr
meta def elim_with_set (a : ℕ) (p : pcomp) (comps : rb_set pcomp) : rb_set pcomp :=
if ¬ p.c.coeffs.contains a then mk_rb_set.insert p else
comps.fold mk_rb_set $ λ pc s,
match pelim_var p pc a with
| some pc := s.insert pc
| none := s
end
/--
The state for the elimination monad.
vars: the set of variables present in comps
comps: a set of comparisons
inputs: a set of pairs of exprs (t, pf), where t is a term and pf is a proof that t {<, ≤, =} 0,
indexed by ℕ.
has_false: stores a pcomp of 0 < 0 if one has been found
TODO: is it more efficient to store comps as a list, to avoid comparisons?
-/
meta structure linarith_structure :=
(vars : rb_set ℕ)
(comps : rb_set pcomp)
@[reducible] meta def linarith_monad :=
state_t linarith_structure (except_t pcomp id)
meta instance : monad linarith_monad := state_t.monad
meta instance : monad_except pcomp linarith_monad :=
state_t.monad_except pcomp
meta def get_vars : linarith_monad (rb_set ℕ) :=
linarith_structure.vars <$> get
meta def get_var_list : linarith_monad (list ℕ) :=
rb_set.to_list <$> get_vars
meta def get_comps : linarith_monad (rb_set pcomp) :=
linarith_structure.comps <$> get
meta def validate : linarith_monad unit :=
do ⟨_, comps⟩ ← get,
match comps.to_list.find (λ p : pcomp, p.is_contr) with
| none := return ()
| some c := throw c
end
meta def update (vars : rb_set ℕ) (comps : rb_set pcomp) : linarith_monad unit :=
state_t.put ⟨vars, comps⟩ >> validate
meta def monad.elim_var (a : ℕ) : linarith_monad unit :=
do vs ← get_vars,
when (vs.contains a) $
do comps ← get_comps,
let cs' := comps.fold mk_rb_set (λ p s, s.union (elim_with_set a p comps)),
update (vs.erase a) cs'
meta def elim_all_vars : linarith_monad unit :=
get_var_list >>= list.mmap' monad.elim_var
end fm_elim
section parse
open ineq tactic
meta def map_of_expr_mul_aux (c1 c2 : rb_map ℕ ℤ) : option (rb_map ℕ ℤ) :=
match c1.keys, c2.keys with
| [0], _ := some $ c2.scale (c1.zfind 0)
| _, [0] := some $ c1.scale (c2.zfind 0)
| [], _ := some mk_rb_map
| _, [] := some mk_rb_map
| _, _ := none
end
meta def list.mfind {α} (tac : α → tactic unit) : list α → tactic α
| [] := failed
| (h::t) := tac h >> return h <|> list.mfind t
meta def rb_map.find_defeq (red : transparency) {v} (m : expr_map v) (e : expr) : tactic v :=
prod.snd <$> list.mfind (λ p, is_def_eq e p.1 red) m.to_list
/--
Turns an expression into a map from ℕ to ℤ, for use in a comp object.
The expr_map ℕ argument identifies which expressions have already been assigned numbers.
Returns a new map.
-/
meta def map_of_expr (red : transparency) : expr_map ℕ → expr → tactic (expr_map ℕ × rb_map ℕ ℤ)
| m e@`(%%e1 * %%e2) :=
(do (m', comp1) ← map_of_expr m e1,
(m', comp2) ← map_of_expr m' e2,
mp ← map_of_expr_mul_aux comp1 comp2,
return (m', mp)) <|>
(do k ← rb_map.find_defeq red m e, return (m, mk_rb_map.insert k 1)) <|>
(let n := m.size + 1 in return (m.insert e n, mk_rb_map.insert n 1))
| m `(%%e1 + %%e2) :=
do (m', comp1) ← map_of_expr m e1,
(m', comp2) ← map_of_expr m' e2,
return (m', comp1.add comp2)
| m `(%%e1 - %%e2) :=
do (m', comp1) ← map_of_expr m e1,
(m', comp2) ← map_of_expr m' e2,
return (m', comp1.add (comp2.scale (-1)))
| m `(-%%e) := do (m', comp) ← map_of_expr m e, return (m', comp.scale (-1))
| m e :=
match e.to_int with
| some 0 := return ⟨m, mk_rb_map⟩
| some z := return ⟨m, mk_rb_map.insert 0 z⟩
| none :=
(do k ← rb_map.find_defeq red m e, return (m, mk_rb_map.insert k 1)) <|>
(let n := m.size + 1 in return (m.insert e n, mk_rb_map.insert n 1))
end
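-- Worked illustration (not from the original file): starting from an empty `expr_map`, the
-- expression `2 * x + y` gets the atom assignment `x ↦ 1`, `y ↦ 2` and the coefficient map
-- `{1 ↦ 2, 2 ↦ 1}`; the numeral `2` contributes through the `[0], _` case of
-- `map_of_expr_mul_aux`, which scales the coefficients of `x`.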
meta def parse_into_comp_and_expr : expr → option (ineq × expr)
| `(%%e < 0) := (ineq.lt, e)
| `(%%e ≤ 0) := (ineq.le, e)
| `(%%e = 0) := (ineq.eq, e)
| _ := none
meta def to_comp (red : transparency) (e : expr) (m : expr_map ℕ) : tactic (comp × expr_map ℕ) :=
do (iq, e) ← parse_into_comp_and_expr e,
(m', comp') ← map_of_expr red m e,
return ⟨⟨iq, comp'⟩, m'⟩
meta def to_comp_fold (red : transparency) : expr_map ℕ → list expr →
tactic (list (option comp) × expr_map ℕ)
| m [] := return ([], m)
| m (h::t) :=
(do (c, m') ← to_comp red h m,
(l, mp) ← to_comp_fold m' t,
return (c::l, mp)) <|>
(do (l, mp) ← to_comp_fold m t,
return (none::l, mp))
/--
Takes a list of proofs of props of the form t {<, ≤, =} 0, and creates a linarith_structure.
-/
meta def mk_linarith_structure (red : transparency) (l : list expr) : tactic (linarith_structure × rb_map ℕ (expr × expr)) :=
do pftps ← l.mmap infer_type,
(l', map) ← to_comp_fold red mk_rb_map pftps,
let lz := list.enum $ ((l.zip pftps).zip l').filter_map (λ ⟨a, b⟩, prod.mk a <$> b),
let prmap := rb_map.of_list $ lz.map (λ ⟨n, x⟩, (n, x.1)),
let vars : rb_set ℕ := rb_map.set_of_list $ list.range map.size.succ,
let pc : rb_set pcomp := rb_map.set_of_list $
lz.map (λ ⟨n, x⟩, ⟨x.2, comp_source.assump n⟩),
return (⟨vars, pc⟩, prmap)
meta def linarith_monad.run (red : transparency) {α} (tac : linarith_monad α) (l : list expr) : tactic ((pcomp ⊕ α) × rb_map ℕ (expr × expr)) :=
do (struct, inputs) ← mk_linarith_structure red l,
match (state_t.run (validate >> tac) struct).run with
| (except.ok (a, _)) := return (sum.inr a, inputs)
| (except.error contr) := return (sum.inl contr, inputs)
end
end parse
section prove
open ineq tactic
meta def get_rel_sides : expr → tactic (expr × expr)
| `(%%a < %%b) := return (a, b)
| `(%%a ≤ %%b) := return (a, b)
| `(%%a = %%b) := return (a, b)
| `(%%a ≥ %%b) := return (a, b)
| `(%%a > %%b) := return (a, b)
| _ := failed
meta def mul_expr (n : ℕ) (e : expr) : pexpr :=
if n = 1 then ``(%%e) else
``(%%(nat.to_pexpr n) * %%e)
meta def add_exprs_aux : pexpr → list pexpr → pexpr
| p [] := p
| p [a] := ``(%%p + %%a)
| p (h::t) := add_exprs_aux ``(%%p + %%h) t
meta def add_exprs : list pexpr → pexpr
| [] := ``(0)
| (h::t) := add_exprs_aux h t
meta def find_contr (m : rb_set pcomp) : option pcomp :=
m.keys.find (λ p, p.c.is_contr)
meta def ineq_const_mul_nm : ineq → name
| lt := ``mul_neg
| le := ``mul_nonpos
| eq := ``mul_eq
meta def ineq_const_nm : ineq → ineq → (name × ineq)
| eq eq := (``eq_of_eq_of_eq, eq)
| eq le := (``le_of_eq_of_le, le)
| eq lt := (``lt_of_eq_of_lt, lt)
| le eq := (``le_of_le_of_eq, le)
| le le := (`add_nonpos, le)
| le lt := (`add_neg_of_nonpos_of_neg, lt)
| lt eq := (``lt_of_lt_of_eq, lt)
| lt le := (`add_neg_of_neg_of_nonpos, lt)
| lt lt := (`add_neg, lt)
meta def mk_single_comp_zero_pf (c : ℕ) (h : expr) : tactic (ineq × expr) :=
do tp ← infer_type h,
some (iq, e) ← return $ parse_into_comp_and_expr tp,
if c = 0 then
do e' ← mk_app ``zero_mul [e], return (eq, e')
else if c = 1 then return (iq, h)
else
do nm ← resolve_name (ineq_const_mul_nm iq),
tp ← (prod.snd <$> (infer_type h >>= get_rel_sides)) >>= infer_type,
cpos ← to_expr ``((%%c.to_pexpr : %%tp) > 0),
(_, ex) ← solve_aux cpos `[norm_num, done],
-- e' ← mk_app (ineq_const_mul_nm iq) [h, ex], -- this takes many seconds longer in some examples! why?
e' ← to_expr ``(%%nm %%h %%ex) ff,
return (iq, e')
meta def mk_lt_zero_pf_aux (c : ineq) (pf npf : expr) (coeff : ℕ) : tactic (ineq × expr) :=
do (iq, h') ← mk_single_comp_zero_pf coeff npf,
let (nm, niq) := ineq_const_nm c iq,
n ← resolve_name nm,
e' ← to_expr ``(%%n %%pf %%h'),
return (niq, e')
/--
Takes a list of coefficients [c] and a list of expressions of equal length.
Each expression is a proof of a prop of the form t {<, ≤, =} 0.
Produces a proof that the sum of (c*t) {<, ≤, =} 0, where the comp is as strong as possible.
-/
meta def mk_lt_zero_pf : list ℕ → list expr → tactic expr
| _ [] := fail "no linear hypotheses found"
| [c] [h] := prod.snd <$> mk_single_comp_zero_pf c h
| (c::ct) (h::t) :=
do (iq, h') ← mk_single_comp_zero_pf c h,
prod.snd <$> (ct.zip t).mfoldl (λ pr ce, mk_lt_zero_pf_aux pr.1 pr.2 ce.2 ce.1) (iq, h')
| _ _ := fail "not enough args to mk_lt_zero_pf"
meta def term_of_ineq_prf (prf : expr) : tactic expr :=
do (lhs, _) ← infer_type prf >>= get_rel_sides,
return lhs
meta structure linarith_config :=
(discharger : tactic unit := `[ring])
(restrict_type : option Type := none)
(restrict_type_reflect : reflected restrict_type . apply_instance)
(exfalso : bool := tt)
(transparency : transparency := reducible)
meta def ineq_pf_tp (pf : expr) : tactic expr :=
do (_, z) ← infer_type pf >>= get_rel_sides,
infer_type z
meta def mk_neg_one_lt_zero_pf (tp : expr) : tactic expr :=
to_expr ``((neg_neg_of_pos zero_lt_one : -1 < (0 : %%tp)))
/--
Assumes e is a proof that t = 0. Creates a proof that -t = 0.
-/
meta def mk_neg_eq_zero_pf (e : expr) : tactic expr :=
to_expr ``(neg_eq_zero.mpr %%e)
meta def add_neg_eq_pfs : list expr → tactic (list expr)
| [] := return []
| (h::t) :=
do some (iq, tp) ← parse_into_comp_and_expr <$> infer_type h,
match iq with
| ineq.eq := do nep ← mk_neg_eq_zero_pf h, tl ← add_neg_eq_pfs t, return $ h::nep::tl
| _ := list.cons h <$> add_neg_eq_pfs t
end
/--
Takes a list of proofs of propositions of the form t {<, ≤, =} 0,
and tries to prove the goal `false`.
-/
meta def prove_false_by_linarith1 (cfg : linarith_config) : list expr → tactic unit
| [] := fail "no args to linarith"
| l@(h::t) :=
do l' ← add_neg_eq_pfs l,
hz ← ineq_pf_tp h >>= mk_neg_one_lt_zero_pf,
(sum.inl contr, inputs) ← elim_all_vars.run cfg.transparency (hz::l')
| fail "linarith failed to find a contradiction",
let coeffs := inputs.keys.map (λ k, (contr.src.flatten.ifind k)),
let pfs : list expr := inputs.keys.map (λ k, (inputs.ifind k).1),
let zip := (coeffs.zip pfs).filter (λ pr, pr.1 ≠ 0),
let (coeffs, pfs) := zip.unzip,
mls ← zip.mmap (λ pr, do e ← term_of_ineq_prf pr.2, return (mul_expr pr.1 e)),
sm ← to_expr $ add_exprs mls,
tgt ← to_expr ``(%%sm = 0),
(a, b) ← solve_aux tgt (cfg.discharger >> done),
pf ← mk_lt_zero_pf coeffs pfs,
pftp ← infer_type pf,
(_, nep, _) ← rewrite_core b pftp,
pf' ← mk_eq_mp nep pf,
mk_app `lt_irrefl [pf'] >>= exact
end prove
section normalize
open tactic
set_option eqn_compiler.max_steps 50000
meta def rem_neg (prf : expr) : expr → tactic expr
| `(_ ≤ _) := to_expr ``(lt_of_not_ge %%prf)
| `(_ < _) := to_expr ``(le_of_not_gt %%prf)
| `(_ > _) := to_expr ``(le_of_not_gt %%prf)
| `(_ ≥ _) := to_expr ``(lt_of_not_ge %%prf)
| e := failed
meta def rearr_comp : expr → expr → tactic expr
| prf `(%%a ≤ 0) := return prf
| prf `(%%a < 0) := return prf
| prf `(%%a = 0) := return prf
| prf `(%%a ≥ 0) := to_expr ``(neg_nonpos.mpr %%prf)
| prf `(%%a > 0) := to_expr ``(neg_neg_of_pos %%prf)
| prf `(0 ≥ %%a) := to_expr ``(show %%a ≤ 0, from %%prf)
| prf `(0 > %%a) := to_expr ``(show %%a < 0, from %%prf)
| prf `(0 = %%a) := to_expr ``(eq.symm %%prf)
| prf `(0 ≤ %%a) := to_expr ``(neg_nonpos.mpr %%prf)
| prf `(0 < %%a) := to_expr ``(neg_neg_of_pos %%prf)
| prf `(%%a ≤ %%b) := to_expr ``(sub_nonpos.mpr %%prf)
| prf `(%%a < %%b) := to_expr ``(sub_neg_of_lt %%prf)
| prf `(%%a = %%b) := to_expr ``(sub_eq_zero.mpr %%prf)
| prf `(%%a > %%b) := to_expr ``(sub_neg_of_lt %%prf)
| prf `(%%a ≥ %%b) := to_expr ``(sub_nonpos.mpr %%prf)
| prf `(¬ %%t) := do nprf ← rem_neg prf t, tp ← infer_type nprf, rearr_comp nprf tp
| prf _ := fail "couldn't rearrange comp"
meta def is_numeric : expr → option ℚ
| `(%%e1 + %%e2) := (+) <$> is_numeric e1 <*> is_numeric e2
| `(%%e1 - %%e2) := has_sub.sub <$> is_numeric e1 <*> is_numeric e2
| `(%%e1 * %%e2) := (*) <$> is_numeric e1 <*> is_numeric e2
| `(%%e1 / %%e2) := (/) <$> is_numeric e1 <*> is_numeric e2
| `(-%%e) := rat.neg <$> is_numeric e
| e := e.to_rat
inductive {u} tree (α : Type u) : Type u
| nil {} : tree
| node : α → tree → tree → tree
def tree.repr {α} [has_repr α] : tree α → string
| tree.nil := "nil"
| (tree.node a t1 t2) := "tree.node " ++ repr a ++ " (" ++ tree.repr t1 ++ ") (" ++ tree.repr t2 ++ ")"
instance {α} [has_repr α] : has_repr (tree α) := ⟨tree.repr⟩
meta def find_cancel_factor : expr → ℕ × tree ℕ
| `(%%e1 + %%e2) :=
let (v1, t1) := find_cancel_factor e1, (v2, t2) := find_cancel_factor e2, lcm := v1.lcm v2 in
(lcm, tree.node lcm t1 t2)
| `(%%e1 - %%e2) :=
let (v1, t1) := find_cancel_factor e1, (v2, t2) := find_cancel_factor e2, lcm := v1.lcm v2 in
(lcm, tree.node lcm t1 t2)
| `(%%e1 * %%e2) :=
match is_numeric e1, is_numeric e2 with
| none, none := (1, tree.node 1 tree.nil tree.nil)
| _, _ :=
let (v1, t1) := find_cancel_factor e1, (v2, t2) := find_cancel_factor e2, pd := v1*v2 in
(pd, tree.node pd t1 t2)
end
| `(%%e1 / %%e2) :=
match is_numeric e2 with
| some q := let (v1, t1) := find_cancel_factor e1, n := v1.lcm q.num.nat_abs in
(n, tree.node n t1 (tree.node q.num.nat_abs tree.nil tree.nil))
| none := (1, tree.node 1 tree.nil tree.nil)
end
| `(-%%e) := find_cancel_factor e
| _ := (1, tree.node 1 tree.nil tree.nil)
open tree
meta def mk_prod_prf : ℕ → tree ℕ → expr → tactic expr
| v (node _ lhs rhs) `(%%e1 + %%e2) :=
do v1 ← mk_prod_prf v lhs e1, v2 ← mk_prod_prf v rhs e2, mk_app ``add_subst [v1, v2]
| v (node _ lhs rhs) `(%%e1 - %%e2) :=
do v1 ← mk_prod_prf v lhs e1, v2 ← mk_prod_prf v rhs e2, mk_app ``sub_subst [v1, v2]
| v (node n lhs@(node ln _ _) rhs) `(%%e1 * %%e2) :=
do tp ← infer_type e1, v1 ← mk_prod_prf ln lhs e1, v2 ← mk_prod_prf (v/ln) rhs e2,
ln' ← tp.of_nat ln, vln' ← tp.of_nat (v/ln), v' ← tp.of_nat v,
ntp ← to_expr ``(%%ln' * %%vln' = %%v'),
(_, npf) ← solve_aux ntp `[norm_num, done],
mk_app ``mul_subst [v1, v2, npf]
| v (node n lhs rhs@(node rn _ _)) `(%%e1 / %%e2) :=
do tp ← infer_type e1, v1 ← mk_prod_prf (v/rn) lhs e1,
rn' ← tp.of_nat rn, vrn' ← tp.of_nat (v/rn), n' ← tp.of_nat n, v' ← tp.of_nat v,
ntp ← to_expr ``(%%rn' / %%e2 = 1),
(_, npf) ← solve_aux ntp `[norm_num, done],
ntp2 ← to_expr ``(%%vrn' * %%n' = %%v'),
(_, npf2) ← solve_aux ntp2 `[norm_num, done],
mk_app ``div_subst [v1, npf, npf2]
| v t `(-%%e) := do v' ← mk_prod_prf v t e, mk_app ``neg_subst [v']
| v _ e :=
do tp ← infer_type e,
v' ← tp.of_nat v,
e' ← to_expr ``(%%v' * %%e),
mk_app `eq.refl [e']
/--
e is a term with rational division. Produces a natural number n and a proof that n*e = e',
where e' has no division.
-/
meta def kill_factors (e : expr) : tactic (ℕ × expr) :=
let (n, t) := find_cancel_factor e in
do e' ← mk_prod_prf n t e, return (n, e')
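-- Worked illustration (not from the original file): on `x / 2 + 3 * (y / 4)`,
-- `find_cancel_factor` returns the factor `4` (the lcm of the contributions `2` and `4`),
-- so `kill_factors` produces `4` together with a proof that `4 * (x / 2 + 3 * (y / 4))`
-- equals a division-free term.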
open expr
meta def expr_contains (n : name) : expr → bool
| (const nm _) := nm = n
| (lam _ _ _ bd) := expr_contains bd
| (pi _ _ _ bd) := expr_contains bd
| (app e1 e2) := expr_contains e1 || expr_contains e2
| _ := ff
lemma sub_into_lt {α} [ordered_semiring α] {a b : α} (he : a = b) (hl : a ≤ 0) : b ≤ 0 :=
by rwa he at hl
meta def norm_hyp_aux (h' lhs : expr) : tactic expr :=
do (v, lhs') ← kill_factors lhs,
if v = 1 then return h' else do
(ih, h'') ← mk_single_comp_zero_pf v h',
(_, nep, _) ← infer_type h'' >>= rewrite_core lhs',
mk_eq_mp nep h''
meta def norm_hyp (h : expr) : tactic expr :=
do htp ← infer_type h,
h' ← rearr_comp h htp,
some (c, lhs) ← parse_into_comp_and_expr <$> infer_type h',
if expr_contains `has_div.div lhs then
norm_hyp_aux h' lhs
else return h'
meta def get_contr_lemma_name : expr → option name
| `(%%a < %%b) := return `lt_of_not_ge
| `(%%a ≤ %%b) := return `le_of_not_gt
| `(%%a = %%b) := return ``eq_of_not_lt_of_not_gt
| `(%%a ≠ %%b) := return `not.intro
| `(%%a ≥ %%b) := return `le_of_not_gt
| `(%%a > %%b) := return `lt_of_not_ge
| `(¬ %%a < %%b) := return `not.intro
| `(¬ %%a ≤ %%b) := return `not.intro
| `(¬ %%a = %%b) := return `not.intro
| `(¬ %%a ≥ %%b) := return `not.intro
| `(¬ %%a > %%b) := return `not.intro
| _ := none
-- assumes the input t is of type ℕ. Produces t' of type ℤ such that ↑t = t' and a proof of equality
meta def cast_expr (e : expr) : tactic (expr × expr) :=
do s ← [`int.coe_nat_add, `int.coe_nat_zero, `int.coe_nat_one,
``int.coe_nat_bit0_mul, ``int.coe_nat_bit1_mul, ``int.coe_nat_zero_mul, ``int.coe_nat_one_mul,
``int.coe_nat_mul_bit0, ``int.coe_nat_mul_bit1, ``int.coe_nat_mul_zero, ``int.coe_nat_mul_one,
``int.coe_nat_bit0, ``int.coe_nat_bit1].mfoldl simp_lemmas.add_simp simp_lemmas.mk,
ce ← to_expr ``(↑%%e : ℤ),
simplify s [] ce {fail_if_unchanged := ff}
meta def is_nat_int_coe : expr → option expr
| `((↑(%%n : ℕ) : ℤ)) := some n
| _ := none
meta def mk_coe_nat_nonneg_prf (e : expr) : tactic expr :=
mk_app `int.coe_nat_nonneg [e]
meta def get_nat_comps : expr → list expr
| `(%%a + %%b) := (get_nat_comps a).append (get_nat_comps b)
| `(%%a * %%b) := (get_nat_comps a).append (get_nat_comps b)
| e := match is_nat_int_coe e with
| some e' := [e']
| none := []
end
meta def mk_coe_nat_nonneg_prfs (e : expr) : tactic (list expr) :=
(get_nat_comps e).mmap mk_coe_nat_nonneg_prf
meta def mk_cast_eq_and_nonneg_prfs (pf a b : expr) (ln : name) : tactic (list expr) :=
do (a', prfa) ← cast_expr a,
(b', prfb) ← cast_expr b,
la ← mk_coe_nat_nonneg_prfs a',
lb ← mk_coe_nat_nonneg_prfs b',
pf' ← mk_app ln [pf, prfa, prfb],
return $ pf'::(la.append lb)
meta def mk_int_pfs_of_nat_pf (pf : expr) : tactic (list expr) :=
do tp ← infer_type pf,
match tp with
| `(%%a = %%b) := mk_cast_eq_and_nonneg_prfs pf a b ``nat_eq_subst
| `(%%a ≤ %%b) := mk_cast_eq_and_nonneg_prfs pf a b ``nat_le_subst
| `(%%a < %%b) := mk_cast_eq_and_nonneg_prfs pf a b ``nat_lt_subst
| `(%%a ≥ %%b) := mk_cast_eq_and_nonneg_prfs pf b a ``nat_le_subst
| `(%%a > %%b) := mk_cast_eq_and_nonneg_prfs pf b a ``nat_lt_subst
| `(¬ %%a ≤ %%b) := do pf' ← mk_app ``lt_of_not_ge [pf], mk_cast_eq_and_nonneg_prfs pf' b a ``nat_lt_subst
| `(¬ %%a < %%b) := do pf' ← mk_app ``le_of_not_gt [pf], mk_cast_eq_and_nonneg_prfs pf' b a ``nat_le_subst
| `(¬ %%a ≥ %%b) := do pf' ← mk_app ``lt_of_not_ge [pf], mk_cast_eq_and_nonneg_prfs pf' a b ``nat_lt_subst
| `(¬ %%a > %%b) := do pf' ← mk_app ``le_of_not_gt [pf], mk_cast_eq_and_nonneg_prfs pf' a b ``nat_le_subst
| _ := fail "mk_int_pfs_of_nat_pf failed: proof is not an inequality"
end
meta def mk_non_strict_int_pf_of_strict_int_pf (pf : expr) : tactic expr :=
do tp ← infer_type pf,
match tp with
| `(%%a < %%b) := to_expr ``(@cast (%%a < %%b) (%%a + 1 ≤ %%b) (by refl) %%pf)
| `(%%a > %%b) := to_expr ``(@cast (%%a > %%b) (%%a ≥ %%b + 1) (by refl) %%pf)
| `(¬ %%a ≤ %%b) := to_expr ``(@cast (%%a > %%b) (%%a ≥ %%b + 1) (by refl) (lt_of_not_ge %%pf))
| `(¬ %%a ≥ %%b) := to_expr ``(@cast (%%a < %%b) (%%a + 1 ≤ %%b) (by refl) (lt_of_not_ge %%pf))
| _ := fail "mk_non_strict_int_pf_of_strict_int_pf failed: proof is not an inequality"
end
meta def guard_is_nat_prop : expr → tactic unit
| `(%%a = _) := infer_type a >>= unify `(ℕ)
| `(%%a ≤ _) := infer_type a >>= unify `(ℕ)
| `(%%a < _) := infer_type a >>= unify `(ℕ)
| `(%%a ≥ _) := infer_type a >>= unify `(ℕ)
| `(%%a > _) := infer_type a >>= unify `(ℕ)
| `(¬ %%p) := guard_is_nat_prop p
| _ := failed
meta def guard_is_strict_int_prop : expr → tactic unit
| `(%%a < _) := infer_type a >>= unify `(ℤ)
| `(%%a > _) := infer_type a >>= unify `(ℤ)
| `(¬ %%a ≤ _) := infer_type a >>= unify `(ℤ)
| `(¬ %%a ≥ _) := infer_type a >>= unify `(ℤ)
| _ := failed
meta def replace_nat_pfs : list expr → tactic (list expr)
| [] := return []
| (h::t) :=
(do infer_type h >>= guard_is_nat_prop,
ls ← mk_int_pfs_of_nat_pf h,
list.append ls <$> replace_nat_pfs t) <|> list.cons h <$> replace_nat_pfs t
meta def replace_strict_int_pfs : list expr → tactic (list expr)
| [] := return []
| (h::t) :=
(do infer_type h >>= guard_is_strict_int_prop,
l ← mk_non_strict_int_pf_of_strict_int_pf h,
list.cons l <$> replace_strict_int_pfs t) <|> list.cons h <$> replace_strict_int_pfs t
meta def partition_by_type_aux : rb_lmap expr expr → list expr → tactic (rb_lmap expr expr)
| m [] := return m
| m (h::t) := do tp ← ineq_pf_tp h, partition_by_type_aux (m.insert tp h) t
meta def partition_by_type (l : list expr) : tactic (rb_lmap expr expr) :=
partition_by_type_aux mk_rb_map l
private meta def try_linarith_on_lists (cfg : linarith_config) (ls : list (list expr)) : tactic unit :=
(first $ ls.map $ prove_false_by_linarith1 cfg) <|> fail "linarith failed"
/--
Takes a list of proofs of propositions.
Filters out the proofs of linear (in)equalities,
and tries to use them to prove `false`.
If pref_type is given, starts by working over this type
-/
meta def prove_false_by_linarith (cfg : linarith_config) (pref_type : option expr) (l : list expr) : tactic unit :=
do l' ← replace_nat_pfs l,
l'' ← replace_strict_int_pfs l',
ls ← list.reduce_option <$> l''.mmap (λ h, (do s ← norm_hyp h, return (some s)) <|> return none)
>>= partition_by_type,
pref_type ← (unify pref_type.iget `(ℕ) >> return (some `(ℤ) : option expr)) <|> return pref_type,
match cfg.restrict_type, ls.values, pref_type with
| some rtp, _, _ :=
do m ← mk_mvar, unify `(some %%m : option Type) cfg.restrict_type_reflect, m ← instantiate_mvars m,
prove_false_by_linarith1 cfg (ls.ifind m)
| none, [ls'], _ := prove_false_by_linarith1 cfg ls'
| none, ls', none := try_linarith_on_lists cfg ls'
| none, _, (some t) := prove_false_by_linarith1 cfg (ls.ifind t) <|> try_linarith_on_lists cfg (ls.erase t).values
end
end normalize
end linarith
section
open tactic linarith
open lean lean.parser interactive tactic interactive.types
local postfix `?`:9001 := optional
local postfix *:9001 := many
meta def linarith.elab_arg_list : option (list pexpr) → tactic (list expr)
| none := return []
| (some l) := l.mmap i_to_expr
meta def linarith.preferred_type_of_goal : option expr → tactic (option expr)
| none := return none
| (some e) := some <$> ineq_pf_tp e
/--
linarith.interactive_aux cfg o_goal restrict_hyps args:
* cfg is a linarith_config object
* o_goal : option expr is the local constant corresponding to the former goal, if there was one
* restrict_hyps : bool is tt if `linarith only [...]` was used
* args : option (list pexpr) is the optional list of arguments in `linarith [...]`
-/
meta def linarith.interactive_aux (cfg : linarith_config) :
option expr → bool → option (list pexpr) → tactic unit
| none tt none := fail "linarith only called with no arguments"
| none tt (some l) := l.mmap i_to_expr >>= prove_false_by_linarith cfg none
| (some e) tt l :=
do tp ← ineq_pf_tp e,
list.cons e <$> linarith.elab_arg_list l >>= prove_false_by_linarith cfg (some tp)
| oe ff l :=
do otp ← linarith.preferred_type_of_goal oe,
list.append <$> local_context <*>
(list.filter (λ a, bnot $ expr.is_local_constant a) <$> linarith.elab_arg_list l) >>=
prove_false_by_linarith cfg otp
/--
Tries to prove a goal of `false` by linear arithmetic on hypotheses.
If the goal is a linear (in)equality, tries to prove it by contradiction.
If the goal is not `false` or an inequality, applies `exfalso` and tries linarith on the
hypotheses.
`linarith` will use all relevant hypotheses in the local context.
`linarith [t1, t2, t3]` will add proof terms t1, t2, t3 to the local context.
`linarith only [h1, h2, h3, t1, t2, t3]` will use only the goal (if relevant), local hypotheses
h1, h2, h3, and proofs t1, t2, t3. It will ignore the rest of the local context.
`linarith!` will use a stronger reducibility setting to identify atoms.
Config options:
`linarith {exfalso := ff}` will fail on a goal that is neither an inequality nor `false`
`linarith {restrict_type := T}` will run only on hypotheses that are inequalities over `T`
`linarith {discharger := tac}` will use `tac` instead of `ring` for normalization.
Options: `ring2`, `ring SOP`, `simp`
-/
meta def tactic.interactive.linarith (red : parse ((tk "!")?))
(restr : parse ((tk "only")?)) (hyps : parse pexpr_list?)
(cfg : linarith_config := {}) : tactic unit :=
let cfg :=
if red.is_some then {cfg with transparency := semireducible, discharger := `[ring!]}
else cfg in
do t ← target,
match get_contr_lemma_name t with
| some nm := seq (applyc nm) $
do t ← intro1, linarith.interactive_aux cfg (some t) restr.is_some hyps
| none := if cfg.exfalso then exfalso >> linarith.interactive_aux cfg none restr.is_some hyps
else fail "linarith failed: target type is not an inequality."
end
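-- Illustrative usage (a minimal sketch, not part of the original tactic file; it assumes the
-- ambient `ℤ` order and ring instances already available through linarith's imports):
example (x y : ℤ) (h₁ : x < y) (h₂ : y < x) : false := by linarith
example (x y z : ℤ) (h₁ : 2*x < 3*y) (h₂ : -4*x + 2*z < 0) (h₃ : 12*y - 4*z < 0) : false :=
by linarith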
end
|
9df63dd49a118d5e6a80fdf6077f8f97c236c412
|
6dc0c8ce7a76229dd81e73ed4474f15f88a9e294
|
/src/Lean/Elab/Deriving/FromToJson.lean
|
ddd56a08c4ea72278e4ae2b15d4e423a289d32d7
|
[
"Apache-2.0"
] |
permissive
|
williamdemeo/lean4
|
72161c58fe65c3ad955d6a3050bb7d37c04c0d54
|
6d00fcf1d6d873e195f9220c668ef9c58e9c4a35
|
refs/heads/master
| 1,678,305,356,877
| 1,614,708,995,000
| 1,614,708,995,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,641
|
lean
|
/-
Copyright (c) 2020 Sebastian Ullrich. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
-/
import Lean.Meta.Transform
import Lean.Elab.Deriving.Basic
import Lean.Elab.Deriving.Util
import Lean.Data.Json.FromToJson
namespace Lean.Elab.Deriving.FromToJson
open Lean.Elab.Command
open Lean.Json
def mkJsonField (n : Name) : Bool × Syntax :=
let s := n.toString
let s₁ := s.dropRightWhile (· == '?')
(s != s₁, Syntax.mkStrLit s₁)
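-- Illustrative behaviour (sketch, not part of the original file): for a field named `foo?`
-- this returns `(true, "foo")`, i.e. the trailing `?` is dropped from the JSON key and the
-- flag marks the field as optional; a plain field `bar` yields `(false, "bar")`.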
def mkToJsonInstanceHandler (declNames : Array Name) : CommandElabM Bool := do
if declNames.size == 1 && (← isStructure (← getEnv) declNames[0]) then
let cmds ← liftTermElabM none <| do
let ctx ← mkContext "toJson" declNames[0]
let header ← mkHeader ctx ``ToJson 1 ctx.typeInfos[0]
let fields := getStructureFieldsFlattened (← getEnv) declNames[0] (includeSubobjectFields := false)
let fields : Array Syntax ← fields.mapM fun field => do
let (isOptField, nm) ← mkJsonField field
if isOptField then `(opt $nm $(mkIdent <| header.targetNames[0] ++ field))
else `([($nm, toJson $(mkIdent <| header.targetNames[0] ++ field))])
let cmd ← `(private def $(mkIdent ctx.auxFunNames[0]):ident $header.binders:explicitBinder* :=
mkObj <| List.join [$fields,*])
return #[cmd] ++ (← mkInstanceCmds ctx ``ToJson declNames)
cmds.forM elabCommand
return true
else
return false
def mkFromJsonInstanceHandler (declNames : Array Name) : CommandElabM Bool := do
if declNames.size == 1 && (← isStructure (← getEnv) declNames[0]) then
let cmds ← liftTermElabM none <| do
let ctx ← mkContext "fromJson" declNames[0]
let header ← mkHeader ctx ``FromJson 0 ctx.typeInfos[0]
let fields := getStructureFieldsFlattened (← getEnv) declNames[0] (includeSubobjectFields := false)
let jsonFields := fields.map (Prod.snd ∘ mkJsonField)
let fields := fields.map mkIdent
let cmd ← `(private def $(mkIdent ctx.auxFunNames[0]):ident $header.binders:explicitBinder* (j : Json)
: Option $(← mkInductiveApp ctx.typeInfos[0] header.argNames) := do
$[let $fields:ident ← getObjValAs? j _ $jsonFields]*
return { $[$fields:ident := $(id fields)]* })
return #[cmd] ++ (← mkInstanceCmds ctx ``FromJson declNames)
cmds.forM elabCommand
return true
else
return false
builtin_initialize
registerBuiltinDerivingHandler ``ToJson mkToJsonInstanceHandler
registerBuiltinDerivingHandler ``FromJson mkFromJsonInstanceHandler
end Lean.Elab.Deriving.FromToJson
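/-
Usage sketch (assumed, not part of the original file): once these handlers are registered,
a declaration such as

  structure Point where
    x : Nat
    y : Nat
    deriving ToJson, FromJson

gets instances built from the private helpers generated above, so that
`toJson ({ x := 1, y := 2 } : Point)` evaluates to the JSON object `{"x": 1, "y": 2}`.
-/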
|
d54d5a1ed68ba5ac8cc9f8eb8d822f039df46c6c
|
737dc4b96c97368cb66b925eeea3ab633ec3d702
|
/stage0/src/Lean/Meta/SynthInstance.lean
|
788d630804c5b5f621a59ff608d78173f8912fd4
|
[
"Apache-2.0"
] |
permissive
|
Bioye97/lean4
|
1ace34638efd9913dc5991443777b01a08983289
|
bc3900cbb9adda83eed7e6affeaade7cfd07716d
|
refs/heads/master
| 1,690,589,820,211
| 1,631,051,000,000
| 1,631,067,598,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 28,336
|
lean
|
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Daniel Selsam, Leonardo de Moura
Type class instance synthesizer using tabled resolution.
-/
import Lean.Meta.Basic
import Lean.Meta.Instances
import Lean.Meta.LevelDefEq
import Lean.Meta.AbstractMVars
import Lean.Meta.WHNF
import Lean.Util.Profile
namespace Lean.Meta
register_builtin_option synthInstance.maxHeartbeats : Nat := {
defValue := 500
descr := "maximum amount of heartbeats per typeclass resolution problem. A heartbeat is number of (small) memory allocations (in thousands), 0 means no limit"
}
register_builtin_option synthInstance.maxSize : Nat := {
defValue := 128
descr := "maximum number of instances used to construct a solution in the type class instance synthesis procedure"
}
namespace SynthInstance
def getMaxHeartbeats (opts : Options) : Nat :=
synthInstance.maxHeartbeats.get opts * 1000
open Std (HashMap)
builtin_initialize inferTCGoalsRLAttr : TagAttribute ←
registerTagAttribute `inferTCGoalsRL "instruct type class resolution procedure to solve goals from right to left for this instance"
def hasInferTCGoalsRLAttribute (env : Environment) (constName : Name) : Bool :=
inferTCGoalsRLAttr.hasTag env constName
structure GeneratorNode where
mvar : Expr
key : Expr
mctx : MetavarContext
instances : Array Expr
currInstanceIdx : Nat
deriving Inhabited
structure ConsumerNode where
mvar : Expr
key : Expr
mctx : MetavarContext
subgoals : List Expr
size : Nat -- instance size so far
deriving Inhabited
inductive Waiter where
| consumerNode : ConsumerNode → Waiter
| root : Waiter
def Waiter.isRoot : Waiter → Bool
| Waiter.consumerNode _ => false
| Waiter.root => true
/-
In tabled resolution, we create a mapping from goals (e.g., `Coe Nat ?x`) to
answers and waiters. Waiters are consumer nodes that are waiting for answers for a
particular node.
We implement this mapping using a `HashMap` where the keys are
normalized expressions. That is, we replace assignable metavariables
with auxiliary free variables of the form `_tc.<idx>`. We do
not declare these free variables in any local context, and we should
view them as "normalized names" for metavariables. For example, the
term `f ?m ?m ?n` is normalized as
`f _tc.0 _tc.0 _tc.1`.
This approach is structural, and we may visit the same goal more
than once if the different occurrences are just definitionally
equal, but not structurally equal.
Remark: a metavariable is assignable only if its depth is equal to
the metavar context depth.
-/
namespace MkTableKey
structure State where
nextIdx : Nat := 0
lmap : HashMap MVarId Level := {}
emap : HashMap MVarId Expr := {}
abbrev M := ReaderT MetavarContext (StateM State)
partial def normLevel (u : Level) : M Level := do
if !u.hasMVar then
pure u
else match u with
| Level.succ v _ => return u.updateSucc! (← normLevel v)
| Level.max v w _ => return u.updateMax! (← normLevel v) (← normLevel w)
| Level.imax v w _ => return u.updateIMax! (← normLevel v) (← normLevel w)
| Level.mvar mvarId _ =>
let mctx ← read
if !mctx.isLevelAssignable mvarId then
pure u
else
let s ← get
match s.lmap.find? mvarId with
| some u' => pure u'
| none =>
let u' := mkLevelParam $ Name.mkNum `_tc s.nextIdx
modify fun s => { s with nextIdx := s.nextIdx + 1, lmap := s.lmap.insert mvarId u' }
pure u'
| u => pure u
partial def normExpr (e : Expr) : M Expr := do
if !e.hasMVar then
pure e
else match e with
| Expr.const _ us _ => return e.updateConst! (← us.mapM normLevel)
| Expr.sort u _ => return e.updateSort! (← normLevel u)
| Expr.app f a _ => return e.updateApp! (← normExpr f) (← normExpr a)
| Expr.letE _ t v b _ => return e.updateLet! (← normExpr t) (← normExpr v) (← normExpr b)
| Expr.forallE _ d b _ => return e.updateForallE! (← normExpr d) (← normExpr b)
| Expr.lam _ d b _ => return e.updateLambdaE! (← normExpr d) (← normExpr b)
| Expr.mdata _ b _ => return e.updateMData! (← normExpr b)
| Expr.proj _ _ b _ => return e.updateProj! (← normExpr b)
| Expr.mvar mvarId _ =>
let mctx ← read
if !mctx.isExprAssignable mvarId then
pure e
else
let s ← get
match s.emap.find? mvarId with
| some e' => pure e'
| none => do
let e' := mkFVar $ Name.mkNum `_tc s.nextIdx
modify fun s => { s with nextIdx := s.nextIdx + 1, emap := s.emap.insert mvarId e' }
pure e'
| _ => pure e
end MkTableKey
/- Remark: `mkTableKey` assumes `e` does not contain assigned metavariables. -/
def mkTableKey (mctx : MetavarContext) (e : Expr) : Expr :=
MkTableKey.normExpr e mctx |>.run' {}
structure Answer where
result : AbstractMVarsResult
resultType : Expr
size : Nat
instance : Inhabited Answer where
default := { result := arbitrary, resultType := arbitrary, size := 0 }
structure TableEntry where
waiters : Array Waiter
answers : Array Answer := #[]
structure Context where
maxResultSize : Nat
maxHeartbeats : Nat
/-
Remark: the SynthInstance.State is not really an extension of `Meta.State`.
The field `postponed` is not needed, and the field `mctx` is misleading since
`synthInstance` methods operate over different `MetavarContext`s simultaneously.
That being said, we still use `extends` because it makes it simpler to move from
`M` to `MetaM`.
-/
structure State where
result? : Option AbstractMVarsResult := none
generatorStack : Array GeneratorNode := #[]
resumeStack : Array (ConsumerNode × Answer) := #[]
tableEntries : HashMap Expr TableEntry := {}
abbrev SynthM := ReaderT Context $ StateRefT State MetaM
def checkMaxHeartbeats : SynthM Unit := do
Core.checkMaxHeartbeatsCore "typeclass" `synthInstance.maxHeartbeats (← read).maxHeartbeats
@[inline] def mapMetaM (f : forall {α}, MetaM α → MetaM α) {α} : SynthM α → SynthM α :=
monadMap @f
instance : Inhabited (SynthM α) where
default := fun _ _ => arbitrary
/-- Return globals and locals instances that may unify with `type` -/
def getInstances (type : Expr) : MetaM (Array Expr) := do
-- We must retrieve `localInstances` before we use `forallTelescopeReducing` because it will update the set of local instances
let localInstances ← getLocalInstances
forallTelescopeReducing type fun _ type => do
let className? ← isClass? type
match className? with
| none => throwError "type class instance expected{indentExpr type}"
| some className =>
let globalInstances ← getGlobalInstancesIndex
let result ← globalInstances.getUnify type
-- Using insertion sort because it is stable and the array `result` should be mostly sorted.
-- Most instances have default priority.
let result := result.insertionSort fun e₁ e₂ => e₁.priority < e₂.priority
let erasedInstances ← getErasedInstances
let result ← result.filterMapM fun e => match e.val with
| Expr.const constName us _ =>
if erasedInstances.contains constName then
return none
else
return some <| e.val.updateConst! (← us.mapM (fun _ => mkFreshLevelMVar))
| _ => panic! "global instance is not a constant"
trace[Meta.synthInstance.globalInstances] "{type}, {result}"
let result := localInstances.foldl (init := result) fun (result : Array Expr) linst =>
if linst.className == className then result.push linst.fvar else result
pure result
def mkGeneratorNode? (key mvar : Expr) : MetaM (Option GeneratorNode) := do
let mvarType ← inferType mvar
let mvarType ← instantiateMVars mvarType
let instances ← getInstances mvarType
if instances.isEmpty then
pure none
else
let mctx ← getMCtx
pure $ some {
mvar := mvar,
key := key,
mctx := mctx,
instances := instances,
currInstanceIdx := instances.size
}
/-- Create a new generator node for `mvar` and add `waiter` as its waiter.
`key` must be `mkTableKey mctx mvarType`. -/
def newSubgoal (mctx : MetavarContext) (key : Expr) (mvar : Expr) (waiter : Waiter) : SynthM Unit :=
withMCtx mctx do
trace[Meta.synthInstance.newSubgoal] key
match (← mkGeneratorNode? key mvar) with
| none => pure ()
| some node =>
let entry : TableEntry := { waiters := #[waiter] }
modify fun s =>
{ s with
generatorStack := s.generatorStack.push node,
tableEntries := s.tableEntries.insert key entry }
def findEntry? (key : Expr) : SynthM (Option TableEntry) := do
return (← get).tableEntries.find? key
def getEntry (key : Expr) : SynthM TableEntry := do
match (← findEntry? key) with
| none => panic! "invalid key at synthInstance"
| some entry => pure entry
/--
Create a `key` for the goal associated with the given metavariable.
That is, we create a key for the type of the metavariable.
We must instantiate assigned metavariables before we invoke `mkTableKey`. -/
def mkTableKeyFor (mctx : MetavarContext) (mvar : Expr) : SynthM Expr :=
withMCtx mctx do
let mvarType ← inferType mvar
let mvarType ← instantiateMVars mvarType
return mkTableKey mctx mvarType
/- See `getSubgoals` and `getSubgoalsAux`
We use the parameter `j` to reduce the number of `instantiate*` invocations.
It is the same approach we use at `forallTelescope` and `lambdaTelescope`.
Given `getSubgoalsAux args j subgoals instVal type`,
we have that `type.instantiateRevRange j args.size args` does not have loose bound variables. -/
structure SubgoalsResult where
subgoals : List Expr
instVal : Expr
instTypeBody : Expr
private partial def getSubgoalsAux (lctx : LocalContext) (localInsts : LocalInstances) (xs : Array Expr)
: Array Expr → Nat → List Expr → Expr → Expr → MetaM SubgoalsResult
| args, j, subgoals, instVal, Expr.forallE n d b c => do
let d := d.instantiateRevRange j args.size args
let mvarType ← mkForallFVars xs d
let mvar ← mkFreshExprMVarAt lctx localInsts mvarType
let arg := mkAppN mvar xs
let instVal := mkApp instVal arg
let subgoals := if c.binderInfo.isInstImplicit then mvar::subgoals else subgoals
let args := args.push (mkAppN mvar xs)
getSubgoalsAux lctx localInsts xs args j subgoals instVal b
| args, j, subgoals, instVal, type => do
let type := type.instantiateRevRange j args.size args
let type ← whnf type
if type.isForall then
getSubgoalsAux lctx localInsts xs args args.size subgoals instVal type
else
pure ⟨subgoals, instVal, type⟩
/--
`getSubgoals lctx localInsts xs inst` creates the subgoals for the instance `inst`.
The subgoals are in the context of the free variables `xs`, and
`(lctx, localInsts)` is the local context and instances before we added the free variables to it.
This extra complication is required because
1- We want all metavariables created by `synthInstance` to share the same local context.
2- We want to ensure that applications such as `mvar xs` are higher order patterns.
The method `getSubgoals` creates a new metavariable for each parameter of `inst`.
For example, suppose the type of `inst` is `forall (x_1 : A_1) ... (x_n : A_n), B x_1 ... x_n`.
Then, we create the metavariables `?m_i : forall xs, A_i`, and return the subset of these
metavariables that are instance implicit arguments, and the expressions:
- `inst (?m_1 xs) ... (?m_n xs)` (aka `instVal`)
- `B (?m_1 xs) ... (?m_n xs)` -/
def getSubgoals (lctx : LocalContext) (localInsts : LocalInstances) (xs : Array Expr) (inst : Expr) : MetaM SubgoalsResult := do
let instType ← inferType inst
let result ← getSubgoalsAux lctx localInsts xs #[] 0 [] inst instType
match inst.getAppFn with
| Expr.const constName _ _ =>
let env ← getEnv
if hasInferTCGoalsRLAttribute env constName then
pure result
else
pure { result with subgoals := result.subgoals.reverse }
| _ => pure result
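-- Worked sketch (hypothetical instance, not part of the original file): for the goal
-- `Add (Nat × Int)` and an instance `inst : {α β : Type} → [Add α] → [Add β] → Add (α × β)`,
-- `getSubgoals` returns `instVal = @inst ?m₁ ?m₂ ?m₃ ?m₄`, `instTypeBody = Add (?m₁ × ?m₂)`,
-- and the subgoal list `[?m₃, ?m₄]` (only instance-implicit arguments become subgoals);
-- the order is flipped to right-to-left when the instance carries the `inferTCGoalsRL` attribute.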
def tryResolveCore (mvar : Expr) (inst : Expr) : MetaM (Option (MetavarContext × List Expr)) := do
let mvar ← instantiateMVars mvar
if !(← hasAssignableMVar mvar) then
/- The metavariable `mvar` may have been assigned when solving typing constraints.
This may happen when a local instance type depends on other local instances.
For example, in Mathlib, we have
```
@Submodule.setLike : {R : Type u_1} → {M : Type u_2} →
[_inst_1 : Semiring R] →
[_inst_2 : AddCommMonoid M] →
[_inst_3 : @ModuleS R M _inst_1 _inst_2] →
SetLike (@Submodule R M _inst_1 _inst_2 _inst_3) M
```
TODO: discuss what is the correct behavior here. There are other possibilities.
1) We could try to synthesize the instances `_inst_1` and `_inst_2` and check
whether it is defeq to the one inferred by typing constraints. That is, we
remove this `if`-statement. We discarded this one because some Mathlib theorems
failed to be elaborated using it.
2) Generate an error/warning message when instances such as `Submodule.setLike` are declared,
and instruct user to use `{}` binder annotation for `_inst_1` `_inst_2`.
-/
return some ((← getMCtx), [])
let mvarType ← inferType mvar
let lctx ← getLCtx
let localInsts ← getLocalInstances
forallTelescopeReducing mvarType fun xs mvarTypeBody => do
let ⟨subgoals, instVal, instTypeBody⟩ ← getSubgoals lctx localInsts xs inst
trace[Meta.synthInstance.tryResolve] "{mvarTypeBody} =?= {instTypeBody}"
if (← isDefEq mvarTypeBody instTypeBody) then
let instVal ← mkLambdaFVars xs instVal
if (← isDefEq mvar instVal) then
trace[Meta.synthInstance.tryResolve] "success"
pure (some ((← getMCtx), subgoals))
else
trace[Meta.synthInstance.tryResolve] "failure assigning"
pure none
else
trace[Meta.synthInstance.tryResolve] "failure"
pure none
/--
Try to synthesize metavariable `mvar` using the instance `inst`.
Remark: `mctx` contains `mvar`.
If it succeeds, the result is a new updated metavariable context and a new list of subgoals.
A subgoal is created for each instance implicit parameter of `inst`. -/
def tryResolve (mctx : MetavarContext) (mvar : Expr) (inst : Expr) : SynthM (Option (MetavarContext × List Expr)) :=
traceCtx `Meta.synthInstance.tryResolve <| withMCtx mctx <| tryResolveCore mvar inst
/--
Assign a precomputed answer to `mvar`.
If it succeeds, the result is a new updated metavariable context and a new list of subgoals. -/
def tryAnswer (mctx : MetavarContext) (mvar : Expr) (answer : Answer) : SynthM (Option MetavarContext) :=
withMCtx mctx do
let (_, _, val) ← openAbstractMVarsResult answer.result
if (← isDefEq mvar val) then
pure (some (← getMCtx))
else
pure none
/-- Move waiters that are waiting for the given answer to the resume stack. -/
def wakeUp (answer : Answer) : Waiter → SynthM Unit
| Waiter.root => do
/- Recall that we now use `ignoreLevelMVarDepth := true`. Thus, we should allow solutions
containing universe metavariables, and not check `answer.result.paramNames.isEmpty`.
We use `openAbstractMVarsResult` to construct the universe metavariables
at the correct depth. -/
if answer.result.numMVars == 0 then
modify fun s => { s with result? := answer.result }
else
let (_, _, answerExpr) ← openAbstractMVarsResult answer.result
trace[Meta.synthInstance] "skip answer containing metavariables {answerExpr}"
pure ()
| Waiter.consumerNode cNode =>
modify fun s => { s with resumeStack := s.resumeStack.push (cNode, answer) }
def isNewAnswer (oldAnswers : Array Answer) (answer : Answer) : Bool :=
oldAnswers.all fun oldAnswer => do
-- Remark: isDefEq here is too expensive. TODO: if `==` is too imprecise, add some light normalization to `resultType` at `addAnswer`
-- iseq ← isDefEq oldAnswer.resultType answer.resultType; pure (!iseq)
oldAnswer.resultType != answer.resultType
private def mkAnswer (cNode : ConsumerNode) : MetaM Answer :=
withMCtx cNode.mctx do
traceM `Meta.synthInstance.newAnswer do m!"size: {cNode.size}, {← inferType cNode.mvar}"
let val ← instantiateMVars cNode.mvar
trace[Meta.synthInstance.newAnswer] "val: {val}"
let result ← abstractMVars val -- assignable metavariables become parameters
let resultType ← inferType result.expr
pure { result := result, resultType := resultType, size := cNode.size + 1 }
/--
Create a new answer after `cNode` resolved all subgoals.
That is, `cNode.subgoals == []`.
And then, store it in the tabled entries map, and wakeup waiters. -/
def addAnswer (cNode : ConsumerNode) : SynthM Unit := do
if cNode.size ≥ (← read).maxResultSize then
traceM `Meta.synthInstance.discarded do m!"size: {cNode.size} ≥ {(← read).maxResultSize}, {← inferType cNode.mvar}"
return ()
else
let answer ← mkAnswer cNode
-- Remark: `answer` does not contain assignable or assigned metavariables.
let key := cNode.key
let entry ← getEntry key
if isNewAnswer entry.answers answer then
let newEntry := { entry with answers := entry.answers.push answer }
modify fun s => { s with tableEntries := s.tableEntries.insert key newEntry }
entry.waiters.forM (wakeUp answer)
/-- Process the next subgoal in the given consumer node. -/
def consume (cNode : ConsumerNode) : SynthM Unit :=
match cNode.subgoals with
| [] => addAnswer cNode
| mvar::_ => do
let waiter := Waiter.consumerNode cNode
let key ← mkTableKeyFor cNode.mctx mvar
let entry? ← findEntry? key
match entry? with
| none => newSubgoal cNode.mctx key mvar waiter
| some entry => modify fun s =>
{ s with
resumeStack := entry.answers.foldl (fun s answer => s.push (cNode, answer)) s.resumeStack,
tableEntries := s.tableEntries.insert key { entry with waiters := entry.waiters.push waiter } }
def getTop : SynthM GeneratorNode := do
pure (← get).generatorStack.back
@[inline] def modifyTop (f : GeneratorNode → GeneratorNode) : SynthM Unit :=
modify fun s => { s with generatorStack := s.generatorStack.modify (s.generatorStack.size - 1) f }
/-- Try the next instance in the node on the top of the generator stack. -/
def generate : SynthM Unit := do
let gNode ← getTop
if gNode.currInstanceIdx == 0 then
modify fun s => { s with generatorStack := s.generatorStack.pop }
else do
let key := gNode.key
let idx := gNode.currInstanceIdx - 1
let inst := gNode.instances.get! idx
let mctx := gNode.mctx
let mvar := gNode.mvar
trace[Meta.synthInstance.generate] "instance {inst}"
modifyTop fun gNode => { gNode with currInstanceIdx := idx }
match (← tryResolve mctx mvar inst) with
| none => pure ()
| some (mctx, subgoals) => consume { key := key, mvar := mvar, subgoals := subgoals, mctx := mctx, size := 0 }
def getNextToResume : SynthM (ConsumerNode × Answer) := do
let s ← get
let r := s.resumeStack.back
modify fun s => { s with resumeStack := s.resumeStack.pop }
pure r
/--
Given `(cNode, answer)` on the top of the resume stack, continue execution by using `answer` to solve the
next subgoal. -/
def resume : SynthM Unit := do
let (cNode, answer) ← getNextToResume
match cNode.subgoals with
| [] => panic! "resume found no remaining subgoals"
| mvar::rest =>
match (← tryAnswer cNode.mctx mvar answer) with
| none => pure ()
| some mctx =>
withMCtx mctx <| traceM `Meta.synthInstance.resume do
let goal ← inferType cNode.mvar
let subgoal ← inferType mvar
pure m!"size: {cNode.size + answer.size}, {goal} <== {subgoal}"
consume { key := cNode.key, mvar := cNode.mvar, subgoals := rest, mctx := mctx, size := cNode.size + answer.size }
def step : SynthM Bool := do
checkMaxHeartbeats
let s ← get
if !s.resumeStack.isEmpty then
resume
pure true
else if !s.generatorStack.isEmpty then
generate
pure true
else
pure false
def getResult : SynthM (Option AbstractMVarsResult) := do
pure (← get).result?
partial def synth : SynthM (Option AbstractMVarsResult) := do
if (← step) then
match (← getResult) with
| none => synth
| some result => pure result
else
trace[Meta.synthInstance] "failed"
pure none
def main (type : Expr) (maxResultSize : Nat) : MetaM (Option AbstractMVarsResult) :=
withCurrHeartbeats <| traceCtx `Meta.synthInstance do
trace[Meta.synthInstance] "main goal {type}"
let mvar ← mkFreshExprMVar type
let mctx ← getMCtx
let key := mkTableKey mctx type
let action : SynthM (Option AbstractMVarsResult) := do
newSubgoal mctx key mvar Waiter.root
synth
action.run { maxResultSize := maxResultSize, maxHeartbeats := getMaxHeartbeats (← getOptions) } |>.run' {}
end SynthInstance
/-
Type class parameters can be annotated with `outParam` annotations.
Given `C a_1 ... a_n`, we replace `a_i` with a fresh metavariable `?m_i` IF
`a_i` is an `outParam`.
The result is type correct because we reject type class declarations IF
they contain a regular parameter X that depends on an `out` parameter Y.
Then, we execute type class resolution as usual.
If it succeeds, and metavariables ?m_i have been assigned, we try to unify
the original type `C a_1 ... a_n` with the normalized one.
-/
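-- Example (sketch, assuming the core `Membership` class, whose first parameter is an `outParam`):
-- the goal `Membership Nat (List Nat)` is normalized by `preprocessOutParam` below to
-- `Membership ?m (List Nat)`; after an instance is found, its result type is unified with the
-- original goal, which in particular checks the inferred assignment `?m := Nat`.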
private def preprocess (type : Expr) : MetaM Expr :=
forallTelescopeReducing type fun xs type => do
let type ← whnf type
mkForallFVars xs type
private def preprocessLevels (us : List Level) : MetaM (List Level × Bool) := do
let mut r := #[]
let mut modified := false
for u in us do
let u ← instantiateLevelMVars u
if u.hasMVar then
r := r.push (← mkFreshLevelMVar)
modified := true
else
r := r.push u
return (r.toList, modified)
private partial def preprocessArgs (type : Expr) (i : Nat) (args : Array Expr) : MetaM (Array Expr) := do
if h : i < args.size then
let type ← whnf type
match type with
| Expr.forallE _ d b _ => do
let arg := args.get ⟨i, h⟩
let arg ← if isOutParam d then mkFreshExprMVar d else pure arg
let args := args.set ⟨i, h⟩ arg
preprocessArgs (b.instantiate1 arg) (i+1) args
| _ =>
throwError "type class resolution failed, insufficient number of arguments" -- TODO improve error message
else
return args
private def preprocessOutParam (type : Expr) : MetaM Expr :=
forallTelescope type fun xs typeBody => do
match typeBody.getAppFn with
| c@(Expr.const constName us _) =>
let env ← getEnv
if !hasOutParams env constName then
return type
else
let args := typeBody.getAppArgs
let cType ← inferType c
let args ← preprocessArgs cType 0 args
mkForallFVars xs (mkAppN c args)
| _ =>
return type
/-
Remark: when `maxResultSize? == none`, the configuration option `synthInstance.maxSize` is used.
Remark: we use a different option for controlling the maximum result size for coercions.
-/
def synthInstance? (type : Expr) (maxResultSize? : Option Nat := none) : MetaM (Option Expr) := do profileitM Exception "typeclass inference" (← getOptions) do
let opts ← getOptions
let maxResultSize := maxResultSize?.getD (synthInstance.maxSize.get opts)
let inputConfig ← getConfig
withConfig (fun config => { config with isDefEqStuckEx := true, transparency := TransparencyMode.instances,
foApprox := true, ctxApprox := true, constApprox := false,
ignoreLevelMVarDepth := true }) do
let type ← instantiateMVars type
let type ← preprocess type
let s ← get
match s.cache.synthInstance.find? type with
| some result => pure result
| none =>
let result? ← withNewMCtxDepth do
let normType ← preprocessOutParam type
trace[Meta.synthInstance] "preprocess: {type} ==> {normType}"
SynthInstance.main normType maxResultSize
let resultHasUnivMVars := if let some result := result? then !result.paramNames.isEmpty else false
let result? ← match result? with
| none => pure none
| some result => do
let (_, _, result) ← openAbstractMVarsResult result
trace[Meta.synthInstance] "result {result}"
let resultType ← inferType result
if (← withConfig (fun _ => inputConfig) <| isDefEq type resultType) then
let result ← instantiateMVars result
pure (some result)
else
trace[Meta.synthInstance] "result type{indentExpr resultType}\nis not definitionally equal to{indentExpr type}"
pure none
if type.hasMVar || resultHasUnivMVars then
pure result?
else do
modify fun s => { s with cache := { s.cache with synthInstance := s.cache.synthInstance.insert type result? } }
pure result?
/--
Return `LOption.some r` if succeeded, `LOption.none` if it failed, and `LOption.undef` if
instance cannot be synthesized right now because `type` contains metavariables. -/
def trySynthInstance (type : Expr) (maxResultSize? : Option Nat := none) : MetaM (LOption Expr) := do
catchInternalId isDefEqStuckExceptionId
(toLOptionM <| synthInstance? type maxResultSize?)
(fun _ => pure LOption.undef)
def synthInstance (type : Expr) (maxResultSize? : Option Nat := none) : MetaM Expr :=
catchInternalId isDefEqStuckExceptionId
(do
let result? ← synthInstance? type maxResultSize?
match result? with
| some result => pure result
| none => throwError "failed to synthesize{indentExpr type}")
(fun _ => throwError "failed to synthesize{indentExpr type}")
@[export lean_synth_pending]
private def synthPendingImp (mvarId : MVarId) : MetaM Bool := withIncRecDepth <| withMVarContext mvarId do
let mvarDecl ← getMVarDecl mvarId
match mvarDecl.kind with
| MetavarKind.syntheticOpaque =>
return false
| _ =>
/- Check whether the type of the given metavariable is a class or not. If yes, then try to synthesize
it using type class resolution. We only do it for `synthetic` and `natural` metavariables. -/
match (← isClass? mvarDecl.type) with
| none =>
return false
| some _ =>
/- TODO: use a configuration option instead of the hard-coded limit `1`. -/
if (← read).synthPendingDepth > 1 then
trace[Meta.synthPending] "too many nested synthPending invocations"
return false
else
withReader (fun ctx => { ctx with synthPendingDepth := ctx.synthPendingDepth + 1 }) do
trace[Meta.synthPending] "synthPending {mkMVar mvarId}"
let val? ← catchInternalId isDefEqStuckExceptionId (synthInstance? mvarDecl.type (maxResultSize? := none)) (fun _ => pure none)
match val? with
| none =>
return false
| some val =>
if (← isExprMVarAssigned mvarId) then
return false
else
assignExprMVar mvarId val
return true
builtin_initialize
registerTraceClass `Meta.synthInstance
registerTraceClass `Meta.synthInstance.globalInstances
registerTraceClass `Meta.synthInstance.newSubgoal
registerTraceClass `Meta.synthInstance.tryResolve
registerTraceClass `Meta.synthInstance.resume
registerTraceClass `Meta.synthInstance.generate
registerTraceClass `Meta.synthPending
end Lean.Meta
|
26b33c6ddc6761c01a363c9288a51c8dcc35250b
|
4efff1f47634ff19e2f786deadd394270a59ecd2
|
/src/data/finset/sort.lean
|
5bfebfda436ee4ef071a24f11abc91a7b5b1742d
|
[
"Apache-2.0"
] |
permissive
|
agjftucker/mathlib
|
d634cd0d5256b6325e3c55bb7fb2403548371707
|
87fe50de17b00af533f72a102d0adefe4a2285e8
|
refs/heads/master
| 1,625,378,131,941
| 1,599,166,526,000
| 1,599,166,526,000
| 160,748,509
| 0
| 0
|
Apache-2.0
| 1,544,141,789,000
| 1,544,141,789,000
| null |
UTF-8
|
Lean
| false
| false
| 10,553
|
lean
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
-/
import data.finset.lattice
import data.multiset.sort
/-!
# Construct a sorted list from a finset.
-/
namespace finset
open multiset nat
variables {α β : Type*}
/-! ### sort -/
section sort
variables (r : α → α → Prop) [decidable_rel r]
[is_trans α r] [is_antisymm α r] [is_total α r]
/-- `sort s` constructs a sorted list from the unordered set `s`.
(Uses the merge sort algorithm.) -/
def sort (s : finset α) : list α := sort r s.1
@[simp] theorem sort_sorted (s : finset α) : list.sorted r (sort r s) :=
sort_sorted _ _
@[simp] theorem sort_eq (s : finset α) : ↑(sort r s) = s.1 :=
sort_eq _ _
@[simp] theorem sort_nodup (s : finset α) : (sort r s).nodup :=
(by rw sort_eq; exact s.2 : @multiset.nodup α (sort r s))
@[simp] theorem sort_to_finset [decidable_eq α] (s : finset α) : (sort r s).to_finset = s :=
list.to_finset_eq (sort_nodup r s) ▸ eq_of_veq (sort_eq r s)
@[simp] theorem mem_sort {s : finset α} {a : α} : a ∈ sort r s ↔ a ∈ s :=
multiset.mem_sort _
@[simp] theorem length_sort {s : finset α} : (sort r s).length = s.card :=
multiset.length_sort _
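-- Quick illustration (not part of the original file; assumes the usual order instances on `ℕ`):
-- #eval ({3, 1, 2} : finset ℕ).sort (≤) -- expected output: [1, 2, 3]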
end sort
section sort_linear_order
variables [decidable_linear_order α]
theorem sort_sorted_lt (s : finset α) :
list.sorted (<) (sort (≤) s) :=
(sort_sorted _ _).imp₂ (@lt_of_le_of_ne _ _) (sort_nodup _ _)
lemma sorted_zero_eq_inf' (s : finset α) (h : 0 < (s.sort (≤)).length) :
(s.sort (≤)).nth_le 0 h = s.1.inf' (by rwa [length_sort, card_pos] at h) :=
begin
let l := s.sort (≤),
apply le_antisymm,
{ have : s.1.inf' _ ∈ l := (finset.mem_sort (≤)).mpr (s.1.inf'_mem _),
obtain ⟨i, i_lt, hi⟩ : ∃ i (hi : i < l.length), l.nth_le i hi = s.1.inf' _ :=
list.mem_iff_nth_le.1 this,
rw ← hi,
exact list.nth_le_of_sorted_of_le (s.sort_sorted (≤)) (nat.zero_le i) },
{ have : l.nth_le 0 h ∈ s := (finset.mem_sort (≤)).1 (list.nth_le_mem l 0 h),
exact s.1.inf'_le _ this }
end
lemma inf'_eq_sorted_zero (s : finset α) (H : s.nonempty) :
s.1.inf' H = (s.sort (≤)).nth_le 0 (by rwa [length_sort, card_pos]) :=
by rw sorted_zero_eq_inf'
lemma sorted_last_eq_sup' (s : finset α) (h : (s.sort (≤)).length - 1 < (s.sort (≤)).length) :
(s.sort (≤)).nth_le ((s.sort (≤)).length - 1) h =
s.1.sup' (by simpa [card_pos] using lt_of_le_of_lt (nat.zero_le _) h) :=
begin
let l := s.sort (≤),
apply le_antisymm,
{ have : l.nth_le ((s.sort (≤)).length - 1) h ∈ s :=
(finset.mem_sort (≤)).1 (list.nth_le_mem l _ h),
exact s.1.le_sup' _ this },
{ have : s.1.sup' _ ∈ l := (finset.mem_sort (≤)).mpr (s.1.sup'_mem _),
obtain ⟨i, i_lt, hi⟩ : ∃ i (hi : i < l.length), l.nth_le i hi = s.1.sup' _ :=
list.mem_iff_nth_le.1 this,
rw ← hi,
have : i ≤ l.length - 1 := nat.le_pred_of_lt i_lt,
exact list.nth_le_of_sorted_of_le (s.sort_sorted (≤)) (nat.le_pred_of_lt i_lt) },
end
lemma sup'_eq_sorted_last (s : finset α) (H : s.nonempty) :
s.1.sup' H = (s.sort (≤)).nth_le ((s.sort (≤)).length - 1)
(by simpa using sub_lt (card_pos.mpr H) zero_lt_one) :=
by rw sorted_last_eq_sup'
/-- Given a finset `s` of cardinal `k` in a linear order `α`, the map `mono_of_fin s h`
is the increasing bijection between `fin k` and `s` as an `α`-valued map. Here, `h` is a proof that
the cardinality of `s` is `k`. We use this instead of a map `fin s.card → α` to avoid
casting issues in further uses of this function. -/
def mono_of_fin (s : finset α) {k : ℕ} (h : s.card = k) (i : fin k) : α :=
have A : (i : ℕ) < (s.sort (≤)).length, by simpa [h] using i.2,
(s.sort (≤)).nth_le i A
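-- Illustration (not part of the original file): for `s = {5, 8, 13} : finset ℕ` and a proof
-- `h : s.card = 3`, the map `s.mono_of_fin h` sends `0 ↦ 5`, `1 ↦ 8` and `2 ↦ 13`,
-- i.e. it enumerates `s` in increasing order.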
lemma mono_of_fin_strict_mono (s : finset α) {k : ℕ} (h : s.card = k) :
strict_mono (s.mono_of_fin h) :=
begin
assume i j hij,
exact list.pairwise_iff_nth_le.1 s.sort_sorted_lt _ _ _ hij
end
lemma mono_of_fin_bij_on (s : finset α) {k : ℕ} (h : s.card = k) :
set.bij_on (s.mono_of_fin h) set.univ ↑s :=
begin
have A : ∀ j, j ∈ s ↔ j ∈ (s.sort (≤)) := λ j, by simp,
apply set.bij_on.mk,
{ assume i hi,
simp only [mono_of_fin, set.mem_preimage, mem_coe, list.nth_le, A],
exact list.nth_le_mem _ _ _ },
{ exact ((mono_of_fin_strict_mono s h).injective).inj_on _ },
{ assume x hx,
simp only [mem_coe, A] at hx,
obtain ⟨i, il, hi⟩ : ∃ (i : ℕ) (h : i < (s.sort (≤)).length), (s.sort (≤)).nth_le i h = x :=
list.nth_le_of_mem hx,
simp [h] at il,
exact ⟨⟨i, il⟩, set.mem_univ _, hi⟩ }
end
lemma mono_of_fin_injective (s : finset α) {k : ℕ} (h : s.card = k) :
function.injective (s.mono_of_fin h) :=
set.injective_iff_inj_on_univ.mpr (s.mono_of_fin_bij_on h).inj_on
/-- The bijection `mono_of_fin s h` sends `0` to the minimum of `s`. -/
lemma mono_of_fin_zero_eq_inf' {s : finset α} {k : ℕ} (h : s.card = k) (hz : 0 < k) :
mono_of_fin s h ⟨0, hz⟩ = s.1.inf' (card_pos.1 (h.symm ▸ hz)) :=
begin
apply le_antisymm,
{ have : inf' _ _ ∈ s := inf'_mem _ _,
rcases (mono_of_fin_bij_on s h).surj_on this with ⟨a, _, ha⟩,
rw ← ha,
apply (mono_of_fin_strict_mono s h).monotone,
exact zero_le a.val },
{ have : mono_of_fin s h ⟨0, hz⟩ ∈ s := (mono_of_fin_bij_on s h).maps_to (set.mem_univ _),
exact inf'_le _ _ this }
end
lemma inf'_eq_mono_of_fin_zero {s : finset α} (hs : s.nonempty) :
s.1.inf' hs = mono_of_fin s rfl ⟨0, card_pos.2 hs⟩ :=
by rw mono_of_fin_zero_eq_inf'
/-- The bijection `mono_of_fin s h` sends `k-1` to the maximum of `s`. -/
lemma mono_of_fin_last_eq_sup' {s : finset α} {k : ℕ} (h : s.card = k) (hz : 0 < k) :
mono_of_fin s h ⟨k-1, sub_lt hz zero_lt_one⟩ = s.1.sup' (card_pos.1 (h.symm ▸ hz)) :=
begin
apply le_antisymm,
{ have : mono_of_fin s h ⟨k-1, _⟩ ∈ s := (mono_of_fin_bij_on s h).maps_to (set.mem_univ _),
exact le_sup' _ _ this },
{ have : sup' _ _ ∈ s := sup'_mem _ _,
rcases (mono_of_fin_bij_on s h).surj_on this with ⟨a, _, ha⟩,
rw ← ha,
apply (mono_of_fin_strict_mono s h).monotone,
exact le_pred_of_lt a.2},
end
lemma sup'_eq_mono_of_fin_last {s : finset α} (hs : s.nonempty) :
s.1.sup' hs = mono_of_fin s rfl ⟨s.card - 1, sub_lt (card_pos.2 hs) zero_lt_one⟩ :=
by rw mono_of_fin_last_eq_sup'
/-- `mono_of_fin {a} h` sends any argument to `a`. -/
@[simp] lemma mono_of_fin_singleton_eq (a : α) (i : fin 1) {h} :
mono_of_fin {a} h i = a :=
begin
rw [subsingleton.elim i ⟨0, zero_lt_one⟩, mono_of_fin_zero_eq_inf' h zero_lt_one],
simp,
end
lemma eq_mono_of_fin_singleton (a : α) (i : fin 1) :
a = mono_of_fin {a} (card_singleton a) i :=
by rw mono_of_fin_singleton_eq
/-- The range of `mono_of_fin`. -/
@[simp] lemma range_mono_of_fin {s : finset α} {k : ℕ} (h : s.card = k) :
set.range (s.mono_of_fin h) = ↑s :=
begin
rw ←set.image_univ,
exact (mono_of_fin_bij_on s h).image_eq
end
/-- Any increasing bijection between `fin k` and a finset of cardinality `k` has to coincide with
the increasing bijection `mono_of_fin s h`. For a statement assuming only that `f` maps `univ` to
`s`, see `mono_of_fin_unique'`.-/
lemma mono_of_fin_unique {s : finset α} {k : ℕ} (h : s.card = k) {f : fin k → α}
(hbij : set.bij_on f set.univ ↑s) (hmono : strict_mono f) : f = s.mono_of_fin h :=
begin
ext ⟨i, hi⟩,
induction i using nat.strong_induction_on with i IH,
rcases lt_trichotomy (f ⟨i, hi⟩) (mono_of_fin s h ⟨i, hi⟩) with H|H|H,
{ have A : f ⟨i, hi⟩ ∈ ↑s := hbij.maps_to (set.mem_univ _),
rcases (mono_of_fin_bij_on s h).surj_on A with ⟨j, _, hj⟩,
rw ← hj at H,
have ji : j < ⟨i, hi⟩ := (mono_of_fin_strict_mono s h).lt_iff_lt.1 H,
have : f j = mono_of_fin s h j,
by { convert IH j ji (lt_trans ji hi), rw [fin.ext_iff, fin.coe_mk] },
rw ← this at hj,
exact (ne_of_lt (hmono ji) hj).elim },
{ exact H },
{ have A : mono_of_fin s h ⟨i, hi⟩ ∈ ↑s := (mono_of_fin_bij_on s h).maps_to (set.mem_univ _),
rcases hbij.surj_on A with ⟨j, _, hj⟩,
rw ← hj at H,
have ji : j < ⟨i, hi⟩ := hmono.lt_iff_lt.1 H,
have : f j = mono_of_fin s h j,
by { convert IH j ji (lt_trans ji hi), rw [fin.ext_iff, fin.coe_mk] },
rw this at hj,
exact (ne_of_lt (mono_of_fin_strict_mono s h ji) hj).elim }
end
/-- Any increasing map between `fin k` and a finset of cardinality `k` has to coincide with
the increasing bijection `mono_of_fin s h`. -/
lemma mono_of_fin_unique' {s : finset α} {k : ℕ} (h : s.card = k)
{f : fin k → α} (fmap : set.maps_to f set.univ ↑s) (hmono : strict_mono f) :
f = s.mono_of_fin h :=
begin
have finj : set.inj_on f set.univ := hmono.injective.inj_on _,
apply mono_of_fin_unique h (set.bij_on.mk fmap finj (λ y hy, _)) hmono,
simp only [set.image_univ, set.mem_range],
rcases surj_on_of_inj_on_of_card_le (λ i (hi : i ∈ finset.fin_range k), f i)
(λ i hi, fmap (set.mem_univ i)) (λ i j hi hj hij, finj (set.mem_univ i) (set.mem_univ j) hij)
(by simp [h]) y hy with ⟨x, _, hx⟩,
exact ⟨x, hx.symm⟩
end
/-- Two parametrizations `mono_of_fin` of the same set take the same value on `i` and `j` if and
only if `i = j`. Since they can be defined on a priori not defeq types `fin k` and `fin l` (although
necessarily `k = l`), the conclusion is rather written `(i : ℕ) = (j : ℕ)`. -/
@[simp] lemma mono_of_fin_eq_mono_of_fin_iff
{k l : ℕ} {s : finset α} {i : fin k} {j : fin l} {h : s.card = k} {h' : s.card = l} :
s.mono_of_fin h i = s.mono_of_fin h' j ↔ (i : ℕ) = (j : ℕ) :=
begin
have A : k = l, by rw [← h', ← h],
have : s.mono_of_fin h = (s.mono_of_fin h') ∘ (λ j : (fin k), ⟨j, A ▸ j.is_lt⟩) := rfl,
rw [this, function.comp_app, (s.mono_of_fin_injective h').eq_iff, fin.ext_iff, fin.coe_mk]
end
/-- Given a finset `s` of cardinal `k` in a linear order `α`, the equiv `mono_equiv_of_fin s h`
is the increasing bijection between `fin k` and `s` as an `s`-valued map. Here, `h` is a proof that
the cardinality of `s` is `k`. We use this instead of a map `fin s.card → α` to avoid
casting issues in further uses of this function. -/
noncomputable def mono_equiv_of_fin (s : finset α) {k : ℕ} (h : s.card = k) :
fin k ≃ {x // x ∈ s} :=
(s.mono_of_fin_bij_on h).equiv _
end sort_linear_order
instance [has_repr α] : has_repr (finset α) := ⟨λ s, repr s.1⟩
end finset
|
137348eab63a795f7111de061b2a14eee1655cb4
|
36938939954e91f23dec66a02728db08a7acfcf9
|
/lean4/deps/galois_stdlib/src/Galois/Data/RBMap.lean
|
a3c926d9345d80e19b9f2ad260963ddc2f838f58
|
[] |
no_license
|
pnwamk/reopt-vcg
|
f8b56dd0279392a5e1c6aee721be8138e6b558d3
|
c9f9f185fbefc25c36c4b506bbc85fd1a03c3b6d
|
refs/heads/master
| 1,631,145,017,772
| 1,593,549,019,000
| 1,593,549,143,000
| 254,191,418
| 0
| 0
| null | 1,586,377,077,000
| 1,586,377,077,000
| null |
UTF-8
|
Lean
| false
| false
| 1,450
|
lean
|
namespace RBMap
universes u v w
variables {α : Type u} {β : Type v}
/-- Builds an RBMap from a list of key/value pairs, ordered by `<` on the keys. --/
def ltMap [HasLess α] [∀ (a b : α), Decidable (a < b)] (entries: List (α × β))
: RBMap α β (λ (x y:α)=> x<y) :=
RBMap.fromList entries (λ (x y:α)=> x<y)
section
variable {lt : α → α → Bool}
@[specialize] def keys : RBMap α β lt → List α
| ⟨t, _⟩ => t.revFold (fun ps k _v => k::ps) []
@[specialize] def values : RBMap α β lt → List β
| ⟨t, _⟩ => t.revFold (fun ps _k v => v::ps) []
@[inline] def map {σ : Type w} (f : α → β → σ) : RBMap α β lt → RBMap α σ lt
| ⟨t, _⟩ => t.fold (λ acc k v => acc.insert k (f k v)) RBMap.empty
/-- Appends two RBMaps, with the second argument's entries overwriting
any entries in the first argument where applicable. --/
@[specialize] def append (lhs rhs : RBMap α β lt) : RBMap α β lt :=
if lhs.isEmpty then rhs
else rhs.fold (λ m k v => m.insert k v) lhs
instance : HasAppend (RBMap α β lt) := ⟨RBMap.append⟩
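-- Illustrative usage (sketch, not part of the original file):
-- `ltMap [(1, "a"), (2, "b")] ++ ltMap [(2, "c")]` contains the entries `1 ↦ "a"` and
-- `2 ↦ "c"`; on key collisions the entries of the right operand win, as documented above.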
end
end RBMap
namespace RBMap
-- It's not happy about the universes on this one if we try and add the level parameters (and even use max/ULift/etc) =\
@[inline] def mapM {α β σ : Type} {lt : α → α → Bool} {m : Type → Type} [Monad m] (f : α → β → m σ) : RBMap α β lt → m (RBMap α σ lt)
| ⟨t, _⟩ => t.mfold (λ acc k v => acc.insert k <$> (f k v)) RBMap.empty
end RBMap
|
15b830de022389d76baa7eefe313753d700268d2
|
a44280b79dc85615010e3fbda46abf82c6730fa3
|
/library/init/lean/format.lean
|
020eb04d57db8f123d97c1b12b78882141a2509a
|
[
"Apache-2.0"
] |
permissive
|
kodyvajjha/lean4
|
8e1c613248b531d47367ca6e8d97ee1046645aa1
|
c8a045d69fac152fd5e3a577f718615cecb9c53d
|
refs/heads/master
| 1,589,684,450,102
| 1,555,200,447,000
| 1,556,139,945,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 6,320
|
lean
|
/-
Copyright (c) 2018 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
prelude
import init.lean.options
universes u v
namespace Lean
inductive Format
| nil : Format
| line : Format
| text : String → Format
| nest : Nat → Format → Format
| compose : Bool → Format → Format → Format
| choice : Format → Format → Format
namespace Format
instance : HasAppend Format := ⟨compose false⟩
instance : HasCoe String Format := ⟨text⟩
instance : Inhabited Format := ⟨nil⟩
def join (xs : List Format) : Format :=
xs.foldl (++) ""
def flatten : Format → Format
| nil := nil
| line := text " "
| f@(text _) := f
| (nest _ f) := flatten f
| (choice f _) := flatten f
| f@(compose true _ _) := f
| f@(compose false f₁ f₂) := compose true (flatten f₁) (flatten f₂)
def group : Format → Format
| nil := nil
| f@(text _) := f
| f@(compose true _ _) := f
| f := choice (flatten f) f
structure SpaceResult :=
(found := false)
(exceeded := false)
(space := 0)
@[inline] private def merge (w : Nat) (r₁ : SpaceResult) (r₂ : Thunk SpaceResult) : SpaceResult :=
if r₁.exceeded || r₁.found then r₁
else let y := r₂.get in
if y.exceeded || y.found then y
else let newSpace := r₁.space + y.space in
{ space := newSpace, exceeded := newSpace > w }
def spaceUptoLine : Format → Nat → SpaceResult
| nil w := {}
| line w := { found := true }
| (text s) w := { space := s.length, exceeded := s.length > w }
| (compose _ f₁ f₂) w := merge w (spaceUptoLine f₁ w) (spaceUptoLine f₂ w)
| (nest _ f) w := spaceUptoLine f w
| (choice f₁ f₂) w := spaceUptoLine f₂ w
def spaceUptoLine' : List (Nat × Format) → Nat → SpaceResult
| [] w := {}
| (p::ps) w := merge w (spaceUptoLine p.2 w) (spaceUptoLine' ps w)
partial def be : Nat → Nat → String → List (Nat × Format) → String
| w k out [] := out
| w k out ((i, nil)::z) := be w k out z
| w k out ((i, (compose _ f₁ f₂))::z) := be w k out ((i, f₁)::(i, f₂)::z)
| w k out ((i, (nest n f))::z) := be w k out ((i+n, f)::z)
| w k out ((i, text s)::z) := be w (k + s.length) (out ++ s) z
| w k out ((i, line)::z) := be w i ((out ++ "\n").pushn ' ' i) z
| w k out ((i, choice f₁ f₂)::z) :=
let r := merge w (spaceUptoLine f₁ w) (spaceUptoLine' z w) in
if r.exceeded then be w k out ((i, f₂)::z) else be w k out ((i, f₁)::z)
@[inline] def bracket (l : String) (f : Format) (r : String) : Format :=
group (nest l.length $ l ++ f ++ r)
@[inline] def paren (f : Format) : Format :=
bracket "(" f ")"
@[inline] def sbracket (f : Format) : Format :=
bracket "[" f "]"
def defIndent := 4
def defUnicode := true
def defWidth := 120
def getWidth (o : Options) : Nat := o.get `format.width defWidth
def getIndent (o : Options) : Nat := o.get `format.indent defIndent
def getUnicode (o : Options) : Bool := o.get `format.unicode defUnicode
@[init] def indentOption : IO Unit :=
registerOption `format.indent { defValue := defIndent, group := "format", descr := "indentation" }
@[init] def unicodeOption : IO Unit :=
registerOption `format.unicode { defValue := defUnicode, group := "format", descr := "unicode characters" }
@[init] def widthOption : IO Unit :=
registerOption `format.width { defValue := defWidth, group := "format", descr := "line width" }
def pretty (f : Format) (o : Options := {}) : String :=
let w := getWidth o in
be w 0 "" [(0, f)]
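-- Rendering sketch (not part of the original file): `pretty (paren ("a" ++ line ++ "b"))`
-- produces "(a b)" when the flattened form fits within the line width, and otherwise breaks
-- at `line`, giving "(a\n b)" with the continuation indented by the `nest` amount (here 1).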
end Format
open Lean.Format
class HasToFormat (α : Type u) :=
(toFormat : α → Format)
export Lean.HasToFormat (toFormat)
def toFmt {α : Type u} [HasToFormat α] : α → Format :=
toFormat
instance toStringToFormat {α : Type u} [HasToString α] : HasToFormat α :=
⟨text ∘ toString⟩
-- note: must take precedence over the above instance to avoid premature formatting
instance formatHasToFormat : HasToFormat Format :=
⟨id⟩
instance stringHasToFormat : HasToFormat String := ⟨Format.text⟩
def Format.joinSep {α : Type u} [HasToFormat α] : List α → Format → Format
| [] sep := nil
| [a] sep := toFmt a
| (a::as) sep := toFmt a ++ sep ++ Format.joinSep as sep
def Format.prefixJoin {α : Type u} [HasToFormat α] (pre : Format) : List α → Format
| [] := nil
| (a::as) := pre ++ toFmt a ++ Format.prefixJoin as
def Format.joinSuffix {α : Type u} [HasToFormat α] : List α → Format → Format
| [] suffix := nil
| (a::as) suffix := toFmt a ++ suffix ++ Format.joinSuffix as suffix
def List.toFormat {α : Type u} [HasToFormat α] : List α → Format
| [] := "[]"
| xs := sbracket $ Format.joinSep xs ("," ++ line)
instance listHasToFormat {α : Type u} [HasToFormat α] : HasToFormat (List α) :=
⟨List.toFormat⟩
instance prodHasToFormat {α : Type u} {β : Type v} [HasToFormat α] [HasToFormat β] : HasToFormat (Prod α β) :=
⟨λ ⟨a, b⟩, paren $ toFormat a ++ "," ++ line ++ toFormat b⟩
instance natHasToFormat : HasToFormat Nat := ⟨λ n, toString n⟩
instance uint16HasToFormat : HasToFormat UInt16 := ⟨λ n, toString n⟩
instance uint32HasToFormat : HasToFormat UInt32 := ⟨λ n, toString n⟩
instance uint64HasToFormat : HasToFormat UInt64 := ⟨λ n, toString n⟩
instance usizeHasToFormat : HasToFormat USize := ⟨λ n, toString n⟩
instance nameHasToFormat : HasToFormat Name := ⟨λ n, n.toString⟩
protected def Format.repr : Format → Format
| nil := "Format.nil"
| line := "Format.line"
| (text s) := paren $ "Format.text" ++ line ++ repr s
| (nest n f) := paren $ "Format.nest" ++ line ++ repr n ++ line ++ Format.repr f
| (compose b f₁ f₂) := paren $ "Format.compose " ++ repr b ++ line ++ Format.repr f₁ ++ line ++ Format.repr f₂
| (choice f₁ f₂) := paren $ "Format.choice" ++ line ++ Format.repr f₁ ++ line ++ Format.repr f₂
instance formatHasToString : HasToString Format := ⟨Format.pretty⟩
instance : HasRepr Format := ⟨Format.pretty ∘ Format.repr⟩
end Lean
|
9651c751568991b12549de5eb3665fe4b616fe41
|
88fb7558b0636ec6b181f2a548ac11ad3919f8a5
|
/tests/lean/run/eval_attr_cache.lean
|
7bb5ab2c4c923ec616281067e4540dcda2438519
|
[
"Apache-2.0"
] |
permissive
|
moritayasuaki/lean
|
9f666c323cb6fa1f31ac597d777914aed41e3b7a
|
ae96ebf6ee953088c235ff7ae0e8c95066ba8001
|
refs/heads/master
| 1,611,135,440,814
| 1,493,852,869,000
| 1,493,852,869,000
| 90,269,903
| 0
| 0
| null | 1,493,906,291,000
| 1,493,906,291,000
| null |
UTF-8
|
Lean
| false
| false
| 840
|
lean
|
open tactic
meta def list_name.to_expr (n : list name) : tactic expr := to_expr (quote n)
@[user_attribute]
meta def my_attr : caching_user_attribute (name → bool) :=
{ name := "my_attr",
descr := "my attr",
mk_cache := λ ls, do {
els ← list_name.to_expr ls,
c ← to_expr `(λ n : name, (name.cases_on n ff (λ _ _, to_bool (n ∈ %%els)) (λ _ _, ff) : bool)),
eval_expr (name → bool) c
},
dependencies := []
}
meta def my_tac : tactic unit :=
do f ← caching_user_attribute.get_cache my_attr,
trace (f `foo),
return ()
@[my_attr] def bla := 10
run_cmd my_tac
@[my_attr] def foo := 10 -- Cache was invalidated
run_cmd my_tac -- Adds a closure to the cache containing the auxiliary function created by eval_expr
run_cmd my_tac -- Cache should be flushed since the auxiliary function is gone
|
543ac5edae88fcc0cfc37b88411c0cffe1089fba
|
4d2583807a5ac6caaffd3d7a5f646d61ca85d532
|
/src/ring_theory/polynomial/scale_roots.lean
|
8b74c8e1e0e327cf69accdc125e1e98af32ea58c
|
[
"Apache-2.0"
] |
permissive
|
AntoineChambert-Loir/mathlib
|
64aabb896129885f12296a799818061bc90da1ff
|
07be904260ab6e36a5769680b6012f03a4727134
|
refs/heads/master
| 1,693,187,631,771
| 1,636,719,886,000
| 1,636,719,886,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 5,326
|
lean
|
/-
Copyright (c) 2020 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen, Devon Tuma
-/
import ring_theory.polynomial.basic
import ring_theory.non_zero_divisors
/-!
# Scaling the roots of a polynomial
This file defines `scale_roots p s` for a polynomial `p` in one variable and a ring element `s` to
be the polynomial with root `r * s` for each root `r` of `p` and proves some basic results about it.
-/
section scale_roots
variables {A K R S : Type*} [comm_ring A] [is_domain A] [field K] [comm_ring R] [comm_ring S]
variables {M : submonoid A}
open polynomial
open_locale big_operators
/-- `scale_roots p s` is a polynomial with root `r * s` for each root `r` of `p`. -/
noncomputable def scale_roots (p : polynomial R) (s : R) : polynomial R :=
∑ i in p.support, monomial i (p.coeff i * s ^ (p.nat_degree - i))
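-- Worked example (not part of the original file): for `p = X^2 - 3*X + 2`, whose roots are `1`
-- and `2`, the coefficients are rescaled to give `scale_roots p s = X^2 - 3*s*X + 2*s^2
-- = (X - s) * (X - 2*s)`, whose roots are `s` and `2*s`.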
@[simp] lemma coeff_scale_roots (p : polynomial R) (s : R) (i : ℕ) :
(scale_roots p s).coeff i = coeff p i * s ^ (p.nat_degree - i) :=
by simp [scale_roots, coeff_monomial] {contextual := tt}
lemma coeff_scale_roots_nat_degree (p : polynomial R) (s : R) :
(scale_roots p s).coeff p.nat_degree = p.leading_coeff :=
by rw [leading_coeff, coeff_scale_roots, tsub_self, pow_zero, mul_one]
@[simp] lemma zero_scale_roots (s : R) : scale_roots 0 s = 0 := by { ext, simp }
lemma scale_roots_ne_zero {p : polynomial R} (hp : p ≠ 0) (s : R) :
scale_roots p s ≠ 0 :=
begin
intro h,
have : p.coeff p.nat_degree ≠ 0 := mt leading_coeff_eq_zero.mp hp,
have : (scale_roots p s).coeff p.nat_degree = 0 :=
congr_fun (congr_arg (coeff : polynomial R → ℕ → R) h) p.nat_degree,
rw [coeff_scale_roots_nat_degree] at this,
contradiction
end
lemma support_scale_roots_le (p : polynomial R) (s : R) :
(scale_roots p s).support ≤ p.support :=
by { intro, simpa using left_ne_zero_of_mul }
lemma support_scale_roots_eq (p : polynomial R) {s : R} (hs : s ∈ non_zero_divisors R) :
(scale_roots p s).support = p.support :=
le_antisymm (support_scale_roots_le p s)
begin
intro i,
simp only [coeff_scale_roots, polynomial.mem_support_iff],
intros p_ne_zero ps_zero,
have := ((non_zero_divisors R).pow_mem hs (p.nat_degree - i)) _ ps_zero,
contradiction
end
@[simp] lemma degree_scale_roots (p : polynomial R) {s : R} :
degree (scale_roots p s) = degree p :=
begin
haveI := classical.prop_decidable,
by_cases hp : p = 0,
{ rw [hp, zero_scale_roots] },
have := scale_roots_ne_zero hp s,
refine le_antisymm (finset.sup_mono (support_scale_roots_le p s)) (degree_le_degree _),
rw coeff_scale_roots_nat_degree,
intro h,
have := leading_coeff_eq_zero.mp h,
contradiction,
end
@[simp] lemma nat_degree_scale_roots (p : polynomial R) (s : R) :
nat_degree (scale_roots p s) = nat_degree p :=
by simp only [nat_degree, degree_scale_roots]
lemma monic_scale_roots_iff {p : polynomial R} (s : R) :
monic (scale_roots p s) ↔ monic p :=
by simp only [monic, leading_coeff, nat_degree_scale_roots, coeff_scale_roots_nat_degree]
lemma scale_roots_eval₂_eq_zero {p : polynomial S} (f : S →+* R)
{r : R} {s : S} (hr : eval₂ f r p = 0) :
eval₂ f (f s * r) (scale_roots p s) = 0 :=
calc eval₂ f (f s * r) (scale_roots p s) =
(scale_roots p s).support.sum (λ i, f (coeff p i * s ^ (p.nat_degree - i)) * (f s * r) ^ i) :
by simp [eval₂_eq_sum, sum_def]
... = p.support.sum (λ i, f (coeff p i * s ^ (p.nat_degree - i)) * (f s * r) ^ i) :
finset.sum_subset (support_scale_roots_le p s)
(λ i hi hi', let this : coeff p i * s ^ (p.nat_degree - i) = 0 :=
by simpa using hi' in by simp [this])
... = p.support.sum (λ (i : ℕ), f (p.coeff i) * f s ^ (p.nat_degree - i + i) * r ^ i) :
finset.sum_congr rfl
(λ i hi, by simp_rw [f.map_mul, f.map_pow, pow_add, mul_pow, mul_assoc])
... = p.support.sum (λ (i : ℕ), f s ^ p.nat_degree * (f (p.coeff i) * r ^ i)) :
finset.sum_congr rfl
(λ i hi, by { rw [mul_assoc, mul_left_comm, tsub_add_cancel_of_le],
exact le_nat_degree_of_ne_zero (polynomial.mem_support_iff.mp hi) })
... = f s ^ p.nat_degree * p.support.sum (λ (i : ℕ), (f (p.coeff i) * r ^ i)) : finset.mul_sum.symm
... = f s ^ p.nat_degree * eval₂ f r p : by { simp [eval₂_eq_sum, sum_def] }
... = 0 : by rw [hr, _root_.mul_zero]
lemma scale_roots_aeval_eq_zero [algebra S R] {p : polynomial S}
{r : R} {s : S} (hr : aeval r p = 0) :
aeval (algebra_map S R s * r) (scale_roots p s) = 0 :=
scale_roots_eval₂_eq_zero (algebra_map S R) hr
lemma scale_roots_eval₂_eq_zero_of_eval₂_div_eq_zero
{p : polynomial A} {f : A →+* K} (hf : function.injective f)
{r s : A} (hr : eval₂ f (f r / f s) p = 0) (hs : s ∈ non_zero_divisors A) :
eval₂ f (f r) (scale_roots p s) = 0 :=
begin
convert scale_roots_eval₂_eq_zero f hr,
rw [←mul_div_assoc, mul_comm, mul_div_cancel],
exact f.map_ne_zero_of_mem_non_zero_divisors hf hs
end
lemma scale_roots_aeval_eq_zero_of_aeval_div_eq_zero [algebra A K]
(inj : function.injective (algebra_map A K)) {p : polynomial A} {r s : A}
(hr : aeval (algebra_map A K r / algebra_map A K s) p = 0) (hs : s ∈ non_zero_divisors A) :
aeval (algebra_map A K r) (scale_roots p s) = 0 :=
scale_roots_eval₂_eq_zero_of_eval₂_div_eq_zero inj hr hs
end scale_roots
|
e788321487bf416c7bea98becaf11d1f03a32f39
|
947fa6c38e48771ae886239b4edce6db6e18d0fb
|
/src/algebra/order/floor.lean
|
87ec96e477564b59f83749d609e527f077085edf
|
[
"Apache-2.0"
] |
permissive
|
ramonfmir/mathlib
|
c5dc8b33155473fab97c38bd3aa6723dc289beaa
|
14c52e990c17f5a00c0cc9e09847af16fabbed25
|
refs/heads/master
| 1,661,979,343,526
| 1,660,830,384,000
| 1,660,830,384,000
| 182,072,989
| 0
| 0
| null | 1,555,585,876,000
| 1,555,585,876,000
| null |
UTF-8
|
Lean
| false
| false
| 30,772
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Kevin Kappelmann
-/
import tactic.abel
import tactic.linarith
/-!
# Floor and ceil
## Summary
We define the natural- and integer-valued floor and ceil functions on linearly ordered rings.
## Main Definitions
* `floor_semiring`: An ordered semiring with natural-valued floor and ceil.
* `nat.floor a`: Greatest natural `n` such that `n ≤ a`. Equal to `0` if `a < 0`.
* `nat.ceil a`: Least natural `n` such that `a ≤ n`.
* `floor_ring`: A linearly ordered ring with integer-valued floor and ceil.
* `int.floor a`: Greatest integer `z` such that `z ≤ a`.
* `int.ceil a`: Least integer `z` such that `a ≤ z`.
* `int.fract a`: Fractional part of `a`, defined as `a - floor a`.
* `round a`: Nearest integer to `a`. It rounds halves towards infinity.
## Notations
* `⌊a⌋₊` is `nat.floor a`.
* `⌈a⌉₊` is `nat.ceil a`.
* `⌊a⌋` is `int.floor a`.
* `⌈a⌉` is `int.ceil a`.
The index `₊` in the notations for `nat.floor` and `nat.ceil` is used in analogy to the notation
for `nnnorm`.
## TODO
Some `nat.floor` and `nat.ceil` lemmas require `linear_ordered_ring α`. Is `has_ordered_sub` enough?
`linear_ordered_ring`/`linear_ordered_semiring` can be relaxed to `ordered_ring`/`ordered_semiring` in
many lemmas.
## Tags
rounding, floor, ceil
-/
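/- Worked values (informal; the `floor_ring ℝ` instance is provided in later files):
`⌊(3.7 : ℝ)⌋ = 3`, `⌈(3.2 : ℝ)⌉ = 4`, `⌊(-0.5 : ℝ)⌋ = -1`, `⌊(-0.5 : ℝ)⌋₊ = 0`,
`int.fract (3.7 : ℝ) = 0.7` and `round (2.5 : ℝ) = 3`, since halves round towards infinity. -/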
open set
variables {α : Type*}
/-! ### Floor semiring -/
/-- A `floor_semiring` is an ordered semiring over `α` with a function
`floor : α → ℕ` satisfying `∀ (n : ℕ) (x : α), 0 ≤ x → (n ≤ ⌊x⌋ ↔ (n : α) ≤ x)`.
Note that many lemmas require a `linear_order`. Please see the above `TODO`. -/
class floor_semiring (α) [ordered_semiring α] :=
(floor : α → ℕ)
(ceil : α → ℕ)
(floor_of_neg {a : α} (ha : a < 0) : floor a = 0)
(gc_floor {a : α} {n : ℕ} (ha : 0 ≤ a) : n ≤ floor a ↔ (n : α) ≤ a)
(gc_ceil : galois_connection ceil coe)
instance : floor_semiring ℕ :=
{ floor := id,
ceil := id,
floor_of_neg := λ a ha, (a.not_lt_zero ha).elim,
gc_floor := λ n a ha, by { rw nat.cast_id, refl },
gc_ceil := λ n a, by { rw nat.cast_id, refl } }
namespace nat
section ordered_semiring
variables [ordered_semiring α] [floor_semiring α] {a : α} {n : ℕ}
/-- `⌊a⌋₊` is the greatest natural `n` such that `n ≤ a`. If `a` is negative, then `⌊a⌋₊ = 0`. -/
def floor : α → ℕ := floor_semiring.floor
/-- `⌈a⌉₊` is the least natural `n` such that `a ≤ n` -/
def ceil : α → ℕ := floor_semiring.ceil
@[simp] lemma floor_nat : (nat.floor : ℕ → ℕ) = id := rfl
@[simp] lemma ceil_nat : (nat.ceil : ℕ → ℕ) = id := rfl
notation `⌊` a `⌋₊` := nat.floor a
notation `⌈` a `⌉₊` := nat.ceil a
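-- Illustrative sketch: with the `floor_semiring ℕ` instance above, both `⌊·⌋₊` and `⌈·⌉₊`
-- reduce definitionally on `ℕ`.
example : ⌊(5 : ℕ)⌋₊ = 5 := rfl
example : ⌈(5 : ℕ)⌉₊ = 5 := rfl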
end ordered_semiring
section linear_ordered_semiring
variables [linear_ordered_semiring α] [floor_semiring α] {a : α} {n : ℕ}
lemma le_floor_iff (ha : 0 ≤ a) : n ≤ ⌊a⌋₊ ↔ (n : α) ≤ a := floor_semiring.gc_floor ha
lemma le_floor (h : (n : α) ≤ a) : n ≤ ⌊a⌋₊ := (le_floor_iff $ n.cast_nonneg.trans h).2 h
lemma floor_lt (ha : 0 ≤ a) : ⌊a⌋₊ < n ↔ a < n := lt_iff_lt_of_le_iff_le $ le_floor_iff ha
lemma floor_lt_one (ha : 0 ≤ a) : ⌊a⌋₊ < 1 ↔ a < 1 :=
(floor_lt ha).trans $ by rw nat.cast_one
lemma lt_of_floor_lt (h : ⌊a⌋₊ < n) : a < n := lt_of_not_le $ λ h', (le_floor h').not_lt h
lemma lt_one_of_floor_lt_one (h : ⌊a⌋₊ < 1) : a < 1 := by exact_mod_cast lt_of_floor_lt h
lemma floor_le (ha : 0 ≤ a) : (⌊a⌋₊ : α) ≤ a := (le_floor_iff ha).1 le_rfl
lemma lt_succ_floor (a : α) : a < ⌊a⌋₊.succ := lt_of_floor_lt $ nat.lt_succ_self _
lemma lt_floor_add_one (a : α) : a < ⌊a⌋₊ + 1 := by simpa using lt_succ_floor a
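-- Illustrative sketch, combining `floor_le` and `lt_floor_add_one`: every nonnegative `a`
-- lies in the interval `[⌊a⌋₊, ⌊a⌋₊ + 1)`.
example (ha : 0 ≤ a) : a ∈ set.Ico (⌊a⌋₊ : α) (⌊a⌋₊ + 1) := ⟨floor_le ha, lt_floor_add_one a⟩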
@[simp] lemma floor_coe (n : ℕ) : ⌊(n : α)⌋₊ = n :=
eq_of_forall_le_iff $ λ a, by { rw [le_floor_iff, nat.cast_le], exact n.cast_nonneg }
@[simp] lemma floor_zero : ⌊(0 : α)⌋₊ = 0 := by rw [← nat.cast_zero, floor_coe]
@[simp] lemma floor_one : ⌊(1 : α)⌋₊ = 1 := by rw [←nat.cast_one, floor_coe]
lemma floor_of_nonpos (ha : a ≤ 0) : ⌊a⌋₊ = 0 :=
ha.lt_or_eq.elim floor_semiring.floor_of_neg $ by { rintro rfl, exact floor_zero }
lemma floor_mono : monotone (floor : α → ℕ) := λ a b h, begin
obtain ha | ha := le_total a 0,
{ rw floor_of_nonpos ha,
exact nat.zero_le _ },
{ exact le_floor ((floor_le ha).trans h) }
end
lemma le_floor_iff' (hn : n ≠ 0) : n ≤ ⌊a⌋₊ ↔ (n : α) ≤ a :=
begin
obtain ha | ha := le_total a 0,
{ rw floor_of_nonpos ha,
exact iff_of_false (nat.pos_of_ne_zero hn).not_le
(not_le_of_lt $ ha.trans_lt $ cast_pos.2 $ nat.pos_of_ne_zero hn) },
{ exact le_floor_iff ha }
end
@[simp] lemma one_le_floor_iff (x : α) : 1 ≤ ⌊x⌋₊ ↔ 1 ≤ x :=
by exact_mod_cast (@le_floor_iff' α _ _ x 1 one_ne_zero)
lemma floor_lt' (hn : n ≠ 0) : ⌊a⌋₊ < n ↔ a < n := lt_iff_lt_of_le_iff_le $ le_floor_iff' hn
lemma floor_pos : 0 < ⌊a⌋₊ ↔ 1 ≤ a :=
by { convert le_floor_iff' nat.one_ne_zero, exact cast_one.symm }
lemma pos_of_floor_pos (h : 0 < ⌊a⌋₊) : 0 < a :=
(le_or_lt a 0).resolve_left (λ ha, lt_irrefl 0 $ by rwa floor_of_nonpos ha at h)
lemma lt_of_lt_floor (h : n < ⌊a⌋₊) : ↑n < a :=
(nat.cast_lt.2 h).trans_le $ floor_le (pos_of_floor_pos $ (nat.zero_le n).trans_lt h).le
lemma floor_le_of_le (h : a ≤ n) : ⌊a⌋₊ ≤ n := le_imp_le_iff_lt_imp_lt.2 lt_of_lt_floor h
lemma floor_le_one_of_le_one (h : a ≤ 1) : ⌊a⌋₊ ≤ 1 :=
floor_le_of_le $ h.trans_eq $ nat.cast_one.symm
@[simp] lemma floor_eq_zero : ⌊a⌋₊ = 0 ↔ a < 1 :=
by { rw [←lt_one_iff, ←@cast_one α], exact floor_lt' nat.one_ne_zero }
lemma floor_eq_iff (ha : 0 ≤ a) : ⌊a⌋₊ = n ↔ ↑n ≤ a ∧ a < ↑n + 1 :=
by rw [←le_floor_iff ha, ←nat.cast_one, ←nat.cast_add, ←floor_lt ha, nat.lt_add_one_iff,
le_antisymm_iff, and.comm]
lemma floor_eq_iff' (hn : n ≠ 0) : ⌊a⌋₊ = n ↔ ↑n ≤ a ∧ a < ↑n + 1 :=
by rw [← le_floor_iff' hn, ← nat.cast_one, ← nat.cast_add, ← floor_lt' (nat.add_one_ne_zero n),
nat.lt_add_one_iff, le_antisymm_iff, and.comm]
lemma floor_eq_on_Ico (n : ℕ) : ∀ a ∈ (set.Ico n (n+1) : set α), ⌊a⌋₊ = n :=
λ a ⟨h₀, h₁⟩, (floor_eq_iff $ n.cast_nonneg.trans h₀).mpr ⟨h₀, h₁⟩
lemma floor_eq_on_Ico' (n : ℕ) : ∀ a ∈ (set.Ico n (n+1) : set α), (⌊a⌋₊ : α) = n :=
λ x hx, by exact_mod_cast floor_eq_on_Ico n x hx
@[simp] lemma preimage_floor_zero : (floor : α → ℕ) ⁻¹' {0} = Iio 1 :=
ext $ λ a, floor_eq_zero
lemma preimage_floor_of_ne_zero {n : ℕ} (hn : n ≠ 0) : (floor : α → ℕ) ⁻¹' {n} = Ico n (n + 1) :=
ext $ λ a, floor_eq_iff' hn
/-! #### Ceil -/
lemma gc_ceil_coe : galois_connection (ceil : α → ℕ) coe := floor_semiring.gc_ceil
@[simp] lemma ceil_le : ⌈a⌉₊ ≤ n ↔ a ≤ n := gc_ceil_coe _ _
lemma lt_ceil : n < ⌈a⌉₊ ↔ (n : α) < a := lt_iff_lt_of_le_iff_le ceil_le
lemma le_ceil (a : α) : a ≤ ⌈a⌉₊ := ceil_le.1 le_rfl
lemma ceil_mono : monotone (ceil : α → ℕ) := gc_ceil_coe.monotone_l
@[simp] lemma ceil_coe (n : ℕ) : ⌈(n : α)⌉₊ = n :=
eq_of_forall_ge_iff $ λ a, ceil_le.trans nat.cast_le
@[simp] lemma ceil_zero : ⌈(0 : α)⌉₊ = 0 := by rw [← nat.cast_zero, ceil_coe]
@[simp] lemma ceil_one : ⌈(1 : α)⌉₊ = 1 := by rw [←nat.cast_one, ceil_coe]
@[simp] lemma ceil_eq_zero : ⌈a⌉₊ = 0 ↔ a ≤ 0 := by rw [← le_zero_iff, ceil_le, nat.cast_zero]
@[simp] lemma ceil_pos : 0 < ⌈a⌉₊ ↔ 0 < a := by rw [lt_ceil, cast_zero]
lemma lt_of_ceil_lt (h : ⌈a⌉₊ < n) : a < n := (le_ceil a).trans_lt (nat.cast_lt.2 h)
lemma le_of_ceil_le (h : ⌈a⌉₊ ≤ n) : a ≤ n := (le_ceil a).trans (nat.cast_le.2 h)
lemma floor_le_ceil (a : α) : ⌊a⌋₊ ≤ ⌈a⌉₊ :=
begin
obtain ha | ha := le_total a 0,
{ rw floor_of_nonpos ha,
exact nat.zero_le _ },
{ exact cast_le.1 ((floor_le ha).trans $ le_ceil _) }
end
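-- Illustrative sketch: a nonnegative element is sandwiched between its floor and its ceil.
example (ha : 0 ≤ a) : (⌊a⌋₊ : α) ≤ a ∧ a ≤ ⌈a⌉₊ := ⟨floor_le ha, le_ceil a⟩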
lemma floor_lt_ceil_of_lt_of_pos {a b : α} (h : a < b) (h' : 0 < b) : ⌊a⌋₊ < ⌈b⌉₊ :=
begin
rcases le_or_lt 0 a with ha|ha,
{ rw floor_lt ha, exact h.trans_le (le_ceil _) },
{ rwa [floor_of_nonpos ha.le, lt_ceil, nat.cast_zero] }
end
lemma ceil_eq_iff (hn : n ≠ 0) : ⌈a⌉₊ = n ↔ ↑(n - 1) < a ∧ a ≤ n :=
by rw [← ceil_le, ← not_le, ← ceil_le, not_le,
tsub_lt_iff_right (nat.add_one_le_iff.2 (pos_iff_ne_zero.2 hn)), nat.lt_add_one_iff,
le_antisymm_iff, and.comm]
@[simp] lemma preimage_ceil_zero : (nat.ceil : α → ℕ) ⁻¹' {0} = Iic 0 :=
ext $ λ x, ceil_eq_zero
lemma preimage_ceil_of_ne_zero (hn : n ≠ 0) : (nat.ceil : α → ℕ) ⁻¹' {n} = Ioc ↑(n - 1) n :=
ext $ λ x, ceil_eq_iff hn
/-! #### Intervals -/
@[simp] lemma preimage_Ioo {a b : α} (ha : 0 ≤ a) :
((coe : ℕ → α) ⁻¹' (set.Ioo a b)) = set.Ioo ⌊a⌋₊ ⌈b⌉₊ :=
by { ext, simp [floor_lt, lt_ceil, ha] }
@[simp] lemma preimage_Ico {a b : α} : ((coe : ℕ → α) ⁻¹' (set.Ico a b)) = set.Ico ⌈a⌉₊ ⌈b⌉₊ :=
by { ext, simp [ceil_le, lt_ceil] }
@[simp] lemma preimage_Ioc {a b : α} (ha : 0 ≤ a) (hb : 0 ≤ b) :
((coe : ℕ → α) ⁻¹' (set.Ioc a b)) = set.Ioc ⌊a⌋₊ ⌊b⌋₊ :=
by { ext, simp [floor_lt, le_floor_iff, hb, ha] }
@[simp] lemma preimage_Icc {a b : α} (hb : 0 ≤ b) :
((coe : ℕ → α) ⁻¹' (set.Icc a b)) = set.Icc ⌈a⌉₊ ⌊b⌋₊ :=
by { ext, simp [ceil_le, hb, le_floor_iff] }
@[simp] lemma preimage_Ioi {a : α} (ha : 0 ≤ a) : ((coe : ℕ → α) ⁻¹' (set.Ioi a)) = set.Ioi ⌊a⌋₊ :=
by { ext, simp [floor_lt, ha] }
@[simp] lemma preimage_Ici {a : α} : ((coe : ℕ → α) ⁻¹' (set.Ici a)) = set.Ici ⌈a⌉₊ :=
by { ext, simp [ceil_le] }
@[simp] lemma preimage_Iio {a : α} : ((coe : ℕ → α) ⁻¹' (set.Iio a)) = set.Iio ⌈a⌉₊ :=
by { ext, simp [lt_ceil] }
@[simp] lemma preimage_Iic {a : α} (ha : 0 ≤ a) : ((coe : ℕ → α) ⁻¹' (set.Iic a)) = set.Iic ⌊a⌋₊ :=
by { ext, simp [le_floor_iff, ha] }
lemma floor_add_nat (ha : 0 ≤ a) (n : ℕ) : ⌊a + n⌋₊ = ⌊a⌋₊ + n :=
eq_of_forall_le_iff $ λ b, begin
rw [le_floor_iff (add_nonneg ha n.cast_nonneg)],
obtain hb | hb := le_total n b,
{ obtain ⟨d, rfl⟩ := exists_add_of_le hb,
rw [nat.cast_add, add_comm n, add_comm (n : α), add_le_add_iff_right, add_le_add_iff_right,
le_floor_iff ha] },
{ obtain ⟨d, rfl⟩ := exists_add_of_le hb,
rw [nat.cast_add, add_left_comm _ b, add_left_comm _ (b : α)],
refine iff_of_true _ le_self_add,
exact (le_add_of_nonneg_right $ ha.trans $ le_add_of_nonneg_right d.cast_nonneg) }
end
lemma floor_add_one (ha : 0 ≤ a) : ⌊a + 1⌋₊ = ⌊a⌋₊ + 1 :=
by { convert floor_add_nat ha 1, exact cast_one.symm }
lemma floor_sub_nat [has_sub α] [has_ordered_sub α] [has_exists_add_of_le α] (a : α) (n : ℕ) :
⌊a - n⌋₊ = ⌊a⌋₊ - n :=
begin
obtain ha | ha := le_total a 0,
{ rw [floor_of_nonpos ha, floor_of_nonpos (tsub_nonpos_of_le (ha.trans n.cast_nonneg)),
zero_tsub] },
cases le_total a n,
{ rw [floor_of_nonpos (tsub_nonpos_of_le h), eq_comm, tsub_eq_zero_iff_le],
exact nat.cast_le.1 ((nat.floor_le ha).trans h) },
{ rw [eq_tsub_iff_add_eq_of_le (le_floor h), ←floor_add_nat _,
tsub_add_cancel_of_le h],
exact le_tsub_of_add_le_left ((add_zero _).trans_le h), }
end
lemma ceil_add_nat (ha : 0 ≤ a) (n : ℕ) : ⌈a + n⌉₊ = ⌈a⌉₊ + n :=
eq_of_forall_ge_iff $ λ b, begin
rw [←not_lt, ←not_lt, not_iff_not],
rw [lt_ceil],
obtain hb | hb := le_or_lt n b,
{ obtain ⟨d, rfl⟩ := exists_add_of_le hb,
rw [nat.cast_add, add_comm n, add_comm (n : α), add_lt_add_iff_right, add_lt_add_iff_right,
lt_ceil] },
{ exact iff_of_true (lt_add_of_nonneg_of_lt ha $ cast_lt.2 hb) (lt_add_left _ _ _ hb) }
end
lemma ceil_add_one (ha : 0 ≤ a) : ⌈a + 1⌉₊ = ⌈a⌉₊ + 1 :=
by { convert ceil_add_nat ha 1, exact cast_one.symm }
lemma ceil_lt_add_one (ha : 0 ≤ a) : (⌈a⌉₊ : α) < a + 1 :=
lt_ceil.1 $ (nat.lt_succ_self _).trans_le (ceil_add_one ha).ge
end linear_ordered_semiring
section linear_ordered_ring
variables [linear_ordered_ring α] [floor_semiring α]
lemma sub_one_lt_floor (a : α) : a - 1 < ⌊a⌋₊ := sub_lt_iff_lt_add.2 $ lt_floor_add_one a
end linear_ordered_ring
section linear_ordered_semifield
variables [linear_ordered_semifield α] [floor_semiring α]
lemma floor_div_nat (a : α) (n : ℕ) : ⌊a / n⌋₊ = ⌊a⌋₊ / n :=
begin
cases le_total a 0 with ha ha,
{ rw [floor_of_nonpos, floor_of_nonpos ha],
{ simp },
apply div_nonpos_of_nonpos_of_nonneg ha n.cast_nonneg },
obtain rfl | hn := n.eq_zero_or_pos,
{ rw [cast_zero, div_zero, nat.div_zero, floor_zero] },
refine (floor_eq_iff _).2 _,
{ exact div_nonneg ha n.cast_nonneg },
split,
{ exact cast_div_le.trans (div_le_div_of_le_of_nonneg (floor_le ha) n.cast_nonneg) },
rw [div_lt_iff, add_mul, one_mul, ←cast_mul, ←cast_add, ←floor_lt ha],
{ exact lt_div_mul_add hn },
{ exact (cast_pos.2 hn) }
end
/-- Natural division is the floor of field division. -/
lemma floor_div_eq_div (m n : ℕ) : ⌊(m : α) / n⌋₊ = m / n :=
by { convert floor_div_nat (m : α) n, rw m.floor_coe }
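-- e.g. (informal; the `floor_ring ℚ` instance is provided elsewhere): `⌊(7 : ℚ) / 2⌋₊ = 7 / 2 = 3`.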
end linear_ordered_semifield
end nat
/-- There exists at most one `floor_semiring` structure on a linear ordered semiring. -/
lemma subsingleton_floor_semiring {α} [linear_ordered_semiring α] :
subsingleton (floor_semiring α) :=
begin
refine ⟨λ H₁ H₂, _⟩,
have : H₁.ceil = H₂.ceil,
from funext (λ a, H₁.gc_ceil.l_unique H₂.gc_ceil $ λ n, rfl),
have : H₁.floor = H₂.floor,
{ ext a,
cases lt_or_le a 0,
{ rw [H₁.floor_of_neg, H₂.floor_of_neg]; exact h },
{ refine eq_of_forall_le_iff (λ n, _),
rw [H₁.gc_floor, H₂.gc_floor]; exact h } },
cases H₁, cases H₂, congr; assumption
end
/-! ### Floor rings -/
/--
A `floor_ring` is a linear ordered ring over `α` with a function
`floor : α → ℤ` satisfying `∀ (z : ℤ) (a : α), z ≤ floor a ↔ (z : α) ≤ a`.
-/
class floor_ring (α) [linear_ordered_ring α] :=
(floor : α → ℤ)
(ceil : α → ℤ)
(gc_coe_floor : galois_connection coe floor)
(gc_ceil_coe : galois_connection ceil coe)
instance : floor_ring ℤ :=
{ floor := id,
ceil := id,
gc_coe_floor := λ a b, by { rw int.cast_id, refl },
gc_ceil_coe := λ a b, by { rw int.cast_id, refl } }
/-- A `floor_ring` constructor from the `floor` function alone. -/
def floor_ring.of_floor (α) [linear_ordered_ring α] (floor : α → ℤ)
(gc_coe_floor : galois_connection coe floor) : floor_ring α :=
{ floor := floor,
ceil := λ a, -floor (-a),
gc_coe_floor := gc_coe_floor,
gc_ceil_coe := λ a z, by rw [neg_le, ←gc_coe_floor, int.cast_neg, neg_le_neg_iff] }
/-- A `floor_ring` constructor from the `ceil` function alone. -/
def floor_ring.of_ceil (α) [linear_ordered_ring α] (ceil : α → ℤ)
(gc_ceil_coe : galois_connection ceil coe) : floor_ring α :=
{ floor := λ a, -ceil (-a),
ceil := ceil,
gc_coe_floor := λ a z, by rw [le_neg, gc_ceil_coe, int.cast_neg, neg_le_neg_iff],
gc_ceil_coe := gc_ceil_coe }
namespace int
variables [linear_ordered_ring α] [floor_ring α] {z : ℤ} {a : α}
/-- `int.floor a` is the greatest integer `z` such that `z ≤ a`. It is denoted with `⌊a⌋`. -/
def floor : α → ℤ := floor_ring.floor
/-- `int.ceil a` is the smallest integer `z` such that `a ≤ z`. It is denoted with `⌈a⌉`. -/
def ceil : α → ℤ := floor_ring.ceil
/-- `int.fract a`, the fractional part of `a`, is `a` minus its floor. -/
def fract (a : α) : α := a - floor a
@[simp] lemma floor_int : (int.floor : ℤ → ℤ) = id := rfl
@[simp] lemma ceil_int : (int.ceil : ℤ → ℤ) = id := rfl
@[simp] lemma fract_int : (int.fract : ℤ → ℤ) = 0 := funext $ λ x, by simp [fract]
notation `⌊` a `⌋` := int.floor a
notation `⌈` a `⌉` := int.ceil a
-- Mathematical notation for `fract a` is usually `{a}`. Let's not even go there.
@[simp] lemma floor_ring_floor_eq : @floor_ring.floor = @int.floor := rfl
@[simp] lemma floor_ring_ceil_eq : @floor_ring.ceil = @int.ceil := rfl
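-- Illustrative sketch: with the `floor_ring ℤ` instance above, floor and ceil are definitionally
-- the identity on `ℤ`, and the fractional part vanishes (proof by `simp`, using `fract_int`).
example : ⌊(5 : ℤ)⌋ = 5 := rfl
example : ⌈(-3 : ℤ)⌉ = -3 := rfl
example : fract (5 : ℤ) = 0 := by simp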
/-! #### Floor -/
lemma gc_coe_floor : galois_connection (coe : ℤ → α) floor := floor_ring.gc_coe_floor
lemma le_floor : z ≤ ⌊a⌋ ↔ (z : α) ≤ a := (gc_coe_floor z a).symm
lemma floor_lt : ⌊a⌋ < z ↔ a < z := lt_iff_lt_of_le_iff_le le_floor
lemma floor_le (a : α) : (⌊a⌋ : α) ≤ a := gc_coe_floor.l_u_le a
lemma floor_nonneg : 0 ≤ ⌊a⌋ ↔ 0 ≤ a := by rw [le_floor, int.cast_zero]
lemma floor_nonpos (ha : a ≤ 0) : ⌊a⌋ ≤ 0 :=
begin
rw [← @cast_le α, int.cast_zero],
exact (floor_le a).trans ha,
end
lemma lt_succ_floor (a : α) : a < ⌊a⌋.succ := floor_lt.1 $ int.lt_succ_self _
lemma lt_floor_add_one (a : α) : a < ⌊a⌋ + 1 :=
by simpa only [int.succ, int.cast_add, int.cast_one] using lt_succ_floor a
lemma sub_one_lt_floor (a : α) : a - 1 < ⌊a⌋ := sub_lt_iff_lt_add.2 (lt_floor_add_one a)
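-- Illustrative sketch, combining `floor_le` and `lt_floor_add_one`: every `a` lies in the
-- interval `[⌊a⌋, ⌊a⌋ + 1)`.
example : a ∈ set.Ico (⌊a⌋ : α) (⌊a⌋ + 1) := ⟨floor_le a, lt_floor_add_one a⟩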
@[simp] lemma floor_coe (z : ℤ) : ⌊(z : α)⌋ = z :=
eq_of_forall_le_iff $ λ a, by rw [le_floor, int.cast_le]
@[simp] lemma floor_zero : ⌊(0 : α)⌋ = 0 := by rw [← int.cast_zero, floor_coe]
@[simp] lemma floor_one : ⌊(1 : α)⌋ = 1 := by rw [← int.cast_one, floor_coe]
@[mono] lemma floor_mono : monotone (floor : α → ℤ) := gc_coe_floor.monotone_u
lemma floor_pos : 0 < ⌊a⌋ ↔ 1 ≤ a :=
by { convert le_floor, exact cast_one.symm }
@[simp] lemma floor_add_int (a : α) (z : ℤ) : ⌊a + z⌋ = ⌊a⌋ + z :=
eq_of_forall_le_iff $ λ a, by rw [le_floor,
← sub_le_iff_le_add, ← sub_le_iff_le_add, le_floor, int.cast_sub]
lemma floor_add_one (a : α) : ⌊a + 1⌋ = ⌊a⌋ + 1 :=
by { convert floor_add_int a 1, exact cast_one.symm }
@[simp] lemma floor_int_add (z : ℤ) (a : α) : ⌊↑z + a⌋ = z + ⌊a⌋ :=
by simpa only [add_comm] using floor_add_int a z
@[simp] lemma floor_add_nat (a : α) (n : ℕ) : ⌊a + n⌋ = ⌊a⌋ + n :=
by rw [← int.cast_coe_nat, floor_add_int]
@[simp] lemma floor_nat_add (n : ℕ) (a : α) : ⌊↑n + a⌋ = n + ⌊a⌋ :=
by rw [← int.cast_coe_nat, floor_int_add]
@[simp] lemma floor_sub_int (a : α) (z : ℤ) : ⌊a - z⌋ = ⌊a⌋ - z :=
eq.trans (by rw [int.cast_neg, sub_eq_add_neg]) (floor_add_int _ _)
@[simp] lemma floor_sub_nat (a : α) (n : ℕ) : ⌊a - n⌋ = ⌊a⌋ - n :=
by rw [← int.cast_coe_nat, floor_sub_int]
lemma abs_sub_lt_one_of_floor_eq_floor {α : Type*} [linear_ordered_comm_ring α] [floor_ring α]
{a b : α} (h : ⌊a⌋ = ⌊b⌋) : |a - b| < 1 :=
begin
have : a < ⌊a⌋ + 1 := lt_floor_add_one a,
have : b < ⌊b⌋ + 1 := lt_floor_add_one b,
have : (⌊a⌋ : α) = ⌊b⌋ := int.cast_inj.2 h,
have : (⌊a⌋ : α) ≤ a := floor_le a,
have : (⌊b⌋ : α) ≤ b := floor_le b,
exact abs_sub_lt_iff.2 ⟨by linarith, by linarith⟩
end
lemma floor_eq_iff : ⌊a⌋ = z ↔ ↑z ≤ a ∧ a < z + 1 :=
by rw [le_antisymm_iff, le_floor, ←int.lt_add_one_iff, floor_lt, int.cast_add, int.cast_one,
and.comm]
lemma floor_eq_on_Ico (n : ℤ) : ∀ a ∈ set.Ico (n : α) (n + 1), ⌊a⌋ = n :=
λ a ⟨h₀, h₁⟩, floor_eq_iff.mpr ⟨h₀, h₁⟩
lemma floor_eq_on_Ico' (n : ℤ) : ∀ a ∈ set.Ico (n : α) (n + 1), (⌊a⌋ : α) = n :=
λ a ha, congr_arg _ $ floor_eq_on_Ico n a ha
@[simp] lemma preimage_floor_singleton (m : ℤ) : (floor : α → ℤ) ⁻¹' {m} = Ico m (m + 1) :=
ext $ λ x, floor_eq_iff
/-! #### Fractional part -/
@[simp] lemma self_sub_floor (a : α) : a - ⌊a⌋ = fract a := rfl
@[simp] lemma floor_add_fract (a : α) : (⌊a⌋ : α) + fract a = a := add_sub_cancel'_right _ _
@[simp] lemma fract_add_floor (a : α) : fract a + ⌊a⌋ = a := sub_add_cancel _ _
@[simp] lemma fract_add_int (a : α) (m : ℤ) : fract (a + m) = fract a :=
by { rw fract, simp }
@[simp] lemma fract_sub_int (a : α) (m : ℤ) : fract (a - m) = fract a :=
by { rw fract, simp }
@[simp] lemma fract_int_add (m : ℤ) (a : α) : fract (↑m + a) = fract a :=
by rw [add_comm, fract_add_int]
@[simp] lemma self_sub_fract (a : α) : a - fract a = ⌊a⌋ := sub_sub_cancel _ _
@[simp] lemma fract_sub_self (a : α) : fract a - a = -⌊a⌋ := sub_sub_cancel_left _ _
lemma fract_nonneg (a : α) : 0 ≤ fract a := sub_nonneg.2 $ floor_le _
lemma fract_lt_one (a : α) : fract a < 1 := sub_lt.1 $ sub_one_lt_floor _
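-- Illustrative sketch: the two lemmas above say exactly that `fract a` lies in `[0, 1)`.
example : fract a ∈ set.Ico (0 : α) 1 := ⟨fract_nonneg a, fract_lt_one a⟩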
@[simp] lemma fract_zero : fract (0 : α) = 0 := by rw [fract, floor_zero, cast_zero, sub_self]
@[simp] lemma fract_one : fract (1 : α) = 0 :=
by simp [fract]
@[simp] lemma fract_coe (z : ℤ) : fract (z : α) = 0 :=
by { unfold fract, rw floor_coe, exact sub_self _ }
@[simp] lemma fract_floor (a : α) : fract (⌊a⌋ : α) = 0 := fract_coe _
@[simp] lemma floor_fract (a : α) : ⌊fract a⌋ = 0 :=
by rw [floor_eq_iff, int.cast_zero, zero_add]; exact ⟨fract_nonneg _, fract_lt_one _⟩
lemma fract_eq_iff {a b : α} : fract a = b ↔ 0 ≤ b ∧ b < 1 ∧ ∃ z : ℤ, a - b = z :=
⟨λ h, by { rw ←h, exact ⟨fract_nonneg _, fract_lt_one _, ⟨⌊a⌋, sub_sub_cancel _ _⟩⟩},
begin
rintro ⟨h₀, h₁, z, hz⟩,
show a - ⌊a⌋ = b, apply eq.symm,
rw [eq_sub_iff_add_eq, add_comm, ←eq_sub_iff_add_eq],
rw [hz, int.cast_inj, floor_eq_iff, ←hz],
clear hz, split; simpa [sub_eq_add_neg, add_assoc]
end⟩
lemma fract_eq_fract {a b : α} : fract a = fract b ↔ ∃ z : ℤ, a - b = z :=
⟨λ h, ⟨⌊a⌋ - ⌊b⌋, begin
unfold fract at h, rw [int.cast_sub, sub_eq_sub_iff_sub_eq_sub.1 h],
end⟩, begin
rintro ⟨z, hz⟩,
refine fract_eq_iff.2 ⟨fract_nonneg _, fract_lt_one _, z + ⌊b⌋, _⟩,
rw [eq_add_of_sub_eq hz, add_comm, int.cast_add],
exact add_sub_sub_cancel _ _ _,
end⟩
@[simp] lemma fract_eq_self {a : α} : fract a = a ↔ 0 ≤ a ∧ a < 1 :=
fract_eq_iff.trans $ and.assoc.symm.trans $ and_iff_left ⟨0, by simp⟩
@[simp] lemma fract_fract (a : α) : fract (fract a) = fract a :=
fract_eq_self.2 ⟨fract_nonneg _, fract_lt_one _⟩
lemma fract_add (a b : α) : ∃ z : ℤ, fract (a + b) - fract a - fract b = z :=
⟨⌊a⌋ + ⌊b⌋ - ⌊a + b⌋, by { unfold fract, simp [sub_eq_add_neg], abel }⟩
lemma fract_mul_nat (a : α) (b : ℕ) : ∃ z : ℤ, fract a * b - fract (a * b) = z :=
begin
induction b with c hc,
use 0, simp,
rcases hc with ⟨z, hz⟩,
rw [nat.succ_eq_add_one, nat.cast_add, mul_add, mul_add, nat.cast_one, mul_one, mul_one],
rcases fract_add (a * c) a with ⟨y, hy⟩,
use z - y,
rw [int.cast_sub, ←hz, ←hy],
abel
end
lemma preimage_fract (s : set α) : fract ⁻¹' s = ⋃ m : ℤ, (λ x, x - m) ⁻¹' (s ∩ Ico (0 : α) 1) :=
begin
ext x,
simp only [mem_preimage, mem_Union, mem_inter_eq],
refine ⟨λ h, ⟨⌊x⌋, h, fract_nonneg x, fract_lt_one x⟩, _⟩,
rintro ⟨m, hms, hm0, hm1⟩,
obtain rfl : ⌊x⌋ = m, from floor_eq_iff.2 ⟨sub_nonneg.1 hm0, sub_lt_iff_lt_add'.1 hm1⟩,
exact hms
end
lemma image_fract (s : set α) : fract '' s = ⋃ m : ℤ, (λ x, x - m) '' s ∩ Ico 0 1 :=
begin
ext x,
simp only [mem_image, mem_inter_eq, mem_Union], split,
{ rintro ⟨y, hy, rfl⟩,
exact ⟨⌊y⌋, ⟨y, hy, rfl⟩, fract_nonneg y, fract_lt_one y⟩ },
{ rintro ⟨m, ⟨y, hys, rfl⟩, h0, h1⟩,
obtain rfl : ⌊y⌋ = m, from floor_eq_iff.2 ⟨sub_nonneg.1 h0, sub_lt_iff_lt_add'.1 h1⟩,
exact ⟨y, hys, rfl⟩ }
end
section linear_ordered_field
variables {k : Type*} [linear_ordered_field k] [floor_ring k] {b : k}
lemma fract_div_mul_self_mem_Ico (a b : k) (ha : 0 < a) : fract (b/a) * a ∈ Ico 0 a :=
⟨(zero_le_mul_right ha).2 (fract_nonneg (b/a)), (mul_lt_iff_lt_one_left ha).2 (fract_lt_one (b/a))⟩
lemma fract_div_mul_self_add_zsmul_eq (a b : k) (ha : a ≠ 0) :
fract (b/a) * a + ⌊b/a⌋ • a = b :=
by rw [zsmul_eq_mul, ← add_mul, fract_add_floor, div_mul_cancel b ha]
lemma sub_floor_div_mul_nonneg (a : k) (hb : 0 < b) : 0 ≤ a - ⌊a / b⌋ * b :=
sub_nonneg_of_le $ (le_div_iff hb).1 $ floor_le _
lemma sub_floor_div_mul_lt (a : k) (hb : 0 < b) : a - ⌊a / b⌋ * b < b :=
sub_lt_iff_lt_add.2 $ by { rw [←one_add_mul, ←div_lt_iff hb, add_comm], exact lt_floor_add_one _ }
end linear_ordered_field
/-! #### Ceil -/
lemma gc_ceil_coe : galois_connection ceil (coe : ℤ → α) := floor_ring.gc_ceil_coe
lemma ceil_le : ⌈a⌉ ≤ z ↔ a ≤ z := gc_ceil_coe a z
lemma floor_neg : ⌊-a⌋ = -⌈a⌉ :=
eq_of_forall_le_iff (λ z, by rw [le_neg, ceil_le, le_floor, int.cast_neg, le_neg])
lemma ceil_neg : ⌈-a⌉ = -⌊a⌋ :=
eq_of_forall_ge_iff (λ z, by rw [neg_le, ceil_le, le_floor, int.cast_neg, neg_le])
lemma lt_ceil : z < ⌈a⌉ ↔ (z : α) < a := lt_iff_lt_of_le_iff_le ceil_le
lemma ceil_le_floor_add_one (a : α) : ⌈a⌉ ≤ ⌊a⌋ + 1 :=
by { rw [ceil_le, int.cast_add, int.cast_one], exact (lt_floor_add_one a).le }
lemma le_ceil (a : α) : a ≤ ⌈a⌉ := gc_ceil_coe.le_u_l a
@[simp] lemma ceil_coe (z : ℤ) : ⌈(z : α)⌉ = z :=
eq_of_forall_ge_iff $ λ a, by rw [ceil_le, int.cast_le]
lemma ceil_mono : monotone (ceil : α → ℤ) := gc_ceil_coe.monotone_l
@[simp] lemma ceil_add_int (a : α) (z : ℤ) : ⌈a + z⌉ = ⌈a⌉ + z :=
by rw [←neg_inj, neg_add', ←floor_neg, ←floor_neg, neg_add', floor_sub_int]
@[simp] lemma ceil_add_one (a : α) : ⌈a + 1⌉ = ⌈a⌉ + 1 :=
by { convert ceil_add_int a (1 : ℤ), exact cast_one.symm }
@[simp] lemma ceil_sub_int (a : α) (z : ℤ) : ⌈a - z⌉ = ⌈a⌉ - z :=
eq.trans (by rw [int.cast_neg, sub_eq_add_neg]) (ceil_add_int _ _)
@[simp] lemma ceil_sub_one (a : α) : ⌈a - 1⌉ = ⌈a⌉ - 1 :=
by rw [eq_sub_iff_add_eq, ← ceil_add_one, sub_add_cancel]
lemma ceil_lt_add_one (a : α) : (⌈a⌉ : α) < a + 1 :=
by { rw [← lt_ceil, ← int.cast_one, ceil_add_int], apply lt_add_one }
@[simp] lemma ceil_pos : 0 < ⌈a⌉ ↔ 0 < a := by rw [lt_ceil, cast_zero]
@[simp] lemma ceil_zero : ⌈(0 : α)⌉ = 0 := by rw [← int.cast_zero, ceil_coe]
@[simp] lemma ceil_one : ⌈(1 : α)⌉ = 1 := by rw [←int.cast_one, ceil_coe]
lemma ceil_nonneg (ha : 0 ≤ a) : 0 ≤ ⌈a⌉ :=
by exact_mod_cast ha.trans (le_ceil a)
lemma ceil_eq_iff : ⌈a⌉ = z ↔ ↑z - 1 < a ∧ a ≤ z :=
by rw [←ceil_le, ←int.cast_one, ←int.cast_sub, ←lt_ceil, int.sub_one_lt_iff, le_antisymm_iff,
and.comm]
lemma ceil_eq_on_Ioc (z : ℤ) : ∀ a ∈ set.Ioc (z - 1 : α) z, ⌈a⌉ = z :=
λ a ⟨h₀, h₁⟩, ceil_eq_iff.mpr ⟨h₀, h₁⟩
lemma ceil_eq_on_Ioc' (z : ℤ) : ∀ a ∈ set.Ioc (z - 1 : α) z, (⌈a⌉ : α) = z :=
λ a ha, by exact_mod_cast ceil_eq_on_Ioc z a ha
lemma floor_le_ceil (a : α) : ⌊a⌋ ≤ ⌈a⌉ := cast_le.1 $ (floor_le _).trans $ le_ceil _
lemma floor_lt_ceil_of_lt {a b : α} (h : a < b) : ⌊a⌋ < ⌈b⌉ :=
cast_lt.1 $ (floor_le a).trans_lt $ h.trans_le $ le_ceil b
@[simp] lemma preimage_ceil_singleton (m : ℤ) : (ceil : α → ℤ) ⁻¹' {m} = Ioc (m - 1) m :=
ext $ λ x, ceil_eq_iff
/-! #### Intervals -/
@[simp] lemma preimage_Ioo {a b : α} : ((coe : ℤ → α) ⁻¹' (set.Ioo a b)) = set.Ioo ⌊a⌋ ⌈b⌉ :=
by { ext, simp [floor_lt, lt_ceil] }
@[simp] lemma preimage_Ico {a b : α} : ((coe : ℤ → α) ⁻¹' (set.Ico a b)) = set.Ico ⌈a⌉ ⌈b⌉ :=
by { ext, simp [ceil_le, lt_ceil] }
@[simp] lemma preimage_Ioc {a b : α} : ((coe : ℤ → α) ⁻¹' (set.Ioc a b)) = set.Ioc ⌊a⌋ ⌊b⌋ :=
by { ext, simp [floor_lt, le_floor] }
@[simp] lemma preimage_Icc {a b : α} : ((coe : ℤ → α) ⁻¹' (set.Icc a b)) = set.Icc ⌈a⌉ ⌊b⌋ :=
by { ext, simp [ceil_le, le_floor] }
@[simp] lemma preimage_Ioi : ((coe : ℤ → α) ⁻¹' (set.Ioi a)) = set.Ioi ⌊a⌋ :=
by { ext, simp [floor_lt] }
@[simp] lemma preimage_Ici : ((coe : ℤ → α) ⁻¹' (set.Ici a)) = set.Ici ⌈a⌉ :=
by { ext, simp [ceil_le] }
@[simp] lemma preimage_Iio : ((coe : ℤ → α) ⁻¹' (set.Iio a)) = set.Iio ⌈a⌉ :=
by { ext, simp [lt_ceil] }
@[simp] lemma preimage_Iic : ((coe : ℤ → α) ⁻¹' (set.Iic a)) = set.Iic ⌊a⌋ :=
by { ext, simp [le_floor] }
end int
open int
/-! ### Round -/
section round
variables [linear_ordered_field α] [floor_ring α]
/-- `round` rounds a number to the nearest integer. `round (1 / 2) = 1` -/
def round (x : α) : ℤ := ⌊x + 1 / 2⌋
@[simp] lemma round_zero : round (0 : α) = 0 := floor_eq_iff.2 (by norm_num)
@[simp] lemma round_one : round (1 : α) = 1 := floor_eq_iff.2 (by norm_num)
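-- Illustrative sketch: halves round up, as stated in the docstring of `round` above.
example : round (1 / 2 : α) = 1 := floor_eq_iff.2 (by norm_num)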
lemma abs_sub_round (x : α) : |x - round x| ≤ 1 / 2 :=
begin
rw [round, abs_sub_le_iff],
have := floor_le (x + 1 / 2),
have := lt_floor_add_one (x + 1 / 2),
split; linarith
end
end round
variables {α} [linear_ordered_ring α] [floor_ring α]
/-! #### A floor ring as a floor semiring -/
@[priority 100] -- see Note [lower instance priority]
instance _root_.floor_ring.to_floor_semiring : floor_semiring α :=
{ floor := λ a, ⌊a⌋.to_nat,
ceil := λ a, ⌈a⌉.to_nat,
floor_of_neg := λ a ha, int.to_nat_of_nonpos (int.floor_nonpos ha.le),
gc_floor := λ a n ha,
by rw [int.le_to_nat_iff (int.floor_nonneg.2 ha), int.le_floor, int.cast_coe_nat],
gc_ceil := λ a n, by rw [int.to_nat_le, int.ceil_le, int.cast_coe_nat] }
lemma int.floor_to_nat (a : α) : ⌊a⌋.to_nat = ⌊a⌋₊ := rfl
lemma int.ceil_to_nat (a : α) : ⌈a⌉.to_nat = ⌈a⌉₊ := rfl
@[simp] lemma nat.floor_int : (nat.floor : ℤ → ℕ) = int.to_nat := rfl
@[simp] lemma nat.ceil_int : (nat.ceil : ℤ → ℕ) = int.to_nat := rfl
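-- Illustrative sketch: through the instance above, `ℤ` also gets an `ℕ`-valued floor and ceil,
-- which send negative integers to `0`.
example : ⌊(-3 : ℤ)⌋₊ = 0 := nat.floor_of_nonpos (by norm_num)
example : ⌈(-3 : ℤ)⌉₊ = 0 := nat.ceil_eq_zero.2 (by norm_num)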
variables {a : α}
lemma nat.cast_floor_eq_int_floor (ha : 0 ≤ a) : (⌊a⌋₊ : ℤ) = ⌊a⌋ :=
by rw [←int.floor_to_nat, int.to_nat_of_nonneg (int.floor_nonneg.2 ha)]
lemma nat.cast_floor_eq_cast_int_floor (ha : 0 ≤ a) : (⌊a⌋₊ : α) = ⌊a⌋ :=
by rw [←nat.cast_floor_eq_int_floor ha, int.cast_coe_nat]
lemma nat.cast_ceil_eq_int_ceil (ha : 0 ≤ a) : (⌈a⌉₊ : ℤ) = ⌈a⌉ :=
by { rw [←int.ceil_to_nat, int.to_nat_of_nonneg (int.ceil_nonneg ha)] }
lemma nat.cast_ceil_eq_cast_int_ceil (ha : 0 ≤ a) : (⌈a⌉₊ : α) = ⌈a⌉ :=
by rw [←nat.cast_ceil_eq_int_ceil ha, int.cast_coe_nat]
/-- There exists at most one `floor_ring` structure on a given linear ordered ring. -/
lemma subsingleton_floor_ring {α} [linear_ordered_ring α] :
subsingleton (floor_ring α) :=
begin
refine ⟨λ H₁ H₂, _⟩,
have : H₁.floor = H₂.floor := funext (λ a, H₁.gc_coe_floor.u_unique H₂.gc_coe_floor $ λ _, rfl),
have : H₁.ceil = H₂.ceil := funext (λ a, H₁.gc_ceil_coe.l_unique H₂.gc_ceil_coe $ λ _, rfl),
cases H₁, cases H₂, congr; assumption
end
|
98517d7f667b632ef88786b79b09efa2a88ee7b0
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/linear_algebra/affine_space/independent_auto.lean
|
4d5cd2622854bfa36a9d2afc490ac0376242db7c
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 17,596
|
lean
|
/-
Copyright (c) 2020 Joseph Myers. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Joseph Myers.
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.finset.sort
import Mathlib.data.matrix.notation
import Mathlib.linear_algebra.affine_space.combination
import Mathlib.linear_algebra.basis
import Mathlib.PostPort
universes u_1 u_2 u_3 u_4 u_5 l
namespace Mathlib
/-!
# Affine independence
This file defines affinely independent families of points.
## Main definitions
* `affine_independent` defines affinely independent families of points
as those where no nontrivial weighted subtraction is 0. This is
proved equivalent to two other formulations: linear independence of
the results of subtracting a base point in the family from the other
points in the family, or any equal affine combinations having the
same weights. A bundled type `simplex` is provided for finite
affinely independent families of points, with an abbreviation
`triangle` for the case of three points.
## References
* https://en.wikipedia.org/wiki/Affine_space
-/
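/- Illustrative (informal) example: over `ℝ`, the collinear points `0`, `1`, `2` are not affinely
independent, since the weights `1, -2, 1` sum to `0` while the weighted subtraction
`1 • (0 -ᵥ b) - 2 • (1 -ᵥ b) + 1 • (2 -ᵥ b) = 0` for any base point `b`; two distinct points, by
contrast, always are (see `affine_independent_of_ne` below). -/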
/-- An indexed family is said to be affinely independent if no
nontrivial weighted subtractions (where the sum of weights is 0) are
0. -/
def affine_independent (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V]
[module k V] [add_torsor V P] {ι : Type u_4} (p : ι → P) :=
∀ (s : finset ι) (w : ι → k),
(finset.sum s fun (i : ι) => w i) = 0 →
coe_fn (finset.weighted_vsub s p) w = 0 → ∀ (i : ι), i ∈ s → w i = 0
/-- The definition of `affine_independent`. -/
theorem affine_independent_def (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (p : ι → P) :
affine_independent k p ↔
∀ (s : finset ι) (w : ι → k),
(finset.sum s fun (i : ι) => w i) = 0 →
coe_fn (finset.weighted_vsub s p) w = 0 → ∀ (i : ι), i ∈ s → w i = 0 :=
iff.rfl
/-- A family with at most one point is affinely independent. -/
theorem affine_independent_of_subsingleton (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [subsingleton ι] (p : ι → P) :
affine_independent k p :=
fun (s : finset ι) (w : ι → k) (h : (finset.sum s fun (i : ι) => w i) = 0)
(hs : coe_fn (finset.weighted_vsub s p) w = 0) (i : ι) (hi : i ∈ s) =>
fintype.eq_of_subsingleton_of_sum_eq h i hi
/-- A family indexed by a `fintype` is affinely independent if and
only if no nontrivial weighted subtractions over `finset.univ` (where
the sum of the weights is 0) are 0. -/
theorem affine_independent_iff_of_fintype (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [fintype ι] (p : ι → P) :
affine_independent k p ↔
∀ (w : ι → k),
(finset.sum finset.univ fun (i : ι) => w i) = 0 →
coe_fn (finset.weighted_vsub finset.univ p) w = 0 → ∀ (i : ι), w i = 0 :=
sorry
/-- A family is affinely independent if and only if the differences
from a base point in that family are linearly independent. -/
theorem affine_independent_iff_linear_independent_vsub (k : Type u_1) {V : Type u_2} {P : Type u_3}
[ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} (p : ι → P) (i1 : ι) :
affine_independent k p ↔
linear_independent k fun (i : Subtype fun (x : ι) => x ≠ i1) => p ↑i -ᵥ p i1 :=
sorry
/-- A set is affinely independent if and only if the differences from
a base point in that set are linearly independent. -/
theorem affine_independent_set_iff_linear_independent_vsub (k : Type u_1) {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {s : set P} {p₁ : P}
(hp₁ : p₁ ∈ s) :
(affine_independent k fun (p : ↥s) => ↑p) ↔
linear_independent k fun (v : ↥((fun (p : P) => p -ᵥ p₁) '' (s \ singleton p₁))) => ↑v :=
sorry
/-- A set of nonzero vectors is linearly independent if and only if,
given a point `p₁`, the vectors added to `p₁` and `p₁` itself are
affinely independent. -/
theorem linear_independent_set_iff_affine_independent_vadd_union_singleton (k : Type u_1)
{V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P]
{s : set V} (hs : ∀ (v : V), v ∈ s → v ≠ 0) (p₁ : P) :
(linear_independent k fun (v : ↥s) => ↑v) ↔
affine_independent k fun (p : ↥(singleton p₁ ∪ (fun (v : V) => v +ᵥ p₁) '' s)) => ↑p :=
sorry
/-- A family is affinely independent if and only if any affine
combinations (with sum of weights 1) that evaluate to the same point
have equal `set.indicator`. -/
theorem affine_independent_iff_indicator_eq_of_affine_combination_eq (k : Type u_1) {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4}
(p : ι → P) :
affine_independent k p ↔
∀ (s1 s2 : finset ι) (w1 w2 : ι → k),
(finset.sum s1 fun (i : ι) => w1 i) = 1 →
(finset.sum s2 fun (i : ι) => w2 i) = 1 →
coe_fn (finset.affine_combination s1 p) w1 =
coe_fn (finset.affine_combination s2 p) w2 →
set.indicator (↑s1) w1 = set.indicator (↑s2) w2 :=
sorry
/-- An affinely independent family is injective, if the underlying
ring is nontrivial. -/
theorem injective_of_affine_independent {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [nontrivial k] {p : ι → P}
(ha : affine_independent k p) : function.injective p :=
sorry
/-- If a family is affinely independent, so is any subfamily given by
composition of an embedding into the index type with the original
family. -/
theorem affine_independent_embedding_of_affine_independent {k : Type u_1} {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4}
{ι2 : Type u_5} (f : ι2 ↪ ι) {p : ι → P} (ha : affine_independent k p) :
affine_independent k (p ∘ ⇑f) :=
sorry
/-- If a family is affinely independent, so is any subfamily indexed
by a subtype of the index type. -/
theorem affine_independent_subtype_of_affine_independent {k : Type u_1} {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4}
{p : ι → P} (ha : affine_independent k p) (s : set ι) :
affine_independent k fun (i : ↥s) => p ↑i :=
affine_independent_embedding_of_affine_independent
(function.embedding.subtype fun (x : ι) => x ∈ s) ha
/-- If an indexed family of points is affinely independent, so is the
corresponding set of points. -/
theorem affine_independent_set_of_affine_independent {k : Type u_1} {V : Type u_2} {P : Type u_3}
[ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} {p : ι → P}
(ha : affine_independent k p) : affine_independent k fun (x : ↥(set.range p)) => ↑x :=
sorry
/-- If a set of points is affinely independent, so is any subset. -/
theorem affine_independent_of_subset_affine_independent {k : Type u_1} {V : Type u_2} {P : Type u_3}
[ring k] [add_comm_group V] [module k V] [add_torsor V P] {s : set P} {t : set P}
(ha : affine_independent k fun (x : ↥t) => ↑x) (hs : s ⊆ t) :
affine_independent k fun (x : ↥s) => ↑x :=
affine_independent_embedding_of_affine_independent (set.embedding_of_subset s t hs) ha
/-- If the range of an injective indexed family of points is affinely
independent, so is that family. -/
theorem affine_independent_of_affine_independent_set_of_injective {k : Type u_1} {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4}
{p : ι → P} (ha : affine_independent k fun (x : ↥(set.range p)) => ↑x)
(hi : function.injective p) : affine_independent k p :=
sorry
/-- If a family is affinely independent, and the spans of points
indexed by two subsets of the index type have a point in common, those
subsets of the index type have an element in common, if the underlying
ring is nontrivial. -/
theorem exists_mem_inter_of_exists_mem_inter_affine_span_of_affine_independent {k : Type u_1}
{V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P]
{ι : Type u_4} [nontrivial k] {p : ι → P} (ha : affine_independent k p) {s1 : set ι}
{s2 : set ι} {p0 : P} (hp0s1 : p0 ∈ affine_span k (p '' s1))
(hp0s2 : p0 ∈ affine_span k (p '' s2)) : ∃ (i : ι), i ∈ s1 ∩ s2 :=
sorry
/-- If a family is affinely independent, the spans of points indexed
by disjoint subsets of the index type are disjoint, if the underlying
ring is nontrivial. -/
theorem affine_span_disjoint_of_disjoint_of_affine_independent {k : Type u_1} {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4}
[nontrivial k] {p : ι → P} (ha : affine_independent k p) {s1 : set ι} {s2 : set ι}
(hd : s1 ∩ s2 = ∅) : ↑(affine_span k (p '' s1)) ∩ ↑(affine_span k (p '' s2)) = ∅ :=
sorry
/-- If a family is affinely independent, a point in the family is in
the span of some of the points given by a subset of the index type if
and only if that point's index is in the subset, if the underlying
ring is nontrivial. -/
@[simp] theorem mem_affine_span_iff_mem_of_affine_independent {k : Type u_1} {V : Type u_2}
{P : Type u_3} [ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4}
[nontrivial k] {p : ι → P} (ha : affine_independent k p) (i : ι) (s : set ι) :
p i ∈ affine_span k (p '' s) ↔ i ∈ s :=
sorry
/-- If a family is affinely independent, a point in the family is not
in the affine span of the other points, if the underlying ring is
nontrivial. -/
theorem not_mem_affine_span_diff_of_affine_independent {k : Type u_1} {V : Type u_2} {P : Type u_3}
[ring k] [add_comm_group V] [module k V] [add_torsor V P] {ι : Type u_4} [nontrivial k]
{p : ι → P} (ha : affine_independent k p) (i : ι) (s : set ι) :
¬p i ∈ affine_span k (p '' (s \ singleton i)) :=
sorry
/-- An affinely independent set of points can be extended to such a
set that spans the whole space. -/
theorem exists_subset_affine_independent_affine_span_eq_top {k : Type u_1} {V : Type u_2}
{P : Type u_3} [field k] [add_comm_group V] [module k V] [add_torsor V P] {s : set P}
(h : affine_independent k fun (p : ↥s) => ↑p) :
∃ (t : set P), s ⊆ t ∧ (affine_independent k fun (p : ↥t) => ↑p) ∧ affine_span k t = ⊤ :=
sorry
/-- Two different points are affinely independent. -/
theorem affine_independent_of_ne (k : Type u_1) {V : Type u_2} {P : Type u_3} [field k]
[add_comm_group V] [module k V] [add_torsor V P] {p₁ : P} {p₂ : P} (h : p₁ ≠ p₂) :
affine_independent k (matrix.vec_cons p₁ (matrix.vec_cons p₂ matrix.vec_empty)) :=
sorry
namespace affine
/-- A `simplex k P n` is a collection of `n + 1` affinely
independent points. -/
structure simplex (k : Type u_1) {V : Type u_2} (P : Type u_3) [ring k] [add_comm_group V]
[module k V] [add_torsor V P] (n : ℕ)
where
points : fin (n + 1) → P
independent : affine_independent k points
/-- A `triangle k P` is a collection of three affinely independent points. -/
def triangle (k : Type u_1) {V : Type u_2} (P : Type u_3) [ring k] [add_comm_group V] [module k V]
[add_torsor V P] :=
simplex k P (bit0 1)
namespace simplex
/-- Construct a 0-simplex from a point. -/
def mk_of_point (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V]
[module k V] [add_torsor V P] (p : P) : simplex k P 0 :=
mk (fun (_x : fin (0 + 1)) => p) sorry
/-- The point in a simplex constructed with `mk_of_point`. -/
@[simp] theorem mk_of_point_points (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] (p : P) (i : fin 1) :
points (mk_of_point k p) i = p :=
rfl
protected instance inhabited (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] [Inhabited P] : Inhabited (simplex k P 0) :=
{ default := mk_of_point k Inhabited.default }
protected instance nonempty (k : Type u_1) {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V]
[module k V] [add_torsor V P] : Nonempty (simplex k P 0) :=
Nonempty.intro (mk_of_point k (nonempty.some add_torsor.nonempty))
/-- Two simplices are equal if they have the same points. -/
theorem ext {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V]
[add_torsor V P] {n : ℕ} {s1 : simplex k P n} {s2 : simplex k P n}
(h : ∀ (i : fin (n + 1)), points s1 i = points s2 i) : s1 = s2 :=
sorry
/-- Two simplices are equal if and only if they have the same points. -/
theorem ext_iff {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V]
[module k V] [add_torsor V P] {n : ℕ} (s1 : simplex k P n) (s2 : simplex k P n) :
s1 = s2 ↔ ∀ (i : fin (n + 1)), points s1 i = points s2 i :=
{ mp := fun (h : s1 = s2) (_x : fin (n + 1)) => h ▸ rfl, mpr := ext }
/-- A face of a simplex is a simplex with the given subset of
points. -/
def face {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V] [module k V]
[add_torsor V P] {n : ℕ} (s : simplex k P n) {fs : finset (fin (n + 1))} {m : ℕ}
(h : finset.card fs = m + 1) : simplex k P m :=
mk (points s ∘ ⇑(finset.order_emb_of_fin fs h)) sorry
/-- The points of a face of a simplex are given by `order_emb_of_fin`. -/
theorem face_points {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V]
[module k V] [add_torsor V P] {n : ℕ} (s : simplex k P n) {fs : finset (fin (n + 1))} {m : ℕ}
(h : finset.card fs = m + 1) (i : fin (m + 1)) :
points (face s h) i = points s (coe_fn (finset.order_emb_of_fin fs h) i) :=
rfl
/-- The points of a face of a simplex are given by `order_emb_of_fin`. -/
theorem face_points' {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k] [add_comm_group V]
[module k V] [add_torsor V P] {n : ℕ} (s : simplex k P n) {fs : finset (fin (n + 1))} {m : ℕ}
(h : finset.card fs = m + 1) : points (face s h) = points s ∘ ⇑(finset.order_emb_of_fin fs h) :=
rfl
/-- A single-point face equals the 0-simplex constructed with
`mk_of_point`. -/
@[simp] theorem face_eq_mk_of_point {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] {n : ℕ} (s : simplex k P n) (i : fin (n + 1)) :
face s (finset.card_singleton i) = mk_of_point k (points s i) :=
sorry
/-- The set of points of a face. -/
@[simp] theorem range_face_points {k : Type u_1} {V : Type u_2} {P : Type u_3} [ring k]
[add_comm_group V] [module k V] [add_torsor V P] {n : ℕ} (s : simplex k P n)
{fs : finset (fin (n + 1))} {m : ℕ} (h : finset.card fs = m + 1) :
set.range (points (face s h)) = points s '' ↑fs :=
sorry
end simplex
end affine
namespace affine
namespace simplex
/-- The centroid of a face of a simplex as the centroid of a subset of
the points. -/
@[simp] theorem face_centroid_eq_centroid {k : Type u_1} {V : Type u_2} {P : Type u_3}
[division_ring k] [add_comm_group V] [module k V] [add_torsor V P] {n : ℕ} (s : simplex k P n)
{fs : finset (fin (n + 1))} {m : ℕ} (h : finset.card fs = m + 1) :
finset.centroid k finset.univ (points (face s h)) = finset.centroid k fs (points s) :=
sorry
/-- Over a characteristic-zero division ring, the centroids given by
two subsets of the points of a simplex are equal if and only if those
faces are given by the same subset of points. -/
@[simp] theorem centroid_eq_iff {k : Type u_1} {V : Type u_2} {P : Type u_3} [division_ring k]
[add_comm_group V] [module k V] [add_torsor V P] [char_zero k] {n : ℕ} (s : simplex k P n)
{fs₁ : finset (fin (n + 1))} {fs₂ : finset (fin (n + 1))} {m₁ : ℕ} {m₂ : ℕ}
(h₁ : finset.card fs₁ = m₁ + 1) (h₂ : finset.card fs₂ = m₂ + 1) :
finset.centroid k fs₁ (points s) = finset.centroid k fs₂ (points s) ↔ fs₁ = fs₂ :=
sorry
/-- Over a characteristic-zero division ring, the centroids of two
faces of a simplex are equal if and only if those faces are given by
the same subset of points. -/
theorem face_centroid_eq_iff {k : Type u_1} {V : Type u_2} {P : Type u_3} [division_ring k]
[add_comm_group V] [module k V] [add_torsor V P] [char_zero k] {n : ℕ} (s : simplex k P n)
{fs₁ : finset (fin (n + 1))} {fs₂ : finset (fin (n + 1))} {m₁ : ℕ} {m₂ : ℕ}
(h₁ : finset.card fs₁ = m₁ + 1) (h₂ : finset.card fs₂ = m₂ + 1) :
finset.centroid k finset.univ (points (face s h₁)) =
finset.centroid k finset.univ (points (face s h₂)) ↔
fs₁ = fs₂ :=
sorry
/-- Two simplices with the same points have the same centroid. -/
theorem centroid_eq_of_range_eq {k : Type u_1} {V : Type u_2} {P : Type u_3} [division_ring k]
[add_comm_group V] [module k V] [add_torsor V P] {n : ℕ} {s₁ : simplex k P n}
{s₂ : simplex k P n} (h : set.range (points s₁) = set.range (points s₂)) :
finset.centroid k finset.univ (points s₁) = finset.centroid k finset.univ (points s₂) :=
sorry
end Mathlib
|
e031c2ab8cea1e02fff1c9bfd7768fd450662d93
|
626e312b5c1cb2d88fca108f5933076012633192
|
/src/linear_algebra/dual.lean
|
aad8570beb3a1dd86f1be21258162248f08e1d17
|
[
"Apache-2.0"
] |
permissive
|
Bioye97/mathlib
|
9db2f9ee54418d29dd06996279ba9dc874fd6beb
|
782a20a27ee83b523f801ff34efb1a9557085019
|
refs/heads/master
| 1,690,305,956,488
| 1,631,067,774,000
| 1,631,067,774,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 24,708
|
lean
|
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin, Fabian Glöckle
-/
import linear_algebra.finite_dimensional
import linear_algebra.projection
/-!
# Dual vector spaces
The dual space of an R-module M is the R-module of linear maps `M → R`.
## Main definitions
* `dual R M` defines the dual space of M over R.
* Given a basis for an `R`-module `M`, `basis.to_dual` produces a map from `M` to `dual R M`.
* Given families of vectors `e` and `ε`, `dual_pair e ε` states that these families have the
characteristic properties of a basis and a dual.
* `dual_annihilator W` is the submodule of `dual R M` where every element annihilates `W`.
## Main results
* `to_dual_equiv` : the linear equivalence between the dual module and primal module,
given a finite basis.
* `dual_pair.basis` and `dual_pair.eq_dual`: if `e` and `ε` form a dual pair, `e` is a basis and
`ε` is its dual basis.
* `quot_equiv_annihilator`: the quotient by a subspace is isomorphic to its dual annihilator.
## Notation
We sometimes use `V'` as local notation for `dual K V`.
## TODO
Erdős-Kaplansky theorem about the dimension of a dual vector space in the case of infinite dimension.
-/
noncomputable theory
namespace module
variables (R : Type*) (M : Type*)
variables [comm_semiring R] [add_comm_monoid M] [module R M]
/-- The dual space of an R-module M is the R-module of linear maps `M → R`. -/
@[derive [add_comm_monoid, module R]] def dual := M →ₗ[R] R
instance {S : Type*} [comm_ring S] {N : Type*} [add_comm_group N] [module S N] :
add_comm_group (dual S N) := by {unfold dual, apply_instance}
namespace dual
instance : inhabited (dual R M) := by dunfold dual; apply_instance
instance : has_coe_to_fun (dual R M) := ⟨_, linear_map.to_fun⟩
/-- Maps a module M to the dual of the dual of M. See `module.erange_coe` and
`module.eval_equiv`. -/
def eval : M →ₗ[R] (dual R (dual R M)) := linear_map.flip linear_map.id
@[simp] lemma eval_apply (v : M) (a : dual R M) : eval R M v a = a v :=
begin
dunfold eval,
rw [linear_map.flip_apply, linear_map.id_apply]
end
variables {R M} {M' : Type*} [add_comm_monoid M'] [module R M']
/-- The transposition of linear maps, as a linear map from `M →ₗ[R] M'` to
`dual R M' →ₗ[R] dual R M`. -/
def transpose : (M →ₗ[R] M') →ₗ[R] (dual R M' →ₗ[R] dual R M) :=
(linear_map.llcomp R M M' R).flip
lemma transpose_apply (u : M →ₗ[R] M') (l : dual R M') : transpose u l = l.comp u := rfl
variables {M'' : Type*} [add_comm_monoid M''] [module R M'']
lemma transpose_comp (u : M' →ₗ[R] M'') (v : M →ₗ[R] M') :
transpose (u.comp v) = (transpose v).comp (transpose u) := rfl
end dual
end module
namespace basis
universes u v w
open module module.dual submodule linear_map cardinal function
variables {R M K V ι : Type*}
section comm_semiring
variables [comm_semiring R] [add_comm_monoid M] [module R M] [decidable_eq ι]
variables (b : basis ι R M)
/-- The linear map from a vector space equipped with basis to its dual vector space,
taking basis elements to corresponding dual basis elements. -/
def to_dual : M →ₗ[R] module.dual R M :=
b.constr ℕ $ λ v, b.constr ℕ $ λ w, if w = v then (1 : R) else 0
lemma to_dual_apply (i j : ι) :
b.to_dual (b i) (b j) = if i = j then 1 else 0 :=
by { erw [constr_basis b, constr_basis b], ac_refl }
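-- Illustrative sketch: on the diagonal, evaluating `to_dual (b i)` at `b i` itself gives `1`.
example (i : ι) : b.to_dual (b i) (b i) = 1 := by simp [b.to_dual_apply]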
@[simp] lemma to_dual_total_left (f : ι →₀ R) (i : ι) :
b.to_dual (finsupp.total ι M R b f) (b i) = f i :=
begin
rw [finsupp.total_apply, finsupp.sum, linear_map.map_sum, linear_map.sum_apply],
simp_rw [linear_map.map_smul, linear_map.smul_apply, to_dual_apply, smul_eq_mul,
mul_boole, finset.sum_ite_eq'],
split_ifs with h,
{ refl },
{ rw finsupp.not_mem_support_iff.mp h }
end
@[simp] lemma to_dual_total_right (f : ι →₀ R) (i : ι) :
b.to_dual (b i) (finsupp.total ι M R b f) = f i :=
begin
rw [finsupp.total_apply, finsupp.sum, linear_map.map_sum],
simp_rw [linear_map.map_smul, to_dual_apply, smul_eq_mul, mul_boole, finset.sum_ite_eq],
split_ifs with h,
{ refl },
{ rw finsupp.not_mem_support_iff.mp h }
end
lemma to_dual_apply_left (m : M) (i : ι) : b.to_dual m (b i) = b.repr m i :=
by rw [← b.to_dual_total_left, b.total_repr]
lemma to_dual_apply_right (i : ι) (m : M) : b.to_dual (b i) m = b.repr m i :=
by rw [← b.to_dual_total_right, b.total_repr]
lemma coe_to_dual_self (i : ι) : b.to_dual (b i) = b.coord i :=
by { ext, apply to_dual_apply_right }
/-- `b.to_dual_flip m` is the linear map sending `w` to `b.to_dual w m`. -/
def to_dual_flip (m : M) : (M →ₗ[R] R) := b.to_dual.flip m
lemma to_dual_flip_apply (m₁ m₂ : M) : b.to_dual_flip m₁ m₂ = b.to_dual m₂ m₁ := rfl
lemma to_dual_eq_repr (m : M) (i : ι) : b.to_dual m (b i) = b.repr m i :=
b.to_dual_apply_left m i
lemma to_dual_eq_equiv_fun [fintype ι] (m : M) (i : ι) : b.to_dual m (b i) = b.equiv_fun m i :=
by rw [b.equiv_fun_apply, to_dual_eq_repr]
lemma to_dual_inj (m : M) (a : b.to_dual m = 0) : m = 0 :=
begin
rw [← mem_bot R, ← b.repr.ker, mem_ker, linear_equiv.coe_coe],
apply finsupp.ext,
intro b,
rw [← to_dual_eq_repr, a],
refl
end
theorem to_dual_ker : b.to_dual.ker = ⊥ :=
ker_eq_bot'.mpr b.to_dual_inj
theorem to_dual_range [fin : fintype ι] : b.to_dual.range = ⊤ :=
begin
rw eq_top_iff',
intro f,
rw linear_map.mem_range,
let lin_comb : ι →₀ R := finsupp.on_finset fin.elems (λ i, f.to_fun (b i)) _,
{ use finsupp.total ι M R b lin_comb,
apply b.ext,
{ intros i,
rw [b.to_dual_eq_repr _ i, repr_total b],
{ refl } } },
{ intros a _,
apply fin.complete }
end
end comm_semiring
section comm_ring
variables [comm_ring R] [add_comm_group M] [module R M] [decidable_eq ι]
variables (b : basis ι R M)
/-- A module with a basis indexed by a `fintype` is linearly equivalent to its dual space. -/
@[simps]
def to_dual_equiv [fintype ι] : M ≃ₗ[R] (dual R M) :=
linear_equiv.of_bijective b.to_dual b.to_dual_ker b.to_dual_range
/-- Maps a basis of `M` to a basis of the dual space. -/
def dual_basis [fintype ι] : basis ι R (dual R M) :=
b.map b.to_dual_equiv
-- We use `j = i` to match `basis.repr_self`
lemma dual_basis_apply_self [fintype ι] (i j : ι) :
b.dual_basis i (b j) = if j = i then 1 else 0 :=
by { convert b.to_dual_apply i j using 2, rw @eq_comm _ j i }
lemma total_dual_basis [fintype ι] (f : ι →₀ R) (i : ι) :
finsupp.total ι (dual R M) R b.dual_basis f (b i) = f i :=
begin
rw [finsupp.total_apply, finsupp.sum_fintype, linear_map.sum_apply],
{ simp_rw [linear_map.smul_apply, smul_eq_mul, dual_basis_apply_self, mul_boole,
finset.sum_ite_eq, if_pos (finset.mem_univ i)] },
{ intro, rw zero_smul },
end
lemma dual_basis_repr [fintype ι] (l : dual R M) (i : ι) :
b.dual_basis.repr l i = l (b i) :=
by rw [← total_dual_basis b, basis.total_repr b.dual_basis l]
lemma dual_basis_equiv_fun [fintype ι] (l : dual R M) (i : ι) :
b.dual_basis.equiv_fun l i = l (b i) :=
by rw [basis.equiv_fun_apply, dual_basis_repr]
lemma dual_basis_apply [fintype ι] (i : ι) (m : M) : b.dual_basis i m = b.repr m i :=
b.to_dual_apply_right i m
@[simp] lemma coe_dual_basis [fintype ι] :
⇑b.dual_basis = b.coord :=
by { ext i x, apply dual_basis_apply }
@[simp] lemma to_dual_to_dual [fintype ι] :
b.dual_basis.to_dual.comp b.to_dual = dual.eval R M :=
begin
refine b.ext (λ i, b.dual_basis.ext (λ j, _)),
rw [linear_map.comp_apply, to_dual_apply_left, coe_to_dual_self, ← coe_dual_basis,
dual.eval_apply, basis.repr_self, finsupp.single_apply, dual_basis_apply_self]
end
theorem eval_ker {ι : Type*} (b : basis ι R M) :
(dual.eval R M).ker = ⊥ :=
begin
rw ker_eq_bot',
intros m hm,
simp_rw [linear_map.ext_iff, dual.eval_apply, zero_apply] at hm,
exact (basis.forall_coord_eq_zero_iff _).mp (λ i, hm (b.coord i))
end
lemma eval_range {ι : Type*} [fintype ι] (b : basis ι R M) :
(eval R M).range = ⊤ :=
begin
classical,
rw [← b.to_dual_to_dual, range_comp, b.to_dual_range, map_top, to_dual_range _],
apply_instance
end
/-- A module with a basis indexed by a `fintype` is linearly equivalent to the dual of its dual space. -/
def eval_equiv {ι : Type*} [fintype ι] (b : basis ι R M) : M ≃ₗ[R] dual R (dual R M) :=
linear_equiv.of_bijective (eval R M) b.eval_ker b.eval_range
@[simp] lemma eval_equiv_to_linear_map {ι : Type*} [fintype ι] (b : basis ι R M) :
(b.eval_equiv).to_linear_map = dual.eval R M := rfl
end comm_ring
/-- `simp` normal form version of `total_dual_basis` -/
@[simp] lemma total_coord [comm_ring R] [add_comm_group M] [module R M] [fintype ι]
(b : basis ι R M) (f : ι →₀ R) (i : ι) :
finsupp.total ι (dual R M) R b.coord f (b i) = f i :=
by { haveI := classical.dec_eq ι, rw [← coe_dual_basis, total_dual_basis] }
-- TODO(jmc): generalize to rings, once `module.rank` is generalized
theorem dual_dim_eq [field K] [add_comm_group V] [module K V] [fintype ι] (b : basis ι K V) :
cardinal.lift (module.rank K V) = module.rank K (dual K V) :=
begin
classical,
have := linear_equiv.lift_dim_eq b.to_dual_equiv,
simp only [cardinal.lift_umax] at this,
rw [this, ← cardinal.lift_umax],
apply cardinal.lift_id,
end
end basis
namespace module
variables {K V : Type*}
variables [field K] [add_comm_group V] [module K V]
open module module.dual submodule linear_map cardinal basis finite_dimensional
theorem eval_ker : (eval K V).ker = ⊥ :=
by { classical, exact (basis.of_vector_space K V).eval_ker }
-- TODO(jmc): generalize to rings, once `module.rank` is generalized
theorem dual_dim_eq [finite_dimensional K V] :
cardinal.lift (module.rank K V) = module.rank K (dual K V) :=
(basis.of_vector_space K V).dual_dim_eq
lemma erange_coe [finite_dimensional K V] : (eval K V).range = ⊤ :=
by { classical, exact (basis.of_vector_space K V).eval_range }
variables (K V)
/-- A finite-dimensional vector space is linearly equivalent to the dual of its dual space. -/
def eval_equiv [finite_dimensional K V] : V ≃ₗ[K] dual K (dual K V) :=
linear_equiv.of_bijective (eval K V) eval_ker (erange_coe)
variables {K V}
@[simp] lemma eval_equiv_to_linear_map [finite_dimensional K V] :
(eval_equiv K V).to_linear_map = dual.eval K V := rfl
end module
section dual_pair
open module
variables {R M ι : Type*}
variables [comm_ring R] [add_comm_group M] [module R M] [decidable_eq ι]
/-- `e` and `ε` have the characteristic properties of a basis and its dual basis. -/
@[nolint has_inhabited_instance]
structure dual_pair (e : ι → M) (ε : ι → (dual R M)) :=
(eval : ∀ i j : ι, ε i (e j) = if i = j then 1 else 0)
(total : ∀ {m : M}, (∀ i, ε i m = 0) → m = 0)
[finite : ∀ m : M, fintype {i | ε i m ≠ 0}]
end dual_pair
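-- Illustrative sketch (informal): for `M = ι →₀ R`, taking `e i = finsupp.single i 1` and
-- `ε i` the `i`-th coordinate functional gives a `dual_pair`: `eval` is the Kronecker delta,
-- `total` says a finitely supported function with all coordinates zero is zero, and `finite`
-- holds because each `m` has finite support.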
namespace dual_pair
open module module.dual linear_map function
variables {R M ι : Type*}
variables [comm_ring R] [add_comm_group M] [module R M]
variables {e : ι → M} {ε : ι → dual R M}
/-- The coefficients of `v` on the basis `e` -/
def coeffs [decidable_eq ι] (h : dual_pair e ε) (m : M) : ι →₀ R :=
{ to_fun := λ i, ε i m,
support := by { haveI := h.finite m, exact {i : ι | ε i m ≠ 0}.to_finset },
mem_support_to_fun := by {intro i, rw set.mem_to_finset, exact iff.rfl } }
@[simp] lemma coeffs_apply [decidable_eq ι] (h : dual_pair e ε) (m : M) (i : ι) :
h.coeffs m i = ε i m := rfl
/-- Linear combinations of elements of `e`.
This is a convenient abbreviation for `finsupp.total _ M R e l`. -/
def lc {ι} (e : ι → M) (l : ι →₀ R) : M := l.sum (λ (i : ι) (a : R), a • (e i))
lemma lc_def (e : ι → M) (l : ι →₀ R) : lc e l = finsupp.total _ _ _ e l := rfl
variables [decidable_eq ι] (h : dual_pair e ε)
include h
lemma dual_lc (l : ι →₀ R) (i : ι) : ε i (dual_pair.lc e l) = l i :=
begin
erw linear_map.map_sum,
simp only [h.eval, map_smul, smul_eq_mul],
rw finset.sum_eq_single i,
{ simp },
{ intros q q_in q_ne,
simp [q_ne.symm] },
{ intro p_not_in,
simp [finsupp.not_mem_support_iff.1 p_not_in] },
end
@[simp]
lemma coeffs_lc (l : ι →₀ R) : h.coeffs (dual_pair.lc e l) = l :=
by { ext i, rw [h.coeffs_apply, h.dual_lc] }
/-- For any `m : M`, the linear combination of the `e i` with coefficients `ε i m` recovers `m`:
`lc e (h.coeffs m) = m`. -/
@[simp]
lemma lc_coeffs (m : M) : dual_pair.lc e (h.coeffs m) = m :=
begin
refine eq_of_sub_eq_zero (h.total _),
intros i,
simp [-sub_eq_add_neg, linear_map.map_sub, h.dual_lc, sub_eq_zero]
end
/-- `(h : dual_pair e ε).basis` shows the family of vectors `e` forms a basis. -/
@[simps]
def basis : basis ι R M :=
basis.of_repr
{ to_fun := coeffs h,
inv_fun := lc e,
left_inv := lc_coeffs h,
right_inv := coeffs_lc h,
map_add' := λ v w, by { ext i, exact (ε i).map_add v w },
map_smul' := λ c v, by { ext i, exact (ε i).map_smul c v } }
@[simp] lemma coe_basis : ⇑h.basis = e :=
by { ext i, rw basis.apply_eq_iff, ext j,
rw [h.basis_repr_apply, coeffs_apply, h.eval, finsupp.single_apply],
convert if_congr eq_comm rfl rfl } -- `convert` to get rid of a `decidable_eq` mismatch
lemma mem_of_mem_span {H : set ι} {x : M} (hmem : x ∈ submodule.span R (e '' H)) :
∀ i : ι, ε i x ≠ 0 → i ∈ H :=
begin
intros i hi,
rcases (finsupp.mem_span_image_iff_total _).mp hmem with ⟨l, supp_l, rfl⟩,
apply not_imp_comm.mp ((finsupp.mem_supported' _ _).mp supp_l i),
rwa [← lc_def, h.dual_lc] at hi
end
lemma coe_dual_basis [fintype ι] : ⇑h.basis.dual_basis = ε :=
funext (λ i, h.basis.ext (λ j, by rw [h.basis.dual_basis_apply_self, h.coe_basis, h.eval,
if_congr eq_comm rfl rfl]))
end dual_pair
namespace submodule
universes u v w
variables {R : Type u} {M : Type v} [comm_ring R] [add_comm_group M] [module R M]
variable {W : submodule R M}
/-- The `dual_restrict` of a submodule `W` of `M` is the linear map from the
dual of `M` to the dual of `W`, obtained by restricting each functional on `M`
to `W`. -/
def dual_restrict (W : submodule R M) :
module.dual R M →ₗ[R] module.dual R W :=
linear_map.dom_restrict' W
@[simp] lemma dual_restrict_apply
(W : submodule R M) (φ : module.dual R M) (x : W) :
W.dual_restrict φ x = φ (x : M) := rfl
/-- The `dual_annihilator` of a submodule `W` is the set of linear maps `φ` such
that `φ w = 0` for all `w ∈ W`. -/
def dual_annihilator {R : Type u} {M : Type v} [comm_ring R] [add_comm_group M]
[module R M] (W : submodule R M) : submodule R $ module.dual R M :=
W.dual_restrict.ker
@[simp] lemma mem_dual_annihilator (φ : module.dual R M) :
φ ∈ W.dual_annihilator ↔ ∀ w ∈ W, φ w = 0 :=
begin
refine linear_map.mem_ker.trans _,
simp_rw [linear_map.ext_iff, dual_restrict_apply],
exact ⟨λ h w hw, h ⟨w, hw⟩, λ h w, h w.1 w.2⟩
end
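-- Informal example: with `M = ℝ × ℝ` and `W` the span of `(1, 0)`, a functional `φ` belongs to
-- `W.dual_annihilator` exactly when `φ (1, 0) = 0`, i.e. when `φ` is a scalar multiple of the
-- second coordinate projection.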
lemma dual_restrict_ker_eq_dual_annihilator (W : submodule R M) :
W.dual_restrict.ker = W.dual_annihilator :=
rfl
lemma dual_annihilator_sup_eq_inf_dual_annihilator (U V : submodule R M) :
(U ⊔ V).dual_annihilator = U.dual_annihilator ⊓ V.dual_annihilator :=
begin
ext φ,
rw [mem_inf, mem_dual_annihilator, mem_dual_annihilator, mem_dual_annihilator],
split; intro h,
{ refine ⟨_, _⟩;
intros x hx,
exact h x (mem_sup.2 ⟨x, hx, 0, zero_mem _, add_zero _⟩),
exact h x (mem_sup.2 ⟨0, zero_mem _, x, hx, zero_add _⟩) },
{ simp_rw mem_sup,
rintro _ ⟨x, hx, y, hy, rfl⟩,
rw [linear_map.map_add, h.1 _ hx, h.2 _ hy, add_zero] }
end
/-- The pullback of a submodule in the dual space along the evaluation map. -/
def dual_annihilator_comap (Φ : submodule R (module.dual R M)) : submodule R M :=
Φ.dual_annihilator.comap (module.dual.eval R M)
lemma mem_dual_annihilator_comap_iff {Φ : submodule R (module.dual R M)} (x : M) :
x ∈ Φ.dual_annihilator_comap ↔ ∀ φ ∈ Φ, (φ x : R) = 0 :=
by simp_rw [dual_annihilator_comap, mem_comap, mem_dual_annihilator, module.dual.eval_apply]
end submodule
namespace subspace
open submodule linear_map
universes u v w
-- We work in vector spaces because `exists_is_compl` only holds for vector spaces
variables {K : Type u} {V : Type v} [field K] [add_comm_group V] [module K V]
/-- Given a subspace `W` of `V` and an element of its dual `φ`, `dual_lift W φ` is
the natural extension of `φ` to an element of the dual of `V`.
That is, `dual_lift W φ` sends `w ∈ W` to `φ w` and `x` in a chosen complement of `W` to `0`. -/
noncomputable def dual_lift (W : subspace K V) :
module.dual K W →ₗ[K] module.dual K V :=
let h := classical.indefinite_description _ W.exists_is_compl in
(linear_map.of_is_compl_prod h.2).comp (linear_map.inl _ _ _)
variable {W : subspace K V}
@[simp] lemma dual_lift_of_subtype {φ : module.dual K W} (w : W) :
W.dual_lift φ (w : V) = φ w :=
by { erw of_is_compl_left_apply _ w, refl }
lemma dual_lift_of_mem {φ : module.dual K W} {w : V} (hw : w ∈ W) :
W.dual_lift φ w = φ ⟨w, hw⟩ :=
dual_lift_of_subtype ⟨w, hw⟩
@[simp] lemma dual_restrict_comp_dual_lift (W : subspace K V) :
W.dual_restrict.comp W.dual_lift = 1 :=
by { ext φ x, simp }
lemma dual_restrict_left_inverse (W : subspace K V) :
function.left_inverse W.dual_restrict W.dual_lift :=
λ x, show W.dual_restrict.comp W.dual_lift x = x,
by { rw [dual_restrict_comp_dual_lift], refl }
lemma dual_lift_right_inverse (W : subspace K V) :
function.right_inverse W.dual_lift W.dual_restrict :=
W.dual_restrict_left_inverse
lemma dual_restrict_surjective :
function.surjective W.dual_restrict :=
W.dual_lift_right_inverse.surjective
lemma dual_lift_injective : function.injective W.dual_lift :=
W.dual_restrict_left_inverse.injective
/-- The quotient by the `dual_annihilator` of a subspace is isomorphic to the
dual of that subspace. -/
noncomputable def quot_annihilator_equiv (W : subspace K V) :
W.dual_annihilator.quotient ≃ₗ[K] module.dual K W :=
(quot_equiv_of_eq _ _ W.dual_restrict_ker_eq_dual_annihilator).symm.trans $
W.dual_restrict.quot_ker_equiv_of_surjective dual_restrict_surjective
/-- The natural isomorphism from the dual of a subspace `W` to `W.dual_lift.range`. -/
noncomputable def dual_equiv_dual (W : subspace K V) :
module.dual K W ≃ₗ[K] W.dual_lift.range :=
linear_equiv.of_injective _ $ ker_eq_bot.2 dual_lift_injective
lemma dual_equiv_dual_def (W : subspace K V) :
W.dual_equiv_dual.to_linear_map = W.dual_lift.range_restrict := rfl
@[simp] lemma dual_equiv_dual_apply (φ : module.dual K W) :
W.dual_equiv_dual φ = ⟨W.dual_lift φ, mem_range.2 ⟨φ, rfl⟩⟩ := rfl
section
open_locale classical
open finite_dimensional
variables {V₁ : Type*} [add_comm_group V₁] [module K V₁]
instance [H : finite_dimensional K V] : finite_dimensional K (module.dual K V) :=
linear_equiv.finite_dimensional (basis.of_vector_space K V).to_dual_equiv
variables [finite_dimensional K V] [finite_dimensional K V₁]
@[simp] lemma dual_finrank_eq :
finrank K (module.dual K V) = finrank K V :=
linear_equiv.finrank_eq (basis.of_vector_space K V).to_dual_equiv.symm
/-- The quotient by the dual is isomorphic to its dual annihilator. -/
noncomputable def quot_dual_equiv_annihilator (W : subspace K V) :
W.dual_lift.range.quotient ≃ₗ[K] W.dual_annihilator :=
linear_equiv.quot_equiv_of_quot_equiv $
linear_equiv.trans W.quot_annihilator_equiv W.dual_equiv_dual
/-- The quotient by a subspace is isomorphic to its dual annihilator. -/
noncomputable def quot_equiv_annihilator (W : subspace K V) :
W.quotient ≃ₗ[K] W.dual_annihilator :=
begin
refine _ ≪≫ₗ W.quot_dual_equiv_annihilator,
refine linear_equiv.quot_equiv_of_equiv _ (basis.of_vector_space K V).to_dual_equiv,
exact (basis.of_vector_space K W).to_dual_equiv.trans W.dual_equiv_dual
end
open finite_dimensional
@[simp]
lemma finrank_dual_annihilator_comap_eq {Φ : subspace K (module.dual K V)} :
finrank K Φ.dual_annihilator_comap = finrank K Φ.dual_annihilator :=
begin
rw [submodule.dual_annihilator_comap, ← module.eval_equiv_to_linear_map],
exact linear_equiv.finrank_eq (linear_equiv.of_submodule' _ _),
end
lemma finrank_add_finrank_dual_annihilator_comap_eq
(W : subspace K (module.dual K V)) :
finrank K W + finrank K W.dual_annihilator_comap = finrank K V :=
begin
rw [finrank_dual_annihilator_comap_eq, W.quot_equiv_annihilator.finrank_eq.symm, add_comm,
submodule.finrank_quotient_add_finrank, subspace.dual_finrank_eq],
end
end
end subspace
variables {R : Type*} [comm_ring R] {M₁ : Type*} {M₂ : Type*}
variables [add_comm_group M₁] [module R M₁] [add_comm_group M₂] [module R M₂]
open module
/-- Given a linear map `f : M₁ →ₗ[R] M₂`, `f.dual_map` is the linear map from the dual of
`M₂` to the dual of `M₁`, sending a functional `φ` to `φ ∘ f`. -/
def linear_map.dual_map (f : M₁ →ₗ[R] M₂) : dual R M₂ →ₗ[R] dual R M₁ :=
linear_map.lcomp R R f
@[simp] lemma linear_map.dual_map_apply (f : M₁ →ₗ[R] M₂) (g : dual R M₂) (x : M₁) :
f.dual_map g x = g (f x) :=
linear_map.lcomp_apply f g x
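-- In other words, `f.dual_map` is precomposition with `f`: for `g : dual R M₂` one has
-- `f.dual_map g = g.comp f`.  Over a field, if `f` is given by a matrix `A` in chosen bases,
-- then `f.dual_map` acts on the dual bases by the transpose `Aᵀ`.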
@[simp] lemma linear_map.dual_map_id :
(linear_map.id : M₁ →ₗ[R] M₁).dual_map = linear_map.id :=
by { ext, refl }
lemma linear_map.dual_map_comp_dual_map {M₃ : Type*} [add_comm_group M₃] [module R M₃]
(f : M₁ →ₗ[R] M₂) (g : M₂ →ₗ[R] M₃) :
f.dual_map.comp g.dual_map = (g.comp f).dual_map :=
rfl
/-- The `linear_equiv` version of `linear_map.dual_map`. -/
def linear_equiv.dual_map (f : M₁ ≃ₗ[R] M₂) : dual R M₂ ≃ₗ[R] dual R M₁ :=
{ inv_fun := f.symm.to_linear_map.dual_map,
left_inv :=
begin
intro φ, ext x,
simp only [linear_map.dual_map_apply, linear_equiv.coe_to_linear_map,
linear_map.to_fun_eq_coe, linear_equiv.apply_symm_apply]
end,
right_inv :=
begin
intro φ, ext x,
simp only [linear_map.dual_map_apply, linear_equiv.coe_to_linear_map,
linear_map.to_fun_eq_coe, linear_equiv.symm_apply_apply]
end,
.. f.to_linear_map.dual_map }
@[simp] lemma linear_equiv.dual_map_apply (f : M₁ ≃ₗ[R] M₂) (g : dual R M₂) (x : M₁) :
f.dual_map g x = g (f x) :=
linear_map.lcomp_apply f g x
@[simp] lemma linear_equiv.dual_map_refl :
(linear_equiv.refl R M₁).dual_map = linear_equiv.refl R (dual R M₁) :=
by { ext, refl }
@[simp] lemma linear_equiv.dual_map_symm {f : M₁ ≃ₗ[R] M₂} :
(linear_equiv.dual_map f).symm = linear_equiv.dual_map f.symm := rfl
lemma linear_equiv.dual_map_trans {M₃ : Type*} [add_comm_group M₃] [module R M₃]
(f : M₁ ≃ₗ[R] M₂) (g : M₂ ≃ₗ[R] M₃) :
g.dual_map.trans f.dual_map = (f.trans g).dual_map :=
rfl
namespace linear_map
variable (f : M₁ →ₗ[R] M₂)
lemma ker_dual_map_eq_dual_annihilator_range :
f.dual_map.ker = f.range.dual_annihilator :=
begin
ext φ, split; intro hφ,
{ rw mem_ker at hφ,
rw submodule.mem_dual_annihilator,
rintro y ⟨x, rfl⟩,
rw [← dual_map_apply, hφ, zero_apply] },
{ ext x,
rw dual_map_apply,
rw submodule.mem_dual_annihilator at hφ,
exact hφ (f x) ⟨x, rfl⟩ }
end
lemma range_dual_map_le_dual_annihilator_ker :
f.dual_map.range ≤ f.ker.dual_annihilator :=
begin
rintro _ ⟨ψ, rfl⟩,
simp_rw [submodule.mem_dual_annihilator, mem_ker],
rintro x hx,
rw [dual_map_apply, hx, map_zero]
end
section finite_dimensional
variables {K : Type*} [field K] {V₁ : Type*} {V₂ : Type*}
variables [add_comm_group V₁] [module K V₁] [add_comm_group V₂] [module K V₂]
open finite_dimensional
variable [finite_dimensional K V₂]
@[simp] lemma finrank_range_dual_map_eq_finrank_range (f : V₁ →ₗ[K] V₂) :
finrank K f.dual_map.range = finrank K f.range :=
begin
have := submodule.finrank_quotient_add_finrank f.range,
rw [(subspace.quot_equiv_annihilator f.range).finrank_eq,
← ker_dual_map_eq_dual_annihilator_range] at this,
conv_rhs at this { rw ← subspace.dual_finrank_eq },
refine add_left_injective (finrank K f.dual_map.ker) _,
change _ + _ = _ + _,
rw [finrank_range_add_finrank_ker f.dual_map, add_comm, this],
end
lemma range_dual_map_eq_dual_annihilator_ker [finite_dimensional K V₁] (f : V₁ →ₗ[K] V₂) :
f.dual_map.range = f.ker.dual_annihilator :=
begin
refine eq_of_le_of_finrank_eq f.range_dual_map_le_dual_annihilator_ker _,
have := submodule.finrank_quotient_add_finrank f.ker,
rw (subspace.quot_equiv_annihilator f.ker).finrank_eq at this,
refine add_left_injective (finrank K f.ker) _,
simp_rw [this, finrank_range_dual_map_eq_finrank_range],
exact finrank_range_add_finrank_ker f,
end
end finite_dimensional
end linear_map
-- File: /library/logic/cast.lean (repo: soonhokong/lean, license: Apache-2.0)
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
Casts and heterogeneous equality. See also init.datatypes and init.logic.
-/
import logic.eq logic.quantifiers
open eq.ops
namespace heq
universe variable u
variables {A B C : Type.{u}} {a a' : A} {b b' : B} {c : C}
theorem drec_on {C : Π {B : Type} (b : B), a == b → Type} (H₁ : a == b) (H₂ : C a (refl a)) :
C b H₁ :=
heq.rec (λ H₁ : a == a, show C a H₁, from H₂) H₁ H₁
theorem to_cast_eq (H : a == b) : cast (type_eq_of_heq H) a = b :=
drec_on H !cast_eq
end heq
section
universe variables u v
variables {A A' B C : Type.{u}} {P P' : A → Type.{v}} {a a' : A} {b : B}
theorem hcongr_fun {f : Π x, P x} {f' : Π x, P' x} (a : A) (H₁ : f == f') (H₂ : P = P') :
f a == f' a :=
begin
cases H₂, cases H₁, reflexivity
end
theorem hcongr {P' : A' → Type} {f : Π a, P a} {f' : Π a', P' a'} {a : A} {a' : A'}
(Hf : f == f') (HP : P == P') (Ha : a == a') : f a == f' a' :=
begin
cases Ha, cases HP, cases Hf, reflexivity
end
theorem hcongr_arg (f : Πx, P x) {a b : A} (H : a = b) : f a == f b :=
H ▸ (heq.refl (f a))
end
section
variables {A : Type} {B : A → Type} {C : Πa, B a → Type} {D : Πa b, C a b → Type}
variables {a a' : A} {b : B a} {b' : B a'} {c : C a b} {c' : C a' b'}
theorem hcongr_arg2 (f : Πa b, C a b) (Ha : a = a') (Hb : b == b') : f a b == f a' b' :=
hcongr (hcongr_arg f Ha) (hcongr_arg C Ha) Hb
theorem hcongr_arg3 (f : Πa b c, D a b c) (Ha : a = a') (Hb : b == b') (Hc : c == c')
: f a b c == f a' b' c' :=
hcongr (hcongr_arg2 f Ha Hb) (hcongr_arg2 D Ha Hb) Hc
end
section
universe variables u v
variables {A A' B C : Type.{u}} {P P' : A → Type.{v}} {a a' : A} {b : B}
-- should H₁ be explicit? (making it explicit is useful in e.g. hproof_irrel)
theorem eq_rec_to_heq {H₁ : a = a'} {p : P a} {p' : P a'} (H₂ : eq.rec_on H₁ p = p') : p == p' :=
by subst H₁; subst H₂
theorem cast_to_heq {H₁ : A = B} (H₂ : cast H₁ a = b) : a == b :=
eq_rec_to_heq H₂
theorem hproof_irrel {a b : Prop} (H : a = b) (H₁ : a) (H₂ : b) : H₁ == H₂ :=
eq_rec_to_heq (proof_irrel (cast H H₁) H₂)
--TODO: generalize to eq.rec. This is a special case of rec_on_comp in eq.lean
theorem cast_trans (Hab : A = B) (Hbc : B = C) (a : A) :
cast Hbc (cast Hab a) = cast (Hab ⬝ Hbc) a :=
by subst Hab
theorem pi_eq (H : P = P') : (Π x, P x) = (Π x, P' x) :=
by subst H
theorem rec_on_app (H : P = P') (f : Π x, P x) (a : A) : eq.rec_on H f a == f a :=
by subst H
theorem rec_on_pull (H : P = P') (f : Π x, P x) (a : A) :
eq.rec_on H f a = eq.rec_on (congr_fun H a) (f a) :=
eq_of_heq (calc
eq.rec_on H f a == f a : rec_on_app H f a
... == eq.rec_on (congr_fun H a) (f a) : heq.symm (eq_rec_heq (congr_fun H a) (f a)))
theorem cast_app (H : P = P') (f : Π x, P x) (a : A) : cast (pi_eq H) f a == f a :=
by subst H
end
-- function extensionality wrt heterogeneous equality
theorem hfunext {A : Type} {B : A → Type} {B' : A → Type} {f : Π x, B x} {g : Π x, B' x}
(H : ∀ a, f a == g a) : f == g :=
cast_to_heq (funext (λ a, eq_of_heq (heq.trans (cast_app (funext (λ x, type_eq_of_heq (H x))) f a) (H a))))
section
variables {A : Type} {B : A → Type} {C : Πa, B a → Type} {D : Πa b, C a b → Type}
{E : Πa b c, D a b c → Type} {F : Type}
variables {a a' : A}
{b : B a} {b' : B a'}
{c : C a b} {c' : C a' b'}
{d : D a b c} {d' : D a' b' c'}
theorem hcongr_arg4 (f : Πa b c d, E a b c d)
(Ha : a = a') (Hb : b == b') (Hc : c == c') (Hd : d == d') : f a b c d == f a' b' c' d' :=
hcongr (hcongr_arg3 f Ha Hb Hc) (hcongr_arg3 E Ha Hb Hc) Hd
theorem dcongr_arg2 (f : Πa, B a → F) (Ha : a = a') (Hb : eq.rec_on Ha b = b')
: f a b = f a' b' :=
eq_of_heq (hcongr_arg2 f Ha (eq_rec_to_heq Hb))
theorem dcongr_arg3 (f : Πa b, C a b → F) (Ha : a = a') (Hb : eq.rec_on Ha b = b')
(Hc : cast (dcongr_arg2 C Ha Hb) c = c') : f a b c = f a' b' c' :=
eq_of_heq (hcongr_arg3 f Ha (eq_rec_to_heq Hb) (eq_rec_to_heq Hc))
theorem dcongr_arg4 (f : Πa b c, D a b c → F) (Ha : a = a') (Hb : eq.rec_on Ha b = b')
(Hc : cast (dcongr_arg2 C Ha Hb) c = c')
(Hd : cast (dcongr_arg3 D Ha Hb Hc) d = d') : f a b c d = f a' b' c' d' :=
eq_of_heq (hcongr_arg4 f Ha (eq_rec_to_heq Hb) (eq_rec_to_heq Hc) (eq_rec_to_heq Hd))
-- mixed versions (we want them, for example, if C a' b' is a subsingleton, like a proposition;
-- then proving eq is easier than proving heq)
theorem hdcongr_arg3 (f : Πa b, C a b → F) (Ha : a = a') (Hb : b == b')
(Hc : cast (eq_of_heq (hcongr_arg2 C Ha Hb)) c = c')
: f a b c = f a' b' c' :=
eq_of_heq (hcongr_arg3 f Ha Hb (eq_rec_to_heq Hc))
theorem hhdcongr_arg4 (f : Πa b c, D a b c → F) (Ha : a = a') (Hb : b == b')
(Hc : c == c')
(Hd : cast (dcongr_arg3 D Ha (!eq.rec_on_irrel_arg ⬝ heq.to_cast_eq Hb)
(!eq.rec_on_irrel_arg ⬝ heq.to_cast_eq Hc)) d = d')
: f a b c d = f a' b' c' d' :=
eq_of_heq (hcongr_arg4 f Ha Hb Hc (eq_rec_to_heq Hd))
theorem hddcongr_arg4 (f : Πa b c, D a b c → F) (Ha : a = a') (Hb : b == b')
(Hc : cast (eq_of_heq (hcongr_arg2 C Ha Hb)) c = c')
(Hd : cast (hdcongr_arg3 D Ha Hb Hc) d = d')
: f a b c d = f a' b' c' d' :=
eq_of_heq (hcongr_arg4 f Ha Hb (eq_rec_to_heq Hc) (eq_rec_to_heq Hd))
end
-- File: /tests/lean/run/sec_var.lean (repo: soonhokong/lean, license: Apache-2.0)
import logic
section
parameter A : Type
definition foo : ∀ ⦃ a b : A ⦄, a = b → a = b :=
take a b H, H
variable a : A
set_option pp.implicit true
check foo (eq.refl a)
check foo
check foo = (λ (a b : A) (H : a = b), H)
end
check foo = (λ (A : Type) (a b : A) (H : a = b), H)
section
variable A : Type
definition foo2 : ∀ ⦃ a b : A ⦄, a = b → a = b :=
take a b H, H
variable a : A
set_option pp.implicit true
check foo2 A (eq.refl a)
check foo2
check foo2 A = (λ (a b : A) (H : a = b), H)
end
-- File: /src/lake/test/meta/lakefile.lean (repo: leanprover/lean4, license: permissive, Apache-2.0 and others)
import Lake
open Lake DSL
package test_meta
#print "lorem"
meta if get_config? baz |>.isSome then #print "baz"
meta if get_config? env = some "foo" then do
#print "foo"
#print "1"
else meta if get_config? env = some "bar" then do
#print "bar"
#print "2"
#print "ipsum"
-- File: /tests/lean/run/secnot.lean (repo: leanprover-community/lean, license: Apache-2.0)
section
variable {A : Type}
definition f (a b : A) := a
infixl ` ◀ `:65 := f
variables a b : A
#check a ◀ b
end
inductive List (T : Type) : Type
| nil {} : List
| cons : T → List → List
namespace List
section
variable {T : Type}
notation (name := list) `[` l:(foldr `,` (h t, cons h t) nil) `]` := l
#check [(10:nat), 20, 30]
end
end List
open List
#check [(10:nat), 20, 40]
#check (10:nat) ◀ 20
-- File: /opposites.lean (repo: timjb/lean-category-theory, no license detected)
-- Copyright (c) 2017 Scott Morrison. All rights reserved.
-- Released under Apache 2.0 license as described in the file LICENSE.
-- Authors: Stephen Morgan, Scott Morrison
import .functor
open tqft.categories
open tqft.categories.functor
namespace tqft.categories
definition Opposite ( C : Category ) : Category :=
{
Obj := C.Obj,
Hom := λ X Y, C.Hom Y X,
compose := λ _ _ _ f g, C.compose g f,
identity := λ X, C.identity X,
left_identity := ♮,
right_identity := ♮,
associativity := ♮
}
definition OppositeFunctor { C D : Category } ( F : Functor C D ) : Functor (Opposite C) (Opposite D) :=
{
onObjects := F.onObjects,
onMorphisms := λ X Y f, F.onMorphisms f,
identities := ♯,
functoriality := ♯
}
end tqft.categories
-- File: /src/data/sym/basic.lean (repo: leanprover-community/mathlib, license: Apache-2.0)
/-
Copyright (c) 2020 Kyle Miller All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import data.multiset.basic
import data.vector.basic
import data.setoid.basic
import tactic.apply_fun
/-!
# Symmetric powers
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines symmetric powers of a type. The nth symmetric power
consists of homogeneous n-tuples modulo permutations by the symmetric
group.
The special case of 2-tuples is called the symmetric square, which is
addressed in more detail in `data.sym.sym2`.
TODO: This was created as supporting material for `sym2`; it
needs a fleshed-out interface.
## Tags
symmetric powers
-/
open function
/--
The nth symmetric power is n-tuples up to permutation. We define it
as a subtype of `multiset` since these are well developed in the
library. We also give a definition `sym.sym'` in terms of vectors, and we
show these are equivalent in `sym.sym_equiv_sym'`.
-/
def sym (α : Type*) (n : ℕ) := {s : multiset α // s.card = n}
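-- For example, `sym α 2` is the type of unordered pairs of elements of `α`: a term is a
-- multiset such as `a ::ₘ b ::ₘ 0` of cardinality two, and `a ::ₘ b ::ₘ 0 = b ::ₘ a ::ₘ 0`
-- because `multiset` already identifies lists up to permutation.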
instance sym.has_coe (α : Type*) (n : ℕ) : has_coe (sym α n) (multiset α) := coe_subtype
/--
This is the `list.perm` setoid lifted to `vector`.
See note [reducible non-instances].
-/
@[reducible]
def vector.perm.is_setoid (α : Type*) (n : ℕ) : setoid (vector α n) :=
(list.is_setoid α).comap subtype.val
local attribute [instance] vector.perm.is_setoid
namespace sym
variables {α β : Type*} {n n' m : ℕ} {s : sym α n} {a b : α}
lemma coe_injective : injective (coe : sym α n → multiset α) := subtype.coe_injective
@[simp, norm_cast] lemma coe_inj {s₁ s₂ : sym α n} : (s₁ : multiset α) = s₂ ↔ s₁ = s₂ :=
coe_injective.eq_iff
/--
Construct an element of the `n`th symmetric power from a multiset of cardinality `n`.
-/
@[simps, pattern]
abbreviation mk (m : multiset α) (h : m.card = n) : sym α n := ⟨m, h⟩
/--
The unique element in `sym α 0`.
-/
@[pattern] def nil : sym α 0 := ⟨0, multiset.card_zero⟩
@[simp] lemma coe_nil : (coe (@sym.nil α)) = (0 : multiset α) := rfl
/--
Inserts an element into the term of `sym α n`, increasing the length by one.
-/
@[pattern] def cons (a : α) (s : sym α n) : sym α n.succ :=
⟨a ::ₘ s.1, by rw [multiset.card_cons, s.2]⟩
infixr ` ::ₛ `:67 := cons
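-- Usage: `a ::ₛ b ::ₛ nil : sym α 2`; by `cons_swap` below this is the same term as
-- `b ::ₛ a ::ₛ nil`, reflecting that a symmetric power forgets the order of its entries.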
@[simp]
lemma cons_inj_right (a : α) (s s' : sym α n) : a ::ₛ s = a ::ₛ s' ↔ s = s' :=
subtype.ext_iff.trans $ (multiset.cons_inj_right _).trans subtype.ext_iff.symm
@[simp]
lemma cons_inj_left (a a' : α) (s : sym α n) : a ::ₛ s = a' ::ₛ s ↔ a = a' :=
subtype.ext_iff.trans $ multiset.cons_inj_left _
lemma cons_swap (a b : α) (s : sym α n) : a ::ₛ b ::ₛ s = b ::ₛ a ::ₛ s :=
subtype.ext $ multiset.cons_swap a b s.1
lemma coe_cons (s : sym α n) (a : α) : (a ::ₛ s : multiset α) = a ::ₘ s := rfl
/--
The canonical map that takes an `n`-tuple (a term of `vector α n`) and produces an element
of the `n`th symmetric power by forgetting the order of the entries.
-/
instance : has_lift (vector α n) (sym α n) :=
{ lift := λ x, ⟨↑x.val, (multiset.coe_card _).trans x.2⟩ }
@[simp] lemma of_vector_nil : ↑(vector.nil : vector α 0) = (sym.nil : sym α 0) := rfl
@[simp] lemma of_vector_cons (a : α) (v : vector α n) :
↑(vector.cons a v) = a ::ₛ (↑v : sym α n) := by { cases v, refl }
/--
`a ∈ s` means that `a` appears as one of the factors in `s`.
-/
instance : has_mem α (sym α n) := ⟨λ a s, a ∈ s.1⟩
instance decidable_mem [decidable_eq α] (a : α) (s : sym α n) : decidable (a ∈ s) :=
s.1.decidable_mem _
@[simp]
lemma mem_mk (a : α) (s : multiset α) (h : s.card = n) : a ∈ mk s h ↔ a ∈ s := iff.rfl
@[simp] lemma mem_cons : a ∈ b ::ₛ s ↔ a = b ∨ a ∈ s :=
multiset.mem_cons
@[simp] lemma mem_coe : a ∈ (s : multiset α) ↔ a ∈ s := iff.rfl
lemma mem_cons_of_mem (h : a ∈ s) : a ∈ b ::ₛ s :=
multiset.mem_cons_of_mem h
@[simp] lemma mem_cons_self (a : α) (s : sym α n) : a ∈ a ::ₛ s :=
multiset.mem_cons_self a s.1
lemma cons_of_coe_eq (a : α) (v : vector α n) : a ::ₛ (↑v : sym α n) = ↑(a ::ᵥ v) :=
subtype.ext $ by { cases v, refl }
lemma sound {a b : vector α n} (h : a.val ~ b.val) : (↑a : sym α n) = ↑b :=
subtype.ext $ quotient.sound h
/-- `erase s a h` is the sym obtained by subtracting 1 from the
multiplicity of `a`, where `h` is a proof that `a` is present in `s`. -/
def erase [decidable_eq α] (s : sym α (n + 1)) (a : α) (h : a ∈ s) : sym α n :=
⟨s.val.erase a, (multiset.card_erase_of_mem h).trans $ s.property.symm ▸ n.pred_succ⟩
@[simp] lemma erase_mk [decidable_eq α] (m : multiset α) (hc : m.card = n + 1) (a : α) (h : a ∈ m) :
(mk m hc).erase a h = mk (m.erase a) (by { rw [multiset.card_erase_of_mem h, hc], refl }) := rfl
@[simp] lemma coe_erase [decidable_eq α] {s : sym α n.succ} {a : α} (h : a ∈ s) :
(s.erase a h : multiset α) = multiset.erase s a := rfl
@[simp] lemma cons_erase [decidable_eq α] {s : sym α n.succ} {a : α} (h : a ∈ s) :
a ::ₛ s.erase a h = s :=
coe_injective $ multiset.cons_erase h
@[simp] lemma erase_cons_head [decidable_eq α] (s : sym α n) (a : α)
(h : a ∈ a ::ₛ s := mem_cons_self a s) : (a ::ₛ s).erase a h = s :=
coe_injective $ multiset.erase_cons_head a s.1
/--
Another definition of the nth symmetric power, using vectors modulo permutations. (See `sym`.)
-/
def sym' (α : Type*) (n : ℕ) := quotient (vector.perm.is_setoid α n)
/--
This is `cons` but for the alternative `sym'` definition.
-/
def cons' {α : Type*} {n : ℕ} : α → sym' α n → sym' α (nat.succ n) :=
λ a, quotient.map (vector.cons a) (λ ⟨l₁, h₁⟩ ⟨l₂, h₂⟩ h, list.perm.cons _ h)
notation (name := sym.cons') a :: b := cons' a b
/--
Multisets of cardinality n are equivalent to length-n vectors up to permutations.
-/
def sym_equiv_sym' {α : Type*} {n : ℕ} : sym α n ≃ sym' α n :=
equiv.subtype_quotient_equiv_quotient_subtype _ _ (λ _, by refl) (λ _ _, by refl)
lemma cons_equiv_eq_equiv_cons (α : Type*) (n : ℕ) (a : α) (s : sym α n) :
a :: sym_equiv_sym' s = sym_equiv_sym' (a ::ₛ s) :=
by { rcases s with ⟨⟨l⟩, _⟩, refl, }
instance : has_zero (sym α 0) := ⟨⟨0, rfl⟩⟩
instance : has_emptyc (sym α 0) := ⟨0⟩
lemma eq_nil_of_card_zero (s : sym α 0) : s = nil :=
subtype.ext $ multiset.card_eq_zero.1 s.2
instance unique_zero : unique (sym α 0) :=
⟨⟨nil⟩, eq_nil_of_card_zero⟩
/-- `replicate n a` is the sym containing only `a` with multiplicity `n`. -/
def replicate (n : ℕ) (a : α) : sym α n := ⟨multiset.replicate n a, multiset.card_replicate _ _⟩
lemma replicate_succ {a : α} {n : ℕ} : replicate n.succ a = a ::ₛ replicate n a := rfl
lemma coe_replicate : (replicate n a : multiset α) = multiset.replicate n a := rfl
@[simp] lemma mem_replicate : b ∈ replicate n a ↔ n ≠ 0 ∧ b = a := multiset.mem_replicate
lemma eq_replicate_iff : s = replicate n a ↔ ∀ b ∈ s, b = a :=
begin
rw [subtype.ext_iff, coe_replicate, multiset.eq_replicate],
exact and_iff_right s.2
end
lemma exists_mem (s : sym α n.succ) : ∃ a, a ∈ s :=
multiset.card_pos_iff_exists_mem.1 $ s.2.symm ▸ n.succ_pos
lemma exists_eq_cons_of_succ (s : sym α n.succ) : ∃ (a : α) (s' : sym α n), s = a ::ₛ s' :=
begin
obtain ⟨a, ha⟩ := exists_mem s,
classical,
exact ⟨a, s.erase a ha, (cons_erase ha).symm⟩,
end
lemma eq_replicate {a : α} {n : ℕ} {s : sym α n} : s = replicate n a ↔ ∀ b ∈ s, b = a :=
subtype.ext_iff.trans $ multiset.eq_replicate.trans $ and_iff_right s.prop
lemma eq_replicate_of_subsingleton [subsingleton α] (a : α) {n : ℕ} (s : sym α n) :
s = replicate n a :=
eq_replicate.2 $ λ b hb, subsingleton.elim _ _
instance [subsingleton α] (n : ℕ) : subsingleton (sym α n) :=
⟨begin
cases n,
{ simp, },
{ intros s s',
obtain ⟨b, -⟩ := exists_mem s,
rw [eq_replicate_of_subsingleton b s', eq_replicate_of_subsingleton b s], },
end⟩
instance inhabited_sym [inhabited α] (n : ℕ) : inhabited (sym α n) :=
⟨replicate n default⟩
instance inhabited_sym' [inhabited α] (n : ℕ) : inhabited (sym' α n) :=
⟨quotient.mk' (vector.replicate n default)⟩
instance (n : ℕ) [is_empty α] : is_empty (sym α n.succ) :=
⟨λ s, by { obtain ⟨a, -⟩ := exists_mem s, exact is_empty_elim a }⟩
instance (n : ℕ) [unique α] : unique (sym α n) := unique.mk' _
lemma replicate_right_inj {a b : α} {n : ℕ} (h : n ≠ 0) : replicate n a = replicate n b ↔ a = b :=
subtype.ext_iff.trans (multiset.replicate_right_inj h)
lemma replicate_right_injective {n : ℕ} (h : n ≠ 0) :
function.injective (replicate n : α → sym α n) :=
λ a b, (replicate_right_inj h).1
instance (n : ℕ) [nontrivial α] : nontrivial (sym α (n + 1)) :=
(replicate_right_injective n.succ_ne_zero).nontrivial
/-- A function `α → β` induces a function `sym α n → sym β n` by applying it to every element of
the underlying `n`-tuple. -/
def map {n : ℕ} (f : α → β) (x : sym α n) : sym β n :=
⟨x.val.map f, by simpa [multiset.card_map] using x.property⟩
@[simp] lemma mem_map {n : ℕ} {f : α → β} {b : β} {l : sym α n} :
b ∈ sym.map f l ↔ ∃ a, a ∈ l ∧ f a = b := multiset.mem_map
/-- Note: `sym.map_id` is not simp-normal, as simp ends up unfolding `id` with `sym.map_congr` -/
@[simp] lemma map_id' {α : Type*} {n : ℕ} (s : sym α n) : sym.map (λ (x : α), x) s = s :=
by simp [sym.map]
lemma map_id {α : Type*} {n : ℕ} (s : sym α n) : sym.map id s = s :=
by simp [sym.map]
@[simp] lemma map_map {α β γ : Type*} {n : ℕ} (g : β → γ) (f : α → β) (s : sym α n) :
sym.map g (sym.map f s) = sym.map (g ∘ f) s :=
by simp [sym.map]
@[simp] lemma map_zero (f : α → β) :
sym.map f (0 : sym α 0) = (0 : sym β 0) := rfl
@[simp] lemma map_cons {n : ℕ} (f : α → β) (a : α) (s : sym α n) :
(a ::ₛ s).map f = (f a) ::ₛ s.map f :=
by simp [map, cons]
@[congr] lemma map_congr {f g : α → β} {s : sym α n} (h : ∀ x ∈ s, f x = g x) :
map f s = map g s := subtype.ext $ multiset.map_congr rfl h
@[simp] lemma map_mk {f : α → β} {m : multiset α} {hc : m.card = n} :
map f (mk m hc) = mk (m.map f) (by simp [hc]) := rfl
@[simp] lemma coe_map (s : sym α n) (f : α → β) : ↑(s.map f) = multiset.map f s := rfl
lemma map_injective {f : α → β} (hf : injective f) (n : ℕ) :
injective (map f : sym α n → sym β n) :=
λ s t h, coe_injective $ multiset.map_injective hf $ coe_inj.2 h
/-- Mapping an equivalence `α ≃ β` using `sym.map` gives an equivalence between `sym α n` and
`sym β n`. -/
@[simps]
def equiv_congr (e : α ≃ β) : sym α n ≃ sym β n :=
{ to_fun := map e,
inv_fun := map e.symm,
left_inv := λ x, by rw [map_map, equiv.symm_comp_self, map_id],
right_inv := λ x, by rw [map_map, equiv.self_comp_symm, map_id] }
/-- "Attach" a proof that `a ∈ s` to each element `a` in `s` to produce
an element of the symmetric power on `{x // x ∈ s}`. -/
def attach (s : sym α n) : sym {x // x ∈ s} n := ⟨s.val.attach, by rw [multiset.card_attach, s.2]⟩
@[simp] lemma attach_mk {m : multiset α} {hc : m.card = n} :
attach (mk m hc) = mk m.attach (multiset.card_attach.trans hc) := rfl
@[simp] lemma coe_attach (s : sym α n) : (s.attach : multiset {a // a ∈ s}) = multiset.attach s :=
rfl
lemma attach_map_coe (s : sym α n) : s.attach.map coe = s :=
coe_injective $ multiset.attach_map_val _
@[simp] lemma mem_attach (s : sym α n) (x : {x // x ∈ s}) : x ∈ s.attach :=
multiset.mem_attach _ _
@[simp] lemma attach_nil : (nil : sym α 0).attach = nil := rfl
@[simp] lemma attach_cons (x : α) (s : sym α n) :
(cons x s).attach = cons ⟨x, mem_cons_self _ _⟩ (s.attach.map (λ x, ⟨x, mem_cons_of_mem x.prop⟩))
:=
coe_injective $ multiset.attach_cons _ _
/-- Change the length of a `sym` using an equality.
The simp-normal form is for the `cast` to be pushed outward. -/
protected def cast {n m : ℕ} (h : n = m) : sym α n ≃ sym α m :=
{ to_fun := λ s, ⟨s.val, s.2.trans h⟩,
inv_fun := λ s, ⟨s.val, s.2.trans h.symm⟩,
left_inv := λ s, subtype.ext rfl,
right_inv := λ s, subtype.ext rfl }
@[simp] lemma cast_rfl : sym.cast rfl s = s := subtype.ext rfl
@[simp] lemma cast_cast {n'' : ℕ} (h : n = n') (h' : n' = n'') :
sym.cast h' (sym.cast h s) = sym.cast (h.trans h') s := rfl
@[simp] lemma coe_cast (h : n = m) : (sym.cast h s : multiset α) = s := rfl
@[simp] lemma mem_cast (h : n = m) : a ∈ sym.cast h s ↔ a ∈ s := iff.rfl
/-- Append a pair of `sym` terms. -/
def append (s : sym α n) (s' : sym α n') : sym α (n + n') :=
⟨s.1 + s'.1, by simp_rw [← s.2, ← s'.2, map_add]⟩
@[simp] lemma append_inj_right (s : sym α n) {t t' : sym α n'} :
s.append t = s.append t' ↔ t = t' :=
subtype.ext_iff.trans $ (add_right_inj _).trans subtype.ext_iff.symm
@[simp] lemma append_inj_left {s s' : sym α n} (t : sym α n') :
s.append t = s'.append t ↔ s = s' :=
subtype.ext_iff.trans $ (add_left_inj _).trans subtype.ext_iff.symm
lemma append_comm (s : sym α n') (s' : sym α n') :
s.append s' = sym.cast (add_comm _ _) (s'.append s) :=
by { ext, simp [append, add_comm], }
@[simp, norm_cast] lemma coe_append (s : sym α n) (s' : sym α n') :
(s.append s' : multiset α) = s + s' := rfl
lemma mem_append_iff {s' : sym α m} : a ∈ s.append s' ↔ a ∈ s ∨ a ∈ s' := multiset.mem_add
/-- Fill a term `m : sym α (n - i)` with `i` copies of `a` to obtain a term of `sym α n`.
This is a convenience wrapper for `m.append (replicate i a)` that adjusts the term using
`sym.cast`. -/
def fill (a : α) (i : fin (n + 1)) (m : sym α (n - i)) : sym α n :=
sym.cast (nat.sub_add_cancel i.is_le) (m.append (replicate i a))
lemma coe_fill {a : α} {i : fin (n + 1)} {m : sym α (n - i)} :
(fill a i m : multiset α) = m + replicate i a := rfl
lemma mem_fill_iff {a b : α} {i : fin (n + 1)} {s : sym α (n - i)} :
a ∈ sym.fill b i s ↔ ((i : ℕ) ≠ 0 ∧ a = b) ∨ a ∈ s :=
by rw [fill, mem_cast, mem_append_iff, or_comm, mem_replicate]
open multiset
/-- Remove every `a` from a given `sym α n`.
Yields the number of copies `i` and a term of `sym α (n - i)`. -/
def filter_ne [decidable_eq α] (a : α) (m : sym α n) : Σ i : fin (n + 1), sym α (n - i) :=
⟨⟨m.1.count a, (count_le_card _ _).trans_lt $ by rw [m.2, nat.lt_succ_iff]⟩,
m.1.filter ((≠) a), eq_tsub_of_add_eq $ eq.trans begin
rw [← countp_eq_card_filter, add_comm],
exact (card_eq_countp_add_countp _ _).symm,
end m.2⟩
lemma sigma_sub_ext {m₁ m₂ : Σ i : fin (n + 1), sym α (n - i)}
(h : (m₁.2 : multiset α) = m₂.2) : m₁ = m₂ :=
sigma.subtype_ext (fin.ext $ by rw [← nat.sub_sub_self m₁.1.is_le, ← nat.sub_sub_self m₂.1.is_le,
← m₁.2.2, ← m₂.2.2, subtype.val_eq_coe, subtype.val_eq_coe, h]) h
lemma fill_filter_ne [decidable_eq α] (a : α) (m : sym α n) :
(m.filter_ne a).2.fill a (m.filter_ne a).1 = m :=
subtype.ext begin
dsimp only [coe_fill, filter_ne, subtype.coe_mk, fin.coe_mk],
ext b, rw [count_add, count_filter, sym.coe_replicate, count_replicate],
obtain rfl | h := eq_or_ne a b,
{ rw [if_pos rfl, if_neg (not_not.2 rfl), zero_add], refl },
{ rw [if_pos h, if_neg h.symm, add_zero], refl },
end
lemma filter_ne_fill [decidable_eq α] (a : α) (m : Σ i : fin (n + 1), sym α (n - i)) (h : a ∉ m.2) :
(m.2.fill a m.1).filter_ne a = m :=
sigma_sub_ext begin
dsimp only [filter_ne, subtype.coe_mk, subtype.val_eq_coe, coe_fill],
rw [filter_add, filter_eq_self.2, add_right_eq_self, eq_zero_iff_forall_not_mem],
{ intros b hb, rw [mem_filter, sym.mem_coe, mem_replicate] at hb, exact hb.2 hb.1.2.symm },
{ exact λ b hb, (hb.ne_of_not_mem h).symm },
end
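-- Informal example of the `fill`/`filter_ne` round trip: over `α = bool` with `n = 3`, the
-- term `tt ::ₛ tt ::ₛ ff ::ₛ nil` has `filter_ne tt` given by the count `i = 2` together with
-- the leftover `ff ::ₛ nil : sym bool 1`; applying `fill tt` with `i = 2` to that leftover
-- reinserts the two copies of `tt` and recovers the original term, as `fill_filter_ne` states.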
end sym
section equiv
/-! ### Combinatorial equivalences -/
variables {α : Type*} {n : ℕ}
open sym
namespace sym_option_succ_equiv
/-- Function from the symmetric product over `option` splitting on whether or not
it contains a `none`. -/
def encode [decidable_eq α] (s : sym (option α) n.succ) : sym (option α) n ⊕ sym α n.succ :=
if h : none ∈ s
then sum.inl (s.erase none h)
else sum.inr (s.attach.map $ λ o,
option.get $ option.ne_none_iff_is_some.1 $ ne_of_mem_of_not_mem o.2 h)
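-- The idea of the split: a term of `sym (option α) n.succ` either contains `none`, in which
-- case erasing one copy of `none` leaves a term of `sym (option α) n` (the `sum.inl` branch),
-- or it does not, in which case every entry is `some a` and stripping the `some`s gives a term
-- of `sym α n.succ` (the `sum.inr` branch).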
@[simp] lemma encode_of_none_mem [decidable_eq α] (s : sym (option α) n.succ) (h : none ∈ s) :
encode s = sum.inl (s.erase none h) := dif_pos h
@[simp] lemma encode_of_not_none_mem [decidable_eq α] (s : sym (option α) n.succ) (h : ¬ none ∈ s) :
encode s = sum.inr (s.attach.map $ λ o,
option.get $ option.ne_none_iff_is_some.1 $ ne_of_mem_of_not_mem o.2 h) := dif_neg h
/-- Inverse of `sym_option_succ_equiv.encode`. -/
@[simp] def decode : sym (option α) n ⊕ sym α n.succ → sym (option α) n.succ
| (sum.inl s) := none ::ₛ s
| (sum.inr s) := s.map embedding.coe_option
@[simp] lemma decode_encode [decidable_eq α] (s : sym (option α) n.succ) :
decode (encode s) = s :=
begin
by_cases h : none ∈ s,
{ simp [h] },
{ simp only [h, decode, not_false_iff, subtype.val_eq_coe, encode_of_not_none_mem,
embedding.coe_option_apply, map_map, comp_app, option.coe_get],
convert s.attach_map_coe }
end
@[simp] lemma encode_decode [decidable_eq α] (s : sym (option α) n ⊕ sym α n.succ) :
encode (decode s) = s :=
begin
obtain (s | s) := s,
{ simp },
{ unfold sym_option_succ_equiv.encode,
split_ifs,
{ obtain ⟨a, _, ha⟩ := multiset.mem_map.mp h,
exact option.some_ne_none _ ha },
{ refine map_injective (option.some_injective _) _ _,
convert eq.trans _ (sym_option_succ_equiv.decode (sum.inr s)).attach_map_coe,
simp } }
end
end sym_option_succ_equiv
/-- The symmetric product over `option` is a disjoint union over simpler symmetric products. -/
@[simps] def sym_option_succ_equiv [decidable_eq α] :
sym (option α) n.succ ≃ sym (option α) n ⊕ sym α n.succ :=
{ to_fun := sym_option_succ_equiv.encode,
inv_fun := sym_option_succ_equiv.decode,
left_inv := sym_option_succ_equiv.decode_encode,
right_inv := sym_option_succ_equiv.encode_decode }
end equiv
-- File: /src/category_theory/types.lean (repo: naussicaa/mathlib, license: Apache-2.0)
-- Copyright (c) 2017 Scott Morrison. All rights reserved.
-- Released under Apache 2.0 license as described in the file LICENSE.
-- Authors: Stephen Morgan, Scott Morrison, Johannes Hölzl
import category_theory.functor_category
import category_theory.fully_faithful
import data.equiv.basic
namespace category_theory
universes v v' w u u' -- declare the `v`'s first; see `category_theory.category` for an explanation
instance types : large_category (Sort u) :=
{ hom := λ a b, (a → b),
id := λ a, id,
comp := λ _ _ _ f g, g ∘ f }
@[simp] lemma types_hom {α β : Sort u} : (α ⟶ β) = (α → β) := rfl
@[simp] lemma types_id (X : Sort u) : 𝟙 X = id := rfl
@[simp] lemma types_comp {X Y Z : Sort u} (f : X ⟶ Y) (g : Y ⟶ Z) : f ≫ g = g ∘ f := rfl
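-- Note the order of composition: in the category of types `f ≫ g` means "first `f`, then `g`",
-- so `(f ≫ g) x = g (f x)`.  For instance, with `f := nat.succ` and `g := λ n, 2 * n`, the
-- composite `f ≫ g` sends `n` to `2 * (n + 1)`.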
namespace functor
variables {J : Type u} [𝒥 : category.{v} J]
include 𝒥
def sections (F : J ⥤ Type w) : set (Π j, F.obj j) :=
{ u | ∀ {j j'} (f : j ⟶ j'), F.map f (u j) = u j'}
end functor
namespace functor_to_types
variables {C : Type u} [𝒞 : category.{v} C] (F G H : C ⥤ Sort w) {X Y Z : C}
include 𝒞
variables (σ : F ⟶ G) (τ : G ⟶ H)
@[simp] lemma map_comp (f : X ⟶ Y) (g : Y ⟶ Z) (a : F.obj X) : (F.map (f ≫ g)) a = (F.map g) ((F.map f) a) :=
by simp
@[simp] lemma map_id (a : F.obj X) : (F.map (𝟙 X)) a = a :=
by simp
lemma naturality (f : X ⟶ Y) (x : F.obj X) : σ.app Y ((F.map f) x) = (G.map f) (σ.app X x) :=
congr_fun (σ.naturality f) x
@[simp] lemma comp (x : F.obj X) : (σ ≫ τ).app X x = τ.app X (σ.app X x) := rfl
variables {D : Type u'} [𝒟 : category.{u'} D] (I J : D ⥤ C) (ρ : I ⟶ J) {W : D}
@[simp] lemma hcomp (x : (I ⋙ F).obj W) : (ρ ◫ σ).app W x = (G.map (ρ.app W)) (σ.app (I.obj W) x) := rfl
end functor_to_types
def ulift_trivial (V : Type u) : ulift.{u} V ≅ V := by tidy
def ulift_functor : Type u ⥤ Type (max u v) :=
{ obj := λ X, ulift.{v} X,
map := λ X Y f, λ x : ulift.{v} X, ulift.up (f x.down) }
@[simp] lemma ulift_functor_map {X Y : Type u} (f : X ⟶ Y) (x : ulift.{v} X) :
ulift_functor.map f x = ulift.up (f x.down) := rfl
instance ulift_functor_faithful : fully_faithful ulift_functor :=
{ preimage := λ X Y f x, (f (ulift.up x)).down,
injectivity' := λ X Y f g p, funext $ λ x,
congr_arg ulift.down ((congr_fun p (ulift.up x)) : ((ulift.up (f x)) = (ulift.up (g x)))) }
def hom_of_element {X : Type u} (x : X) : punit ⟶ X := λ _, x
lemma hom_of_element_eq_iff {X : Type u} (x y : X) :
hom_of_element x = hom_of_element y ↔ x = y :=
⟨λ H, congr_fun H punit.star, by cc⟩
lemma mono_iff_injective {X Y : Type u} (f : X ⟶ Y) : mono f ↔ function.injective f :=
begin
split,
{ intros H x x' h,
resetI,
rw ←hom_of_element_eq_iff at ⊢ h,
exact (cancel_mono f).mp h },
{ refine λ H, ⟨λ Z g h H₂, _⟩,
ext z,
replace H₂ := congr_fun H₂ z,
exact H H₂ }
end
lemma epi_iff_surjective {X Y : Type u} (f : X ⟶ Y) : epi f ↔ function.surjective f :=
begin
split,
{ intros H,
let g : Y ⟶ ulift Prop := λ y, ⟨true⟩,
let h : Y ⟶ ulift Prop := λ y, ⟨∃ x, f x = y⟩,
suffices : f ≫ g = f ≫ h,
{ resetI,
rw cancel_epi at this,
intro y,
replace this := congr_fun this y,
replace this : true = ∃ x, f x = y := congr_arg ulift.down this,
rw ←this,
trivial },
ext x,
change true ↔ ∃ x', f x' = f x,
rw true_iff,
exact ⟨x, rfl⟩ },
{ intro H,
constructor,
intros Z g h H₂,
apply funext,
rw ←forall_iff_forall_surj H,
intro x,
exact (congr_fun H₂ x : _) }
end
end category_theory
-- Isomorphisms in Type and equivalences.
namespace equiv
universe u
variables {X Y : Sort u}
def to_iso (e : X ≃ Y) : X ≅ Y :=
{ hom := e.to_fun,
inv := e.inv_fun,
hom_inv_id' := funext e.left_inv,
inv_hom_id' := funext e.right_inv }
@[simp] lemma to_iso_hom {e : X ≃ Y} : e.to_iso.hom = e := rfl
@[simp] lemma to_iso_inv {e : X ≃ Y} : e.to_iso.inv = e.symm := rfl
end equiv
namespace category_theory.iso
universe u
variables {X Y : Sort u}
def to_equiv (i : X ≅ Y) : X ≃ Y :=
{ to_fun := i.hom,
inv_fun := i.inv,
left_inv := λ x, congr_fun i.hom_inv_id x,
right_inv := λ y, congr_fun i.inv_hom_id y }
@[simp] lemma to_equiv_fun (i : X ≅ Y) : (i.to_equiv : X → Y) = i.hom := rfl
@[simp] lemma to_equiv_symm_fun (i : X ≅ Y) : (i.to_equiv.symm : Y → X) = i.inv := rfl
end category_theory.iso
-- File: /src/topology/instances/ennreal.lean (repo: agjftucker/mathlib, license: Apache-2.0)
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Johannes Hölzl
-/
import topology.instances.nnreal
/-!
# Extended non-negative reals
-/
noncomputable theory
open classical set filter metric
open_locale classical
open_locale topological_space
variables {α : Type*} {β : Type*} {γ : Type*}
open_locale ennreal big_operators filter
namespace ennreal
variables {a b c d : ennreal} {r p q : nnreal}
variables {x y z : ennreal} {ε ε₁ ε₂ : ennreal} {s : set ennreal}
section topological_space
open topological_space
/-- Topology on `ennreal`.
Note: this is different from the `emetric_space` topology. The `emetric_space` topology has
`is_open {⊤}`, while in the order topology defined here `{⊤}` is not an open set. -/
instance : topological_space ennreal := preorder.topology ennreal
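-- In particular `{⊤}` is not open here: by `nhds_top` below, every neighbourhood of `⊤`
-- contains a ray `Ioi a` for some `a ≠ ∞`, and such a ray contains infinitely many elements.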
instance : order_topology ennreal := ⟨rfl⟩
instance : t2_space ennreal := by apply_instance -- short-circuit type class inference
instance : second_countable_topology ennreal :=
⟨⟨⋃q ≥ (0:ℚ), {{a : ennreal | a < nnreal.of_real q}, {a : ennreal | ↑(nnreal.of_real q) < a}},
(countable_encodable _).bUnion $ assume a ha, (countable_singleton _).insert _,
le_antisymm
(le_generate_from $ by simp [or_imp_distrib, is_open_lt', is_open_gt'] {contextual := tt})
(le_generate_from $ λ s h, begin
rcases h with ⟨a, hs | hs⟩;
[ rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ a < nnreal.of_real q}, {b | ↑(nnreal.of_real q) < b},
from set.ext (assume b, by simp [hs, @ennreal.lt_iff_exists_rat_btwn a b, and_assoc]),
rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ ↑(nnreal.of_real q) < a}, {b | b < ↑(nnreal.of_real q)},
from set.ext (assume b, by simp [hs, @ennreal.lt_iff_exists_rat_btwn b a, and_comm, and_assoc])];
{ apply is_open_Union, intro q,
apply is_open_Union, intro hq,
exact generate_open.basic _ (mem_bUnion hq.1 $ by simp) }
end)⟩⟩
lemma embedding_coe : embedding (coe : nnreal → ennreal) :=
⟨⟨begin
refine le_antisymm _ _,
{ rw [@order_topology.topology_eq_generate_intervals ennreal _,
← coinduced_le_iff_le_induced],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
show is_open {b : nnreal | a < ↑b},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_lt'] },
show is_open {b : nnreal | ↑b < a},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_gt', is_open_const] } },
{ rw [@order_topology.topology_eq_generate_intervals nnreal _],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
exact ⟨Ioi a, is_open_Ioi, by simp [Ioi]⟩,
exact ⟨Iio a, is_open_Iio, by simp [Iio]⟩ }
end⟩,
assume a b, coe_eq_coe.1⟩
lemma is_open_ne_top : is_open {a : ennreal | a ≠ ⊤} := is_open_ne
lemma is_open_Ico_zero : is_open (Ico 0 b) := by { rw ennreal.Ico_eq_Iio, exact is_open_Iio}
lemma coe_range_mem_nhds : range (coe : nnreal → ennreal) ∈ 𝓝 (r : ennreal) :=
have {a : ennreal | a ≠ ⊤} = range (coe : nnreal → ennreal),
from set.ext $ assume a, by cases a; simp [none_eq_top, some_eq_coe],
this ▸ mem_nhds_sets is_open_ne_top coe_ne_top
@[norm_cast] lemma tendsto_coe {f : filter α} {m : α → nnreal} {a : nnreal} :
tendsto (λa, (m a : ennreal)) f (𝓝 ↑a) ↔ tendsto m f (𝓝 a) :=
embedding_coe.tendsto_nhds_iff.symm
lemma continuous_coe {α} [topological_space α] {f : α → nnreal} :
continuous (λa, (f a : ennreal)) ↔ continuous f :=
embedding_coe.continuous_iff.symm
lemma nhds_coe {r : nnreal} : 𝓝 (r : ennreal) = (𝓝 r).map coe :=
by rw [embedding_coe.induced, map_nhds_induced_eq coe_range_mem_nhds]
lemma nhds_coe_coe {r p : nnreal} : 𝓝 ((r : ennreal), (p : ennreal)) =
(𝓝 (r, p)).map (λp:nnreal×nnreal, (p.1, p.2)) :=
begin
rw [(embedding_coe.prod_mk embedding_coe).map_nhds_eq],
rw [← prod_range_range_eq],
exact prod_mem_nhds_sets coe_range_mem_nhds coe_range_mem_nhds
end
lemma continuous_of_real : continuous ennreal.of_real :=
(continuous_coe.2 continuous_id).comp nnreal.continuous_of_real
lemma tendsto_of_real {f : filter α} {m : α → ℝ} {a : ℝ} (h : tendsto m f (𝓝 a)) :
tendsto (λa, ennreal.of_real (m a)) f (𝓝 (ennreal.of_real a)) :=
tendsto.comp (continuous.tendsto continuous_of_real _) h
lemma tendsto_to_nnreal {a : ennreal} : a ≠ ⊤ →
tendsto (ennreal.to_nnreal) (𝓝 a) (𝓝 a.to_nnreal) :=
begin
cases a; simp [some_eq_coe, none_eq_top, nhds_coe, tendsto_map'_iff, (∘)],
exact tendsto_id
end
lemma continuous_on_to_nnreal : continuous_on ennreal.to_nnreal {a | a ≠ ∞} :=
continuous_on_iff_continuous_restrict.2 $ continuous_iff_continuous_at.2 $ λ x,
(tendsto_to_nnreal x.2).comp continuous_at_subtype_coe
lemma tendsto_to_real {a : ennreal} : a ≠ ⊤ → tendsto (ennreal.to_real) (𝓝 a) (𝓝 a.to_real) :=
λ ha, tendsto.comp ((@nnreal.tendsto_coe _ (𝓝 a.to_nnreal) id (a.to_nnreal)).2 tendsto_id)
(tendsto_to_nnreal ha)
lemma tendsto_nhds_top {m : α → ennreal} {f : filter α}
(h : ∀ n : ℕ, ∀ᶠ a in f, ↑n < m a) : tendsto m f (𝓝 ⊤) :=
tendsto_nhds_generate_from $ assume s hs,
match s, hs with
| _, ⟨none, or.inl rfl⟩, hr := (lt_irrefl ⊤ hr).elim
| _, ⟨some r, or.inl rfl⟩, hr :=
let ⟨n, hrn⟩ := exists_nat_gt r in
mem_sets_of_superset (h n) $ assume a hnma, show ↑r < m a, from
lt_trans (show (r : ennreal) < n, from (coe_nat n) ▸ coe_lt_coe.2 hrn) hnma
| _, ⟨a, or.inr rfl⟩, hr := (not_top_lt $ show ⊤ < a, from hr).elim
end
lemma tendsto_nat_nhds_top : tendsto (λ n : ℕ, ↑n) at_top (𝓝 ∞) :=
tendsto_nhds_top $ λ n, mem_at_top_sets.2
⟨n+1, λ m hm, ennreal.coe_nat_lt_coe_nat.2 $ nat.lt_of_succ_le hm⟩
lemma nhds_top : 𝓝 ∞ = ⨅a ≠ ∞, 𝓟 (Ioi a) :=
nhds_top_order.trans $ by simp [lt_top_iff_ne_top, Ioi]
lemma nhds_zero : 𝓝 (0 : ennreal) = ⨅a ≠ 0, 𝓟 (Iio a) :=
nhds_bot_order.trans $ by simp [bot_lt_iff_ne_bot, Iio]
/-- The set of finite `ennreal` numbers is homeomorphic to `nnreal`. -/
def ne_top_homeomorph_nnreal : {a | a ≠ ∞} ≃ₜ nnreal :=
{ to_fun := λ x, ennreal.to_nnreal x,
inv_fun := λ x, ⟨x, coe_ne_top⟩,
left_inv := λ ⟨x, hx⟩, subtype.eq $ coe_to_nnreal hx,
right_inv := λ x, to_nnreal_coe,
continuous_to_fun := continuous_on_iff_continuous_restrict.1 continuous_on_to_nnreal,
continuous_inv_fun := continuous_subtype_mk _ (continuous_coe.2 continuous_id) }
/-- The set of finite `ennreal` numbers is homeomorphic to `nnreal`. -/
def lt_top_homeomorph_nnreal : {a | a < ∞} ≃ₜ nnreal :=
by refine (homeomorph.set_congr $ set.ext $ λ x, _).trans ne_top_homeomorph_nnreal;
simp only [mem_set_of_eq, lt_top_iff_ne_top]
-- using Icc because
-- • don't have 'Ioo (x - ε) (x + ε) ∈ 𝓝 x' unless x > 0
-- • (x - y ≤ ε ↔ x ≤ ε + y) is true, while (x - y < ε ↔ x < ε + y) is not
lemma Icc_mem_nhds : x ≠ ⊤ → 0 < ε → Icc (x - ε) (x + ε) ∈ 𝓝 x :=
begin
assume xt ε0, rw mem_nhds_sets_iff,
by_cases x0 : x = 0,
{ use Iio (x + ε),
have : Iio (x + ε) ⊆ Icc (x - ε) (x + ε), assume a, rw x0, simpa using le_of_lt,
use this, exact ⟨is_open_Iio, mem_Iio_self_add xt ε0⟩ },
{ use Ioo (x - ε) (x + ε), use Ioo_subset_Icc_self,
exact ⟨is_open_Ioo, mem_Ioo_self_sub_add xt x0 ε0 ε0 ⟩ }
end
lemma nhds_of_ne_top : x ≠ ⊤ → 𝓝 x = ⨅ε > 0, 𝓟 (Icc (x - ε) (x + ε)) :=
begin
assume xt, refine le_antisymm _ _,
-- first direction
simp only [le_infi_iff, le_principal_iff], assume ε ε0, exact Icc_mem_nhds xt ε0,
-- second direction
rw nhds_generate_from, refine le_infi (assume s, le_infi $ assume hs, _),
simp only [mem_set_of_eq] at hs, rcases hs with ⟨xs, ⟨a, ha⟩⟩,
cases ha,
{ rw ha at *,
rcases dense xs with ⟨b, ⟨ab, bx⟩⟩,
have xb_pos : x - b > 0 := zero_lt_sub_iff_lt.2 bx,
have xxb : x - (x - b) = b := sub_sub_cancel (by rwa lt_top_iff_ne_top) (le_of_lt bx),
refine infi_le_of_le (x - b) (infi_le_of_le xb_pos _),
simp only [mem_principal_sets, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xxb at h₁, calc a < b : ab ... ≤ y : h₁ },
{ rw ha at *,
rcases dense xs with ⟨b, ⟨xb, ba⟩⟩,
have bx_pos : b - x > 0 := zero_lt_sub_iff_lt.2 xb,
have xbx : x + (b - x) = b := add_sub_cancel_of_le (le_of_lt xb),
refine infi_le_of_le (b - x) (infi_le_of_le bx_pos _),
simp only [mem_principal_sets, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xbx at h₂, calc y ≤ b : h₂ ... < a : ba },
end
/-- Characterization of neighborhoods for `ennreal` numbers. See also `tendsto_order`
for a version with strict inequalities. -/
protected theorem tendsto_nhds {f : filter α} {u : α → ennreal} {a : ennreal} (ha : a ≠ ⊤) :
tendsto u f (𝓝 a) ↔ ∀ ε > 0, ∀ᶠ x in f, (u x) ∈ Icc (a - ε) (a + ε) :=
by simp only [nhds_of_ne_top ha, tendsto_infi, tendsto_principal, mem_Icc]
protected lemma tendsto_at_top [nonempty β] [semilattice_sup β] {f : β → ennreal} {a : ennreal}
(ha : a ≠ ⊤) : tendsto f at_top (𝓝 a) ↔ ∀ε>0, ∃N, ∀n≥N, (f n) ∈ Icc (a - ε) (a + ε) :=
by simp only [ennreal.tendsto_nhds ha, mem_at_top_sets, mem_set_of_eq, filter.eventually]
lemma tendsto_coe_nnreal_nhds_top {α} {l : filter α} {f : α → nnreal} (h : tendsto f l at_top) :
tendsto (λa, (f a : ennreal)) l (𝓝 ∞) :=
tendsto_nhds_top $ assume n,
have ∀ᶠ a in l, ↑(n+1) ≤ f a := h $ mem_at_top _,
mem_sets_of_superset this $ assume a (ha : ↑(n+1) ≤ f a),
begin
rw [← coe_nat],
dsimp,
exact coe_lt_coe.2 (lt_of_lt_of_le (nat.cast_lt.2 (nat.lt_succ_self _)) ha)
end
instance : has_continuous_add ennreal :=
⟨ continuous_iff_continuous_at.2 $
have hl : ∀a:ennreal, tendsto (λ (p : ennreal × ennreal), p.fst + p.snd) (𝓝 (⊤, a)) (𝓝 ⊤), from
assume a, tendsto_nhds_top $ assume n,
have set.prod {a | ↑n < a } univ ∈ 𝓝 ((⊤:ennreal), a), from
prod_mem_nhds_sets (lt_mem_nhds $ coe_nat n ▸ coe_lt_top) univ_mem_sets,
show {a : ennreal × ennreal | ↑n < a.fst + a.snd} ∈ 𝓝 (⊤, a),
begin filter_upwards [this] assume ⟨a₁, a₂⟩ ⟨h₁, h₂⟩, lt_of_lt_of_le h₁ (le_add_right $ le_refl _) end,
begin
rintro ⟨a₁, a₂⟩,
cases a₁, { simp [continuous_at, none_eq_top, hl a₂], },
cases a₂, { simp [continuous_at, none_eq_top, some_eq_coe, nhds_swap (a₁ : ennreal) ⊤,
tendsto_map'_iff, (∘)], convert hl a₁, simp [add_comm] },
simp [continuous_at, some_eq_coe, nhds_coe_coe, tendsto_map'_iff, (∘)],
simp only [coe_add.symm, tendsto_coe, tendsto_add]
end ⟩
protected lemma tendsto_mul (ha : a ≠ 0 ∨ b ≠ ⊤) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λp:ennreal×ennreal, p.1 * p.2) (𝓝 (a, b)) (𝓝 (a * b)) :=
have ht : ∀b:ennreal, b ≠ 0 → tendsto (λp:ennreal×ennreal, p.1 * p.2) (𝓝 ((⊤:ennreal), b)) (𝓝 ⊤),
begin
refine assume b hb, tendsto_nhds_top $ assume n, _,
rcases dense (zero_lt_iff_ne_zero.2 hb) with ⟨ε', hε', hεb'⟩,
rcases ennreal.lt_iff_exists_coe.1 hεb' with ⟨ε, rfl, h⟩,
rcases exists_nat_gt (↑n / ε) with ⟨m, hm⟩,
have hε : ε > 0, from coe_lt_coe.1 hε',
refine mem_sets_of_superset (prod_mem_nhds_sets (lt_mem_nhds $ @coe_lt_top m) (lt_mem_nhds $ h)) _,
rintros ⟨a₁, a₂⟩ ⟨h₁, h₂⟩,
dsimp at h₁ h₂ ⊢,
calc (n:ennreal) = ↑(((n:nnreal) / ε) * ε) :
begin
norm_cast,
simp [nnreal.div_def, mul_assoc, nnreal.inv_mul_cancel (ne_of_gt hε)]
end
... < (↑m * ε : nnreal) : coe_lt_coe.2 $ mul_lt_mul hm (le_refl _) hε (nat.cast_nonneg _)
... ≤ a₁ * a₂ : by rw [coe_mul]; exact canonically_ordered_semiring.mul_le_mul
(le_of_lt h₁)
(le_of_lt h₂)
end,
begin
cases a, {simp [none_eq_top] at hb, simp [none_eq_top, ht b hb, top_mul, hb] },
cases b, {
simp [none_eq_top] at ha,
simp [*, nhds_swap (a : ennreal) ⊤, none_eq_top, some_eq_coe, top_mul, tendsto_map'_iff, (∘), mul_comm] },
simp [some_eq_coe, nhds_coe_coe, tendsto_map'_iff, (∘)],
simp only [coe_mul.symm, tendsto_coe, tendsto_mul]
end
protected lemma tendsto.mul {f : filter α} {ma : α → ennreal} {mb : α → ennreal} {a b : ennreal}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λa, ma a * mb a) f (𝓝 (a * b)) :=
show tendsto ((λp:ennreal×ennreal, p.1 * p.2) ∘ (λa, (ma a, mb a))) f (𝓝 (a * b)), from
tendsto.comp (ennreal.tendsto_mul ha hb) (hma.prod_mk_nhds hmb)
protected lemma tendsto.const_mul {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) : tendsto (λb, a * m b) f (𝓝 (a * b)) :=
by_cases
(assume : a = 0, by simp [this, tendsto_const_nhds])
(assume ha : a ≠ 0, ennreal.tendsto.mul tendsto_const_nhds (or.inl ha) hm hb)
protected lemma tendsto.mul_const {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) : tendsto (λx, m x * b) f (𝓝 (a * b)) :=
by simpa only [mul_comm] using ennreal.tendsto.const_mul hm ha
protected lemma continuous_at_const_mul {a b : ennreal} (h : a ≠ ⊤ ∨ b ≠ 0) :
continuous_at ((*) a) b :=
tendsto.const_mul tendsto_id h.symm
protected lemma continuous_at_mul_const {a b : ennreal} (h : a ≠ ⊤ ∨ b ≠ 0) :
continuous_at (λ x, x * a) b :=
tendsto.mul_const tendsto_id h.symm
protected lemma continuous_const_mul {a : ennreal} (ha : a ≠ ⊤) : continuous ((*) a) :=
continuous_iff_continuous_at.2 $ λ x, ennreal.continuous_at_const_mul (or.inl ha)
protected lemma continuous_mul_const {a : ennreal} (ha : a ≠ ⊤) : continuous (λ x, x * a) :=
continuous_iff_continuous_at.2 $ λ x, ennreal.continuous_at_mul_const (or.inl ha)
lemma infi_mul_left {ι} [nonempty ι] {f : ι → ennreal} {a : ennreal}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) :
(⨅ i, a * f i) = a * ⨅ i, f i :=
begin
by_cases H : a = ⊤ ∧ (⨅ i, f i) = 0,
{ rcases h H.1 H.2 with ⟨i, hi⟩,
rw [H.2, mul_zero, ← bot_eq_zero, infi_eq_bot],
exact λ b hb, ⟨i, by rwa [hi, mul_zero, ← bot_eq_zero]⟩ },
{ rw not_and_distrib at H,
exact (map_infi_of_continuous_at_of_monotone' (ennreal.continuous_at_const_mul H)
ennreal.mul_left_mono).symm }
end
lemma infi_mul_right {ι} [nonempty ι] {f : ι → ennreal} {a : ennreal}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) :
(⨅ i, f i * a) = (⨅ i, f i) * a :=
by simpa only [mul_comm a] using infi_mul_left h
protected lemma continuous_inv : continuous (has_inv.inv : ennreal → ennreal) :=
continuous_iff_continuous_at.2 $ λ a, tendsto_order.2
⟨begin
assume b hb,
simp only [@ennreal.lt_inv_iff_lt_inv b],
exact gt_mem_nhds (ennreal.lt_inv_iff_lt_inv.1 hb),
end,
begin
assume b hb,
simp only [gt_iff_lt, @ennreal.inv_lt_iff_inv_lt _ b],
exact lt_mem_nhds (ennreal.inv_lt_iff_inv_lt.1 hb)
end⟩
@[simp] protected lemma tendsto_inv_iff {f : filter α} {m : α → ennreal} {a : ennreal} :
tendsto (λ x, (m x)⁻¹) f (𝓝 a⁻¹) ↔ tendsto m f (𝓝 a) :=
⟨λ h, by simpa only [function.comp, ennreal.inv_inv]
using (ennreal.continuous_inv.tendsto a⁻¹).comp h,
(ennreal.continuous_inv.tendsto a).comp⟩
protected lemma tendsto.div {f : filter α} {ma : α → ennreal} {mb : α → ennreal} {a b : ennreal}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) :
tendsto (λa, ma a / mb a) f (𝓝 (a / b)) :=
by { apply tendsto.mul hma _ (ennreal.tendsto_inv_iff.2 hmb) _; simp [ha, hb] }
protected lemma tendsto.const_div {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) : tendsto (λb, a / m b) f (𝓝 (a / b)) :=
by { apply tendsto.const_mul (ennreal.tendsto_inv_iff.2 hm), simp [hb] }
protected lemma tendsto.div_const {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) : tendsto (λx, m x / b) f (𝓝 (a / b)) :=
by { apply tendsto.mul_const hm, simp [ha] }
protected lemma tendsto_inv_nat_nhds_zero : tendsto (λ n : ℕ, (n : ennreal)⁻¹) at_top (𝓝 0) :=
ennreal.inv_top ▸ ennreal.tendsto_inv_iff.2 tendsto_nat_nhds_top
lemma Sup_add {s : set ennreal} (hs : s.nonempty) : Sup s + a = ⨆b∈s, b + a :=
have Sup ((λb, b + a) '' s) = Sup s + a,
from is_lub.Sup_eq (is_lub_of_is_lub_of_tendsto
(assume x _ y _ h, add_le_add h (le_refl _))
(is_lub_Sup s)
hs
(tendsto.add (tendsto_id' inf_le_left) tendsto_const_nhds)),
by simp [Sup_image, -add_comm] at this; exact this.symm
lemma supr_add {ι : Sort*} {s : ι → ennreal} [h : nonempty ι] : supr s + a = ⨆b, s b + a :=
let ⟨x⟩ := h in
calc supr s + a = Sup (range s) + a : by simp [Sup_range]
... = (⨆b∈range s, b + a) : Sup_add ⟨s x, x, rfl⟩
... = _ : supr_range
lemma add_supr {ι : Sort*} {s : ι → ennreal} [h : nonempty ι] : a + supr s = ⨆b, a + s b :=
by rw [add_comm, supr_add]; simp [add_comm]
lemma supr_add_supr {ι : Sort*} {f g : ι → ennreal} (h : ∀i j, ∃k, f i + g j ≤ f k + g k) :
supr f + supr g = (⨆ a, f a + g a) :=
begin
by_cases hι : nonempty ι,
{ letI := hι,
refine le_antisymm _ (supr_le $ λ a, add_le_add (le_supr _ _) (le_supr _ _)),
simpa [add_supr, supr_add] using
λ i j:ι, show f i + g j ≤ ⨆ a, f a + g a, from
let ⟨k, hk⟩ := h i j in le_supr_of_le k hk },
{ have : ∀f:ι → ennreal, (⨆i, f i) = 0 := assume f, bot_unique (supr_le $ assume i, (hι ⟨i⟩).elim),
rw [this, this, this, zero_add] }
end
lemma supr_add_supr_of_monotone {ι : Sort*} [semilattice_sup ι]
{f g : ι → ennreal} (hf : monotone f) (hg : monotone g) :
supr f + supr g = (⨆ a, f a + g a) :=
supr_add_supr $ assume i j, ⟨i ⊔ j, add_le_add (hf $ le_sup_left) (hg $ le_sup_right)⟩
lemma finset_sum_supr_nat {α} {ι} [semilattice_sup ι] {s : finset α} {f : α → ι → ennreal}
(hf : ∀a, monotone (f a)) :
∑ a in s, supr (f a) = (⨆ n, ∑ a in s, f a n) :=
begin
refine finset.induction_on s _ _,
{ simp,
exact (bot_unique $ supr_le $ assume i, le_refl ⊥).symm },
{ assume a s has ih,
simp only [finset.sum_insert has],
rw [ih, supr_add_supr_of_monotone (hf a)],
assume i j h,
exact (finset.sum_le_sum $ assume a ha, hf a h) }
end
section priority
-- for some reason the next proof fails without changing the priority of this instance
local attribute [instance, priority 1000] classical.prop_decidable
lemma mul_Sup {s : set ennreal} {a : ennreal} : a * Sup s = ⨆i∈s, a * i :=
begin
by_cases hs : ∀x∈s, x = (0:ennreal),
{ have h₁ : Sup s = 0 := (bot_unique $ Sup_le $ assume a ha, (hs a ha).symm ▸ le_refl 0),
have h₂ : (⨆i ∈ s, a * i) = 0 :=
(bot_unique $ supr_le $ assume a, supr_le $ assume ha, by simp [hs a ha]),
rw [h₁, h₂, mul_zero] },
{ simp only [not_forall] at hs,
rcases hs with ⟨x, hx, hx0⟩,
have s₁ : Sup s ≠ 0 :=
zero_lt_iff_ne_zero.1 (lt_of_lt_of_le (zero_lt_iff_ne_zero.2 hx0) (le_Sup hx)),
have : Sup ((λb, a * b) '' s) = a * Sup s :=
is_lub.Sup_eq (is_lub_of_is_lub_of_tendsto
(assume x _ y _ h, canonically_ordered_semiring.mul_le_mul (le_refl _) h)
(is_lub_Sup _)
⟨x, hx⟩
(ennreal.tendsto.const_mul (tendsto_id' inf_le_left) (or.inl s₁))),
rw [this.symm, Sup_image] }
end
end priority
lemma mul_supr {ι : Sort*} {f : ι → ennreal} {a : ennreal} : a * supr f = ⨆i, a * f i :=
by rw [← Sup_range, mul_Sup, supr_range]
lemma supr_mul {ι : Sort*} {f : ι → ennreal} {a : ennreal} : supr f * a = ⨆i, f i * a :=
by rw [mul_comm, mul_supr]; congr; funext; rw [mul_comm]
protected lemma tendsto_coe_sub : ∀{b:ennreal}, tendsto (λb:ennreal, ↑r - b) (𝓝 b) (𝓝 (↑r - b)) :=
begin
refine (forall_ennreal.2 $ and.intro (assume a, _) _),
{ simp [@nhds_coe a, tendsto_map'_iff, (∘), tendsto_coe, coe_sub.symm],
exact nnreal.tendsto.sub tendsto_const_nhds tendsto_id },
simp,
exact (tendsto.congr' (mem_sets_of_superset (lt_mem_nhds $ @coe_lt_top r) $
by simp [le_of_lt] {contextual := tt})) tendsto_const_nhds
end
lemma sub_supr {ι : Sort*} [hι : nonempty ι] {b : ι → ennreal} (hr : a < ⊤) :
a - (⨆i, b i) = (⨅i, a - b i) :=
let ⟨i⟩ := hι in
let ⟨r, eq, _⟩ := lt_iff_exists_coe.mp hr in
have Inf ((λb, ↑r - b) '' range b) = ↑r - (⨆i, b i),
from is_glb.Inf_eq $ is_glb_of_is_lub_of_tendsto
(assume x _ y _, sub_le_sub (le_refl _))
is_lub_supr
⟨_, i, rfl⟩
(tendsto.comp ennreal.tendsto_coe_sub (tendsto_id' inf_le_left)),
by rw [eq, ←this]; simp [Inf_image, infi_range, -mem_range]; exact le_refl _
end topological_space
section tsum
variables {f g : α → ennreal}
@[norm_cast] protected lemma has_sum_coe {f : α → nnreal} {r : nnreal} :
has_sum (λa, (f a : ennreal)) ↑r ↔ has_sum f r :=
have (λs:finset α, ∑ a in s, ↑(f a)) = (coe : nnreal → ennreal) ∘ (λs:finset α, ∑ a in s, f a),
from funext $ assume s, ennreal.coe_finset_sum.symm,
by unfold has_sum; rw [this, tendsto_coe]
protected lemma tsum_coe_eq {f : α → nnreal} (h : has_sum f r) : (∑'a, (f a : ennreal)) = r :=
(ennreal.has_sum_coe.2 h).tsum_eq
protected lemma coe_tsum {f : α → nnreal} : summable f → ↑(tsum f) = (∑'a, (f a : ennreal))
| ⟨r, hr⟩ := by rw [hr.tsum_eq, ennreal.tsum_coe_eq hr]
protected lemma has_sum : has_sum f (⨆s:finset α, ∑ a in s, f a) :=
tendsto_order.2
⟨assume a' ha',
let ⟨s, hs⟩ := lt_supr_iff.mp ha' in
mem_at_top_sets.mpr ⟨s, assume t ht, lt_of_lt_of_le hs $ finset.sum_le_sum_of_subset ht⟩,
assume a' ha',
univ_mem_sets' $ assume s,
have ∑ a in s, f a ≤ ⨆(s : finset α), ∑ a in s, f a,
from le_supr (λ(s : finset α), ∑ a in s, f a) s,
lt_of_le_of_lt this ha'⟩
@[simp] protected lemma summable : summable f := ⟨_, ennreal.has_sum⟩
lemma tsum_coe_ne_top_iff_summable {f : β → nnreal} :
(∑' b, (f b:ennreal)) ≠ ∞ ↔ summable f :=
begin
refine ⟨λ h, _, λ h, ennreal.coe_tsum h ▸ ennreal.coe_ne_top⟩,
lift (∑' b, (f b:ennreal)) to nnreal using h with a ha,
refine ⟨a, ennreal.has_sum_coe.1 _⟩,
rw ha,
exact ennreal.summable.has_sum
end
protected lemma tsum_eq_supr_sum : (∑'a, f a) = (⨆s:finset α, ∑ a in s, f a) :=
ennreal.has_sum.tsum_eq
protected lemma tsum_eq_supr_sum' {ι : Type*} (s : ι → finset α) (hs : ∀ t, ∃ i, t ⊆ s i) :
(∑' a, f a) = ⨆ i, ∑ a in s i, f a :=
begin
rw [ennreal.tsum_eq_supr_sum],
symmetry,
change (⨆i:ι, (λ t : finset α, ∑ a in t, f a) (s i)) = ⨆s:finset α, ∑ a in s, f a,
exact (finset.sum_mono_set f).supr_comp_eq hs
end
protected lemma tsum_sigma {β : α → Type*} (f : Πa, β a → ennreal) :
(∑'p:Σa, β a, f p.1 p.2) = (∑'a b, f a b) :=
tsum_sigma' (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_sigma' {β : α → Type*} (f : (Σ a, β a) → ennreal) :
(∑'p:(Σa, β a), f p) = (∑'a b, f ⟨a, b⟩) :=
tsum_sigma' (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_prod {f : α → β → ennreal} : (∑'p:α×β, f p.1 p.2) = (∑'a, ∑'b, f a b) :=
tsum_prod' ennreal.summable $ λ _, ennreal.summable
protected lemma tsum_comm {f : α → β → ennreal} : (∑'a, ∑'b, f a b) = (∑'b, ∑'a, f a b) :=
tsum_comm' ennreal.summable (λ _, ennreal.summable) (λ _, ennreal.summable)
protected lemma tsum_add : (∑'a, f a + g a) = (∑'a, f a) + (∑'a, g a) :=
tsum_add ennreal.summable ennreal.summable
protected lemma tsum_le_tsum (h : ∀a, f a ≤ g a) : (∑'a, f a) ≤ (∑'a, g a) :=
tsum_le_tsum h ennreal.summable ennreal.summable
protected lemma sum_le_tsum {f : α → ennreal} (s : finset α) : ∑ x in s, f x ≤ ∑' x, f x :=
sum_le_tsum s (λ x hx, zero_le _) ennreal.summable
protected lemma tsum_eq_supr_nat {f : ℕ → ennreal} :
(∑'i:ℕ, f i) = (⨆i:ℕ, ∑ a in finset.range i, f a) :=
ennreal.tsum_eq_supr_sum' _ finset.exists_nat_subset_range
protected lemma le_tsum (a : α) : f a ≤ (∑'a, f a) :=
calc f a = ∑ x in {a}, f x : finset.sum_singleton.symm
... ≤ ∑' x, f x : ennreal.sum_le_tsum {a}
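-- A hedged sanity check, not part of the original file: `le_tsum` specialised to
-- a single term of a series indexed by `ℕ`.
example (h : ℕ → ennreal) (n : ℕ) : h n ≤ (∑' k, h k) := ennreal.le_tsum n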
protected lemma tsum_eq_top_of_eq_top : (∃ a, f a = ∞) → (∑' a, f a) = ∞
| ⟨a, ha⟩ := top_unique $ ha ▸ ennreal.le_tsum a
protected lemma ne_top_of_tsum_ne_top (h : (∑' a, f a) ≠ ∞) (a : α) : f a ≠ ∞ :=
λ ha, h $ ennreal.tsum_eq_top_of_eq_top ⟨a, ha⟩
protected lemma tsum_mul_left : (∑'i, a * f i) = a * (∑'i, f i) :=
if h : ∀i, f i = 0 then by simp [h] else
let ⟨i, (hi : f i ≠ 0)⟩ := not_forall.mp h in
have sum_ne_0 : (∑'i, f i) ≠ 0, from ne_of_gt $
calc 0 < f i : lt_of_le_of_ne (zero_le _) hi.symm
... ≤ (∑'i, f i) : ennreal.le_tsum _,
have tendsto (λs:finset α, ∑ j in s, a * f j) at_top (𝓝 (a * (∑'i, f i))),
by rw [← show (*) a ∘ (λs:finset α, ∑ j in s, f j) = λs, ∑ j in s, a * f j,
from funext $ λ s, finset.mul_sum];
exact ennreal.tendsto.const_mul ennreal.summable.has_sum (or.inl sum_ne_0),
has_sum.tsum_eq this
protected lemma tsum_mul_right : (∑'i, f i * a) = (∑'i, f i) * a :=
by simp [mul_comm, ennreal.tsum_mul_left]
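-- A hedged sanity check, not part of the original file: `tsum_mul_left` pulls a
-- constant factor out of a series indexed by `ℕ`.
example (h : ℕ → ennreal) : (∑' n, 2 * h n) = 2 * (∑' n, h n) := ennreal.tsum_mul_left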
@[simp] lemma tsum_supr_eq {α : Type*} (a : α) {f : α → ennreal} :
(∑'b:α, ⨆ (h : a = b), f b) = f a :=
le_antisymm
(by rw [ennreal.tsum_eq_supr_sum]; exact supr_le (assume s,
calc (∑ b in s, ⨆ (h : a = b), f b) ≤ ∑ b in {a}, ⨆ (h : a = b), f b :
finset.sum_le_sum_of_ne_zero $ assume b _ hb,
suffices a = b, by simpa using this.symm,
classical.by_contradiction $ assume h,
by simpa [h] using hb
... = f a : by simp))
(calc f a ≤ (⨆ (h : a = a), f a) : le_supr (λh:a=a, f a) rfl
... ≤ (∑'b:α, ⨆ (h : a = b), f b) : ennreal.le_tsum _)
lemma has_sum_iff_tendsto_nat {f : ℕ → ennreal} (r : ennreal) :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
refine ⟨has_sum.tendsto_sum_nat, assume h, _⟩,
rw [← supr_eq_of_tendsto _ h, ← ennreal.tsum_eq_supr_nat],
{ exact ennreal.summable.has_sum },
{ exact assume s t hst, finset.sum_le_sum_of_subset (finset.range_subset.2 hst) }
end
end tsum
end ennreal
namespace nnreal
lemma exists_le_has_sum_of_le {f g : β → nnreal} {r : nnreal}
(hgf : ∀b, g b ≤ f b) (hfr : has_sum f r) : ∃p≤r, has_sum g p :=
have (∑'b, (g b : ennreal)) ≤ r,
begin
refine has_sum_le (assume b, _) ennreal.summable.has_sum (ennreal.has_sum_coe.2 hfr),
exact ennreal.coe_le_coe.2 (hgf _)
end,
let ⟨p, eq, hpr⟩ := ennreal.le_coe_iff.1 this in
⟨p, hpr, ennreal.has_sum_coe.1 $ eq ▸ ennreal.summable.has_sum⟩
lemma summable_of_le {f g : β → nnreal} (hgf : ∀b, g b ≤ f b) : summable f → summable g
| ⟨r, hfr⟩ := let ⟨p, _, hp⟩ := exists_le_has_sum_of_le hgf hfr in hp.summable
lemma has_sum_iff_tendsto_nat {f : ℕ → nnreal} (r : nnreal) :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
rw [← ennreal.has_sum_coe, ennreal.has_sum_iff_tendsto_nat],
simp only [ennreal.coe_finset_sum.symm],
exact ennreal.tendsto_coe
end
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → nnreal} (hf : summable f)
{i : β → α} (hi : function.injective i) : tsum (f ∘ i) ≤ tsum f :=
tsum_le_tsum_of_inj i hi (λ c hc, zero_le _) (λ b, le_refl _) (summable_comp_injective hf hi) hf
end nnreal
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → ℝ} (hf : summable f) (hn : ∀ a, 0 ≤ f a)
{i : β → α} (hi : function.injective i) : tsum (f ∘ i) ≤ tsum f :=
begin
let g : α → nnreal := λ a, ⟨f a, hn a⟩,
have hg : summable g, by rwa ← nnreal.summable_coe,
convert nnreal.coe_le_coe.2 (nnreal.tsum_comp_le_tsum_of_inj hg hi);
{ rw nnreal.coe_tsum, congr }
end
lemma summable_of_nonneg_of_le {f g : β → ℝ}
(hg : ∀b, 0 ≤ g b) (hgf : ∀b, g b ≤ f b) (hf : summable f) : summable g :=
let f' (b : β) : nnreal := ⟨f b, le_trans (hg b) (hgf b)⟩ in
let g' (b : β) : nnreal := ⟨g b, hg b⟩ in
have summable f', from nnreal.summable_coe.1 hf,
have summable g', from
nnreal.summable_of_le (assume b, (@nnreal.coe_le_coe (g' b) (f' b)).2 $ hgf b) this,
show summable (λb, g' b : β → ℝ), from nnreal.summable_coe.2 this
lemma has_sum_iff_tendsto_nat_of_nonneg {f : ℕ → ℝ} (hf : ∀i, 0 ≤ f i) (r : ℝ) :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
⟨has_sum.tendsto_sum_nat,
assume hfr,
have 0 ≤ r := ge_of_tendsto hfr $ univ_mem_sets' $ assume i,
show 0 ≤ ∑ j in finset.range i, f j, from finset.sum_nonneg $ assume i _, hf i,
let f' (n : ℕ) : nnreal := ⟨f n, hf n⟩, r' : nnreal := ⟨r, this⟩ in
have f_eq : f = (λi:ℕ, (f' i : ℝ)) := rfl,
have r_eq : r = r' := rfl,
begin
rw [f_eq, r_eq, nnreal.has_sum_coe, nnreal.has_sum_iff_tendsto_nat, ← nnreal.tendsto_coe],
simp only [nnreal.coe_sum],
exact hfr
end⟩
lemma infi_real_pos_eq_infi_nnreal_pos {α : Type*} [complete_lattice α] {f : ℝ → α} :
(⨅(n:ℝ) (h : 0 < n), f n) = (⨅(n:nnreal) (h : 0 < n), f n) :=
le_antisymm
(le_infi $ assume n, le_infi $ assume hn, infi_le_of_le n $ infi_le _ (nnreal.coe_pos.2 hn))
(le_infi $ assume r, le_infi $ assume hr, infi_le_of_le ⟨r, le_of_lt hr⟩ $ infi_le _ hr)
section
variables [emetric_space β]
open ennreal filter emetric
/-- In an emetric ball, the distance between points is everywhere finite -/
lemma edist_ne_top_of_mem_ball {a : β} {r : ennreal} (x y : ball a r) : edist x.1 y.1 ≠ ⊤ :=
lt_top_iff_ne_top.1 $
calc edist x y ≤ edist a x + edist a y : edist_triangle_left x.1 y.1 a
... < r + r : by rw [edist_comm a x, edist_comm a y]; exact add_lt_add x.2 y.2
... ≤ ⊤ : le_top
/-- Each ball in an extended metric space gives us a metric space, as the edist
is everywhere finite. -/
def metric_space_emetric_ball (a : β) (r : ennreal) : metric_space (ball a r) :=
emetric_space.to_metric_space edist_ne_top_of_mem_ball
local attribute [instance] metric_space_emetric_ball
lemma nhds_eq_nhds_emetric_ball (a x : β) (r : ennreal) (h : x ∈ ball a r) :
𝓝 x = map (coe : ball a r → β) (𝓝 ⟨x, h⟩) :=
(map_nhds_subtype_coe_eq _ $ mem_nhds_sets emetric.is_open_ball h).symm
end
section
variable [emetric_space α]
open emetric
lemma tendsto_iff_edist_tendsto_0 {l : filter β} {f : β → α} {y : α} :
tendsto f l (𝓝 y) ↔ tendsto (λ x, edist (f x) y) l (𝓝 0) :=
by simp only [emetric.nhds_basis_eball.tendsto_right_iff, emetric.mem_ball,
@tendsto_order ennreal β _ _, forall_prop_of_false ennreal.not_lt_zero, forall_const, true_and]
/-- Yet another metric characterization of Cauchy sequences, stated for any nonempty
`semilattice_sup` index type (typically `ℕ`). This one is often the most efficient. -/
lemma emetric.cauchy_seq_iff_le_tendsto_0 [nonempty β] [semilattice_sup β] {s : β → α} :
cauchy_seq s ↔ (∃ (b: β → ennreal), (∀ n m N : β, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N)
∧ (tendsto b at_top (𝓝 0))) :=
⟨begin
assume hs,
rw emetric.cauchy_seq_iff at hs,
/- `s` is a Cauchy sequence. The sequence `b` will be constructed by taking
the supremum of the distances between `s n` and `s m` for `n, m ≥ N`. -/
let b := λN, Sup ((λ(p : β × β), edist (s p.1) (s p.2))''{p | p.1 ≥ N ∧ p.2 ≥ N}),
--Prove that it bounds the distances of points in the Cauchy sequence
have C : ∀ n m N, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
{ refine λm n N hm hn, le_Sup _,
use (prod.mk m n),
simp only [and_true, eq_self_iff_true, set.mem_set_of_eq],
exact ⟨hm, hn⟩ },
--Prove that it tends to `0`, by using the Cauchy property of `s`
have D : tendsto b at_top (𝓝 0),
{ refine tendsto_order.2 ⟨λa ha, absurd ha (ennreal.not_lt_zero), λε εpos, _⟩,
rcases dense εpos with ⟨δ, δpos, δlt⟩,
rcases hs δ δpos with ⟨N, hN⟩,
refine filter.mem_at_top_sets.2 ⟨N, λn hn, _⟩,
have : b n ≤ δ := Sup_le begin
simp only [and_imp, set.mem_image, set.mem_set_of_eq, exists_imp_distrib, prod.exists],
intros d p q hp hq hd,
rw ← hd,
exact le_of_lt (hN p q (le_trans hn hp) (le_trans hn hq))
end,
simpa using lt_of_le_of_lt this δlt },
-- Conclude
exact ⟨b, ⟨C, D⟩⟩
end,
begin
rintros ⟨b, ⟨b_bound, b_lim⟩⟩,
/- b : β → ennreal, b_bound : ∀ (n m N : β), N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
b_lim : tendsto b at_top (𝓝 0) -/
refine emetric.cauchy_seq_iff.2 (λε εpos, _),
have : ∀ᶠ n in at_top, b n < ε := (tendsto_order.1 b_lim ).2 _ εpos,
rcases filter.mem_at_top_sets.1 this with ⟨N, hN⟩,
exact ⟨N, λm n hm hn, calc
edist (s m) (s n) ≤ b N : b_bound m n N hm hn
... < ε : (hN _ (le_refl N)) ⟩
end⟩
lemma continuous_of_le_add_edist {f : α → ennreal} (C : ennreal)
(hC : C ≠ ⊤) (h : ∀x y, f x ≤ f y + C * edist x y) : continuous f :=
begin
refine continuous_iff_continuous_at.2 (λx, tendsto_order.2 ⟨_, _⟩),
show ∀e, e < f x → ∀ᶠ y in 𝓝 x, e < f y,
{ assume e he,
let ε := min (f x - e) 1,
have : ε < ⊤ := lt_of_le_of_lt (min_le_right _ _) (by simp [lt_top_iff_ne_top]),
have : 0 < ε := by simp [ε, hC, he, ennreal.zero_lt_one],
have : 0 < C⁻¹ * (ε/2) := bot_lt_iff_ne_bot.2 (by simp [hC, (ne_of_lt this).symm, mul_eq_zero]),
have I : C * (C⁻¹ * (ε/2)) < ε,
{ by_cases C_zero : C = 0,
{ simp [C_zero, ‹0 < ε›] },
{ calc C * (C⁻¹ * (ε/2)) = (C * C⁻¹) * (ε/2) : by simp [mul_assoc]
... = ε/2 : by simp [ennreal.mul_inv_cancel C_zero hC]
... < ε : ennreal.half_lt_self (bot_lt_iff_ne_bot.1 ‹0 < ε›) (lt_top_iff_ne_top.1 ‹ε < ⊤›) }},
have : ball x (C⁻¹ * (ε/2)) ⊆ {y : α | e < f y},
{ rintros y hy,
by_cases htop : f y = ⊤,
{ simp [htop, lt_top_iff_ne_top, ne_top_of_lt he] },
{ simp at hy,
have : e + ε < f y + ε := calc
e + ε ≤ e + (f x - e) : add_le_add_left (min_le_left _ _) _
... = f x : by simp [le_of_lt he]
... ≤ f y + C * edist x y : h x y
... = f y + C * edist y x : by simp [edist_comm]
... ≤ f y + C * (C⁻¹ * (ε/2)) :
add_le_add_left (canonically_ordered_semiring.mul_le_mul (le_refl _) (le_of_lt hy)) _
... < f y + ε : (ennreal.add_lt_add_iff_left (lt_top_iff_ne_top.2 htop)).2 I,
show e < f y, from
(ennreal.add_lt_add_iff_right ‹ε < ⊤›).1 this }},
apply filter.mem_sets_of_superset (ball_mem_nhds _ (‹0 < C⁻¹ * (ε/2)›)) this },
show ∀e, f x < e → ∀ᶠ y in 𝓝 x, f y < e,
{ assume e he,
let ε := min (e - f x) 1,
have : ε < ⊤ := lt_of_le_of_lt (min_le_right _ _) (by simp [lt_top_iff_ne_top]),
have : 0 < ε := by simp [ε, he, ennreal.zero_lt_one],
have : 0 < C⁻¹ * (ε/2) := bot_lt_iff_ne_bot.2 (by simp [hC, (ne_of_lt this).symm, mul_eq_zero]),
have I : C * (C⁻¹ * (ε/2)) < ε,
{ by_cases C_zero : C = 0,
simp [C_zero, ‹0 < ε›],
calc C * (C⁻¹ * (ε/2)) = (C * C⁻¹) * (ε/2) : by simp [mul_assoc]
... = ε/2 : by simp [ennreal.mul_inv_cancel C_zero hC]
... < ε : ennreal.half_lt_self (bot_lt_iff_ne_bot.1 ‹0 < ε›) (lt_top_iff_ne_top.1 ‹ε < ⊤›) },
have : ball x (C⁻¹ * (ε/2)) ⊆ {y : α | f y < e},
{ rintros y hy,
have htop : f x ≠ ⊤ := ne_top_of_lt he,
show f y < e, from calc
f y ≤ f x + C * edist y x : h y x
... ≤ f x + C * (C⁻¹ * (ε/2)) :
add_le_add_left (canonically_ordered_semiring.mul_le_mul (le_refl _) (le_of_lt hy)) _
... < f x + ε : (ennreal.add_lt_add_iff_left (lt_top_iff_ne_top.2 htop)).2 I
... ≤ f x + (e - f x) : add_le_add_left (min_le_left _ _) _
... = e : by simp [le_of_lt he] },
apply filter.mem_sets_of_superset (ball_mem_nhds _ (‹0 < C⁻¹ * (ε/2)›)) this },
end
theorem continuous_edist : continuous (λp:α×α, edist p.1 p.2) :=
begin
apply continuous_of_le_add_edist 2 (by norm_num),
rintros ⟨x, y⟩ ⟨x', y'⟩,
calc edist x y ≤ edist x x' + edist x' y' + edist y' y : edist_triangle4 _ _ _ _
... = edist x' y' + (edist x x' + edist y y') : by simp [edist_comm]; cc
... ≤ edist x' y' + (edist (x, y) (x', y') + edist (x, y) (x', y')) :
add_le_add_left (add_le_add (le_max_left _ _) (le_max_right _ _)) _
... = edist x' y' + 2 * edist (x, y) (x', y') : by rw [← mul_two, mul_comm]
end
theorem continuous.edist [topological_space β] {f g : β → α}
(hf : continuous f) (hg : continuous g) : continuous (λb, edist (f b) (g b)) :=
continuous_edist.comp (hf.prod_mk hg)
theorem filter.tendsto.edist {f g : β → α} {x : filter β} {a b : α}
(hf : tendsto f x (𝓝 a)) (hg : tendsto g x (𝓝 b)) :
tendsto (λx, edist (f x) (g x)) x (𝓝 (edist a b)) :=
(continuous_edist.tendsto (a, b)).comp (hf.prod_mk_nhds hg)
lemma cauchy_seq_of_edist_le_of_tsum_ne_top {f : ℕ → α} (d : ℕ → ennreal)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n) (hd : tsum d ≠ ∞) :
cauchy_seq f :=
begin
lift d to (ℕ → nnreal) using (λ i, ennreal.ne_top_of_tsum_ne_top hd i),
rw ennreal.tsum_coe_ne_top_iff_summable at hd,
exact cauchy_seq_of_edist_le_of_summable d hf hd
end
lemma emetric.is_closed_ball {a : α} {r : ennreal} : is_closed (closed_ball a r) :=
is_closed_le (continuous_id.edist continuous_const) continuous_const
/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ennreal`,
then the distance from `f n` to the limit is bounded by `∑'_{k=n}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto {f : ℕ → α} (d : ℕ → ennreal)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
{a : α} (ha : tendsto f at_top (𝓝 a)) (n : ℕ) :
edist (f n) a ≤ ∑' m, d (n + m) :=
begin
refine le_of_tendsto (tendsto_const_nhds.edist ha)
(mem_at_top_sets.2 ⟨n, λ m hnm, _⟩),
refine le_trans (edist_le_Ico_sum_of_edist_le hnm (λ k _ _, hf k)) _,
rw [finset.sum_Ico_eq_sum_range],
exact sum_le_tsum _ (λ _ _, zero_le _) ennreal.summable
end
/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ennreal`,
then the distance from `f 0` to the limit is bounded by `∑'_{k=0}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto₀ {f : ℕ → α} (d : ℕ → ennreal)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
{a : α} (ha : tendsto f at_top (𝓝 a)) :
edist (f 0) a ≤ ∑' m, d m :=
by simpa using edist_le_tsum_of_edist_le_of_tendsto d hf ha 0
end --section
|
dd1d9982726939fffba510ab2f034d2449350be3
|
e030b0259b777fedcdf73dd966f3f1556d392178
|
/tests/lean/run/smt_destruct.lean
|
a013ee2eb8577811c9a4c3e11e7ebbc558083e8e
|
[
"Apache-2.0"
] |
permissive
|
fgdorais/lean
|
17b46a095b70b21fa0790ce74876658dc5faca06
|
c3b7c54d7cca7aaa25328f0a5660b6b75fe26055
|
refs/heads/master
| 1,611,523,590,686
| 1,484,412,902,000
| 1,484,412,902,000
| 38,489,734
| 0
| 0
| null | 1,435,923,380,000
| 1,435,923,379,000
| null |
UTF-8
|
Lean
| false
| false
| 1,668
|
lean
|
open smt_tactic
lemma ex1 (p q : Prop) : p ∨ q → p ∨ ¬q → ¬p ∨ q → ¬p ∨ ¬q → false :=
by using_smt $ do
intros,
trace_state,
a_1 ← tactic.get_local `a_1,
destruct a_1,
repeat close
lemma ex2 (p q : Prop) : p ∨ q → p ∨ ¬q → ¬p ∨ q → ¬p ∨ ¬q → false :=
begin [smt]
intros,
assert h : p ∨ q,
destruct h
end
lemma ex3 (p q : Prop) : p ∨ q → p ∨ ¬q → ¬p ∨ q → ¬p ∨ ¬q → false :=
begin [smt]
intros,
destruct a_1 -- bad style, it relies on automatically generated names
end
lemma ex4 (p q : Prop) : p ∨ q → p ∨ ¬q → ¬p ∨ q → ¬p ∨ ¬q → false :=
begin [smt] -- the default configuration is classical
intros,
by_cases p
end
lemma ex5 (p q : Prop) [decidable p] : p ∨ q → p ∨ ¬q → ¬p ∨ q → ¬p ∨ ¬q → false :=
begin [smt] with default_smt_config^.set_classical ff,
intros,
by_cases p -- will fail if p is not decidable
end
lemma ex6 (p q : Prop) : p ∨ q → p ∨ ¬q → ¬p ∨ q → p ∧ q :=
begin [smt] -- the default configuration is classical
intros,
by_contradiction,
trace_state,
by_cases p,
end
lemma ex7 (p q : Prop) [decidable p] : p ∨ q → p ∨ ¬q → ¬p ∨ q → p ∧ q :=
begin [smt] with default_smt_config^.set_classical ff,
intros,
by_cases p -- will fail if p is not decidable
end
lemma ex8 (p q : Prop) [decidable p] [decidable q] : p ∨ q → p ∨ ¬q → ¬p ∨ q → p ∧ q :=
begin [smt] with default_smt_config^.set_classical ff,
intros,
by_contradiction, -- will fail if p or q is not decidable
trace_state,
by_cases p -- will fail if p is not decidable
end
|
fa7bc4cfd57e7e6dc4ed718a0a99d10536d4f190
|
9be442d9ec2fcf442516ed6e9e1660aa9071b7bd
|
/doc/BoolExpr.lean
|
a5cb9010ea2ad1b83efce2358bf71d9b1efdc8c2
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
EdAyers/lean4
|
57ac632d6b0789cb91fab2170e8c9e40441221bd
|
37ba0df5841bde51dbc2329da81ac23d4f6a4de4
|
refs/heads/master
| 1,676,463,245,298
| 1,660,619,433,000
| 1,660,619,433,000
| 183,433,437
| 1
| 0
|
Apache-2.0
| 1,657,612,672,000
| 1,556,196,574,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 3,616
|
lean
|
import Std
open Std
open Lean
inductive BoolExpr where
| var (name : String)
| val (b : Bool)
| or (p q : BoolExpr)
| not (p : BoolExpr)
deriving Repr, BEq, DecidableEq
def BoolExpr.isValue : BoolExpr → Bool
| val _ => true
| _ => false
instance : Inhabited BoolExpr where
default := BoolExpr.val false
namespace BoolExpr
deriving instance DecidableEq for BoolExpr
#eval decide (BoolExpr.val true = BoolExpr.val false)
#check (a b : BoolExpr) → Decidable (a = b)
abbrev Context := AssocList String Bool
def denote (ctx : Context) : BoolExpr → Bool
| BoolExpr.or p q => denote ctx p || denote ctx q
| BoolExpr.not p => !denote ctx p
| BoolExpr.val b => b
| BoolExpr.var x => if let some b := ctx.find? x then b else false
def simplify : BoolExpr → BoolExpr
| or p q => mkOr (simplify p) (simplify q)
| not p => mkNot (simplify p)
| e => e
where
mkOr : BoolExpr → BoolExpr → BoolExpr
| p, val true => val true
| p, val false => p
| val true, p => val true
| val false, p => p
| p, q => or p q
mkNot : BoolExpr → BoolExpr
| val b => val (!b)
| p => not p
@[simp] theorem denote_not_Eq (ctx : Context) (p : BoolExpr) : denote ctx (not p) = !denote ctx p := rfl
@[simp] theorem denote_or_Eq (ctx : Context) (p q : BoolExpr) : denote ctx (or p q) = (denote ctx p || denote ctx q) := rfl
@[simp] theorem denote_val_Eq (ctx : Context) (b : Bool) : denote ctx (val b) = b := rfl
@[simp] theorem denote_mkNot_Eq (ctx : Context) (p : BoolExpr) : denote ctx (simplify.mkNot p) = denote ctx (not p) := by
cases p <;> rfl
@[simp] theorem mkOr_p_true (p : BoolExpr) : simplify.mkOr p (val true) = val true := by
cases p with
| val x => cases x <;> rfl
| _ => rfl
@[simp] theorem mkOr_p_false (p : BoolExpr) : simplify.mkOr p (val false) = p := by
cases p with
| val x => cases x <;> rfl
| _ => rfl
@[simp] theorem mkOr_true_p (p : BoolExpr) : simplify.mkOr (val true) p = val true := by
cases p with
| val x => cases x <;> rfl
| _ => rfl
@[simp] theorem mkOr_false_p (p : BoolExpr) : simplify.mkOr (val false) p = p := by
cases p with
| val x => cases x <;> rfl
| _ => rfl
@[simp] theorem denote_mkOr (ctx : Context) (p q : BoolExpr) : denote ctx (simplify.mkOr p q) = denote ctx (or p q) := by
cases p with
| val x => cases q with
| val y => cases x <;> cases y <;> simp
| _ => cases x <;> simp
| _ => cases q with
| val y => cases y <;> simp
| _ => rfl
@[simp] theorem simplify_not (p : BoolExpr) : simplify (not p) = simplify.mkNot (simplify p) := rfl
@[simp] theorem simplify_or (p q : BoolExpr) : simplify (or p q) = simplify.mkOr (simplify p) (simplify q) := rfl
def denote_simplify_eq (ctx : Context) (b : BoolExpr) : denote ctx (simplify b) = denote ctx b :=
by induction b with
| or p q ih₁ ih₂ => simp [ih₁, ih₂]
| not p ih => simp [ih]
| _ => rfl
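-- Hedged usage sketch, not part of the original file: `simplify` folds the
-- constant disjunct away, so this should print as `BoolExpr.var "x"`.
#eval simplify (or (val false) (var "x"))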
syntax "`[BExpr|" term "]" : term
macro_rules
| `(`[BExpr| true]) => `(val true)
| `(`[BExpr| false]) => `(val false)
| `(`[BExpr| $x:ident]) => `(var $(quote x.getId.toString))
| `(`[BExpr| $p ∨ $q]) => `(or `[BExpr| $p] `[BExpr| $q])
| `(`[BExpr| ¬ $p]) => `(not `[BExpr| $p])
#check `[BExpr| ¬ p ∨ q]
syntax entry := ident " ↦ " term:max
syntax entry,* "⊢" term : term
macro_rules
| `( $[$xs ↦ $vs],* ⊢ $p) =>
let xs := xs.map fun x => quote x.getId.toString
`(denote (List.toAssocList [$[($xs, $vs)],*]) `[BExpr| $p])
#check b ↦ true ⊢ b ∨ b
#eval a ↦ false, b ↦ false ⊢ b ∨ a
|
aea691778640221530f6385932f8d31a08c65705
|
f3849be5d845a1cb97680f0bbbe03b85518312f0
|
/library/tools/super/misc_preprocessing.lean
|
176f276ed7e37546b6cfaf96bf0acdfdebbbc977
|
[
"Apache-2.0"
] |
permissive
|
bjoeris/lean
|
0ed95125d762b17bfcb54dad1f9721f953f92eeb
|
4e496b78d5e73545fa4f9a807155113d8e6b0561
|
refs/heads/master
| 1,611,251,218,281
| 1,495,337,658,000
| 1,495,337,658,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,136
|
lean
|
/-
Copyright (c) 2016 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner
-/
import .clause .prover_state
open expr list monad
namespace super
meta def is_taut (c : clause) : tactic bool := do
qf ← c.open_constn c.num_quants,
return $ list.bor $ do
l1 ← qf.1.get_lits, guard l1.is_neg,
l2 ← qf.1.get_lits, guard l2.is_pos,
[decidable.to_bool $ l1.formula = l2.formula]
meta def tautology_removal_pre : prover unit :=
preprocessing_rule $ λnew, filter (λc, lift bnot $ is_taut c.c) new
meta def remove_duplicates : list derived_clause → list derived_clause
| [] := []
| (c :: cs) :=
let (same_type, other_type) := partition (λc' : derived_clause, c'.c.type = c.c.type) cs in
{ c with sc := foldl score.min c.sc (same_type.map $ λc, c.sc) } :: remove_duplicates other_type
meta def remove_duplicates_pre : prover unit :=
preprocessing_rule $ λnew,
return $ remove_duplicates new
meta def clause_normalize_pre : prover unit :=
preprocessing_rule $ λnew, for new $ λdc,
do c' ← dc.c.normalize, return { dc with c := c' }
end super
|
d384c7e3a0f44983d8cb270ee5008973a4dcd64a
|
cf39355caa609c0f33405126beee2739aa3cb77e
|
/tests/lean/run/1089.lean
|
d48b1d42497823c5da69952631eb72cc04fb3d63
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/lean
|
12b87f69d92e614daea8bcc9d4de9a9ace089d0e
|
cce7990ea86a78bdb383e38ed7f9b5ba93c60ce0
|
refs/heads/master
| 1,687,508,156,644
| 1,684,951,104,000
| 1,684,951,104,000
| 169,960,991
| 457
| 107
|
Apache-2.0
| 1,686,744,372,000
| 1,549,790,268,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 329
|
lean
|
import system.io
inductive token
| eof : token
| plus : token
| var : string -> token
open token
open option
def to_token : list char → option token
| [] := none
| (c :: cs) :=
let t : option token := match c with
| 'x' := some (var "x")
| 'y' := some (var "y")
| '+' := some plus
| _ := none
end in t
|
3e7cc3fbf457c7eb15c7121e8c1681880e12bcba
|
ae1e94c332e17c7dc7051ce976d5a9eebe7ab8a5
|
/stage0/src/Lean/Data/Json/Printer.lean
|
71c353caeadef2785958f38c82482e554851f9eb
|
[
"Apache-2.0"
] |
permissive
|
dupuisf/lean4
|
d082d13b01243e1de29ae680eefb476961221eef
|
6a39c65bd28eb0e28c3870188f348c8914502718
|
refs/heads/master
| 1,676,948,755,391
| 1,610,665,114,000
| 1,610,665,114,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,814
|
lean
|
/-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Marc Huisinga
-/
import Lean.Data.Format
import Lean.Data.Json.Basic
namespace Lean
namespace Json
private def escapeAux (c : Char) (acc : String) : String :=
-- escape ", \, \n and \r, keep all other characters ≥ 0x20 and render characters < 0x20 with \u
if c = '"' then -- hack to prevent emacs from regarding the rest of the file as a string: "
"\\\"" ++ acc
else if c = '\\' then
"\\\\" ++ acc
else if c = '\n' then
"\\n" ++ acc
else if c = '\x0d' then
"\\r" ++ acc
-- the c.val ≤ 0x10ffff check is technically redundant,
-- since Lean chars are unicode scalar values ≤ 0x10ffff.
-- as to whether characters with c.val > 0xffff should be split up and encoded
-- with multiple \u, the JSON standard is not definite: both directly printing
-- the character and encoding it with multiple \u are allowed, and it is up to
-- parsers to make the decision.
else if 0x0020 ≤ c.val ∧ c.val ≤ 0x10ffff then
String.singleton c ++ acc
else
let n := c.toNat;
-- since c.val < 0x20 in this case, this conversion is more involved than necessary
-- (but we keep it for completeness)
"\\u" ++
[ Nat.digitChar (n / 4096),
Nat.digitChar ((n % 4096) / 256),
Nat.digitChar ((n % 256) / 16),
Nat.digitChar (n % 16) ].asString ++
acc
def escape (s : String) : String := do
s.foldr escapeAux ""
def renderString (s : String) : String :=
"\"" ++ escape s ++ "\""
section
@[specialize]
partial def render : Json → Format
| null => "null"
| bool true => "true"
| bool false => "false"
| num s => s.toString
| str s => renderString s
| arr elems =>
let elems := Format.joinSep (elems.map render).toList ("," ++ Format.line);
Format.bracket "[" elems "]"
| obj kvs =>
let renderKV : String → Json → Format := fun k v =>
Format.group (renderString k ++ ":" ++ Format.line ++ render v);
let kvs := Format.joinSep (kvs.fold (fun acc k j => renderKV k j :: acc) []) ("," ++ Format.line);
Format.bracket "{" kvs "}"
end
def pretty (j : Json) (lineWidth := 80) : String :=
Format.pretty (render j) lineWidth
partial def compress : Json → String
| null => "null"
| bool true => "true"
| bool false => "false"
| num s => s.toString
| str s => renderString s
| arr elems =>
let f := ",".intercalate (elems.map compress).toList
s!"[{f}]"
| obj kvs =>
let ckvs := kvs.fold (fun acc k j => s!"{renderString k}:{compress j}" :: acc) []
let ckvs := ",".intercalate ckvs
s!"\{{ckvs}}"
instance : ToFormat Json := ⟨render⟩
instance : ToString Json := ⟨pretty⟩
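-- Hedged usage sketch, not part of the original file: `compress` emits no
-- whitespace, so the array below should render as ["a",true,null].
#eval compress (arr #[str "a", bool true, null])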
end Json
end Lean
|
273f4958129029c9a85d1e31e713d81bd34cf6ed
|
b7f22e51856f4989b970961f794f1c435f9b8f78
|
/library/theories/measure_theory/sigma_algebra.lean
|
95da2a9fdbe31254606958b2d19d083907abcfff
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean
|
cb8aa01055ffe2af0fb99a16b4cda8463b882cd1
|
38607e3eb57f57f77c0ac114ad169e9e4262e24f
|
refs/heads/master
| 1,611,187,284,081
| 1,450,766,737,000
| 1,476,122,547,000
| 11,513,992
| 2
| 0
| null | 1,401,763,102,000
| 1,374,182,235,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 12,160
|
lean
|
/-
Copyright (c) 2016 Jacob Gross. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jacob Gross, Jeremy Avigad
Sigma algebras.
-/
import data.set data.nat theories.topology.continuous ..move
open eq.ops set nat
structure sigma_algebra [class] (X : Type) :=
(sets : set (set X))
(univ_mem_sets : univ ∈ sets)
(comp_mem_sets : ∀ {s : set X}, s ∈ sets → (-s ∈ sets))
(cUnion_mem_sets : ∀ {s : ℕ → set X}, (∀ i, s i ∈ sets) → (⋃ i, s i) ∈ sets)
/- Closure properties -/
namespace measure_theory
open sigma_algebra
variables {X : Type} [sigma_algebra X]
definition measurable (t : set X) : Prop := t ∈ sets X
theorem measurable_univ : measurable (@univ X) :=
univ_mem_sets X
theorem measurable_compl {s : set X} (H : measurable s) : measurable (-s) :=
comp_mem_sets H
theorem measurable_of_measurable_compl {s : set X} (H : measurable (-s)) : measurable s :=
!compl_compl ▸ measurable_compl H
theorem measurable_empty : measurable (∅ : set X) :=
compl_univ ▸ measurable_compl measurable_univ
theorem measurable_cUnion {s : ℕ → set X} (H : ∀ i, measurable (s i)) :
measurable (⋃ i, s i) :=
cUnion_mem_sets H
theorem measurable_cInter {s : ℕ → set X} (H : ∀ i, measurable (s i)) :
measurable (⋂ i, s i) :=
have ∀ i, measurable (-(s i)), from take i, measurable_compl (H i),
have measurable (-(⋃ i, -(s i))), from measurable_compl (measurable_cUnion this),
show measurable (⋂ i, s i), by rewrite Inter_eq_comp_Union_comp; apply this
theorem measurable_union {s t : set X} (Hs : measurable s) (Ht : measurable t) :
measurable (s ∪ t) :=
have ∀ i, measurable (bin_ext s t i), by intro i; cases i; exact Hs; exact Ht,
show measurable (s ∪ t), by rewrite -Union_bin_ext; exact measurable_cUnion this
theorem measurable_inter {s t : set X} (Hs : measurable s) (Ht : measurable t) :
measurable (s ∩ t) :=
have ∀ i, measurable (bin_ext s t i), by intro i; cases i; exact Hs; exact Ht,
show measurable (s ∩ t), by rewrite -Inter_bin_ext; exact measurable_cInter this
theorem measurable_diff {s t : set X} (Hs : measurable s) (Ht : measurable t) :
measurable (s \ t) :=
measurable_inter Hs (measurable_compl Ht)
theorem measurable_insert {x : X} {s : set X} (Hx : measurable '{x}) (Hs : measurable s) :
measurable (insert x s) :=
!insert_eq⁻¹ ▸ measurable_union Hx Hs
end measure_theory
/- Measurable functions -/
namespace measure_theory
open sigma_algebra function
variables {X Y Z : Type} {M : sigma_algebra X} {N : sigma_algebra Y} {L : sigma_algebra Z}
definition measurable_fun (f : X → Y) (M : sigma_algebra X) (N : sigma_algebra Y) :=
∀ ⦃s⦄, s ∈ sets Y → f '- s ∈ sets X
theorem measurable_fun_id : measurable_fun (@id X) M M :=
take s, suppose s ∈ sets X, this
theorem measurable_fun_comp {f : X → Y} {g : Y → Z} (Hf : measurable_fun f M N)
(Hg : measurable_fun g N L) :
measurable_fun (g ∘ f) M L :=
take s, assume Hs, Hf (Hg Hs)
section
open classical
theorem measurable_fun_const (c : Y) :
measurable_fun (λ x : X, c) M N :=
take s, assume Hs,
if cs : c ∈ s then
have (λx, c) '- s = @univ X, from eq_univ_of_forall (take x, mem_preimage cs),
by rewrite this; apply measurable_univ
else
have (λx, c) '- s = (∅ : set X),
from eq_empty_of_forall_not_mem (take x, assume H, cs (mem_of_mem_preimage H)),
by rewrite this; apply measurable_empty
end
end measure_theory
/- Properties of sigma algebras -/
namespace sigma_algebra
open measure_theory
variables {X : Type}
protected theorem eq {M N : sigma_algebra X} (H : @sets X M = @sets X N) :
M = N :=
by cases M; cases N; cases H; apply rfl
/- sigma algebra generated by a set -/
inductive sets_generated_by (G : set (set X)) : set X → Prop :=
| generators_mem : ∀ ⦃s : set X⦄, s ∈ G → sets_generated_by G s
| univ_mem : sets_generated_by G univ
| comp_mem : ∀ ⦃s : set X⦄, sets_generated_by G s → sets_generated_by G (-s)
| cUnion_mem : ∀ ⦃s : ℕ → set X⦄, (∀ i, sets_generated_by G (s i)) →
sets_generated_by G (⋃ i, s i)
protected definition generated_by {X : Type} (G : set (set X)) : sigma_algebra X :=
⦃sigma_algebra,
sets := sets_generated_by G,
univ_mem_sets := sets_generated_by.univ_mem G,
comp_mem_sets := sets_generated_by.comp_mem ,
cUnion_mem_sets := sets_generated_by.cUnion_mem ⦄
theorem sets_generated_by_initial {G : set (set X)} {M : sigma_algebra X} (H : G ⊆ @sets _ M) :
sets_generated_by G ⊆ @sets _ M :=
begin
intro s Hs,
induction Hs with s sG s Hs ssX s Hs sisX,
{exact H sG},
{exact measurable_univ},
{exact measurable_compl ssX},
exact measurable_cUnion sisX
end
theorem measurable_generated_by {G : set (set X)} :
∀₀ s ∈ G, @measurable _ (sigma_algebra.generated_by G) s :=
λ s H, sets_generated_by.generators_mem H
section
variables {Y : Type} {M : sigma_algebra X}
theorem measurable_fun_generated_by (f : X → Y) (G : set (set Y))
(Hg : ∀₀ g ∈ G, f '- g ∈ sets X) : measurable_fun f M (sigma_algebra.generated_by G) :=
begin
intro A HA,
induction HA with Hg A s setsG pre s' HsetsG HsetsG',
exact Hg A,
exact measurable_univ,
rewrite [preimage_compl]; exact measurable_compl pre,
rewrite [preimage_Union]; exact measurable_cUnion HsetsG'
end
end
/- The collection of sigma algebras forms a complete lattice. -/
protected definition le (M N : sigma_algebra X) : Prop := @sets _ M ⊆ @sets _ N
definition sigma_algebra_has_le [instance] :
has_le (sigma_algebra X) :=
has_le.mk sigma_algebra.le
protected theorem le_refl (M : sigma_algebra X) : M ≤ M := subset.refl (@sets _ M)
protected theorem le_trans (M N L : sigma_algebra X) : M ≤ N → N ≤ L → M ≤ L :=
assume H1, assume H2,
subset.trans H1 H2
protected theorem le_antisymm (M N : sigma_algebra X) : M ≤ N → N ≤ M → M = N :=
assume H1, assume H2,
sigma_algebra.eq (subset.antisymm H1 H2)
protected theorem generated_by_initial {G : set (set X)} {M : sigma_algebra X} (H : G ⊆ @sets X M) :
sigma_algebra.generated_by G ≤ M :=
sets_generated_by_initial H
protected definition inf (M N : sigma_algebra X) : sigma_algebra X :=
⦃sigma_algebra,
sets := @sets X M ∩ @sets X N,
univ_mem_sets := abstract and.intro (@measurable_univ X M) (@measurable_univ X N) end,
comp_mem_sets := abstract take s, assume Hs, and.intro
(@measurable_compl X M s (and.elim_left Hs))
(@measurable_compl X N s (and.elim_right Hs)) end,
cUnion_mem_sets := abstract take s, assume Hs, and.intro
(@measurable_cUnion X M s (λ i, and.elim_left (Hs i)))
(@measurable_cUnion X N s (λ i, and.elim_right (Hs i))) end⦄
protected theorem inf_le_left (M N : sigma_algebra X) : sigma_algebra.inf M N ≤ M :=
λ s, !inter_subset_left
protected theorem inf_le_right (M N : sigma_algebra X) : sigma_algebra.inf M N ≤ N :=
λ s, !inter_subset_right
protected theorem le_inf (M N L : sigma_algebra X) (H1 : L ≤ M) (H2 : L ≤ N) :
L ≤ sigma_algebra.inf M N :=
λ s H, and.intro (H1 s H) (H2 s H)
protected definition Inf (MS : set (sigma_algebra X)) : sigma_algebra X :=
⦃sigma_algebra,
sets := ⋂ M ∈ MS, @sets _ M,
univ_mem_sets := abstract take M, assume HM, @measurable_univ X M end,
comp_mem_sets := abstract take s, assume Hs, take M, assume HM,
measurable_compl (Hs M HM) end,
cUnion_mem_sets := abstract take s, assume Hs, take M, assume HM,
measurable_cUnion (λ i, Hs i M HM) end
⦄
protected theorem Inf_le {M : sigma_algebra X} {MS : set (sigma_algebra X)} (MMS : M ∈ MS) :
sigma_algebra.Inf MS ≤ M :=
bInter_subset_of_mem MMS
protected theorem le_Inf {M : sigma_algebra X} {MS : set (sigma_algebra X)} (H : ∀₀ N ∈ MS, M ≤ N) :
M ≤ sigma_algebra.Inf MS :=
take s, assume Hs : s ∈ @sets _ M,
take N, assume NMS : N ∈ MS,
show s ∈ @sets _ N, from H NMS s Hs
protected definition sup (M N : sigma_algebra X) : sigma_algebra X :=
sigma_algebra.generated_by (@sets _ M ∪ @sets _ N)
protected theorem le_sup_left (M N : sigma_algebra X) : M ≤ sigma_algebra.sup M N :=
take s, assume Hs : s ∈ @sets _ M,
measurable_generated_by (or.inl Hs)
protected theorem le_sup_right (M N : sigma_algebra X) : N ≤ sigma_algebra.sup M N :=
take s, assume Hs : s ∈ @sets _ N,
measurable_generated_by (or.inr Hs)
protected theorem sup_le {M N L : sigma_algebra X} (H1 : M ≤ L) (H2 : N ≤ L) :
sigma_algebra.sup M N ≤ L :=
have @sets _ M ∪ @sets _ N ⊆ @sets _ L, from union_subset H1 H2,
sets_generated_by_initial this
protected definition Sup (MS : set (sigma_algebra X)) : sigma_algebra X :=
sigma_algebra.generated_by (⋃ M ∈ MS, @sets _ M)
protected theorem le_Sup {M : sigma_algebra X} {MS : set (sigma_algebra X)} (MMS : M ∈ MS) :
M ≤ sigma_algebra.Sup MS :=
take s, assume Hs : s ∈ @sets _ M,
measurable_generated_by (mem_bUnion MMS Hs)
protected theorem Sup_le {N : sigma_algebra X} {MS : set (sigma_algebra X)} (H : ∀₀ M ∈ MS, M ≤ N) :
sigma_algebra.Sup MS ≤ N :=
have (⋃ M ∈ MS, @sets _ M) ⊆ @sets _ N, from bUnion_subset H,
sets_generated_by_initial this
protected definition complete_lattice [trans_instance] :
complete_lattice (sigma_algebra X) :=
⦃complete_lattice,
le := sigma_algebra.le,
le_refl := sigma_algebra.le_refl,
le_trans := sigma_algebra.le_trans,
le_antisymm := sigma_algebra.le_antisymm,
inf := sigma_algebra.inf,
sup := sigma_algebra.sup,
inf_le_left := sigma_algebra.inf_le_left,
inf_le_right := sigma_algebra.inf_le_right,
le_inf := sigma_algebra.le_inf,
le_sup_left := sigma_algebra.le_sup_left,
le_sup_right := sigma_algebra.le_sup_right,
sup_le := @sigma_algebra.sup_le X,
Inf := sigma_algebra.Inf,
Sup := sigma_algebra.Sup,
Inf_le := @sigma_algebra.Inf_le X,
le_Inf := @sigma_algebra.le_Inf X,
le_Sup := @sigma_algebra.le_Sup X,
Sup_le := @sigma_algebra.Sup_le X⦄
end sigma_algebra
/- Borel sets -/
namespace measure_theory
section
open topology
variables (X : Type) [topology X]
definition borel_algebra : sigma_algebra X :=
sigma_algebra.generated_by (opens X)
variable {X}
definition borel (s : set X) : Prop := @measurable _ (borel_algebra X) s
theorem borel_of_Open {s : set X} (H : Open s) : borel s :=
sigma_algebra.measurable_generated_by H
theorem borel_of_closed {s : set X} (H : closed s) : borel s :=
have borel (-s), from borel_of_Open H,
@measurable_of_measurable_compl _ (borel_algebra X) _ this
end
/- borel functions -/
section
open topology function
variables {X Y Z : Type} [topology X] [topology Y] [topology Z]
definition borel_fun (f : X → Y) := ∀ ⦃s⦄, Open s → borel (f '- s)
theorem borel_fun_id : borel_fun (@id X) := λ s Os, borel_of_Open Os
theorem borel_fun_of_continuous {f : X → Y} (H : continuous f) : borel_fun f :=
λ s Os, borel_of_Open (H Os)
theorem borel_fun_const (c : Y) : borel_fun (λ x : X, c) :=
borel_fun_of_continuous (continuous_const c)
theorem measurable_fun_of_borel_fun {f : X → Y} (H : borel_fun f) :
measurable_fun f (borel_algebra X) (borel_algebra Y) :=
sigma_algebra.measurable_fun_generated_by f (opens Y) H
theorem borel_fun_of_measurable_fun {f : X → Y}
(H : measurable_fun f (borel_algebra X) (borel_algebra Y)) :
borel_fun f :=
λ s Os, H (borel_of_Open Os)
theorem borel_fun_iff (f : X → Y) :
borel_fun f ↔ measurable_fun f (borel_algebra X) (borel_algebra Y) :=
iff.intro measurable_fun_of_borel_fun borel_fun_of_measurable_fun
theorem borel_fun_comp {f : X → Y} {g : Y → Z} (Hf : borel_fun f) (Hg : borel_fun g) :
borel_fun (g ∘ f) :=
λ s Os, measurable_fun_of_borel_fun Hf (Hg Os)
end
end measure_theory
|
d82f131ad328a3b39222de1150c0ca472a776944
|
4d78f6fde72627abc31dd386af0c26b8737db543
|
/MDK-ARM/Project.uvguix.Lean
|
8e9d0205240afba3488bd07afc460b3a4e04293b
|
[
"BSD-2-Clause"
] |
permissive
|
ljprediger/TP_Final_CursoDSP2015
|
b39e30fe2a13bf08dfc484cb1e413e9903b92b76
|
4c80768ef309ed9cc646dd787274f32d84c74b49
|
refs/heads/master
| 1,610,266,559,346
| 1,448,658,586,000
| 1,448,658,586,000
| 46,999,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 136,912
|
lean
|
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<ProjectGui xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="project_guix.xsd">
<SchemaVersion>-5.1</SchemaVersion>
<Header>### uVision Project, (C) Keil Software</Header>
<ViewPool/>
<SECTreeCtrl>
<View>
<WinId>38003</WinId>
<ViewName>Registers</ViewName>
<TableColWidths>115 281</TableColWidths>
</View>
<View>
<WinId>346</WinId>
<ViewName>Code Coverage</ViewName>
<TableColWidths>665 160</TableColWidths>
</View>
<View>
<WinId>204</WinId>
<ViewName>Performance Analyzer</ViewName>
<TableColWidths>825</TableColWidths>
</View>
</SECTreeCtrl>
<TreeListPane>
<View>
<WinId>1506</WinId>
<ViewName>Symbols</ViewName>
<UserString></UserString>
<TableColWidths>56 56 56</TableColWidths>
</View>
<View>
<WinId>1936</WinId>
<ViewName>Watch 1</ViewName>
<UserString></UserString>
<TableColWidths>56 56 56</TableColWidths>
</View>
<View>
<WinId>1937</WinId>
<ViewName>Watch 2</ViewName>
<UserString></UserString>
<TableColWidths>56 56 56</TableColWidths>
</View>
<View>
<WinId>1935</WinId>
<ViewName>Call Stack + Locals</ViewName>
<UserString></UserString>
<TableColWidths>56 56 56</TableColWidths>
</View>
<View>
<WinId>2506</WinId>
<ViewName>Trace Data</ViewName>
<UserString></UserString>
<TableColWidths>75 135 130 95 70 230 200 150</TableColWidths>
</View>
</TreeListPane>
<WindowSettings>
<LogicAnalizer>
<ShowLACursor>0</ShowLACursor>
<ShowSignalInfo>0</ShowSignalInfo>
<ShowCycles>0</ShowCycles>
<LeftSideBarSize>50</LeftSideBarSize>
<TimeBaseIndex>16</TimeBaseIndex>
</LogicAnalizer>
</WindowSettings>
<WinLayoutEx>
<sActiveDebugView></sActiveDebugView>
<WindowPosition>
<length>44</length>
<flags>2</flags>
<showCmd>3</showCmd>
<MinPosition>
<xPos>-32000</xPos>
<yPos>-32000</yPos>
</MinPosition>
<MaxPosition>
<xPos>-1</xPos>
<yPos>-1</yPos>
</MaxPosition>
<NormalPosition>
<Top>78</Top>
<Left>78</Left>
<Right>1103</Right>
<Bottom>603</Bottom>
</NormalPosition>
</WindowPosition>
<MDIClientArea>
<RegID>0</RegID>
<MDITabState>
<Len>60</Len>
<Data>010000000400000001000000010000000100000001000000000000000200000000000000010000000100000000000000280000002800000000000000</Data>
</MDITabState>
</MDIClientArea>
<ViewEx>
<ViewType>0</ViewType>
<ViewName>Build</ViewName>
<Window>
<RegID>-1</RegID>
<PaneID>-1</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>AE0000004F000000F1030000B3000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>AE00000066000000F1030000CA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1005</RegID>
<PaneID>1005</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>030000006600000032010000A7010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>109</RegID>
<PaneID>109</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>030000006600000032010000A7010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000001F010000F2010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1465</RegID>
<PaneID>1465</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1466</RegID>
<PaneID>1466</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1467</RegID>
<PaneID>1467</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1468</RegID>
<PaneID>1468</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1506</RegID>
<PaneID>1506</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>16384</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1913</RegID>
<PaneID>1913</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B100000066000000EE0300009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1935</RegID>
<PaneID>1935</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>32768</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1936</RegID>
<PaneID>1936</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1937</RegID>
<PaneID>1937</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1939</RegID>
<PaneID>1939</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1940</RegID>
<PaneID>1940</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1941</RegID>
<PaneID>1941</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1942</RegID>
<PaneID>1942</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>195</RegID>
<PaneID>195</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>030000006600000032010000A7010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000001F010000F2010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>196</RegID>
<PaneID>196</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>030000006600000032010000A7010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000001F010000F2010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>197</RegID>
<PaneID>197</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>32768</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>03000000DB0100005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>198</RegID>
<PaneID>198</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>32768</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>000000005E010000F1030000D3010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>199</RegID>
<PaneID>199</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>03000000DB0100005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>203</RegID>
<PaneID>203</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B100000066000000EE0300009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>204</RegID>
<PaneID>204</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B100000066000000EE0300009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>221</RegID>
<PaneID>221</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>00000000000000000000000000000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>2506</RegID>
<PaneID>2506</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>2507</RegID>
<PaneID>2507</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>343</RegID>
<PaneID>343</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B100000066000000EE0300009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>346</RegID>
<PaneID>346</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B100000066000000EE0300009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35824</RegID>
<PaneID>35824</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B100000066000000EE0300009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35885</RegID>
<PaneID>35885</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35886</RegID>
<PaneID>35886</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35887</RegID>
<PaneID>35887</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35888</RegID>
<PaneID>35888</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35889</RegID>
<PaneID>35889</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35890</RegID>
<PaneID>35890</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35891</RegID>
<PaneID>35891</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35892</RegID>
<PaneID>35892</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35893</RegID>
<PaneID>35893</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35894</RegID>
<PaneID>35894</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35895</RegID>
<PaneID>35895</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35896</RegID>
<PaneID>35896</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35897</RegID>
<PaneID>35897</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35898</RegID>
<PaneID>35898</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35899</RegID>
<PaneID>35899</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35900</RegID>
<PaneID>35900</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35901</RegID>
<PaneID>35901</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35902</RegID>
<PaneID>35902</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35903</RegID>
<PaneID>35903</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35904</RegID>
<PaneID>35904</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35905</RegID>
<PaneID>35905</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>38003</RegID>
<PaneID>38003</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>030000006600000093010000ED010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000001F010000F2010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>38007</RegID>
<PaneID>38007</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>03000000DB0100005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000004E020000CE000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>436</RegID>
<PaneID>436</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>03000000DB0100005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D0000001F010000F2010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>437</RegID>
<PaneID>437</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>440</RegID>
<PaneID>440</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000075010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>560000006D00000000010000FA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59392</RegID>
<PaneID>59392</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>940</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0000000000000000B70300001C000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59393</RegID>
<PaneID>0</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>00000000AE02000056050000C1020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59399</RegID>
<PaneID>59399</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>463</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>1</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>000000001C000000DA01000038000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59400</RegID>
<PaneID>59400</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>612</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>2</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>00000000380000006F02000054000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<DockMan>
<Len>2619</Len>
<Data>000000000B000000000000000020000000000000FFFFFFFFFFFFFFFFAE000000B3000000F1030000B7000000000000000100000004000000010000000000000000000000FFFFFFFF06000000CB00000057010000CC000000F08B00005A01000079070000FFFF02000B004354616262656450616E650020000000000000AE00000066000000F1030000CA000000AE0000004F000000F1030000B30000000000000040280046060000000B446973617373656D626C7900000000CB00000001000000FFFFFFFFFFFFFFFF14506572666F726D616E636520416E616C797A6572000000005701000001000000FFFFFFFFFFFFFFFF14506572666F726D616E636520416E616C797A657200000000CC00000001000000FFFFFFFFFFFFFFFF0E4C6F67696320416E616C797A657200000000F08B000001000000FFFFFFFFFFFFFFFF0D436F646520436F766572616765000000005A01000001000000FFFFFFFFFFFFFFFF11496E737472756374696F6E205472616365000000007907000001000000FFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000000000000000000000001000000FFFFFFFFCB00000001000000FFFFFFFFCB000000000000000040000000000000FFFFFFFFFFFFFFFF430300004F000000470300006B010000000000000200000004000000010000000000000000000000FFFFFFFF17000000E2050000CA0900002D8C00002E8C00002F8C0000308C0000318C0000328C0000338C0000348C0000358C0000368C0000378C0000388C0000398C00003A8C00003B8C00003C8C00003D8C00003E8C00003F8C0000408C0000418C0000018000400000000000004703000066000000F103000082010000470300004F000000F10300006B0100000000000040410046170000000753796D626F6C7300000000E205000001000000FFFFFFFFFFFFFFFF0A5472616365204461746100000000CA09000001000000FFFFFFFFFFFFFFFF00000000002D8C000001000000FFFFFFFFFFFFFFFF00000000002E8C000001000000FFFFFFFFFFFFFFFF00000000002F8C000001000000FFFFFFFFFFFFFFFF0000000000308C000001000000FFFFFFFFFFFFFFFF0000000000318C000001000000FFFFFFFFFFFFFFFF0000000000328C000001000000FFFFFFFFFFFFFFFF0000000000338C000001000000FFFFFFFFFFFFFFFF0000000000348C000001000000FFFFFFFFFFFFFFFF0000000000358C000001000000FFFFFFFFFFFFFFFF0000000000368C000001000000FFFFFFFFFFFFFFFF0000000000378C000001000000FFFFFFFFFFFFFFFF0000000000388C000001000000FFFFFFFFFFFFFFFF0000000000398C000001000000FFFFFFFFFFFFFFFF00000000003A8C000001000000FFFFFFFFFFFFFFFF00000000003B8C000001000000FFFFFFFFFFFFFFFF00000000003C8C000001000000FFFFFFFFFFFFFFFF00000000003D8C000001000000FFFFFFFFFFFFFFFF00000000003E8C000001000000FFFFFFFFFFFFFFFF00000000003F8C000001000000FFFFFFFFFFFFFFFF0000000000408C000001000000FFFFFFFFFFFFFFFF0000000000418C000001000000FFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000000000000000000000001000000FFFFFFFFE205000001000000FFFFFFFFE2050000000000000010000001000000FFFFFFFFFFFFFFFF350100004F00000039010000C0010000010000000200001004000000010000009DFFFFFFE8040000FFFFFFFF05000000ED0300006D000000C3000000C400000073940000018000100000010000000000000066000000960100001D020000000000004F00000035010000C00100000000000040410056050000000750726F6A65637401000000ED03000001000000FFFFFFFFFFFFFFFF05426F6F6B73010000006D00000001000000FFFFFFFFFFFFFFFF0946756E6374696F6E7301000000C300000001000000FFFFFFFFFFFFFFFF0954656D706C6174657301000000C400000001000000FFFFFFFFFFFFFFFF09526567697374657273000000007394000001000000FFFFFFFFFFFFFFFF00000000000000000000000000000000000000000000000001000000FFFFFFFFED03000001000000FFFFFFFFED030000000000000080000000000000FFFFFFFFFFFFFFFF000000005A010000F10300005E01000000000000010000000400000001000000000000000000000000000000000000000000000001000000C6000000FFFFFFFF0E0000008F070000930700009407000095070000960700009007000091070000B5010000B8010000B9050000BA050000BB050000BC050000CB090000018000800000000000000000000075010000F1030000EA010000000000005E010000F1030000D301000000000000404100460E0000001343616C6C20537461636B202B204C6F63616C73000000008F07000001000000FFFFFFFFFFFFFFFF07554
15254202331000000009307000001000000FFFFFFFFFFFFFFFF0755415254202332000000009407000001000000FFFFFFFFFFFFFFFF0755415254202333000000009507000001000000FFFFFFFFFFFFFFFF15446562756720287072696E74662920566965776572000000009607000001000000FFFFFFFFFFFFFFFF0757617463682031000000009007000001000000FFFFFFFFFFFFFFFF0757617463682032000000009107000001000000FFFFFFFFFFFFFFFF10547261636520457863657074696F6E7300000000B501000001000000FFFFFFFFFFFFFFFF0E4576656E7420436F756E7465727300000000B801000001000000FFFFFFFFFFFFFFFF084D656D6F7279203100000000B905000001000000FFFFFFFFFFFFFFFF084D656D6F7279203200000000BA05000001000000FFFFFFFFFFFFFFFF084D656D6F7279203300000000BB05000001000000FFFFFFFFFFFFFFFF084D656D6F7279203400000000BC05000001000000FFFFFFFFFFFFFFFF105472616365204E617669676174696F6E00000000CB09000001000000FFFFFFFFFFFFFFFFFFFFFFFF0000000001000000000000000000000001000000FFFFFFFFF90100005E010000FD010000D301000000000000020000000400000000000000000000000000000000000000000000000000000002000000C6000000FFFFFFFF8F07000001000000FFFFFFFF8F07000001000000C6000000000000000080000001000000FFFFFFFFFFFFFFFF00000000C001000056050000C4010000010000000100001004000000010000000BFEFFFF56000000FFFFFFFF04000000C5000000C7000000B40100007794000001800080000001000000000000002102000056050000C502000000000000C401000056050000AE0200000000000040820056040000000C4275696C64204F757470757401000000C500000001000000FFFFFFFFFFFFFFFF0D46696E6420496E2046696C657301000000C700000001000000FFFFFFFFFFFFFFFF0A4572726F72204C69737401000000B401000001000000FFFFFFFFFFFFFFFF0742726F77736572010000007794000001000000FFFFFFFFFFFFFFFF00000000000000000000000000000000000000000000000001000000FFFFFFFFC500000001000000FFFFFFFFC5000000000000000000000000000000</Data>
</DockMan>
<ToolBar>
<RegID>59392</RegID>
<Name>File</Name>
<Buttons>
<Len>2021</Len>
<Data>00200000010000002800FFFF01001100434D4643546F6F6C426172427574746F6E00E100000000000000000000000000000000000000000000000100000001000000018001E100000000000001000000000000000000000000000000000100000001000000018003E1000000000400020000000000000000000000000000000001000000010000000180CD7F0000000000000300000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018023E100000000040004000000000000000000000000000000000100000001000000018022E100000000040005000000000000000000000000000000000100000001000000018025E10000000004000600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001802BE10000000004000700000000000000000000000000000000010000000100000001802CE10000000004000800000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001807A8A0000000004000900000000000000000000000000000000010000000100000001807B8A0000000004000A00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180D3B00000000004000B000000000000000000000000000000000100000001000000018015B10000000004000C0000000000000000000000000000000001000000010000000180F4B00000000004000D000000000000000000000000000000000100000001000000018036B10000000004000E00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FF88000000000400460000000000000000000000000000000001000000010000000180FE880000000004004500000000000000000000000000000000010000000100000001800B810000000004001300000000000000000000000000000000010000000100000001800C810000000004001400000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180F0880000020000000F000000000000000000000000000000000100000001000000FFFF0100120043555646696E64436F6D626F427574746F6EE8030000000004000000000000000000000000000000000000010000000100000096000000020020500000000003726D739600000000000000020003726D730361726D0000000000000000018024E10000000000001100000000000000000000000000000000010000000100000001800A810000000004001200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018022800000020000001500000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C488000000000400160000000000000000000000000000000001000000010000000180C988000000000400180000000000000000000000000000000001000000010000000180C788000000000000190000000000000000000000000000000001000000010000000180C8880000000000001700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000FFFF01001500434D4643546F6F6C4261724D656E75427574746F6E4C010000020001001A0000000F2650726F6A6563742057696E646F77000000000000000000000000010000000100000000000000000000000100000008002880DD880000000000001A0000000750726F6A656374000000000000000000000000010000000100000000000000000000000100000000002880DC8B0000000000003A00000005426F6F6B73000000000000000000000000010000000100000000000000000000000100000000002880E18B0000000000003B0000000946756E6374696F6E73000000000000000000000000010000000100000000000000000000000100000000002880E28B000000000000400000000954656D706C6174657300000000000000000000000001000000010000000000000000000000010000000000288018890000000000003D0000000E536F757263652042726F77736572000000000000000000000000010000000100000000000000000000000100000000002880000000000
0000400FFFFFFFF00000000000000000001000000000000000100000000000000000000000100000000002880D988000000000000390000000C4275696C64204F7574707574000000000000000000000000010000000100000000000000000000000100000000002880E38B000000000000410000000B46696E64204F75747075740000000000000000000000000100000001000000000000000000000001000000000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FB7F0000000000001B000000000000000000000000000000000100000001000000000000000446696C65AC030000</Data>
</Buttons>
<OriginalItems>
<Len>1423</Len>
<Data>2800FFFF01001100434D4643546F6F6C426172427574746F6E00E1000000000000FFFFFFFF000100000000000000010000000000000001000000018001E1000000000000FFFFFFFF000100000000000000010000000000000001000000018003E1000000000000FFFFFFFF0001000000000000000100000000000000010000000180CD7F000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF000000000000000000010000000000000001000000018023E1000000000000FFFFFFFF000100000000000000010000000000000001000000018022E1000000000000FFFFFFFF000100000000000000010000000000000001000000018025E1000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001802BE1000000000000FFFFFFFF00010000000000000001000000000000000100000001802CE1000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001807A8A000000000000FFFFFFFF00010000000000000001000000000000000100000001807B8A000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180D3B0000000000000FFFFFFFF000100000000000000010000000000000001000000018015B1000000000000FFFFFFFF0001000000000000000100000000000000010000000180F4B0000000000000FFFFFFFF000100000000000000010000000000000001000000018036B1000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180FF88000000000000FFFFFFFF0001000000000000000100000000000000010000000180FE88000000000000FFFFFFFF00010000000000000001000000000000000100000001800B81000000000000FFFFFFFF00010000000000000001000000000000000100000001800C81000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180F088000000000000FFFFFFFF0001000000000000000100000000000000010000000180EE7F000000000000FFFFFFFF000100000000000000010000000000000001000000018024E1000000000000FFFFFFFF00010000000000000001000000000000000100000001800A81000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001802280000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180C488000000000000FFFFFFFF0001000000000000000100000000000000010000000180C988000000000000FFFFFFFF0001000000000000000100000000000000010000000180C788000000000000FFFFFFFF0001000000000000000100000000000000010000000180C888000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180DD88000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180FB7F000000000000FFFFFFFF000100000000000000010000000000000001000000</Data>
</OriginalItems>
<OrigResetItems>
<Len>1423</Len>
<Data>2800FFFF01001100434D4643546F6F6C426172427574746F6E00E100000000000000000000000000000000000000000000000100000001000000018001E100000000000001000000000000000000000000000000000100000001000000018003E1000000000000020000000000000000000000000000000001000000010000000180CD7F0000000000000300000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018023E100000000000004000000000000000000000000000000000100000001000000018022E100000000000005000000000000000000000000000000000100000001000000018025E10000000000000600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001802BE10000000000000700000000000000000000000000000000010000000100000001802CE10000000000000800000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001807A8A0000000000000900000000000000000000000000000000010000000100000001807B8A0000000000000A00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180D3B00000000000000B000000000000000000000000000000000100000001000000018015B10000000000000C0000000000000000000000000000000001000000010000000180F4B00000000000000D000000000000000000000000000000000100000001000000018036B10000000000000E00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FF880000000000000F0000000000000000000000000000000001000000010000000180FE880000000000001000000000000000000000000000000000010000000100000001800B810000000000001100000000000000000000000000000000010000000100000001800C810000000000001200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180F088000000000000130000000000000000000000000000000001000000010000000180EE7F00000000000014000000000000000000000000000000000100000001000000018024E10000000000001500000000000000000000000000000000010000000100000001800A810000000000001600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018022800000000000001700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C488000000000000180000000000000000000000000000000001000000010000000180C988000000000000190000000000000000000000000000000001000000010000000180C7880000000000001A0000000000000000000000000000000001000000010000000180C8880000000000001B00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180DD880000000000001C00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FB7F0000000000001D000000000000000000000000000000000100000001000000</Data>
</OrigResetItems>
</ToolBar>
<ToolBar>
<RegID>59399</RegID>
<Name>Build</Name>
<Buttons>
<Len>692</Len>
<Data>00200000010000001000FFFF01001100434D4643546F6F6C426172427574746F6ECF7F0000000004001C0000000000000000000000000000000001000000010000000180D07F0000000000001D000000000000000000000000000000000100000001000000018030800000000000001E00000000000000000000000000000000010000000100000001809E8A0000000000001F0000000000000000000000000000000001000000010000000180D17F0000000004002000000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001804C8A0000000000002100000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000FFFF01001900434D4643546F6F6C426172436F6D626F426F78427574746F6EBA00000000000000000000000000000000000000000000000001000000010000009600000003002050000000000F53544D3332463430312D444953434F960000000000000001000F53544D3332463430312D444953434F000000000180EB880000000000002200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C07F000000000000230000000000000000000000000000000001000000010000000180B08A000000000400240000000000000000000000000000000001000000010000000180A8010000000000004E00000000000000000000000000000000010000000100000001807202000000000000530000000000000000000000000000000001000000010000000180BE010000000000005000000000000000000000000000000000010000000100000000000000054275696C64CF010000</Data>
</Buttons>
<OriginalItems>
<Len>583</Len>
<Data>1000FFFF01001100434D4643546F6F6C426172427574746F6ECF7F000000000000FFFFFFFF0001000000000000000100000000000000010000000180D07F000000000000FFFFFFFF00010000000000000001000000000000000100000001803080000000000000FFFFFFFF00010000000000000001000000000000000100000001809E8A000000000000FFFFFFFF0001000000000000000100000000000000010000000180D17F000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001804C8A000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001806680000000000000FFFFFFFF0001000000000000000100000000000000010000000180EB88000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180C07F000000000000FFFFFFFF0001000000000000000100000000000000010000000180B08A000000000000FFFFFFFF0001000000000000000100000000000000010000000180A801000000000000FFFFFFFF00010000000000000001000000000000000100000001807202000000000000FFFFFFFF0001000000000000000100000000000000010000000180BE01000000000000FFFFFFFF000100000000000000010000000000000001000000</Data>
</OriginalItems>
<OrigResetItems>
<Len>583</Len>
<Data>1000FFFF01001100434D4643546F6F6C426172427574746F6ECF7F000000000000000000000000000000000000000000000001000000010000000180D07F00000000000001000000000000000000000000000000000100000001000000018030800000000000000200000000000000000000000000000000010000000100000001809E8A000000000000030000000000000000000000000000000001000000010000000180D17F0000000000000400000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001804C8A0000000000000500000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001806680000000000000060000000000000000000000000000000001000000010000000180EB880000000000000700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C07F000000000000080000000000000000000000000000000001000000010000000180B08A000000000000090000000000000000000000000000000001000000010000000180A8010000000000000A000000000000000000000000000000000100000001000000018072020000000000000B0000000000000000000000000000000001000000010000000180BE010000000000000C000000000000000000000000000000000100000001000000</Data>
</OrigResetItems>
</ToolBar>
<ToolBar>
<RegID>59400</RegID>
<Name>Debug</Name>
<Buttons>
<Len>2247</Len>
<Data>00200000000000001900FFFF01001100434D4643546F6F6C426172427574746F6ECC880000000000002500000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018017800000000000002600000000000000000000000000000000010000000100000001801D800000000000002700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001801A800000000000002800000000000000000000000000000000010000000100000001801B80000000000000290000000000000000000000000000000001000000010000000180E57F0000000000002A00000000000000000000000000000000010000000100000001801C800000000000002B00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018000890000000000002C00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180E48B0000000000002D0000000000000000000000000000000001000000010000000180F07F0000000000002E0000000000000000000000000000000001000000010000000180E8880000000000003700000000000000000000000000000000010000000100000001803B010000000000002F0000000000000000000000000000000001000000010000000180BB8A00000000000030000000000000000000000000000000000100000001000000FFFF01001500434D4643546F6F6C4261724D656E75427574746F6E0E01000000000000310000000D57617463682057696E646F7773000000000000000000000000010000000100000000000000000000000100000002001380D88B00000000000031000000085761746368202631000000000000000000000000010000000100000000000000000000000100000000001380D98B000000000000310000000857617463682026320000000000000000000000000100000001000000000000000000000001000000000013800F01000000000000320000000E4D656D6F72792057696E646F7773000000000000000000000000010000000100000000000000000000000100000004001380D28B00000000000032000000094D656D6F7279202631000000000000000000000000010000000100000000000000000000000100000000001380D38B00000000000032000000094D656D6F7279202632000000000000000000000000010000000100000000000000000000000100000000001380D48B00000000000032000000094D656D6F7279202633000000000000000000000000010000000100000000000000000000000100000000001380D58B00000000000032000000094D656D6F72792026340000000000000000000000000100000001000000000000000000000001000000000013801001000000000000330000000E53657269616C2057696E646F777300000000000000000000000001000000010000000000000000000000010000000400138093070000000000003300000008554152542023263100000000000000000000000001000000010000000000000000000000010000000000138094070000000000003300000008554152542023263200000000000000000000000001000000010000000000000000000000010000000000138095070000000000003300000008554152542023263300000000000000000000000001000000010000000000000000000000010000000000138096070000000000003300000015446562756720287072696E746629205669657765720000000000000000000000000100000001000000000000000000000001000000000013803C010000000000003400000010416E616C797369732057696E646F7773000000000000000000000000010000000100000000000000000000000100000003001380658A000000000000340000000F264C6F67696320416E616C797A6572000000000000000000000000010000000100000000000000000000000100000000001380DC7F0000000000003E0000001526506572666F726D616E636520416E616C797A6572000000000000000000000000010000000100000000000000000000000100000000001380E788000000000000380000000E26436F646520436F76657261676500000000000000000000000001000000010000000000000000000000010000000000138053010000000000003F0000000D54726163652057696E646F77730000000000000000000000000100000001000000000000000000000001000000010013805401000000000000FFFFFFFF115472616365204D656E7
520416E63686F720100000000000000010000000000000001000000000000000000000001000000000013802901000000000000350000001553797374656D205669657765722057696E646F77730000000000000000000000000100000001000000000000000000000001000000010013804B01000000000000FFFFFFFF1453797374656D2056696577657220416E63686F720100000000000000010000000000000001000000000000000000000001000000000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000013800189000000000000360000000F26546F6F6C626F782057696E646F7700000000000000000000000001000000010000000000000000000000010000000300138044C5000000000000FFFFFFFF0E5570646174652057696E646F77730100000000000000010000000000000001000000000000000000000001000000000013800000000000000400FFFFFFFF000000000000000000010000000000000001000000000000000000000001000000000013805B01000000000000FFFFFFFF12546F6F6C626F78204D656E75416E63686F72010000000000000001000000000000000100000000000000000000000100000000000000000005446562756764020000</Data>
</Buttons>
<OriginalItems>
<Len>898</Len>
<Data>1900FFFF01001100434D4643546F6F6C426172427574746F6ECC88000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001801780000000000000FFFFFFFF00010000000000000001000000000000000100000001801D80000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001801A80000000000000FFFFFFFF00010000000000000001000000000000000100000001801B80000000000000FFFFFFFF0001000000000000000100000000000000010000000180E57F000000000000FFFFFFFF00010000000000000001000000000000000100000001801C80000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001800089000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180E48B000000000000FFFFFFFF0001000000000000000100000000000000010000000180F07F000000000000FFFFFFFF0001000000000000000100000000000000010000000180E888000000000000FFFFFFFF00010000000000000001000000000000000100000001803B01000000000000FFFFFFFF0001000000000000000100000000000000010000000180BB8A000000000000FFFFFFFF0001000000000000000100000000000000010000000180D88B000000000000FFFFFFFF0001000000000000000100000000000000010000000180D28B000000000000FFFFFFFF00010000000000000001000000000000000100000001809307000000000000FFFFFFFF0001000000000000000100000000000000010000000180658A000000000000FFFFFFFF0001000000000000000100000000000000010000000180C18A000000000000FFFFFFFF0001000000000000000100000000000000010000000180EE8B000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001800189000000000000FFFFFFFF000100000000000000010000000000000001000000</Data>
</OriginalItems>
<OrigResetItems>
<Len>898</Len>
<Data>1900FFFF01001100434D4643546F6F6C426172427574746F6ECC880000000000000000000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018017800000000000000100000000000000000000000000000000010000000100000001801D800000000000000200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001801A800000000000000300000000000000000000000000000000010000000100000001801B80000000000000040000000000000000000000000000000001000000010000000180E57F0000000000000500000000000000000000000000000000010000000100000001801C800000000000000600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018000890000000000000700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180E48B000000000000080000000000000000000000000000000001000000010000000180F07F000000000000090000000000000000000000000000000001000000010000000180E8880000000000000A00000000000000000000000000000000010000000100000001803B010000000000000B0000000000000000000000000000000001000000010000000180BB8A0000000000000C0000000000000000000000000000000001000000010000000180D88B0000000000000D0000000000000000000000000000000001000000010000000180D28B0000000000000E000000000000000000000000000000000100000001000000018093070000000000000F0000000000000000000000000000000001000000010000000180658A000000000000100000000000000000000000000000000001000000010000000180C18A000000000000110000000000000000000000000000000001000000010000000180EE8B0000000000001200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180018900000000000013000000000000000000000000000000000100000001000000</Data>
</OrigResetItems>
</ToolBar>
<ControlBarsSummary>
<Bars>0</Bars>
<ScreenCX>1366</ScreenCX>
<ScreenCY>768</ScreenCY>
</ControlBarsSummary>
</ViewEx>
<ViewEx>
<ViewType>1</ViewType>
<ViewName>Debug</ViewName>
<Window>
<RegID>-1</RegID>
<PaneID>-1</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4C0100004F00000056050000B3000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>4C0100006600000056050000CA000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1005</RegID>
<PaneID>1005</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000066000000450100001C020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>109</RegID>
<PaneID>109</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000066000000450100001C020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000EB000000BE010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1465</RegID>
<PaneID>1465</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1466</RegID>
<PaneID>1466</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1467</RegID>
<PaneID>1467</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1468</RegID>
<PaneID>1468</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1506</RegID>
<PaneID>1506</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>16384</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1913</RegID>
<PaneID>1913</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4F01000066000000530500009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1935</RegID>
<PaneID>1935</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>32768</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1936</RegID>
<PaneID>1936</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1937</RegID>
<PaneID>1937</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1939</RegID>
<PaneID>1939</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1940</RegID>
<PaneID>1940</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1941</RegID>
<PaneID>1941</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>1942</RegID>
<PaneID>1942</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>195</RegID>
<PaneID>195</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000066000000450100001C020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000EB000000BE010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>196</RegID>
<PaneID>196</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000066000000450100001C020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000EB000000BE010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>197</RegID>
<PaneID>197</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>32768</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000086010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>198</RegID>
<PaneID>198</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>32768</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0000000039020000AB020000AE020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>199</RegID>
<PaneID>199</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000086010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>203</RegID>
<PaneID>203</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4C0100006300000056050000B3000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>204</RegID>
<PaneID>204</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4F01000066000000530500009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>221</RegID>
<PaneID>221</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>00000000000000000000000000000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>2506</RegID>
<PaneID>2506</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>2507</RegID>
<PaneID>2507</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>343</RegID>
<PaneID>343</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4F01000066000000530500009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>346</RegID>
<PaneID>346</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4F01000066000000530500009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35824</RegID>
<PaneID>35824</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4F01000066000000530500009A000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35885</RegID>
<PaneID>35885</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35886</RegID>
<PaneID>35886</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35887</RegID>
<PaneID>35887</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35888</RegID>
<PaneID>35888</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35889</RegID>
<PaneID>35889</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35890</RegID>
<PaneID>35890</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35891</RegID>
<PaneID>35891</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35892</RegID>
<PaneID>35892</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35893</RegID>
<PaneID>35893</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35894</RegID>
<PaneID>35894</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35895</RegID>
<PaneID>35895</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35896</RegID>
<PaneID>35896</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35897</RegID>
<PaneID>35897</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35898</RegID>
<PaneID>35898</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35899</RegID>
<PaneID>35899</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35900</RegID>
<PaneID>35900</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35901</RegID>
<PaneID>35901</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35902</RegID>
<PaneID>35902</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35903</RegID>
<PaneID>35903</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35904</RegID>
<PaneID>35904</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>35905</RegID>
<PaneID>35905</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>4A03000066000000EE03000052010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>38003</RegID>
<PaneID>38003</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000066000000450100001C020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000EB000000BE010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>38007</RegID>
<PaneID>38007</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000086010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>22000000390000001A0200009A000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>436</RegID>
<PaneID>436</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0300000086010000EE030000BA010000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000EB000000BE010000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>437</RegID>
<PaneID>437</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>440</RegID>
<PaneID>440</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>B2020000500200005305000095020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>2200000039000000CC000000C6000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59392</RegID>
<PaneID>59392</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>940</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>0000000000000000B70300001C000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59393</RegID>
<PaneID>0</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>32767</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>4096</RecentFrameAlignment>
<RecentRowIndex>0</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>00000000AE02000056050000C1020000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59399</RegID>
<PaneID>59399</PaneID>
<IsVisible>0</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>463</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>1</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>000000001C000000DA01000038000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<Window>
<RegID>59400</RegID>
<PaneID>59400</PaneID>
<IsVisible>1</IsVisible>
<IsFloating>0</IsFloating>
<IsTabbed>0</IsTabbed>
<IsActivated>0</IsActivated>
<MRUWidth>612</MRUWidth>
<PinState>0</PinState>
<RecentFrameAlignment>8192</RecentFrameAlignment>
<RecentRowIndex>2</RecentRowIndex>
<RectRecentDocked>
<Len>16</Len>
<Data>000000001C0000006F02000038000000</Data>
</RectRecentDocked>
<RectRecentFloat>
<Len>16</Len>
<Data>0A0000000A0000006E0000006E000000</Data>
</RectRecentFloat>
</Window>
<DockMan>
<Len>2618</Len>
<Data>000000000B000000000000000020000001000000FFFFFFFFFFFFFFFF4C010000B300000056050000B7000000010000000100001004000000010000000000000000000000FFFFFFFF06000000CB00000057010000CC000000F08B00005A01000079070000FFFF02000B004354616262656450616E6500200000010000004C0100006600000056050000CA0000004C0100004F00000056050000B30000000000000040280056060000000B446973617373656D626C7901000000CB00000001000000FFFFFFFFFFFFFFFF14506572666F726D616E636520416E616C797A6572000000005701000001000000FFFFFFFFFFFFFFFF14506572666F726D616E636520416E616C797A657200000000CC00000001000000FFFFFFFFFFFFFFFF0E4C6F67696320416E616C797A657200000000F08B000001000000FFFFFFFFFFFFFFFF0D436F646520436F766572616765000000005A01000001000000FFFFFFFFFFFFFFFF11496E737472756374696F6E205472616365000000007907000001000000FFFFFFFFFFFFFFFF00000000000000000000000000000000000000000000000001000000FFFFFFFFCB00000001000000FFFFFFFFCB000000000000000040000000000000FFFFFFFFFFFFFFFF430300004F000000470300006B010000000000000200000004000000010000000000000000000000FFFFFFFF17000000E2050000CA0900002D8C00002E8C00002F8C0000308C0000318C0000328C0000338C0000348C0000358C0000368C0000378C0000388C0000398C00003A8C00003B8C00003C8C00003D8C00003E8C00003F8C0000408C0000418C0000018000400000000000004703000066000000F103000082010000470300004F000000F10300006B0100000000000040410046170000000753796D626F6C7300000000E205000001000000FFFFFFFFFFFFFFFF0A5472616365204461746100000000CA09000001000000FFFFFFFFFFFFFFFF00000000002D8C000001000000FFFFFFFFFFFFFFFF00000000002E8C000001000000FFFFFFFFFFFFFFFF00000000002F8C000001000000FFFFFFFFFFFFFFFF0000000000308C000001000000FFFFFFFFFFFFFFFF0000000000318C000001000000FFFFFFFFFFFFFFFF0000000000328C000001000000FFFFFFFFFFFFFFFF0000000000338C000001000000FFFFFFFFFFFFFFFF0000000000348C000001000000FFFFFFFFFFFFFFFF0000000000358C000001000000FFFFFFFFFFFFFFFF0000000000368C000001000000FFFFFFFFFFFFFFFF0000000000378C000001000000FFFFFFFFFFFFFFFF0000000000388C000001000000FFFFFFFFFFFFFFFF0000000000398C000001000000FFFFFFFFFFFFFFFF00000000003A8C000001000000FFFFFFFFFFFFFFFF00000000003B8C000001000000FFFFFFFFFFFFFFFF00000000003C8C000001000000FFFFFFFFFFFFFFFF00000000003D8C000001000000FFFFFFFFFFFFFFFF00000000003E8C000001000000FFFFFFFFFFFFFFFF00000000003F8C000001000000FFFFFFFFFFFFFFFF0000000000408C000001000000FFFFFFFFFFFFFFFF0000000000418C000001000000FFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000000000000000000000001000000FFFFFFFFE205000001000000FFFFFFFFE2050000000000000010000001000000FFFFFFFFFFFFFFFF480100004F0000004C010000350200000100000002000010040000000100000058FFFFFFA3040000FFFFFFFF05000000ED0300006D000000C3000000C400000073940000018000100000010000000000000066000000480100004C020000000000004F00000048010000350200000000000040410056050000000750726F6A65637401000000ED03000001000000FFFFFFFFFFFFFFFF05426F6F6B73000000006D00000001000000FFFFFFFFFFFFFFFF0946756E6374696F6E7300000000C300000001000000FFFFFFFFFFFFFFFF0954656D706C6174657300000000C400000001000000FFFFFFFFFFFFFFFF09526567697374657273010000007394000001000000FFFFFFFFFFFFFFFF04000000000000000000000000000000000000000000000001000000FFFFFFFFED03000001000000FFFFFFFFED030000000000000080000001000000FFFFFFFFFFFFFFFF0000000035020000560500003902000001000000010000100400000001000000000000000000000000000000000000000000000001000000C6000000FFFFFFFF0E0000008F070000930700009407000095070000960700009007000091070000B5010000B8010000B9050000BA050000BB050000BC050000CB09000001800080000001000000AF0200005002000056050000C5020000AF0200003902000056050000AE02000000000000404100560E0000001343616C6C20537461636B202B204C6F63616C73010000008F07000001000000FFFFFFFFFFFFFFFF07554
15254202331000000009307000001000000FFFFFFFFFFFFFFFF0755415254202332000000009407000001000000FFFFFFFFFFFFFFFF0755415254202333000000009507000001000000FFFFFFFFFFFFFFFF15446562756720287072696E74662920566965776572000000009607000001000000FFFFFFFFFFFFFFFF0757617463682031000000009007000001000000FFFFFFFFFFFFFFFF0757617463682032000000009107000001000000FFFFFFFFFFFFFFFF10547261636520457863657074696F6E7300000000B501000001000000FFFFFFFFFFFFFFFF0E4576656E7420436F756E7465727300000000B801000001000000FFFFFFFFFFFFFFFF084D656D6F7279203101000000B905000001000000FFFFFFFFFFFFFFFF084D656D6F7279203200000000BA05000001000000FFFFFFFFFFFFFFFF084D656D6F7279203300000000BB05000001000000FFFFFFFFFFFFFFFF084D656D6F7279203400000000BC05000001000000FFFFFFFFFFFFFFFF105472616365204E617669676174696F6E00000000CB09000001000000FFFFFFFFFFFFFFFF000000000000000001000000000000000100000001000000FFFFFFFFAB02000039020000AF020000AE02000001000000020000100400000000000000000000000000000000000000000000000000000002000000C6000000FFFFFFFF8F07000001000000FFFFFFFF8F07000001000000C6000000000000000080000000000000FFFFFFFFFFFFFFFF000000006B010000F10300006F010000000000000100000004000000010000000000000000000000FFFFFFFF04000000C5000000C7000000B401000077940000018000800000000000000000000086010000F1030000EA010000000000006F010000F1030000D30100000000000040820046040000000C4275696C64204F757470757400000000C500000001000000FFFFFFFFFFFFFFFF0D46696E6420496E2046696C657300000000C700000001000000FFFFFFFFFFFFFFFF0A4572726F72204C69737400000000B401000001000000FFFFFFFFFFFFFFFF0642726F777365000000007794000001000000FFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000000000000000000000001000000FFFFFFFFC500000001000000FFFFFFFFC5000000000000000000000000000000</Data>
</DockMan>
<ToolBar>
<RegID>59392</RegID>
<Name>File</Name>
<Buttons>
<Len>2002</Len>
<Data>00200000010000002800FFFF01001100434D4643546F6F6C426172427574746F6E00E100000000000000000000000000000000000000000000000100000001000000018001E100000000000001000000000000000000000000000000000100000001000000018003E1000000000000020000000000000000000000000000000001000000010000000180CD7F0000000000000300000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018023E100000000040004000000000000000000000000000000000100000001000000018022E100000000040005000000000000000000000000000000000100000001000000018025E10000000000000600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001802BE10000000004000700000000000000000000000000000000010000000100000001802CE10000000004000800000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001807A8A0000000000000900000000000000000000000000000000010000000100000001807B8A0000000004000A00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180D3B00000000000000B000000000000000000000000000000000100000001000000018015B10000000004000C0000000000000000000000000000000001000000010000000180F4B00000000004000D000000000000000000000000000000000100000001000000018036B10000000004000E00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FF88000000000400460000000000000000000000000000000001000000010000000180FE880000000004004500000000000000000000000000000000010000000100000001800B810000000004001300000000000000000000000000000000010000000100000001800C810000000004001400000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180F0880000020000000F000000000000000000000000000000000100000001000000FFFF0100120043555646696E64436F6D626F427574746F6EE803000000000000000000000000000000000000000000000001000000010000009600000002002050FFFFFFFF0096000000000000000000018024E10000000000001100000000000000000000000000000000010000000100000001800A810000000000001200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018022800000020003001500000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C488000000000000160000000000000000000000000000000001000000010000000180C988000000000400180000000000000000000000000000000001000000010000000180C788000000000000190000000000000000000000000000000001000000010000000180C8880000000000001700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000FFFF01001500434D4643546F6F6C4261724D656E75427574746F6E4C010000020001001A0000000F2650726F6A6563742057696E646F77000000000000000000000000010000000100000000000000000000000100000008002880DD880000000000001A0000000750726F6A656374000000000000000000000000010000000100000000000000000000000100000000002880DC8B0000000000003A00000005426F6F6B73000000000000000000000000010000000100000000000000000000000100000000002880E18B0000000000003B0000000946756E6374696F6E73000000000000000000000000010000000100000000000000000000000100000000002880E28B000000000000400000000954656D706C6174657300000000000000000000000001000000010000000000000000000000010000000000288018890000000000003D0000000E536F757263652042726F777365720000000000000000000000000100000001000000000000000000000001000000000028800000000000000400FFFFFFFF00000000000000000001000
000000000000100000000000000000000000100000000002880D988000000000000390000000C4275696C64204F7574707574000000000000000000000000010000000100000000000000000000000100000000002880E38B000000000000410000000B46696E64204F75747075740000000000000000000000000100000001000000000000000000000001000000000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FB7F0000000000001B000000000000000000000000000000000100000001000000000000000446696C65AC030000</Data>
</Buttons>
<OriginalItems>
<Len>1423</Len>
<Data>2800FFFF01001100434D4643546F6F6C426172427574746F6E00E1000000000000FFFFFFFF000100000000000000010000000000000001000000018001E1000000000000FFFFFFFF000100000000000000010000000000000001000000018003E1000000000000FFFFFFFF0001000000000000000100000000000000010000000180CD7F000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF000000000000000000010000000000000001000000018023E1000000000000FFFFFFFF000100000000000000010000000000000001000000018022E1000000000000FFFFFFFF000100000000000000010000000000000001000000018025E1000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001802BE1000000000000FFFFFFFF00010000000000000001000000000000000100000001802CE1000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001807A8A000000000000FFFFFFFF00010000000000000001000000000000000100000001807B8A000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180D3B0000000000000FFFFFFFF000100000000000000010000000000000001000000018015B1000000000000FFFFFFFF0001000000000000000100000000000000010000000180F4B0000000000000FFFFFFFF000100000000000000010000000000000001000000018036B1000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180FF88000000000000FFFFFFFF0001000000000000000100000000000000010000000180FE88000000000000FFFFFFFF00010000000000000001000000000000000100000001800B81000000000000FFFFFFFF00010000000000000001000000000000000100000001800C81000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180F088000000000000FFFFFFFF0001000000000000000100000000000000010000000180EE7F000000000000FFFFFFFF000100000000000000010000000000000001000000018024E1000000000000FFFFFFFF00010000000000000001000000000000000100000001800A81000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001802280000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180C488000000000000FFFFFFFF0001000000000000000100000000000000010000000180C988000000000000FFFFFFFF0001000000000000000100000000000000010000000180C788000000000000FFFFFFFF0001000000000000000100000000000000010000000180C888000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180DD88000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180FB7F000000000000FFFFFFFF000100000000000000010000000000000001000000</Data>
</OriginalItems>
<OrigResetItems>
<Len>1423</Len>
<Data>2800FFFF01001100434D4643546F6F6C426172427574746F6E00E100000000000000000000000000000000000000000000000100000001000000018001E100000000000001000000000000000000000000000000000100000001000000018003E1000000000000020000000000000000000000000000000001000000010000000180CD7F0000000000000300000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018023E100000000000004000000000000000000000000000000000100000001000000018022E100000000000005000000000000000000000000000000000100000001000000018025E10000000000000600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001802BE10000000000000700000000000000000000000000000000010000000100000001802CE10000000000000800000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001807A8A0000000000000900000000000000000000000000000000010000000100000001807B8A0000000000000A00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180D3B00000000000000B000000000000000000000000000000000100000001000000018015B10000000000000C0000000000000000000000000000000001000000010000000180F4B00000000000000D000000000000000000000000000000000100000001000000018036B10000000000000E00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FF880000000000000F0000000000000000000000000000000001000000010000000180FE880000000000001000000000000000000000000000000000010000000100000001800B810000000000001100000000000000000000000000000000010000000100000001800C810000000000001200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180F088000000000000130000000000000000000000000000000001000000010000000180EE7F00000000000014000000000000000000000000000000000100000001000000018024E10000000000001500000000000000000000000000000000010000000100000001800A810000000000001600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018022800000000000001700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C488000000000000180000000000000000000000000000000001000000010000000180C988000000000000190000000000000000000000000000000001000000010000000180C7880000000000001A0000000000000000000000000000000001000000010000000180C8880000000000001B00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180DD880000000000001C00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180FB7F0000000000001D000000000000000000000000000000000100000001000000</Data>
</OrigResetItems>
</ToolBar>
<ToolBar>
<RegID>59399</RegID>
<Name>Build</Name>
<Buttons>
<Len>657</Len>
<Data>00200000000000001000FFFF01001100434D4643546F6F6C426172427574746F6ECF7F0000000000001C0000000000000000000000000000000001000000010000000180D07F0000000000001D000000000000000000000000000000000100000001000000018030800000000000001E00000000000000000000000000000000010000000100000001809E8A0000000000001F0000000000000000000000000000000001000000010000000180D17F0000000000002000000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001804C8A0000000000002100000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000FFFF01001900434D4643546F6F6C426172436F6D626F426F78427574746F6EBA00000000000000000000000000000000000000000000000001000000010000009600000003002050FFFFFFFF00960000000000000000000180EB880000000000002200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C07F000000000000230000000000000000000000000000000001000000010000000180B08A000000000000240000000000000000000000000000000001000000010000000180A8010000000000004E00000000000000000000000000000000010000000100000001807202000000000000530000000000000000000000000000000001000000010000000180BE010000000000005000000000000000000000000000000000010000000100000000000000054275696C64CF010000</Data>
</Buttons>
<OriginalItems>
<Len>583</Len>
<Data>1000FFFF01001100434D4643546F6F6C426172427574746F6ECF7F000000000000FFFFFFFF0001000000000000000100000000000000010000000180D07F000000000000FFFFFFFF00010000000000000001000000000000000100000001803080000000000000FFFFFFFF00010000000000000001000000000000000100000001809E8A000000000000FFFFFFFF0001000000000000000100000000000000010000000180D17F000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001804C8A000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001806680000000000000FFFFFFFF0001000000000000000100000000000000010000000180EB88000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180C07F000000000000FFFFFFFF0001000000000000000100000000000000010000000180B08A000000000000FFFFFFFF0001000000000000000100000000000000010000000180A801000000000000FFFFFFFF00010000000000000001000000000000000100000001807202000000000000FFFFFFFF0001000000000000000100000000000000010000000180BE01000000000000FFFFFFFF000100000000000000010000000000000001000000</Data>
</OriginalItems>
<OrigResetItems>
<Len>583</Len>
<Data>1000FFFF01001100434D4643546F6F6C426172427574746F6ECF7F000000000000000000000000000000000000000000000001000000010000000180D07F00000000000001000000000000000000000000000000000100000001000000018030800000000000000200000000000000000000000000000000010000000100000001809E8A000000000000030000000000000000000000000000000001000000010000000180D17F0000000000000400000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001804C8A0000000000000500000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001806680000000000000060000000000000000000000000000000001000000010000000180EB880000000000000700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180C07F000000000000080000000000000000000000000000000001000000010000000180B08A000000000000090000000000000000000000000000000001000000010000000180A8010000000000000A000000000000000000000000000000000100000001000000018072020000000000000B0000000000000000000000000000000001000000010000000180BE010000000000000C000000000000000000000000000000000100000001000000</Data>
</OrigResetItems>
</ToolBar>
<ToolBar>
<RegID>59400</RegID>
<Name>Debug</Name>
<Buttons>
<Len>2236</Len>
<Data>00200000010000001900FFFF01001100434D4643546F6F6C426172427574746F6ECC880000000000002500000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018017800000000000002600000000000000000000000000000000010000000100000001801D800000000004002700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001801A800000000000002800000000000000000000000000000000010000000100000001801B80000000000000290000000000000000000000000000000001000000010000000180E57F0000000004002A00000000000000000000000000000000010000000100000001801C800000000000002B00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018000890000000000002C00000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180E48B0000020001002D0000000000000000000000000000000001000000010000000180F07F0000020001002E0000000000000000000000000000000001000000010000000180E8880000020000003700000000000000000000000000000000010000000100000001803B010000020001002F0000000000000000000000000000000001000000010000000180BB8A00000200010030000000000000000000000000000000000100000001000000FFFF01001500434D4643546F6F6C4261724D656E75427574746F6E0E01000002000000310000000D57617463682057696E646F7773000000000000000000000000010000000100000000000000000000000100000002001380D88B00000000000031000000085761746368202631000000000000000000000000010000000100000000000000000000000100000000001380D98B000000000000310000000857617463682026320000000000000000000000000100000001000000000000000000000001000000000013800F0100000200010032000000094D656D6F7279202631000000000000000000000000010000000100000000000000000000000100000004001380D28B00000000000032000000094D656D6F7279202631000000000000000000000000010000000100000000000000000000000100000000001380D38B00000000000032000000094D656D6F7279202632000000000000000000000000010000000100000000000000000000000100000000001380D48B00000000000032000000094D656D6F7279202633000000000000000000000000010000000100000000000000000000000100000000001380D58B00000000000032000000094D656D6F727920263400000000000000000000000001000000010000000000000000000000010000000000138010010000020000003300000008554152542023263100000000000000000000000001000000010000000000000000000000010000000400138093070000000000003300000008554152542023263100000000000000000000000001000000010000000000000000000000010000000000138094070000000000003300000008554152542023263200000000000000000000000001000000010000000000000000000000010000000000138095070000000000003300000008554152542023263300000000000000000000000001000000010000000000000000000000010000000000138096070000000000003300000015446562756720287072696E746629205669657765720000000000000000000000000100000001000000000000000000000001000000000013803C010000020000003400000010416E616C797369732057696E646F7773000000000000000000000000010000000100000000000000000000000100000003001380658A000000000000340000000F264C6F67696320416E616C797A6572000000000000000000000000010000000100000000000000000000000100000000001380DC7F0000000000003E0000001526506572666F726D616E636520416E616C797A6572000000000000000000000000010000000100000000000000000000000100000000001380E788000000000000380000000E26436F646520436F76657261676500000000000000000000000001000000010000000000000000000000010000000000138053010000000000003F0000000D54726163652057696E646F77730000000000000000000000000100000001000000000000000000000001000000010013805401000000000000FFFFFFFF115472616365204D656E7520416E63686F720000000
000000000010000000000000001000000000000000000000001000000000013802901000000000000350000001553797374656D205669657765722057696E646F77730000000000000000000000000100000001000000000000000000000001000000010013804B01000000000000FFFFFFFF1453797374656D2056696577657220416E63686F720000000000000000010000000000000001000000000000000000000001000000000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000013800189000002000000360000000F26546F6F6C626F782057696E646F7700000000000000000000000001000000010000000000000000000000010000000300138044C5000000000000FFFFFFFF0E5570646174652057696E646F77730000000000000000010000000000000001000000000000000000000001000000000013800000000000000400FFFFFFFF000000000000000000010000000000000001000000000000000000000001000000000013805B01000000000000FFFFFFFF12546F6F6C626F78204D656E75416E63686F72000000000000000001000000000000000100000000000000000000000100000000000000000005446562756764020000</Data>
</Buttons>
<OriginalItems>
<Len>898</Len>
<Data>1900FFFF01001100434D4643546F6F6C426172427574746F6ECC88000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001801780000000000000FFFFFFFF00010000000000000001000000000000000100000001801D80000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001801A80000000000000FFFFFFFF00010000000000000001000000000000000100000001801B80000000000000FFFFFFFF0001000000000000000100000000000000010000000180E57F000000000000FFFFFFFF00010000000000000001000000000000000100000001801C80000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001800089000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF0000000000000000000100000000000000010000000180E48B000000000000FFFFFFFF0001000000000000000100000000000000010000000180F07F000000000000FFFFFFFF0001000000000000000100000000000000010000000180E888000000000000FFFFFFFF00010000000000000001000000000000000100000001803B01000000000000FFFFFFFF0001000000000000000100000000000000010000000180BB8A000000000000FFFFFFFF0001000000000000000100000000000000010000000180D88B000000000000FFFFFFFF0001000000000000000100000000000000010000000180D28B000000000000FFFFFFFF00010000000000000001000000000000000100000001809307000000000000FFFFFFFF0001000000000000000100000000000000010000000180658A000000000000FFFFFFFF0001000000000000000100000000000000010000000180C18A000000000000FFFFFFFF0001000000000000000100000000000000010000000180EE8B000000000000FFFFFFFF00010000000000000001000000000000000100000001800000000000000000FFFFFFFF00000000000000000001000000000000000100000001800189000000000000FFFFFFFF000100000000000000010000000000000001000000</Data>
</OriginalItems>
<OrigResetItems>
<Len>898</Len>
<Data>1900FFFF01001100434D4643546F6F6C426172427574746F6ECC880000000000000000000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018017800000000000000100000000000000000000000000000000010000000100000001801D800000000000000200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF00000000000000000000000000010000000100000001801A800000000000000300000000000000000000000000000000010000000100000001801B80000000000000040000000000000000000000000000000001000000010000000180E57F0000000000000500000000000000000000000000000000010000000100000001801C800000000000000600000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF000000000000000000000000000100000001000000018000890000000000000700000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180E48B000000000000080000000000000000000000000000000001000000010000000180F07F000000000000090000000000000000000000000000000001000000010000000180E8880000000000000A00000000000000000000000000000000010000000100000001803B010000000000000B0000000000000000000000000000000001000000010000000180BB8A0000000000000C0000000000000000000000000000000001000000010000000180D88B0000000000000D0000000000000000000000000000000001000000010000000180D28B0000000000000E000000000000000000000000000000000100000001000000018093070000000000000F0000000000000000000000000000000001000000010000000180658A000000000000100000000000000000000000000000000001000000010000000180C18A000000000000110000000000000000000000000000000001000000010000000180EE8B0000000000001200000000000000000000000000000000010000000100000001800000000001000000FFFFFFFF0000000000000000000000000001000000010000000180018900000000000013000000000000000000000000000000000100000001000000</Data>
</OrigResetItems>
</ToolBar>
<ControlBarsSummary>
<Bars>0</Bars>
<ScreenCX>1366</ScreenCX>
<ScreenCY>768</ScreenCY>
</ControlBarsSummary>
</ViewEx>
</WinLayoutEx>
</ProjectGui>
|
78ac13919d1b6504a8945363b2c25a002024a50e
|
46125763b4dbf50619e8846a1371029346f4c3db
|
/src/data/pnat/basic.lean
|
5642520be43a74a9cafece359fbea9a39b892262
|
[
"Apache-2.0"
] |
permissive
|
thjread/mathlib
|
a9d97612cedc2c3101060737233df15abcdb9eb1
|
7cffe2520a5518bba19227a107078d83fa725ddc
|
refs/heads/master
| 1,615,637,696,376
| 1,583,953,063,000
| 1,583,953,063,000
| 246,680,271
| 0
| 0
|
Apache-2.0
| 1,583,960,875,000
| 1,583,960,875,000
| null |
UTF-8
|
Lean
| false
| false
| 13,131
|
lean
|
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Neil Strickland
-/
import tactic.basic
import data.nat.basic data.nat.prime algebra.group_power
/-- `ℕ+` is the type of positive natural numbers. It is defined as a subtype,
and the VM representation of `ℕ+` is the same as `ℕ` because the proof
is not stored. -/
def pnat := {n : ℕ // 0 < n}
notation `ℕ+` := pnat
instance coe_pnat_nat : has_coe ℕ+ ℕ := ⟨subtype.val⟩
instance : has_repr ℕ+ := ⟨λ n, repr n.1⟩
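-- A minimal usage sketch (illustrative only): an `ℕ+` is built from a natural
-- number together with a positivity proof, and coerces back to `ℕ`.
example : ((⟨3, dec_trivial⟩ : ℕ+) : ℕ) = 3 := rfl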
namespace nat
/-- Convert a natural number to a positive natural number. The
positivity assumption is inferred by `dec_trivial`. -/
def to_pnat (n : ℕ) (h : 0 < n . tactic.exact_dec_trivial) : ℕ+ := ⟨n, h⟩
/-- Write a successor as an element of `ℕ+`. -/
def succ_pnat (n : ℕ) : ℕ+ := ⟨succ n, succ_pos n⟩
@[simp] theorem succ_pnat_coe (n : ℕ) : (succ_pnat n : ℕ) = succ n := rfl
theorem succ_pnat_inj {n m : ℕ} : succ_pnat n = succ_pnat m → n = m :=
λ h, by { let h' := congr_arg (coe : ℕ+ → ℕ) h, exact nat.succ_inj h' }
/-- Convert a natural number to a pnat. `n+1` is mapped to itself,
and `0` becomes `1`. -/
def to_pnat' (n : ℕ) : ℕ+ := succ_pnat (pred n)
@[simp] theorem to_pnat'_coe : ∀ (n : ℕ),
((to_pnat' n) : ℕ) = ite (0 < n) n 1
| 0 := rfl
| (m + 1) := by {rw [if_pos (succ_pos m)], refl}
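-- Illustrative sketches of the two conversions above (assuming `rfl` evaluates
-- these small numerals; otherwise `dec_trivial` should close them):
-- `succ_pnat` shifts by one, and `to_pnat'` sends `0` to `1`.
example : ((succ_pnat 4 : ℕ+) : ℕ) = 5 := rfl
example : ((to_pnat' 0 : ℕ+) : ℕ) = 1 := rfl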
namespace primes
instance coe_pnat : has_coe nat.primes ℕ+ := ⟨λ p, ⟨(p : ℕ), p.property.pos⟩⟩
theorem coe_pnat_nat (p : nat.primes) : ((p : ℕ+) : ℕ) = p := rfl
theorem coe_pnat_inj (p q : nat.primes) : (p : ℕ+) = (q : ℕ+) → p = q := λ h,
begin
replace h : ((p : ℕ+) : ℕ) = ((q : ℕ+) : ℕ) := congr_arg subtype.val h,
rw [coe_pnat_nat, coe_pnat_nat] at h,
exact subtype.eq h,
end
end primes
end nat
namespace pnat
open nat
/-- We now define a long list of structures on ℕ+ induced by
similar structures on ℕ. Most of these behave in a completely
obvious way, but there are a few things to be said about
subtraction, division and powers.
-/
instance : decidable_eq ℕ+ := λ (a b : ℕ+), by apply_instance
instance : decidable_linear_order ℕ+ :=
subtype.decidable_linear_order _
@[simp] theorem pos (n : ℕ+) : 0 < (n : ℕ) := n.2
theorem eq {m n : ℕ+} : (m : ℕ) = n → m = n := subtype.eq
@[simp] theorem mk_coe (n h) : ((⟨n, h⟩ : ℕ+) : ℕ) = n := rfl
instance : add_comm_semigroup ℕ+ :=
{ add := λ a b, ⟨(a + b : ℕ), add_pos a.pos b.pos⟩,
add_comm := λ a b, subtype.eq (add_comm a b),
add_assoc := λ a b c, subtype.eq (add_assoc a b c) }
@[simp] theorem add_coe (m n : ℕ+) : ((m + n : ℕ+) : ℕ) = m + n := rfl
instance coe_add_hom : is_add_hom (coe : ℕ+ → ℕ) := ⟨add_coe⟩
instance : add_left_cancel_semigroup ℕ+ :=
{ add_left_cancel := λ a b c h, by {
replace h := congr_arg (coe : ℕ+ → ℕ) h,
rw [add_coe, add_coe] at h,
exact eq ((add_left_inj (a : ℕ)).mp h)},
.. (pnat.add_comm_semigroup) }
instance : add_right_cancel_semigroup ℕ+ :=
{ add_right_cancel := λ a b c h, by {
replace h := congr_arg (coe : ℕ+ → ℕ) h,
rw [add_coe, add_coe] at h,
exact eq ((add_right_inj (b : ℕ)).mp h)},
.. (pnat.add_comm_semigroup) }
@[simp] theorem ne_zero (n : ℕ+) : (n : ℕ) ≠ 0 := ne_of_gt n.2
theorem to_pnat'_coe {n : ℕ} : 0 < n → (n.to_pnat' : ℕ) = n := succ_pred_eq_of_pos
@[simp] theorem coe_to_pnat' (n : ℕ+) : (n : ℕ).to_pnat' = n := eq (to_pnat'_coe n.pos)
instance : comm_monoid ℕ+ :=
{ mul := λ m n, ⟨m.1 * n.1, mul_pos m.2 n.2⟩,
mul_assoc := λ a b c, subtype.eq (mul_assoc _ _ _),
one := succ_pnat 0,
one_mul := λ a, subtype.eq (one_mul _),
mul_one := λ a, subtype.eq (mul_one _),
mul_comm := λ a b, subtype.eq (mul_comm _ _) }
theorem lt_add_one_iff : ∀ {a b : ℕ+}, a < b + 1 ↔ a ≤ b :=
λ a b, nat.lt_add_one_iff
theorem add_one_le_iff : ∀ {a b : ℕ+}, a + 1 ≤ b ↔ a < b :=
λ a b, nat.add_one_le_iff
@[simp] lemma one_le (n : ℕ+) : (1 : ℕ+) ≤ n := n.2
instance : lattice.order_bot ℕ+ :=
{ bot := 1,
bot_le := λ a, a.property,
..(by apply_instance : partial_order ℕ+) }
@[simp] lemma bot_eq_zero : (⊥ : ℕ+) = 1 := rfl
instance : inhabited ℕ+ := ⟨1⟩
-- Some lemmas that rewrite `pnat.mk n h`, for `n` an explicit numeral, into explicit numerals.
@[simp] lemma mk_one {h} : (⟨1, h⟩ : ℕ+) = (1 : ℕ+) := rfl
@[simp] lemma mk_bit0 (n) {h} : (⟨bit0 n, h⟩ : ℕ+) = (bit0 ⟨n, pos_of_bit0_pos h⟩ : ℕ+) := rfl
@[simp] lemma mk_bit1 (n) {h} {k} : (⟨bit1 n, h⟩ : ℕ+) = (bit1 ⟨n, k⟩ : ℕ+) := rfl
-- Some lemmas that rewrite inequalities between explicit numerals in `pnat`
-- into the corresponding inequalities in `nat`.
-- TODO: perhaps this should not be attempted by `simp`,
-- and instead we should expect `norm_num` to take care of these directly?
-- TODO: these lemmas are perhaps incomplete:
-- * 1 is not represented as a bit0 or bit1
-- * strict inequalities?
@[simp] lemma bit0_le_bit0 (n m : ℕ+) : (bit0 n) ≤ (bit0 m) ↔ (bit0 (n : ℕ)) ≤ (bit0 (m : ℕ)) := iff.refl _
@[simp] lemma bit0_le_bit1 (n m : ℕ+) : (bit0 n) ≤ (bit1 m) ↔ (bit0 (n : ℕ)) ≤ (bit1 (m : ℕ)) := iff.refl _
@[simp] lemma bit1_le_bit0 (n m : ℕ+) : (bit1 n) ≤ (bit0 m) ↔ (bit1 (n : ℕ)) ≤ (bit0 (m : ℕ)) := iff.refl _
@[simp] lemma bit1_le_bit1 (n m : ℕ+) : (bit1 n) ≤ (bit1 m) ↔ (bit1 (n : ℕ)) ≤ (bit1 (m : ℕ)) := iff.refl _
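-- A small sketch of the intended effect (illustrative; assumes `dec_trivial`
-- evaluates these small numerals): comparisons of `ℕ+` numerals behave exactly
-- like the corresponding comparisons in `ℕ`.
example : (2 : ℕ+) ≤ 3 := dec_trivial
example : (4 : ℕ+) ≤ 6 := dec_trivial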
@[simp] theorem one_coe : ((1 : ℕ+) : ℕ) = 1 := rfl
@[simp] theorem mul_coe (m n : ℕ+) : ((m * n : ℕ+) : ℕ) = m * n := rfl
instance coe_mul_hom : is_monoid_hom (coe : ℕ+ → ℕ) :=
{map_one := one_coe, map_mul := mul_coe}
@[simp] lemma coe_bit0 (a : ℕ+) : ((bit0 a : ℕ+) : ℕ) = bit0 (a : ℕ) := rfl
@[simp] lemma coe_bit1 (a : ℕ+) : ((bit1 a : ℕ+) : ℕ) = bit1 (a : ℕ) := rfl
@[simp] theorem pow_coe (m : ℕ+) (n : ℕ) : ((m ^ n : ℕ+) : ℕ) = (m : ℕ) ^ n :=
by induction n with n ih;
[refl, rw [nat.pow_succ, _root_.pow_succ, mul_coe, mul_comm, ih]]
instance : left_cancel_semigroup ℕ+ :=
{ mul_left_cancel := λ a b c h, by {
replace h := congr_arg (coe : ℕ+ → ℕ) h,
exact eq ((nat.mul_left_inj a.pos).mp h)},
.. (pnat.comm_monoid) }
instance : right_cancel_semigroup ℕ+ :=
{ mul_right_cancel := λ a b c h, by {
replace h := congr_arg (coe : ℕ+ → ℕ) h,
exact eq ((nat.mul_right_inj b.pos).mp h)},
.. (pnat.comm_monoid) }
instance : distrib ℕ+ :=
{ left_distrib := λ a b c, eq (mul_add a b c),
right_distrib := λ a b c, eq (add_mul a b c),
..(pnat.add_comm_semigroup), ..(pnat.comm_monoid) }
/-- Subtraction a - b is defined in the obvious way when
a > b, and by a - b = 1 if a ≤ b.
-/
instance : has_sub ℕ+ := ⟨λ a b, to_pnat' (a - b : ℕ)⟩
theorem sub_coe (a b : ℕ+) : ((a - b : ℕ+) : ℕ) = ite (b < a) (a - b : ℕ) 1 :=
begin
change ((to_pnat' ((a : ℕ) - (b : ℕ)) : ℕ)) =
ite ((a : ℕ) > (b : ℕ)) ((a : ℕ) - (b : ℕ)) 1,
split_ifs with h,
{ exact to_pnat'_coe (nat.sub_pos_of_lt h) },
{ rw [nat.sub_eq_zero_iff_le.mpr (le_of_not_gt h)], refl }
end
theorem add_sub_of_lt {a b : ℕ+} : a < b → a + (b - a) = b :=
λ h, eq $ by { rw [add_coe, sub_coe, if_pos h],
exact nat.add_sub_of_le (le_of_lt h) }
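-- Sketch of the truncation convention (illustrative; assumes `dec_trivial`
-- evaluates these small numerals):
example : (5 : ℕ+) - 3 = 2 := dec_trivial
example : (2 : ℕ+) - 5 = 1 := dec_trivial  -- truncated to 1, since 2 ≤ 5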
/-- We define m % k and m / k in the same way as for nat
except that when m = n * k we take m % k = k and
m / k = n - 1. This ensures that m % k is always positive
and m = (m % k) + k * (m / k) in all cases. Later we
define a function div_exact which gives the usual m / k
in the case where k divides m.
-/
def mod_div_aux : ℕ+ → ℕ → ℕ → ℕ+ × ℕ
| k 0 q := ⟨k, q.pred⟩
| k (r + 1) q := ⟨⟨r + 1, nat.succ_pos r⟩, q⟩
lemma mod_div_aux_spec : ∀ (k : ℕ+) (r q : ℕ) (h : ¬ (r = 0 ∧ q = 0)),
(((mod_div_aux k r q).1 : ℕ) + k * (mod_div_aux k r q).2 = (r + k * q))
| k 0 0 h := (h ⟨rfl, rfl⟩).elim
| k 0 (q + 1) h := by {
change (k : ℕ) + (k : ℕ) * (q + 1).pred = 0 + (k : ℕ) * (q + 1),
rw [nat.pred_succ, nat.mul_succ, zero_add, add_comm]}
| k (r + 1) q h := rfl
def mod_div (m k : ℕ+) : ℕ+ × ℕ := mod_div_aux k ((m : ℕ) % (k : ℕ)) ((m : ℕ) / (k : ℕ))
def mod (m k : ℕ+) : ℕ+ := (mod_div m k).1
def div (m k : ℕ+) : ℕ := (mod_div m k).2
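-- A structural sketch of the convention above (illustrative only): `mod_div_aux`
-- receives the usual `nat` remainder and quotient, so `6 % 3 = 0`, `6 / 3 = 2`
-- become the pnat remainder `3` with quotient `1`, while `8 % 3 = 2`, `8 / 3 = 2`
-- are kept unchanged.
example : mod_div_aux 3 0 2 = (3, 1) := rfl
example : mod_div_aux 3 2 2 = (2, 2) := rfl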
theorem mod_add_div (m k : ℕ+) : (m : ℕ) = (mod m k) + k * (div m k) :=
begin
let h₀ := nat.mod_add_div (m : ℕ) (k : ℕ),
have : ¬ ((m : ℕ) % (k : ℕ) = 0 ∧ (m : ℕ) / (k : ℕ) = 0),
by { rintro ⟨hr, hq⟩, rw [hr, hq, mul_zero, zero_add] at h₀,
exact (m.ne_zero h₀.symm).elim },
have := mod_div_aux_spec k ((m : ℕ) % (k : ℕ)) ((m : ℕ) / (k : ℕ)) this,
exact (this.trans h₀).symm,
end
theorem mod_coe (m k : ℕ+) :
((mod m k) : ℕ) = ite ((m : ℕ) % (k : ℕ) = 0) (k : ℕ) ((m : ℕ) % (k : ℕ)) :=
begin
dsimp [mod, mod_div],
cases (m : ℕ) % (k : ℕ),
{ rw [if_pos rfl], refl },
{ rw [if_neg n.succ_ne_zero], refl }
end
theorem div_coe (m k : ℕ+) :
((div m k) : ℕ) = ite ((m : ℕ) % (k : ℕ) = 0) ((m : ℕ) / (k : ℕ)).pred ((m : ℕ) / (k : ℕ)) :=
begin
dsimp [div, mod_div],
cases (m : ℕ) % (k : ℕ),
{ rw [if_pos rfl], refl },
{ rw [if_neg n.succ_ne_zero], refl }
end
theorem mod_le (m k : ℕ+) : mod m k ≤ m ∧ mod m k ≤ k :=
begin
change ((mod m k) : ℕ) ≤ (m : ℕ) ∧ ((mod m k) : ℕ) ≤ (k : ℕ),
rw [mod_coe], split_ifs,
{ have hm : (m : ℕ) > 0 := m.pos,
rw [← nat.mod_add_div (m : ℕ) (k : ℕ), h, zero_add] at hm ⊢,
by_cases h' : ((m : ℕ) / (k : ℕ)) = 0,
{ rw [h', mul_zero] at hm, exact (lt_irrefl _ hm).elim},
{ let h' := nat.mul_le_mul_left (k : ℕ)
(nat.succ_le_of_lt (nat.pos_of_ne_zero h')),
rw [mul_one] at h', exact ⟨h', le_refl (k : ℕ)⟩ } },
{ exact ⟨nat.mod_le (m : ℕ) (k : ℕ), le_of_lt (nat.mod_lt (m : ℕ) k.pos)⟩ }
end
instance : has_dvd ℕ+ := ⟨λ k m, (k : ℕ) ∣ (m : ℕ)⟩
theorem dvd_iff {k m : ℕ+} : k ∣ m ↔ (k : ℕ) ∣ (m : ℕ) := by {refl}
theorem dvd_iff' {k m : ℕ+} : k ∣ m ↔ mod m k = k :=
begin
change (k : ℕ) ∣ (m : ℕ) ↔ mod m k = k,
rw [nat.dvd_iff_mod_eq_zero], split,
{ intro h, apply eq, rw [mod_coe, if_pos h] },
{ intro h, by_cases h' : (m : ℕ) % (k : ℕ) = 0,
{ exact h'},
{ replace h : ((mod m k) : ℕ) = (k : ℕ) := congr_arg _ h,
rw [mod_coe, if_neg h'] at h,
exact (ne_of_lt (nat.mod_lt (m : ℕ) k.pos) h).elim } }
end
def div_exact {m k : ℕ+} (h : k ∣ m) : ℕ+ :=
⟨(div m k).succ, nat.succ_pos _⟩
theorem mul_div_exact {m k : ℕ+} (h : k ∣ m) : k * (div_exact h) = m :=
begin
apply eq, rw [mul_coe],
change (k : ℕ) * (div m k).succ = m,
rw [mod_add_div m k, dvd_iff'.mp h, nat.mul_succ, add_comm],
end
theorem dvd_iff'' {k n : ℕ+} : k ∣ n ↔ ∃ m, k * m = n :=
⟨λ h, ⟨div_exact h, mul_div_exact h⟩,
λ ⟨m, h⟩, dvd.intro (m : ℕ)
((mul_coe k m).symm.trans (congr_arg subtype.val h))⟩
theorem dvd_intro {k n : ℕ+} (m : ℕ+) (h : k * m = n) : k ∣ n :=
dvd_iff''.mpr ⟨m, h⟩
theorem dvd_refl (m : ℕ+) : m ∣ m := dvd_intro 1 (mul_one m)
theorem dvd_antisymm {m n : ℕ+} : m ∣ n → n ∣ m → m = n :=
λ hmn hnm, subtype.eq (nat.dvd_antisymm hmn hnm)
theorem dvd_trans {k m n : ℕ+} : k ∣ m → m ∣ n → k ∣ n :=
@_root_.dvd_trans ℕ _ (k : ℕ) (m : ℕ) (n : ℕ)
theorem one_dvd (n : ℕ+) : 1 ∣ n := dvd_intro n (one_mul n)
theorem dvd_one_iff (n : ℕ+) : n ∣ 1 ↔ n = 1 :=
⟨λ h, dvd_antisymm h (one_dvd n), λ h, h.symm ▸ (dvd_refl 1)⟩
def gcd (n m : ℕ+) : ℕ+ :=
⟨nat.gcd (n : ℕ) (m : ℕ), nat.gcd_pos_of_pos_left (m : ℕ) n.pos⟩
def lcm (n m : ℕ+) : ℕ+ :=
⟨nat.lcm (n : ℕ) (m : ℕ),
by { let h := mul_pos n.pos m.pos,
rw [← gcd_mul_lcm (n : ℕ) (m : ℕ), mul_comm] at h,
exact pos_of_dvd_of_pos (dvd.intro (nat.gcd (n : ℕ) (m : ℕ)) rfl) h }⟩
@[simp] theorem gcd_coe (n m : ℕ+) : ((gcd n m) : ℕ) = nat.gcd n m := rfl
@[simp] theorem lcm_coe (n m : ℕ+) : ((lcm n m) : ℕ) = nat.lcm n m := rfl
theorem gcd_dvd_left (n m : ℕ+) : (gcd n m) ∣ n := nat.gcd_dvd_left (n : ℕ) (m : ℕ)
theorem gcd_dvd_right (n m : ℕ+) : (gcd n m) ∣ m := nat.gcd_dvd_right (n : ℕ) (m : ℕ)
theorem dvd_gcd {m n k : ℕ+} (hm : k ∣ m) (hn : k ∣ n) : k ∣ gcd m n :=
@nat.dvd_gcd (m : ℕ) (n : ℕ) (k : ℕ) hm hn
theorem dvd_lcm_left (n m : ℕ+) : n ∣ lcm n m := nat.dvd_lcm_left (n : ℕ) (m : ℕ)
theorem dvd_lcm_right (n m : ℕ+) : m ∣ lcm n m := nat.dvd_lcm_right (n : ℕ) (m : ℕ)
theorem lcm_dvd {m n k : ℕ+} (hm : m ∣ k) (hn : n ∣ k) : lcm m n ∣ k :=
@nat.lcm_dvd (m : ℕ) (n : ℕ) (k : ℕ) hm hn
theorem gcd_mul_lcm (n m : ℕ+) : (gcd n m) * (lcm n m) = n * m :=
subtype.eq (nat.gcd_mul_lcm (n : ℕ) (m : ℕ))
def prime (p : ℕ+) : Prop := (p : ℕ).prime
end pnat
|
d20d8e1fd5c2d1a4967428e9b30eafaf89ec8a80
|
5ae26df177f810c5006841e9c73dc56e01b978d7
|
/src/category_theory/single_obj.lean
|
711ad56d35ecb0497f128d1d8b1b75ebaa704966
|
[
"Apache-2.0"
] |
permissive
|
ChrisHughes24/mathlib
|
98322577c460bc6b1fe5c21f42ce33ad1c3e5558
|
a2a867e827c2a6702beb9efc2b9282bd801d5f9a
|
refs/heads/master
| 1,583,848,251,477
| 1,565,164,247,000
| 1,565,164,247,000
| 129,409,993
| 0
| 1
|
Apache-2.0
| 1,565,164,817,000
| 1,523,628,059,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 4,667
|
lean
|
import category_theory.endomorphism category_theory.groupoid category_theory.Cat
import data.equiv.algebra algebra.Mon.basic
import tactic.find
/-!
Copyright (c) 2019 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
# Single-object category
Single object category with a given monoid of endomorphisms. It is defined to facilitate transferring
some definitions and lemmas (e.g., conjugacy etc.) from category theory to monoids and groups.
## Main definitions
Given a type `α` with a monoid structure, `single_obj α` is the `unit` type with a `category` structure
such that `End (single_obj.star α)` is the monoid `α`. This can be extended to a functor `Mon ⥤
Cat`.
If `α` is a group, then `single_obj α` is a groupoid.
An element `x : α` can be reinterpreted as an element of `End (single_obj.star α)` using
`single_obj.to_End`.
## Implementation notes
- `category_struct.comp` on `End (single_obj.star α)` is `flip (*)`, not `(*)`. This way
multiplication on `End` agrees with the multiplication on `α`.
- By default, Lean puts instances into `category_theory` namespace instead of
`category_theory.single_obj`, so we give all names explicitly.
-/
universes u v w
namespace category_theory
/-- Type tag on `unit` used to define single-object categories and groupoids. -/
def single_obj (α : Type u) : Type := unit
namespace single_obj
variables (α : Type u)
/-- One and `flip (*)` become `id` and `comp` for morphisms of the single object category. -/
instance category_struct [has_one α] [has_mul α] : category_struct (single_obj α) :=
{ hom := λ _ _, α,
comp := λ _ _ _ x y, y * x,
id := λ _, 1 }
/-- Monoid laws become category laws for the single object category. -/
instance category [monoid α] : category (single_obj α) :=
{ comp_id' := λ _ _, one_mul,
id_comp' := λ _ _, mul_one,
assoc' := λ _ _ _ _ x y z, (mul_assoc z y x).symm }
/-- Groupoid structure on `single_obj α` -/
instance groupoid [group α] : groupoid (single_obj α) :=
{ inv := λ _ _ x, x⁻¹,
inv_comp' := λ _ _, mul_right_inv,
comp_inv' := λ _ _, mul_left_inv }
protected def star : single_obj α := unit.star
/-- The endomorphisms monoid of the only object in `single_obj α` is equivalent to the original
monoid α. -/
def to_End_equiv [monoid α] : End (single_obj.star α) ≃* α := mul_equiv.refl α
/-- Reinterpret an element of a monoid as an element of the endomorphisms monoid of the only object
in the `single_obj α` category. -/
def to_End {α} [monoid α] (x : α) : End (single_obj.star α) := x
lemma to_End_def [monoid α] (x : α) : to_End x = x := rfl
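-- A hypothetical illustration (not part of the original file) of the
-- implementation note above: composition of endomorphisms in `single_obj α`
-- is multiplication in `α` with the arguments flipped, so both sides below
-- are definitionally `y * x`.
example {M : Type u} [monoid M] (x y : M) :
  to_End x ≫ to_End y = to_End (y * x) := rfl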
/-- There is a 1-1 correspondence between monoid homomorphisms `α → β` and functors between the
corresponding single-object categories. This means that `single_obj` is a fully faithful
functor. -/
def map_hom_equiv (α : Type u) (β : Type v) [monoid α] [monoid β] :
{ f : α → β // is_monoid_hom f } ≃ (single_obj α) ⥤ (single_obj β) :=
{ to_fun := λ f,
{ obj := id,
map := λ _ _, f.1,
map_id' := λ _, f.2.map_one,
map_comp' := λ _ _ _ x y, @is_mul_hom.map_mul _ _ _ _ _ f.2.1 y x },
inv_fun := λ f, ⟨@functor.map _ _ _ _ f (single_obj.star α) (single_obj.star α),
{ map_mul := λ x y, f.map_comp y x, map_one := f.map_id _ }⟩,
left_inv := λ ⟨f, hf⟩, rfl,
right_inv := assume f, by rcases f; obviously }
/-- Reinterpret a monoid homomorphism `f : α → β` as a functor `(single_obj α) ⥤ (single_obj β)`.
See also `map_hom_equiv` for an equivalence between these types. -/
@[reducible] def map_hom {α : Type u} {β : Type v} [monoid α] [monoid β]
(f : α → β) [hf : is_monoid_hom f] :
(single_obj α) ⥤ (single_obj β) :=
map_hom_equiv α β ⟨f, hf⟩
lemma map_hom_id {α : Type u} [monoid α] : map_hom (@id α) = 𝟭 _ := rfl
lemma map_hom_comp {α : Type u} {β : Type v} [monoid α] [monoid β] (f : α → β) [is_monoid_hom f]
{γ : Type w} [monoid γ] (g : β → γ) [is_monoid_hom g] :
map_hom f ⋙ map_hom g = map_hom (g ∘ f) :=
rfl
end single_obj
end category_theory
namespace Mon
open category_theory
/-- The fully faithful functor from `Mon` to `Cat`. -/
def to_Cat : Mon ⥤ Cat :=
{ obj := λ x, Cat.of (single_obj x),
map := λ x y f, single_obj.map_hom f }
instance to_Cat_full : full to_Cat :=
{ preimage := λ x y, (single_obj.map_hom_equiv x y).inv_fun,
witness' := λ x y, (single_obj.map_hom_equiv x y).right_inv }
instance to_Cat_faithful : faithful to_Cat :=
{ injectivity' := λ x y, (single_obj.map_hom_equiv x y).injective }
end Mon
|
b904ab7f27e66286a1a84df6f38cbedb54ed8f95
|
80cc5bf14c8ea85ff340d1d747a127dcadeb966f
|
/src/data/int/modeq.lean
|
353bf0b90196b0b11221cc288230c9c6835eba44
|
[
"Apache-2.0"
] |
permissive
|
lacker/mathlib
|
f2439c743c4f8eb413ec589430c82d0f73b2d539
|
ddf7563ac69d42cfa4a1bfe41db1fed521bd795f
|
refs/heads/master
| 1,671,948,326,773
| 1,601,479,268,000
| 1,601,479,268,000
| 298,686,743
| 0
| 0
|
Apache-2.0
| 1,601,070,794,000
| 1,601,070,794,000
| null |
UTF-8
|
Lean
| false
| false
| 6,084
|
lean
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes
-/
import data.nat.modeq
import tactic.ring
namespace int
def modeq (n a b : ℤ) := a % n = b % n
notation a ` ≡ `:50 b ` [ZMOD `:50 n `]`:0 := modeq n a b
namespace modeq
variables {n m a b c d : ℤ}
@[refl] protected theorem refl (a : ℤ) : a ≡ a [ZMOD n] := @rfl _ _
@[symm] protected theorem symm : a ≡ b [ZMOD n] → b ≡ a [ZMOD n] := eq.symm
@[trans] protected theorem trans : a ≡ b [ZMOD n] → b ≡ c [ZMOD n] → a ≡ c [ZMOD n] := eq.trans
lemma coe_nat_modeq_iff {a b n : ℕ} : a ≡ b [ZMOD n] ↔ a ≡ b [MOD n] :=
by unfold modeq nat.modeq; rw ← int.coe_nat_eq_coe_nat_iff; simp [int.coe_nat_mod]
instance : decidable (a ≡ b [ZMOD n]) := by unfold modeq; apply_instance
theorem modeq_zero_iff : a ≡ 0 [ZMOD n] ↔ n ∣ a :=
by rw [modeq, zero_mod, dvd_iff_mod_eq_zero]
theorem modeq_iff_dvd : a ≡ b [ZMOD n] ↔ (n:ℤ) ∣ b - a :=
by rw [modeq, eq_comm];
simp [int.mod_eq_mod_iff_mod_sub_eq_zero, int.dvd_iff_mod_eq_zero, -euclidean_domain.mod_eq_zero]
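-- A hedged usage sketch (a hypothetical example, not part of the original
-- file), assuming `ring` closes the arithmetic side goal: shifting by the
-- modulus gives a congruence, via the divisibility criterion just proved.
example : a ≡ a + n [ZMOD n] := modeq_iff_dvd.2 ⟨1, by ring⟩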
theorem modeq_of_dvd_of_modeq (d : m ∣ n) (h : a ≡ b [ZMOD n]) : a ≡ b [ZMOD m] :=
modeq_iff_dvd.2 $ dvd_trans d (modeq_iff_dvd.1 h)
theorem modeq_mul_left' (hc : 0 ≤ c) (h : a ≡ b [ZMOD n]) : c * a ≡ c * b [ZMOD (c * n)] :=
or.cases_on (lt_or_eq_of_le hc) (λ hc,
by unfold modeq;
simp [mul_mod_mul_of_pos _ _ hc, (show _ = _, from h)] )
(λ hc, by simp [hc.symm])
theorem modeq_mul_right' (hc : 0 ≤ c) (h : a ≡ b [ZMOD n]) : a * c ≡ b * c [ZMOD (n * c)] :=
by rw [mul_comm a, mul_comm b, mul_comm n]; exact modeq_mul_left' hc h
theorem modeq_add (h₁ : a ≡ b [ZMOD n]) (h₂ : c ≡ d [ZMOD n]) : a + c ≡ b + d [ZMOD n] :=
modeq_iff_dvd.2 $ by {convert dvd_add (modeq_iff_dvd.1 h₁) (modeq_iff_dvd.1 h₂), ring}
theorem modeq_add_cancel_left (h₁ : a ≡ b [ZMOD n]) (h₂ : a + c ≡ b + d [ZMOD n]) :
c ≡ d [ZMOD n] :=
have d - c = b + d - (a + c) - (b - a) := by ring,
modeq_iff_dvd.2 $ by { rw [this], exact dvd_sub (modeq_iff_dvd.1 h₂) (modeq_iff_dvd.1 h₁) }
theorem modeq_add_cancel_right (h₁ : c ≡ d [ZMOD n]) (h₂ : a + c ≡ b + d [ZMOD n]) :
a ≡ b [ZMOD n] :=
by rw [add_comm a, add_comm b] at h₂; exact modeq_add_cancel_left h₁ h₂
theorem mod_modeq (a n) : a % n ≡ a [ZMOD n] := int.mod_mod _ _
theorem modeq_neg (h : a ≡ b [ZMOD n]) : -a ≡ -b [ZMOD n] :=
modeq_add_cancel_left h (by simp)
theorem modeq_sub (h₁ : a ≡ b [ZMOD n]) (h₂ : c ≡ d [ZMOD n]) : a - c ≡ b - d [ZMOD n] :=
by rw [sub_eq_add_neg, sub_eq_add_neg]; exact modeq_add h₁ (modeq_neg h₂)
theorem modeq_mul_left (c : ℤ) (h : a ≡ b [ZMOD n]) : c * a ≡ c * b [ZMOD n] :=
or.cases_on (le_total 0 c)
(λ hc, modeq_of_dvd_of_modeq (dvd_mul_left _ _) (modeq_mul_left' hc h))
(λ hc, by rw [← neg_neg c, ← neg_mul_eq_neg_mul, ← neg_mul_eq_neg_mul _ b];
exact modeq_neg (modeq_of_dvd_of_modeq (dvd_mul_left _ _)
(modeq_mul_left' (neg_nonneg.2 hc) h)))
theorem modeq_mul_right (c : ℤ) (h : a ≡ b [ZMOD n]) : a * c ≡ b * c [ZMOD n] :=
by rw [mul_comm a, mul_comm b]; exact modeq_mul_left c h
theorem modeq_mul (h₁ : a ≡ b [ZMOD n]) (h₂ : c ≡ d [ZMOD n]) : a * c ≡ b * d [ZMOD n] :=
(modeq_mul_left _ h₂).trans (modeq_mul_right _ h₁)
theorem modeq_of_modeq_mul_left (m : ℤ) (h : a ≡ b [ZMOD m * n]) : a ≡ b [ZMOD n] :=
by rw [modeq_iff_dvd] at *; exact dvd.trans (dvd_mul_left n m) h
theorem modeq_of_modeq_mul_right (m : ℤ) : a ≡ b [ZMOD n * m] → a ≡ b [ZMOD n] :=
mul_comm m n ▸ modeq_of_modeq_mul_left _
lemma modeq_and_modeq_iff_modeq_mul {a b m n : ℤ} (hmn : nat.coprime m.nat_abs n.nat_abs) :
a ≡ b [ZMOD m] ∧ a ≡ b [ZMOD n] ↔ (a ≡ b [ZMOD m * n]) :=
⟨λ h, begin
rw [int.modeq.modeq_iff_dvd, int.modeq.modeq_iff_dvd] at h,
rw [int.modeq.modeq_iff_dvd, ← int.nat_abs_dvd, ← int.dvd_nat_abs,
int.coe_nat_dvd, int.nat_abs_mul],
refine hmn.mul_dvd_of_dvd_of_dvd _ _;
rw [← int.coe_nat_dvd, int.nat_abs_dvd, int.dvd_nat_abs]; tauto
end,
λ h, ⟨int.modeq.modeq_of_modeq_mul_right _ h, int.modeq.modeq_of_modeq_mul_left _ h⟩⟩
lemma gcd_a_modeq (a b : ℕ) : (a : ℤ) * nat.gcd_a a b ≡ nat.gcd a b [ZMOD b] :=
by rw [← add_zero ((a : ℤ) * _), nat.gcd_eq_gcd_ab];
exact int.modeq.modeq_add rfl (int.modeq.modeq_zero_iff.2 (dvd_mul_right _ _)).symm
theorem modeq_add_fac {a b n : ℤ} (c : ℤ) (ha : a ≡ b [ZMOD n]) : a + n*c ≡ b [ZMOD n] :=
calc a + n*c ≡ b + n*c [ZMOD n] : int.modeq.modeq_add ha (int.modeq.refl _)
... ≡ b + 0 [ZMOD n] : int.modeq.modeq_add (int.modeq.refl _)
(int.modeq.modeq_zero_iff.2 (dvd_mul_right _ _))
... ≡ b [ZMOD n] : by simp
open nat
lemma mod_coprime {a b : ℕ} (hab : coprime a b) : ∃ y : ℤ, a * y ≡ 1 [ZMOD b] :=
⟨ gcd_a a b,
have hgcd : nat.gcd a b = 1, from coprime.gcd_eq_one hab,
calc
↑a * gcd_a a b ≡ ↑a*gcd_a a b + ↑b*gcd_b a b [ZMOD ↑b] : int.modeq.symm $
modeq_add_fac _ $ int.modeq.refl _
... ≡ 1 [ZMOD ↑b] : by rw [←gcd_eq_gcd_ab, hgcd]; reflexivity ⟩
lemma exists_unique_equiv (a : ℤ) {b : ℤ} (hb : 0 < b) : ∃ z : ℤ, 0 ≤ z ∧ z < b ∧ z ≡ a [ZMOD b] :=
⟨ a % b, int.mod_nonneg _ (ne_of_gt hb),
have a % b < abs b, from int.mod_lt _ (ne_of_gt hb),
by rwa abs_of_pos hb at this,
by simp [int.modeq] ⟩
lemma exists_unique_equiv_nat (a : ℤ) {b : ℤ} (hb : 0 < b) : ∃ z : ℕ, ↑z < b ∧ ↑z ≡ a [ZMOD b] :=
let ⟨z, hz1, hz2, hz3⟩ := exists_unique_equiv a hb in
⟨z.nat_abs, by split; rw [←int.of_nat_eq_coe, int.of_nat_nat_abs_eq_of_nonneg hz1]; assumption⟩
end modeq
@[simp] lemma mod_mul_right_mod (a b c : ℤ) : a % (b * c) % b = a % b :=
int.modeq.modeq_of_modeq_mul_right _ (int.modeq.mod_modeq _ _)
@[simp] lemma mod_mul_left_mod (a b c : ℤ) : a % (b * c) % c = a % c :=
int.modeq.modeq_of_modeq_mul_left _ (int.modeq.mod_modeq _ _)
end int
|
ce844ac63b993c52abe5ca2f1602c1ff106f6cd5
|
624f6f2ae8b3b1adc5f8f67a365c51d5126be45a
|
/tests/plugin/Default.lean
|
3dfe24a6284d891710d56b5fbe10877a2013b0fc
|
[
"Apache-2.0"
] |
permissive
|
mhuisi/lean4
|
28d35a4febc2e251c7f05492e13f3b05d6f9b7af
|
dda44bc47f3e5d024508060dac2bcb59fd12e4c0
|
refs/heads/master
| 1,621,225,489,283
| 1,585,142,689,000
| 1,585,142,689,000
| 250,590,438
| 0
| 2
|
Apache-2.0
| 1,602,443,220,000
| 1,585,327,814,000
|
C
|
UTF-8
|
Lean
| false
| false
| 330
|
lean
|
import Init.Lean
open Lean
def oh_no : Nat := 0
def snakeLinter : Linter :=
fun env n =>
-- TODO(Sebastian): return actual message with position from syntax tree
if n.toString.contains '_' then throw $ IO.userError "SNAKES!!"
else pure MessageLog.empty
@[init]
def registerSnakeLinter : IO Unit :=
addLinter snakeLinter
|
f5231450a883ef8c861eb4fb3bf575f622edff34
|
46125763b4dbf50619e8846a1371029346f4c3db
|
/src/topology/order.lean
|
8514d0b1368554fb3e03341dff381b5f36f3ba8c
|
[
"Apache-2.0"
] |
permissive
|
thjread/mathlib
|
a9d97612cedc2c3101060737233df15abcdb9eb1
|
7cffe2520a5518bba19227a107078d83fa725ddc
|
refs/heads/master
| 1,615,637,696,376
| 1,583,953,063,000
| 1,583,953,063,000
| 246,680,271
| 0
| 0
|
Apache-2.0
| 1,583,960,875,000
| 1,583,960,875,000
| null |
UTF-8
|
Lean
| false
| false
| 27,306
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro
-/
import topology.basic
/-!
# Ordering on topologies and (co)induced topologies
Topologies on a fixed type `α` are ordered, by reverse inclusion.
That is, for topologies `t₁` and `t₂` on `α`, we write `t₁ ≤ t₂`
if every set open in `t₂` is also open in `t₁`.
(One also calls `t₁` finer than `t₂`, and `t₂` coarser than `t₁`.)
Any function `f : α → β` induces
`induced f : topological_space β → topological_space α`
and `coinduced f : topological_space α → topological_space β`.
Continuity, the ordering on topologies and (co)induced topologies are
related as follows:
* The identity map (α, t₁) → (α, t₂) is continuous iff t₁ ≤ t₂.
* A map f : (α, t) → (β, u) is continuous
iff t ≤ induced f u (`continuous_iff_le_induced`)
iff coinduced f t ≤ u (`continuous_iff_coinduced_le`).
Topologies on α form a complete lattice, with ⊥ the discrete topology
and ⊤ the indiscrete topology.
For a function f : α → β, (coinduced f, induced f) is a Galois connection
between topologies on α and topologies on β.
## Implementation notes
There is a Galois insertion between topologies on α (with the inclusion ordering)
and all collections of sets in α. The complete lattice structure on topologies
on α is defined as the reverse of the one obtained via this Galois insertion.
## Tags
finer, coarser, induced topology, coinduced topology
-/
open set filter lattice classical
open_locale classical topological_space
universes u v w
namespace topological_space
variables {α : Type u}
/-- The open sets of the least topology containing a collection of basic sets. -/
inductive generate_open (g : set (set α)) : set α → Prop
| basic : ∀s∈g, generate_open s
| univ : generate_open univ
| inter : ∀s t, generate_open s → generate_open t → generate_open (s ∩ t)
| sUnion : ∀k, (∀s∈k, generate_open s) → generate_open (⋃₀ k)
/-- The smallest topological space containing the collection `g` of basic sets -/
def generate_from (g : set (set α)) : topological_space α :=
{ is_open := generate_open g,
is_open_univ := generate_open.univ g,
is_open_inter := generate_open.inter,
is_open_sUnion := generate_open.sUnion }
lemma nhds_generate_from {g : set (set α)} {a : α} :
@nhds α (generate_from g) a = (⨅s∈{s | a ∈ s ∧ s ∈ g}, principal s) :=
by rw nhds_def; exact le_antisymm
(infi_le_infi $ assume s, infi_le_infi_const $ assume ⟨as, sg⟩, ⟨as, generate_open.basic _ sg⟩)
(le_infi $ assume s, le_infi $ assume ⟨as, hs⟩,
begin
revert as, clear_, induction hs,
case generate_open.basic : s hs
{ exact assume as, infi_le_of_le s $ infi_le _ ⟨as, hs⟩ },
case generate_open.univ
{ rw [principal_univ],
exact assume _, le_top },
case generate_open.inter : s t hs' ht' hs ht
{ exact assume ⟨has, hat⟩, calc _ ≤ principal s ⊓ principal t : le_inf (hs has) (ht hat)
... = _ : inf_principal },
case generate_open.sUnion : k hk' hk
{ exact λ ⟨t, htk, hat⟩, calc _ ≤ principal t : hk t htk hat
... ≤ _ : le_principal_iff.2 $ subset_sUnion_of_mem htk }
end)
lemma tendsto_nhds_generate_from {β : Type*} {m : α → β} {f : filter α} {g : set (set β)} {b : β}
(h : ∀s∈g, b ∈ s → m ⁻¹' s ∈ f) : tendsto m f (@nhds β (generate_from g) b) :=
by rw [nhds_generate_from]; exact
(tendsto_infi.2 $ assume s, tendsto_infi.2 $ assume ⟨hbs, hsg⟩, tendsto_principal.2 $ h s hsg hbs)
/-- Construct a topology on α given the filter of neighborhoods of each point of α. -/
protected def mk_of_nhds (n : α → filter α) : topological_space α :=
{ is_open := λs, ∀a∈s, s ∈ n a,
is_open_univ := assume x h, univ_mem_sets,
is_open_inter := assume s t hs ht x ⟨hxs, hxt⟩, inter_mem_sets (hs x hxs) (ht x hxt),
is_open_sUnion := assume s hs a ⟨x, hx, hxa⟩, mem_sets_of_superset (hs x hx _ hxa) (set.subset_sUnion_of_mem hx) }
lemma nhds_mk_of_nhds (n : α → filter α) (a : α)
(h₀ : pure ≤ n) (h₁ : ∀{a s}, s ∈ n a → ∃ t ∈ n a, t ⊆ s ∧ ∀a' ∈ t, s ∈ n a') :
@nhds α (topological_space.mk_of_nhds n) a = n a :=
begin
letI := topological_space.mk_of_nhds n,
refine le_antisymm (assume s hs, _) (assume s hs, _),
{ have h₀ : {b | s ∈ n b} ⊆ s := assume b hb, mem_pure_sets.1 $ h₀ b hb,
have h₁ : {b | s ∈ n b} ∈ 𝓝 a,
{ refine mem_nhds_sets (assume b (hb : s ∈ n b), _) hs,
rcases h₁ hb with ⟨t, ht, hts, h⟩,
exact mem_sets_of_superset ht h },
exact mem_sets_of_superset h₁ h₀ },
{ rcases (@mem_nhds_sets_iff α (topological_space.mk_of_nhds n) _ _).1 hs with ⟨t, hts, ht, hat⟩,
exact (n a).sets_of_superset (ht _ hat) hts },
end
end topological_space
section lattice
variables {α : Type u} {β : Type v}
/-- The inclusion ordering on topologies on α. We use it to get a complete
lattice instance via the Galois insertion method, but the partial order
that we will eventually impose on `topological_space α` is the reverse one. -/
def tmp_order : partial_order (topological_space α) :=
{ le := λt s, t.is_open ≤ s.is_open,
le_antisymm := assume t s h₁ h₂, topological_space_eq $ le_antisymm h₁ h₂,
le_refl := assume t, le_refl t.is_open,
le_trans := assume a b c h₁ h₂, @le_trans _ _ a.is_open b.is_open c.is_open h₁ h₂ }
local attribute [instance] tmp_order
/- We'll later restate this lemma in terms of the correct order on `topological_space α`. -/
private lemma generate_from_le_iff_subset_is_open {g : set (set α)} {t : topological_space α} :
topological_space.generate_from g ≤ t ↔ g ⊆ {s | t.is_open s} :=
iff.intro
(assume ht s hs, ht _ $ topological_space.generate_open.basic s hs)
(assume hg s hs, hs.rec_on (assume v hv, hg hv)
t.is_open_univ (assume u v _ _, t.is_open_inter u v) (assume k _, t.is_open_sUnion k))
/-- If `s` equals the collection of open sets in the topology it generates,
then `s` defines a topology. -/
protected def mk_of_closure (s : set (set α))
(hs : {u | (topological_space.generate_from s).is_open u} = s) : topological_space α :=
{ is_open := λu, u ∈ s,
is_open_univ := hs ▸ topological_space.generate_open.univ _,
is_open_inter := hs ▸ topological_space.generate_open.inter,
is_open_sUnion := hs ▸ topological_space.generate_open.sUnion }
lemma mk_of_closure_sets {s : set (set α)}
{hs : {u | (topological_space.generate_from s).is_open u} = s} :
mk_of_closure s hs = topological_space.generate_from s :=
topological_space_eq hs.symm
/-- The Galois insertion between `set (set α)` and `topological_space α` whose lower part
sends a collection of subsets of α to the topology they generate, and whose upper part
sends a topology to its collection of open subsets. -/
def gi_generate_from (α : Type*) :
galois_insertion topological_space.generate_from (λt:topological_space α, {s | t.is_open s}) :=
{ gc := assume g t, generate_from_le_iff_subset_is_open,
le_l_u := assume ts s hs, topological_space.generate_open.basic s hs,
choice := λg hg, mk_of_closure g
(subset.antisymm hg $ generate_from_le_iff_subset_is_open.1 $ le_refl _),
choice_eq := assume s hs, mk_of_closure_sets }
lemma generate_from_mono {α} {g₁ g₂ : set (set α)} (h : g₁ ⊆ g₂) :
topological_space.generate_from g₁ ≤ topological_space.generate_from g₂ :=
(gi_generate_from _).gc.monotone_l h
/-- The complete lattice of topological spaces, but built on the inclusion ordering. -/
def tmp_complete_lattice {α : Type u} : complete_lattice (topological_space α) :=
(gi_generate_from α).lift_complete_lattice
/-- The ordering on topologies on the type `α`.
`t ≤ s` if every set open in `s` is also open in `t` (`t` is finer than `s`). -/
instance : partial_order (topological_space α) :=
{ le := λ t s, s.is_open ≤ t.is_open,
le_antisymm := assume t s h₁ h₂, topological_space_eq $ le_antisymm h₂ h₁,
le_refl := assume t, le_refl t.is_open,
le_trans := assume a b c h₁ h₂, le_trans h₂ h₁ }
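-- A hedged illustration of the ordering just defined (a hypothetical example,
-- not part of the original file): if `t₁ ≤ t₂` then every `t₂`-open set is
-- `t₁`-open, i.e. `t₁` is finer than `t₂`.
example {t₁ t₂ : topological_space α} {s : set α}
  (h : t₁ ≤ t₂) (hs : @is_open α t₂ s) : @is_open α t₁ s :=
h s hs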
lemma le_generate_from_iff_subset_is_open {g : set (set α)} {t : topological_space α} :
t ≤ topological_space.generate_from g ↔ g ⊆ {s | t.is_open s} :=
generate_from_le_iff_subset_is_open
/-- Topologies on `α` form a complete lattice, with `⊥` the discrete topology
and `⊤` the indiscrete topology. The infimum of a collection of topologies
is the topology generated by all their open sets, while the supremum is the
topology whose open sets are those sets open in every member of the collection. -/
instance : complete_lattice (topological_space α) :=
@order_dual.lattice.complete_lattice _ tmp_complete_lattice
/-- A topological space is discrete if every set is open, that is,
its topology equals the discrete topology `⊥`. -/
class discrete_topology (α : Type*) [t : topological_space α] : Prop :=
(eq_bot : t = ⊥)
@[simp] lemma is_open_discrete [topological_space α] [discrete_topology α] (s : set α) :
is_open s :=
(discrete_topology.eq_bot α).symm ▸ trivial
lemma continuous_of_discrete_topology [topological_space α] [discrete_topology α] [topological_space β] {f : α → β} : continuous f :=
λs hs, is_open_discrete _
lemma nhds_bot (α : Type*) : (@nhds α ⊥) = pure :=
begin
refine le_antisymm _ (@pure_le_nhds α ⊥),
assume a s hs,
exact @mem_nhds_sets α ⊥ a s trivial hs
end
lemma nhds_discrete (α : Type*) [topological_space α] [discrete_topology α] : (@nhds α _) = pure :=
(discrete_topology.eq_bot α).symm ▸ nhds_bot α
lemma le_of_nhds_le_nhds {t₁ t₂ : topological_space α} (h : ∀x, @nhds α t₁ x ≤ @nhds α t₂ x) :
t₁ ≤ t₂ :=
assume s, show @is_open α t₂ s → @is_open α t₁ s,
by { simp only [is_open_iff_nhds, le_principal_iff], exact assume hs a ha, h _ $ hs _ ha }
lemma eq_of_nhds_eq_nhds {t₁ t₂ : topological_space α} (h : ∀x, @nhds α t₁ x = @nhds α t₂ x) :
t₁ = t₂ :=
le_antisymm
(le_of_nhds_le_nhds $ assume x, le_of_eq $ h x)
(le_of_nhds_le_nhds $ assume x, le_of_eq $ (h x).symm)
lemma eq_bot_of_singletons_open {t : topological_space α} (h : ∀ x, t.is_open {x}) : t = ⊥ :=
bot_unique $ λ s hs, bUnion_of_singleton s ▸ is_open_bUnion (λ x _, h x)
end lattice
section galois_connection
variables {α : Type*} {β : Type*} {γ : Type*}
/-- Given `f : α → β` and a topology on `β`, the induced topology on `α` is the collection of
sets that are preimages of some open set in `β`. This is the coarsest topology that
makes `f` continuous. -/
def topological_space.induced {α : Type u} {β : Type v} (f : α → β) (t : topological_space β) :
topological_space α :=
{ is_open := λs, ∃s', t.is_open s' ∧ f ⁻¹' s' = s,
is_open_univ := ⟨univ, t.is_open_univ, preimage_univ⟩,
is_open_inter := by rintro s₁ s₂ ⟨s'₁, hs₁, rfl⟩ ⟨s'₂, hs₂, rfl⟩;
exact ⟨s'₁ ∩ s'₂, t.is_open_inter _ _ hs₁ hs₂, preimage_inter⟩,
is_open_sUnion := assume s h,
begin
simp only [classical.skolem] at h,
cases h with f hf,
apply exists.intro (⋃(x : set α) (h : x ∈ s), f x h),
simp only [sUnion_eq_bUnion, preimage_Union, (λx h, (hf x h).right)], refine ⟨_, rfl⟩,
exact (@is_open_Union β _ t _ $ assume i,
show is_open (⋃h, f i h), from @is_open_Union β _ t _ $ assume h, (hf i h).left)
end }
lemma is_open_induced_iff [t : topological_space β] {s : set α} {f : α → β} :
@is_open α (t.induced f) s ↔ (∃t, is_open t ∧ f ⁻¹' t = s) :=
iff.rfl
lemma is_closed_induced_iff [t : topological_space β] {s : set α} {f : α → β} :
@is_closed α (t.induced f) s ↔ (∃t, is_closed t ∧ s = f ⁻¹' t) :=
⟨assume ⟨t, ht, heq⟩, ⟨-t, is_closed_compl_iff.2 ht,
by simp only [preimage_compl, heq, lattice.neg_neg]⟩,
assume ⟨t, ht, heq⟩, ⟨-t, ht, by simp only [preimage_compl, heq.symm]⟩⟩
/-- Given `f : α → β` and a topology on `α`, the coinduced topology on `β` is defined
such that `s : set β` is open if the preimage of `s` is open. This is the finest topology that
makes `f` continuous. -/
def topological_space.coinduced {α : Type u} {β : Type v} (f : α → β) (t : topological_space α) :
topological_space β :=
{ is_open := λs, t.is_open (f ⁻¹' s),
is_open_univ := by rw preimage_univ; exact t.is_open_univ,
is_open_inter := assume s₁ s₂ h₁ h₂, by rw preimage_inter; exact t.is_open_inter _ _ h₁ h₂,
is_open_sUnion := assume s h, by rw [preimage_sUnion]; exact (@is_open_Union _ _ t _ $ assume i,
show is_open (⋃ (H : i ∈ s), f ⁻¹' i), from
@is_open_Union _ _ t _ $ assume hi, h i hi) }
lemma is_open_coinduced {t : topological_space α} {s : set β} {f : α → β} :
@is_open β (topological_space.coinduced f t) s ↔ is_open (f ⁻¹' s) :=
iff.rfl
variables {t t₁ t₂ : topological_space α} {t' : topological_space β} {f : α → β} {g : β → α}
lemma coinduced_le_iff_le_induced {f : α → β } {tα : topological_space α} {tβ : topological_space β} :
tα.coinduced f ≤ tβ ↔ tα ≤ tβ.induced f :=
iff.intro
(assume h s ⟨t, ht, hst⟩, hst ▸ h _ ht)
(assume h s hs, show tα.is_open (f ⁻¹' s), from h _ ⟨s, hs, rfl⟩)
lemma gc_coinduced_induced (f : α → β) :
galois_connection (topological_space.coinduced f) (topological_space.induced f) :=
assume f g, coinduced_le_iff_le_induced
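-- A hedged consequence sketch (a hypothetical example, not part of the
-- original file): the unit of this Galois connection, obtained from the
-- equivalence `coinduced_le_iff_le_induced` proved above.
example : t ≤ (t.coinduced f).induced f :=
coinduced_le_iff_le_induced.1 (le_refl _)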
lemma induced_mono (h : t₁ ≤ t₂) : t₁.induced g ≤ t₂.induced g :=
(gc_coinduced_induced g).monotone_u h
lemma coinduced_mono (h : t₁ ≤ t₂) : t₁.coinduced f ≤ t₂.coinduced f :=
(gc_coinduced_induced f).monotone_l h
@[simp] lemma induced_top : (⊤ : topological_space α).induced g = ⊤ :=
(gc_coinduced_induced g).u_top
@[simp] lemma induced_inf : (t₁ ⊓ t₂).induced g = t₁.induced g ⊓ t₂.induced g :=
(gc_coinduced_induced g).u_inf
@[simp] lemma induced_infi {ι : Sort w} {t : ι → topological_space α} :
(⨅i, t i).induced g = (⨅i, (t i).induced g) :=
(gc_coinduced_induced g).u_infi
@[simp] lemma coinduced_bot : (⊥ : topological_space α).coinduced f = ⊥ :=
(gc_coinduced_induced f).l_bot
@[simp] lemma coinduced_sup : (t₁ ⊔ t₂).coinduced f = t₁.coinduced f ⊔ t₂.coinduced f :=
(gc_coinduced_induced f).l_sup
@[simp] lemma coinduced_supr {ι : Sort w} {t : ι → topological_space α} :
(⨆i, t i).coinduced f = (⨆i, (t i).coinduced f) :=
(gc_coinduced_induced f).l_supr
lemma induced_id [t : topological_space α] : t.induced id = t :=
topological_space_eq $ funext $ assume s, propext $
⟨assume ⟨s', hs, h⟩, h ▸ hs, assume hs, ⟨s, hs, rfl⟩⟩
lemma induced_compose [tγ : topological_space γ]
{f : α → β} {g : β → γ} : (tγ.induced g).induced f = tγ.induced (g ∘ f) :=
topological_space_eq $ funext $ assume s, propext $
⟨assume ⟨s', ⟨s, hs, h₂⟩, h₁⟩, h₁ ▸ h₂ ▸ ⟨s, hs, rfl⟩,
assume ⟨s, hs, h⟩, ⟨preimage g s, ⟨s, hs, rfl⟩, h ▸ rfl⟩⟩
lemma coinduced_id [t : topological_space α] : t.coinduced id = t :=
topological_space_eq rfl
lemma coinduced_compose [tα : topological_space α]
{f : α → β} {g : β → γ} : (tα.coinduced f).coinduced g = tα.coinduced (g ∘ f) :=
topological_space_eq rfl
end galois_connection
/- constructions using the complete lattice structure -/
section constructions
open topological_space
variables {α : Type u} {β : Type v}
instance inhabited_topological_space {α : Type u} : inhabited (topological_space α) :=
⟨⊤⟩
instance : topological_space empty := ⊥
instance : discrete_topology empty := ⟨rfl⟩
instance : topological_space unit := ⊥
instance : discrete_topology unit := ⟨rfl⟩
instance : topological_space bool := ⊥
instance : discrete_topology bool := ⟨rfl⟩
instance : topological_space ℕ := ⊥
instance : discrete_topology ℕ := ⟨rfl⟩
instance : topological_space ℤ := ⊥
instance : discrete_topology ℤ := ⟨rfl⟩
instance sierpinski_space : topological_space Prop :=
generate_from {{true}}
lemma le_generate_from {t : topological_space α} { g : set (set α) } (h : ∀s∈g, is_open s) :
t ≤ generate_from g :=
le_generate_from_iff_subset_is_open.2 h
lemma induced_generate_from_eq {α β} {b : set (set β)} {f : α → β} :
(generate_from b).induced f = topological_space.generate_from (preimage f '' b) :=
le_antisymm
(le_generate_from $ ball_image_iff.2 $ assume s hs, ⟨s, generate_open.basic _ hs, rfl⟩)
(coinduced_le_iff_le_induced.1 $ le_generate_from $ assume s hs,
generate_open.basic _ $ mem_image_of_mem _ hs)
/-- This construction is left adjoint to the operation sending a topology on `α`
to its neighborhood filter at a fixed point `a : α`. -/
protected def topological_space.nhds_adjoint (a : α) (f : filter α) : topological_space α :=
{ is_open := λs, a ∈ s → s ∈ f,
is_open_univ := assume s, univ_mem_sets,
is_open_inter := assume s t hs ht ⟨has, hat⟩, inter_mem_sets (hs has) (ht hat),
is_open_sUnion := assume k hk ⟨u, hu, hau⟩, mem_sets_of_superset (hk u hu hau) (subset_sUnion_of_mem hu) }
lemma gc_nhds (a : α) :
galois_connection (topological_space.nhds_adjoint a) (λt, @nhds α t a) :=
assume f t, by { rw le_nhds_iff, exact ⟨λ H s hs has, H _ has hs, λ H s has hs, H _ hs has⟩ }
lemma nhds_mono {t₁ t₂ : topological_space α} {a : α} (h : t₁ ≤ t₂) :
@nhds α t₁ a ≤ @nhds α t₂ a := (gc_nhds a).monotone_u h
lemma nhds_infi {ι : Sort*} {t : ι → topological_space α} {a : α} :
@nhds α (infi t) a = (⨅i, @nhds α (t i) a) := (gc_nhds a).u_infi
lemma nhds_Inf {s : set (topological_space α)} {a : α} :
@nhds α (Inf s) a = (⨅t∈s, @nhds α t a) := (gc_nhds a).u_Inf
lemma nhds_inf {t₁ t₂ : topological_space α} {a : α} :
@nhds α (t₁ ⊓ t₂) a = @nhds α t₁ a ⊓ @nhds α t₂ a := (gc_nhds a).u_inf
lemma nhds_top {a : α} : @nhds α ⊤ a = ⊤ := (gc_nhds a).u_top
local notation `cont` := @continuous _ _
local notation `tspace` := topological_space
open topological_space
variables {γ : Type*} {f : α → β} {ι : Sort*}
lemma continuous_iff_coinduced_le {t₁ : tspace α} {t₂ : tspace β} :
cont t₁ t₂ f ↔ coinduced f t₁ ≤ t₂ := iff.rfl
lemma continuous_iff_le_induced {t₁ : tspace α} {t₂ : tspace β} :
cont t₁ t₂ f ↔ t₁ ≤ induced f t₂ :=
iff.trans continuous_iff_coinduced_le (gc_coinduced_induced f _ _)
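-- A hedged consequence (a hypothetical example, not part of the original
-- file): the two characterisations of continuity above can be traded for one
-- another, as stated in the module documentation.
example {t₁ : tspace α} {t₂ : tspace β} (h : coinduced f t₁ ≤ t₂) :
  t₁ ≤ induced f t₂ :=
continuous_iff_le_induced.1 (continuous_iff_coinduced_le.2 h)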
theorem continuous_generated_from {t : tspace α} {b : set (set β)}
(h : ∀s∈b, is_open (f ⁻¹' s)) : cont t (generate_from b) f :=
continuous_iff_coinduced_le.2 $ le_generate_from h
lemma continuous_induced_dom {t : tspace β} : cont (induced f t) t f :=
assume s h, ⟨_, h, rfl⟩
lemma continuous_induced_rng {g : γ → α} {t₂ : tspace β} {t₁ : tspace γ}
(h : cont t₁ t₂ (f ∘ g)) : cont t₁ (induced f t₂) g :=
assume s ⟨t, ht, s_eq⟩, s_eq ▸ h t ht
lemma continuous_coinduced_rng {t : tspace α} : cont t (coinduced f t) f :=
assume s h, h
lemma continuous_coinduced_dom {g : β → γ} {t₁ : tspace α} {t₂ : tspace γ}
(h : cont t₁ t₂ (g ∘ f)) : cont (coinduced f t₁) t₂ g :=
assume s hs, h s hs
lemma continuous_le_dom {t₁ t₂ : tspace α} {t₃ : tspace β}
(h₁ : t₂ ≤ t₁) (h₂ : cont t₁ t₃ f) : cont t₂ t₃ f :=
assume s h, h₁ _ (h₂ s h)
lemma continuous_le_rng {t₁ : tspace α} {t₂ t₃ : tspace β}
(h₁ : t₂ ≤ t₃) (h₂ : cont t₁ t₂ f) : cont t₁ t₃ f :=
assume s h, h₂ s (h₁ s h)
lemma continuous_sup_dom {t₁ t₂ : tspace α} {t₃ : tspace β}
(h₁ : cont t₁ t₃ f) (h₂ : cont t₂ t₃ f) : cont (t₁ ⊔ t₂) t₃ f :=
assume s h, ⟨h₁ s h, h₂ s h⟩
lemma continuous_sup_rng_left {t₁ : tspace α} {t₃ t₂ : tspace β} :
cont t₁ t₂ f → cont t₁ (t₂ ⊔ t₃) f :=
continuous_le_rng le_sup_left
lemma continuous_sup_rng_right {t₁ : tspace α} {t₃ t₂ : tspace β} :
cont t₁ t₃ f → cont t₁ (t₂ ⊔ t₃) f :=
continuous_le_rng le_sup_right
lemma continuous_Sup_dom {t₁ : set (tspace α)} {t₂ : tspace β}
(h : ∀t∈t₁, cont t t₂ f) : cont (Sup t₁) t₂ f :=
continuous_iff_le_induced.2 $ Sup_le $ assume t ht, continuous_iff_le_induced.1 $ h t ht
lemma continuous_Sup_rng {t₁ : tspace α} {t₂ : set (tspace β)} {t : tspace β}
(h₁ : t ∈ t₂) (hf : cont t₁ t f) : cont t₁ (Sup t₂) f :=
continuous_iff_coinduced_le.2 $ le_Sup_of_le h₁ $ continuous_iff_coinduced_le.1 hf
lemma continuous_supr_dom {t₁ : ι → tspace α} {t₂ : tspace β}
(h : ∀i, cont (t₁ i) t₂ f) : cont (supr t₁) t₂ f :=
continuous_Sup_dom $ assume t ⟨i, (t_eq : t₁ i = t)⟩, t_eq ▸ h i
lemma continuous_supr_rng {t₁ : tspace α} {t₂ : ι → tspace β} {i : ι}
(h : cont t₁ (t₂ i) f) : cont t₁ (supr t₂) f :=
continuous_Sup_rng ⟨i, rfl⟩ h
lemma continuous_inf_rng {t₁ : tspace α} {t₂ t₃ : tspace β}
(h₁ : cont t₁ t₂ f) (h₂ : cont t₁ t₃ f) : cont t₁ (t₂ ⊓ t₃) f :=
continuous_iff_coinduced_le.2 $ le_inf
(continuous_iff_coinduced_le.1 h₁)
(continuous_iff_coinduced_le.1 h₂)
lemma continuous_inf_dom_left {t₁ t₂ : tspace α} {t₃ : tspace β} :
cont t₁ t₃ f → cont (t₁ ⊓ t₂) t₃ f :=
continuous_le_dom inf_le_left
lemma continuous_inf_dom_right {t₁ t₂ : tspace α} {t₃ : tspace β} :
cont t₂ t₃ f → cont (t₁ ⊓ t₂) t₃ f :=
continuous_le_dom inf_le_right
lemma continuous_Inf_dom {t₁ : set (tspace α)} {t₂ : tspace β} {t : tspace α} (h₁ : t ∈ t₁) :
cont t t₂ f → cont (Inf t₁) t₂ f :=
continuous_le_dom $ Inf_le h₁
lemma continuous_Inf_rng {t₁ : tspace α} {t₂ : set (tspace β)}
(h : ∀t∈t₂, cont t₁ t f) : cont t₁ (Inf t₂) f :=
continuous_iff_coinduced_le.2 $ le_Inf $ assume b hb, continuous_iff_coinduced_le.1 $ h b hb
lemma continuous_infi_dom {t₁ : ι → tspace α} {t₂ : tspace β} {i : ι} :
cont (t₁ i) t₂ f → cont (infi t₁) t₂ f :=
continuous_le_dom $ infi_le _ _
lemma continuous_infi_rng {t₁ : tspace α} {t₂ : ι → tspace β}
(h : ∀i, cont t₁ (t₂ i) f) : cont t₁ (infi t₂) f :=
continuous_iff_coinduced_le.2 $ le_infi $ assume i, continuous_iff_coinduced_le.1 $ h i
lemma continuous_bot {t : tspace β} : cont ⊥ t f :=
continuous_iff_le_induced.2 $ bot_le
lemma continuous_top {t : tspace α} : cont t ⊤ f :=
continuous_iff_coinduced_le.2 $ le_top
/- 𝓝 in the induced topology -/
theorem mem_nhds_induced [T : topological_space α] (f : β → α) (a : β) (s : set β) :
s ∈ @nhds β (topological_space.induced f T) a ↔ ∃ u ∈ 𝓝 (f a), f ⁻¹' u ⊆ s :=
begin
simp only [mem_nhds_sets_iff, is_open_induced_iff, exists_prop, set.mem_set_of_eq],
split,
{ rintros ⟨u, usub, ⟨v, openv, ueq⟩, au⟩,
exact ⟨v, ⟨v, set.subset.refl v, openv, by rwa ←ueq at au⟩, by rw ueq; exact usub⟩ },
rintros ⟨u, ⟨v, vsubu, openv, amem⟩, finvsub⟩,
exact ⟨f ⁻¹' v, set.subset.trans (set.preimage_mono vsubu) finvsub, ⟨⟨v, openv, rfl⟩, amem⟩⟩
end
theorem nhds_induced [T : topological_space α] (f : β → α) (a : β) :
@nhds β (topological_space.induced f T) a = comap f (𝓝 (f a)) :=
filter_eq $ by ext s; rw mem_nhds_induced; rw mem_comap_sets
lemma induced_iff_nhds_eq [tα : topological_space α] [tβ : topological_space β] (f : β → α) :
tβ = tα.induced f ↔ ∀ b, 𝓝 b = comap f (𝓝 $ f b) :=
⟨λ h a, h.symm ▸ nhds_induced f a, λ h, eq_of_nhds_eq_nhds $ λ x, by rw [h, nhds_induced]⟩
theorem map_nhds_induced_of_surjective [T : topological_space α]
{f : β → α} (hf : function.surjective f) (a : β) :
map f (@nhds β (topological_space.induced f T) a) = 𝓝 (f a) :=
by rw [nhds_induced, map_comap_of_surjective hf]
end constructions
section induced
open topological_space
variables {α : Type*} {β : Type*}
variables [t : topological_space β] {f : α → β}
theorem is_open_induced_eq {s : set α} :
@_root_.is_open _ (induced f t) s ↔ s ∈ preimage f '' {s | is_open s} :=
iff.rfl
theorem is_open_induced {s : set β} (h : is_open s) : (induced f t).is_open (f ⁻¹' s) :=
⟨s, h, rfl⟩
lemma map_nhds_induced_eq {a : α} (h : range f ∈ 𝓝 (f a)) :
map f (@nhds α (induced f t) a) = 𝓝 (f a) :=
by rw [nhds_induced, filter.map_comap h]
lemma closure_induced [t : topological_space β] {f : α → β} {a : α} {s : set α}
(hf : ∀x y, f x = f y → x = y) :
a ∈ @closure α (topological_space.induced f t) s ↔ f a ∈ closure (f '' s) :=
have comap f (𝓝 (f a) ⊓ principal (f '' s)) ≠ ⊥ ↔ 𝓝 (f a) ⊓ principal (f '' s) ≠ ⊥,
from ⟨assume h₁ h₂, h₁ $ h₂.symm ▸ comap_bot,
assume h,
forall_sets_nonempty_iff_ne_bot.mp $
assume s₁ ⟨s₂, hs₂, (hs : f ⁻¹' s₂ ⊆ s₁)⟩,
have f '' s ∈ 𝓝 (f a) ⊓ principal (f '' s),
from mem_inf_sets_of_right $ by simp [subset.refl],
have s₂ ∩ f '' s ∈ 𝓝 (f a) ⊓ principal (f '' s),
from inter_mem_sets hs₂ this,
let ⟨b, hb₁, ⟨a, ha, ha₂⟩⟩ := nonempty_of_mem_sets h this in
⟨_, hs $ by rwa [←ha₂] at hb₁⟩⟩,
calc a ∈ @closure α (topological_space.induced f t) s
↔ (@nhds α (topological_space.induced f t) a) ⊓ principal s ≠ ⊥ : by rw [closure_eq_nhds]; refl
... ↔ comap f (𝓝 (f a)) ⊓ principal (f ⁻¹' (f '' s)) ≠ ⊥ : by rw [nhds_induced, preimage_image_eq _ hf]
... ↔ comap f (𝓝 (f a) ⊓ principal (f '' s)) ≠ ⊥ : by rw [comap_inf, ←comap_principal]
... ↔ _ : by rwa [closure_eq_nhds]
end induced
section sierpinski
variables {α : Type*} [topological_space α]
@[simp] lemma is_open_singleton_true : is_open ({true} : set Prop) :=
topological_space.generate_open.basic _ (by simp)
lemma continuous_Prop {p : α → Prop} : continuous p ↔ is_open {x | p x} :=
⟨assume h : continuous p,
have is_open (p ⁻¹' {true}),
from h _ is_open_singleton_true,
by simp [preimage, eq_true] at this; assumption,
assume h : is_open {x | p x},
continuous_generated_from $ assume s (hs : s ∈ {{true}}),
by simp at hs; simp [hs, preimage, eq_true, h]⟩
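-- A hedged illustration (a hypothetical example, not part of the original
-- file): the characteristic predicate of an open set is a continuous map into
-- the Sierpiński space `Prop`.
example {s : set α} (hs : is_open s) : continuous (λ x, x ∈ s) :=
continuous_Prop.2 hs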
end sierpinski
section infi
variables {α : Type u} {ι : Type v} {t : ι → topological_space α}
lemma is_open_supr_iff {s : set α} : @is_open _ (⨆ i, t i) s ↔ ∀ i, @is_open _ (t i) s :=
begin
-- s defines a map from α to Prop, which is continuous iff s is open.
suffices : @continuous _ _ (⨆ i, t i) _ s ↔ ∀ i, @continuous _ _ (t i) _ s,
{ simpa only [continuous_Prop] using this },
simp only [continuous_iff_le_induced, supr_le_iff]
end
lemma is_closed_infi_iff {s : set α} : @is_closed _ (⨆ i, t i) s ↔ ∀ i, @is_closed _ (t i) s :=
is_open_supr_iff
end infi
|
07c02c388c78d0742d31bc239b96eaa4a0fe1b78
|
cf39355caa609c0f33405126beee2739aa3cb77e
|
/tests/lean/run/smt_facts_as_hinst_lemmas.lean
|
012fa7f8ec3bdd765466f0327f46882e1769467e
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/lean
|
12b87f69d92e614daea8bcc9d4de9a9ace089d0e
|
cce7990ea86a78bdb383e38ed7f9b5ba93c60ce0
|
refs/heads/master
| 1,687,508,156,644
| 1,684,951,104,000
| 1,684,951,104,000
| 169,960,991
| 457
| 107
|
Apache-2.0
| 1,686,744,372,000
| 1,549,790,268,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 132
|
lean
|
lemma aux : nat.succ 0 = 1 :=
rfl
attribute [ematch] aux
lemma ex (a : nat) : a = 1 → nat.succ 0 = a :=
begin [smt]
close
end
|
a6d21296aacc290c20e7ea54a9ea0004d924e6e9
|
46125763b4dbf50619e8846a1371029346f4c3db
|
/src/topology/basic.lean
|
76432e56356eb5653341393ea2134c603fbc6e66
|
[
"Apache-2.0"
] |
permissive
|
thjread/mathlib
|
a9d97612cedc2c3101060737233df15abcdb9eb1
|
7cffe2520a5518bba19227a107078d83fa725ddc
|
refs/heads/master
| 1,615,637,696,376
| 1,583,953,063,000
| 1,583,953,063,000
| 246,680,271
| 0
| 0
|
Apache-2.0
| 1,583,960,875,000
| 1,583,960,875,000
| null |
UTF-8
|
Lean
| false
| false
| 35,543
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Jeremy Avigad
-/
import order.filter order.filter.bases
/-!
# Basic theory of topological spaces.
The main definition is the type class `topological_space α`, which endows a type `α` with a topology.
Then `set α` gets predicates `is_open`, `is_closed` and functions `interior`, `closure` and
`frontier`. Each point `x` of `α` gets a neighborhood filter `𝓝 x`.
This file also defines locally finite families of subsets of `α`.
For topological spaces `α` and `β`, a function `f : α → β` and a point `a : α`,
`continuous_at f a` means `f` is continuous at `a`, and global continuity is
`continuous f`. There is also a version of continuity `pcontinuous` for
partially defined functions.
## Implementation notes
Topology in mathlib heavily uses filters (even more than in Bourbaki). See explanations in
`docs/theories/topology.md`.
## References
* [N. Bourbaki, *General Topology*][bourbaki1966]
* [I. M. James, *Topologies and Uniformities*][james1999]
## Tags
topological space, interior, closure, frontier, neighborhood, continuity, continuous function
-/
open set filter lattice classical
open_locale classical
universes u v w
/-- A topology on `α`. -/
structure topological_space (α : Type u) :=
(is_open : set α → Prop)
(is_open_univ : is_open univ)
(is_open_inter : ∀s t, is_open s → is_open t → is_open (s ∩ t))
(is_open_sUnion : ∀s, (∀t∈s, is_open t) → is_open (⋃₀ s))
attribute [class] topological_space
/-- A constructor for topologies by specifying the closed sets,
and showing that they satisfy the appropriate conditions. -/
def topological_space.of_closed {α : Type u} (T : set (set α))
(empty_mem : ∅ ∈ T) (sInter_mem : ∀ A ⊆ T, ⋂₀ A ∈ T) (union_mem : ∀ A B ∈ T, A ∪ B ∈ T) :
topological_space α :=
{ is_open := λ X, -X ∈ T,
is_open_univ := by simp [empty_mem],
is_open_inter := λ s t hs ht, by simpa [set.compl_inter] using union_mem (-s) (-t) hs ht,
is_open_sUnion := λ s hs,
by rw set.compl_sUnion; exact sInter_mem (set.compl '' s)
(λ z ⟨y, hy, hz⟩, by simpa [hz.symm] using hs y hy) }
section topological_space
variables {α : Type u} {β : Type v} {ι : Sort w} {a : α} {s s₁ s₂ : set α} {p p₁ p₂ : α → Prop}
@[ext]
lemma topological_space_eq : ∀ {f g : topological_space α}, f.is_open = g.is_open → f = g
| ⟨a, _, _, _⟩ ⟨b, _, _, _⟩ rfl := rfl
section
variables [t : topological_space α]
include t
/-- `is_open s` means that `s` is open in the ambient topological space on `α` -/
def is_open (s : set α) : Prop := topological_space.is_open t s
@[simp]
lemma is_open_univ : is_open (univ : set α) := topological_space.is_open_univ t
lemma is_open_inter (h₁ : is_open s₁) (h₂ : is_open s₂) : is_open (s₁ ∩ s₂) :=
topological_space.is_open_inter t s₁ s₂ h₁ h₂
lemma is_open_sUnion {s : set (set α)} (h : ∀t ∈ s, is_open t) : is_open (⋃₀ s) :=
topological_space.is_open_sUnion t s h
end
lemma is_open_fold {s : set α} {t : topological_space α} : t.is_open s = @is_open α t s :=
rfl
variables [topological_space α]
lemma is_open_Union {f : ι → set α} (h : ∀i, is_open (f i)) : is_open (⋃i, f i) :=
is_open_sUnion $ by rintro _ ⟨i, rfl⟩; exact h i
lemma is_open_bUnion {s : set β} {f : β → set α} (h : ∀i∈s, is_open (f i)) :
is_open (⋃i∈s, f i) :=
is_open_Union $ assume i, is_open_Union $ assume hi, h i hi
lemma is_open_union (h₁ : is_open s₁) (h₂ : is_open s₂) : is_open (s₁ ∪ s₂) :=
by rw union_eq_Union; exact is_open_Union (bool.forall_bool.2 ⟨h₂, h₁⟩)
@[simp] lemma is_open_empty : is_open (∅ : set α) :=
by rw ← sUnion_empty; exact is_open_sUnion (assume a, false.elim)
lemma is_open_sInter {s : set (set α)} (hs : finite s) : (∀t ∈ s, is_open t) → is_open (⋂₀ s) :=
finite.induction_on hs (λ _, by rw sInter_empty; exact is_open_univ) $
λ a s has hs ih h, by rw sInter_insert; exact
is_open_inter (h _ $ mem_insert _ _) (ih $ λ t, h t ∘ mem_insert_of_mem _)
lemma is_open_bInter {s : set β} {f : β → set α} (hs : finite s) :
(∀i∈s, is_open (f i)) → is_open (⋂i∈s, f i) :=
finite.induction_on hs
(λ _, by rw bInter_empty; exact is_open_univ)
(λ a s has hs ih h, by rw bInter_insert; exact
is_open_inter (h a (mem_insert _ _)) (ih (λ i hi, h i (mem_insert_of_mem _ hi))))
lemma is_open_Inter [fintype β] {s : β → set α}
(h : ∀ i, is_open (s i)) : is_open (⋂ i, s i) :=
suffices is_open (⋂ (i : β) (hi : i ∈ @univ β), s i), by simpa,
is_open_bInter finite_univ (λ i _, h i)
lemma is_open_Inter_prop {p : Prop} {s : p → set α}
(h : ∀ h : p, is_open (s h)) : is_open (Inter s) :=
by by_cases p; simp *
lemma is_open_const {p : Prop} : is_open {a : α | p} :=
by_cases
(assume : p, begin simp only [this]; exact is_open_univ end)
(assume : ¬ p, begin simp only [this]; exact is_open_empty end)
lemma is_open_and : is_open {a | p₁ a} → is_open {a | p₂ a} → is_open {a | p₁ a ∧ p₂ a} :=
is_open_inter
/-- A set is closed if its complement is open -/
def is_closed (s : set α) : Prop := is_open (-s)
@[simp] lemma is_closed_empty : is_closed (∅ : set α) :=
by unfold is_closed; rw compl_empty; exact is_open_univ
@[simp] lemma is_closed_univ : is_closed (univ : set α) :=
by unfold is_closed; rw compl_univ; exact is_open_empty
lemma is_closed_union : is_closed s₁ → is_closed s₂ → is_closed (s₁ ∪ s₂) :=
λ h₁ h₂, by unfold is_closed; rw compl_union; exact is_open_inter h₁ h₂
lemma is_closed_sInter {s : set (set α)} : (∀t ∈ s, is_closed t) → is_closed (⋂₀ s) :=
by simp only [is_closed, compl_sInter, sUnion_image]; exact assume h, is_open_Union $ assume t, is_open_Union $ assume ht, h t ht
lemma is_closed_Inter {f : ι → set α} (h : ∀i, is_closed (f i)) : is_closed (⋂i, f i ) :=
is_closed_sInter $ assume t ⟨i, (heq : f i = t)⟩, heq ▸ h i
@[simp] lemma is_open_compl_iff {s : set α} : is_open (-s) ↔ is_closed s := iff.rfl
@[simp] lemma is_closed_compl_iff {s : set α} : is_closed (-s) ↔ is_open s :=
by rw [←is_open_compl_iff, compl_compl]
lemma is_open_diff {s t : set α} (h₁ : is_open s) (h₂ : is_closed t) : is_open (s \ t) :=
is_open_inter h₁ $ is_open_compl_iff.mpr h₂
lemma is_closed_inter (h₁ : is_closed s₁) (h₂ : is_closed s₂) : is_closed (s₁ ∩ s₂) :=
by rw [is_closed, compl_inter]; exact is_open_union h₁ h₂
lemma is_closed_bUnion {s : set β} {f : β → set α} (hs : finite s) :
(∀i∈s, is_closed (f i)) → is_closed (⋃i∈s, f i) :=
finite.induction_on hs
(λ _, by rw bUnion_empty; exact is_closed_empty)
(λ a s has hs ih h, by rw bUnion_insert; exact
is_closed_union (h a (mem_insert _ _)) (ih (λ i hi, h i (mem_insert_of_mem _ hi))))
lemma is_closed_Union [fintype β] {s : β → set α}
(h : ∀ i, is_closed (s i)) : is_closed (Union s) :=
suffices is_closed (⋃ (i : β) (hi : i ∈ @univ β), s i),
by convert this; simp [set.ext_iff],
is_closed_bUnion finite_univ (λ i _, h i)
lemma is_closed_Union_prop {p : Prop} {s : p → set α}
(h : ∀ h : p, is_closed (s h)) : is_closed (Union s) :=
by by_cases p; simp *
lemma is_closed_imp {p q : α → Prop} (hp : is_open {x | p x})
(hq : is_closed {x | q x}) : is_closed {x | p x → q x} :=
have {x | p x → q x} = (- {x | p x}) ∪ {x | q x}, from set.ext $ λ x, imp_iff_not_or,
by rw [this]; exact is_closed_union (is_closed_compl_iff.mpr hp) hq
lemma is_open_neg : is_closed {a | p a} → is_open {a | ¬ p a} :=
is_open_compl_iff.mpr
/-- The interior of a set `s` is the largest open subset of `s`. -/
def interior (s : set α) : set α := ⋃₀ {t | is_open t ∧ t ⊆ s}
lemma mem_interior {s : set α} {x : α} :
x ∈ interior s ↔ ∃ t ⊆ s, is_open t ∧ x ∈ t :=
by simp only [interior, mem_set_of_eq, exists_prop, and_assoc, and.left_comm]
@[simp] lemma is_open_interior {s : set α} : is_open (interior s) :=
is_open_sUnion $ assume t ⟨h₁, h₂⟩, h₁
lemma interior_subset {s : set α} : interior s ⊆ s :=
sUnion_subset $ assume t ⟨h₁, h₂⟩, h₂
lemma interior_maximal {s t : set α} (h₁ : t ⊆ s) (h₂ : is_open t) : t ⊆ interior s :=
subset_sUnion_of_mem ⟨h₂, h₁⟩
lemma interior_eq_of_open {s : set α} (h : is_open s) : interior s = s :=
subset.antisymm interior_subset (interior_maximal (subset.refl s) h)
lemma interior_eq_iff_open {s : set α} : interior s = s ↔ is_open s :=
⟨assume h, h ▸ is_open_interior, interior_eq_of_open⟩
lemma subset_interior_iff_open {s : set α} : s ⊆ interior s ↔ is_open s :=
by simp only [interior_eq_iff_open.symm, subset.antisymm_iff, interior_subset, true_and]
lemma subset_interior_iff_subset_of_open {s t : set α} (h₁ : is_open s) :
s ⊆ interior t ↔ s ⊆ t :=
⟨assume h, subset.trans h interior_subset, assume h₂, interior_maximal h₂ h₁⟩
lemma interior_mono {s t : set α} (h : s ⊆ t) : interior s ⊆ interior t :=
interior_maximal (subset.trans interior_subset h) is_open_interior
@[simp] lemma interior_empty : interior (∅ : set α) = ∅ :=
interior_eq_of_open is_open_empty
@[simp] lemma interior_univ : interior (univ : set α) = univ :=
interior_eq_of_open is_open_univ
@[simp] lemma interior_interior {s : set α} : interior (interior s) = interior s :=
interior_eq_of_open is_open_interior
@[simp] lemma interior_inter {s t : set α} : interior (s ∩ t) = interior s ∩ interior t :=
subset.antisymm
(subset_inter (interior_mono $ inter_subset_left s t) (interior_mono $ inter_subset_right s t))
(interior_maximal (inter_subset_inter interior_subset interior_subset) $ is_open_inter is_open_interior is_open_interior)
lemma interior_union_is_closed_of_interior_empty {s t : set α} (h₁ : is_closed s) (h₂ : interior t = ∅) :
interior (s ∪ t) = interior s :=
have interior (s ∪ t) ⊆ s, from
assume x ⟨u, ⟨(hu₁ : is_open u), (hu₂ : u ⊆ s ∪ t)⟩, (hx₁ : x ∈ u)⟩,
classical.by_contradiction $ assume hx₂ : x ∉ s,
have u \ s ⊆ t,
from assume x ⟨h₁, h₂⟩, or.resolve_left (hu₂ h₁) h₂,
have u \ s ⊆ interior t,
by rwa subset_interior_iff_subset_of_open (is_open_diff hu₁ h₁),
have u \ s ⊆ ∅,
by rwa h₂ at this,
this ⟨hx₁, hx₂⟩,
subset.antisymm
(interior_maximal this is_open_interior)
(interior_mono $ subset_union_left _ _)
lemma is_open_iff_forall_mem_open : is_open s ↔ ∀ x ∈ s, ∃ t ⊆ s, is_open t ∧ x ∈ t :=
by rw ← subset_interior_iff_open; simp only [subset_def, mem_interior]
/-- The closure of `s` is the smallest closed set containing `s`. -/
def closure (s : set α) : set α := ⋂₀ {t | is_closed t ∧ s ⊆ t}
@[simp] lemma is_closed_closure {s : set α} : is_closed (closure s) :=
is_closed_sInter $ assume t ⟨h₁, h₂⟩, h₁
lemma subset_closure {s : set α} : s ⊆ closure s :=
subset_sInter $ assume t ⟨h₁, h₂⟩, h₂
lemma closure_minimal {s t : set α} (h₁ : s ⊆ t) (h₂ : is_closed t) : closure s ⊆ t :=
sInter_subset_of_mem ⟨h₂, h₁⟩
lemma closure_eq_of_is_closed {s : set α} (h : is_closed s) : closure s = s :=
subset.antisymm (closure_minimal (subset.refl s) h) subset_closure
lemma closure_eq_iff_is_closed {s : set α} : closure s = s ↔ is_closed s :=
⟨assume h, h ▸ is_closed_closure, closure_eq_of_is_closed⟩
lemma closure_subset_iff_subset_of_is_closed {s t : set α} (h₁ : is_closed t) :
closure s ⊆ t ↔ s ⊆ t :=
⟨subset.trans subset_closure, assume h, closure_minimal h h₁⟩
lemma closure_mono {s t : set α} (h : s ⊆ t) : closure s ⊆ closure t :=
closure_minimal (subset.trans h subset_closure) is_closed_closure
lemma monotone_closure (α : Type*) [topological_space α] : monotone (@closure α _) :=
λ _ _, closure_mono
lemma closure_inter_subset_inter_closure (s t : set α) :
closure (s ∩ t) ⊆ closure s ∩ closure t :=
(monotone_closure α).map_inf_le s t
lemma is_closed_of_closure_subset {s : set α} (h : closure s ⊆ s) : is_closed s :=
by rw subset.antisymm subset_closure h; exact is_closed_closure
@[simp] lemma closure_empty : closure (∅ : set α) = ∅ :=
closure_eq_of_is_closed is_closed_empty
lemma closure_empty_iff (s : set α) : closure s = ∅ ↔ s = ∅ :=
⟨subset_eq_empty subset_closure, λ h, h.symm ▸ closure_empty⟩
lemma set.nonempty.closure {s : set α} (h : s.nonempty) :
set.nonempty (closure s) :=
let ⟨x, hx⟩ := h in ⟨x, subset_closure hx⟩
@[simp] lemma closure_univ : closure (univ : set α) = univ :=
closure_eq_of_is_closed is_closed_univ
@[simp] lemma closure_closure {s : set α} : closure (closure s) = closure s :=
closure_eq_of_is_closed is_closed_closure
@[simp] lemma closure_union {s t : set α} : closure (s ∪ t) = closure s ∪ closure t :=
subset.antisymm
(closure_minimal (union_subset_union subset_closure subset_closure) $ is_closed_union is_closed_closure is_closed_closure)
((monotone_closure α).le_map_sup s t)
lemma interior_subset_closure {s : set α} : interior s ⊆ closure s :=
subset.trans interior_subset subset_closure
lemma closure_eq_compl_interior_compl {s : set α} : closure s = - interior (- s) :=
begin
unfold interior closure is_closed,
rw [compl_sUnion, compl_image_set_of],
simp only [compl_subset_compl]
end
@[simp] lemma interior_compl {s : set α} : interior (- s) = - closure s :=
by simp [closure_eq_compl_interior_compl]
@[simp] lemma closure_compl {s : set α} : closure (- s) = - interior s :=
by simp [closure_eq_compl_interior_compl]
theorem mem_closure_iff {s : set α} {a : α} :
a ∈ closure s ↔ ∀ o, is_open o → a ∈ o → (o ∩ s).nonempty :=
⟨λ h o oo ao, classical.by_contradiction $ λ os,
have s ⊆ -o, from λ x xs xo, os ⟨x, xo, xs⟩,
closure_minimal this (is_closed_compl_iff.2 oo) h ao,
λ H c ⟨h₁, h₂⟩, classical.by_contradiction $ λ nc,
let ⟨x, hc, hs⟩ := (H _ h₁ nc) in hc (h₂ hs)⟩
lemma dense_iff_inter_open {s : set α} :
closure s = univ ↔ ∀ U, is_open U → U.nonempty → (U ∩ s).nonempty :=
begin
split ; intro h,
{ rintros U U_op ⟨x, x_in⟩,
exact mem_closure_iff.1 (by simp only [h]) U U_op x_in },
{ apply eq_univ_of_forall, intro x,
rw mem_closure_iff,
intros U U_op x_in,
exact h U U_op ⟨_, x_in⟩ },
end
lemma dense_of_subset_dense {s₁ s₂ : set α} (h : s₁ ⊆ s₂) (hd : closure s₁ = univ) :
closure s₂ = univ :=
by { rw [← univ_subset_iff, ← hd], exact closure_mono h }
/-- The frontier of a set is its closure minus its interior, i.e. the points of the closure
that are not interior points. -/
def frontier (s : set α) : set α := closure s \ interior s
lemma frontier_eq_closure_inter_closure {s : set α} :
frontier s = closure s ∩ closure (- s) :=
by rw [closure_compl, frontier, diff_eq]
/-- The complement of a set has the same frontier as the original set. -/
@[simp] lemma frontier_compl (s : set α) : frontier (-s) = frontier s :=
by simp only [frontier_eq_closure_inter_closure, lattice.neg_neg, inter_comm]
lemma frontier_inter_subset (s t : set α) :
frontier (s ∩ t) ⊆ (frontier s ∩ closure t) ∪ (closure s ∩ frontier t) :=
begin
simp only [frontier_eq_closure_inter_closure, compl_inter, closure_union],
convert inter_subset_inter_left _ (closure_inter_subset_inter_closure s t),
simp only [inter_distrib_left, inter_distrib_right, inter_assoc],
congr' 2,
apply inter_comm
end
lemma frontier_union_subset (s t : set α) :
frontier (s ∪ t) ⊆ (frontier s ∩ closure (-t)) ∪ (closure (-s) ∩ frontier t) :=
by simpa only [frontier_compl, (compl_union _ _).symm]
using frontier_inter_subset (-s) (-t)
lemma is_closed.frontier_eq {s : set α} (hs : is_closed s) : frontier s = s \ interior s :=
by rw [frontier, closure_eq_of_is_closed hs]
lemma is_open.frontier_eq {s : set α} (hs : is_open s) : frontier s = closure s \ s :=
by rw [frontier, interior_eq_of_open hs]
/-- The frontier of a set is closed. -/
lemma is_closed_frontier {s : set α} : is_closed (frontier s) :=
by rw frontier_eq_closure_inter_closure; exact is_closed_inter is_closed_closure is_closed_closure
/-- The frontier of a set has no interior point. -/
lemma interior_frontier {s : set α} (h : is_closed s) : interior (frontier s) = ∅ :=
begin
have A : frontier s = s \ interior s, from h.frontier_eq,
have B : interior (frontier s) ⊆ interior s, by rw A; exact interior_mono (diff_subset _ _),
have C : interior (frontier s) ⊆ frontier s := interior_subset,
have : interior (frontier s) ⊆ (interior s) ∩ (s \ interior s) :=
subset_inter B (by simpa [A] using C),
rwa [inter_diff_self, subset_empty_iff] at this,
end
/-- neighbourhood filter -/
def nhds (a : α) : filter α := (⨅ s ∈ {s : set α | a ∈ s ∧ is_open s}, principal s)
localized "notation `𝓝` := nhds" in topological_space
lemma nhds_def (a : α) : 𝓝 a = (⨅ s ∈ {s : set α | a ∈ s ∧ is_open s}, principal s) := rfl
lemma nhds_basis_opens (a : α) : (𝓝 a).has_basis (λ s : set α, a ∈ s ∧ is_open s) id :=
has_basis_binfi_principal
(λ s ⟨has, hs⟩ t ⟨hat, ht⟩, ⟨s ∩ t, ⟨⟨has, hat⟩, is_open_inter hs ht⟩,
⟨inter_subset_left _ _, inter_subset_right _ _⟩⟩)
⟨univ, ⟨mem_univ a, is_open_univ⟩⟩
lemma le_nhds_iff {f a} : f ≤ 𝓝 a ↔ ∀ s : set α, a ∈ s → is_open s → s ∈ f :=
by simp [nhds_def]
lemma nhds_le_of_le {f a} {s : set α} (h : a ∈ s) (o : is_open s) (sf : principal s ≤ f) : 𝓝 a ≤ f :=
by rw nhds_def; exact infi_le_of_le s (infi_le_of_le ⟨h, o⟩ sf)
lemma mem_nhds_sets_iff {a : α} {s : set α} :
s ∈ 𝓝 a ↔ ∃t⊆s, is_open t ∧ a ∈ t :=
(nhds_basis_opens a).mem_iff.trans
⟨λ ⟨t, ⟨hat, ht⟩, hts⟩, ⟨t, hts, ht, hat⟩, λ ⟨t, hts, ht, hat⟩, ⟨t, ⟨hat, ht⟩, hts⟩⟩
lemma map_nhds {a : α} {f : α → β} :
map f (𝓝 a) = (⨅ s ∈ {s : set α | a ∈ s ∧ is_open s}, principal (image f s)) :=
((nhds_basis_opens a).map f).eq_binfi
attribute [irreducible] nhds
lemma mem_of_nhds {a : α} {s : set α} : s ∈ 𝓝 a → a ∈ s :=
λ H, let ⟨t, ht, _, hs⟩ := mem_nhds_sets_iff.1 H in ht hs
lemma mem_nhds_sets {a : α} {s : set α} (hs : is_open s) (ha : a ∈ s) :
s ∈ 𝓝 a :=
mem_nhds_sets_iff.2 ⟨s, subset.refl _, hs, ha⟩
theorem all_mem_nhds (x : α) (P : set α → Prop) (hP : ∀ s t, s ⊆ t → P s → P t) :
(∀ s ∈ 𝓝 x, P s) ↔ (∀ s, is_open s → x ∈ s → P s) :=
iff.intro
(λ h s os xs, h s (mem_nhds_sets os xs))
(λ h t,
begin
change t ∈ 𝓝 x → P t,
rw mem_nhds_sets_iff,
rintros ⟨s, hs, opens, xs⟩,
exact hP _ _ hs (h s opens xs),
end)
theorem all_mem_nhds_filter (x : α) (f : set α → set β) (hf : ∀ s t, s ⊆ t → f s ⊆ f t)
(l : filter β) :
(∀ s ∈ 𝓝 x, f s ∈ l) ↔ (∀ s, is_open s → x ∈ s → f s ∈ l) :=
all_mem_nhds _ _ (λ s t ssubt h, mem_sets_of_superset h (hf s t ssubt))
theorem rtendsto_nhds {r : rel β α} {l : filter β} {a : α} :
rtendsto r l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → r.core s ∈ l) :=
all_mem_nhds_filter _ _ (λ s t, id) _
theorem rtendsto'_nhds {r : rel β α} {l : filter β} {a : α} :
rtendsto' r l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → r.preimage s ∈ l) :=
by { rw [rtendsto'_def], apply all_mem_nhds_filter, apply rel.preimage_mono }
theorem ptendsto_nhds {f : β →. α} {l : filter β} {a : α} :
ptendsto f l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → f.core s ∈ l) :=
rtendsto_nhds
theorem ptendsto'_nhds {f : β →. α} {l : filter β} {a : α} :
ptendsto' f l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → f.preimage s ∈ l) :=
rtendsto'_nhds
theorem tendsto_nhds {f : β → α} {l : filter β} {a : α} :
tendsto f l (𝓝 a) ↔ (∀ s, is_open s → a ∈ s → f ⁻¹' s ∈ l) :=
all_mem_nhds_filter _ _ (λ s t h, preimage_mono h) _
lemma tendsto_const_nhds {a : α} {f : filter β} : tendsto (λb:β, a) f (𝓝 a) :=
tendsto_nhds.mpr $ assume s hs ha, univ_mem_sets' $ assume _, ha
lemma pure_le_nhds : pure ≤ (𝓝 : α → filter α) :=
assume a s hs, mem_pure_sets.2 $ mem_of_nhds hs
lemma tendsto_pure_nhds {α : Type*} [topological_space β] (f : α → β) (a : α) :
tendsto f (pure a) (𝓝 (f a)) :=
begin
rw [tendsto, filter.map_pure],
exact pure_le_nhds (f a)
end
@[simp] lemma nhds_ne_bot {a : α} : 𝓝 a ≠ ⊥ :=
ne_bot_of_le_ne_bot pure_ne_bot (pure_le_nhds a)
lemma interior_eq_nhds {s : set α} : interior s = {a | 𝓝 a ≤ principal s} :=
set.ext $ λ x, by simp only [mem_interior, le_principal_iff, mem_nhds_sets_iff]; refl
lemma mem_interior_iff_mem_nhds {s : set α} {a : α} :
a ∈ interior s ↔ s ∈ 𝓝 a :=
by simp only [interior_eq_nhds, le_principal_iff]; refl
lemma is_open_iff_nhds {s : set α} : is_open s ↔ ∀a∈s, 𝓝 a ≤ principal s :=
calc is_open s ↔ s ⊆ interior s : subset_interior_iff_open.symm
... ↔ (∀a∈s, 𝓝 a ≤ principal s) : by rw [interior_eq_nhds]; refl
lemma is_open_iff_mem_nhds {s : set α} : is_open s ↔ ∀a∈s, s ∈ 𝓝 a :=
is_open_iff_nhds.trans $ forall_congr $ λ _, imp_congr_right $ λ _, le_principal_iff
lemma closure_eq_nhds {s : set α} : closure s = {a | 𝓝 a ⊓ principal s ≠ ⊥} :=
calc closure s = - interior (- s) : closure_eq_compl_interior_compl
... = {a | ¬ 𝓝 a ≤ principal (-s)} : by rw [interior_eq_nhds]; refl
... = {a | 𝓝 a ⊓ principal s ≠ ⊥} : set.ext $ assume a, not_congr
(inf_eq_bot_iff_le_compl
(show principal s ⊔ principal (-s) = ⊤, by simp only [sup_principal, union_compl_self, principal_univ])
(by simp only [inf_principal, inter_compl_self, principal_empty])).symm
theorem mem_closure_iff_nhds {s : set α} {a : α} :
a ∈ closure s ↔ ∀ t ∈ 𝓝 a, (t ∩ s).nonempty :=
mem_closure_iff.trans
⟨λ H t ht, nonempty.mono
(inter_subset_inter_left _ interior_subset)
(H _ is_open_interior (mem_interior_iff_mem_nhds.2 ht)),
λ H o oo ao, H _ (mem_nhds_sets oo ao)⟩
theorem mem_closure_iff_nhds_basis {a : α} {p : β → Prop} {s : β → set α} (h : (𝓝 a).has_basis p s)
{t : set α} :
a ∈ closure t ↔ ∀ i, p i → ∃ y ∈ t, y ∈ s i :=
mem_closure_iff_nhds.trans
⟨λ H i hi, let ⟨x, hx⟩ := (H _ $ h.mem_of_mem hi) in ⟨x, hx.2, hx.1⟩,
λ H t' ht', let ⟨i, hi, hit⟩ := (h t').1 ht', ⟨x, xt, hx⟩ := H i hi in
⟨x, hit hx, xt⟩⟩
/-- `x` belongs to the closure of `s` if and only if some ultrafilter
supported on `s` converges to `x`. -/
lemma mem_closure_iff_ultrafilter {s : set α} {x : α} :
x ∈ closure s ↔ ∃ (u : ultrafilter α), s ∈ u.val ∧ u.val ≤ 𝓝 x :=
begin
rw closure_eq_nhds, change 𝓝 x ⊓ principal s ≠ ⊥ ↔ _, symmetry,
convert exists_ultrafilter_iff _, ext u,
rw [←le_principal_iff, inf_comm, le_inf_iff]
end
lemma is_closed_iff_nhds {s : set α} : is_closed s ↔ ∀a, 𝓝 a ⊓ principal s ≠ ⊥ → a ∈ s :=
calc is_closed s ↔ closure s = s : by rw [closure_eq_iff_is_closed]
... ↔ closure s ⊆ s : ⟨assume h, by rw h, assume h, subset.antisymm h subset_closure⟩
... ↔ (∀a, 𝓝 a ⊓ principal s ≠ ⊥ → a ∈ s) : by rw [closure_eq_nhds]; refl
lemma closure_inter_open {s t : set α} (h : is_open s) : s ∩ closure t ⊆ closure (s ∩ t) :=
assume a ⟨hs, ht⟩,
have s ∈ 𝓝 a, from mem_nhds_sets h hs,
have 𝓝 a ⊓ principal s = 𝓝 a, from inf_of_le_left $ by rwa le_principal_iff,
have 𝓝 a ⊓ principal (s ∩ t) ≠ ⊥,
from calc 𝓝 a ⊓ principal (s ∩ t) = 𝓝 a ⊓ (principal s ⊓ principal t) : by rw inf_principal
... = 𝓝 a ⊓ principal t : by rw [←inf_assoc, this]
... ≠ ⊥ : by rw [closure_eq_nhds] at ht; assumption,
by rw [closure_eq_nhds]; assumption
lemma closure_diff {s t : set α} : closure s - closure t ⊆ closure (s - t) :=
calc closure s \ closure t = (- closure t) ∩ closure s : by simp only [diff_eq, inter_comm]
... ⊆ closure (- closure t ∩ s) : closure_inter_open $ is_open_compl_iff.mpr $ is_closed_closure
... = closure (s \ closure t) : by simp only [diff_eq, inter_comm]
... ⊆ closure (s \ t) : closure_mono $ diff_subset_diff (subset.refl s) subset_closure
lemma mem_of_closed_of_tendsto {f : β → α} {b : filter β} {a : α} {s : set α}
(hb : b ≠ ⊥) (hf : tendsto f b (𝓝 a)) (hs : is_closed s) (h : f ⁻¹' s ∈ b) : a ∈ s :=
have b.map f ≤ 𝓝 a ⊓ principal s,
from le_trans (le_inf (le_refl _) (le_principal_iff.mpr h)) (inf_le_inf hf (le_refl _)),
is_closed_iff_nhds.mp hs a $ ne_bot_of_le_ne_bot (map_ne_bot hb) this
lemma mem_of_closed_of_tendsto' {f : β → α} {x : filter β} {a : α} {s : set α}
(hf : tendsto f x (𝓝 a)) (hs : is_closed s) (h : x ⊓ principal (f ⁻¹' s) ≠ ⊥) : a ∈ s :=
is_closed_iff_nhds.mp hs _ $ ne_bot_of_le_ne_bot (@map_ne_bot _ _ _ f h) $
le_inf (le_trans (map_mono $ inf_le_left) hf) $
le_trans (map_mono $ inf_le_right_of_le $ by simp only [comap_principal, le_principal_iff]; exact subset.refl _) (@map_comap_le _ _ _ f)
lemma mem_closure_of_tendsto {f : β → α} {b : filter β} {a : α} {s : set α}
(hb : b ≠ ⊥) (hf : tendsto f b (𝓝 a)) (h : f ⁻¹' s ∈ b) : a ∈ closure s :=
mem_of_closed_of_tendsto hb hf (is_closed_closure) $
filter.mem_sets_of_superset h (preimage_mono subset_closure)
/-- Suppose that `f` sends the complement to `s` to a single point `a`, and `l` is some filter.
Then `f` tends to `a` along `l` restricted to `s` if and only if it tends to `a` along `l`. -/
lemma tendsto_inf_principal_nhds_iff_of_forall_eq {f : β → α} {l : filter β} {s : set β}
{a : α} (h : ∀ x ∉ s, f x = a) :
tendsto f (l ⊓ principal s) (𝓝 a) ↔ tendsto f l (𝓝 a) :=
begin
rw [tendsto_iff_comap, tendsto_iff_comap],
replace h : principal (-s) ≤ comap f (𝓝 a),
{ rintros U ⟨t, ht, htU⟩ x hx,
have : f x ∈ t, from (h x hx).symm ▸ mem_of_nhds ht,
exact htU this },
refine ⟨λ h', _, le_trans inf_le_left⟩,
have := sup_le h' h,
rw [sup_inf_right, sup_principal, union_compl_self, principal_univ,
inf_top_eq, sup_le_iff] at this,
exact this.1
end
section lim
variables [nonempty α]
/-- If `f` is a filter, then `lim f` is a limit of the filter, if it exists. -/
noncomputable def lim (f : filter α) : α := epsilon $ λa, f ≤ 𝓝 a
lemma lim_spec {f : filter α} (h : ∃a, f ≤ 𝓝 a) : f ≤ 𝓝 (lim f) := epsilon_spec h
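/- Illustrative example: `𝓝 a` certainly converges to `a`, so by `lim_spec` it also converges to
`lim (𝓝 a)` (which need not equal `a` without separation assumptions). -/
example {a : α} : 𝓝 a ≤ 𝓝 (lim (𝓝 a)) := lim_spec ⟨a, le_refl _⟩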
end lim
/- locally finite family [General Topology (Bourbaki, 1995)] -/
section locally_finite
/-- A family of sets in `set α` is locally finite if at every point `x:α`,
there is a neighborhood of `x` which meets only finitely many sets in the family -/
def locally_finite (f : β → set α) :=
∀x:α, ∃t ∈ 𝓝 x, finite {i | (f i ∩ t).nonempty }
lemma locally_finite_of_finite {f : β → set α} (h : finite (univ : set β)) : locally_finite f :=
assume x, ⟨univ, univ_mem_sets, finite_subset h $ subset_univ _⟩
lemma locally_finite_subset
{f₁ f₂ : β → set α} (hf₂ : locally_finite f₂) (hf : ∀b, f₁ b ⊆ f₂ b) : locally_finite f₁ :=
assume a,
let ⟨t, ht₁, ht₂⟩ := hf₂ a in
⟨t, ht₁, finite_subset ht₂ $ assume i hi,
hi.mono $ inter_subset_inter (hf i) $ subset.refl _⟩
lemma is_closed_Union_of_locally_finite {f : β → set α}
(h₁ : locally_finite f) (h₂ : ∀i, is_closed (f i)) : is_closed (⋃i, f i) :=
is_open_iff_nhds.mpr $ assume a, assume h : a ∉ (⋃i, f i),
have ∀i, a ∈ -f i,
from assume i hi, h $ mem_Union.2 ⟨i, hi⟩,
have ∀i, - f i ∈ (𝓝 a),
by simp only [mem_nhds_sets_iff]; exact assume i, ⟨- f i, subset.refl _, h₂ i, this i⟩,
let ⟨t, h_sets, (h_fin : finite {i | (f i ∩ t).nonempty })⟩ := h₁ a in
calc 𝓝 a ≤ principal (t ∩ (⋂ i∈{i | (f i ∩ t).nonempty }, - f i)) :
begin
rw [le_principal_iff],
apply @filter.inter_mem_sets _ (𝓝 a) _ _ h_sets,
apply @filter.Inter_mem_sets _ (𝓝 a) _ _ _ h_fin,
exact assume i h, this i
end
... ≤ principal (- ⋃i, f i) :
begin
simp only [principal_mono, subset_def, mem_compl_eq, mem_inter_eq,
mem_Inter, mem_set_of_eq, mem_Union, and_imp, not_exists,
exists_imp_distrib, ne_empty_iff_nonempty, set.nonempty],
exact assume x xt ht i xfi, ht i x xfi xt xfi
end
end locally_finite
end topological_space
section continuous
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
variables [topological_space α] [topological_space β] [topological_space γ]
open_locale topological_space
/-- A function between topological spaces is continuous if the preimage
of every open set is open. -/
def continuous (f : α → β) := ∀s, is_open s → is_open (f ⁻¹' s)
/-- A function between topological spaces is continuous at a point `x₀`
if `f x` tends to `f x₀` when `x` tends to `x₀`. -/
def continuous_at (f : α → β) (x : α) := tendsto f (𝓝 x) (𝓝 (f x))
lemma continuous_at.preimage_mem_nhds {f : α → β} {x : α} {t : set β} (h : continuous_at f x)
(ht : t ∈ 𝓝 (f x)) : f ⁻¹' t ∈ 𝓝 x :=
h ht
lemma continuous_id : continuous (id : α → α) :=
assume s h, h
lemma continuous.comp {g : β → γ} {f : α → β} (hg : continuous g) (hf : continuous f) :
continuous (g ∘ f) :=
assume s h, hf _ (hg s h)
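/- Illustrative example: composition of continuous maps chains in the expected way.  Here `δ` is
given a topology locally, since the section variables do not fix one for it. -/
example [topological_space δ] {f : α → β} {g : β → γ} {h : γ → δ}
  (hf : continuous f) (hg : continuous g) (hh : continuous h) :
  continuous (h ∘ g ∘ f) :=
hh.comp (hg.comp hf)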
lemma continuous_at.comp {g : β → γ} {f : α → β} {x : α}
(hg : continuous_at g (f x)) (hf : continuous_at f x) :
continuous_at (g ∘ f) x :=
hg.comp hf
lemma continuous.tendsto {f : α → β} (hf : continuous f) (x) :
tendsto f (𝓝 x) (𝓝 (f x)) :=
((nhds_basis_opens x).tendsto_iff $ nhds_basis_opens $ f x).2 $
λ t ⟨hxt, ht⟩, ⟨f ⁻¹' t, ⟨hxt, hf _ ht⟩, subset.refl _⟩
lemma continuous.continuous_at {f : α → β} {x : α} (h : continuous f) :
continuous_at f x :=
h.tendsto x
lemma continuous_iff_continuous_at {f : α → β} : continuous f ↔ ∀ x, continuous_at f x :=
⟨continuous.tendsto,
assume hf : ∀x, tendsto f (𝓝 x) (𝓝 (f x)),
assume s, assume hs : is_open s,
have ∀a, f a ∈ s → s ∈ 𝓝 (f a),
from λ a ha, mem_nhds_sets hs ha,
show is_open (f ⁻¹' s),
from is_open_iff_nhds.2 $ λ a ha, le_principal_iff.2 $ hf _ (this a ha)⟩
lemma continuous_const {b : β} : continuous (λa:α, b) :=
continuous_iff_continuous_at.mpr $ assume a, tendsto_const_nhds
lemma continuous_at_const {x : α} {b : β} : continuous_at (λ a:α, b) x :=
continuous_const.continuous_at
lemma continuous_at_id {x : α} : continuous_at id x :=
continuous_id.continuous_at
lemma continuous_iff_is_closed {f : α → β} :
continuous f ↔ (∀s, is_closed s → is_closed (f ⁻¹' s)) :=
⟨assume hf s hs, hf (-s) hs,
assume hf s, by rw [←is_closed_compl_iff, ←is_closed_compl_iff]; exact hf _⟩
lemma continuous_at_iff_ultrafilter {f : α → β} (x) : continuous_at f x ↔
∀ g, is_ultrafilter g → g ≤ 𝓝 x → g.map f ≤ 𝓝 (f x) :=
tendsto_iff_ultrafilter f (𝓝 x) (𝓝 (f x))
lemma continuous_iff_ultrafilter {f : α → β} :
continuous f ↔ ∀ x g, is_ultrafilter g → g ≤ 𝓝 x → g.map f ≤ 𝓝 (f x) :=
by simp only [continuous_iff_continuous_at, continuous_at_iff_ultrafilter]
/-- A piecewise defined function `if p then f else g` is continuous, if both `f` and `g`
are continuous, and they coincide on the frontier (boundary) of the set `{a | p a}`. -/
lemma continuous_if {p : α → Prop} {f g : α → β} {h : ∀a, decidable (p a)}
(hp : ∀a∈frontier {a | p a}, f a = g a) (hf : continuous f) (hg : continuous g) :
continuous (λa, @ite (p a) (h a) β (f a) (g a)) :=
continuous_iff_is_closed.mpr $
assume s hs,
have (λa, ite (p a) (f a) (g a)) ⁻¹' s =
(closure {a | p a} ∩ f ⁻¹' s) ∪ (closure {a | ¬ p a} ∩ g ⁻¹' s),
from set.ext $ assume a,
classical.by_cases
(assume : a ∈ frontier {a | p a},
have hac : a ∈ closure {a | p a}, from this.left,
have hai : a ∈ closure {a | ¬ p a},
from have a ∈ - interior {a | p a}, from this.right, by rwa [←closure_compl] at this,
by by_cases p a; simp [h, hp a this, hac, hai, iff_def] {contextual := tt})
(assume hf : a ∈ - frontier {a | p a},
classical.by_cases
(assume : p a,
have hc : a ∈ closure {a | p a}, from subset_closure this,
have hnc : a ∉ closure {a | ¬ p a},
by show a ∉ closure (- {a | p a}); rw [closure_compl]; simpa [frontier, hc] using hf,
by simp [this, hc, hnc])
(assume : ¬ p a,
have hc : a ∈ closure {a | ¬ p a}, from subset_closure this,
have hnc : a ∉ closure {a | p a},
begin
have hc : a ∈ closure (- {a | p a}), from hc,
simp [closure_compl] at hc,
simpa [frontier, hc] using hf
end,
by simp [this, hc, hnc])),
by rw [this]; exact is_closed_union
(is_closed_inter is_closed_closure $ continuous_iff_is_closed.mp hf s hs)
(is_closed_inter is_closed_closure $ continuous_iff_is_closed.mp hg s hs)
/- Continuity and partial functions -/
/-- Continuity of a partial function -/
def pcontinuous (f : α →. β) := ∀ s, is_open s → is_open (f.preimage s)
lemma open_dom_of_pcontinuous {f : α →. β} (h : pcontinuous f) : is_open f.dom :=
by rw [←pfun.preimage_univ]; exact h _ is_open_univ
lemma pcontinuous_iff' {f : α →. β} :
pcontinuous f ↔ ∀ {x y} (h : y ∈ f x), ptendsto' f (𝓝 x) (𝓝 y) :=
begin
split,
{ intros h x y h',
simp only [ptendsto'_def, mem_nhds_sets_iff],
rintros s ⟨t, tsubs, opent, yt⟩,
exact ⟨f.preimage t, pfun.preimage_mono _ tsubs, h _ opent, ⟨y, yt, h'⟩⟩
},
intros hf s os,
rw is_open_iff_nhds,
rintros x ⟨y, ys, fxy⟩ t,
rw [mem_principal_sets],
assume h : f.preimage s ⊆ t,
change t ∈ 𝓝 x,
apply mem_sets_of_superset _ h,
have h' : ∀ s ∈ 𝓝 y, f.preimage s ∈ 𝓝 x,
{ intros s hs,
have : ptendsto' f (𝓝 x) (𝓝 y) := hf fxy,
rw ptendsto'_def at this,
exact this s hs },
show f.preimage s ∈ 𝓝 x,
apply h', rw mem_nhds_sets_iff, exact ⟨s, set.subset.refl _, os, ys⟩
end
lemma image_closure_subset_closure_image {f : α → β} {s : set α} (h : continuous f) :
f '' closure s ⊆ closure (f '' s) :=
have ∀ (a : α), 𝓝 a ⊓ principal s ≠ ⊥ → 𝓝 (f a) ⊓ principal (f '' s) ≠ ⊥,
from assume a ha,
have h₁ : ¬ map f (𝓝 a ⊓ principal s) = ⊥,
by rwa[map_eq_bot_iff],
have h₂ : map f (𝓝 a ⊓ principal s) ≤ 𝓝 (f a) ⊓ principal (f '' s),
from le_inf
(le_trans (map_mono inf_le_left) $ by rw [continuous_iff_continuous_at] at h; exact h a)
(le_trans (map_mono inf_le_right) $ by simp; exact subset.refl _),
ne_bot_of_le_ne_bot h₁ h₂,
by simp [image_subset_iff, closure_eq_nhds]; assumption
lemma mem_closure {s : set α} {t : set β} {f : α → β} {a : α}
(hf : continuous f) (ha : a ∈ closure s) (ht : ∀a∈s, f a ∈ t) : f a ∈ closure t :=
subset.trans (image_closure_subset_closure_image hf) (closure_mono $ image_subset_iff.2 ht) $
(mem_image_of_mem f ha)
end continuous
-- file boundary: /src/trace/trace.lean (repo: skbaek/cvx, license: none)
import data.matrix.basic
universe variables u v
namespace matrix
variables {l m n o : Type u} [fintype l] [fintype m] [fintype n] [fintype o]
variables {α : Type v}
open_locale matrix
section trace
def trace [add_comm_monoid α] (M : matrix m m α) : α :=
finset.univ.sum (λ i, M i i)
variables {L M N : matrix m m α}
@[simp] lemma trace_transpose [add_comm_monoid α] :
Mᵀ.trace = M.trace :=
rfl
lemma trace_add [add_comm_monoid α] :
(M + N).trace = M.trace + N.trace :=
finset.sum_add_distrib
lemma trace_smul [ring α] {a : α} :
(a • M).trace = a * M.trace :=
finset.mul_sum.symm
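/- Illustrative example (not a new lemma): combining `trace_add` and `trace_smul` gives the
expected linearity computation for the trace. -/
example [ring α] {a : α} : (a • M + N).trace = a * M.trace + N.trace :=
by rw [trace_add, trace_smul]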
lemma trace_comm [comm_ring α] :
(M ⬝ N).trace = (N ⬝ M).trace :=
begin
classical,
unfold trace,
dsimp [(⬝)],
rw finset.sum_comm,
simp only [mul_comm],
end
lemma trace_zero [add_comm_monoid α] :
trace (0 : matrix m m α) = 0 :=
by simp only [trace, add_monoid.smul_zero, finset.sum_const, matrix.zero_val]
lemma trace_diagonal [add_comm_monoid α] [decidable_eq m] {d : m → α} :
trace (diagonal d) = finset.univ.sum d :=
by simp [trace, diagonal]
lemma trace_one [decidable_eq m] [add_comm_monoid α] [has_one α] :
trace (1 : matrix m m α) = (fintype.card m : α) :=
begin
unfold has_one.one,
rw [trace_diagonal, finset.sum_const, add_monoid.smul_one],
refl,
end
end trace
end matrix
-- file boundary: /library/init/id_locked.lean (repo: soonhokong/lean-osx, license: Apache-2.0)
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.meta.tactic init.meta.constructor_tactic
open tactic
/-
Define id_locked using meta-programming because we don't have
syntax for setting reducibility_hints.
See module init.meta.declaration.
-/
run_command do
l ← return $ level.param `l,
Ty ← return $ expr.sort l,
type ← to_expr `(Π {A : %%Ty}, A → A),
val ← to_expr `(λ {A : %%Ty} (a : A), a),
add_decl (declaration.defn `id_locked [`l] type val reducibility_hints.opaque tt)
lemma {u} id_locked_eq {A : Type u} (a : A) : id_locked a = a :=
rfl
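/- Illustrative example: `id_locked` computes like `id`, and `id_locked_eq` is the equation one
rewrites with, since the definition carries opaque reducibility hints.  (`nat` is assumed from the
prelude.) -/
example (n : nat) : id_locked n = n := id_locked_eq n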
-- file boundary: /src/ring_theory/algebraic.lean (repo: waynemunro/mathlib, license: Apache-2.0)
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import linear_algebra.finite_dimensional
import ring_theory.integral_closure
import data.polynomial.integral_normalization
/-!
# Algebraic elements and algebraic extensions
An element of an R-algebra is algebraic over R if it is the root of a nonzero polynomial.
An R-algebra is algebraic over R if and only if all its elements are algebraic over R.
The main result in this file proves transitivity of algebraicity:
a tower of algebraic field extensions is algebraic.
-/
universe variables u v
open_locale classical
open polynomial
section
variables (R : Type u) {A : Type v} [comm_ring R] [ring A] [algebra R A]
/-- An element of an R-algebra is algebraic over R if it is the root of a nonzero polynomial. -/
def is_algebraic (x : A) : Prop :=
∃ p : polynomial R, p ≠ 0 ∧ aeval x p = 0
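/- Illustrative example: over a nontrivial base ring, `0 : A` is algebraic, witnessed by the
polynomial `X`.  (`X_ne_zero` is assumed to be the standard polynomial library lemma.) -/
example [nontrivial R] : is_algebraic R (0 : A) := ⟨X, X_ne_zero, by simp⟩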
/-- An element of an R-algebra is transcendental over R if it is not algebraic over R. -/
def transcendental (x : A) : Prop := ¬ is_algebraic R x
variables {R}
/-- A subalgebra is algebraic if all its elements are algebraic. -/
def subalgebra.is_algebraic (S : subalgebra R A) : Prop := ∀ x ∈ S, is_algebraic R x
variables (R A)
/-- An algebra is algebraic if all its elements are algebraic. -/
def algebra.is_algebraic : Prop := ∀ x : A, is_algebraic R x
variables {R A}
/-- A subalgebra is algebraic if and only if it is algebraic as an algebra. -/
lemma subalgebra.is_algebraic_iff (S : subalgebra R A) :
S.is_algebraic ↔ @algebra.is_algebraic R S _ _ (S.algebra) :=
begin
delta algebra.is_algebraic subalgebra.is_algebraic,
rw [subtype.forall'],
apply forall_congr, rintro ⟨x, hx⟩,
apply exists_congr, intro p,
apply and_congr iff.rfl,
have h : function.injective (S.val) := subtype.val_injective,
conv_rhs { rw [← h.eq_iff, alg_hom.map_zero], },
rw [← aeval_alg_hom_apply, S.val_apply]
end
/-- An algebra is algebraic if and only if the subalgebra `⊤` is algebraic. -/
lemma algebra.is_algebraic_iff : algebra.is_algebraic R A ↔ (⊤ : subalgebra R A).is_algebraic :=
begin
delta algebra.is_algebraic subalgebra.is_algebraic,
simp only [algebra.mem_top, forall_prop_of_true, iff_self],
end
lemma is_algebraic_iff_not_injective {x : A} : is_algebraic R x ↔
¬ function.injective (polynomial.aeval x : polynomial R →ₐ[R] A) :=
by simp only [is_algebraic, alg_hom.injective_iff, not_forall, and.comm, exists_prop]
end
section zero_ne_one
variables (R : Type u) {A : Type v} [comm_ring R] [nontrivial R] [ring A] [algebra R A]
/-- An integral element of an algebra is algebraic.-/
lemma is_integral.is_algebraic {x : A} (h : is_integral R x) : is_algebraic R x :=
by { rcases h with ⟨p, hp, hpx⟩, exact ⟨p, hp.ne_zero, hpx⟩ }
variables {R}
/-- An element of `R` is algebraic, when viewed as an element of the `R`-algebra `A`. -/
lemma is_algebraic_algebra_map (a : R) : is_algebraic R (algebra_map R A a) :=
⟨X - C a, X_sub_C_ne_zero a, by simp only [aeval_C, aeval_X, alg_hom.map_sub, sub_self]⟩
end zero_ne_one
section field
variables (K : Type u) {A : Type v} [field K] [ring A] [algebra K A]
/-- An element of an algebra over a field is algebraic if and only if it is integral.-/
lemma is_algebraic_iff_is_integral {x : A} :
is_algebraic K x ↔ is_integral K x :=
begin
refine ⟨_, is_integral.is_algebraic K⟩,
rintro ⟨p, hp, hpx⟩,
refine ⟨_, monic_mul_leading_coeff_inv hp, _⟩,
rw [← aeval_def, alg_hom.map_mul, hpx, zero_mul],
end
lemma is_algebraic_iff_is_integral' :
algebra.is_algebraic K A ↔ algebra.is_integral K A :=
⟨λ h x, (is_algebraic_iff_is_integral K).mp (h x),
λ h x, (is_algebraic_iff_is_integral K).mpr (h x)⟩
end field
namespace algebra
variables {K : Type*} {L : Type*} {R : Type*} {S : Type*} {A : Type*}
variables [field K] [field L] [comm_ring R] [comm_ring S] [comm_ring A]
variables [algebra K L] [algebra L A] [algebra K A] [is_scalar_tower K L A]
variables [algebra R S] [algebra S A] [algebra R A] [is_scalar_tower R S A]
/-- If L is an algebraic field extension of K and A is an algebraic algebra over L,
then A is algebraic over K. -/
lemma is_algebraic_trans (L_alg : is_algebraic K L) (A_alg : is_algebraic L A) :
is_algebraic K A :=
begin
simp only [is_algebraic, is_algebraic_iff_is_integral] at L_alg A_alg ⊢,
exact is_integral_trans L_alg A_alg,
end
variables (K L)
/-- If A is an algebraic algebra over R, then A is algebraic over S when S is an extension of R
and the map from `R` to `S` is injective. -/
lemma is_algebraic_of_larger_base_of_injective (hinj : function.injective (algebra_map R S))
(A_alg : is_algebraic R A) : is_algebraic S A :=
λ x, let ⟨p, hp₁, hp₂⟩ := A_alg x in
⟨p.map (algebra_map _ _),
by rwa [ne.def, ← degree_eq_bot, degree_map' hinj, degree_eq_bot],
by simpa⟩
/-- If A is an algebraic algebra over K, then A is algebraic over L when L is an extension of K -/
lemma is_algebraic_of_larger_base (A_alg : is_algebraic K A) : is_algebraic L A :=
is_algebraic_of_larger_base_of_injective (algebra_map K L).injective A_alg
variables {R S K L}
/-- A field extension is algebraic if it is finite. -/
lemma is_algebraic_of_finite [finite : finite_dimensional K L] : is_algebraic K L :=
λ x, (is_algebraic_iff_is_integral _).mpr (is_integral_of_submodule_noetherian ⊤
(is_noetherian_of_submodule_of_noetherian _ _ _ finite) x algebra.mem_top)
end algebra
variables {R S : Type*} [integral_domain R] [comm_ring S]
lemma exists_integral_multiple [algebra R S] {z : S} (hz : is_algebraic R z)
(inj : ∀ x, algebra_map R S x = 0 → x = 0) :
∃ (x : integral_closure R S) (y ≠ (0 : R)),
z * algebra_map R S y = x :=
begin
rcases hz with ⟨p, p_ne_zero, px⟩,
set a := p.leading_coeff with a_def,
have a_ne_zero : a ≠ 0 := mt polynomial.leading_coeff_eq_zero.mp p_ne_zero,
have y_integral : is_integral R (algebra_map R S a) := is_integral_algebra_map,
have x_integral : is_integral R (z * algebra_map R S a) :=
⟨p.integral_normalization,
monic_integral_normalization p_ne_zero,
integral_normalization_aeval_eq_zero px inj⟩,
exact ⟨⟨_, x_integral⟩, a, a_ne_zero, rfl⟩
end
/-- A fraction `(a : S) / (b : S)` can be reduced to `(c : S) / (d : R)`,
if `S` is the integral closure of `R` in an algebraic extension `L` of `R`. -/
lemma is_integral_closure.exists_smul_eq_mul {L : Type*} [field L]
[algebra R S] [algebra S L] [algebra R L] [is_scalar_tower R S L] [is_integral_closure S R L]
(h : algebra.is_algebraic R L) (inj : function.injective (algebra_map R L))
(a : S) {b : S} (hb : b ≠ 0) : ∃ (c : S) (d ≠ (0 : R)), d • a = b * c :=
begin
obtain ⟨c, d, d_ne, hx⟩ := exists_integral_multiple
(h (algebra_map _ L a / algebra_map _ L b))
((ring_hom.injective_iff _).mp inj),
refine ⟨is_integral_closure.mk' S (c : L) c.2, d, d_ne,
is_integral_closure.algebra_map_injective S R L _⟩,
simp only [algebra.smul_def, ring_hom.map_mul, is_integral_closure.algebra_map_mk', ← hx,
← is_scalar_tower.algebra_map_apply],
rw [← mul_assoc _ (_ / _), mul_div_cancel' (algebra_map S L a), mul_comm],
exact mt ((ring_hom.injective_iff _).mp (is_integral_closure.algebra_map_injective S R L) _) hb
end
section field
variables {K L : Type*} [field K] [field L] [algebra K L] (A : subalgebra K L)
lemma inv_eq_of_aeval_div_X_ne_zero {x : L} {p : polynomial K}
(aeval_ne : aeval x (div_X p) ≠ 0) :
x⁻¹ = aeval x (div_X p) / (aeval x p - algebra_map _ _ (p.coeff 0)) :=
begin
rw [inv_eq_iff, inv_div, div_eq_iff, sub_eq_iff_eq_add, mul_comm],
conv_lhs { rw ← div_X_mul_X_add p },
rw [alg_hom.map_add, alg_hom.map_mul, aeval_X, aeval_C],
exact aeval_ne
end
lemma inv_eq_of_root_of_coeff_zero_ne_zero {x : L} {p : polynomial K}
(aeval_eq : aeval x p = 0) (coeff_zero_ne : p.coeff 0 ≠ 0) :
x⁻¹ = - (aeval x (div_X p) / algebra_map _ _ (p.coeff 0)) :=
begin
convert inv_eq_of_aeval_div_X_ne_zero (mt (λ h, (algebra_map K L).injective _) coeff_zero_ne),
{ rw [aeval_eq, zero_sub, div_neg] },
rw ring_hom.map_zero,
convert aeval_eq,
conv_rhs { rw ← div_X_mul_X_add p },
rw [alg_hom.map_add, alg_hom.map_mul, h, zero_mul, zero_add, aeval_C]
end
lemma subalgebra.inv_mem_of_root_of_coeff_zero_ne_zero {x : A} {p : polynomial K}
(aeval_eq : aeval x p = 0) (coeff_zero_ne : p.coeff 0 ≠ 0) : (x⁻¹ : L) ∈ A :=
begin
have : (x⁻¹ : L) = aeval x (div_X p) / (aeval x p - algebra_map _ _ (p.coeff 0)),
{ rw [aeval_eq, subalgebra.coe_zero, zero_sub, div_neg],
convert inv_eq_of_root_of_coeff_zero_ne_zero _ coeff_zero_ne,
{ rw subalgebra.aeval_coe },
{ simpa using aeval_eq } },
rw [this, div_eq_mul_inv, aeval_eq, subalgebra.coe_zero, zero_sub, ← ring_hom.map_neg,
← ring_hom.map_inv],
exact A.mul_mem (aeval x p.div_X).2 (A.algebra_map_mem _),
end
lemma subalgebra.inv_mem_of_algebraic {x : A} (hx : is_algebraic K (x : L)) : (x⁻¹ : L) ∈ A :=
begin
obtain ⟨p, ne_zero, aeval_eq⟩ := hx,
rw [subalgebra.aeval_coe, subalgebra.coe_eq_zero] at aeval_eq,
revert ne_zero aeval_eq,
refine p.rec_on_horner _ _ _,
{ intro h,
contradiction },
{ intros p a hp ha ih ne_zero aeval_eq,
refine A.inv_mem_of_root_of_coeff_zero_ne_zero aeval_eq _,
rwa [coeff_add, hp, zero_add, coeff_C, if_pos rfl] },
{ intros p hp ih ne_zero aeval_eq,
rw [alg_hom.map_mul, aeval_X, mul_eq_zero] at aeval_eq,
cases aeval_eq with aeval_eq x_eq,
{ exact ih hp aeval_eq },
{ rw [x_eq, subalgebra.coe_zero, inv_zero],
exact A.zero_mem } }
end
/-- In an algebraic extension L/K, an intermediate subalgebra is a field. -/
lemma subalgebra.is_field_of_algebraic (hKL : algebra.is_algebraic K L) : is_field A :=
{ mul_inv_cancel := λ a ha, ⟨
⟨a⁻¹, A.inv_mem_of_algebraic (hKL a)⟩,
subtype.ext (mul_inv_cancel (mt (subalgebra.coe_eq_zero _).mp ha))⟩,
.. show nontrivial A, by apply_instance,
.. subalgebra.to_comm_ring A }
end field
-- file boundary: /src/category_theory/limits/shapes/equalizers.lean (repo: abentkamp/mathlib, license: Apache-2.0)
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Markus Himmel
-/
import category_theory.epi_mono
import category_theory.limits.has_limits
/-!
# Equalizers and coequalizers
This file defines (co)equalizers as special cases of (co)limits.
An equalizer is the categorical generalization of the subobject {a ∈ A | f(a) = g(a)} known
from abelian groups or modules. It is a limit cone over the diagram formed by `f` and `g`.
A coequalizer is the dual concept.
## Main definitions
* `walking_parallel_pair` is the indexing category used for (co)equalizer_diagrams
* `parallel_pair` is a functor from `walking_parallel_pair` to our category `C`.
* a `fork` is a cone over a parallel pair.
* there is really only one interesting morphism in a fork: the arrow from the vertex of the fork
to the domain of f and g. It is called `fork.ι`.
* an `equalizer` is now just a `limit (parallel_pair f g)`
Each of these has a dual.
## Main statements
* `equalizer.ι_mono` states that every equalizer map is a monomorphism
* `is_iso_limit_cone_parallel_pair_of_self` states that the identity on the domain of `f` is an
equalizer of `f` and `f`.
## Implementation notes
As with the other special shapes in the limits library, all the definitions here are given as
`abbreviation`s of the general statements for limits, so all the `simp` lemmas and theorems about
general limits can be used.
## References
* [F. Borceux, *Handbook of Categorical Algebra 1*][borceux-vol1]
-/
noncomputable theory
open category_theory
namespace category_theory.limits
local attribute [tidy] tactic.case_bash
universes v u u₂
/-- The type of objects for the diagram indexing a (co)equalizer. -/
@[derive decidable_eq, derive inhabited] inductive walking_parallel_pair : Type v
| zero | one
open walking_parallel_pair
/-- The type family of morphisms for the diagram indexing a (co)equalizer. -/
@[derive decidable_eq] inductive walking_parallel_pair_hom :
walking_parallel_pair → walking_parallel_pair → Type v
| left : walking_parallel_pair_hom zero one
| right : walking_parallel_pair_hom zero one
| id : Π X : walking_parallel_pair.{v}, walking_parallel_pair_hom X X
/-- Satisfying the inhabited linter -/
instance : inhabited (walking_parallel_pair_hom zero one) :=
{ default := walking_parallel_pair_hom.left }
open walking_parallel_pair_hom
/-- Composition of morphisms in the indexing diagram for (co)equalizers. -/
def walking_parallel_pair_hom.comp :
Π (X Y Z : walking_parallel_pair)
(f : walking_parallel_pair_hom X Y) (g : walking_parallel_pair_hom Y Z),
walking_parallel_pair_hom X Z
| _ _ _ (id _) h := h
| _ _ _ left (id one) := left
| _ _ _ right (id one) := right
.
instance walking_parallel_pair_hom_category : small_category walking_parallel_pair :=
{ hom := walking_parallel_pair_hom,
id := walking_parallel_pair_hom.id,
comp := walking_parallel_pair_hom.comp }
@[simp]
lemma walking_parallel_pair_hom_id (X : walking_parallel_pair) :
walking_parallel_pair_hom.id X = 𝟙 X :=
rfl
variables {C : Type u} [category.{v} C]
variables {X Y : C}
/-- `parallel_pair f g` is the diagram in `C` consisting of the two morphisms `f` and `g` with
common domain and codomain. -/
def parallel_pair (f g : X ⟶ Y) : walking_parallel_pair.{v} ⥤ C :=
{ obj := λ x, match x with
| zero := X
| one := Y
end,
map := λ x y h, match x, y, h with
| _, _, (id _) := 𝟙 _
| _, _, left := f
| _, _, right := g
end,
-- `tidy` can cope with this, but it's too slow:
map_comp' := begin rintros (⟨⟩|⟨⟩) (⟨⟩|⟨⟩) (⟨⟩|⟨⟩) ⟨⟩⟨⟩; { unfold_aux, simp; refl }, end, }.
@[simp] lemma parallel_pair_obj_zero (f g : X ⟶ Y) : (parallel_pair f g).obj zero = X := rfl
@[simp] lemma parallel_pair_obj_one (f g : X ⟶ Y) : (parallel_pair f g).obj one = Y := rfl
@[simp] lemma parallel_pair_map_left (f g : X ⟶ Y) : (parallel_pair f g).map left = f := rfl
@[simp] lemma parallel_pair_map_right (f g : X ⟶ Y) : (parallel_pair f g).map right = g := rfl
@[simp] lemma parallel_pair_functor_obj
{F : walking_parallel_pair ⥤ C} (j : walking_parallel_pair) :
(parallel_pair (F.map left) (F.map right)).obj j = F.obj j :=
begin
cases j; refl
end
/-- Every functor indexing a (co)equalizer is naturally isomorphic (actually, equal) to a
`parallel_pair` -/
@[simps]
def diagram_iso_parallel_pair (F : walking_parallel_pair ⥤ C) :
F ≅ parallel_pair (F.map left) (F.map right) :=
nat_iso.of_components (λ j, eq_to_iso $ by cases j; tidy) $ by tidy
/-- A fork on `f` and `g` is just a `cone (parallel_pair f g)`. -/
abbreviation fork (f g : X ⟶ Y) := cone (parallel_pair f g)
/-- A cofork on `f` and `g` is just a `cocone (parallel_pair f g)`. -/
abbreviation cofork (f g : X ⟶ Y) := cocone (parallel_pair f g)
variables {f g : X ⟶ Y}
/-- A fork `t` on the parallel pair `f g : X ⟶ Y` consists of two morphisms `t.π.app zero : t.X ⟶ X`
and `t.π.app one : t.X ⟶ Y`. Of these, only the first one is interesting, and we give it the
shorter name `fork.ι t`. -/
abbreviation fork.ι (t : fork f g) := t.π.app zero
/-- A cofork `t` on the parallel_pair `f g : X ⟶ Y` consists of two morphisms
`t.ι.app zero : X ⟶ t.X` and `t.ι.app one : Y ⟶ t.X`. Of these, only the second one is
interesting, and we give it the shorter name `cofork.π t`. -/
abbreviation cofork.π (t : cofork f g) := t.ι.app one
@[simp] lemma fork.ι_eq_app_zero (t : fork f g) : t.ι = t.π.app zero := rfl
@[simp] lemma cofork.π_eq_app_one (t : cofork f g) : t.π = t.ι.app one := rfl
@[simp, reassoc] lemma fork.app_zero_left (s : fork f g) :
s.π.app zero ≫ f = s.π.app one :=
by rw [←s.w left, parallel_pair_map_left]
@[simp, reassoc] lemma fork.app_zero_right (s : fork f g) :
s.π.app zero ≫ g = s.π.app one :=
by rw [←s.w right, parallel_pair_map_right]
@[simp, reassoc] lemma cofork.left_app_one (s : cofork f g) :
f ≫ s.ι.app one = s.ι.app zero :=
by rw [←s.w left, parallel_pair_map_left]
@[simp, reassoc] lemma cofork.right_app_one (s : cofork f g) :
g ≫ s.ι.app one = s.ι.app zero :=
by rw [←s.w right, parallel_pair_map_right]
/-- A fork on `f g : X ⟶ Y` is determined by the morphism `ι : P ⟶ X` satisfying `ι ≫ f = ι ≫ g`.
-/
@[simps]
def fork.of_ι {P : C} (ι : P ⟶ X) (w : ι ≫ f = ι ≫ g) : fork f g :=
{ X := P,
π :=
{ app := λ X, begin cases X, exact ι, exact ι ≫ f, end,
naturality' := λ X Y f,
begin
cases X; cases Y; cases f; dsimp; simp,
{ dsimp, simp, }, -- See note [dsimp, simp].
{ exact w },
{ dsimp, simp, },
end } }
/-- A cofork on `f g : X ⟶ Y` is determined by the morphism `π : Y ⟶ P` satisfying
`f ≫ π = g ≫ π`. -/
@[simps]
def cofork.of_π {P : C} (π : Y ⟶ P) (w : f ≫ π = g ≫ π) : cofork f g :=
{ X := P,
ι :=
{ app := λ X, walking_parallel_pair.cases_on X (f ≫ π) π,
naturality' := λ i j f, by { cases f; dsimp; simp [w] } } } -- See note [dsimp, simp]
lemma fork.ι_of_ι {P : C} (ι : P ⟶ X) (w : ι ≫ f = ι ≫ g) :
(fork.of_ι ι w).ι = ι := rfl
lemma cofork.π_of_π {P : C} (π : Y ⟶ P) (w : f ≫ π = g ≫ π) :
(cofork.of_π π w).π = π := rfl
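/- Illustrative example: the identity on `X` gives a fork on any pair `(h, h)`; the `id_fork`
construction further below generalises this to any pair with `f = g`. -/
example (h : X ⟶ Y) : fork h h := fork.of_ι (𝟙 X) rfl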
@[reassoc]
lemma fork.condition (t : fork f g) : t.ι ≫ f = t.ι ≫ g :=
by rw [t.app_zero_left, t.app_zero_right]
@[reassoc]
lemma cofork.condition (t : cofork f g) : f ≫ t.π = g ≫ t.π :=
by rw [t.left_app_one, t.right_app_one]
/-- To check whether two maps are equalized by both maps of a fork, it suffices to check it for the
first map -/
lemma fork.equalizer_ext (s : fork f g) {W : C} {k l : W ⟶ s.X}
(h : k ≫ fork.ι s = l ≫ fork.ι s) : ∀ (j : walking_parallel_pair),
k ≫ s.π.app j = l ≫ s.π.app j
| zero := h
| one := by rw [←fork.app_zero_left, reassoc_of h]
/-- To check whether two maps are coequalized by both maps of a cofork, it suffices to check it for
the second map -/
lemma cofork.coequalizer_ext (s : cofork f g) {W : C} {k l : s.X ⟶ W}
(h : cofork.π s ≫ k = cofork.π s ≫ l) : ∀ (j : walking_parallel_pair),
s.ι.app j ≫ k = s.ι.app j ≫ l
| zero := by simp only [←cofork.left_app_one, category.assoc, h]
| one := h
lemma fork.is_limit.hom_ext {s : fork f g} (hs : is_limit s) {W : C} {k l : W ⟶ s.X}
(h : k ≫ fork.ι s = l ≫ fork.ι s) : k = l :=
hs.hom_ext $ fork.equalizer_ext _ h
lemma cofork.is_colimit.hom_ext {s : cofork f g} (hs : is_colimit s) {W : C} {k l : s.X ⟶ W}
(h : cofork.π s ≫ k = cofork.π s ≫ l) : k = l :=
hs.hom_ext $ cofork.coequalizer_ext _ h
/-- If `s` is a limit fork over `f` and `g`, then a morphism `k : W ⟶ X` satisfying
`k ≫ f = k ≫ g` induces a morphism `l : W ⟶ s.X` such that `l ≫ fork.ι s = k`. -/
def fork.is_limit.lift' {s : fork f g} (hs : is_limit s) {W : C} (k : W ⟶ X) (h : k ≫ f = k ≫ g) :
{l : W ⟶ s.X // l ≫ fork.ι s = k} :=
⟨hs.lift $ fork.of_ι _ h, hs.fac _ _⟩
/-- If `s` is a colimit cofork over `f` and `g`, then a morphism `k : Y ⟶ W` satisfying
`f ≫ k = g ≫ k` induces a morphism `l : s.X ⟶ W` such that `cofork.π s ≫ l = k`. -/
def cofork.is_colimit.desc' {s : cofork f g} (hs : is_colimit s) {W : C} (k : Y ⟶ W)
(h : f ≫ k = g ≫ k) : {l : s.X ⟶ W // cofork.π s ≫ l = k} :=
⟨hs.desc $ cofork.of_π _ h, hs.fac _ _⟩
/-- This is a slightly more convenient method to verify that a fork is a limit cone. It
only asks for a proof of facts that carry any mathematical content -/
def fork.is_limit.mk (t : fork f g)
(lift : Π (s : fork f g), s.X ⟶ t.X)
(fac : ∀ (s : fork f g), lift s ≫ fork.ι t = fork.ι s)
(uniq : ∀ (s : fork f g) (m : s.X ⟶ t.X)
(w : ∀ j : walking_parallel_pair, m ≫ t.π.app j = s.π.app j), m = lift s) :
is_limit t :=
{ lift := lift,
fac' := λ s j, walking_parallel_pair.cases_on j (fac s) $
by erw [←s.w left, ←t.w left, ←category.assoc, fac]; refl,
uniq' := uniq }
/-- This is another convenient method to verify that a fork is a limit cone. It
only asks for a proof of facts that carry any mathematical content, and allows access to the
same `s` for all parts. -/
def fork.is_limit.mk' {X Y : C} {f g : X ⟶ Y} (t : fork f g)
(create : Π (s : fork f g), {l // l ≫ t.ι = s.ι ∧ ∀ {m}, m ≫ t.ι = s.ι → m = l}) :
is_limit t :=
fork.is_limit.mk t
(λ s, (create s).1)
(λ s, (create s).2.1)
(λ s m w, (create s).2.2 (w zero))
/-- This is a slightly more convenient method to verify that a cofork is a colimit cocone. It
only asks for a proof of facts that carry any mathematical content -/
def cofork.is_colimit.mk (t : cofork f g)
(desc : Π (s : cofork f g), t.X ⟶ s.X)
(fac : ∀ (s : cofork f g), cofork.π t ≫ desc s = cofork.π s)
(uniq : ∀ (s : cofork f g) (m : t.X ⟶ s.X)
(w : ∀ j : walking_parallel_pair, t.ι.app j ≫ m = s.ι.app j), m = desc s) :
is_colimit t :=
{ desc := desc,
fac' := λ s j, walking_parallel_pair.cases_on j
(by erw [←s.w left, ←t.w left, category.assoc, fac]; refl) (fac s),
uniq' := uniq }
/-- This is another convenient method to verify that a cofork is a colimit cocone. It
only asks for a proof of facts that carry any mathematical content, and allows access to the
same `s` for all parts. -/
def cofork.is_colimit.mk' {X Y : C} {f g : X ⟶ Y} (t : cofork f g)
(create : Π (s : cofork f g), {l : t.X ⟶ s.X // t.π ≫ l = s.π ∧ ∀ {m}, t.π ≫ m = s.π → m = l}) :
is_colimit t :=
cofork.is_colimit.mk t
(λ s, (create s).1)
(λ s, (create s).2.1)
(λ s m w, (create s).2.2 (w one))
/--
Given a limit cone for the pair `f g : X ⟶ Y`, for any `Z`, morphisms from `Z` to its point are in
bijection with morphisms `h : Z ⟶ X` such that `h ≫ f = h ≫ g`.
Further, this bijection is natural in `Z`: see `fork.is_limit.hom_iso_natural`.
This is a special case of `is_limit.hom_iso'`, often useful to construct adjunctions.
-/
@[simps]
def fork.is_limit.hom_iso {X Y : C} {f g : X ⟶ Y} {t : fork f g} (ht : is_limit t) (Z : C) :
(Z ⟶ t.X) ≃ {h : Z ⟶ X // h ≫ f = h ≫ g} :=
{ to_fun := λ k, ⟨k ≫ t.ι, by simp⟩,
inv_fun := λ h, (fork.is_limit.lift' ht _ h.prop).1,
left_inv := λ k, fork.is_limit.hom_ext ht (fork.is_limit.lift' _ _ _).prop,
right_inv := λ h, subtype.ext (fork.is_limit.lift' ht _ _).prop }
/-- The bijection of `fork.is_limit.hom_iso` is natural in `Z`. -/
lemma fork.is_limit.hom_iso_natural {X Y : C} {f g : X ⟶ Y} {t : fork f g} (ht : is_limit t)
{Z Z' : C} (q : Z' ⟶ Z) (k : Z ⟶ t.X) :
(fork.is_limit.hom_iso ht _ (q ≫ k) : Z' ⟶ X) = q ≫ (fork.is_limit.hom_iso ht _ k : Z ⟶ X) :=
category.assoc _ _ _
/--
Given a colimit cocone for the pair `f g : X ⟶ Y`, for any `Z`, morphisms from the cocone point
to `Z` are in bijection with morphisms `h : Y ⟶ Z` such that `f ≫ h = g ≫ h`.
Further, this bijection is natural in `Z`: see `cofork.is_colimit.hom_iso_natural`.
This is a special case of `is_colimit.hom_iso'`, often useful to construct adjunctions.
-/
@[simps]
def cofork.is_colimit.hom_iso {X Y : C} {f g : X ⟶ Y} {t : cofork f g} (ht : is_colimit t) (Z : C) :
(t.X ⟶ Z) ≃ {h : Y ⟶ Z // f ≫ h = g ≫ h} :=
{ to_fun := λ k, ⟨t.π ≫ k, by simp⟩,
inv_fun := λ h, (cofork.is_colimit.desc' ht _ h.prop).1,
left_inv := λ k, cofork.is_colimit.hom_ext ht (cofork.is_colimit.desc' _ _ _).prop,
right_inv := λ h, subtype.ext (cofork.is_colimit.desc' ht _ _).prop }
/-- The bijection of `cofork.is_colimit.hom_iso` is natural in `Z`. -/
lemma cofork.is_colimit.hom_iso_natural {X Y : C} {f g : X ⟶ Y} {t : cofork f g} {Z Z' : C}
(q : Z ⟶ Z') (ht : is_colimit t) (k : t.X ⟶ Z) :
(cofork.is_colimit.hom_iso ht _ (k ≫ q) : Y ⟶ Z') =
(cofork.is_colimit.hom_iso ht _ k : Y ⟶ Z) ≫ q :=
(category.assoc _ _ _).symm
/-- This is a helper construction that can be useful when verifying that a category has all
equalizers. Given `F : walking_parallel_pair ⥤ C`, which is really the same as
`parallel_pair (F.map left) (F.map right)`, and a fork on `F.map left` and `F.map right`,
we get a cone on `F`.
If you're thinking about using this, have a look at `has_equalizers_of_has_limit_parallel_pair`,
which you may find to be an easier way of achieving your goal. -/
def cone.of_fork
{F : walking_parallel_pair ⥤ C} (t : fork (F.map left) (F.map right)) : cone F :=
{ X := t.X,
π :=
{ app := λ X, t.π.app X ≫ eq_to_hom (by tidy),
naturality' := λ j j' g, by { cases j; cases j'; cases g; dsimp; simp } } }
/-- This is a helper construction that can be useful when verifying that a category has all
coequalizers. Given `F : walking_parallel_pair ⥤ C`, which is really the same as
`parallel_pair (F.map left) (F.map right)`, and a cofork on `F.map left` and `F.map right`,
we get a cocone on `F`.
If you're thinking about using this, have a look at
`has_coequalizers_of_has_colimit_parallel_pair`, which you may find to be an easier way of
achieving your goal. -/
def cocone.of_cofork
{F : walking_parallel_pair ⥤ C} (t : cofork (F.map left) (F.map right)) : cocone F :=
{ X := t.X,
ι :=
{ app := λ X, eq_to_hom (by tidy) ≫ t.ι.app X,
naturality' := λ j j' g, by { cases j; cases j'; cases g; dsimp; simp } } }
@[simp] lemma cone.of_fork_π
{F : walking_parallel_pair ⥤ C} (t : fork (F.map left) (F.map right)) (j) :
(cone.of_fork t).π.app j = t.π.app j ≫ eq_to_hom (by tidy) := rfl
@[simp] lemma cocone.of_cofork_ι
{F : walking_parallel_pair ⥤ C} (t : cofork (F.map left) (F.map right)) (j) :
(cocone.of_cofork t).ι.app j = eq_to_hom (by tidy) ≫ t.ι.app j := rfl
/-- Given `F : walking_parallel_pair ⥤ C`, which is really the same as
`parallel_pair (F.map left) (F.map right)` and a cone on `F`, we get a fork on
`F.map left` and `F.map right`. -/
def fork.of_cone
{F : walking_parallel_pair ⥤ C} (t : cone F) : fork (F.map left) (F.map right) :=
{ X := t.X,
π := { app := λ X, t.π.app X ≫ eq_to_hom (by tidy) } }
/-- Given `F : walking_parallel_pair ⥤ C`, which is really the same as
`parallel_pair (F.map left) (F.map right)` and a cocone on `F`, we get a cofork on
`F.map left` and `F.map right`. -/
def cofork.of_cocone
{F : walking_parallel_pair ⥤ C} (t : cocone F) : cofork (F.map left) (F.map right) :=
{ X := t.X,
ι := { app := λ X, eq_to_hom (by tidy) ≫ t.ι.app X } }
@[simp] lemma fork.of_cone_π {F : walking_parallel_pair ⥤ C} (t : cone F) (j) :
(fork.of_cone t).π.app j = t.π.app j ≫ eq_to_hom (by tidy) := rfl
@[simp] lemma cofork.of_cocone_ι {F : walking_parallel_pair ⥤ C} (t : cocone F) (j) :
(cofork.of_cocone t).ι.app j = eq_to_hom (by tidy) ≫ t.ι.app j := rfl
/--
Helper function for constructing morphisms between equalizer forks.
-/
@[simps]
def fork.mk_hom {s t : fork f g} (k : s.X ⟶ t.X) (w : k ≫ t.ι = s.ι) : s ⟶ t :=
{ hom := k,
w' :=
begin
rintro ⟨_|_⟩,
{ exact w },
{ simpa using w =≫ f },
end }
/--
To construct an isomorphism between forks,
it suffices to give an isomorphism between the cone points
and check that it commutes with the `ι` morphisms.
-/
@[simps]
def fork.ext {s t : fork f g} (i : s.X ≅ t.X) (w : i.hom ≫ t.ι = s.ι) : s ≅ t :=
{ hom := fork.mk_hom i.hom w,
inv := fork.mk_hom i.inv (by rw [← w, iso.inv_hom_id_assoc]) }
/--
Helper function for constructing morphisms between coequalizer coforks.
-/
@[simps]
def cofork.mk_hom {s t : cofork f g} (k : s.X ⟶ t.X) (w : s.π ≫ k = t.π) : s ⟶ t :=
{ hom := k,
w' :=
begin
rintro ⟨_|_⟩,
simpa using f ≫= w,
exact w,
end }
/--
To construct an isomorphism between coforks,
it suffices to give an isomorphism between the cocone points
and check that it commutes with the `π` morphisms.
-/
def cofork.ext {s t : cofork f g} (i : s.X ≅ t.X) (w : s.π ≫ i.hom = t.π) : s ≅ t :=
{ hom := cofork.mk_hom i.hom w,
inv := cofork.mk_hom i.inv (by rw [iso.comp_inv_eq, w]) }
variables (f g)
section
/--
`has_equalizer f g` represents a particular choice of limiting cone
for the parallel pair of morphisms `f` and `g`.
-/
abbreviation has_equalizer := has_limit (parallel_pair f g)
variables [has_equalizer f g]
/-- If an equalizer of `f` and `g` exists, we can access an arbitrary choice of such by
saying `equalizer f g`. -/
abbreviation equalizer : C := limit (parallel_pair f g)
/-- If an equalizer of `f` and `g` exists, we can access the inclusion
`equalizer f g ⟶ X` by saying `equalizer.ι f g`. -/
abbreviation equalizer.ι : equalizer f g ⟶ X :=
limit.π (parallel_pair f g) zero
/--
An equalizer cone for a parallel pair `f` and `g`.
-/
abbreviation equalizer.fork : fork f g := limit.cone (parallel_pair f g)
@[simp] lemma equalizer.fork_ι :
(equalizer.fork f g).ι = equalizer.ι f g := rfl
@[simp] lemma equalizer.fork_π_app_zero :
(equalizer.fork f g).π.app zero = equalizer.ι f g := rfl
@[reassoc] lemma equalizer.condition : equalizer.ι f g ≫ f = equalizer.ι f g ≫ g :=
fork.condition $ limit.cone $ parallel_pair f g
/-- The equalizer built from `equalizer.ι f g` is limiting. -/
def equalizer_is_equalizer : is_limit (fork.of_ι (equalizer.ι f g) (equalizer.condition f g)) :=
is_limit.of_iso_limit (limit.is_limit _) (fork.ext (iso.refl _) (by tidy))
variables {f g}
/-- A morphism `k : W ⟶ X` satisfying `k ≫ f = k ≫ g` factors through the equalizer of `f` and `g`
via `equalizer.lift : W ⟶ equalizer f g`. -/
abbreviation equalizer.lift {W : C} (k : W ⟶ X) (h : k ≫ f = k ≫ g) : W ⟶ equalizer f g :=
limit.lift (parallel_pair f g) (fork.of_ι k h)
@[simp, reassoc]
lemma equalizer.lift_ι {W : C} (k : W ⟶ X) (h : k ≫ f = k ≫ g) :
equalizer.lift k h ≫ equalizer.ι f g = k :=
limit.lift_π _ _
/-- A morphism `k : W ⟶ X` satisfying `k ≫ f = k ≫ g` induces a morphism `l : W ⟶ equalizer f g`
satisfying `l ≫ equalizer.ι f g = k`. -/
def equalizer.lift' {W : C} (k : W ⟶ X) (h : k ≫ f = k ≫ g) :
{l : W ⟶ equalizer f g // l ≫ equalizer.ι f g = k} :=
⟨equalizer.lift k h, equalizer.lift_ι _ _⟩
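/- Illustrative example: the simplest instance of the factorisation property, lifting
`equalizer.ι f g` through itself. -/
example : equalizer.lift (equalizer.ι f g) (equalizer.condition f g) ≫ equalizer.ι f g =
  equalizer.ι f g :=
equalizer.lift_ι _ _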
/-- Two maps into an equalizer are equal if they are equal when composed with the equalizer
map. -/
@[ext] lemma equalizer.hom_ext {W : C} {k l : W ⟶ equalizer f g}
(h : k ≫ equalizer.ι f g = l ≫ equalizer.ι f g) : k = l :=
fork.is_limit.hom_ext (limit.is_limit _) h
/-- An equalizer morphism is a monomorphism -/
instance equalizer.ι_mono : mono (equalizer.ι f g) :=
{ right_cancellation := λ Z h k w, equalizer.hom_ext w }
end
section
variables {f g}
/-- The equalizer morphism in any limit cone is a monomorphism. -/
lemma mono_of_is_limit_parallel_pair {c : cone (parallel_pair f g)} (i : is_limit c) :
mono (fork.ι c) :=
{ right_cancellation := λ Z h k w, fork.is_limit.hom_ext i w }
end
section
variables {f g}
/-- The identity determines a cone on the equalizer diagram of `f` and `g` if `f = g`. -/
def id_fork (h : f = g) : fork f g :=
fork.of_ι (𝟙 X) $ h ▸ rfl
/-- The identity on `X` is an equalizer of `(f, g)`, if `f = g`. -/
def is_limit_id_fork (h : f = g) : is_limit (id_fork h) :=
fork.is_limit.mk _
(λ s, fork.ι s)
(λ s, category.comp_id _)
(λ s m h, by { convert h zero, exact (category.comp_id _).symm })
/-- Every equalizer of `(f, g)`, where `f = g`, is an isomorphism. -/
lemma is_iso_limit_cone_parallel_pair_of_eq (h₀ : f = g) {c : cone (parallel_pair f g)}
(h : is_limit c) : is_iso (c.π.app zero) :=
is_iso.of_iso $ is_limit.cone_point_unique_up_to_iso h $ is_limit_id_fork h₀
/-- The equalizer of `(f, g)`, where `f = g`, is an isomorphism. -/
lemma equalizer.ι_of_eq [has_equalizer f g] (h : f = g) : is_iso (equalizer.ι f g) :=
is_iso_limit_cone_parallel_pair_of_eq h $ limit.is_limit _
/-- Every equalizer of `(f, f)` is an isomorphism. -/
lemma is_iso_limit_cone_parallel_pair_of_self {c : cone (parallel_pair f f)} (h : is_limit c) :
is_iso (c.π.app zero) :=
is_iso_limit_cone_parallel_pair_of_eq rfl h
/-- An equalizer that is an epimorphism is an isomorphism. -/
lemma is_iso_limit_cone_parallel_pair_of_epi {c : cone (parallel_pair f g)}
(h : is_limit c) [epi (c.π.app zero)] : is_iso (c.π.app zero) :=
is_iso_limit_cone_parallel_pair_of_eq ((cancel_epi _).1 (fork.condition c)) h
end
instance has_equalizer_of_self : has_equalizer f f :=
has_limit.mk
{ cone := id_fork rfl,
is_limit := is_limit_id_fork rfl }
/-- The equalizer inclusion for `(f, f)` is an isomorphism. -/
instance equalizer.ι_of_self : is_iso (equalizer.ι f f) :=
equalizer.ι_of_eq rfl
/-- The equalizer of a morphism with itself is isomorphic to the source. -/
def equalizer.iso_source_of_self : equalizer f f ≅ X :=
as_iso (equalizer.ι f f)
@[simp] lemma equalizer.iso_source_of_self_hom :
(equalizer.iso_source_of_self f).hom = equalizer.ι f f :=
rfl
@[simp] lemma equalizer.iso_source_of_self_inv :
(equalizer.iso_source_of_self f).inv = equalizer.lift (𝟙 X) (by simp) :=
by { ext, simp [equalizer.iso_source_of_self], }
section
/--
`has_coequalizer f g` represents a particular choice of colimiting cocone
for the parallel pair of morphisms `f` and `g`.
-/
abbreviation has_coequalizer := has_colimit (parallel_pair f g)
variables [has_coequalizer f g]
/-- If a coequalizer of `f` and `g` exists, we can access an arbitrary choice of such by
saying `coequalizer f g`. -/
abbreviation coequalizer : C := colimit (parallel_pair f g)
/-- If a coequalizer of `f` and `g` exists, we can access the corresponding projection by
saying `coequalizer.π f g`. -/
abbreviation coequalizer.π : Y ⟶ coequalizer f g :=
colimit.ι (parallel_pair f g) one
/--
An arbitrary choice of coequalizer cocone for a parallel pair `f` and `g`.
-/
abbreviation coequalizer.cofork : cofork f g := colimit.cocone (parallel_pair f g)
@[simp] lemma coequalizer.cofork_π :
(coequalizer.cofork f g).π = coequalizer.π f g := rfl
@[simp] lemma coequalizer.cofork_ι_app_one :
(coequalizer.cofork f g).ι.app one = coequalizer.π f g := rfl
@[reassoc] lemma coequalizer.condition : f ≫ coequalizer.π f g = g ≫ coequalizer.π f g :=
cofork.condition $ colimit.cocone $ parallel_pair f g
/-- The cofork built from `coequalizer.π f g` is colimiting. -/
def coequalizer_is_coequalizer :
is_colimit (cofork.of_π (coequalizer.π f g) (coequalizer.condition f g)) :=
is_colimit.of_iso_colimit (colimit.is_colimit _) (cofork.ext (iso.refl _) (by tidy))
variables {f g}
/-- Any morphism `k : Y ⟶ W` satisfying `f ≫ k = g ≫ k` factors through the coequalizer of `f`
and `g` via `coequalizer.desc : coequalizer f g ⟶ W`. -/
abbreviation coequalizer.desc {W : C} (k : Y ⟶ W) (h : f ≫ k = g ≫ k) : coequalizer f g ⟶ W :=
colimit.desc (parallel_pair f g) (cofork.of_π k h)
@[simp, reassoc]
lemma coequalizer.π_desc {W : C} (k : Y ⟶ W) (h : f ≫ k = g ≫ k) :
coequalizer.π f g ≫ coequalizer.desc k h = k :=
colimit.ι_desc _ _
/-- Any morphism `k : Y ⟶ W` satisfying `f ≫ k = g ≫ k` induces a morphism
`l : coequalizer f g ⟶ W` satisfying `coequalizer.π f g ≫ l = k`. -/
def coequalizer.desc' {W : C} (k : Y ⟶ W) (h : f ≫ k = g ≫ k) :
{l : coequalizer f g ⟶ W // coequalizer.π f g ≫ l = k} :=
⟨coequalizer.desc k h, coequalizer.π_desc _ _⟩
/-- Two maps from a coequalizer are equal if they are equal when composed with the coequalizer
map -/
@[ext] lemma coequalizer.hom_ext {W : C} {k l : coequalizer f g ⟶ W}
(h : coequalizer.π f g ≫ k = coequalizer.π f g ≫ l) : k = l :=
cofork.is_colimit.hom_ext (colimit.is_colimit _) h
/-- A coequalizer morphism is an epimorphism -/
instance coequalizer.π_epi : epi (coequalizer.π f g) :=
{ left_cancellation := λ Z h k w, coequalizer.hom_ext w }
end
section
variables {f g}
/-- The coequalizer morphism in any colimit cocone is an epimorphism. -/
lemma epi_of_is_colimit_parallel_pair {c : cocone (parallel_pair f g)} (i : is_colimit c) :
epi (c.ι.app one) :=
{ left_cancellation := λ Z h k w, cofork.is_colimit.hom_ext i w }
end
section
variables {f g}
/-- The identity determines a cocone on the coequalizer diagram of `f` and `g`, if `f = g`. -/
def id_cofork (h : f = g) : cofork f g :=
cofork.of_π (𝟙 Y) $ h ▸ rfl
/-- The identity on `Y` is a coequalizer of `(f, g)`, where `f = g`. -/
def is_colimit_id_cofork (h : f = g) : is_colimit (id_cofork h) :=
cofork.is_colimit.mk _
(λ s, cofork.π s)
(λ s, category.id_comp _)
(λ s m h, by { convert h one, exact (category.id_comp _).symm })
/-- Every coequalizer of `(f, g)`, where `f = g`, is an isomorphism. -/
lemma is_iso_colimit_cocone_parallel_pair_of_eq (h₀ : f = g) {c : cocone (parallel_pair f g)}
(h : is_colimit c) : is_iso (c.ι.app one) :=
is_iso.of_iso $ is_colimit.cocone_point_unique_up_to_iso (is_colimit_id_cofork h₀) h
/-- The coequalizer of `(f, g)`, where `f = g`, is an isomorphism. -/
lemma coequalizer.π_of_eq [has_coequalizer f g] (h : f = g) :
is_iso (coequalizer.π f g) :=
is_iso_colimit_cocone_parallel_pair_of_eq h $ colimit.is_colimit _
/-- Every coequalizer of `(f, f)` is an isomorphism. -/
lemma is_iso_colimit_cocone_parallel_pair_of_self {c : cocone (parallel_pair f f)}
(h : is_colimit c) : is_iso (c.ι.app one) :=
is_iso_colimit_cocone_parallel_pair_of_eq rfl h
/-- A coequalizer that is a monomorphism is an isomorphism. -/
lemma is_iso_limit_cocone_parallel_pair_of_epi {c : cocone (parallel_pair f g)}
(h : is_colimit c) [mono (c.ι.app one)] : is_iso (c.ι.app one) :=
is_iso_colimit_cocone_parallel_pair_of_eq ((cancel_mono _).1 (cofork.condition c)) h
end
instance has_coequalizer_of_self : has_coequalizer f f :=
has_colimit.mk
{ cocone := id_cofork rfl,
is_colimit := is_colimit_id_cofork rfl }
/-- The coequalizer projection for `(f, f)` is an isomorphism. -/
instance coequalizer.π_of_self : is_iso (coequalizer.π f f) :=
coequalizer.π_of_eq rfl
/-- The coequalizer of a morphism with itself is isomorphic to the target. -/
def coequalizer.iso_target_of_self : coequalizer f f ≅ Y :=
(as_iso (coequalizer.π f f)).symm
@[simp] lemma coequalizer.iso_target_of_self_hom :
(coequalizer.iso_target_of_self f).hom = coequalizer.desc (𝟙 Y) (by simp) :=
by { ext, simp [coequalizer.iso_target_of_self], }
@[simp] lemma coequalizer.iso_target_of_self_inv :
(coequalizer.iso_target_of_self f).inv = coequalizer.π f f :=
rfl
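-- Usage sketch (added for illustration; it assumes the two `simp` lemmas above suffice):
-- composing the projection with the isomorphism to the target gives the identity.
example : coequalizer.π f f ≫ (coequalizer.iso_target_of_self f).hom = 𝟙 Y :=
by simp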
section comparison
variables {D : Type u₂} [category.{v} D] (G : C ⥤ D)
/--
The comparison morphism for the equalizer of `f,g`.
This is an isomorphism iff `G` preserves the equalizer of `f,g`; see
`category_theory/limits/preserves/shapes/equalizers.lean`
-/
def equalizer_comparison [has_equalizer f g] [has_equalizer (G.map f) (G.map g)] :
G.obj (equalizer f g) ⟶ equalizer (G.map f) (G.map g) :=
equalizer.lift (G.map (equalizer.ι _ _)) (by simp only [←G.map_comp, equalizer.condition])
@[simp, reassoc]
lemma equalizer_comparison_comp_π [has_equalizer f g] [has_equalizer (G.map f) (G.map g)] :
equalizer_comparison f g G ≫ equalizer.ι (G.map f) (G.map g) = G.map (equalizer.ι f g) :=
equalizer.lift_ι _ _
@[simp, reassoc]
lemma map_lift_equalizer_comparison [has_equalizer f g] [has_equalizer (G.map f) (G.map g)]
{Z : C} {h : Z ⟶ X} (w : h ≫ f = h ≫ g) :
G.map (equalizer.lift h w) ≫ equalizer_comparison f g G =
equalizer.lift (G.map h) (by simp only [←G.map_comp, w]) :=
by { ext, simp [← G.map_comp] }
/-- The comparison morphism for the coequalizer of `f,g`. -/
def coequalizer_comparison [has_coequalizer f g] [has_coequalizer (G.map f) (G.map g)] :
coequalizer (G.map f) (G.map g) ⟶ G.obj (coequalizer f g) :=
coequalizer.desc (G.map (coequalizer.π _ _)) (by simp only [←G.map_comp, coequalizer.condition])
@[simp, reassoc]
lemma ι_comp_coequalizer_comparison [has_coequalizer f g] [has_coequalizer (G.map f) (G.map g)] :
coequalizer.π _ _ ≫ coequalizer_comparison f g G = G.map (coequalizer.π _ _) :=
coequalizer.π_desc _ _
@[simp, reassoc]
lemma coequalizer_comparison_map_desc [has_coequalizer f g] [has_coequalizer (G.map f) (G.map g)]
{Z : C} {h : Y ⟶ Z} (w : f ≫ h = g ≫ h) :
coequalizer_comparison f g G ≫ G.map (coequalizer.desc h w) =
coequalizer.desc (G.map h) (by simp only [←G.map_comp, w]) :=
by { ext, simp [← G.map_comp] }
end comparison
variables (C)
/-- `has_equalizers` represents a choice of equalizer for every pair of morphisms -/
abbreviation has_equalizers := has_limits_of_shape walking_parallel_pair C
/-- `has_coequalizers` represents a choice of coequalizer for every pair of morphisms -/
abbreviation has_coequalizers := has_colimits_of_shape walking_parallel_pair C
/-- If `C` has all limits of diagrams `parallel_pair f g`, then it has all equalizers -/
lemma has_equalizers_of_has_limit_parallel_pair
[Π {X Y : C} {f g : X ⟶ Y}, has_limit (parallel_pair f g)] : has_equalizers C :=
{ has_limit := λ F, has_limit_of_iso (diagram_iso_parallel_pair F).symm }
/-- If `C` has all colimits of diagrams `parallel_pair f g`, then it has all coequalizers -/
lemma has_coequalizers_of_has_colimit_parallel_pair
[Π {X Y : C} {f g : X ⟶ Y}, has_colimit (parallel_pair f g)] : has_coequalizers C :=
{ has_colimit := λ F, has_colimit_of_iso (diagram_iso_parallel_pair F) }
section
-- In this section we show that a split mono `f` equalizes `(retraction f ≫ f)` and `(𝟙 Y)`.
variables {C} [split_mono f]
/--
A split mono `f` equalizes `(retraction f ≫ f)` and `(𝟙 Y)`.
Here we build the cone, and show in `split_mono_equalizes` that it is a limit cone.
-/
@[simps {rhs_md := semireducible}]
def cone_of_split_mono : cone (parallel_pair (𝟙 Y) (retraction f ≫ f)) :=
fork.of_ι f (by simp)
/--
A split mono `f` equalizes `(retraction f ≫ f)` and `(𝟙 Y)`.
-/
def split_mono_equalizes {X Y : C} (f : X ⟶ Y) [split_mono f] : is_limit (cone_of_split_mono f) :=
fork.is_limit.mk' _ $ λ s,
⟨s.ι ≫ retraction f,
by { dsimp, rw [category.assoc, ←s.condition], apply category.comp_id },
λ m hm, by simp [←hm]⟩
end
section
-- In this section we show that a split epi `f` coequalizes `(f ≫ section_ f)` and `(𝟙 X)`.
variables {C} [split_epi f]
/--
A split epi `f` coequalizes `(f ≫ section_ f)` and `(𝟙 X)`.
Here we build the cocone, and show in `split_epi_coequalizes` that it is a colimit cocone.
-/
@[simps {rhs_md := semireducible}]
def cocone_of_split_epi : cocone (parallel_pair (𝟙 X) (f ≫ section_ f)) :=
cofork.of_π f (by simp)
/--
A split epi `f` coequalizes `(f ≫ section_ f)` and `(𝟙 X)`.
-/
def split_epi_coequalizes {X Y : C} (f : X ⟶ Y) [split_epi f] :
is_colimit (cocone_of_split_epi f) :=
cofork.is_colimit.mk' _ $ λ s,
⟨section_ f ≫ s.π,
by { dsimp, rw [← category.assoc, ← s.condition, category.id_comp] },
λ m hm, by simp [← hm]⟩
end
end category_theory.limits
|
998f02787590a7d60fa9dc6053c069273bbbe0d5
|
80cc5bf14c8ea85ff340d1d747a127dcadeb966f
|
/src/data/complex/basic.lean
|
3c31ff2e46bf05c310dc2c13f5bd72b41cd583e4
|
[
"Apache-2.0"
] |
permissive
|
lacker/mathlib
|
f2439c743c4f8eb413ec589430c82d0f73b2d539
|
ddf7563ac69d42cfa4a1bfe41db1fed521bd795f
|
refs/heads/master
| 1,671,948,326,773
| 1,601,479,268,000
| 1,601,479,268,000
| 298,686,743
| 0
| 0
|
Apache-2.0
| 1,601,070,794,000
| 1,601,070,794,000
| null |
UTF-8
|
Lean
| false
| false
| 20,037
|
lean
|
/-
Copyright (c) 2017 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin Buzzard, Mario Carneiro
-/
import data.real.basic
/-!
# The complex numbers
The complex numbers are modelled as ℝ^2 in the obvious way.
-/
/-! ### Definition and basic arithmetic -/
/-- Complex numbers consist of two `real`s: a real part `re` and an imaginary part `im`. -/
structure complex : Type :=
(re : ℝ) (im : ℝ)
notation `ℂ` := complex
namespace complex
noncomputable instance : decidable_eq ℂ := classical.dec_eq _
/-- The equivalence between the complex numbers and `ℝ × ℝ`. -/
def equiv_real_prod : ℂ ≃ (ℝ × ℝ) :=
{ to_fun := λ z, ⟨z.re, z.im⟩,
inv_fun := λ p, ⟨p.1, p.2⟩,
left_inv := λ ⟨x, y⟩, rfl,
right_inv := λ ⟨x, y⟩, rfl }
@[simp] theorem equiv_real_prod_apply (z : ℂ) : equiv_real_prod z = (z.re, z.im) := rfl
theorem equiv_real_prod_symm_re (x y : ℝ) : (equiv_real_prod.symm (x, y)).re = x := rfl
theorem equiv_real_prod_symm_im (x y : ℝ) : (equiv_real_prod.symm (x, y)).im = y := rfl
@[simp] theorem eta : ∀ z : ℂ, complex.mk z.re z.im = z
| ⟨a, b⟩ := rfl
@[ext]
theorem ext : ∀ {z w : ℂ}, z.re = w.re → z.im = w.im → z = w
| ⟨zr, zi⟩ ⟨_, _⟩ rfl rfl := rfl
theorem ext_iff {z w : ℂ} : z = w ↔ z.re = w.re ∧ z.im = w.im :=
⟨λ H, by simp [H], and.rec ext⟩
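-- Usage sketch (added for illustration): two complex numbers are equal exactly when their
-- real and imaginary parts agree.
example (z w : ℂ) (hre : z.re = w.re) (him : z.im = w.im) : z = w := ext hre him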
instance : has_coe ℝ ℂ := ⟨λ r, ⟨r, 0⟩⟩
@[simp, norm_cast] lemma of_real_re (r : ℝ) : (r : ℂ).re = r := rfl
@[simp, norm_cast] lemma of_real_im (r : ℝ) : (r : ℂ).im = 0 := rfl
@[simp, norm_cast] theorem of_real_inj {z w : ℝ} : (z : ℂ) = w ↔ z = w :=
⟨congr_arg re, congr_arg _⟩
instance : has_zero ℂ := ⟨(0 : ℝ)⟩
instance : inhabited ℂ := ⟨0⟩
@[simp] lemma zero_re : (0 : ℂ).re = 0 := rfl
@[simp] lemma zero_im : (0 : ℂ).im = 0 := rfl
@[simp, norm_cast] lemma of_real_zero : ((0 : ℝ) : ℂ) = 0 := rfl
@[simp] theorem of_real_eq_zero {z : ℝ} : (z : ℂ) = 0 ↔ z = 0 := of_real_inj
theorem of_real_ne_zero {z : ℝ} : (z : ℂ) ≠ 0 ↔ z ≠ 0 := not_congr of_real_eq_zero
instance : has_one ℂ := ⟨(1 : ℝ)⟩
@[simp] lemma one_re : (1 : ℂ).re = 1 := rfl
@[simp] lemma one_im : (1 : ℂ).im = 0 := rfl
@[simp, norm_cast] lemma of_real_one : ((1 : ℝ) : ℂ) = 1 := rfl
instance : has_add ℂ := ⟨λ z w, ⟨z.re + w.re, z.im + w.im⟩⟩
@[simp] lemma add_re (z w : ℂ) : (z + w).re = z.re + w.re := rfl
@[simp] lemma add_im (z w : ℂ) : (z + w).im = z.im + w.im := rfl
@[simp] lemma bit0_re (z : ℂ) : (bit0 z).re = bit0 z.re := rfl
@[simp] lemma bit1_re (z : ℂ) : (bit1 z).re = bit1 z.re := rfl
@[simp] lemma bit0_im (z : ℂ) : (bit0 z).im = bit0 z.im := eq.refl _
@[simp] lemma bit1_im (z : ℂ) : (bit1 z).im = bit0 z.im := add_zero _
@[simp, norm_cast] lemma of_real_add (r s : ℝ) : ((r + s : ℝ) : ℂ) = r + s :=
ext_iff.2 $ by simp
@[simp, norm_cast] lemma of_real_bit0 (r : ℝ) : ((bit0 r : ℝ) : ℂ) = bit0 r :=
ext_iff.2 $ by simp [bit0]
@[simp, norm_cast] lemma of_real_bit1 (r : ℝ) : ((bit1 r : ℝ) : ℂ) = bit1 r :=
ext_iff.2 $ by simp [bit1]
instance : has_neg ℂ := ⟨λ z, ⟨-z.re, -z.im⟩⟩
@[simp] lemma neg_re (z : ℂ) : (-z).re = -z.re := rfl
@[simp] lemma neg_im (z : ℂ) : (-z).im = -z.im := rfl
@[simp, norm_cast] lemma of_real_neg (r : ℝ) : ((-r : ℝ) : ℂ) = -r := ext_iff.2 $ by simp
instance : has_mul ℂ := ⟨λ z w, ⟨z.re * w.re - z.im * w.im, z.re * w.im + z.im * w.re⟩⟩
@[simp] lemma mul_re (z w : ℂ) : (z * w).re = z.re * w.re - z.im * w.im := rfl
@[simp] lemma mul_im (z w : ℂ) : (z * w).im = z.re * w.im + z.im * w.re := rfl
@[simp, norm_cast] lemma of_real_mul (r s : ℝ) : ((r * s : ℝ) : ℂ) = r * s := ext_iff.2 $ by simp
lemma smul_re (r : ℝ) (z : ℂ) : (↑r * z).re = r * z.re := by simp
lemma smul_im (r : ℝ) (z : ℂ) : (↑r * z).im = r * z.im := by simp
lemma of_real_smul (r : ℝ) (z : ℂ) : (↑r * z) = ⟨r * z.re, r * z.im⟩ :=
ext (smul_re _ _) (smul_im _ _)
/-! ### The imaginary unit, `I` -/
/-- The imaginary unit. -/
def I : ℂ := ⟨0, 1⟩
@[simp] lemma I_re : I.re = 0 := rfl
@[simp] lemma I_im : I.im = 1 := rfl
@[simp] lemma I_mul_I : I * I = -1 := ext_iff.2 $ by simp
lemma I_mul (z : ℂ) : I * z = ⟨-z.im, z.re⟩ :=
ext_iff.2 $ by simp
lemma I_ne_zero : (I : ℂ) ≠ 0 := mt (congr_arg im) zero_ne_one.symm
lemma mk_eq_add_mul_I (a b : ℝ) : complex.mk a b = a + b * I :=
ext_iff.2 $ by simp
@[simp] lemma re_add_im (z : ℂ) : (z.re : ℂ) + z.im * I = z :=
ext_iff.2 $ by simp
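-- Illustrative sketch (added, not from the original file): every complex number is the sum of
-- its real part and its imaginary part times `I`.
example (z : ℂ) : z = z.re + z.im * I := (re_add_im z).symm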
/-! ### Commutative ring instance and lemmas -/
instance : comm_ring ℂ :=
by refine { zero := 0, add := (+), neg := has_neg.neg, one := 1, mul := (*), ..};
{ intros, apply ext_iff.2; split; simp; ring }
instance re.is_add_group_hom : is_add_group_hom complex.re :=
{ map_add := complex.add_re }
instance im.is_add_group_hom : is_add_group_hom complex.im :=
{ map_add := complex.add_im }
/-! ### Complex conjugation -/
/-- The complex conjugate. -/
def conj : ℂ →+* ℂ :=
begin
refine_struct { to_fun := λ z : ℂ, (⟨z.re, -z.im⟩ : ℂ), .. };
{ intros, ext; simp [add_comm], },
end
@[simp] lemma conj_re (z : ℂ) : (conj z).re = z.re := rfl
@[simp] lemma conj_im (z : ℂ) : (conj z).im = -z.im := rfl
@[simp] lemma conj_of_real (r : ℝ) : conj r = r := ext_iff.2 $ by simp [conj]
@[simp] lemma conj_I : conj I = -I := ext_iff.2 $ by simp
@[simp] lemma conj_bit0 (z : ℂ) : conj (bit0 z) = bit0 (conj z) := ext_iff.2 $ by simp [bit0]
@[simp] lemma conj_bit1 (z : ℂ) : conj (bit1 z) = bit1 (conj z) := ext_iff.2 $ by simp [bit0]
@[simp] lemma conj_neg_I : conj (-I) = I := ext_iff.2 $ by simp
@[simp] lemma conj_conj (z : ℂ) : conj (conj z) = z :=
ext_iff.2 $ by simp
lemma conj_involutive : function.involutive conj := conj_conj
lemma conj_bijective : function.bijective conj := conj_involutive.bijective
lemma conj_inj {z w : ℂ} : conj z = conj w ↔ z = w :=
conj_bijective.1.eq_iff
@[simp] lemma conj_eq_zero {z : ℂ} : conj z = 0 ↔ z = 0 :=
by simpa using @conj_inj z 0
lemma eq_conj_iff_real {z : ℂ} : conj z = z ↔ ∃ r : ℝ, z = r :=
⟨λ h, ⟨z.re, ext rfl $ eq_zero_of_neg_eq (congr_arg im h)⟩,
λ ⟨h, e⟩, by rw [e, conj_of_real]⟩
lemma eq_conj_iff_re {z : ℂ} : conj z = z ↔ (z.re : ℂ) = z :=
eq_conj_iff_real.trans ⟨by rintro ⟨r, rfl⟩; simp, λ h, ⟨_, h.symm⟩⟩
/-! ### Norm squared -/
/-- The norm squared function. -/
@[pp_nodot] def norm_sq (z : ℂ) : ℝ := z.re * z.re + z.im * z.im
@[simp] lemma norm_sq_of_real (r : ℝ) : norm_sq r = r * r :=
by simp [norm_sq]
@[simp] lemma norm_sq_zero : norm_sq 0 = 0 := by simp [norm_sq]
@[simp] lemma norm_sq_one : norm_sq 1 = 1 := by simp [norm_sq]
@[simp] lemma norm_sq_I : norm_sq I = 1 := by simp [norm_sq]
lemma norm_sq_nonneg (z : ℂ) : 0 ≤ norm_sq z :=
add_nonneg (mul_self_nonneg _) (mul_self_nonneg _)
@[simp] lemma norm_sq_eq_zero {z : ℂ} : norm_sq z = 0 ↔ z = 0 :=
⟨λ h, ext
(eq_zero_of_mul_self_add_mul_self_eq_zero h)
(eq_zero_of_mul_self_add_mul_self_eq_zero $ (add_comm _ _).trans h),
λ h, h.symm ▸ norm_sq_zero⟩
@[simp] lemma norm_sq_pos {z : ℂ} : 0 < norm_sq z ↔ z ≠ 0 :=
by rw [lt_iff_le_and_ne, ne, eq_comm]; simp [norm_sq_nonneg]
@[simp] lemma norm_sq_neg (z : ℂ) : norm_sq (-z) = norm_sq z :=
by simp [norm_sq]
@[simp] lemma norm_sq_conj (z : ℂ) : norm_sq (conj z) = norm_sq z :=
by simp [norm_sq]
@[simp] lemma norm_sq_mul (z w : ℂ) : norm_sq (z * w) = norm_sq z * norm_sq w :=
by dsimp [norm_sq]; ring
lemma norm_sq_add (z w : ℂ) : norm_sq (z + w) =
norm_sq z + norm_sq w + 2 * (z * conj w).re :=
by dsimp [norm_sq]; ring
lemma re_sq_le_norm_sq (z : ℂ) : z.re * z.re ≤ norm_sq z :=
le_add_of_nonneg_right (mul_self_nonneg _)
lemma im_sq_le_norm_sq (z : ℂ) : z.im * z.im ≤ norm_sq z :=
le_add_of_nonneg_left (mul_self_nonneg _)
theorem mul_conj (z : ℂ) : z * conj z = norm_sq z :=
ext_iff.2 $ by simp [norm_sq, mul_comm, sub_eq_neg_add, add_comm]
theorem add_conj (z : ℂ) : z + conj z = (2 * z.re : ℝ) :=
ext_iff.2 $ by simp [two_mul]
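-- Usage sketch (added for illustration): `mul_conj` with the factors swapped, using
-- commutativity of multiplication in `ℂ`.
example (z : ℂ) : conj z * z = norm_sq z := by rw [mul_comm, mul_conj]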
/-- The coercion `ℝ → ℂ` as a `ring_hom`. -/
def of_real : ℝ →+* ℂ := ⟨coe, of_real_one, of_real_mul, of_real_zero, of_real_add⟩
@[simp] lemma of_real_eq_coe (r : ℝ) : of_real r = r := rfl
@[simp] lemma I_sq : I ^ 2 = -1 := by rw [pow_two, I_mul_I]
@[simp] lemma sub_re (z w : ℂ) : (z - w).re = z.re - w.re := rfl
@[simp] lemma sub_im (z w : ℂ) : (z - w).im = z.im - w.im := rfl
@[simp, norm_cast] lemma of_real_sub (r s : ℝ) : ((r - s : ℝ) : ℂ) = r - s := ext_iff.2 $ by simp
@[simp, norm_cast] lemma of_real_pow (r : ℝ) (n : ℕ) : ((r ^ n : ℝ) : ℂ) = r ^ n :=
by induction n; simp [*, of_real_mul, pow_succ]
theorem sub_conj (z : ℂ) : z - conj z = (2 * z.im : ℝ) * I :=
ext_iff.2 $ by simp [two_mul, sub_eq_add_neg]
lemma norm_sq_sub (z w : ℂ) : norm_sq (z - w) =
norm_sq z + norm_sq w - 2 * (z * conj w).re :=
by rw [sub_eq_add_neg, norm_sq_add]; simp [-mul_re, add_comm, add_left_comm, sub_eq_add_neg]
/-! ### Inversion -/
noncomputable instance : has_inv ℂ := ⟨λ z, conj z * ((norm_sq z)⁻¹:ℝ)⟩
theorem inv_def (z : ℂ) : z⁻¹ = conj z * ((norm_sq z)⁻¹:ℝ) := rfl
@[simp] lemma inv_re (z : ℂ) : (z⁻¹).re = z.re / norm_sq z := by simp [inv_def, division_def]
@[simp] lemma inv_im (z : ℂ) : (z⁻¹).im = -z.im / norm_sq z := by simp [inv_def, division_def]
@[simp, norm_cast] lemma of_real_inv (r : ℝ) : ((r⁻¹ : ℝ) : ℂ) = r⁻¹ :=
ext_iff.2 $ begin
simp,
by_cases r = 0, { simp [h] },
{ rw [← div_div_eq_div_mul, div_self h, one_div] },
end
protected lemma inv_zero : (0⁻¹ : ℂ) = 0 :=
by rw [← of_real_zero, ← of_real_inv, inv_zero]
protected theorem mul_inv_cancel {z : ℂ} (h : z ≠ 0) : z * z⁻¹ = 1 :=
by rw [inv_def, ← mul_assoc, mul_conj, ← of_real_mul,
mul_inv_cancel (mt norm_sq_eq_zero.1 h), of_real_one]
/-! ### Field instance and lemmas -/
noncomputable instance : field ℂ :=
{ inv := has_inv.inv,
exists_pair_ne := ⟨0, 1, mt (congr_arg re) zero_ne_one⟩,
mul_inv_cancel := @complex.mul_inv_cancel,
inv_zero := complex.inv_zero,
..complex.comm_ring }
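-- Illustrative sketch (added, not from the original file): with the `field` instance in place,
-- generic field lemmas such as `div_self` apply to `ℂ`.
example {z : ℂ} (h : z ≠ 0) : z / z = 1 := div_self h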
lemma div_re (z w : ℂ) : (z / w).re = z.re * w.re / norm_sq w + z.im * w.im / norm_sq w :=
by simp [div_eq_mul_inv, mul_assoc, sub_eq_add_neg]
lemma div_im (z w : ℂ) : (z / w).im = z.im * w.re / norm_sq w - z.re * w.im / norm_sq w :=
by simp [div_eq_mul_inv, mul_assoc, sub_eq_add_neg, add_comm]
@[simp, norm_cast] lemma of_real_div (r s : ℝ) : ((r / s : ℝ) : ℂ) = r / s :=
of_real.map_div r s
@[simp, norm_cast] lemma of_real_fpow (r : ℝ) (n : ℤ) : ((r ^ n : ℝ) : ℂ) = (r : ℂ) ^ n :=
of_real.map_fpow r n
@[simp] lemma div_I (z : ℂ) : z / I = -(z * I) :=
(div_eq_iff_mul_eq I_ne_zero).2 $ by simp [mul_assoc]
@[simp] lemma inv_I : I⁻¹ = -I :=
by simp [inv_eq_one_div]
@[simp] lemma norm_sq_inv (z : ℂ) : norm_sq z⁻¹ = (norm_sq z)⁻¹ :=
if h : z = 0 then by simp [h] else
mul_right_cancel' (mt norm_sq_eq_zero.1 h) $
by rw [← norm_sq_mul]; simp [h, -norm_sq_mul]
@[simp] lemma norm_sq_div (z w : ℂ) : norm_sq (z / w) = norm_sq z / norm_sq w :=
by rw [division_def, norm_sq_mul, norm_sq_inv]; refl
/-! ### Cast lemmas -/
@[simp, norm_cast] theorem of_real_nat_cast (n : ℕ) : ((n : ℝ) : ℂ) = n :=
of_real.map_nat_cast n
@[simp, norm_cast] lemma nat_cast_re (n : ℕ) : (n : ℂ).re = n :=
by rw [← of_real_nat_cast, of_real_re]
@[simp, norm_cast] lemma nat_cast_im (n : ℕ) : (n : ℂ).im = 0 :=
by rw [← of_real_nat_cast, of_real_im]
@[simp, norm_cast] theorem of_real_int_cast (n : ℤ) : ((n : ℝ) : ℂ) = n :=
of_real.map_int_cast n
@[simp, norm_cast] lemma int_cast_re (n : ℤ) : (n : ℂ).re = n :=
by rw [← of_real_int_cast, of_real_re]
@[simp, norm_cast] lemma int_cast_im (n : ℤ) : (n : ℂ).im = 0 :=
by rw [← of_real_int_cast, of_real_im]
@[simp, norm_cast] theorem of_real_rat_cast (n : ℚ) : ((n : ℝ) : ℂ) = n :=
of_real.map_rat_cast n
@[simp, norm_cast] lemma rat_cast_re (q : ℚ) : (q : ℂ).re = q :=
by rw [← of_real_rat_cast, of_real_re]
@[simp, norm_cast] lemma rat_cast_im (q : ℚ) : (q : ℂ).im = 0 :=
by rw [← of_real_rat_cast, of_real_im]
/-! ### Characteristic zero -/
instance char_zero_complex : char_zero ℂ :=
add_group.char_zero_of_inj_zero $ λ n h,
by rwa [← of_real_nat_cast, of_real_eq_zero, nat.cast_eq_zero] at h
theorem re_eq_add_conj (z : ℂ) : (z.re : ℂ) = (z + conj z) / 2 :=
by rw [add_conj]; simp; rw [mul_div_cancel_left (z.re:ℂ) two_ne_zero']
/-! ### Absolute value -/
/-- The complex absolute value function, defined as the square root of the norm squared. -/
@[pp_nodot] noncomputable def abs (z : ℂ) : ℝ := (norm_sq z).sqrt
local notation `abs'` := _root_.abs
@[simp] lemma abs_of_real (r : ℝ) : abs r = abs' r :=
by simp [abs, norm_sq_of_real, real.sqrt_mul_self_eq_abs]
lemma abs_of_nonneg {r : ℝ} (h : 0 ≤ r) : abs r = r :=
(abs_of_real _).trans (abs_of_nonneg h)
lemma abs_of_nat (n : ℕ) : complex.abs n = n :=
calc complex.abs n = complex.abs (n:ℝ) : by rw [of_real_nat_cast]
... = _ : abs_of_nonneg (nat.cast_nonneg n)
lemma mul_self_abs (z : ℂ) : abs z * abs z = norm_sq z :=
real.mul_self_sqrt (norm_sq_nonneg _)
@[simp] lemma abs_zero : abs 0 = 0 := by simp [abs]
@[simp] lemma abs_one : abs 1 = 1 := by simp [abs]
@[simp] lemma abs_I : abs I = 1 := by simp [abs]
@[simp] lemma abs_two : abs 2 = 2 :=
calc abs 2 = abs (2 : ℝ) : by rw [of_real_bit0, of_real_one]
... = (2 : ℝ) : abs_of_nonneg (by norm_num)
lemma abs_nonneg (z : ℂ) : 0 ≤ abs z :=
real.sqrt_nonneg _
@[simp] lemma abs_eq_zero {z : ℂ} : abs z = 0 ↔ z = 0 :=
(real.sqrt_eq_zero $ norm_sq_nonneg _).trans norm_sq_eq_zero
lemma abs_ne_zero {z : ℂ} : abs z ≠ 0 ↔ z ≠ 0 :=
not_congr abs_eq_zero
@[simp] lemma abs_conj (z : ℂ) : abs (conj z) = abs z :=
by simp [abs]
@[simp] lemma abs_mul (z w : ℂ) : abs (z * w) = abs z * abs w :=
by rw [abs, norm_sq_mul, real.sqrt_mul (norm_sq_nonneg _)]; refl
lemma abs_re_le_abs (z : ℂ) : abs' z.re ≤ abs z :=
by rw [mul_self_le_mul_self_iff (_root_.abs_nonneg z.re) (abs_nonneg _),
abs_mul_abs_self, mul_self_abs];
apply re_sq_le_norm_sq
lemma abs_im_le_abs (z : ℂ) : abs' z.im ≤ abs z :=
by rw [mul_self_le_mul_self_iff (_root_.abs_nonneg z.im) (abs_nonneg _),
abs_mul_abs_self, mul_self_abs];
apply im_sq_le_norm_sq
lemma re_le_abs (z : ℂ) : z.re ≤ abs z :=
(abs_le.1 (abs_re_le_abs _)).2
lemma im_le_abs (z : ℂ) : z.im ≤ abs z :=
(abs_le.1 (abs_im_le_abs _)).2
lemma abs_add (z w : ℂ) : abs (z + w) ≤ abs z + abs w :=
(mul_self_le_mul_self_iff (abs_nonneg _)
(add_nonneg (abs_nonneg _) (abs_nonneg _))).2 $
begin
rw [mul_self_abs, add_mul_self_eq, mul_self_abs, mul_self_abs,
add_right_comm, norm_sq_add, add_le_add_iff_left,
mul_assoc, mul_le_mul_left (@two_pos ℝ _)],
simpa [-mul_re] using re_le_abs (z * conj w)
end
instance : is_absolute_value abs :=
{ abv_nonneg := abs_nonneg,
abv_eq_zero := λ _, abs_eq_zero,
abv_add := abs_add,
abv_mul := abs_mul }
open is_absolute_value
@[simp] lemma abs_abs (z : ℂ) : abs' (abs z) = abs z :=
_root_.abs_of_nonneg (abs_nonneg _)
@[simp] lemma abs_pos {z : ℂ} : 0 < abs z ↔ z ≠ 0 := abv_pos abs
@[simp] lemma abs_neg : ∀ z, abs (-z) = abs z := abv_neg abs
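-- Usage sketch (added for illustration): combining `abs_neg` with `abs_two`.
example : abs (-2 : ℂ) = 2 := by rw [abs_neg, abs_two]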
lemma abs_sub : ∀ z w, abs (z - w) = abs (w - z) := abv_sub abs
lemma abs_sub_le : ∀ a b c, abs (a - c) ≤ abs (a - b) + abs (b - c) := abv_sub_le abs
@[simp] theorem abs_inv : ∀ z, abs z⁻¹ = (abs z)⁻¹ := abv_inv abs
@[simp] theorem abs_div : ∀ z w, abs (z / w) = abs z / abs w := abv_div abs
lemma abs_abs_sub_le_abs_sub : ∀ z w, abs' (abs z - abs w) ≤ abs (z - w) :=
abs_abv_sub_le_abv_sub abs
lemma abs_le_abs_re_add_abs_im (z : ℂ) : abs z ≤ abs' z.re + abs' z.im :=
by simpa [re_add_im] using abs_add z.re (z.im * I)
lemma abs_re_div_abs_le_one (z : ℂ) : abs' (z.re / z.abs) ≤ 1 :=
if hz : z = 0 then by simp [hz, zero_le_one]
else by { simp_rw [_root_.abs_div, abs_abs, div_le_iff (abs_pos.2 hz), one_mul, abs_re_le_abs] }
lemma abs_im_div_abs_le_one (z : ℂ) : abs' (z.im / z.abs) ≤ 1 :=
if hz : z = 0 then by simp [hz, zero_le_one]
else by { simp_rw [_root_.abs_div, abs_abs, div_le_iff (abs_pos.2 hz), one_mul, abs_im_le_abs] }
@[simp, norm_cast] lemma abs_cast_nat (n : ℕ) : abs (n : ℂ) = n :=
by rw [← of_real_nat_cast, abs_of_nonneg (nat.cast_nonneg n)]
lemma norm_sq_eq_abs (x : ℂ) : norm_sq x = abs x ^ 2 :=
by rw [abs, pow_two, real.mul_self_sqrt (norm_sq_nonneg _)]
/-! ### Cauchy sequences -/
theorem is_cau_seq_re (f : cau_seq ℂ abs) : is_cau_seq abs' (λ n, (f n).re) :=
λ ε ε0, (f.cauchy ε0).imp $ λ i H j ij,
lt_of_le_of_lt (by simpa using abs_re_le_abs (f j - f i)) (H _ ij)
theorem is_cau_seq_im (f : cau_seq ℂ abs) : is_cau_seq abs' (λ n, (f n).im) :=
λ ε ε0, (f.cauchy ε0).imp $ λ i H j ij,
lt_of_le_of_lt (by simpa using abs_im_le_abs (f j - f i)) (H _ ij)
/-- The real part of a complex Cauchy sequence, as a real Cauchy sequence. -/
noncomputable def cau_seq_re (f : cau_seq ℂ abs) : cau_seq ℝ abs' :=
⟨_, is_cau_seq_re f⟩
/-- The imaginary part of a complex Cauchy sequence, as a real Cauchy sequence. -/
noncomputable def cau_seq_im (f : cau_seq ℂ abs) : cau_seq ℝ abs' :=
⟨_, is_cau_seq_im f⟩
lemma is_cau_seq_abs {f : ℕ → ℂ} (hf : is_cau_seq abs f) :
is_cau_seq abs' (abs ∘ f) :=
λ ε ε0, let ⟨i, hi⟩ := hf ε ε0 in
⟨i, λ j hj, lt_of_le_of_lt (abs_abs_sub_le_abs_sub _ _) (hi j hj)⟩
/-- The limit of a Cauchy sequence of complex numbers. -/
noncomputable def lim_aux (f : cau_seq ℂ abs) : ℂ :=
⟨cau_seq.lim (cau_seq_re f), cau_seq.lim (cau_seq_im f)⟩
theorem equiv_lim_aux (f : cau_seq ℂ abs) : f ≈ cau_seq.const abs (lim_aux f) :=
λ ε ε0, (exists_forall_ge_and
(cau_seq.equiv_lim ⟨_, is_cau_seq_re f⟩ _ (half_pos ε0))
(cau_seq.equiv_lim ⟨_, is_cau_seq_im f⟩ _ (half_pos ε0))).imp $
λ i H j ij, begin
cases H _ ij with H₁ H₂,
apply lt_of_le_of_lt (abs_le_abs_re_add_abs_im _),
dsimp [lim_aux] at *,
have := add_lt_add H₁ H₂,
rwa add_halves at this,
end
noncomputable instance : cau_seq.is_complete ℂ abs :=
⟨λ f, ⟨lim_aux f, equiv_lim_aux f⟩⟩
open cau_seq
lemma lim_eq_lim_im_add_lim_re (f : cau_seq ℂ abs) : lim f =
↑(lim (cau_seq_re f)) + ↑(lim (cau_seq_im f)) * I :=
lim_eq_of_equiv_const $
calc f ≈ _ : equiv_lim_aux f
... = cau_seq.const abs (↑(lim (cau_seq_re f)) + ↑(lim (cau_seq_im f)) * I) :
cau_seq.ext (λ _, complex.ext (by simp [lim_aux, cau_seq_re]) (by simp [lim_aux, cau_seq_im]))
lemma lim_re (f : cau_seq ℂ abs) : lim (cau_seq_re f) = (lim f).re :=
by rw [lim_eq_lim_im_add_lim_re]; simp
lemma lim_im (f : cau_seq ℂ abs) : lim (cau_seq_im f) = (lim f).im :=
by rw [lim_eq_lim_im_add_lim_re]; simp
lemma is_cau_seq_conj (f : cau_seq ℂ abs) : is_cau_seq abs (λ n, conj (f n)) :=
λ ε ε0, let ⟨i, hi⟩ := f.2 ε ε0 in
⟨i, λ j hj, by rw [← conj.map_sub, abs_conj]; exact hi j hj⟩
/-- The complex conjugate of a complex Cauchy sequence, as a complex Cauchy sequence. -/
noncomputable def cau_seq_conj (f : cau_seq ℂ abs) : cau_seq ℂ abs :=
⟨_, is_cau_seq_conj f⟩
lemma lim_conj (f : cau_seq ℂ abs) : lim (cau_seq_conj f) = conj (lim f) :=
complex.ext (by simp [cau_seq_conj, (lim_re _).symm, cau_seq_re])
(by simp [cau_seq_conj, (lim_im _).symm, cau_seq_im, (lim_neg _).symm]; refl)
/-- The absolute value of a complex Cauchy sequence, as a real Cauchy sequence. -/
noncomputable def cau_seq_abs (f : cau_seq ℂ abs) : cau_seq ℝ abs' :=
⟨_, is_cau_seq_abs f.2⟩
lemma lim_abs (f : cau_seq ℂ abs) : lim (cau_seq_abs f) = abs (lim f) :=
lim_eq_of_equiv_const (λ ε ε0,
let ⟨i, hi⟩ := equiv_lim f ε ε0 in
⟨i, λ j hj, lt_of_le_of_lt (abs_abs_sub_le_abs_sub _ _) (hi j hj)⟩)
end complex
|
7ece8dd9ea549514089190ce1bc0ec00ecf72094
|
137c667471a40116a7afd7261f030b30180468c2
|
/src/data/dfinsupp.lean
|
c6620a35eb14ccf7bedd784e3a5958373dad2b52
|
[
"Apache-2.0"
] |
permissive
|
bragadeesh153/mathlib
|
46bf814cfb1eecb34b5d1549b9117dc60f657792
|
b577bb2cd1f96eb47031878256856020b76f73cd
|
refs/heads/master
| 1,687,435,188,334
| 1,626,384,207,000
| 1,626,384,207,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 52,068
|
lean
|
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Kenny Lau
-/
import algebra.module.pi
import algebra.big_operators.basic
import data.set.finite
import group_theory.submonoid.membership
/-!
# Dependent functions with finite support
For a non-dependent version see `data/finsupp.lean`.
-/
universes u u₁ u₂ v v₁ v₂ v₃ w x y l
open_locale big_operators
variables (ι : Type u) (β : ι → Type v) {β₁ : ι → Type v₁} {β₂ : ι → Type v₂}
namespace dfinsupp
variable [Π i, has_zero (β i)]
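/-- Auxiliary structure underlying `dfinsupp`: a function `Π i, β i` bundled with a multiset of
indices outside of which the function is zero. -/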
structure pre : Type (max u v) :=
(to_fun : Π i, β i)
(pre_support : multiset ι)
(zero : ∀ i, i ∈ pre_support ∨ to_fun i = 0)
instance inhabited_pre : inhabited (pre ι β) :=
⟨⟨λ i, 0, ∅, λ i, or.inr rfl⟩⟩
instance : setoid (pre ι β) :=
{ r := λ x y, ∀ i, x.to_fun i = y.to_fun i,
iseqv := ⟨λ f i, rfl, λ f g H i, (H i).symm,
λ f g h H1 H2 i, (H1 i).trans (H2 i)⟩ }
end dfinsupp
variable {ι}
/-- A dependent function `Π i, β i` with finite support. -/
@[reducible]
def dfinsupp [Π i, has_zero (β i)] : Type* :=
quotient (dfinsupp.pre.setoid ι β)
variable {β}
notation `Π₀` binders `, ` r:(scoped f, dfinsupp f) := r
infix ` →ₚ `:25 := dfinsupp
namespace dfinsupp
section basic
variables [Π i, has_zero (β i)] [Π i, has_zero (β₁ i)] [Π i, has_zero (β₂ i)]
instance : has_coe_to_fun (Π₀ i, β i) :=
⟨λ _, Π i, β i, λ f, quotient.lift_on f pre.to_fun $ λ _ _, funext⟩
instance : has_zero (Π₀ i, β i) := ⟨⟦⟨0, ∅, λ i, or.inr rfl⟩⟧⟩
instance : inhabited (Π₀ i, β i) := ⟨0⟩
@[simp]
lemma coe_pre_mk (f : Π i, β i) (s : multiset ι) (hf) :
⇑(⟦⟨f, s, hf⟩⟧ : Π₀ i, β i) = f := rfl
@[simp] lemma coe_zero : ⇑(0 : Π₀ i, β i) = 0 := rfl
lemma zero_apply (i : ι) : (0 : Π₀ i, β i) i = 0 := rfl
lemma coe_fn_injective : @function.injective (Π₀ i, β i) (Π i, β i) coe_fn :=
λ f g H, quotient.induction_on₂ f g (λ _ _ H, quotient.sound H) (congr_fun H)
@[ext] lemma ext {f g : Π₀ i, β i} (H : ∀ i, f i = g i) : f = g :=
coe_fn_injective (funext H)
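-- Usage sketch (added for illustration): equality of `dfinsupp`s is checked pointwise.
example {f g : Π₀ i, β i} (h : ∀ i, f i = g i) : f = g := ext h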
/-- The composition of `f : Π i, β₁ i → β₂ i` and `g : Π₀ i, β₁ i` is
`map_range f hf g : Π₀ i, β₂ i`, well defined when `f i 0 = 0` for every `i`.
This preserves the structure on `f`, and exists in various bundled forms for when `f` is itself
bundled:
* `dfinsupp.map_range.add_monoid_hom`
* `dfinsupp.map_range.add_equiv`
* `dfinsupp.map_range.linear_map`
* `dfinsupp.map_range.linear_equiv`
-/
def map_range (f : Π i, β₁ i → β₂ i) (hf : ∀ i, f i 0 = 0) (g : Π₀ i, β₁ i) : Π₀ i, β₂ i :=
quotient.lift_on g (λ x, ⟦(⟨λ i, f i (x.1 i), x.2,
λ i, or.cases_on (x.3 i) or.inl $ λ H, or.inr $ by rw [H, hf]⟩ : pre ι β₂)⟧) $ λ x y H,
quotient.sound $ λ i, by simp only [H i]
@[simp] lemma map_range_apply
(f : Π i, β₁ i → β₂ i) (hf : ∀ i, f i 0 = 0) (g : Π₀ i, β₁ i) (i : ι) :
map_range f hf g i = f i (g i) :=
quotient.induction_on g $ λ x, rfl
@[simp] lemma map_range_id (h : ∀ i, id (0 : β₁ i) = 0 := λ i, rfl) (g : Π₀ (i : ι), β₁ i) :
map_range (λ i, (id : β₁ i → β₁ i)) h g = g :=
by { ext, simp only [map_range_apply, id.def] }
lemma map_range_comp (f : Π i, β₁ i → β₂ i) (f₂ : Π i, β i → β₁ i)
(hf : ∀ i, f i 0 = 0) (hf₂ : ∀ i, f₂ i 0 = 0) (h : ∀ i, (f i ∘ f₂ i) 0 = 0)
(g : Π₀ (i : ι), β i) :
map_range (λ i, f i ∘ f₂ i) h g = map_range f hf (map_range f₂ hf₂ g) :=
by { ext, simp only [map_range_apply] }
@[simp] lemma map_range_zero (f : Π i, β₁ i → β₂ i) (hf : ∀ i, f i 0 = 0) :
map_range f hf (0 : Π₀ i, β₁ i) = 0 :=
by { ext, simp only [map_range_apply, coe_zero, pi.zero_apply, hf] }
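-- Illustrative sketch (added, not from the original file): `map_range` acts pointwise,
-- as recorded by `map_range_apply`.
example (f : Π i, β₁ i → β₂ i) (hf : ∀ i, f i 0 = 0) (g : Π₀ i, β₁ i) (i : ι) :
  map_range f hf g i = f i (g i) :=
map_range_apply f hf g i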
/-- Let `f i` be a binary operation `β₁ i → β₂ i → β i` such that `f i 0 0 = 0`.
Then `zip_with f hf` is a binary operation `Π₀ i, β₁ i → Π₀ i, β₂ i → Π₀ i, β i`. -/
def zip_with (f : Π i, β₁ i → β₂ i → β i) (hf : ∀ i, f i 0 0 = 0)
(g₁ : Π₀ i, β₁ i) (g₂ : Π₀ i, β₂ i) : (Π₀ i, β i) :=
begin
refine quotient.lift_on₂ g₁ g₂ (λ x y, ⟦(⟨λ i, f i (x.1 i) (y.1 i), x.2 + y.2,
λ i, _⟩ : pre ι β)⟧) _,
{ cases x.3 i with h1 h1,
{ left, rw multiset.mem_add, left, exact h1 },
cases y.3 i with h2 h2,
{ left, rw multiset.mem_add, right, exact h2 },
right, rw [h1, h2, hf] },
exact λ x₁ x₂ y₁ y₂ H1 H2, quotient.sound $ λ i, by simp only [H1 i, H2 i]
end
@[simp] lemma zip_with_apply
(f : Π i, β₁ i → β₂ i → β i) (hf : ∀ i, f i 0 0 = 0) (g₁ : Π₀ i, β₁ i) (g₂ : Π₀ i, β₂ i) (i : ι) :
zip_with f hf g₁ g₂ i = f i (g₁ i) (g₂ i) :=
quotient.induction_on₂ g₁ g₂ $ λ _ _, rfl
end basic
section algebra
instance [Π i, add_zero_class (β i)] : has_add (Π₀ i, β i) :=
⟨zip_with (λ _, (+)) (λ _, add_zero 0)⟩
lemma add_apply [Π i, add_zero_class (β i)] (g₁ g₂ : Π₀ i, β i) (i : ι) :
(g₁ + g₂) i = g₁ i + g₂ i :=
zip_with_apply _ _ g₁ g₂ i
@[simp] lemma coe_add [Π i, add_zero_class (β i)] (g₁ g₂ : Π₀ i, β i) :
⇑(g₁ + g₂) = g₁ + g₂ :=
funext $ add_apply g₁ g₂
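-- Usage sketch (added for illustration): addition of `dfinsupp`s is pointwise.
example [Π i, add_zero_class (β i)] (g₁ g₂ : Π₀ i, β i) (i : ι) :
  (g₁ + g₂) i = g₁ i + g₂ i :=
add_apply g₁ g₂ i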
instance [Π i, add_zero_class (β i)] : add_zero_class (Π₀ i, β i) :=
{ zero := 0,
add := (+),
zero_add := λ f, ext $ λ i, by simp only [add_apply, zero_apply, zero_add],
add_zero := λ f, ext $ λ i, by simp only [add_apply, zero_apply, add_zero] }
instance [Π i, add_monoid (β i)] : add_monoid (Π₀ i, β i) :=
{ add_monoid .
zero := 0,
add := (+),
add_assoc := λ f g h, ext $ λ i, by simp only [add_apply, add_assoc],
.. dfinsupp.add_zero_class }
/-- Coercion from a `dfinsupp` to a pi type is an `add_monoid_hom`. -/
def coe_fn_add_monoid_hom [Π i, add_zero_class (β i)] : (Π₀ i, β i) →+ (Π i, β i) :=
{ to_fun := coe_fn, map_zero' := coe_zero, map_add' := coe_add }
/-- Evaluation at a point is an `add_monoid_hom`. This is the finitely-supported version of
`pi.eval_add_monoid_hom`. -/
def eval_add_monoid_hom [Π i, add_zero_class (β i)] (i : ι) : (Π₀ i, β i) →+ β i :=
(pi.eval_add_monoid_hom β i).comp coe_fn_add_monoid_hom
instance is_add_monoid_hom [Π i, add_zero_class (β i)] {i : ι} :
is_add_monoid_hom (λ g : Π₀ i : ι, β i, g i) :=
(eval_add_monoid_hom i).is_add_monoid_hom
instance [Π i, add_group (β i)] : has_neg (Π₀ i, β i) :=
⟨λ f, f.map_range (λ _, has_neg.neg) (λ _, neg_zero)⟩
instance [Π i, add_comm_monoid (β i)] : add_comm_monoid (Π₀ i, β i) :=
{ add_comm := λ f g, ext $ λ i, by simp only [add_apply, add_comm],
.. dfinsupp.add_monoid }
@[simp] lemma coe_finset_sum {α} [Π i, add_comm_monoid (β i)] (s : finset α) (g : α → Π₀ i, β i) :
⇑(∑ a in s, g a) = ∑ a in s, g a :=
(coe_fn_add_monoid_hom : _ →+ (Π i, β i)).map_sum g s
@[simp] lemma finset_sum_apply {α} [Π i, add_comm_monoid (β i)] (s : finset α) (g : α → Π₀ i, β i)
(i : ι) :
(∑ a in s, g a) i = ∑ a in s, g a i :=
(eval_add_monoid_hom i : _ →+ β i).map_sum g s
lemma neg_apply [Π i, add_group (β i)] (g : Π₀ i, β i) (i : ι) : (- g) i = - g i :=
map_range_apply _ _ g i
@[simp] lemma coe_neg [Π i, add_group (β i)] (g : Π₀ i, β i) : ⇑(- g) = - g :=
funext $ neg_apply g
instance [Π i, add_group (β i)] : add_group (Π₀ i, β i) :=
{ add_left_neg := λ f, ext $ λ i, by simp only [add_apply, neg_apply, zero_apply, add_left_neg],
.. dfinsupp.add_monoid,
.. (infer_instance : has_neg (Π₀ i, β i)) }
lemma sub_apply [Π i, add_group (β i)] (g₁ g₂ : Π₀ i, β i) (i : ι) :
(g₁ - g₂) i = g₁ i - g₂ i :=
by rw [sub_eq_add_neg]; simp [sub_eq_add_neg]
@[simp] lemma coe_sub [Π i, add_group (β i)] (g₁ g₂ : Π₀ i, β i) :
⇑(g₁ - g₂) = g₁ - g₂ :=
funext $ sub_apply g₁ g₂
instance [Π i, add_comm_group (β i)] : add_comm_group (Π₀ i, β i) :=
{ add_comm := λ f g, ext $ λ i, by simp only [add_apply, add_comm],
..dfinsupp.add_group }
/-- Dependent functions with finite support inherit a scalar action from a `distrib_mul_action`
on each coordinate. -/
instance {γ : Type w} [monoid γ] [Π i, add_monoid (β i)] [Π i, distrib_mul_action γ (β i)] :
has_scalar γ (Π₀ i, β i) :=
⟨λc v, v.map_range (λ _, (•) c) (λ _, smul_zero _)⟩
lemma smul_apply {γ : Type w} [monoid γ] [Π i, add_monoid (β i)]
[Π i, distrib_mul_action γ (β i)] (b : γ) (v : Π₀ i, β i) (i : ι) :
(b • v) i = b • (v i) :=
map_range_apply _ _ v i
@[simp] lemma coe_smul {γ : Type w} [monoid γ] [Π i, add_monoid (β i)]
[Π i, distrib_mul_action γ (β i)] (b : γ) (v : Π₀ i, β i) :
⇑(b • v) = b • v :=
funext $ smul_apply b v
instance {γ : Type w} {δ : Type*} [monoid γ] [monoid δ]
[Π i, add_monoid (β i)] [Π i, distrib_mul_action γ (β i)] [Π i, distrib_mul_action δ (β i)]
[Π i, smul_comm_class γ δ (β i)] :
smul_comm_class γ δ (Π₀ i, β i) :=
{ smul_comm := λ r s m, ext $ λ i, by simp only [smul_apply, smul_comm r s (m i)] }
instance {γ : Type w} {δ : Type*} [monoid γ] [monoid δ]
[Π i, add_monoid (β i)] [Π i, distrib_mul_action γ (β i)] [Π i, distrib_mul_action δ (β i)]
[has_scalar γ δ] [Π i, is_scalar_tower γ δ (β i)] :
is_scalar_tower γ δ (Π₀ i, β i) :=
{ smul_assoc := λ r s m, ext $ λ i, by simp only [smul_apply, smul_assoc r s (m i)] }
/-- Dependent functions with finite support inherit a `distrib_mul_action` structure from such a
structure on each coordinate. -/
instance {γ : Type w} [monoid γ] [Π i, add_monoid (β i)] [Π i, distrib_mul_action γ (β i)] :
distrib_mul_action γ (Π₀ i, β i) :=
{ smul_zero := λ c, ext $ λ i, by simp only [smul_apply, smul_zero, zero_apply],
smul_add := λ c x y, ext $ λ i, by simp only [add_apply, smul_apply, smul_add],
one_smul := λ x, ext $ λ i, by simp only [smul_apply, one_smul],
mul_smul := λ r s x, ext $ λ i, by simp only [smul_apply, smul_smul],
..dfinsupp.has_scalar }
/-- Dependent functions with finite support inherit a module structure from such a structure on
each coordinate. -/
instance {γ : Type w} [semiring γ] [Π i, add_comm_monoid (β i)] [Π i, module γ (β i)] :
module γ (Π₀ i, β i) :=
{ zero_smul := λ c, ext $ λ i, by simp only [smul_apply, zero_smul, zero_apply],
add_smul := λ c x y, ext $ λ i, by simp only [add_apply, smul_apply, add_smul],
..dfinsupp.distrib_mul_action }
end algebra
section filter_and_subtype_domain
/-- `filter p f` is the function which is `f i` if `p i` is true and 0 otherwise. -/
def filter [Π i, has_zero (β i)] (p : ι → Prop) [decidable_pred p] (f : Π₀ i, β i) : Π₀ i, β i :=
quotient.lift_on f (λ x, ⟦(⟨λ i, if p i then x.1 i else 0, x.2,
λ i, or.cases_on (x.3 i) or.inl $ λ H, or.inr $ by rw [H, if_t_t]⟩ : pre ι β)⟧) $ λ x y H,
quotient.sound $ λ i, by simp only [H i]
@[simp] lemma filter_apply [Π i, has_zero (β i)]
(p : ι → Prop) [decidable_pred p] (i : ι) (f : Π₀ i, β i) :
f.filter p i = if p i then f i else 0 :=
quotient.induction_on f $ λ x, rfl
lemma filter_apply_pos [Π i, has_zero (β i)]
{p : ι → Prop} [decidable_pred p] (f : Π₀ i, β i) {i : ι} (h : p i) :
f.filter p i = f i :=
by simp only [filter_apply, if_pos h]
lemma filter_apply_neg [Π i, has_zero (β i)]
{p : ι → Prop} [decidable_pred p] (f : Π₀ i, β i) {i : ι} (h : ¬ p i) :
f.filter p i = 0 :=
by simp only [filter_apply, if_neg h]
lemma filter_pos_add_filter_neg [Π i, add_zero_class (β i)] (f : Π₀ i, β i)
(p : ι → Prop) [decidable_pred p] :
f.filter p + f.filter (λi, ¬ p i) = f :=
ext $ λ i, by simp only [add_apply, filter_apply]; split_ifs; simp only [add_zero, zero_add]
/-- `subtype_domain p f` is the restriction of the finitely supported function
`f` to the subtype `p`. -/
def subtype_domain [Π i, has_zero (β i)] (p : ι → Prop) [decidable_pred p]
(f : Π₀ i, β i) : Π₀ i : subtype p, β i :=
begin
fapply quotient.lift_on f,
{ intro x,
refine ⟦⟨λ i, x.1 (i : ι),
(x.2.filter p).attach.map $ λ j, ⟨j, (multiset.mem_filter.1 j.2).2⟩, _⟩⟧,
refine λ i, or.cases_on (x.3 i) (λ H, _) or.inr,
left, rw multiset.mem_map, refine ⟨⟨i, multiset.mem_filter.2 ⟨H, i.2⟩⟩, _, subtype.eta _ _⟩,
apply multiset.mem_attach },
intros x y H,
exact quotient.sound (λ i, H i)
end
@[simp] lemma subtype_domain_zero [Π i, has_zero (β i)] {p : ι → Prop} [decidable_pred p] :
subtype_domain p (0 : Π₀ i, β i) = 0 :=
rfl
@[simp] lemma subtype_domain_apply [Π i, has_zero (β i)] {p : ι → Prop} [decidable_pred p]
{i : subtype p} {v : Π₀ i, β i} :
(subtype_domain p v) i = v i :=
quotient.induction_on v $ λ x, rfl
@[simp] lemma subtype_domain_add [Π i, add_zero_class (β i)] {p : ι → Prop} [decidable_pred p]
{v v' : Π₀ i, β i} :
(v + v').subtype_domain p = v.subtype_domain p + v'.subtype_domain p :=
ext $ λ i, by simp only [add_apply, subtype_domain_apply]
instance subtype_domain.is_add_monoid_hom [Π i, add_zero_class (β i)]
{p : ι → Prop} [decidable_pred p] :
is_add_monoid_hom (subtype_domain p : (Π₀ i : ι, β i) → Π₀ i : subtype p, β i) :=
{ map_add := λ _ _, subtype_domain_add, map_zero := subtype_domain_zero }
@[simp]
lemma subtype_domain_neg [Π i, add_group (β i)] {p : ι → Prop} [decidable_pred p] {v : Π₀ i, β i} :
(- v).subtype_domain p = - v.subtype_domain p :=
ext $ λ i, by simp only [neg_apply, subtype_domain_apply]
@[simp] lemma subtype_domain_sub [Π i, add_group (β i)] {p : ι → Prop} [decidable_pred p]
{v v' : Π₀ i, β i} :
(v - v').subtype_domain p = v.subtype_domain p - v'.subtype_domain p :=
ext $ λ i, by simp only [sub_apply, subtype_domain_apply]
end filter_and_subtype_domain
variable [dec : decidable_eq ι]
include dec
section basic
variable [Π i, has_zero (β i)]
omit dec
lemma finite_support (f : Π₀ i, β i) : set.finite {i | f i ≠ 0} :=
begin
classical,
exact quotient.induction_on f (λ x, x.2.to_finset.finite_to_set.subset (λ i H,
multiset.mem_to_finset.2 ((x.3 i).resolve_right H)))
end
include dec
/-- Create an element of `Π₀ i, β i` from a finset `s` and a function `x`
defined on this `finset`. -/
def mk (s : finset ι) (x : Π i : (↑s : set ι), β (i : ι)) : Π₀ i, β i :=
⟦⟨λ i, if H : i ∈ s then x ⟨i, H⟩ else 0, s.1,
λ i, if H : i ∈ s then or.inl H else or.inr $ dif_neg H⟩⟧
@[simp] lemma mk_apply {s : finset ι} {x : Π i : (↑s : set ι), β i} {i : ι} :
(mk s x : Π i, β i) i = if H : i ∈ s then x ⟨i, H⟩ else 0 :=
rfl
theorem mk_injective (s : finset ι) : function.injective (@mk ι β _ _ s) :=
begin
intros x y H,
ext i,
have h1 : (mk s x : Π i, β i) i = (mk s y : Π i, β i) i, {rw H},
cases i with i hi,
change i ∈ s at hi,
dsimp only [mk_apply, subtype.coe_mk] at h1,
simpa only [dif_pos hi] using h1
end
/-- The function `single i b : Π₀ i, β i` sends `i` to `b`
and all other points to `0`. -/
def single (i : ι) (b : β i) : Π₀ i, β i :=
mk {i} $ λ j, eq.rec_on (finset.mem_singleton.1 j.prop).symm b
@[simp] lemma single_apply {i i' b} :
(single i b : Π₀ i, β i) i' = (if h : i = i' then eq.rec_on h b else 0) :=
begin
dsimp only [single],
by_cases h : i = i',
{ have h1 : i' ∈ ({i} : finset ι) := finset.mem_singleton.2 h.symm,
simp only [mk_apply, dif_pos h, dif_pos h1], refl },
{ have h1 : i' ∉ ({i} : finset ι) := finset.not_mem_singleton.2 (ne.symm h),
simp only [mk_apply, dif_neg h, dif_neg h1] }
end
@[simp] lemma single_zero {i} : (single i 0 : Π₀ i, β i) = 0 :=
quotient.sound $ λ j, if H : j ∈ ({i} : finset _)
then by dsimp only; rw [dif_pos H]; cases finset.mem_singleton.1 H; refl
else dif_neg H
@[simp] lemma single_eq_same {i b} : (single i b : Π₀ i, β i) i = b :=
by simp only [single_apply, dif_pos rfl]
lemma single_eq_of_ne {i i' b} (h : i ≠ i') : (single i b : Π₀ i, β i) i' = 0 :=
by simp only [single_apply, dif_neg h]
lemma single_injective {i} : function.injective (single i : β i → Π₀ i, β i) :=
λ x y H, congr_fun (mk_injective _ H) ⟨i, by simp⟩
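-- Illustrative sketch (added, not from the original file): `single i b` takes the value `b`
-- at `i` and `0` at any other index.
example {i j : ι} (b : β i) (h : i ≠ j) :
  (single i b : Π₀ i, β i) i = b ∧ (single i b : Π₀ i, β i) j = 0 :=
⟨single_eq_same, single_eq_of_ne h⟩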
/-- Like `finsupp.single_eq_single_iff`, but with a `heq` due to dependent types -/
lemma single_eq_single_iff (i j : ι) (xi : β i) (xj : β j) :
dfinsupp.single i xi = dfinsupp.single j xj ↔ i = j ∧ xi == xj ∨ xi = 0 ∧ xj = 0 :=
begin
split,
{ intro h,
by_cases hij : i = j,
{ subst hij,
exact or.inl ⟨rfl, heq_of_eq (dfinsupp.single_injective h)⟩, },
{ have h_coe : ⇑(dfinsupp.single i xi) = dfinsupp.single j xj := congr_arg coe_fn h,
have hci := congr_fun h_coe i,
have hcj := congr_fun h_coe j,
rw dfinsupp.single_eq_same at hci hcj,
rw dfinsupp.single_eq_of_ne (ne.symm hij) at hci,
rw dfinsupp.single_eq_of_ne (hij) at hcj,
exact or.inr ⟨hci, hcj.symm⟩, }, },
{ rintros (⟨hi, hxi⟩ | ⟨hi, hj⟩),
{ subst hi,
rw eq_of_heq hxi, },
{ rw [hi, hj, dfinsupp.single_zero, dfinsupp.single_zero], }, },
end
/-- Equality of sigma types is sufficient (but not necessary) to show equality of `dfinsupp`s. -/
lemma single_eq_of_sigma_eq
{i j} {xi : β i} {xj : β j} (h : (⟨i, xi⟩ : sigma β) = ⟨j, xj⟩) :
dfinsupp.single i xi = dfinsupp.single j xj :=
by { cases h, refl }
/-- Redefine `f i` to be `0`. -/
def erase (i : ι) (f : Π₀ i, β i) : Π₀ i, β i :=
quotient.lift_on f (λ x, ⟦(⟨λ j, if j = i then 0 else x.1 j, x.2,
λ j, or.cases_on (x.3 j) or.inl $ λ H, or.inr $ by simp only [H, if_t_t]⟩ : pre ι β)⟧) $ λ x y H,
quotient.sound $ λ j, if h : j = i then by simp only [if_pos h]
else by simp only [if_neg h, H j]
@[simp] lemma erase_apply {i j : ι} {f : Π₀ i, β i} :
(f.erase i) j = if j = i then 0 else f j :=
quotient.induction_on f $ λ x, rfl
@[simp] lemma erase_same {i : ι} {f : Π₀ i, β i} : (f.erase i) i = 0 :=
by simp
lemma erase_ne {i i' : ι} {f : Π₀ i, β i} (h : i' ≠ i) : (f.erase i) i' = f i' :=
by simp [h]
end basic
section add_monoid
variable [Π i, add_zero_class (β i)]
@[simp] lemma single_add {i : ι} {b₁ b₂ : β i} : single i (b₁ + b₂) = single i b₁ + single i b₂ :=
ext $ assume i',
begin
by_cases h : i = i',
{ subst h, simp only [add_apply, single_eq_same] },
{ simp only [add_apply, single_eq_of_ne h, zero_add] }
end
variables (β)
/-- `dfinsupp.single` as an `add_monoid_hom`. -/
@[simps] def single_add_hom (i : ι) : β i →+ Π₀ i, β i :=
{ to_fun := single i, map_zero' := single_zero, map_add' := λ _ _, single_add }
variables {β}
lemma single_add_erase {i : ι} {f : Π₀ i, β i} : single i (f i) + f.erase i = f :=
ext $ λ i',
if h : i = i'
then by subst h; simp only [add_apply, single_apply, erase_apply, dif_pos rfl, if_pos, add_zero]
else by simp only [add_apply, single_apply, erase_apply, dif_neg h, if_neg (ne.symm h), zero_add]
lemma erase_add_single {i : ι} {f : Π₀ i, β i} : f.erase i + single i (f i) = f :=
ext $ λ i',
if h : i = i'
then by subst h; simp only [add_apply, single_apply, erase_apply, dif_pos rfl, if_pos, zero_add]
else by simp only [add_apply, single_apply, erase_apply, dif_neg h, if_neg (ne.symm h), add_zero]
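-- Usage sketch (added for illustration): a `dfinsupp` decomposes as its value at `i` plus
-- the function with `i` erased.
example {i : ι} (f : Π₀ i, β i) : single i (f i) + f.erase i = f := single_add_erase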
protected theorem induction {p : (Π₀ i, β i) → Prop} (f : Π₀ i, β i)
(h0 : p 0) (ha : ∀i b (f : Π₀ i, β i), f i = 0 → b ≠ 0 → p f → p (single i b + f)) :
p f :=
begin
refine quotient.induction_on f (λ x, _),
cases x with f s H, revert f H,
apply multiset.induction_on s,
{ intros f H, convert h0, ext i, exact (H i).resolve_left id },
intros i s ih f H,
by_cases H1 : i ∈ s,
{ have H2 : ∀ j, j ∈ s ∨ f j = 0,
{ intro j, cases H j with H2 H2,
{ cases multiset.mem_cons.1 H2 with H3 H3,
{ left, rw H3, exact H1 },
{ left, exact H3 } },
right, exact H2 },
have H3 : (⟦{to_fun := f, pre_support := i ::ₘ s, zero := H}⟧ : Π₀ i, β i)
= ⟦{to_fun := f, pre_support := s, zero := H2}⟧,
{ exact quotient.sound (λ i, rfl) },
rw H3, apply ih },
have H2 : p (erase i ⟦{to_fun := f, pre_support := i ::ₘ s, zero := H}⟧),
{ dsimp only [erase, quotient.lift_on_mk],
have H2 : ∀ j, j ∈ s ∨ ite (j = i) 0 (f j) = 0,
{ intro j, cases H j with H2 H2,
{ cases multiset.mem_cons.1 H2 with H3 H3,
{ right, exact if_pos H3 },
{ left, exact H3 } },
right, split_ifs; [refl, exact H2] },
have H3 : (⟦{to_fun := λ (j : ι), ite (j = i) 0 (f j),
pre_support := i ::ₘ s, zero := _}⟧ : Π₀ i, β i)
= ⟦{to_fun := λ (j : ι), ite (j = i) 0 (f j), pre_support := s, zero := H2}⟧ :=
quotient.sound (λ i, rfl),
rw H3, apply ih },
have H3 : single i _ + _ = (⟦{to_fun := f, pre_support := i ::ₘ s, zero := H}⟧ : Π₀ i, β i) :=
single_add_erase,
rw ← H3,
change p (single i (f i) + _),
cases classical.em (f i = 0) with h h,
{ rw [h, single_zero, zero_add], exact H2 },
refine ha _ _ _ _ h H2,
rw erase_same
end
lemma induction₂ {p : (Π₀ i, β i) → Prop} (f : Π₀ i, β i)
(h0 : p 0) (ha : ∀i b (f : Π₀ i, β i), f i = 0 → b ≠ 0 → p f → p (f + single i b)) :
p f :=
dfinsupp.induction f h0 $ λ i b f h1 h2 h3,
have h4 : f + single i b = single i b + f,
{ ext j, by_cases H : i = j,
{ subst H, simp [h1] },
{ simp [H] } },
eq.rec_on h4 $ ha i b f h1 h2 h3
@[simp] lemma add_closure_Union_range_single :
add_submonoid.closure (⋃ i : ι, set.range (single i : β i → (Π₀ i, β i))) = ⊤ :=
top_unique $ λ x hx, (begin
apply dfinsupp.induction x,
exact add_submonoid.zero_mem _,
exact λ a b f ha hb hf, add_submonoid.add_mem _
(add_submonoid.subset_closure $ set.mem_Union.2 ⟨a, set.mem_range_self _⟩) hf
end)
/-- If two additive homomorphisms from `Π₀ i, β i` are equal on each `single a b`, then
they are equal. -/
lemma add_hom_ext {γ : Type w} [add_zero_class γ] ⦃f g : (Π₀ i, β i) →+ γ⦄
(H : ∀ (i : ι) (y : β i), f (single i y) = g (single i y)) :
f = g :=
begin
refine add_monoid_hom.eq_of_eq_on_mdense add_closure_Union_range_single (λ f hf, _),
simp only [set.mem_Union, set.mem_range] at hf,
rcases hf with ⟨x, y, rfl⟩,
apply H
end
/-- If two additive homomorphisms from `Π₀ i, β i` are equal on each `single a b`, then
they are equal.
See note [partially-applied ext lemmas]. -/
@[ext] lemma add_hom_ext' {γ : Type w} [add_zero_class γ] ⦃f g : (Π₀ i, β i) →+ γ⦄
(H : ∀ x, f.comp (single_add_hom β x) = g.comp (single_add_hom β x)) :
f = g :=
add_hom_ext $ λ x, add_monoid_hom.congr_fun (H x)
end add_monoid
@[simp] lemma mk_add [Π i, add_zero_class (β i)] {s : finset ι} {x y : Π i : (↑s : set ι), β i} :
mk s (x + y) = mk s x + mk s y :=
ext $ λ i, by simp only [add_apply, mk_apply]; split_ifs; [refl, rw zero_add]
@[simp] lemma mk_zero [Π i, has_zero (β i)] {s : finset ι} :
mk s (0 : Π i : (↑s : set ι), β i.1) = 0 :=
ext $ λ i, by simp only [mk_apply]; split_ifs; refl
@[simp] lemma mk_neg [Π i, add_group (β i)] {s : finset ι} {x : Π i : (↑s : set ι), β i.1} :
mk s (-x) = -mk s x :=
ext $ λ i, by simp only [neg_apply, mk_apply]; split_ifs; [refl, rw neg_zero]
@[simp] lemma mk_sub [Π i, add_group (β i)] {s : finset ι} {x y : Π i : (↑s : set ι), β i.1} :
mk s (x - y) = mk s x - mk s y :=
ext $ λ i, by simp only [sub_apply, mk_apply]; split_ifs; [refl, rw sub_zero]
instance [Π i, add_group (β i)] {s : finset ι} : is_add_group_hom (@mk ι β _ _ s) :=
{ map_add := λ _ _, mk_add }
section
variables (γ : Type w) [semiring γ] [Π i, add_comm_monoid (β i)] [Π i, module γ (β i)]
include γ
@[simp] lemma mk_smul {s : finset ι} {c : γ} (x : Π i : (↑s : set ι), β i.1) :
mk s (c • x) = c • mk s x :=
ext $ λ i, by simp only [smul_apply, mk_apply]; split_ifs; [refl, rw smul_zero]
@[simp] lemma single_smul {i : ι} {c : γ} {x : β i} :
single i (c • x) = c • single i x :=
ext $ λ i, by simp only [smul_apply, single_apply]; split_ifs; [cases h, rw smul_zero]; refl
end
section support_basic
variables [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)]
/-- Set `{i | f i ≠ 0}` as a `finset`. -/
def support (f : Π₀ i, β i) : finset ι :=
quotient.lift_on f (λ x, x.2.to_finset.filter $ λ i, x.1 i ≠ 0) $
begin
intros x y Hxy,
ext i, split,
{ intro H,
rcases finset.mem_filter.1 H with ⟨h1, h2⟩,
rw Hxy i at h2,
exact finset.mem_filter.2 ⟨multiset.mem_to_finset.2 $ (y.3 i).resolve_right h2, h2⟩ },
{ intro H,
rcases finset.mem_filter.1 H with ⟨h1, h2⟩,
rw ← Hxy i at h2,
exact finset.mem_filter.2 ⟨multiset.mem_to_finset.2 $ (x.3 i).resolve_right h2, h2⟩ },
end
@[simp] theorem support_mk_subset {s : finset ι} {x : Π i : (↑s : set ι), β i.1} :
(mk s x).support ⊆ s :=
λ i H, multiset.mem_to_finset.1 (finset.mem_filter.1 H).1
@[simp] theorem mem_support_to_fun (f : Π₀ i, β i) (i) : i ∈ f.support ↔ f i ≠ 0 :=
begin
refine quotient.induction_on f (λ x, _),
dsimp only [support, quotient.lift_on_mk],
rw [finset.mem_filter, multiset.mem_to_finset],
exact and_iff_right_of_imp (x.3 i).resolve_right
end
theorem eq_mk_support (f : Π₀ i, β i) : f = mk f.support (λ i, f i) :=
begin
change f = mk f.support (λ i, f i.1),
ext i,
by_cases h : f i ≠ 0; [skip, rw [not_not] at h];
simp [h]
end
@[simp] lemma support_zero : (0 : Π₀ i, β i).support = ∅ := rfl
lemma mem_support_iff (f : Π₀ i, β i) : ∀i:ι, i ∈ f.support ↔ f i ≠ 0 :=
f.mem_support_to_fun
@[simp] lemma support_eq_empty {f : Π₀ i, β i} : f.support = ∅ ↔ f = 0 :=
⟨λ H, ext $ by simpa [finset.ext_iff] using H, by simp {contextual:=tt}⟩
instance decidable_zero : decidable_pred (eq (0 : Π₀ i, β i)) :=
λ f, decidable_of_iff _ $ support_eq_empty.trans eq_comm
lemma support_subset_iff {s : set ι} {f : Π₀ i, β i} :
↑f.support ⊆ s ↔ (∀i∉s, f i = 0) :=
by simp [set.subset_def];
exact forall_congr (assume i, not_imp_comm)
lemma support_single_ne_zero {i : ι} {b : β i} (hb : b ≠ 0) : (single i b).support = {i} :=
begin
ext j, by_cases h : i = j,
{ subst h, simp [hb] },
simp [ne.symm h, h]
end
lemma support_single_subset {i : ι} {b : β i} : (single i b).support ⊆ {i} :=
support_mk_subset
section map_range_and_zip_with
variables [Π i, has_zero (β₁ i)] [Π i, has_zero (β₂ i)]
lemma map_range_def [Π i (x : β₁ i), decidable (x ≠ 0)]
{f : Π i, β₁ i → β₂ i} {hf : ∀ i, f i 0 = 0} {g : Π₀ i, β₁ i} :
map_range f hf g = mk g.support (λ i, f i.1 (g i.1)) :=
begin
ext i,
by_cases h : g i ≠ 0; simp at h; simp [h, hf]
end
@[simp] lemma map_range_single {f : Π i, β₁ i → β₂ i} {hf : ∀ i, f i 0 = 0} {i : ι} {b : β₁ i} :
map_range f hf (single i b) = single i (f i b) :=
dfinsupp.ext $ λ i', by by_cases i = i'; [{subst i', simp}, simp [h, hf]]
variables [Π i (x : β₁ i), decidable (x ≠ 0)] [Π i (x : β₂ i), decidable (x ≠ 0)]
lemma support_map_range {f : Π i, β₁ i → β₂ i} {hf : ∀ i, f i 0 = 0} {g : Π₀ i, β₁ i} :
(map_range f hf g).support ⊆ g.support :=
by simp [map_range_def]
lemma zip_with_def {f : Π i, β₁ i → β₂ i → β i} {hf : ∀ i, f i 0 0 = 0}
{g₁ : Π₀ i, β₁ i} {g₂ : Π₀ i, β₂ i} :
zip_with f hf g₁ g₂ = mk (g₁.support ∪ g₂.support) (λ i, f i.1 (g₁ i.1) (g₂ i.1)) :=
begin
ext i,
by_cases h1 : g₁ i ≠ 0; by_cases h2 : g₂ i ≠ 0;
simp only [not_not, ne.def] at h1 h2; simp [h1, h2, hf]
end
lemma support_zip_with {f : Π i, β₁ i → β₂ i → β i} {hf : ∀ i, f i 0 0 = 0}
{g₁ : Π₀ i, β₁ i} {g₂ : Π₀ i, β₂ i} :
(zip_with f hf g₁ g₂).support ⊆ g₁.support ∪ g₂.support :=
by simp [zip_with_def]
end map_range_and_zip_with
lemma erase_def (i : ι) (f : Π₀ i, β i) :
f.erase i = mk (f.support.erase i) (λ j, f j.1) :=
by { ext j, by_cases h1 : j = i; by_cases h2 : f j ≠ 0; simp at h2; simp [h1, h2] }
@[simp] lemma support_erase (i : ι) (f : Π₀ i, β i) :
(f.erase i).support = f.support.erase i :=
by { ext j, by_cases h1 : j = i; by_cases h2 : f j ≠ 0; simp at h2; simp [h1, h2] }
section filter_and_subtype_domain
variables {p : ι → Prop} [decidable_pred p]
lemma filter_def (f : Π₀ i, β i) :
f.filter p = mk (f.support.filter p) (λ i, f i.1) :=
by ext i; by_cases h1 : p i; by_cases h2 : f i ≠ 0;
simp at h2; simp [h1, h2]
@[simp] lemma support_filter (f : Π₀ i, β i) :
(f.filter p).support = f.support.filter p :=
by ext i; by_cases h : p i; simp [h]
lemma subtype_domain_def (f : Π₀ i, β i) :
f.subtype_domain p = mk (f.support.subtype p) (λ i, f i) :=
by ext i; by_cases h1 : p i; by_cases h2 : f i ≠ 0;
try {simp at h2}; dsimp; simp [h1, h2, ← subtype.val_eq_coe]
@[simp] lemma support_subtype_domain {f : Π₀ i, β i} :
(subtype_domain p f).support = f.support.subtype p :=
by ext i; by_cases h1 : p i; by_cases h2 : f i ≠ 0;
try {simp at h2}; dsimp; simp [h1, h2]
end filter_and_subtype_domain
end support_basic
lemma support_add [Π i, add_zero_class (β i)] [Π i (x : β i), decidable (x ≠ 0)]
{g₁ g₂ : Π₀ i, β i} :
(g₁ + g₂).support ⊆ g₁.support ∪ g₂.support :=
support_zip_with
@[simp] lemma support_neg [Π i, add_group (β i)] [Π i (x : β i), decidable (x ≠ 0)]
{f : Π₀ i, β i} :
support (-f) = support f :=
by ext i; simp
lemma support_smul {γ : Type w} [semiring γ] [Π i, add_comm_monoid (β i)] [Π i, module γ (β i)]
[Π ( i : ι) (x : β i), decidable (x ≠ 0)]
(b : γ) (v : Π₀ i, β i) : (b • v).support ⊆ v.support :=
support_map_range
instance [Π i, has_zero (β i)] [Π i, decidable_eq (β i)] : decidable_eq (Π₀ i, β i) :=
assume f g, decidable_of_iff (f.support = g.support ∧ (∀i∈f.support, f i = g i))
⟨assume ⟨h₁, h₂⟩, ext $ assume i,
if h : i ∈ f.support then h₂ i h else
have hf : f i = 0, by rwa [f.mem_support_iff, not_not] at h,
have hg : g i = 0, by rwa [h₁, g.mem_support_iff, not_not] at h,
by rw [hf, hg],
by intro h; subst h; simp⟩
section prod_and_sum
variables {γ : Type w}
-- [to_additive sum] for dfinsupp.prod doesn't work, the equation lemmas are not generated
/-- `sum f g` is the sum of `g i (f i)` over the support of `f`. -/
def sum [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)] [add_comm_monoid γ]
(f : Π₀ i, β i) (g : Π i, β i → γ) : γ :=
∑ i in f.support, g i (f i)
/-- `prod f g` is the product of `g i (f i)` over the support of `f`. -/
@[to_additive]
def prod [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)] [comm_monoid γ]
(f : Π₀ i, β i) (g : Π i, β i → γ) : γ :=
∏ i in f.support, g i (f i)
@[to_additive]
lemma prod_map_range_index {β₁ : ι → Type v₁} {β₂ : ι → Type v₂}
[Π i, has_zero (β₁ i)] [Π i, has_zero (β₂ i)]
[Π i (x : β₁ i), decidable (x ≠ 0)] [Π i (x : β₂ i), decidable (x ≠ 0)] [comm_monoid γ]
{f : Π i, β₁ i → β₂ i} {hf : ∀ i, f i 0 = 0} {g : Π₀ i, β₁ i} {h : Π i, β₂ i → γ}
(h0 : ∀i, h i 0 = 1) :
(map_range f hf g).prod h = g.prod (λi b, h i (f i b)) :=
begin
rw [map_range_def],
refine (finset.prod_subset support_mk_subset _).trans _,
{ intros i h1 h2,
dsimp, simp [h1] at h2, dsimp at h2,
simp [h1, h2, h0] },
{ refine finset.prod_congr rfl _,
intros i h1,
simp [h1] }
end
@[to_additive]
lemma prod_zero_index [Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ] {h : Π i, β i → γ} : (0 : Π₀ i, β i).prod h = 1 :=
rfl
@[to_additive]
lemma prod_single_index [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)] [comm_monoid γ]
{i : ι} {b : β i} {h : Π i, β i → γ} (h_zero : h i 0 = 1) :
(single i b).prod h = h i b :=
begin
by_cases h : b ≠ 0,
{ simp [dfinsupp.prod, support_single_ne_zero h] },
{ rw [not_not] at h, simp [h, prod_zero_index, h_zero], refl }
end
@[to_additive]
lemma prod_neg_index [Π i, add_group (β i)] [Π i (x : β i), decidable (x ≠ 0)] [comm_monoid γ]
{g : Π₀ i, β i} {h : Π i, β i → γ} (h0 : ∀i, h i 0 = 1) :
(-g).prod h = g.prod (λi b, h i (- b)) :=
prod_map_range_index h0
omit dec
@[to_additive]
lemma prod_comm {ι₁ ι₂ : Sort*} {β₁ : ι₁ → Type*} {β₂ : ι₂ → Type*}
[decidable_eq ι₁] [decidable_eq ι₂] [Π i, has_zero (β₁ i)] [Π i, has_zero (β₂ i)]
[Π i (x : β₁ i), decidable (x ≠ 0)] [Π i (x : β₂ i), decidable (x ≠ 0)] [comm_monoid γ]
(f₁ : Π₀ i, β₁ i) (f₂ : Π₀ i, β₂ i) (h : Π i, β₁ i → Π i, β₂ i → γ) :
f₁.prod (λ i₁ x₁, f₂.prod $ λ i₂ x₂, h i₁ x₁ i₂ x₂) =
f₂.prod (λ i₂ x₂, f₁.prod $ λ i₁ x₁, h i₁ x₁ i₂ x₂) := finset.prod_comm
@[simp] lemma sum_apply {ι₁ : Type u₁} [decidable_eq ι₁] {β₁ : ι₁ → Type v₁}
[Π i₁, has_zero (β₁ i₁)] [Π i (x : β₁ i), decidable (x ≠ 0)]
[Π i, add_comm_monoid (β i)]
{f : Π₀ i₁, β₁ i₁} {g : Π i₁, β₁ i₁ → Π₀ i, β i} {i₂ : ι} :
(f.sum g) i₂ = f.sum (λi₁ b, g i₁ b i₂) :=
(f.support.sum_hom (λf : Π₀ i, β i, f i₂)).symm
include dec
lemma support_sum {ι₁ : Type u₁} [decidable_eq ι₁] {β₁ : ι₁ → Type v₁}
[Π i₁, has_zero (β₁ i₁)] [Π i (x : β₁ i), decidable (x ≠ 0)]
[Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
{f : Π₀ i₁, β₁ i₁} {g : Π i₁, β₁ i₁ → Π₀ i, β i} :
(f.sum g).support ⊆ f.support.bUnion (λi, (g i (f i)).support) :=
have ∀i₁ : ι, f.sum (λ (i : ι₁) (b : β₁ i), (g i b) i₁) ≠ 0 →
(∃ (i : ι₁), f i ≠ 0 ∧ ¬ (g i (f i)) i₁ = 0),
from assume i₁ h,
let ⟨i, hi, ne⟩ := finset.exists_ne_zero_of_sum_ne_zero h in
⟨i, (f.mem_support_iff i).mp hi, ne⟩,
by simpa [finset.subset_iff, mem_support_iff, finset.mem_bUnion, sum_apply] using this
@[simp, to_additive] lemma prod_one [Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ] {f : Π₀ i, β i} :
f.prod (λi b, (1 : γ)) = 1 :=
finset.prod_const_one
@[simp, to_additive] lemma prod_mul [Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ] {f : Π₀ i, β i} {h₁ h₂ : Π i, β i → γ} :
f.prod (λi b, h₁ i b * h₂ i b) = f.prod h₁ * f.prod h₂ :=
finset.prod_mul_distrib
@[simp, to_additive] lemma prod_inv [Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_group γ] {f : Π₀ i, β i} {h : Π i, β i → γ} :
f.prod (λi b, (h i b)⁻¹) = (f.prod h)⁻¹ :=
f.support.prod_hom (@has_inv.inv γ _)
@[to_additive]
lemma prod_add_index [Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ] {f g : Π₀ i, β i}
{h : Π i, β i → γ} (h_zero : ∀i, h i 0 = 1) (h_add : ∀i b₁ b₂, h i (b₁ + b₂) = h i b₁ * h i b₂) :
(f + g).prod h = f.prod h * g.prod h :=
have f_eq : ∏ i in f.support ∪ g.support, h i (f i) = f.prod h,
from (finset.prod_subset (finset.subset_union_left _ _) $
by simp [mem_support_iff, h_zero] {contextual := tt}).symm,
have g_eq : ∏ i in f.support ∪ g.support, h i (g i) = g.prod h,
from (finset.prod_subset (finset.subset_union_right _ _) $
by simp [mem_support_iff, h_zero] {contextual := tt}).symm,
calc ∏ i in (f + g).support, h i ((f + g) i) =
∏ i in f.support ∪ g.support, h i ((f + g) i) :
finset.prod_subset support_add $
by simp [mem_support_iff, h_zero] {contextual := tt}
... = (∏ i in f.support ∪ g.support, h i (f i)) *
(∏ i in f.support ∪ g.support, h i (g i)) :
by simp [h_add, finset.prod_mul_distrib]
... = _ : by rw [f_eq, g_eq]
@[to_additive]
lemma _root_.submonoid.dfinsupp_prod_mem [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ] (S : submonoid γ)
(f : Π₀ i, β i) (g : Π i, β i → γ) (h : ∀ c, f c ≠ 0 → g c (f c) ∈ S) : f.prod g ∈ S :=
S.prod_mem $ λ i hi, h _ $ (f.mem_support_iff _).mp hi
/--
When summing over an `add_monoid_hom`, the decidability assumption is not needed, and the result is
also an `add_monoid_hom`.
-/
def sum_add_hom [Π i, add_zero_class (β i)] [add_comm_monoid γ] (φ : Π i, β i →+ γ) :
(Π₀ i, β i) →+ γ :=
{ to_fun := (λ f,
quotient.lift_on f (λ x, ∑ i in x.2.to_finset, φ i (x.1 i)) $ λ x y H,
begin
have H1 : x.2.to_finset ∩ y.2.to_finset ⊆ x.2.to_finset, from finset.inter_subset_left _ _,
have H2 : x.2.to_finset ∩ y.2.to_finset ⊆ y.2.to_finset, from finset.inter_subset_right _ _,
refine (finset.sum_subset H1 _).symm.trans
((finset.sum_congr rfl _).trans (finset.sum_subset H2 _)),
{ intros i H1 H2, rw finset.mem_inter at H2, rw H i,
simp only [multiset.mem_to_finset] at H1 H2,
rw [(y.3 i).resolve_left (mt (and.intro H1) H2), add_monoid_hom.map_zero] },
{ intros i H1, rw H i },
{ intros i H1 H2, rw finset.mem_inter at H2, rw ← H i,
simp only [multiset.mem_to_finset] at H1 H2,
rw [(x.3 i).resolve_left (mt (λ H3, and.intro H3 H1) H2), add_monoid_hom.map_zero] }
end),
map_add' := assume f g,
begin
refine quotient.induction_on f (λ x, _),
refine quotient.induction_on g (λ y, _),
change ∑ i in _, _ = (∑ i in _, _) + (∑ i in _, _),
simp only, conv { to_lhs, congr, skip, funext, rw add_monoid_hom.map_add },
simp only [finset.sum_add_distrib],
congr' 1,
{ refine (finset.sum_subset _ _).symm,
{ intro i, simp only [multiset.mem_to_finset, multiset.mem_add], exact or.inl },
{ intros i H1 H2, simp only [multiset.mem_to_finset, multiset.mem_add] at H2,
rw [(x.3 i).resolve_left H2, add_monoid_hom.map_zero] } },
{ refine (finset.sum_subset _ _).symm,
{ intro i, simp only [multiset.mem_to_finset, multiset.mem_add], exact or.inr },
{ intros i H1 H2, simp only [multiset.mem_to_finset, multiset.mem_add] at H2,
rw [(y.3 i).resolve_left H2, add_monoid_hom.map_zero] } }
end,
map_zero' := rfl }
@[simp] lemma sum_add_hom_single [Π i, add_zero_class (β i)] [add_comm_monoid γ]
(φ : Π i, β i →+ γ) (i) (x : β i) : sum_add_hom φ (single i x) = φ i x :=
(add_zero _).trans $ congr_arg (φ i) $ show (if H : i ∈ ({i} : finset _) then x else 0) = x,
from dif_pos $ finset.mem_singleton_self i
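/- A minimal illustrative use of `sum_add_hom_single` above; the statement mirrors the lemma
exactly, so no new assumptions are introduced. -/
example [Π i, add_zero_class (β i)] [add_comm_monoid γ]
  (φ : Π i, β i →+ γ) (i : ι) (x : β i) :
  sum_add_hom φ (single i x) = φ i x :=
sum_add_hom_single φ i x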
@[simp] lemma sum_add_hom_comp_single [Π i, add_zero_class (β i)] [add_comm_monoid γ]
(f : Π i, β i →+ γ) (i : ι) :
(sum_add_hom f).comp (single_add_hom β i) = f i :=
add_monoid_hom.ext $ λ x, sum_add_hom_single f i x
/-- While we didn't need decidable instances to define it, we do to reduce it to a sum -/
lemma sum_add_hom_apply [Π i, add_zero_class (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[add_comm_monoid γ] (φ : Π i, β i →+ γ) (f : Π₀ i, β i) :
sum_add_hom φ f = f.sum (λ x, φ x) :=
begin
refine quotient.induction_on f (λ x, _),
change ∑ i in _, _ = (∑ i in finset.filter _ _, _),
rw [finset.sum_filter, finset.sum_congr rfl],
intros i _,
dsimp only,
split_ifs,
refl,
rw [(not_not.mp h), add_monoid_hom.map_zero],
end
lemma _root_.add_submonoid.dfinsupp_sum_add_hom_mem [Π i, add_zero_class (β i)] [add_comm_monoid γ]
(S : add_submonoid γ) (f : Π₀ i, β i) (g : Π i, β i →+ γ) (h : ∀ c, f c ≠ 0 → g c (f c) ∈ S) :
dfinsupp.sum_add_hom g f ∈ S :=
begin
classical,
rw dfinsupp.sum_add_hom_apply,
convert S.dfinsupp_sum_mem _ _ _,
exact h
end
/-- The supremum of a family of commutative additive submonoids is equal to the range of
`dfinsupp.sum_add_hom`; that is, every element in the `supr` can be produced from taking a finite
number of non-zero elements of `p i`, coercing them to `γ`, and summing them. -/
lemma _root_.add_submonoid.supr_eq_mrange_dfinsupp_sum_add_hom [add_comm_monoid γ]
(p : ι → add_submonoid γ) : supr p = (dfinsupp.sum_add_hom (λ i, (p i).subtype)).mrange :=
begin
apply le_antisymm,
{ apply supr_le _,
intros i y hy,
exact ⟨dfinsupp.single i ⟨y, hy⟩, dfinsupp.sum_add_hom_single _ _ _⟩, },
{ rintros x ⟨v, rfl⟩,
exact add_submonoid.dfinsupp_sum_add_hom_mem _ v _ (λ i _, (le_supr p i : p i ≤ _) (v i).prop) }
end
lemma _root_.add_submonoid.mem_supr_iff_exists_dfinsupp [add_comm_monoid γ]
(p : ι → add_submonoid γ) (x : γ) :
x ∈ supr p ↔ ∃ f : Π₀ i, p i, dfinsupp.sum_add_hom (λ i, (p i).subtype) f = x :=
set_like.ext_iff.mp (add_submonoid.supr_eq_mrange_dfinsupp_sum_add_hom p) x
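/- An illustrative consequence of the lemma above: any element of `p i` lies in `supr p`,
witnessed by the same single-point `dfinsupp` used in the proof of
`add_submonoid.supr_eq_mrange_dfinsupp_sum_add_hom`; nothing beyond the lemmas above is assumed. -/
example [add_comm_monoid γ] (p : ι → add_submonoid γ) (i : ι) (y : γ) (hy : y ∈ p i) :
  y ∈ supr p :=
(add_submonoid.mem_supr_iff_exists_dfinsupp p y).2
  ⟨dfinsupp.single i ⟨y, hy⟩, dfinsupp.sum_add_hom_single _ _ _⟩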
/-- A variant of `add_submonoid.mem_supr_iff_exists_dfinsupp` with the RHS fully unfolded. -/
lemma _root_.add_submonoid.mem_supr_iff_exists_dfinsupp' [add_comm_monoid γ]
(p : ι → add_submonoid γ) [Π i (x : p i), decidable (x ≠ 0)] (x : γ) :
x ∈ supr p ↔ ∃ f : Π₀ i, p i, f.sum (λ i xi, ↑xi) = x :=
begin
rw add_submonoid.mem_supr_iff_exists_dfinsupp,
simp_rw sum_add_hom_apply,
congr',
end
omit dec
lemma sum_add_hom_comm {ι₁ ι₂ : Sort*} {β₁ : ι₁ → Type*} {β₂ : ι₂ → Type*} {γ : Type*}
[decidable_eq ι₁] [decidable_eq ι₂] [Π i, add_zero_class (β₁ i)] [Π i, add_zero_class (β₂ i)]
[add_comm_monoid γ]
(f₁ : Π₀ i, β₁ i) (f₂ : Π₀ i, β₂ i) (h : Π i j, β₁ i →+ β₂ j →+ γ) :
sum_add_hom (λ i₂, sum_add_hom (λ i₁, h i₁ i₂) f₁) f₂ =
sum_add_hom (λ i₁, sum_add_hom (λ i₂, (h i₁ i₂).flip) f₂) f₁ :=
begin
refine quotient.induction_on₂ f₁ f₂ (λ x₁ x₂, _),
simp only [sum_add_hom, add_monoid_hom.finset_sum_apply, quotient.lift_on_mk,
add_monoid_hom.coe_mk, add_monoid_hom.flip_apply],
exact finset.sum_comm,
end
include dec
/-- The `dfinsupp` version of `finsupp.lift_add_hom`. -/
@[simps apply symm_apply]
def lift_add_hom [Π i, add_zero_class (β i)] [add_comm_monoid γ] :
(Π i, β i →+ γ) ≃+ ((Π₀ i, β i) →+ γ) :=
{ to_fun := sum_add_hom,
inv_fun := λ F i, F.comp (single_add_hom β i),
left_inv := λ x, by { ext, simp },
right_inv := λ ψ, by { ext, simp },
map_add' := λ F G, by { ext, simp } }
/-- The `dfinsupp` version of `finsupp.lift_add_hom_single_add_hom`. -/
@[simp] lemma lift_add_hom_single_add_hom [Π i, add_comm_monoid (β i)] :
lift_add_hom (single_add_hom β) = add_monoid_hom.id (Π₀ i, β i) :=
lift_add_hom.to_equiv.apply_eq_iff_eq_symm_apply.2 rfl
/-- The `dfinsupp` version of `finsupp.lift_add_hom_apply_single`. -/
lemma lift_add_hom_apply_single [Π i, add_zero_class (β i)] [add_comm_monoid γ]
(f : Π i, β i →+ γ) (i : ι) (x : β i) :
lift_add_hom f (single i x) = f i x :=
by simp
/-- The `dfinsupp` version of `finsupp.lift_add_hom_comp_single`. -/
lemma lift_add_hom_comp_single [Π i, add_zero_class (β i)] [add_comm_monoid γ]
(f : Π i, β i →+ γ) (i : ι) :
(lift_add_hom f).comp (single_add_hom β i) = f i :=
by simp
/-- The `dfinsupp` version of `finsupp.comp_lift_add_hom`. -/
lemma comp_lift_add_hom {δ : Type*} [Π i, add_zero_class (β i)] [add_comm_monoid γ]
[add_comm_monoid δ] (g : γ →+ δ) (f : Π i, β i →+ γ) :
g.comp (lift_add_hom f) = lift_add_hom (λ a, g.comp (f a)) :=
lift_add_hom.symm_apply_eq.1 $ funext $ λ a,
by rw [lift_add_hom_symm_apply, add_monoid_hom.comp_assoc, lift_add_hom_comp_single]
@[simp]
lemma sum_add_hom_zero [Π i, add_zero_class (β i)] [add_comm_monoid γ] :
sum_add_hom (λ i, (0 : β i →+ γ)) = 0 :=
(lift_add_hom : (Π i, β i →+ γ) ≃+ _).map_zero
@[simp]
lemma sum_add_hom_add [Π i, add_zero_class (β i)] [add_comm_monoid γ]
(g : Π i, β i →+ γ) (h : Π i, β i →+ γ) :
sum_add_hom (λ i, g i + h i) = sum_add_hom g + sum_add_hom h :=
lift_add_hom.map_add _ _
@[simp]
lemma sum_add_hom_single_add_hom [Π i, add_comm_monoid (β i)] :
sum_add_hom (single_add_hom β) = add_monoid_hom.id _ :=
lift_add_hom_single_add_hom
lemma comp_sum_add_hom {δ : Type*} [Π i, add_zero_class (β i)] [add_comm_monoid γ]
[add_comm_monoid δ] (g : γ →+ δ) (f : Π i, β i →+ γ) :
g.comp (sum_add_hom f) = sum_add_hom (λ a, g.comp (f a)) :=
comp_lift_add_hom _ _
lemma sum_sub_index [Π i, add_group (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[add_comm_group γ] {f g : Π₀ i, β i}
{h : Π i, β i → γ} (h_sub : ∀i b₁ b₂, h i (b₁ - b₂) = h i b₁ - h i b₂) :
(f - g).sum h = f.sum h - g.sum h :=
begin
have := (lift_add_hom (λ a, add_monoid_hom.of_map_sub (h a) (h_sub a))).map_sub f g,
rw [lift_add_hom_apply, sum_add_hom_apply, sum_add_hom_apply, sum_add_hom_apply] at this,
exact this,
end
@[to_additive]
lemma prod_finset_sum_index {γ : Type w} {α : Type x}
[Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ]
{s : finset α} {g : α → Π₀ i, β i}
{h : Π i, β i → γ} (h_zero : ∀i, h i 0 = 1) (h_add : ∀i b₁ b₂, h i (b₁ + b₂) = h i b₁ * h i b₂) :
∏ i in s, (g i).prod h = (∑ i in s, g i).prod h :=
begin
classical,
exact finset.induction_on s
(by simp [prod_zero_index])
(by simp [prod_add_index, h_zero, h_add] {contextual := tt})
end
@[to_additive]
lemma prod_sum_index {ι₁ : Type u₁} [decidable_eq ι₁] {β₁ : ι₁ → Type v₁}
[Π i₁, has_zero (β₁ i₁)] [Π i (x : β₁ i), decidable (x ≠ 0)]
[Π i, add_comm_monoid (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ]
{f : Π₀ i₁, β₁ i₁} {g : Π i₁, β₁ i₁ → Π₀ i, β i}
{h : Π i, β i → γ} (h_zero : ∀i, h i 0 = 1) (h_add : ∀i b₁ b₂, h i (b₁ + b₂) = h i b₁ * h i b₂) :
(f.sum g).prod h = f.prod (λi b, (g i b).prod h) :=
(prod_finset_sum_index h_zero h_add).symm
@[simp] lemma sum_single [Π i, add_comm_monoid (β i)]
[Π i (x : β i), decidable (x ≠ 0)] {f : Π₀ i, β i} :
f.sum single = f :=
begin
have := add_monoid_hom.congr_fun lift_add_hom_single_add_hom f,
rw [lift_add_hom_apply, sum_add_hom_apply] at this,
exact this,
end
@[to_additive]
lemma prod_subtype_domain_index [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)]
[comm_monoid γ] {v : Π₀ i, β i} {p : ι → Prop} [decidable_pred p]
{h : Π i, β i → γ} (hp : ∀ x ∈ v.support, p x) :
(v.subtype_domain p).prod (λi b, h i b) = v.prod h :=
finset.prod_bij (λp _, p)
(by simp) (by simp)
(assume ⟨a₀, ha₀⟩ ⟨a₁, ha₁⟩, by simp)
(λ i hi, ⟨⟨i, hp i hi⟩, by simpa using hi, rfl⟩)
omit dec
lemma subtype_domain_sum [Π i, add_comm_monoid (β i)]
{s : finset γ} {h : γ → Π₀ i, β i} {p : ι → Prop} [decidable_pred p] :
(∑ c in s, h c).subtype_domain p = ∑ c in s, (h c).subtype_domain p :=
eq.symm (s.sum_hom _)
lemma subtype_domain_finsupp_sum {δ : γ → Type x} [decidable_eq γ]
[Π c, has_zero (δ c)] [Π c (x : δ c), decidable (x ≠ 0)]
[Π i, add_comm_monoid (β i)]
{p : ι → Prop} [decidable_pred p]
{s : Π₀ c, δ c} {h : Π c, δ c → Π₀ i, β i} :
(s.sum h).subtype_domain p = s.sum (λc d, (h c d).subtype_domain p) :=
subtype_domain_sum
end prod_and_sum
/-! ### Bundled versions of `dfinsupp.map_range`
The names should match the equivalent bundled `finsupp.map_range` definitions.
-/
section map_range
omit dec
variables [Π i, add_zero_class (β i)] [Π i, add_zero_class (β₁ i)] [Π i, add_zero_class (β₂ i)]
lemma map_range_add (f : Π i, β₁ i → β₂ i) (hf : ∀ i, f i 0 = 0)
(hf' : ∀ i x y, f i (x + y) = f i x + f i y) (g₁ g₂ : Π₀ i, β₁ i):
map_range f hf (g₁ + g₂) = map_range f hf g₁ + map_range f hf g₂ :=
begin
ext,
simp only [map_range_apply f, coe_add, pi.add_apply, hf']
end
/-- `dfinsupp.map_range` as an `add_monoid_hom`. -/
@[simps apply]
def map_range.add_monoid_hom (f : Π i, β₁ i →+ β₂ i) : (Π₀ i, β₁ i) →+ (Π₀ i, β₂ i) :=
{ to_fun := map_range (λ i x, f i x) (λ i, (f i).map_zero),
map_zero' := map_range_zero _ _,
map_add' := map_range_add _ _ (λ i, (f i).map_add) }
@[simp]
lemma map_range.add_monoid_hom_id :
map_range.add_monoid_hom (λ i, add_monoid_hom.id (β₂ i)) = add_monoid_hom.id _ :=
add_monoid_hom.ext map_range_id
lemma map_range.add_monoid_hom_comp (f : Π i, β₁ i →+ β₂ i) (f₂ : Π i, β i →+ β₁ i):
map_range.add_monoid_hom (λ i, (f i).comp (f₂ i)) =
(map_range.add_monoid_hom f).comp (map_range.add_monoid_hom f₂) :=
add_monoid_hom.ext $ map_range_comp (λ i x, f i x) (λ i x, f₂ i x) _ _ _
/-- `dfinsupp.map_range.add_monoid_hom` as an `add_equiv`. -/
@[simps apply]
def map_range.add_equiv (e : Π i, β₁ i ≃+ β₂ i) : (Π₀ i, β₁ i) ≃+ (Π₀ i, β₂ i) :=
{ to_fun := map_range (λ i x, e i x) (λ i, (e i).map_zero),
inv_fun := map_range (λ i x, (e i).symm x) (λ i, (e i).symm.map_zero),
left_inv := λ x, by rw ←map_range_comp; { simp_rw add_equiv.symm_comp_self, simp },
right_inv := λ x, by rw ←map_range_comp; { simp_rw add_equiv.self_comp_symm, simp },
.. map_range.add_monoid_hom (λ i, (e i).to_add_monoid_hom) }
@[simp]
lemma map_range.add_equiv_refl :
(map_range.add_equiv $ λ i, add_equiv.refl (β₁ i)) = add_equiv.refl _ :=
add_equiv.ext map_range_id
lemma map_range.add_equiv_trans (f : Π i, β i ≃+ β₁ i) (f₂ : Π i, β₁ i ≃+ β₂ i):
map_range.add_equiv (λ i, (f i).trans (f₂ i)) =
(map_range.add_equiv f).trans (map_range.add_equiv f₂) :=
add_equiv.ext $ map_range_comp (λ i x, f₂ i x) (λ i x, f i x) _ _ _
@[simp]
lemma map_range.add_equiv_symm (e : Π i, β₁ i ≃+ β₂ i) :
(map_range.add_equiv e).symm = map_range.add_equiv (λ i, (e i).symm) := rfl
end map_range
end dfinsupp
/-! ### Product and sum lemmas for bundled morphisms -/
section
variables [decidable_eq ι]
namespace monoid_hom
variables {R S : Type*}
variables [Π i, has_zero (β i)] [Π i (x : β i), decidable (x ≠ 0)]
@[simp, to_additive]
lemma map_dfinsupp_prod [comm_monoid R] [comm_monoid S]
(h : R →* S) (f : Π₀ i, β i) (g : Π i, β i → R) :
h (f.prod g) = f.prod (λ a b, h (g a b)) := h.map_prod _ _
@[to_additive]
lemma coe_dfinsupp_prod [monoid R] [comm_monoid S]
(f : Π₀ i, β i) (g : Π i, β i → R →* S) :
⇑(f.prod g) = f.prod (λ a b, (g a b)) := coe_prod _ _
@[simp, to_additive]
lemma dfinsupp_prod_apply [monoid R] [comm_monoid S]
(f : Π₀ i, β i) (g : Π i, β i → R →* S) (r : R) :
(f.prod g) r = f.prod (λ a b, (g a b) r) := finset_prod_apply _ _ _
end monoid_hom
namespace add_monoid_hom
variables {R S : Type*}
open dfinsupp
/-! The above lemmas, repeated for `dfinsupp.sum_add_hom`. -/
@[simp]
lemma map_dfinsupp_sum_add_hom [add_comm_monoid R] [add_comm_monoid S] [Π i, add_comm_monoid (β i)]
(h : R →+ S) (f : Π₀ i, β i) (g : Π i, β i →+ R) :
h (sum_add_hom g f) = sum_add_hom (λ i, h.comp (g i)) f :=
congr_fun (comp_lift_add_hom h g) f
@[simp]
lemma dfinsupp_sum_add_hom_apply [add_zero_class R] [add_comm_monoid S] [Π i, add_comm_monoid (β i)]
(f : Π₀ i, β i) (g : Π i, β i →+ R →+ S) (r : R) :
(sum_add_hom g f) r = sum_add_hom (λ i, (eval r).comp (g i)) f :=
map_dfinsupp_sum_add_hom (eval r) f g
lemma coe_dfinsupp_sum_add_hom [add_zero_class R] [add_comm_monoid S] [Π i, add_comm_monoid (β i)]
(f : Π₀ i, β i) (g : Π i, β i →+ R →+ S) :
⇑(sum_add_hom g f) = sum_add_hom (λ i, (coe_fn R S).comp (g i)) f :=
map_dfinsupp_sum_add_hom (coe_fn R S) f g
end add_monoid_hom
end
|
959a7925ec2602c1dbc883b7e2e8142d0509abc3
|
36938939954e91f23dec66a02728db08a7acfcf9
|
/lean/deps/x86_semantics/src/x86_semantics/sexpr.lean
|
9e754f569e3ed4839b0c27f4f0d8e33f293317d5
|
[
"Apache-2.0"
] |
permissive
|
pnwamk/reopt-vcg
|
f8b56dd0279392a5e1c6aee721be8138e6b558d3
|
c9f9f185fbefc25c36c4b506bbc85fd1a03c3b6d
|
refs/heads/master
| 1,631,145,017,772
| 1,593,549,019,000
| 1,593,549,143,000
| 254,191,418
| 0
| 0
| null | 1,586,377,077,000
| 1,586,377,077,000
| null |
UTF-8
|
Lean
| false
| false
| 10,830
|
lean
|
import .common
namespace mc_semantics
namespace sexpr_rep
def symbol := string
instance : has_append symbol := ⟨string.append⟩
/- A atomic expression within an s-expression. -/
inductive atom
| symbol : symbol → atom
| numeral : ℕ → atom
| string : string → atom
protected
def repr : atom → string
| (atom.symbol s) := s
| (atom.numeral n) := n.repr
| (atom.string s) := "\"" ++ s ++ "\""
protected
def symbol.sexpr : symbol → sexpr atom
| s := sexpr.mk_atom (atom.symbol s)
protected
def numeral.sexpr : ℕ → sexpr atom
| n := sexpr.mk_atom (atom.numeral n)
protected
def string.sexpr : string → sexpr atom
| s := sexpr.mk_atom (atom.string s)
protected
def to_char_buffer : atom → char_buffer
| (atom.symbol s) := s.to_char_buffer
| (atom.numeral s) := s.repr.to_char_buffer
| (atom.string s) := ("\"" ++ s ++ "\"").to_char_buffer
/-- Symbol characters allowed in simple symbols -/
def char_symbols : list char := ['$', '_']
/-- Predicate that checks if character is allowed in a simple symbol. -/
inductive is_symbol_char (c:char) : Prop
| is_alpha : char.is_alpha c → is_symbol_char
| is_digit : char.is_digit c → is_symbol_char
| is_other : c ∈ char_symbols → is_symbol_char
namespace is_symbol_char
instance is_decidable : decidable_pred is_symbol_char
| c :=
if f : c.is_alpha then
is_true (is_alpha f)
else if g : c.is_digit then
is_true (is_digit g)
else if h : c ∈ char_symbols then
is_true (is_other h)
else
is_false begin intro p, cases p; contradiction, end
end is_symbol_char
protected
def read {m} [char_reader string m] (read_count:ℕ) : m atom := do
mc ← char_reader.peek_char,
match mc with
| option.none := throw "Unexpected end of stream."
| option.some '\"' := do
char_reader.consume_char,
b ← char_reader.read_while (λc, c ≠ '\"') read_count,
char_reader.consume_char,
pure (atom.string b.to_string)
| option.some c :=
if c.is_digit then (do
b ← char_reader.read_while char.is_digit read_count,
pure (atom.numeral b.to_string.to_nat))
else if is_symbol_char c then (do
-- Read symbol characters
b ← char_reader.read_while is_symbol_char read_count,
pure (atom.symbol b.to_string))
else
throw $ "Unexpected character " ++ c.to_string
end
instance atom_is_atom : sexpr.is_atom atom :=
{ to_char_buffer := sexpr_rep.to_char_buffer
, read := @sexpr_rep.read
}
def app (s:symbol) (l:list (sexpr atom)) := sexpr.app s.sexpr l
end sexpr_rep
open sexpr_rep
def arg_index.sexpr (idx:arg_index) : sexpr atom :=
app "arg" [numeral.sexpr idx]
namespace nat_expr
protected def sexpr : nat_expr → sexpr atom
| (lit x) := numeral.sexpr x
| (var x) := x.sexpr
| (add x y) := app "add" [x.sexpr, y.sexpr]
| (sub x y) := app "sub" [x.sexpr, y.sexpr]
| (mul x y) := app "mul" [x.sexpr, y.sexpr]
| (div x y) := app "div" [x.sexpr, y.sexpr]
instance : has_repr nat_expr := ⟨sexpr.repr ∘ nat_expr.sexpr⟩
end nat_expr
namespace one_of
protected def pp {l:list ℕ} : one_of l → string
| (var i) := i.sexpr.repr
protected def sexpr {l:list ℕ} (x:one_of l) := x.to_nat_expr.sexpr
end one_of
namespace type
protected
def sexpr' : Π(in_fun:bool), type → sexpr atom
| _ (bv w) := app "bv" [w.sexpr]
| _ bit := symbol.sexpr "bit"
| _ float := symbol.sexpr "float"
| _ double := symbol.sexpr "double"
| _ x86_80 := symbol.sexpr "x86_80"
| _ (vec w tp) := app "vec" [w.sexpr, tp.sexpr' ff]
| _ (pair x y) := app "pair" [x.sexpr' ff, y.sexpr' ff]
| in_fun (fn a r) :=
if in_fun then
sexpr.parens [a.sexpr' ff, r.sexpr' tt]
else
app "fun" [a.sexpr' ff, r.sexpr' tt]
protected
def sexpr : type → sexpr atom := type.sexpr' ff
protected
def pp : type → string := sexpr.repr ∘ type.sexpr
end type
end mc_semantics
namespace x86
open mc_semantics
open mc_semantics.sexpr_rep
open mc_semantics.type
namespace reg
/-
protected def sexpr : Π{tp:type}, reg tp → sexpr atom
| ._ (concrete_gpreg idx tp) := symbol.sexpr $ "$" ++
match tp with
| gpreg_type.reg8l := list.nth_le reg.r8l_names idx.val idx.is_lt
| gpreg_type.reg16 := list.nth_le reg.r16_names idx.val idx.is_lt
| gpreg_type.reg32 := list.nth_le reg.r32_names idx.val idx.is_lt
| gpreg_type.reg64 := list.nth_le reg.r64_names idx.val idx.is_lt
end
| ._ (concrete_flagreg idx) := symbol.sexpr $ "$" ++
match list.nth reg.flag_names idx.val with
| (option.some nm) := nm
| option.none := "REVERSED_" ++ idx.val.repr
end
-/
--protected def repr : Π{tp:type}, reg tp → string := λ_, sexpr.repr ∘ reg.sexpr
end reg
namespace addr
protected def sexpr {tp:type} : addr tp → sexpr atom
| (arg idx) := idx.sexpr
end addr
namespace prim
def sexpr : Π{tp:type}, prim tp → sexpr atom
| ._ (eq tp) := app "eq" [tp.sexpr]
| ._ (neq tp) := app "neq" [tp.sexpr]
| ._ (mux tp) := app "mux" [tp.sexpr]
| ._ bit_zero := symbol.sexpr "bit0"
| ._ bit_one := symbol.sexpr "bit1"
| ._ bit_or := symbol.sexpr "bit_or"
| ._ bit_and := symbol.sexpr "bit_and"
| ._ bit_xor := symbol.sexpr "bit_xor"
| ._ (bv_nat w n) := app "bv_nat" [w.sexpr, n.sexpr]
| ._ (add i) := app "add" [i.sexpr]
| ._ (adc i) := app "adc" [i.sexpr]
| ._ (uadc_overflows i) := app "uadc_overflows" [i.sexpr]
| ._ (sadc_overflows i) := app "sadc_overflows" [i.sexpr]
| ._ (sub i) := app "sub" [i.sexpr]
| ._ (ssbb_overflows i) := app "ssbb_overflows" [i.sexpr]
| ._ (usbb_overflows i) := app "usbb_overflows" [i.sexpr]
| ._ (neg tp) := app "neg" [tp.sexpr]
| ._ (mul i) := app "mul" [i.sexpr]
| ._ (quotRem i) := app "quotRem" [i.sexpr]
| ._ (squotRem i) := app "squotRem" [i.sexpr]
| ._ (ule i) := app "ule" [i.sexpr]
| ._ (ult i) := app "ult" [i.sexpr]
| ._ (slice w u l) := app "slice" [w.sexpr, u.sexpr, l.sexpr]
| ._ (sext i o) := app "sext" [i.sexpr, o.sexpr]
| ._ (uext i o) := app "uext" [i.sexpr, o.sexpr]
| ._ (trunc i o) := app "trunc" [i.sexpr, o.sexpr]
| ._ (cat i) := app "cat" [i.sexpr]
| ._ (msb i) := app "msb" [i.sexpr]
| ._ (bv_and i) := app "bv_and" [i.sexpr]
| ._ (bv_or i) := app "bv_or" [i.sexpr]
| ._ (bv_xor i) := app "bv_xor" [i.sexpr]
| ._ (bv_complement i) := app "bv_complement" [i.sexpr]
| ._ (shl i) := app "shl" [i.sexpr]
| ._ (shl_carry i) := app "shl_carry" [i.sexpr]
| ._ (shr i) := app "shr" [i.sexpr]
| ._ (shr_carry i) := app "shr_carry" [i.sexpr]
| ._ (sar i) := app "sar" [i.sexpr]
| ._ (sar_carry i) := app "sar_carry" [i.sexpr]
| ._ (even_parity i) := app "even_parity" [i.sexpr]
| ._ (bsf i) := app "bsf" [i.sexpr]
| ._ (bsr i) := app "bsr" [i.sexpr]
| ._ (bswap i) := app "bswap" [i.sexpr]
| ._ (btc w j) := app "btc" [w.sexpr, j.sexpr]
| ._ (btr w j) := app "btr" [w.sexpr, j.sexpr]
| ._ (bts w j) := app "bts" [w.sexpr, j.sexpr]
| ._ (bv_to_x86_80 w) := app "sext" [w.sexpr]
| ._ float_to_x86_80 := symbol.sexpr "float_to_x86_80"
| ._ double_to_x86_80 := symbol.sexpr "double_to_X86_80"
| ._ x87_fadd := symbol.sexpr "x87_fadd"
| ._ (pair_fst x y) := app "pair_fst" [x.sexpr, y.sexpr]
| ._ (pair_snd x y) := app "pair_snd" [x.sexpr, y.sexpr]
end prim
namespace gpreg_type
protected def sexpr : gpreg_type → sexpr atom
| reg8l := symbol.sexpr "reg8l"
| reg8h := symbol.sexpr "reg8h"
| reg16 := symbol.sexpr "reg16"
| reg32 := symbol.sexpr "reg32"
| reg64 := symbol.sexpr "reg64"
end gpreg_type
namespace concrete_reg
protected def sexpr : Π{tp:type}, concrete_reg tp → sexpr atom
| ._ (gpreg i tp) := app "gpreg" [numeral.sexpr i.val, tp.sexpr]
| ._ (flagreg i) := app "flagreg" [numeral.sexpr i.val]
end concrete_reg
namespace expression
protected
def sexpr' : Π{tp:type} (v:expression tp), list (sexpr atom) → sexpr atom
| ._ (primitive o) l := sexpr.app o.sexpr l
| ._ (@bit_test wr wi r i) l := sexpr_rep.app "bit_test" [wr.sexpr, wi.sexpr, r.sexpr' [], i.sexpr' []]
| ._ (@mulc m x) l := sexpr_rep.app "mulc" [m.sexpr, x.sexpr' []]
| ._ (@quotc m x) l := sexpr_rep.app "quotc" [m.sexpr, x.sexpr' []]
| ._ (@undef tp) l := sexpr_rep.app "undef" [tp.sexpr]
| ._ (@app _ _ f x) l := sexpr' f (x.sexpr' [] :: l)
| ._ (@get_reg _ r) l := sexpr_rep.app "get_reg" [r.sexpr]
| ._ (@read tp a) l := sexpr_rep.app "read" [tp.sexpr, a.sexpr' []]
| ._ (@streg i) l := sexpr_rep.app "streg" [numeral.sexpr i.val]
| ._ (@get_local i tp) l := sexpr_rep.app "read" [numeral.sexpr i, tp.sexpr]
| ._ (@imm_arg i tp) l := sexpr_rep.app "imm_arg" [i.sexpr, tp.sexpr]
| ._ (@addr_arg i) l := sexpr_rep.app "addr_arg" [i.sexpr]
| ._ (@read_arg i tp) l := sexpr_rep.app "read_arg" [i.sexpr, tp.sexpr]
protected def sexpr {tp:type} (e:expression tp) : sexpr atom := e.sexpr' []
end expression
namespace lhs
protected def sexpr : Π{tp:type}, lhs tp → sexpr atom
| ._ (set_reg r) := app "set_reg" [r.sexpr]
| ._ (write_addr a tp) := app "write_addr" [a.sexpr, tp.sexpr]
| ._ (write_arg idx tp) := app "write_arg" [idx.sexpr, tp.sexpr]
| ._ (streg idx) := app "streg" [numeral.sexpr idx.val]
end lhs
namespace event
protected def sexpr : event → sexpr atom
| syscall := app "syscall" []
| (unsupported msg) := app "unsupported" [string.sexpr msg]
| pop_x87_register_stack := app "pop_x87_register_stack" []
| (call addr) := app "call" [addr.sexpr]
| (jmp addr) := app "jmp" [addr.sexpr]
| (branch cond addr) := app "branch" [cond.sexpr, addr.sexpr]
| hlt := app "hlt" []
| (xchg addr1 addr2) := app "xchg" [addr1.sexpr, addr2.sexpr]
| cpuid := app "cpuid" []
end event
namespace action
protected def sexpr : action → sexpr atom
| (set l r) := app "set" [l.sexpr, r.sexpr]
| (set_cond l c v) := app "set_cond" [l.sexpr, c.sexpr, v.sexpr]
| (set_aligned l r a) := app "set_aligned" [l.sexpr, r.sexpr, a.sexpr]
| (local_def idx v) := app "var" [numeral.sexpr idx, v.sexpr]
| (event e) := e.sexpr
end action
namespace binding
def sexpr : binding → sexpr atom
| (one_of l) := app "one_of" (numeral.sexpr <$> l)
| (reg tp) := app "reg" [tp.sexpr]
| (addr tp) := app "addr" [tp.sexpr]
| (imm tp) := app "imm" [tp.sexpr]
| (lhs tp) := app "lhs" [tp.sexpr]
| (expression tp) := app "expr" [tp.sexpr]
--def pp : binding → string := sexpr.repr ∘ binding.sexpr
end binding
namespace pattern
private
def sexpr_bindings : list binding → list (sexpr atom) → list (sexpr atom)
| [] r := r
| (b::r) l := sexpr_bindings r (b.sexpr :: l)
protected
def sexpr (p:pattern) : sexpr atom
:= app "pattern" $ sexpr.parens (sexpr_bindings p.context.bindings [])
:: (action.sexpr <$> p.actions)
end pattern
namespace instruction
def sexpr (i:instruction) : sexpr atom :=
app "instruction" (symbol.sexpr i.mnemonic :: pattern.sexpr <$> i.patterns)
def repr : instruction → string := sexpr.repr ∘ instruction.sexpr
instance : has_repr instruction := ⟨instruction.repr⟩
end instruction
end x86
|
85e7b7e4a7a0bd3ebe24c8f460ec905fc3409a36
|
f41725a360d902d3c7939fdf81a5acaf0d0467f0
|
/src/degree_of_simple_extension.lean
|
fcaa91aaaa9a8d51ac98e1db5c3bfad128eda456
|
[] |
no_license
|
pglutz/galois_theory
|
978765d82b7586c21fd719b84b21d5eea030b25d
|
4561c2c97d4c49377356e1d7a2051dedc87d30ba
|
refs/heads/master
| 1,671,472,063,361
| 1,603,597,360,000
| 1,603,597,360,000
| 281,502,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 9,831
|
lean
|
-- import subfield_stuff
-- import group_theory.subgroup
-- import field_theory.minimal_polynomial
-- import linear_algebra.dimension
-- import linear_algebra.finite_dimensional
-- import linear_algebra.basis
-- import ring_theory.adjoin_root
-- import data.zmod.basic
-- import data.polynomial.basic
-- import adjoin
import ring_theory.adjoin_root
import linear_algebra.finite_dimensional
import field_theory.minimal_polynomial
noncomputable theory
local attribute [instance, priority 100] classical.prop_decidable
open vector_space polynomial finite_dimensional
lemma polynomial.degree_mod_lt {R : Type*} [field R] (p q : polynomial R)
(h : p ≠ 0) : (q % p).degree < p.degree :=
begin
exact euclidean_domain.mod_lt q h,
end
variables {F : Type*} [field F] (p : polynomial F)
def module_map : polynomial.degree_lt F (p.nat_degree) →ₗ[F] adjoin_root p := {
to_fun := λ q, adjoin_root.mk p q,
map_add' := λ _ _, ring_hom.map_add _ _ _,
map_smul' := λ _ _, by simpa [algebra.smul_def, ring_hom.map_mul],
}
lemma module_map_injective : function.injective (module_map p) :=
begin
rw is_add_group_hom.injective_iff,
intros q hq,
change ideal.quotient.mk _ _ = 0 at hq,
rw [ideal.quotient.eq_zero_iff_mem, ideal.mem_span_singleton] at hq,
cases hq with r hr,
cases q with q hq,
rw submodule.coe_mk at hr,
rw [submodule.mk_eq_zero, hr],
rw [mem_degree_lt, hr, degree_mul] at hq,
clear hr q,
by_cases hp : (p = 0),
{ rw [hp, zero_mul] },
by_cases hr : (r = 0),
{ rw [hr, mul_zero] },
rw [degree_eq_nat_degree hp, degree_eq_nat_degree hr, ←with_bot.coe_add, with_bot.coe_lt_coe] at hq,
exfalso,
nlinarith,
end
lemma module_map_surjective' (h : p ≠ 0) : function.surjective (module_map p) :=
begin
intro q,
obtain ⟨q', hq'⟩ : ∃ q', adjoin_root.mk p q' = q := ideal.quotient.mk_surjective q,
use (q' % p),
{ rw [mem_degree_lt, ← degree_eq_nat_degree h],
exact euclidean_domain.mod_lt q' h, },
{ change adjoin_root.mk p (q' % p) = q,
symmetry,
rw [← hq', adjoin_root.mk, ideal.quotient.eq, ideal.mem_span_singleton'],
exact ⟨q' / p, by rw [eq_sub_iff_add_eq, mul_comm, euclidean_domain.div_add_mod]⟩, },
end
lemma module_map_surjective [polynomial.degree p>0]: function.surjective (module_map p) :=
begin
intro q,
have s : function.surjective (adjoin_root.mk p) := ideal.quotient.mk_surjective,
have t : ∃ g : polynomial F, (adjoin_root.mk p) g = q := s q,
cases t with preimage salem,
use (preimage % p),
have u:(preimage % p).degree<p.degree,
{ rw polynomial.mod_def,
have w:=ne_zero_of_degree_gt _inst_2,
{ have alpha: p * C ((p.leading_coeff)⁻¹)=normalize p,
{ dsimp,
rw polynomial.coe_norm_unit_of_ne_zero w},
rw alpha,
have beta:p.degree=(normalize p).degree,
rw [polynomial.degree_normalize],
rw beta,
apply polynomial.degree_mod_by_monic_lt,
exact monic_normalize w,
exact monic.ne_zero (polynomial.monic_normalize w), }, },
have gamma:p.degree=p.nat_degree,
{ exact polynomial.degree_eq_nat_degree (ne_zero_of_degree_gt _inst_2), },
rw gamma at u,
exact mem_degree_lt.mpr u,
--exact monic.ne_zero gamma,
--rw ← polynomial.degree_normalize p,
--have v:(p * C (p.leading_coeff)⁻¹).monic,
--exact polynomial.monic_mul_leading_coeff_inv w,
--have z:p.degree=(p * C (p.leading_coeff)⁻¹).degree,
--simp,
--library_search,
--let y:=polynomial.coe_norm_unit_of_ne_zero _ _,
--apply polynomial.degree_mod_by_monic_lt,
--apply polynomial.degree_div_lt,
--#check polynomial.degree_div_lt,
sorry,
end
lemma module_map_bijective (h : p ≠ 0) : function.bijective (module_map p) :=
⟨module_map_injective p, module_map_surjective' p h⟩
def module_isomorphism (h : p ≠ 0) : polynomial.degree_lt F (p.nat_degree) ≃ₗ[F] adjoin_root p :=
{ .. (module_map p), .. equiv.of_bijective _ (module_map_bijective p h) }
def module_quotient_map : polynomial F →ₐ[F] adjoin_root p :=
{ to_fun := (adjoin_root.mk p).to_fun,
map_zero' := (adjoin_root.mk p).map_zero,
map_add' := (adjoin_root.mk p).map_add,
map_one' := (adjoin_root.mk p).map_one,
map_mul' := (adjoin_root.mk p).map_mul,
commutes' := λ _, rfl, }
def canonical_basis : {n : ℕ | n < polynomial.nat_degree p} → adjoin_root p :=
λ (n : {n : ℕ | n < polynomial.nat_degree p}), adjoin_root.mk p (polynomial.X ^ (n : ℕ))
lemma canonical_basis_is_basis : is_basis F (canonical_basis p) :=
begin
split,
{ apply linear_independent_iff.mpr,
intros f hf,
dsimp only [canonical_basis] at hf,
dsimp only [finsupp.total] at hf,
rw finsupp.lsum_apply at hf,
dsimp [finsupp.sum] at hf,
simp only [linear_map.id_coe, id.def] at hf,
change f.support.sum (λ a, (f a) • (module_quotient_map p (X ^ ↑a))) = 0 at hf,
simp only [←alg_hom.map_smul] at hf,
haveI : is_add_monoid_hom (module_quotient_map p) := sorry,
rw finset.sum_hom at hf,
sorry,
},
{ sorry, },
end
lemma adjunction_basis : is_basis F (canonical_basis p):=
begin
let degree:=polynomial.nat_degree p,
let x:polynomial F:= polynomial.X,
let S:= {n: ℕ| n<degree},
let ν:= λ (n:S), (x^(n:ℕ)),
let η := (adjoin_root.mk p)∘ν,
have nonneg: degree=0 ∨ degree>0,
exact nat.eq_zero_or_pos degree,
cases nonneg with zero_deg pos_deg,
{sorry},
have comp: η = (adjoin_root.mk p) ∘ ν := rfl,
{ unfold is_basis,
split,
{ apply linear_independent_iff.2,
intros l eq_zero,
have decomp: (finsupp.total S (adjoin_root p) F η) l =
(adjoin_root.mk p) ((finsupp.total ↥S (polynomial F) F ν) l),
{ rw comp,
have is_fin':finset ↥S := finset.univ,
symmetry,
let algebra_1 :=algebra_map F (adjoin_root p),
let algebra_2 :=algebra_map F (polynomial F),
have adjoin_root_is_module_map:(adjoin_root.mk p).to_fun=(module_quotient_map p).to_fun:=by simpa,
have eq_1:(adjoin_root.mk p) ((finsupp.total ↥S (polynomial F) F ν) l) = (module_quotient_map p)((finsupp.total ↥S (polynomial F) F ν) l):=
by simpa[adjoin_root_is_module_map],
have eq_2: (finsupp.total ↥S (adjoin_root p) F ((adjoin_root.mk p) ∘ ν)) l = (finsupp.total ↥S (adjoin_root p) F ((module_quotient_map p) ∘ ν)) l:=
by simpa[adjoin_root_is_module_map],
have eq_3: (module_quotient_map p)((finsupp.total ↥S (polynomial F) F ν) l) = (finsupp.total ↥S (adjoin_root p) F ((module_quotient_map p) ∘ ν)) l:=sorry,
--dsimp[finsupp.total],
--simp[finsupp.lmap_domain_total],
--exact finsupp.lmap_domain_total F,
--simp_rw[finsupp.lmap_domain_total],
simp*,
},
sorry,
},
{ sorry, },
},
end
-- I have written an outline of a different way of proving the theorems above, which
-- does not involve working with bases. The basic idea is to show that (adjoin_root p)
-- is isomorphic to F^(degree p) (which is expressed in mathlib as (fin (nat_degree p) → F)).
-- Also, I think this can be proved for all nonzero polynomials, not just those which happen
-- to be the minimal polynomial for something. Of course, with an arbitrary polynomial p,
-- F[x]/p is not a field, but that doesn't matter if you just care about the vector space
-- structure.
--
-- Here's my proposed strategy for proving the isomorphism:
-- 1) First construct the map from (fin (nat_degree p) → F) to adjoin_root p
-- Hopefully this should be easy because there's a map from (fin (nat_degree p) → F)
-- to polynomial F and so it should give a map on the quotient.
-- 2) Show that this map is linear. Maybe it's better to first show that the map to
-- polynomial F is linear and automatically get a linear map by composing with the
-- quotient.
-- 3) Show that this map is a bijection.
-- 4) Use the theorem linear_equiv.of_bijective to finish.
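/- A minimal sketch of step 1 of the outline above: send a coefficient vector
`v : fin (nat_degree p) → F` to the class of the corresponding polynomial in `adjoin_root p`.
This only illustrates the intended map (it is not yet shown to be linear or bijective), and it
uses nothing beyond `adjoin_root.mk` and the polynomial API already opened in this file. -/
example (v : fin (nat_degree p) → F) : adjoin_root p :=
adjoin_root.mk p (finset.univ.sum (λ i : fin (nat_degree p), C (v i) * X ^ (i : ℕ)))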
lemma adjunction_degree_finite : finite_dimensional F (adjoin_root p) :=
begin
let degree:=polynomial.nat_degree p,
let x:polynomial F:= polynomial.X,
let S:= {n: ℕ| n<degree},
let η := λ (n:S), adjoin_root.mk p (x^(n:ℕ)),
let ν:= λ (n:S), (x^(n:ℕ)),
have comp: η = (adjoin_root.mk p) ∘ ν := rfl,
have is_fin:fintype S,
exact set.fintype_lt_nat degree,
have basis:is_basis F η,
{ unfold is_basis,
split,
{ apply linear_independent_iff.2,
intros l eq_zero,
have decomp: (finsupp.total ↥S (adjoin_root p) F η) l=(adjoin_root.mk p) ((finsupp.total ↥S (polynomial F) F ν) l),
{ rw comp,
have is_fin':finset ↥S := finset.univ,
symmetry,
let algebra_1 :=algebra_map F (adjoin_root p),
let algebra_2 :=algebra_map F (polynomial F),
have degree_is_positive: 0<degree := sorry,
sorry,
},
sorry,
},
{ sorry, },
},
sorry,
end
/- apply finite_dimensional.of_finite_basis, -/
lemma quotient_degree : (finite_dimensional.findim F (adjoin_root p)) = p.nat_degree :=
begin
sorry
end
lemma adjoin_root_equiv_fin_fun_degree (p : polynomial F) (h : p ≠ 0) :
(fin (nat_degree p) → F) ≃ₗ[F] adjoin_root p :=
begin
sorry,
end
lemma adjoin_root_finite_dimensional (p : polynomial F) (h : p ≠ 0) :
finite_dimensional F (adjoin_root p) :=
linear_equiv.finite_dimensional (adjoin_root_equiv_fin_fun_degree p h)
lemma adjoin_root_findim_eq_degree (p : polynomial F) (h : p ≠ 0) :
findim F (adjoin_root p) = nat_degree p :=
by {rw [← linear_equiv.findim_eq (adjoin_root_equiv_fin_fun_degree p h), findim_fin_fun]}
lemma adjoin_root_dim_eq_degree (p : polynomial F) (h : p ≠ 0) :
dim F (adjoin_root p) = nat_degree p :=
by rw [← linear_equiv.dim_eq (adjoin_root_equiv_fin_fun_degree p h), dim_fin_fun]
|
07f9c5053040af8a789b459f3fba4f77f2ee6ade
|
8cae430f0a71442d02dbb1cbb14073b31048e4b0
|
/src/data/set/intervals/ord_connected_component.lean
|
e425e8a41bc87dedf66fce34338b3a9dff123c26
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathlib
|
56a2cadd17ac88caf4ece0a775932fa26327ba0e
|
442a83d738cb208d3600056c489be16900ba701d
|
refs/heads/master
| 1,693,584,102,358
| 1,693,471,902,000
| 1,693,471,902,000
| 97,922,418
| 1,595
| 352
|
Apache-2.0
| 1,694,693,445,000
| 1,500,624,130,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 9,190
|
lean
|
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import data.set.intervals.ord_connected
import tactic.wlog
/-!
# Order connected components of a set
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define `set.ord_connected_component s x` to be the set of `y` such that
`set.uIcc x y ⊆ s` and prove some basic facts about this definition. At the moment of writing,
this construction is used only to prove that any linear order with order topology is a T₅ space,
so we only add API needed for this lemma.
-/
open function order_dual
open_locale interval
namespace set
variables {α : Type*} [linear_order α] {s t : set α} {x y z : α}
/-- Order-connected component of a point `x` in a set `s`. It is defined as the set of `y` such that
`set.uIcc x y ⊆ s`. Note that it is empty if and only if `x ∉ s`. -/
def ord_connected_component (s : set α) (x : α) : set α := {y | [x, y] ⊆ s}
lemma mem_ord_connected_component : y ∈ ord_connected_component s x ↔ [x, y] ⊆ s := iff.rfl
lemma dual_ord_connected_component :
ord_connected_component (of_dual ⁻¹' s) (to_dual x) = of_dual ⁻¹' (ord_connected_component s x) :=
ext $ to_dual.surjective.forall.2 $ λ x,
by { rw [mem_ord_connected_component, dual_uIcc], refl }
lemma ord_connected_component_subset : ord_connected_component s x ⊆ s := λ y hy, hy right_mem_uIcc
lemma subset_ord_connected_component {t} [h : ord_connected s] (hs : x ∈ s) (ht : s ⊆ t) :
s ⊆ ord_connected_component t x :=
λ y hy, (h.uIcc_subset hs hy).trans ht
@[simp] lemma self_mem_ord_connected_component : x ∈ ord_connected_component s x ↔ x ∈ s :=
by rw [mem_ord_connected_component, uIcc_self, singleton_subset_iff]
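/- Illustrative use of the lemma above, assuming only `x ∈ s`; nothing new is introduced. -/
example (hx : x ∈ s) : x ∈ ord_connected_component s x :=
self_mem_ord_connected_component.2 hx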
@[simp] lemma nonempty_ord_connected_component : (ord_connected_component s x).nonempty ↔ x ∈ s :=
⟨λ ⟨y, hy⟩, hy $ left_mem_uIcc, λ h, ⟨x, self_mem_ord_connected_component.2 h⟩⟩
@[simp] lemma ord_connected_component_eq_empty : ord_connected_component s x = ∅ ↔ x ∉ s :=
by rw [← not_nonempty_iff_eq_empty, nonempty_ord_connected_component]
@[simp] lemma ord_connected_component_empty : ord_connected_component ∅ x = ∅ :=
ord_connected_component_eq_empty.2 (not_mem_empty x)
@[simp] lemma ord_connected_component_univ : ord_connected_component univ x = univ :=
by simp [ord_connected_component]
lemma ord_connected_component_inter (s t : set α) (x : α) :
ord_connected_component (s ∩ t) x = ord_connected_component s x ∩ ord_connected_component t x :=
by simp [ord_connected_component, set_of_and]
lemma mem_ord_connected_component_comm :
y ∈ ord_connected_component s x ↔ x ∈ ord_connected_component s y :=
by rw [mem_ord_connected_component, mem_ord_connected_component, uIcc_comm]
lemma mem_ord_connected_component_trans (hxy : y ∈ ord_connected_component s x)
(hyz : z ∈ ord_connected_component s y) : z ∈ ord_connected_component s x :=
calc [x, z] ⊆ [x, y] ∪ [y, z] : uIcc_subset_uIcc_union_uIcc
... ⊆ s : union_subset hxy hyz
lemma ord_connected_component_eq (h : [x, y] ⊆ s) :
ord_connected_component s x = ord_connected_component s y :=
ext $ λ z, ⟨mem_ord_connected_component_trans (mem_ord_connected_component_comm.2 h),
mem_ord_connected_component_trans h⟩
instance : ord_connected (ord_connected_component s x) :=
ord_connected_of_uIcc_subset_left $ λ y hy z hz, (uIcc_subset_uIcc_left hz).trans hy
/-- Projection from `s : set α` to `α` sending each order connected component of `s` to a single
point of this component. -/
noncomputable def ord_connected_proj (s : set α) : s → α :=
λ x : s, (nonempty_ord_connected_component.2 x.prop).some
lemma ord_connected_proj_mem_ord_connected_component (s : set α) (x : s) :
ord_connected_proj s x ∈ ord_connected_component s x :=
nonempty.some_mem _
lemma mem_ord_connected_component_ord_connected_proj (s : set α) (x : s) :
↑x ∈ ord_connected_component s (ord_connected_proj s x) :=
mem_ord_connected_component_comm.2 $ ord_connected_proj_mem_ord_connected_component s x
@[simp] lemma ord_connected_component_ord_connected_proj (s : set α) (x : s) :
ord_connected_component s (ord_connected_proj s x) = ord_connected_component s x :=
ord_connected_component_eq $ mem_ord_connected_component_ord_connected_proj _ _
@[simp] lemma ord_connected_proj_eq {x y : s} :
ord_connected_proj s x = ord_connected_proj s y ↔ [(x : α), y] ⊆ s :=
begin
split; intro h,
{ rw [← mem_ord_connected_component, ← ord_connected_component_ord_connected_proj, h,
ord_connected_component_ord_connected_proj, self_mem_ord_connected_component],
exact y.2 },
{ simp only [ord_connected_proj],
congr' 1,
exact ord_connected_component_eq h }
end
/-- A set that intersects each order connected component of a set by a single point. Defined as the
range of `set.ord_connected_proj s`. -/
def ord_connected_section (s : set α) : set α := range $ ord_connected_proj s
lemma dual_ord_connected_section (s : set α) :
ord_connected_section (of_dual ⁻¹' s) = of_dual ⁻¹' (ord_connected_section s) :=
begin
simp only [ord_connected_section, ord_connected_proj],
congr' 1 with x, simp only, congr' 1,
exact dual_ord_connected_component
end
lemma ord_connected_section_subset : ord_connected_section s ⊆ s :=
range_subset_iff.2 $ λ x, ord_connected_component_subset $ nonempty.some_mem _
lemma eq_of_mem_ord_connected_section_of_uIcc_subset (hx : x ∈ ord_connected_section s)
(hy : y ∈ ord_connected_section s) (h : [x, y] ⊆ s) : x = y :=
begin
rcases hx with ⟨x, rfl⟩, rcases hy with ⟨y, rfl⟩,
exact ord_connected_proj_eq.2 (mem_ord_connected_component_trans
(mem_ord_connected_component_trans (ord_connected_proj_mem_ord_connected_component _ _) h)
(mem_ord_connected_component_ord_connected_proj _ _))
end
/-- Given two sets `s t : set α`, the set `set.order_separating_set s t` is the set of points that
belong both to some `set.ord_connected_component tᶜ x`, `x ∈ s`, and to some
`set.ord_connected_component sᶜ x`, `x ∈ t`. In the case of two disjoint closed sets, this is the
union of all open intervals $(a, b)$ such that their endpoints belong to different sets. -/
def ord_separating_set (s t : set α) : set α :=
(⋃ x ∈ s, ord_connected_component tᶜ x) ∩ (⋃ x ∈ t, ord_connected_component sᶜ x)
lemma ord_separating_set_comm (s t : set α) :
ord_separating_set s t = ord_separating_set t s :=
inter_comm _ _
lemma disjoint_left_ord_separating_set : disjoint s (ord_separating_set s t) :=
disjoint.inter_right' _ $ disjoint_Union₂_right.2 $ λ x hx, disjoint_compl_right.mono_right $
ord_connected_component_subset
lemma disjoint_right_ord_separating_set : disjoint t (ord_separating_set s t) :=
ord_separating_set_comm t s ▸ disjoint_left_ord_separating_set
lemma dual_ord_separating_set :
ord_separating_set (of_dual ⁻¹' s) (of_dual ⁻¹' t) = of_dual ⁻¹' (ord_separating_set s t) :=
by simp only [ord_separating_set, mem_preimage, ← to_dual.surjective.Union_comp, of_dual_to_dual,
dual_ord_connected_component, ← preimage_compl, preimage_inter, preimage_Union]
/-- An auxiliary neighborhood that will be used in the proof of `order_topology.t5_space`. -/
def ord_t5_nhd (s t : set α) : set α :=
⋃ x ∈ s, ord_connected_component (tᶜ ∩ (ord_connected_section $ ord_separating_set s t)ᶜ) x
lemma disjoint_ord_t5_nhd : disjoint (ord_t5_nhd s t) (ord_t5_nhd t s) :=
begin
rw disjoint_iff_inf_le,
rintro x ⟨hx₁, hx₂⟩,
rcases mem_Union₂.1 hx₁ with ⟨a, has, ha⟩, clear hx₁,
rcases mem_Union₂.1 hx₂ with ⟨b, hbt, hb⟩, clear hx₂,
rw [mem_ord_connected_component, subset_inter_iff] at ha hb,
wlog hab : a ≤ b,
{ exact this b hbt a has ha hb (le_of_not_le hab) },
cases ha with ha ha', cases hb with hb hb',
have hsub : [a, b] ⊆ (ord_separating_set s t).ord_connected_sectionᶜ,
{ rw [ord_separating_set_comm, uIcc_comm] at hb',
calc [a, b] ⊆ [a, x] ∪ [x, b] : uIcc_subset_uIcc_union_uIcc
... ⊆ (ord_separating_set s t).ord_connected_sectionᶜ : union_subset ha' hb' },
clear ha' hb',
cases le_total x a with hxa hax,
{ exact hb (Icc_subset_uIcc' ⟨hxa, hab⟩) has },
cases le_total b x with hbx hxb,
{ exact ha (Icc_subset_uIcc ⟨hab, hbx⟩) hbt },
have : x ∈ ord_separating_set s t,
{ exact ⟨mem_Union₂.2 ⟨a, has, ha⟩, mem_Union₂.2 ⟨b, hbt, hb⟩⟩ },
lift x to ord_separating_set s t using this,
suffices : ord_connected_component (ord_separating_set s t) x ⊆ [a, b],
from hsub (this $ ord_connected_proj_mem_ord_connected_component _ _) (mem_range_self _),
rintros y (hy : [↑x, y] ⊆ ord_separating_set s t),
rw [uIcc_of_le hab, mem_Icc, ← not_lt, ← not_lt],
exact ⟨λ hya, disjoint_left.1 disjoint_left_ord_separating_set has
(hy $ Icc_subset_uIcc' ⟨hya.le, hax⟩),
λ hyb, disjoint_left.1 disjoint_right_ord_separating_set hbt
(hy $ Icc_subset_uIcc ⟨hxb, hyb.le⟩)⟩
end
end set
|
b0eb4a1ea810a250f40b4ff957f5add42a4a2976
|
86f6f4f8d827a196a32bfc646234b73328aeb306
|
/examples/sets_functions_and_relations/unnamed_367.lean
|
d335fba043f8f630b070dc2cfa9120def2c21d28
|
[] |
no_license
|
jamescheuk91/mathematics_in_lean
|
09f1f87d2b0dce53464ff0cbe592c568ff59cf5e
|
4452499264e2975bca2f42565c0925506ba5dda3
|
refs/heads/master
| 1,679,716,410,967
| 1,613,957,947,000
| 1,613,957,947,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 157
|
lean
|
import data.set.basic
open set
variable {α : Type*}
variables (s t u : set α)
-- BEGIN
example : s ∩ t = t ∩ s :=
subset.antisymm sorry sorry
-- END
|
32bdf86a029d1fc72bd77c54142267e3986a0de5
|
ce89339993655da64b6ccb555c837ce6c10f9ef4
|
/bluejam/topprover/11.lean
|
5d8628cba760a727b3b170b0e17f0471de6c144a
|
[] |
no_license
|
zeptometer/LearnLean
|
ef32dc36a22119f18d843f548d0bb42f907bff5d
|
bb84d5dbe521127ba134d4dbf9559b294a80b9f7
|
refs/heads/master
| 1,625,710,824,322
| 1,601,382,570,000
| 1,601,382,570,000
| 195,228,870
| 2
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,217
|
lean
|
inductive swap_once: list nat → list nat → Prop
| swap : ∀ (h1 h2 : nat) (t : list nat), swap_once (h1 :: h2 :: t) (h2 :: h1 :: t)
| delegate : ∀ (h : nat) (t1 t2 : list nat), swap_once t1 t2 → swap_once (h :: t1) (h :: t2)
inductive is_odd_permutation: list nat → list nat → Prop
| OddPermutation1: forall l1 l2, swap_once l1 l2 -> is_odd_permutation l1 l2
| OddPermutation2: forall l1 l2 l3 l4, swap_once l1 l2 -> swap_once l2 l3 -> is_odd_permutation l3 l4 -> is_odd_permutation l1 l4
inductive no_dup: list nat → Prop
| empty: no_dup []
| first_appear: ∀ (h : nat) (l : list nat), (h ∉ l) → no_dup (h :: l)
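-- Small sanity checks for the definitions above: swapping the first two elements of a list
-- is a single swap, and hence an odd permutation. Nothing beyond the constructors is used.
example : swap_once [1, 2] [2, 1] := swap_once.swap 1 2 []
example : is_odd_permutation [1, 2] [2, 1] :=
is_odd_permutation.OddPermutation1 [1, 2] [2, 1] (swap_once.swap 1 2 [])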
lemma eq_len (l1 l2 : list nat) : l1 = l2 → list.length l1 = list.length l2 :=
match l1, l2 with
| list.nil, list.nil := by intros; refl
| list.nil, (h :: t) := by intros; contradiction
| (h :: t), list.nil := by intros; contradiction
| (h1 :: t1), (h2 :: t2) := by intros; simp * at *
end
lemma equals_even (l1 l2 : list nat) : l1 = l2 → no_dup l1 → ¬ is_odd_permutation l1 l2 :=
match l1, l2 with
| list.nil, _ :=
begin
intros,
intro h_n,
cases h_n; cases h_n_a,
end
| (h :: t), list.nil := by intros; simp * at *
| (ha :: t1), (hb :: t2) :=
begin
intros,
intro h_n,
have h_tail_eq : t1 = t2,
simp * at *,
have h_head_eq : ha = hb,
simp * at *,
have h_tail_no_dup : no_dup t1,
simp * at *,
have h_even_sub : ¬ is_odd_permutation t1 t2,
from _match t1 t2 h_tail_eq h_tail_no_dup,
cases h_n,
cases h_n_a,
have: ¬ no_dup (ha :: hb :: h_n_a_t),
intro sub_no_dup,
cases sub_no_dup,
simp * at *,
contradiction,
have : is_odd_permutation t1 t2,
from is_odd_permutation.OddPermutation1 t1 t2 h_n_a_a,
contradiction,
sorry
end
end
example : ∀ (l1 l2 : list nat), no_dup l1 → is_odd_permutation l1 l2 -> l1 ≠ l2 :=
begin
intros l1 l2 h_dp h_odd equiv,
have: ¬ is_odd_permutation l1 l2,
from absurd h_odd (equals_even l1 l2 equiv h_dp),
contradiction,
end
|
766d11a20a099ecfadf49498cae193fd89bdd61a
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/tests/lean/run/abstractExpr.lean
|
155d05bae8126953972cb868ed010e24cca0b793
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 348
|
lean
|
import Lean
open Lean
open Lean.Meta
def test : MetaM Unit := do
let x ← mkFreshExprMVar (mkConst ``Nat)
let y ← mkFreshExprMVar (mkConst ``Nat)
let add := mkConst ``Nat.add
let e := mkApp3 add x (mkNatLit 1) y
IO.println (e.abstract #[x, y])
assert! e.abstract #[x, y] == mkApp3 add (mkBVar 1) (mkNatLit 1) (mkBVar 0)
#eval test
|
d7fcf66b34da6a098b3ca7543a0918405190c8dc
|
c8d830ce6c7de4840cf0c892d8b58e7e8df97e37
|
/src/property_catalogue/LTL/sat/precedes.lean
|
c3c97359c6a1b79e08b5ce2179e82a7768216773
|
[] |
no_license
|
loganrjmurphy/lean-strategies
|
4b8dd54771bb421c929a8bcb93a528ce6c1a70f1
|
020e2a65dc2ab475696dfea5ad8935a0a4085918
|
refs/heads/main
| 1,682,732,168,860
| 1,614,820,630,000
| 1,614,820,630,000
| 278,458,841
| 3
| 0
| null | 1,613,755,728,000
| 1,594,324,763,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 3,655
|
lean
|
import LTS property_catalogue.LTL.patterns tactic common_meta
open tactic
variable {M : LTS}
namespace precedes
namespace globally
-- Proof 1 : Because S never happens
lemma vacuous (P S : formula M) (π : path M) :
(sat (absent.globally S) π) → sat (precedes.globally P S) π
:= λ s, or.inl s
-- Proof 2 : P precedes S because S can't happen before P
local notation π `⊨ `P := sat P π
variables (P Q R : formula M) (π : path M)
lemma by_absent_before (P Q : formula M) (π : path M) :
(π ⊨ absent.before Q P) ∧ (π ⊨ exist.globally P ) →
(π ⊨ precedes.globally P Q) :=
begin
rintros ⟨H1,H2⟩,
rw precedes.globally,
rw [absent.before, sat, imp_iff_not_or] at H1,
cases H1,
left, rw always_eventually_dual, contradiction,
right, assumption,
end
-- Proof 3 : P precedes R because P precedes Q and Q precedes R
lemma by_transitive (P Q R : formula M) (π : path M) :
(π ⊨ precedes.globally P R) ∧ (π ⊨ precedes.globally R Q) →
(π ⊨ precedes.globally P Q) :=
begin
rintros ⟨H1, H2⟩,
cases H2, left, assumption,
cases H1,
cases H2 with k H2, replace H1 := (H1 k), replace H2:= H2.1,
have := sat_em R (π.drop k),replace this := this H2,
contradiction,
rcases H1 with ⟨k,Hk1,Hk2⟩,
rcases H2 with ⟨w, Hw1, Hw2⟩,
right, use k,
split, assumption,
intros i Hi, apply Hw2,
have EM : (k < w) ∨ ¬ (k < w), from em (k < w),
cases EM,
apply lt_trans Hi, assumption,
simp at EM,
have EM2 : (k = w) ∨ ¬ (k = w), from em (k = w),
cases EM2,
rw ← EM2, assumption,
have : w < k, by omega,
replace Hk2 := Hk2 w this,
have := sat_em R (π.drop w),replace this := this Hw1,
contradiction,
end
meta def solve_by_transitive (e₁ e₂ e₃ : expr) (s : string): tactic unit :=
do
tactic.interactive.apply ``(by_transitive %%e₁ %%e₂ %%e₃)
-- e₁ ← tactic_format_expr e₁,
-- e₂ ← tactic_format_expr e₂,
-- e₃ ← tactic_format_expr e₃,
-- return $ s.append $
-- "apply precedes.globally.by_transitive " ++
-- e₁.to_string ++ " " ++ e₂.to_string ++ " " ++ e₃.to_string ++ ",\n"
meta def solve_by_absent_before (e₁ e₂ : expr) (s : string) :
tactic unit :=
do
tactic.interactive.apply ``(by_absent_before %%e₁ %%e₂)
-- e₁ ← e₁.log_format,
-- e₂ ← e₂.log_format,
-- s.log $
-- "apply precedes.globally.by_absent_before " ++
-- e₁ ++ " " ++ e₂ ++ ",\n"
meta def solve (e₁ e₂ : expr) (s : string) : list expr → tactic unit
| [] := return ()
| (h::t) :=
do typ ← infer_type h,
match typ with
| `(sat (precedes.globally _ %%new) _) :=
solve_by_transitive e₁ e₂ new s <|> solve t
| `(sat (absent.before _ _) _) :=
solve_by_absent_before e₁ e₂ s <|> solve t
| _ := solve t
end
end globally
namespace before
-- (◆R) ⇒ ((!P) U (S ⅋ R))
lemma vacuous (P R S: formula M) (π : path M) :
sat (absent.globally S) π → sat (precedes.before P R S) π :=
by {intro H, rw [precedes.before,sat,imp_iff_not_or,
← always_eventually_dual], left, assumption}
lemma by_absent_before (P R S: formula M) (π : path M) :
sat (exist.globally (P ⅋ S)) π
∧ sat (absent.before R (P ⅋ S)) π → sat (precedes.before P R S) π
:=
begin
rintros ⟨L,R⟩,
replace R := R L,
rw [precedes.before, sat, imp_iff_not_or],
exact or.inr R,
end
end before
namespace after
-- (◾!Q) ⅋ ◆(Q & ((!P) W S))
end after
namespace between
-- ◾((Q & (!R) & ◆R) ⇒ (!P U (S ⅋ R)))
end between
end precedes
|
68184ebbd334547bcabf5fe62ea3363534803ec0
|
680b0d1592ce164979dab866b232f6fa743f2cc8
|
/library/data/hf.lean
|
652103f21b5d14b281d15634d0e0921b72e4b8e5
|
[
"Apache-2.0"
] |
permissive
|
syohex/lean
|
657428ab520f8277fc18cf04bea2ad200dbae782
|
081ad1212b686780f3ff8a6d0e5f8a1d29a7d8bc
|
refs/heads/master
| 1,611,274,838,635
| 1,452,668,188,000
| 1,452,668,188,000
| 49,562,028
| 0
| 0
| null | 1,452,675,604,000
| 1,452,675,602,000
| null |
UTF-8
|
Lean
| false
| false
| 25,395
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
Hereditarily finite sets: finite sets whose elements are all hereditarily finite sets.
Remark: all definitions compute, however the performance is quite poor since
we implement this module using a bijection from (finset nat) to nat, and
this bijection is implemented using the Ackermann coding.
-/
import data.nat data.finset.equiv data.list
open nat binary
open - [notation] finset
definition hf := nat
namespace hf
local attribute hf [reducible]
protected definition prio : num := num.succ std.priority.default
protected definition is_inhabited [instance] : inhabited hf :=
nat.is_inhabited
protected definition has_decidable_eq [reducible] [instance] : decidable_eq hf :=
nat.has_decidable_eq
definition of_finset (s : finset hf) : hf :=
@equiv.to_fun _ _ finset_nat_equiv_nat s
definition to_finset (h : hf) : finset hf :=
@equiv.inv _ _ finset_nat_equiv_nat h
definition to_nat (h : hf) : nat :=
h
definition of_nat (n : nat) : hf :=
n
lemma to_finset_of_finset (s : finset hf) : to_finset (of_finset s) = s :=
@equiv.left_inv _ _ finset_nat_equiv_nat s
lemma of_finset_to_finset (s : hf) : of_finset (to_finset s) = s :=
@equiv.right_inv _ _ finset_nat_equiv_nat s
lemma to_finset_inj {s₁ s₂ : hf} : to_finset s₁ = to_finset s₂ → s₁ = s₂ :=
λ h, function.injective_of_left_inverse of_finset_to_finset h
lemma of_finset_inj {s₁ s₂ : finset hf} : of_finset s₁ = of_finset s₂ → s₁ = s₂ :=
λ h, function.injective_of_left_inverse to_finset_of_finset h
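/- Illustrative round trip through the encoding described in the header comment; it simply
re-applies `of_finset_to_finset`, so nothing new is assumed. -/
example (s : hf) : of_finset (to_finset s) = s := of_finset_to_finset s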
/- empty -/
definition empty : hf :=
of_finset (finset.empty)
notation `∅` := hf.empty
/- insert -/
definition insert (a s : hf) : hf :=
of_finset (finset.insert a (to_finset s))
/- mem -/
definition mem (a : hf) (s : hf) : Prop :=
finset.mem a (to_finset s)
infix ∈ := mem
notation [priority finset.prio] a ∉ b := ¬ mem a b
lemma insert_lt_of_not_mem {a s : hf} : a ∉ s → s < insert a s :=
begin
unfold [insert, of_finset, equiv.to_fun, finset_nat_equiv_nat, mem, to_finset, equiv.inv],
intro h,
rewrite [finset.to_nat_insert h],
rewrite [to_nat_of_nat, -zero_add s at {1}],
apply add_lt_add_right,
apply pow_pos_of_pos _ dec_trivial
end
lemma insert_lt_insert_of_not_mem_of_not_mem_of_lt {a s₁ s₂ : hf}
: a ∉ s₁ → a ∉ s₂ → s₁ < s₂ → insert a s₁ < insert a s₂ :=
begin
unfold [insert, of_finset, equiv.to_fun, finset_nat_equiv_nat, mem, to_finset, equiv.inv],
intro h₁ h₂ h₃,
rewrite [finset.to_nat_insert h₁],
rewrite [finset.to_nat_insert h₂, *to_nat_of_nat],
apply add_lt_add_left h₃
end
open decidable
protected definition decidable_mem [instance] : ∀ a s, decidable (a ∈ s) :=
λ a s, finset.decidable_mem a (to_finset s)
lemma insert_le (a s : hf) : s ≤ insert a s :=
by_cases
(suppose a ∈ s, by rewrite [↑insert, insert_eq_of_mem this, of_finset_to_finset])
(suppose a ∉ s, le_of_lt (insert_lt_of_not_mem this))
lemma not_mem_empty (a : hf) : a ∉ ∅ :=
begin unfold [mem, empty], rewrite to_finset_of_finset, apply finset.not_mem_empty end
lemma mem_insert (a s : hf) : a ∈ insert a s :=
begin unfold [mem, insert], rewrite to_finset_of_finset, apply finset.mem_insert end
lemma mem_insert_of_mem {a s : hf} (b : hf) : a ∈ s → a ∈ insert b s :=
begin unfold [mem, insert], intros, rewrite to_finset_of_finset, apply finset.mem_insert_of_mem, assumption end
lemma eq_or_mem_of_mem_insert {a b s : hf} : a ∈ insert b s → a = b ∨ a ∈ s :=
begin unfold [mem, insert], rewrite to_finset_of_finset, intros, apply eq_or_mem_of_mem_insert, assumption end
theorem mem_of_mem_insert_of_ne {x a : hf} {s : hf} : x ∈ insert a s → x ≠ a → x ∈ s :=
begin unfold [mem, insert], rewrite to_finset_of_finset, intros, apply mem_of_mem_insert_of_ne, repeat assumption end
protected theorem ext {s₁ s₂ : hf} : (∀ a, a ∈ s₁ ↔ a ∈ s₂) → s₁ = s₂ :=
assume h,
assert to_finset s₁ = to_finset s₂, from finset.ext h,
assert of_finset (to_finset s₁) = of_finset (to_finset s₂), by rewrite this,
by rewrite [*of_finset_to_finset at this]; exact this
theorem insert_eq_of_mem {a : hf} {s : hf} : a ∈ s → insert a s = s :=
begin unfold mem, intro h, unfold [mem, insert], rewrite (finset.insert_eq_of_mem h), rewrite of_finset_to_finset end
protected theorem induction [recursor 4] {P : hf → Prop}
(h₁ : P empty) (h₂ : ∀ (a s : hf), a ∉ s → P s → P (insert a s)) (s : hf) : P s :=
assert P (of_finset (to_finset s)), from
@finset.induction _ _ _ h₁
(λ a s nain ih,
begin
unfold [mem, insert] at h₂,
rewrite -(to_finset_of_finset s) at nain,
have P (insert a (of_finset s)), by exact h₂ a (of_finset s) nain ih,
rewrite [↑insert at this, to_finset_of_finset at this],
exact this
end)
(to_finset s),
by rewrite of_finset_to_finset at this; exact this
lemma insert_le_insert_of_le {a s₁ s₂ : hf} : a ∈ s₁ ∨ a ∉ s₂ → s₁ ≤ s₂ → insert a s₁ ≤ insert a s₂ :=
suppose a ∈ s₁ ∨ a ∉ s₂,
suppose s₁ ≤ s₂,
by_cases
(suppose s₁ = s₂, by rewrite this)
(suppose s₁ ≠ s₂,
have s₁ < s₂, from lt_of_le_of_ne `s₁ ≤ s₂` `s₁ ≠ s₂`,
by_cases
(suppose a ∈ s₁, by_cases
(suppose a ∈ s₂, by rewrite [insert_eq_of_mem `a ∈ s₁`, insert_eq_of_mem `a ∈ s₂`]; assumption)
(suppose a ∉ s₂, by rewrite [insert_eq_of_mem `a ∈ s₁`]; exact le.trans `s₁ ≤ s₂` !insert_le))
(suppose a ∉ s₁, by_cases
(suppose a ∈ s₂, or.elim `a ∈ s₁ ∨ a ∉ s₂` (by contradiction) (by contradiction))
(suppose a ∉ s₂, le_of_lt (insert_lt_insert_of_not_mem_of_not_mem_of_lt `a ∉ s₁` `a ∉ s₂` `s₁ < s₂`))))
/- union -/
definition union (s₁ s₂ : hf) : hf :=
of_finset (finset.union (to_finset s₁) (to_finset s₂))
infix [priority hf.prio] ∪ := union
theorem mem_union_left {a : hf} {s₁ : hf} (s₂ : hf) : a ∈ s₁ → a ∈ s₁ ∪ s₂ :=
begin unfold mem, intro h, unfold union, rewrite to_finset_of_finset, apply finset.mem_union_left _ h end
theorem mem_union_l {a : hf} {s₁ : hf} {s₂ : hf} : a ∈ s₁ → a ∈ s₁ ∪ s₂ :=
mem_union_left s₂
theorem mem_union_right {a : hf} {s₂ : hf} (s₁ : hf) : a ∈ s₂ → a ∈ s₁ ∪ s₂ :=
begin unfold mem, intro h, unfold union, rewrite to_finset_of_finset, apply finset.mem_union_right _ h end
theorem mem_union_r {a : hf} {s₂ : hf} {s₁ : hf} : a ∈ s₂ → a ∈ s₁ ∪ s₂ :=
mem_union_right s₁
theorem mem_or_mem_of_mem_union {a : hf} {s₁ s₂ : hf} : a ∈ s₁ ∪ s₂ → a ∈ s₁ ∨ a ∈ s₂ :=
begin unfold [mem, union], rewrite to_finset_of_finset, intro h, apply finset.mem_or_mem_of_mem_union h end
theorem mem_union_iff {a : hf} (s₁ s₂ : hf) : a ∈ s₁ ∪ s₂ ↔ a ∈ s₁ ∨ a ∈ s₂ :=
iff.intro
(λ h, mem_or_mem_of_mem_union h)
(λ d, or.elim d
(λ i, mem_union_left _ i)
(λ i, mem_union_right _ i))
theorem mem_union_eq {a : hf} (s₁ s₂ : hf) : (a ∈ s₁ ∪ s₂) = (a ∈ s₁ ∨ a ∈ s₂) :=
propext !mem_union_iff
theorem union_comm (s₁ s₂ : hf) : s₁ ∪ s₂ = s₂ ∪ s₁ :=
hf.ext (λ a, by rewrite [*mem_union_eq]; exact or.comm)
theorem union_assoc (s₁ s₂ s₃ : hf) : (s₁ ∪ s₂) ∪ s₃ = s₁ ∪ (s₂ ∪ s₃) :=
hf.ext (λ a, by rewrite [*mem_union_eq]; exact or.assoc)
theorem union_left_comm (s₁ s₂ s₃ : hf) : s₁ ∪ (s₂ ∪ s₃) = s₂ ∪ (s₁ ∪ s₃) :=
!left_comm union_comm union_assoc s₁ s₂ s₃
theorem union_right_comm (s₁ s₂ s₃ : hf) : (s₁ ∪ s₂) ∪ s₃ = (s₁ ∪ s₃) ∪ s₂ :=
!right_comm union_comm union_assoc s₁ s₂ s₃
theorem union_self (s : hf) : s ∪ s = s :=
hf.ext (λ a, iff.intro
(λ ain, or.elim (mem_or_mem_of_mem_union ain) (λ i, i) (λ i, i))
(λ i, mem_union_left _ i))
theorem union_empty (s : hf) : s ∪ ∅ = s :=
hf.ext (λ a, iff.intro
(suppose a ∈ s ∪ ∅, or.elim (mem_or_mem_of_mem_union this) (λ i, i) (λ i, absurd i !not_mem_empty))
(suppose a ∈ s, mem_union_left _ this))
theorem empty_union (s : hf) : ∅ ∪ s = s :=
calc ∅ ∪ s = s ∪ ∅ : union_comm
... = s : union_empty
/- inter -/
definition inter (s₁ s₂ : hf) : hf :=
of_finset (finset.inter (to_finset s₁) (to_finset s₂))
infix [priority hf.prio] ∩ := inter
theorem mem_of_mem_inter_left {a : hf} {s₁ s₂ : hf} : a ∈ s₁ ∩ s₂ → a ∈ s₁ :=
begin unfold mem, unfold inter, rewrite to_finset_of_finset, intro h, apply finset.mem_of_mem_inter_left h end
theorem mem_of_mem_inter_right {a : hf} {s₁ s₂ : hf} : a ∈ s₁ ∩ s₂ → a ∈ s₂ :=
begin unfold mem, unfold inter, rewrite to_finset_of_finset, intro h, apply finset.mem_of_mem_inter_right h end
theorem mem_inter {a : hf} {s₁ s₂ : hf} : a ∈ s₁ → a ∈ s₂ → a ∈ s₁ ∩ s₂ :=
begin unfold mem, intro h₁ h₂, unfold inter, rewrite to_finset_of_finset, apply finset.mem_inter h₁ h₂ end
theorem mem_inter_iff (a : hf) (s₁ s₂ : hf) : a ∈ s₁ ∩ s₂ ↔ a ∈ s₁ ∧ a ∈ s₂ :=
iff.intro
(λ h, and.intro (mem_of_mem_inter_left h) (mem_of_mem_inter_right h))
(λ h, mem_inter (and.elim_left h) (and.elim_right h))
theorem mem_inter_eq (a : hf) (s₁ s₂ : hf) : (a ∈ s₁ ∩ s₂) = (a ∈ s₁ ∧ a ∈ s₂) :=
propext !mem_inter_iff
theorem inter_comm (s₁ s₂ : hf) : s₁ ∩ s₂ = s₂ ∩ s₁ :=
hf.ext (λ a, by rewrite [*mem_inter_eq]; exact and.comm)
theorem inter_assoc (s₁ s₂ s₃ : hf) : (s₁ ∩ s₂) ∩ s₃ = s₁ ∩ (s₂ ∩ s₃) :=
hf.ext (λ a, by rewrite [*mem_inter_eq]; exact and.assoc)
theorem inter_left_comm (s₁ s₂ s₃ : hf) : s₁ ∩ (s₂ ∩ s₃) = s₂ ∩ (s₁ ∩ s₃) :=
!left_comm inter_comm inter_assoc s₁ s₂ s₃
theorem inter_right_comm (s₁ s₂ s₃ : hf) : (s₁ ∩ s₂) ∩ s₃ = (s₁ ∩ s₃) ∩ s₂ :=
!right_comm inter_comm inter_assoc s₁ s₂ s₃
theorem inter_self (s : hf) : s ∩ s = s :=
hf.ext (λ a, iff.intro
(λ h, mem_of_mem_inter_right h)
(λ h, mem_inter h h))
theorem inter_empty (s : hf) : s ∩ ∅ = ∅ :=
hf.ext (λ a, iff.intro
(suppose a ∈ s ∩ ∅, absurd (mem_of_mem_inter_right this) !not_mem_empty)
(suppose a ∈ ∅, absurd this !not_mem_empty))
theorem empty_inter (s : hf) : ∅ ∩ s = ∅ :=
calc ∅ ∩ s = s ∩ ∅ : inter_comm
... = ∅ : inter_empty
/- card -/
definition card (s : hf) : nat :=
finset.card (to_finset s)
theorem card_empty : card ∅ = 0 :=
rfl
lemma ne_empty_of_card_eq_succ {s : hf} {n : nat} : card s = succ n → s ≠ ∅ :=
by intros; substvars; contradiction
/- erase -/
definition erase (a : hf) (s : hf) : hf :=
of_finset (erase a (to_finset s))
theorem mem_erase (a : hf) (s : hf) : a ∉ erase a s :=
begin unfold [mem, erase], rewrite to_finset_of_finset, apply finset.mem_erase end
theorem card_erase_of_mem {a : hf} {s : hf} : a ∈ s → card (erase a s) = pred (card s) :=
begin unfold mem, intro h, unfold [erase, card], rewrite to_finset_of_finset, apply finset.card_erase_of_mem h end
theorem card_erase_of_not_mem {a : hf} {s : hf} : a ∉ s → card (erase a s) = card s :=
begin unfold [mem], intro h, unfold [erase, card], rewrite to_finset_of_finset, apply finset.card_erase_of_not_mem h end
theorem erase_empty (a : hf) : erase a ∅ = ∅ :=
rfl
theorem ne_of_mem_erase {a b : hf} {s : hf} : b ∈ erase a s → b ≠ a :=
by intro h beqa; subst b; exact absurd h !mem_erase
theorem mem_of_mem_erase {a b : hf} {s : hf} : b ∈ erase a s → b ∈ s :=
begin unfold [erase, mem], rewrite to_finset_of_finset, intro h, apply mem_of_mem_erase h end
theorem mem_erase_of_ne_of_mem {a b : hf} {s : hf} : a ≠ b → a ∈ s → a ∈ erase b s :=
begin intro h₁, unfold mem, intro h₂, unfold erase, rewrite to_finset_of_finset, apply mem_erase_of_ne_of_mem h₁ h₂ end
theorem mem_erase_iff (a b : hf) (s : hf) : a ∈ erase b s ↔ a ∈ s ∧ a ≠ b :=
iff.intro
(assume H, and.intro (mem_of_mem_erase H) (ne_of_mem_erase H))
(assume H, mem_erase_of_ne_of_mem (and.right H) (and.left H))
theorem mem_erase_eq (a b : hf) (s : hf) : a ∈ erase b s = (a ∈ s ∧ a ≠ b) :=
propext !mem_erase_iff
theorem erase_insert {a : hf} {s : hf} : a ∉ s → erase a (insert a s) = s :=
begin
unfold [mem, erase, insert],
intro h, rewrite [to_finset_of_finset, finset.erase_insert h, of_finset_to_finset]
end
theorem insert_erase {a : hf} {s : hf} : a ∈ s → insert a (erase a s) = s :=
begin
unfold mem, intro h, unfold [insert, erase],
rewrite [to_finset_of_finset, finset.insert_erase h, of_finset_to_finset]
end
/- subset -/
definition subset (s₁ s₂ : hf) : Prop :=
finset.subset (to_finset s₁) (to_finset s₂)
infix [priority hf.prio] ⊆ := subset
theorem empty_subset (s : hf) : ∅ ⊆ s :=
begin unfold [empty, subset], rewrite to_finset_of_finset, apply finset.empty_subset (to_finset s) end
theorem subset.refl (s : hf) : s ⊆ s :=
begin unfold [subset], apply finset.subset.refl (to_finset s) end
theorem subset.trans {s₁ s₂ s₃ : hf} : s₁ ⊆ s₂ → s₂ ⊆ s₃ → s₁ ⊆ s₃ :=
begin unfold [subset], intro h₁ h₂, apply finset.subset.trans h₁ h₂ end
theorem mem_of_subset_of_mem {s₁ s₂ : hf} {a : hf} : s₁ ⊆ s₂ → a ∈ s₁ → a ∈ s₂ :=
begin unfold [subset, mem], intro h₁ h₂, apply finset.mem_of_subset_of_mem h₁ h₂ end
theorem subset.antisymm {s₁ s₂ : hf} : s₁ ⊆ s₂ → s₂ ⊆ s₁ → s₁ = s₂ :=
begin unfold [subset], intro h₁ h₂, apply to_finset_inj (finset.subset.antisymm h₁ h₂) end
-- alternative name
theorem eq_of_subset_of_subset {s₁ s₂ : hf} (H₁ : s₁ ⊆ s₂) (H₂ : s₂ ⊆ s₁) : s₁ = s₂ :=
subset.antisymm H₁ H₂
theorem subset_of_forall {s₁ s₂ : hf} : (∀x, x ∈ s₁ → x ∈ s₂) → s₁ ⊆ s₂ :=
begin unfold [mem, subset], intro h, apply finset.subset_of_forall h end
theorem subset_insert (s : hf) (a : hf) : s ⊆ insert a s :=
begin unfold [subset, insert], rewrite to_finset_of_finset, apply finset.subset_insert (to_finset s) end
theorem eq_empty_of_subset_empty {x : hf} (H : x ⊆ ∅) : x = ∅ :=
subset.antisymm H (empty_subset x)
theorem subset_empty_iff (x : hf) : x ⊆ ∅ ↔ x = ∅ :=
iff.intro eq_empty_of_subset_empty (take xeq, by rewrite xeq; apply subset.refl ∅)
theorem erase_subset_erase (a : hf) {s t : hf} : s ⊆ t → erase a s ⊆ erase a t :=
begin unfold [subset, erase], intro h, rewrite *to_finset_of_finset, apply finset.erase_subset_erase a h end
theorem erase_subset (a : hf) (s : hf) : erase a s ⊆ s :=
begin unfold [subset, erase], rewrite to_finset_of_finset, apply finset.erase_subset a (to_finset s) end
theorem erase_eq_of_not_mem {a : hf} {s : hf} : a ∉ s → erase a s = s :=
begin unfold [mem, erase], intro h, rewrite [finset.erase_eq_of_not_mem h, of_finset_to_finset] end
theorem erase_insert_subset (a : hf) (s : hf) : erase a (insert a s) ⊆ s :=
begin unfold [erase, insert, subset], rewrite [*to_finset_of_finset], apply finset.erase_insert_subset a (to_finset s) end
theorem erase_subset_of_subset_insert {a : hf} {s t : hf} (H : s ⊆ insert a t) : erase a s ⊆ t :=
hf.subset.trans (!hf.erase_subset_erase H) (erase_insert_subset a t)
theorem insert_erase_subset (a : hf) (s : hf) : s ⊆ insert a (erase a s) :=
decidable.by_cases
(assume ains : a ∈ s, by rewrite [!insert_erase ains]; apply subset.refl)
(assume nains : a ∉ s,
suffices s ⊆ insert a s, by rewrite [erase_eq_of_not_mem nains]; assumption,
subset_insert s a)
theorem insert_subset_insert (a : hf) {s t : hf} : s ⊆ t → insert a s ⊆ insert a t :=
begin
unfold [subset, insert], intro h,
rewrite *to_finset_of_finset, apply finset.insert_subset_insert a h
end
theorem subset_insert_of_erase_subset {s t : hf} {a : hf} (H : erase a s ⊆ t) : s ⊆ insert a t :=
subset.trans (insert_erase_subset a s) (!insert_subset_insert H)
theorem subset_insert_iff (s t : hf) (a : hf) : s ⊆ insert a t ↔ erase a s ⊆ t :=
iff.intro !erase_subset_of_subset_insert !subset_insert_of_erase_subset
theorem le_of_subset {s₁ s₂ : hf} : s₁ ⊆ s₂ → s₁ ≤ s₂ :=
begin
revert s₂, induction s₁ with a s₁ nain ih,
take s₂, suppose ∅ ⊆ s₂, !zero_le,
take s₂, suppose insert a s₁ ⊆ s₂,
assert a ∈ s₂, from mem_of_subset_of_mem this !mem_insert,
have a ∉ erase a s₂, from !mem_erase,
have s₁ ⊆ erase a s₂, from subset_of_forall
(take x xin, by_cases
(suppose x = a, by subst x; contradiction)
(suppose x ≠ a,
have x ∈ s₂, from mem_of_subset_of_mem `insert a s₁ ⊆ s₂` (mem_insert_of_mem _ `x ∈ s₁`),
mem_erase_of_ne_of_mem `x ≠ a` `x ∈ s₂`)),
have s₁ ≤ erase a s₂, from ih _ this,
assert insert a s₁ ≤ insert a (erase a s₂), from
insert_le_insert_of_le (or.inr `a ∉ erase a s₂`) this,
by rewrite [insert_erase `a ∈ s₂` at this]; exact this
end
/- image -/
definition image (f : hf → hf) (s : hf) : hf :=
of_finset (finset.image f (to_finset s))
theorem image_empty (f : hf → hf) : image f ∅ = ∅ :=
rfl
theorem mem_image_of_mem (f : hf → hf) {s : hf} {a : hf} : a ∈ s → f a ∈ image f s :=
begin unfold [mem, image], intro h, rewrite to_finset_of_finset, apply finset.mem_image_of_mem f h end
theorem mem_image {f : hf → hf} {s : hf} {a : hf} {b : hf} (H1 : a ∈ s) (H2 : f a = b) : b ∈ image f s :=
eq.subst H2 (mem_image_of_mem f H1)
theorem exists_of_mem_image {f : hf → hf} {s : hf} {b : hf} : b ∈ image f s → ∃a, a ∈ s ∧ f a = b :=
begin unfold [mem, image], rewrite to_finset_of_finset, intro h, apply finset.exists_of_mem_image h end
theorem mem_image_iff (f : hf → hf) {s : hf} {y : hf} : y ∈ image f s ↔ ∃x, x ∈ s ∧ f x = y :=
begin unfold [mem, image], rewrite to_finset_of_finset, apply finset.mem_image_iff end
theorem mem_image_eq (f : hf → hf) {s : hf} {y : hf} : y ∈ image f s = ∃x, x ∈ s ∧ f x = y :=
propext (mem_image_iff f)
theorem mem_image_of_mem_image_of_subset {f : hf → hf} {s t : hf} {y : hf} (H1 : y ∈ image f s) (H2 : s ⊆ t) : y ∈ image f t :=
obtain x `x ∈ s` `f x = y`, from exists_of_mem_image H1,
have x ∈ t, from mem_of_subset_of_mem H2 `x ∈ s`,
show y ∈ image f t, from mem_image `x ∈ t` `f x = y`
theorem image_insert (f : hf → hf) (s : hf) (a : hf) : image f (insert a s) = insert (f a) (image f s) :=
begin unfold [image, insert], rewrite [*to_finset_of_finset, finset.image_insert] end
open function
lemma image_compose {f : hf → hf} {g : hf → hf} {s : hf} : image (f∘g) s = image f (image g s) :=
begin unfold image, rewrite [*to_finset_of_finset, finset.image_compose] end
lemma image_subset {a b : hf} (f : hf → hf) : a ⊆ b → image f a ⊆ image f b :=
begin unfold [subset, image], intro h, rewrite *to_finset_of_finset, apply finset.image_subset f h end
theorem image_union (f : hf → hf) (s t : hf) : image f (s ∪ t) = image f s ∪ image f t :=
begin unfold [image, union], rewrite [*to_finset_of_finset, finset.image_union] end
/- powerset -/
definition powerset (s : hf) : hf :=
of_finset (finset.image of_finset (finset.powerset (to_finset s)))
prefix [priority hf.prio] `𝒫`:100 := powerset
theorem powerset_empty : 𝒫 ∅ = insert ∅ ∅ :=
rfl
theorem powerset_insert {a : hf} {s : hf} : a ∉ s → 𝒫 (insert a s) = 𝒫 s ∪ image (insert a) (𝒫 s) :=
begin unfold [mem, powerset, insert, union, image], rewrite [*to_finset_of_finset], intro h,
have (λ (x : finset hf), of_finset (finset.insert a x)) = (λ (x : finset hf), of_finset (finset.insert a (to_finset (of_finset x)))), from
funext (λ x, by rewrite to_finset_of_finset),
rewrite [finset.powerset_insert h, finset.image_union, -*finset.image_compose,↑compose,this]
end
theorem mem_powerset_iff_subset (s : hf) : ∀ x : hf, x ∈ 𝒫 s ↔ x ⊆ s :=
begin
intro x, unfold [mem, powerset, subset], rewrite [to_finset_of_finset, finset.mem_image_eq], apply iff.intro,
suppose (∃ (w : finset hf), finset.mem w (finset.powerset (to_finset s)) ∧ of_finset w = x),
obtain w h₁ h₂, from this,
begin subst x, rewrite to_finset_of_finset, exact iff.mp !finset.mem_powerset_iff_subset h₁ end,
suppose finset.subset (to_finset x) (to_finset s),
assert finset.mem (to_finset x) (finset.powerset (to_finset s)), from iff.mpr !finset.mem_powerset_iff_subset this,
exists.intro (to_finset x) (and.intro this (of_finset_to_finset x))
end
theorem subset_of_mem_powerset {s t : hf} (H : s ∈ 𝒫 t) : s ⊆ t :=
iff.mp (mem_powerset_iff_subset t s) H
theorem mem_powerset_of_subset {s t : hf} (H : s ⊆ t) : s ∈ 𝒫 t :=
iff.mpr (mem_powerset_iff_subset t s) H
theorem empty_mem_powerset (s : hf) : ∅ ∈ 𝒫 s :=
mem_powerset_of_subset (empty_subset s)
/- hf as lists -/
open - [notation] list
definition of_list (s : list hf) : hf :=
@equiv.to_fun _ _ list_nat_equiv_nat s
definition to_list (h : hf) : list hf :=
@equiv.inv _ _ list_nat_equiv_nat h
lemma to_list_of_list (s : list hf) : to_list (of_list s) = s :=
@equiv.left_inv _ _ list_nat_equiv_nat s
lemma of_list_to_list (s : hf) : of_list (to_list s) = s :=
@equiv.right_inv _ _ list_nat_equiv_nat s
lemma to_list_inj {s₁ s₂ : hf} : to_list s₁ = to_list s₂ → s₁ = s₂ :=
λ h, function.injective_of_left_inverse of_list_to_list h
lemma of_list_inj {s₁ s₂ : list hf} : of_list s₁ = of_list s₂ → s₁ = s₂ :=
λ h, function.injective_of_left_inverse to_list_of_list h
definition nil : hf :=
of_list list.nil
lemma empty_eq_nil : ∅ = nil :=
rfl
definition cons (a l : hf) : hf :=
of_list (list.cons a (to_list l))
infixr :: := cons
lemma cons_ne_nil (a l : hf) : a::l ≠ nil :=
by contradiction
lemma head_eq_of_cons_eq {h₁ h₂ t₁ t₂ : hf} : (h₁::t₁) = (h₂::t₂) → h₁ = h₂ :=
begin unfold cons, intro h, apply list.head_eq_of_cons_eq (of_list_inj h) end
lemma tail_eq_of_cons_eq {h₁ h₂ t₁ t₂ : hf} : (h₁::t₁) = (h₂::t₂) → t₁ = t₂ :=
begin unfold cons, intro h, apply to_list_inj (list.tail_eq_of_cons_eq (of_list_inj h)) end
lemma cons_inj {a : hf} : injective (cons a) :=
take l₁ l₂, assume Pe, tail_eq_of_cons_eq Pe
/- append -/
definition append (l₁ l₂ : hf) : hf :=
of_list (list.append (to_list l₁) (to_list l₂))
notation l₁ ++ l₂ := append l₁ l₂
theorem append_nil_left [simp] (t : hf) : nil ++ t = t :=
begin unfold [nil, append], rewrite [to_list_of_list, list.append_nil_left, of_list_to_list] end
theorem append_cons [simp] (x s t : hf) : (x::s) ++ t = x::(s ++ t) :=
begin unfold [cons, append], rewrite [*to_list_of_list, list.append_cons] end
theorem append_nil_right [simp] (t : hf) : t ++ nil = t :=
begin unfold [nil, append], rewrite [to_list_of_list, list.append_nil_right, of_list_to_list] end
theorem append.assoc [simp] (s t u : hf) : s ++ t ++ u = s ++ (t ++ u) :=
begin unfold append, rewrite [*to_list_of_list, list.append.assoc] end
/- length -/
definition length (l : hf) : nat :=
list.length (to_list l)
theorem length_nil [simp] : length nil = 0 :=
begin unfold [length, nil] end
theorem length_cons [simp] (x t : hf) : length (x::t) = length t + 1 :=
begin unfold [length, cons], rewrite to_list_of_list end
theorem length_append [simp] (s t : hf) : length (s ++ t) = length s + length t :=
begin unfold [length, append], rewrite [to_list_of_list, list.length_append] end
theorem eq_nil_of_length_eq_zero {l : hf} : length l = 0 → l = nil :=
begin unfold [length, nil], intro h, rewrite [-list.eq_nil_of_length_eq_zero h, of_list_to_list] end
theorem ne_nil_of_length_eq_succ {l : hf} {n : nat} : length l = succ n → l ≠ nil :=
begin unfold [length, nil], intro h₁ h₂, subst l, rewrite to_list_of_list at h₁, contradiction end
/- head and tail -/
definition head (l : hf) : hf :=
list.head (to_list l)
theorem head_cons [simp] (a l : hf) : head (a::l) = a :=
begin unfold [head, cons], rewrite to_list_of_list end
private lemma to_list_ne_list_nil {s : hf} : s ≠ nil → to_list s ≠ list.nil :=
begin
unfold nil,
intro h,
suppose to_list s = list.nil,
by rewrite [-this at h, of_list_to_list at h]; exact absurd rfl h
end
theorem head_append [simp] (t : hf) {s : hf} : s ≠ nil → head (s ++ t) = head s :=
begin
unfold [nil, head, append], rewrite to_list_of_list,
suppose s ≠ of_list list.nil,
by rewrite [list.head_append _ (to_list_ne_list_nil this)]
end
definition tail (l : hf) : hf :=
of_list (list.tail (to_list l))
theorem tail_nil [simp] : tail nil = nil :=
begin unfold [tail, nil] end
theorem tail_cons [simp] (a l : hf) : tail (a::l) = l :=
begin unfold [tail, cons], rewrite [to_list_of_list, list.tail_cons, of_list_to_list] end
theorem cons_head_tail {l : hf} : l ≠ nil → (head l)::(tail l) = l :=
begin
unfold [nil, head, tail, cons],
suppose l ≠ of_list list.nil,
by rewrite [to_list_of_list, list.cons_head_tail (to_list_ne_list_nil this), of_list_to_list]
end
end hf
|
9ca79bba52e97e45aff1092fb78e3929e3c662bc
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/analysis/special_functions/trigonometric/inverse.lean
|
2112d3833abbeb767b7a31cf889ed6b34f655605
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 15,401
|
lean
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Abhimanyu Pallavi Sudhir, Jean Lo, Calle Sönne, Benjamin Davidson
-/
import analysis.special_functions.trigonometric.basic
import topology.algebra.order.proj_Icc
/-!
# Inverse trigonometric functions.
See also `analysis.special_functions.trigonometric.arctan` for the inverse tan function.
(This is delayed as it is easier to set up after developing complex trigonometric functions.)
Basic inequalities on trigonometric functions.
-/
noncomputable theory
open_locale classical topological_space filter
open set filter
open_locale real
namespace real
/-- Inverse of the `sin` function, returns values in the range `-π / 2 ≤ arcsin x ≤ π / 2`.
It defaults to `-π / 2` on `(-∞, -1)` and to `π / 2` on `(1, ∞)`. -/
@[pp_nodot] noncomputable def arcsin : ℝ → ℝ :=
coe ∘ Icc_extend (neg_le_self zero_le_one) sin_order_iso.symm
lemma arcsin_mem_Icc (x : ℝ) : arcsin x ∈ Icc (-(π / 2)) (π / 2) := subtype.coe_prop _
@[simp] lemma range_arcsin : range arcsin = Icc (-(π / 2)) (π / 2) :=
by { rw [arcsin, range_comp coe], simp [Icc] }
lemma arcsin_le_pi_div_two (x : ℝ) : arcsin x ≤ π / 2 := (arcsin_mem_Icc x).2
lemma neg_pi_div_two_le_arcsin (x : ℝ) : -(π / 2) ≤ arcsin x := (arcsin_mem_Icc x).1
lemma arcsin_proj_Icc (x : ℝ) :
arcsin (proj_Icc (-1) 1 (neg_le_self zero_le_one) x) = arcsin x :=
by rw [arcsin, function.comp_app, Icc_extend_coe, function.comp_app, Icc_extend]
lemma sin_arcsin' {x : ℝ} (hx : x ∈ Icc (-1 : ℝ) 1) : sin (arcsin x) = x :=
by simpa [arcsin, Icc_extend_of_mem _ _ hx, -order_iso.apply_symm_apply]
using subtype.ext_iff.1 (sin_order_iso.apply_symm_apply ⟨x, hx⟩)
lemma sin_arcsin {x : ℝ} (hx₁ : -1 ≤ x) (hx₂ : x ≤ 1) : sin (arcsin x) = x :=
sin_arcsin' ⟨hx₁, hx₂⟩
lemma arcsin_sin' {x : ℝ} (hx : x ∈ Icc (-(π / 2)) (π / 2)) : arcsin (sin x) = x :=
inj_on_sin (arcsin_mem_Icc _) hx $ by rw [sin_arcsin (neg_one_le_sin _) (sin_le_one _)]
lemma arcsin_sin {x : ℝ} (hx₁ : -(π / 2) ≤ x) (hx₂ : x ≤ π / 2) : arcsin (sin x) = x :=
arcsin_sin' ⟨hx₁, hx₂⟩
lemma strict_mono_on_arcsin : strict_mono_on arcsin (Icc (-1) 1) :=
(subtype.strict_mono_coe _).comp_strict_mono_on $
sin_order_iso.symm.strict_mono.strict_mono_on_Icc_extend _
lemma monotone_arcsin : monotone arcsin :=
(subtype.mono_coe _).comp $ sin_order_iso.symm.monotone.Icc_extend _
lemma inj_on_arcsin : inj_on arcsin (Icc (-1) 1) := strict_mono_on_arcsin.inj_on
lemma arcsin_inj {x y : ℝ} (hx₁ : -1 ≤ x) (hx₂ : x ≤ 1) (hy₁ : -1 ≤ y) (hy₂ : y ≤ 1) :
arcsin x = arcsin y ↔ x = y :=
inj_on_arcsin.eq_iff ⟨hx₁, hx₂⟩ ⟨hy₁, hy₂⟩
@[continuity]
lemma continuous_arcsin : continuous arcsin :=
continuous_subtype_coe.comp sin_order_iso.symm.continuous.Icc_extend'
lemma continuous_at_arcsin {x : ℝ} : continuous_at arcsin x :=
continuous_arcsin.continuous_at
lemma arcsin_eq_of_sin_eq {x y : ℝ} (h₁ : sin x = y) (h₂ : x ∈ Icc (-(π / 2)) (π / 2)) :
arcsin y = x :=
begin
subst y,
exact inj_on_sin (arcsin_mem_Icc _) h₂ (sin_arcsin' (sin_mem_Icc x))
end
@[simp] lemma arcsin_zero : arcsin 0 = 0 :=
arcsin_eq_of_sin_eq sin_zero ⟨neg_nonpos.2 pi_div_two_pos.le, pi_div_two_pos.le⟩
@[simp] lemma arcsin_one : arcsin 1 = π / 2 :=
arcsin_eq_of_sin_eq sin_pi_div_two $ right_mem_Icc.2 (neg_le_self pi_div_two_pos.le)
lemma arcsin_of_one_le {x : ℝ} (hx : 1 ≤ x) : arcsin x = π / 2 :=
by rw [← arcsin_proj_Icc, proj_Icc_of_right_le _ hx, subtype.coe_mk, arcsin_one]
lemma arcsin_neg_one : arcsin (-1) = -(π / 2) :=
arcsin_eq_of_sin_eq (by rw [sin_neg, sin_pi_div_two]) $
left_mem_Icc.2 (neg_le_self pi_div_two_pos.le)
lemma arcsin_of_le_neg_one {x : ℝ} (hx : x ≤ -1) : arcsin x = -(π / 2) :=
by rw [← arcsin_proj_Icc, proj_Icc_of_le_left _ hx, subtype.coe_mk, arcsin_neg_one]
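-- Hedged illustration (added; not part of the original file): the junk values
-- of `arcsin` outside `[-1, 1]` are pinned down by the two lemmas above.
example : arcsin (2 : ℝ) = π / 2 := arcsin_of_one_le (by norm_num)
example : arcsin (-2 : ℝ) = -(π / 2) := arcsin_of_le_neg_one (by norm_num)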
@[simp] lemma arcsin_neg (x : ℝ) : arcsin (-x) = -arcsin x :=
begin
cases le_total x (-1) with hx₁ hx₁,
{ rw [arcsin_of_le_neg_one hx₁, neg_neg, arcsin_of_one_le (le_neg.2 hx₁)] },
cases le_total 1 x with hx₂ hx₂,
{ rw [arcsin_of_one_le hx₂, arcsin_of_le_neg_one (neg_le_neg hx₂)] },
refine arcsin_eq_of_sin_eq _ _,
{ rw [sin_neg, sin_arcsin hx₁ hx₂] },
{ exact ⟨neg_le_neg (arcsin_le_pi_div_two _), neg_le.2 (neg_pi_div_two_le_arcsin _)⟩ }
end
lemma arcsin_le_iff_le_sin {x y : ℝ} (hx : x ∈ Icc (-1 : ℝ) 1) (hy : y ∈ Icc (-(π / 2)) (π / 2)) :
arcsin x ≤ y ↔ x ≤ sin y :=
by rw [← arcsin_sin' hy, strict_mono_on_arcsin.le_iff_le hx (sin_mem_Icc _), arcsin_sin' hy]
lemma arcsin_le_iff_le_sin' {x y : ℝ} (hy : y ∈ Ico (-(π / 2)) (π / 2)) :
arcsin x ≤ y ↔ x ≤ sin y :=
begin
cases le_total x (-1) with hx₁ hx₁,
{ simp [arcsin_of_le_neg_one hx₁, hy.1, hx₁.trans (neg_one_le_sin _)] },
cases lt_or_le 1 x with hx₂ hx₂,
{ simp [arcsin_of_one_le hx₂.le, hy.2.not_le, (sin_le_one y).trans_lt hx₂] },
exact arcsin_le_iff_le_sin ⟨hx₁, hx₂⟩ (mem_Icc_of_Ico hy)
end
lemma le_arcsin_iff_sin_le {x y : ℝ} (hx : x ∈ Icc (-(π / 2)) (π / 2)) (hy : y ∈ Icc (-1 : ℝ) 1) :
x ≤ arcsin y ↔ sin x ≤ y :=
by rw [← neg_le_neg_iff, ← arcsin_neg,
arcsin_le_iff_le_sin ⟨neg_le_neg hy.2, neg_le.2 hy.1⟩ ⟨neg_le_neg hx.2, neg_le.2 hx.1⟩,
sin_neg, neg_le_neg_iff]
lemma le_arcsin_iff_sin_le' {x y : ℝ} (hx : x ∈ Ioc (-(π / 2)) (π / 2)) :
x ≤ arcsin y ↔ sin x ≤ y :=
by rw [← neg_le_neg_iff, ← arcsin_neg, arcsin_le_iff_le_sin' ⟨neg_le_neg hx.2, neg_lt.2 hx.1⟩,
sin_neg, neg_le_neg_iff]
lemma arcsin_lt_iff_lt_sin {x y : ℝ} (hx : x ∈ Icc (-1 : ℝ) 1) (hy : y ∈ Icc (-(π / 2)) (π / 2)) :
arcsin x < y ↔ x < sin y :=
not_le.symm.trans $ (not_congr $ le_arcsin_iff_sin_le hy hx).trans not_le
lemma arcsin_lt_iff_lt_sin' {x y : ℝ} (hy : y ∈ Ioc (-(π / 2)) (π / 2)) :
arcsin x < y ↔ x < sin y :=
not_le.symm.trans $ (not_congr $ le_arcsin_iff_sin_le' hy).trans not_le
lemma lt_arcsin_iff_sin_lt {x y : ℝ} (hx : x ∈ Icc (-(π / 2)) (π / 2)) (hy : y ∈ Icc (-1 : ℝ) 1) :
x < arcsin y ↔ sin x < y :=
not_le.symm.trans $ (not_congr $ arcsin_le_iff_le_sin hy hx).trans not_le
lemma lt_arcsin_iff_sin_lt' {x y : ℝ} (hx : x ∈ Ico (-(π / 2)) (π / 2)) :
x < arcsin y ↔ sin x < y :=
not_le.symm.trans $ (not_congr $ arcsin_le_iff_le_sin' hx).trans not_le
lemma arcsin_eq_iff_eq_sin {x y : ℝ} (hy : y ∈ Ioo (-(π / 2)) (π / 2)) :
arcsin x = y ↔ x = sin y :=
by simp only [le_antisymm_iff, arcsin_le_iff_le_sin' (mem_Ico_of_Ioo hy),
le_arcsin_iff_sin_le' (mem_Ioc_of_Ioo hy)]
@[simp] lemma arcsin_nonneg {x : ℝ} : 0 ≤ arcsin x ↔ 0 ≤ x :=
(le_arcsin_iff_sin_le' ⟨neg_lt_zero.2 pi_div_two_pos, pi_div_two_pos.le⟩).trans $ by rw [sin_zero]
@[simp] lemma arcsin_nonpos {x : ℝ} : arcsin x ≤ 0 ↔ x ≤ 0 :=
neg_nonneg.symm.trans $ arcsin_neg x ▸ arcsin_nonneg.trans neg_nonneg
@[simp] lemma arcsin_eq_zero_iff {x : ℝ} : arcsin x = 0 ↔ x = 0 :=
by simp [le_antisymm_iff]
@[simp] lemma zero_eq_arcsin_iff {x} : 0 = arcsin x ↔ x = 0 :=
eq_comm.trans arcsin_eq_zero_iff
@[simp] lemma arcsin_pos {x : ℝ} : 0 < arcsin x ↔ 0 < x :=
lt_iff_lt_of_le_iff_le arcsin_nonpos
@[simp] lemma arcsin_lt_zero {x : ℝ} : arcsin x < 0 ↔ x < 0 :=
lt_iff_lt_of_le_iff_le arcsin_nonneg
@[simp] lemma arcsin_lt_pi_div_two {x : ℝ} : arcsin x < π / 2 ↔ x < 1 :=
(arcsin_lt_iff_lt_sin' (right_mem_Ioc.2 $ neg_lt_self pi_div_two_pos)).trans $
by rw sin_pi_div_two
@[simp] lemma neg_pi_div_two_lt_arcsin {x : ℝ} : -(π / 2) < arcsin x ↔ -1 < x :=
(lt_arcsin_iff_sin_lt' $ left_mem_Ico.2 $ neg_lt_self pi_div_two_pos).trans $
by rw [sin_neg, sin_pi_div_two]
@[simp] lemma arcsin_eq_pi_div_two {x : ℝ} : arcsin x = π / 2 ↔ 1 ≤ x :=
⟨λ h, not_lt.1 $ λ h', (arcsin_lt_pi_div_two.2 h').ne h, arcsin_of_one_le⟩
@[simp] lemma pi_div_two_eq_arcsin {x} : π / 2 = arcsin x ↔ 1 ≤ x :=
eq_comm.trans arcsin_eq_pi_div_two
@[simp] lemma pi_div_two_le_arcsin {x} : π / 2 ≤ arcsin x ↔ 1 ≤ x :=
(arcsin_le_pi_div_two x).le_iff_eq.trans pi_div_two_eq_arcsin
@[simp] lemma arcsin_eq_neg_pi_div_two {x : ℝ} : arcsin x = -(π / 2) ↔ x ≤ -1 :=
⟨λ h, not_lt.1 $ λ h', (neg_pi_div_two_lt_arcsin.2 h').ne' h, arcsin_of_le_neg_one⟩
@[simp] lemma neg_pi_div_two_eq_arcsin {x} : -(π / 2) = arcsin x ↔ x ≤ -1 :=
eq_comm.trans arcsin_eq_neg_pi_div_two
@[simp] lemma arcsin_le_neg_pi_div_two {x} : arcsin x ≤ -(π / 2) ↔ x ≤ -1 :=
(neg_pi_div_two_le_arcsin x).le_iff_eq.trans arcsin_eq_neg_pi_div_two
@[simp] lemma pi_div_four_le_arcsin {x} : π / 4 ≤ arcsin x ↔ sqrt 2 / 2 ≤ x :=
by { rw [← sin_pi_div_four, le_arcsin_iff_sin_le'], have := pi_pos, split; linarith }
lemma maps_to_sin_Ioo : maps_to sin (Ioo (-(π / 2)) (π / 2)) (Ioo (-1) 1) :=
λ x h, by rwa [mem_Ioo, ← arcsin_lt_pi_div_two, ← neg_pi_div_two_lt_arcsin,
arcsin_sin h.1.le h.2.le]
/-- `real.sin` as a `local_homeomorph` between `(-π / 2, π / 2)` and `(-1, 1)`. -/
@[simp] def sin_local_homeomorph : local_homeomorph ℝ ℝ :=
{ to_fun := sin,
inv_fun := arcsin,
source := Ioo (-(π / 2)) (π / 2),
target := Ioo (-1) 1,
map_source' := maps_to_sin_Ioo,
map_target' := λ y hy, ⟨neg_pi_div_two_lt_arcsin.2 hy.1, arcsin_lt_pi_div_two.2 hy.2⟩,
left_inv' := λ x hx, arcsin_sin hx.1.le hx.2.le,
right_inv' := λ y hy, sin_arcsin hy.1.le hy.2.le,
open_source := is_open_Ioo,
open_target := is_open_Ioo,
continuous_to_fun := continuous_sin.continuous_on,
continuous_inv_fun := continuous_arcsin.continuous_on }
lemma cos_arcsin_nonneg (x : ℝ) : 0 ≤ cos (arcsin x) :=
cos_nonneg_of_mem_Icc ⟨neg_pi_div_two_le_arcsin _, arcsin_le_pi_div_two _⟩
-- The junk values for `arcsin` and `sqrt` make this true even outside `[-1, 1]`.
lemma cos_arcsin (x : ℝ) : cos (arcsin x) = sqrt (1 - x ^ 2) :=
begin
by_cases hx₁ : -1 ≤ x, swap,
{ rw not_le at hx₁,
rw [arcsin_of_le_neg_one hx₁.le, cos_neg, cos_pi_div_two, sqrt_eq_zero_of_nonpos],
nlinarith },
by_cases hx₂ : x ≤ 1, swap,
{ rw not_le at hx₂,
rw [arcsin_of_one_le hx₂.le, cos_pi_div_two, sqrt_eq_zero_of_nonpos],
nlinarith },
have : sin (arcsin x) ^ 2 + cos (arcsin x) ^ 2 = 1 := sin_sq_add_cos_sq (arcsin x),
rw [← eq_sub_iff_add_eq', ← sqrt_inj (sq_nonneg _) (sub_nonneg.2 (sin_sq_le_one (arcsin x))),
sq, sqrt_mul_self (cos_arcsin_nonneg _)] at this,
rw [this, sin_arcsin hx₁ hx₂],
end
-- The junk values for `arcsin` and `sqrt` make this true even outside `[-1, 1]`.
lemma tan_arcsin (x : ℝ) : tan (arcsin x) = x / sqrt (1 - x ^ 2) :=
begin
rw [tan_eq_sin_div_cos, cos_arcsin],
by_cases hx₁ : -1 ≤ x, swap,
{ have h : sqrt (1 - x ^ 2) = 0, { exact sqrt_eq_zero_of_nonpos (by nlinarith) }, rw h, simp },
by_cases hx₂ : x ≤ 1, swap,
{ have h : sqrt (1 - x ^ 2) = 0, { exact sqrt_eq_zero_of_nonpos (by nlinarith) }, rw h, simp },
rw sin_arcsin hx₁ hx₂
end
/-- Inverse of the `cos` function, returns values in the range `0 ≤ arccos x ≤ π`.
It defaults to `π` on `(-∞, -1)` and to `0` on `(1, ∞)`. -/
@[pp_nodot] noncomputable def arccos (x : ℝ) : ℝ :=
π / 2 - arcsin x
lemma arccos_eq_pi_div_two_sub_arcsin (x : ℝ) : arccos x = π / 2 - arcsin x := rfl
lemma arcsin_eq_pi_div_two_sub_arccos (x : ℝ) : arcsin x = π / 2 - arccos x :=
by simp [arccos]
lemma arccos_le_pi (x : ℝ) : arccos x ≤ π :=
by unfold arccos; linarith [neg_pi_div_two_le_arcsin x]
lemma arccos_nonneg (x : ℝ) : 0 ≤ arccos x :=
by unfold arccos; linarith [arcsin_le_pi_div_two x]
@[simp] lemma arccos_pos {x : ℝ} : 0 < arccos x ↔ x < 1 :=
by simp [arccos]
lemma cos_arccos {x : ℝ} (hx₁ : -1 ≤ x) (hx₂ : x ≤ 1) : cos (arccos x) = x :=
by rw [arccos, cos_pi_div_two_sub, sin_arcsin hx₁ hx₂]
lemma arccos_cos {x : ℝ} (hx₁ : 0 ≤ x) (hx₂ : x ≤ π) : arccos (cos x) = x :=
by rw [arccos, ← sin_pi_div_two_sub, arcsin_sin]; simp [sub_eq_add_neg]; linarith
lemma strict_anti_on_arccos : strict_anti_on arccos (Icc (-1) 1) :=
λ x hx y hy h, sub_lt_sub_left (strict_mono_on_arcsin hx hy h) _
lemma arccos_inj_on : inj_on arccos (Icc (-1) 1) := strict_anti_on_arccos.inj_on
lemma arccos_inj {x y : ℝ} (hx₁ : -1 ≤ x) (hx₂ : x ≤ 1) (hy₁ : -1 ≤ y) (hy₂ : y ≤ 1) :
arccos x = arccos y ↔ x = y :=
arccos_inj_on.eq_iff ⟨hx₁, hx₂⟩ ⟨hy₁, hy₂⟩
@[simp] lemma arccos_zero : arccos 0 = π / 2 := by simp [arccos]
@[simp] lemma arccos_one : arccos 1 = 0 := by simp [arccos]
@[simp] lemma arccos_neg_one : arccos (-1) = π := by simp [arccos, add_halves]
@[simp] lemma arccos_eq_zero {x} : arccos x = 0 ↔ 1 ≤ x :=
by simp [arccos, sub_eq_zero]
@[simp] lemma arccos_eq_pi_div_two {x} : arccos x = π / 2 ↔ x = 0 :=
by simp [arccos]
@[simp] lemma arccos_eq_pi {x} : arccos x = π ↔ x ≤ -1 :=
by rw [arccos, sub_eq_iff_eq_add, ← sub_eq_iff_eq_add', div_two_sub_self, neg_pi_div_two_eq_arcsin]
lemma arccos_neg (x : ℝ) : arccos (-x) = π - arccos x :=
by rw [← add_halves π, arccos, arcsin_neg, arccos, add_sub_assoc, sub_sub_self, sub_neg_eq_add]
lemma arccos_of_one_le {x : ℝ} (hx : 1 ≤ x) : arccos x = 0 :=
by rw [arccos, arcsin_of_one_le hx, sub_self]
lemma arccos_of_le_neg_one {x : ℝ} (hx : x ≤ -1) : arccos x = π :=
by rw [arccos, arcsin_of_le_neg_one hx, sub_neg_eq_add, add_halves']
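-- Hedged illustration (added; not part of the original file): the corresponding
-- junk values of `arccos` outside `[-1, 1]`.
example : arccos (2 : ℝ) = 0 := arccos_of_one_le (by norm_num)
example : arccos (-2 : ℝ) = π := arccos_of_le_neg_one (by norm_num)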
-- The junk values for `arccos` and `sqrt` make this true even outside `[-1, 1]`.
lemma sin_arccos (x : ℝ) : sin (arccos x) = sqrt (1 - x ^ 2) :=
begin
by_cases hx₁ : -1 ≤ x, swap,
{ rw not_le at hx₁,
rw [arccos_of_le_neg_one hx₁.le, sin_pi, sqrt_eq_zero_of_nonpos],
nlinarith },
by_cases hx₂ : x ≤ 1, swap,
{ rw not_le at hx₂,
rw [arccos_of_one_le hx₂.le, sin_zero, sqrt_eq_zero_of_nonpos],
nlinarith },
rw [arccos_eq_pi_div_two_sub_arcsin, sin_pi_div_two_sub, cos_arcsin]
end
@[simp] lemma arccos_le_pi_div_two {x} : arccos x ≤ π / 2 ↔ 0 ≤ x := by simp [arccos]
@[simp] lemma arccos_lt_pi_div_two {x : ℝ} : arccos x < π / 2 ↔ 0 < x := by simp [arccos]
@[simp] lemma arccos_le_pi_div_four {x} : arccos x ≤ π / 4 ↔ sqrt 2 / 2 ≤ x :=
by { rw [arccos, ← pi_div_four_le_arcsin], split; { intro, linarith } }
@[continuity]
lemma continuous_arccos : continuous arccos := continuous_const.sub continuous_arcsin
-- The junk values for `arccos` and `sqrt` make this true even outside `[-1, 1]`.
lemma tan_arccos (x : ℝ) : tan (arccos x) = sqrt (1 - x ^ 2) / x :=
by rw [arccos, tan_pi_div_two_sub, tan_arcsin, inv_div]
-- The junk values for `arccos` and `sqrt` make this true even for `1 < x`.
lemma arccos_eq_arcsin {x : ℝ} (h : 0 ≤ x) :
arccos x = arcsin (sqrt (1 - x ^ 2)) :=
(arcsin_eq_of_sin_eq (sin_arccos _)
⟨(left.neg_nonpos_iff.2 (div_nonneg pi_pos.le (by norm_num))).trans (arccos_nonneg _),
arccos_le_pi_div_two.2 h⟩).symm
-- The junk values for `arcsin` and `sqrt` make this true even for `1 < x`.
lemma arcsin_eq_arccos {x : ℝ} (h : 0 ≤ x) :
arcsin x = arccos (sqrt (1 - x ^ 2)) :=
begin
rw [eq_comm, ← cos_arcsin],
exact arccos_cos (arcsin_nonneg.2 h)
((arcsin_le_pi_div_two _).trans (div_le_self pi_pos.le one_le_two))
end
end real
|
6ae4ca6ac24d5b90d9ae3a74c67860b5900cdcd8
|
9dd3f3912f7321eb58ee9aa8f21778ad6221f87c
|
/tests/lean/run/t10.lean
|
5b0b3ab86ee049ed3a65629422788ce8691f988d
|
[
"Apache-2.0"
] |
permissive
|
bre7k30/lean
|
de893411bcfa7b3c5572e61b9e1c52951b310aa4
|
5a924699d076dab1bd5af23a8f910b433e598d7a
|
refs/heads/master
| 1,610,900,145,817
| 1,488,006,845,000
| 1,488,006,845,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 109
|
lean
|
set_option pp.colors true
set_option pp.unicode false
print options
set_option pp.unicode true
print options
|
655d25fe28e4fd69a9e7eec9f80add268cb26da4
|
acc85b4be2c618b11fc7cb3005521ae6858a8d07
|
/data/set/countable.lean
|
9416a473da1f1223cb7bdd7887df6728786d5de0
|
[
"Apache-2.0"
] |
permissive
|
linpingchuan/mathlib
|
d49990b236574df2a45d9919ba43c923f693d341
|
5ad8020f67eb13896a41cc7691d072c9331b1f76
|
refs/heads/master
| 1,626,019,377,808
| 1,508,048,784,000
| 1,508,048,784,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 6,538
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Johannes Hölzl
Countable sets.
-/
import data.encodable data.set.finite logic.function
noncomputable theory
open function set encodable
open classical (hiding some)
local attribute [instance] decidable_inhabited prop_decidable
universes u v w
variables {α : Type u} {β : Type v} {γ : Type w}
namespace set
/-- Countable sets
A set is countable if there exists an injective function from the set into the natural numbers.
This is chosen instead of a surjective function, as that would require `α` to be nonempty.
-/
def countable (s : set α) : Prop := ∃f:α → ℕ, ∀x ∈ s, ∀y ∈ s, f x = f y → x = y
lemma countable_iff_exists_surjective [ne : inhabited α] {s : set α} :
countable s ↔ (∃f:ℕ → α, s ⊆ range f) :=
iff.intro
(assume ⟨f, hf⟩, ⟨inv_fun_on f s, assume a ha, ⟨f a, inv_fun_on_eq' hf ha⟩⟩)
(assume ⟨f, hf⟩, ⟨inv_fun f, assume x hx y hy h,
calc x = f (inv_fun f x) : (inv_fun_eq $ hf hx).symm
... = f (inv_fun f y) : by rw [h]
... = y : inv_fun_eq $ hf hy⟩)
lemma countable.to_encodable {s : set α} (h : countable s) : encodable {a // a ∈ s} :=
let f := classical.some h in
have hf : ∀x∈s, ∀y∈s, f x = f y → x = y, from classical.some_spec h,
let f' : {a // a ∈ s} → ℕ := f ∘ subtype.val in
encodable_of_inj f' $ assume ⟨a, ha⟩ ⟨b, hb⟩ (h : f a = f b), subtype.eq $ hf a ha b hb h
lemma countable_encodable' {s : set α} (e : encodable {a // a∈s}) : countable s :=
⟨λx, if h : x ∈ s then @encode _ e ⟨x, h⟩ else 0, assume x hx y hy h,
have @encode _ e ⟨x, hx⟩ = @encode _ e ⟨y, hy⟩,
by simp [hx, hy] at h; assumption,
have decode {a // a∈s} (@encode _ e ⟨x, hx⟩) = decode {a // a∈s} (@encode _ e ⟨y, hy⟩),
from congr_arg _ this,
by simp [encodek] at this; injection this⟩
lemma countable_encodable [e : encodable α] {s : set α} : countable s :=
⟨encode, assume x _ y _ eq,
have decode α (encode x) = decode α (encode y), from congr_arg _ eq,
by simpa [encodek]⟩
@[simp] lemma countable_empty : countable (∅ : set α) :=
⟨λ_, 0, by simp⟩
@[simp] lemma countable_singleton {a : α} : countable ({a} : set α) :=
⟨λ_, 0, by simp⟩
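-- Illustrative example (added; not part of the original file): over an
-- encodable type such as `ℕ`, every set is countable.
example (s : set ℕ) : countable s := countable_encodable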
lemma countable_subset {s₁ s₂ : set α} (h : s₁ ⊆ s₂) : countable s₂ → countable s₁
| ⟨f, hf⟩ := ⟨f, assume x hx y hy eq, hf x (h hx) y (h hy) eq⟩
lemma countable_image {s : set α} {f : α → β} (hs : countable s) : countable (f '' s) :=
let f' : {a // a ∈ s} → {b // b ∈ f '' s} := λ⟨a, ha⟩, ⟨f a, mem_image_of_mem f ha⟩ in
have hf' : surjective f', from assume ⟨b, a, ha, hab⟩, ⟨⟨a, ha⟩, subtype.eq hab⟩,
countable_encodable' $ @encodable_of_inj _ _ hs.to_encodable (surj_inv hf') (injective_surj_inv hf')
lemma countable_sUnion {s : set (set α)} (hs : countable s) (h : ∀a∈s, countable a) :
countable (⋃₀ s) :=
by_cases
(assume : nonempty α, let ⟨a⟩ := this, inh : inhabited α := ⟨a⟩ in
let ⟨fs, hfs⟩ := countable_iff_exists_surjective.mp hs in
have ∀t, ∃ft:ℕ → α, t ∈ s → t ⊆ range ft,
from assume t,
by_cases
(assume : t ∈ s,
let ⟨ft, hft⟩ := (@countable_iff_exists_surjective α inh _).mp $ h t this in
⟨ft, assume _, hft⟩)
(assume : t ∉ s, ⟨λ_, a, assume h, (this h).elim⟩),
have ∃ft:(∀t:set α, ℕ → α), ∀t∈s, t ⊆ range (ft t),
by simp [classical.skolem] at this; assumption,
let ⟨ft, hft⟩ := this in
(@countable_iff_exists_surjective α inh _).mpr
⟨(λp:ℕ×ℕ, ft (fs p.1) p.2) ∘ nat.unpair,
by simp [subset_def];
from assume a t ha ht,
let ⟨i, hi⟩ := hfs ht, ⟨j, hj⟩ := hft t ht ha in
⟨nat.mkpair i j, by simp [function.comp, nat.unpair_mkpair, hi, hj]⟩⟩)
(assume : ¬ nonempty α, ⟨λ_, 0, assume a, (this ⟨a⟩).elim⟩)
lemma countable_bUnion {s : set α} {t : α → set β} (hs : countable s) (ht : ∀a∈s, countable (t a)) :
countable (⋃a∈s, t a) :=
have ⋃₀ (t '' s) = (⋃a∈s, t a), from lattice.Sup_image,
by rw [←this];
from (countable_sUnion (countable_image hs) $ assume a ⟨s', hs', eq⟩, eq ▸ ht s' hs')
lemma countable_Union {t : α → set β} [encodable α] (ht : ∀a, countable (t a)) :
countable (⋃a, t a) :=
suffices countable (⋃a∈(univ : set α), t a), by simpa,
countable_bUnion countable_encodable (assume a _, ht a)
lemma countable_Union_Prop {p : Prop} {t : p → set β} (ht : ∀h:p, countable (t h)) :
countable (⋃h:p, t h) :=
by by_cases p; simp [h, ht]
lemma countable_union {s₁ s₂ : set α} (h₁ : countable s₁) (h₂ : countable s₂) : countable (s₁ ∪ s₂) :=
have s₁ ∪ s₂ = (⨆b ∈ ({tt, ff} : set bool), bool.cases_on b s₁ s₂),
by simp [lattice.supr_or, lattice.supr_sup_eq]; refl,
by rw [this]; from countable_bUnion countable_encodable (assume b,
match b with
| tt := by simp [h₂]
| ff := by simp [h₁]
end)
lemma countable_insert {s : set α} {a : α} (h : countable s) : countable (insert a s) :=
by rw [set.insert_eq]; from countable_union countable_singleton h
lemma countable_finite {s : set α} (h : finite s) : countable s :=
h.rec_on countable_empty $ assume a s _ _, countable_insert
lemma countable_set_of_finite_subset {s : set α} (h : countable s) :
countable {t | finite t ∧ t ⊆ s } :=
have {t | finite t ∧ t ⊆ s } ⊆
(λt, {a:α | ∃h:a∈s, subtype.mk a h ∈ t} : finset {a:α // a ∈ s} → set α) '' univ,
from assume t ht,
begin
cases ht with ht₁ ht₂,
induction ht₁,
case finite.empty { exact ⟨∅, mem_univ _, by simp⟩ },
case finite.insert a t ha ht ih {
exact
have has : a ∈ s, from ht₂ $ mem_insert _ _,
have t ⊆ s, from assume x hx, ht₂ $ mem_insert_of_mem _ hx,
let ⟨t', ht', eq⟩ := ih this in
⟨insert ⟨a, has⟩ t', mem_univ _,
set.ext $ assume x,
begin
simp [eq.symm, iff_def, or_imp_distrib, has] {contextual:=tt},
constructor,
exact assume hxs hxt', ⟨hxs, or.inr hxt'⟩,
exact assume hxs, or.imp (congr_arg subtype.val) (assume hxt', ⟨hxs, hxt'⟩)
end⟩ }
end,
by have enc := h.to_encodable; exact
countable_subset this (countable_image countable_encodable)
end set
|
9cf5ce6b1568f852d0c0e437cf77330cef0160fa
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/category_theory/thin.lean
|
174ec06b38aca13eb82645e171986eabc2ba0136
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,157
|
lean
|
/-
Copyright (c) 2019 Scott Morrison, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Bhavik Mehta
-/
import category_theory.functor.category
import category_theory.isomorphism
/-!
# Thin categories
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> https://github.com/leanprover-community/mathlib4/pull/822
> Any changes to this file require a corresponding PR to mathlib4.
A thin category (also known as a sparse category) is a category with at most one morphism between
each pair of objects.
Examples include posets, but also some indexing categories (diagrams) for special shapes of
(co)limits.
To construct a category instance one only needs to specify the `category_struct` part,
as the axioms hold for free.
If `C` is thin, then the category of functors to `C` is also thin.
Further, to show two objects are isomorphic in a thin category, it suffices only to give a morphism
in each direction.
-/
universes v₁ v₂ u₁ u₂
namespace category_theory
variables {C : Type u₁}
section
variables [category_struct.{v₁} C] [quiver.is_thin C]
/-- Construct a category instance from a category_struct, using the fact that
hom spaces are subsingletons to prove the axioms. -/
def thin_category : category C := {}.
end
-- We don't assume anything about where the category instance on `C` came from.
-- In particular this allows `C` to be a preorder, with the category instance inherited from the
-- preorder structure.
variables [category.{v₁} C] {D : Type u₂} [category.{v₂} D]
variable [quiver.is_thin C]
/-- If `C` is a thin category, then `D ⥤ C` is a thin category. -/
instance functor_thin : quiver.is_thin (D ⥤ C) :=
λ _ _, ⟨λ α β, nat_trans.ext α β (funext (λ _, subsingleton.elim _ _))⟩
/-- To show `X ≅ Y` in a thin category, it suffices to just give any morphism in each direction. -/
def iso_of_both_ways {X Y : C} (f : X ⟶ Y) (g : Y ⟶ X) : X ≅ Y :=
{ hom := f, inv := g }
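-- Hedged usage sketch (added; not part of the original file): in a thin
-- category, a morphism in each direction between two objects already gives an
-- isomorphism, the coherence conditions holding by `subsingleton`.
example {X Y : C} (f : X ⟶ Y) (g : Y ⟶ X) : X ≅ Y := iso_of_both_ways f g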
instance subsingleton_iso {X Y : C} : subsingleton (X ≅ Y) :=
⟨by { intros i₁ i₂, ext1, apply subsingleton.elim }⟩
end category_theory
|
c68500705d1a38cc118c8a730d608e144a62d332
|
4d2583807a5ac6caaffd3d7a5f646d61ca85d532
|
/src/number_theory/pythagorean_triples.lean
|
4611650d5ebbf0f75a5e2af7247200da1cdd8b49
|
[
"Apache-2.0"
] |
permissive
|
AntoineChambert-Loir/mathlib
|
64aabb896129885f12296a799818061bc90da1ff
|
07be904260ab6e36a5769680b6012f03a4727134
|
refs/heads/master
| 1,693,187,631,771
| 1,636,719,886,000
| 1,636,719,886,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 25,919
|
lean
|
/-
Copyright (c) 2020 Paul van Wamelen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Paul van Wamelen
-/
import algebra.field
import ring_theory.int.basic
import algebra.group_with_zero.power
import tactic.ring
import tactic.ring_exp
import tactic.field_simp
import data.zmod.basic
/-!
# Pythagorean Triples
The main result is the classification of Pythagorean triples. The final result is for general
Pythagorean triples. It follows from the more interesting relatively prime case. We use the
"rational parametrization of the circle" method for the proof. The parametrization maps the point
`(x / z, y / z)` to the slope of the line through `(-1 , 0)` and `(x / z, y / z)`. This quickly
shows that `(x / z, y / z) = (2 * m * n / (m ^ 2 + n ^ 2), (m ^ 2 - n ^ 2) / (m ^ 2 + n ^ 2))` where
`m / n` is the slope. In order to identify numerators and denominators we now need results showing
that these are coprime. This is easy except for the prime 2. In order to deal with that we have to
analyze the parity of `x`, `y`, `m` and `n` and eliminate all the impossible cases. This takes up
the bulk of the proof below.
-/
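-- A hedged worked instance of the parametrization described above (added; not
-- part of the original file): the slope `m / n = 2 / 1` yields the point
-- `(4 / 5, 3 / 5)`, i.e. the familiar triple `3 ^ 2 + 4 ^ 2 = 5 ^ 2`.
example : (2 * 2 * 1 : ℤ) ^ 2 + (2 ^ 2 - 1 ^ 2) ^ 2 = (2 ^ 2 + 1 ^ 2) ^ 2 :=
by norm_num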
lemma sq_ne_two_fin_zmod_four (z : zmod 4) : z * z ≠ 2 :=
begin
change fin 4 at z,
fin_cases z; norm_num [fin.ext_iff, fin.coe_bit0, fin.coe_bit1]
end
lemma int.sq_ne_two_mod_four (z : ℤ) : (z * z) % 4 ≠ 2 :=
suffices ¬ (z * z) % (4 : ℕ) = 2 % (4 : ℕ), by norm_num at this,
begin
rw ← zmod.int_coe_eq_int_coe_iff',
simpa using sq_ne_two_fin_zmod_four _
end
noncomputable theory
open_locale classical
/-- Three integers `x`, `y`, and `z` form a Pythagorean triple if `x * x + y * y = z * z`. -/
def pythagorean_triple (x y z : ℤ) : Prop := x * x + y * y = z * z
/-- Pythagorean triples are interchangeable, i.e. `x * x + y * y = y * y + x * x = z * z`.
This comes from additive commutativity. -/
lemma pythagorean_triple_comm {x y z : ℤ} :
(pythagorean_triple x y z) ↔ (pythagorean_triple y x z) :=
by { delta pythagorean_triple, rw add_comm }
/-- The zeroth Pythagorean triple is all zeros. -/
lemma pythagorean_triple.zero : pythagorean_triple 0 0 0 :=
by simp only [pythagorean_triple, zero_mul, zero_add]
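-- Hedged example (added; not part of the original file): the classical
-- `(3, 4, 5)` triple, checked by unfolding the definition.
example : pythagorean_triple 3 4 5 := by { delta pythagorean_triple, norm_num }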
namespace pythagorean_triple
variables {x y z : ℤ} (h : pythagorean_triple x y z)
include h
lemma eq : x * x + y * y = z * z := h
@[symm]
lemma symm :
pythagorean_triple y x z :=
by rwa [pythagorean_triple_comm]
/-- A triple is still a triple if you multiply `x`, `y` and `z`
by a constant `k`. -/
lemma mul (k : ℤ) : pythagorean_triple (k * x) (k * y) (k * z) :=
begin
by_cases hk : k = 0,
{ simp only [pythagorean_triple, hk, zero_mul, zero_add], },
{ calc (k * x) * (k * x) + (k * y) * (k * y)
= k ^ 2 * (x * x + y * y) : by ring
... = k ^ 2 * (z * z) : by rw h.eq
... = (k * z) * (k * z) : by ring }
end
omit h
/-- `(k*x, k*y, k*z)` is a Pythagorean triple if and only if
`(x, y, z)` is also a triple. -/
lemma mul_iff (k : ℤ) (hk : k ≠ 0) :
pythagorean_triple (k * x) (k * y) (k * z) ↔ pythagorean_triple x y z :=
begin
refine ⟨_, λ h, h.mul k⟩,
simp only [pythagorean_triple],
intro h,
rw ← mul_left_inj' (mul_ne_zero hk hk),
convert h using 1; ring,
end
include h
/-- A Pythagorean triple `x, y, z` is “classified” if there exist integers `k, m, n` such that
either
* `x = k * (m ^ 2 - n ^ 2)` and `y = k * (2 * m * n)`, or
* `x = k * (2 * m * n)` and `y = k * (m ^ 2 - n ^ 2)`. -/
@[nolint unused_arguments] def is_classified := ∃ (k m n : ℤ),
((x = k * (m ^ 2 - n ^ 2) ∧ y = k * (2 * m * n))
∨ (x = k * (2 * m * n) ∧ y = k * (m ^ 2 - n ^ 2)))
∧ int.gcd m n = 1
/-- A primitive Pythagorean triple `x, y, z` is a Pythagorean triple with `x` and `y` coprime.
Such a triple is “primitively classified” if there exist coprime integers `m, n` such that either
* `x = m ^ 2 - n ^ 2` and `y = 2 * m * n`, or
* `x = 2 * m * n` and `y = m ^ 2 - n ^ 2`.
-/
@[nolint unused_arguments] def is_primitive_classified := ∃ (m n : ℤ),
((x = m ^ 2 - n ^ 2 ∧ y = 2 * m * n)
∨ (x = 2 * m * n ∧ y = m ^ 2 - n ^ 2))
∧ int.gcd m n = 1
∧ ((m % 2 = 0 ∧ n % 2 = 1) ∨ (m % 2 = 1 ∧ n % 2 = 0))
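-- Hedged illustration (added; not part of the original file): for the primitive
-- triple `(3, 4, 5)` the witnesses are `m = 2` and `n = 1`, since
-- `3 = 2 ^ 2 - 1 ^ 2` and `4 = 2 * 2 * 1`.
example : (3 : ℤ) = 2 ^ 2 - 1 ^ 2 ∧ (4 : ℤ) = 2 * 2 * 1 := by norm_num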
lemma mul_is_classified (k : ℤ) (hc : h.is_classified) : (h.mul k).is_classified :=
begin
obtain ⟨l, m, n, ⟨⟨rfl, rfl⟩ | ⟨rfl, rfl⟩, co⟩⟩ := hc,
{ use [k * l, m, n], apply and.intro _ co, left, split; ring },
{ use [k * l, m, n], apply and.intro _ co, right, split; ring },
end
lemma even_odd_of_coprime (hc : int.gcd x y = 1) :
(x % 2 = 0 ∧ y % 2 = 1) ∨ (x % 2 = 1 ∧ y % 2 = 0) :=
begin
cases int.mod_two_eq_zero_or_one x with hx hx;
cases int.mod_two_eq_zero_or_one y with hy hy,
{ -- x even, y even
exfalso,
apply nat.not_coprime_of_dvd_of_dvd (dec_trivial : 1 < 2) _ _ hc,
{ apply int.dvd_nat_abs_of_of_nat_dvd, apply int.dvd_of_mod_eq_zero hx },
{ apply int.dvd_nat_abs_of_of_nat_dvd, apply int.dvd_of_mod_eq_zero hy } },
{ left, exact ⟨hx, hy⟩ }, -- x even, y odd
{ right, exact ⟨hx, hy⟩ }, -- x odd, y even
{ -- x odd, y odd
exfalso,
    obtain ⟨x0, y0, rfl, rfl⟩ : ∃ x0 y0, x = x0 * 2 + 1 ∧ y = y0 * 2 + 1,
{ cases exists_eq_mul_left_of_dvd (int.dvd_sub_of_mod_eq hx) with x0 hx2,
cases exists_eq_mul_left_of_dvd (int.dvd_sub_of_mod_eq hy) with y0 hy2,
rw sub_eq_iff_eq_add at hx2 hy2, exact ⟨x0, y0, hx2, hy2⟩ },
apply int.sq_ne_two_mod_four z,
rw show z * z = 4 * (x0 * x0 + x0 + y0 * y0 + y0) + 2, by { rw ← h.eq, ring },
norm_num [int.add_mod] }
end
lemma gcd_dvd : (int.gcd x y : ℤ) ∣ z :=
begin
by_cases h0 : int.gcd x y = 0,
{ have hx : x = 0, { apply int.nat_abs_eq_zero.mp, apply nat.eq_zero_of_gcd_eq_zero_left h0 },
have hy : y = 0, { apply int.nat_abs_eq_zero.mp, apply nat.eq_zero_of_gcd_eq_zero_right h0 },
have hz : z = 0,
{ simpa only [pythagorean_triple, hx, hy, add_zero, zero_eq_mul, mul_zero, or_self] using h },
simp only [hz, dvd_zero], },
obtain ⟨k, x0, y0, k0, h2, rfl, rfl⟩ :
∃ (k : ℕ) x0 y0, 0 < k ∧ int.gcd x0 y0 = 1 ∧ x = x0 * k ∧ y = y0 * k :=
int.exists_gcd_one' (nat.pos_of_ne_zero h0),
rw [int.gcd_mul_right, h2, int.nat_abs_of_nat, one_mul],
rw [← int.pow_dvd_pow_iff (dec_trivial : 0 < 2), sq z, ← h.eq],
rw (by ring : x0 * k * (x0 * k) + y0 * k * (y0 * k) = k ^ 2 * (x0 * x0 + y0 * y0)),
exact dvd_mul_right _ _
end
lemma normalize : pythagorean_triple (x / int.gcd x y) (y / int.gcd x y) (z / int.gcd x y) :=
begin
by_cases h0 : int.gcd x y = 0,
{ have hx : x = 0, { apply int.nat_abs_eq_zero.mp, apply nat.eq_zero_of_gcd_eq_zero_left h0 },
have hy : y = 0, { apply int.nat_abs_eq_zero.mp, apply nat.eq_zero_of_gcd_eq_zero_right h0 },
have hz : z = 0,
{ simpa only [pythagorean_triple, hx, hy, add_zero, zero_eq_mul, mul_zero, or_self] using h },
simp only [hx, hy, hz, int.zero_div], exact zero },
rcases h.gcd_dvd with ⟨z0, rfl⟩,
obtain ⟨k, x0, y0, k0, h2, rfl, rfl⟩ :
∃ (k : ℕ) x0 y0, 0 < k ∧ int.gcd x0 y0 = 1 ∧ x = x0 * k ∧ y = y0 * k :=
int.exists_gcd_one' (nat.pos_of_ne_zero h0),
have hk : (k : ℤ) ≠ 0, { norm_cast, rwa pos_iff_ne_zero at k0 },
rw [int.gcd_mul_right, h2, int.nat_abs_of_nat, one_mul] at h ⊢,
rw [mul_comm x0, mul_comm y0, mul_iff k hk] at h,
rwa [int.mul_div_cancel _ hk, int.mul_div_cancel _ hk, int.mul_div_cancel_left _ hk],
end
lemma is_classified_of_is_primitive_classified (hp : h.is_primitive_classified) :
h.is_classified :=
begin
obtain ⟨m, n, H⟩ := hp,
use [1, m, n],
rcases H with ⟨⟨rfl, rfl⟩ | ⟨rfl, rfl⟩, co, pp⟩;
{ apply and.intro _ co, rw one_mul, rw one_mul, tauto }
end
lemma is_classified_of_normalize_is_primitive_classified
(hc : h.normalize.is_primitive_classified) : h.is_classified :=
begin
convert h.normalize.mul_is_classified (int.gcd x y)
(is_classified_of_is_primitive_classified h.normalize hc);
rw int.mul_div_cancel',
{ exact int.gcd_dvd_left x y },
{ exact int.gcd_dvd_right x y },
{ exact h.gcd_dvd }
end
lemma ne_zero_of_coprime (hc : int.gcd x y = 1) : z ≠ 0 :=
begin
suffices : 0 < z * z, { rintro rfl, norm_num at this },
rw [← h.eq, ← sq, ← sq],
have hc' : int.gcd x y ≠ 0, { rw hc, exact one_ne_zero },
cases int.ne_zero_of_gcd hc' with hxz hyz,
{ apply lt_add_of_pos_of_le (sq_pos_of_ne_zero x hxz) (sq_nonneg y) },
{ apply lt_add_of_le_of_pos (sq_nonneg x) (sq_pos_of_ne_zero y hyz) }
end
lemma is_primitive_classified_of_coprime_of_zero_left (hc : int.gcd x y = 1) (hx : x = 0) :
h.is_primitive_classified :=
begin
subst x,
change nat.gcd 0 (int.nat_abs y) = 1 at hc,
rw [nat.gcd_zero_left (int.nat_abs y)] at hc,
cases int.nat_abs_eq y with hy hy,
{ use [1, 0], rw [hy, hc, int.gcd_zero_right], norm_num },
{ use [0, 1], rw [hy, hc, int.gcd_zero_left], norm_num }
end
lemma coprime_of_coprime (hc : int.gcd x y = 1) : int.gcd y z = 1 :=
begin
by_contradiction H,
obtain ⟨p, hp, hpy, hpz⟩ := nat.prime.not_coprime_iff_dvd.mp H,
apply hp.not_dvd_one,
rw [← hc],
apply nat.dvd_gcd (int.prime.dvd_nat_abs_of_coe_dvd_sq hp _ _) hpy,
rw [sq, eq_sub_of_add_eq h],
rw [← int.coe_nat_dvd_left] at hpy hpz,
exact dvd_sub ((hpz).mul_right _) ((hpy).mul_right _),
end
end pythagorean_triple
section circle_equiv_gen
/-!
### A parametrization of the unit circle
For the classification of Pythagorean triples, we will use a parametrization of the unit circle.
-/
variables {K : Type*} [field K]
/-- A parameterization of the unit circle that is useful for classifying Pythagorean triples.
(To be applied in the case where `K = ℚ`.) -/
def circle_equiv_gen (hk : ∀ x : K, 1 + x^2 ≠ 0) :
K ≃ {p : K × K // p.1^2 + p.2^2 = 1 ∧ p.2 ≠ -1} :=
{ to_fun := λ x, ⟨⟨2 * x / (1 + x^2), (1 - x^2) / (1 + x^2)⟩,
by { field_simp [hk x, div_pow], ring },
begin
simp only [ne.def, div_eq_iff (hk x), ←neg_mul_eq_neg_mul, one_mul, neg_add,
sub_eq_add_neg, add_left_inj],
simpa only [eq_neg_iff_add_eq_zero, one_pow] using hk 1,
end⟩,
inv_fun := λ p, (p : K × K).1 / ((p : K × K).2 + 1),
left_inv := λ x,
begin
have h2 : (1 + 1 : K) = 2 := rfl,
have h3 : (2 : K) ≠ 0, { convert hk 1, rw [one_pow 2, h2] },
field_simp [hk x, h2, add_assoc, add_comm, add_sub_cancel'_right, mul_comm],
end,
right_inv := λ ⟨⟨x, y⟩, hxy, hy⟩,
begin
change x ^ 2 + y ^ 2 = 1 at hxy,
have h2 : y + 1 ≠ 0, { apply mt eq_neg_of_add_eq_zero, exact hy },
have h3 : (y + 1) ^ 2 + x ^ 2 = 2 * (y + 1),
{ rw [(add_neg_eq_iff_eq_add.mpr hxy.symm).symm], ring },
have h4 : (2 : K) ≠ 0, { convert hk 1, rw one_pow 2, refl },
simp only [prod.mk.inj_iff, subtype.mk_eq_mk],
split,
{ field_simp [h3], ring },
{ field_simp [h3], rw [← add_neg_eq_iff_eq_add.mpr hxy.symm], ring }
end }
@[simp] lemma circle_equiv_apply (hk : ∀ x : K, 1 + x^2 ≠ 0) (x : K) :
(circle_equiv_gen hk x : K × K) = ⟨2 * x / (1 + x^2), (1 - x^2) / (1 + x^2)⟩ := rfl
@[simp] lemma circle_equiv_symm_apply (hk : ∀ x : K, 1 + x^2 ≠ 0)
(v : {p : K × K // p.1^2 + p.2^2 = 1 ∧ p.2 ≠ -1}) :
(circle_equiv_gen hk).symm v = (v : K × K).1 / ((v : K × K).2 + 1) := rfl
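-- Hedged sanity check (added; not part of the original file): over `ℚ`, the
-- slope `x = 1` is sent to the point `(1, 0)` on the unit circle.
example (hk : ∀ x : ℚ, 1 + x ^ 2 ≠ 0) :
  (circle_equiv_gen hk 1 : ℚ × ℚ) = (1, 0) :=
by { rw circle_equiv_apply, norm_num [prod.mk.inj_iff] }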
end circle_equiv_gen
private lemma coprime_sq_sub_sq_add_of_even_odd {m n : ℤ} (h : int.gcd m n = 1)
(hm : m % 2 = 0) (hn : n % 2 = 1) :
int.gcd (m ^ 2 - n ^ 2) (m ^ 2 + n ^ 2) = 1 :=
begin
by_contradiction H,
obtain ⟨p, hp, hp1, hp2⟩ := nat.prime.not_coprime_iff_dvd.mp H,
rw ← int.coe_nat_dvd_left at hp1 hp2,
have h2m : (p : ℤ) ∣ 2 * m ^ 2, { convert dvd_add hp2 hp1, ring },
have h2n : (p : ℤ) ∣ 2 * n ^ 2, { convert dvd_sub hp2 hp1, ring },
have hmc : p = 2 ∨ p ∣ int.nat_abs m :=
prime_two_or_dvd_of_dvd_two_mul_pow_self_two hp h2m,
have hnc : p = 2 ∨ p ∣ int.nat_abs n :=
prime_two_or_dvd_of_dvd_two_mul_pow_self_two hp h2n,
by_cases h2 : p = 2,
{ have h3 : (m ^ 2 + n ^ 2) % 2 = 1, { norm_num [sq, int.add_mod, int.mul_mod, hm, hn] },
have h4 : (m ^ 2 + n ^ 2) % 2 = 0, { apply int.mod_eq_zero_of_dvd, rwa h2 at hp2 },
rw h4 at h3, exact zero_ne_one h3 },
{ apply hp.not_dvd_one,
rw ← h,
exact nat.dvd_gcd (or.resolve_left hmc h2) (or.resolve_left hnc h2), }
end
private lemma coprime_sq_sub_sq_add_of_odd_even {m n : ℤ} (h : int.gcd m n = 1)
(hm : m % 2 = 1) (hn : n % 2 = 0):
int.gcd (m ^ 2 - n ^ 2) (m ^ 2 + n ^ 2) = 1 :=
begin
rw [int.gcd, ← int.nat_abs_neg (m ^ 2 - n ^ 2)],
rw [(by ring : -(m ^ 2 - n ^ 2) = n ^ 2 - m ^ 2), add_comm],
apply coprime_sq_sub_sq_add_of_even_odd _ hn hm, rwa [int.gcd_comm],
end
private lemma coprime_sq_sub_mul_of_even_odd {m n : ℤ} (h : int.gcd m n = 1)
(hm : m % 2 = 0) (hn : n % 2 = 1) :
int.gcd (m ^ 2 - n ^ 2) (2 * m * n) = 1 :=
begin
by_contradiction H,
obtain ⟨p, hp, hp1, hp2⟩ := nat.prime.not_coprime_iff_dvd.mp H,
rw ← int.coe_nat_dvd_left at hp1 hp2,
have hnp : ¬ (p : ℤ) ∣ int.gcd m n,
{ rw h, norm_cast, exact mt nat.dvd_one.mp (nat.prime.ne_one hp) },
cases int.prime.dvd_mul hp hp2 with hp2m hpn,
{ rw int.nat_abs_mul at hp2m,
cases (nat.prime.dvd_mul hp).mp hp2m with hp2 hpm,
{ have hp2' : p = 2 := (nat.le_of_dvd zero_lt_two hp2).antisymm hp.two_le,
revert hp1, rw hp2',
apply mt int.mod_eq_zero_of_dvd,
norm_num [sq, int.sub_mod, int.mul_mod, hm, hn] },
apply mt (int.dvd_gcd (int.coe_nat_dvd_left.mpr hpm)) hnp,
apply (or_self _).mp, apply int.prime.dvd_mul' hp,
rw (by ring : n * n = - (m ^ 2 - n ^ 2) + m * m),
apply dvd_add (dvd_neg_of_dvd hp1),
exact dvd_mul_of_dvd_left (int.coe_nat_dvd_left.mpr hpm) m },
rw int.gcd_comm at hnp,
apply mt (int.dvd_gcd (int.coe_nat_dvd_left.mpr hpn)) hnp,
apply (or_self _).mp, apply int.prime.dvd_mul' hp,
rw (by ring : m * m = (m ^ 2 - n ^ 2) + n * n),
apply dvd_add hp1,
exact (int.coe_nat_dvd_left.mpr hpn).mul_right n
end
private lemma coprime_sq_sub_mul_of_odd_even {m n : ℤ} (h : int.gcd m n = 1)
(hm : m % 2 = 1) (hn : n % 2 = 0) :
int.gcd (m ^ 2 - n ^ 2) (2 * m * n) = 1 :=
begin
rw [int.gcd, ← int.nat_abs_neg (m ^ 2 - n ^ 2)],
rw [(by ring : 2 * m * n = 2 * n * m), (by ring : -(m ^ 2 - n ^ 2) = n ^ 2 - m ^ 2)],
apply coprime_sq_sub_mul_of_even_odd _ hn hm, rwa [int.gcd_comm]
end
private lemma coprime_sq_sub_mul {m n : ℤ} (h : int.gcd m n = 1)
(hmn : (m % 2 = 0 ∧ n % 2 = 1) ∨ (m % 2 = 1 ∧ n % 2 = 0)) :
int.gcd (m ^ 2 - n ^ 2) (2 * m * n) = 1 :=
begin
cases hmn with h1 h2,
{ exact coprime_sq_sub_mul_of_even_odd h h1.left h1.right },
{ exact coprime_sq_sub_mul_of_odd_even h h2.left h2.right }
end
private lemma coprime_sq_sub_sq_sum_of_odd_odd {m n : ℤ} (h : int.gcd m n = 1)
(hm : m % 2 = 1) (hn : n % 2 = 1) :
2 ∣ m ^ 2 + n ^ 2
∧ 2 ∣ m ^ 2 - n ^ 2
∧ ((m ^ 2 - n ^ 2) / 2) % 2 = 0
∧ int.gcd ((m ^ 2 - n ^ 2) / 2) ((m ^ 2 + n ^ 2) / 2) = 1 :=
begin
cases exists_eq_mul_left_of_dvd (int.dvd_sub_of_mod_eq hm) with m0 hm2,
cases exists_eq_mul_left_of_dvd (int.dvd_sub_of_mod_eq hn) with n0 hn2,
rw sub_eq_iff_eq_add at hm2 hn2, subst m, subst n,
have h1 : (m0 * 2 + 1) ^ 2 + (n0 * 2 + 1) ^ 2 = 2 * (2 * (m0 ^ 2 + n0 ^ 2 + m0 + n0) + 1),
by ring_exp,
have h2 : (m0 * 2 + 1) ^ 2 - (n0 * 2 + 1) ^ 2 = 2 * (2 * (m0 ^ 2 - n0 ^ 2 + m0 - n0)),
by ring_exp,
have h3 : ((m0 * 2 + 1) ^ 2 - (n0 * 2 + 1) ^ 2) / 2 % 2 = 0,
{ rw [h2, int.mul_div_cancel_left, int.mul_mod_right], exact dec_trivial },
refine ⟨⟨_, h1⟩, ⟨_, h2⟩, h3, _⟩,
have h20 : (2:ℤ) ≠ 0 := dec_trivial,
rw [h1, h2, int.mul_div_cancel_left _ h20, int.mul_div_cancel_left _ h20],
by_contra h4,
obtain ⟨p, hp, hp1, hp2⟩ := nat.prime.not_coprime_iff_dvd.mp h4,
apply hp.not_dvd_one,
rw ← h,
rw ← int.coe_nat_dvd_left at hp1 hp2,
apply nat.dvd_gcd,
{ apply int.prime.dvd_nat_abs_of_coe_dvd_sq hp,
convert dvd_add hp1 hp2, ring_exp },
{ apply int.prime.dvd_nat_abs_of_coe_dvd_sq hp,
convert dvd_sub hp2 hp1, ring_exp },
end
namespace pythagorean_triple
variables {x y z : ℤ} (h : pythagorean_triple x y z)
include h
lemma is_primitive_classified_aux (hc : x.gcd y = 1) (hzpos : 0 < z)
{m n : ℤ} (hm2n2 : 0 < m ^ 2 + n ^ 2)
(hv2 : (x : ℚ) / z = 2 * m * n / (m ^ 2 + n ^ 2))
(hw2 : (y : ℚ) / z = (m ^ 2 - n ^ 2) / (m ^ 2 + n ^ 2))
(H : int.gcd (m ^ 2 - n ^ 2) (m ^ 2 + n ^ 2) = 1)
(co : int.gcd m n = 1)
(pp : (m % 2 = 0 ∧ n % 2 = 1) ∨ (m % 2 = 1 ∧ n % 2 = 0)) :
h.is_primitive_classified :=
begin
have hz : z ≠ 0, apply ne_of_gt hzpos,
have h2 : y = m ^ 2 - n ^ 2 ∧ z = m ^ 2 + n ^ 2,
{ apply rat.div_int_inj hzpos hm2n2 (h.coprime_of_coprime hc) H, rw [hw2], norm_cast },
use [m, n], apply and.intro _ (and.intro co pp), right,
refine ⟨_, h2.left⟩,
rw [← rat.coe_int_inj _ _, ← div_left_inj' ((mt (rat.coe_int_inj z 0).mp) hz), hv2, h2.right],
norm_cast
end
theorem is_primitive_classified_of_coprime_of_odd_of_pos
(hc : int.gcd x y = 1) (hyo : y % 2 = 1) (hzpos : 0 < z) :
h.is_primitive_classified :=
begin
by_cases h0 : x = 0, { exact h.is_primitive_classified_of_coprime_of_zero_left hc h0 },
let v := (x : ℚ) / z,
let w := (y : ℚ) / z,
have hz : z ≠ 0, apply ne_of_gt hzpos,
have hq : v ^ 2 + w ^ 2 = 1,
{ field_simp [hz, sq], norm_cast, exact h },
have hvz : v ≠ 0, { field_simp [hz], exact h0 },
have hw1 : w ≠ -1,
{ contrapose! hvz with hw1,
rw [hw1, neg_sq, one_pow, add_left_eq_self] at hq,
exact pow_eq_zero hq, },
have hQ : ∀ x : ℚ, 1 + x^2 ≠ 0,
{ intro q, apply ne_of_gt, exact lt_add_of_pos_of_le zero_lt_one (sq_nonneg q) },
have hp : (⟨v, w⟩ : ℚ × ℚ) ∈ {p : ℚ × ℚ | p.1^2 + p.2^2 = 1 ∧ p.2 ≠ -1} := ⟨hq, hw1⟩,
let q := (circle_equiv_gen hQ).symm ⟨⟨v, w⟩, hp⟩,
have ht4 : v = 2 * q / (1 + q ^ 2) ∧ w = (1 - q ^ 2) / (1 + q ^ 2),
{ apply prod.mk.inj,
have := ((circle_equiv_gen hQ).apply_symm_apply ⟨⟨v, w⟩, hp⟩).symm,
exact congr_arg subtype.val this, },
let m := (q.denom : ℤ),
let n := q.num,
have hm0 : m ≠ 0, { norm_cast, apply rat.denom_ne_zero q },
have hq2 : q = n / m := (rat.num_div_denom q).symm,
have hm2n2 : 0 < m ^ 2 + n ^ 2,
{ apply lt_add_of_pos_of_le _ (sq_nonneg n),
exact lt_of_le_of_ne (sq_nonneg m) (ne.symm (pow_ne_zero 2 hm0)) },
have hw2 : w = (m ^ 2 - n ^ 2) / (m ^ 2 + n ^ 2),
{ rw [ht4.2, hq2], field_simp [hm2n2, rat.denom_ne_zero q, -rat.num_div_denom] },
have hm2n20 : (m : ℚ) ^ 2 + (n : ℚ) ^ 2 ≠ 0,
{ norm_cast, simpa only [int.coe_nat_pow] using ne_of_gt hm2n2 },
have hv2 : v = 2 * m * n / (m ^ 2 + n ^ 2),
{ apply eq.symm, apply (div_eq_iff hm2n20).mpr, rw [ht4.1], field_simp [hQ q],
rw [hq2] {occs := occurrences.pos [2, 3]},
field_simp [rat.denom_ne_zero q, -rat.num_div_denom],
ring },
have hnmcp : int.gcd n m = 1 := q.cop,
have hmncp : int.gcd m n = 1, { rw int.gcd_comm, exact hnmcp },
cases int.mod_two_eq_zero_or_one m with hm2 hm2;
cases int.mod_two_eq_zero_or_one n with hn2 hn2,
{ -- m even, n even
exfalso,
have h1 : 2 ∣ (int.gcd n m : ℤ),
{ exact int.dvd_gcd (int.dvd_of_mod_eq_zero hn2) (int.dvd_of_mod_eq_zero hm2) },
rw hnmcp at h1, revert h1, norm_num },
{ -- m even, n odd
apply h.is_primitive_classified_aux hc hzpos hm2n2 hv2 hw2 _ hmncp,
{ apply or.intro_left, exact and.intro hm2 hn2 },
{ apply coprime_sq_sub_sq_add_of_even_odd hmncp hm2 hn2 } },
{ -- m odd, n even
apply h.is_primitive_classified_aux hc hzpos hm2n2 hv2 hw2 _ hmncp,
{ apply or.intro_right, exact and.intro hm2 hn2 },
apply coprime_sq_sub_sq_add_of_odd_even hmncp hm2 hn2 },
{ -- m odd, n odd
exfalso,
have h1 : 2 ∣ m ^ 2 + n ^ 2 ∧ 2 ∣ m ^ 2 - n ^ 2
∧ ((m ^ 2 - n ^ 2) / 2) % 2 = 0 ∧ int.gcd ((m ^ 2 - n ^ 2) / 2) ((m ^ 2 + n ^ 2) / 2) = 1,
{ exact coprime_sq_sub_sq_sum_of_odd_odd hmncp hm2 hn2 },
have h2 : y = (m ^ 2 - n ^ 2) / 2 ∧ z = (m ^ 2 + n ^ 2) / 2,
{ apply rat.div_int_inj hzpos _ (h.coprime_of_coprime hc) h1.2.2.2,
{ show w = _, rw [←rat.mk_eq_div, ←(rat.div_mk_div_cancel_left (by norm_num : (2 : ℤ) ≠ 0))],
rw [int.div_mul_cancel h1.1, int.div_mul_cancel h1.2.1, hw2], norm_cast },
{ apply (mul_lt_mul_right (by norm_num : 0 < (2 : ℤ))).mp,
rw [int.div_mul_cancel h1.1, zero_mul], exact hm2n2 } },
rw [h2.1, h1.2.2.1] at hyo,
revert hyo,
norm_num }
end
theorem is_primitive_classified_of_coprime_of_pos (hc : int.gcd x y = 1) (hzpos : 0 < z) :
h.is_primitive_classified :=
begin
cases h.even_odd_of_coprime hc with h1 h2,
{ exact (h.is_primitive_classified_of_coprime_of_odd_of_pos hc h1.right hzpos) },
rw int.gcd_comm at hc,
obtain ⟨m, n, H⟩ := (h.symm.is_primitive_classified_of_coprime_of_odd_of_pos hc h2.left hzpos),
use [m, n], tauto
end
theorem is_primitive_classified_of_coprime (hc : int.gcd x y = 1) : h.is_primitive_classified :=
begin
by_cases hz : 0 < z,
{ exact h.is_primitive_classified_of_coprime_of_pos hc hz },
have h' : pythagorean_triple x y (-z),
{ simpa [pythagorean_triple, neg_mul_neg] using h.eq, },
apply h'.is_primitive_classified_of_coprime_of_pos hc,
apply lt_of_le_of_ne _ (h'.ne_zero_of_coprime hc).symm,
exact le_neg.mp (not_lt.mp hz)
end
theorem classified : h.is_classified :=
begin
by_cases h0 : int.gcd x y = 0,
{ have hx : x = 0, { apply int.nat_abs_eq_zero.mp, apply nat.eq_zero_of_gcd_eq_zero_left h0 },
have hy : y = 0, { apply int.nat_abs_eq_zero.mp, apply nat.eq_zero_of_gcd_eq_zero_right h0 },
use [0, 1, 0], norm_num [hx, hy], },
apply h.is_classified_of_normalize_is_primitive_classified,
apply h.normalize.is_primitive_classified_of_coprime,
apply int.gcd_div_gcd_div_gcd (nat.pos_of_ne_zero h0),
end
omit h
theorem coprime_classification :
pythagorean_triple x y z ∧ int.gcd x y = 1 ↔
∃ m n, ((x = m ^ 2 - n ^ 2 ∧ y = 2 * m * n) ∨
(x = 2 * m * n ∧ y = m ^ 2 - n ^ 2))
∧ (z = m ^ 2 + n ^ 2 ∨ z = - (m ^ 2 + n ^ 2))
∧ int.gcd m n = 1
∧ ((m % 2 = 0 ∧ n % 2 = 1) ∨ (m % 2 = 1 ∧ n % 2 = 0)) :=
begin
split,
{ intro h,
obtain ⟨m, n, H⟩ := h.left.is_primitive_classified_of_coprime h.right,
use [m, n],
rcases H with ⟨⟨rfl, rfl⟩ | ⟨rfl, rfl⟩, co, pp⟩,
{ refine ⟨or.inl ⟨rfl, rfl⟩, _, co, pp⟩,
have : z ^ 2 = (m ^ 2 + n ^ 2) ^ 2,
{ rw [sq, ← h.left.eq], ring },
simpa using eq_or_eq_neg_of_sq_eq_sq _ _ this },
{ refine ⟨or.inr ⟨rfl, rfl⟩, _, co, pp⟩,
have : z ^ 2 = (m ^ 2 + n ^ 2) ^ 2,
{ rw [sq, ← h.left.eq], ring },
simpa using eq_or_eq_neg_of_sq_eq_sq _ _ this } },
{ delta pythagorean_triple,
rintro ⟨m, n, ⟨rfl, rfl⟩ | ⟨rfl, rfl⟩, rfl | rfl, co, pp⟩;
{ split, { ring }, exact coprime_sq_sub_mul co pp }
<|>
{ split, { ring }, rw int.gcd_comm, exact coprime_sq_sub_mul co pp } }
end
/-- By assuming `x` is odd and `z` is positive we get a slightly more precise classification of
the pythagorean triple `x ^ 2 + y ^ 2 = z ^ 2`. -/
theorem coprime_classification' {x y z : ℤ} (h : pythagorean_triple x y z)
(h_coprime : int.gcd x y = 1) (h_parity : x % 2 = 1) (h_pos : 0 < z) :
∃ m n, x = m ^ 2 - n ^ 2
∧ y = 2 * m * n
∧ z = m ^ 2 + n ^ 2
∧ int.gcd m n = 1
∧ ((m % 2 = 0 ∧ n % 2 = 1) ∨ (m % 2 = 1 ∧ n % 2 = 0))
∧ 0 ≤ m :=
begin
obtain ⟨m, n, ht1, ht2, ht3, ht4⟩ :=
pythagorean_triple.coprime_classification.mp (and.intro h h_coprime),
cases le_or_lt 0 m with hm hm,
{ use [m, n],
cases ht1 with h_odd h_even,
{ apply and.intro h_odd.1,
apply and.intro h_odd.2,
cases ht2 with h_pos h_neg,
{ apply and.intro h_pos (and.intro ht3 (and.intro ht4 hm)) },
{ exfalso, revert h_pos, rw h_neg,
exact imp_false.mpr (not_lt.mpr (neg_nonpos.mpr (add_nonneg (sq_nonneg m)
(sq_nonneg n)))) } },
exfalso,
rcases h_even with ⟨rfl, -⟩,
rw [mul_assoc, int.mul_mod_right] at h_parity,
exact zero_ne_one h_parity },
{ use [-m, -n],
cases ht1 with h_odd h_even,
{ rw [neg_sq m],
rw [neg_sq n],
apply and.intro h_odd.1,
split, { rw h_odd.2, ring },
cases ht2 with h_pos h_neg,
{ apply and.intro h_pos,
split,
{ delta int.gcd, rw [int.nat_abs_neg, int.nat_abs_neg], exact ht3 },
{ rw [int.neg_mod_two, int.neg_mod_two],
apply and.intro ht4, linarith } },
{ exfalso, revert h_pos, rw h_neg,
exact imp_false.mpr (not_lt.mpr (neg_nonpos.mpr (add_nonneg (sq_nonneg m)
(sq_nonneg n)))) } },
exfalso,
rcases h_even with ⟨rfl, -⟩,
rw [mul_assoc, int.mul_mod_right] at h_parity,
exact zero_ne_one h_parity }
end
/-- **Formula for Pythagorean Triples** -/
theorem classification :
pythagorean_triple x y z ↔
∃ k m n, ((x = k * (m ^ 2 - n ^ 2) ∧ y = k * (2 * m * n)) ∨
(x = k * (2 * m * n) ∧ y = k * (m ^ 2 - n ^ 2)))
∧ (z = k * (m ^ 2 + n ^ 2) ∨ z = - k * (m ^ 2 + n ^ 2)) :=
begin
split,
{ intro h,
obtain ⟨k, m, n, H⟩ := h.classified,
use [k, m, n],
rcases H with ⟨rfl, rfl⟩ | ⟨rfl, rfl⟩,
{ refine ⟨or.inl ⟨rfl, rfl⟩, _⟩,
have : z ^ 2 = (k * (m ^ 2 + n ^ 2)) ^ 2,
{ rw [sq, ← h.eq], ring },
simpa using eq_or_eq_neg_of_sq_eq_sq _ _ this },
{ refine ⟨or.inr ⟨rfl, rfl⟩, _⟩,
have : z ^ 2 = (k * (m ^ 2 + n ^ 2)) ^ 2,
{ rw [sq, ← h.eq], ring },
simpa using eq_or_eq_neg_of_sq_eq_sq _ _ this } },
{ rintro ⟨k, m, n, ⟨rfl, rfl⟩ | ⟨rfl, rfl⟩, rfl | rfl⟩;
delta pythagorean_triple; ring }
end
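-- A quick sanity check of the classification: the classical triple (3, 4, 5) is the
-- `k = 1`, `m = 2`, `n = 1` instance (3 = 2^2 - 1^2, 4 = 2 * 2 * 1, 5 = 2^2 + 1^2).
-- This assumes `pythagorean_triple` unfolds to `x * x + y * y = z * z` as defined above.
example : pythagorean_triple 3 4 5 := by { delta pythagorean_triple, norm_num }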
end pythagorean_triple
|
e3b20afc7f2b69d06eff06f88550b4563ee9af4a
|
38bf3fd2bb651ab70511408fcf70e2029e2ba310
|
/src/data/nat/modeq.lean
|
a98f47cf21799c9e4ffd173f31b159c6ae0e62ff
|
[
"Apache-2.0"
] |
permissive
|
JaredCorduan/mathlib
|
130392594844f15dad65a9308c242551bae6cd2e
|
d5de80376088954d592a59326c14404f538050a1
|
refs/heads/master
| 1,595,862,206,333
| 1,570,816,457,000
| 1,570,816,457,000
| 209,134,499
| 0
| 0
|
Apache-2.0
| 1,568,746,811,000
| 1,568,746,811,000
| null |
UTF-8
|
Lean
| false
| false
| 9,007
|
lean
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
Modular equality relation.
-/
import data.int.gcd algebra.ordered_ring
namespace nat
/-- Modular equality. `modeq n a b`, or `a ≡ b [MOD n]`, means
  that `a` and `b` have the same remainder when divided by `n`. -/
@[derive decidable]
def modeq (n a b : ℕ) := a % n = b % n
notation a ` ≡ `:50 b ` [MOD `:50 n `]`:0 := modeq n a b
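-- A minimal sanity check of the definition: `a ≡ b [MOD n]` is by definition the
-- equality of remainders `a % n = b % n`, so such a proof can be used directly.
example (a b n : ℕ) (h : a % n = b % n) : a ≡ b [MOD n] := h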
namespace modeq
variables {n m a b c d : ℕ}
@[refl] protected theorem refl (a : ℕ) : a ≡ a [MOD n] := @rfl _ _
@[symm] protected theorem symm : a ≡ b [MOD n] → b ≡ a [MOD n] := eq.symm
@[trans] protected theorem trans : a ≡ b [MOD n] → b ≡ c [MOD n] → a ≡ c [MOD n] := eq.trans
theorem modeq_zero_iff : a ≡ 0 [MOD n] ↔ n ∣ a :=
by rw [modeq, zero_mod, dvd_iff_mod_eq_zero]
theorem modeq_iff_dvd : a ≡ b [MOD n] ↔ (n:ℤ) ∣ b - a :=
by rw [modeq, eq_comm, ← int.coe_nat_inj'];
simp [int.mod_eq_mod_iff_mod_sub_eq_zero, int.dvd_iff_mod_eq_zero]
theorem modeq_of_dvd : (n:ℤ) ∣ b - a → a ≡ b [MOD n] := modeq_iff_dvd.2
theorem dvd_of_modeq : a ≡ b [MOD n] → (n:ℤ) ∣ b - a := modeq_iff_dvd.1
theorem mod_modeq (a n) : a % n ≡ a [MOD n] := nat.mod_mod _ _
theorem modeq_of_dvd_of_modeq (d : m ∣ n) (h : a ≡ b [MOD n]) : a ≡ b [MOD m] :=
modeq_of_dvd $ dvd_trans (int.coe_nat_dvd.2 d) (dvd_of_modeq h)
theorem modeq_mul_left' (c : ℕ) (h : a ≡ b [MOD n]) : c * a ≡ c * b [MOD (c * n)] :=
by unfold modeq at *; rw [mul_mod_mul_left, mul_mod_mul_left, h]
theorem modeq_mul_left (c : ℕ) (h : a ≡ b [MOD n]) : c * a ≡ c * b [MOD n] :=
modeq_of_dvd_of_modeq (dvd_mul_left _ _) $ modeq_mul_left' _ h
theorem modeq_mul_right' (c : ℕ) (h : a ≡ b [MOD n]) : a * c ≡ b * c [MOD (n * c)] :=
by rw [mul_comm a, mul_comm b, mul_comm n]; exact modeq_mul_left' c h
theorem modeq_mul_right (c : ℕ) (h : a ≡ b [MOD n]) : a * c ≡ b * c [MOD n] :=
by rw [mul_comm a, mul_comm b]; exact modeq_mul_left c h
theorem modeq_mul (h₁ : a ≡ b [MOD n]) (h₂ : c ≡ d [MOD n]) : a * c ≡ b * d [MOD n] :=
(modeq_mul_left _ h₂).trans (modeq_mul_right _ h₁)
theorem modeq_add (h₁ : a ≡ b [MOD n]) (h₂ : c ≡ d [MOD n]) : a + c ≡ b + d [MOD n] :=
modeq_of_dvd $ by simpa using dvd_add (dvd_of_modeq h₁) (dvd_of_modeq h₂)
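-- Small worked example of how the congruence lemmas combine: from `a ≡ b` and `c ≡ d`
-- modulo `n` we obtain `a * c + a ≡ b * d + b` by chaining `modeq_mul` and `modeq_add`.
example (h₁ : a ≡ b [MOD n]) (h₂ : c ≡ d [MOD n]) :
  a * c + a ≡ b * d + b [MOD n] :=
modeq_add (modeq_mul h₁ h₂) h₁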
theorem modeq_add_cancel_left (h₁ : a ≡ b [MOD n]) (h₂ : a + c ≡ b + d [MOD n]) : c ≡ d [MOD n] :=
have (n:ℤ) ∣ a + (-a + (d + -c)),
by simpa using _root_.dvd_sub (dvd_of_modeq h₂) (dvd_of_modeq h₁),
modeq_of_dvd $ by rwa add_neg_cancel_left at this
theorem modeq_add_cancel_right (h₁ : c ≡ d [MOD n]) (h₂ : a + c ≡ b + d [MOD n]) : a ≡ b [MOD n] :=
by rw [add_comm a, add_comm b] at h₂; exact modeq_add_cancel_left h₁ h₂
theorem modeq_of_modeq_mul_left (m : ℕ) (h : a ≡ b [MOD m * n]) : a ≡ b [MOD n] :=
by rw [modeq_iff_dvd] at *; exact dvd.trans (dvd_mul_left (n : ℤ) (m : ℤ)) h
theorem modeq_of_modeq_mul_right (m : ℕ) : a ≡ b [MOD n * m] → a ≡ b [MOD n] :=
mul_comm m n ▸ modeq_of_modeq_mul_left _
def chinese_remainder (co : coprime n m) (a b : ℕ) : {k // k ≡ a [MOD n] ∧ k ≡ b [MOD m]} :=
⟨let (c, d) := xgcd n m in int.to_nat ((b * c * n + a * d * m) % (n * m)), begin
rw xgcd_val, dsimp [chinese_remainder._match_1],
rw [modeq_iff_dvd, modeq_iff_dvd],
rw [int.to_nat_of_nonneg], swap,
{ by_cases h₁ : n = 0, {simp [coprime, h₁] at co, substs m n, simp},
by_cases h₂ : m = 0, {simp [coprime, h₂] at co, substs m n, simp},
exact int.mod_nonneg _
(mul_ne_zero (int.coe_nat_ne_zero.2 h₁) (int.coe_nat_ne_zero.2 h₂)) },
have := gcd_eq_gcd_ab n m, simp [co.gcd_eq_one, mul_comm] at this,
rw [int.mod_def, ← sub_add, ← sub_add]; split,
{ refine dvd_add _ (dvd_trans (dvd_mul_right _ _) (dvd_mul_right _ _)),
rw [add_comm, ← sub_sub], refine _root_.dvd_sub _ (dvd_mul_left _ _),
have := congr_arg ((*) ↑a) this,
exact ⟨_, by rwa [mul_add, ← mul_assoc, ← mul_assoc, mul_one, mul_comm,
← sub_eq_iff_eq_add] at this⟩ },
{ refine dvd_add _ (dvd_trans (dvd_mul_left _ _) (dvd_mul_right _ _)),
rw [← sub_sub], refine _root_.dvd_sub _ (dvd_mul_left _ _),
have := congr_arg ((*) ↑b) this,
exact ⟨_, by rwa [mul_add, ← mul_assoc, ← mul_assoc, mul_one, mul_comm _ ↑m,
← sub_eq_iff_eq_add'] at this⟩ }
end⟩
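-- Usage sketch for `chinese_remainder` (the coprimality hypothesis is assumed here rather
-- than computed): the returned subtype bundles a witness together with both congruences.
example (h : coprime 3 5) : ∃ k, k ≡ 2 [MOD 3] ∧ k ≡ 4 [MOD 5] :=
⟨(chinese_remainder h 2 4).1, (chinese_remainder h 2 4).2⟩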
lemma modeq_and_modeq_iff_modeq_mul {a b m n : ℕ} (hmn : coprime m n) :
a ≡ b [MOD m] ∧ a ≡ b [MOD n] ↔ (a ≡ b [MOD m * n]) :=
⟨λ h, begin
rw [nat.modeq.modeq_iff_dvd, nat.modeq.modeq_iff_dvd, ← int.dvd_nat_abs,
int.coe_nat_dvd, ← int.dvd_nat_abs, int.coe_nat_dvd] at h,
rw [nat.modeq.modeq_iff_dvd, ← int.dvd_nat_abs, int.coe_nat_dvd],
exact hmn.mul_dvd_of_dvd_of_dvd h.1 h.2
end,
λ h, ⟨nat.modeq.modeq_of_modeq_mul_right _ h, nat.modeq.modeq_of_modeq_mul_left _ h⟩⟩
lemma coprime_of_mul_modeq_one (b : ℕ) {a n : ℕ} (h : a * b ≡ 1 [MOD n]) : coprime a n :=
nat.coprime_of_dvd' (λ k ⟨ka, hka⟩ ⟨kb, hkb⟩, int.coe_nat_dvd.1 begin
rw [hka, hkb, modeq_iff_dvd] at h,
cases h with z hz,
rw [sub_eq_iff_eq_add] at hz,
rw [hz, int.coe_nat_mul, mul_assoc, mul_assoc, int.coe_nat_mul, ← mul_add],
exact dvd_mul_right _ _,
end)
end modeq
@[simp] lemma mod_mul_right_mod (a b c : ℕ) : a % (b * c) % b = a % b :=
modeq.modeq_of_modeq_mul_right _ (modeq.mod_modeq _ _)
@[simp] lemma mod_mul_left_mod (a b c : ℕ) : a % (b * c) % c = a % c :=
modeq.modeq_of_modeq_mul_left _ (modeq.mod_modeq _ _)
lemma odd_mul_odd {n m : ℕ} (hn1 : n % 2 = 1) (hm1 : m % 2 = 1) : (n * m) % 2 = 1 :=
show (n * m) % 2 = (1 * 1) % 2, from nat.modeq.modeq_mul hn1 hm1
lemma odd_mul_odd_div_two {m n : ℕ} (hm1 : m % 2 = 1) (hn1 : n % 2 = 1) :
(m * n) / 2 = m * (n / 2) + m / 2 :=
have hm0 : 0 < m := nat.pos_of_ne_zero (λ h, by simp * at *),
have hn0 : 0 < n := nat.pos_of_ne_zero (λ h, by simp * at *),
(nat.mul_left_inj (show 0 < 2, from dec_trivial)).1 $
by rw [mul_add, two_mul_odd_div_two hm1, mul_left_comm, two_mul_odd_div_two hn1,
two_mul_odd_div_two (nat.odd_mul_odd hm1 hn1), nat.mul_sub_left_distrib, mul_one,
← nat.add_sub_assoc hm0, nat.sub_add_cancel (le_mul_of_one_le_right' (nat.zero_le _) hn0)]
lemma odd_of_mod_four_eq_one {n : ℕ} (h : n % 4 = 1) : n % 2 = 1 :=
@modeq.modeq_of_modeq_mul_left 2 n 1 2 h
lemma odd_of_mod_four_eq_three {n : ℕ} (h : n % 4 = 3) : n % 2 = 1 :=
@modeq.modeq_of_modeq_mul_left 2 n 3 2 h
end nat
namespace list
variable {α : Type*}
lemma nth_rotate : ∀ {l : list α} {n m : ℕ} (hml : m < l.length),
(l.rotate n).nth m = l.nth ((m + n) % l.length)
| [] n m hml := (nat.not_lt_zero _ hml).elim
| l 0 m hml := by simp [nat.mod_eq_of_lt hml]
| (a::l) (n+1) m hml :=
have h₃ : m < list.length (l ++ [a]), by simpa using hml,
(lt_or_eq_of_le (nat.le_of_lt_succ $ nat.mod_lt (m + n)
(lt_of_le_of_lt (nat.zero_le _) hml))).elim
(λ hml',
have h₁ : (m + (n + 1)) % ((a :: l : list α).length) =
(m + n) % ((a :: l : list α).length) + 1,
from calc (m + (n + 1)) % (l.length + 1) =
((m + n) % (l.length + 1) + 1) % (l.length + 1) :
add_assoc m n 1 ▸ nat.modeq.modeq_add (nat.mod_mod _ _).symm rfl
... = (m + n) % (l.length + 1) + 1 : nat.mod_eq_of_lt (nat.succ_lt_succ hml'),
have h₂ : (m + n) % (l ++ [a]).length < l.length, by simpa [nat.add_one] using hml',
by rw [list.rotate_cons_succ, nth_rotate h₃, list.nth_append h₂, h₁, list.nth]; simp)
(λ hml',
have h₁ : (m + (n + 1)) % (l.length + 1) = 0,
from calc (m + (n + 1)) % (l.length + 1) = (l.length + 1) % (l.length + 1) :
add_assoc m n 1 ▸ nat.modeq.modeq_add
(hml'.trans (nat.mod_eq_of_lt (nat.lt_succ_self _)).symm) rfl
... = 0 : by simp,
have h₂ : l.length < (l ++ [a]).length, by simp [nat.lt_succ_self],
by rw [list.length, list.rotate_cons_succ, nth_rotate h₃, list.length_append,
list.length_cons, list.length, zero_add, hml', h₁, list.nth_concat_length]; refl)
lemma rotate_eq_self_iff_eq_repeat [hα : nonempty α] : ∀ {l : list α},
(∀ n, l.rotate n = l) ↔ ∃ a, l = list.repeat a l.length
| [] := ⟨λ h, nonempty.elim hα (λ a, ⟨a, by simp⟩), by simp⟩
| (a::l) :=
⟨λ h, ⟨a, list.ext_le (by simp) $ λ n hn h₁,
begin
rw [← option.some_inj, ← list.nth_le_nth],
conv {to_lhs, rw ← h ((list.length (a :: l)) - n)},
rw [nth_rotate hn, nat.add_sub_cancel' (le_of_lt hn),
nat.mod_self, nth_le_repeat _ hn], refl
end⟩,
λ ⟨a, ha⟩ n, ha.symm ▸ list.ext_le (by simp)
(λ m hm h,
have hm' : (m + n) % (list.repeat a (list.length (a :: l))).length < list.length (a :: l),
by rw list.length_repeat; exact nat.mod_lt _ (nat.succ_pos _),
by rw [nth_le_repeat, ← option.some_inj, ← list.nth_le_nth, nth_rotate h, list.nth_le_nth,
nth_le_repeat]; simp * at *)⟩
end list
|
e848f82394cc408a23fcdd001ae66a3ca32dd6f7
|
35677d2df3f081738fa6b08138e03ee36bc33cad
|
/src/data/equiv/denumerable.lean
|
654c4551474c3b949bc226e4536e355832b34d18
|
[
"Apache-2.0"
] |
permissive
|
gebner/mathlib
|
eab0150cc4f79ec45d2016a8c21750244a2e7ff0
|
cc6a6edc397c55118df62831e23bfbd6e6c6b4ab
|
refs/heads/master
| 1,625,574,853,976
| 1,586,712,827,000
| 1,586,712,827,000
| 99,101,412
| 1
| 0
|
Apache-2.0
| 1,586,716,389,000
| 1,501,667,958,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 9,110
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
Denumerable (countably infinite) types, as a typeclass extending
encodable. This is used to provide explicit encode/decode functions
from nat, where the functions are known inverses of each other.
-/
import data.equiv.encodable data.sigma data.fintype.basic data.list.min_max
open nat
section prio
set_option default_priority 100 -- see Note [default priority]
/-- A denumerable type is one which is (constructively) bijective with ℕ.
Although we already have a name for this property, namely `α ≃ ℕ`,
we are here interested in using it as a typeclass. -/
class denumerable (α : Type*) extends encodable α :=
(decode_inv : ∀ n, ∃ a ∈ decode n, encode a = n)
end prio
namespace denumerable
section
variables {α : Type*} {β : Type*} [denumerable α] [denumerable β]
open encodable
theorem decode_is_some (α) [denumerable α] (n : ℕ) :
(decode α n).is_some :=
option.is_some_iff_exists.2 $
(decode_inv n).imp $ λ a, Exists.fst
def of_nat (α) [f : denumerable α] (n : ℕ) : α :=
option.get (decode_is_some α n)
@[simp, priority 900]
theorem decode_eq_of_nat (α) [denumerable α] (n : ℕ) :
decode α n = some (of_nat α n) :=
option.eq_some_of_is_some _
@[simp] theorem of_nat_of_decode {n b}
(h : decode α n = some b) : of_nat α n = b :=
option.some.inj $ (decode_eq_of_nat _ _).symm.trans h
@[simp] theorem encode_of_nat (n) : encode (of_nat α n) = n :=
let ⟨a, h, e⟩ := decode_inv n in
by rwa [of_nat_of_decode h]
@[simp] theorem of_nat_encode (a) : of_nat α (encode a) = a :=
of_nat_of_decode (encodek _)
def eqv (α) [denumerable α] : α ≃ ℕ :=
⟨encode, of_nat α, of_nat_encode, encode_of_nat⟩
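-- Sanity check (a sketch, relying on `eqv` unfolding definitionally): the forward map of
-- `eqv α` is exactly `encode`.
example (a : α) : (eqv α) a = encode a := rfl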
def mk' {α} (e : α ≃ ℕ) : denumerable α :=
{ encode := e,
decode := some ∘ e.symm,
encodek := λ a, congr_arg some (e.symm_apply_apply _),
decode_inv := λ n, ⟨_, rfl, e.apply_symm_apply _⟩ }
def of_equiv (α) {β} [denumerable α] (e : β ≃ α) : denumerable β :=
{ decode_inv := λ n, by simp,
..encodable.of_equiv _ e }
@[simp] theorem of_equiv_of_nat (α) {β} [denumerable α] (e : β ≃ α)
(n) : @of_nat β (of_equiv _ e) n = e.symm (of_nat α n) :=
by apply of_nat_of_decode; show option.map _ _ = _; simp
def equiv₂ (α β) [denumerable α] [denumerable β] : α ≃ β := (eqv α).trans (eqv β).symm
instance nat : denumerable nat := ⟨λ n, ⟨_, rfl, rfl⟩⟩
@[simp] theorem of_nat_nat (n) : of_nat ℕ n = n := rfl
instance option : denumerable (option α) := ⟨λ n, by cases n; simp⟩
instance sum : denumerable (α ⊕ β) :=
⟨λ n, begin
suffices : ∃ a ∈ @decode_sum α β _ _ n,
encode_sum a = bit (bodd n) (div2 n), {simpa [bit_decomp]},
simp [decode_sum]; cases bodd n; simp [decode_sum, bit, encode_sum]
end⟩
section sigma
variables {γ : α → Type*} [∀ a, denumerable (γ a)]
instance sigma : denumerable (sigma γ) :=
⟨λ n, by simp [decode_sigma]; exact ⟨_, _, ⟨rfl, heq.rfl⟩, by simp⟩⟩
@[simp] theorem sigma_of_nat_val (n : ℕ) :
of_nat (sigma γ) n = ⟨of_nat α (unpair n).1, of_nat (γ _) (unpair n).2⟩ :=
option.some.inj $
by rw [← decode_eq_of_nat, decode_sigma_val]; simp; refl
end sigma
instance prod : denumerable (α × β) :=
of_equiv _ (equiv.sigma_equiv_prod α β).symm
@[simp] theorem prod_of_nat_val (n : ℕ) :
of_nat (α × β) n = (of_nat α (unpair n).1, of_nat β (unpair n).2) :=
by simp; refl
@[simp] theorem prod_nat_of_nat : of_nat (ℕ × ℕ) = unpair :=
by funext; simp
instance int : denumerable ℤ := denumerable.mk' equiv.int_equiv_nat
instance pnat : denumerable ℕ+ := denumerable.mk' equiv.pnat_equiv_nat
instance ulift : denumerable (ulift α) := of_equiv _ equiv.ulift
instance plift : denumerable (plift α) := of_equiv _ equiv.plift
def pair : α × α ≃ α := equiv₂ _ _
end
end denumerable
namespace nat.subtype
open function encodable
variables {s : set ℕ} [infinite s]
section classical
open_locale classical
lemma exists_succ (x : s) : ∃ n, x.1 + n + 1 ∈ s :=
classical.by_contradiction $ λ h,
have ∀ (a : ℕ) (ha : a ∈ s), a < x.val.succ,
from λ a ha, lt_of_not_ge (λ hax, h ⟨a - (x.1 + 1),
by rwa [add_right_comm, nat.add_sub_cancel' hax]⟩),
infinite.not_fintype
⟨(((multiset.range x.1.succ).filter (∈ s)).pmap
(λ (y : ℕ) (hy : y ∈ s), subtype.mk y hy)
(by simp [-multiset.range_succ])).to_finset,
by simpa [subtype.ext, multiset.mem_filter, -multiset.range_succ]⟩
end classical
variable [decidable_pred s]
def succ (x : s) : s :=
have h : ∃ m, x.1 + m + 1 ∈ s, from exists_succ x,
⟨x.1 + nat.find h + 1, nat.find_spec h⟩
lemma succ_le_of_lt {x y : s} (h : y < x) : succ y ≤ x :=
have hx : ∃ m, y.1 + m + 1 ∈ s, from exists_succ _,
let ⟨k, hk⟩ := nat.exists_eq_add_of_lt h in
have nat.find hx ≤ k, from nat.find_min' _ (hk ▸ x.2),
show y.1 + nat.find hx + 1 ≤ x.1,
by rw hk; exact add_le_add_right (add_le_add_left this _) _
lemma le_succ_of_forall_lt_le {x y : s} (h : ∀ z < x, z ≤ y) : x ≤ succ y :=
have hx : ∃ m, y.1 + m + 1 ∈ s, from exists_succ _,
show x.1 ≤ y.1 + nat.find hx + 1,
from le_of_not_gt $ λ hxy,
have y.1 + nat.find hx + 1 ≤ y.1 := h ⟨_, nat.find_spec hx⟩ hxy,
not_lt_of_le this $
calc y.1 ≤ y.1 + nat.find hx : le_add_of_nonneg_right (nat.zero_le _)
... < y.1 + nat.find hx + 1 : nat.lt_succ_self _
lemma lt_succ_self (x : s) : x < succ x :=
calc x.1 ≤ x.1 + _ : le_add_right (le_refl _)
... < succ x : nat.lt_succ_self (x.1 + _)
lemma lt_succ_iff_le {x y : s} : x < succ y ↔ x ≤ y :=
⟨λ h, le_of_not_gt (λ h', not_le_of_gt h (succ_le_of_lt h')),
λ h, lt_of_le_of_lt h (lt_succ_self _)⟩
def of_nat (s : set ℕ) [decidable_pred s] [infinite s] : ℕ → s
| 0 := ⊥
| (n+1) := succ (of_nat n)
lemma of_nat_surjective_aux : ∀ {x : ℕ} (hx : x ∈ s), ∃ n, of_nat s n = ⟨x, hx⟩
| x := λ hx, let t : list s := ((list.range x).filter (λ y, y ∈ s)).pmap
(λ (y : ℕ) (hy : y ∈ s), ⟨y, hy⟩) (by simp) in
have hmt : ∀ {y : s}, y ∈ t ↔ y < ⟨x, hx⟩,
by simp [list.mem_filter, subtype.ext, t]; intros; refl,
have wf : ∀ m : s, list.maximum t = m → m.1 < x,
from λ m hmax, by simpa [hmt] using list.maximum_mem hmax,
begin
cases hmax : list.maximum t with m,
{ exact ⟨0, le_antisymm (@bot_le s _ _)
(le_of_not_gt (λ h, list.not_mem_nil (⊥ : s) $
by rw [← list.maximum_eq_none.1 hmax, hmt]; exact h))⟩ },
{ cases of_nat_surjective_aux m.2 with a ha,
exact ⟨a + 1, le_antisymm
(by rw of_nat; exact succ_le_of_lt (by rw ha; exact wf _ hmax)) $
by rw of_nat; exact le_succ_of_forall_lt_le
(λ z hz, by rw ha; cases m; exact list.le_maximum_of_mem (hmt.2 hz) hmax)⟩ }
end
using_well_founded {dec_tac := `[tauto]}
lemma of_nat_surjective : surjective (of_nat s) :=
λ ⟨x, hx⟩, of_nat_surjective_aux hx
private def to_fun_aux (x : s) : ℕ :=
(list.range x).countp s
private lemma to_fun_aux_eq (x : s) :
to_fun_aux x = ((finset.range x).filter s).card :=
by rw [to_fun_aux, list.countp_eq_length_filter]; refl
open finset
private lemma right_inverse_aux : ∀ n, to_fun_aux (of_nat s n) = n
| 0 := begin
rw [to_fun_aux_eq, card_eq_zero, eq_empty_iff_forall_not_mem],
assume n,
rw [mem_filter, of_nat, mem_range],
assume h,
exact not_lt_of_le bot_le (show (⟨n, h.2⟩ : s) < ⊥, from h.1)
end
| (n+1) := have ih : to_fun_aux (of_nat s n) = n, from right_inverse_aux n,
have h₁ : (of_nat s n : ℕ) ∉ (range (of_nat s n)).filter s, by simp,
have h₂ : (range (succ (of_nat s n))).filter s =
insert (of_nat s n) ((range (of_nat s n)).filter s),
begin
simp only [finset.ext, mem_insert, mem_range, mem_filter],
assume m,
exact ⟨λ h, by simp only [h.2, and_true]; exact or.symm
(lt_or_eq_of_le ((@lt_succ_iff_le _ _ _ ⟨m, h.2⟩ _).1 h.1)),
λ h, h.elim (λ h, h.symm ▸ ⟨lt_succ_self _, subtype.property _⟩)
(λ h, ⟨lt_of_le_of_lt (le_of_lt h.1) (lt_succ_self _), h.2⟩)⟩
end,
begin
clear_aux_decl,
simp only [to_fun_aux_eq, of_nat, range_succ] at *,
conv {to_rhs, rw [← ih, ← card_insert_of_not_mem h₁, ← h₂] },
end
def denumerable (s : set ℕ) [decidable_pred s] [infinite s] : denumerable s :=
denumerable.of_equiv ℕ
{ to_fun := to_fun_aux,
inv_fun := of_nat s,
left_inv := left_inverse_of_surjective_of_right_inverse
of_nat_surjective right_inverse_aux,
right_inv := right_inverse_aux }
end nat.subtype
namespace denumerable
open encodable
def of_encodable_of_infinite (α : Type*) [encodable α] [infinite α] : denumerable α :=
begin
letI := @decidable_range_encode α _;
letI : infinite (set.range (@encode α _)) :=
infinite.of_injective _ (equiv.set.range _ encode_injective).injective,
letI := nat.subtype.denumerable (set.range (@encode α _)),
exact denumerable.of_equiv (set.range (@encode α _))
(equiv_range_encode α)
end
end denumerable
|
21e16caeb78980646a0209171076370688aab2b7
|
dd4e652c749fea9ac77e404005cb3470e5f75469
|
/src/eigenvectors/hermitian_form.lean
|
b202fe939b9972a07132bd0dcfc9bc63ed622c26
|
[] |
no_license
|
skbaek/cvx
|
e32822ad5943541539966a37dee162b0a5495f55
|
c50c790c9116f9fac8dfe742903a62bdd7292c15
|
refs/heads/master
| 1,623,803,010,339
| 1,618,058,958,000
| 1,618,058,958,000
| 176,293,135
| 3
| 2
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 616
|
lean
|
import linear_algebra.sesquilinear_form
universes u v
structure hermitian_form (R : Type u) (M : Type v) [ring R] (I : R ≃+* Rᵒᵖ)
[add_comm_group M] [module R M] extends sesq_form R M I :=
(sesq_comm: ∀ (x y : M), (I (sesq x y)).unop = sesq y x)
variables {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M]
variables {I : R ≃+* Rᵒᵖ} {H : hermitian_form R M I}
instance : has_coe_to_fun (hermitian_form R M I) :=
⟨_, λ H, H.to_sesq_form.sesq⟩
lemma hermitian_form.comm (H : hermitian_form R M I) : ∀ (x y : M), (I (H x y)).unop = H y x :=
by apply hermitian_form.sesq_comm
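-- A small illustration of `comm`: diagonal values of a hermitian form are fixed by the
-- anti-automorphism `I` (take `x = y` in `hermitian_form.comm`).
example (x : M) : (I (H x x)).unop = H x x := H.comm x x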
|
89aa4300fddabaeaf7c15142bf02528d008ef703
|
57c233acf9386e610d99ed20ef139c5f97504ba3
|
/src/tactic/ring.lean
|
bea374067d3f082aeaa8dd40700f1fb31193fa6c
|
[
"Apache-2.0"
] |
permissive
|
robertylewis/mathlib
|
3d16e3e6daf5ddde182473e03a1b601d2810952c
|
1d13f5b932f5e40a8308e3840f96fc882fae01f0
|
refs/heads/master
| 1,651,379,945,369
| 1,644,276,960,000
| 1,644,276,960,000
| 98,875,504
| 0
| 0
|
Apache-2.0
| 1,644,253,514,000
| 1,501,495,700,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 30,707
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import tactic.norm_num
/-!
# `ring`
Evaluate expressions in the language of commutative (semi)rings.
Based on <http://www.cs.ru.nl/~freek/courses/tt-2014/read/10.1.1.61.3041.pdf> .
-/
namespace tactic
namespace ring
/-- The normal form that `ring` uses is mediated by the function `horner a x n b := a * x ^ n + b`.
The reason we use a definition rather than the (more readable) expression on the right is because
this expression contains a number of typeclass arguments in different positions, while `horner`
contains only one `comm_semiring` instance at the top level. See also `horner_expr` for a
description of normal form. -/
def horner {α} [comm_semiring α] (a x : α) (n : ℕ) (b : α) := a * x ^ n + b
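-- A minimal sanity check of the normal form: `horner` is definitionally `a * x ^ n + b`,
-- e.g. `horner 3 x 2 1` is the expression `3 * x ^ 2 + 1`.
example {α} [comm_semiring α] (x : α) : horner 3 x 2 1 = 3 * x ^ 2 + 1 := rfl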
/-- This cache contains data required by the `ring` tactic during execution. -/
meta structure cache :=
(α : expr)
(univ : level)
(comm_semiring_inst : expr)
(red : transparency)
(ic : ref instance_cache)
(nc : ref instance_cache)
(atoms : ref (buffer expr))
/-- The monad that `ring` works in. This is a reader monad containing a mutable cache (using `ref`
for mutability), as well as the list of atoms-up-to-defeq encountered thus far, used for atom
sorting. -/
@[derive [monad, alternative]]
meta def ring_m (α : Type) : Type :=
reader_t cache tactic α
/-- Get the `ring` data from the monad. -/
meta def get_cache : ring_m cache := reader_t.read
/-- Get an already encountered atom by its index. -/
meta def get_atom (n : ℕ) : ring_m expr :=
⟨λ c, do es ← read_ref c.atoms, pure (es.read' n)⟩
/-- Get the index corresponding to an atomic expression, if it has already been encountered, or
put it in the list of atoms and return the new index, otherwise. -/
meta def add_atom (e : expr) : ring_m ℕ :=
⟨λ c, do
let red := c.red,
es ← read_ref c.atoms,
es.iterate failed (λ n e' t, t <|> (is_def_eq e e' red $> n)) <|>
(es.size <$ write_ref c.atoms (es.push_back e))⟩
/-- Lift a tactic into the `ring_m` monad. -/
@[inline] meta def lift {α} (m : tactic α) : ring_m α := reader_t.lift m
/-- Run a `ring_m` tactic in the tactic monad. This version of `ring_m.run` uses an external
atoms ref, so that subexpressions can be named across multiple `ring_m` calls. -/
meta def ring_m.run' (red : transparency) (atoms : ref (buffer expr))
(e : expr) {α} (m : ring_m α) : tactic α :=
do α ← infer_type e,
u ← mk_meta_univ,
infer_type α >>= unify (expr.sort (level.succ u)),
u ← get_univ_assignment u,
ic ← mk_instance_cache α,
(ic, c) ← ic.get ``comm_semiring,
nc ← mk_instance_cache `(ℕ),
using_new_ref ic $ λ r,
using_new_ref nc $ λ nr,
reader_t.run m ⟨α, u, c, red, r, nr, atoms⟩
/-- Run a `ring_m` tactic in the tactic monad. -/
meta def ring_m.run (red : transparency) (e : expr) {α} (m : ring_m α) : tactic α :=
using_new_ref mk_buffer $ λ atoms, ring_m.run' red atoms e m
/-- Lift an instance cache tactic (probably from `norm_num`) to the `ring_m` monad. This version
is abstract over the instance cache in question (either the ring `α`, or `ℕ` for exponents). -/
@[inline] meta def ic_lift' (icf : cache → ref instance_cache) {α}
(f : instance_cache → tactic (instance_cache × α)) : ring_m α :=
⟨λ c, do
let r := icf c,
ic ← read_ref r,
(ic', a) ← f ic,
a <$ write_ref r ic'⟩
/-- Lift an instance cache tactic (probably from `norm_num`) to the `ring_m` monad. This uses
the instance cache corresponding to the ring `α`. -/
@[inline] meta def ic_lift {α} : (instance_cache → tactic (instance_cache × α)) → ring_m α :=
ic_lift' cache.ic
/-- Lift an instance cache tactic (probably from `norm_num`) to the `ring_m` monad. This uses
the instance cache corresponding to `ℕ`, which is used for computations in the exponent. -/
@[inline] meta def nc_lift {α} : (instance_cache → tactic (instance_cache × α)) → ring_m α :=
ic_lift' cache.nc
/-- Apply a theorem that expects a `comm_semiring` instance. This is a special case of
`ic_lift mk_app`, but it comes up often because `horner` and all its theorems have this assumption;
it also does not require the tactic monad which improves access speed a bit. -/
meta def cache.cs_app (c : cache) (n : name) : list expr → expr :=
(@expr.const tt n [c.univ] c.α c.comm_semiring_inst).mk_app
/-- Every expression in the language of commutative semirings can be viewed as a sum of monomials,
where each monomial is a product of powers of atoms. We fix a global order on atoms (up to
definitional equality), and then separate the terms according to their smallest atom. So the top
level expression is `a * x^n + b` where `x` is the smallest atom and `n > 0` is a numeral, and
`n` is maximal (so `a` contains at least one monomial not containing an `x`), and `b` contains no
monomials with an `x` (hence all atoms in `b` are larger than `x`).
If there is no `x` satisfying these constraints, then the expression must be a numeral. Even though
we are working over rings, we allow rational constants when these can be interpreted in the ring,
so we can solve problems like `x / 3 = 1 / 3 * x` even though these are not technically in the
language of rings.
These constraints ensure that there is a unique normal form for each ring expression, and so the
algorithm is simply to calculate the normal form of each side and compare for equality.
To allow us to efficiently pattern match on normal forms, we maintain this inductive type that
holds a normalized expression together with its structure. All the `expr`s in this type could be
removed without loss of information, and conversely the `horner_expr` structure and the `ℕ` and
`ℚ` values can be recovered from the top level `expr`, but we keep both in order to keep proof
producing normalization functions efficient. -/
meta inductive horner_expr : Type
| const (e : expr) (coeff : ℚ) : horner_expr
| xadd (e : expr) (a : horner_expr) (x : expr × ℕ) (n : expr × ℕ) (b : horner_expr) : horner_expr
/-- Get the expression corresponding to a `horner_expr`. This can be calculated recursively from
the structure, but we cache the exprs in all subterms so that this function can be computed in
constant time. -/
meta def horner_expr.e : horner_expr → expr
| (horner_expr.const e _) := e
| (horner_expr.xadd e _ _ _ _) := e
/-- Is this expr the constant `0`? -/
meta def horner_expr.is_zero : horner_expr → bool
| (horner_expr.const _ c) := c = 0
| _ := ff
meta instance : has_coe horner_expr expr := ⟨horner_expr.e⟩
meta instance : has_coe_to_fun horner_expr (λ _, expr → expr) := ⟨λ e, ⇑(e : expr)⟩
/-- Construct a `xadd` node, generating the cached expr using the input cache. -/
meta def horner_expr.xadd' (c : cache) (a : horner_expr)
(x : expr × ℕ) (n : expr × ℕ) (b : horner_expr) : horner_expr :=
horner_expr.xadd (c.cs_app ``horner [a, x.1, n.1, b]) a x n b
open horner_expr
/-- Pretty printer for `horner_expr`. -/
meta def horner_expr.to_string : horner_expr → string
| (const e c) := to_string (e, c)
| (xadd e a x (_, n) b) :=
"(" ++ a.to_string ++ ") * (" ++ to_string x.1 ++ ")^"
++ to_string n ++ " + " ++ b.to_string
/-- Pretty printer for `horner_expr`. -/
meta def horner_expr.pp : horner_expr → tactic format
| (const e c) := pp (e, c)
| (xadd e a x (_, n) b) := do
pa ← a.pp, pb ← b.pp, px ← pp x.1,
return $ "(" ++ pa ++ ") * (" ++ px ++ ")^" ++ to_string n ++ " + " ++ pb
meta instance : has_to_tactic_format horner_expr := ⟨horner_expr.pp⟩
/-- Reflexivity conversion for a `horner_expr`. -/
meta def horner_expr.refl_conv (e : horner_expr) : ring_m (horner_expr × expr) :=
do p ← lift $ mk_eq_refl e, return (e, p)
theorem zero_horner {α} [comm_semiring α] (x n b) :
@horner α _ 0 x n b = b :=
by simp [horner]
theorem horner_horner {α} [comm_semiring α] (a₁ x n₁ n₂ b n')
(h : n₁ + n₂ = n') :
@horner α _ (horner a₁ x n₁ 0) x n₂ b = horner a₁ x n' b :=
by simp [h.symm, horner, pow_add, mul_assoc]
/-- Evaluate `horner a n x b` where `a` and `b` are already in normal form. -/
meta def eval_horner : horner_expr → expr × ℕ → expr × ℕ → horner_expr → ring_m (horner_expr × expr)
| ha@(const a coeff) x n b := do
c ← get_cache,
if coeff = 0 then
return (b, c.cs_app ``zero_horner [x.1, n.1, b])
else (xadd' c ha x n b).refl_conv
| ha@(xadd a a₁ x₁ n₁ b₁) x n b := do
c ← get_cache,
if x₁.2 = x.2 ∧ b₁.e.to_nat = some 0 then do
(n', h) ← nc_lift $ λ nc, norm_num.prove_add_nat' nc n₁.1 n.1,
return (xadd' c a₁ x (n', n₁.2 + n.2) b,
c.cs_app ``horner_horner [a₁, x.1, n₁.1, n.1, b, n', h])
else (xadd' c ha x n b).refl_conv
theorem const_add_horner {α} [comm_semiring α] (k a x n b b') (h : k + b = b') :
k + @horner α _ a x n b = horner a x n b' :=
by simp [h.symm, horner]; cc
theorem horner_add_const {α} [comm_semiring α] (a x n b k b') (h : b + k = b') :
@horner α _ a x n b + k = horner a x n b' :=
by simp [h.symm, horner, add_assoc]
theorem horner_add_horner_lt {α} [comm_semiring α] (a₁ x n₁ b₁ a₂ n₂ b₂ k a' b')
(h₁ : n₁ + k = n₂) (h₂ : (a₁ + horner a₂ x k 0 : α) = a') (h₃ : b₁ + b₂ = b') :
@horner α _ a₁ x n₁ b₁ + horner a₂ x n₂ b₂ = horner a' x n₁ b' :=
by simp [h₂.symm, h₃.symm, h₁.symm, horner, pow_add, mul_add, mul_comm, mul_left_comm]; cc
theorem horner_add_horner_gt {α} [comm_semiring α] (a₁ x n₁ b₁ a₂ n₂ b₂ k a' b')
(h₁ : n₂ + k = n₁) (h₂ : (horner a₁ x k 0 + a₂ : α) = a') (h₃ : b₁ + b₂ = b') :
@horner α _ a₁ x n₁ b₁ + horner a₂ x n₂ b₂ = horner a' x n₂ b' :=
by simp [h₂.symm, h₃.symm, h₁.symm, horner, pow_add, mul_add, mul_comm, mul_left_comm]; cc
theorem horner_add_horner_eq {α} [comm_semiring α] (a₁ x n b₁ a₂ b₂ a' b' t)
(h₁ : a₁ + a₂ = a') (h₂ : b₁ + b₂ = b') (h₃ : horner a' x n b' = t) :
@horner α _ a₁ x n b₁ + horner a₂ x n b₂ = t :=
by simp [h₃.symm, h₂.symm, h₁.symm, horner, add_mul, mul_comm (x ^ n)]; cc
/-- Evaluate `a + b` where `a` and `b` are already in normal form. -/
meta def eval_add : horner_expr → horner_expr → ring_m (horner_expr × expr)
| (const e₁ c₁) (const e₂ c₂) := ic_lift $ λ ic, do
let n := c₁ + c₂,
(ic, e) ← ic.of_rat n,
(ic, p) ← norm_num.prove_add_rat ic e₁ e₂ e c₁ c₂ n,
return (ic, const e n, p)
| he₁@(const e₁ c₁) he₂@(xadd e₂ a x n b) := do
c ← get_cache,
if c₁ = 0 then ic_lift $ λ ic, do
(ic, p) ← ic.mk_app ``zero_add [e₂],
return (ic, he₂, p)
else do
(b', h) ← eval_add he₁ b,
return (xadd' c a x n b',
c.cs_app ``const_add_horner [e₁, a, x.1, n.1, b, b', h])
| he₁@(xadd e₁ a x n b) he₂@(const e₂ c₂) := do
c ← get_cache,
if c₂ = 0 then ic_lift $ λ ic, do
(ic, p) ← ic.mk_app ``add_zero [e₁],
return (ic, he₁, p)
else do
(b', h) ← eval_add b he₂,
return (xadd' c a x n b',
c.cs_app ``horner_add_const [a, x.1, n.1, b, e₂, b', h])
| he₁@(xadd e₁ a₁ x₁ n₁ b₁) he₂@(xadd e₂ a₂ x₂ n₂ b₂) := do
c ← get_cache,
if x₁.2 < x₂.2 then do
(b', h) ← eval_add b₁ he₂,
return (xadd' c a₁ x₁ n₁ b',
c.cs_app ``horner_add_const [a₁, x₁.1, n₁.1, b₁, e₂, b', h])
else if x₁.2 ≠ x₂.2 then do
(b', h) ← eval_add he₁ b₂,
return (xadd' c a₂ x₂ n₂ b',
c.cs_app ``const_add_horner [e₁, a₂, x₂.1, n₂.1, b₂, b', h])
else if n₁.2 < n₂.2 then do
let k := n₂.2 - n₁.2,
(ek, h₁) ← nc_lift (λ nc, do
(nc, ek) ← nc.of_nat k,
(nc, h₁) ← norm_num.prove_add_nat nc n₁.1 ek n₂.1,
return (nc, ek, h₁)),
α0 ← ic_lift $ λ ic, ic.mk_app ``has_zero.zero [],
(a', h₂) ← eval_add a₁ (xadd' c a₂ x₁ (ek, k) (const α0 0)),
(b', h₃) ← eval_add b₁ b₂,
return (xadd' c a' x₁ n₁ b',
c.cs_app ``horner_add_horner_lt [a₁, x₁.1, n₁.1, b₁, a₂, n₂.1, b₂, ek, a', b', h₁, h₂, h₃])
else if n₁.2 ≠ n₂.2 then do
let k := n₁.2 - n₂.2,
(ek, h₁) ← nc_lift (λ nc, do
(nc, ek) ← nc.of_nat k,
(nc, h₁) ← norm_num.prove_add_nat nc n₂.1 ek n₁.1,
return (nc, ek, h₁)),
α0 ← ic_lift $ λ ic, ic.mk_app ``has_zero.zero [],
(a', h₂) ← eval_add (xadd' c a₁ x₁ (ek, k) (const α0 0)) a₂,
(b', h₃) ← eval_add b₁ b₂,
return (xadd' c a' x₁ n₂ b',
c.cs_app ``horner_add_horner_gt [a₁, x₁.1, n₁.1, b₁, a₂, n₂.1, b₂, ek, a', b', h₁, h₂, h₃])
else do
(a', h₁) ← eval_add a₁ a₂,
(b', h₂) ← eval_add b₁ b₂,
(t, h₃) ← eval_horner a' x₁ n₁ b',
return (t, c.cs_app ``horner_add_horner_eq
[a₁, x₁.1, n₁.1, b₁, a₂, b₂, a', b', t, h₁, h₂, h₃])
theorem horner_neg {α} [comm_ring α] (a x n b a' b')
(h₁ : -a = a') (h₂ : -b = b') :
-@horner α _ a x n b = horner a' x n b' :=
by simp [h₂.symm, h₁.symm, horner]; cc
/-- Evaluate `-a` where `a` is already in normal form. -/
meta def eval_neg : horner_expr → ring_m (horner_expr × expr)
| (const e coeff) := do
(e', p) ← ic_lift $ λ ic, norm_num.prove_neg ic e,
return (const e' (-coeff), p)
| (xadd e a x n b) := do
c ← get_cache,
(a', h₁) ← eval_neg a,
(b', h₂) ← eval_neg b,
p ← ic_lift $ λ ic, ic.mk_app ``horner_neg [a, x.1, n.1, b, a', b', h₁, h₂],
return (xadd' c a' x n b', p)
theorem horner_const_mul {α} [comm_semiring α] (c a x n b a' b')
(h₁ : c * a = a') (h₂ : c * b = b') :
c * @horner α _ a x n b = horner a' x n b' :=
by simp [h₂.symm, h₁.symm, horner, mul_add, mul_assoc]
theorem horner_mul_const {α} [comm_semiring α] (a x n b c a' b')
(h₁ : a * c = a') (h₂ : b * c = b') :
@horner α _ a x n b * c = horner a' x n b' :=
by simp [h₂.symm, h₁.symm, horner, add_mul, mul_right_comm]
/-- Evaluate `k * a` where `k` is a rational numeral and `a` is in normal form. -/
meta def eval_const_mul (k : expr × ℚ) :
horner_expr → ring_m (horner_expr × expr)
| (const e coeff) := do
(e', p) ← ic_lift $ λ ic, norm_num.prove_mul_rat ic k.1 e k.2 coeff,
return (const e' (k.2 * coeff), p)
| (xadd e a x n b) := do
c ← get_cache,
(a', h₁) ← eval_const_mul a,
(b', h₂) ← eval_const_mul b,
return (xadd' c a' x n b',
c.cs_app ``horner_const_mul [k.1, a, x.1, n.1, b, a', b', h₁, h₂])
theorem horner_mul_horner_zero {α} [comm_semiring α] (a₁ x n₁ b₁ a₂ n₂ aa t)
(h₁ : @horner α _ a₁ x n₁ b₁ * a₂ = aa)
(h₂ : horner aa x n₂ 0 = t) :
horner a₁ x n₁ b₁ * horner a₂ x n₂ 0 = t :=
by rw [← h₂, ← h₁];
simp [horner, mul_add, mul_comm, mul_left_comm, mul_assoc]
theorem horner_mul_horner {α} [comm_semiring α]
(a₁ x n₁ b₁ a₂ n₂ b₂ aa haa ab bb t)
(h₁ : @horner α _ a₁ x n₁ b₁ * a₂ = aa)
(h₂ : horner aa x n₂ 0 = haa)
(h₃ : a₁ * b₂ = ab) (h₄ : b₁ * b₂ = bb)
(H : haa + horner ab x n₁ bb = t) :
horner a₁ x n₁ b₁ * horner a₂ x n₂ b₂ = t :=
by rw [← H, ← h₂, ← h₁, ← h₃, ← h₄];
simp [horner, mul_add, mul_comm, mul_left_comm, mul_assoc]
/-- Evaluate `a * b` where `a` and `b` are in normal form. -/
meta def eval_mul : horner_expr → horner_expr → ring_m (horner_expr × expr)
| (const e₁ c₁) (const e₂ c₂) := do
(e', p) ← ic_lift $ λ ic, norm_num.prove_mul_rat ic e₁ e₂ c₁ c₂,
return (const e' (c₁ * c₂), p)
| (const e₁ c₁) e₂ :=
if c₁ = 0 then do
c ← get_cache,
α0 ← ic_lift $ λ ic, ic.mk_app ``has_zero.zero [],
p ← ic_lift $ λ ic, ic.mk_app ``zero_mul [e₂],
return (const α0 0, p)
else if c₁ = 1 then do
p ← ic_lift $ λ ic, ic.mk_app ``one_mul [e₂],
return (e₂, p)
else eval_const_mul (e₁, c₁) e₂
| e₁ he₂@(const e₂ c₂) := do
p₁ ← ic_lift $ λ ic, ic.mk_app ``mul_comm [e₁, e₂],
(e', p₂) ← eval_mul he₂ e₁,
p ← lift $ mk_eq_trans p₁ p₂, return (e', p)
| he₁@(xadd e₁ a₁ x₁ n₁ b₁) he₂@(xadd e₂ a₂ x₂ n₂ b₂) := do
c ← get_cache,
if x₁.2 < x₂.2 then do
(a', h₁) ← eval_mul a₁ he₂,
(b', h₂) ← eval_mul b₁ he₂,
return (xadd' c a' x₁ n₁ b',
c.cs_app ``horner_mul_const [a₁, x₁.1, n₁.1, b₁, e₂, a', b', h₁, h₂])
else if x₁.2 ≠ x₂.2 then do
(a', h₁) ← eval_mul he₁ a₂,
(b', h₂) ← eval_mul he₁ b₂,
return (xadd' c a' x₂ n₂ b',
c.cs_app ``horner_const_mul [e₁, a₂, x₂.1, n₂.1, b₂, a', b', h₁, h₂])
else do
(aa, h₁) ← eval_mul he₁ a₂,
α0 ← ic_lift $ λ ic, ic.mk_app ``has_zero.zero [],
(haa, h₂) ← eval_horner aa x₁ n₂ (const α0 0),
if b₂.is_zero then
return (haa, c.cs_app ``horner_mul_horner_zero
[a₁, x₁.1, n₁.1, b₁, a₂, n₂.1, aa, haa, h₁, h₂])
else do
(ab, h₃) ← eval_mul a₁ b₂,
(bb, h₄) ← eval_mul b₁ b₂,
(t, H) ← eval_add haa (xadd' c ab x₁ n₁ bb),
return (t, c.cs_app ``horner_mul_horner
[a₁, x₁.1, n₁.1, b₁, a₂, n₂.1, b₂, aa, haa, ab, bb, t, h₁, h₂, h₃, h₄, H])
theorem horner_pow {α} [comm_semiring α] (a x n m n' a') (h₁ : n * m = n') (h₂ : a ^ m = a') :
@horner α _ a x n 0 ^ m = horner a' x n' 0 :=
by simp [h₁.symm, h₂.symm, horner, mul_pow, pow_mul]
theorem pow_succ {α} [comm_semiring α] (a n b c)
(h₁ : (a:α) ^ n = b) (h₂ : b * a = c) : a ^ (n + 1) = c :=
by rw [← h₂, ← h₁, pow_succ']
/-- Evaluate `a ^ n` where `a` is in normal form and `n` is a natural numeral. -/
meta def eval_pow : horner_expr → expr × ℕ → ring_m (horner_expr × expr)
| e (_, 0) := do
c ← get_cache,
α1 ← ic_lift $ λ ic, ic.mk_app ``has_one.one [],
p ← ic_lift $ λ ic, ic.mk_app ``pow_zero [e],
return (const α1 1, p)
| e (_, 1) := do
p ← ic_lift $ λ ic, ic.mk_app ``pow_one [e],
return (e, p)
| (const e coeff) (e₂, m) := ic_lift $ λ ic, do
(ic, e', p) ← norm_num.prove_pow e coeff ic e₂,
return (ic, const e' (coeff ^ m), p)
| he@(xadd e a x n b) m := do
c ← get_cache,
match b.e.to_nat with
| some 0 := do
(n', h₁) ← nc_lift $ λ nc, norm_num.prove_mul_rat nc n.1 m.1 n.2 m.2,
(a', h₂) ← eval_pow a m,
α0 ← ic_lift $ λ ic, ic.mk_app ``has_zero.zero [],
return (xadd' c a' x (n', n.2 * m.2) (const α0 0),
c.cs_app ``horner_pow [a, x.1, n.1, m.1, n', a', h₁, h₂])
| _ := do
e₂ ← nc_lift $ λ nc, nc.of_nat (m.2-1),
(tl, hl) ← eval_pow he (e₂, m.2-1),
(t, p₂) ← eval_mul tl he,
return (t, c.cs_app ``pow_succ [e, e₂, tl, t, hl, p₂])
end
theorem horner_atom {α} [comm_semiring α] (x : α) : x = horner 1 x 1 0 :=
by simp [horner]
/-- Evaluate `a` where `a` is an atom. -/
meta def eval_atom (e : expr) : ring_m (horner_expr × expr) :=
do c ← get_cache,
i ← add_atom e,
α0 ← ic_lift $ λ ic, ic.mk_app ``has_zero.zero [],
α1 ← ic_lift $ λ ic, ic.mk_app ``has_one.one [],
return (xadd' c (const α1 1) (e, i) (`(1), 1) (const α0 0),
c.cs_app ``horner_atom [e])
lemma subst_into_pow {α} [monoid α] (l r tl tr t)
(prl : (l : α) = tl) (prr : (r : ℕ) = tr) (prt : tl ^ tr = t) : l ^ r = t :=
by rw [prl, prr, prt]
lemma unfold_sub {α} [add_group α] (a b c : α)
(h : a + -b = c) : a - b = c :=
by rw [sub_eq_add_neg, h]
lemma unfold_div {α} [division_ring α] (a b c : α)
(h : a * b⁻¹ = c) : a / b = c :=
by rw [div_eq_mul_inv, h]
/-- Evaluate a ring expression `e` recursively to normal form, together with a proof of
equality. -/
meta def eval : expr → ring_m (horner_expr × expr)
| `(%%e₁ + %%e₂) := do
(e₁', p₁) ← eval e₁,
(e₂', p₂) ← eval e₂,
(e', p') ← eval_add e₁' e₂',
p ← ic_lift $ λ ic, ic.mk_app ``norm_num.subst_into_add [e₁, e₂, e₁', e₂', e', p₁, p₂, p'],
return (e', p)
| e@`(@has_sub.sub %%α %%inst %%e₁ %%e₂) :=
mcond (succeeds (lift $ mk_app ``comm_ring [α] >>= mk_instance))
(do
e₂' ← ic_lift $ λ ic, ic.mk_app ``has_neg.neg [e₂],
e ← ic_lift $ λ ic, ic.mk_app ``has_add.add [e₁, e₂'],
(e', p) ← eval e,
p' ← ic_lift $ λ ic, ic.mk_app ``unfold_sub [e₁, e₂, e', p],
return (e', p'))
(eval_atom e)
| `(- %%e) := do
(e₁, p₁) ← eval e,
(e₂, p₂) ← eval_neg e₁,
p ← ic_lift $ λ ic, ic.mk_app ``norm_num.subst_into_neg [e, e₁, e₂, p₁, p₂],
return (e₂, p)
| `(%%e₁ * %%e₂) := do
(e₁', p₁) ← eval e₁,
(e₂', p₂) ← eval e₂,
(e', p') ← eval_mul e₁' e₂',
p ← ic_lift $ λ ic, ic.mk_app ``norm_num.subst_into_mul [e₁, e₂, e₁', e₂', e', p₁, p₂, p'],
return (e', p)
| e@`(has_inv.inv %%_) := (do
(e', p) ← lift $ norm_num.derive e <|> refl_conv e,
n ← lift $ e'.to_rat,
return (const e' n, p)) <|> eval_atom e
| e@`(@has_div.div _ %%inst %%e₁ %%e₂) := mcond
(succeeds (do
inst' ← ic_lift $ λ ic, ic.mk_app ``div_inv_monoid.to_has_div [],
lift $ is_def_eq inst inst'))
(do
e₂' ← ic_lift $ λ ic, ic.mk_app ``has_inv.inv [e₂],
e ← ic_lift $ λ ic, ic.mk_app ``has_mul.mul [e₁, e₂'],
(e', p) ← eval e,
p' ← ic_lift $ λ ic, ic.mk_app ``unfold_div [e₁, e₂, e', p],
return (e', p'))
(eval_atom e)
| e@`(@has_pow.pow _ _ %%P %%e₁ %%e₂) := do
(e₂', p₂) ← lift $ norm_num.derive e₂ <|> refl_conv e₂,
match e₂'.to_nat, P with
| some k, `(monoid.has_pow) := do
(e₁', p₁) ← eval e₁,
(e', p') ← eval_pow e₁' (e₂, k),
p ← ic_lift $ λ ic, ic.mk_app ``subst_into_pow [e₁, e₂, e₁', e₂', e', p₁, p₂, p'],
return (e', p)
| _, _ := eval_atom e
end
| e := match e.to_nat with
| some n := (const e (rat.of_int n)).refl_conv
| none := eval_atom e
end
/-- Evaluate a ring expression `e` recursively to normal form, together with a proof of
equality. -/
meta def eval' (red : transparency) (atoms : ref (buffer expr))
(e : expr) : tactic (expr × expr) :=
ring_m.run' red atoms e $ do (e', p) ← eval e, return (e', p)
theorem horner_def' {α} [comm_semiring α] (a x n b) : @horner α _ a x n b = x ^ n * a + b :=
by simp [horner, mul_comm]
theorem mul_assoc_rev {α} [semigroup α] (a b c : α) : a * (b * c) = a * b * c :=
by simp [mul_assoc]
theorem pow_add_rev {α} [monoid α] (a : α) (m n : ℕ) : a ^ m * a ^ n = a ^ (m + n) :=
by simp [pow_add]
theorem pow_add_rev_right {α} [monoid α] (a b : α) (m n : ℕ) :
b * a ^ m * a ^ n = b * a ^ (m + n) :=
by simp [pow_add, mul_assoc]
theorem add_neg_eq_sub {α} [add_group α] (a b : α) : a + -b = a - b := (sub_eq_add_neg a b).symm
/-- If `ring` fails to close the goal, it falls back on normalizing the expression to a "pretty"
form so that you can see why it failed. This setting adjusts the resulting form:
* `raw` is the form that `ring` actually uses internally, with iterated applications of `horner`.
Not very readable but useful if you don't want any postprocessing.
This results in terms like `horner (horner (horner 3 y 1 0) x 2 1) x 1 (horner 1 y 1 0)`.
* `horner` maintains the Horner form structure, but it unfolds the `horner` definition itself,
and tries to otherwise minimize parentheses.
This results in terms like `(3 * x ^ 2 * y + 1) * x + y`.
* `SOP` means sum of products form, expanding everything to monomials.
This results in terms like `3 * x ^ 3 * y + x + y`. -/
@[derive [has_reflect, decidable_eq]]
inductive normalize_mode | raw | SOP | horner
instance : inhabited normalize_mode := ⟨normalize_mode.horner⟩
/-- A `ring`-based normalization simplifier that rewrites ring expressions into the specified mode.
See `normalize`. This version takes a list of atoms to persist across multiple calls. -/
meta def normalize' (atoms : ref (buffer expr))
(red : transparency) (mode := normalize_mode.horner) (e : expr) : tactic (expr × expr) :=
do
pow_lemma ← simp_lemmas.mk.add_simp ``pow_one,
let lemmas := match mode with
| normalize_mode.SOP :=
[``horner_def', ``add_zero, ``mul_one, ``mul_add, ``mul_sub,
``mul_assoc_rev, ``pow_add_rev, ``pow_add_rev_right,
``mul_neg_eq_neg_mul_symm, ``add_neg_eq_sub]
| normalize_mode.horner :=
[``horner.equations._eqn_1, ``add_zero, ``one_mul, ``pow_one,
``neg_mul_eq_neg_mul_symm, ``add_neg_eq_sub]
| _ := []
end,
lemmas ← lemmas.mfoldl simp_lemmas.add_simp simp_lemmas.mk,
trans_conv
(λ e, do
guard (mode ≠ normalize_mode.raw),
(e', pr, _) ← simplify simp_lemmas.mk [] e,
pure (e', pr))
(λ e, do
a ← read_ref atoms,
(a, e', pr) ← ext_simplify_core a {}
simp_lemmas.mk (λ _, failed) (λ a _ _ _ e, do
write_ref atoms a,
(new_e, pr) ← match mode with
| normalize_mode.raw := eval' red atoms
| normalize_mode.horner := trans_conv (eval' red atoms)
(λ e, do (e', prf, _) ← simplify lemmas [] e, pure (e', prf))
| normalize_mode.SOP :=
trans_conv (eval' red atoms) $
trans_conv (λ e, do (e', prf, _) ← simplify lemmas [] e, pure (e', prf)) $
simp_bottom_up' (λ e, norm_num.derive e <|> pow_lemma.rewrite e)
end e,
guard (¬ new_e =ₐ e),
a ← read_ref atoms,
pure (a, new_e, some pr, ff))
(λ _ _ _ _ _, failed) `eq e,
write_ref atoms a,
pure (e', pr))
e
/-- A `ring`-based normalization simplifier that rewrites ring expressions into the specified mode.
* `raw` is the form that `ring` actually uses internally, with iterated applications of `horner`.
Not very readable but useful if you don't want any postprocessing.
This results in terms like `horner (horner (horner 3 y 1 0) x 2 1) x 1 (horner 1 y 1 0)`.
* `horner` maintains the Horner form structure, but it unfolds the `horner` definition itself,
and tries to otherwise minimize parentheses.
This results in terms like `(3 * x ^ 2 * y + 1) * x + y`.
* `SOP` means sum of products form, expanding everything to monomials.
This results in terms like `3 * x ^ 3 * y + x + y`. -/
meta def normalize (red : transparency) (mode := normalize_mode.horner) (e : expr) :
tactic (expr × expr) :=
using_new_ref mk_buffer $ λ atoms, normalize' atoms red mode e
end ring
namespace interactive
open tactic.ring
setup_tactic_parser
/-- Tactic for solving equations in the language of *commutative* (semi)rings.
This version of `ring` fails if the target is not an equality
that is provable by the axioms of commutative (semi)rings. -/
meta def ring1 (red : parse (tk "!")?) : tactic unit :=
let transp := if red.is_some then semireducible else reducible in
do `(%%e₁ = %%e₂) ← target >>= instantiate_mvars,
((e₁', p₁), (e₂', p₂)) ← ring_m.run transp e₁ $
prod.mk <$> eval e₁ <*> eval e₂,
is_def_eq e₁' e₂',
p ← mk_eq_symm p₂ >>= mk_eq_trans p₁,
tactic.exact p
/-- Parser for `ring_nf`'s `mode` argument, which can only be the "keywords" `raw`, `horner` or
`SOP`. (Because these are not actually keywords we use a name parser and postprocess the result.)
-/
meta def ring.mode : lean.parser ring.normalize_mode :=
with_desc "(SOP|raw|horner)?" $
do mode ← ident?, match mode with
| none := pure ring.normalize_mode.horner
| some `horner := pure ring.normalize_mode.horner
| some `SOP := pure ring.normalize_mode.SOP
| some `raw := pure ring.normalize_mode.raw
| _ := failed
end
/-- Simplification tactic for expressions in the language of commutative (semi)rings,
which rewrites all ring expressions into a normal form. When writing a normal form,
`ring_nf SOP` will use sum-of-products form instead of horner form.
`ring_nf!` will use a more aggressive reducibility setting to identify atoms.
-/
meta def ring_nf (red : parse (tk "!")?) (SOP : parse ring.mode) (loc : parse location) :
tactic unit :=
do ns ← loc.get_locals,
let transp := if red.is_some then semireducible else reducible,
tt ← using_new_ref mk_buffer $ λ atoms,
tactic.replace_at (normalize' atoms transp SOP) ns loc.include_goal
| fail "ring_nf failed to simplify",
when loc.include_goal $ try tactic.reflexivity
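-- Hedged usage sketch for `ring_nf`: both sides of the goal are rewritten to the same
-- normal form, after which the goal closes by reflexivity.
example (x y : ℤ) : x * y + x = x * (y + 1) := by ring_nf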
/-- Tactic for solving equations in the language of *commutative* (semi)rings.
`ring!` will use a more aggressive reducibility setting to identify atoms.
If the goal is not solvable, it falls back to rewriting all ring expressions
into a normal form, with a suggestion to use `ring_nf` instead, if this is the intent.
See also `ring1`, which is the same as `ring` but without the fallback behavior.
Based on [Proving Equalities in a Commutative Ring Done Right
in Coq](http://www.cs.ru.nl/~freek/courses/tt-2014/read/10.1.1.61.3041.pdf) by Benjamin Grégoire
and Assia Mahboubi.
-/
meta def ring (red : parse (tk "!")?) : tactic unit :=
ring1 red <|>
(ring_nf red normalize_mode.horner (loc.ns [none]) >> trace "Try this: ring_nf")
add_hint_tactic "ring"
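-- Typical use of the `ring` tactic on a goal in the language of commutative rings.
example (x y : ℤ) : (x + y) * (x + y) = x ^ 2 + 2 * x * y + y ^ 2 := by ring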
add_tactic_doc
{ name := "ring",
category := doc_category.tactic,
decl_names := [``ring, ``ring_nf, ``ring1],
inherit_description_from := ``ring,
tags := ["arithmetic", "simplification", "decision procedure"] }
end interactive
end tactic
namespace conv.interactive
open conv interactive
open tactic tactic.interactive (ring.mode ring1)
open tactic.ring (normalize normalize_mode.horner)
local postfix `?`:9001 := optional
/--
Normalises expressions in commutative (semi-)rings inside of a `conv` block using the tactic `ring_nf`.
-/
meta def ring_nf (red : parse (lean.parser.tk "!")?) (SOP : parse ring.mode) : conv unit :=
let transp := if red.is_some then semireducible else reducible in
replace_lhs (normalize transp SOP)
<|> fail "ring_nf failed to simplify"
/--
Normalises expressions in commutative (semi-)rings inside of a `conv` block using the tactic `ring`.
-/
meta def ring (red : parse (lean.parser.tk "!")?) : conv unit :=
let transp := if red.is_some then semireducible else reducible in
discharge_eq_lhs (ring1 red)
<|> (replace_lhs (normalize transp normalize_mode.horner) >> trace "Try this: ring_nf")
<|> fail "ring failed to simplify"
end conv.interactive
|
4366b872c7a3558601feb5e46a7165cb0f80b41f
|
a721fe7446524f18ba361625fc01033d9c8b7a78
|
/src/principia/mylist/mylist.lean
|
66bfe029011347f567c08348339a9a735d34f0f9
|
[] |
no_license
|
Sterrs/leaning
|
8fd80d1f0a6117a220bb2e57ece639b9a63deadc
|
3901cc953694b33adda86cb88ca30ba99594db31
|
refs/heads/master
| 1,627,023,822,744
| 1,616,515,221,000
| 1,616,515,221,000
| 245,512,190
| 2
| 0
| null | 1,616,429,050,000
| 1,583,527,118,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 17,154
|
lean
|
-- vim: ts=2 sw=0 sts=-1 et ai tw=70
import ..mynat.basic
import ..mynat.lt
import ..mynat.induction
namespace hidden
universe u
-- list of elements of type T
inductive mylist (T: Sort u)
-- allow empty to infer its type. It doesn't seem to work very often
| empty {}: mylist
| cons (head: T) (tail: mylist): mylist
open mylist
open mynat
namespace mylist
variable {T: Sort u}
-- Haskell-like convention, use x::xs to pattern match head/tail
variables {x y z: T}
variables {xs ys zs lst lst1 lst2 lst3: mylist T}
variables {m n: mynat}
-- I'm really just sort of making this up as I go along
-- It would be nice to have notation like [1, 2, 3]
notation h :: t := cons h t
def singleton (x: T) := x :: (empty: mylist T)
theorem cons_injective_1: x :: xs = y :: ys → x = y :=
λ h, (cons.inj h).left
theorem cons_injective_2: x :: xs = y :: ys → xs = ys :=
λ h, (cons.inj h).right
-- we don't define append explicitly, since lists are defined by
-- recursion on the tail. Also note that concat is defined by
-- recursion on the first argument, so you should generally induct on
-- the first argument (a worked unfolding is given after the definition below).
def concat: mylist T → mylist T → mylist T
| empty lst := lst
| (x :: xs) lst := x :: (concat xs lst)
notation lst1 ++ lst2 := concat lst1 lst2
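-- For example (illustrative): (x :: y :: empty) ++ zs unfolds to x :: ((y :: empty) ++ zs),
-- then to x :: y :: (empty ++ zs), and finally to x :: y :: zs.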
@[simp] theorem empty_concat: (empty: mylist T) ++ lst = lst := rfl
@[simp]
theorem cons_concat: (x :: xs) ++ lst = x :: (xs ++ lst) := rfl
@[simp]
theorem concat_empty: lst ++ (empty: mylist T) = lst :=
begin
induction lst with lst_head lst_tail lst_ih, {
simp,
}, {
simp [lst_ih],
},
end
@[simp]
theorem singleton_concat_cons: singleton x ++ lst = x :: lst := rfl
@[simp]
theorem cons_not_empty: x :: xs ≠ empty :=
begin
assume h,
cases h,
end
@[simp]
theorem concat_assoc: (lst1 ++ lst2) ++ lst3 = lst1 ++ (lst2 ++ lst3) :=
begin
induction lst1 with lst1_head lst1_tail lst1_ih, {
simp,
}, {
simp [lst1_ih],
},
end
instance concat_is_assoc (T: Type u): is_associative (mylist T) concat :=
⟨λ a b c, concat_assoc⟩
def len: mylist T → mynat
| empty := 0
| (_ :: xs) := succ (len xs)
@[simp]
theorem empty_len: len (empty: mylist T) = 0 := rfl
@[simp]
theorem len_cons_succ: len (x :: xs) = succ (len xs) := rfl
@[simp]
theorem len_singleton: len (singleton x) = 1 := len_cons_succ
theorem len_of_refl: lst1 = lst2 → len lst1 = len lst2 :=
begin
assume h,
rw h,
end
@[simp]
theorem len_concat_add: len (lst1 ++ lst2) = len lst1 + len lst2 :=
begin
induction lst1 with lst1_head lst1_tail lst1_ih, {
simp,
}, {
simp [lst1_ih],
},
end
def rev: mylist T → mylist T
| empty := empty
| (x :: xs) := (rev xs) ++ singleton x
@[simp] theorem rev_empty: rev (empty: mylist T) = empty := rfl
@[simp] theorem rev_cons: rev (x :: xs) = (rev xs) ++ singleton x := rfl
@[simp] theorem rev_singleton: rev (singleton x) = singleton x := rfl
@[simp]
theorem rev_append: rev (lst ++ singleton x) = x :: rev lst :=
begin
induction lst with lst_head lst_tail lst_ih, {
refl,
}, {
rw [cons_concat, rev_cons, lst_ih],
refl,
},
end
theorem rev_concat: rev (lst1 ++ lst2) = rev lst2 ++ rev lst1 :=
begin
induction lst1 with lst1_head lst1_tail lst1_ih, {
rw rev_empty,
rw concat_empty,
refl,
}, {
rw [cons_concat, rev_cons, lst1_ih, rev_cons, concat_assoc],
},
end
@[simp]
theorem rev_rev: rev (rev lst) = lst :=
begin
induction lst with lst_head lst_tail lst_ih, {
simp,
}, {
simp [lst_ih, rev_concat],
},
end
@[simp]
theorem rev_len: len (rev lst) = len lst :=
begin
induction lst with lst_head lst_tail lst_ih, {
simp,
}, {
simp [lst_ih],
},
end
theorem empty_iff_len_zero: lst = empty ↔ len lst = 0 :=
begin
split, {
assume h,
rw h,
refl,
}, {
assume h,
cases lst, {
refl,
}, {
exfalso,
from succ_ne_zero h,
},
},
end
theorem nonempty_iff_len_nonzero: lst ≠ empty ↔ len lst ≠ 0 :=
iff_to_contrapositive empty_iff_len_zero
theorem rev_not_empty: lst ≠ empty → rev lst ≠ empty :=
begin
repeat {rw nonempty_iff_len_nonzero},
rw rev_len,
assume h, from h,
end
-- These are some "maybe" operations, which are undefined on empty lists
-- (or, for some of them, on lists that are too short), so they take as an argument
-- a proof that the input list has the correct form,
-- which makes them dependently typed; hence they are written as Πs.
-- first element
def head: Π lst: mylist T, lst ≠ empty → T
| empty h := absurd rfl h
| (x :: _) _ := x
@[simp] theorem first_cons (h: x :: xs ≠ empty): head (x :: xs) h = x := rfl
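-- For example (illustrative): `head (x :: xs) cons_not_empty` is accepted because
-- `cons_not_empty` discharges the nonemptiness obligation, and it reduces to `x` by the
-- simp lemma above.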
-- everything except first element
def tail: Π lst: mylist T, lst ≠ empty → mylist T
| empty h := absurd rfl h
| (_ :: xs) _ := xs
@[simp] theorem tail_cons (h: x :: xs ≠ empty): tail (x :: xs) h = xs := rfl
-- everything except last element
def init: Π lst: mylist T, lst ≠ empty → mylist T
| empty h := absurd rfl h
| (x :: empty) _ := empty
| (x :: y :: xs) _ := x :: init (y :: xs) cons_not_empty
@[simp]
theorem init_singleton (h: singleton x ≠ empty):
init (singleton x) h = empty := rfl
@[simp]
theorem init_ccons (h: x :: y :: xs ≠ empty):
init (x :: y :: xs) h = x :: init (y :: xs) cons_not_empty := rfl
-- last element
def last: Π lst: mylist T, lst ≠ empty → T
| empty h := absurd rfl h
| (x :: empty) _ := x
| (x :: y :: xs) h := last (y :: xs) cons_not_empty
@[simp]
theorem last_singleton (h: singleton x ≠ empty):
last (singleton x) h = x := rfl
@[simp]
theorem last_ccons (h: x :: y :: xs ≠ empty):
last (x :: y :: xs) h = last (y :: xs) cons_not_empty := rfl
private theorem len_cons_succ_cancel1 (h: succ n ≤ len (x :: xs)): n ≤ len xs :=
begin
simp at h,
from le_succ_cancel h,
end
private theorem absurd_succ_le_zero: ¬succ n ≤ 0 :=
begin
assume h,
simp [le_iff_lt_succ] at h,
exfalso, from lt_nzero (lt_succ_cancel h),
end
-- the first n elements
def take: Π n: mynat, Π lst: mylist T, n ≤ len lst → mylist T
| 0 _ _ := empty
| (succ n) empty h := absurd h absurd_succ_le_zero
| (succ n) (x :: xs) h := x :: take n xs (len_cons_succ_cancel1 h)
@[simp] theorem take_zero (h: 0 ≤ len lst): take 0 lst h = empty := rfl
@[simp]
theorem take_succ_cons
(h: succ n ≤ len (x :: xs)):
take (succ n) (x :: xs) h
= x :: take n xs (len_cons_succ_cancel1 h) := rfl
-- everything except the first n elements
def drop: Π n: mynat, Π lst: mylist T, n ≤ len lst → mylist T
| 0 lst _ := lst
| (succ n) empty h := absurd h absurd_succ_le_zero
| (succ n) (_ :: xs) h := drop n xs (len_cons_succ_cancel1 h)
@[simp] theorem drop_zero (h: 0 ≤ len lst): drop 0 lst h = lst := rfl
@[simp]
theorem drop_succ_cons
(h: succ n ≤ len (x :: xs)):
drop (succ n) (x :: xs) h = drop n xs (len_cons_succ_cancel1 h) := rfl
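-- For example (illustrative): take 2 (a :: b :: c :: empty) reduces to a :: b :: empty and
-- drop 2 (a :: b :: c :: empty) reduces to c :: empty; concatenating the two recovers the
-- original list (see take_concat_drop below).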
private theorem len_cons_succ_cancel2 (h: succ n < len (x :: xs)):
n < len xs :=
begin
simp at h,
from lt_succ_cancel h,
end
-- the nth element
def get: Π n: mynat, Π lst: mylist T, n < len lst → T
| n empty h := absurd h lt_nzero
| 0 (x :: _) _ := x
| (succ n) (x :: xs) h := get n xs (len_cons_succ_cancel2 h)
@[simp]
theorem get_zero_cons
(h: 0 < len (x :: xs)):
get 0 (x :: xs) h = x := rfl
@[simp]
theorem get_succ_cons
(h: succ n < len (x :: xs)):
get (succ n) (x :: xs) h = get n xs (len_cons_succ_cancel2 h) := rfl
-- TODO: state this without tactic mode
@[simp] theorem get_zero_head (h: 0 < len lst):
get 0 lst h = head lst
begin
assume h',
simp [h', lt_nrefl] at h,
assumption,
end :=
begin
cases lst, {
refl,
}, {
simp,
},
end
theorem cons_head_tail (h: lst ≠ empty): head lst h :: tail lst h = lst :=
begin
cases lst, {
contradiction,
}, {
simp,
},
end
theorem len_tail (h: lst ≠ empty): len lst = succ (len (tail lst h)) :=
begin
cases lst, {
contradiction,
}, {
simp,
},
end
-- I didn't really think this one through
theorem len_init (h: lst ≠ empty): len lst = succ (len (init lst h)) :=
begin
induction lst, {
contradiction,
}, {
cases lst_tail, {
refl,
}, {
have := lst_ih cons_not_empty,
dsimp [len] at this,
dsimp [len],
rw this,
},
},
end
theorem append_init_last (h: lst ≠ empty):
init lst h ++ (singleton (last lst h)) = lst :=
begin
induction lst, {
from absurd rfl h,
}, {
cases lst_tail, {
refl,
}, {
rw init_ccons h,
rw last_ccons h,
simp,
apply lst_ih,
},
},
end
private theorem succ_le_impl_le (h: succ n ≤ len lst): n ≤ len lst :=
(le_cancel_strong 1 h)
theorem len_take_succ
(hsnl: succ n ≤ len lst):
len (take (succ n) lst hsnl)
= succ (len (take n lst (succ_le_impl_le hsnl))) :=
begin
induction n with n_n n_ih generalizing lst, {
simp,
cases lst, {
exfalso,
simp at hsnl, -- clearly absurd. Is there a quicker way?
cases hsnl with d hd,
from mynat.no_confusion (add_integral hd.symm),
}, {
-- why on Earth is this SO DIFFICULT
-- I don't understand why I can't go straight to the rw
have h: (1: mynat) = succ 0 := rfl,
conv {
to_lhs,
congr,
congr,
rw h,
},
rw take_succ_cons,
simp,
},
}, {
cases lst, {
exfalso,
cases hsnl with d hd,
from mynat.no_confusion (add_integral hd.symm),
}, {
simp,
rw len_cons_succ at hsnl,
from n_ih (le_succ_cancel hsnl),
},
},
end
@[simp]
theorem len_take (hnl: n ≤ len lst):
len (take n lst hnl) = n :=
begin
induction n, {
refl,
}, {
-- really all the hard work happens in len_take_succ
simp [len_take_succ],
apply n_ih,
},
end
theorem take_concat_drop (hnl: n ≤ len lst):
take n lst hnl ++ drop n lst hnl = lst :=
begin
induction n generalizing lst, {
refl,
}, {
cases lst, {
simp at hnl,
cases hnl with d hd,
rw succ_add at hd,
cases hd,
}, {
simp,
apply n_ih,
},
},
end
theorem len_drop (hnl: n ≤ len lst):
len (drop n lst hnl) + n = len lst :=
begin
conv {
to_lhs, congr, skip,
rw ←len_take hnl,
},
rw [add_comm, ←len_concat_add, take_concat_drop],
end
theorem get_head_drop
(hnl: n < len lst):
get n lst hnl = head (drop n lst (lt_impl_le hnl)) (
begin
rw nonempty_iff_len_nonzero,
have := len_drop (lt_impl_le hnl),
rw ←this at hnl,
conv at hnl {
to_lhs,
rw ←add_zero n,
rw add_comm,
},
have this2 := lt_cancel n hnl,
assume h,
rw h at this2,
from lt_nrefl this2,
end
) :=
begin
induction n generalizing lst, {
simp,
}, {
cases lst, {
exfalso, from lt_nzero hnl,
}, {
simp,
apply n_ih,
},
},
end
theorem concat_cancel_left: lst1 ++ lst2 = lst1 ++ lst3 → lst2 = lst3 :=
begin
assume hl1l2l1l3,
induction lst1, {
simp at hl1l2l1l3,
assumption,
}, {
simp at hl1l2l1l3,
apply lst1_ih,
assumption,
},
end
theorem rev_injective: rev lst1 = rev lst2 → lst1 = lst2 :=
begin
-- this is a bit silly. Is there a better way to apply
-- something to both sides in Lean?
assume hrl1rl2,
suffices hrr: rev (rev lst1) = rev (rev lst2), {
repeat {rw rev_rev at hrr},
assumption,
}, {
rw hrl1rl2,
},
end
theorem concat_cancel_right: lst2 ++ lst1 = lst3 ++ lst1 → lst2 = lst3 :=
begin
assume hl1l2l1l3,
apply rev_injective,
apply @concat_cancel_left _ (rev lst1),
rw [←rev_concat, hl1l2l1l3, rev_concat],
end
@[simp]
theorem take_all (h: len lst ≤ len lst): take (len lst) lst h = lst :=
begin
induction lst with lst_head lst_tail lst_ih, {
refl,
}, {
simp,
from lst_ih _,
},
end
@[simp]
theorem drop_all (h: len lst ≤ len lst): drop (len lst) lst h = empty :=
begin
induction lst with lst_head lst_tail lst_ih, {
refl,
}, {
simp,
from lst_ih _,
},
end
theorem take_idem
(hml: m ≤ len lst):
take m (take m lst hml) (
begin
rw len_take,
end
) = take m lst hml :=
begin
induction m with m hm generalizing lst, {
refl,
}, {
cases lst, {
exfalso,
from lt_nzero (lt_iff_succ_le.mpr hml),
}, {
simp,
apply hm,
},
},
end
theorem take_take
(hml: m ≤ len lst)
(hnl: n ≤ len (take m lst hml)):
take n (take m lst hml) hnl = take n lst (
begin
rw len_take at hnl,
from le_trans hnl hml,
end
) :=
begin
induction n with n hn generalizing m lst, {
refl,
}, {
cases lst, {
exfalso,
rw len_take at hnl,
from lt_nzero (lt_iff_succ_le.mpr (le_trans hnl hml)),
}, {
cases m, {
exfalso,
simp at hnl,
from lt_nzero (lt_iff_succ_le.mpr hnl),
}, {
simp,
apply hn,
},
},
},
end
theorem drop_drop
(hml: m ≤ len lst)
(hnl: n ≤ len (drop m lst hml)):
drop n (drop m lst hml) hnl = drop (m + n) lst (
begin
rw ←len_drop hml,
rw add_comm m n,
from le_comb hnl le_refl,
end
) :=
begin
induction m with m hm generalizing n lst, {
simp,
}, {
cases lst, {
exfalso,
from succ_ne_zero (le_zero hml),
}, {
simp,
apply hm,
},
},
end
theorem drop_one_tail
(h1l: 1 ≤ len lst):
drop 1 lst h1l = tail lst (
begin
rw nonempty_iff_len_nonzero,
assume h,
rw h at h1l,
from succ_nle_zero h1l,
end
) :=
begin
cases lst, {
refl,
}, {
refl,
},
end
theorem take_init
(hnl: len xs ≤ len (xs ++ singleton x)):
take (len xs) (xs ++ singleton x) hnl = xs :=
begin
induction xs, {
refl,
}, {
simp,
apply xs_ih,
},
end
theorem take_ignore
(hnl: n ≤ len xs):
take n (xs ++ singleton x) (
begin
rw len_concat_add,
from le_add_rhs hnl,
end
) = take n xs hnl :=
begin
suffices h:
take n (take (len xs) (xs ++ singleton x) _) _ = take n xs hnl, {
rw @take_take _ (xs ++ singleton x) (len xs) n _ _ at h,
from h,
}, {
conv {
to_lhs,
congr, skip,
rw take_init,
},
}, {
simp,
from @le_to_add _ 1,
}, {
simp,
assumption,
},
end
theorem rev_drop_take
(hmn: m + n = len lst):
drop m lst (
begin
rw ←hmn,
from le_to_add,
end
) = rev (take n (rev lst) (
begin
rw rev_len,
rw ←hmn,
rw add_comm,
from le_to_add,
end
)) :=
begin
induction m with m hm generalizing lst, {
simp at hmn,
conv in n {rw hmn},
conv in (len lst) {rw ←rev_len},
rw take_all,
conv in zero {rw zz},
rw drop_zero,
rw rev_rev,
}, {
cases lst, {
exfalso,
rw succ_add at hmn,
from succ_ne_zero hmn,
}, {
rw drop_succ_cons,
have hmlt: m + n = len lst_tail, {
simp at hmn,
assumption,
},
have := hm hmlt, {
rw this,
conv in (rev (lst_head :: lst_tail)) {rw rev_cons},
rw take_ignore,
},
},
},
end
-- for some reason this has to be a Type ???
-- otherwise has_mem complains
variable {T': Type u}
variables {x' y' z': T'}
variables {xs' ys' zs': mylist T'}
-- the well-founded relation "is one strictly shorter than the other"
def shorter (lst1 lst2: mylist T) := len lst1 < len lst2
private theorem shorter_lt:
shorter lst1 lst2 ↔ lt (len lst1) (len lst2) := iff.rfl
-- it should be easier than this, right? how on earth do I just say
-- "because < is well-founded".
theorem shorter_well_founded: well_founded (@shorter T) :=
begin
split,
-- can't figure out how to use "generalizing"
suffices h:
∀ a: mylist T,
∀ n,
n = len a → acc shorter a, {
intro a,
from h a (len a) rfl,
}, {
intros a n, revert n a,
assume hnla,
apply strict_strong_induction
(λ n, ∀ a : mylist T, n = len a → acc shorter a), {
intro n,
assume h_ih,
intro a,
assume hnla,
split,
intro y,
assume hysa,
rw [shorter_lt, ←hnla] at hysa,
from h_ih (len y) hysa y rfl,
},
},
end
-- this made the red lines go away
namespace lwf
instance: has_well_founded (mylist T) :=
⟨shorter, shorter_well_founded⟩
end lwf
-- attempts at defining things that recurse on the init
-- private def lst_is_odd: mylist T → Prop
-- | empty := false
-- | (x :: xs) := have shorter (init (x :: xs) (cons_not_empty)) (x :: xs),
-- from begin
-- rw shorter_lt,
-- have: ∀ x y, lt x y ↔ x < y := (λ x y, iff.rfl),
-- rw this,
-- rw lt_iff_succ_le,
-- rw ←len_init,
-- from le_refl,
-- end,
-- ¬lst_is_odd (init (x :: xs) (cons_not_empty))
-- TODO: make this work
-- def palindrome: mylist T → Prop
-- | empty := true
-- | (x :: empty) := true
-- | (x :: y :: xs) := x = last (y :: xs) (cons_not_empty _ _)
-- ∧ palindrome (init (y :: xs) (cons_not_empty _ _))
end mylist
end hidden
|
4494c56fcf966f1a686a47941d6cb925e4aa7f11
|
aa3f8992ef7806974bc1ffd468baa0c79f4d6643
|
/tests/lean/run/class11.lean
|
faf7b9fa622be3809df077d707c3f1e2e0748bb0
|
[
"Apache-2.0"
] |
permissive
|
codyroux/lean
|
7f8dff750722c5382bdd0a9a9275dc4bb2c58dd3
|
0cca265db19f7296531e339192e9b9bae4a31f8b
|
refs/heads/master
| 1,610,909,964,159
| 1,407,084,399,000
| 1,416,857,075,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 184
|
lean
|
import logic
constant C {A : Type} : A → Prop
class C
constant f {A : Type} (a : A) [H : C a] : Prop
definition g {A : Type} (a b : A) {H1 : C a} {H2 : C b} : Prop :=
f a ∧ f b
|
f6367c86efedaba307d125e7a30893c13c8f07be
|
a7dd8b83f933e72c40845fd168dde330f050b1c9
|
/src/topology/algebra/uniform_ring.lean
|
f5e0b03eeea657a1bd2487f6040e54fb17f78d4d
|
[
"Apache-2.0"
] |
permissive
|
NeilStrickland/mathlib
|
10420e92ee5cb7aba1163c9a01dea2f04652ed67
|
3efbd6f6dff0fb9b0946849b43b39948560a1ffe
|
refs/heads/master
| 1,589,043,046,346
| 1,558,938,706,000
| 1,558,938,706,000
| 181,285,984
| 0
| 0
|
Apache-2.0
| 1,568,941,848,000
| 1,555,233,833,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 6,577
|
lean
|
/-
Copyright (c) 2018 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot, Johannes Hölzl
Theory of topological rings with uniform structure.
-/
import topology.algebra.group_completion topology.algebra.ring
open classical set lattice filter topological_space add_comm_group
local attribute [instance] classical.prop_decidable
noncomputable theory
namespace uniform_space.completion
open dense_embedding uniform_space
variables (α : Type*) [ring α] [uniform_space α] [uniform_add_group α] [topological_ring α] [separated α]
instance is_Z_bilin_mul : is_Z_bilin (λp:α×α, p.1 * p.2) :=
⟨assume a a' b, add_mul a a' b, assume a b b', mul_add a b b'⟩
instance : has_one (completion α) := ⟨(1:α)⟩
instance : has_mul (completion α) :=
⟨λa b, extend (dense_embedding_coe.prod dense_embedding_coe)
((coe : α → completion α) ∘ (λp:α×α, p.1 * p.2)) (a, b)⟩
lemma coe_one : ((1 : α) : completion α) = 1 := rfl
lemma continuous_mul' : continuous (λp:completion α×completion α, p.1 * p.2) :=
suffices continuous $ extend (dense_embedding_coe.prod dense_embedding_coe) $
((coe : α → completion α) ∘ (λp:α×α, p.1 * p.2)),
{ convert this, ext ⟨a, b⟩, refl },
extend_Z_bilin dense_embedding_coe dense_embedding_coe ((continuous_coe α).comp continuous_mul')
section rules
variables {α}
lemma coe_mul (a b : α) : ((a * b : α) : completion α) = a * b :=
eq.symm (extend_e_eq (dense_embedding_coe.prod dense_embedding_coe) (a, b))
lemma continuous_mul {β : Type*} [topological_space β] {f g : β → completion α}
(hf : continuous f) (hg : continuous g) : continuous (λb, f b * g b) :=
(continuous_mul' α).comp (continuous.prod_mk hf hg)
end rules
instance : ring (completion α) :=
{ one_mul := assume a, completion.induction_on a
(is_closed_eq (continuous_mul continuous_const continuous_id) continuous_id)
(assume a, by rw [← coe_one, ← coe_mul, one_mul]),
mul_one := assume a, completion.induction_on a
(is_closed_eq (continuous_mul continuous_id continuous_const) continuous_id)
(assume a, by rw [← coe_one, ← coe_mul, mul_one]),
mul_assoc := assume a b c, completion.induction_on₃ a b c
(is_closed_eq
(continuous_mul (continuous_mul continuous_fst (continuous_fst.comp continuous_snd))
(continuous_snd.comp continuous_snd))
(continuous_mul continuous_fst
(continuous_mul (continuous_fst.comp continuous_snd) (continuous_snd.comp continuous_snd))))
(assume a b c, by rw [← coe_mul, ← coe_mul, ← coe_mul, ← coe_mul, mul_assoc]),
left_distrib := assume a b c, completion.induction_on₃ a b c
(is_closed_eq
(continuous_mul continuous_fst (continuous_add
(continuous_fst.comp continuous_snd)
(continuous_snd.comp continuous_snd)))
(continuous_add
(continuous_mul continuous_fst (continuous_fst.comp continuous_snd))
(continuous_mul continuous_fst (continuous_snd.comp continuous_snd))))
(assume a b c, by rw [← coe_add, ← coe_mul, ← coe_mul, ← coe_mul, ←coe_add, mul_add]),
right_distrib := assume a b c, completion.induction_on₃ a b c
(is_closed_eq
(continuous_mul (continuous_add continuous_fst
(continuous_fst.comp continuous_snd)) (continuous_snd.comp continuous_snd))
(continuous_add
(continuous_mul continuous_fst (continuous_snd.comp continuous_snd))
(continuous_mul (continuous_fst.comp continuous_snd) (continuous_snd.comp continuous_snd))))
(assume a b c, by rw [← coe_add, ← coe_mul, ← coe_mul, ← coe_mul, ←coe_add, add_mul]),
..completion.add_comm_group, ..completion.has_mul α, ..completion.has_one α }
instance is_ring_hom_coe : is_ring_hom (coe : α → completion α) :=
⟨coe_one α, assume a b, coe_mul a b, assume a b, coe_add a b⟩
universe u
instance is_ring_hom_extension
{β : Type u} [uniform_space β] [ring β] [uniform_add_group β] [topological_ring β]
[complete_space β] [separated β]
{f : α → β} [is_ring_hom f] (hf : continuous f) :
is_ring_hom (completion.extension f) :=
have hf : uniform_continuous f, from uniform_continuous_of_continuous hf,
{ map_one := by rw [← coe_one, extension_coe hf, is_ring_hom.map_one f],
map_add := assume a b, completion.induction_on₂ a b
(is_closed_eq
(continuous_extension.comp continuous_add')
(continuous_add (continuous_extension.comp continuous_fst) (continuous_extension.comp continuous_snd)))
(assume a b,
by rw [← coe_add, extension_coe hf, extension_coe hf, extension_coe hf, is_add_group_hom.map_add f]),
map_mul := assume a b, completion.induction_on₂ a b
(is_closed_eq
(continuous_extension.comp (continuous_mul' α))
(_root_.continuous_mul (continuous_extension.comp continuous_fst) (continuous_extension.comp continuous_snd)))
(assume a b,
by rw [← coe_mul, extension_coe hf, extension_coe hf, extension_coe hf, is_ring_hom.map_mul f]) }
end uniform_space.completion
namespace uniform_space
variables {α : Type*}
lemma ring_sep_rel (α) [comm_ring α] [uniform_space α] [uniform_add_group α] [topological_ring α] :
separation_setoid α = submodule.quotient_rel (ideal.closure ⊥) :=
setoid.ext $ assume x y, group_separation_rel x y
lemma ring_sep_quot (α) [r : comm_ring α] [uniform_space α] [uniform_add_group α] [topological_ring α] :
quotient (separation_setoid α) = (⊥ : ideal α).closure.quotient :=
by rw [@ring_sep_rel α r]; refl
def sep_quot_equiv_ring_quot (α)
[r : comm_ring α] [uniform_space α] [uniform_add_group α] [topological_ring α] :
quotient (separation_setoid α) ≃ (⊥ : ideal α).closure.quotient :=
quotient.congr $ assume x y, group_separation_rel x y
/- TODO: use a form of transport a.k.a. lift definition a.k.a. transfer -/
instance [comm_ring α] [uniform_space α] [uniform_add_group α] [topological_ring α] :
comm_ring (quotient (separation_setoid α)) :=
by rw ring_sep_quot α; apply_instance
instance [comm_ring α] [uniform_space α] [uniform_add_group α] [topological_ring α] :
topological_ring (quotient (separation_setoid α)) :=
begin
convert topological_ring_quotient (⊥ : ideal α).closure,
{ apply ring_sep_rel },
{ dsimp [topological_ring_quotient_topology, quotient.topological_space, to_topological_space],
congr,
apply ring_sep_rel,
apply ring_sep_rel },
{ apply ring_sep_rel },
{ simp [uniform_space.comm_ring] },
end
end uniform_space
|
305166bf58f95f2bd2ea9609b6e28e0ab23c0478
|
a9d0fb7b0e4f802bd3857b803e6c5c23d87fef91
|
/library/data/nat/examples/fib.lean
|
eaee98a1efacced66fc94162b96fd0522cb92cbe
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean-osx
|
4a954262c780e404c1369d6c06516161d07fcb40
|
3670278342d2f4faa49d95b46d86642d7875b47c
|
refs/heads/master
| 1,611,410,334,552
| 1,474,425,686,000
| 1,474,425,686,000
| 12,043,103
| 5
| 1
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,470
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
import data.nat
open nat
definition fib : nat → nat
| 0 := 1
| 1 := 1
| (n+2) := fib (n+1) + fib n
private definition fib_fast_aux : nat → (nat × nat)
| 0 := (0, 1)
| 1 := (1, 1)
| (n+2) :=
match (fib_fast_aux (n+1)) with
| (fn, fn1) := (fn1, fn1 + fn)
end
open prod -- Get .1 .2 notation for pairs
definition fib_fast (n : nat) := (fib_fast_aux n).2
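-- For example, fib_fast_aux sends 0 ↦ (0, 1), 1 ↦ (1, 1), 2 ↦ (1, 2), 3 ↦ (2, 3), 4 ↦ (3, 5)
-- and 5 ↦ (5, 8), so fib_fast 5 = 8, in agreement with fib 5 = 8.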
-- We now prove that fib_fast and fib are equal
lemma fib_fast_aux_lemma : ∀ n, (fib_fast_aux (succ n)).1 = (fib_fast_aux n).2
| 0 := rfl
| 1 := rfl
| (succ (succ n)) :=
sorry
/-
begin
-- TODO(Leo): fix unfold
-- unfold fib_fast_aux at {1}, esimp,
-- rewrite [-prod.eta (fib_fast_aux _)],
apply sorry
end
-/
theorem fib_eq_fib_fast : ∀ n, fib_fast n = fib n
| 0 := rfl
| 1 := rfl
| (succ (succ n)) :=
sorry
/-
begin
have feq : fib_fast n = fib n, from fib_eq_fib_fast n,
have f1eq : fib_fast (succ n) = fib (succ n), from fib_eq_fib_fast (succ n),
-- TODO(Leo): fix unfold
apply sorry
/-
unfold [fib, fib_fast, fib_fast_aux],
rewrite [-prod.eta (fib_fast_aux _)],
fold fib_fast (succ n), rewrite f1eq,
rewrite fib_fast_aux_lemma,
fold fib_fast n, rewrite feq,
-/
end
-/
|
99c25bb06800b37ca25785be6165d958870b0e2e
|
8cae430f0a71442d02dbb1cbb14073b31048e4b0
|
/scripts/lint_mathlib.lean
|
f4ae1f3d52ca71256400e5220d164e7fdfff000c
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathlib
|
56a2cadd17ac88caf4ece0a775932fa26327ba0e
|
442a83d738cb208d3600056c489be16900ba701d
|
refs/heads/master
| 1,693,584,102,358
| 1,693,471,902,000
| 1,693,471,902,000
| 97,922,418
| 1,595
| 352
|
Apache-2.0
| 1,694,693,445,000
| 1,500,624,130,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 3,745
|
lean
|
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis, Gabriel Ebner
-/
import tactic.lint
import system.io -- these are required
import all -- then import everything, to parse the library for failing linters
/-!
# lint_mathlib
Script that runs the linters listed in `mathlib_linters` on all of mathlib.
As a side effect, the file `nolints.txt` is generated in the current
directory. This script needs to be run in the root directory of mathlib.
It assumes that files generated by `mk_all.sh` are present.
This is used by the CI script for mathlib.
Usage: `lean --run scripts/lint_mathlib.lean`
The optional flag `--github` can be placed at the end of the line; this is intended for CI, and
when this flag is enabled `lint_mathlib` will output error codes understood by GitHub actions.
These tag linter failures with the position in the source code causing the failure. The purpose of
this is to cause GitHub to annotate the files tab of PRs with the linter failure messages when a
linter fails.
-/
open native tactic
/--
Returns the contents of the `nolints.txt` file.
-/
meta def mk_nolint_file (env : environment) (mathlib_path_len : ℕ)
(results : list (name × linter × rb_map name string)) : format := do
let failed_decls_by_file := rb_lmap.of_list (do
(linter_name, _, decls) ← results,
(decl_name, _) ← decls.to_list,
let file_name := (env.decl_olean decl_name).get_or_else "",
pure (file_name.popn mathlib_path_len, decl_name.to_string, linter_name.last)),
format.intercalate format.line $
"import .all" ::
"run_cmd tactic.skip" :: do
(file_name, decls) ← failed_decls_by_file.to_list.reverse,
"" :: ("-- " ++ file_name) :: do
(decl, linters) ← (rb_lmap.of_list decls).to_list.reverse,
pure $ "apply_nolint " ++ decl ++ " " ++ " ".intercalate linters
/--
Parses the list of lines of the `nolints.txt` into an `rb_lmap` from linters to declarations.
-/
meta def parse_nolints (lines : list string) : rb_lmap name name :=
rb_lmap.of_list $ do
line ← lines,
guard $ line.front = 'a',
_ :: decl :: linters ← pure $ line.split (= ' ') | [],
let decl := name.from_string decl,
linter ← linters,
pure (linter, decl)
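-- For example (illustrative, hypothetical names): the `nolints.txt` line
-- `apply_nolint foo.bar doc_blame ge_or_gt` is parsed into the entries
-- (`doc_blame`, `foo.bar`) and (`ge_or_gt`, `foo.bar`).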
open io io.fs
/--
Reads the `nolints.txt`, and returns it as an `rb_lmap` from linters to declarations.
-/
meta def read_nolints_file (fn := "scripts/nolints.txt") : io (rb_lmap name name) := do
cont ← io.fs.read_file fn,
pure $ parse_nolints $ cont.to_string.split (= '\n')
meta instance coe_tactic_to_io {α} : has_coe (tactic α) (io α) :=
⟨run_tactic⟩
/--
Writes a file with the given contents.
-/
meta def io.write_file (fn : string) (contents : string) : io unit := do
h ← mk_file_handle fn mode.write,
put_str h contents,
close h
/-- Runs when called with `lean --run` -/
meta def main : io unit := do
env ← get_env,
args ← io.cmdline_args,
mathlib_path ← get_mathlib_dir,
decls ← lint_project_decls mathlib_path,
linters ← get_linters mathlib_linters,
let non_auto_decls := decls.filter (λ d, ¬ d.is_auto_or_internal env),
results₀ ← lint_core decls non_auto_decls linters,
nolint_file ← read_nolints_file,
let results := (do
(linter_name, linter, decls) ← results₀,
[(linter_name, linter, (nolint_file.find linter_name).foldl rb_map.erase decls)]),
let emit_workflow_commands : bool := "--github" ∈ args,
io.print $ to_string $ format_linter_results env results decls non_auto_decls
mathlib_path.length "in mathlib" tt lint_verbosity.medium linters.length emit_workflow_commands,
io.write_file "nolints.txt" $ to_string $ mk_nolint_file env mathlib_path.length results₀,
if results.all (λ r, r.2.2.empty) then pure () else io.fail ""
|
1d4c5feb90c0d404e9548dfb2b63650c16e187b4
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/category_theory/triangulated/basic.lean
|
7cfece83be0f9c38dc65fd9b62fe34d9f8dcb7ce
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 4,010
|
lean
|
/-
Copyright (c) 2021 Luke Kershaw. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Luke Kershaw
-/
import data.int.basic
import category_theory.shift
/-!
# Triangles
This file contains the definition of triangles in an additive category with an additive shift.
It also defines morphisms between these triangles.
TODO: generalise this to n-angles in n-angulated categories as in https://arxiv.org/abs/1006.4592
-/
noncomputable theory
open category_theory
open category_theory.limits
universes v v₀ v₁ v₂ u u₀ u₁ u₂
namespace category_theory.pretriangulated
open category_theory.category
/-
We work in a category `C` equipped with a shift.
-/
variables (C : Type u) [category.{v} C] [has_shift C ℤ]
/--
A triangle in `C` is a sextuple `(X,Y,Z,f,g,h)` where `X,Y,Z` are objects of `C`,
and `f : X ⟶ Y`, `g : Y ⟶ Z`, `h : Z ⟶ X⟦1⟧` are morphisms in `C`.
See <https://stacks.math.columbia.edu/tag/0144>.
-/
structure triangle := mk' ::
(obj₁ : C)
(obj₂ : C)
(obj₃ : C)
(mor₁ : obj₁ ⟶ obj₂)
(mor₂ : obj₂ ⟶ obj₃)
(mor₃ : obj₃ ⟶ obj₁⟦(1:ℤ)⟧)
variable {C}
/--
A triangle `(X,Y,Z,f,g,h)` in `C` is defined by the morphisms `f : X ⟶ Y`, `g : Y ⟶ Z`
and `h : Z ⟶ X⟦1⟧`.
-/
@[simps]
def triangle.mk {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (h : Z ⟶ X⟦(1:ℤ)⟧) : triangle C :=
{ obj₁ := X,
obj₂ := Y,
obj₃ := Z,
mor₁ := f,
mor₂ := g,
mor₃ := h }
section
variables [has_zero_object C] [has_zero_morphisms C]
open_locale zero_object
instance : inhabited (triangle C) :=
⟨⟨0,0,0,0,0,0⟩⟩
/--
For each object in `C`, there is a triangle of the form `(X,X,0,𝟙 X,0,0)`
-/
@[simps]
def contractible_triangle (X : C) : triangle C := triangle.mk (𝟙 X) (0 : X ⟶ 0) 0
end
/--
A morphism of triangles `(X,Y,Z,f,g,h) ⟶ (X',Y',Z',f',g',h')` in `C` is a triple of morphisms
`a : X ⟶ X'`, `b : Y ⟶ Y'`, `c : Z ⟶ Z'` such that
`a ≫ f' = f ≫ b`, `b ≫ g' = g ≫ c`, and `c ≫ h' = h ≫ a⟦1⟧'`.
In other words, we have a commutative diagram:
```
f g h
X ───> Y ───> Z ───> X⟦1⟧
│ │ │ │
│a │b │c │a⟦1⟧'
V V V V
X' ───> Y' ───> Z' ───> X'⟦1⟧
f' g' h'
```
See <https://stacks.math.columbia.edu/tag/0144>.
-/
@[ext]
structure triangle_morphism (T₁ : triangle C) (T₂ : triangle C) :=
(hom₁ : T₁.obj₁ ⟶ T₂.obj₁)
(hom₂ : T₁.obj₂ ⟶ T₂.obj₂)
(hom₃ : T₁.obj₃ ⟶ T₂.obj₃)
(comm₁' : T₁.mor₁ ≫ hom₂ = hom₁ ≫ T₂.mor₁ . obviously)
(comm₂' : T₁.mor₂ ≫ hom₃ = hom₂ ≫ T₂.mor₂ . obviously)
(comm₃' : T₁.mor₃ ≫ hom₁⟦1⟧' = hom₃ ≫ T₂.mor₃ . obviously)
restate_axiom triangle_morphism.comm₁'
restate_axiom triangle_morphism.comm₂'
restate_axiom triangle_morphism.comm₃'
attribute [simp, reassoc] triangle_morphism.comm₁ triangle_morphism.comm₂ triangle_morphism.comm₃
/--
The identity triangle morphism.
-/
@[simps]
def triangle_morphism_id (T : triangle C) : triangle_morphism T T :=
{ hom₁ := 𝟙 T.obj₁,
hom₂ := 𝟙 T.obj₂,
hom₃ := 𝟙 T.obj₃ }
instance (T : triangle C) : inhabited (triangle_morphism T T) := ⟨triangle_morphism_id T⟩
variables {T₁ T₂ T₃ : triangle C}
/--
Composition of triangle morphisms gives a triangle morphism.
-/
@[simps]
def triangle_morphism.comp (f : triangle_morphism T₁ T₂) (g : triangle_morphism T₂ T₃) :
triangle_morphism T₁ T₃ :=
{ hom₁ := f.hom₁ ≫ g.hom₁,
hom₂ := f.hom₂ ≫ g.hom₂,
hom₃ := f.hom₃ ≫ g.hom₃ }
/--
Triangles with triangle morphisms form a category.
-/
@[simps]
instance triangle_category : category (triangle C) :=
{ hom := λ A B, triangle_morphism A B,
id := λ A, triangle_morphism_id A,
comp := λ A B C f g, f.comp g }
end category_theory.pretriangulated
|
0f2a8723cf3f5493909a941b0a038639025016ca
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/category_theory/limits/final.lean
|
4ef8f7ff0158f1f547fa25e0f3b51c9481bb4fa7
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 20,973
|
lean
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.punit
import category_theory.structured_arrow
import category_theory.is_connected
import category_theory.limits.yoneda
import category_theory.limits.types
/-!
# Final and initial functors
A functor `F : C ⥤ D` is final if for every `d : D`,
the comma category of morphisms `d ⟶ F.obj c` is connected.
Dually, a functor `F : C ⥤ D` is initial if for every `d : D`,
the comma category of morphisms `F.obj c ⟶ d` is connected.
We show that right adjoints are examples of final functors, while
left adjoints are examples of initial functors.
For final functors, we prove that the following three statements are equivalent:
1. `F : C ⥤ D` is final.
2. Every functor `G : D ⥤ E` has a colimit if and only if `F ⋙ G` does,
and these colimits are isomorphic via `colimit.pre G F`.
3. `colimit (F ⋙ coyoneda.obj (op d)) ≅ punit`.
Starting at 1. we show (in `cocones_equiv`) that
the categories of cocones over `G : D ⥤ E` and over `F ⋙ G` are equivalent.
(In fact, via an equivalence which does not change the cocone point.)
This readily implies 2., as `comp_has_colimit`, `has_colimit_of_comp`, and `colimit_iso`.
From 2. we can specialize to `G = coyoneda.obj (op d)` to obtain 3., as `colimit_comp_coyoneda_iso`.
From 3., we prove 1. directly in `cofinal_of_colimit_comp_coyoneda_iso_punit`.
Dually, we prove that if a functor `F : C ⥤ D` is initial, then any functor `G : D ⥤ E` has a
limit if and only if `F ⋙ G` does, and these limits are isomorphic via `limit.pre G F`.
## Naming
There is some discrepancy in the literature about naming; some say 'cofinal' instead of 'final'.
The explanation for this is that the 'co' prefix here is *not* the usual category-theoretic one
indicating duality, but rather indicating the sense of "along with".
## Future work
Dualise condition 3 above and the implications 2 ⇒ 3 and 3 ⇒ 1 to initial functors.
## References
* https://stacks.math.columbia.edu/tag/09WN
* https://ncatlab.org/nlab/show/final+functor
* Borceux, Handbook of Categorical Algebra I, Section 2.11.
(Note he reverses the roles of definition and main result relative to here!)
-/
noncomputable theory
universes v v₁ v₂ v₃ u₁ u₂ u₃
namespace category_theory
namespace functor
open opposite
open category_theory.limits
section arbitrary_universe
variables {C : Type u₁} [category.{v₁} C]
variables {D : Type u₂} [category.{v₂} D]
/--
A functor `F : C ⥤ D` is final if for every `d : D`, the comma category of morphisms `d ⟶ F.obj c`
is connected.
See <https://stacks.math.columbia.edu/tag/04E6>
-/
class final (F : C ⥤ D) : Prop :=
(out (d : D) : is_connected (structured_arrow d F))
attribute [instance] final.out
/--
A functor `F : C ⥤ D` is initial if for every `d : D`, the comma category of morphisms
`F.obj c ⟶ d` is connected.
-/
class initial (F : C ⥤ D) : Prop :=
(out (d : D) : is_connected (costructured_arrow F d))
attribute [instance] initial.out
instance final_op_of_initial (F : C ⥤ D) [initial F] : final F.op :=
{ out := λ d, is_connected_of_equivalent (costructured_arrow_op_equivalence F (unop d)) }
instance initial_op_of_final (F : C ⥤ D) [final F] : initial F.op :=
{ out := λ d, is_connected_of_equivalent (structured_arrow_op_equivalence F (unop d)) }
lemma final_of_initial_op (F : C ⥤ D) [initial F.op] : final F :=
{ out := λ d, @is_connected_of_is_connected_op _ _
(is_connected_of_equivalent (structured_arrow_op_equivalence F d).symm) }
lemma initial_of_final_op (F : C ⥤ D) [final F.op] : initial F :=
{ out := λ d, @is_connected_of_is_connected_op _ _
(is_connected_of_equivalent (costructured_arrow_op_equivalence F d).symm) }
/-- If a functor `R : D ⥤ C` is a right adjoint, it is final. -/
lemma final_of_adjunction {L : C ⥤ D} {R : D ⥤ C} (adj : L ⊣ R) : final R :=
{ out := λ c,
let u : structured_arrow c R := structured_arrow.mk (adj.unit.app c) in
@zigzag_is_connected _ _ ⟨u⟩ $ λ f g, relation.refl_trans_gen.trans
(relation.refl_trans_gen.single (show zag f u, from
or.inr ⟨structured_arrow.hom_mk ((adj.hom_equiv c f.right).symm f.hom) (by simp)⟩))
(relation.refl_trans_gen.single (show zag u g, from
or.inl ⟨structured_arrow.hom_mk ((adj.hom_equiv c g.right).symm g.hom) (by simp)⟩)) }
/-- If a functor `L : C ⥤ D` is a left adjoint, it is initial. -/
lemma initial_of_adjunction {L : C ⥤ D} {R : D ⥤ C} (adj : L ⊣ R) : initial L :=
{ out := λ d,
let u : costructured_arrow L d := costructured_arrow.mk (adj.counit.app d) in
@zigzag_is_connected _ _ ⟨u⟩ $ λ f g, relation.refl_trans_gen.trans
(relation.refl_trans_gen.single (show zag f u, from
or.inl ⟨costructured_arrow.hom_mk (adj.hom_equiv f.left d f.hom) (by simp)⟩))
(relation.refl_trans_gen.single (show zag u g, from
or.inr ⟨costructured_arrow.hom_mk (adj.hom_equiv g.left d g.hom) (by simp)⟩)) }
@[priority 100]
instance final_of_is_right_adjoint (F : C ⥤ D) [h : is_right_adjoint F] : final F :=
final_of_adjunction h.adj
@[priority 100]
instance initial_of_is_left_adjoint (F : C ⥤ D) [h : is_left_adjoint F] : initial F :=
initial_of_adjunction h.adj
namespace final
variables (F : C ⥤ D) [final F]
instance (d : D) : nonempty (structured_arrow d F) := is_connected.is_nonempty
variables {E : Type u₃} [category.{v₃} E] (G : D ⥤ E)
/--
When `F : C ⥤ D` is cofinal, we denote by `lift F d` an arbitrary choice of object in `C` such that
there exists a morphism `d ⟶ F.obj (lift F d)`.
-/
def lift (d : D) : C :=
(classical.arbitrary (structured_arrow d F)).right
/--
When `F : C ⥤ D` is cofinal, we denote by `hom_to_lift` an arbitrary choice of morphism
`d ⟶ F.obj (lift F d)`.
-/
def hom_to_lift (d : D) : d ⟶ F.obj (lift F d) :=
(classical.arbitrary (structured_arrow d F)).hom
/--
We provide an induction principle for reasoning about `lift` and `hom_to_lift`.
When we want to perform some construction (usually just a proof) about
the particular choices `lift F d` and `hom_to_lift F d`,
it suffices to perform that construction for some other pair of choices
(denoted `X₀ : C` and `k₀ : d ⟶ F.obj X₀` below),
and to show how to transport such a construction
*both* directions along a morphism between such choices.
-/
def induction {d : D} (Z : Π (X : C) (k : d ⟶ F.obj X), Sort*)
(h₁ : Π X₁ X₂ (k₁ : d ⟶ F.obj X₁) (k₂ : d ⟶ F.obj X₂) (f : X₁ ⟶ X₂),
(k₁ ≫ F.map f = k₂) → Z X₁ k₁ → Z X₂ k₂)
(h₂ : Π X₁ X₂ (k₁ : d ⟶ F.obj X₁) (k₂ : d ⟶ F.obj X₂) (f : X₁ ⟶ X₂),
(k₁ ≫ F.map f = k₂) → Z X₂ k₂ → Z X₁ k₁)
{X₀ : C} {k₀ : d ⟶ F.obj X₀} (z : Z X₀ k₀) : Z (lift F d) (hom_to_lift F d) :=
begin
apply nonempty.some,
apply @is_preconnected_induction _ _ _
(λ (Y : structured_arrow d F), Z Y.right Y.hom) _ _ { right := X₀, hom := k₀, } z,
{ intros j₁ j₂ f a, fapply h₁ _ _ _ _ f.right _ a, convert f.w.symm, dsimp, simp, },
{ intros j₁ j₂ f a, fapply h₂ _ _ _ _ f.right _ a, convert f.w.symm, dsimp, simp, },
end
variables {F G}
/--
Given a cocone over `F ⋙ G`, we can construct a `cocone G` with the same cocone point.
-/
@[simps]
def extend_cocone : cocone (F ⋙ G) ⥤ cocone G :=
{ obj := λ c,
{ X := c.X,
ι :=
{ app := λ X, G.map (hom_to_lift F X) ≫ c.ι.app (lift F X),
naturality' := λ X Y f,
begin
dsimp, simp,
-- This would be true if we'd chosen `lift F X` to be `lift F Y`
-- and `hom_to_lift F X` to be `f ≫ hom_to_lift F Y`.
apply induction F
(λ Z k, G.map f ≫ G.map (hom_to_lift F Y) ≫ c.ι.app (lift F Y) = G.map k ≫ c.ι.app Z),
{ intros Z₁ Z₂ k₁ k₂ g a z,
rw [←a, functor.map_comp, category.assoc, ←functor.comp_map, c.w, z], },
{ intros Z₁ Z₂ k₁ k₂ g a z,
rw [←a, functor.map_comp, category.assoc, ←functor.comp_map, c.w] at z,
rw z, },
{ rw [←functor.map_comp_assoc], },
end } },
map := λ X Y f,
{ hom := f.hom, } }
@[simp]
lemma colimit_cocone_comp_aux (s : cocone (F ⋙ G)) (j : C) :
G.map (hom_to_lift F (F.obj j)) ≫ s.ι.app (lift F (F.obj j)) =
s.ι.app j :=
begin
-- The point is that this would be true if we took `lift (F.obj j)` to just be `j`
-- and `hom_to_lift (F.obj j)` to be `𝟙 (F.obj j)`.
apply induction F (λ X k, G.map k ≫ s.ι.app X = (s.ι.app j : _)),
{ intros j₁ j₂ k₁ k₂ f w h, rw ←w, rw ← s.w f at h, simpa using h, },
{ intros j₁ j₂ k₁ k₂ f w h, rw ←w at h, rw ← s.w f, simpa using h, },
{ exact s.w (𝟙 _), },
end
variables (F G)
/--
If `F` is cofinal,
the category of cocones on `F ⋙ G` is equivalent to the category of cocones on `G`,
for any `G : D ⥤ E`.
-/
@[simps]
def cocones_equiv : cocone (F ⋙ G) ≌ cocone G :=
{ functor := extend_cocone,
inverse := cocones.whiskering F,
unit_iso := nat_iso.of_components (λ c, cocones.ext (iso.refl _) (by tidy)) (by tidy),
counit_iso := nat_iso.of_components (λ c, cocones.ext (iso.refl _) (by tidy)) (by tidy), }.
variables {G}
/--
When `F : C ⥤ D` is cofinal, and `t : cocone G` for some `G : D ⥤ E`,
`t.whisker F` is a colimit cocone exactly when `t` is.
-/
def is_colimit_whisker_equiv (t : cocone G) : is_colimit (t.whisker F) ≃ is_colimit t :=
is_colimit.of_cocone_equiv (cocones_equiv F G).symm
/--
When `F` is cofinal, and `t : cocone (F ⋙ G)`,
`extend_cocone.obj t` is a colimit cocone exactly when `t` is.
-/
def is_colimit_extend_cocone_equiv (t : cocone (F ⋙ G)) :
is_colimit (extend_cocone.obj t) ≃ is_colimit t :=
is_colimit.of_cocone_equiv (cocones_equiv F G)
/-- Given a colimit cocone over `G : D ⥤ E` we can construct a colimit cocone over `F ⋙ G`. -/
@[simps]
def colimit_cocone_comp (t : colimit_cocone G) :
colimit_cocone (F ⋙ G) :=
{ cocone := _,
is_colimit := (is_colimit_whisker_equiv F _).symm (t.is_colimit) }
@[priority 100]
instance comp_has_colimit [has_colimit G] :
has_colimit (F ⋙ G) :=
has_colimit.mk (colimit_cocone_comp F (get_colimit_cocone G))
lemma colimit_pre_is_iso_aux {t : cocone G} (P : is_colimit t) :
((is_colimit_whisker_equiv F _).symm P).desc (t.whisker F) = 𝟙 t.X :=
begin
dsimp [is_colimit_whisker_equiv],
apply P.hom_ext,
intro j,
dsimp, simp,
end
instance colimit_pre_is_iso [has_colimit G] :
is_iso (colimit.pre G F) :=
begin
rw colimit.pre_eq (colimit_cocone_comp F (get_colimit_cocone G)) (get_colimit_cocone G),
erw colimit_pre_is_iso_aux,
dsimp,
apply_instance,
end
section
variables (G)
/--
When `F : C ⥤ D` is cofinal, and `G : D ⥤ E` has a colimit, then `F ⋙ G` has a colimit also and
`colimit (F ⋙ G) ≅ colimit G`
https://stacks.math.columbia.edu/tag/04E7
-/
def colimit_iso [has_colimit G] : colimit (F ⋙ G) ≅ colimit G := as_iso (colimit.pre G F)
end
/-- Given a colimit cocone over `F ⋙ G` we can construct a colimit cocone over `G`. -/
@[simps]
def colimit_cocone_of_comp (t : colimit_cocone (F ⋙ G)) :
colimit_cocone G :=
{ cocone := extend_cocone.obj t.cocone,
is_colimit := (is_colimit_extend_cocone_equiv F _).symm (t.is_colimit), }
/--
When `F` is cofinal, and `F ⋙ G` has a colimit, then `G` has a colimit also.
We can't make this an instance, because `F` is not determined by the goal.
(Even if this weren't a problem, it would cause a loop with `comp_has_colimit`.)
-/
lemma has_colimit_of_comp [has_colimit (F ⋙ G)] :
has_colimit G :=
has_colimit.mk (colimit_cocone_of_comp F (get_colimit_cocone (F ⋙ G)))
section
local attribute [instance] has_colimit_of_comp
/--
When `F` is cofinal, and `F ⋙ G` has a colimit, then `G` has a colimit also and
`colimit (F ⋙ G) ≅ colimit G`
https://stacks.math.columbia.edu/tag/04E7
-/
def colimit_iso' [has_colimit (F ⋙ G)] : colimit (F ⋙ G) ≅ colimit G := as_iso (colimit.pre G F)
end
end final
end arbitrary_universe
namespace final
variables {C : Type v} [category.{v} C] {D : Type v} [category.{v} D] (F : C ⥤ D) [final F]
/--
If the universal morphism `colimit (F ⋙ coyoneda.obj (op d)) ⟶ colimit (coyoneda.obj (op d))`
is an isomorphism (as it always is when `F` is cofinal),
then `colimit (F ⋙ coyoneda.obj (op d)) ≅ punit`
(simply because `colimit (coyoneda.obj (op d)) ≅ punit`).
-/
def colimit_comp_coyoneda_iso (d : D) [is_iso (colimit.pre (coyoneda.obj (op d)) F)] :
colimit (F ⋙ coyoneda.obj (op d)) ≅ punit :=
as_iso (colimit.pre (coyoneda.obj (op d)) F) ≪≫ coyoneda.colimit_coyoneda_iso (op d)
lemma zigzag_of_eqv_gen_quot_rel {F : C ⥤ D} {d : D} {f₁ f₂ : Σ X, d ⟶ F.obj X}
(t : eqv_gen (types.quot.rel.{v v} (F ⋙ coyoneda.obj (op d))) f₁ f₂) :
zigzag (structured_arrow.mk f₁.2) (structured_arrow.mk f₂.2) :=
begin
induction t,
case eqv_gen.rel : x y r
{ obtain ⟨f, w⟩ := r,
fconstructor,
swap 2, fconstructor,
left, fsplit,
exact { right := f, } },
case eqv_gen.refl
{ fconstructor, },
case eqv_gen.symm : x y h ih
{ apply zigzag_symmetric,
exact ih, },
case eqv_gen.trans : x y z h₁ h₂ ih₁ ih₂
{ apply relation.refl_trans_gen.trans,
exact ih₁, exact ih₂, }
end
/--
If `colimit (F ⋙ coyoneda.obj (op d)) ≅ punit` for all `d : D`, then `F` is cofinal.
-/
lemma cofinal_of_colimit_comp_coyoneda_iso_punit
(I : Π d, colimit (F ⋙ coyoneda.obj (op d)) ≅ punit) : final F :=
⟨λ d, begin
haveI : nonempty (structured_arrow d F),
{ have := (I d).inv punit.star,
obtain ⟨j, y, rfl⟩ := limits.types.jointly_surjective'.{v v} this,
exact ⟨structured_arrow.mk y⟩, },
apply zigzag_is_connected,
rintros ⟨⟨⟨⟩⟩,X₁,f₁⟩ ⟨⟨⟨⟩⟩,X₂,f₂⟩,
dsimp at *,
let y₁ := colimit.ι (F ⋙ coyoneda.obj (op d)) X₁ f₁,
let y₂ := colimit.ι (F ⋙ coyoneda.obj (op d)) X₂ f₂,
have e : y₁ = y₂,
{ apply (I d).to_equiv.injective, ext, },
have t := types.colimit_eq.{v v} e,
clear e y₁ y₂,
exact zigzag_of_eqv_gen_quot_rel t,
end⟩
end final
namespace initial
variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₂} D] (F : C ⥤ D) [initial F]
instance (d : D) : nonempty (costructured_arrow F d) := is_connected.is_nonempty
variables {E : Type u₃} [category.{v₃} E] (G : D ⥤ E)
/--
When `F : C ⥤ D` is initial, we denote by `lift F d` an arbitrary choice of object in `C` such that
there exists a morphism `F.obj (lift F d) ⟶ d`.
-/
def lift (d : D) : C := (classical.arbitrary (costructured_arrow F d)).left
/--
When `F : C ⥤ D` is initial, we denote by `hom_to_lift` an arbitrary choice of morphism
`F.obj (lift F d) ⟶ d`.
-/
def hom_to_lift (d : D) : F.obj (lift F d) ⟶ d :=
(classical.arbitrary (costructured_arrow F d)).hom
/--
We provide an induction principle for reasoning about `lift` and `hom_to_lift`.
When we want to perform some construction (usually just a proof) about
the particular choices `lift F d` and `hom_to_lift F d`,
it suffices to perform that construction for some other pair of choices
(denoted `X₀ : C` and `k₀ : F.obj X₀ ⟶ d` below),
and to show how to transport such a construction
*both* directions along a morphism between such choices.
-/
def induction {d : D} (Z : Π (X : C) (k : F.obj X ⟶ d), Sort*)
(h₁ : Π X₁ X₂ (k₁ : F.obj X₁ ⟶ d) (k₂ : F.obj X₂ ⟶ d) (f : X₁ ⟶ X₂),
(F.map f ≫ k₂ = k₁) → Z X₁ k₁ → Z X₂ k₂)
(h₂ : Π X₁ X₂ (k₁ : F.obj X₁ ⟶ d) (k₂ : F.obj X₂ ⟶ d) (f : X₁ ⟶ X₂),
(F.map f ≫ k₂ = k₁) → Z X₂ k₂ → Z X₁ k₁)
{X₀ : C} {k₀ : F.obj X₀ ⟶ d} (z : Z X₀ k₀) : Z (lift F d) (hom_to_lift F d) :=
begin
apply nonempty.some,
apply @is_preconnected_induction _ _ _
(λ Y : costructured_arrow F d, Z Y.left Y.hom) _ _ { left := X₀, hom := k₀ } z,
{ intros j₁ j₂ f a, fapply h₁ _ _ _ _ f.left _ a, convert f.w, dsimp, simp, },
{ intros j₁ j₂ f a, fapply h₂ _ _ _ _ f.left _ a, convert f.w, dsimp, simp, },
end
variables {F G}
/--
Given a cone over `F ⋙ G`, we can construct a `cone G` with the same cone point.
-/
@[simps]
def extend_cone : cone (F ⋙ G) ⥤ cone G :=
{ obj := λ c,
{ X := c.X,
π :=
{ app := λ d, c.π.app (lift F d) ≫ G.map (hom_to_lift F d),
naturality' := λ X Y f,
begin
dsimp, simp,
-- This would be true if we'd chosen `lift F Y` to be `lift F X`
-- and `hom_to_lift F Y` to be `hom_to_lift F X ≫ f`.
apply induction F (λ Z k, (c.π.app Z ≫ G.map k : c.X ⟶ _) =
c.π.app (lift F X) ≫ G.map (hom_to_lift F X) ≫ G.map f),
{ intros Z₁ Z₂ k₁ k₂ g a z,
rw [←a, functor.map_comp, ←functor.comp_map, ←category.assoc, ←category.assoc, c.w] at z,
rw [z, category.assoc] },
{ intros Z₁ Z₂ k₁ k₂ g a z,
rw [←a, functor.map_comp, ←functor.comp_map, ←category.assoc, ←category.assoc,
c.w, z, category.assoc] },
{ rw [←functor.map_comp], },
end } },
map := λ X Y f,
{ hom := f.hom, } }
@[simp]
lemma limit_cone_comp_aux (s : cone (F ⋙ G)) (j : C) :
s.π.app (lift F (F.obj j)) ≫ G.map (hom_to_lift F (F.obj j)) =
s.π.app j :=
begin
-- The point is that this would be true if we took `lift (F.obj j)` to just be `j`
-- and `hom_to_lift (F.obj j)` to be `𝟙 (F.obj j)`.
apply induction F (λ X k, s.π.app X ≫ G.map k = (s.π.app j : _)),
{ intros j₁ j₂ k₁ k₂ f w h, rw ←s.w f, rw ←w at h, simpa using h, },
{ intros j₁ j₂ k₁ k₂ f w h, rw ←s.w f at h, rw ←w, simpa using h, },
{ exact s.w (𝟙 _), },
end
variables (F G)
/--
If `F` is initial,
the category of cones on `F ⋙ G` is equivalent to the category of cones on `G`,
for any `G : D ⥤ E`.
-/
@[simps]
def cones_equiv : cone (F ⋙ G) ≌ cone G :=
{ functor := extend_cone,
inverse := cones.whiskering F,
unit_iso := nat_iso.of_components (λ c, cones.ext (iso.refl _) (by tidy)) (by tidy),
counit_iso := nat_iso.of_components (λ c, cones.ext (iso.refl _) (by tidy)) (by tidy), }.
variables {G}
/--
When `F : C ⥤ D` is initial, and `t : cone G` for some `G : D ⥤ E`,
`t.whisker F` is a limit cone exactly when `t` is.
-/
def is_limit_whisker_equiv (t : cone G) : is_limit (t.whisker F) ≃ is_limit t :=
is_limit.of_cone_equiv (cones_equiv F G).symm
/--
When `F` is initial, and `t : cone (F ⋙ G)`,
`extend_cone.obj t` is a limit cone exactly when `t` is.
-/
def is_limit_extend_cone_equiv (t : cone (F ⋙ G)) :
is_limit (extend_cone.obj t) ≃ is_limit t :=
is_limit.of_cone_equiv (cones_equiv F G)
/-- Given a limit cone over `G : D ⥤ E` we can construct a limit cone over `F ⋙ G`. -/
@[simps]
def limit_cone_comp (t : limit_cone G) :
limit_cone (F ⋙ G) :=
{ cone := _,
is_limit := (is_limit_whisker_equiv F _).symm (t.is_limit) }
@[priority 100]
instance comp_has_limit [has_limit G] :
has_limit (F ⋙ G) :=
has_limit.mk (limit_cone_comp F (get_limit_cone G))
lemma limit_pre_is_iso_aux {t : cone G} (P : is_limit t) :
((is_limit_whisker_equiv F _).symm P).lift (t.whisker F) = 𝟙 t.X :=
begin
dsimp [is_limit_whisker_equiv],
apply P.hom_ext,
intro j,
simp,
end
instance limit_pre_is_iso [has_limit G] :
is_iso (limit.pre G F) :=
begin
rw limit.pre_eq (limit_cone_comp F (get_limit_cone G)) (get_limit_cone G),
erw limit_pre_is_iso_aux,
dsimp,
apply_instance,
end
section
variables (G)
/--
When `F : C ⥤ D` is initial, and `G : D ⥤ E` has a limit, then `F ⋙ G` has a limit also and
`limit (F ⋙ G) ≅ limit G`
https://stacks.math.columbia.edu/tag/04E7
-/
def limit_iso [has_limit G] : limit (F ⋙ G) ≅ limit G := (as_iso (limit.pre G F)).symm
end
/-- Given a limit cone over `F ⋙ G` we can construct a limit cone over `G`. -/
@[simps]
def limit_cone_of_comp (t : limit_cone (F ⋙ G)) :
limit_cone G :=
{ cone := extend_cone.obj t.cone,
is_limit := (is_limit_extend_cone_equiv F _).symm (t.is_limit), }
/--
When `F` is initial, and `F ⋙ G` has a limit, then `G` has a limit also.
We can't make this an instance, because `F` is not determined by the goal.
(Even if this weren't a problem, it would cause a loop with `comp_has_limit`.)
-/
lemma has_limit_of_comp [has_limit (F ⋙ G)] :
has_limit G :=
has_limit.mk (limit_cone_of_comp F (get_limit_cone (F ⋙ G)))
section
local attribute [instance] has_limit_of_comp
/--
When `F` is initial, and `F ⋙ G` has a limit, then `G` has a limit also and
`limit (F ⋙ G) ≅ limit G`
https://stacks.math.columbia.edu/tag/04E7
-/
def limit_iso' [has_limit (F ⋙ G)] : limit (F ⋙ G) ≅ limit G :=
(as_iso (limit.pre G F)).symm
end
end initial
end functor
end category_theory
|
d9270f464c9370094a66331dc72b4b9ce2cf142a
|
a45212b1526d532e6e83c44ddca6a05795113ddc
|
/src/group_theory/group_action.lean
|
680484e3e62bdef4b172be403a518e5000c3a4b8
|
[
"Apache-2.0"
] |
permissive
|
fpvandoorn/mathlib
|
b21ab4068db079cbb8590b58fda9cc4bc1f35df4
|
b3433a51ea8bc07c4159c1073838fc0ee9b8f227
|
refs/heads/master
| 1,624,791,089,608
| 1,556,715,231,000
| 1,556,715,231,000
| 165,722,980
| 5
| 0
|
Apache-2.0
| 1,552,657,455,000
| 1,547,494,646,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 6,548
|
lean
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes
-/
import data.set.finite group_theory.coset
universes u v w
variables {α : Type u} {β : Type v} {γ : Type w}
/-- Typeclass for types with a scalar multiplication operation, denoted `•` (`\bu`) -/
class has_scalar (α : Type u) (γ : Type v) := (smul : α → γ → γ)
infixr ` • `:73 := has_scalar.smul
/-- Typeclass for multiplicative actions by monoids. This generalizes group actions. -/
class mul_action (α : Type u) (β : Type v) [monoid α] extends has_scalar α β :=
(one_smul : ∀ b : β, (1 : α) • b = b)
(mul_smul : ∀ (x y : α) (b : β), (x * y) • b = x • y • b)
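-- A sketch of a simple instance (not part of the original file): a monoid acts on itself by left
-- multiplication, with `smul := (*)`; `one_smul` is then `one_mul` and `mul_smul` is `mul_assoc`.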
section
variables [monoid α] [mul_action α β]
theorem mul_smul (a₁ a₂ : α) (b : β) : (a₁ * a₂) • b = a₁ • a₂ • b := mul_action.mul_smul _ _ _
variable (α)
@[simp] theorem one_smul (b : β) : (1 : α) • b = b := mul_action.one_smul α _
end
namespace mul_action
variables (α) [monoid α] [mul_action α β]
def orbit (b : β) := set.range (λ x : α, x • b)
variable {α}
@[simp] lemma mem_orbit_iff {b₁ b₂ : β} : b₂ ∈ orbit α b₁ ↔ ∃ x : α, x • b₁ = b₂ :=
iff.rfl
@[simp] lemma mem_orbit (b : β) (x : α) : x • b ∈ orbit α b :=
⟨x, rfl⟩
@[simp] lemma mem_orbit_self (b : β) : b ∈ orbit α b :=
⟨1, by simp [mul_action.one_smul]⟩
instance orbit_fintype (b : β) [fintype α] [decidable_eq β] :
fintype (orbit α b) := set.fintype_range _
variable (α)
def stabilizer (b : β) : set α :=
{x : α | x • b = b}
variable {α}
@[simp] lemma mem_stabilizer_iff {b : β} {x : α} :
x ∈ stabilizer α b ↔ x • b = b := iff.rfl
variables (α) (β)
def fixed_points : set β := {b : β | ∀ x, x ∈ stabilizer α b}
variables {α} (β)
@[simp] lemma mem_fixed_points {b : β} :
b ∈ fixed_points α β ↔ ∀ x : α, x • b = b := iff.rfl
lemma mem_fixed_points' {b : β} : b ∈ fixed_points α β ↔
(∀ b', b' ∈ orbit α b → b' = b) :=
⟨λ h b h₁, let ⟨x, hx⟩ := mem_orbit_iff.1 h₁ in hx ▸ h x,
λ h b, mem_stabilizer_iff.2 (h _ (mem_orbit _ _))⟩
def comp_hom [monoid γ] (g : γ → α) [is_monoid_hom g] :
mul_action γ β :=
{ smul := λ x b, (g x) • b,
one_smul := by simp [is_monoid_hom.map_one g, mul_action.one_smul],
mul_smul := by simp [is_monoid_hom.map_mul g, mul_action.mul_smul] }
end mul_action
namespace mul_action
variables [group α] [mul_action α β]
section
open mul_action quotient_group
variables (α) (β)
def to_perm (g : α) : equiv.perm β :=
{ to_fun := (•) g,
inv_fun := (•) g⁻¹,
left_inv := λ a, by rw [← mul_action.mul_smul, inv_mul_self, mul_action.one_smul],
right_inv := λ a, by rw [← mul_action.mul_smul, mul_inv_self, mul_action.one_smul] }
variables {α} {β}
instance : is_group_hom (to_perm α β) :=
{ map_mul := λ x y, equiv.ext _ _ (λ a, mul_action.mul_smul x y a) }
lemma bijective (g : α) : function.bijective (λ b : β, g • b) :=
(to_perm α β g).bijective
lemma orbit_eq_iff {a b : β} :
orbit α a = orbit α b ↔ a ∈ orbit α b:=
⟨λ h, h ▸ mem_orbit_self _,
λ ⟨x, (hx : x • b = a)⟩, set.ext (λ c, ⟨λ ⟨y, (hy : y • a = c)⟩, ⟨y * x,
show (y * x) • b = c, by rwa [mul_action.mul_smul, hx]⟩,
λ ⟨y, (hy : y • b = c)⟩, ⟨y * x⁻¹,
show (y * x⁻¹) • a = c, by
conv {to_rhs, rw [← hy, ← mul_one y, ← inv_mul_self x, ← mul_assoc,
mul_action.mul_smul, hx]}⟩⟩)⟩
instance (b : β) : is_subgroup (stabilizer α b) :=
{ one_mem := mul_action.one_smul _ _,
mul_mem := λ x y (hx : x • b = b) (hy : y • b = b),
show (x * y) • b = b, by rw mul_action.mul_smul; simp *,
inv_mem := λ x (hx : x • b = b), show x⁻¹ • b = b,
by rw [← hx, ← mul_action.mul_smul, inv_mul_self, mul_action.one_smul, hx] }
variables (α) (β)
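/-- The setoid on `β` relating two points when they lie in the same orbit. -/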
def orbit_rel : setoid β :=
{ r := λ a b, a ∈ orbit α b,
iseqv := ⟨mem_orbit_self, λ a b, by simp [orbit_eq_iff.symm, eq_comm],
λ a b, by simp [orbit_eq_iff.symm, eq_comm] {contextual := tt}⟩ }
variables {β}
open quotient_group
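/-- Orbit-stabilizer theorem: the orbit of `b` is equivalent to the quotient of `α` by the
stabilizer of `b`. -/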
noncomputable def orbit_equiv_quotient_stabilizer (b : β) :
orbit α b ≃ quotient (stabilizer α b) :=
equiv.symm (@equiv.of_bijective _ _
(λ x : quotient (stabilizer α b), quotient.lift_on' x
(λ x, (⟨x • b, mem_orbit _ _⟩ : orbit α b))
(λ g h (H : _ = _), subtype.eq $ (mul_action.bijective (g⁻¹)).1
$ show g⁻¹ • (g • b) = g⁻¹ • (h • b),
by rw [← mul_action.mul_smul, ← mul_action.mul_smul,
H, inv_mul_self, mul_action.one_smul]))
⟨λ g h, quotient.induction_on₂' g h (λ g h H, quotient.sound' $
have H : g • b = h • b := subtype.mk.inj H,
show (g⁻¹ * h) • b = b,
by rw [mul_action.mul_smul, ← H, ← mul_action.mul_smul, inv_mul_self, mul_action.one_smul]),
λ ⟨b, ⟨g, hgb⟩⟩, ⟨g, subtype.eq hgb⟩⟩)
end
open quotient_group mul_action is_subgroup
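/-- The action of `α` on the left cosets `quotient H` of a subgroup `H`, given by
left multiplication. -/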
def mul_left_cosets (H : set α) [is_subgroup H]
(x : α) (y : quotient H) : quotient H :=
quotient.lift_on' y (λ y, quotient_group.mk ((x : α) * y))
(λ a b (hab : _ ∈ H), quotient_group.eq.2
(by rwa [mul_inv_rev, ← mul_assoc, mul_assoc (a⁻¹), inv_mul_self, mul_one]))
instance (H : set α) [is_subgroup H] : mul_action α (quotient H) :=
{ smul := mul_left_cosets H,
one_smul := λ a, quotient.induction_on' a (λ a, quotient_group.eq.2
(by simp [is_submonoid.one_mem])),
mul_smul := λ x y a, quotient.induction_on' a (λ a, quotient_group.eq.2
(by simp [mul_inv_rev, is_submonoid.one_mem, mul_assoc])) }
instance mul_left_cosets_comp_subtype_val (H I : set α) [is_subgroup H] [is_subgroup I] :
mul_action I (quotient H) :=
mul_action.comp_hom (quotient H) (subtype.val : I → α)
end mul_action
/-- Typeclass for multiplicative actions on additive structures. This generalizes group modules. -/
class distrib_mul_action (α : Type u) (β : Type v) [monoid α] [add_monoid β] extends mul_action α β :=
(smul_add : ∀(r : α) (x y : β), r • (x + y) = r • x + r • y)
(smul_zero {} : ∀(r : α), r • (0 : β) = 0)
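/- Illustrative sketch: a semiring acts on itself distributively by left multiplication;
the two additional axioms are `mul_add` and `mul_zero`. -/
example [semiring α] : distrib_mul_action α α :=
{ smul := (*),
  one_smul := one_mul,
  mul_smul := mul_assoc,
  smul_add := mul_add,
  smul_zero := mul_zero }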
section
variables [monoid α] [add_monoid β] [distrib_mul_action α β]
theorem smul_add (a : α) (b₁ b₂ : β) : a • (b₁ + b₂) = a • b₁ + a • b₂ :=
distrib_mul_action.smul_add _ _ _
@[simp] theorem smul_zero (a : α) : a • (0 : β) = 0 :=
distrib_mul_action.smul_zero _
end
|
778fa98887a9183703bc9bca7416edc593ec11ed
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/field_theory/mv_polynomial_auto.lean
|
e467777a0e87b32f742b54fa9d58ef7080b94e52
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 3,118
|
lean
|
/-
Copyright (c) 2019 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Johannes Hölzl
Multivariate functions of the form `α^n → α` are isomorphic to multivariate polynomials in
`n` variables.
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.ring_theory.ideal.operations
import Mathlib.linear_algebra.finsupp_vector_space
import Mathlib.algebra.char_p.basic
import Mathlib.PostPort
universes u v u_1 u_2
namespace Mathlib
namespace mv_polynomial
theorem quotient_mk_comp_C_injective (σ : Type u) (α : Type v) [field α]
(I : ideal (mv_polynomial σ α)) (hI : I ≠ ⊤) :
function.injective ⇑(ring_hom.comp (ideal.quotient.mk I) C) :=
sorry
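/-- The submodule of multivariate polynomials of total degree at most `m`. -/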
def restrict_total_degree (σ : Type u) (α : Type v) [field α] (m : ℕ) :
submodule α (mv_polynomial σ α) :=
finsupp.supported α α (set_of fun (n : σ →₀ ℕ) => (finsupp.sum n fun (n : σ) (e : ℕ) => e) ≤ m)
theorem mem_restrict_total_degree (σ : Type u) (α : Type v) [field α] (m : ℕ)
(p : mv_polynomial σ α) : p ∈ restrict_total_degree σ α m ↔ total_degree p ≤ m :=
sorry
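/-- The submodule of multivariate polynomials in which every variable occurs with exponent
at most `m`. -/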
def restrict_degree (σ : Type u) (α : Type v) (m : ℕ) [field α] : submodule α (mv_polynomial σ α) :=
finsupp.supported α α (set_of fun (n : σ →₀ ℕ) => ∀ (i : σ), coe_fn n i ≤ m)
theorem mem_restrict_degree {σ : Type u} {α : Type v} [field α] (p : mv_polynomial σ α) (n : ℕ) :
p ∈ restrict_degree σ α n ↔ ∀ (s : σ →₀ ℕ), s ∈ finsupp.support p → ∀ (i : σ), coe_fn s i ≤ n :=
sorry
theorem mem_restrict_degree_iff_sup {σ : Type u} {α : Type v} [field α] (p : mv_polynomial σ α)
(n : ℕ) : p ∈ restrict_degree σ α n ↔ ∀ (i : σ), multiset.count i (degrees p) ≤ n :=
sorry
theorem map_range_eq_map {σ : Type u} {α : Type v} {β : Type u_1} [comm_ring α] [comm_ring β]
(p : mv_polynomial σ α) (f : α →+* β) :
finsupp.map_range (⇑f) (ring_hom.map_zero f) p = coe_fn (map f) p :=
sorry
theorem is_basis_monomials (σ : Type u) (α : Type v) [field α] :
is_basis α fun (s : σ →₀ ℕ) => monomial s 1 :=
sorry
end mv_polynomial
namespace mv_polynomial
theorem dim_mv_polynomial (σ : Type u) (α : Type u) [field α] :
vector_space.dim α (mv_polynomial σ α) = cardinal.mk (σ →₀ ℕ) :=
sorry
end mv_polynomial
namespace mv_polynomial
protected instance char_p (σ : Type u_1) (R : Type u_2) [comm_ring R] (p : ℕ) [char_p R p] :
char_p (mv_polynomial σ R) p :=
char_p.mk
fun (n : ℕ) =>
eq.mpr (id (Eq._oldrec (Eq.refl (↑n = 0 ↔ p ∣ n)) (Eq.symm (C_eq_coe_nat n))))
(eq.mpr (id (Eq._oldrec (Eq.refl (coe_fn C ↑n = 0 ↔ p ∣ n)) (Eq.symm C_0)))
(eq.mpr
(id
(Eq._oldrec (Eq.refl (coe_fn C ↑n = coe_fn C 0 ↔ p ∣ n)) (propext (C_inj R (↑n) 0))))
(eq.mpr
(id (Eq._oldrec (Eq.refl (↑n = 0 ↔ p ∣ n)) (propext (char_p.cast_eq_zero_iff R p n))))
(iff.refl (p ∣ n)))))
end Mathlib
|
a1e7ece706602a496f84c99d94eff3706b602cf6
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/linear_algebra/pi_tensor_product.lean
|
451d7d9408fbd821d20d83dd03a8fb5c4c65115a
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 24,376
|
lean
|
/-
Copyright (c) 2020 Frédéric Dupuis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Frédéric Dupuis, Eric Wieser
-/
import group_theory.congruence
import linear_algebra.multilinear.tensor_product
/-!
# Tensor product of an indexed family of modules over commutative semirings
We define the tensor product of an indexed family `s : ι → Type*` of modules over commutative
semirings. We denote this space by `⨂[R] i, s i` and define it as `free_add_monoid (R × Π i, s i)`
quotiented by the appropriate equivalence relation. The treatment follows very closely that of the
binary tensor product in `linear_algebra/tensor_product.lean`.
## Main definitions
* `pi_tensor_product R s` with `R` a commutative semiring and `s : ι → Type*` is the tensor product
of all the `s i`'s. This is denoted by `⨂[R] i, s i`.
* `tprod R f` with `f : Π i, s i` is the tensor product of the vectors `f i` over all `i : ι`.
This is bundled as a multilinear map from `Π i, s i` to `⨂[R] i, s i`.
* `lift_add_hom` constructs an `add_monoid_hom` from `(⨂[R] i, s i)` to some space `F` from a
function `φ : (R × Π i, s i) → F` with the appropriate properties.
* `lift φ` with `φ : multilinear_map R s E` is the corresponding linear map
`(⨂[R] i, s i) →ₗ[R] E`. This is bundled as a linear equivalence.
* `pi_tensor_product.reindex e` re-indexes the components of `⨂[R] i : ι, M` along `e : ι ≃ ι₂`.
* `pi_tensor_product.tmul_equiv` equivalence between a `tensor_product` of `pi_tensor_product`s and
a single `pi_tensor_product`.
## Notations
* `⨂[R] i, s i` is defined as localized notation in locale `tensor_product`
* `⨂ₜ[R] i, f i` with `f : Π i, s i` is defined globally as the tensor product of all the `f i`'s.
## Implementation notes
* We define it via `free_add_monoid (R × Π i, s i)` with the `R` representing a "hidden" tensor
factor, rather than `free_add_monoid (Π i, s i)` to ensure that, if `ι` is an empty type,
the space is isomorphic to the base ring `R`.
* We have not restricted the index type `ι` to be a `fintype`, as nothing we do here strictly
  requires it. However, problems may arise in the case where `ι` is infinite; use with caution.
## TODO
* Define tensor powers, symmetric subspace, etc.
* API for the various ways `ι` can be split into subsets; connect this with the binary
tensor product.
* Include connection with holors.
* Port more of the API from the binary tensor product over to this case.
## Tags
multilinear, tensor, tensor product
-/
open function
section semiring
variables {ι ι₂ ι₃ : Type*} [decidable_eq ι] [decidable_eq ι₂] [decidable_eq ι₃]
variables {R : Type*} [comm_semiring R]
variables {R₁ R₂ : Type*}
variables {s : ι → Type*} [∀ i, add_comm_monoid (s i)] [∀ i, module R (s i)]
variables {M : Type*} [add_comm_monoid M] [module R M]
variables {E : Type*} [add_comm_monoid E] [module R E]
variables {F : Type*} [add_comm_monoid F]
namespace pi_tensor_product
include R
variables (R) (s)
/-- The relation on `free_add_monoid (R × Π i, s i)` that generates a congruence whose quotient is
the tensor product. -/
inductive eqv : free_add_monoid (R × Π i, s i) → free_add_monoid (R × Π i, s i) → Prop
| of_zero : ∀ (r : R) (f : Π i, s i) (i : ι) (hf : f i = 0), eqv (free_add_monoid.of (r, f)) 0
| of_zero_scalar : ∀ (f : Π i, s i), eqv (free_add_monoid.of (0, f)) 0
| of_add : ∀ (r : R) (f : Π i, s i) (i : ι) (m₁ m₂ : s i), eqv
(free_add_monoid.of (r, update f i m₁) + free_add_monoid.of (r, update f i m₂))
(free_add_monoid.of (r, update f i (m₁ + m₂)))
| of_add_scalar : ∀ (r r' : R) (f : Π i, s i), eqv
(free_add_monoid.of (r, f) + free_add_monoid.of (r', f))
(free_add_monoid.of (r + r', f))
| of_smul : ∀ (r : R) (f : Π i, s i) (i : ι) (r' : R), eqv
(free_add_monoid.of (r, update f i (r' • (f i))))
(free_add_monoid.of (r' * r, f))
| add_comm : ∀ x y, eqv (x + y) (y + x)
end pi_tensor_product
variables (R) (s)
/-- `pi_tensor_product R s` with `R` a commutative semiring and `s : ι → Type*` is the tensor
product of all the `s i`'s. This is denoted by `⨂[R] i, s i`. -/
def pi_tensor_product : Type* :=
(add_con_gen (pi_tensor_product.eqv R s)).quotient
variables {R}
/- This enables the notation `⨂[R] i : ι, s i` for the pi tensor product, given `s : ι → Type*`. -/
localized "notation (name := pi_tensor_product)
`⨂[`:100 R `] ` binders `, ` r:(scoped:67 f, pi_tensor_product R f) := r" in tensor_product
open_locale tensor_product
namespace pi_tensor_product
section module
instance : add_comm_monoid (⨂[R] i, s i) :=
{ add_comm := λ x y, add_con.induction_on₂ x y $ λ x y, quotient.sound' $
add_con_gen.rel.of _ _ $ eqv.add_comm _ _,
.. (add_con_gen (pi_tensor_product.eqv R s)).add_monoid }
instance : inhabited (⨂[R] i, s i) := ⟨0⟩
variables (R) {s}
/-- `tprod_coeff R r f` with `r : R` and `f : Π i, s i` is the tensor product of the vectors `f i`
over all `i : ι`, multiplied by the coefficient `r`. Note that this is meant as an auxiliary
definition for this file alone, and that one should use `tprod` defined below for most purposes. -/
def tprod_coeff (r : R) (f : Π i, s i) : ⨂[R] i, s i := add_con.mk' _ $ free_add_monoid.of (r, f)
variables {R}
lemma zero_tprod_coeff (f : Π i, s i) : tprod_coeff R 0 f = 0 :=
quotient.sound' $ add_con_gen.rel.of _ _ $ eqv.of_zero_scalar _
lemma zero_tprod_coeff' (z : R) (f : Π i, s i) (i : ι) (hf: f i = 0) : tprod_coeff R z f = 0 :=
quotient.sound' $ add_con_gen.rel.of _ _ $ eqv.of_zero _ _ i hf
lemma add_tprod_coeff (z : R) (f : Π i, s i) (i : ι) (m₁ m₂ : s i) :
tprod_coeff R z (update f i m₁) + tprod_coeff R z (update f i m₂) =
tprod_coeff R z (update f i (m₁ + m₂)) :=
quotient.sound' $ add_con_gen.rel.of _ _ (eqv.of_add z f i m₁ m₂)
lemma add_tprod_coeff' (z₁ z₂ : R) (f : Π i, s i) :
tprod_coeff R z₁ f + tprod_coeff R z₂ f = tprod_coeff R (z₁ + z₂) f :=
quotient.sound' $ add_con_gen.rel.of _ _ (eqv.of_add_scalar z₁ z₂ f)
lemma smul_tprod_coeff_aux (z : R) (f : Π i, s i) (i : ι) (r : R) :
tprod_coeff R z (update f i (r • f i)) = tprod_coeff R (r * z) f :=
quotient.sound' $ add_con_gen.rel.of _ _ $ eqv.of_smul _ _ _ _
lemma smul_tprod_coeff (z : R) (f : Π i, s i) (i : ι) (r : R₁)
[has_smul R₁ R] [is_scalar_tower R₁ R R] [has_smul R₁ (s i)] [is_scalar_tower R₁ R (s i)] :
tprod_coeff R z (update f i (r • f i)) = tprod_coeff R (r • z) f :=
begin
have h₁ : r • z = (r • (1 : R)) * z := by rw [smul_mul_assoc, one_mul],
have h₂ : r • (f i) = (r • (1 : R)) • f i := (smul_one_smul _ _ _).symm,
rw [h₁, h₂],
exact smul_tprod_coeff_aux z f i _,
end
/-- Construct an `add_monoid_hom` from `(⨂[R] i, s i)` to some space `F` from a function
`φ : (R × Π i, s i) → F` with the appropriate properties. -/
def lift_add_hom (φ : (R × Π i, s i) → F)
(C0 : ∀ (r : R) (f : Π i, s i) (i : ι) (hf : f i = 0), φ (r, f) = 0)
(C0' : ∀ (f : Π i, s i), φ (0, f) = 0)
(C_add : ∀ (r : R) (f : Π i, s i) (i : ι) (m₁ m₂ : s i),
φ (r, update f i m₁) + φ (r, update f i m₂) = φ (r, update f i (m₁ + m₂)))
(C_add_scalar : ∀ (r r' : R) (f : Π i, s i),
φ (r , f) + φ (r', f) = φ (r + r', f))
(C_smul : ∀ (r : R) (f : Π i, s i) (i : ι) (r' : R),
φ (r, update f i (r' • (f i))) = φ (r' * r, f))
: (⨂[R] i, s i) →+ F :=
(add_con_gen (pi_tensor_product.eqv R s)).lift (free_add_monoid.lift φ) $ add_con.add_con_gen_le $
λ x y hxy, match x, y, hxy with
| _, _, (eqv.of_zero r' f i hf) := (add_con.ker_rel _).2 $
by simp [free_add_monoid.lift_eval_of, C0 r' f i hf]
| _, _, (eqv.of_zero_scalar f) := (add_con.ker_rel _).2 $
by simp [free_add_monoid.lift_eval_of, C0']
| _, _, (eqv.of_add z f i m₁ m₂) := (add_con.ker_rel _).2 $
by simp [free_add_monoid.lift_eval_of, C_add]
| _, _, (eqv.of_add_scalar z₁ z₂ f) := (add_con.ker_rel _).2 $
by simp [free_add_monoid.lift_eval_of, C_add_scalar]
| _, _, (eqv.of_smul z f i r') := (add_con.ker_rel _).2 $
by simp [free_add_monoid.lift_eval_of, C_smul]
| _, _, (eqv.add_comm x y) := (add_con.ker_rel _).2 $
by simp_rw [add_monoid_hom.map_add, add_comm]
end
@[elab_as_eliminator]
protected theorem induction_on'
{C : (⨂[R] i, s i) → Prop}
(z : ⨂[R] i, s i)
(C1 : ∀ {r : R} {f : Π i, s i}, C (tprod_coeff R r f))
(Cp : ∀ {x y}, C x → C y → C (x + y)) : C z :=
begin
have C0 : C 0,
{ have h₁ := @C1 0 0,
rwa [zero_tprod_coeff] at h₁ },
refine add_con.induction_on z (λ x, free_add_monoid.rec_on x C0 _),
simp_rw add_con.coe_add,
refine λ f y ih, Cp _ ih,
convert @C1 f.1 f.2,
simp only [prod.mk.eta],
end
section distrib_mul_action
variables [monoid R₁] [distrib_mul_action R₁ R] [smul_comm_class R₁ R R]
variables [monoid R₂] [distrib_mul_action R₂ R] [smul_comm_class R₂ R R]
-- Most of the time we want the instance below this one, which is easier for typeclass resolution
-- to find.
instance has_smul' : has_smul R₁ (⨂[R] i, s i) :=
⟨λ r, lift_add_hom (λ f : R × Π i, s i, tprod_coeff R (r • f.1) f.2)
(λ r' f i hf, by simp_rw [zero_tprod_coeff' _ f i hf])
(λ f, by simp [zero_tprod_coeff])
(λ r' f i m₁ m₂, by simp [add_tprod_coeff])
(λ r' r'' f, by simp [add_tprod_coeff', mul_add])
(λ z f i r', by simp [smul_tprod_coeff, mul_smul_comm])⟩
instance : has_smul R (⨂[R] i, s i) := pi_tensor_product.has_smul'
lemma smul_tprod_coeff' (r : R₁) (z : R) (f : Π i, s i) :
r • (tprod_coeff R z f) = tprod_coeff R (r • z) f := rfl
protected theorem smul_add (r : R₁) (x y : ⨂[R] i, s i) :
r • (x + y) = r • x + r • y :=
add_monoid_hom.map_add _ _ _
instance distrib_mul_action' : distrib_mul_action R₁ (⨂[R] i, s i) :=
{ smul := (•),
smul_add := λ r x y, add_monoid_hom.map_add _ _ _,
mul_smul := λ r r' x, pi_tensor_product.induction_on' x
(λ r'' f, by simp [smul_tprod_coeff', smul_smul])
(λ x y ihx ihy, by simp_rw [pi_tensor_product.smul_add, ihx, ihy]),
one_smul := λ x, pi_tensor_product.induction_on' x
(λ f, by simp [smul_tprod_coeff' _ _])
(λ z y ihz ihy, by simp_rw [pi_tensor_product.smul_add, ihz, ihy]),
smul_zero := λ r, add_monoid_hom.map_zero _ }
instance smul_comm_class' [smul_comm_class R₁ R₂ R] : smul_comm_class R₁ R₂ (⨂[R] i, s i) :=
⟨λ r' r'' x, pi_tensor_product.induction_on' x
(λ xr xf, by simp only [smul_tprod_coeff', smul_comm])
(λ z y ihz ihy, by simp_rw [pi_tensor_product.smul_add, ihz, ihy])⟩
instance is_scalar_tower' [has_smul R₁ R₂] [is_scalar_tower R₁ R₂ R] :
is_scalar_tower R₁ R₂ (⨂[R] i, s i) :=
⟨λ r' r'' x, pi_tensor_product.induction_on' x
(λ xr xf, by simp only [smul_tprod_coeff', smul_assoc])
(λ z y ihz ihy, by simp_rw [pi_tensor_product.smul_add, ihz, ihy])⟩
end distrib_mul_action
-- Most of the time we want the instance below this one, which is easier for typeclass resolution
-- to find.
instance module' [semiring R₁] [module R₁ R] [smul_comm_class R₁ R R] : module R₁ (⨂[R] i, s i) :=
{ smul := (•),
add_smul := λ r r' x, pi_tensor_product.induction_on' x
(λ r f, by simp [smul_tprod_coeff' _ _, add_smul, add_tprod_coeff'])
(λ x y ihx ihy, by simp [pi_tensor_product.smul_add, ihx, ihy, add_add_add_comm]),
zero_smul := λ x, pi_tensor_product.induction_on' x
(λ r f, by { simp_rw [smul_tprod_coeff' _ _, zero_smul], exact zero_tprod_coeff _ })
(λ x y ihx ihy, by rw [pi_tensor_product.smul_add, ihx, ihy, add_zero]),
..pi_tensor_product.distrib_mul_action' }
-- shortcut instances
instance : module R (⨂[R] i, s i) := pi_tensor_product.module'
instance : smul_comm_class R R (⨂[R] i, s i) := pi_tensor_product.smul_comm_class'
instance : is_scalar_tower R R (⨂[R] i, s i) := pi_tensor_product.is_scalar_tower'
variables {R}
variables (R)
/-- The canonical `multilinear_map R s (⨂[R] i, s i)`. -/
def tprod : multilinear_map R s (⨂[R] i, s i) :=
{ to_fun := tprod_coeff R 1,
map_add' := λ f i x y, (add_tprod_coeff (1 : R) f i x y).symm,
map_smul' := λ f i r x,
by simp_rw [smul_tprod_coeff', ←smul_tprod_coeff (1 : R) _ i, update_idem, update_same] }
variables {R}
notation `⨂ₜ[`:100 R`] ` binders `, ` r:(scoped:67 f, tprod R f) := r
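-- Usage sketch: `⨂ₜ[R] i, f i` is notation for `tprod R` applied to the family `f`,
-- so the two spellings agree definitionally.
example (f : Π i, s i) : (⨂ₜ[R] i, f i) = tprod R f := rfl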
@[simp]
lemma tprod_coeff_eq_smul_tprod (z : R) (f : Π i, s i) : tprod_coeff R z f = z • tprod R f :=
begin
have : z = z • (1 : R) := by simp only [mul_one, algebra.id.smul_eq_mul],
conv_lhs { rw this },
rw ←smul_tprod_coeff',
refl,
end
@[elab_as_eliminator]
protected theorem induction_on
{C : (⨂[R] i, s i) → Prop}
(z : ⨂[R] i, s i)
(C1 : ∀ {r : R} {f : Π i, s i}, C (r • (tprod R f)))
(Cp : ∀ {x y}, C x → C y → C (x + y)) : C z :=
begin
simp_rw ←tprod_coeff_eq_smul_tprod at C1,
exact pi_tensor_product.induction_on' z @C1 @Cp,
end
@[ext]
theorem ext {φ₁ φ₂ : (⨂[R] i, s i) →ₗ[R] E}
(H : φ₁.comp_multilinear_map (tprod R) = φ₂.comp_multilinear_map (tprod R)) : φ₁ = φ₂ :=
begin
refine linear_map.ext _,
refine λ z,
(pi_tensor_product.induction_on' z _ (λ x y hx hy, by rw [φ₁.map_add, φ₂.map_add, hx, hy])),
{ intros r f,
rw [tprod_coeff_eq_smul_tprod, φ₁.map_smul, φ₂.map_smul],
apply _root_.congr_arg,
exact multilinear_map.congr_fun H f }
end
end module
section multilinear
open multilinear_map
variables {s}
/-- Auxiliary function for constructing a linear map `(⨂[R] i, s i) → E` given a
`multilinear_map R s E` with the property that its composition with the canonical
`multilinear_map R s (⨂[R] i, s i)` is the given multilinear map. -/
def lift_aux (φ : multilinear_map R s E) : (⨂[R] i, s i) →+ E :=
lift_add_hom (λ (p : R × Π i, s i), p.1 • (φ p.2))
(λ z f i hf, by rw [map_coord_zero φ i hf, smul_zero])
(λ f, by rw [zero_smul])
(λ z f i m₁ m₂, by rw [←smul_add, φ.map_add])
(λ z₁ z₂ f, by rw [←add_smul])
(λ z f i r, by simp [φ.map_smul, smul_smul, mul_comm])
lemma lift_aux_tprod (φ : multilinear_map R s E) (f : Π i, s i) : lift_aux φ (tprod R f) = φ f :=
by simp only [lift_aux, lift_add_hom, tprod, multilinear_map.coe_mk, tprod_coeff,
free_add_monoid.lift_eval_of, one_smul, add_con.lift_mk']
lemma lift_aux_tprod_coeff (φ : multilinear_map R s E) (z : R) (f : Π i, s i) :
lift_aux φ (tprod_coeff R z f) = z • φ f :=
by simp [lift_aux, lift_add_hom, tprod_coeff, free_add_monoid.lift_eval_of]
lemma lift_aux.smul {φ : multilinear_map R s E} (r : R) (x : ⨂[R] i, s i) :
lift_aux φ (r • x) = r • lift_aux φ x :=
begin
refine pi_tensor_product.induction_on' x _ _,
{ intros z f,
rw [smul_tprod_coeff' r z f, lift_aux_tprod_coeff, lift_aux_tprod_coeff, smul_assoc] },
{ intros z y ihz ihy,
rw [smul_add, (lift_aux φ).map_add, ihz, ihy, (lift_aux φ).map_add, smul_add] }
end
/-- Constructing a linear map `(⨂[R] i, s i) → E` given a `multilinear_map R s E` with the
property that its composition with the canonical `multilinear_map R s (⨂[R] i, s i)` is
the given multilinear map `φ`. -/
def lift : (multilinear_map R s E) ≃ₗ[R] ((⨂[R] i, s i) →ₗ[R] E) :=
{ to_fun := λ φ, { map_smul' := lift_aux.smul, .. lift_aux φ },
inv_fun := λ φ', φ'.comp_multilinear_map (tprod R),
left_inv := λ φ, by { ext, simp [lift_aux_tprod, linear_map.comp_multilinear_map] },
right_inv := λ φ, by { ext, simp [lift_aux_tprod] },
map_add' := λ φ₁ φ₂, by { ext, simp [lift_aux_tprod] },
map_smul' := λ r φ₂, by { ext, simp [lift_aux_tprod] } }
variables {φ : multilinear_map R s E}
@[simp] lemma lift.tprod (f : Π i, s i) : lift φ (tprod R f) = φ f := lift_aux_tprod φ f
theorem lift.unique' {φ' : (⨂[R] i, s i) →ₗ[R] E} (H : φ'.comp_multilinear_map (tprod R) = φ) :
φ' = lift φ :=
ext $ H.symm ▸ (lift.symm_apply_apply φ).symm
theorem lift.unique {φ' : (⨂[R] i, s i) →ₗ[R] E} (H : ∀ f, φ' (tprod R f) = φ f) :
φ' = lift φ :=
lift.unique' (multilinear_map.ext H)
@[simp]
theorem lift_symm (φ' : (⨂[R] i, s i) →ₗ[R] E) : lift.symm φ' = φ'.comp_multilinear_map (tprod R) :=
rfl
@[simp]
theorem lift_tprod : lift (tprod R : multilinear_map R s _) = linear_map.id :=
eq.symm $ lift.unique' rfl
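-- Usage sketch: the linear map `lift φ` is determined by its values on elementary tensors,
-- as recorded by `lift.tprod` above.
example (f : Π i, s i) : lift φ (⨂ₜ[R] i, f i) = φ f := lift.tprod f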
section
variables (R M)
/-- Re-index the components of the tensor power by `e`.
For simplicity, this is defined only for homogeneously- (rather than dependently-) typed components.
-/
def reindex (e : ι ≃ ι₂) : ⨂[R] i : ι, M ≃ₗ[R] ⨂[R] i : ι₂, M :=
linear_equiv.of_linear
(lift (dom_dom_congr e.symm (tprod R : multilinear_map R _ (⨂[R] i : ι₂, M))))
(lift (dom_dom_congr e (tprod R : multilinear_map R _ (⨂[R] i : ι, M))))
(by { ext, simp only [linear_map.comp_apply, linear_map.id_apply, lift_tprod,
linear_map.comp_multilinear_map_apply, lift.tprod,
dom_dom_congr_apply, equiv.apply_symm_apply] })
(by { ext, simp only [linear_map.comp_apply, linear_map.id_apply, lift_tprod,
linear_map.comp_multilinear_map_apply, lift.tprod,
dom_dom_congr_apply, equiv.symm_apply_apply] })
end
@[simp] lemma reindex_tprod (e : ι ≃ ι₂) (f : Π i, M) :
reindex R M e (tprod R f) = tprod R (λ i, f (e.symm i)) :=
lift_aux_tprod _ f
@[simp] lemma reindex_comp_tprod (e : ι ≃ ι₂) :
(reindex R M e : ⨂[R] i : ι, M →ₗ[R] ⨂[R] i : ι₂, M).comp_multilinear_map (tprod R) =
(tprod R : multilinear_map R (λ i, M) _).dom_dom_congr e.symm :=
multilinear_map.ext $ reindex_tprod e
@[simp] lemma lift_comp_reindex (e : ι ≃ ι₂) (φ : multilinear_map R (λ _ : ι₂, M) E) :
(lift φ) ∘ₗ ↑(reindex R M e) = lift (φ.dom_dom_congr e.symm) :=
by { ext, simp, }
@[simp]
lemma lift_reindex (e : ι ≃ ι₂) (φ : multilinear_map R (λ _, M) E) (x : ⨂[R] i, M) :
lift φ (reindex R M e x) = lift (φ.dom_dom_congr e.symm) x :=
linear_map.congr_fun (lift_comp_reindex e φ) x
@[simp] lemma reindex_trans (e : ι ≃ ι₂) (e' : ι₂ ≃ ι₃) :
(reindex R M e).trans (reindex R M e') = reindex R M (e.trans e') :=
begin
apply linear_equiv.to_linear_map_injective,
ext f,
simp only [linear_equiv.trans_apply, linear_equiv.coe_coe, reindex_tprod,
linear_map.coe_comp_multilinear_map, function.comp_app, multilinear_map.dom_dom_congr_apply,
reindex_comp_tprod],
congr,
end
@[simp] lemma reindex_reindex (e : ι ≃ ι₂) (e' : ι₂ ≃ ι₃) (x : ⨂[R] i, M) :
reindex R M e' (reindex R M e x) = reindex R M (e.trans e') x :=
linear_equiv.congr_fun (reindex_trans e e' : _ = reindex R M (e.trans e')) x
@[simp] lemma reindex_symm (e : ι ≃ ι₂) :
(reindex R M e).symm = reindex R M e.symm := rfl
@[simp] lemma reindex_refl : reindex R M (equiv.refl ι) = linear_equiv.refl R _ :=
begin
apply linear_equiv.to_linear_map_injective,
ext1,
rw [reindex_comp_tprod, linear_equiv.refl_to_linear_map, equiv.refl_symm],
refl,
end
variables (ι)
/-- The tensor product over an empty index type `ι` is isomorphic to the base ring. -/
@[simps symm_apply]
def is_empty_equiv [is_empty ι] : ⨂[R] i : ι, M ≃ₗ[R] R :=
{ to_fun := lift (const_of_is_empty R 1),
inv_fun := λ r, r • tprod R (@is_empty_elim _ _ _),
left_inv := λ x, by
{ apply x.induction_on,
{ intros r f,
have := subsingleton.elim f is_empty_elim,
simp [this], },
{ simp only,
intros x y hx hy,
simp [add_smul, hx, hy] }},
right_inv := λ t, by simp only [mul_one, algebra.id.smul_eq_mul, const_of_is_empty_apply,
linear_map.map_smul, pi_tensor_product.lift.tprod],
map_add' := linear_map.map_add _,
map_smul' := linear_map.map_smul _, }
@[simp]
lemma is_empty_equiv_apply_tprod [is_empty ι] (f : ι → M) : is_empty_equiv ι (tprod R f) = 1 :=
lift.tprod _
variables {ι}
/-- The tensor product over a single index type is isomorphic to the underlying module. -/
@[simps symm_apply]
def subsingleton_equiv [subsingleton ι] (i₀ : ι) : ⨂[R] i : ι, M ≃ₗ[R] M :=
{ to_fun := lift (multilinear_map.of_subsingleton R M i₀),
inv_fun := λ m, tprod R (λ v, m),
left_inv := λ x, by
{ dsimp only,
have : ∀ (f : ι → M) (z : M), (λ i : ι, z) = update f i₀ z,
{ intros f z,
ext i,
rw [subsingleton.elim i i₀, function.update_same] },
apply x.induction_on,
{ intros r f,
simp only [linear_map.map_smul, lift.tprod, of_subsingleton_apply, function.eval,
this f, multilinear_map.map_smul, update_eq_self], },
{ intros x y hx hy,
simp only [multilinear_map.map_add, this 0 (_ + _), linear_map.map_add, ←this 0 (lift _ _),
hx, hy] } },
right_inv := λ t, by simp only [of_subsingleton_apply, lift.tprod, function.eval_apply],
map_add' := linear_map.map_add _,
map_smul' := linear_map.map_smul _, }
@[simp]
lemma subsingleton_equiv_apply_tprod [subsingleton ι] (i : ι) (f : ι → M) :
subsingleton_equiv i (tprod R f) = f i :=
lift.tprod _
section tmul
/-- Collapse a `tensor_product` of `pi_tensor_product`s. -/
private def tmul : (⨂[R] i : ι, M) ⊗[R] (⨂[R] i : ι₂, M) →ₗ[R] ⨂[R] i : ι ⊕ ι₂, M :=
tensor_product.lift
{ to_fun := λ a, pi_tensor_product.lift $ pi_tensor_product.lift
(multilinear_map.curry_sum_equiv R _ _ M _ (tprod R)) a,
map_add' := λ a b, by simp only [linear_equiv.map_add, linear_map.map_add],
map_smul' := λ r a, by simp only [linear_equiv.map_smul, linear_map.map_smul,
ring_hom.id_apply], }
private lemma tmul_apply (a : ι → M) (b : ι₂ → M) :
tmul ((⨂ₜ[R] i, a i) ⊗ₜ[R] (⨂ₜ[R] i, b i)) = ⨂ₜ[R] i, sum.elim a b i :=
begin
erw [tensor_product.lift.tmul, pi_tensor_product.lift.tprod, pi_tensor_product.lift.tprod],
refl
end
/-- Expand `pi_tensor_product` into a `tensor_product` of two factors. -/
private def tmul_symm : ⨂[R] i : ι ⊕ ι₂, M →ₗ[R] (⨂[R] i : ι, M) ⊗[R] (⨂[R] i : ι₂, M) :=
-- by using tactic mode, we avoid the need for a lot of `@`s and `_`s
pi_tensor_product.lift $ by apply multilinear_map.dom_coprod; [exact tprod R, exact tprod R]
private lemma tmul_symm_apply (a : ι ⊕ ι₂ → M) :
tmul_symm (⨂ₜ[R] i, a i) = (⨂ₜ[R] i, a (sum.inl i)) ⊗ₜ[R] (⨂ₜ[R] i, a (sum.inr i)) :=
pi_tensor_product.lift.tprod _
variables (R M)
local attribute [ext] tensor_product.ext
/-- Equivalence between a `tensor_product` of `pi_tensor_product`s and a single
`pi_tensor_product` indexed by a `sum` type.
For simplicity, this is defined only for homogeneously- (rather than dependently-) typed components.
-/
def tmul_equiv : (⨂[R] i : ι, M) ⊗[R] (⨂[R] i : ι₂, M) ≃ₗ[R] ⨂[R] i : ι ⊕ ι₂, M :=
linear_equiv.of_linear tmul tmul_symm
(by { ext x,
show tmul (tmul_symm (tprod R x)) = tprod R x, -- Speed up the call to `simp`.
simp only [tmul_symm_apply, tmul_apply, sum.elim_comp_inl_inr], })
(by { ext x y,
show tmul_symm (tmul (tprod R x ⊗ₜ[R] tprod R y)) = tprod R x ⊗ₜ[R] tprod R y,
simp only [tmul_apply, tmul_symm_apply, sum.elim_inl, sum.elim_inr], })
@[simp] lemma tmul_equiv_apply (a : ι → M) (b : ι₂ → M) :
tmul_equiv R M ((⨂ₜ[R] i, a i) ⊗ₜ[R] (⨂ₜ[R] i, b i)) = ⨂ₜ[R] i, sum.elim a b i :=
tmul_apply a b
@[simp] lemma tmul_equiv_symm_apply (a : ι ⊕ ι₂ → M) :
(tmul_equiv R M).symm (⨂ₜ[R] i, a i) = (⨂ₜ[R] i, a (sum.inl i)) ⊗ₜ[R] (⨂ₜ[R] i, a (sum.inr i)) :=
tmul_symm_apply a
end tmul
end multilinear
end pi_tensor_product
end semiring
section ring
namespace pi_tensor_product
open pi_tensor_product
open_locale tensor_product
variables {ι : Type*} [decidable_eq ι] {R : Type*} [comm_ring R]
variables {s : ι → Type*} [∀ i, add_comm_group (s i)] [∀ i, module R (s i)]
/- Unlike for the binary tensor product, we require `R` to be a `comm_ring` here, otherwise
this is false in the case where `ι` is empty. -/
instance : add_comm_group (⨂[R] i, s i) := module.add_comm_monoid_to_add_comm_group R
end pi_tensor_product
end ring
|
d2dba6ff661a6056cebeb0a1e381ee5a662147d9
|
4950bf76e5ae40ba9f8491647d0b6f228ddce173
|
/src/field_theory/adjoin.lean
|
6902d619eafc3dad9ea550965d523d6031fa82f3
|
[
"Apache-2.0"
] |
permissive
|
ntzwq/mathlib
|
ca50b21079b0a7c6781c34b62199a396dd00cee2
|
36eec1a98f22df82eaccd354a758ef8576af2a7f
|
refs/heads/master
| 1,675,193,391,478
| 1,607,822,996,000
| 1,607,822,996,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 22,692
|
lean
|
/-
Copyright (c) 2020 Thomas Browning and Patrick Lutz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Thomas Browning and Patrick Lutz
-/
import field_theory.intermediate_field
import field_theory.splitting_field
import field_theory.fixed
/-!
# Adjoining Elements to Fields
In this file we introduce the notion of adjoining elements to fields.
This isn't quite the same as adjoining elements to rings.
For example, `algebra.adjoin K {x}` might not include `x⁻¹`.
## Main results
- `adjoin_adjoin_left`: adjoining S and then T is the same as adjoining `S ∪ T`.
- `bot_eq_top_of_dim_adjoin_eq_one`: if `F⟮x⟯` has dimension `1` over `F` for every `x`
  in `E`, then `F = E`.
## Notation
- `F⟮α⟯`: adjoin a single element `α` to `F`.
-/
open finite_dimensional
open_locale classical
namespace intermediate_field
section adjoin_def
variables (F : Type*) [field F] {E : Type*} [field E] [algebra F E] (S : set E)
/-- `adjoin F S` extends a field `F` by adjoining a set `S ⊆ E`. -/
def adjoin : intermediate_field F E :=
{ algebra_map_mem' := λ x, subfield.subset_closure (or.inl (set.mem_range_self x)),
..subfield.closure (set.range (algebra_map F E) ∪ S) }
end adjoin_def
section lattice
variables {F : Type*} [field F] {E : Type*} [field E] [algebra F E]
@[simp] lemma adjoin_le_iff {S : set E} {T : intermediate_field F E} : adjoin F S ≤ T ↔ S ≤ T :=
⟨λ H, le_trans (le_trans (set.subset_union_right _ _) subfield.subset_closure) H,
λ H, (@subfield.closure_le E _ (set.range (algebra_map F E) ∪ S) T.to_subfield).mpr
(set.union_subset (intermediate_field.set_range_subset T) H)⟩
lemma gc : galois_connection (adjoin F : set E → intermediate_field F E) coe := λ _ _, adjoin_le_iff
/-- Galois insertion between `adjoin` and `coe`. -/
def gi : galois_insertion (adjoin F : set E → intermediate_field F E) coe :=
{ choice := λ S _, adjoin F S,
gc := intermediate_field.gc,
le_l_u := λ S, (intermediate_field.gc (S : set E) (adjoin F S)).1 $ le_refl _,
choice_eq := λ _ _, rfl }
instance : complete_lattice (intermediate_field F E) :=
galois_insertion.lift_complete_lattice intermediate_field.gi
instance : inhabited (intermediate_field F E) := ⟨⊤⟩
lemma mem_bot {x : E} : x ∈ (⊥ : intermediate_field F E) ↔ x ∈ set.range (algebra_map F E) :=
begin
suffices : set.range (algebra_map F E) = (⊥ : intermediate_field F E),
{ rw this, refl },
{ change set.range (algebra_map F E) = subfield.closure (set.range (algebra_map F E) ∪ ∅),
simp [←set.image_univ, ←ring_hom.map_field_closure] }
end
lemma mem_top {x : E} : x ∈ (⊤ : intermediate_field F E) :=
subfield.subset_closure $ or.inr trivial
@[simp] lemma bot_to_subalgebra : (⊥ : intermediate_field F E).to_subalgebra = ⊥ :=
by { ext, rw [mem_to_subalgebra, algebra.mem_bot, mem_bot] }
@[simp] lemma top_to_subalgebra : (⊤ : intermediate_field F E).to_subalgebra = ⊤ :=
by { ext, rw [mem_to_subalgebra, iff_true_right algebra.mem_top], exact mem_top }
/-- Construct an algebra isomorphism from an equality of subalgebras -/
def subalgebra.equiv_of_eq {X Y : subalgebra F E} (h : X = Y) : X ≃ₐ[F] Y :=
by refine { to_fun := λ x, ⟨x, _⟩, inv_fun := λ x, ⟨x, _⟩, .. }; tidy
/-- The bottom `intermediate_field` is isomorphic to the base field `F`. -/
noncomputable def bot_equiv : (⊥ : intermediate_field F E) ≃ₐ[F] F :=
(subalgebra.equiv_of_eq bot_to_subalgebra).trans (algebra.bot_equiv F E)
@[simp] lemma bot_equiv_def (x : F) :
bot_equiv (algebra_map F (⊥ : intermediate_field F E) x) = x :=
alg_equiv.commutes bot_equiv x
noncomputable instance algebra_over_bot : algebra (⊥ : intermediate_field F E) F :=
ring_hom.to_algebra intermediate_field.bot_equiv.to_alg_hom.to_ring_hom
instance is_scalar_tower_over_bot : is_scalar_tower (⊥ : intermediate_field F E) F E :=
is_scalar_tower.of_algebra_map_eq
begin
intro x,
let ϕ := algebra.of_id F (⊥ : subalgebra F E),
let ψ := alg_equiv.of_bijective ϕ ((algebra.bot_equiv F E).symm.bijective),
change (↑x : E) = ↑(ψ (ψ.symm ⟨x, _⟩)),
rw alg_equiv.apply_symm_apply ψ ⟨x, _⟩,
refl
end
/-- The top `intermediate_field` is isomorphic to the ambient field `E`. -/
noncomputable def top_equiv : (⊤ : intermediate_field F E) ≃ₐ[F] E :=
(subalgebra.equiv_of_eq top_to_subalgebra).trans algebra.top_equiv
@[simp] lemma top_equiv_def (x : (⊤ : intermediate_field F E)) : top_equiv x = ↑x :=
begin
suffices : algebra.to_top (top_equiv x) = algebra.to_top (x : E),
{ rwa subtype.ext_iff at this },
exact alg_equiv.apply_symm_apply (alg_equiv.of_bijective algebra.to_top
⟨λ _ _, subtype.mk.inj, λ x, ⟨x.val, by { ext, refl }⟩⟩ : E ≃ₐ[F] (⊤ : subalgebra F E))
(subalgebra.equiv_of_eq top_to_subalgebra x),
end
@[simp] lemma coe_bot_eq_self (K : intermediate_field F E) : ↑(⊥ : intermediate_field K E) = K :=
by { ext, rw [mem_lift2, mem_bot], exact set.ext_iff.mp subtype.range_coe x }
@[simp] lemma coe_top_eq_top (K : intermediate_field F E) :
↑(⊤ : intermediate_field K E) = (⊤ : intermediate_field F E) :=
intermediate_field.ext'_iff.mpr (set.ext_iff.mpr (λ _, iff_of_true mem_top mem_top))
end lattice
section adjoin_def
variables (F : Type*) [field F] {E : Type*} [field E] [algebra F E] (S : set E)
lemma adjoin_eq_range_algebra_map_adjoin :
(adjoin F S : set E) = set.range (algebra_map (adjoin F S) E) := (subtype.range_coe).symm
lemma adjoin.algebra_map_mem (x : F) : algebra_map F E x ∈ adjoin F S :=
intermediate_field.algebra_map_mem (adjoin F S) x
lemma adjoin.range_algebra_map_subset : set.range (algebra_map F E) ⊆ adjoin F S :=
begin
intros x hx,
cases hx with f hf,
rw ← hf,
exact adjoin.algebra_map_mem F S f,
end
instance adjoin.field_coe : has_coe_t F (adjoin F S) :=
{coe := λ x, ⟨algebra_map F E x, adjoin.algebra_map_mem F S x⟩}
lemma subset_adjoin : S ⊆ adjoin F S :=
λ x hx, subfield.subset_closure (or.inr hx)
instance adjoin.set_coe : has_coe_t S (adjoin F S) :=
{coe := λ x, ⟨x,subset_adjoin F S (subtype.mem x)⟩}
@[mono] lemma adjoin.mono (T : set E) (h : S ⊆ T) : adjoin F S ≤ adjoin F T :=
galois_connection.monotone_l gc h
lemma adjoin_contains_field_as_subfield (F : subfield E) : (F : set E) ⊆ adjoin F S :=
λ x hx, adjoin.algebra_map_mem F S ⟨x, hx⟩
lemma subset_adjoin_of_subset_left {F : subfield E} {T : set E} (HT : T ⊆ F) : T ⊆ adjoin F S :=
λ x hx, (adjoin F S).algebra_map_mem ⟨x, HT hx⟩
lemma subset_adjoin_of_subset_right {T : set E} (H : T ⊆ S) : T ⊆ adjoin F S :=
λ x hx, subset_adjoin F S (H hx)
@[simp] lemma adjoin_empty (F E : Type*) [field F] [field E] [algebra F E] :
adjoin F (∅ : set E) = ⊥ :=
eq_bot_iff.mpr (adjoin_le_iff.mpr (set.empty_subset _))
/-- If `K` is a subfield of `E` containing the image of `F` and the set `S`,
then `adjoin F S ≤ K`. -/
lemma adjoin_le_subfield {K : subfield E} (HF : set.range (algebra_map F E) ⊆ K)
(HS : S ⊆ K) : (adjoin F S).to_subfield ≤ K :=
begin
apply subfield.closure_le.mpr,
rw set.union_subset_iff,
exact ⟨HF, HS⟩,
end
lemma adjoin_subset_adjoin_iff {F' : Type*} [field F'] [algebra F' E]
{S S' : set E} : (adjoin F S : set E) ⊆ adjoin F' S' ↔
set.range (algebra_map F E) ⊆ adjoin F' S' ∧ S ⊆ adjoin F' S' :=
⟨λ h, ⟨trans (adjoin.range_algebra_map_subset _ _) h, trans (subset_adjoin _ _) h⟩,
λ ⟨hF, hS⟩, subfield.closure_le.mpr (set.union_subset hF hS)⟩
/-- `F[S][T] = F[S ∪ T]` -/
lemma adjoin_adjoin_left (T : set E) : ↑(adjoin (adjoin F S) T) = adjoin F (S ∪ T) :=
begin
rw intermediate_field.ext'_iff,
change ↑(adjoin (adjoin F S) T) = _,
apply set.eq_of_subset_of_subset; rw adjoin_subset_adjoin_iff; split,
{ rintros _ ⟨⟨x, hx⟩, rfl⟩, exact adjoin.mono _ _ _ (set.subset_union_left _ _) hx },
{ exact subset_adjoin_of_subset_right _ _ (set.subset_union_right _ _) },
{ exact subset_adjoin_of_subset_left _ (adjoin.range_algebra_map_subset _ _) },
{ exact set.union_subset
(subset_adjoin_of_subset_left _ (subset_adjoin _ _))
(subset_adjoin _ _) },
end
@[simp] lemma adjoin_insert_adjoin (x : E) :
adjoin F (insert x (adjoin F S : set E)) = adjoin F (insert x S) :=
le_antisymm
(adjoin_le_iff.mpr (set.insert_subset.mpr ⟨subset_adjoin _ _ (set.mem_insert _ _),
adjoin_le_iff.mpr (subset_adjoin_of_subset_right _ _ (set.subset_insert _ _))⟩))
(adjoin.mono _ _ _ (set.insert_subset_insert (subset_adjoin _ _)))
/-- `F[S][T] = F[T][S]` -/
lemma adjoin_adjoin_comm (T : set E) :
↑(adjoin (adjoin F S) T) = (↑(adjoin (adjoin F T) S) : (intermediate_field F E)) :=
by rw [adjoin_adjoin_left, adjoin_adjoin_left, set.union_comm]
lemma adjoin_map {E' : Type*} [field E'] [algebra F E'] (f : E →ₐ[F] E') :
(adjoin F S).map f = adjoin F (f '' S) :=
begin
ext x,
show x ∈ (subfield.closure (set.range (algebra_map F E) ∪ S)).map (f : E →+* E') ↔
x ∈ subfield.closure (set.range (algebra_map F E') ∪ f '' S),
rw [ring_hom.map_field_closure, set.image_union, ← set.range_comp, ← ring_hom.coe_comp,
f.comp_algebra_map],
refl,
end
lemma algebra_adjoin_le_adjoin : algebra.adjoin F S ≤ (adjoin F S).to_subalgebra :=
algebra.adjoin_le (subset_adjoin _ _)
lemma adjoin_eq_algebra_adjoin (inv_mem : ∀ x ∈ algebra.adjoin F S, x⁻¹ ∈ algebra.adjoin F S) :
(adjoin F S).to_subalgebra = algebra.adjoin F S :=
le_antisymm
(show adjoin F S ≤
{ neg_mem' := λ x, (algebra.adjoin F S).neg_mem, inv_mem' := inv_mem, .. algebra.adjoin F S},
from adjoin_le_iff.mpr (algebra.subset_adjoin))
(algebra_adjoin_le_adjoin _ _)
lemma eq_adjoin_of_eq_algebra_adjoin (K : intermediate_field F E)
(h : K.to_subalgebra = algebra.adjoin F S) : K = adjoin F S :=
begin
apply to_subalgebra_injective,
rw h,
refine (adjoin_eq_algebra_adjoin _ _ _).symm,
intros x,
convert K.inv_mem,
rw ← h,
refl
end
@[elab_as_eliminator]
lemma adjoin_induction {s : set E} {p : E → Prop} {x} (h : x ∈ adjoin F s)
(Hs : ∀ x ∈ s, p x) (Hmap : ∀ x, p (algebra_map F E x))
(Hadd : ∀ x y, p x → p y → p (x + y))
(Hneg : ∀ x, p x → p (-x))
(Hinv : ∀ x, p x → p x⁻¹)
(Hmul : ∀ x y, p x → p y → p (x * y)) : p x :=
subfield.closure_induction h (λ x hx, or.cases_on hx (λ ⟨x, hx⟩, hx ▸ Hmap x) (Hs x))
((algebra_map F E).map_one ▸ Hmap 1)
Hadd Hneg Hinv Hmul
/--
Variation on `set.insert` to enable good notation for adjoining elements to fields.
Used so that `singleton` is preferred over `insert` when adjoining a single element.
-/
-- this definition of notation is courtesy of Kyle Miller on Zulip
class insert {α : Type*} (s : set α) :=
(insert : α → set α)
@[priority 1000]
instance insert_empty {α : Type*} : insert (∅ : set α) :=
{ insert := λ x, @singleton _ _ set.has_singleton x }
@[priority 900]
instance insert_nonempty {α : Type*} (s : set α) : insert s :=
{ insert := λ x, set.insert x s }
notation K`⟮`:std.prec.max_plus l:(foldr `, ` (h t, insert.insert t h) ∅) `⟯` := adjoin K l
section adjoin_simple
variables (α : E)
lemma mem_adjoin_simple_self : α ∈ F⟮α⟯ :=
subset_adjoin F {α} (set.mem_singleton α)
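-- Usage sketch: `F⟮α⟯` is `adjoin F {α}`, so the adjoined element is itself a member.
example : α ∈ F⟮α⟯ := mem_adjoin_simple_self F α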
/-- The generator of `F⟮α⟯`, namely `α` regarded as an element of `F⟮α⟯`. -/
def adjoin_simple.gen : F⟮α⟯ := ⟨α, mem_adjoin_simple_self F α⟩
@[simp] lemma adjoin_simple.algebra_map_gen : algebra_map F⟮α⟯ E (adjoin_simple.gen F α) = α := rfl
lemma adjoin_simple_adjoin_simple (β : E) : ↑F⟮α⟯⟮β⟯ = F⟮α, β⟯ :=
adjoin_adjoin_left _ _ _
lemma adjoin_simple_comm (β : E) : ↑F⟮α⟯⟮β⟯ = (↑F⟮β⟯⟮α⟯ : intermediate_field F E) :=
adjoin_adjoin_comm _ _ _
-- TODO: develop the API for `subalgebra.is_field_of_algebraic` so it can be used here
lemma adjoin_simple_to_subalgebra_of_integral (hα : is_integral F α) :
(F⟮α⟯).to_subalgebra = algebra.adjoin F {α} :=
begin
apply adjoin_eq_algebra_adjoin,
intros x hx,
by_cases x = 0,
{ rw [h, inv_zero], exact subalgebra.zero_mem (algebra.adjoin F {α}) },
let ϕ := alg_equiv.adjoin_singleton_equiv_adjoin_root_minimal_polynomial F α hα,
let inv := (@adjoin_root.field F _ _ (minimal_polynomial.irreducible hα)).inv,
suffices : ϕ ⟨x, hx⟩ * inv (ϕ ⟨x, hx⟩) = 1,
{ convert subtype.mem (ϕ.symm (inv (ϕ ⟨x, hx⟩))),
refine (eq_inv_of_mul_right_eq_one _).symm,
apply_fun ϕ.symm at this,
rw [alg_equiv.map_one, alg_equiv.map_mul, alg_equiv.symm_apply_apply] at this,
rw [←subsemiring.coe_one, ←this, subsemiring.coe_mul, subtype.coe_mk] },
rw field.mul_inv_cancel (mt (λ key, _) h),
rw ← ϕ.map_zero at key,
change ↑(⟨x, hx⟩ : algebra.adjoin F {α}) = _,
rw [ϕ.injective key, submodule.coe_zero]
end
end adjoin_simple
end adjoin_def
section adjoin_intermediate_field_lattice
variables {F : Type*} [field F] {E : Type*} [field E] [algebra F E] {α : E} {S : set E}
@[simp] lemma adjoin_eq_bot_iff : adjoin F S = ⊥ ↔ S ⊆ (⊥ : intermediate_field F E) :=
by { rw [eq_bot_iff, adjoin_le_iff], refl, }
@[simp] lemma adjoin_simple_eq_bot_iff : F⟮α⟯ = ⊥ ↔ α ∈ (⊥ : intermediate_field F E) :=
by { rw adjoin_eq_bot_iff, exact set.singleton_subset_iff }
@[simp] lemma adjoin_zero : F⟮(0 : E)⟯ = ⊥ :=
adjoin_simple_eq_bot_iff.mpr (zero_mem ⊥)
@[simp] lemma adjoin_one : F⟮(1 : E)⟯ = ⊥ :=
adjoin_simple_eq_bot_iff.mpr (one_mem ⊥)
@[simp] lemma adjoin_int (n : ℤ) : F⟮(n : E)⟯ = ⊥ :=
adjoin_simple_eq_bot_iff.mpr (coe_int_mem ⊥ n)
@[simp] lemma adjoin_nat (n : ℕ) : F⟮(n : E)⟯ = ⊥ :=
adjoin_simple_eq_bot_iff.mpr (coe_int_mem ⊥ n)
section adjoin_dim
open finite_dimensional vector_space
@[simp] lemma dim_intermediate_field_eq_dim_subalgebra :
dim F (adjoin F S).to_subalgebra = dim F (adjoin F S) := rfl
@[simp] lemma findim_intermediate_field_eq_findim_subalgebra :
findim F (adjoin F S).to_subalgebra = findim F (adjoin F S) := rfl
@[simp] lemma to_subalgebra_eq_iff {K L : intermediate_field F E} :
K.to_subalgebra = L.to_subalgebra ↔ K = L :=
by { rw [subalgebra.ext_iff, intermediate_field.ext'_iff, set.ext_iff], refl }
lemma dim_adjoin_eq_one_iff : dim F (adjoin F S) = 1 ↔ S ⊆ (⊥ : intermediate_field F E) :=
by rw [←dim_intermediate_field_eq_dim_subalgebra, subalgebra.dim_eq_one_iff,
←bot_to_subalgebra, to_subalgebra_eq_iff, adjoin_eq_bot_iff]
lemma dim_adjoin_simple_eq_one_iff : dim F F⟮α⟯ = 1 ↔ α ∈ (⊥ : intermediate_field F E) :=
by { rw [dim_adjoin_eq_one_iff], exact set.singleton_subset_iff }
lemma findim_adjoin_eq_one_iff : findim F (adjoin F S) = 1 ↔ S ⊆ (⊥ : intermediate_field F E) :=
by rw [←findim_intermediate_field_eq_findim_subalgebra, subalgebra.findim_eq_one_iff,
←bot_to_subalgebra, to_subalgebra_eq_iff, adjoin_eq_bot_iff]
lemma findim_adjoin_simple_eq_one_iff : findim F F⟮α⟯ = 1 ↔ α ∈ (⊥ : intermediate_field F E) :=
by { rw [findim_adjoin_eq_one_iff], exact set.singleton_subset_iff }
/-- If `F⟮x⟯` has dimension `1` over `F` for every `x ∈ E`, then `F = E`. -/
lemma bot_eq_top_of_dim_adjoin_eq_one (h : ∀ x : E, dim F F⟮x⟯ = 1) :
(⊥ : intermediate_field F E) = ⊤ :=
begin
ext,
rw iff_true_right intermediate_field.mem_top,
exact dim_adjoin_simple_eq_one_iff.mp (h x),
end
lemma bot_eq_top_of_findim_adjoin_eq_one (h : ∀ x : E, findim F F⟮x⟯ = 1) :
(⊥ : intermediate_field F E) = ⊤ :=
begin
ext,
rw iff_true_right intermediate_field.mem_top,
exact findim_adjoin_simple_eq_one_iff.mp (h x),
end
lemma subsingleton_of_dim_adjoin_eq_one (h : ∀ x : E, dim F F⟮x⟯ = 1) :
subsingleton (intermediate_field F E) :=
subsingleton_of_bot_eq_top (bot_eq_top_of_dim_adjoin_eq_one h)
lemma subsingleton_of_findim_adjoin_eq_one (h : ∀ x : E, findim F F⟮x⟯ = 1) :
subsingleton (intermediate_field F E) :=
subsingleton_of_bot_eq_top (bot_eq_top_of_findim_adjoin_eq_one h)
/-- If `F⟮x⟯` has dimension `≤1` over `F` for every `x ∈ E`, then `F = E`. -/
lemma bot_eq_top_of_findim_adjoin_le_one [finite_dimensional F E]
(h : ∀ x : E, findim F F⟮x⟯ ≤ 1) : (⊥ : intermediate_field F E) = ⊤ :=
begin
apply bot_eq_top_of_findim_adjoin_eq_one,
exact λ x, by linarith [h x, show 0 < findim F F⟮x⟯, from findim_pos],
end
lemma subsingleton_of_findim_adjoin_le_one [finite_dimensional F E]
(h : ∀ x : E, findim F F⟮x⟯ ≤ 1) : subsingleton (intermediate_field F E) :=
subsingleton_of_bot_eq_top (bot_eq_top_of_findim_adjoin_le_one h)
end adjoin_dim
end adjoin_intermediate_field_lattice
section adjoin_integral_element
variables (F : Type*) [field F] {E : Type*} [field E] [algebra F E] {α : E}
variables {K : Type*} [field K] [algebra F K]
lemma aeval_gen_minimal_polynomial (h : is_integral F α) :
polynomial.aeval (adjoin_simple.gen F α) (minimal_polynomial h) = 0 :=
begin
ext,
convert minimal_polynomial.aeval h,
conv in (polynomial.aeval α) { rw [← adjoin_simple.algebra_map_gen F α] },
exact is_scalar_tower.algebra_map_aeval F F⟮α⟯ E _ _
end
/-- The algebra isomorphism between `adjoin_root (minimal_polynomial h)` and `F⟮α⟯`. -/
noncomputable def adjoin_root_equiv_adjoin (h : is_integral F α) :
adjoin_root (minimal_polynomial h) ≃ₐ[F] F⟮α⟯ :=
alg_equiv.of_bijective (alg_hom.mk (adjoin_root.lift (algebra_map F F⟮α⟯)
(adjoin_simple.gen F α) (aeval_gen_minimal_polynomial F h)) (ring_hom.map_one _)
(λ x y, ring_hom.map_mul _ x y) (ring_hom.map_zero _) (λ x y, ring_hom.map_add _ x y)
(by { exact λ _, adjoin_root.lift_of })) (begin
set f := adjoin_root.lift _ _ (aeval_gen_minimal_polynomial F h),
haveI := minimal_polynomial.irreducible h,
split,
{ exact ring_hom.injective f },
{ suffices : F⟮α⟯.to_subfield ≤ ring_hom.field_range ((F⟮α⟯.to_subfield.subtype).comp f),
{ exact λ x, Exists.cases_on (this (subtype.mem x)) (λ y hy, ⟨y, subtype.ext hy.2⟩) },
exact subfield.closure_le.mpr (set.union_subset (λ x hx, Exists.cases_on hx (λ y hy, ⟨y,
⟨subfield.mem_top y, by { rw [ring_hom.comp_apply, adjoin_root.lift_of], exact hy }⟩⟩))
(set.singleton_subset_iff.mpr ⟨adjoin_root.root (minimal_polynomial h),
⟨subfield.mem_top (adjoin_root.root (minimal_polynomial h)),
by { rw [ring_hom.comp_apply, adjoin_root.lift_root], refl }⟩⟩)) } end)
lemma adjoin_root_equiv_adjoin_apply_root (h : is_integral F α) :
adjoin_root_equiv_adjoin F h (adjoin_root.root (minimal_polynomial h)) =
adjoin_simple.gen F α :=
begin
refine adjoin_root.lift_root,
{ exact minimal_polynomial h },
{ exact aeval_gen_minimal_polynomial F h }
end
/-- Algebra homomorphisms `F⟮α⟯ →ₐ[F] K` are in bijection with the set of roots
of `minimal_polynomial α` in `K`. -/
noncomputable def alg_hom_adjoin_integral_equiv (h : is_integral F α) :
(F⟮α⟯ →ₐ[F] K) ≃ {x // x ∈ ((minimal_polynomial h).map (algebra_map F K)).roots} :=
let ϕ := adjoin_root_equiv_adjoin F h,
swap1 : (F⟮α⟯ →ₐ[F] K) ≃ (adjoin_root (minimal_polynomial h) →ₐ[F] K) :=
{ to_fun := λ f, f.comp ϕ.to_alg_hom,
inv_fun := λ f, f.comp ϕ.symm.to_alg_hom,
left_inv := λ _, by { ext, simp only [alg_equiv.coe_alg_hom,
alg_equiv.to_alg_hom_eq_coe, alg_hom.comp_apply, alg_equiv.apply_symm_apply]},
right_inv := λ _, by { ext, simp only [alg_equiv.symm_apply_apply,
alg_equiv.coe_alg_hom, alg_equiv.to_alg_hom_eq_coe, alg_hom.comp_apply] } },
swap2 := adjoin_root.equiv F K (minimal_polynomial h) (minimal_polynomial.ne_zero h) in
swap1.trans swap2
/-- Fintype structure on the set of algebra homomorphisms `F⟮α⟯ →ₐ[F] K`. -/
noncomputable def fintype_of_alg_hom_adjoin_integral (h : is_integral F α) :
fintype (F⟮α⟯ →ₐ[F] K) :=
fintype.of_equiv _ (alg_hom_adjoin_integral_equiv F h).symm
lemma card_alg_hom_adjoin_integral (h : is_integral F α) (h_sep : (minimal_polynomial h).separable)
(h_splits : (minimal_polynomial h).splits (algebra_map F K)) :
@fintype.card (F⟮α⟯ →ₐ[F] K) (fintype_of_alg_hom_adjoin_integral F h) =
(minimal_polynomial h).nat_degree :=
begin
let s := ((minimal_polynomial h).map (algebra_map F K)).roots.to_finset,
have H := λ x, multiset.mem_to_finset,
rw [fintype.card_congr (alg_hom_adjoin_integral_equiv F h), fintype.card_of_subtype s H,
polynomial.nat_degree_eq_card_roots h_splits, multiset.to_finset_card_of_nodup],
exact polynomial.nodup_roots ((polynomial.separable_map (algebra_map F K)).mpr h_sep),
end
end adjoin_integral_element
section induction
variables {F : Type*} [field F] {E : Type*} [field E] [algebra F E]
/-- An intermediate field `S` is finitely generated if there exists `t : finset E` such that
`intermediate_field.adjoin F t = S`. -/
def fg (S : intermediate_field F E) : Prop := ∃ (t : finset E), adjoin F ↑t = S
lemma fg_adjoin_finset (t : finset E) : (adjoin F (↑t : set E)).fg :=
⟨t, rfl⟩
theorem fg_def {S : intermediate_field F E} : S.fg ↔ ∃ t : set E, set.finite t ∧ adjoin F t = S :=
⟨λ ⟨t, ht⟩, ⟨↑t, set.finite_mem_finset t, ht⟩,
λ ⟨t, ht1, ht2⟩, ⟨ht1.to_finset, by rwa set.finite.coe_to_finset⟩⟩
theorem fg_bot : (⊥ : intermediate_field F E).fg :=
⟨∅, adjoin_empty F E⟩
lemma fg_of_fg_to_subalgebra (S : intermediate_field F E)
(h : S.to_subalgebra.fg) : S.fg :=
begin
cases h with t ht,
exact ⟨t, (eq_adjoin_of_eq_algebra_adjoin _ _ _ ht.symm).symm⟩
end
lemma fg_of_noetherian (S : intermediate_field F E)
[is_noetherian F E] : S.fg :=
S.fg_of_fg_to_subalgebra S.to_subalgebra.fg_of_noetherian
lemma induction_on_adjoin_finset (S : finset E) (P : intermediate_field F E → Prop) (base : P ⊥)
(ih : ∀ (K : intermediate_field F E) (x ∈ S), P K → P ↑K⟮x⟯) : P (adjoin F ↑S) :=
begin
apply finset.induction_on' S,
{ exact base },
{ intros a s h1 _ _ h4,
rw [finset.coe_insert, set.insert_eq, set.union_comm, ←adjoin_adjoin_left],
exact ih (adjoin F s) a h1 h4 }
end
lemma induction_on_adjoin_fg (P : intermediate_field F E → Prop)
(base : P ⊥) (ih : ∀ (K : intermediate_field F E) (x : E), P K → P ↑K⟮x⟯)
(K : intermediate_field F E) (hK : K.fg) : P K :=
begin
obtain ⟨S, rfl⟩ := hK,
exact induction_on_adjoin_finset S P base (λ K x _ hK, ih K x hK),
end
lemma induction_on_adjoin [fd : finite_dimensional F E] (P : intermediate_field F E → Prop)
(base : P ⊥) (ih : ∀ (K : intermediate_field F E) (x : E), P K → P ↑K⟮x⟯)
(K : intermediate_field F E) : P K :=
induction_on_adjoin_fg P base ih K K.fg_of_noetherian
end induction
end intermediate_field
|
5b86127f6b12e4ae2872ffa15a8859fc96c95e3d
|
82b86ba2ae0d5aed0f01f49c46db5afec0eb2bd7
|
/stage0/src/Lean/Elab/Quotation.lean
|
049e448e6fa3bad4cb9902dac8afe4abc31d87e5
|
[
"Apache-2.0"
] |
permissive
|
banksonian/lean4
|
3a2e6b0f1eb63aa56ff95b8d07b2f851072d54dc
|
78da6b3aa2840693eea354a41e89fc5b212a5011
|
refs/heads/master
| 1,673,703,624,165
| 1,605,123,551,000
| 1,605,123,551,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 15,474
|
lean
|
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
Elaboration of syntax quotations as terms and patterns (in `match_syntax`). See also `./Hygiene.lean` for the basic
hygiene workings and data types.
-/
import Lean.Syntax
import Lean.ResolveName
import Lean.Elab.Term
namespace Lean.Elab.Term.Quotation
open Lean.Syntax (isQuot isAntiquot)
open Meta
-- Antiquotations can be escaped as in `$$x`, which is useful for nesting macros.
def isEscapedAntiquot (stx : Syntax) : Bool :=
!stx[1].getArgs.isEmpty
def unescapeAntiquot (stx : Syntax) : Syntax :=
if isAntiquot stx then
stx.setArg 1 $ mkNullNode stx[1].getArgs.pop
else
stx
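/-- Return the term of an antiquotation: the bare identifier in `$x`, or the enclosed term
in `$(e)`. -/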
def getAntiquotTerm (stx : Syntax) : Syntax :=
let e := stx[2]
if e.isIdent then e
else
-- `e` is from `"(" >> termParser >> ")"`
e[1]
def antiquotKind? : Syntax → Option SyntaxNodeKind
| Syntax.node (Name.str k "antiquot" _) args =>
if args[3].isOfKind `antiquotName then some k
else
-- we treat all antiquotations where the kind was left implicit (`$e`) the same (see `elimAntiquotChoices`)
some Name.anonymous
| _ => none
-- `$e*` is an antiquotation "splice" matching an arbitrary number of syntax nodes
def isAntiquotSplice (stx : Syntax) : Bool :=
isAntiquot stx && stx[4].getOptional?.isSome
-- If any item of a `many` node is an antiquotation splice, its result should
-- be substituted into the `many` node's children
def isAntiquotSplicePat (stx : Syntax) : Bool :=
stx.isOfKind nullKind && stx.getArgs.any fun arg => isAntiquotSplice arg && !isEscapedAntiquot arg
/-- A term like `($e) is actually ambiguous: the antiquotation could be of kind `term`,
or `ident`, or ... . But it shouldn't really matter because antiquotations without
explicit kinds behave the same at runtime. So we replace `choice` nodes that contain
at least one implicit antiquotation with that antiquotation. -/
private partial def elimAntiquotChoices : Syntax → Syntax
| Syntax.node `choice args => match args.find? fun arg => antiquotKind? arg == Name.anonymous with
| some anti => anti
| none => Syntax.node `choice $ args.map elimAntiquotChoices
| Syntax.node k args => Syntax.node k $ args.map elimAntiquotChoices
| stx => stx
-- Elaborate the content of a syntax quotation term
private partial def quoteSyntax : Syntax → TermElabM Syntax
| Syntax.ident info rawVal val preresolved => do
-- Add global scopes at compilation time (now), add macro scope at runtime (in the quotation).
-- See the paper for details.
let r ← resolveGlobalName val
let preresolved := r ++ preresolved
let val := quote val
-- `scp` is bound in stxQuot.expand
`(Syntax.ident (SourceInfo.mk none none none) $(quote rawVal) (addMacroScope mainModule $val scp) $(quote preresolved))
-- if antiquotation, insert contents as-is, else recurse
| stx@(Syntax.node k _) => do
if isAntiquot stx && !isEscapedAntiquot stx then
-- splices must occur in a `many` node
if isAntiquotSplice stx then throwErrorAt stx "unexpected antiquotation splice"
else pure $ getAntiquotTerm stx
else
let empty ← `(Array.empty);
-- if escaped antiquotation, decrement by one escape level
let stx := unescapeAntiquot stx
let args ← stx.getArgs.foldlM (fun args arg =>
if k == nullKind && isAntiquotSplice arg then
-- antiquotation splice pattern: inject args array
`(Array.append $args $(getAntiquotTerm arg))
else do
let arg ← quoteSyntax arg;
`(Array.push $args $arg)) empty
`(Syntax.node $(quote k) $args)
| Syntax.atom info val =>
`(Syntax.atom (SourceInfo.mk none none none) $(quote val))
| Syntax.missing => unreachable!
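/-- Expand a syntax quotation into a term that constructs the quoted syntax tree at runtime,
binding the current macro scope and main module for hygiene. -/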
def stxQuot.expand (stx : Syntax) : TermElabM Syntax := do
let quoted := stx[1]
/- Syntax quotations are monadic values depending on the current macro scope. For efficiency, we bind
the macro scope once for each quotation, then build the syntax tree in a completely pure computation
depending on this binding. Note that regular function calls do not introduce a new macro scope (i.e.
we preserve referential transparency), so we can refer to this same `scp` inside `quoteSyntax` by
including it literally in a syntax quotation. -/
-- TODO: simplify to `(do scp ← getCurrMacroScope; pure $(quoteSyntax quoted))
let stx ← quoteSyntax (elimAntiquotChoices quoted);
`(Bind.bind getCurrMacroScope (fun scp => Bind.bind getMainModule (fun mainModule => Pure.pure $stx)))
/- NOTE: It may seem like the newly introduced binding `scp` may accidentally
capture identifiers in an antiquotation introduced by `quoteSyntax`. However,
note that the syntax quotation above enjoys the same hygiene guarantees as
anywhere else in Lean; that is, we implement hygienic quotations by making
use of the hygienic quotation support of the bootstrapped Lean compiler!
Aside: While this might sound "dangerous", it is in fact less reliant on a
"chain of trust" than other bootstrapping parts of Lean: because this
implementation itself never uses `scp` (or any other identifier) both inside
and outside quotations, it can actually correctly be compiled by an
unhygienic (but otherwise correct) implementation of syntax quotations. As
long as it is then compiled again with the resulting executable (i.e. up to
stage 2), the result is a correct hygienic implementation. In this sense the
implementation is "self-stabilizing". It was in fact originally compiled
by an unhygienic prototype implementation. -/
@[builtinTermElab Parser.Level.quot] def elabLevelQuot : TermElab := adaptExpander stxQuot.expand
@[builtinTermElab Parser.Term.quot] def elabTermQuot : TermElab := adaptExpander stxQuot.expand
@[builtinTermElab Parser.Term.funBinder.quot] def elabfunBinderQuot : TermElab := adaptExpander stxQuot.expand
@[builtinTermElab Parser.Tactic.quot] def elabTacticQuot : TermElab := adaptExpander stxQuot.expand
@[builtinTermElab Parser.Tactic.quotSeq] def elabTacticQuotSeq : TermElab := adaptExpander stxQuot.expand
@[builtinTermElab Parser.Term.stx.quot] def elabStxQuot : TermElab := adaptExpander stxQuot.expand
@[builtinTermElab Parser.Term.doElem.quot] def elabDoElemQuot : TermElab := adaptExpander stxQuot.expand
/- match_syntax -/
-- an "alternative" of patterns plus right-hand side
private abbrev Alt := List Syntax × Syntax
/-- Information on a pattern's head that influences the compilation of a single
match step. -/
structure HeadInfo :=
-- Node kind to match, if any
(kind : Option SyntaxNodeKind := none)
-- Nested patterns for each argument, if any. In a single match step, we only
-- check that the arity matches. The arity is usually implied by the node kind,
-- but not in the case of `many` nodes.
(argPats : Option (Array Syntax) := none)
-- Function to apply to the right-hand side in case the match succeeds. Used to
-- bind pattern variables.
(rhsFn : Syntax → TermElabM Syntax := pure)
instance : Inhabited HeadInfo := ⟨{}⟩
/-- `h1.generalizes h2` iff h1 is equal to or more general than h2, i.e. it matches all nodes
h2 matches. This induces a partial ordering. -/
def HeadInfo.generalizes : HeadInfo → HeadInfo → Bool
| { kind := none, .. }, _ => true
| { kind := some k1, argPats := none, .. },
{ kind := some k2, .. } => k1 == k2
| { kind := some k1, argPats := some ps1, .. },
{ kind := some k2, argPats := some ps2, .. } => k1 == k2 && ps1.size == ps2.size
| _, _ => false
private def getHeadInfo (alt : Alt) : HeadInfo :=
let pat := alt.fst.head!;
let unconditional (rhsFn) := { rhsFn := rhsFn : HeadInfo };
-- variable pattern
if pat.isIdent then unconditional $ fun rhs => `(let $pat := discr; $rhs)
-- wildcard pattern
else if pat.isOfKind `Lean.Parser.Term.hole then unconditional pure
-- quotation pattern
else if isQuot pat then
let quoted := pat[1]
if quoted.isAtom then
-- We assume that atoms are uniquely determined by the node kind and never have to be checked
unconditional pure
else if isAntiquot quoted && !isEscapedAntiquot quoted then
-- quotation contains a single antiquotation
let k := antiquotKind? quoted;
-- Antiquotation kinds like `$id:ident` influence the parser, but also need to be considered by
-- match_syntax (but not by quotation terms). For example, `($id:ident) and `($e) are not
-- distinguishable without checking the kind of the node to be captured. Note that some
-- antiquotations like the latter one for terms do not correspond to any actual node kind
-- (signified by `k == Name.anonymous`), so we would only check for `ident` here.
--
-- if stx.isOfKind `ident then
-- let id := stx; ...
-- else
-- let e := stx; ...
let kind := if k == Name.anonymous then none else k
let anti := getAntiquotTerm quoted
-- Splices should only appear inside a nullKind node, see next case
if isAntiquotSplice quoted then unconditional $ fun _ => throwErrorAt quoted "unexpected antiquotation splice"
else if anti.isIdent then { kind := kind, rhsFn := fun rhs => `(let $anti := discr; $rhs) }
else unconditional fun _ => throwErrorAt anti ("match_syntax: antiquotation must be variable " ++ toString anti)
else if isAntiquotSplicePat quoted && quoted.getArgs.size == 1 then
-- quotation is a single antiquotation splice => bind args array
let anti := getAntiquotTerm quoted[0]
unconditional fun rhs => `(let $anti := Syntax.getArgs discr; $rhs)
-- TODO: support for more complex antiquotation splices
else
-- not an antiquotation or escaped antiquotation: match head shape
let quoted := unescapeAntiquot quoted
let argPats := quoted.getArgs.map (pat.setArg 1);
{ kind := quoted.getKind, argPats := argPats }
else
unconditional $ fun _ => throwErrorAt pat ("match_syntax: unexpected pattern kind " ++ toString pat)
-- Assuming that the first pattern of the alternative is taken, replace it with patterns (if any) for its
-- child nodes.
-- Ex: `($a + (- $b)) => `($a), `(+), `(- $b)
-- Note: The atom pattern `(+) will be discarded in a later step
private def explodeHeadPat (numArgs : Nat) : HeadInfo × Alt → TermElabM Alt
| (info, (pat::pats, rhs)) => do
let newPats := match info.argPats with
| some argPats => argPats.toList
| none => List.replicate numArgs $ Unhygienic.run `(_)
let rhs ← info.rhsFn rhs
pure (newPats ++ pats, rhs)
| _ => unreachable!
private partial def compileStxMatch : List Syntax → List Alt → TermElabM Syntax
| [], ([], rhs)::_ => pure rhs -- nothing left to match
| _, [] => throwError "non-exhaustive 'match_syntax'"
| discr::discrs, alts => do
let alts := (alts.map getHeadInfo).zip alts;
-- Choose a most specific pattern, i.e. a minimal element according to `generalizes`.
-- If there are multiple minimal elements, the choice does not matter.
let (info, alt) := alts.tail!.foldl (fun (min : HeadInfo × Alt) (alt : HeadInfo × Alt) => if min.1.generalizes alt.1 then alt else min) alts.head!;
-- introduce pattern matches on the discriminant's children if there are any nested patterns
let newDiscrs ← match info.argPats with
| some pats => (List.range pats.size).mapM fun i => `(Syntax.getArg discr $(quote i))
| none => pure []
-- collect matching alternatives and explode them
let yesAlts := alts.filter fun (alt : HeadInfo × Alt) => alt.1.generalizes info
let yesAlts ← yesAlts.mapM $ explodeHeadPat newDiscrs.length
-- NOTE: use fresh macro scopes for recursive call so that different `discr`s introduced by the quotations below do not collide
let yes ← withFreshMacroScope $ compileStxMatch (newDiscrs ++ discrs) yesAlts
let some kind ← pure info.kind
-- unconditional match step
| `(let discr := $discr; $yes)
-- conditional match step
let noAlts := (alts.filter $ fun (alt : HeadInfo × Alt) => !info.generalizes alt.1).map (·.2)
let no ← withFreshMacroScope $ compileStxMatch (discr::discrs) noAlts
let cond ← match info.argPats with
| some pats => `(and (Syntax.isOfKind discr $(quote kind)) (BEq.beq (Array.size (Syntax.getArgs discr)) $(quote pats.size)))
| none => `(Syntax.isOfKind discr $(quote kind))
`(let discr := $discr; ite (Eq $cond true) $yes $no)
| _, _ => unreachable!
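/- Added sketch (illustrative, not the exact generated code): each conditional match step
   produced above has the shape
     let discr := e;
     ite (Eq (and (Syntax.isOfKind discr k) (BEq.beq (Array.size (Syntax.getArgs discr)) n)) true)
       yes  -- recurse into the children, `let`-binding each antiquotation variable
       no   -- try the remaining, less specific alternatives
   so a `match_syntax` compiles to a cascade of kind/arity tests. -/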
private partial def getPatternVarsAux : Syntax → List Syntax
| stx@(Syntax.node k args) =>
if isAntiquot stx && !isEscapedAntiquot stx then
let anti := getAntiquotTerm stx
if anti.isIdent then [anti]
else []
else
List.join $ args.toList.map getPatternVarsAux
| _ => []
-- Get all pattern vars (as `Syntax.ident`s) in `stx`
partial def getPatternVars (stx : Syntax) : List Syntax :=
if isQuot stx then do
let quoted := stx.getArg 1;
getPatternVarsAux quoted
else if stx.isIdent then
[stx]
else []
-- Transform alternatives by let-binding all right-hand sides outside the match_syntax in order to
-- prevent code duplication during match_syntax compilation
private def letBindRhss (cont : List Alt → TermElabM Syntax) : List Alt → List Alt → TermElabM Syntax
| [], altsRev' => cont altsRev'.reverse
| (pats, rhs)::alts, altsRev' => do
let vars := List.join $ pats.map getPatternVars
match vars with
-- no antiquotations => introduce Unit parameter to preserve evaluation order
| [] =>
-- NOTE: references binding below
let rhs' ← `(rhs ())
-- NOTE: new macro scope so that introduced bindings do not collide
let stx ← withFreshMacroScope $ letBindRhss cont alts ((pats, rhs')::altsRev')
`(let rhs := fun _ => $rhs; $stx)
| _ =>
-- rhs ← `(fun $vars* => $rhs)
let rhs := Syntax.node `Lean.Parser.Term.fun #[mkAtom "fun", Syntax.node `null vars.toArray, mkAtom "=>", rhs]
let rhs' ← `(rhs)
let stx ← withFreshMacroScope $ letBindRhss cont alts ((pats, rhs')::altsRev')
`(let rhs := $rhs; $stx)
def match_syntax.expand (stx : Syntax) : TermElabM Syntax := do
let discr := stx[1]
let alts := stx[3][1]
let alts ← alts.getSepArgs.mapM $ fun alt => do
let pats := alt.getArg 0;
let pat ←
if pats.getArgs.size == 1 then pure pats[0]
else throwError "match_syntax: expected exactly one pattern per alternative"
let pat := if isQuot pat then pat.setArg 1 $ elimAntiquotChoices $ pat[1] else pat
match pat.find? $ fun stx => stx.getKind == choiceKind with
| some choiceStx => throwErrorAt choiceStx "invalid pattern, nested syntax has multiple interpretations"
| none =>
let rhs := alt.getArg 2
pure ([pat], rhs)
-- letBindRhss (compileStxMatch stx [discr]) alts.toList []
compileStxMatch [discr] alts.toList
@[builtinTermElab «match_syntax»] def elabMatchSyntax : TermElab :=
adaptExpander match_syntax.expand
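/- Added usage sketch (hypothetical client code; nothing outside this file is guaranteed):
     match_syntax stx with
     | `($x + $y) => `($y + $x)
     | _          => pure stx
   Such a term is compiled by `match_syntax.expand` above into the kind/arity cascade built by
   `compileStxMatch`. -/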
end Lean.Elab.Term.Quotation
| 063704d01ef451c2db7d96ce1b08d6c975e1cbc9 | 86f6f4f8d827a196a32bfc646234b73328aeb306 | /examples/logic/unnamed_1999.lean | c504902337e35bf9bbfab920c19f3b51427b17f8 | [] | no_license | jamescheuk91/mathematics_in_lean | 09f1f87d2b0dce53464ff0cbe592c568ff59cf5e | 4452499264e2975bca2f42565c0925506ba5dda3 | refs/heads/master | 1,679,716,410,967 | 1,613,957,947,000 | 1,613,957,947,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 214 | lean |
import data.real.basic
-- BEGIN
example {x : ℝ} (h : x ≠ 0) : x < 0 ∨ x > 0 :=
begin
rcases lt_trichotomy x 0 with xlt | xeq | xgt,
{ left, exact xlt },
{ contradiction },
right, exact xgt
end
-- END
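-- Added illustration (not part of the original snippet): the same statement also follows
-- directly from `ne.lt_or_lt`, assuming that lemma is available with these imports.
example {x : ℝ} (h : x ≠ 0) : x < 0 ∨ x > 0 :=
h.lt_or_lt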
| b94f0576484e97dd738008b1d617ffbf7c49c171 | 4727251e0cd73359b15b664c3170e5d754078599 | /src/data/matrix/hadamard.lean | 2985d309e17d2ac5a57d70e81fe88953baac548a | ["Apache-2.0"] | permissive | Vierkantor/mathlib | 0ea59ac32a3a43c93c44d70f441c4ee810ccceca | 83bc3b9ce9b13910b57bda6b56222495ebd31c2f | refs/heads/master | 1,658,323,012,449 | 1,652,256,003,000 | 1,652,256,003,000 | 209,296,341 | 0 | 1 | Apache-2.0 | 1,568,807,655,000 | 1,568,807,655,000 | null | UTF-8 | Lean | false | false | 3,387 | lean |
/-
Copyright (c) 2021 Lu-Ming Zhang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Lu-Ming Zhang
-/
import linear_algebra.matrix.trace
/-!
# Hadamard product of matrices
This file defines the Hadamard product `matrix.hadamard`
and contains basic properties about them.
## Main definition
- `matrix.hadamard`: defines the Hadamard product,
which is the pointwise product of two matrices of the same size.
## Notation
* `⊙`: the Hadamard product `matrix.hadamard`;
## References
* <https://en.wikipedia.org/wiki/hadamard_product_(matrices)>
## Tags
hadamard product, hadamard
-/
variables {α β γ m n : Type*}
variables {R : Type*}
namespace matrix
open_locale matrix big_operators
/-- `matrix.hadamard` defines the Hadamard product,
which is the pointwise product of two matrices of the same size.-/
@[simp]
def hadamard [has_mul α] (A : matrix m n α) (B : matrix m n α) : matrix m n α
| i j := A i j * B i j
localized "infix ` ⊙ `:100 := matrix.hadamard" in matrix
section basic_properties
variables (A : matrix m n α) (B : matrix m n α) (C : matrix m n α)
/- commutativity -/
lemma hadamard_comm [comm_semigroup α] : A ⊙ B = B ⊙ A :=
ext $ λ _ _, mul_comm _ _
/- associativity -/
lemma hadamard_assoc [semigroup α] : A ⊙ B ⊙ C = A ⊙ (B ⊙ C) :=
ext $ λ _ _, mul_assoc _ _ _
/- distributivity -/
lemma hadamard_add [distrib α] : A ⊙ (B + C) = A ⊙ B + A ⊙ C :=
ext $ λ _ _, left_distrib _ _ _
lemma add_hadamard [distrib α] : (B + C) ⊙ A = B ⊙ A + C ⊙ A :=
ext $ λ _ _, right_distrib _ _ _
/- scalar multiplication -/
section scalar
@[simp] lemma smul_hadamard [has_mul α] [has_scalar R α] [is_scalar_tower R α α] (k : R) :
(k • A) ⊙ B = k • A ⊙ B :=
ext $ λ _ _, smul_mul_assoc _ _ _
@[simp] lemma hadamard_smul [has_mul α] [has_scalar R α] [smul_comm_class R α α] (k : R):
A ⊙ (k • B) = k • A ⊙ B :=
ext $ λ _ _, mul_smul_comm _ _ _
end scalar
section zero
variables [mul_zero_class α]
@[simp] lemma hadamard_zero : A ⊙ (0 : matrix m n α) = 0 :=
ext $ λ _ _, mul_zero _
@[simp] lemma zero_hadamard : (0 : matrix m n α) ⊙ A = 0 :=
ext $ λ _ _, zero_mul _
end zero
section one
variables [decidable_eq n] [mul_zero_one_class α]
variables (M : matrix n n α)
lemma hadamard_one : M ⊙ (1 : matrix n n α) = diagonal (λ i, M i i) :=
by { ext, by_cases h : i = j; simp [h] }
lemma one_hadamard : (1 : matrix n n α) ⊙ M = diagonal (λ i, M i i) :=
by { ext, by_cases h : i = j; simp [h] }
end one
section diagonal
variables [decidable_eq n] [mul_zero_class α]
lemma diagonal_hadamard_diagonal (v : n → α) (w : n → α) :
diagonal v ⊙ diagonal w = diagonal (v * w) :=
ext $ λ _ _, (apply_ite2 _ _ _ _ _ _).trans (congr_arg _ $ zero_mul 0)
end diagonal
section trace
variables [fintype m] [fintype n]
variables (R) [semiring α] [semiring R] [module R α]
lemma sum_hadamard_eq : ∑ (i : m) (j : n), (A ⊙ B) i j = trace (A ⬝ Bᵀ) :=
rfl
lemma dot_product_vec_mul_hadamard [decidable_eq m] [decidable_eq n] (v : m → α) (w : n → α) :
dot_product (vec_mul v (A ⊙ B)) w = trace (diagonal v ⬝ A ⬝ (B ⬝ diagonal w)ᵀ) :=
begin
rw [←sum_hadamard_eq, finset.sum_comm],
simp [dot_product, vec_mul, finset.sum_mul, mul_assoc],
end
end trace
end basic_properties
end matrix
| e68800067c39c705650b34492ced926840e85fa7 | cf3e23df8bb565635298b2fb52d319f281f42bf6 | /src/compact_unit_ball.lean | fbb23f6b4dca38ff3a2b0b0a037c8a2c2ea4e2db | [] | no_license | PatrickMassot/compact_unit_ball | 435545cd83da49bddeff77c6e3064236c5aa01e2 | 85a9dc82b73b99334adf7ed7cd595c593c6f4aac | refs/heads/lean-3.4.2 | 1,601,137,103,131 | 1,575,623,707,000 | 1,575,623,707,000 | 226,289,799 | 0 | 0 | null | 1,575,623,668,000 | 1,575,623,668,000 | null | UTF-8 | Lean | false | false | 12,525 | lean |
import analysis.normed_space.banach
import analysis.normed_space.basic
import linear_algebra.basic
import linear_algebra.basis
import linear_algebra.dimension
import linear_algebra.finite_dimensional
import topology.subset_properties
import set_theory.cardinal
import data.real.basic
import topology.sequences
import order.bounded_lattice
import analysis.specific_limits
import analysis.normed_space.finite_dimension
noncomputable theory
open_locale classical
local attribute [instance, priority 20000] nat.has_zero nat.has_one real.domain
local attribute [instance, priority 10000] mul_action.to_has_scalar distrib_mul_action.to_mul_action
semimodule.to_distrib_mul_action module.to_semimodule vector_space.to_module normed_space.to_vector_space
ring.to_monoid normed_ring.to_ring normed_field.to_normed_ring add_group.to_add_monoid
add_comm_group.to_add_group normed_group.to_add_comm_group ring.to_semiring add_comm_group.to_add_comm_monoid
normed_field.to_discrete_field normed_field.to_has_norm nondiscrete_normed_field.to_normed_field
zero_ne_one_class.to_has_zero zero_ne_one_class.to_has_one domain.to_zero_ne_one_class
division_ring.to_domain field.to_division_ring discrete_field.to_field
set_option class.instance_max_depth 100
open metric set
universe u
-- completeness of k is only assumed so we can use the library to prove that
-- the span of a finite set is closed. Can someone find a non-complete
-- field k and a normed vector space V over k in which there is a
-- finite-dimensional subspace which is not closed?
variables
{k : Type} [nondiscrete_normed_field k] [complete_space k]
{V : Type} [normed_group V] [normed_space k V]
lemma nontrivial_norm_gt_one: ∃ x : k, x ≠ 0 ∧ norm x > 1 :=
begin
cases normed_field.exists_one_lt_norm k with x hx,
refine ⟨x, _, hx⟩,
intro hzero,
rw [hzero, norm_zero] at hx,
linarith,
end
lemma nontrivial_arbitrarily_small_norm
{e : ℝ} (he : e > 0) : ∃ x : k, x ≠ 0 ∧ norm x < e :=
begin
rcases normed_field.exists_norm_lt k he with ⟨x, hx₁, hx₂⟩,
refine ⟨x, _, hx₂⟩,
intro xzero,
rw [xzero, norm_zero] at hx₁,
linarith
end
lemma nontrivial_norm_lt_one: ∃ x : k, x ≠ 0 ∧ norm x < 1 :=
nontrivial_arbitrarily_small_norm (by linarith)
lemma ball_span_ash {A : set V}
(hyp : ∀ v : V, norm v ≤ 1 → v ∈ submodule.span k A) (v : V) :
v ∈ submodule.span k A :=
begin
by_cases v0 : v = 0,
{ rw v0,
exact submodule.zero_mem _ },
{ have norm_pos : 0 < norm v, from (norm_pos_iff v).mpr v0,
obtain ⟨x, hx₁, hx₂⟩ : ∃ x : k, x ≠ 0 ∧ norm x < 1 / norm v,
from nontrivial_arbitrarily_small_norm (one_div_pos_of_pos norm_pos),
rw ← submodule.smul_mem_iff (submodule.span k A) hx₁,
apply hyp (x • v),
rw [norm_smul],
exact le_of_lt (mul_lt_of_lt_div norm_pos hx₂) }
end
/-- If A spans the closed unit ball then it spans all of V -/
lemma ball_span {A : set V} (H : (closed_ball 0 1 : set V) ⊆ submodule.span k A) : ∀ v : V, v ∈ submodule.span k A :=
begin
apply ball_span_ash,
intros v hv,
rw ← dist_zero_right at hv,
exact H hv,
end
theorem finite_dimensional_span_of_finite {A : set V} (hA : finite A) :
finite_dimensional k ↥(submodule.span k A) :=
begin
apply is_noetherian_of_fg_of_noetherian,
exact submodule.fg_def.2 ⟨A, hA, rfl⟩,
end
-- the span of a finite set is (sequentially) closed
lemma finite_span_seq_closed
(A : set V) : finite A → is_seq_closed (↑(submodule.span k A) : set V) :=
begin
intro h_fin,
apply is_seq_closed_of_is_closed,
haveI : finite_dimensional k ↥(submodule.span k A) := finite_dimensional_span_of_finite h_fin,
exact submodule.closed_of_finite_dimensional _,
end
-- turns a cover into a choice function
lemma cover_to_func (A X : set V) {r : ℝ} (hX : X ⊆ (⋃a ∈ A, ball a r))
(a : V) (ha : a ∈ A) :
∃(f : V → V), ∀x, f x ∈ A ∧ (x ∈ X → x ∈ ball (f x) r) :=
begin
classical,
have : ∀(x : V), ∃a, a ∈ A ∧ (x ∈ X → x ∈ ball a r) :=
begin
assume x,
by_cases h : x ∈ X,
{ simpa [h] using hX h },
{ exact ⟨a, ha, by simp [h]⟩ }
end,
choose f hf using this,
exact ⟨f, λx, hf x⟩
end
-- if the closed unit ball is compact, then there is a finite set A inside it and
-- a function which, to each point of the ball, assigns a center in A of a ball of radius 1/∥r∥ containing it
lemma compact_choice_func (hc : compact (closed_ball 0 1 : set V))
{r : k} (hr : norm r > 0) :
∃ A : set V, A ⊆ (closed_ball 0 1 : set V) ∧ finite A ∧
∃ f : V → V, ∀ x : V, f x ∈ A ∧ (x ∈ (closed_ball 0 1 : set V)
→ x ∈ ball (f x) (1/norm r)) :=
begin
let B : set V := closed_ball 0 1,
have cover : B ⊆ ⋃ a ∈ B, ball a (1 / norm r),
{
intros x hx,
simp,
use x,
rw dist_self,
split,
simp at hx,
exact hx,
norm_num,
exact hr,
},
obtain ⟨A, A_sub, A_fin, HcoverA⟩ :
∃ A ⊆ B, finite A ∧ B ⊆ ⋃ a ∈ A, ball a (1/ norm r) :=
compact_elim_finite_subcover_image hc (by simp[is_open_ball]) cover,
-- need that A is non-empty to construct a choice function!
have x_in : (0 : V) ∈ B,
simp,
norm_num,
obtain ⟨a, a_in, ha⟩ : ∃ a ∈ A, dist (0 : V) a < 1/ norm r,
by simpa using HcoverA x_in,
have hfunc := cover_to_func A B HcoverA a a_in,
existsi A,
split,
exact A_sub,
split,
exact A_fin,
exact hfunc,
end
def aux_seq (v : V) (x : k) (f : V → V) : ℕ → V
| 0 := v
| (n + 1) := x • (aux_seq n - f (aux_seq n))
def partial_sum (f : ℕ → V) : ℕ → V
| 0 := f 0
| (n + 1) := f (n + 1) + partial_sum n
def approx_seq (v : V) (x : k) (f : V → V) : ℕ → V :=
partial_sum (λ n : ℕ, (1/x)^n • f(aux_seq v x f n))
-- if a sequence w n satisfies the bound ∥v - w n∥ ≤ e^(n+1), where e = 1/∥x∥ satisfies 0 < e < 1,
-- then w n converges to v
lemma bound_power_convergence (w : ℕ → V) {v : V}
{x : k} (hx : norm x > 1)
(h : ∀ n : ℕ, norm (v - w n) ≤ (1 / norm x)^(n + 1)) :
filter.tendsto w filter.at_top (nhds v) :=
begin
have hlt1 : 1 / norm x < 1,
{
rw div_lt_one_iff_lt,
exact hx,
apply lt_trans zero_lt_one,
exact hx,
},
have hpos : 1 / norm x > 0,
{
norm_num,
apply lt_trans zero_lt_one,
exact hx,
},
have H : ∀ n : ℕ, norm (w n - v) ≤ (1 / norm x)^n,
intro n,
rw <- dist_eq_norm,
rw dist_comm,
rw dist_eq_norm,
apply le_trans,
exact h n,
rw pow_succ,
have h1 : (1 / norm x) * (1 / norm x)^n ≤ 1 * (1 / norm x)^n,
rw mul_le_mul_right,
rw le_iff_eq_or_lt,
right,
exact hlt1,
apply pow_pos,
exact hpos,
rw one_mul at h1,
exact h1,
rw tendsto_iff_norm_tendsto_zero,
apply squeeze_zero,
intro t,
exact norm_nonneg (w t - v),
exact H,
apply tendsto_pow_at_top_nhds_0_of_lt_1,
rw le_iff_eq_or_lt,
right,
exact hpos,
exact hlt1,
end
-- the approximation sequence (w n below) is contained in the span of A
-- this is true since by construction w n is a linear combination
-- of the elements in A
lemma approx_in_span (A : set V) (v : V) (x : k) (f : V → V)
(hf : ∀ x : V, f x ∈ A):
∀ n : ℕ, approx_seq v x f n ∈ submodule.span k A :=
begin
let w := approx_seq v x f,
have hw : w = approx_seq v x f,
refl,
intro n,
rw submodule.mem_span,
intros p hp,
induction n with n hn,
{
have h1 : w 0 = (1/x)^0 • f(v), refl,
simp at h1,
rw <- hw,
rw h1,
have h2 : f v ∈ A := hf v,
exact hp h2,
},{
have h1 : w (n+1) = (1/x)^(n+1) • f(aux_seq v x f (n+1)) + w(n),
refl,
rw <- hw,
rw h1,
have h2 := hp (hf (aux_seq v x f (n+1))),
have h3 := submodule.smul_mem p ((1/x)^(n+1)) h2,
apply submodule.add_mem p,
exact h3,
exact hn,
},
end
theorem compact_unit_ball_implies_finite_dim
(Hcomp : compact (closed_ball 0 1 : set V)) : finite_dimensional k V :=
begin
-- there is an x in k such that norm x > 1
have hbignum : ∃ x : k, x ≠ 0 ∧ norm x > 1,
exact nontrivial_norm_gt_one,
cases hbignum with x hx,
have hxpos : norm x > 0,
{
have h : (1:ℝ) > 0,
norm_num,
exact gt_trans hx.2 h,
},
-- use compactness to find a finite set A
-- and a function f : V → V taking values in A such that for every
-- v in closed_ball 0 1, ∥f v - v∥ < 1 / ∥x∥
obtain ⟨A, A_sub, A_fin, Hexistsf⟩ := compact_choice_func Hcomp hxpos,
obtain ⟨f, hf⟩ := Hexistsf,
-- suffices to show closed_ball 0 1 is spanned by A
apply finite_dimensional.of_fg,
rw submodule.fg_def,
existsi A,
split,
exact A_fin,
rw submodule.eq_top_iff',
apply ball_span,
-- let v in closed_ball 0 1 and show that it is in the span of A
-- to do so we construct a sequence w n such that
-- w n -> v and w n in span A for all n
-- for this we do a kind of 'dyadic approximation' of v using the set A
-- then we use that the span of a finite set is closed to conclude
intros v hv,
let u : ℕ → V := aux_seq v x f,
let w : ℕ → V := partial_sum (λ n : ℕ, (1/x)^n • f(u(n))),
-- show that w n is in the span of A
have hw : ∀ n, w n ∈ submodule.span k A,
{
have hf' : ∀ x : V, f x ∈ A,
intro x,
exact (hf x).1,
exact approx_in_span A v x f hf',
},
-- this is just some algebraic manipulation
have hdist : ∀ n : ℕ, v - w n = (1/x)^(n+1) • u (n+1),
{
intro n,
induction n with n hn,
{
have h1 : w 0 = (1/x)^0 • f(v), refl,
rw zero_add,
rw pow_one,
rw pow_zero at h1,
rw one_smul at h1,
rw h1,
have h2 : u 1 = x • (v - f(v)), refl,
rw h2,
rw smul_smul,
rw one_div_mul_cancel,
rw one_smul,
exact hx.1,
}, {
have h1 : w (n+1) = (1/x)^(n+1) • f(u (n+1)) + w(n), refl,
have h2 : u (n+2) = x • (u (n+1) - f (u (n+1))), refl,
rw h2,
rw pow_succ,
rw smul_smul,
rw mul_comm,
rw <- mul_assoc,
rw mul_one_div_cancel,
rw one_mul,
rw smul_sub,
rw <- hn,
rw h1,
rw add_comm _ (w n),
rw sub_sub v (w n) _,
exact hx.1,
},
},
-- main bound for convergence using the expression hdist
have hbound : ∀ n : ℕ, norm (v - w n) ≤ (1/norm x)^(n+1),
{
intro n,
rw hdist n,
rw norm_smul,
have hu : u(n+1) = x • (u(n) - f(u(n))), refl,
rw hu,
have husmall : norm (u n) ≤ 1,
{
induction n with n hn,
have h0 : u 0 = v, refl,
rw h0,
rw <- dist_zero_right,
exact hv,
have hu' : u(n + 1) = x • (u(n) - f(u(n))), refl,
rw hu',
have h2 := (hf (u n)).2,
rw norm_smul,
rw <- dist_eq_norm,
have h3 := hn hu',
rw <- dist_zero_right at h3,
have h4 : dist (u n) (f (u n)) < 1/ norm x,
exact h2 h3,
rw le_iff_eq_or_lt,
right,
rw <- mul_lt_mul_left hxpos at h4,
rw mul_one_div_cancel at h4,
exact h4,
exact mt (norm_eq_zero x).1 hx.1,
},
have h2 : 0 < (1/norm x),
norm_num,
exact hxpos,
have h1 : 0 < (1/ norm x)^(n+1),
exact pow_pos h2 (n+1),
have h3 : norm ((1/ x)^(n+1)) * norm (x • (u n - f(u n))) ≤ norm ((1/ x)^(n+1)) * 1 →
(1/norm x)^(n+1) * norm (x • (u n - f(u n))) ≤ (1/norm x)^(n+1),
intro h,
rw mul_one at h,
rw normed_field.norm_pow at h,
rw normed_field.norm_div at h,
rw normed_field.norm_one at h,
exact h,
rw normed_field.norm_pow,
rw normed_field.norm_div,
rw normed_field.norm_one,
apply h3,
rw normed_field.norm_pow,
rw normed_field.norm_div,
rw normed_field.norm_one,
rw mul_le_mul_left h1,
rw norm_smul,
rw <- dist_zero_right at husmall,
have h4 := (hf (u n)).2 husmall,
rw <- dist_eq_norm,
rw <- mul_le_mul_left h2,
rw <- mul_assoc,
rw mul_one,
rw one_div_mul_cancel,
rw one_mul,
rw le_iff_eq_or_lt,
right,
exact h4,
exact mt (norm_eq_zero x).1 hx.1,
},
-- w n -> v as n -> infty
have hlim : filter.tendsto w filter.at_top (nhds v : filter V)
:= bound_power_convergence w hx.2 hbound,
-- the span of a finite set is (sequentially) closed
let S : set V := ↑(submodule.span k A),
have hspan_closed : is_seq_closed S := finite_span_seq_closed A A_fin,
have hw' : ∀ n, w n ∈ S,
exact hw,
-- since S is closed, the limit v of w n is in S, as required.
have hinS: v ∈ S,
exact mem_of_is_seq_closed hspan_closed hw' hlim,
exact hinS,
end
| 331e812f756d2a03fdc5ed3de8506ff413151cd5 | 8cae430f0a71442d02dbb1cbb14073b31048e4b0 | /src/topology/sets/order.lean | b11ca9821a8d3e393834f1efc39372c9f66e2865 | ["Apache-2.0"] | permissive | leanprover-community/mathlib | 56a2cadd17ac88caf4ece0a775932fa26327ba0e | 442a83d738cb208d3600056c489be16900ba701d | refs/heads/master | 1,693,584,102,358 | 1,693,471,902,000 | 1,693,471,902,000 | 97,922,418 | 1,595 | 352 | Apache-2.0 | 1,694,693,445,000 | 1,500,624,130,000 | Lean | UTF-8 | Lean | false | false | 2,479 | lean |
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.upper_lower.basic
import topology.sets.closeds
/-!
# Clopen upper sets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define the type of clopen upper sets.
-/
open set topological_space
variables {α β : Type*} [topological_space α] [has_le α] [topological_space β] [has_le β]
/-! ### Clopen upper sets -/
/-- The type of clopen upper sets of a topological space. -/
structure clopen_upper_set (α : Type*) [topological_space α] [has_le α] extends clopens α :=
(upper' : is_upper_set carrier)
namespace clopen_upper_set
instance : set_like (clopen_upper_set α) α :=
{ coe := λ s, s.carrier,
coe_injective' := λ s t h, by { obtain ⟨⟨_, _⟩, _⟩ := s, obtain ⟨⟨_, _⟩, _⟩ := t, congr' } }
lemma upper (s : clopen_upper_set α) : is_upper_set (s : set α) := s.upper'
lemma clopen (s : clopen_upper_set α) : is_clopen (s : set α) := s.clopen'
/-- Reinterpret an upper clopen as an upper set. -/
@[simps] def to_upper_set (s : clopen_upper_set α) : upper_set α := ⟨s, s.upper⟩
@[ext] protected lemma ext {s t : clopen_upper_set α} (h : (s : set α) = t) : s = t :=
set_like.ext' h
@[simp] lemma coe_mk (s : clopens α) (h) : (mk s h : set α) = s := rfl
instance : has_sup (clopen_upper_set α) :=
⟨λ s t, ⟨s.to_clopens ⊔ t.to_clopens, s.upper.union t.upper⟩⟩
instance : has_inf (clopen_upper_set α) :=
⟨λ s t, ⟨s.to_clopens ⊓ t.to_clopens, s.upper.inter t.upper⟩⟩
instance : has_top (clopen_upper_set α) := ⟨⟨⊤, is_upper_set_univ⟩⟩
instance : has_bot (clopen_upper_set α) := ⟨⟨⊥, is_upper_set_empty⟩⟩
instance : lattice (clopen_upper_set α) :=
set_like.coe_injective.lattice _ (λ _ _, rfl) (λ _ _, rfl)
instance : bounded_order (clopen_upper_set α) :=
bounded_order.lift (coe : _ → set α) (λ _ _, id) rfl rfl
@[simp] lemma coe_sup (s t : clopen_upper_set α) : (↑(s ⊔ t) : set α) = s ∪ t := rfl
@[simp] lemma coe_inf (s t : clopen_upper_set α) : (↑(s ⊓ t) : set α) = s ∩ t := rfl
@[simp] lemma coe_top : (↑(⊤ : clopen_upper_set α) : set α) = univ := rfl
@[simp] lemma coe_bot : (↑(⊥ : clopen_upper_set α) : set α) = ∅ := rfl
instance : inhabited (clopen_upper_set α) := ⟨⊥⟩
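-- Added illustration (not in the original file): membership in a `clopen_upper_set` is,
-- definitionally, membership in its carrier set, via the `set_like` instance above.
example (s : clopen_upper_set α) (a : α) : a ∈ s ↔ a ∈ (s : set α) := iff.rfl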
end clopen_upper_set
| 3b184b8a006deac173045fbeb13f9135ca3d88ea | 74addaa0e41490cbaf2abd313a764c96df57b05d | /Mathlib/analysis/calculus/extend_deriv.lean | b8abed6db88a7984d1502827b4f9f7187035970e | [] | no_license | AurelienSaue/Mathlib4_auto | f538cfd0980f65a6361eadea39e6fc639e9dae14 | 590df64109b08190abe22358fabc3eae000943f2 | refs/heads/master | 1,683,906,849,776 | 1,622,564,669,000 | 1,622,564,669,000 | 371,723,747 | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 3,452 | lean |
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.analysis.calculus.mean_value
import Mathlib.tactic.monotonicity.default
import Mathlib.PostPort
universes u_1 u_2
namespace Mathlib
/-!
# Extending differentiability to the boundary
We investigate how differentiable functions inside a set extend to differentiable functions
on the boundary. For this, it suffices that the function and its derivative admit limits there.
A general version of this statement is given in `has_fderiv_at_boundary_of_tendsto_fderiv`.
One-dimensional versions, in which one wants to obtain differentiability at the left endpoint or
the right endpoint of an interval, are given in
`has_deriv_at_interval_left_endpoint_of_tendsto_deriv` and
`has_deriv_at_interval_right_endpoint_of_tendsto_deriv`. These versions are formulated in terms
of the one-dimensional derivative `deriv ℝ f`.
-/
/-- If a function `f` is differentiable in a convex open set and continuous on its closure, and its
derivative converges to a limit `f'` at a point on the boundary, then `f` is differentiable there
with derivative `f'`. -/
theorem has_fderiv_at_boundary_of_tendsto_fderiv {E : Type u_1} [normed_group E] [normed_space ℝ E] {F : Type u_2} [normed_group F] [normed_space ℝ F] {f : E → F} {s : set E} {x : E} {f' : continuous_linear_map ℝ E F} (f_diff : differentiable_on ℝ f s) (s_conv : convex s) (s_open : is_open s) (f_cont : ∀ (y : E), y ∈ closure s → continuous_within_at f s y) (h : filter.tendsto (fun (y : E) => fderiv ℝ f y) (nhds_within x s) (nhds f')) : has_fderiv_within_at f f' (closure s) x := sorry
/-- If a function is differentiable on the right of a point `a : ℝ`, continuous at `a`, and
its derivative also converges at `a`, then `f` is differentiable on the right at `a`. -/
theorem has_deriv_at_interval_left_endpoint_of_tendsto_deriv {E : Type u_1} [normed_group E] [normed_space ℝ E] {s : set ℝ} {e : E} {a : ℝ} {f : ℝ → E} (f_diff : differentiable_on ℝ f s) (f_lim : continuous_within_at f s a) (hs : s ∈ nhds_within a (set.Ioi a)) (f_lim' : filter.tendsto (fun (x : ℝ) => deriv f x) (nhds_within a (set.Ioi a)) (nhds e)) : has_deriv_within_at f e (set.Ici a) a := sorry
/-- If a function is differentiable on the left of a point `a : ℝ`, continuous at `a`, and
its derivative also converges at `a`, then `f` is differentiable on the left at `a`. -/
theorem has_deriv_at_interval_right_endpoint_of_tendsto_deriv {E : Type u_1} [normed_group E] [normed_space ℝ E] {s : set ℝ} {e : E} {a : ℝ} {f : ℝ → E} (f_diff : differentiable_on ℝ f s) (f_lim : continuous_within_at f s a) (hs : s ∈ nhds_within a (set.Iio a)) (f_lim' : filter.tendsto (fun (x : ℝ) => deriv f x) (nhds_within a (set.Iio a)) (nhds e)) : has_deriv_within_at f e (set.Iic a) a := sorry
/-- If a real function `f` has a derivative `g` everywhere but at a point, and `f` and `g` are
continuous at this point, then `g` is also the derivative of `f` at this point. -/
theorem has_deriv_at_of_has_deriv_at_of_ne {E : Type u_1} [normed_group E] [normed_space ℝ E] {f : ℝ → E} {g : ℝ → E} {x : ℝ} (f_diff : ∀ (y : ℝ), y ≠ x → has_deriv_at f (g y) y) (hf : continuous_at f x) (hg : continuous_at g x) : has_deriv_at f (g x) x := sorry
| 586888afa5213e3ce60884ed5166a6e1aa439072 | 63abd62053d479eae5abf4951554e1064a4c45b4 | /src/analysis/analytic/basic.lean | 289267cc877171d83ad2a68a573306e0c8440983 | ["Apache-2.0"] | permissive | Lix0120/mathlib | 0020745240315ed0e517cbf32e738d8f9811dd80 | e14c37827456fc6707f31b4d1d16f1f3a3205e91 | refs/heads/master | 1,673,102,855,024 | 1,604,151,044,000 | 1,604,151,044,000 | 308,930,245 | 0 | 0 | Apache-2.0 | 1,604,164,710,000 | 1,604,163,547,000 | null | UTF-8 | Lean | false | false | 37,762 | lean |
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import analysis.calculus.times_cont_diff
import tactic.omega
import analysis.special_functions.pow
/-!
# Analytic functions
A function is analytic in one dimension around `0` if it can be written as a converging power series
`Σ pₙ zⁿ`. This definition can be extended to any dimension (even in infinite dimension) by
requiring that `pₙ` is a continuous `n`-multilinear map. In general, `pₙ` is not unique (in two
dimensions, taking `p₂ (x, y) (x', y') = x y'` or `y x'` gives the same map when applied to a
vector `(x, y) (x, y)`). A way to guarantee uniqueness is to take a symmetric `pₙ`, but this is not
always possible in nonzero characteristic (in characteristic 2, the previous example has no
symmetric representative). Therefore, we do not insist on symmetry or uniqueness in the definition,
and we only require the existence of a converging series.
The general framework is important to say that the exponential map on bounded operators on a Banach
space is analytic, as well as the inverse on invertible operators.
## Main definitions
Let `p` be a formal multilinear series from `E` to `F`, i.e., `p n` is a multilinear map on `E^n`
for `n : ℕ`.
* `p.radius`: the largest `r : ennreal` such that `∥p n∥ * r^n` grows subexponentially, defined as
a liminf.
* `p.le_radius_of_bound`, `p.bound_of_lt_radius`, `p.geometric_bound_of_lt_radius`: relating the
value of the radius with the growth of `∥p n∥ * r^n`.
* `p.partial_sum n x`: the sum `∑_{i = 0}^{n-1} pᵢ xⁱ`.
* `p.sum x`: the sum `∑'_{i = 0}^{∞} pᵢ xⁱ`.
Additionally, let `f` be a function from `E` to `F`.
* `has_fpower_series_on_ball f p x r`: on the ball of center `x` with radius `r`,
`f (x + y) = ∑'_n pₙ yⁿ`.
* `has_fpower_series_at f p x`: on some ball of center `x` with positive radius, holds
`has_fpower_series_on_ball f p x r`.
* `analytic_at 𝕜 f x`: there exists a power series `p` such that holds
`has_fpower_series_at f p x`.
We develop the basic properties of these notions, notably:
* If a function admits a power series, it is continuous (see
`has_fpower_series_on_ball.continuous_on` and `has_fpower_series_at.continuous_at` and
`analytic_at.continuous_at`).
* In a complete space, the sum of a formal power series with positive radius is well defined on the
disk of convergence, see `formal_multilinear_series.has_fpower_series_on_ball`.
* If a function admits a power series in a ball, then it is analytic at any point `y` of this ball,
and the power series there can be expressed in terms of the initial power series `p` as
`p.change_origin y`. See `has_fpower_series_on_ball.change_origin`. It follows in particular that
the set of points at which a given function is analytic is open, see `is_open_analytic_at`.
## Implementation details
We only introduce the radius of convergence of a power series, as `p.radius`.
For a power series in finitely many dimensions, there is a finer (directional, coordinate-dependent)
notion, describing the polydisk of convergence. This notion is more specific, and not necessary to
build the general theory. We do not define it here.
-/
noncomputable theory
variables {𝕜 : Type*} [nondiscrete_normed_field 𝕜]
{E : Type*} [normed_group E] [normed_space 𝕜 E]
{F : Type*} [normed_group F] [normed_space 𝕜 F]
{G : Type*} [normed_group G] [normed_space 𝕜 G]
open_locale topological_space classical big_operators
open filter
/-! ### The radius of a formal multilinear series -/
namespace formal_multilinear_series
/-- The radius of a formal multilinear series is the largest `r` such that the sum `Σ pₙ yⁿ`
converges for all `∥y∥ < r`. -/
def radius (p : formal_multilinear_series 𝕜 E F) : ennreal :=
liminf at_top (λ n, 1/((nnnorm (p n)) ^ (1 / (n : ℝ)) : nnreal))
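-- Added remark: this is essentially the Cauchy–Hadamard formula; in classical notation,
-- `p.radius = liminf_n ‖p n‖ ^ (-1/n)`, with the convention that `1/0 = ∞`.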
/-- If `∥pₙ∥ rⁿ` is bounded in `n`, then the radius of `p` is at least `r`. -/
lemma le_radius_of_bound (p : formal_multilinear_series 𝕜 E F) (C : nnreal) {r : nnreal}
(h : ∀ (n : ℕ), nnnorm (p n) * r^n ≤ C) : (r : ennreal) ≤ p.radius :=
begin
have L : tendsto (λ n : ℕ, (r : ennreal) / ((C + 1)^(1/(n : ℝ)) : nnreal))
at_top (𝓝 ((r : ennreal) / ((C + 1)^(0 : ℝ) : nnreal))),
{ apply ennreal.tendsto.div tendsto_const_nhds,
{ simp },
{ rw ennreal.tendsto_coe,
apply tendsto_const_nhds.nnrpow (tendsto_const_div_at_top_nhds_0_nat 1),
simp },
{ simp } },
have A : ∀ n : ℕ , 0 < n →
(r : ennreal) ≤ ((C + 1)^(1/(n : ℝ)) : nnreal) * (1 / (nnnorm (p n) ^ (1/(n:ℝ)) : nnreal)),
{ assume n npos,
simp only [one_div, mul_assoc, mul_one, eq.symm ennreal.mul_div_assoc],
rw [ennreal.le_div_iff_mul_le _ _, ← nnreal.pow_nat_rpow_nat_inv r npos, ← ennreal.coe_mul,
ennreal.coe_le_coe, ← nnreal.mul_rpow, mul_comm],
{ exact nnreal.rpow_le_rpow (le_trans (h n) (le_add_right (le_refl _))) (by simp) },
{ simp },
{ simp } },
have B : ∀ᶠ (n : ℕ) in at_top,
(r : ennreal) / ((C + 1)^(1/(n : ℝ)) : nnreal) ≤ 1 / (nnnorm (p n) ^ (1/(n:ℝ)) : nnreal),
{ apply eventually_at_top.2 ⟨1, λ n hn, _⟩,
rw [ennreal.div_le_iff_le_mul, mul_comm],
{ apply A n hn },
{ simp },
{ simp } },
have D : liminf at_top (λ n : ℕ, (r : ennreal) / ((C + 1)^(1/(n : ℝ)) : nnreal)) ≤ p.radius :=
liminf_le_liminf B,
rw L.liminf_eq at D,
simpa using D
end
/-- For `r` strictly smaller than the radius of `p`, then `∥pₙ∥ rⁿ` is bounded. -/
lemma bound_of_lt_radius (p : formal_multilinear_series 𝕜 E F) {r : nnreal}
(h : (r : ennreal) < p.radius) : ∃ (C : nnreal), ∀ n, nnnorm (p n) * r^n ≤ C :=
begin
obtain ⟨N, hN⟩ : ∃ (N : ℕ), ∀ n, n ≥ N → (r : ennreal) < 1 / ↑(nnnorm (p n) ^ (1 / (n : ℝ))) :=
eventually.exists_forall_of_at_top (eventually_lt_of_lt_liminf h),
obtain ⟨D, hD⟩ : ∃D, ∀ x ∈ (↑((finset.range N.succ).image (λ i, nnnorm (p i) * r^i))), x ≤ D :=
finset.bdd_above _,
refine ⟨max D 1, λ n, _⟩,
cases le_or_lt n N with hn hn,
{ refine le_trans _ (le_max_left D 1),
apply hD,
have : n ∈ finset.range N.succ := list.mem_range.mpr (nat.lt_succ_iff.mpr hn),
exact finset.mem_image_of_mem _ this },
{ by_cases hpn : nnnorm (p n) = 0, { simp [hpn] },
have A : nnnorm (p n) ^ (1 / (n : ℝ)) ≠ 0, by simp [nnreal.rpow_eq_zero_iff, hpn],
have B : r < (nnnorm (p n) ^ (1 / (n : ℝ)))⁻¹,
{ have := hN n (le_of_lt hn),
rwa [ennreal.div_def, ← ennreal.coe_inv A, one_mul, ennreal.coe_lt_coe] at this },
rw [nnreal.lt_inv_iff_mul_lt A, mul_comm] at B,
have : (nnnorm (p n) ^ (1 / (n : ℝ)) * r) ^ n ≤ 1 :=
pow_le_one n (zero_le (nnnorm (p n) ^ (1 / ↑n) * r)) (le_of_lt B),
rw [mul_pow, one_div, nnreal.rpow_nat_inv_pow_nat _ (lt_of_le_of_lt (zero_le _) hn)]
at this,
exact le_trans this (le_max_right _ _) },
end
/-- For `r` strictly smaller than the radius of `p`, then `∥pₙ∥ rⁿ` tends to zero exponentially. -/
lemma geometric_bound_of_lt_radius (p : formal_multilinear_series 𝕜 E F) {r : nnreal}
(h : (r : ennreal) < p.radius) : ∃ a C, a < 1 ∧ ∀ n, nnnorm (p n) * r^n ≤ C * a^n :=
begin
obtain ⟨t, rt, tp⟩ : ∃ (t : nnreal), (r : ennreal) < t ∧ (t : ennreal) < p.radius :=
ennreal.lt_iff_exists_nnreal_btwn.1 h,
rw ennreal.coe_lt_coe at rt,
have tpos : t ≠ 0 := ne_of_gt (lt_of_le_of_lt (zero_le _) rt),
obtain ⟨C, hC⟩ : ∃ (C : nnreal), ∀ n, nnnorm (p n) * t^n ≤ C := p.bound_of_lt_radius tp,
refine ⟨r / t, C, nnreal.div_lt_one_of_lt rt, λ n, _⟩,
calc nnnorm (p n) * r ^ n
= (nnnorm (p n) * t ^ n) * (r / t) ^ n : by { field_simp [tpos], ac_refl }
... ≤ C * (r / t) ^ n : mul_le_mul_of_nonneg_right (hC n) (zero_le _)
end
/-- The radius of the sum of two formal series is at least the minimum of their two radii. -/
lemma min_radius_le_radius_add (p q : formal_multilinear_series 𝕜 E F) :
min p.radius q.radius ≤ (p + q).radius :=
begin
refine le_of_forall_ge_of_dense (λ r hr, _),
cases r, { simpa using hr },
obtain ⟨Cp, hCp⟩ : ∃ (C : nnreal), ∀ n, nnnorm (p n) * r^n ≤ C :=
p.bound_of_lt_radius (lt_of_lt_of_le hr (min_le_left _ _)),
obtain ⟨Cq, hCq⟩ : ∃ (C : nnreal), ∀ n, nnnorm (q n) * r^n ≤ C :=
q.bound_of_lt_radius (lt_of_lt_of_le hr (min_le_right _ _)),
have : ∀ n, nnnorm ((p + q) n) * r^n ≤ Cp + Cq,
{ assume n,
calc nnnorm (p n + q n) * r ^ n
≤ (nnnorm (p n) + nnnorm (q n)) * r ^ n :
mul_le_mul_of_nonneg_right (norm_add_le (p n) (q n)) (zero_le (r ^ n))
... ≤ Cp + Cq : by { rw add_mul, exact add_le_add (hCp n) (hCq n) } },
exact (p + q).le_radius_of_bound _ this
end
lemma radius_neg (p : formal_multilinear_series 𝕜 E F) : (-p).radius = p.radius :=
by simp [formal_multilinear_series.radius, nnnorm_neg]
/-- Given a formal multilinear series `p` and a vector `x`, then `p.sum x` is the sum `Σ pₙ xⁿ`. A
priori, it only behaves well when `∥x∥ < p.radius`. -/
protected def sum (p : formal_multilinear_series 𝕜 E F) (x : E) : F :=
tsum (λn:ℕ, p n (λ(i : fin n), x))
/-- Given a formal multilinear series `p` and a vector `x`, then `p.partial_sum n x` is the sum
`Σ pₖ xᵏ` for `k ∈ {0,..., n-1}`. -/
def partial_sum (p : formal_multilinear_series 𝕜 E F) (n : ℕ) (x : E) : F :=
∑ k in finset.range n, p k (λ(i : fin k), x)
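-- Added remark: concretely, `p.partial_sum n x = p 0 (λ _, x) + p 1 (λ _, x) + ⋯ + p (n-1) (λ _, x)`.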
/-- The partial sums of a formal multilinear series are continuous. -/
lemma partial_sum_continuous (p : formal_multilinear_series 𝕜 E F) (n : ℕ) :
continuous (p.partial_sum n) :=
by continuity
end formal_multilinear_series
/-! ### Expanding a function as a power series -/
section
variables {f g : E → F} {p pf pg : formal_multilinear_series 𝕜 E F} {x : E} {r r' : ennreal}
/-- Given a function `f : E → F` and a formal multilinear series `p`, we say that `f` has `p` as
a power series on the ball of radius `r > 0` around `x` if `f (x + y) = ∑' pₙ yⁿ` for all `∥y∥ < r`. -/
structure has_fpower_series_on_ball
(f : E → F) (p : formal_multilinear_series 𝕜 E F) (x : E) (r : ennreal) : Prop :=
(r_le : r ≤ p.radius)
(r_pos : 0 < r)
(has_sum : ∀ {y}, y ∈ emetric.ball (0 : E) r → has_sum (λn:ℕ, p n (λ(i : fin n), y)) (f (x + y)))
/-- Given a function `f : E → F` and a formal multilinear series `p`, we say that `f` has `p` as
a power series around `x` if `f (x + y) = ∑' pₙ yⁿ` for all `y` in a neighborhood of `0`. -/
def has_fpower_series_at (f : E → F) (p : formal_multilinear_series 𝕜 E F) (x : E) :=
∃ r, has_fpower_series_on_ball f p x r
variable (𝕜)
/-- Given a function `f : E → F`, we say that `f` is analytic at `x` if it admits a convergent power
series expansion around `x`. -/
def analytic_at (f : E → F) (x : E) :=
∃ (p : formal_multilinear_series 𝕜 E F), has_fpower_series_at f p x
variable {𝕜}
lemma has_fpower_series_on_ball.has_fpower_series_at (hf : has_fpower_series_on_ball f p x r) :
has_fpower_series_at f p x := ⟨r, hf⟩
lemma has_fpower_series_at.analytic_at (hf : has_fpower_series_at f p x) : analytic_at 𝕜 f x :=
⟨p, hf⟩
lemma has_fpower_series_on_ball.analytic_at (hf : has_fpower_series_on_ball f p x r) :
analytic_at 𝕜 f x :=
hf.has_fpower_series_at.analytic_at
lemma has_fpower_series_on_ball.radius_pos (hf : has_fpower_series_on_ball f p x r) :
0 < p.radius :=
lt_of_lt_of_le hf.r_pos hf.r_le
lemma has_fpower_series_at.radius_pos (hf : has_fpower_series_at f p x) :
0 < p.radius :=
let ⟨r, hr⟩ := hf in hr.radius_pos
lemma has_fpower_series_on_ball.mono
(hf : has_fpower_series_on_ball f p x r) (r'_pos : 0 < r') (hr : r' ≤ r) :
has_fpower_series_on_ball f p x r' :=
⟨le_trans hr hf.1, r'_pos, λ y hy, hf.has_sum (emetric.ball_subset_ball hr hy)⟩
lemma has_fpower_series_on_ball.add
(hf : has_fpower_series_on_ball f pf x r) (hg : has_fpower_series_on_ball g pg x r) :
has_fpower_series_on_ball (f + g) (pf + pg) x r :=
{ r_le := le_trans (le_min_iff.2 ⟨hf.r_le, hg.r_le⟩) (pf.min_radius_le_radius_add pg),
r_pos := hf.r_pos,
has_sum := λ y hy, (hf.has_sum hy).add (hg.has_sum hy) }
lemma has_fpower_series_at.add
(hf : has_fpower_series_at f pf x) (hg : has_fpower_series_at g pg x) :
has_fpower_series_at (f + g) (pf + pg) x :=
begin
rcases hf with ⟨rf, hrf⟩,
rcases hg with ⟨rg, hrg⟩,
have P : 0 < min rf rg, by simp [hrf.r_pos, hrg.r_pos],
exact ⟨min rf rg, (hrf.mono P (min_le_left _ _)).add (hrg.mono P (min_le_right _ _))⟩
end
lemma analytic_at.add (hf : analytic_at 𝕜 f x) (hg : analytic_at 𝕜 g x) :
analytic_at 𝕜 (f + g) x :=
let ⟨pf, hpf⟩ := hf, ⟨qf, hqf⟩ := hg in (hpf.add hqf).analytic_at
lemma has_fpower_series_on_ball.neg (hf : has_fpower_series_on_ball f pf x r) :
has_fpower_series_on_ball (-f) (-pf) x r :=
{ r_le := by { rw pf.radius_neg, exact hf.r_le },
r_pos := hf.r_pos,
has_sum := λ y hy, (hf.has_sum hy).neg }
lemma has_fpower_series_at.neg
(hf : has_fpower_series_at f pf x) : has_fpower_series_at (-f) (-pf) x :=
let ⟨rf, hrf⟩ := hf in hrf.neg.has_fpower_series_at
lemma analytic_at.neg (hf : analytic_at 𝕜 f x) : analytic_at 𝕜 (-f) x :=
let ⟨pf, hpf⟩ := hf in hpf.neg.analytic_at
lemma has_fpower_series_on_ball.sub
(hf : has_fpower_series_on_ball f pf x r) (hg : has_fpower_series_on_ball g pg x r) :
has_fpower_series_on_ball (f - g) (pf - pg) x r :=
hf.add hg.neg
lemma has_fpower_series_at.sub
(hf : has_fpower_series_at f pf x) (hg : has_fpower_series_at g pg x) :
has_fpower_series_at (f - g) (pf - pg) x :=
hf.add hg.neg
lemma analytic_at.sub (hf : analytic_at 𝕜 f x) (hg : analytic_at 𝕜 g x) :
analytic_at 𝕜 (f - g) x :=
hf.add hg.neg
lemma has_fpower_series_on_ball.coeff_zero (hf : has_fpower_series_on_ball f pf x r)
(v : fin 0 → E) : pf 0 v = f x :=
begin
have v_eq : v = (λ i, 0), by { ext i, apply fin_zero_elim i },
have zero_mem : (0 : E) ∈ emetric.ball (0 : E) r, by simp [hf.r_pos],
have : ∀ i ≠ 0, pf i (λ j, 0) = 0,
{ assume i hi,
have : 0 < i := bot_lt_iff_ne_bot.mpr hi,
apply continuous_multilinear_map.map_coord_zero _ (⟨0, this⟩ : fin i),
refl },
have A := (hf.has_sum zero_mem).unique (has_sum_single _ this),
simpa [v_eq] using A.symm,
end
lemma has_fpower_series_at.coeff_zero (hf : has_fpower_series_at f pf x) (v : fin 0 → E) :
pf 0 v = f x :=
let ⟨rf, hrf⟩ := hf in hrf.coeff_zero v
/-- If a function admits a power series expansion, then it is exponentially close to the partial
sums of this power series on strict subdisks of the disk of convergence. -/
lemma has_fpower_series_on_ball.uniform_geometric_approx {r' : nnreal}
(hf : has_fpower_series_on_ball f p x r) (h : (r' : ennreal) < r) :
∃ (a C : nnreal), a < 1 ∧ (∀ y ∈ metric.ball (0 : E) r', ∀ n,
∥f (x + y) - p.partial_sum n y∥ ≤ C * a ^ n) :=
begin
obtain ⟨a, C, ha, hC⟩ : ∃ a C, a < 1 ∧ ∀ n, nnnorm (p n) * r' ^n ≤ C * a^n :=
p.geometric_bound_of_lt_radius (lt_of_lt_of_le h hf.r_le),
refine ⟨a, C / (1 - a), ha, λ y hy n, _⟩,
have yr' : ∥y∥ < r', by { rw ball_0_eq at hy, exact hy },
have : y ∈ emetric.ball (0 : E) r,
{ rw [emetric.mem_ball, edist_eq_coe_nnnorm],
apply lt_trans _ h,
rw [ennreal.coe_lt_coe, ← nnreal.coe_lt_coe],
exact yr' },
simp only [nnreal.coe_sub (le_of_lt ha), nnreal.coe_sub, nnreal.coe_div, nnreal.coe_one],
rw [← dist_eq_norm, dist_comm, dist_eq_norm, ← mul_div_right_comm],
apply norm_sub_le_of_geometric_bound_of_has_sum ha _ (hf.has_sum this),
assume n,
calc ∥(p n) (λ (i : fin n), y)∥
≤ ∥p n∥ * (∏ i : fin n, ∥y∥) : continuous_multilinear_map.le_op_norm _ _
... = nnnorm (p n) * (nnnorm y)^n : by simp
... ≤ nnnorm (p n) * r' ^ n :
mul_le_mul_of_nonneg_left (pow_le_pow_of_le_left (nnreal.coe_nonneg _) (le_of_lt yr') _)
(nnreal.coe_nonneg _)
... ≤ C * a ^ n : by exact_mod_cast hC n,
end
/-- If a function admits a power series expansion at `x`, then it is the uniform limit of the
partial sums of this power series on strict subdisks of the disk of convergence, i.e., `f (x + y)`
is the uniform limit of `p.partial_sum n y` there. -/
lemma has_fpower_series_on_ball.tendsto_uniformly_on {r' : nnreal}
(hf : has_fpower_series_on_ball f p x r) (h : (r' : ennreal) < r) :
tendsto_uniformly_on (λ n y, p.partial_sum n y) (λ y, f (x + y)) at_top (metric.ball (0 : E) r') :=
begin
rcases hf.uniform_geometric_approx h with ⟨a, C, ha, hC⟩,
refine metric.tendsto_uniformly_on_iff.2 (λ ε εpos, _),
have L : tendsto (λ n, (C : ℝ) * a^n) at_top (𝓝 ((C : ℝ) * 0)) :=
tendsto_const_nhds.mul (tendsto_pow_at_top_nhds_0_of_lt_1 (a.2) ha),
rw mul_zero at L,
apply ((tendsto_order.1 L).2 ε εpos).mono (λ n hn, _),
assume y hy,
rw dist_eq_norm,
exact lt_of_le_of_lt (hC y hy n) hn
end
/-- If a function admits a power series expansion at `x`, then it is the locally uniform limit of
the partial sums of this power series on the disk of convergence, i.e., `f (x + y)`
is the locally uniform limit of `p.partial_sum n y` there. -/
lemma has_fpower_series_on_ball.tendsto_locally_uniformly_on
(hf : has_fpower_series_on_ball f p x r) :
tendsto_locally_uniformly_on (λ n y, p.partial_sum n y) (λ y, f (x + y))
at_top (emetric.ball (0 : E) r) :=
begin
assume u hu x hx,
rcases ennreal.lt_iff_exists_nnreal_btwn.1 hx with ⟨r', xr', hr'⟩,
have : emetric.ball (0 : E) r' ∈ 𝓝 x :=
mem_nhds_sets emetric.is_open_ball xr',
refine ⟨emetric.ball (0 : E) r', mem_nhds_within_of_mem_nhds this, _⟩,
simpa [metric.emetric_ball_nnreal] using hf.tendsto_uniformly_on hr' u hu
end
/-- If a function admits a power series expansion at `x`, then it is the uniform limit of the
partial sums of this power series on strict subdisks of the disk of convergence, i.e., `f y`
is the uniform limit of `p.partial_sum n (y - x)` there. -/
lemma has_fpower_series_on_ball.tendsto_uniformly_on' {r' : nnreal}
(hf : has_fpower_series_on_ball f p x r) (h : (r' : ennreal) < r) :
tendsto_uniformly_on (λ n y, p.partial_sum n (y - x)) f at_top (metric.ball (x : E) r') :=
begin
convert (hf.tendsto_uniformly_on h).comp (λ y, y - x),
{ ext z, simp },
{ ext z, simp [dist_eq_norm] }
end
/-- If a function admits a power series expansion at `x`, then it is the locally uniform limit of
the partial sums of this power series on the disk of convergence, i.e., `f y`
is the locally uniform limit of `p.partial_sum n (y - x)` there. -/
lemma has_fpower_series_on_ball.tendsto_locally_uniformly_on'
(hf : has_fpower_series_on_ball f p x r) :
tendsto_locally_uniformly_on (λ n y, p.partial_sum n (y - x)) f at_top (emetric.ball (x : E) r) :=
begin
have A : continuous_on (λ (y : E), y - x) (emetric.ball (x : E) r) :=
(continuous_id.sub continuous_const).continuous_on,
convert (hf.tendsto_locally_uniformly_on).comp (λ (y : E), y - x) _ A,
{ ext z, simp },
{ assume z, simp [edist_eq_coe_nnnorm, edist_eq_coe_nnnorm_sub] }
end
/-- If a function admits a power series expansion on a disk, then it is continuous there. -/
lemma has_fpower_series_on_ball.continuous_on
(hf : has_fpower_series_on_ball f p x r) : continuous_on f (emetric.ball x r) :=
hf.tendsto_locally_uniformly_on'.continuous_on $ λ n,
((p.partial_sum_continuous n).comp (continuous_id.sub continuous_const)).continuous_on
lemma has_fpower_series_at.continuous_at (hf : has_fpower_series_at f p x) : continuous_at f x :=
let ⟨r, hr⟩ := hf in hr.continuous_on.continuous_at (emetric.ball_mem_nhds x (hr.r_pos))
lemma analytic_at.continuous_at (hf : analytic_at 𝕜 f x) : continuous_at f x :=
let ⟨p, hp⟩ := hf in hp.continuous_at
/-- In a complete space, the sum of a converging power series `p` admits `p` as a power series.
This is not totally obvious as we need to check the convergence of the series. -/
lemma formal_multilinear_series.has_fpower_series_on_ball [complete_space F]
(p : formal_multilinear_series 𝕜 E F) (h : 0 < p.radius) :
has_fpower_series_on_ball p.sum p 0 p.radius :=
{ r_le := le_refl _,
r_pos := h,
has_sum := λ y hy, begin
rw zero_add,
replace hy : (nnnorm y : ennreal) < p.radius,
by { convert hy, exact (edist_eq_coe_nnnorm _).symm },
obtain ⟨a, C, ha, hC⟩ : ∃ a C, a < 1 ∧ ∀ n, nnnorm (p n) * (nnnorm y)^n ≤ C * a^n :=
p.geometric_bound_of_lt_radius hy,
refine (summable_of_norm_bounded (λ n, (C : ℝ) * a ^ n)
((summable_geometric_of_lt_1 a.2 ha).mul_left _) (λ n, _)).has_sum,
calc ∥(p n) (λ (i : fin n), y)∥
≤ ∥p n∥ * (∏ i : fin n, ∥y∥) : continuous_multilinear_map.le_op_norm _ _
... = nnnorm (p n) * (nnnorm y)^n : by simp
... ≤ C * a ^ n : by exact_mod_cast hC n
end }
lemma has_fpower_series_on_ball.sum [complete_space F] (h : has_fpower_series_on_ball f p x r)
{y : E} (hy : y ∈ emetric.ball (0 : E) r) : f (x + y) = p.sum y :=
begin
have A := h.has_sum hy,
have B := (p.has_fpower_series_on_ball h.radius_pos).has_sum (lt_of_lt_of_le hy h.r_le),
simpa using A.unique B
end
/-- The sum of a converging power series is continuous in its disk of convergence. -/
lemma formal_multilinear_series.continuous_on [complete_space F] :
continuous_on p.sum (emetric.ball 0 p.radius) :=
begin
by_cases h : 0 < p.radius,
{ exact (p.has_fpower_series_on_ball h).continuous_on },
{ simp at h,
simp [h, continuous_on_empty] }
end
end
/-!
### Changing origin in a power series
If a function is analytic in a disk `D(x, R)`, then it is analytic in any disk contained in that
one. Indeed, one can write
$$
f (x + y + z) = \sum_{n} p_n (y + z)^n = \sum_{n, k} \binom{n}{k} p_n y^{n-k} z^k
= \sum_{k} \Big(\sum_{n} \binom{n}{k} p_n y^{n-k}\Big) z^k.
$$
The corresponding power series thus has a `k`-th coefficient equal to
`\sum_{n} \binom{n}{k} p_n y^{n-k}`. In the general case where `pₙ` is a multilinear map, this has
to be interpreted suitably: instead of having a binomial coefficient, one should sum over all
possible subsets `s` of `fin n` of cardinal `k`, and attribute `z` to the indices in `s` and
`y` to the indices outside of `s`.
In this paragraph, we implement this. The new power series is called `p.change_origin y`. Then, we
check its convergence and the fact that its sum coincides with the original sum. The outcome of this
discussion is that the set of points where a function is analytic is open.
-/
namespace formal_multilinear_series
variables (p : formal_multilinear_series 𝕜 E F) {x y : E} {r : nnreal}
/--
Changing the origin of a formal multilinear series `p`, so that
`p.sum (x+y) = (p.change_origin x).sum y` when this makes sense.
Here, we don't use the bracket notation `⟨n, s, hs⟩` in place of the argument `i` in the lambda,
as this leads to a bad definition with auxiliary `_match` statements,
but we will try to use pattern matching in lambdas as much as possible in the proofs below
to increase readability.
-/
def change_origin (x : E) :
formal_multilinear_series 𝕜 E F :=
λ k, tsum (λi, (p i.1).restr i.2.1 i.2.2 x :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → (E [×k]→L[𝕜] F))
/-- Auxiliary lemma controlling the summability of the sequence appearing in the definition of
`p.change_origin`, first version. -/
-- Note here and below it is necessary to use `@` and provide implicit arguments using `_`,
-- so that it is possible to use pattern matching in the lambda.
-- Overall this seems a good trade-off in readability.
lemma change_origin_summable_aux1 (h : (nnnorm x + r : ennreal) < p.radius) :
@summable ℝ _ _ _ ((λ ⟨n, s⟩, ∥p n∥ * ∥x∥ ^ (n - s.card) * r ^ s.card) :
(Σ (n : ℕ), finset (fin n)) → ℝ) :=
begin
obtain ⟨a, C, ha, hC⟩ :
∃ a C, a < 1 ∧ ∀ n, nnnorm (p n) * (nnnorm x + r) ^ n ≤ C * a^n :=
p.geometric_bound_of_lt_radius h,
let Bnnnorm : (Σ (n : ℕ), finset (fin n)) → nnreal :=
λ ⟨n, s⟩, nnnorm (p n) * (nnnorm x) ^ (n - s.card) * r ^ s.card,
have : ((λ ⟨n, s⟩, ∥p n∥ * ∥x∥ ^ (n - s.card) * r ^ s.card) :
(Σ (n : ℕ), finset (fin n)) → ℝ) = (λ b, (Bnnnorm b : ℝ)),
by { ext ⟨n, s⟩, simp [Bnnnorm, nnreal.coe_pow, coe_nnnorm] },
rw [this, nnreal.summable_coe, ← ennreal.tsum_coe_ne_top_iff_summable],
apply ne_of_lt,
calc (∑' b, ↑(Bnnnorm b))
= (∑' n, (∑' s, ↑(Bnnnorm ⟨n, s⟩))) : by exact ennreal.tsum_sigma' _
... ≤ (∑' n, (((nnnorm (p n) * (nnnorm x + r)^n) : nnreal) : ennreal)) :
begin
refine ennreal.tsum_le_tsum (λ n, _),
rw [tsum_fintype, ← ennreal.coe_finset_sum, ennreal.coe_le_coe],
apply le_of_eq,
calc ∑ s : finset (fin n), Bnnnorm ⟨n, s⟩
= ∑ s : finset (fin n), nnnorm (p n) * ((nnnorm x) ^ (n - s.card) * r ^ s.card) :
by simp [← mul_assoc]
... = nnnorm (p n) * (nnnorm x + r) ^ n :
by { rw [add_comm, ← finset.mul_sum, ← fin.sum_pow_mul_eq_add_pow], congr' with s : 1, ring }
end
... ≤ (∑' (n : ℕ), (C * a ^ n : ennreal)) :
tsum_le_tsum (λ n, by exact_mod_cast hC n) ennreal.summable ennreal.summable
... < ⊤ :
by simp [ennreal.mul_eq_top, ha, ennreal.tsum_mul_left, ennreal.tsum_geometric,
ennreal.lt_top_iff_ne_top]
end
/-- Auxiliary lemma controlling the summability of the sequence appearing in the definition of
`p.change_origin`, second version. -/
lemma change_origin_summable_aux2 (h : (nnnorm x + r : ennreal) < p.radius) :
@summable ℝ _ _ _ ((λ ⟨k, n, s, hs⟩, ∥(p n).restr s hs x∥ * ↑r ^ k) :
(Σ (k : ℕ) (n : ℕ), {s : finset (fin n) // finset.card s = k}) → ℝ) :=
begin
let γ : ℕ → Type* := λ k, (Σ (n : ℕ), {s : finset (fin n) // s.card = k}),
let Bnorm : (Σ (n : ℕ), finset (fin n)) → ℝ := λ ⟨n, s⟩, ∥p n∥ * ∥x∥ ^ (n - s.card) * r ^ s.card,
have SBnorm : summable Bnorm := p.change_origin_summable_aux1 h,
let Anorm : (Σ (n : ℕ), finset (fin n)) → ℝ := λ ⟨n, s⟩, ∥(p n).restr s rfl x∥ * r ^ s.card,
have SAnorm : summable Anorm,
{ refine summable_of_norm_bounded _ SBnorm (λ i, _),
rcases i with ⟨n, s⟩,
suffices H : ∥(p n).restr s rfl x∥ * (r : ℝ) ^ s.card ≤
(∥p n∥ * ∥x∥ ^ (n - finset.card s) * r ^ s.card),
{ have : ∥(r: ℝ)∥ = r, by rw [real.norm_eq_abs, abs_of_nonneg (nnreal.coe_nonneg _)],
simpa [Anorm, Bnorm, this] using H },
exact mul_le_mul_of_nonneg_right ((p n).norm_restr s rfl x)
(pow_nonneg (nnreal.coe_nonneg _) _) },
let e : (Σ (n : ℕ), finset (fin n)) ≃
(Σ (k : ℕ) (n : ℕ), {s : finset (fin n) // finset.card s = k}) :=
{ to_fun := λ ⟨n, s⟩, ⟨s.card, n, s, rfl⟩,
inv_fun := λ ⟨k, n, s, hs⟩, ⟨n, s⟩,
left_inv := λ ⟨n, s⟩, rfl,
right_inv := λ ⟨k, n, s, hs⟩, by { induction hs, refl } },
rw ← e.summable_iff,
convert SAnorm,
ext ⟨n, s⟩,
refl
end
/-- An auxiliary definition for `change_origin_radius`. -/
def change_origin_summable_aux_j (k : ℕ) :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k})
→ (Σ (k : ℕ) (n : ℕ), {s : finset (fin n) // finset.card s = k}) :=
λ ⟨n, s, hs⟩, ⟨k, n, s, hs⟩
lemma change_origin_summable_aux_j_injective (k : ℕ) :
function.injective (change_origin_summable_aux_j k) :=
begin
rintros ⟨_, ⟨_, _⟩⟩ ⟨_, ⟨_, _⟩⟩ a,
simp only [change_origin_summable_aux_j, true_and, eq_self_iff_true, heq_iff_eq,
  sigma.mk.inj_iff] at a,
rcases a with ⟨rfl, a⟩,
simpa using a,
end
/-- Auxiliary lemma controlling the summability of the sequence appearing in the definition of
`p.change_origin`, third version. -/
lemma change_origin_summable_aux3 (k : ℕ) (h : (nnnorm x : ennreal) < p.radius) :
@summable ℝ _ _ _ (λ ⟨n, s, hs⟩, ∥(p n).restr s hs x∥ :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → ℝ) :=
begin
obtain ⟨r, rpos, hr⟩ : ∃ (r : nnreal), 0 < r ∧ ((nnnorm x + r) : ennreal) < p.radius :=
ennreal.lt_iff_exists_add_pos_lt.mp h,
have S : @summable ℝ _ _ _ ((λ ⟨n, s, hs⟩, ∥(p n).restr s hs x∥ * (r : ℝ) ^ k) :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → ℝ),
{ convert (p.change_origin_summable_aux2 hr).comp_injective
(change_origin_summable_aux_j_injective k),
-- again, cleanup that could be done by `tidy`:
ext ⟨_, ⟨_, _⟩⟩, refl },
have : (r : ℝ)^k ≠ 0, by simp [pow_ne_zero, nnreal.coe_eq_zero, ne_of_gt rpos],
apply (summable_mul_right_iff this).2,
convert S,
-- again, cleanup that could be done by `tidy`:
ext ⟨_, ⟨_, _⟩⟩, refl,
end
-- FIXME this causes a deterministic timeout with `-T50000`
/-- The radius of convergence of `p.change_origin x` is at least `p.radius - ∥x∥`. In other words,
`p.change_origin x` is well defined on the largest ball contained in the original ball of
convergence. -/
lemma change_origin_radius : p.radius - nnnorm x ≤ (p.change_origin x).radius :=
begin
by_cases h : p.radius ≤ nnnorm x,
{ have : radius p - ↑(nnnorm x) = 0 := ennreal.sub_eq_zero_of_le h,
rw this,
exact zero_le _ },
replace h : (nnnorm x : ennreal) < p.radius, by simpa using h,
refine le_of_forall_ge_of_dense (λ r hr, _),
cases r, { simpa using hr },
rw [ennreal.lt_sub_iff_add_lt, add_comm] at hr,
let A : (Σ (k : ℕ) (n : ℕ), {s : finset (fin n) // finset.card s = k}) → ℝ :=
λ ⟨k, n, s, hs⟩, ∥(p n).restr s hs x∥ * (r : ℝ) ^ k,
have SA : summable A := p.change_origin_summable_aux2 hr,
have A_nonneg : ∀ i, 0 ≤ A i,
{ rintros ⟨k, n, s, hs⟩,
change 0 ≤ ∥(p n).restr s hs x∥ * (r : ℝ) ^ k,
refine mul_nonneg (norm_nonneg _) (pow_nonneg (nnreal.coe_nonneg _) _) },
have tsum_nonneg : 0 ≤ tsum A := tsum_nonneg A_nonneg,
apply le_radius_of_bound _ (nnreal.of_real (tsum A)) (λ k, _),
rw [← nnreal.coe_le_coe, nnreal.coe_mul, nnreal.coe_pow, coe_nnnorm,
nnreal.coe_of_real _ tsum_nonneg],
calc ∥change_origin p x k∥ * ↑r ^ k
= ∥@tsum (E [×k]→L[𝕜] F) _ _ _ (λ i, (p i.1).restr i.2.1 i.2.2 x :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → (E [×k]→L[𝕜] F))∥ * ↑r ^ k : rfl
... ≤ tsum (λ i, ∥(p i.1).restr i.2.1 i.2.2 x∥ :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → ℝ) * ↑r ^ k :
begin
apply mul_le_mul_of_nonneg_right _ (pow_nonneg (nnreal.coe_nonneg _) _),
apply norm_tsum_le_tsum_norm,
convert p.change_origin_summable_aux3 k h,
ext a,
tidy
end
... = tsum (λ i, ∥(p i.1).restr i.2.1 i.2.2 x∥ * ↑r ^ k :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → ℝ) :
by { rw tsum_mul_right, convert p.change_origin_summable_aux3 k h, tidy }
... = tsum (A ∘ change_origin_summable_aux_j k) : by { congr, tidy }
... ≤ tsum A : tsum_comp_le_tsum_of_inj SA A_nonneg (change_origin_summable_aux_j_injective k)
end
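-- In particular, if `∥x∥ < p.radius` then `p.radius - ∥x∥` is positive, so `p.change_origin x`
-- has positive radius of convergence.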
-- From this point on, assume that the space is complete, to make sure that series that converge
-- in norm also converge in `F`.
variable [complete_space F]
/-- The `k`-th coefficient of `p.change_origin` is the sum of a summable series. -/
lemma change_origin_has_sum (k : ℕ) (h : (nnnorm x : ennreal) < p.radius) :
@has_sum (E [×k]→L[𝕜] F) _ _ _ ((λ i, (p i.1).restr i.2.1 i.2.2 x) :
(Σ (n : ℕ), {s : finset (fin n) // finset.card s = k}) → (E [×k]→L[𝕜] F))
(p.change_origin x k) :=
begin
apply summable.has_sum,
apply summable_of_summable_norm,
convert p.change_origin_summable_aux3 k h,
tidy
end
/-- Summing the series `p.change_origin x` at a point `y` gives back `p (x + y)`. -/
theorem change_origin_eval (h : (nnnorm x + nnnorm y : ennreal) < p.radius) :
has_sum ((λk:ℕ, p.change_origin x k (λ (i : fin k), y))) (p.sum (x + y)) :=
begin
/- The series on the left is a series of series. If we order the terms differently, we get back
to `p.sum (x + y)`, in which the `n`-th term is expanded by multilinearity. In the proof below,
the term on the left is the sum of a series of terms `A`, the sum on the right is the sum of a
series of terms `B`, and we show that they correspond to each other by reordering to conclude the
proof. -/
have radius_pos : 0 < p.radius := lt_of_le_of_lt (zero_le _) h,
-- `A` is the terms of the series whose sum gives the series for `p.change_origin`
let A : (Σ (k : ℕ) (n : ℕ), {s : finset (fin n) // s.card = k}) → F :=
λ ⟨k, n, s, hs⟩, (p n).restr s hs x (λ(i : fin k), y),
-- `B` is the terms of the series whose sum gives `p (x + y)`, after expansion by multilinearity.
let B : (Σ (n : ℕ), finset (fin n)) → F := λ ⟨n, s⟩, (p n).restr s rfl x (λ (i : fin s.card), y),
let Bnorm : (Σ (n : ℕ), finset (fin n)) → ℝ := λ ⟨n, s⟩, ∥p n∥ * ∥x∥ ^ (n - s.card) * ∥y∥ ^ s.card,
have SBnorm : summable Bnorm, by convert p.change_origin_summable_aux1 h,
have SB : summable B,
{ refine summable_of_norm_bounded _ SBnorm _,
rintros ⟨n, s⟩,
calc ∥(p n).restr s rfl x (λ (i : fin s.card), y)∥
≤ ∥(p n).restr s rfl x∥ * ∥y∥ ^ s.card :
begin
convert ((p n).restr s rfl x).le_op_norm (λ (i : fin s.card), y),
simp [(finset.prod_const (∥y∥))],
end
... ≤ (∥p n∥ * ∥x∥ ^ (n - s.card)) * ∥y∥ ^ s.card :
mul_le_mul_of_nonneg_right ((p n).norm_restr _ _ _) (pow_nonneg (norm_nonneg _) _) },
-- Check that indeed the sum of `B` is `p (x + y)`.
have has_sum_B : has_sum B (p.sum (x + y)),
{ have K1 : ∀ n, has_sum (λ (s : finset (fin n)), B ⟨n, s⟩) (p n (λ (i : fin n), x + y)),
{ assume n,
have : (p n) (λ (i : fin n), y + x) = ∑ s : finset (fin n),
p n (finset.piecewise s (λ (i : fin n), y) (λ (i : fin n), x)) :=
(p n).map_add_univ (λ i, y) (λ i, x),
simp [add_comm y x] at this,
rw this,
exact has_sum_fintype _ },
have K2 : has_sum (λ (n : ℕ), (p n) (λ (i : fin n), x + y)) (p.sum (x + y)),
{ have : x + y ∈ emetric.ball (0 : E) p.radius,
{ apply lt_of_le_of_lt _ h,
rw [edist_eq_coe_nnnorm, ← ennreal.coe_add, ennreal.coe_le_coe],
exact norm_add_le x y },
simpa using (p.has_fpower_series_on_ball radius_pos).has_sum this },
exact has_sum.sigma_of_has_sum K2 K1 SB },
-- Deduce that the sum of `A` is also `p (x + y)`, as the terms `A` and `B` are the same up to
-- reordering
have has_sum_A : has_sum A (p.sum (x + y)),
{ let e : (Σ (n : ℕ), finset (fin n)) ≃
(Σ (k : ℕ) (n : ℕ), {s : finset (fin n) // finset.card s = k}) :=
{ to_fun := λ ⟨n, s⟩, ⟨s.card, n, s, rfl⟩,
inv_fun := λ ⟨k, n, s, hs⟩, ⟨n, s⟩,
left_inv := λ ⟨n, s⟩, rfl,
right_inv := λ ⟨k, n, s, hs⟩, by { induction hs, refl } },
have : A ∘ e = B, by { ext ⟨⟩, refl },
rw ← e.has_sum_iff,
convert has_sum_B },
-- Summing `A ⟨k, c⟩` with fixed `k` and varying `c` is exactly the `k`-th term in the series
-- defining `p.change_origin`, by definition
have J : ∀k, has_sum (λ c, A ⟨k, c⟩) (p.change_origin x k (λ(i : fin k), y)),
{ assume k,
have : (nnnorm x : ennreal) < radius p := lt_of_le_of_lt (le_add_right (le_refl _)) h,
convert continuous_multilinear_map.has_sum_eval (p.change_origin_has_sum k this)
(λ(i : fin k), y),
ext i,
tidy },
exact has_sum_A.sigma J
end
end formal_multilinear_series
section
variables [complete_space F] {f : E → F} {p : formal_multilinear_series 𝕜 E F} {x y : E}
{r : ennreal}
/-- If a function admits a power series expansion `p` on a ball `B (x, r)`, then it also admits a
power series on any subball of this ball (even with a different center), given by
`p.change_origin`. -/
theorem has_fpower_series_on_ball.change_origin
(hf : has_fpower_series_on_ball f p x r) (h : (nnnorm y : ennreal) < r) :
has_fpower_series_on_ball f (p.change_origin y) (x + y) (r - nnnorm y) :=
{ r_le := begin
apply le_trans _ p.change_origin_radius,
exact ennreal.sub_le_sub hf.r_le (le_refl _)
end,
r_pos := by simp [h],
has_sum := begin
assume z hz,
have A : (nnnorm y : ennreal) + nnnorm z < r,
{ have : edist z 0 < r - ↑(nnnorm y) := hz,
rwa [edist_eq_coe_nnnorm, ennreal.lt_sub_iff_add_lt, add_comm] at this },
convert p.change_origin_eval (lt_of_lt_of_le A hf.r_le),
have : y + z ∈ emetric.ball (0 : E) r := calc
edist (y + z) 0 ≤ ↑(nnnorm y) + ↑(nnnorm z) :
by { rw [edist_eq_coe_nnnorm, ← ennreal.coe_add, ennreal.coe_le_coe], exact norm_add_le y z }
... < r : A,
simpa only [add_assoc] using hf.sum this
end }
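-- Consequently, every point of the original ball of convergence is a point of analyticity; this
-- is recorded in `has_fpower_series_on_ball.analytic_at_of_mem` and `is_open_analytic_at` below.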
lemma has_fpower_series_on_ball.analytic_at_of_mem
(hf : has_fpower_series_on_ball f p x r) (h : y ∈ emetric.ball x r) :
analytic_at 𝕜 f y :=
begin
have : (nnnorm (y - x) : ennreal) < r, by simpa [edist_eq_coe_nnnorm_sub] using h,
have := hf.change_origin this,
rw [add_sub_cancel'_right] at this,
exact this.analytic_at
end
variables (𝕜 f)
lemma is_open_analytic_at : is_open {x | analytic_at 𝕜 f x} :=
begin
rw is_open_iff_forall_mem_open,
assume x hx,
rcases hx with ⟨p, r, hr⟩,
refine ⟨emetric.ball x r, λ y hy, hr.analytic_at_of_mem hy, emetric.is_open_ball, _⟩,
simp only [edist_self, emetric.mem_ball, hr.r_pos]
end
variables {𝕜 f}
end
| /tests/lean/run/e11.lean | ["Apache-2.0"] | permissive | soonhokong/lean | refs/heads/master | Lean | UTF-8 | 914 bytes | lean |
prelude
precedence `+`:65
namespace nat
constant nat : Type.{1}
constant add : nat → nat → nat
infixl + := add
end nat
namespace int
open nat (nat)
constant int : Type.{1}
constant add : int → int → int
infixl + := add
constant of_nat : nat → int
attribute of_nat [coercion]
end int
section
-- Open "only" the notation and declarations from the namespaces nat and int
open [notation] nat
open [notation] int
open [decl] nat
open [decl] int
variables n m : nat
variables i j : int
check n + m
check i + j
-- The following check does not work, since we have not opened the coercions
-- check n + i
-- Here is a possible trick for this kind of configuration
definition add_ni (a : nat) (b : int) := (of_nat a) + b
definition add_in (a : int) (b : nat) := a + (of_nat b)
infixl + := add_ni
infixl + := add_in
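-- With both `+` notations above in scope, the mixed additions below elaborate through `add_in`
-- and `add_ni`: overload resolution picks the definition whose argument types match, so `i + n`
-- uses `add_in` and `n + i` uses `add_ni`.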
check add_ni
check i + n
check n + i
end
| /src/measure_theory/function/l1_space.lean | ["Apache-2.0"] | permissive | jcommelin/mathlib | refs/heads/master | Lean | UTF-8 | 51,674 bytes | lean |
/-
Copyright (c) 2019 Zhouhang Zhou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Zhouhang Zhou
-/
import measure_theory.function.lp_order
/-!
# Integrable functions and `L¹` space
In the first part of this file, the predicate `integrable` is defined and basic properties of
integrable functions are proved.
Such a predicate is already available under the name `mem_ℒp 1`. We give a direct definition which
is easier to use, and show that it is equivalent to `mem_ℒp 1`.
In the second part, we establish an API between `integrable` and the space `L¹` of equivalence
classes of integrable functions, already defined as a special case of `L^p` spaces for `p = 1`.
## Notation
* `α →₁[μ] β` is the type of the `L¹` space, where `α` is a `measure_space` and `β` is a
`normed_add_comm_group` with a `second_countable_topology`. `f : α →ₘ β` is a "function" in `L¹`.
In comments, `[f]` is also used to denote an `L¹` function.
`₁` can be typed as `\1`.
## Main definitions
* Let `f : α → β` be a function, where `α` is a `measure_space` and `β` a `normed_add_comm_group`.
Then `has_finite_integral f` means `(∫⁻ a, ∥f a∥₊) < ∞`.
* `f : α → β` is called `integrable` if `f` is `ae_strongly_measurable` and
  `has_finite_integral f` holds.
## Implementation notes
To prove something for an arbitrary integrable function, a useful theorem is
`integrable.induction` in the file `set_integral`.
## Tags
integrable, function space, l1
-/
noncomputable theory
open_locale classical topological_space big_operators ennreal measure_theory nnreal
open set filter topological_space ennreal emetric measure_theory
variables {α β γ δ : Type*} {m : measurable_space α} {μ ν : measure α} [measurable_space δ]
variables [normed_add_comm_group β]
variables [normed_add_comm_group γ]
namespace measure_theory
/-! ### Some results about the Lebesgue integral involving a normed group -/
lemma lintegral_nnnorm_eq_lintegral_edist (f : α → β) :
∫⁻ a, ∥f a∥₊ ∂μ = ∫⁻ a, edist (f a) 0 ∂μ :=
by simp only [edist_eq_coe_nnnorm]
lemma lintegral_norm_eq_lintegral_edist (f : α → β) :
∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ = ∫⁻ a, edist (f a) 0 ∂μ :=
by simp only [of_real_norm_eq_coe_nnnorm, edist_eq_coe_nnnorm]
lemma lintegral_edist_triangle {f g h : α → β}
(hf : ae_strongly_measurable f μ) (hh : ae_strongly_measurable h μ) :
∫⁻ a, edist (f a) (g a) ∂μ ≤ ∫⁻ a, edist (f a) (h a) ∂μ + ∫⁻ a, edist (g a) (h a) ∂μ :=
begin
rw ← lintegral_add_left' (hf.edist hh),
refine lintegral_mono (λ a, _),
apply edist_triangle_right
end
lemma lintegral_nnnorm_zero : ∫⁻ a : α, ∥(0 : β)∥₊ ∂μ = 0 := by simp
lemma lintegral_nnnorm_add_left
{f : α → β} (hf : ae_strongly_measurable f μ) (g : α → γ) :
∫⁻ a, ∥f a∥₊ + ∥g a∥₊ ∂μ = ∫⁻ a, ∥f a∥₊ ∂μ + ∫⁻ a, ∥g a∥₊ ∂μ :=
lintegral_add_left' hf.ennnorm _
lemma lintegral_nnnorm_add_right
(f : α → β) {g : α → γ} (hg : ae_strongly_measurable g μ) :
∫⁻ a, ∥f a∥₊ + ∥g a∥₊ ∂μ = ∫⁻ a, ∥f a∥₊ ∂μ + ∫⁻ a, ∥g a∥₊ ∂μ :=
lintegral_add_right' _ hg.ennnorm
lemma lintegral_nnnorm_neg {f : α → β} :
∫⁻ a, ∥(-f) a∥₊ ∂μ = ∫⁻ a, ∥f a∥₊ ∂μ :=
by simp only [pi.neg_apply, nnnorm_neg]
/-! ### The predicate `has_finite_integral` -/
/-- `has_finite_integral f μ` means that the integral `∫⁻ a, ∥f a∥ ∂μ` is finite.
`has_finite_integral f` means `has_finite_integral f volume`. -/
def has_finite_integral {m : measurable_space α} (f : α → β) (μ : measure α . volume_tac) : Prop :=
∫⁻ a, ∥f a∥₊ ∂μ < ∞
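-- For illustration, any a.e.-bounded function on a finite measure space has finite integral;
-- a usage sketch of `has_finite_integral_of_bounded` (proved below), kept as a comment:
-- example [is_finite_measure μ] {f : α → β} {C : ℝ} (h : ∀ᵐ a ∂μ, ∥f a∥ ≤ C) :
--   has_finite_integral f μ := has_finite_integral_of_bounded h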
lemma has_finite_integral_iff_norm (f : α → β) :
has_finite_integral f μ ↔ ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ < ∞ :=
by simp only [has_finite_integral, of_real_norm_eq_coe_nnnorm]
lemma has_finite_integral_iff_edist (f : α → β) :
has_finite_integral f μ ↔ ∫⁻ a, edist (f a) 0 ∂μ < ∞ :=
by simp only [has_finite_integral_iff_norm, edist_dist, dist_zero_right]
lemma has_finite_integral_iff_of_real {f : α → ℝ} (h : 0 ≤ᵐ[μ] f) :
has_finite_integral f μ ↔ ∫⁻ a, ennreal.of_real (f a) ∂μ < ∞ :=
by rw [has_finite_integral, lintegral_nnnorm_eq_of_ae_nonneg h]
lemma has_finite_integral_iff_of_nnreal {f : α → ℝ≥0} :
has_finite_integral (λ x, (f x : ℝ)) μ ↔ ∫⁻ a, f a ∂μ < ∞ :=
by simp [has_finite_integral_iff_norm]
lemma has_finite_integral.mono {f : α → β} {g : α → γ} (hg : has_finite_integral g μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ ≤ ∥g a∥) : has_finite_integral f μ :=
begin
simp only [has_finite_integral_iff_norm] at *,
calc ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ ≤ ∫⁻ (a : α), (ennreal.of_real ∥g a∥) ∂μ :
lintegral_mono_ae (h.mono $ assume a h, of_real_le_of_real h)
... < ∞ : hg
end
lemma has_finite_integral.mono' {f : α → β} {g : α → ℝ} (hg : has_finite_integral g μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ ≤ g a) : has_finite_integral f μ :=
hg.mono $ h.mono $ λ x hx, le_trans hx (le_abs_self _)
lemma has_finite_integral.congr' {f : α → β} {g : α → γ} (hf : has_finite_integral f μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) :
has_finite_integral g μ :=
hf.mono $ eventually_eq.le $ eventually_eq.symm h
lemma has_finite_integral_congr' {f : α → β} {g : α → γ} (h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) :
has_finite_integral f μ ↔ has_finite_integral g μ :=
⟨λ hf, hf.congr' h, λ hg, hg.congr' $ eventually_eq.symm h⟩
lemma has_finite_integral.congr {f g : α → β} (hf : has_finite_integral f μ) (h : f =ᵐ[μ] g) :
has_finite_integral g μ :=
hf.congr' $ h.fun_comp norm
lemma has_finite_integral_congr {f g : α → β} (h : f =ᵐ[μ] g) :
has_finite_integral f μ ↔ has_finite_integral g μ :=
has_finite_integral_congr' $ h.fun_comp norm
lemma has_finite_integral_const_iff {c : β} :
has_finite_integral (λ x : α, c) μ ↔ c = 0 ∨ μ univ < ∞ :=
by simp [has_finite_integral, lintegral_const, lt_top_iff_ne_top, or_iff_not_imp_left]
lemma has_finite_integral_const [is_finite_measure μ] (c : β) :
has_finite_integral (λ x : α, c) μ :=
has_finite_integral_const_iff.2 (or.inr $ measure_lt_top _ _)
lemma has_finite_integral_of_bounded [is_finite_measure μ] {f : α → β} {C : ℝ}
(hC : ∀ᵐ a ∂μ, ∥f a∥ ≤ C) : has_finite_integral f μ :=
(has_finite_integral_const C).mono' hC
lemma has_finite_integral.mono_measure {f : α → β} (h : has_finite_integral f ν) (hμ : μ ≤ ν) :
has_finite_integral f μ :=
lt_of_le_of_lt (lintegral_mono' hμ le_rfl) h
lemma has_finite_integral.add_measure {f : α → β} (hμ : has_finite_integral f μ)
(hν : has_finite_integral f ν) : has_finite_integral f (μ + ν) :=
begin
simp only [has_finite_integral, lintegral_add_measure] at *,
exact add_lt_top.2 ⟨hμ, hν⟩
end
lemma has_finite_integral.left_of_add_measure {f : α → β} (h : has_finite_integral f (μ + ν)) :
has_finite_integral f μ :=
h.mono_measure $ measure.le_add_right $ le_rfl
lemma has_finite_integral.right_of_add_measure {f : α → β} (h : has_finite_integral f (μ + ν)) :
has_finite_integral f ν :=
h.mono_measure $ measure.le_add_left $ le_rfl
@[simp] lemma has_finite_integral_add_measure {f : α → β} :
has_finite_integral f (μ + ν) ↔ has_finite_integral f μ ∧ has_finite_integral f ν :=
⟨λ h, ⟨h.left_of_add_measure, h.right_of_add_measure⟩, λ h, h.1.add_measure h.2⟩
lemma has_finite_integral.smul_measure {f : α → β} (h : has_finite_integral f μ) {c : ℝ≥0∞}
(hc : c ≠ ∞) : has_finite_integral f (c • μ) :=
begin
simp only [has_finite_integral, lintegral_smul_measure] at *,
exact mul_lt_top hc h.ne
end
@[simp] lemma has_finite_integral_zero_measure {m : measurable_space α} (f : α → β) :
has_finite_integral f (0 : measure α) :=
by simp only [has_finite_integral, lintegral_zero_measure, with_top.zero_lt_top]
variables (α β μ)
@[simp] lemma has_finite_integral_zero : has_finite_integral (λa:α, (0:β)) μ :=
by simp [has_finite_integral]
variables {α β μ}
lemma has_finite_integral.neg {f : α → β} (hfi : has_finite_integral f μ) :
has_finite_integral (-f) μ :=
by simpa [has_finite_integral] using hfi
@[simp] lemma has_finite_integral_neg_iff {f : α → β} :
has_finite_integral (-f) μ ↔ has_finite_integral f μ :=
⟨λ h, neg_neg f ▸ h.neg, has_finite_integral.neg⟩
lemma has_finite_integral.norm {f : α → β} (hfi : has_finite_integral f μ) :
has_finite_integral (λa, ∥f a∥) μ :=
have eq : (λa, (nnnorm ∥f a∥ : ℝ≥0∞)) = λa, (∥f a∥₊ : ℝ≥0∞),
by { funext, rw nnnorm_norm },
by { rwa [has_finite_integral, eq] }
lemma has_finite_integral_norm_iff (f : α → β) :
has_finite_integral (λa, ∥f a∥) μ ↔ has_finite_integral f μ :=
has_finite_integral_congr' $ eventually_of_forall $ λ x, norm_norm (f x)
lemma has_finite_integral_to_real_of_lintegral_ne_top
{f : α → ℝ≥0∞} (hf : ∫⁻ x, f x ∂μ ≠ ∞) :
has_finite_integral (λ x, (f x).to_real) μ :=
begin
have : ∀ x, (∥(f x).to_real∥₊ : ℝ≥0∞) =
@coe ℝ≥0 ℝ≥0∞ _ (⟨(f x).to_real, ennreal.to_real_nonneg⟩ : ℝ≥0),
{ intro x, rw real.nnnorm_of_nonneg },
simp_rw [has_finite_integral, this],
refine lt_of_le_of_lt (lintegral_mono (λ x, _)) (lt_top_iff_ne_top.2 hf),
by_cases hfx : f x = ∞,
{ simp [hfx] },
{ lift f x to ℝ≥0 using hfx with fx h,
    simp [← h] }
end
lemma is_finite_measure_with_density_of_real {f : α → ℝ} (hfi : has_finite_integral f μ) :
is_finite_measure (μ.with_density (λ x, ennreal.of_real $ f x)) :=
begin
refine is_finite_measure_with_density ((lintegral_mono $ λ x, _).trans_lt hfi).ne,
exact real.of_real_le_ennnorm (f x)
end
section dominated_convergence
variables {F : ℕ → α → β} {f : α → β} {bound : α → ℝ}
lemma all_ae_of_real_F_le_bound (h : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a) :
∀ n, ∀ᵐ a ∂μ, ennreal.of_real ∥F n a∥ ≤ ennreal.of_real (bound a) :=
λn, (h n).mono $ λ a h, ennreal.of_real_le_of_real h
lemma all_ae_tendsto_of_real_norm (h : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top $ 𝓝 $ f a) :
∀ᵐ a ∂μ, tendsto (λn, ennreal.of_real ∥F n a∥) at_top $ 𝓝 $ ennreal.of_real ∥f a∥ :=
h.mono $
λ a h, tendsto_of_real $ tendsto.comp (continuous.tendsto continuous_norm _) h
lemma all_ae_of_real_f_le_bound (h_bound : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a)
(h_lim : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top (𝓝 (f a))) :
∀ᵐ a ∂μ, ennreal.of_real ∥f a∥ ≤ ennreal.of_real (bound a) :=
begin
have F_le_bound := all_ae_of_real_F_le_bound h_bound,
rw ← ae_all_iff at F_le_bound,
apply F_le_bound.mp ((all_ae_tendsto_of_real_norm h_lim).mono _),
assume a tendsto_norm F_le_bound,
exact le_of_tendsto' tendsto_norm (F_le_bound)
end
lemma has_finite_integral_of_dominated_convergence {F : ℕ → α → β} {f : α → β} {bound : α → ℝ}
(bound_has_finite_integral : has_finite_integral bound μ)
(h_bound : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a)
(h_lim : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top (𝓝 (f a))) :
has_finite_integral f μ :=
/- `∥F n a∥ ≤ bound a` and `∥F n a∥ --> ∥f a∥` imply `∥f a∥ ≤ bound a`,
and so `∫ ∥f∥ ≤ ∫ bound < ∞` since `bound` has finite integral. -/
begin
rw has_finite_integral_iff_norm,
calc ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ ≤ ∫⁻ a, ennreal.of_real (bound a) ∂μ :
lintegral_mono_ae $ all_ae_of_real_f_le_bound h_bound h_lim
... < ∞ :
begin
rw ← has_finite_integral_iff_of_real,
{ exact bound_has_finite_integral },
exact (h_bound 0).mono (λ a h, le_trans (norm_nonneg _) h)
end
end
lemma tendsto_lintegral_norm_of_dominated_convergence
{F : ℕ → α → β} {f : α → β} {bound : α → ℝ}
(F_measurable : ∀ n, ae_strongly_measurable (F n) μ)
(bound_has_finite_integral : has_finite_integral bound μ)
(h_bound : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a)
(h_lim : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top (𝓝 (f a))) :
tendsto (λn, ∫⁻ a, (ennreal.of_real ∥F n a - f a∥) ∂μ) at_top (𝓝 0) :=
have f_measurable : ae_strongly_measurable f μ :=
ae_strongly_measurable_of_tendsto_ae _ F_measurable h_lim,
let b := λ a, 2 * ennreal.of_real (bound a) in
/- `∥F n a∥ ≤ bound a` and `F n a --> f a` imply `∥f a∥ ≤ bound a`, and thus by the
triangle inequality we have `∥F n a - f a∥ ≤ 2 * (bound a)`. -/
have hb : ∀ n, ∀ᵐ a ∂μ, ennreal.of_real ∥F n a - f a∥ ≤ b a,
begin
assume n,
filter_upwards [all_ae_of_real_F_le_bound h_bound n, all_ae_of_real_f_le_bound h_bound h_lim]
with a h₁ h₂,
calc ennreal.of_real ∥F n a - f a∥ ≤ (ennreal.of_real ∥F n a∥) + (ennreal.of_real ∥f a∥) :
begin
rw [← ennreal.of_real_add],
apply of_real_le_of_real,
{ apply norm_sub_le }, { exact norm_nonneg _ }, { exact norm_nonneg _ }
end
... ≤ (ennreal.of_real (bound a)) + (ennreal.of_real (bound a)) : add_le_add h₁ h₂
... = b a : by rw ← two_mul
end,
/- On the other hand, `F n a --> f a` implies that `∥F n a - f a∥ --> 0` -/
have h : ∀ᵐ a ∂μ, tendsto (λ n, ennreal.of_real ∥F n a - f a∥) at_top (𝓝 0),
begin
rw ← ennreal.of_real_zero,
refine h_lim.mono (λ a h, (continuous_of_real.tendsto _).comp _),
rwa ← tendsto_iff_norm_tendsto_zero
end,
/- Therefore, by the dominated convergence theorem for nonnegative integration, we have
`∫ ∥f a - F n a∥ --> 0`. -/
begin
suffices h : tendsto (λn, ∫⁻ a, (ennreal.of_real ∥F n a - f a∥) ∂μ) at_top (𝓝 (∫⁻ (a:α), 0 ∂μ)),
{ rwa lintegral_zero at h },
-- Using the dominated convergence theorem.
refine tendsto_lintegral_of_dominated_convergence' _ _ hb _ _,
-- Show `λa, ∥f a - F n a∥` is almost everywhere measurable for all `n`
{ exact λ n, measurable_of_real.comp_ae_measurable
((F_measurable n).sub f_measurable).norm.ae_measurable },
-- Show `2 * bound` is has_finite_integral
{ rw has_finite_integral_iff_of_real at bound_has_finite_integral,
{ calc ∫⁻ a, b a ∂μ = 2 * ∫⁻ a, ennreal.of_real (bound a) ∂μ :
by { rw lintegral_const_mul', exact coe_ne_top }
... ≠ ∞ : mul_ne_top coe_ne_top bound_has_finite_integral.ne },
filter_upwards [h_bound 0] with _ h using le_trans (norm_nonneg _) h },
-- Show `∥f a - F n a∥ --> 0`
{ exact h }
end
end dominated_convergence
section pos_part
/-! Lemmas used for defining the positive part of an `L¹` function -/
lemma has_finite_integral.max_zero {f : α → ℝ} (hf : has_finite_integral f μ) :
has_finite_integral (λa, max (f a) 0) μ :=
hf.mono $ eventually_of_forall $ λ x, by simp [abs_le, le_abs_self]
lemma has_finite_integral.min_zero {f : α → ℝ} (hf : has_finite_integral f μ) :
has_finite_integral (λa, min (f a) 0) μ :=
hf.mono $ eventually_of_forall $ λ x,
by simp [abs_le, neg_le, neg_le_abs_self, abs_eq_max_neg, le_total]
end pos_part
section normed_space
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β]
lemma has_finite_integral.smul (c : 𝕜) {f : α → β} : has_finite_integral f μ →
has_finite_integral (c • f) μ :=
begin
simp only [has_finite_integral], assume hfi,
calc
∫⁻ (a : α), ∥c • f a∥₊ ∂μ = ∫⁻ (a : α), (∥c∥₊) * ∥f a∥₊ ∂μ :
by simp only [nnnorm_smul, ennreal.coe_mul]
... < ∞ :
begin
rw lintegral_const_mul',
exacts [mul_lt_top coe_ne_top hfi.ne, coe_ne_top]
end
end
lemma has_finite_integral_smul_iff {c : 𝕜} (hc : c ≠ 0) (f : α → β) :
has_finite_integral (c • f) μ ↔ has_finite_integral f μ :=
begin
split,
{ assume h,
simpa only [smul_smul, inv_mul_cancel hc, one_smul] using h.smul c⁻¹ },
exact has_finite_integral.smul _
end
lemma has_finite_integral.const_mul {f : α → ℝ} (h : has_finite_integral f μ) (c : ℝ) :
has_finite_integral (λ x, c * f x) μ :=
(has_finite_integral.smul c h : _)
lemma has_finite_integral.mul_const {f : α → ℝ} (h : has_finite_integral f μ) (c : ℝ) :
has_finite_integral (λ x, f x * c) μ :=
by simp_rw [mul_comm, h.const_mul _]
end normed_space
/-! ### The predicate `integrable` -/
-- variables [measurable_space β] [measurable_space γ] [measurable_space δ]
/-- `integrable f μ` means that `f` is `ae_strongly_measurable` and that the integral
`∫⁻ a, ∥f a∥ ∂μ` is finite. `integrable f` means `integrable f volume`. -/
def integrable {α} {m : measurable_space α} (f : α → β) (μ : measure α . volume_tac) : Prop :=
ae_strongly_measurable f μ ∧ has_finite_integral f μ
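-- From a proof `hf : integrable f μ` the two components are available as `hf.1` (a.e. strong
-- measurability) and `hf.2` (finite integral); named accessors `integrable.ae_strongly_measurable`
-- and `integrable.has_finite_integral` are provided below.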
lemma mem_ℒp_one_iff_integrable {f : α → β} : mem_ℒp f 1 μ ↔ integrable f μ :=
by simp_rw [integrable, has_finite_integral, mem_ℒp, snorm_one_eq_lintegral_nnnorm]
lemma integrable.ae_strongly_measurable {f : α → β} (hf : integrable f μ) :
ae_strongly_measurable f μ :=
hf.1
lemma integrable.ae_measurable [measurable_space β] [borel_space β]
{f : α → β} (hf : integrable f μ) :
ae_measurable f μ :=
hf.ae_strongly_measurable.ae_measurable
lemma integrable.has_finite_integral {f : α → β} (hf : integrable f μ) : has_finite_integral f μ :=
hf.2
lemma integrable.mono {f : α → β} {g : α → γ}
(hg : integrable g μ) (hf : ae_strongly_measurable f μ) (h : ∀ᵐ a ∂μ, ∥f a∥ ≤ ∥g a∥) :
integrable f μ :=
⟨hf, hg.has_finite_integral.mono h⟩
lemma integrable.mono' {f : α → β} {g : α → ℝ}
(hg : integrable g μ) (hf : ae_strongly_measurable f μ) (h : ∀ᵐ a ∂μ, ∥f a∥ ≤ g a) :
integrable f μ :=
⟨hf, hg.has_finite_integral.mono' h⟩
lemma integrable.congr' {f : α → β} {g : α → γ}
(hf : integrable f μ) (hg : ae_strongly_measurable g μ) (h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) :
integrable g μ :=
⟨hg, hf.has_finite_integral.congr' h⟩
lemma integrable_congr' {f : α → β} {g : α → γ}
(hf : ae_strongly_measurable f μ) (hg : ae_strongly_measurable g μ) (h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) :
integrable f μ ↔ integrable g μ :=
⟨λ h2f, h2f.congr' hg h, λ h2g, h2g.congr' hf $ eventually_eq.symm h⟩
lemma integrable.congr {f g : α → β} (hf : integrable f μ) (h : f =ᵐ[μ] g) :
integrable g μ :=
⟨hf.1.congr h, hf.2.congr h⟩
lemma integrable_congr {f g : α → β} (h : f =ᵐ[μ] g) :
integrable f μ ↔ integrable g μ :=
⟨λ hf, hf.congr h, λ hg, hg.congr h.symm⟩
lemma integrable_const_iff {c : β} : integrable (λ x : α, c) μ ↔ c = 0 ∨ μ univ < ∞ :=
begin
have : ae_strongly_measurable (λ (x : α), c) μ := ae_strongly_measurable_const,
rw [integrable, and_iff_right this, has_finite_integral_const_iff]
end
lemma integrable_const [is_finite_measure μ] (c : β) : integrable (λ x : α, c) μ :=
integrable_const_iff.2 $ or.inr $ measure_lt_top _ _
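-- A usage sketch, kept as a comment (on a finite measure space every constant is integrable):
-- example [is_finite_measure μ] : integrable (λ x : α, (1 : ℝ)) μ := integrable_const 1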
lemma mem_ℒp.integrable_norm_rpow {f : α → β} {p : ℝ≥0∞}
(hf : mem_ℒp f p μ) (hp_ne_zero : p ≠ 0) (hp_ne_top : p ≠ ∞) :
integrable (λ (x : α), ∥f x∥ ^ p.to_real) μ :=
begin
rw ← mem_ℒp_one_iff_integrable,
exact hf.norm_rpow hp_ne_zero hp_ne_top,
end
lemma mem_ℒp.integrable_norm_rpow' [is_finite_measure μ] {f : α → β} {p : ℝ≥0∞}
(hf : mem_ℒp f p μ) :
integrable (λ (x : α), ∥f x∥ ^ p.to_real) μ :=
begin
by_cases h_zero : p = 0,
{ simp [h_zero, integrable_const] },
by_cases h_top : p = ∞,
{ simp [h_top, integrable_const] },
exact hf.integrable_norm_rpow h_zero h_top
end
lemma integrable.mono_measure {f : α → β} (h : integrable f ν) (hμ : μ ≤ ν) : integrable f μ :=
⟨h.ae_strongly_measurable.mono_measure hμ, h.has_finite_integral.mono_measure hμ⟩
lemma integrable.of_measure_le_smul {μ' : measure α} (c : ℝ≥0∞) (hc : c ≠ ∞)
(hμ'_le : μ' ≤ c • μ) {f : α → β} (hf : integrable f μ) :
integrable f μ' :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.of_measure_le_smul c hc hμ'_le, }
lemma integrable.add_measure {f : α → β} (hμ : integrable f μ) (hν : integrable f ν) :
integrable f (μ + ν) :=
begin
simp_rw ← mem_ℒp_one_iff_integrable at hμ hν ⊢,
refine ⟨hμ.ae_strongly_measurable.add_measure hν.ae_strongly_measurable, _⟩,
rw [snorm_one_add_measure, ennreal.add_lt_top],
exact ⟨hμ.snorm_lt_top, hν.snorm_lt_top⟩,
end
lemma integrable.left_of_add_measure {f : α → β} (h : integrable f (μ + ν)) : integrable f μ :=
by { rw ← mem_ℒp_one_iff_integrable at h ⊢, exact h.left_of_add_measure, }
lemma integrable.right_of_add_measure {f : α → β} (h : integrable f (μ + ν)) : integrable f ν :=
by { rw ← mem_ℒp_one_iff_integrable at h ⊢, exact h.right_of_add_measure, }
@[simp] lemma integrable_add_measure {f : α → β} :
integrable f (μ + ν) ↔ integrable f μ ∧ integrable f ν :=
⟨λ h, ⟨h.left_of_add_measure, h.right_of_add_measure⟩, λ h, h.1.add_measure h.2⟩
@[simp] lemma integrable_zero_measure {m : measurable_space α} {f : α → β} :
integrable f (0 : measure α) :=
⟨ae_strongly_measurable_zero_measure f, has_finite_integral_zero_measure f⟩
theorem integrable_finset_sum_measure {ι} {m : measurable_space α} {f : α → β}
{μ : ι → measure α} {s : finset ι} :
integrable f (∑ i in s, μ i) ↔ ∀ i ∈ s, integrable f (μ i) :=
by induction s using finset.induction_on; simp [*]
lemma integrable.smul_measure {f : α → β} (h : integrable f μ) {c : ℝ≥0∞} (hc : c ≠ ∞) :
integrable f (c • μ) :=
by { rw ← mem_ℒp_one_iff_integrable at h ⊢, exact h.smul_measure hc, }
lemma integrable_smul_measure {f : α → β} {c : ℝ≥0∞} (h₁ : c ≠ 0) (h₂ : c ≠ ∞) :
integrable f (c • μ) ↔ integrable f μ :=
⟨λ h, by simpa only [smul_smul, ennreal.inv_mul_cancel h₁ h₂, one_smul]
using h.smul_measure (ennreal.inv_ne_top.2 h₁), λ h, h.smul_measure h₂⟩
lemma integrable.to_average {f : α → β} (h : integrable f μ) :
integrable f ((μ univ)⁻¹ • μ) :=
begin
rcases eq_or_ne μ 0 with rfl|hne,
{ rwa smul_zero },
{ apply h.smul_measure, simpa }
end
lemma integrable_average [is_finite_measure μ] {f : α → β} :
integrable f ((μ univ)⁻¹ • μ) ↔ integrable f μ :=
(eq_or_ne μ 0).by_cases (λ h, by simp [h]) $ λ h,
integrable_smul_measure (ennreal.inv_ne_zero.2 $ measure_ne_top _ _)
(ennreal.inv_ne_top.2 $ mt measure.measure_univ_eq_zero.1 h)
lemma integrable_map_measure {f : α → δ} {g : δ → β}
(hg : ae_strongly_measurable g (measure.map f μ)) (hf : ae_measurable f μ) :
integrable g (measure.map f μ) ↔ integrable (g ∘ f) μ :=
by { simp_rw ← mem_ℒp_one_iff_integrable, exact mem_ℒp_map_measure_iff hg hf, }
lemma integrable.comp_ae_measurable {f : α → δ} {g : δ → β}
(hg : integrable g (measure.map f μ)) (hf : ae_measurable f μ) : integrable (g ∘ f) μ :=
(integrable_map_measure hg.ae_strongly_measurable hf).mp hg
lemma integrable.comp_measurable {f : α → δ} {g : δ → β}
(hg : integrable g (measure.map f μ)) (hf : measurable f) : integrable (g ∘ f) μ :=
hg.comp_ae_measurable hf.ae_measurable
lemma _root_.measurable_embedding.integrable_map_iff
{f : α → δ} (hf : measurable_embedding f) {g : δ → β} :
integrable g (measure.map f μ) ↔ integrable (g ∘ f) μ :=
by { simp_rw ← mem_ℒp_one_iff_integrable, exact hf.mem_ℒp_map_measure_iff, }
lemma integrable_map_equiv (f : α ≃ᵐ δ) (g : δ → β) :
integrable g (measure.map f μ) ↔ integrable (g ∘ f) μ :=
by { simp_rw ← mem_ℒp_one_iff_integrable, exact f.mem_ℒp_map_measure_iff, }
lemma measure_preserving.integrable_comp {ν : measure δ} {g : δ → β}
{f : α → δ} (hf : measure_preserving f μ ν) (hg : ae_strongly_measurable g ν) :
integrable (g ∘ f) μ ↔ integrable g ν :=
by { rw ← hf.map_eq at hg ⊢, exact (integrable_map_measure hg hf.measurable.ae_measurable).symm }
lemma measure_preserving.integrable_comp_emb {f : α → δ} {ν} (h₁ : measure_preserving f μ ν)
(h₂ : measurable_embedding f) {g : δ → β} :
integrable (g ∘ f) μ ↔ integrable g ν :=
h₁.map_eq ▸ iff.symm h₂.integrable_map_iff
lemma lintegral_edist_lt_top {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) :
∫⁻ a, edist (f a) (g a) ∂μ < ∞ :=
lt_of_le_of_lt
(lintegral_edist_triangle hf.ae_strongly_measurable ae_strongly_measurable_zero)
(ennreal.add_lt_top.2 $ by { simp_rw [pi.zero_apply, ← has_finite_integral_iff_edist],
exact ⟨hf.has_finite_integral, hg.has_finite_integral⟩ })
variables (α β μ)
@[simp] lemma integrable_zero : integrable (λ _, (0 : β)) μ :=
by simp [integrable, ae_strongly_measurable_const]
variables {α β μ}
lemma integrable.add' {f g : α → β} (hf : integrable f μ) (hg : integrable g μ) :
has_finite_integral (f + g) μ :=
calc ∫⁻ a, ∥f a + g a∥₊ ∂μ ≤ ∫⁻ a, ∥f a∥₊ + ∥g a∥₊ ∂μ :
lintegral_mono (λ a, by exact_mod_cast nnnorm_add_le _ _)
... = _ : lintegral_nnnorm_add_left hf.ae_strongly_measurable _
... < ∞ : add_lt_top.2 ⟨hf.has_finite_integral, hg.has_finite_integral⟩
lemma integrable.add
{f g : α → β} (hf : integrable f μ) (hg : integrable g μ) : integrable (f + g) μ :=
⟨hf.ae_strongly_measurable.add hg.ae_strongly_measurable, hf.add' hg⟩
lemma integrable_finset_sum' {ι} (s : finset ι)
{f : ι → α → β} (hf : ∀ i ∈ s, integrable (f i) μ) : integrable (∑ i in s, f i) μ :=
finset.sum_induction f (λ g, integrable g μ) (λ _ _, integrable.add)
(integrable_zero _ _ _) hf
lemma integrable_finset_sum {ι} (s : finset ι)
{f : ι → α → β} (hf : ∀ i ∈ s, integrable (f i) μ) : integrable (λ a, ∑ i in s, f i a) μ :=
by simpa only [← finset.sum_apply] using integrable_finset_sum' s hf
lemma integrable.neg {f : α → β} (hf : integrable f μ) : integrable (-f) μ :=
⟨hf.ae_strongly_measurable.neg, hf.has_finite_integral.neg⟩
@[simp] lemma integrable_neg_iff {f : α → β} :
integrable (-f) μ ↔ integrable f μ :=
⟨λ h, neg_neg f ▸ h.neg, integrable.neg⟩
lemma integrable.sub {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) : integrable (f - g) μ :=
by simpa only [sub_eq_add_neg] using hf.add hg.neg
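-- These closure properties compose in the expected way; a commented sketch using only the
-- lemmas above:
-- example {f g : α → β} (hf : integrable f μ) (hg : integrable g μ) :
--   integrable (f + g - g) μ := (hf.add hg).sub hg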
lemma integrable.norm {f : α → β} (hf : integrable f μ) :
integrable (λ a, ∥f a∥) μ :=
⟨hf.ae_strongly_measurable.norm, hf.has_finite_integral.norm⟩
lemma integrable.inf {β} [normed_lattice_add_comm_group β] {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) :
integrable (f ⊓ g) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf hg ⊢, exact hf.inf hg, }
lemma integrable.sup {β} [normed_lattice_add_comm_group β] {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) :
integrable (f ⊔ g) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf hg ⊢, exact hf.sup hg, }
lemma integrable.abs {β} [normed_lattice_add_comm_group β] {f : α → β} (hf : integrable f μ) :
integrable (λ a, |f a|) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.abs, }
lemma integrable.bdd_mul {F : Type*} [normed_division_ring F]
{f g : α → F} (hint : integrable g μ) (hm : ae_strongly_measurable f μ)
(hfbdd : ∃ C, ∀ x, ∥f x∥ ≤ C) :
integrable (λ x, f x * g x) μ :=
begin
casesI is_empty_or_nonempty α with hα hα,
{ rw μ.eq_zero_of_is_empty,
exact integrable_zero_measure },
{ refine ⟨hm.mul hint.1, _⟩,
obtain ⟨C, hC⟩ := hfbdd,
have hCnonneg : 0 ≤ C := le_trans (norm_nonneg _) (hC hα.some),
have : (λ x, ∥f x * g x∥₊) ≤ λ x, ⟨C, hCnonneg⟩ * ∥g x∥₊,
{ intro x,
simp only [nnnorm_mul],
exact mul_le_mul_of_nonneg_right (hC x) (zero_le _) },
refine lt_of_le_of_lt (lintegral_mono_nnreal this) _,
simp only [ennreal.coe_mul],
rw lintegral_const_mul' _ _ ennreal.coe_ne_top,
exact ennreal.mul_lt_top ennreal.coe_ne_top (ne_of_lt hint.2) },
end
lemma integrable_norm_iff {f : α → β} (hf : ae_strongly_measurable f μ) :
integrable (λa, ∥f a∥) μ ↔ integrable f μ :=
by simp_rw [integrable, and_iff_right hf, and_iff_right hf.norm, has_finite_integral_norm_iff]
lemma integrable_of_norm_sub_le {f₀ f₁ : α → β} {g : α → ℝ}
(hf₁_m : ae_strongly_measurable f₁ μ)
(hf₀_i : integrable f₀ μ)
(hg_i : integrable g μ)
(h : ∀ᵐ a ∂μ, ∥f₀ a - f₁ a∥ ≤ g a) :
integrable f₁ μ :=
begin
have : ∀ᵐ a ∂μ, ∥f₁ a∥ ≤ ∥f₀ a∥ + g a,
{ apply h.mono,
intros a ha,
calc ∥f₁ a∥ ≤ ∥f₀ a∥ + ∥f₀ a - f₁ a∥ : norm_le_insert _ _
... ≤ ∥f₀ a∥ + g a : add_le_add_left ha _ },
exact integrable.mono' (hf₀_i.norm.add hg_i) hf₁_m this
end
lemma integrable.prod_mk {f : α → β} {g : α → γ} (hf : integrable f μ) (hg : integrable g μ) :
integrable (λ x, (f x, g x)) μ :=
⟨hf.ae_strongly_measurable.prod_mk hg.ae_strongly_measurable,
(hf.norm.add' hg.norm).mono $ eventually_of_forall $ λ x,
calc max ∥f x∥ ∥g x∥ ≤ ∥f x∥ + ∥g x∥ : max_le_add_of_nonneg (norm_nonneg _) (norm_nonneg _)
... ≤ ∥(∥f x∥ + ∥g x∥)∥ : le_abs_self _⟩
lemma mem_ℒp.integrable {q : ℝ≥0∞} (hq1 : 1 ≤ q) {f : α → β} [is_finite_measure μ]
(hfq : mem_ℒp f q μ) : integrable f μ :=
mem_ℒp_one_iff_integrable.mp (hfq.mem_ℒp_of_exponent_le hq1)
lemma lipschitz_with.integrable_comp_iff_of_antilipschitz {K K'} {f : α → β} {g : β → γ}
(hg : lipschitz_with K g) (hg' : antilipschitz_with K' g) (g0 : g 0 = 0) :
integrable (g ∘ f) μ ↔ integrable f μ :=
by simp [← mem_ℒp_one_iff_integrable, hg.mem_ℒp_comp_iff_of_antilipschitz hg' g0]
lemma integrable.real_to_nnreal {f : α → ℝ} (hf : integrable f μ) :
integrable (λ x, ((f x).to_nnreal : ℝ)) μ :=
begin
refine ⟨hf.ae_strongly_measurable.ae_measurable
.real_to_nnreal.coe_nnreal_real.ae_strongly_measurable, _⟩,
rw has_finite_integral_iff_norm,
refine lt_of_le_of_lt _ ((has_finite_integral_iff_norm _).1 hf.has_finite_integral),
apply lintegral_mono,
assume x,
simp [ennreal.of_real_le_of_real, abs_le, le_abs_self],
end
lemma of_real_to_real_ae_eq {f : α → ℝ≥0∞} (hf : ∀ᵐ x ∂μ, f x < ∞) :
(λ x, ennreal.of_real (f x).to_real) =ᵐ[μ] f :=
begin
filter_upwards [hf],
assume x hx,
simp only [hx.ne, of_real_to_real, ne.def, not_false_iff],
end
lemma coe_to_nnreal_ae_eq {f : α → ℝ≥0∞} (hf : ∀ᵐ x ∂μ, f x < ∞) :
(λ x, ((f x).to_nnreal : ℝ≥0∞)) =ᵐ[μ] f :=
begin
filter_upwards [hf],
assume x hx,
simp only [hx.ne, ne.def, not_false_iff, coe_to_nnreal],
end
section
variables {E : Type*} [normed_add_comm_group E] [normed_space ℝ E]
lemma integrable_with_density_iff_integrable_coe_smul
{f : α → ℝ≥0} (hf : measurable f) {g : α → E} :
integrable g (μ.with_density (λ x, f x)) ↔ integrable (λ x, (f x : ℝ) • g x) μ :=
begin
by_cases H : ae_strongly_measurable (λ (x : α), (f x : ℝ) • g x) μ,
{ simp only [integrable, ae_strongly_measurable_with_density_iff hf, has_finite_integral, H,
true_and],
rw lintegral_with_density_eq_lintegral_mul₀' hf.coe_nnreal_ennreal.ae_measurable,
{ congr',
ext1 x,
simp only [nnnorm_smul, nnreal.nnnorm_eq, coe_mul, pi.mul_apply] },
{ rw ae_measurable_with_density_ennreal_iff hf,
convert H.ennnorm,
ext1 x,
simp only [nnnorm_smul, nnreal.nnnorm_eq, coe_mul] } },
{ simp only [integrable, ae_strongly_measurable_with_density_iff hf, H, false_and] }
end
lemma integrable_with_density_iff_integrable_smul {f : α → ℝ≥0} (hf : measurable f) {g : α → E} :
integrable g (μ.with_density (λ x, f x)) ↔ integrable (λ x, f x • g x) μ :=
integrable_with_density_iff_integrable_coe_smul hf
lemma integrable_with_density_iff_integrable_smul'
{f : α → ℝ≥0∞} (hf : measurable f) (hflt : ∀ᵐ x ∂μ, f x < ∞) {g : α → E} :
integrable g (μ.with_density f) ↔ integrable (λ x, (f x).to_real • g x) μ :=
begin
rw [← with_density_congr_ae (coe_to_nnreal_ae_eq hflt),
integrable_with_density_iff_integrable_smul],
{ refl },
{ exact hf.ennreal_to_nnreal },
end
lemma integrable_with_density_iff_integrable_coe_smul₀
{f : α → ℝ≥0} (hf : ae_measurable f μ) {g : α → E} :
integrable g (μ.with_density (λ x, f x)) ↔ integrable (λ x, (f x : ℝ) • g x) μ :=
calc
integrable g (μ.with_density (λ x, f x))
↔ integrable g (μ.with_density (λ x, hf.mk f x)) :
begin
suffices : (λ x, (f x : ℝ≥0∞)) =ᵐ[μ] (λ x, hf.mk f x), by rw with_density_congr_ae this,
filter_upwards [hf.ae_eq_mk] with x hx,
simp [hx],
end
... ↔ integrable (λ x, (hf.mk f x : ℝ) • g x) μ :
integrable_with_density_iff_integrable_coe_smul hf.measurable_mk
... ↔ integrable (λ x, (f x : ℝ) • g x) μ :
begin
apply integrable_congr,
filter_upwards [hf.ae_eq_mk] with x hx,
simp [hx],
end
lemma integrable_with_density_iff_integrable_smul₀
{f : α → ℝ≥0} (hf : ae_measurable f μ) {g : α → E} :
integrable g (μ.with_density (λ x, f x)) ↔ integrable (λ x, f x • g x) μ :=
integrable_with_density_iff_integrable_coe_smul₀ hf
end
lemma integrable_with_density_iff {f : α → ℝ≥0∞} (hf : measurable f)
(hflt : ∀ᵐ x ∂μ, f x < ∞) {g : α → ℝ} :
integrable g (μ.with_density f) ↔ integrable (λ x, g x * (f x).to_real) μ :=
begin
have : (λ x, g x * (f x).to_real) = (λ x, (f x).to_real • g x), by simp [mul_comm],
rw this,
exact integrable_with_density_iff_integrable_smul' hf hflt,
end
section
variables {E : Type*} [normed_add_comm_group E] [normed_space ℝ E]
lemma mem_ℒ1_smul_of_L1_with_density {f : α → ℝ≥0} (f_meas : measurable f)
(u : Lp E 1 (μ.with_density (λ x, f x))) :
mem_ℒp (λ x, f x • u x) 1 μ :=
mem_ℒp_one_iff_integrable.2 $ (integrable_with_density_iff_integrable_smul f_meas).1 $
mem_ℒp_one_iff_integrable.1 (Lp.mem_ℒp u)
variable (μ)
/-- The map `u ↦ f • u` is an isometry between the `L^1` spaces for `μ.with_density f` and `μ`. -/
noncomputable def with_density_smul_li {f : α → ℝ≥0} (f_meas : measurable f) :
Lp E 1 (μ.with_density (λ x, f x)) →ₗᵢ[ℝ] Lp E 1 μ :=
{ to_fun := λ u, (mem_ℒ1_smul_of_L1_with_density f_meas u).to_Lp _,
map_add' :=
begin
assume u v,
ext1,
filter_upwards [(mem_ℒ1_smul_of_L1_with_density f_meas u).coe_fn_to_Lp,
(mem_ℒ1_smul_of_L1_with_density f_meas v).coe_fn_to_Lp,
(mem_ℒ1_smul_of_L1_with_density f_meas (u + v)).coe_fn_to_Lp,
Lp.coe_fn_add ((mem_ℒ1_smul_of_L1_with_density f_meas u).to_Lp _)
((mem_ℒ1_smul_of_L1_with_density f_meas v).to_Lp _),
(ae_with_density_iff f_meas.coe_nnreal_ennreal).1 (Lp.coe_fn_add u v)],
assume x hu hv huv h' h'',
rw [huv, h', pi.add_apply, hu, hv],
rcases eq_or_ne (f x) 0 with hx|hx,
{ simp only [hx, zero_smul, add_zero] },
{ rw [h'' _, pi.add_apply, smul_add],
simpa only [ne.def, ennreal.coe_eq_zero] using hx }
end,
map_smul' :=
begin
assume r u,
ext1,
filter_upwards [(ae_with_density_iff f_meas.coe_nnreal_ennreal).1 (Lp.coe_fn_smul r u),
(mem_ℒ1_smul_of_L1_with_density f_meas (r • u)).coe_fn_to_Lp,
Lp.coe_fn_smul r ((mem_ℒ1_smul_of_L1_with_density f_meas u).to_Lp _),
(mem_ℒ1_smul_of_L1_with_density f_meas u).coe_fn_to_Lp],
assume x h h' h'' h''',
rw [ring_hom.id_apply, h', h'', pi.smul_apply, h'''],
rcases eq_or_ne (f x) 0 with hx|hx,
{ simp only [hx, zero_smul, smul_zero] },
{ rw [h _, smul_comm, pi.smul_apply],
simpa only [ne.def, ennreal.coe_eq_zero] using hx }
end,
norm_map' :=
begin
assume u,
simp only [snorm, linear_map.coe_mk, Lp.norm_to_Lp, one_ne_zero, ennreal.one_ne_top,
ennreal.one_to_real, if_false, snorm', ennreal.rpow_one, _root_.div_one, Lp.norm_def],
rw lintegral_with_density_eq_lintegral_mul_non_measurable _ f_meas.coe_nnreal_ennreal
(filter.eventually_of_forall (λ x, ennreal.coe_lt_top)),
congr' 1,
apply lintegral_congr_ae,
filter_upwards [(mem_ℒ1_smul_of_L1_with_density f_meas u).coe_fn_to_Lp] with x hx,
rw [hx, pi.mul_apply],
change ↑∥(f x : ℝ) • u x∥₊ = ↑(f x) * ↑∥u x∥₊,
simp only [nnnorm_smul, nnreal.nnnorm_eq, ennreal.coe_mul],
end }
@[simp] lemma with_density_smul_li_apply {f : α → ℝ≥0} (f_meas : measurable f)
(u : Lp E 1 (μ.with_density (λ x, f x))) :
with_density_smul_li μ f_meas u =
(mem_ℒ1_smul_of_L1_with_density f_meas u).to_Lp (λ x, f x • u x) :=
rfl
end
lemma mem_ℒ1_to_real_of_lintegral_ne_top
{f : α → ℝ≥0∞} (hfm : ae_measurable f μ) (hfi : ∫⁻ x, f x ∂μ ≠ ∞) :
mem_ℒp (λ x, (f x).to_real) 1 μ :=
begin
rw [mem_ℒp, snorm_one_eq_lintegral_nnnorm],
exact ⟨(ae_measurable.ennreal_to_real hfm).ae_strongly_measurable,
has_finite_integral_to_real_of_lintegral_ne_top hfi⟩
end
lemma integrable_to_real_of_lintegral_ne_top
{f : α → ℝ≥0∞} (hfm : ae_measurable f μ) (hfi : ∫⁻ x, f x ∂μ ≠ ∞) :
integrable (λ x, (f x).to_real) μ :=
mem_ℒp_one_iff_integrable.1 $ mem_ℒ1_to_real_of_lintegral_ne_top hfm hfi
section pos_part
/-! ### Lemmas used for defining the positive part of an `L¹` function -/
lemma integrable.pos_part {f : α → ℝ} (hf : integrable f μ) : integrable (λ a, max (f a) 0) μ :=
⟨(hf.ae_strongly_measurable.ae_measurable.max ae_measurable_const).ae_strongly_measurable,
hf.has_finite_integral.max_zero⟩
lemma integrable.neg_part {f : α → ℝ} (hf : integrable f μ) : integrable (λ a, max (-f a) 0) μ :=
hf.neg.pos_part
end pos_part
section normed_space
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β]
lemma integrable.smul (c : 𝕜) {f : α → β}
(hf : integrable f μ) : integrable (c • f) μ :=
⟨hf.ae_strongly_measurable.const_smul c, hf.has_finite_integral.smul c⟩
lemma integrable_smul_iff {c : 𝕜} (hc : c ≠ 0) (f : α → β) :
integrable (c • f) μ ↔ integrable f μ :=
and_congr (ae_strongly_measurable_const_smul_iff₀ hc) (has_finite_integral_smul_iff hc f)
lemma integrable.const_mul {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable (λ x, c * f x) μ :=
integrable.smul c h
lemma integrable.const_mul' {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable ((λ (x : α), c) * f) μ :=
integrable.smul c h
lemma integrable.mul_const {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable (λ x, f x * c) μ :=
by simp_rw [mul_comm, h.const_mul _]
lemma integrable.mul_const' {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable (f * (λ (x : α), c)) μ :=
integrable.mul_const h c
lemma integrable.div_const {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable (λ x, f x / c) μ :=
by simp_rw [div_eq_mul_inv, h.mul_const]
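-- A commented sketch chaining the scalar lemmas above:
-- example {f : α → ℝ} (hf : integrable f μ) : integrable (λ x, (2 * f x) / 3) μ :=
--   (hf.const_mul 2).div_const 3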
lemma integrable.bdd_mul' {f g : α → ℝ} {c : ℝ} (hg : integrable g μ)
(hf : ae_strongly_measurable f μ) (hf_bound : ∀ᵐ x ∂μ, ∥f x∥ ≤ c) :
integrable (λ x, f x * g x) μ :=
begin
refine integrable.mono' (hg.norm.smul c) (hf.mul hg.1) _,
filter_upwards [hf_bound] with x hx,
rw [pi.smul_apply, smul_eq_mul],
exact (norm_mul_le _ _).trans (mul_le_mul_of_nonneg_right hx (norm_nonneg _)),
end
end normed_space
section normed_space_over_complete_field
variables {𝕜 : Type*} [nontrivially_normed_field 𝕜] [complete_space 𝕜]
variables {E : Type*} [normed_add_comm_group E] [normed_space 𝕜 E]
lemma integrable_smul_const {f : α → 𝕜} {c : E} (hc : c ≠ 0) :
integrable (λ x, f x • c) μ ↔ integrable f μ :=
begin
simp_rw [integrable, ae_strongly_measurable_smul_const_iff hc, and.congr_right_iff,
has_finite_integral, nnnorm_smul, ennreal.coe_mul],
intro hf, rw [lintegral_mul_const' _ _ ennreal.coe_ne_top, ennreal.mul_lt_top_iff],
have : ∀ x : ℝ≥0∞, x = 0 → x < ∞ := by simp,
simp [hc, or_iff_left_of_imp (this _)]
end
end normed_space_over_complete_field
section is_R_or_C
variables {𝕜 : Type*} [is_R_or_C 𝕜] {f : α → 𝕜}
lemma integrable.of_real {f : α → ℝ} (hf : integrable f μ) :
integrable (λ x, (f x : 𝕜)) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.of_real }
lemma integrable.re_im_iff :
integrable (λ x, is_R_or_C.re (f x)) μ ∧ integrable (λ x, is_R_or_C.im (f x)) μ ↔
integrable f μ :=
by { simp_rw ← mem_ℒp_one_iff_integrable, exact mem_ℒp_re_im_iff }
lemma integrable.re (hf : integrable f μ) : integrable (λ x, is_R_or_C.re (f x)) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.re, }
lemma integrable.im (hf : integrable f μ) : integrable (λ x, is_R_or_C.im (f x)) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.im, }
end is_R_or_C
section inner_product
variables {𝕜 E : Type*} [is_R_or_C 𝕜] [inner_product_space 𝕜 E] {f : α → E}
local notation `⟪`x`, `y`⟫` := @inner 𝕜 E _ x y
lemma integrable.const_inner (c : E) (hf : integrable f μ) : integrable (λ x, ⟪c, f x⟫) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.const_inner c, }
lemma integrable.inner_const (hf : integrable f μ) (c : E) : integrable (λ x, ⟪f x, c⟫) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.inner_const c, }
end inner_product
section trim
variables {H : Type*} [normed_add_comm_group H] {m0 : measurable_space α} {μ' : measure α}
{f : α → H}
lemma integrable.trim (hm : m ≤ m0) (hf_int : integrable f μ') (hf : strongly_measurable[m] f) :
integrable f (μ'.trim hm) :=
begin
refine ⟨hf.ae_strongly_measurable, _⟩,
rw [has_finite_integral, lintegral_trim hm _],
{ exact hf_int.2, },
{ exact @strongly_measurable.ennnorm _ m _ _ f hf },
end
lemma integrable_of_integrable_trim (hm : m ≤ m0) (hf_int : integrable f (μ'.trim hm)) :
integrable f μ' :=
begin
obtain ⟨hf_meas_ae, hf⟩ := hf_int,
refine ⟨ae_strongly_measurable_of_ae_strongly_measurable_trim hm hf_meas_ae, _⟩,
rw has_finite_integral at hf ⊢,
rwa lintegral_trim_ae hm _ at hf,
exact ae_strongly_measurable.ennnorm hf_meas_ae
end
end trim
section sigma_finite
variables {E : Type*} {m0 : measurable_space α} [normed_add_comm_group E]
lemma integrable_of_forall_fin_meas_le' {μ : measure α} (hm : m ≤ m0) [sigma_finite (μ.trim hm)]
(C : ℝ≥0∞) (hC : C < ∞) {f : α → E} (hf_meas : ae_strongly_measurable f μ)
(hf : ∀ s, measurable_set[m] s → μ s ≠ ∞ → ∫⁻ x in s, ∥f x∥₊ ∂μ ≤ C) :
integrable f μ :=
⟨hf_meas, (lintegral_le_of_forall_fin_meas_le' hm C hf_meas.ennnorm hf).trans_lt hC⟩
lemma integrable_of_forall_fin_meas_le [sigma_finite μ]
(C : ℝ≥0∞) (hC : C < ∞) {f : α → E} (hf_meas : ae_strongly_measurable f μ)
(hf : ∀ s : set α, measurable_set s → μ s ≠ ∞ → ∫⁻ x in s, ∥f x∥₊ ∂μ ≤ C) :
integrable f μ :=
@integrable_of_forall_fin_meas_le' _ _ _ _ _ _ _ (by rwa trim_eq_self) C hC _ hf_meas hf
end sigma_finite
/-! ### The predicate `integrable` on measurable functions modulo a.e.-equality -/
namespace ae_eq_fun
section
/-- A class of almost everywhere equal functions is `integrable` if its function representative
is integrable. -/
def integrable (f : α →ₘ[μ] β) : Prop := integrable f μ
lemma integrable_mk {f : α → β} (hf : ae_strongly_measurable f μ) :
(integrable (mk f hf : α →ₘ[μ] β)) ↔ measure_theory.integrable f μ :=
begin
simp [integrable],
apply integrable_congr,
exact coe_fn_mk f hf
end
lemma integrable_coe_fn {f : α →ₘ[μ] β} : (measure_theory.integrable f μ) ↔ integrable f :=
by rw [← integrable_mk, mk_coe_fn]
lemma integrable_zero : integrable (0 : α →ₘ[μ] β) :=
(integrable_zero α β μ).congr (coe_fn_mk _ _).symm
end
section
lemma integrable.neg {f : α →ₘ[μ] β} : integrable f → integrable (-f) :=
induction_on f $ λ f hfm hfi, (integrable_mk _).2 ((integrable_mk hfm).1 hfi).neg
section
lemma integrable_iff_mem_L1 {f : α →ₘ[μ] β} : integrable f ↔ f ∈ (α →₁[μ] β) :=
by rw [← integrable_coe_fn, ← mem_ℒp_one_iff_integrable, Lp.mem_Lp_iff_mem_ℒp]
lemma integrable.add {f g : α →ₘ[μ] β} : integrable f → integrable g → integrable (f + g) :=
begin
refine induction_on₂ f g (λ f hf g hg hfi hgi, _),
simp only [integrable_mk, mk_add_mk] at hfi hgi ⊢,
exact hfi.add hgi
end
lemma integrable.sub {f g : α →ₘ[μ] β} (hf : integrable f) (hg : integrable g) :
integrable (f - g) :=
(sub_eq_add_neg f g).symm ▸ hf.add hg.neg
end
section normed_space
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β]
lemma integrable.smul {c : 𝕜} {f : α →ₘ[μ] β} : integrable f → integrable (c • f) :=
induction_on f $ λ f hfm hfi, (integrable_mk _).2 $ ((integrable_mk hfm).1 hfi).smul _
end normed_space
end
end ae_eq_fun
namespace L1
lemma integrable_coe_fn (f : α →₁[μ] β) :
integrable f μ :=
by { rw ← mem_ℒp_one_iff_integrable, exact Lp.mem_ℒp f }
lemma has_finite_integral_coe_fn (f : α →₁[μ] β) :
has_finite_integral f μ :=
(integrable_coe_fn f).has_finite_integral
lemma strongly_measurable_coe_fn (f : α →₁[μ] β) : strongly_measurable f :=
Lp.strongly_measurable f
lemma measurable_coe_fn [measurable_space β] [borel_space β] (f : α →₁[μ] β) :
measurable f :=
(Lp.strongly_measurable f).measurable
lemma ae_strongly_measurable_coe_fn (f : α →₁[μ] β) : ae_strongly_measurable f μ :=
Lp.ae_strongly_measurable f
lemma ae_measurable_coe_fn [measurable_space β] [borel_space β] (f : α →₁[μ] β) :
ae_measurable f μ :=
(Lp.strongly_measurable f).measurable.ae_measurable
lemma edist_def (f g : α →₁[μ] β) :
edist f g = ∫⁻ a, edist (f a) (g a) ∂μ :=
by { simp [Lp.edist_def, snorm, snorm'], simp [edist_eq_coe_nnnorm_sub] }
lemma dist_def (f g : α →₁[μ] β) :
dist f g = (∫⁻ a, edist (f a) (g a) ∂μ).to_real :=
by { simp [Lp.dist_def, snorm, snorm'], simp [edist_eq_coe_nnnorm_sub] }
lemma norm_def (f : α →₁[μ] β) :
∥f∥ = (∫⁻ a, ∥f a∥₊ ∂μ).to_real :=
by { simp [Lp.norm_def, snorm, snorm'] }
/-- Computing the norm of a difference between two L¹-functions. Note that this is not a
special case of `norm_def` since `(f - g) x` and `f x - g x` are not equal
(but only a.e.-equal). -/
lemma norm_sub_eq_lintegral (f g : α →₁[μ] β) :
∥f - g∥ = (∫⁻ x, (∥f x - g x∥₊ : ℝ≥0∞) ∂μ).to_real :=
begin
rw [norm_def],
congr' 1,
rw lintegral_congr_ae,
filter_upwards [Lp.coe_fn_sub f g] with _ ha,
simp only [ha, pi.sub_apply],
end
lemma of_real_norm_eq_lintegral (f : α →₁[μ] β) :
ennreal.of_real ∥f∥ = ∫⁻ x, (∥f x∥₊ : ℝ≥0∞) ∂μ :=
by { rw [norm_def, ennreal.of_real_to_real], exact ne_of_lt (has_finite_integral_coe_fn f) }
/-- Computing the norm of a difference between two L¹-functions. Note that this is not a
special case of `of_real_norm_eq_lintegral` since `(f - g) x` and `f x - g x` are not equal
(but only a.e.-equal). -/
lemma of_real_norm_sub_eq_lintegral (f g : α →₁[μ] β) :
ennreal.of_real ∥f - g∥ = ∫⁻ x, (∥f x - g x∥₊ : ℝ≥0∞) ∂μ :=
begin
simp_rw [of_real_norm_eq_lintegral, ← edist_eq_coe_nnnorm],
apply lintegral_congr_ae,
filter_upwards [Lp.coe_fn_sub f g] with _ ha,
simp only [ha, pi.sub_apply],
end
end L1
namespace integrable
/-- Construct the equivalence class `[f]` of an integrable function `f`, as a member of the
space `L1 β 1 μ`. -/
def to_L1 (f : α → β) (hf : integrable f μ) : α →₁[μ] β :=
(mem_ℒp_one_iff_integrable.2 hf).to_Lp f
@[simp] lemma to_L1_coe_fn (f : α →₁[μ] β) (hf : integrable f μ) : hf.to_L1 f = f :=
by simp [integrable.to_L1]
lemma coe_fn_to_L1 {f : α → β} (hf : integrable f μ) : hf.to_L1 f =ᵐ[μ] f :=
ae_eq_fun.coe_fn_mk _ _
@[simp] lemma to_L1_zero (h : integrable (0 : α → β) μ) : h.to_L1 0 = 0 := rfl
@[simp] lemma to_L1_eq_mk (f : α → β) (hf : integrable f μ) :
(hf.to_L1 f : α →ₘ[μ] β) = ae_eq_fun.mk f hf.ae_strongly_measurable :=
rfl
@[simp] lemma to_L1_eq_to_L1_iff (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
to_L1 f hf = to_L1 g hg ↔ f =ᵐ[μ] g :=
mem_ℒp.to_Lp_eq_to_Lp_iff _ _
lemma to_L1_add (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
to_L1 (f + g) (hf.add hg) = to_L1 f hf + to_L1 g hg := rfl
lemma to_L1_neg (f : α → β) (hf : integrable f μ) :
to_L1 (- f) (integrable.neg hf) = - to_L1 f hf := rfl
lemma to_L1_sub (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
to_L1 (f - g) (hf.sub hg) = to_L1 f hf - to_L1 g hg := rfl
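-- For illustration, a commented sketch of `to_L1_add` in use:
-- example (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
--   (hf.add hg).to_L1 (f + g) = hf.to_L1 f + hg.to_L1 g := to_L1_add f g hf hg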
lemma norm_to_L1 (f : α → β) (hf : integrable f μ) :
∥hf.to_L1 f∥ = ennreal.to_real (∫⁻ a, edist (f a) 0 ∂μ) :=
by { simp [to_L1, snorm, snorm'], simp [edist_eq_coe_nnnorm] }
lemma norm_to_L1_eq_lintegral_norm (f : α → β) (hf : integrable f μ) :
∥hf.to_L1 f∥ = ennreal.to_real (∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ) :=
by { rw [norm_to_L1, lintegral_norm_eq_lintegral_edist] }
@[simp] lemma edist_to_L1_to_L1 (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
edist (hf.to_L1 f) (hg.to_L1 g) = ∫⁻ a, edist (f a) (g a) ∂μ :=
by { simp [integrable.to_L1, snorm, snorm'], simp [edist_eq_coe_nnnorm_sub] }
@[simp] lemma edist_to_L1_zero (f : α → β) (hf : integrable f μ) :
edist (hf.to_L1 f) 0 = ∫⁻ a, edist (f a) 0 ∂μ :=
by { simp [integrable.to_L1, snorm, snorm'], simp [edist_eq_coe_nnnorm] }
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β]
lemma to_L1_smul (f : α → β) (hf : integrable f μ) (k : 𝕜) :
to_L1 (λ a, k • f a) (hf.smul k) = k • to_L1 f hf := rfl
lemma to_L1_smul' (f : α → β) (hf : integrable f μ) (k : 𝕜) :
to_L1 (k • f) (hf.smul k) = k • to_L1 f hf := rfl
end integrable
end measure_theory
open measure_theory
variables {E : Type*} [normed_add_comm_group E]
{𝕜 : Type*} [nontrivially_normed_field 𝕜] [normed_space 𝕜 E]
{H : Type*} [normed_add_comm_group H] [normed_space 𝕜 H]
lemma measure_theory.integrable.apply_continuous_linear_map {φ : α → H →L[𝕜] E}
(φ_int : integrable φ μ) (v : H) : integrable (λ a, φ a v) μ :=
(φ_int.norm.mul_const ∥v∥).mono' (φ_int.ae_strongly_measurable.apply_continuous_linear_map v)
(eventually_of_forall $ λ a, (φ a).le_op_norm v)
lemma continuous_linear_map.integrable_comp {φ : α → H} (L : H →L[𝕜] E)
(φ_int : integrable φ μ) : integrable (λ (a : α), L (φ a)) μ :=
((integrable.norm φ_int).const_mul ∥L∥).mono'
(L.continuous.comp_ae_strongly_measurable φ_int.ae_strongly_measurable)
(eventually_of_forall $ λ a, L.le_op_norm (φ a))
repo_name: moritayasuaki/lean | path: /library/init/meta/default.lean | license: Apache-2.0 (permissive) | language: Lean | src_encoding: UTF-8 | length_bytes: 915 | blob_id: 86b59a4ec8f5eaf3fbf58918e14f24c8c007e96c
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.meta.name init.meta.options init.meta.format init.meta.rb_map
import init.meta.level init.meta.expr init.meta.environment init.meta.attribute
import init.meta.tactic init.meta.contradiction_tactic init.meta.constructor_tactic
import init.meta.injection_tactic init.meta.relation_tactics init.meta.fun_info
import init.meta.congr_lemma init.meta.match_tactic init.meta.ac_tactics
import init.meta.backward init.meta.rewrite_tactic
import init.meta.mk_dec_eq_instance init.meta.mk_inhabited_instance
import init.meta.simp_tactic init.meta.set_get_option_tactics
import init.meta.interactive init.meta.converter init.meta.vm
import init.meta.comp_value_tactics init.meta.smt
import init.meta.async_tactic init.meta.inductive_compiler
repo_name: Vierkantor/mathlib | path: /src/algebra/hierarchy_design.lean | license: Apache-2.0 (permissive) | language: Lean | src_encoding: UTF-8 | length_bytes: 9,132 | blob_id: 511f9ca515c0464393f073316ed6d7f6f18b23d8
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Eric Wieser
-/
import tactic.doc_commands
/-!
# Documentation of the algebraic hierarchy
A library note giving advice on modifying the algebraic hierarchy.
(It is not intended as a "tour".)
TODO: Add sections about interactions with topological typeclasses, and order typeclasses.
-/
/--
# The algebraic hierarchy
In any theorem proving environment,
there are difficult decisions surrounding the design of the "algebraic hierarchy".
There is a danger of exponential explosion in the number of gadgets,
especially once interactions between algebraic and order/topological/etc structures are considered.
In mathlib, we try to avoid this by only introducing new algebraic typeclasses either
1. when there is "real mathematics" to be done with them, or
2. when there is a meaningful gain in simplicity by factoring out a common substructure.
(As examples, at this point we don't have `loop`, or `unital_magma`,
but we do have `lie_submodule` and `topological_field`!
We also have `group_with_zero`, as an exemplar of point 2.)
Generally in mathlib we use the extension mechanism (so `comm_ring` extends `ring`)
rather than mixins (e.g. with separate `ring` and `comm_mul` classes),
in part because of the potential blow-up in term sizes described at
https://www.ralfj.de/blog/2019/05/15/typeclasses-exponential-blowup.html
However there is tension here, as it results in considerable duplication in the API,
particularly in the interaction with order structures.
This library note is not intended as a design document
justifying and explaining the history of mathlib's algebraic hierarchy!
Instead it is intended as a developer's guide, for contributors wanting to extend
(either new leaves, or new intermediate classes) the algebraic hierarchy as it exists.
(Ideally we would have both a tour guide to the existing hierarchy,
and an account of the design choices.
See https://arxiv.org/abs/1910.09336 for an overview of mathlib as a whole,
with some attention to the algebraic hierarchy and
https://leanprover-community.github.io/mathlib-overview.html
for a summary of what is in mathlib today.)
## Instances
When adding a new typeclass `Z` to the algebraic hierarchy
one should attempt to add the following constructions and results,
when applicable:
* Instances transferred elementwise to products, like `prod.monoid`.
See `algebra.group.prod` for more examples.
```
instance prod.Z [Z M] [Z N] : Z (M × N) := ...
```
* Instances transferred elementwise to pi types, like `pi.monoid`.
See `algebra.group.pi` for more examples.
```
instance pi.Z [∀ i, Z $ f i] : Z (Π i : I, f i) := ...
```
* Instances transferred to `mul_opposite M`, like `mul_opposite.monoid`.
See `algebra.opposites` for more examples.
```
instance mul_opposite.Z [Z M] : Z (mul_opposite M) := ...
```
* Instances transferred to `ulift M`, like `ulift.monoid`.
See `algebra.group.ulift` for more examples.
```
instance ulift.Z [Z M] : Z (ulift M) := ...
```
* Definitions for transferring the proof fields of instances along
injective or surjective functions that agree on the data fields,
like `function.injective.monoid` and `function.surjective.monoid`.
We make these definitions `@[reducible]`, see note [reducible non-instances].
See `algebra.group.inj_surj` for more examples.
```
@[reducible]
def function.injective.Z [Z M₂] (f : M₁ → M₂) (hf : injective f)
(one : f 1 = 1) (mul : ∀ x y, f (x * y) = f x * f y) : Z M₁ := ...
@[reducible]
def function.surjective.Z [Z M₁] (f : M₁ → M₂) (hf : surjective f)
(one : f 1 = 1) (mul : ∀ x y, f (x * y) = f x * f y) : Z M₂ := ...
```
* Instances transferred elementwise to `finsupp`s, like `finsupp.semigroup`.
See `data.finsupp.pointwise` for more examples.
```
instance finsupp.Z [Z β] : Z (α →₀ β) := ...
```
* Instances transferred elementwise to `set`s, like `set.monoid`.
See `algebra.pointwise` for more examples.
```
instance set.Z [Z α] : Z (set α) := ...
```
* Definitions for transferring the entire structure across an equivalence, like `equiv.monoid`.
See `data.equiv.transfer_instance` for more examples. See also the `transport` tactic.
```
def equiv.Z (e : α ≃ β) [Z β] : Z α := ...
/- When there is a new notion of `Z`-equiv: -/
def equiv.Z_equiv (e : α ≃ β) [Z β] : by { letI := equiv.Z e, exact α ≃Z β } := ...
```
## Subobjects
When a new typeclass `Z` adds new data fields,
you should also create a new `sub_Z` `structure` with a `carrier` field.
This can be a lot of work; for now try to closely follow the existing examples
(e.g. `submonoid`, `subring`, `subalgebra`).
We would very much like to provide some automation here, but a prerequisite will be making
all the existing APIs more uniform.
If `Z` extends `Y`, then `sub_Z` should usually extend `sub_Y`.
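As a rough sketch of the shape this takes (the names `sub_Z`, `sub_Y`, `op` and the field `op_mem'`
are placeholders standing in for your actual class, not existing declarations; compare the fields
`carrier`, `one_mem'` and `mul_mem'` of `submonoid`):
```
structure sub_Z (M : Type*) [Z M] extends sub_Y M :=
(op_mem' : ∀ {a b : M}, a ∈ carrier → b ∈ carrier → op a b ∈ carrier)
```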
When `Z` adds only new proof fields to an existing structure `Y`,
you should provide instances transferring
`Z α` to `Z (sub_Y α)`, like `submonoid.to_comm_monoid`.
Typically this is done using the `function.injective.Z` definition mentioned above.
```
instance sub_Y.to_Z [Z α] : Z (sub_Y α) :=
coe_injective.Z coe ...
```
## Morphisms and equivalences
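When `Z` adds new data fields, you will usually also want a bundled morphism type
(and, where it makes sense, a bundled equivalence type), analogous to `monoid_hom` and `mul_equiv`.
A very rough sketch, with purely illustrative names (`Z_hom`, `op` and `map_op'` are not existing
declarations; the real work also includes identity and composition, `ext` lemmas, and coercion
simp lemmas):
```
structure Z_hom (M N : Type*) [Z M] [Z N] :=
(to_fun : M → N)
(map_op' : ∀ x y, to_fun (op x y) = op (to_fun x) (to_fun y))
```
When `Z` only adds proof fields to an existing structure `Y`, reuse `Y`'s morphisms rather than
introducing a new bundled type.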
## Category theory
For many algebraic structures, particularly ones used in representation theory, algebraic geometry,
etc., we also define "bundled" versions, which carry `category` instances.
These bundled versions are usually named in camel case,
so for example we have `AddCommGroup` as a bundled `add_comm_group`,
and `TopCommRing` (which bundles together `comm_ring`, `topological_space`, and `topological_ring`).
These bundled versions have many appealing features:
* a uniform notation for morphisms `X ⟶ Y`
* a uniform notation (and definition) for isomorphisms `X ≅ Y`
* a uniform API for subobjects, via the partial order `subobject X`
* interoperability with unbundled structures, via coercions to `Type`
(so if `G : AddCommGroup`, you can treat `G` as a type,
and it automatically has an `add_comm_group` instance)
and lifting maps `AddCommGroup.of G`, when `G` is a type with an `add_comm_group` instance.
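For instance, a small illustration of the last point (`AddCommGroup.of` and the coercion to `Type`
are the relevant existing declarations):
```
variables (G : Type*) [add_comm_group G]
example : AddCommGroup := AddCommGroup.of G
example (H : AddCommGroup) : add_comm_group H := infer_instance
```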
If, for example, you do the work of proving that a typeclass `Z` has a good notion of tensor product,
you are strongly encouraged to provide the corresponding `monoidal_category` instance
on a bundled version.
This ensures that the API for tensor products is complete, and enables use of general machinery.
Similarly if you prove universal properties, or adjunctions, you are encouraged to state these
using categorical language!
One disadvantage of the bundled approach is that we can only speak of morphisms between
objects living in the same type-theoretic universe.
In practice this is rarely a problem.
## Making a pull request
With so many moving parts, how do you actually go about changing the algebraic hierarchy?
We're still evolving how to handle this, but the current suggestion is:
* If you're adding a new "leaf" class, the requirements are lower,
and an initial PR can just add whatever is immediately needed.
* A new "intermediate" class, especially low down in the hierarchy,
needs to be careful about leaving gaps.
In a perfect world, there would be a group of simultaneous PRs that basically cover everything!
(Or at least an expectation that PRs may not be merged immediately while waiting on other
PRs that fill out the API.)
However "perfect is the enemy of good", and it would also be completely reasonable
to add a TODO list in the main module doc-string for the new class,
briefly listing the parts of the API which still need to be provided.
Hopefully this document makes it easy to assemble this list.
Another alternative to a TODO list in the doc-strings is adding github issues.
-/
library_note "the algebraic hierarchy"
/--
Some definitions that define objects of a class cannot be instances, because they have an
explicit argument that does not occur in the conclusion. An example is `preorder.lift` that has a
function `f : α → β` as an explicit argument to lift a preorder on `β` to a preorder on `α`.
If these definitions are used to define instances of this class *and* this class is an argument to
some other type-class so that type-class inference will have to unfold these instances to check
for definitional equality, then these definitions should be marked `@[reducible]`.
For example, `preorder.lift` is used to define `units.preorder` and `partial_order.lift` is used
to define `units.partial_order`. In some cases it is important that type-class inference can
recognize that `units.preorder` and `units.partial_order` give rise to the same `has_le` instance.
For example, you might have another class that takes `[has_le α]` as an argument, and this argument
sometimes comes from `units.preorder` and sometimes from `units.partial_order`.
Therefore, `preorder.lift` and `partial_order.lift` are marked `@[reducible]`.
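As a sketch of the shape such a definition takes (simplified, and possibly differing in detail from
the actual `preorder.lift` in mathlib):
```
@[reducible]
def preorder.lift {α β} [preorder β] (f : α → β) : preorder α :=
{ le := λ x y, f x ≤ f y,
  le_refl := λ a, le_refl _,
  le_trans := λ a b c, le_trans,
  lt := λ x y, f x < f y,
  lt_iff_le_not_le := λ a b, lt_iff_le_not_le }
```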
-/
library_note "reducible non-instances"
repo_name: alreadydone/mathlib | path: /src/category_theory/functor/flat.lean | license: Apache-2.0 (permissive) | language: Lean | src_encoding: UTF-8 | length_bytes: 16,324 | blob_id: 383e2601866ade35fa123c7824daa89992c5ed27
/-
Copyright (c) 2021 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import category_theory.limits.filtered_colimit_commutes_finite_limit
import category_theory.limits.preserves.functor_category
import category_theory.limits.bicones
import category_theory.limits.comma
import category_theory.limits.preserves.finite
import category_theory.limits.shapes.finite_limits
/-!
# Representably flat functors
We define representably flat functors as functors such that the category of structured arrows
over `X` is cofiltered for each `X`. This concept is also known as flat functors as in [Elephant]
Remark C2.3.7, and this name is suggested by Mike Shulman in
https://golem.ph.utexas.edu/category/2011/06/flat_functors_and_morphisms_of.html to avoid
confusion with other notions of flatness.
This definition is equivalent to left exact functors (functors that preserves finite limits) when
`C` has all finite limits.
## Main results
* `flat_of_preserves_finite_limits`: If `F : C ⥤ D` preserves finite limits and `C` has all finite
limits, then `F` is flat.
* `preserves_finite_limits_of_flat`: If `F : C ⥤ D` is flat, then it preserves all finite limits.
* `preserves_finite_limits_iff_flat`: If `C` has all finite limits,
  then `F` is flat iff `F` is left exact.
* `Lan_preserves_finite_limits_of_flat`: If `F : C ⥤ D` is a flat functor between small categories,
then the functor `Lan F.op` between presheaves of sets preserves all finite limits.
* `flat_iff_Lan_flat`: If `C`, `D` are small and `C` has all finite limits, then `F` is flat iff
`Lan F.op : (Cᵒᵖ ⥤ Type*) ⥤ (Dᵒᵖ ⥤ Type*)` is flat.
* `preserves_finite_limits_iff_Lan_preserves_finite_limits`: If `C`, `D` are small and `C` has all
finite limits, then `F` preserves finite limits iff `Lan F.op : (Cᵒᵖ ⥤ Type*) ⥤ (Dᵒᵖ ⥤ Type*)`
does.
-/
universes w v₁ v₂ v₃ u₁ u₂ u₃
open category_theory
open category_theory.limits
open opposite
namespace category_theory
namespace structured_arrow_cone
open structured_arrow
variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₁} D]
variables {J : Type w} [small_category J]
variables {K : J ⥤ C} (F : C ⥤ D) (c : cone K)
/--
Given a cone `c : cone K` and a map `f : X ⟶ c.X`, we can construct a cone of structured
arrows over `X` with `f` as the cone point. This is the underlying diagram.
-/
@[simps]
def to_diagram : J ⥤ structured_arrow c.X K :=
{ obj := λ j, structured_arrow.mk (c.π.app j),
map := λ j k g, structured_arrow.hom_mk g (by simpa) }
/-- Given a diagram of `structured_arrow X F`s, we may obtain a cone with cone point `X`. -/
@[simps]
def diagram_to_cone {X : D} (G : J ⥤ structured_arrow X F) : cone (G ⋙ proj X F ⋙ F) :=
{ X := X, π := { app := λ j, (G.obj j).hom } }
/--
Given a cone `c : cone K` and a map `f : X ⟶ F.obj c.X`, we can construct a cone of structured
arrows over `X` with `f` as the cone point.
-/
@[simps]
def to_cone {X : D} (f : X ⟶ F.obj c.X) :
cone (to_diagram (F.map_cone c) ⋙ map f ⋙ pre _ K F) :=
{ X := mk f, π := { app := λ j, hom_mk (c.π.app j) rfl,
naturality' := λ j k g, by { ext, dsimp, simp } } }
end structured_arrow_cone
section representably_flat
variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₂} D]
variables {E : Type u₃} [category.{v₃} E]
/--
A functor `F : C ⥤ D` is a representably flat functor if the comma category `(X/F)`
is cofiltered for each `X : D`.
-/
class representably_flat (F : C ⥤ D) : Prop :=
(cofiltered : ∀ (X : D), is_cofiltered (structured_arrow X F))
attribute [instance] representably_flat.cofiltered
local attribute [instance] is_cofiltered.nonempty
instance representably_flat.id : representably_flat (𝟭 C) :=
begin
constructor,
intro X,
haveI : nonempty (structured_arrow X (𝟭 C)) := ⟨structured_arrow.mk (𝟙 _)⟩,
rsufficesI : is_cofiltered_or_empty (structured_arrow X (𝟭 C)),
{ constructor },
constructor,
{ intros Y Z,
use structured_arrow.mk (𝟙 _),
use structured_arrow.hom_mk Y.hom (by erw [functor.id_map, category.id_comp]),
use structured_arrow.hom_mk Z.hom (by erw [functor.id_map, category.id_comp]) },
{ intros Y Z f g,
use structured_arrow.mk (𝟙 _),
use structured_arrow.hom_mk Y.hom (by erw [functor.id_map, category.id_comp]),
ext,
transitivity Z.hom; simp }
end
instance representably_flat.comp (F : C ⥤ D) (G : D ⥤ E)
[representably_flat F] [representably_flat G] : representably_flat (F ⋙ G) :=
begin
constructor,
intro X,
haveI : nonempty (structured_arrow X (F ⋙ G)),
{ have f₁ : structured_arrow X G := nonempty.some infer_instance,
have f₂ : structured_arrow f₁.right F := nonempty.some infer_instance,
exact ⟨structured_arrow.mk (f₁.hom ≫ G.map f₂.hom)⟩ },
rsufficesI : is_cofiltered_or_empty (structured_arrow X (F ⋙ G)),
{ constructor },
constructor,
{ intros Y Z,
let W := @is_cofiltered.min (structured_arrow X G) _ _
(structured_arrow.mk Y.hom) (structured_arrow.mk Z.hom),
let Y' : W ⟶ _ := is_cofiltered.min_to_left _ _,
let Z' : W ⟶ _ := is_cofiltered.min_to_right _ _,
let W' := @is_cofiltered.min (structured_arrow W.right F) _ _
(structured_arrow.mk Y'.right) (structured_arrow.mk Z'.right),
let Y'' : W' ⟶ _ := is_cofiltered.min_to_left _ _,
let Z'' : W' ⟶ _ := is_cofiltered.min_to_right _ _,
use structured_arrow.mk (W.hom ≫ G.map W'.hom),
use structured_arrow.hom_mk Y''.right (by simp [← G.map_comp]),
use structured_arrow.hom_mk Z''.right (by simp [← G.map_comp]) },
{ intros Y Z f g,
let W := @is_cofiltered.eq (structured_arrow X G) _ _
(structured_arrow.mk Y.hom) (structured_arrow.mk Z.hom)
(structured_arrow.hom_mk (F.map f.right) (structured_arrow.w f))
(structured_arrow.hom_mk (F.map g.right) (structured_arrow.w g)),
let h : W ⟶ _ := is_cofiltered.eq_hom _ _,
let h_cond : h ≫ _ = h ≫ _ := is_cofiltered.eq_condition _ _,
let W' := @is_cofiltered.eq (structured_arrow W.right F) _ _
(structured_arrow.mk h.right) (structured_arrow.mk (h.right ≫ F.map f.right))
(structured_arrow.hom_mk f.right rfl)
(structured_arrow.hom_mk g.right (congr_arg comma_morphism.right h_cond).symm),
let h' : W' ⟶ _ := is_cofiltered.eq_hom _ _,
let h'_cond : h' ≫ _ = h' ≫ _ := is_cofiltered.eq_condition _ _,
use structured_arrow.mk (W.hom ≫ G.map W'.hom),
use structured_arrow.hom_mk h'.right (by simp [← G.map_comp]),
ext,
exact (congr_arg comma_morphism.right h'_cond : _) }
end
end representably_flat
section has_limit
variables {C : Type u₁} [category.{v₁} C] {D : Type u₂} [category.{v₁} D]
local attribute [instance] has_finite_limits_of_has_finite_limits_of_size
lemma cofiltered_of_has_finite_limits [has_finite_limits C] : is_cofiltered C :=
{ cocone_objs := λ A B, ⟨limits.prod A B, limits.prod.fst, limits.prod.snd, trivial⟩,
cocone_maps := λ A B f g, ⟨equalizer f g, equalizer.ι f g, equalizer.condition f g⟩,
nonempty := ⟨⊤_ C⟩ }
lemma flat_of_preserves_finite_limits [has_finite_limits C] (F : C ⥤ D)
[preserves_finite_limits F] : representably_flat F := ⟨λ X,
begin
haveI : has_finite_limits (structured_arrow X F) :=
begin
apply has_finite_limits_of_has_finite_limits_of_size.{v₁} (structured_arrow X F),
intros J sJ fJ, resetI, constructor
end,
exact cofiltered_of_has_finite_limits
end⟩
namespace preserves_finite_limits_of_flat
open structured_arrow
open structured_arrow_cone
variables {J : Type v₁} [small_category J] [fin_category J] {K : J ⥤ C}
variables (F : C ⥤ D) [representably_flat F] {c : cone K} (hc : is_limit c) (s : cone (K ⋙ F))
include hc
/--
(Implementation).
Given a limit cone `c : cone K` and a cone `s : cone (K ⋙ F)` with `F` representably flat,
`s` can factor through `F.map_cone c`.
-/
noncomputable def lift : s.X ⟶ F.obj c.X :=
let s' := is_cofiltered.cone (to_diagram s ⋙ structured_arrow.pre _ K F) in
s'.X.hom ≫ (F.map $ hc.lift $
(cones.postcompose ({ app := λ X, 𝟙 _, naturality' := by simp }
: (to_diagram s ⋙ pre s.X K F) ⋙ proj s.X F ⟶ K)).obj $
(structured_arrow.proj s.X F).map_cone s')
lemma fac (x : J) : lift F hc s ≫ (F.map_cone c).π.app x = s.π.app x :=
by simpa [lift, ←functor.map_comp]
local attribute [simp] eq_to_hom_map
lemma uniq {K : J ⥤ C} {c : cone K} (hc : is_limit c)
(s : cone (K ⋙ F)) (f₁ f₂ : s.X ⟶ F.obj c.X)
(h₁ : ∀ (j : J), f₁ ≫ (F.map_cone c).π.app j = s.π.app j)
(h₂ : ∀ (j : J), f₂ ≫ (F.map_cone c).π.app j = s.π.app j) : f₁ = f₂ :=
begin
-- We can make two cones over the diagram of `s` via `f₁` and `f₂`.
let α₁ : to_diagram (F.map_cone c) ⋙ map f₁ ⟶ to_diagram s :=
{ app := λ X, eq_to_hom (by simp [←h₁]), naturality' := λ _ _ _, by { ext, simp } },
let α₂ : to_diagram (F.map_cone c) ⋙ map f₂ ⟶ to_diagram s :=
{ app := λ X, eq_to_hom (by simp [←h₂]), naturality' := λ _ _ _, by { ext, simp } },
let c₁ : cone (to_diagram s ⋙ pre s.X K F) :=
(cones.postcompose (whisker_right α₁ (pre s.X K F) : _)).obj (to_cone F c f₁),
let c₂ : cone (to_diagram s ⋙ pre s.X K F) :=
(cones.postcompose (whisker_right α₂ (pre s.X K F) : _)).obj (to_cone F c f₂),
-- The two cones can then be combined and we may obtain a cone over the two cones since
-- `structured_arrow s.X F` is cofiltered.
let c₀ := is_cofiltered.cone (bicone_mk _ c₁ c₂),
let g₁ : c₀.X ⟶ c₁.X := c₀.π.app (bicone.left),
let g₂ : c₀.X ⟶ c₂.X := c₀.π.app (bicone.right),
-- Then `g₁.right` and `g₂.right` are two maps from the same cone into the `c`.
have : ∀ (j : J), g₁.right ≫ c.π.app j = g₂.right ≫ c.π.app j,
{ intro j,
injection c₀.π.naturality (bicone_hom.left j) with _ e₁,
injection c₀.π.naturality (bicone_hom.right j) with _ e₂,
simpa using e₁.symm.trans e₂ },
have : c.extend g₁.right = c.extend g₂.right,
{ unfold cone.extend, congr' 1, ext x, apply this },
-- And thus they are equal as `c` is the limit.
have : g₁.right = g₂.right,
calc g₁.right = hc.lift (c.extend g₁.right) : by { apply hc.uniq (c.extend _), tidy }
... = hc.lift (c.extend g₂.right) : by { congr, exact this }
... = g₂.right : by { symmetry, apply hc.uniq (c.extend _), tidy },
-- Finally, since `fᵢ` factors through `F(gᵢ)`, the result follows.
calc f₁ = 𝟙 _ ≫ f₁ : by simp
... = c₀.X.hom ≫ F.map g₁.right : g₁.w
... = c₀.X.hom ≫ F.map g₂.right : by rw this
... = 𝟙 _ ≫ f₂ : g₂.w.symm
... = f₂ : by simp
end
end preserves_finite_limits_of_flat
/-- Representably flat functors preserve finite limits. -/
noncomputable
def preserves_finite_limits_of_flat (F : C ⥤ D) [representably_flat F] :
preserves_finite_limits F :=
begin
apply preserves_finite_limits_of_preserves_finite_limits_of_size,
intros J _ _, constructor,
intros K, constructor,
intros c hc,
exactI { lift := preserves_finite_limits_of_flat.lift F hc,
fac' := preserves_finite_limits_of_flat.fac F hc,
uniq' := λ s m h, by
{ apply preserves_finite_limits_of_flat.uniq F hc,
exact h,
exact preserves_finite_limits_of_flat.fac F hc s } }
end
/--
If `C` is finitely cocomplete, then `F : C ⥤ D` is representably flat iff it preserves
finite limits.
-/
noncomputable
def preserves_finite_limits_iff_flat [has_finite_limits C] (F : C ⥤ D) :
representably_flat F ≃ preserves_finite_limits F :=
{ to_fun := λ _, by exactI preserves_finite_limits_of_flat F,
inv_fun := λ _, by exactI flat_of_preserves_finite_limits F,
left_inv := λ _, proof_irrel _ _,
right_inv := λ x, by { cases x, unfold preserves_finite_limits_of_flat,
dunfold preserves_finite_limits_of_preserves_finite_limits_of_size, congr } }
end has_limit
section small_category
variables {C D : Type u₁} [small_category C] [small_category D] (E : Type u₂) [category.{u₁} E]
/--
(Implementation)
The evaluation of `Lan F` at `X` is the colimit over the costructured arrows over `X`.
-/
noncomputable
def Lan_evaluation_iso_colim (F : C ⥤ D) (X : D)
[∀ (X : D), has_colimits_of_shape (costructured_arrow F X) E] :
Lan F ⋙ (evaluation D E).obj X ≅
((whiskering_left _ _ E).obj (costructured_arrow.proj F X)) ⋙ colim :=
nat_iso.of_components (λ G, colim.map_iso (iso.refl _))
begin
intros G H i,
ext,
simp only [functor.comp_map, colimit.ι_desc_assoc, functor.map_iso_refl, evaluation_obj_map,
whiskering_left_obj_map, category.comp_id, Lan_map_app, category.assoc],
erw [colimit.ι_pre_assoc (Lan.diagram F H X) (costructured_arrow.map j.hom),
category.id_comp, category.comp_id, colimit.ι_map],
rcases j with ⟨j_left, ⟨⟨⟩⟩, j_hom⟩,
congr,
rw [costructured_arrow.map_mk, category.id_comp, costructured_arrow.mk]
end
variables [concrete_category.{u₁} E] [has_limits E] [has_colimits E]
variables [reflects_limits (forget E)] [preserves_filtered_colimits (forget E)]
variables [preserves_limits (forget E)]
/--
If `F : C ⥤ D` is a representably flat functor between small categories, then the functor
`Lan F.op` that takes presheaves over `C` to presheaves over `D` preserves finite limits.
-/
noncomputable
instance Lan_preserves_finite_limits_of_flat (F : C ⥤ D) [representably_flat F] :
preserves_finite_limits (Lan F.op : _ ⥤ (Dᵒᵖ ⥤ E)) :=
begin
apply preserves_finite_limits_of_preserves_finite_limits_of_size.{u₁},
intros J _ _, resetI,
apply preserves_limits_of_shape_of_evaluation (Lan F.op : (Cᵒᵖ ⥤ E) ⥤ (Dᵒᵖ ⥤ E)) J,
intro K,
haveI : is_filtered (costructured_arrow F.op K) :=
is_filtered.of_equivalence (structured_arrow_op_equivalence F (unop K)),
exact preserves_limits_of_shape_of_nat_iso (Lan_evaluation_iso_colim _ _ _).symm,
end
instance Lan_flat_of_flat (F : C ⥤ D) [representably_flat F] :
representably_flat (Lan F.op : _ ⥤ (Dᵒᵖ ⥤ E)) := flat_of_preserves_finite_limits _
variable [has_finite_limits C]
noncomputable
instance Lan_preserves_finite_limits_of_preserves_finite_limits (F : C ⥤ D)
[preserves_finite_limits F] : preserves_finite_limits (Lan F.op : _ ⥤ (Dᵒᵖ ⥤ E)) :=
begin
haveI := flat_of_preserves_finite_limits F,
apply_instance
end
lemma flat_iff_Lan_flat (F : C ⥤ D) :
representably_flat F ↔ representably_flat (Lan F.op : _ ⥤ (Dᵒᵖ ⥤ Type u₁)) :=
⟨λ H, by exactI infer_instance, λ H,
begin
resetI,
haveI := preserves_finite_limits_of_flat (Lan F.op : _ ⥤ (Dᵒᵖ ⥤ Type u₁)),
haveI : preserves_finite_limits F :=
begin
apply preserves_finite_limits_of_preserves_finite_limits_of_size.{u₁},
intros, resetI, apply preserves_limit_of_Lan_presesrves_limit
end,
apply flat_of_preserves_finite_limits
end⟩
/--
If `C` is finitely complete, then `F : C ⥤ D` preserves finite limits iff
`Lan F.op : (Cᵒᵖ ⥤ Type*) ⥤ (Dᵒᵖ ⥤ Type*)` preserves finite limits.
-/
noncomputable
def preserves_finite_limits_iff_Lan_preserves_finite_limits (F : C ⥤ D) :
preserves_finite_limits F ≃ preserves_finite_limits (Lan F.op : _ ⥤ (Dᵒᵖ ⥤ Type u₁)) :=
{ to_fun := λ _, by exactI infer_instance,
inv_fun := λ _,
begin
apply preserves_finite_limits_of_preserves_finite_limits_of_size.{u₁},
intros, resetI, apply preserves_limit_of_Lan_presesrves_limit
end,
left_inv := λ x,
begin
cases x, unfold preserves_finite_limits_of_flat,
dunfold preserves_finite_limits_of_preserves_finite_limits_of_size, congr
end,
right_inv := λ x,
begin
cases x,
unfold preserves_finite_limits_of_flat,
congr,
unfold category_theory.Lan_preserves_finite_limits_of_preserves_finite_limits
category_theory.Lan_preserves_finite_limits_of_flat,
dunfold preserves_finite_limits_of_preserves_finite_limits_of_size, congr
end }
end small_category
end category_theory
repo_name: Lix0120/mathlib | path: /src/topology/continuous_map.lean | license: Apache-2.0 (permissive) | language: Lean | src_encoding: UTF-8 | length_bytes: 1,752 | blob_id: d786c0d4652a661986c68b60b9f28016e5555645
/-
Copyright © 2020 Nicolò Cavalleri. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Nicolò Cavalleri.
-/
import topology.subset_properties
import topology.tactic
/-!
# Continuous bundled map
In this file we define the type `continuous_map` of continuous bundled maps.
-/
/-- Bundled continuous maps. -/
@[protect_proj]
structure continuous_map (α : Type*) (β : Type*)
[topological_space α] [topological_space β] :=
(to_fun : α → β)
(continuous_to_fun : continuous to_fun . tactic.interactive.continuity')
notation `C(` α `, ` β `)` := continuous_map α β
namespace continuous_map
attribute [continuity] continuous_map.continuous_to_fun
variables {α : Type*} {β : Type*} {γ : Type*}
variables [topological_space α] [topological_space β] [topological_space γ]
instance : has_coe_to_fun (C(α, β)) := ⟨_, continuous_map.to_fun⟩
variables {α β} {f g : continuous_map α β}
protected lemma continuous (f : C(α, β)) : continuous f := f.continuous_to_fun
@[continuity] lemma coe_continuous : continuous (f : α → β) := f.continuous_to_fun
@[ext] theorem ext (H : ∀ x, f x = g x) : f = g :=
by cases f; cases g; congr'; exact funext H
instance [inhabited β] : inhabited C(α, β) :=
⟨{ to_fun := λ _, default _, }⟩
lemma coe_inj ⦃f g : C(α, β)⦄ (h : (f : α → β) = g) : f = g :=
by cases f; cases g; cases h; refl
/-- The identity as a continuous map. -/
def id : C(α, α) := ⟨id⟩
/-- The composition of continuous maps, as a continuous map. -/
def comp (f : C(β, γ)) (g : C(α, β)) : C(α, γ) := ⟨f ∘ g⟩
/-- Constant map as a continuous map -/
def const (b : β) : C(α, β) := ⟨λ x, b⟩
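-- Usage sketch (illustrative only): composing two bundled maps, and a constant map.
example (g₁ : C(β, γ)) (f₁ : C(α, β)) : C(α, γ) := g₁.comp f₁
example (b : β) : C(α, β) := const b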
end continuous_map
repo_name: Or7ando/lean | path: /.github/workflows/project_1_a_decrire/lean-scheme-submission/src/instances/affine_scheme.lean | license: none detected (no_license) | language: Lean | src_encoding: UTF-8 | length_bytes: 1,008 | blob_id: 25de7eedcd1eea9a9e26e5e474f4a4973f605be1
/-
An affine scheme is a scheme.
-/
import topology.opens
import spectrum_of_a_ring.spec_locally_ringed_space
import scheme
universe u
noncomputable theory
open topological_space
variables (R : Type u) [comm_ring R]
-- Spec(R) is a locally ringed space and it covers itself.
def affine_scheme : scheme (Spec R) :=
{ carrier := Spec.locally_ringed_space R,
Haffinecov :=
begin
existsi ({
γ := punit,
Uis := λ x, opens.univ,
Hcov := opens.ext $ set.ext $ λ x,
⟨λ Hx, trivial, λ Hx, ⟨set.univ, ⟨⟨opens.univ, ⟨⟨punit.star, rfl⟩, rfl⟩⟩, Hx⟩⟩⟩ }
: covering.univ (Spec R)),
intros i,
use [R, by apply_instance],
use [presheaf_of_rings.pullback_id (structure_sheaf.presheaf R)],
split,
{ dsimp [presheaf_of_rings.pullback_id],
apply opens.ext; dsimp,
rw set.image_id,
refl, },
{ exact presheaf_of_rings.pullback_id.iso (structure_sheaf.presheaf R), }
end }