Dataset schema (one row per column, with the value statistics from the original header):

| column | dtype | values / range |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 7–139 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–16 |
| license_type | string | 2 classes |
| repo_name | string | length 7–55 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 6 classes |
| visit_date | int64 | 1,471B–1,694B |
| revision_date | int64 | 1,378B–1,694B |
| committer_date | int64 | 1,378B–1,694B |
| github_id | float64 | 1.33M–604M (nullable) |
| star_events_count | int64 | 0–43.5k |
| fork_events_count | int64 | 0–1.5k |
| gha_license_id | string | 6 classes |
| gha_event_created_at | int64 | 1,402B–1,695B (nullable) |
| gha_created_at | int64 | 1,359B–1,637B (nullable) |
| gha_language | string | 19 classes |
| src_encoding | string | 2 classes |
| language | string | 1 class |
| is_vendor | bool | 1 class |
| is_generated | bool | 1 class |
| length_bytes | int64 | 3–6.4M |
| extension | string | 4 classes |
| content | string | length 3–6.12M |

Sample rows follow, each rendered as a field list and then its `content` cell.

blob_id: 7a9a62aa26fb68418abf43975bdd3f52824cc002
directory_id: 6432ea7a083ff6ba21ea17af9ee47b9c371760f7
path: /tests/lean/run/lemma.lean
content_id: 31fffea0bad16160ec50402c2d4ecc584e0932b9
detected_licenses: ["Apache-2.0", "LLVM-exception", "NCSA", "LGPL-3.0-only", "LicenseRef-scancode-inner-net-2.0", "BSD-3-Clause", "LGPL-2.0-or-later", "Spencer-94", "LGPL-2.1-or-later", "HPND", "LicenseRef-scancode-pcre", "ISC", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "SunPro", "CMU-Mach"]
license_type: permissive
repo_name: leanprover/lean4
snapshot_id: 4bdf9790294964627eb9be79f5e8f6157780b4cc
revision_id: f1f9dc0f2f531af3312398999d8b8303fa5f096b
branch_name: refs/heads/master
visit_date: 1,693,360,665,786
revision_date: 1,693,350,868,000
committer_date: 1,693,350,868,000
github_id: 129,571,436
star_events_count: 2,827
fork_events_count: 311
gha_license_id: Apache-2.0
gha_event_created_at: 1,694,716,156,000
gha_created_at: 1,523,760,560,000
gha_language: Lean
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 254
extension: lean
content:
macro mods:declModifiers "lemma" n:declId sig:declSig val:declVal : command => `($mods:declModifiers theorem $n $sig $val)
lemma fooSimple (n : Nat) : Prop :=
if n = 0 then True else False
lemma fooPat : Nat → Prop
| 0 => True
| n+1 => False
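-- Illustrative extra check (a sketch, not part of the original test file): since the macro
-- above expands `lemma` into `theorem`, a declaration like the following hypothetical one is
-- expected to elaborate exactly as `theorem fooRefl (n : Nat) : n = n := rfl` would.
lemma fooRefl (n : Nat) : n = n := rfl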

blob_id: 79180e4b308a7b3def20f9bbf69cb1f9e644a59c
directory_id: 8cae430f0a71442d02dbb1cbb14073b31048e4b0
path: /src/combinatorics/additive/salem_spencer.lean
content_id: 7b6fc3d940e27f75595cb020d665e1e2689820c6
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: leanprover-community/mathlib
snapshot_id: 56a2cadd17ac88caf4ece0a775932fa26327ba0e
revision_id: 442a83d738cb208d3600056c489be16900ba701d
branch_name: refs/heads/master
visit_date: 1,693,584,102,358
revision_date: 1,693,471,902,000
committer_date: 1,693,471,902,000
github_id: 97,922,418
star_events_count: 1,595
fork_events_count: 352
gha_license_id: Apache-2.0
gha_event_created_at: 1,694,693,445,000
gha_created_at: 1,500,624,130,000
gha_language: Lean
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 18,156
extension: lean
content:
/-
Copyright (c) 2021 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import algebra.hom.freiman
import analysis.asymptotics.asymptotics
import analysis.convex.strict_convex_space
/-!
# Salem-Spencer sets and Roth numbers
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines Salem-Spencer sets and the Roth number of a set.
A Salem-Spencer set is a set without arithmetic progressions of length `3`. Equivalently, the
average of any two distinct elements is not in the set.
The Roth number of a finset is the size of its biggest Salem-Spencer subset. This is a more general
definition than the one often found in the mathematical literature, where the `n`-th Roth number is
the size of the biggest Salem-Spencer subset of `{0, ..., n - 1}`.
## Main declarations
* `mul_salem_spencer`: Predicate for a set to be multiplicative Salem-Spencer.
* `add_salem_spencer`: Predicate for a set to be additive Salem-Spencer.
* `mul_roth_number`: The multiplicative Roth number of a finset.
* `add_roth_number`: The additive Roth number of a finset.
* `roth_number_nat`: The Roth number of a natural. This corresponds to
`add_roth_number (finset.range n)`.
## TODO
* Can `add_salem_spencer_iff_eq_right` be made more general?
* Generalize `mul_salem_spencer.image` to Freiman homs
## Tags
Salem-Spencer, Roth, arithmetic progression, average, three-free
-/
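-- Concrete illustration (added here as a sketch, not part of the original file): in `ℕ`, the
-- set `{0, 1, 3}` is additively Salem-Spencer, since no two of its elements (distinct or not)
-- sum to twice a third element, whereas `{0, 1, 2}` is not, because `0 + 2 = 1 + 1`.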
open finset function metric nat
open_locale pointwise
variables {F α β 𝕜 E : Type*}
section salem_spencer
open set
section monoid
variables [monoid α] [monoid β] (s t : set α)
/-- A multiplicative Salem-Spencer, aka non averaging, set `s` in a monoid is a set such that the
multiplicative average of any two distinct elements is not in the set. -/
@[to_additive "A Salem-Spencer, aka non averaging, set `s` in an additive monoid
is a set such that the average of any two distinct elements is not in the set."]
def mul_salem_spencer : Prop := ∀ ⦃a b c⦄, a ∈ s → b ∈ s → c ∈ s → a * b = c * c → a = b
/-- Whether a given finset is Salem-Spencer is decidable. -/
@[to_additive "Whether a given finset is Salem-Spencer is decidable."]
instance {α : Type*} [decidable_eq α] [monoid α] {s : finset α} :
decidable (mul_salem_spencer (s : set α)) :=
decidable_of_iff (∀ a ∈ s, ∀ b ∈ s, ∀ c ∈ s, a * b = c * c → a = b)
⟨λ h a b c ha hb hc, h a ha b hb c hc, λ h a ha b hb c hc, h ha hb hc⟩
variables {s t}
@[to_additive]
lemma mul_salem_spencer.mono (h : t ⊆ s) (hs : mul_salem_spencer s) : mul_salem_spencer t :=
λ a b c ha hb hc, hs (h ha) (h hb) (h hc)
@[simp, to_additive]
lemma mul_salem_spencer_empty : mul_salem_spencer (∅ : set α) := λ a _ _ ha, ha.elim
@[to_additive]
lemma set.subsingleton.mul_salem_spencer (hs : s.subsingleton) : mul_salem_spencer s :=
λ a b _ ha hb _ _, hs ha hb
@[simp, to_additive]
lemma mul_salem_spencer_singleton (a : α) : mul_salem_spencer ({a} : set α) :=
subsingleton_singleton.mul_salem_spencer
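-- Illustrative use of the lemma above (a sketch, not part of the original file):
example : mul_salem_spencer ({37} : set ℕ) := mul_salem_spencer_singleton 37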
@[to_additive add_salem_spencer.prod]
lemma mul_salem_spencer.prod {t : set β} (hs : mul_salem_spencer s) (ht : mul_salem_spencer t) :
mul_salem_spencer (s ×ˢ t) :=
λ a b c ha hb hc h,
prod.ext (hs ha.1 hb.1 hc.1 (prod.ext_iff.1 h).1) (ht ha.2 hb.2 hc.2 (prod.ext_iff.1 h).2)
@[to_additive]
lemma mul_salem_spencer_pi {ι : Type*} {α : ι → Type*} [Π i, monoid (α i)] {s : Π i, set (α i)}
(hs : ∀ i, mul_salem_spencer (s i)) :
mul_salem_spencer ((univ : set ι).pi s) :=
λ a b c ha hb hc h, funext $ λ i, hs i (ha i trivial) (hb i trivial) (hc i trivial) $ congr_fun h i
end monoid
section comm_monoid
variables [comm_monoid α] [comm_monoid β] {s : set α} {a : α}
@[to_additive]
lemma mul_salem_spencer.of_image [fun_like F α (λ _, β)] [freiman_hom_class F s β 2] (f : F)
(hf : s.inj_on f) (h : mul_salem_spencer (f '' s)) :
mul_salem_spencer s :=
λ a b c ha hb hc habc, hf ha hb $ h (mem_image_of_mem _ ha) (mem_image_of_mem _ hb)
(mem_image_of_mem _ hc) $ map_mul_map_eq_map_mul_map f ha hb hc hc habc
-- TODO: Generalize to Freiman homs
@[to_additive]
lemma mul_salem_spencer.image [mul_hom_class F α β] (f : F) (hf : (s * s).inj_on f)
(h : mul_salem_spencer s) :
mul_salem_spencer (f '' s) :=
begin
rintro _ _ _ ⟨a, ha, rfl⟩ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ habc,
rw h ha hb hc (hf (mul_mem_mul ha hb) (mul_mem_mul hc hc) $ by rwa [map_mul, map_mul]),
end
end comm_monoid
section cancel_comm_monoid
variables [cancel_comm_monoid α] {s : set α} {a : α}
@[to_additive]
lemma mul_salem_spencer_insert :
mul_salem_spencer (insert a s) ↔ mul_salem_spencer s ∧
(∀ ⦃b c⦄, b ∈ s → c ∈ s → a * b = c * c → a = b) ∧
∀ ⦃b c⦄, b ∈ s → c ∈ s → b * c = a * a → b = c :=
begin
refine ⟨λ hs, ⟨hs.mono (subset_insert _ _),
λ b c hb hc, hs (or.inl rfl) (or.inr hb) (or.inr hc),
λ b c hb hc, hs (or.inr hb) (or.inr hc) (or.inl rfl)⟩, _⟩,
rintro ⟨hs, ha, ha'⟩ b c d hb hc hd h,
rw mem_insert_iff at hb hc hd,
obtain rfl | hb := hb;
obtain rfl | hc := hc,
{ refl },
all_goals { obtain rfl | hd := hd },
{ exact (mul_left_cancel h).symm },
{ exact ha hc hd h },
{ exact mul_right_cancel h },
{ exact (ha hb hd $ (mul_comm _ _).trans h).symm },
{ exact ha' hb hc h },
{ exact hs hb hc hd h }
end
@[simp, to_additive]
lemma mul_salem_spencer_pair (a b : α) : mul_salem_spencer ({a, b} : set α) :=
begin
rw mul_salem_spencer_insert,
refine ⟨mul_salem_spencer_singleton _, _, _⟩,
{ rintro c d (rfl : c = b) (rfl : d = c),
exact mul_right_cancel },
{ rintro c d (rfl : c = b) (rfl : d = c) _,
refl }
end
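-- Illustrative consequence (a sketch, not part of the original file): any pair of integers
-- forms an additively Salem-Spencer set.
example : add_salem_spencer ({0, 5} : set ℤ) := add_salem_spencer_pair 0 5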
@[to_additive]
lemma mul_salem_spencer.mul_left (hs : mul_salem_spencer s) : mul_salem_spencer ((*) a '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm a d] at h,
rw hs hb hc hd (mul_left_cancel h),
end
@[to_additive]
lemma mul_salem_spencer.mul_right (hs : mul_salem_spencer s) : mul_salem_spencer ((* a) '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm d] at h,
rw hs hb hc hd (mul_right_cancel h),
end
@[to_additive]
lemma mul_salem_spencer_mul_left_iff : mul_salem_spencer ((*) a '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_left_cancel (hs (mem_image_of_mem _ hb) (mem_image_of_mem _ hc)
(mem_image_of_mem _ hd) $ by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
mul_salem_spencer.mul_left⟩
@[to_additive]
lemma mul_salem_spencer_mul_right_iff :
mul_salem_spencer ((* a) '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_right_cancel (hs (set.mem_image_of_mem _ hb) (set.mem_image_of_mem _ hc)
(set.mem_image_of_mem _ hd) $ by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
mul_salem_spencer.mul_right⟩
end cancel_comm_monoid
section ordered_cancel_comm_monoid
variables [ordered_cancel_comm_monoid α] {s : set α} {a : α}
@[to_additive]
lemma mul_salem_spencer_insert_of_lt (hs : ∀ i ∈ s, i < a) :
mul_salem_spencer (insert a s) ↔ mul_salem_spencer s ∧
∀ ⦃b c⦄, b ∈ s → c ∈ s → a * b = c * c → a = b :=
begin
refine mul_salem_spencer_insert.trans _,
rw ←and_assoc,
exact and_iff_left (λ b c hb hc h, ((mul_lt_mul_of_lt_of_lt (hs _ hb) (hs _ hc)).ne h).elim),
end
end ordered_cancel_comm_monoid
section cancel_comm_monoid_with_zero
variables [cancel_comm_monoid_with_zero α] [no_zero_divisors α] {s : set α} {a : α}
lemma mul_salem_spencer.mul_left₀ (hs : mul_salem_spencer s) (ha : a ≠ 0) :
mul_salem_spencer ((*) a '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm a d] at h,
rw hs hb hc hd (mul_left_cancel₀ (mul_ne_zero ha ha) h),
end
lemma mul_salem_spencer.mul_right₀ (hs : mul_salem_spencer s) (ha : a ≠ 0) :
mul_salem_spencer ((* a) '' s) :=
begin
rintro _ _ _ ⟨b, hb, rfl⟩ ⟨c, hc, rfl⟩ ⟨d, hd, rfl⟩ h,
rw [mul_mul_mul_comm, mul_mul_mul_comm d] at h,
rw hs hb hc hd (mul_right_cancel₀ (mul_ne_zero ha ha) h),
end
lemma mul_salem_spencer_mul_left_iff₀ (ha : a ≠ 0) :
mul_salem_spencer ((*) a '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_left_cancel₀ ha
(hs (set.mem_image_of_mem _ hb) (set.mem_image_of_mem _ hc) (set.mem_image_of_mem _ hd) $
by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
λ hs, hs.mul_left₀ ha⟩
lemma mul_salem_spencer_mul_right_iff₀ (ha : a ≠ 0) :
mul_salem_spencer ((* a) '' s) ↔ mul_salem_spencer s :=
⟨λ hs b c d hb hc hd h, mul_right_cancel₀ ha
(hs (set.mem_image_of_mem _ hb) (set.mem_image_of_mem _ hc) (set.mem_image_of_mem _ hd) $
by rw [mul_mul_mul_comm, h, mul_mul_mul_comm]),
λ hs, hs.mul_right₀ ha⟩
end cancel_comm_monoid_with_zero
section nat
lemma add_salem_spencer_iff_eq_right {s : set ℕ} :
add_salem_spencer s ↔ ∀ ⦃a b c⦄, a ∈ s → b ∈ s → c ∈ s → a + b = c + c → a = c :=
begin
refine forall₄_congr (λ a b c _, forall₃_congr $ λ _ _ habc, ⟨_, _⟩),
{ rintro rfl,
simp_rw ←two_mul at habc,
exact mul_left_cancel₀ two_ne_zero habc },
{ rintro rfl,
exact (add_left_cancel habc).symm }
end
end nat
/-- The frontier of a closed strictly convex set only contains trivial arithmetic progressions.
The idea is that an arithmetic progression is contained in a line and the frontier of a strictly
convex set does not contain lines. -/
lemma add_salem_spencer_frontier [linear_ordered_field 𝕜] [topological_space E] [add_comm_monoid E]
[module 𝕜 E] {s : set E} (hs₀ : is_closed s) (hs₁ : strict_convex 𝕜 s) :
add_salem_spencer (frontier s) :=
begin
intros a b c ha hb hc habc,
obtain rfl : (1 / 2 : 𝕜) • a + (1 / 2 : 𝕜) • b = c,
{ rwa [←smul_add, one_div, inv_smul_eq_iff₀ (show (2 : 𝕜) ≠ 0, by norm_num), two_smul] },
exact hs₁.eq (hs₀.frontier_subset ha) (hs₀.frontier_subset hb) one_half_pos one_half_pos
(add_halves _) hc.2,
end
lemma add_salem_spencer_sphere [normed_add_comm_group E] [normed_space ℝ E]
[strict_convex_space ℝ E] (x : E) (r : ℝ) : add_salem_spencer (sphere x r) :=
begin
obtain rfl | hr := eq_or_ne r 0,
{ rw sphere_zero,
exact add_salem_spencer_singleton _ },
{ convert add_salem_spencer_frontier is_closed_ball (strict_convex_closed_ball ℝ x r),
exact (frontier_closed_ball _ hr).symm }
end
end salem_spencer
open finset
section roth_number
variables [decidable_eq α]
section monoid
variables [monoid α] [decidable_eq β] [monoid β] (s t : finset α)
/-- The multiplicative Roth number of a finset is the cardinality of its biggest multiplicative
Salem-Spencer subset. -/
@[to_additive "The additive Roth number of a finset is the cardinality of its biggest additive
Salem-Spencer subset. The usual Roth number corresponds to `add_roth_number (finset.range n)`, see
`roth_number_nat`. "]
def mul_roth_number : finset α →o ℕ :=
⟨λ s, nat.find_greatest (λ m, ∃ t ⊆ s, t.card = m ∧ mul_salem_spencer (t : set α)) s.card,
begin
rintro t u htu,
refine nat.find_greatest_mono (λ m, _) (card_le_of_subset htu),
rintro ⟨v, hvt, hv⟩,
exact ⟨v, hvt.trans htu, hv⟩,
end⟩
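-- Worked example (a sketch, not part of the original file): for `{0, 1, 2} : finset ℕ`, the
-- additive Roth number is `2`: the whole set is a three-term arithmetic progression
-- (`0 + 2 = 1 + 1`), while `{0, 1}` is additively Salem-Spencer, so the largest Salem-Spencer
-- subset has two elements.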
@[to_additive]
lemma mul_roth_number_le : mul_roth_number s ≤ s.card := by convert nat.find_greatest_le s.card
@[to_additive]
lemma mul_roth_number_spec : ∃ t ⊆ s, t.card = mul_roth_number s ∧ mul_salem_spencer (t : set α) :=
@nat.find_greatest_spec _ _ (λ m, ∃ t ⊆ s, t.card = m ∧ mul_salem_spencer (t : set α)) _
(nat.zero_le _) ⟨∅, empty_subset _, card_empty, mul_salem_spencer_empty⟩
variables {s t} {n : ℕ}
@[to_additive]
lemma mul_salem_spencer.le_mul_roth_number (hs : mul_salem_spencer (s : set α)) (h : s ⊆ t) :
s.card ≤ mul_roth_number t :=
le_find_greatest (card_le_of_subset h) ⟨s, h, rfl, hs⟩
@[to_additive]
lemma mul_salem_spencer.roth_number_eq (hs : mul_salem_spencer (s : set α)) :
mul_roth_number s = s.card :=
(mul_roth_number_le _).antisymm $ hs.le_mul_roth_number $ subset.refl _
@[simp, to_additive]
lemma mul_roth_number_empty : mul_roth_number (∅ : finset α) = 0 :=
nat.eq_zero_of_le_zero $ (mul_roth_number_le _).trans card_empty.le
@[simp, to_additive]
lemma mul_roth_number_singleton (a : α) : mul_roth_number ({a} : finset α) = 1 :=
begin
convert mul_salem_spencer.roth_number_eq _,
rw coe_singleton,
exact mul_salem_spencer_singleton a,
end
@[to_additive]
lemma mul_roth_number_union_le (s t : finset α) :
mul_roth_number (s ∪ t) ≤ mul_roth_number s + mul_roth_number t :=
let ⟨u, hus, hcard, hu⟩ := mul_roth_number_spec (s ∪ t) in
calc
mul_roth_number (s ∪ t)
= u.card : hcard.symm
... = (u ∩ s ∪ u ∩ t).card
: by rw [←inter_distrib_left, (inter_eq_left_iff_subset _ _).2 hus]
... ≤ (u ∩ s).card + (u ∩ t).card : card_union_le _ _
... ≤ mul_roth_number s + mul_roth_number t
: add_le_add ((hu.mono $ inter_subset_left _ _).le_mul_roth_number $ inter_subset_right _ _)
((hu.mono $ inter_subset_left _ _).le_mul_roth_number $ inter_subset_right _ _)
@[to_additive]
lemma le_mul_roth_number_product (s : finset α) (t : finset β) :
mul_roth_number s * mul_roth_number t ≤ mul_roth_number (s ×ˢ t) :=
begin
obtain ⟨u, hus, hucard, hu⟩ := mul_roth_number_spec s,
obtain ⟨v, hvt, hvcard, hv⟩ := mul_roth_number_spec t,
rw [←hucard, ←hvcard, ←card_product],
refine mul_salem_spencer.le_mul_roth_number _ (product_subset_product hus hvt),
rw coe_product,
exact hu.prod hv,
end
@[to_additive]
lemma mul_roth_number_lt_of_forall_not_mul_salem_spencer
(h : ∀ t ∈ powerset_len n s, ¬mul_salem_spencer ((t : finset α) : set α)) :
mul_roth_number s < n :=
begin
obtain ⟨t, hts, hcard, ht⟩ := mul_roth_number_spec s,
rw [←hcard, ←not_le],
intro hn,
obtain ⟨u, hut, rfl⟩ := exists_smaller_set t n hn,
exact h _ (mem_powerset_len.2 ⟨hut.trans hts, rfl⟩) (ht.mono hut),
end
end monoid
section cancel_comm_monoid
variables [cancel_comm_monoid α] (s : finset α) (a : α)
@[simp, to_additive] lemma mul_roth_number_map_mul_left :
mul_roth_number (s.map $ mul_left_embedding a) = mul_roth_number s :=
begin
refine le_antisymm _ _,
{ obtain ⟨u, hus, hcard, hu⟩ := mul_roth_number_spec (s.map $ mul_left_embedding a),
rw subset_map_iff at hus,
obtain ⟨u, hus, rfl⟩ := hus,
rw coe_map at hu,
rw [←hcard, card_map],
exact (mul_salem_spencer_mul_left_iff.1 hu).le_mul_roth_number hus },
{ obtain ⟨u, hus, hcard, hu⟩ := mul_roth_number_spec s,
have h : mul_salem_spencer (u.map $ mul_left_embedding a : set α),
{ rw coe_map,
exact hu.mul_left },
convert h.le_mul_roth_number (map_subset_map.2 hus),
rw [card_map, hcard] }
end
@[simp, to_additive] lemma mul_roth_number_map_mul_right :
mul_roth_number (s.map $ mul_right_embedding a) = mul_roth_number s :=
by rw [←mul_left_embedding_eq_mul_right_embedding, mul_roth_number_map_mul_left s a]
end cancel_comm_monoid
end roth_number
section roth_number_nat
variables {s : finset ℕ} {k n : ℕ}
/-- The Roth number of a natural `N` is the largest integer `m` for which there is a subset of
`range N` of size `m` with no arithmetic progression of length 3.
Trivially, `roth_number_nat N ≤ N`, but Roth's theorem (proved in 1953) shows that
`roth_number_nat N = o(N)` and the construction by Behrend gives a lower bound of the form
`N * exp(-C sqrt(log(N))) ≤ roth_number_nat N`.
A significant refinement of Roth's theorem by Bloom and Sisask announced in 2020 gives
`roth_number_nat N = O(N / (log N)^(1+c))` for an absolute constant `c`. -/
def roth_number_nat : ℕ →o ℕ :=
⟨λ n, add_roth_number (range n), add_roth_number.mono.comp range_mono⟩
lemma roth_number_nat_def (n : ℕ) : roth_number_nat n = add_roth_number (range n) := rfl
lemma roth_number_nat_le (N : ℕ) : roth_number_nat N ≤ N :=
(add_roth_number_le _).trans (card_range _).le
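-- Small values, for orientation (a sketch, not part of the original file):
-- `roth_number_nat 3 = 2`, since `{0, 1, 2}` itself is a three-term progression but any
-- two-element subset is Salem-Spencer, and `roth_number_nat 4 = 3`, witnessed by `{0, 1, 3}`.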
lemma roth_number_nat_spec (n : ℕ) :
∃ t ⊆ range n, t.card = roth_number_nat n ∧ add_salem_spencer (t : set ℕ) :=
add_roth_number_spec _
/-- A verbose specialization of `add_salem_spencer.le_add_roth_number`, sometimes convenient in
practice. -/
lemma add_salem_spencer.le_roth_number_nat (s : finset ℕ) (hs : add_salem_spencer (s : set ℕ))
(hsn : ∀ x ∈ s, x < n) (hsk : s.card = k) :
k ≤ roth_number_nat n :=
hsk.ge.trans $ hs.le_add_roth_number $ λ x hx, mem_range.2 $ hsn x hx
/-- The Roth number is a subadditive function. Note that by Fekete's lemma this shows that
the limit of `roth_number_nat N / N` exists, but Roth's theorem gives the stronger result that this
limit is actually `0`. -/
lemma roth_number_nat_add_le (M N : ℕ) :
roth_number_nat (M + N) ≤ roth_number_nat M + roth_number_nat N :=
begin
simp_rw roth_number_nat_def,
rw [range_add_eq_union, ←add_roth_number_map_add_left (range N) M],
exact add_roth_number_union_le _ _,
end
@[simp] lemma roth_number_nat_zero : roth_number_nat 0 = 0 := rfl
lemma add_roth_number_Ico (a b : ℕ) : add_roth_number (Ico a b) = roth_number_nat (b - a) :=
begin
obtain h | h := le_total b a,
{ rw [tsub_eq_zero_of_le h, Ico_eq_empty_of_le h, roth_number_nat_zero, add_roth_number_empty] },
convert add_roth_number_map_add_left _ a,
rw [range_eq_Ico, map_eq_image],
convert (image_add_left_Ico 0 (b - a) _).symm,
exact (add_tsub_cancel_of_le h).symm,
end
open asymptotics filter
lemma roth_number_nat_is_O_with_id :
is_O_with 1 at_top (λ N, (roth_number_nat N : ℝ)) (λ N, (N : ℝ)) :=
is_O_with_of_le _ $ by simpa only [real.norm_coe_nat, nat.cast_le] using roth_number_nat_le
/-- The Roth number has the trivial bound `roth_number_nat N = O(N)`. -/
lemma roth_number_nat_is_O_id : (λ N, (roth_number_nat N : ℝ)) =O[at_top] (λ N, (N : ℝ)) :=
roth_number_nat_is_O_with_id.is_O
end roth_number_nat

blob_id: 12e4c44578d63ca9cb05c864a23502db92a510a9
directory_id: 4727251e0cd73359b15b664c3170e5d754078599
path: /src/analysis/analytic/composition.lean
content_id: e7d04555e7627420923c15e6904650f28fc01ff1
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: Vierkantor/mathlib
snapshot_id: 0ea59ac32a3a43c93c44d70f441c4ee810ccceca
revision_id: 83bc3b9ce9b13910b57bda6b56222495ebd31c2f
branch_name: refs/heads/master
visit_date: 1,658,323,012,449
revision_date: 1,652,256,003,000
committer_date: 1,652,256,003,000
github_id: 209,296,341
star_events_count: 0
fork_events_count: 1
gha_license_id: Apache-2.0
gha_event_created_at: 1,568,807,655,000
gha_created_at: 1,568,807,655,000
gha_language: null
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 58,624
extension: lean
content:
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Johan Commelin
-/
import analysis.analytic.basic
import combinatorics.composition
/-!
# Composition of analytic functions
In this file we prove that the composition of analytic functions is analytic.
The argument is the following. Assume `g z = ∑' qₙ (z, ..., z)` and `f y = ∑' pₖ (y, ..., y)`. Then
`g (f y) = ∑' qₙ (∑' pₖ (y, ..., y), ..., ∑' pₖ (y, ..., y))
= ∑' qₙ (p_{i₁} (y, ..., y), ..., p_{iₙ} (y, ..., y))`.
For each `n` and `i₁, ..., iₙ`, define a `i₁ + ... + iₙ` multilinear function mapping
`(y₀, ..., y_{i₁ + ... + iₙ - 1})` to
`qₙ (p_{i₁} (y₀, ..., y_{i₁-1}), p_{i₂} (y_{i₁}, ..., y_{i₁ + i₂ - 1}), ..., p_{iₙ} (....)))`.
Then `g ∘ f` is obtained by summing all these multilinear functions.
To formalize this, we use compositions of an integer `N`, i.e., its decompositions into
a sum `i₁ + ... + iₙ` of positive integers. Given such a composition `c` and two formal
multilinear series `q` and `p`, let `q.comp_along_composition p c` be the above multilinear
function. Then the `N`-th coefficient in the power series expansion of `g ∘ f` is the sum of these
terms over all `c : composition N`.
To complete the proof, we need to show that this power series has a positive radius of convergence.
This follows from the fact that `composition N` has cardinality `2^(N-1)` and estimates on
the norm of `qₙ` and `pₖ`, which give summability. We also need to show that it indeed converges to
`g ∘ f`. For this, we note that the composition of partial sums converges to `g ∘ f`, and that it
corresponds to a part of the whole sum, on a subset that increases to the whole space. By
summability of the norms, this implies the overall convergence.
## Main results
* `q.comp p` is the formal composition of the formal multilinear series `q` and `p`.
* `has_fpower_series_at.comp` states that if two functions `g` and `f` admit power series expansions
`q` and `p`, then `g ∘ f` admits a power series expansion given by `q.comp p`.
* `analytic_at.comp` states that the composition of analytic functions is analytic.
* `formal_multilinear_series.comp_assoc` states that composition is associative on formal
multilinear series.
## Implementation details
The main technical difficulty is to write down things. In particular, we need to define precisely
`q.comp_along_composition p c` and to show that it is indeed a continuous multilinear
function. This requires a whole interface built on the class `composition`. Once this is set,
the main difficulty is to reorder the sums, writing the composition of the partial sums as a sum
over some subset of `Σ n, composition n`. We need to check that the reordering is a bijection,
running into difficulties due to the dependent nature of the types under consideration, which are
controlled thanks to the interface for `composition`.
The associativity of composition on formal multilinear series is a nontrivial result: it does not
follow from the associativity of composition of analytic functions, as there is no uniqueness for
the formal multilinear series representing a function (and also, it holds even when the radius of
convergence of the series is `0`). Instead, we give a direct proof, which amounts to reordering
double sums in a careful way. The change of variables is a canonical (combinatorial) bijection
`composition.sigma_equiv_sigma_pi` between `(Σ (a : composition n), composition a.length)` and
`(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i))`, and is described
in more detail below in the paragraph on associativity.
-/
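-- Orientation example (a sketch, not part of the original file): the compositions of `3` are
-- `[1, 1, 1]`, `[1, 2]`, `[2, 1]` and `[3]`, i.e. `2 ^ (3 - 1) = 4` of them, matching the
-- cardinality statement used in the summability argument described above.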
noncomputable theory
variables {𝕜 : Type*} {E F G H : Type*}
open filter list
open_locale topological_space big_operators classical nnreal ennreal
section topological
variables [comm_ring 𝕜] [add_comm_group E] [add_comm_group F] [add_comm_group G]
variables [module 𝕜 E] [module 𝕜 F] [module 𝕜 G]
variables [topological_space E] [topological_space F] [topological_space G]
/-! ### Composing formal multilinear series -/
namespace formal_multilinear_series
variables [topological_add_group E] [has_continuous_const_smul 𝕜 E]
variables [topological_add_group F] [has_continuous_const_smul 𝕜 F]
variables [topological_add_group G] [has_continuous_const_smul 𝕜 G]
/-!
In this paragraph, we define the composition of formal multilinear series, by summing over all
possible compositions of `n`.
-/
/-- Given a formal multilinear series `p`, a composition `c` of `n` and the index `i` of a
block of `c`, we may define a function on `fin n → E` by picking the variables in the `i`-th block
of `n`, and applying the corresponding coefficient of `p` to these variables. This function is
called `p.apply_composition c v i` for `v : fin n → E` and `i : fin c.length`. -/
def apply_composition
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n) :
(fin n → E) → (fin (c.length) → F) :=
λ v i, p (c.blocks_fun i) (v ∘ (c.embedding i))
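-- Concrete reading of the definition (a sketch, not part of the original file): if `n = 3` and
-- `c` is the composition `[2, 1]`, then `c.length = 2` and, informally,
-- `p.apply_composition c v = ![p 2 (v 0, v 1), p 1 (v 2)]`: the first block of `c` picks out
-- `v 0, v 1` and the second picks out `v 2`.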
lemma apply_composition_ones (p : formal_multilinear_series 𝕜 E F) (n : ℕ) :
p.apply_composition (composition.ones n) =
λ v i, p 1 (λ _, v (fin.cast_le (composition.length_le _) i)) :=
begin
funext v i,
apply p.congr (composition.ones_blocks_fun _ _),
intros j hjn hj1,
obtain rfl : j = 0, { linarith },
refine congr_arg v _,
rw [fin.ext_iff, fin.coe_cast_le, composition.ones_embedding, fin.coe_mk],
end
lemma apply_composition_single (p : formal_multilinear_series 𝕜 E F) {n : ℕ} (hn : 0 < n)
(v : fin n → E) : p.apply_composition (composition.single n hn) v = λ j, p n v :=
begin
ext j,
refine p.congr (by simp) (λ i hi1 hi2, _),
dsimp,
congr' 1,
convert composition.single_embedding hn ⟨i, hi2⟩,
cases j,
have : j_val = 0 := le_bot_iff.1 (nat.lt_succ_iff.1 j_property),
unfold_coes,
congr; try { assumption <|> simp },
end
@[simp] lemma remove_zero_apply_composition
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n) :
p.remove_zero.apply_composition c = p.apply_composition c :=
begin
ext v i,
simp [apply_composition, zero_lt_one.trans_le (c.one_le_blocks_fun i), remove_zero_of_pos],
end
/-- Technical lemma stating how `p.apply_composition` commutes with updating variables. This
will be the key point to show that functions constructed from `apply_composition` retain
multilinearity. -/
lemma apply_composition_update
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n)
(j : fin n) (v : fin n → E) (z : E) :
p.apply_composition c (function.update v j z) =
function.update (p.apply_composition c v) (c.index j)
(p (c.blocks_fun (c.index j))
(function.update (v ∘ (c.embedding (c.index j))) (c.inv_embedding j) z)) :=
begin
ext k,
by_cases h : k = c.index j,
{ rw h,
let r : fin (c.blocks_fun (c.index j)) → fin n := c.embedding (c.index j),
simp only [function.update_same],
change p (c.blocks_fun (c.index j)) ((function.update v j z) ∘ r) = _,
let j' := c.inv_embedding j,
suffices B : (function.update v j z) ∘ r = function.update (v ∘ r) j' z,
by rw B,
suffices C : (function.update v (r j') z) ∘ r = function.update (v ∘ r) j' z,
by { convert C, exact (c.embedding_comp_inv j).symm },
exact function.update_comp_eq_of_injective _ (c.embedding _).injective _ _ },
{ simp only [h, function.update_eq_self, function.update_noteq, ne.def, not_false_iff],
let r : fin (c.blocks_fun k) → fin n := c.embedding k,
change p (c.blocks_fun k) ((function.update v j z) ∘ r) = p (c.blocks_fun k) (v ∘ r),
suffices B : (function.update v j z) ∘ r = v ∘ r, by rw B,
apply function.update_comp_eq_of_not_mem_range,
rwa c.mem_range_embedding_iff' }
end
@[simp] lemma comp_continuous_linear_map_apply_composition {n : ℕ}
(p : formal_multilinear_series 𝕜 F G) (f : E →L[𝕜] F) (c : composition n) (v : fin n → E) :
(p.comp_continuous_linear_map f).apply_composition c v = p.apply_composition c (f ∘ v) :=
by simp [apply_composition]
end formal_multilinear_series
namespace continuous_multilinear_map
open formal_multilinear_series
variables [topological_add_group E] [has_continuous_const_smul 𝕜 E]
variables [topological_add_group F] [has_continuous_const_smul 𝕜 F]
/-- Given a formal multilinear series `p`, a composition `c` of `n` and a continuous multilinear
map `f` in `c.length` variables, one may form a continuous multilinear map in `n` variables by
applying the right coefficient of `p` to each block of the composition, and then applying `f` to
the resulting vector. It is called `f.comp_along_composition p c`. -/
def comp_along_composition {n : ℕ}
(p : formal_multilinear_series 𝕜 E F) (c : composition n)
(f : continuous_multilinear_map 𝕜 (λ (i : fin c.length), F) G) :
continuous_multilinear_map 𝕜 (λ i : fin n, E) G :=
{ to_fun := λ v, f (p.apply_composition c v),
map_add' := λ v i x y, by simp only [apply_composition_update,
continuous_multilinear_map.map_add],
map_smul' := λ v i c x, by simp only [apply_composition_update,
continuous_multilinear_map.map_smul],
cont := f.cont.comp $ continuous_pi $ λ i, (coe_continuous _).comp $ continuous_pi $ λ j,
continuous_apply _, }
@[simp] lemma comp_along_composition_apply {n : ℕ}
(p : formal_multilinear_series 𝕜 E F) (c : composition n)
(f : continuous_multilinear_map 𝕜 (λ (i : fin c.length), F) G) (v : fin n → E) :
(f.comp_along_composition p c) v = f (p.apply_composition c v) := rfl
end continuous_multilinear_map
namespace formal_multilinear_series
variables [topological_add_group E] [has_continuous_const_smul 𝕜 E]
variables [topological_add_group F] [has_continuous_const_smul 𝕜 F]
variables [topological_add_group G] [has_continuous_const_smul 𝕜 G]
/-- Given two formal multilinear series `q` and `p` and a composition `c` of `n`, one may
form a continuous multilinear map in `n` variables by applying the right coefficient of `p` to each
block of the composition, and then applying `q c.length` to the resulting vector. It is
called `q.comp_along_composition p c`. -/
def comp_along_composition {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) : continuous_multilinear_map 𝕜 (λ i : fin n, E) G :=
(q c.length).comp_along_composition p c
@[simp] lemma comp_along_composition_apply {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) (v : fin n → E) :
(q.comp_along_composition p c) v = q c.length (p.apply_composition c v) := rfl
/-- Formal composition of two formal multilinear series. The `n`-th coefficient in the composition
is defined to be the sum of `q.comp_along_composition p c` over all compositions of
`n`. In other words, this term (as a multilinear function applied to `v_0, ..., v_{n-1}`) is
`∑'_{k} ∑'_{i₁ + ... + iₖ = n} qₖ (p_{i₁} (...), ..., p_{iₖ} (...))`, where one puts all variables
`v_0, ..., v_{n-1}` in increasing order in the dots.
In general, the composition `q ∘ p` only makes sense when the constant coefficient of `p` vanishes.
We give a general formula, which instead ignores the value of `p 0`.
-/
protected def comp (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) :
formal_multilinear_series 𝕜 E G :=
λ n, ∑ c : composition n, q.comp_along_composition p c
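-- Unfolding the definition in a small case (a sketch, not part of the original file): the
-- compositions of `2` are `[1, 1]` and `[2]`, so informally
-- `(q.comp p) 2 v = q 2 (p 1 (v 0), p 1 (v 1)) + q 1 (p 2 (v 0, v 1))`.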
/-- The `0`-th coefficient of `q.comp p` is `q 0`. Since these maps are multilinear maps in zero
variables, but on different spaces, we can not state this directly, so we state it when applied to
arbitrary vectors (which have to be the zero vector). -/
lemma comp_coeff_zero (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(v : fin 0 → E) (v' : fin 0 → F) :
(q.comp p) 0 v = q 0 v' :=
begin
let c : composition 0 := composition.ones 0,
dsimp [formal_multilinear_series.comp],
have : {c} = (finset.univ : finset (composition 0)),
{ apply finset.eq_of_subset_of_card_le; simp [finset.card_univ, composition_card 0] },
rw [← this, finset.sum_singleton, comp_along_composition_apply],
symmetry, congr'
end
@[simp] lemma comp_coeff_zero'
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (v : fin 0 → E) :
(q.comp p) 0 v = q 0 (λ i, 0) :=
q.comp_coeff_zero p v _
/-- The `0`-th coefficient of `q.comp p` is `q 0`. When `p` goes from `E` to `E`, this can be
expressed as a direct equality -/
lemma comp_coeff_zero'' (q : formal_multilinear_series 𝕜 E F)
(p : formal_multilinear_series 𝕜 E E) :
(q.comp p) 0 = q 0 :=
by { ext v, exact q.comp_coeff_zero p _ _ }
/-- The first coefficient of a composition of formal multilinear series is the composition of the
first coefficients seen as continuous linear maps. -/
lemma comp_coeff_one (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(v : fin 1 → E) : (q.comp p) 1 v = q 1 (λ i, p 1 v) :=
begin
have : {composition.ones 1} = (finset.univ : finset (composition 1)) :=
finset.eq_univ_of_card _ (by simp [composition_card]),
simp only [formal_multilinear_series.comp, comp_along_composition_apply, ← this,
finset.sum_singleton],
refine q.congr (by simp) (λ i hi1 hi2, _),
simp only [apply_composition_ones],
exact p.congr rfl (λ j hj1 hj2, by congr)
end
lemma remove_zero_comp_of_pos (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (hn : 0 < n) :
q.remove_zero.comp p n = q.comp p n :=
begin
ext v,
simp only [formal_multilinear_series.comp, comp_along_composition,
continuous_multilinear_map.comp_along_composition_apply, continuous_multilinear_map.sum_apply],
apply finset.sum_congr rfl (λ c hc, _),
rw remove_zero_of_pos _ (c.length_pos_of_pos hn)
end
@[simp] lemma comp_remove_zero (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) :
q.comp p.remove_zero = q.comp p :=
by { ext n, simp [formal_multilinear_series.comp] }
end formal_multilinear_series
end topological
variables [nondiscrete_normed_field 𝕜]
[normed_group E] [normed_space 𝕜 E]
[normed_group F] [normed_space 𝕜 F]
[normed_group G] [normed_space 𝕜 G]
[normed_group H] [normed_space 𝕜 H]
namespace formal_multilinear_series
/-- The norm of `f.comp_along_composition p c` is controlled by the product of
the norms of the relevant bits of `f` and `p`. -/
lemma comp_along_composition_bound {n : ℕ}
(p : formal_multilinear_series 𝕜 E F) (c : composition n)
(f : continuous_multilinear_map 𝕜 (λ (i : fin c.length), F) G) (v : fin n → E) :
∥f.comp_along_composition p c v∥ ≤
∥f∥ * (∏ i, ∥p (c.blocks_fun i)∥) * (∏ i : fin n, ∥v i∥) :=
calc ∥f.comp_along_composition p c v∥ = ∥f (p.apply_composition c v)∥ : rfl
... ≤ ∥f∥ * ∏ i, ∥p.apply_composition c v i∥ : continuous_multilinear_map.le_op_norm _ _
... ≤ ∥f∥ * ∏ i, ∥p (c.blocks_fun i)∥ *
∏ j : fin (c.blocks_fun i), ∥(v ∘ (c.embedding i)) j∥ :
begin
apply mul_le_mul_of_nonneg_left _ (norm_nonneg _),
refine finset.prod_le_prod (λ i hi, norm_nonneg _) (λ i hi, _),
apply continuous_multilinear_map.le_op_norm,
end
... = ∥f∥ * (∏ i, ∥p (c.blocks_fun i)∥) *
∏ i (j : fin (c.blocks_fun i)), ∥(v ∘ (c.embedding i)) j∥ :
by rw [finset.prod_mul_distrib, mul_assoc]
... = ∥f∥ * (∏ i, ∥p (c.blocks_fun i)∥) * (∏ i : fin n, ∥v i∥) :
by { rw [← c.blocks_fin_equiv.prod_comp, ← finset.univ_sigma_univ, finset.prod_sigma],
congr }
/-- The norm of `q.comp_along_composition p c` is controlled by the product of
the norms of the relevant bits of `q` and `p`. -/
lemma comp_along_composition_norm {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) :
∥q.comp_along_composition p c∥ ≤ ∥q c.length∥ * ∏ i, ∥p (c.blocks_fun i)∥ :=
continuous_multilinear_map.op_norm_le_bound _
(mul_nonneg (norm_nonneg _) (finset.prod_nonneg (λ i hi, norm_nonneg _)))
(comp_along_composition_bound _ _ _)
lemma comp_along_composition_nnnorm {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) :
∥q.comp_along_composition p c∥₊ ≤ ∥q c.length∥₊ * ∏ i, ∥p (c.blocks_fun i)∥₊ :=
by { rw ← nnreal.coe_le_coe, push_cast, exact q.comp_along_composition_norm p c }
/-!
### The identity formal power series
We will now define the identity power series, and show that it is a neutral element for left and
right composition.
-/
section
variables (𝕜 E)
/-- The identity formal multilinear series, with all coefficients equal to `0` except for `n = 1`
where it is (the continuous multilinear version of) the identity. -/
def id : formal_multilinear_series 𝕜 E E
| 0 := 0
| 1 := (continuous_multilinear_curry_fin1 𝕜 E E).symm (continuous_linear_map.id 𝕜 E)
| _ := 0
/-- The first coefficient of `id 𝕜 E` is the identity. -/
@[simp] lemma id_apply_one (v : fin 1 → E) : (formal_multilinear_series.id 𝕜 E) 1 v = v 0 := rfl
/-- The `n`th coefficient of `id 𝕜 E` is the identity when `n = 1`. We state this in a dependent
way, as it will often appear in this form. -/
lemma id_apply_one' {n : ℕ} (h : n = 1) (v : fin n → E) :
(id 𝕜 E) n v = v ⟨0, h.symm ▸ zero_lt_one⟩ :=
begin
subst n,
apply id_apply_one
end
/-- For `n ≠ 1`, the `n`-th coefficient of `id 𝕜 E` is zero, by definition. -/
@[simp] lemma id_apply_ne_one {n : ℕ} (h : n ≠ 1) : (formal_multilinear_series.id 𝕜 E) n = 0 :=
by { cases n, { refl }, cases n, { contradiction }, refl }
end
@[simp] theorem comp_id (p : formal_multilinear_series 𝕜 E F) : p.comp (id 𝕜 E) = p :=
begin
ext1 n,
dsimp [formal_multilinear_series.comp],
rw finset.sum_eq_single (composition.ones n),
show comp_along_composition p (id 𝕜 E) (composition.ones n) = p n,
{ ext v,
rw comp_along_composition_apply,
apply p.congr (composition.ones_length n),
intros,
rw apply_composition_ones,
refine congr_arg v _,
rw [fin.ext_iff, fin.coe_cast_le, fin.coe_mk, fin.coe_mk], },
show ∀ (b : composition n),
b ∈ finset.univ → b ≠ composition.ones n → comp_along_composition p (id 𝕜 E) b = 0,
{ assume b _ hb,
obtain ⟨k, hk, lt_k⟩ : ∃ (k : ℕ) (H : k ∈ composition.blocks b), 1 < k :=
composition.ne_ones_iff.1 hb,
obtain ⟨i, i_lt, hi⟩ : ∃ (i : ℕ) (h : i < b.blocks.length), b.blocks.nth_le i h = k :=
nth_le_of_mem hk,
let j : fin b.length := ⟨i, b.blocks_length ▸ i_lt⟩,
have A : 1 < b.blocks_fun j := by convert lt_k,
ext v,
rw [comp_along_composition_apply, continuous_multilinear_map.zero_apply],
apply continuous_multilinear_map.map_coord_zero _ j,
dsimp [apply_composition],
rw id_apply_ne_one _ _ (ne_of_gt A),
refl },
{ simp }
end
@[simp] theorem id_comp (p : formal_multilinear_series 𝕜 E F) (h : p 0 = 0) : (id 𝕜 F).comp p = p :=
begin
ext1 n,
by_cases hn : n = 0,
{ rw [hn, h],
ext v,
rw [comp_coeff_zero', id_apply_ne_one _ _ zero_ne_one],
refl },
{ dsimp [formal_multilinear_series.comp],
have n_pos : 0 < n := bot_lt_iff_ne_bot.mpr hn,
rw finset.sum_eq_single (composition.single n n_pos),
show comp_along_composition (id 𝕜 F) p (composition.single n n_pos) = p n,
{ ext v,
rw [comp_along_composition_apply, id_apply_one' _ _ (composition.single_length n_pos)],
dsimp [apply_composition],
refine p.congr rfl (λ i him hin, congr_arg v $ _),
ext, simp },
show ∀ (b : composition n),
b ∈ finset.univ → b ≠ composition.single n n_pos → comp_along_composition (id 𝕜 F) p b = 0,
{ assume b _ hb,
have A : b.length ≠ 1, by simpa [composition.eq_single_iff_length] using hb,
ext v,
rw [comp_along_composition_apply, id_apply_ne_one _ _ A],
refl },
{ simp } }
end
/-! ### Summability properties of the composition of formal power series -/
section
/-- If two formal multilinear series have positive radius of convergence, then the terms appearing
in the definition of their composition are also summable (when multiplied by a suitable positive
geometric term). -/
theorem comp_summable_nnreal
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(hq : 0 < q.radius) (hp : 0 < p.radius) :
∃ r > (0 : ℝ≥0),
summable (λ i : Σ n, composition n, ∥q.comp_along_composition p i.2∥₊ * r ^ i.1) :=
begin
/- This follows from the fact that the growth rate of `∥qₙ∥` and `∥pₙ∥` is at most geometric,
giving a geometric bound on each `∥q.comp_along_composition p op∥`, together with the
fact that there are `2^(n-1)` compositions of `n`, giving at most a geometric loss. -/
rcases ennreal.lt_iff_exists_nnreal_btwn.1 (lt_min ennreal.zero_lt_one hq) with ⟨rq, rq_pos, hrq⟩,
rcases ennreal.lt_iff_exists_nnreal_btwn.1 (lt_min ennreal.zero_lt_one hp) with ⟨rp, rp_pos, hrp⟩,
simp only [lt_min_iff, ennreal.coe_lt_one_iff, ennreal.coe_pos] at hrp hrq rp_pos rq_pos,
obtain ⟨Cq, hCq0, hCq⟩ : ∃ Cq > 0, ∀ n, ∥q n∥₊ * rq^n ≤ Cq :=
q.nnnorm_mul_pow_le_of_lt_radius hrq.2,
obtain ⟨Cp, hCp1, hCp⟩ : ∃ Cp ≥ 1, ∀ n, ∥p n∥₊ * rp^n ≤ Cp,
{ rcases p.nnnorm_mul_pow_le_of_lt_radius hrp.2 with ⟨Cp, -, hCp⟩,
exact ⟨max Cp 1, le_max_right _ _, λ n, (hCp n).trans (le_max_left _ _)⟩ },
let r0 : ℝ≥0 := (4 * Cp)⁻¹,
have r0_pos : 0 < r0 := nnreal.inv_pos.2 (mul_pos zero_lt_four (zero_lt_one.trans_le hCp1)),
set r : ℝ≥0 := rp * rq * r0,
have r_pos : 0 < r := mul_pos (mul_pos rp_pos rq_pos) r0_pos,
have I : ∀ (i : Σ (n : ℕ), composition n),
∥q.comp_along_composition p i.2∥₊ * r ^ i.1 ≤ Cq / 4 ^ i.1,
{ rintros ⟨n, c⟩,
have A,
calc ∥q c.length∥₊ * rq ^ n ≤ ∥q c.length∥₊* rq ^ c.length :
mul_le_mul' le_rfl (pow_le_pow_of_le_one rq.2 hrq.1.le c.length_le)
... ≤ Cq : hCq _,
have B,
calc ((∏ i, ∥p (c.blocks_fun i)∥₊) * rp ^ n)
= ∏ i, ∥p (c.blocks_fun i)∥₊ * rp ^ c.blocks_fun i :
by simp only [finset.prod_mul_distrib, finset.prod_pow_eq_pow_sum, c.sum_blocks_fun]
... ≤ ∏ i : fin c.length, Cp : finset.prod_le_prod' (λ i _, hCp _)
... = Cp ^ c.length : by simp
... ≤ Cp ^ n : pow_le_pow hCp1 c.length_le,
calc ∥q.comp_along_composition p c∥₊ * r ^ n
≤ (∥q c.length∥₊ * ∏ i, ∥p (c.blocks_fun i)∥₊) * r ^ n :
mul_le_mul' (q.comp_along_composition_nnnorm p c) le_rfl
... = (∥q c.length∥₊ * rq ^ n) * ((∏ i, ∥p (c.blocks_fun i)∥₊) * rp ^ n) * r0 ^ n :
by { simp only [r, mul_pow], ring }
... ≤ Cq * Cp ^ n * r0 ^ n : mul_le_mul' (mul_le_mul' A B) le_rfl
... = Cq / 4 ^ n :
begin
simp only [r0],
field_simp [mul_pow, (zero_lt_one.trans_le hCp1).ne'],
ring
end },
refine ⟨r, r_pos, nnreal.summable_of_le I _⟩,
simp_rw div_eq_mul_inv,
refine summable.mul_left _ _,
have : ∀ n : ℕ, has_sum (λ c : composition n, (4 ^ n : ℝ≥0)⁻¹) (2 ^ (n - 1) / 4 ^ n),
{ intro n,
convert has_sum_fintype (λ c : composition n, (4 ^ n : ℝ≥0)⁻¹),
simp [finset.card_univ, composition_card, div_eq_mul_inv] },
refine nnreal.summable_sigma.2 ⟨λ n, (this n).summable, (nnreal.summable_nat_add_iff 1).1 _⟩,
convert (nnreal.summable_geometric (nnreal.div_lt_one_of_lt one_lt_two)).mul_left (1 / 4),
ext1 n,
rw [(this _).tsum_eq, add_tsub_cancel_right],
field_simp [← mul_assoc, pow_succ', mul_pow, show (4 : ℝ≥0) = 2 * 2, from (two_mul 2).symm,
mul_right_comm]
end
end
/-- Bounding below the radius of the composition of two formal multilinear series assuming
summability over all compositions. -/
theorem le_comp_radius_of_summable
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (r : ℝ≥0)
(hr : summable (λ i : (Σ n, composition n), ∥q.comp_along_composition p i.2∥₊ * r ^ i.1)) :
(r : ℝ≥0∞) ≤ (q.comp p).radius :=
begin
refine le_radius_of_bound_nnreal _
(∑' i : (Σ n, composition n), ∥comp_along_composition q p i.snd∥₊ * r ^ i.fst) (λ n, _),
calc ∥formal_multilinear_series.comp q p n∥₊ * r ^ n ≤
∑' (c : composition n), ∥comp_along_composition q p c∥₊ * r ^ n :
begin
rw [tsum_fintype, ← finset.sum_mul],
exact mul_le_mul' (nnnorm_sum_le _ _) le_rfl
end
... ≤ ∑' (i : Σ (n : ℕ), composition n), ∥comp_along_composition q p i.snd∥₊ * r ^ i.fst :
nnreal.tsum_comp_le_tsum_of_inj hr sigma_mk_injective
end
/-!
### Composing analytic functions
Now, we will prove that the composition of the partial sums of `q` and `p` up to order `N` is
given by a sum over some large subset of `Σ n, composition n` of `q.comp_along_composition p`, to
deduce that the series for `q.comp p` indeed converges to `g ∘ f` when `q` is a power series for
`g` and `p` is a power series for `f`.
This proof is a big reindexing argument of a sum. Since it is a bit involved, we define first
the source of the change of variables (`comp_partial_source`), its target
(`comp_partial_target`) and the change of variables itself (`comp_change_of_variables`) before
giving the main statement in `comp_partial_sum`. -/
/-- Source set in the change of variables to compute the composition of partial sums of formal
power series.
See also `comp_partial_sum`. -/
def comp_partial_sum_source (m M N : ℕ) : finset (Σ n, (fin n) → ℕ) :=
finset.sigma (finset.Ico m M) (λ (n : ℕ), fintype.pi_finset (λ (i : fin n), finset.Ico 1 N) : _)
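-- Concrete instance (a sketch, not part of the original file): `comp_partial_sum_source 0 2 3`
-- consists of the pairs `⟨n, f⟩` with `n ∈ finset.Ico 0 2 = {0, 1}` and every value `f i` in
-- `finset.Ico 1 3 = {1, 2}`.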
@[simp] lemma mem_comp_partial_sum_source_iff (m M N : ℕ) (i : Σ n, (fin n) → ℕ) :
i ∈ comp_partial_sum_source m M N ↔
(m ≤ i.1 ∧ i.1 < M) ∧ ∀ (a : fin i.1), 1 ≤ i.2 a ∧ i.2 a < N :=
by simp only [comp_partial_sum_source, finset.mem_Ico, fintype.mem_pi_finset, finset.mem_sigma,
iff_self]
/-- Change of variables appearing to compute the composition of partial sums of formal
power series -/
def comp_change_of_variables (m M N : ℕ) (i : Σ n, (fin n) → ℕ)
(hi : i ∈ comp_partial_sum_source m M N) : (Σ n, composition n) :=
begin
rcases i with ⟨n, f⟩,
rw mem_comp_partial_sum_source_iff at hi,
refine ⟨∑ j, f j, of_fn (λ a, f a), λ i hi', _, by simp [sum_of_fn]⟩,
obtain ⟨j, rfl⟩ : ∃ (j : fin n), f j = i, by rwa [mem_of_fn, set.mem_range] at hi',
exact (hi.2 j).1
end
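-- Concrete instance (a sketch, not part of the original file): the pair `⟨2, ![1, 2]⟩` of
-- `comp_partial_sum_source 0 3 3` is sent to `⟨3, c⟩`, where `c` is the composition of `3`
-- with blocks `[1, 2]`.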
@[simp] lemma comp_change_of_variables_length
(m M N : ℕ) {i : Σ n, (fin n) → ℕ} (hi : i ∈ comp_partial_sum_source m M N) :
composition.length (comp_change_of_variables m M N i hi).2 = i.1 :=
begin
rcases i with ⟨k, blocks_fun⟩,
dsimp [comp_change_of_variables],
simp only [composition.length, map_of_fn, length_of_fn]
end
lemma comp_change_of_variables_blocks_fun
(m M N : ℕ) {i : Σ n, (fin n) → ℕ} (hi : i ∈ comp_partial_sum_source m M N) (j : fin i.1) :
(comp_change_of_variables m M N i hi).2.blocks_fun
⟨j, (comp_change_of_variables_length m M N hi).symm ▸ j.2⟩ = i.2 j :=
begin
rcases i with ⟨n, f⟩,
dsimp [composition.blocks_fun, composition.blocks, comp_change_of_variables],
simp only [map_of_fn, nth_le_of_fn', function.comp_app],
apply congr_arg,
exact fin.eta _ _
end
/-- Target set in the change of variables to compute the composition of partial sums of formal
power series, here given as a set. -/
def comp_partial_sum_target_set (m M N : ℕ) : set (Σ n, composition n) :=
{i | (m ≤ i.2.length) ∧ (i.2.length < M) ∧ (∀ (j : fin i.2.length), i.2.blocks_fun j < N)}
lemma comp_partial_sum_target_subset_image_comp_partial_sum_source
(m M N : ℕ) (i : Σ n, composition n) (hi : i ∈ comp_partial_sum_target_set m M N) :
∃ j (hj : j ∈ comp_partial_sum_source m M N), i = comp_change_of_variables m M N j hj :=
begin
rcases i with ⟨n, c⟩,
refine ⟨⟨c.length, c.blocks_fun⟩, _, _⟩,
{ simp only [comp_partial_sum_target_set, set.mem_set_of_eq] at hi,
simp only [mem_comp_partial_sum_source_iff, hi.left, hi.right, true_and, and_true],
exact λ a, c.one_le_blocks' _ },
{ dsimp [comp_change_of_variables],
rw composition.sigma_eq_iff_blocks_eq,
simp only [composition.blocks_fun, composition.blocks, subtype.coe_eta, nth_le_map'],
conv_lhs { rw ← of_fn_nth_le c.blocks } }
end
/-- Target set in the change of variables to compute the composition of partial sums of formal
power series, here given as a finset.
See also `comp_partial_sum`. -/
def comp_partial_sum_target (m M N : ℕ) : finset (Σ n, composition n) :=
set.finite.to_finset $ ((finset.finite_to_set _).dependent_image _).subset $
comp_partial_sum_target_subset_image_comp_partial_sum_source m M N
@[simp] lemma mem_comp_partial_sum_target_iff {m M N : ℕ} {a : Σ n, composition n} :
a ∈ comp_partial_sum_target m M N ↔
m ≤ a.2.length ∧ a.2.length < M ∧ (∀ (j : fin a.2.length), a.2.blocks_fun j < N) :=
by simp [comp_partial_sum_target, comp_partial_sum_target_set]
/-- `comp_change_of_variables m M N` is a bijection between `comp_partial_sum_source m M N`
and `comp_partial_sum_target m M N`, yielding equal sums for functions that correspond to each
other under the bijection. As `comp_change_of_variables m M N` is a dependent function, stating
that it is a bijection is not directly possible, but the consequence on sums can be stated
more easily. -/
lemma comp_change_of_variables_sum {α : Type*} [add_comm_monoid α] (m M N : ℕ)
(f : (Σ (n : ℕ), fin n → ℕ) → α) (g : (Σ n, composition n) → α)
(h : ∀ e (he : e ∈ comp_partial_sum_source m M N),
f e = g (comp_change_of_variables m M N e he)) :
∑ e in comp_partial_sum_source m M N, f e = ∑ e in comp_partial_sum_target m M N, g e :=
begin
apply finset.sum_bij (comp_change_of_variables m M N),
-- We should show that the correspondence we have set up is indeed a bijection
-- between the index sets of the two sums.
-- 1 - show that the image belongs to `comp_partial_sum_target m M N`
{ rintros ⟨k, blocks_fun⟩ H,
rw mem_comp_partial_sum_source_iff at H,
simp only [mem_comp_partial_sum_target_iff, composition.length, composition.blocks, H.left,
map_of_fn, length_of_fn, true_and, comp_change_of_variables],
assume j,
simp only [composition.blocks_fun, (H.right _).right, nth_le_of_fn'] },
-- 2 - show that the composition gives the `comp_along_composition` application
{ rintros ⟨k, blocks_fun⟩ H,
rw h },
-- 3 - show that the map is injective
{ rintros ⟨k, blocks_fun⟩ ⟨k', blocks_fun'⟩ H H' heq,
obtain rfl : k = k',
{ have := (comp_change_of_variables_length m M N H).symm,
rwa [heq, comp_change_of_variables_length] at this, },
congr,
funext i,
calc blocks_fun i = (comp_change_of_variables m M N _ H).2.blocks_fun _ :
(comp_change_of_variables_blocks_fun m M N H i).symm
... = (comp_change_of_variables m M N _ H').2.blocks_fun _ :
begin
apply composition.blocks_fun_congr; try { rw heq },
refl
end
... = blocks_fun' i : comp_change_of_variables_blocks_fun m M N H' i },
-- 4 - show that the map is surjective
{ assume i hi,
apply comp_partial_sum_target_subset_image_comp_partial_sum_source m M N i,
simpa [comp_partial_sum_target] using hi }
end
/-- The auxiliary set corresponding to the composition of partial sums asymptotically contains
all possible compositions. -/
lemma comp_partial_sum_target_tendsto_at_top :
tendsto (λ N, comp_partial_sum_target 0 N N) at_top at_top :=
begin
apply monotone.tendsto_at_top_finset,
{ assume m n hmn a ha,
have : ∀ i, i < m → i < n := λ i hi, lt_of_lt_of_le hi hmn,
tidy },
{ rintros ⟨n, c⟩,
simp only [mem_comp_partial_sum_target_iff],
obtain ⟨n, hn⟩ : bdd_above ↑(finset.univ.image (λ (i : fin c.length), c.blocks_fun i)) :=
finset.bdd_above _,
refine ⟨max n c.length + 1, bot_le, lt_of_le_of_lt (le_max_right n c.length) (lt_add_one _),
λ j, lt_of_le_of_lt (le_trans _ (le_max_left _ _)) (lt_add_one _)⟩,
apply hn,
simp only [finset.mem_image_of_mem, finset.mem_coe, finset.mem_univ] }
end
/-- Composing the partial sums of two multilinear series coincides with the sum over all
compositions in `comp_partial_sum_target 0 N N`. This is precisely the motivation for the
definition of `comp_partial_sum_target`. -/
lemma comp_partial_sum
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (N : ℕ) (z : E) :
q.partial_sum N (∑ i in finset.Ico 1 N, p i (λ j, z)) =
∑ i in comp_partial_sum_target 0 N N, q.comp_along_composition p i.2 (λ j, z) :=
begin
-- we expand the composition, using the multilinearity of `q` to expand along each coordinate.
suffices H : ∑ n in finset.range N, ∑ r in fintype.pi_finset (λ (i : fin n), finset.Ico 1 N),
q n (λ (i : fin n), p (r i) (λ j, z)) =
∑ i in comp_partial_sum_target 0 N N, q.comp_along_composition p i.2 (λ j, z),
by simpa only [formal_multilinear_series.partial_sum,
continuous_multilinear_map.map_sum_finset] using H,
-- rewrite the first sum as a big sum over a sigma type, in the finset
-- `comp_partial_sum_target 0 N N`
rw [finset.range_eq_Ico, finset.sum_sigma'],
-- use `comp_change_of_variables_sum`, saying that this change of variables respects sums
apply comp_change_of_variables_sum 0 N N,
rintros ⟨k, blocks_fun⟩ H,
apply congr _ (comp_change_of_variables_length 0 N N H).symm,
intros,
rw ← comp_change_of_variables_blocks_fun 0 N N H,
refl
end
end formal_multilinear_series
open formal_multilinear_series
/-- If two functions `g` and `f` have power series `q` and `p` respectively at `f x` and `x`, then
`g ∘ f` admits the power series `q.comp p` at `x`. -/
theorem has_fpower_series_at.comp {g : F → G} {f : E → F}
{q : formal_multilinear_series 𝕜 F G} {p : formal_multilinear_series 𝕜 E F} {x : E}
(hg : has_fpower_series_at g q (f x)) (hf : has_fpower_series_at f p x) :
has_fpower_series_at (g ∘ f) (q.comp p) x :=
begin
/- Consider `rf` and `rg` such that `f` and `g` have power series expansion on the disks
of radius `rf` and `rg`. -/
rcases hg with ⟨rg, Hg⟩,
rcases hf with ⟨rf, Hf⟩,
/- The terms defining `q.comp p` are geometrically summable in a disk of some radius `r`. -/
rcases q.comp_summable_nnreal p Hg.radius_pos Hf.radius_pos with ⟨r, r_pos : 0 < r, hr⟩,
/- We will consider `y` which is smaller than `r` and `rf`, and also small enough that
`f (x + y)` is close enough to `f x` to be in the disk where `g` is well behaved. Let
`min (r, rf, δ)` be this new radius. -/
have : continuous_at f x := Hf.analytic_at.continuous_at,
obtain ⟨δ, δpos, hδ⟩ : ∃ (δ : ℝ≥0∞) (H : 0 < δ),
∀ {z : E}, z ∈ emetric.ball x δ → f z ∈ emetric.ball (f x) rg,
{ have : emetric.ball (f x) rg ∈ 𝓝 (f x) := emetric.ball_mem_nhds _ Hg.r_pos,
rcases emetric.mem_nhds_iff.1 (Hf.analytic_at.continuous_at this) with ⟨δ, δpos, Hδ⟩,
exact ⟨δ, δpos, λ z hz, Hδ hz⟩ },
let rf' := min rf δ,
have min_pos : 0 < min rf' r,
by simp only [r_pos, Hf.r_pos, δpos, lt_min_iff, ennreal.coe_pos, and_self],
/- We will show that `g ∘ f` admits the power series `q.comp p` in the disk of
radius `min (r, rf', δ)`. -/
refine ⟨min rf' r, _⟩,
refine ⟨le_trans (min_le_right rf' r)
(formal_multilinear_series.le_comp_radius_of_summable q p r hr), min_pos, λ y hy, _⟩,
/- Let `y` satisfy `∥y∥ < min (r, rf', δ)`. We want to show that `g (f (x + y))` is the sum of
`q.comp p` applied to `y`. -/
-- First, check that `y` is small enough so that estimates for `f` and `g` apply.
have y_mem : y ∈ emetric.ball (0 : E) rf :=
(emetric.ball_subset_ball (le_trans (min_le_left _ _) (min_le_left _ _))) hy,
have fy_mem : f (x + y) ∈ emetric.ball (f x) rg,
{ apply hδ,
have : y ∈ emetric.ball (0 : E) δ :=
(emetric.ball_subset_ball (le_trans (min_le_left _ _) (min_le_right _ _))) hy,
simpa [edist_eq_coe_nnnorm_sub, edist_eq_coe_nnnorm] },
/- Now the proof starts. To show that the sum of `q.comp p` at `y` is `g (f (x + y))`, we will
write `q.comp p` applied to `y` as a big sum over all compositions. Since the sum is
summable, to get its convergence it suffices to get the convergence along some increasing sequence
of sets. We will use the sequence of sets `comp_partial_sum_target 0 n n`, along which the sum is
exactly the composition of the partial sums of `q` and `p`, by design. To show that it converges
to `g (f (x + y))`, pointwise convergence would not be enough, but we have uniform convergence
to save the day. -/
-- First step: the partial sum of `p` converges to `f (x + y)`.
have A : tendsto (λ n, ∑ a in finset.Ico 1 n, p a (λ b, y)) at_top (𝓝 (f (x + y) - f x)),
{ have L : ∀ᶠ n in at_top, ∑ a in finset.range n, p a (λ b, y) - f x =
∑ a in finset.Ico 1 n, p a (λ b, y),
{ rw eventually_at_top,
refine ⟨1, λ n hn, _⟩,
symmetry,
rw [eq_sub_iff_add_eq', finset.range_eq_Ico, ← Hf.coeff_zero (λi, y),
finset.sum_eq_sum_Ico_succ_bot hn] },
have : tendsto (λ n, ∑ a in finset.range n, p a (λ b, y) - f x) at_top (𝓝 (f (x + y) - f x)) :=
(Hf.has_sum y_mem).tendsto_sum_nat.sub tendsto_const_nhds,
exact tendsto.congr' L this },
-- Second step: the composition of the partial sums of `q` and `p` converges to `g (f (x + y))`.
have B : tendsto (λ n, q.partial_sum n (∑ a in finset.Ico 1 n, p a (λ b, y)))
at_top (𝓝 (g (f (x + y)))),
{ -- we use the fact that the partial sums of `q` converge locally uniformly to `g`, and that
-- composition passes to the limit under locally uniform convergence.
have B₁ : continuous_at (λ (z : F), g (f x + z)) (f (x + y) - f x),
{ refine continuous_at.comp _ (continuous_const.add continuous_id).continuous_at,
simp only [add_sub_cancel'_right, id.def],
exact Hg.continuous_on.continuous_at (is_open.mem_nhds (emetric.is_open_ball) fy_mem) },
have B₂ : f (x + y) - f x ∈ emetric.ball (0 : F) rg,
by simpa [edist_eq_coe_nnnorm, edist_eq_coe_nnnorm_sub] using fy_mem,
rw [← emetric.is_open_ball.nhds_within_eq B₂] at A,
convert Hg.tendsto_locally_uniformly_on.tendsto_comp B₁.continuous_within_at B₂ A,
simp only [add_sub_cancel'_right] },
-- Third step: the sum over all compositions in `comp_partial_sum_target 0 n n` converges to
-- `g (f (x + y))`. As this sum is exactly the composition of the partial sum, this is a direct
-- consequence of the second step
have C : tendsto (λ n,
∑ i in comp_partial_sum_target 0 n n, q.comp_along_composition p i.2 (λ j, y))
at_top (𝓝 (g (f (x + y)))),
by simpa [comp_partial_sum] using B,
-- Fourth step: the sum over all compositions is `g (f (x + y))`. This follows from the
-- convergence along a subsequence proved in the third step, and the fact that the sum is Cauchy
-- thanks to the summability properties.
have D : has_sum (λ i : (Σ n, composition n),
q.comp_along_composition p i.2 (λ j, y)) (g (f (x + y))),
{ have cau : cauchy_seq (λ (s : finset (Σ n, composition n)),
∑ i in s, q.comp_along_composition p i.2 (λ j, y)),
{ apply cauchy_seq_finset_of_norm_bounded _ (nnreal.summable_coe.2 hr) _,
simp only [coe_nnnorm, nnreal.coe_mul, nnreal.coe_pow],
rintros ⟨n, c⟩,
calc ∥(comp_along_composition q p c) (λ (j : fin n), y)∥
≤ ∥comp_along_composition q p c∥ * ∏ j : fin n, ∥y∥ :
by apply continuous_multilinear_map.le_op_norm
... ≤ ∥comp_along_composition q p c∥ * (r : ℝ) ^ n :
begin
apply mul_le_mul_of_nonneg_left _ (norm_nonneg _),
rw [finset.prod_const, finset.card_fin],
apply pow_le_pow_of_le_left (norm_nonneg _),
rw [emetric.mem_ball, edist_eq_coe_nnnorm] at hy,
have := (le_trans (le_of_lt hy) (min_le_right _ _)),
rwa [ennreal.coe_le_coe, ← nnreal.coe_le_coe, coe_nnnorm] at this
end },
exact tendsto_nhds_of_cauchy_seq_of_subseq cau
comp_partial_sum_target_tendsto_at_top C },
-- Fifth step: the sum over `n` of `q.comp p n` can be expressed as a particular resummation of
-- the sum over all compositions, by grouping together the compositions of the same
-- integer `n`. The convergence of the whole sum therefore implies the convergence of the sum
-- of `q.comp p n`.
have E : has_sum (λ n, (q.comp p) n (λ j, y)) (g (f (x + y))),
{ apply D.sigma,
assume n,
dsimp [formal_multilinear_series.comp],
convert has_sum_fintype _,
simp only [continuous_multilinear_map.sum_apply],
refl },
exact E
end
/-- If two functions `g` and `f` are analytic respectively at `f x` and `x`, then `g ∘ f` is
analytic at `x`. -/
theorem analytic_at.comp {g : F → G} {f : E → F} {x : E}
(hg : analytic_at 𝕜 g (f x)) (hf : analytic_at 𝕜 f x) : analytic_at 𝕜 (g ∘ f) x :=
let ⟨q, hq⟩ := hg, ⟨p, hp⟩ := hf in (hq.comp hp).analytic_at
/-!
### Associativity of the composition of formal multilinear series
In this paragraph, we prove the associativity of the composition of formal power series.
By definition,
```
(r.comp q).comp p n v
= ∑_{i₁ + ... + iₖ = n} (r.comp q)ₖ (p_{i₁} (v₀, ..., v_{i₁ -1}), p_{i₂} (...), ..., p_{iₖ}(...))
= ∑_{a : composition n} (r.comp q) a.length (apply_composition p a v)
```
decomposing `r.comp q` in the same way, we get
```
(r.comp q).comp p n v
= ∑_{a : composition n} ∑_{b : composition a.length}
r b.length (apply_composition q b (apply_composition p a v))
```
On the other hand,
```
r.comp (q.comp p) n v = ∑_{c : composition n} r c.length (apply_composition (q.comp p) c v)
```
Here, `apply_composition (q.comp p) c v` is a vector of length `c.length`, whose `i`-th term is
given by `(q.comp p) (c.blocks_fun i) (v_l, v_{l+1}, ..., v_{m-1})` where `{l, ..., m-1}` is the
`i`-th block in the composition `c`, of length `c.blocks_fun i` by definition. To compute this term,
we expand it as `∑_{dᵢ : composition (c.blocks_fun i)} q dᵢ.length (apply_composition p dᵢ v')`,
where `v' = (v_l, v_{l+1}, ..., v_{m-1})`. Therefore, we get
```
r.comp (q.comp p) n v =
∑_{c : composition n} ∑_{d₀ : composition (c.blocks_fun 0),
..., d_{c.length - 1} : composition (c.blocks_fun (c.length - 1))}
r c.length (λ i, q dᵢ.length (apply_composition p dᵢ v'ᵢ))
```
To show that these terms coincide, we need to explain how to reindex the sums to put them in
bijection (and then the terms we are summing will correspond to each other). Suppose we have a
composition `a` of `n`, and a composition `b` of `a.length`. Then `b` indicates how to group
together some blocks of `a`, giving altogether `b.length` blocks of blocks. These blocks of blocks
can be called `d₀, ..., d_{a.length - 1}`, and one obtains a composition `c` of `n` by saying that
each `dᵢ` is one single block. Conversely, if one starts from `c` and the `dᵢ`s, one can concatenate
the `dᵢ`s to obtain a composition `a` of `n`, and register the lengths of the `dᵢ`s in a composition
`b` of `a.length`.
An example might be enlightening. Suppose `a = [2, 2, 3, 4, 2]`. It is a composition of
length 5 of 13. The content of the blocks may be represented as `0011222333344`.
Now take `b = [2, 3]` as a composition of `a.length = 5`. It says that the first 2 blocks of `a`
should be merged, and the last 3 blocks of `a` should be merged, giving a new composition of `13`
made of two blocks of length `4` and `9`, i.e., `c = [4, 9]`. But one can also remember that
the new first block was initially made of two blocks of size `2`, so `d₀ = [2, 2]`, and the new
second block was initially made of three blocks of size `3`, `4` and `2`, so `d₁ = [3, 4, 2]`.
This equivalence is called `composition.sigma_equiv_sigma_pi n` below.
We start with preliminary results on compositions, of a very specialized nature, then define the
equivalence `composition.sigma_equiv_sigma_pi n`, and we deduce finally the associativity of
composition of formal multilinear series in `formal_multilinear_series.comp_assoc`.
-/
namespace composition
variable {n : ℕ}
/-- Rewriting equality in the dependent type `Σ (a : composition n), composition a.length)` in
non-dependent terms with lists, requiring that the blocks coincide. -/
lemma sigma_composition_eq_iff (i j : Σ (a : composition n), composition a.length) :
i = j ↔ i.1.blocks = j.1.blocks ∧ i.2.blocks = j.2.blocks :=
begin
refine ⟨by rintro rfl; exact ⟨rfl, rfl⟩, _⟩,
rcases i with ⟨a, b⟩,
rcases j with ⟨a', b'⟩,
rintros ⟨h, h'⟩,
have H : a = a', by { ext1, exact h },
induction H, congr, ext1, exact h'
end
/-- Rewriting equality in the dependent type
`Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)` in
non-dependent terms with lists, requiring that the lists of blocks coincide. -/
lemma sigma_pi_composition_eq_iff
(u v : Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) :
u = v ↔ of_fn (λ i, (u.2 i).blocks) = of_fn (λ i, (v.2 i).blocks) :=
begin
refine ⟨λ H, by rw H, λ H, _⟩,
rcases u with ⟨a, b⟩,
rcases v with ⟨a', b'⟩,
dsimp at H,
have h : a = a',
{ ext1,
have : map list.sum (of_fn (λ (i : fin (composition.length a)), (b i).blocks)) =
map list.sum (of_fn (λ (i : fin (composition.length a')), (b' i).blocks)), by rw H,
simp only [map_of_fn] at this,
change of_fn (λ (i : fin (composition.length a)), (b i).blocks.sum) =
of_fn (λ (i : fin (composition.length a')), (b' i).blocks.sum) at this,
simpa [composition.blocks_sum, composition.of_fn_blocks_fun] using this },
induction h,
simp only [true_and, eq_self_iff_true, heq_iff_eq],
ext i : 2,
have : nth_le (of_fn (λ (i : fin (composition.length a)), (b i).blocks)) i (by simp [i.is_lt]) =
nth_le (of_fn (λ (i : fin (composition.length a)), (b' i).blocks)) i (by simp [i.is_lt]) :=
nth_le_of_eq H _,
rwa [nth_le_of_fn, nth_le_of_fn] at this
end
/-- When `a` is a composition of `n` and `b` is a composition of `a.length`, `a.gather b` is the
composition of `n` obtained by gathering all the blocks of `a` corresponding to a block of `b`.
For instance, if `a = [6, 5, 3, 5, 2]` and `b = [2, 3]`, one should gather together
the first two blocks of `a` and its last three blocks, giving `a.gather b = [11, 10]`. -/
def gather (a : composition n) (b : composition a.length) : composition n :=
{ blocks := (a.blocks.split_wrt_composition b).map sum,
blocks_pos :=
begin
rw forall_mem_map_iff,
intros j hj,
suffices H : ∀ i ∈ j, 1 ≤ i, from
calc 0 < j.length : length_pos_of_mem_split_wrt_composition hj
... ≤ j.sum : length_le_sum_of_one_le _ H,
intros i hi,
apply a.one_le_blocks,
rw ← a.blocks.join_split_wrt_composition b,
exact mem_join_of_mem hj hi,
end,
blocks_sum := by { rw [← sum_join, join_split_wrt_composition, a.blocks_sum] } }
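/- A worked instance of the definition, matching the example in the docstring above: with
`a = [6, 5, 3, 5, 2]` and `b = [2, 3]`, one has
`a.blocks.split_wrt_composition b = [[6, 5], [3, 5, 2]]`, and summing each group gives
`(a.gather b).blocks = [11, 10]`. -/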
lemma length_gather (a : composition n) (b : composition a.length) :
length (a.gather b) = b.length :=
show (map list.sum (a.blocks.split_wrt_composition b)).length = b.blocks.length,
by rw [length_map, length_split_wrt_composition]
/-- An auxiliary function used in the definition of `sigma_equiv_sigma_pi` below, associating to
two compositions `a` of `n` and `b` of `a.length`, and an index `i` bounded by the length of
`a.gather b`, the subcomposition of `a` made of those blocks belonging to the `i`-th block of
`a.gather b`. -/
def sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin (a.gather b).length) :
composition ((a.gather b).blocks_fun i) :=
{ blocks := nth_le (a.blocks.split_wrt_composition b) i
(by { rw [length_split_wrt_composition, ← length_gather], exact i.2 }),
blocks_pos := assume i hi, a.blocks_pos
(by { rw ← a.blocks.join_split_wrt_composition b,
exact mem_join_of_mem (nth_le_mem _ _ _) hi }),
blocks_sum := by simp only [composition.blocks_fun, nth_le_map', composition.gather] }
lemma length_sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin b.length) :
composition.length (composition.sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ i.2⟩) =
composition.blocks_fun b i :=
show list.length (nth_le (split_wrt_composition a.blocks b) i _) = blocks_fun b i,
by { rw [nth_le_map_rev list.length, nth_le_of_eq (map_length_split_wrt_composition _ _)], refl }
lemma blocks_fun_sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin b.length) (j : fin (blocks_fun b i)) :
blocks_fun (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ i.2⟩)
⟨j, (length_sigma_composition_aux a b i).symm ▸ j.2⟩ = blocks_fun a (embedding b i j) :=
show nth_le (nth_le _ _ _) _ _ = nth_le a.blocks _ _,
by { rw [nth_le_of_eq (nth_le_split_wrt_composition _ _ _), nth_le_drop', nth_le_take'], refl }
/-- Auxiliary lemma to prove that the composition of formal multilinear series is associative.
Consider a composition `a` of `n` and a composition `b` of `a.length`. Grouping together some
blocks of `a` according to `b` as in `a.gather b`, one can compute the total size of the blocks
of `a` up to an index `size_up_to b i + j` (where the `j` corresponds to a set of blocks of `a`
that do not fill a whole block of `a.gather b`). The first part corresponds to a sum of blocks
in `a.gather b`, and the second one to a sum of blocks in the next block of
`sigma_composition_aux a b`. This is the content of this lemma. -/
lemma size_up_to_size_up_to_add (a : composition n) (b : composition a.length)
{i j : ℕ} (hi : i < b.length) (hj : j < blocks_fun b ⟨i, hi⟩) :
size_up_to a (size_up_to b i + j) = size_up_to (a.gather b) i +
(size_up_to (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ hi⟩) j) :=
begin
induction j with j IHj,
{ show sum (take ((b.blocks.take i).sum) a.blocks) =
sum (take i (map sum (split_wrt_composition a.blocks b))),
induction i with i IH,
{ refl },
{ have A : i < b.length := nat.lt_of_succ_lt hi,
have B : i < list.length (map list.sum (split_wrt_composition a.blocks b)), by simp [A],
have C : 0 < blocks_fun b ⟨i, A⟩ := composition.blocks_pos' _ _ _,
rw [sum_take_succ _ _ B, ← IH A C],
have : take (sum (take i b.blocks)) a.blocks =
take (sum (take i b.blocks)) (take (sum (take (i+1) b.blocks)) a.blocks),
{ rw [take_take, min_eq_left],
apply monotone_sum_take _ (nat.le_succ _) },
rw [this, nth_le_map', nth_le_split_wrt_composition,
← take_append_drop (sum (take i b.blocks))
((take (sum (take (nat.succ i) b.blocks)) a.blocks)), sum_append],
congr,
rw [take_append_drop] } },
{ have A : j < blocks_fun b ⟨i, hi⟩ := lt_trans (lt_add_one j) hj,
have B : j < length (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ hi⟩),
by { convert A, rw [← length_sigma_composition_aux], refl },
have C : size_up_to b i + j < size_up_to b (i + 1),
{ simp only [size_up_to_succ b hi, add_lt_add_iff_left],
exact A },
have D : size_up_to b i + j < length a := lt_of_lt_of_le C (b.size_up_to_le _),
have : size_up_to b i + nat.succ j = (size_up_to b i + j).succ := rfl,
rw [this, size_up_to_succ _ D, IHj A, size_up_to_succ _ B],
simp only [sigma_composition_aux, add_assoc, add_left_inj, fin.coe_mk],
rw [nth_le_of_eq (nth_le_split_wrt_composition _ _ _), nth_le_drop', nth_le_take _ _ C] }
end
/--
Natural equivalence between `(Σ (a : composition n), composition a.length)` and
`(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i))`, that shows up as a
change of variables in the proof that composition of formal multilinear series is associative.
Consider a composition `a` of `n` and a composition `b` of `a.length`. Then `b` indicates how to
group together some blocks of `a`, giving altogether `b.length` blocks of blocks. These blocks of
blocks can be called `d₀, ..., d_{a.length - 1}`, and one obtains a composition `c` of `n` by
saying that each `dᵢ` is one single block. The map `⟨a, b⟩ → ⟨c, (d₀, ..., d_{a.length - 1})⟩` is
the direct map in the equiv.
Conversely, if one starts from `c` and the `dᵢ`s, one can join the `dᵢ`s to obtain a composition
`a` of `n`, and register the lengths of the `dᵢ`s in a composition `b` of `a.length`. This is the
inverse map of the equiv.
-/
def sigma_equiv_sigma_pi (n : ℕ) :
(Σ (a : composition n), composition a.length) ≃
(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) :=
{ to_fun := λ i, ⟨i.1.gather i.2, i.1.sigma_composition_aux i.2⟩,
inv_fun := λ i, ⟨
{ blocks := (of_fn (λ j, (i.2 j).blocks)).join,
blocks_pos :=
begin
simp only [and_imp, list.mem_join, exists_imp_distrib, forall_mem_of_fn_iff],
exact λ i j hj, composition.blocks_pos _ hj
end,
blocks_sum := by simp [sum_of_fn, composition.blocks_sum, composition.sum_blocks_fun] },
{ blocks := of_fn (λ j, (i.2 j).length),
blocks_pos := forall_mem_of_fn_iff.2
(λ j, composition.length_pos_of_pos _ (composition.blocks_pos' _ _ _)),
blocks_sum := by { dsimp only [composition.length], simp [sum_of_fn] } }⟩,
left_inv :=
begin
-- the fact that we have a left inverse is essentially `join_split_wrt_composition`,
-- but we need to massage it to take care of the dependent setting.
rintros ⟨a, b⟩,
rw sigma_composition_eq_iff,
dsimp,
split,
{ have A := length_map list.sum (split_wrt_composition a.blocks b),
conv_rhs { rw [← join_split_wrt_composition a.blocks b,
← of_fn_nth_le (split_wrt_composition a.blocks b)] },
congr,
{ exact A },
{ exact (fin.heq_fun_iff A).2 (λ i, rfl) } },
{ have B : composition.length (composition.gather a b) = list.length b.blocks :=
composition.length_gather _ _,
conv_rhs { rw [← of_fn_nth_le b.blocks] },
congr' 1,
apply (fin.heq_fun_iff B).2 (λ i, _),
rw [sigma_composition_aux, composition.length, nth_le_map_rev list.length,
nth_le_of_eq (map_length_split_wrt_composition _ _)], refl }
end,
right_inv :=
begin
-- the fact that we have a right inverse is essentially `split_wrt_composition_join`,
-- but we need to massage it to take care of the dependent setting.
rintros ⟨c, d⟩,
have : map list.sum (of_fn (λ (i : fin (composition.length c)), (d i).blocks)) = c.blocks,
by simp [map_of_fn, (∘), composition.blocks_sum, composition.of_fn_blocks_fun],
rw sigma_pi_composition_eq_iff,
dsimp,
congr,
{ ext1,
dsimp [composition.gather],
rwa split_wrt_composition_join,
simp only [map_of_fn] },
{ rw fin.heq_fun_iff,
{ assume i,
dsimp [composition.sigma_composition_aux],
rw [nth_le_of_eq (split_wrt_composition_join _ _ _)],
{ simp only [nth_le_of_fn'] },
{ simp only [map_of_fn] } },
{ congr,
ext1,
dsimp [composition.gather],
rwa split_wrt_composition_join,
simp only [map_of_fn] } }
end }
end composition
namespace formal_multilinear_series
open composition
theorem comp_assoc (r : formal_multilinear_series 𝕜 G H) (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) :
(r.comp q).comp p = r.comp (q.comp p) :=
begin
ext n v,
/- First, rewrite the two compositions appearing in the theorem as two sums over complicated
sigma types, as in the description of the proof above. -/
let f : (Σ (a : composition n), composition a.length) → H :=
λ c, r c.2.length (apply_composition q c.2 (apply_composition p c.1 v)),
let g : (Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) → H :=
λ c, r c.1.length (λ (i : fin c.1.length),
q (c.2 i).length (apply_composition p (c.2 i) (v ∘ c.1.embedding i))),
suffices : ∑ c, f c = ∑ c, g c,
by simpa only [formal_multilinear_series.comp, continuous_multilinear_map.sum_apply,
comp_along_composition_apply, continuous_multilinear_map.map_sum, finset.sum_sigma',
apply_composition],
/- Now, we use `composition.sigma_equiv_sigma_pi n` to change
variables in the second sum, and check that we get exactly the same sums. -/
rw ← (sigma_equiv_sigma_pi n).sum_comp,
/- To check that we have the same terms, we should check that we apply the same component of
`r`, and the same component of `q`, and the same component of `p`, to the same coordinate of
`v`. This is true by definition, but at each step one needs to convince Lean that the types
one considers are the same, using a suitable congruence lemma to avoid dependent type issues.
This dance has to be done three times, one for `r`, one for `q` and one for `p`.-/
apply finset.sum_congr rfl,
rintros ⟨a, b⟩ _,
dsimp [f, g, sigma_equiv_sigma_pi],
-- check that the `r` components are the same. Based on `composition.length_gather`
apply r.congr (composition.length_gather a b).symm,
intros i hi1 hi2,
-- check that the `q` components are the same. Based on `length_sigma_composition_aux`
apply q.congr (length_sigma_composition_aux a b _).symm,
intros j hj1 hj2,
-- check that the `p` components are the same. Based on `blocks_fun_sigma_composition_aux`
apply p.congr (blocks_fun_sigma_composition_aux a b _ _).symm,
intros k hk1 hk2,
-- finally, check that the coordinates of `v` one is using are the same. Based on
-- `size_up_to_size_up_to_add`.
refine congr_arg v (fin.eq_of_veq _),
dsimp [composition.embedding],
rw [size_up_to_size_up_to_add _ _ hi1 hj1, add_assoc],
end
end formal_multilinear_series
blob_id: ca1c963b55982d64018e52a4212a6025f11cba4c | directory_id: 9dd3f3912f7321eb58ee9aa8f21778ad6221f87c
path: /tests/lean/notation8.lean | content_id: 04a7152b62a19132405056eadeddf1a9ed941ecd
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: bre7k30/lean
snapshot_id: de893411bcfa7b3c5572e61b9e1c52951b310aa4 | revision_id: 5a924699d076dab1bd5af23a8f910b433e598d7a | branch_name: refs/heads/master
visit_date: 1,610,900,145,817 | revision_date: 1,488,006,845,000 | committer_date: 1,488,006,845,000
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 119 | extension: lean
content:
constant f : nat → nat → nat
constant g : nat → nat
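-- The commands below exercise the interaction of `$` (the usual low-precedence,
-- right-associative application operator) with ordinary application: the first should
-- parse as `f (1 + g 1) (g 2 + 2)` and the second as `f (g 1) (f (1 + 1) (g 2))`.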
check f (1 + g 1) $ g 2 + 2
check f (g 1) $ f (1 + 1) $ g 2
blob_id: 88dc44eb47bdcfcf3d767ac84811675c0eec7c58 | directory_id: 84a9fc5d67b057efbf3a74e2eaa539d7f9badc27
path: /lib/data/pow.lean | content_id: eb4c7dea6404d4aa4e5d202e2c736ad3a35ac194
detected_licenses: [] | license_type: no_license | repo_name: agentultra/lean-modexp
snapshot_id: 2606769ea3f1c4417d0b4c3d448c655ed97a3f67 | revision_id: f59a433db8aa7b62d9ae26f0037b4f23cfb7afed | branch_name: refs/heads/master
visit_date: 1,610,731,162,684 | revision_date: 1,502,413,558,000 | committer_date: 1,502,413,558,000
github_id: 99,747,888 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 1,502,238,319,000 | gha_created_at: 1,502,238,319,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 2,329 | extension: lean
content:
universe u
section pow
open nat
variable {α : Type u}
section basics
variables [has_one α] [has_mul α]
def pow (b : α) : ℕ → α
| 0 := 1
| (succ n) := pow n * b
local infix `^` := pow
@[simp] lemma pow_zero (b : α) : b^0 = 1 := rfl
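-- A quick sanity check of the definition above, using the `has_one`/`has_mul`
-- instances on `ℕ`; this should print `1024`.
#eval pow (2 : ℕ) 10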
end basics
local infix `^` := pow
section comm_monoid
variables [comm_monoid α]
local infix `^` := pow
lemma one_pow (k : ℕ) : (1 : α)^k = 1 :=
begin
induction k,
{ simp },
{ dunfold pow, simp [ih_1], }
end
lemma pow_add (p : α) (x y : ℕ)
: p^(x+y) = p^x * p^y :=
begin
induction x,
case zero { simp },
case succ x
{ simp [nat.add_succ] at *,
dunfold pow,
simp [ih_1], },
end
lemma pow_mul (p : α) (x y : ℕ)
: p^(x*y) = ( p^x )^y :=
begin
induction y,
case zero { simp },
case succ y
{ simp [mul_succ,pow_add,ih_1],
dunfold pow, ac_refl },
end
lemma pow_pow_comm (p : α) (x y : ℕ)
: p^x^y = p^y^x :=
by rw [← pow_mul p,mul_comm,pow_mul]
end comm_monoid
section linear_ordered_semiring
variable [linear_ordered_semiring α]
lemma pow_pos_of_pos (p : α) {x : ℕ}
(H : 1 ≤ p)
: 1 ≤ p^x :=
begin
induction x,
case zero
{ dunfold pow, refl },
case succ x
{ dunfold pow,
rw ← one_mul (1 : α),
apply mul_le_mul ih_1 H,
{ apply zero_le_one },
{ transitivity (1 : α),
{ apply zero_le_one, },
apply ih_1 } }
end
lemma pow_lt_pow (p : α) {x y : ℕ}
(Hp : p > 1)
(Hlt : x < y)
: p^x < p^y :=
begin
induction y,
case zero
{ exfalso,
apply nat.not_lt_zero _ Hlt },
case succ y
{ dunfold pow,
rw ← mul_one (p^x),
have Hle : x ≤ y := le_of_succ_le_succ Hlt,
have Hpow_pos : 0 < p ^ x,
{ apply @lt_of_lt_of_le α _ _ 1,
{ apply zero_lt_one },
{ apply pow_pos_of_pos,
apply le_of_lt Hp } },
rw le_iff_lt_or_eq at Hle,
cases Hle with Hlt Heq,
{ transitivity p^x * p,
{ apply mul_lt_mul_of_pos_left Hp,
apply Hpow_pos },
{ apply mul_lt_mul_of_pos_right,
{ apply ih_1 Hlt },
{ apply lt_trans _ Hp,
apply zero_lt_one } } },
{ subst y,
apply mul_lt_mul_of_pos_left Hp,
apply Hpow_pos } }
end
end linear_ordered_semiring
end pow
lemma nat_pow_def (p x : ℕ)
: nat.pow p x = pow p x :=
by { induction x, refl, simp [nat.pow,pow,ih_1], }
blob_id: aa9302808d0178197eda287be3cfe54ca873a31e | directory_id: 4fa161becb8ce7378a709f5992a594764699e268
path: /src/data/nat/choose.lean | content_id: d2af6c931e9bae1a6894165cdf58454b550169ae
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: laughinggas/mathlib
snapshot_id: e4aa4565ae34e46e834434284cb26bd9d67bc373 | revision_id: 86dcd5cda7a5017c8b3c8876c89a510a19d49aad | branch_name: refs/heads/master
visit_date: 1,669,496,232,688 | revision_date: 1,592,831,995,000 | committer_date: 1,592,831,995,000
github_id: 274,155,979 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 1,592,835,190,000 | gha_created_at: 1,592,835,189,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 6,198 | extension: lean
content:
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Bhavik Mehta, Patrick Stevens
-/
import tactic.linarith
import algebra.big_operators
open nat
open_locale big_operators
lemma nat.prime.dvd_choose_add {p a b : ℕ} (hap : a < p) (hbp : b < p) (h : p ≤ a + b)
(hp : prime p) : p ∣ choose (a + b) a :=
have h₁ : p ∣ fact (a + b), from hp.dvd_fact.2 h,
have h₂ : ¬p ∣ fact a, from mt hp.dvd_fact.1 (not_le_of_gt hap),
have h₃ : ¬p ∣ fact b, from mt hp.dvd_fact.1 (not_le_of_gt hbp),
by
rw [← choose_mul_fact_mul_fact (le.intro rfl), mul_assoc, hp.dvd_mul, hp.dvd_mul,
norm_num.sub_nat_pos (a + b) a b rfl] at h₁;
exact h₁.resolve_right (not_or_distrib.2 ⟨h₂, h₃⟩)
lemma nat.prime.dvd_choose_self {p k : ℕ} (hk : 0 < k) (hkp : k < p) (hp : prime p) :
p ∣ choose p k :=
begin
have r : k + (p - k) = p,
by rw [← nat.add_sub_assoc (nat.le_of_lt hkp) k, nat.add_sub_cancel_left],
have e : p ∣ choose (k + (p - k)) k,
by exact nat.prime.dvd_choose_add hkp (sub_lt (lt.trans hk hkp) hk) (by rw r) hp,
rwa r at e,
end
/-- Show that choose is increasing for small values of the right argument. -/
lemma choose_le_succ_of_lt_half_left {r n : ℕ} (h : r < n/2) :
choose n r ≤ choose n (r+1) :=
begin
refine le_of_mul_le_mul_right _ (nat.lt_sub_left_of_add_lt (lt_of_lt_of_le h (nat.div_le_self n 2))),
rw ← choose_succ_right_eq,
apply nat.mul_le_mul_left,
rw [← nat.lt_iff_add_one_le, nat.lt_sub_left_iff_add_lt, ← mul_two],
exact lt_of_lt_of_le (mul_lt_mul_of_pos_right h zero_lt_two) (nat.div_mul_le_self n 2),
end
/-- Show that for small values of the right argument, the middle value is largest. -/
private lemma choose_le_middle_of_le_half_left {n r : ℕ} (hr : r ≤ n/2) :
choose n r ≤ choose n (n/2) :=
decreasing_induction
(λ _ k a,
(eq_or_lt_of_le a).elim
(λ t, t.symm ▸ le_refl _)
(λ h, trans (choose_le_succ_of_lt_half_left h) (k h)))
hr (λ _, le_refl _) hr
/-- `choose n r` is maximised when `r` is `n/2`. -/
lemma choose_le_middle (r n : ℕ) : nat.choose n r ≤ nat.choose n (n/2) :=
begin
cases le_or_gt r n with b b,
{ cases le_or_lt r (n/2) with a h,
{ apply choose_le_middle_of_le_half_left a },
{ rw ← choose_symm b,
apply choose_le_middle_of_le_half_left,
rw [div_lt_iff_lt_mul' zero_lt_two] at h,
rw [le_div_iff_mul_le' zero_lt_two, nat.mul_sub_right_distrib, nat.sub_le_iff,
mul_two, nat.add_sub_cancel],
exact le_of_lt h } },
{ rw nat.choose_eq_zero_of_lt b,
apply nat.zero_le }
end
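-- A concrete illustration of `choose_le_middle`: row 6 of Pascal's triangle, whose largest
-- entry is the middle one, `choose 6 3 = 20`. Expected output: `[1, 6, 15, 20, 15, 6, 1]`.
#eval (list.range 7).map (choose 6)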
section binomial
open finset
variables {α : Type*}
/-- A version of the binomial theorem for noncommutative semirings. -/
theorem commute.add_pow [semiring α] {x y : α} (h : commute x y) (n : ℕ) :
(x + y) ^ n = ∑ m in range (n + 1), x ^ m * y ^ (n - m) * choose n m :=
begin
let t : ℕ → ℕ → α := λ n m, x ^ m * (y ^ (n - m)) * (choose n m),
change (x + y) ^ n = ∑ m in range (n + 1), t n m,
have h_first : ∀ n, t n 0 = y ^ n :=
λ n, by { dsimp [t], rw[choose_zero_right, nat.cast_one, mul_one, one_mul] },
have h_last : ∀ n, t n n.succ = 0 :=
λ n, by { dsimp [t], rw [choose_succ_self, nat.cast_zero, mul_zero] },
have h_middle : ∀ (n i : ℕ), (i ∈ finset.range n.succ) →
((t n.succ) ∘ nat.succ) i = x * (t n i) + y * (t n i.succ) :=
begin
intros n i h_mem,
have h_le : i ≤ n := nat.le_of_lt_succ (finset.mem_range.mp h_mem),
dsimp [t],
rw [choose_succ_succ, nat.cast_add, mul_add],
congr' 1,
{ rw[pow_succ x, succ_sub_succ, mul_assoc, mul_assoc, mul_assoc] },
{ rw[← mul_assoc y, ← mul_assoc y, (h.symm.pow_right i.succ).eq],
by_cases h_eq : i = n,
{ rw [h_eq, choose_succ_self, nat.cast_zero, mul_zero, mul_zero] },
{ rw[succ_sub (lt_of_le_of_ne h_le h_eq)],
rw[pow_succ y, mul_assoc, mul_assoc, mul_assoc, mul_assoc] } }
end,
induction n with n ih,
{ rw [pow_zero, sum_range_succ, range_zero, sum_empty, add_zero],
dsimp [t], rw [choose_self, nat.cast_one, mul_one, mul_one] },
{ rw[sum_range_succ', h_first],
rw[finset.sum_congr rfl (h_middle n), finset.sum_add_distrib, add_assoc],
rw[pow_succ (x + y), ih, add_mul, finset.mul_sum, finset.mul_sum],
congr' 1,
rw[finset.sum_range_succ', finset.sum_range_succ, h_first, h_last,
mul_zero, zero_add, pow_succ] }
end
/-- The binomial theorem. -/
theorem add_pow [comm_semiring α] (x y : α) (n : ℕ) :
(x + y) ^ n = ∑ m in range (n + 1), x ^ m * y ^ (n - m) * choose n m :=
(commute.all x y).add_pow n
/-- The sum of entries in a row of Pascal's triangle -/
theorem sum_range_choose (n : ℕ) :
∑ m in range (n + 1), choose n m = 2 ^ n :=
by simpa using (add_pow 1 1 n).symm
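-- A small sanity check: row 4 of Pascal's triangle sums to `2 ^ 4`; this should print `16`.
#eval ∑ m in range 5, choose 4 m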
/-!
# Specific facts about binomial coefficients and their sums
-/
lemma sum_range_choose_halfway (m : nat) :
∑ i in range (m + 1), nat.choose (2 * m + 1) i = 4 ^ m :=
have ∑ i in range (m + 1), choose (2 * m + 1) (2 * m + 1 - i) =
∑ i in range (m + 1), choose (2 * m + 1) i,
from sum_congr rfl $ λ i hi, choose_symm $ by linarith [mem_range.1 hi],
(nat.mul_right_inj zero_lt_two).1 $
calc 2 * (∑ i in range (m + 1), nat.choose (2 * m + 1) i) =
(∑ i in range (m + 1), nat.choose (2 * m + 1) i) +
∑ i in range (m + 1), nat.choose (2 * m + 1) (2 * m + 1 - i) :
by rw [two_mul, this]
... = (∑ i in range (m + 1), nat.choose (2 * m + 1) i) +
∑ i in Ico (m + 1) (2 * m + 2), nat.choose (2 * m + 1) i :
by { rw [range_eq_Ico, sum_Ico_reflect], { congr, omega }, omega }
... = ∑ i in range (2 * m + 2), nat.choose (2 * m + 1) i : sum_range_add_sum_Ico _ (by omega)
... = 2^(2 * m + 1) : sum_range_choose (2 * m + 1)
... = 2 * 4^m : by { rw [nat.pow_succ, mul_comm, nat.pow_mul], refl }
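-- Concretely, for `m = 2` this is `1 + 5 + 10 = 16 = 4 ^ 2`; the following should print `16`.
#eval ∑ i in range 3, choose 5 i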
lemma choose_middle_le_pow (n : ℕ) : choose (2 * n + 1) n ≤ 4 ^ n :=
begin
have t : choose (2 * n + 1) n ≤ ∑ i in finset.range (n + 1), choose (2 * n + 1) i :=
finset.single_le_sum (λ x _, by linarith) (finset.self_mem_range_succ n),
simpa [sum_range_choose_halfway n] using t
end
end binomial
blob_id: 868d2f284604674b6d17e55363965e3402398935 | directory_id: 947fa6c38e48771ae886239b4edce6db6e18d0fb
path: /src/set_theory/cardinal/ordinal.lean | content_id: 6dffdf258ff18cbf3d5b83349ed181eb0650f49e
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: ramonfmir/mathlib
snapshot_id: c5dc8b33155473fab97c38bd3aa6723dc289beaa | revision_id: 14c52e990c17f5a00c0cc9e09847af16fabbed25 | branch_name: refs/heads/master
visit_date: 1,661,979,343,526 | revision_date: 1,660,830,384,000 | committer_date: 1,660,830,384,000
github_id: 182,072,989 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 1,555,585,876,000 | gha_created_at: 1,555,585,876,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 47,850 | extension: lean
content:
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Floris van Doorn
-/
import data.finsupp.multiset
import order.bounded
import set_theory.ordinal.principal
import tactic.linarith
/-!
# Cardinals and ordinals
Relationships between cardinals and ordinals, properties of cardinals that are proved
using ordinals.
## Main definitions
* The function `cardinal.aleph'` gives the cardinals listed by their ordinal
index, and is the inverse of `cardinal.aleph_idx`.
`aleph' n = n`, `aleph' ω = ℵ₀`, `aleph' (ω + 1) = succ ℵ₀`, etc.
It is an order isomorphism between ordinals and cardinals.
* The function `cardinal.aleph` gives the infinite cardinals listed by their
ordinal index. `aleph 0 = ℵ₀`, `aleph 1 = succ ℵ₀` is the first
uncountable cardinal, and so on.
* The function `cardinal.beth` enumerates the Beth cardinals. `beth 0 = ℵ₀`,
`beth (succ o) = 2 ^ beth o`, and for a limit ordinal `o`, `beth o` is the supremum of `beth a`
for `a < o`.
## Main Statements
* `cardinal.mul_eq_max` and `cardinal.add_eq_max` state that the product (resp. sum) of two infinite
cardinals is just their maximum. Several variations around this fact are also given.
* `cardinal.mk_list_eq_mk` : when `α` is infinite, `α` and `list α` have the same cardinality.
* simp lemmas for inequalities between `bit0 a` and `bit1 b` are registered, making `simp`
able to prove inequalities about numeral cardinals.
## Tags
cardinal arithmetic (for infinite cardinals)
-/
noncomputable theory
open function cardinal set equiv order
open_locale classical cardinal ordinal
universes u v w
namespace cardinal
section using_ordinals
open ordinal
theorem ord_is_limit {c} (co : ℵ₀ ≤ c) : (ord c).is_limit :=
begin
refine ⟨λ h, aleph_0_ne_zero _, λ a, lt_imp_lt_of_le_imp_le (λ h, _)⟩,
{ rw [←ordinal.le_zero, ord_le] at h,
simpa only [card_zero, nonpos_iff_eq_zero] using co.trans h },
{ rw ord_le at h ⊢,
rwa [←@add_one_of_aleph_0_le (card a), ←card_succ],
rw [←ord_le, ←le_succ_of_is_limit, ord_le],
{ exact co.trans h },
{ rw ord_aleph_0, exact omega_is_limit } }
end
/-! ### Aleph cardinals -/
/-- The `aleph'` index function, which gives the ordinal index of a cardinal.
(The `aleph'` part is because unlike `aleph` this counts also the
finite stages. So `aleph_idx n = n`, `aleph_idx ℵ₀ = ω`,
`aleph_idx ℵ₁ = ω + 1` and so on.)
In this definition, we register additionally that this function is an initial segment,
i.e., it is order preserving and its range is an initial segment of the ordinals.
For the basic function version, see `aleph_idx`.
For an upgraded version stating that the range is everything, see `aleph_idx.rel_iso`. -/
def aleph_idx.initial_seg : @initial_seg cardinal ordinal (<) (<) :=
@rel_embedding.collapse cardinal ordinal (<) (<) _ cardinal.ord.order_embedding.lt_embedding
/-- The `aleph'` index function, which gives the ordinal index of a cardinal.
(The `aleph'` part is because unlike `aleph` this counts also the
finite stages. So `aleph_idx n = n`, `aleph_idx ℵ₀ = ω`,
`aleph_idx ℵ₁ = ω + 1` and so on.)
For an upgraded version stating that the range is everything, see `aleph_idx.rel_iso`. -/
def aleph_idx : cardinal → ordinal := aleph_idx.initial_seg
@[simp] theorem aleph_idx.initial_seg_coe :
(aleph_idx.initial_seg : cardinal → ordinal) = aleph_idx := rfl
@[simp] theorem aleph_idx_lt {a b} : aleph_idx a < aleph_idx b ↔ a < b :=
aleph_idx.initial_seg.to_rel_embedding.map_rel_iff
@[simp] theorem aleph_idx_le {a b} : aleph_idx a ≤ aleph_idx b ↔ a ≤ b :=
by rw [← not_lt, ← not_lt, aleph_idx_lt]
theorem aleph_idx.init {a b} : b < aleph_idx a → ∃ c, aleph_idx c = b :=
aleph_idx.initial_seg.init _ _
/-- The `aleph'` index function, which gives the ordinal index of a cardinal.
(The `aleph'` part is because unlike `aleph` this counts also the
finite stages. So `aleph_idx n = n`, `aleph_idx ℵ₀ = ω`,
`aleph_idx ℵ₁ = ω + 1` and so on.)
In this version, we register additionally that this function is an order isomorphism
between cardinals and ordinals.
For the basic function version, see `aleph_idx`. -/
def aleph_idx.rel_iso : @rel_iso cardinal.{u} ordinal.{u} (<) (<) :=
@rel_iso.of_surjective cardinal.{u} ordinal.{u} (<) (<) aleph_idx.initial_seg.{u} $
(initial_seg.eq_or_principal aleph_idx.initial_seg.{u}).resolve_right $
λ ⟨o, e⟩, begin
have : ∀ c, aleph_idx c < o := λ c, (e _).2 ⟨_, rfl⟩,
refine ordinal.induction_on o _ this, introsI α r _ h,
let s := ⨆ a, inv_fun aleph_idx (ordinal.typein r a),
apply (lt_succ s).not_le,
have I : injective aleph_idx := aleph_idx.initial_seg.to_embedding.injective,
simpa only [typein_enum, left_inverse_inv_fun I (succ s)] using le_csupr
(cardinal.bdd_above_range.{u u} (λ a : α, inv_fun aleph_idx (ordinal.typein r a)))
(ordinal.enum r _ (h (succ s)))
end
@[simp] theorem aleph_idx.rel_iso_coe :
(aleph_idx.rel_iso : cardinal → ordinal) = aleph_idx := rfl
@[simp] theorem type_cardinal : @type cardinal (<) _ = ordinal.univ.{u (u+1)} :=
by rw ordinal.univ_id; exact quotient.sound ⟨aleph_idx.rel_iso⟩
@[simp] theorem mk_cardinal : #cardinal = univ.{u (u+1)} :=
by simpa only [card_type, card_univ] using congr_arg card type_cardinal
/-- The `aleph'` function gives the cardinals listed by their ordinal
index, and is the inverse of `aleph_idx`.
`aleph' n = n`, `aleph' ω = ℵ₀`, `aleph' (ω + 1) = succ ℵ₀`, etc.
In this version, we register additionally that this function is an order isomorphism
between ordinals and cardinals.
For the basic function version, see `aleph'`. -/
def aleph'.rel_iso := cardinal.aleph_idx.rel_iso.symm
/-- The `aleph'` function gives the cardinals listed by their ordinal
index, and is the inverse of `aleph_idx`.
`aleph' n = n`, `aleph' ω = ℵ₀`, `aleph' (ω + 1) = succ ℵ₀`, etc.
def aleph' : ordinal → cardinal := aleph'.rel_iso
@[simp] theorem aleph'.rel_iso_coe :
(aleph'.rel_iso : ordinal → cardinal) = aleph' := rfl
@[simp] theorem aleph'_lt {o₁ o₂ : ordinal} : aleph' o₁ < aleph' o₂ ↔ o₁ < o₂ :=
aleph'.rel_iso.map_rel_iff
@[simp] theorem aleph'_le {o₁ o₂ : ordinal} : aleph' o₁ ≤ aleph' o₂ ↔ o₁ ≤ o₂ :=
le_iff_le_iff_lt_iff_lt.2 aleph'_lt
@[simp] theorem aleph'_aleph_idx (c : cardinal) : aleph' c.aleph_idx = c :=
cardinal.aleph_idx.rel_iso.to_equiv.symm_apply_apply c
@[simp] theorem aleph_idx_aleph' (o : ordinal) : (aleph' o).aleph_idx = o :=
cardinal.aleph_idx.rel_iso.to_equiv.apply_symm_apply o
@[simp] theorem aleph'_zero : aleph' 0 = 0 :=
by { rw [← nonpos_iff_eq_zero, ← aleph'_aleph_idx 0, aleph'_le], apply ordinal.zero_le }
@[simp] theorem aleph'_succ {o : ordinal} : aleph' (succ o) = succ (aleph' o) :=
begin
apply (succ_le_of_lt $ aleph'_lt.2 $ lt_succ o).antisymm' (cardinal.aleph_idx_le.1 $ _),
rw [aleph_idx_aleph', succ_le_iff, ← aleph'_lt, aleph'_aleph_idx],
apply lt_succ
end
@[simp] theorem aleph'_nat : ∀ n : ℕ, aleph' n = n
| 0 := aleph'_zero
| (n+1) := show aleph' (succ n) = n.succ,
by rw [aleph'_succ, aleph'_nat, nat_succ]
theorem aleph'_le_of_limit {o : ordinal} (l : o.is_limit) {c} :
aleph' o ≤ c ↔ ∀ o' < o, aleph' o' ≤ c :=
⟨λ h o' h', (aleph'_le.2 $ h'.le).trans h,
λ h, begin
rw [←aleph'_aleph_idx c, aleph'_le, limit_le l],
intros x h',
rw [←aleph'_le, aleph'_aleph_idx],
exact h _ h'
end⟩
theorem aleph'_limit {o : ordinal} (ho : is_limit o) : aleph' o = ⨆ a : Iio o, aleph' a :=
begin
refine le_antisymm _ (csupr_le' (λ i, aleph'_le.2 (le_of_lt i.2))),
rw aleph'_le_of_limit ho,
exact λ a ha, le_csupr (bdd_above_of_small _) (⟨a, ha⟩ : Iio o)
end
@[simp] theorem aleph'_omega : aleph' ω = ℵ₀ :=
eq_of_forall_ge_iff $ λ c, begin
simp only [aleph'_le_of_limit omega_is_limit, lt_omega, exists_imp_distrib, aleph_0_le],
exact forall_swap.trans (forall_congr $ λ n, by simp only [forall_eq, aleph'_nat]),
end
/-- `aleph'` and `aleph_idx` form an equivalence between `ordinal` and `cardinal` -/
@[simp] def aleph'_equiv : ordinal ≃ cardinal :=
⟨aleph', aleph_idx, aleph_idx_aleph', aleph'_aleph_idx⟩
/-- The `aleph` function gives the infinite cardinals listed by their
ordinal index. `aleph 0 = ℵ₀`, `aleph 1 = succ ℵ₀` is the first
uncountable cardinal, and so on. -/
def aleph (o : ordinal) : cardinal := aleph' (ω + o)
@[simp] theorem aleph_lt {o₁ o₂ : ordinal} : aleph o₁ < aleph o₂ ↔ o₁ < o₂ :=
aleph'_lt.trans (add_lt_add_iff_left _)
@[simp] theorem aleph_le {o₁ o₂ : ordinal} : aleph o₁ ≤ aleph o₂ ↔ o₁ ≤ o₂ :=
le_iff_le_iff_lt_iff_lt.2 aleph_lt
@[simp] theorem max_aleph_eq (o₁ o₂ : ordinal) : max (aleph o₁) (aleph o₂) = aleph (max o₁ o₂) :=
begin
cases le_total (aleph o₁) (aleph o₂) with h h,
{ rw [max_eq_right h, max_eq_right (aleph_le.1 h)] },
{ rw [max_eq_left h, max_eq_left (aleph_le.1 h)] }
end
@[simp] theorem aleph_succ {o : ordinal} : aleph (succ o) = succ (aleph o) :=
by rw [aleph, add_succ, aleph'_succ, aleph]
@[simp] theorem aleph_zero : aleph 0 = ℵ₀ :=
by rw [aleph, add_zero, aleph'_omega]
theorem aleph_limit {o : ordinal} (ho : is_limit o) : aleph o = ⨆ a : Iio o, aleph a :=
begin
apply le_antisymm _ (csupr_le' _),
{ rw [aleph, aleph'_limit (ho.add _)],
refine csupr_mono' (bdd_above_of_small _) _,
rintro ⟨i, hi⟩,
cases lt_or_le i ω,
{ rcases lt_omega.1 h with ⟨n, rfl⟩,
use ⟨0, ho.pos⟩,
simpa using (nat_lt_aleph_0 n).le },
{ exact ⟨⟨_, (sub_lt_of_le h).2 hi⟩, aleph'_le.2 (le_add_sub _ _)⟩ } },
{ exact λ i, aleph_le.2 (le_of_lt i.2) }
end
theorem aleph_0_le_aleph' {o : ordinal} : ℵ₀ ≤ aleph' o ↔ ω ≤ o :=
by rw [← aleph'_omega, aleph'_le]
theorem aleph_0_le_aleph (o : ordinal) : ℵ₀ ≤ aleph o :=
by { rw [aleph, aleph_0_le_aleph'], apply ordinal.le_add_right }
theorem aleph'_pos {o : ordinal} (ho : 0 < o) : 0 < aleph' o :=
by rwa [←aleph'_zero, aleph'_lt]
theorem aleph_pos (o : ordinal) : 0 < aleph o :=
aleph_0_pos.trans_le (aleph_0_le_aleph o)
@[simp] theorem aleph_to_nat (o : ordinal) : (aleph o).to_nat = 0 :=
to_nat_apply_of_aleph_0_le $ aleph_0_le_aleph o
@[simp] theorem aleph_to_part_enat (o : ordinal) : (aleph o).to_part_enat = ⊤ :=
to_part_enat_apply_of_aleph_0_le $ aleph_0_le_aleph o
instance nonempty_out_aleph (o : ordinal) : nonempty (aleph o).ord.out.α :=
begin
rw [out_nonempty_iff_ne_zero, ←ord_zero],
exact λ h, (ord_injective h).not_gt (aleph_pos o)
end
theorem ord_aleph_is_limit (o : ordinal) : is_limit (aleph o).ord :=
ord_is_limit $ aleph_0_le_aleph _
instance (o : ordinal) : no_max_order (aleph o).ord.out.α :=
out_no_max_of_succ_lt (ord_aleph_is_limit o).2
theorem exists_aleph {c : cardinal} : ℵ₀ ≤ c ↔ ∃ o, c = aleph o :=
⟨λ h, ⟨aleph_idx c - ω,
by { rw [aleph, ordinal.add_sub_cancel_of_le, aleph'_aleph_idx],
rwa [← aleph_0_le_aleph', aleph'_aleph_idx] }⟩,
λ ⟨o, e⟩, e.symm ▸ aleph_0_le_aleph _⟩
theorem aleph'_is_normal : is_normal (ord ∘ aleph') :=
⟨λ o, ord_lt_ord.2 $ aleph'_lt.2 $ lt_succ o,
λ o l a, by simp only [ord_le, aleph'_le_of_limit l]⟩
theorem aleph_is_normal : is_normal (ord ∘ aleph) :=
aleph'_is_normal.trans $ add_is_normal ω
theorem succ_aleph_0 : succ ℵ₀ = aleph 1 :=
by rw [←aleph_zero, ←aleph_succ, ordinal.succ_zero]
lemma aleph_0_lt_aleph_one : ℵ₀ < aleph 1 :=
by { rw ←succ_aleph_0, apply lt_succ }
lemma countable_iff_lt_aleph_one {α : Type*} (s : set α) : s.countable ↔ #s < aleph 1 :=
by rw [←succ_aleph_0, lt_succ_iff, mk_set_le_aleph_0]
/-- Ordinals that are cardinals are unbounded. -/
theorem ord_card_unbounded : unbounded (<) {b : ordinal | b.card.ord = b} :=
unbounded_lt_iff.2 $ λ a, ⟨_, ⟨(by { dsimp, rw card_ord }), (lt_ord_succ_card a).le⟩⟩
theorem eq_aleph'_of_eq_card_ord {o : ordinal} (ho : o.card.ord = o) : ∃ a, (aleph' a).ord = o :=
⟨cardinal.aleph_idx.rel_iso o.card, by simpa using ho⟩
/-- `ord ∘ aleph'` enumerates the ordinals that are cardinals. -/
theorem ord_aleph'_eq_enum_card : ord ∘ aleph' = enum_ord {b : ordinal | b.card.ord = b} :=
begin
rw [←eq_enum_ord _ ord_card_unbounded, range_eq_iff],
exact ⟨aleph'_is_normal.strict_mono, ⟨(λ a, (by { dsimp, rw card_ord })),
λ b hb, eq_aleph'_of_eq_card_ord hb⟩⟩
end
/-- Infinite ordinals that are cardinals are unbounded. -/
theorem ord_card_unbounded' : unbounded (<) {b : ordinal | b.card.ord = b ∧ ω ≤ b} :=
(unbounded_lt_inter_le ω).2 ord_card_unbounded
theorem eq_aleph_of_eq_card_ord {o : ordinal} (ho : o.card.ord = o) (ho' : ω ≤ o) :
∃ a, (aleph a).ord = o :=
begin
cases eq_aleph'_of_eq_card_ord ho with a ha,
use a - ω,
unfold aleph,
rwa ordinal.add_sub_cancel_of_le,
rwa [←aleph_0_le_aleph', ←ord_le_ord, ha, ord_aleph_0]
end
/-- `ord ∘ aleph` enumerates the infinite ordinals that are cardinals. -/
theorem ord_aleph_eq_enum_card :
ord ∘ aleph = enum_ord {b : ordinal | b.card.ord = b ∧ ω ≤ b} :=
begin
rw ←eq_enum_ord _ ord_card_unbounded',
use aleph_is_normal.strict_mono,
rw range_eq_iff,
refine ⟨(λ a, ⟨_, _⟩), λ b hb, eq_aleph_of_eq_card_ord hb.1 hb.2⟩,
{ rw card_ord },
{ rw [←ord_aleph_0, ord_le_ord],
exact aleph_0_le_aleph _ }
end
/-! ### Beth cardinals -/
/-- Beth numbers are defined so that `beth 0 = ℵ₀`, `beth (succ o) = 2 ^ (beth o)`, and when `o` is
a limit ordinal, `beth o` is the supremum of `beth o'` for `o' < o`.
Assuming the generalized continuum hypothesis, which is undecidable in ZFC, `beth o = aleph o` for
every `o`. -/
def beth (o : ordinal.{u}) : cardinal.{u} :=
limit_rec_on o aleph_0 (λ _ x, 2 ^ x) (λ a ha IH, ⨆ b : Iio a, IH b.1 b.2)
@[simp] theorem beth_zero : beth 0 = aleph_0 :=
limit_rec_on_zero _ _ _
@[simp] theorem beth_succ (o : ordinal) : beth (succ o) = 2 ^ beth o :=
limit_rec_on_succ _ _ _ _
theorem beth_limit {o : ordinal} : is_limit o → beth o = ⨆ a : Iio o, beth a :=
limit_rec_on_limit _ _ _ _
theorem beth_strict_mono : strict_mono beth :=
begin
intros a b,
induction b using ordinal.induction with b IH generalizing a,
intro h,
rcases zero_or_succ_or_limit b with rfl | ⟨c, rfl⟩ | hb,
{ exact (ordinal.not_lt_zero a h).elim },
{ rw lt_succ_iff at h,
rw beth_succ,
apply lt_of_le_of_lt _ (cantor _),
rcases eq_or_lt_of_le h with rfl | h, { refl },
exact (IH c (lt_succ c) h).le },
{ apply (cantor _).trans_le,
rw [beth_limit hb, ←beth_succ],
exact le_csupr (bdd_above_of_small _) (⟨_, hb.succ_lt h⟩ : Iio b) }
end
@[simp] theorem beth_lt {o₁ o₂ : ordinal} : beth o₁ < beth o₂ ↔ o₁ < o₂ :=
beth_strict_mono.lt_iff_lt
@[simp] theorem beth_le {o₁ o₂ : ordinal} : beth o₁ ≤ beth o₂ ↔ o₁ ≤ o₂ :=
beth_strict_mono.le_iff_le
theorem aleph_le_beth (o : ordinal) : aleph o ≤ beth o :=
begin
apply limit_rec_on o,
{ simp },
{ intros o h,
rw [aleph_succ, beth_succ, succ_le_iff],
exact (cantor _).trans_le (power_le_power_left two_ne_zero' h) },
{ intros o ho IH,
rw [aleph_limit ho, beth_limit ho],
exact csupr_mono (bdd_above_of_small _) (λ x, IH x.1 x.2) }
end
theorem aleph_0_le_beth (o : ordinal) : ℵ₀ ≤ beth o :=
(aleph_0_le_aleph o).trans $ aleph_le_beth o
theorem beth_pos (o : ordinal) : 0 < beth o :=
aleph_0_pos.trans_le $ aleph_0_le_beth o
theorem beth_ne_zero (o : ordinal) : beth o ≠ 0 :=
(beth_pos o).ne'
/-! ### Properties of `mul` -/
/-- If `α` is an infinite type, then `α × α` and `α` have the same cardinality. -/
theorem mul_eq_self {c : cardinal} (h : ℵ₀ ≤ c) : c * c = c :=
begin
refine le_antisymm _
(by simpa only [mul_one] using
mul_le_mul_left' (one_le_aleph_0.trans h) c),
-- the only nontrivial part is `c * c ≤ c`. We prove it inductively.
refine acc.rec_on (cardinal.lt_wf.apply c) (λ c _,
quotient.induction_on c $ λ α IH ol, _) h,
-- consider the minimal well-order `r` on `α` (a type with cardinality `c`).
rcases ord_eq α with ⟨r, wo, e⟩, resetI,
letI := linear_order_of_STO' r,
haveI : is_well_order α (<) := wo,
-- Define an order `s` on `α × α` by writing `(a, b) < (c, d)` if `max a b < max c d`, or
-- the max are equal and `a < c`, or the max are equal and `a = c` and `b < d`.
let g : α × α → α := λ p, max p.1 p.2,
let f : α × α ↪ ordinal × (α × α) :=
⟨λ p:α×α, (typein (<) (g p), p), λ p q, congr_arg prod.snd⟩,
let s := f ⁻¹'o (prod.lex (<) (prod.lex (<) (<))),
-- this is a well order on `α × α`.
haveI : is_well_order _ s := (rel_embedding.preimage _ _).is_well_order,
  /- It suffices to show that this well order is smaller than `r`:
  if it were larger, then `r` would be a strict prefix of `s`. It would be contained in
`β × β` for some `β` of cardinality `< c`. By the inductive assumption, this set has the
same cardinality as `β` (or it is finite if `β` is finite), so it is `< c`, which is a
contradiction. -/
suffices : type s ≤ type r, {exact card_le_card this},
refine le_of_forall_lt (λ o h, _),
rcases typein_surj s h with ⟨p, rfl⟩,
rw [← e, lt_ord],
refine lt_of_le_of_lt
(_ : _ ≤ card (succ (typein (<) (g p))) * card (succ (typein (<) (g p)))) _,
{ have : {q | s q p} ⊆ insert (g p) {x | x < g p} ×ˢ insert (g p) {x | x < g p},
{ intros q h,
simp only [s, embedding.coe_fn_mk, order.preimage, typein_lt_typein, prod.lex_def, typein_inj]
at h,
exact max_le_iff.1 (le_iff_lt_or_eq.2 $ h.imp_right and.left) },
suffices H : (insert (g p) {x | r x (g p)} : set α) ≃ ({x | r x (g p)} ⊕ punit),
{ exact ⟨(set.embedding_of_subset _ _ this).trans
((equiv.set.prod _ _).trans (H.prod_congr H)).to_embedding⟩ },
refine (equiv.set.insert _).trans
((equiv.refl _).sum_congr punit_equiv_punit),
apply @irrefl _ r },
cases lt_or_le (card (succ (typein (<) (g p)))) ℵ₀ with qo qo,
{ exact (mul_lt_aleph_0 qo qo).trans_le ol },
{ suffices, {exact (IH _ this qo).trans_lt this},
rw ← lt_ord, apply (ord_is_limit ol).2,
rw [mk_def, e], apply typein_lt_type }
end
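-- A minimal instance of the lemma just proved, with `c = ℵ₀`.
example : ℵ₀ * ℵ₀ = ℵ₀ := mul_eq_self le_rfl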
end using_ordinals
/-- If `α` and `β` are infinite types, then the cardinality of `α × β` is the maximum
of the cardinalities of `α` and `β`. -/
theorem mul_eq_max {a b : cardinal} (ha : ℵ₀ ≤ a) (hb : ℵ₀ ≤ b) : a * b = max a b :=
le_antisymm
(mul_eq_self (ha.trans (le_max_left a b)) ▸
mul_le_mul' (le_max_left _ _) (le_max_right _ _)) $
max_le
(by simpa only [mul_one] using
mul_le_mul_left' (one_le_aleph_0.trans hb) a)
(by simpa only [one_mul] using
mul_le_mul_right' (one_le_aleph_0.trans ha) b)
@[simp] theorem mul_mk_eq_max {α β : Type*} [infinite α] [infinite β] : #α * #β = max (#α) (#β) :=
mul_eq_max (aleph_0_le_mk α) (aleph_0_le_mk β)
@[simp] theorem aleph_mul_aleph (o₁ o₂ : ordinal) : aleph o₁ * aleph o₂ = aleph (max o₁ o₂) :=
by rw [cardinal.mul_eq_max (aleph_0_le_aleph o₁) (aleph_0_le_aleph o₂), max_aleph_eq]
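-- A small sanity check: the product of two equal alephs.
example : aleph 1 * aleph 1 = aleph 1 := by rw [aleph_mul_aleph, max_self]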
@[simp] theorem aleph_0_mul_eq {a : cardinal} (ha : ℵ₀ ≤ a) : ℵ₀ * a = a :=
(mul_eq_max le_rfl ha).trans (max_eq_right ha)
@[simp] theorem mul_aleph_0_eq {a : cardinal} (ha : ℵ₀ ≤ a) : a * ℵ₀ = a :=
(mul_eq_max ha le_rfl).trans (max_eq_left ha)
@[simp] theorem aleph_0_mul_mk_eq {α : Type*} [infinite α] : ℵ₀ * #α = #α :=
aleph_0_mul_eq (aleph_0_le_mk α)
@[simp] theorem mk_mul_aleph_0_eq {α : Type*} [infinite α] : #α * ℵ₀ = #α :=
mul_aleph_0_eq (aleph_0_le_mk α)
@[simp] theorem aleph_0_mul_aleph (o : ordinal) : ℵ₀ * aleph o = aleph o :=
aleph_0_mul_eq (aleph_0_le_aleph o)
@[simp] theorem aleph_mul_aleph_0 (o : ordinal) : aleph o * ℵ₀ = aleph o :=
mul_aleph_0_eq (aleph_0_le_aleph o)
theorem mul_lt_of_lt {a b c : cardinal} (hc : ℵ₀ ≤ c) (h1 : a < c) (h2 : b < c) : a * b < c :=
(mul_le_mul' (le_max_left a b) (le_max_right a b)).trans_lt $
(lt_or_le (max a b) ℵ₀).elim
(λ h, (mul_lt_aleph_0 h h).trans_le hc)
(λ h, by { rw mul_eq_self h, exact max_lt h1 h2 })
lemma mul_le_max_of_aleph_0_le_left {a b : cardinal} (h : ℵ₀ ≤ a) : a * b ≤ max a b :=
begin
convert mul_le_mul' (le_max_left a b) (le_max_right a b),
rw mul_eq_self,
refine h.trans (le_max_left a b)
end
lemma mul_eq_max_of_aleph_0_le_left {a b : cardinal} (h : ℵ₀ ≤ a) (h' : b ≠ 0) : a * b = max a b :=
begin
cases le_or_lt ℵ₀ b with hb hb, { exact mul_eq_max h hb },
refine (mul_le_max_of_aleph_0_le_left h).antisymm _,
have : b ≤ a, from hb.le.trans h,
rw [max_eq_left this],
convert mul_le_mul_left' (one_le_iff_ne_zero.mpr h') _, rw [mul_one],
end
lemma mul_eq_max_of_aleph_0_le_right {a b : cardinal} (h' : a ≠ 0) (h : ℵ₀ ≤ b) : a * b = max a b :=
begin
rw [mul_comm, max_comm],
exact mul_eq_max_of_aleph_0_le_left h h'
end
lemma mul_eq_max' {a b : cardinal} (h : ℵ₀ ≤ a * b) : a * b = max a b :=
begin
rcases aleph_0_le_mul_iff.mp h with ⟨ha, hb, ha' | hb'⟩,
{ exact mul_eq_max_of_aleph_0_le_left ha' hb },
{ exact mul_eq_max_of_aleph_0_le_right ha hb' }
end
theorem mul_le_max (a b : cardinal) : a * b ≤ max (max a b) ℵ₀ :=
begin
rcases eq_or_ne a 0 with rfl | ha0, { simp },
rcases eq_or_ne b 0 with rfl | hb0, { simp },
cases le_or_lt ℵ₀ a with ha ha,
{ rw [mul_eq_max_of_aleph_0_le_left ha hb0],
exact le_max_left _ _ },
{ cases le_or_lt ℵ₀ b with hb hb,
{ rw [mul_comm, mul_eq_max_of_aleph_0_le_left hb ha0, max_comm],
exact le_max_left _ _ },
{ exact le_max_of_le_right (mul_lt_aleph_0 ha hb).le } }
end
lemma mul_eq_left {a b : cardinal} (ha : ℵ₀ ≤ a) (hb : b ≤ a) (hb' : b ≠ 0) : a * b = a :=
by { rw [mul_eq_max_of_aleph_0_le_left ha hb', max_eq_left hb] }
lemma mul_eq_right {a b : cardinal} (hb : ℵ₀ ≤ b) (ha : a ≤ b) (ha' : a ≠ 0) : a * b = b :=
by { rw [mul_comm, mul_eq_left hb ha ha'] }
lemma le_mul_left {a b : cardinal} (h : b ≠ 0) : a ≤ b * a :=
by { convert mul_le_mul_right' (one_le_iff_ne_zero.mpr h) _,
rw [one_mul] }
lemma le_mul_right {a b : cardinal} (h : b ≠ 0) : a ≤ a * b :=
by { rw [mul_comm], exact le_mul_left h }
lemma mul_eq_left_iff {a b : cardinal} : a * b = a ↔ ((max ℵ₀ b ≤ a ∧ b ≠ 0) ∨ b = 1 ∨ a = 0) :=
begin
rw max_le_iff,
refine ⟨λ h, _, _⟩,
{ cases le_or_lt ℵ₀ a with ha ha,
{ have : a ≠ 0, { rintro rfl, exact ha.not_lt aleph_0_pos },
left, use ha,
{ rw ←not_lt, exact λ hb, ne_of_gt (hb.trans_le (le_mul_left this)) h },
{ rintro rfl, apply this, rw mul_zero at h, exact h.symm }},
right, by_cases h2a : a = 0, { exact or.inr h2a },
have hb : b ≠ 0, { rintro rfl, apply h2a, rw mul_zero at h, exact h.symm },
left, rw [←h, mul_lt_aleph_0_iff, lt_aleph_0, lt_aleph_0] at ha,
rcases ha with rfl|rfl|⟨⟨n, rfl⟩, ⟨m, rfl⟩⟩, contradiction, contradiction,
rw ←ne at h2a, rw ←one_le_iff_ne_zero at h2a hb, norm_cast at h2a hb h ⊢,
apply le_antisymm _ hb, rw ←not_lt,
apply λ h2b, ne_of_gt _ h, conv_lhs { rw ←mul_one n },
rwa mul_lt_mul_left, apply nat.lt_of_succ_le h2a },
{ rintro (⟨⟨ha, hab⟩, hb⟩|rfl|rfl),
{ rw [mul_eq_max_of_aleph_0_le_left ha hb, max_eq_left hab] },
all_goals { simp }}
end
/-! ### Properties of `add` -/
/-- If `α` is an infinite type, then `α ⊕ α` and `α` have the same cardinality. -/
theorem add_eq_self {c : cardinal} (h : ℵ₀ ≤ c) : c + c = c :=
le_antisymm
(by simpa only [nat.cast_bit0, nat.cast_one, mul_eq_self h, two_mul] using
mul_le_mul_right' ((nat_lt_aleph_0 2).le.trans h) c)
(self_le_add_left c c)
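-- A minimal instance of the lemma just proved, with `c = ℵ₀`.
example : ℵ₀ + ℵ₀ = ℵ₀ := add_eq_self le_rfl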
/-- If `α` is an infinite type, then the cardinality of `α ⊕ β` is the maximum
of the cardinalities of `α` and `β`. -/
theorem add_eq_max {a b : cardinal} (ha : ℵ₀ ≤ a) : a + b = max a b :=
le_antisymm
(add_eq_self (ha.trans (le_max_left a b)) ▸
add_le_add (le_max_left _ _) (le_max_right _ _)) $
max_le (self_le_add_right _ _) (self_le_add_left _ _)
theorem add_eq_max' {a b : cardinal} (ha : ℵ₀ ≤ b) : a + b = max a b :=
by rw [add_comm, max_comm, add_eq_max ha]
@[simp] theorem add_mk_eq_max {α β : Type*} [infinite α] : #α + #β = max (#α) (#β) :=
add_eq_max (aleph_0_le_mk α)
@[simp] theorem add_mk_eq_max' {α β : Type*} [infinite β] : #α + #β = max (#α) (#β) :=
add_eq_max' (aleph_0_le_mk β)
theorem add_le_max (a b : cardinal) : a + b ≤ max (max a b) ℵ₀ :=
begin
cases le_or_lt ℵ₀ a with ha ha,
{ rw [add_eq_max ha],
exact le_max_left _ _ },
{ cases le_or_lt ℵ₀ b with hb hb,
{ rw [add_comm, add_eq_max hb, max_comm],
exact le_max_left _ _ },
{ exact le_max_of_le_right (add_lt_aleph_0 ha hb).le } }
end
theorem add_le_of_le {a b c : cardinal} (hc : ℵ₀ ≤ c) (h1 : a ≤ c) (h2 : b ≤ c) : a + b ≤ c :=
(add_le_add h1 h2).trans $ le_of_eq $ add_eq_self hc
theorem add_lt_of_lt {a b c : cardinal} (hc : ℵ₀ ≤ c) (h1 : a < c) (h2 : b < c) : a + b < c :=
(add_le_add (le_max_left a b) (le_max_right a b)).trans_lt $
(lt_or_le (max a b) ℵ₀).elim
(λ h, (add_lt_aleph_0 h h).trans_le hc)
(λ h, by rw add_eq_self h; exact max_lt h1 h2)
lemma eq_of_add_eq_of_aleph_0_le {a b c : cardinal} (h : a + b = c) (ha : a < c) (hc : ℵ₀ ≤ c) :
b = c :=
begin
apply le_antisymm,
{ rw [← h], apply self_le_add_left },
rw[← not_lt], intro hb,
have : a + b < c := add_lt_of_lt hc ha hb,
simpa [h, lt_irrefl] using this
end
lemma add_eq_left {a b : cardinal} (ha : ℵ₀ ≤ a) (hb : b ≤ a) : a + b = a :=
by { rw [add_eq_max ha, max_eq_left hb] }
lemma add_eq_right {a b : cardinal} (hb : ℵ₀ ≤ b) (ha : a ≤ b) : a + b = b :=
by { rw [add_comm, add_eq_left hb ha] }
lemma add_eq_left_iff {a b : cardinal} : a + b = a ↔ (max ℵ₀ b ≤ a ∨ b = 0) :=
begin
rw max_le_iff,
refine ⟨λ h, _, _⟩,
{ cases (le_or_lt ℵ₀ a) with ha ha,
{ left, use ha, rw ←not_lt, apply λ hb, ne_of_gt _ h,
exact hb.trans_le (self_le_add_left b a) },
right, rw [←h, add_lt_aleph_0_iff, lt_aleph_0, lt_aleph_0] at ha,
rcases ha with ⟨⟨n, rfl⟩, ⟨m, rfl⟩⟩, norm_cast at h ⊢,
rw [←add_right_inj, h, add_zero] },
{ rintro (⟨h1, h2⟩|h3),
{ rw [add_eq_max h1, max_eq_left h2] },
{ rw [h3, add_zero] } }
end
lemma add_eq_right_iff {a b : cardinal} : a + b = b ↔ (max ℵ₀ a ≤ b ∨ a = 0) :=
by { rw [add_comm, add_eq_left_iff] }
lemma add_one_eq {a : cardinal} (ha : ℵ₀ ≤ a) : a + 1 = a :=
add_eq_left ha (one_le_aleph_0.trans ha)
@[simp] lemma mk_add_one_eq {α : Type*} [infinite α] : #α + 1 = #α :=
add_one_eq (aleph_0_le_mk α)
protected lemma eq_of_add_eq_add_left {a b c : cardinal} (h : a + b = a + c) (ha : a < ℵ₀) :
b = c :=
begin
cases le_or_lt ℵ₀ b with hb hb,
{ have : a < b := ha.trans_le hb,
rw [add_eq_right hb this.le, eq_comm] at h,
rw [eq_of_add_eq_of_aleph_0_le h this hb] },
{ have hc : c < ℵ₀,
{ rw ←not_le, intro hc,
apply lt_irrefl ℵ₀, apply (hc.trans (self_le_add_left _ a)).trans_lt,
rw ←h, apply add_lt_aleph_0 ha hb },
rw lt_aleph_0 at *,
rcases ha with ⟨n, rfl⟩, rcases hb with ⟨m, rfl⟩, rcases hc with ⟨k, rfl⟩,
norm_cast at h ⊢, apply add_left_cancel h }
end
protected lemma eq_of_add_eq_add_right {a b c : cardinal} (h : a + b = c + b) (hb : b < ℵ₀) :
a = c :=
by { rw [add_comm a b, add_comm c b] at h, exact cardinal.eq_of_add_eq_add_left h hb }
@[simp] theorem aleph_add_aleph (o₁ o₂ : ordinal) : aleph o₁ + aleph o₂ = aleph (max o₁ o₂) :=
by rw [cardinal.add_eq_max (aleph_0_le_aleph o₁), max_aleph_eq]
theorem principal_add_ord {c : cardinal} (hc : ℵ₀ ≤ c) : ordinal.principal (+) c.ord :=
λ a b ha hb, by { rw [lt_ord, ordinal.card_add] at *, exact add_lt_of_lt hc ha hb }
theorem principal_add_aleph (o : ordinal) : ordinal.principal (+) (aleph o).ord :=
principal_add_ord $ aleph_0_le_aleph o
/-! ### Properties about power -/
theorem pow_le {κ μ : cardinal.{u}} (H1 : ℵ₀ ≤ κ) (H2 : μ < ℵ₀) : κ ^ μ ≤ κ :=
let ⟨n, H3⟩ := lt_aleph_0.1 H2 in
H3.symm ▸ (quotient.induction_on κ (λ α H1, nat.rec_on n
(lt_of_lt_of_le (by { rw [nat.cast_zero, power_zero], exact one_lt_aleph_0 }) H1).le
(λ n ih, trans_rel_left _
(by { rw [nat.cast_succ, power_add, power_one], exact mul_le_mul_right' ih _ })
(mul_eq_self H1))) H1)
theorem pow_eq {κ μ : cardinal.{u}} (H1 : ℵ₀ ≤ κ) (H2 : 1 ≤ μ) (H3 : μ < ℵ₀) : κ ^ μ = κ :=
(pow_le H1 H3).antisymm $ self_le_power κ H2
lemma power_self_eq {c : cardinal} (h : ℵ₀ ≤ c) : c ^ c = 2 ^ c :=
begin
apply ((power_le_power_right $ (cantor c).le).trans _).antisymm,
{ convert power_le_power_right ((nat_lt_aleph_0 2).le.trans h), apply nat.cast_two.symm },
{ rw [←power_mul, mul_eq_self h] }
end
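-- In particular, taking `c = ℵ₀` gives the cardinality of the continuum.
example : ℵ₀ ^ ℵ₀ = 2 ^ ℵ₀ := power_self_eq le_rfl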
lemma prod_eq_two_power {ι : Type u} [infinite ι] {c : ι → cardinal.{v}} (h₁ : ∀ i, 2 ≤ c i)
(h₂ : ∀ i, lift.{u} (c i) ≤ lift.{v} (#ι)) :
prod c = 2 ^ lift.{v} (#ι) :=
begin
rw [← lift_id' (prod c), lift_prod, ← lift_two_power],
apply le_antisymm,
{ refine (prod_le_prod _ _ h₂).trans_eq _,
rw [prod_const, lift_lift, ← lift_power, power_self_eq (aleph_0_le_mk ι), lift_umax.{u v}] },
{ rw [← prod_const', lift_prod],
refine prod_le_prod _ _ (λ i, _),
rw [lift_two, ← lift_two.{u v}, lift_le],
exact h₁ i }
end
lemma power_eq_two_power {c₁ c₂ : cardinal} (h₁ : ℵ₀ ≤ c₁) (h₂ : 2 ≤ c₂) (h₂' : c₂ ≤ c₁) :
c₂ ^ c₁ = 2 ^ c₁ :=
le_antisymm (power_self_eq h₁ ▸ power_le_power_right h₂') (power_le_power_right h₂)
lemma nat_power_eq {c : cardinal.{u}} (h : ℵ₀ ≤ c) {n : ℕ} (hn : 2 ≤ n) :
(n : cardinal.{u}) ^ c = 2 ^ c :=
power_eq_two_power h (by assumption_mod_cast) ((nat_lt_aleph_0 n).le.trans h)
lemma power_nat_le {c : cardinal.{u}} {n : ℕ} (h : ℵ₀ ≤ c) : c ^ n ≤ c :=
pow_le h (nat_lt_aleph_0 n)
lemma power_nat_eq {c : cardinal.{u}} {n : ℕ} (h1 : ℵ₀ ≤ c) (h2 : 1 ≤ n) : c ^ n = c :=
pow_eq h1 (by exact_mod_cast h2) (nat_lt_aleph_0 n)
lemma power_nat_le_max {c : cardinal.{u}} {n : ℕ} : c ^ (n : cardinal.{u}) ≤ max c ℵ₀ :=
begin
cases le_or_lt ℵ₀ c with hc hc,
{ exact le_max_of_le_left (power_nat_le hc) },
{ exact le_max_of_le_right ((power_lt_aleph_0 hc (nat_lt_aleph_0 _)).le) }
end
lemma powerlt_aleph_0 {c : cardinal} (h : ℵ₀ ≤ c) : c ^< ℵ₀ = c :=
begin
apply le_antisymm,
{ rw powerlt_le, intro c', rw lt_aleph_0, rintro ⟨n, rfl⟩, apply power_nat_le h },
convert le_powerlt c one_lt_aleph_0, rw power_one
end
lemma powerlt_aleph_0_le (c : cardinal) : c ^< ℵ₀ ≤ max c ℵ₀ :=
begin
cases le_or_lt ℵ₀ c,
{ rw powerlt_aleph_0 h, apply le_max_left },
rw powerlt_le,
exact λ c' hc', (power_lt_aleph_0 h hc').le.trans (le_max_right _ _)
end
/-! ### Computing cardinality of various types -/
@[simp] theorem mk_list_eq_mk (α : Type u) [infinite α] : #(list α) = #α :=
have H1 : ℵ₀ ≤ #α := aleph_0_le_mk α,
eq.symm $ le_antisymm ⟨⟨λ x, [x], λ x y H, (list.cons.inj H).1⟩⟩ $
calc #(list α)
= sum (λ n : ℕ, #α ^ (n : cardinal.{u})) : mk_list_eq_sum_pow α
... ≤ sum (λ n : ℕ, #α) : sum_le_sum _ _ $ λ n, pow_le H1 $ nat_lt_aleph_0 n
... = #α : by simp [H1]
theorem mk_list_eq_aleph_0 (α : Type u) [countable α] [nonempty α] : #(list α) = ℵ₀ :=
mk_le_aleph_0.antisymm (aleph_0_le_mk _)
theorem mk_list_eq_max_mk_aleph_0 (α : Type u) [nonempty α] : #(list α) = max (#α) ℵ₀ :=
begin
casesI finite_or_infinite α,
{ rw [mk_list_eq_aleph_0, eq_comm, max_eq_right],
exact mk_le_aleph_0 },
{ rw [mk_list_eq_mk, eq_comm, max_eq_left],
exact aleph_0_le_mk α }
end
theorem mk_list_le_max (α : Type u) : #(list α) ≤ max ℵ₀ (#α) :=
begin
casesI finite_or_infinite α,
{ exact mk_le_aleph_0.trans (le_max_left _ _) },
{ rw mk_list_eq_mk,
apply le_max_right }
end
@[simp] theorem mk_finset_of_infinite (α : Type u) [infinite α] : #(finset α) = #α :=
eq.symm $ le_antisymm (mk_le_of_injective (λ x y, finset.singleton_inj.1)) $
calc #(finset α) ≤ #(list α) : mk_le_of_surjective list.to_finset_surjective
... = #α : mk_list_eq_mk α
@[simp] lemma mk_finsupp_lift_of_infinite (α : Type u) (β : Type v) [infinite α] [has_zero β]
[nontrivial β] : #(α →₀ β) = max (lift.{v} (#α)) (lift.{u} (#β)) :=
begin
apply le_antisymm,
{ calc #(α →₀ β) ≤ # (finset (α × β)) : mk_le_of_injective (finsupp.graph_injective α β)
... = #(α × β) : mk_finset_of_infinite _
... = max (lift.{v} (#α)) (lift.{u} (#β)) :
by rw [mk_prod, mul_eq_max_of_aleph_0_le_left]; simp },
{ apply max_le;
rw [←lift_id (# (α →₀ β)), ←lift_umax],
{ cases exists_ne (0 : β) with b hb,
exact lift_mk_le.{u (max u v) v}.2 ⟨⟨_, finsupp.single_left_injective hb⟩⟩ },
{ inhabit α,
exact lift_mk_le.{v (max u v) u}.2 ⟨⟨_, finsupp.single_injective default⟩⟩ } }
end
lemma mk_finsupp_of_infinite (α β : Type u) [infinite α] [has_zero β]
[nontrivial β] : #(α →₀ β) = max (#α) (#β) :=
by simp
@[simp] lemma mk_finsupp_lift_of_infinite' (α : Type u) (β : Type v) [nonempty α]
[has_zero β] [infinite β] : #(α →₀ β) = max (lift.{v} (#α)) (lift.{u} (#β)) :=
begin
casesI fintype_or_infinite α,
{ rw mk_finsupp_lift_of_fintype,
have : ℵ₀ ≤ (#β).lift := aleph_0_le_lift.2 (aleph_0_le_mk β),
rw [max_eq_right (le_trans _ this), power_nat_eq this],
exacts [fintype.card_pos, lift_le_aleph_0.2 (lt_aleph_0_of_finite _).le] },
{ apply mk_finsupp_lift_of_infinite },
end
lemma mk_finsupp_of_infinite' (α β : Type u) [nonempty α] [has_zero β] [infinite β] :
#(α →₀ β) = max (#α) (#β) := by simp
lemma mk_finsupp_nat (α : Type u) [nonempty α] : #(α →₀ ℕ) = max (#α) ℵ₀ := by simp
@[simp] lemma mk_multiset_of_nonempty (α : Type u) [nonempty α] : #(multiset α) = max (#α) ℵ₀ :=
multiset.to_finsupp.to_equiv.cardinal_eq.trans (mk_finsupp_nat α)
lemma mk_multiset_of_infinite (α : Type u) [infinite α] : #(multiset α) = #α := by simp
@[simp] lemma mk_multiset_of_is_empty (α : Type u) [is_empty α] : #(multiset α) = 1 :=
multiset.to_finsupp.to_equiv.cardinal_eq.trans (by simp)
lemma mk_multiset_of_countable (α : Type u) [countable α] [nonempty α] : #(multiset α) = ℵ₀ :=
multiset.to_finsupp.to_equiv.cardinal_eq.trans (by simp)
lemma mk_bounded_set_le_of_infinite (α : Type u) [infinite α] (c : cardinal) :
#{t : set α // #t ≤ c} ≤ #α ^ c :=
begin
refine le_trans _ (by rw [←add_one_eq (aleph_0_le_mk α)]),
induction c using cardinal.induction_on with β,
fapply mk_le_of_surjective,
{ intro f, use sum.inl ⁻¹' range f,
refine le_trans (mk_preimage_of_injective _ _ (λ x y, sum.inl.inj)) _,
apply mk_range_le },
rintro ⟨s, ⟨g⟩⟩,
use λ y, if h : ∃(x : s), g x = y then sum.inl (classical.some h).val else sum.inr ⟨⟩,
apply subtype.eq, ext,
split,
{ rintro ⟨y, h⟩, dsimp only at h, by_cases h' : ∃ (z : s), g z = y,
{ rw [dif_pos h'] at h, cases sum.inl.inj h, exact (classical.some h').2 },
{ rw [dif_neg h'] at h, cases h }},
{ intro h, have : ∃(z : s), g z = g ⟨x, h⟩, exact ⟨⟨x, h⟩, rfl⟩,
use g ⟨x, h⟩, dsimp only, rw [dif_pos this], congr',
suffices : classical.some this = ⟨x, h⟩, exact congr_arg subtype.val this,
apply g.2, exact classical.some_spec this }
end
lemma mk_bounded_set_le (α : Type u) (c : cardinal) :
#{t : set α // #t ≤ c} ≤ max (#α) ℵ₀ ^ c :=
begin
transitivity #{t : set (ulift.{u} ℕ ⊕ α) // #t ≤ c},
{ refine ⟨embedding.subtype_map _ _⟩, apply embedding.image,
use sum.inr, apply sum.inr.inj, intros s hs, exact mk_image_le.trans hs },
apply (mk_bounded_set_le_of_infinite (ulift.{u} ℕ ⊕ α) c).trans,
rw [max_comm, ←add_eq_max]; refl
end
lemma mk_bounded_subset_le {α : Type u} (s : set α) (c : cardinal.{u}) :
#{t : set α // t ⊆ s ∧ #t ≤ c} ≤ max (#s) ℵ₀ ^ c :=
begin
refine le_trans _ (mk_bounded_set_le s c),
refine ⟨embedding.cod_restrict _ _ _⟩,
use λ t, coe ⁻¹' t.1,
{ rintros ⟨t, ht1, ht2⟩ ⟨t', h1t', h2t'⟩ h, apply subtype.eq, dsimp only at h ⊢,
refine (preimage_eq_preimage' _ _).1 h; rw [subtype.range_coe]; assumption },
rintro ⟨t, h1t, h2t⟩, exact (mk_preimage_of_injective _ _ subtype.val_injective).trans h2t
end
/-! ### Properties of `compl` -/
lemma mk_compl_of_infinite {α : Type*} [infinite α] (s : set α) (h2 : #s < #α) :
#(sᶜ : set α) = #α :=
by { refine eq_of_add_eq_of_aleph_0_le _ h2 (aleph_0_le_mk α), exact mk_sum_compl s }
lemma mk_compl_finset_of_infinite {α : Type*} [infinite α] (s : finset α) :
#((↑s)ᶜ : set α) = #α :=
by { apply mk_compl_of_infinite, exact (finset_card_lt_aleph_0 s).trans_le (aleph_0_le_mk α) }
lemma mk_compl_eq_mk_compl_infinite {α : Type*} [infinite α] {s t : set α} (hs : #s < #α)
(ht : #t < #α) : #(sᶜ : set α) = #(tᶜ : set α) :=
by { rw [mk_compl_of_infinite s hs, mk_compl_of_infinite t ht] }
lemma mk_compl_eq_mk_compl_finite_lift {α : Type u} {β : Type v} [fintype α]
{s : set α} {t : set β} (h1 : lift.{max v w} (#α) = lift.{max u w} (#β))
(h2 : lift.{max v w} (#s) = lift.{max u w} (#t)) :
lift.{max v w} (#(sᶜ : set α)) = lift.{max u w} (#(tᶜ : set β)) :=
begin
rcases lift_mk_eq.1 h1 with ⟨e⟩, letI : fintype β := fintype.of_equiv α e,
replace h1 : fintype.card α = fintype.card β := (fintype.of_equiv_card _).symm,
classical,
lift s to finset α using s.to_finite,
lift t to finset β using t.to_finite,
simp only [finset.coe_sort_coe, mk_coe_finset, lift_nat_cast, nat.cast_inj] at h2,
simp only [← finset.coe_compl, finset.coe_sort_coe, mk_coe_finset, finset.card_compl,
lift_nat_cast, nat.cast_inj, h1, h2]
end
lemma mk_compl_eq_mk_compl_finite {α β : Type u} [fintype α] {s : set α} {t : set β}
(h1 : #α = #β) (h : #s = #t) : #(sᶜ : set α) = #(tᶜ : set β) :=
by { rw ← lift_inj, apply mk_compl_eq_mk_compl_finite_lift; rwa [lift_inj] }
lemma mk_compl_eq_mk_compl_finite_same {α : Type*} [fintype α] {s t : set α}
(h : #s = #t) : #(sᶜ : set α) = #(tᶜ : set α) :=
mk_compl_eq_mk_compl_finite rfl h
/-! ### Extending an injection to an equiv -/
theorem extend_function {α β : Type*} {s : set α} (f : s ↪ β)
(h : nonempty ((sᶜ : set α) ≃ ((range f)ᶜ : set β))) :
∃ (g : α ≃ β), ∀ x : s, g x = f x :=
begin
intros, have := h, cases this with g,
let h : α ≃ β := (set.sum_compl (s : set α)).symm.trans
((sum_congr (equiv.of_injective f f.2) g).trans
(set.sum_compl (range f))),
refine ⟨h, _⟩, rintro ⟨x, hx⟩, simp [set.sum_compl_symm_apply_of_mem, hx]
end
theorem extend_function_finite {α β : Type*} [fintype α] {s : set α} (f : s ↪ β)
(h : nonempty (α ≃ β)) : ∃ (g : α ≃ β), ∀ x : s, g x = f x :=
begin
apply extend_function f,
cases id h with g,
rw [← lift_mk_eq] at h,
rw [←lift_mk_eq, mk_compl_eq_mk_compl_finite_lift h],
rw [mk_range_eq_lift], exact f.2
end
theorem extend_function_of_lt {α β : Type*} {s : set α} (f : s ↪ β) (hs : #s < #α)
(h : nonempty (α ≃ β)) : ∃ (g : α ≃ β), ∀ x : s, g x = f x :=
begin
casesI fintype_or_infinite α,
{ exact extend_function_finite f h },
{ apply extend_function f, cases id h with g, haveI := infinite.of_injective _ g.injective,
rw [← lift_mk_eq'] at h ⊢,
rwa [mk_compl_of_infinite s hs, mk_compl_of_infinite],
rwa [← lift_lt, mk_range_eq_of_injective f.injective, ← h, lift_lt] },
end
section bit
/-!
This section proves inequalities for `bit0` and `bit1`, enabling `simp` to solve inequalities
for numeral cardinals. The complexity of the resulting algorithm is not good, as in some cases
`simp` reduces an inequality to a disjunction of two situations, depending on whether a cardinal
is finite or infinite. Since the evaluation of the branches is not lazy, this is bad. It is good
enough for practical situations, though.
For specific numbers, these inequalities could also be deduced from the corresponding
inequalities of natural numbers using `norm_cast`:
```
example : (37 : cardinal) < 42 :=
by { norm_cast, norm_num }
```
-/
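-- An extra illustration (not in the original file) of the `norm_cast`/`norm_num` route
-- described above, for a different pair of numerals; it relies only on the cast lemmas
-- already used elsewhere in this file.
example : (5 : cardinal) < 8 :=
by { norm_cast, norm_num }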
lemma bit0_ne_zero (a : cardinal) : ¬bit0 a = 0 ↔ ¬a = 0 :=
by simp [bit0]
@[simp] lemma bit1_ne_zero (a : cardinal) : ¬bit1 a = 0 :=
by simp [bit1]
@[simp] lemma zero_lt_bit0 (a : cardinal) : 0 < bit0 a ↔ 0 < a :=
by { rw ←not_iff_not, simp [bit0], }
@[simp] lemma zero_lt_bit1 (a : cardinal) : 0 < bit1 a :=
zero_lt_one.trans_le (self_le_add_left _ _)
@[simp] lemma one_le_bit0 (a : cardinal) : 1 ≤ bit0 a ↔ 0 < a :=
⟨λ h, (zero_lt_bit0 a).mp (zero_lt_one.trans_le h),
λ h, (one_le_iff_pos.mpr h).trans (self_le_add_left a a)⟩
@[simp] lemma one_le_bit1 (a : cardinal) : 1 ≤ bit1 a :=
self_le_add_left _ _
theorem bit0_eq_self {c : cardinal} (h : ℵ₀ ≤ c) : bit0 c = c :=
add_eq_self h
@[simp] theorem bit0_lt_aleph_0 {c : cardinal} : bit0 c < ℵ₀ ↔ c < ℵ₀ :=
by simp [bit0, add_lt_aleph_0_iff]
@[simp] theorem aleph_0_le_bit0 {c : cardinal} : ℵ₀ ≤ bit0 c ↔ ℵ₀ ≤ c :=
by { rw ←not_iff_not, simp }
@[simp] theorem bit1_eq_self_iff {c : cardinal} : bit1 c = c ↔ ℵ₀ ≤ c :=
begin
by_cases h : ℵ₀ ≤ c,
{ simp only [bit1, bit0_eq_self h, h, eq_self_iff_true, add_one_of_aleph_0_le] },
{ refine iff_of_false (ne_of_gt _) h,
rcases lt_aleph_0.1 (not_le.1 h) with ⟨n, rfl⟩,
norm_cast,
dsimp [bit1, bit0],
linarith }
end
@[simp] theorem bit1_lt_aleph_0 {c : cardinal} : bit1 c < ℵ₀ ↔ c < ℵ₀ :=
by simp [bit1, bit0, add_lt_aleph_0_iff, one_lt_aleph_0]
@[simp] theorem aleph_0_le_bit1 {c : cardinal} : ℵ₀ ≤ bit1 c ↔ ℵ₀ ≤ c :=
by { rw ←not_iff_not, simp }
@[simp] lemma bit0_le_bit0 {a b : cardinal} : bit0 a ≤ bit0 b ↔ a ≤ b :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ rw [bit0_eq_self ha, bit0_eq_self hb] },
{ rw bit0_eq_self ha,
refine iff_of_false (λ h, _) (hb.trans_le ha).not_le,
have A : bit0 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans_le h) },
{ rw bit0_eq_self hb,
exact iff_of_true ((bit0_lt_aleph_0.2 ha).le.trans hb) (ha.le.trans hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
exact bit0_le_bit0 }
end
@[simp] lemma bit0_le_bit1 {a b : cardinal} : bit0 a ≤ bit1 b ↔ a ≤ b :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ rw [bit0_eq_self ha, bit1_eq_self_iff.2 hb] },
{ rw bit0_eq_self ha,
refine iff_of_false (λ h, _) (hb.trans_le ha).not_le,
have A : bit1 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans_le h) },
{ rw bit1_eq_self_iff.2 hb,
exact iff_of_true ((bit0_lt_aleph_0.2 ha).le.trans hb) (ha.le.trans hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
exact nat.bit0_le_bit1_iff }
end
@[simp] lemma bit1_le_bit1 {a b : cardinal} : bit1 a ≤ bit1 b ↔ a ≤ b :=
⟨λ h, bit0_le_bit1.1 ((self_le_add_right (bit0 a) 1).trans h), λ h,
(add_le_add_right (add_le_add_left h a) 1).trans (add_le_add_right (add_le_add_right h b) 1)⟩
@[simp] lemma bit1_le_bit0 {a b : cardinal} : bit1 a ≤ bit0 b ↔ (a < b ∨ (a ≤ b ∧ ℵ₀ ≤ a)) :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ simp only [bit1_eq_self_iff.mpr ha, bit0_eq_self hb, ha, and_true],
refine ⟨λ h, or.inr h, λ h, _⟩,
cases h,
{ exact le_of_lt h },
{ exact h } },
{ rw bit1_eq_self_iff.2 ha,
refine iff_of_false (λ h, _) (λ h, _),
{ have A : bit0 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans_le h) },
{ exact not_le_of_lt (hb.trans_le ha) (h.elim le_of_lt and.left) } },
{ rw bit0_eq_self hb,
exact iff_of_true ((bit1_lt_aleph_0.2 ha).le.trans hb) (or.inl $ ha.trans_le hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
simp [not_le.mpr ha] }
end
@[simp] lemma bit0_lt_bit0 {a b : cardinal} : bit0 a < bit0 b ↔ a < b :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ rw [bit0_eq_self ha, bit0_eq_self hb] },
{ rw bit0_eq_self ha,
refine iff_of_false (λ h, _) (hb.le.trans ha).not_lt,
have A : bit0 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans h) },
{ rw bit0_eq_self hb,
exact iff_of_true ((bit0_lt_aleph_0.2 ha).trans_le hb) (ha.trans_le hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
exact bit0_lt_bit0 }
end
@[simp] lemma bit1_lt_bit0 {a b : cardinal} : bit1 a < bit0 b ↔ a < b :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ rw [bit1_eq_self_iff.2 ha, bit0_eq_self hb] },
{ rw bit1_eq_self_iff.2 ha,
refine iff_of_false (λ h, _) (hb.le.trans ha).not_lt,
have A : bit0 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans h) },
{ rw bit0_eq_self hb,
exact iff_of_true ((bit1_lt_aleph_0.2 ha).trans_le hb) (ha.trans_le hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
exact nat.bit1_lt_bit0_iff }
end
@[simp] lemma bit1_lt_bit1 {a b : cardinal} : bit1 a < bit1 b ↔ a < b :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ rw [bit1_eq_self_iff.2 ha, bit1_eq_self_iff.2 hb] },
{ rw bit1_eq_self_iff.2 ha,
refine iff_of_false (λ h, _) (hb.le.trans ha).not_lt,
have A : bit1 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans h) },
{ rw bit1_eq_self_iff.2 hb,
exact iff_of_true ((bit1_lt_aleph_0.2 ha).trans_le hb) (ha.trans_le hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
exact bit1_lt_bit1 }
end
@[simp] lemma bit0_lt_bit1 {a b : cardinal} : bit0 a < bit1 b ↔ (a < b ∨ (a ≤ b ∧ a < ℵ₀)) :=
begin
cases le_or_lt ℵ₀ a with ha ha; cases le_or_lt ℵ₀ b with hb hb,
{ simp [bit0_eq_self ha, bit1_eq_self_iff.2 hb, not_lt.mpr ha] },
{ rw bit0_eq_self ha,
refine iff_of_false (λ h, _) (λ h, _),
{ have A : bit1 b < ℵ₀, by simpa using hb,
exact lt_irrefl _ ((A.trans_le ha).trans h) },
{ exact (hb.trans_le ha).not_le (h.elim le_of_lt and.left) } },
{ rw [bit1_eq_self_iff.2 hb],
exact iff_of_true ((bit0_lt_aleph_0.2 ha).trans_le hb) (or.inl $ ha.trans_le hb) },
{ rcases lt_aleph_0.1 ha with ⟨m, rfl⟩,
rcases lt_aleph_0.1 hb with ⟨n, rfl⟩,
norm_cast,
simp only [ha, and_true, nat.bit0_lt_bit1_iff, or_iff_right_of_imp le_of_lt] }
end
lemma one_lt_two : (1 : cardinal) < 2 :=
-- This strategy works generally to prove inequalities between numerals in `cardinal`.
by { norm_cast, norm_num }
@[simp] lemma one_lt_bit0 {a : cardinal} : 1 < bit0 a ↔ 0 < a :=
by simp [←bit1_zero]
@[simp] lemma one_lt_bit1 (a : cardinal) : 1 < bit1 a ↔ 0 < a :=
by simp [←bit1_zero]
end bit
end cardinal
|
708b74f9376f0d9f5f5d24208bf0b368b0a1aa5e
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/analysis/normed/group/infinite_sum.lean
|
79d5aa4ad15c57fcf7fb9798296599eda7464b0d
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 8,522
|
lean
|
/-
Copyright (c) 2021 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Heather Macbeth, Johannes Hölzl, Yury Kudryashov
-/
import analysis.normed.group.basic
import topology.instances.nnreal
/-!
# Infinite sums in (semi)normed groups
In a complete (semi)normed group,
- `summable_iff_vanishing_norm`: a series `∑' i, f i` is summable if and only if for any `ε > 0`,
there exists a finite set `s` such that the sum `∑ i in t, f i` over any finite set `t` disjoint
from `s` has norm less than `ε`;
- `summable_of_norm_bounded`, `summable_of_norm_bounded_eventually`: if each `∥f i∥` is bounded
above by the corresponding term `g i` of a summable series `∑' i, g i`, then `∑' i, f i` is
summable as well; the same is true if the inequality holds only off some finite set.
- `tsum_of_norm_bounded`, `has_sum.norm_le_of_bounded`: if `∥f i∥ ≤ g i`, where `∑' i, g i` is a
summable series, then `∥∑' i, f i∥ ≤ ∑' i, g i`.
## Tags
infinite series, absolute convergence, normed group
-/
open_locale classical big_operators topological_space nnreal
open finset filter metric
variables {ι α E F : Type*} [seminormed_add_comm_group E] [seminormed_add_comm_group F]
lemma cauchy_seq_finset_iff_vanishing_norm {f : ι → E} :
cauchy_seq (λ s : finset ι, ∑ i in s, f i) ↔
∀ε > (0 : ℝ), ∃s:finset ι, ∀t, disjoint t s → ∥ ∑ i in t, f i ∥ < ε :=
begin
rw [cauchy_seq_finset_iff_vanishing, nhds_basis_ball.forall_iff],
{ simp only [ball_zero_eq, set.mem_set_of_eq] },
{ rintros s t hst ⟨s', hs'⟩,
exact ⟨s', λ t' ht', hst $ hs' _ ht'⟩ }
end
lemma summable_iff_vanishing_norm [complete_space E] {f : ι → E} :
summable f ↔ ∀ε > (0 : ℝ), ∃s:finset ι, ∀t, disjoint t s → ∥ ∑ i in t, f i ∥ < ε :=
by rw [summable_iff_cauchy_seq_finset, cauchy_seq_finset_iff_vanishing_norm]
lemma cauchy_seq_finset_of_norm_bounded_eventually {f : ι → E} {g : ι → ℝ} (hg : summable g)
(h : ∀ᶠ i in cofinite, ∥f i∥ ≤ g i) : cauchy_seq (λ s, ∑ i in s, f i) :=
begin
refine cauchy_seq_finset_iff_vanishing_norm.2 (λ ε hε, _),
rcases summable_iff_vanishing_norm.1 hg ε hε with ⟨s, hs⟩,
refine ⟨s ∪ h.to_finset, λ t ht, _⟩,
have : ∀ i ∈ t, ∥f i∥ ≤ g i,
{ intros i hi,
simp only [disjoint_left, mem_union, not_or_distrib, h.mem_to_finset, set.mem_compl_iff,
not_not] at ht,
exact (ht hi).2 },
calc ∥∑ i in t, f i∥ ≤ ∑ i in t, g i : norm_sum_le_of_le _ this
... ≤ ∥∑ i in t, g i∥ : le_abs_self _
... < ε : hs _ (ht.mono_right le_sup_left),
end
lemma cauchy_seq_finset_of_norm_bounded {f : ι → E} (g : ι → ℝ) (hg : summable g)
(h : ∀i, ∥f i∥ ≤ g i) : cauchy_seq (λ s : finset ι, ∑ i in s, f i) :=
cauchy_seq_finset_of_norm_bounded_eventually hg $ eventually_of_forall h
/-- A version of the **direct comparison test** for conditionally convergent series.
See `cauchy_seq_finset_of_norm_bounded` for the same statement about absolutely convergent ones. -/
lemma cauchy_seq_range_of_norm_bounded {f : ℕ → E} (g : ℕ → ℝ)
(hg : cauchy_seq (λ n, ∑ i in range n, g i)) (hf : ∀ i, ∥f i∥ ≤ g i) :
cauchy_seq (λ n, ∑ i in range n, f i) :=
begin
refine metric.cauchy_seq_iff'.2 (λ ε hε, _),
refine (metric.cauchy_seq_iff'.1 hg ε hε).imp (λ N hg n hn, _),
specialize hg n hn,
rw [dist_eq_norm, ←sum_Ico_eq_sub _ hn] at ⊢ hg,
calc ∥∑ k in Ico N n, f k∥
≤ ∑ k in _, ∥f k∥ : norm_sum_le _ _
... ≤ ∑ k in _, g k : sum_le_sum (λ x _, hf x)
... ≤ ∥∑ k in _, g k∥ : le_abs_self _
... < ε : hg
end
lemma cauchy_seq_finset_of_summable_norm {f : ι → E} (hf : summable (λa, ∥f a∥)) :
cauchy_seq (λ s : finset ι, ∑ a in s, f a) :=
cauchy_seq_finset_of_norm_bounded _ hf (assume i, le_rfl)
/-- If a function `f` is summable in norm, and along some sequence of finsets exhausting the space
its sum converges to a limit `a`, then the same holds along all finsets, i.e., `f` is summable
with sum `a`. -/
lemma has_sum_of_subseq_of_summable {f : ι → E} (hf : summable (λa, ∥f a∥))
{s : α → finset ι} {p : filter α} [ne_bot p]
(hs : tendsto s p at_top) {a : E} (ha : tendsto (λ b, ∑ i in s b, f i) p (𝓝 a)) :
has_sum f a :=
tendsto_nhds_of_cauchy_seq_of_subseq (cauchy_seq_finset_of_summable_norm hf) hs ha
lemma has_sum_iff_tendsto_nat_of_summable_norm {f : ℕ → E} {a : E} (hf : summable (λi, ∥f i∥)) :
has_sum f a ↔ tendsto (λn:ℕ, ∑ i in range n, f i) at_top (𝓝 a) :=
⟨λ h, h.tendsto_sum_nat,
λ h, has_sum_of_subseq_of_summable hf tendsto_finset_range h⟩
/-- The direct comparison test for series: if the norm of `f` is bounded by a real function `g`
which is summable, then `f` is summable. -/
lemma summable_of_norm_bounded
[complete_space E] {f : ι → E} (g : ι → ℝ) (hg : summable g) (h : ∀i, ∥f i∥ ≤ g i) :
summable f :=
by { rw summable_iff_cauchy_seq_finset, exact cauchy_seq_finset_of_norm_bounded g hg h }
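-- Illustrative sketch (not part of the original file): instantiating the comparison test at
-- `E := ℝ`, whose `complete_space` and norm instances are expected to be available from the
-- imports above; the bounding series `g` is just a hypothesis here.
example {f g : ℕ → ℝ} (hg : summable g) (h : ∀ i, ∥f i∥ ≤ g i) : summable f :=
summable_of_norm_bounded g hg h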
lemma has_sum.norm_le_of_bounded {f : ι → E} {g : ι → ℝ} {a : E} {b : ℝ}
(hf : has_sum f a) (hg : has_sum g b) (h : ∀ i, ∥f i∥ ≤ g i) :
∥a∥ ≤ b :=
le_of_tendsto_of_tendsto' hf.norm hg $ λ s, norm_sum_le_of_le _ $ λ i hi, h i
/-- Quantitative result associated to the direct comparison test for series: If `∑' i, g i` is
summable, and for all `i`, `∥f i∥ ≤ g i`, then `∥∑' i, f i∥ ≤ ∑' i, g i`. Note that we do not
assume that `∑' i, f i` is summable, and it might not be the case if `E` is not a complete space. -/
lemma tsum_of_norm_bounded {f : ι → E} {g : ι → ℝ} {a : ℝ} (hg : has_sum g a)
(h : ∀ i, ∥f i∥ ≤ g i) :
∥∑' i : ι, f i∥ ≤ a :=
begin
by_cases hf : summable f,
{ exact hf.has_sum.norm_le_of_bounded hg h },
{ rw [tsum_eq_zero_of_not_summable hf, norm_zero],
exact ge_of_tendsto' hg (λ s, sum_nonneg $ λ i hi, (norm_nonneg _).trans (h i)) }
end
/-- If `∑' i, ∥f i∥` is summable, then `∥∑' i, f i∥ ≤ (∑' i, ∥f i∥)`. Note that we do not assume
that `∑' i, f i` is summable, and it might not be the case if `E` is not a complete space. -/
lemma norm_tsum_le_tsum_norm {f : ι → E} (hf : summable (λi, ∥f i∥)) :
∥∑' i, f i∥ ≤ ∑' i, ∥f i∥ :=
tsum_of_norm_bounded hf.has_sum $ λ i, le_rfl
/-- Quantitative result associated to the direct comparison test for series: If `∑' i, g i` is
summable, and for all `i`, `∥f i∥₊ ≤ g i`, then `∥∑' i, f i∥₊ ≤ ∑' i, g i`. Note that we
do not assume that `∑' i, f i` is summable, and it might not be the case if `E` is not a complete
space. -/
lemma tsum_of_nnnorm_bounded {f : ι → E} {g : ι → ℝ≥0} {a : ℝ≥0} (hg : has_sum g a)
(h : ∀ i, ∥f i∥₊ ≤ g i) :
∥∑' i : ι, f i∥₊ ≤ a :=
begin
simp only [← nnreal.coe_le_coe, ← nnreal.has_sum_coe, coe_nnnorm] at *,
exact tsum_of_norm_bounded hg h
end
/-- If `∑' i, ∥f i∥₊` is summable, then `∥∑' i, f i∥₊ ≤ ∑' i, ∥f i∥₊`. Note that
we do not assume that `∑' i, f i` is summable, and it might not be the case if `E` is not a
complete space. -/
lemma nnnorm_tsum_le {f : ι → E} (hf : summable (λi, ∥f i∥₊)) :
∥∑' i, f i∥₊ ≤ ∑' i, ∥f i∥₊ :=
tsum_of_nnnorm_bounded hf.has_sum (λ i, le_rfl)
variable [complete_space E]
/-- Variant of the direct comparison test for series: if the norm of `f` is eventually bounded by a
real function `g` which is summable, then `f` is summable. -/
lemma summable_of_norm_bounded_eventually {f : ι → E} (g : ι → ℝ) (hg : summable g)
(h : ∀ᶠ i in cofinite, ∥f i∥ ≤ g i) : summable f :=
summable_iff_cauchy_seq_finset.2 $ cauchy_seq_finset_of_norm_bounded_eventually hg h
lemma summable_of_nnnorm_bounded {f : ι → E} (g : ι → ℝ≥0) (hg : summable g)
(h : ∀i, ∥f i∥₊ ≤ g i) : summable f :=
summable_of_norm_bounded (λ i, (g i : ℝ)) (nnreal.summable_coe.2 hg) (λ i, by exact_mod_cast h i)
lemma summable_of_summable_norm {f : ι → E} (hf : summable (λa, ∥f a∥)) : summable f :=
summable_of_norm_bounded _ hf (assume i, le_rfl)
lemma summable_of_summable_nnnorm {f : ι → E} (hf : summable (λ a, ∥f a∥₊)) : summable f :=
summable_of_nnnorm_bounded _ hf (assume i, le_rfl)
|
3ab961c3cbad05d887828171c91c2c740966cfdb
|
3fed20a6f59e2663e48ee3bfc33dbc79256b4a50
|
/src/test.lean
|
99948834abedc93555bf117cc4fc612f817d252e
|
[] |
no_license
|
arademaker/alc-lean
|
575203dae75f466cc686831d8b0d230fc3c00ced
|
46d5b582d4272493a26d6d47c0bfa0622c52aae4
|
refs/heads/master
| 1,622,696,540,378
| 1,618,442,995,000
| 1,618,442,995,000
| 37,130,780
| 4
| 3
| null | 1,618,365,416,000
| 1,433,853,111,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,992
|
lean
|
import alc
namespace test
open ALC
open ALC.Concept
inductive ac : Type
| man : ac
| woman : ac
inductive ar : Type
| hasChild : ar
@[reducible]
def ic : VarConcept → set ℕ
| 1 := ({2,4} : set ℕ)
| 2 := ({1,3} : set ℕ)
@[reducible]
def ir : VarRole → set (ℕ × ℕ)
| _ := ({(1,2),(4,3)} : set (ℕ × ℕ))
@[reducible]
def i := Interpretation.mk ℕ ic ir
-- below, the concept is not reduced to {2,4} but to an equivalent λ-term.
#check C#1
#check Concept.Atomic 1
#reduce interp i (C#1)
#reduce interp i (Some (R#1) (C#1))
-- ∀ hasChild ∃ hasChild man
-- Ax hasChild Ex hasChild man
#reduce Ax (R#2) : (Ex (R#1) : (C#1))
-- Ax hasChild Ex hasChild man equiv Ex hasChild Ax hasChild ¬ man
#reduce interp i (Ex (R#1) : ¬ₐ (C#1))
#reduce interp i ¬ₐ (Ax (R#1) : (C#2))
-- list of labels [∀ hasChild,∃ hasChild]
#reduce list.head [Label.Forall (Role.Atomic ar.hasChild),Label.Exist (Role.Atomic ar.hasChild)]
#check R#1
#reduce ir 1
-- instead of 'computing' concepts, let us prove things about the interpretation
example :
interp i (Some (R#1) (C#1)) = ({1} : set ℕ) :=
begin
dsimp [interp,r_interp,i],
rw [ic,ir],
ext n,
apply iff.intro,
{ intro h1,
simp at *,
apply (exists.elim h1),
simp, intros a ha hb,
finish,
},
{ intros h1,
norm_num at h1,
rw h1,
apply exists.intro 2,
finish,
}
end
-- Mario's proof
example :
interp i (Every (R#1) (C#1)) = ({4}ᶜ : set ℕ) :=
begin
ext n,
simp [interp, r_interp, ir, ic],
split,
{ rintro h rfl,
have := h 3, revert this,
norm_num,
},
{ rintro h _ (⟨rfl, rfl⟩|⟨rfl, rfl⟩), {norm_num},
cases h rfl },
end
-- if i, ic and ir were not 'reducible'
example :
interp i (Every (R#1) (C#1)) = ({4}ᶜ : set ℕ) :=
begin
dsimp [i, interp],
ext n,
simp [r_interp, ir],
split,
{ rintro H rfl,
have := H 3, revert this, rw ic at *,
norm_num },
{ rintro h _ (⟨rfl, rfl⟩|⟨rfl, rfl⟩), rw ic at *, {norm_num},
cases h rfl },
end
-- a detailed proof
example :
interp i (Every (R#1) (C#1)) = ({4}ᶜ : set ℕ) :=
begin
ext n,
simp [interp, r_interp, ir, ic],
split,
{ rintro h rfl,
have := h 3, revert this,
norm_num,
},
{ intros h1 a h2,
cases h2 with h2a h2b,
exact or.inl h2a.2,
exfalso, exact h1 h2b.left,
},
end
-- detailed proofs for the steps closed with 'finish' above.
example (h : 1 = 4) : false :=
begin
-- if succ 0 = succ 3 then 0 = 3 because succ is injective
have h1 := (nat.succ_injective h),
apply nat.succ_ne_zero _ h1.symm,
end
example (n a : ℕ) (ha : n = 1 ∧ a = 2 ∨ n = 4 ∧ a = 3)
(hb : a = 2 ∨ a = 4) : n = 1 :=
begin
by_contradiction,
cases hb with hb1 hb2,
cases ha with ha1 ha2,
exact h ha1.1,
have hx := and.intro ha2.2 hb1,
finish,
finish,
end
example (α : Type*) (p q : α → Prop) : (∀ x : α, p x ∧ q x) → ∀ y : α, p y :=
begin
intros h x,
exact (h x).1,
end
end test
|
202a48b105aa25e58e5fa822c6d1e13daa83a7cf
|
9d2e3d5a2e2342a283affd97eead310c3b528a24
|
/src/hints/thursday/afternoon/category_theory/exercise3/hint7.lean
|
8af891402cbb9914355657e743e46fda97e77058
|
[] |
permissive
|
Vtec234/lftcm2020
|
ad2610ab614beefe44acc5622bb4a7fff9a5ea46
|
bbbd4c8162f8c2ef602300ab8fdeca231886375d
|
refs/heads/master
| 1,668,808,098,623
| 1,594,989,081,000
| 1,594,990,079,000
| 280,423,039
| 0
| 0
|
MIT
| 1,594,990,209,000
| 1,594,990,209,000
| null |
UTF-8
|
Lean
| false
| false
| 781
|
lean
|
import for_mathlib.category_theory -- This imports some simp lemmas that I realised belong in mathlib while writing this exercise.
open category_theory
variables {C : Type*} [category C]
variables {D : Type*} [category D]
lemma equiv_preserves_mono {X Y : C} (f : X ⟶ Y) [mono f] (e : C ≌ D) :
mono (e.functor.map f) :=
begin
tidy,
replace w := congr_arg (λ k, e.inverse.map k) w,
simp at w,
  -- We can see that `w` is now in a position where we can use the hypothesis `mono f`.
-- However there's a problem, which is that `A ≫ B ≫ C` is implicitly right-associated,
-- so we can't directly use `rw cancel_mono f at w`.
-- We first need to shuffle the parentheses around using associativity:
rw [←category.assoc] at w, -- and maybe a second time?
end
|
58c4a0a97a949843fac305d86c67633ada284c32
|
130c49f47783503e462c16b2eff31933442be6ff
|
/tests/lean/run/trans.lean
|
90913f80fb7336cf747a87b31ac3fb4c02613eba
|
[
"Apache-2.0"
] |
permissive
|
Hazel-Brown/lean4
|
8aa5860e282435ffc30dcdfccd34006c59d1d39c
|
79e6732fc6bbf5af831b76f310f9c488d44e7a16
|
refs/heads/master
| 1,689,218,208,951
| 1,629,736,869,000
| 1,629,736,896,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 597
|
lean
|
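-- A minimal `Trans` typeclass for chaining proofs of transitive relations; the instances and
-- the theorems `ex1`/`ex2` below exercise it for `≤` on `Nat` and `Int`.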
class Trans (r : α → β → Prop) (s : β → γ → Prop) (t : outParam (α → γ → Prop)) where
trans : r a b → s b c → t a c
export Trans (trans)
instance : Trans (α := Nat) (β := Nat) (γ := Nat) (.≤.) (.≤.) (.≤.) where
trans := Nat.le_trans
instance : Trans (α := Int) (β := Int) (γ := Int) (.≤.) (.≤.) (.≤.) where
trans := sorry
theorem ex1 {a b c d : Nat} (h1 : a ≤ b) (h2 : b ≤ c) (h3 : c ≤ d) : a ≤ d :=
trans h1 <| trans h2 h3
theorem ex2 {a b c d : Int} (h1 : a ≤ b) (h2 : b ≤ c) (h3 : c ≤ d) : a ≤ d :=
trans h1 <| trans h2 h3
|
273cfa611c5d3aeecf55bf6d450520383e8d6c14
|
ce89339993655da64b6ccb555c837ce6c10f9ef4
|
/bluejam/topprover/31.lean
|
748c6d62abc4eebe7b160f4377ef36a3e8cf406a
|
[] |
no_license
|
zeptometer/LearnLean
|
ef32dc36a22119f18d843f548d0bb42f907bff5d
|
bb84d5dbe521127ba134d4dbf9559b294a80b9f7
|
refs/heads/master
| 1,625,710,824,322
| 1,601,382,570,000
| 1,601,382,570,000
| 195,228,870
| 2
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,915
|
lean
|
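-- `binom m n` is the binomial coefficient "m choose n", defined via Pascal's rule
-- `binom (m + 1) (n + 1) = binom m (n + 1) + binom m n`, and `binom_sum n k` is the partial sum
-- `binom n 0 + ⋯ + binom n k`.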
def binom: ℕ → ℕ → ℕ
| _ 0 := 1
| 0 _ := 0
| (m + 1) (n + 1) := binom m (n + 1) + binom m n
def binom_sum: ℕ → ℕ → ℕ
| n 0 := binom n 0
| n (k + 1) := binom_sum n k + binom n (k + 1)
lemma is_zero: ∀ k, binom 0 (k + 1) = 0 :=
begin
intros,
induction k,
simp [binom],
assumption
end
lemma binom_sum_zero: ∀ (n : ℕ), binom_sum 0 n = 1 :=
begin
intros,
induction n,
simp [binom_sum, binom],
simp [binom_sum, binom],
rw [n_ih, is_zero]
end
lemma binom_exceeds: ∀ (n : ℕ), ∀ (k : ℕ), binom n (n + 1 + k) = 0 :=
begin
intro n,
induction n,
simp [is_zero],
intros,
rw [←nat.add_one],
calc binom (n_n + 1) (n_n + 1 + 1 + k)
= binom (n_n + 1) ((n_n + 1 + k) + 1)
: by simp [nat.add_assoc, nat.add_comm]
... = binom n_n (n_n + 1 + k + 1) + binom n_n (n_n + 1 + k)
: by rw [binom]
... = binom n_n (n_n + 1 + k + 1) + 0 : by rw [n_ih]
... = binom n_n (n_n + 1 + (k + 1))
: by simp [nat.add_zero, nat.add_assoc]
... = 0 : by rw [n_ih]
end
lemma binom_sum_continue: ∀ n, binom_sum n (n + 1) = binom_sum n n :=
begin
intros,
simp [binom_sum, binom_exceeds n 0]
end
lemma binom_sum_split: ∀ (n: ℕ), ∀ (k: ℕ), binom_sum (n + 1) (k + 1)
= binom_sum n (k + 1) + binom_sum n k :=
begin
intro n, induction n,
intro k, induction k,
simp [binom, binom_sum],
rw [binom_sum_zero] at *,
simp * at *,
assumption,
intro k, induction k,
simp [binom_sum, binom],
calc binom_sum (nat.succ n_n + 1) (nat.succ k_n + 1)
= binom_sum (nat.succ n_n + 1) (nat.succ k_n)
+ binom (nat.succ n_n + 1) (nat.succ k_n + 1)
: by rw binom_sum
... = binom_sum (nat.succ n_n) (k_n + 1) + binom_sum (nat.succ n_n) k_n
+ binom (nat.succ n_n + 1) (nat.succ k_n + 1)
: by rw k_ih
... = (binom_sum (nat.succ n_n) (k_n + 1) + binom_sum (nat.succ n_n) k_n)
+ binom (nat.succ n_n) (nat.succ k_n + 1) + binom (nat.succ n_n) (nat.succ k_n)
: by simp [binom]
... = (binom_sum (nat.succ n_n) (k_n + 1) + binom (nat.succ n_n) (nat.succ k_n + 1))
+ (binom_sum (nat.succ n_n) k_n) + binom (nat.succ n_n) (nat.succ k_n)
: by simp [nat.add_assoc, nat.add_comm]
... = binom_sum (nat.succ n_n) (nat.succ k_n + 1)
+ binom_sum (nat.succ n_n) (nat.succ k_n)
: by simp [binom_sum]
end
example: ∀ (n: ℕ), binom_sum n n = 2^n :=
begin
intros,
induction n,
simp [binom_sum, binom],
simp [binom_sum_split],
simp [binom_sum_continue],
simp *,
calc 2 ^ n_n + 2 ^ n_n
= 1 * 2 ^ n_n + 2 ^ n_n : by rw nat.one_mul
... = 2 * 2 ^ n_n : by rw ←nat.succ_mul
... = 2 ^ n_n * 2 : by rw ←mul_comm
... = 2 ^ nat.succ n_n : by rw nat.pow_succ
end
|
50990385dc46fa6a9f2035b27d7580b6b50f2e48
|
6b10c15e653d49d146378acda9f3692e9b5b1950
|
/examples/logic/unnamed_14.lean
|
3f26dcb9b9839b5ffdf7cf8c97bead35094de150
|
[] |
no_license
|
gebner/mathematics_in_lean
|
3cf7f18767208ea6c3307ec3a67c7ac266d8514d
|
6d1462bba46d66a9b948fc1aef2714fd265cde0b
|
refs/heads/master
| 1,655,301,945,565
| 1,588,697,505,000
| 1,588,697,505,000
| 261,523,603
| 0
| 0
| null | 1,588,695,611,000
| 1,588,695,610,000
| null |
UTF-8
|
Lean
| false
| false
| 314
|
lean
|
variables A B : Prop
variable α : Type*
variable P : α → Prop
-- if A then B
#check A → B
-- A and B
#check A ∧ B
-- A or B
#check A ∨ B
-- not A
#check ¬ A
-- A if and only if B
#check A ↔ B
#check true
#check false
-- for every x, P x
#check ∀ x, P x
-- for some x, P x
#check ∃ x, P x
|
4027e8e5fb78eb9819993b2be98ac6b67aaf3337
|
1b8f093752ba748c5ca0083afef2959aaa7dace5
|
/src/category_theory/universal/zero.lean
|
cae7d23acb7b60d7b74295a2eb25207af06814aa
|
[] |
no_license
|
khoek/lean-category-theory
|
7ec4cda9cc64a5a4ffeb84712ac7d020dbbba386
|
63dcb598e9270a3e8b56d1769eb4f825a177cd95
|
refs/heads/master
| 1,585,251,725,759
| 1,539,344,445,000
| 1,539,344,445,000
| 145,281,070
| 0
| 0
| null | 1,534,662,376,000
| 1,534,662,376,000
| null |
UTF-8
|
Lean
| false
| false
| 2,208
|
lean
|
import category_theory.limits.terminal
open category_theory
universes u v
namespace category_theory.limits
variables {C : Type u} [𝒞 : category.{u v} C]
include 𝒞
structure is_zero (t : C) :=
(lift : ∀ (s : C), s ⟶ t)
(uniq_lift' : ∀ (s : C) (m : s ⟶ t), m = lift s . obviously)
(desc : ∀ (s : C), t ⟶ s)
(uniq_desc' : ∀ (s : C) (m : t ⟶ s), m = desc s . obviously)
namespace is_zero
def to_is_initial {t : C} (Z : is_zero.{u v} t) : is_initial.{u v} t := { desc := Z.desc, uniq' := Z.uniq_desc' }
def to_is_terminal {t : C} (Z : is_zero.{u v} t) : is_terminal.{u v} t := { lift := Z.lift, uniq' := Z.uniq_lift' }
end is_zero
restate_axiom is_zero.uniq_lift'
restate_axiom is_zero.uniq_desc'
attribute [search,back'] is_zero.uniq_lift is_zero.uniq_desc
@[extensionality] lemma is_zero.ext {X : C} (P Q : is_zero.{u v} X) : P = Q :=
begin tactic.unfreeze_local_instances, cases P, cases Q, congr, obviously, end
instance hom_to_zero_subsingleton (X Z : C) (B : is_zero.{u v} Z) : subsingleton (X ⟶ Z) :=
limits.hom_to_terminal_subsingleton X Z B.to_is_terminal
instance hom_from_zero_subsingleton (Z X : C) (B : is_zero.{u v} Z) : subsingleton (Z ⟶ X) :=
limits.hom_from_initial_subsingleton Z X B.to_is_initial
variable (C)
class has_zero_object :=
(zero : C)
(is_zero : is_zero.{u v} zero)
end category_theory.limits
namespace category_theory.limits
def zero_object := has_zero_object.zero.{u v}
variables {C : Type u} [𝒞 : category.{u v} C]
include 𝒞
variables [has_zero_object.{u v} C]
def zero_is_zero : is_zero.{u v} (zero_object.{u v} C) := has_zero_object.is_zero C
def zero_morphism (X Y : C) : X ⟶ Y := (zero_is_zero.lift.{u v} X) ≫ (zero_is_zero.desc.{u v} Y)
instance hom_has_zero (X Y : C) : _root_.has_zero (X ⟶ Y) := { zero := zero_morphism X Y }
@[simp] lemma zero_morphism_left {X Y Z : C} (f : Y ⟶ Z) : (zero_morphism X Y) ≫ f = zero_morphism X Z :=
begin
unfold zero_morphism,
rw category.assoc,
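  -- After reassociating, the goal reduces to `zero_is_zero.desc Y ≫ f = zero_is_zero.desc Z`,
  -- which should follow from `is_zero.uniq_desc`: any two morphisms out of the zero object agree.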
sorry,
end
@[simp] lemma zero_morphism_right {X Y Z : C} (f : X ⟶ Y) : f ≫ (zero_morphism Y Z) = zero_morphism X Z :=
begin
unfold zero_morphism,
rw ← category.assoc,
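  -- After reassociating, the goal reduces to `f ≫ zero_is_zero.lift Y = zero_is_zero.lift X`,
  -- which should follow from `is_zero.uniq_lift`: any two morphisms into the zero object agree.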
sorry,
end
end category_theory.limits
|
b6e69ef721c2abb8abacd3c640c49c9fac413698
|
7cef822f3b952965621309e88eadf618da0c8ae9
|
/src/data/real/cau_seq_completion.lean
|
6356d1d92246ec0c17f2c78aec387a8f35374122
|
[
"Apache-2.0"
] |
permissive
|
rmitta/mathlib
|
8d90aee30b4db2b013e01f62c33f297d7e64a43d
|
883d974b608845bad30ae19e27e33c285200bf84
|
refs/heads/master
| 1,585,776,832,544
| 1,576,874,096,000
| 1,576,874,096,000
| 153,663,165
| 0
| 2
|
Apache-2.0
| 1,544,806,490,000
| 1,539,884,365,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 9,474
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Robert Y. Lewis
Generalizes the Cauchy completion of (ℚ, abs) to the completion of a
commutative ring with absolute value.
-/
import data.real.cau_seq
namespace cau_seq.completion
open cau_seq
section
parameters {α : Type*} [discrete_linear_ordered_field α]
parameters {β : Type*} [comm_ring β] {abv : β → α} [is_absolute_value abv]
def Cauchy := @quotient (cau_seq _ abv) cau_seq.equiv
def mk : cau_seq _ abv → Cauchy := quotient.mk
@[simp] theorem mk_eq_mk (f) : @eq Cauchy ⟦f⟧ (mk f) := rfl
theorem mk_eq {f g} : mk f = mk g ↔ f ≈ g := quotient.eq
def of_rat (x : β) : Cauchy := mk (const abv x)
instance : has_zero Cauchy := ⟨of_rat 0⟩
instance : has_one Cauchy := ⟨of_rat 1⟩
instance : inhabited Cauchy := ⟨0⟩
theorem of_rat_zero : of_rat 0 = 0 := rfl
theorem of_rat_one : of_rat 1 = 1 := rfl
@[simp] theorem mk_eq_zero {f} : mk f = 0 ↔ lim_zero f :=
by have : mk f = 0 ↔ lim_zero (f - 0) := quotient.eq;
rwa sub_zero at this
instance : has_add Cauchy :=
⟨λ x y, quotient.lift_on₂ x y (λ f g, mk (f + g)) $
λ f₁ g₁ f₂ g₂ hf hg, quotient.sound $
by simpa [(≈), setoid.r] using add_lim_zero hf hg⟩
@[simp] theorem mk_add (f g : cau_seq β abv) : mk f + mk g = mk (f + g) := rfl
instance : has_neg Cauchy :=
⟨λ x, quotient.lift_on x (λ f, mk (-f)) $
λ f₁ f₂ hf, quotient.sound $
by simpa [(≈), setoid.r] using neg_lim_zero hf⟩
@[simp] theorem mk_neg (f : cau_seq β abv) : -mk f = mk (-f) := rfl
instance : has_mul Cauchy :=
⟨λ x y, quotient.lift_on₂ x y (λ f g, mk (f * g)) $
λ f₁ g₁ f₂ g₂ hf hg, quotient.sound $
by simpa [(≈), setoid.r, mul_add, mul_comm] using
add_lim_zero (mul_lim_zero_right g₁ hf) (mul_lim_zero_right f₂ hg)⟩
@[simp] theorem mk_mul (f g : cau_seq β abv) : mk f * mk g = mk (f * g) := rfl
theorem of_rat_add (x y : β) : of_rat (x + y) = of_rat x + of_rat y :=
congr_arg mk (const_add _ _)
theorem of_rat_neg (x : β) : of_rat (-x) = -of_rat x :=
congr_arg mk (const_neg _)
theorem of_rat_mul (x y : β) : of_rat (x * y) = of_rat x * of_rat y :=
congr_arg mk (const_mul _ _)
private lemma zero_def : 0 = mk 0 := rfl
private lemma one_def : 1 = mk 1 := rfl
instance : comm_ring Cauchy :=
by refine { neg := has_neg.neg,
add := (+), zero := 0, mul := (*), one := 1, .. };
{ repeat {refine λ a, quotient.induction_on a (λ _, _)},
simp [zero_def, one_def, mul_left_comm, mul_comm, mul_add] }
theorem of_rat_sub (x y : β) : of_rat (x - y) = of_rat x - of_rat y :=
congr_arg mk (const_sub _ _)
end
open_locale classical
section
parameters {α : Type*} [discrete_linear_ordered_field α]
parameters {β : Type*} [discrete_field β] {abv : β → α} [is_absolute_value abv]
local notation `Cauchy` := @Cauchy _ _ _ _ abv _
noncomputable instance : has_inv Cauchy :=
⟨λ x, quotient.lift_on x
(λ f, mk $ if h : lim_zero f then 0 else inv f h) $
λ f g fg, begin
have := lim_zero_congr fg,
by_cases hf : lim_zero f,
{ simp [hf, this.1 hf, setoid.refl] },
{ have hg := mt this.2 hf, simp [hf, hg],
have If : mk (inv f hf) * mk f = 1 := mk_eq.2 (inv_mul_cancel hf),
have Ig : mk (inv g hg) * mk g = 1 := mk_eq.2 (inv_mul_cancel hg),
rw [mk_eq.2 fg, ← Ig] at If,
rw mul_comm at Ig,
rw [← mul_one (mk (inv f hf)), ← Ig, ← mul_assoc, If,
mul_assoc, Ig, mul_one] }
end⟩
@[simp] theorem inv_zero : (0 : Cauchy)⁻¹ = 0 :=
congr_arg mk $ by rw dif_pos; [refl, exact zero_lim_zero]
@[simp] theorem inv_mk {f} (hf) : (@mk α _ β _ abv _ f)⁻¹ = mk (inv f hf) :=
congr_arg mk $ by rw dif_neg
lemma cau_seq_zero_ne_one : ¬ (0 : cau_seq _ abv) ≈ 1 := λ h,
have lim_zero (1 - 0), from setoid.symm h,
have lim_zero 1, by simpa,
one_ne_zero $ const_lim_zero.1 this
lemma zero_ne_one : (0 : Cauchy) ≠ 1 :=
λ h, cau_seq_zero_ne_one $ mk_eq.1 h
protected theorem inv_mul_cancel {x : Cauchy} : x ≠ 0 → x⁻¹ * x = 1 :=
quotient.induction_on x $ λ f hf, begin
simp at hf, simp [hf],
exact quotient.sound (cau_seq.inv_mul_cancel hf)
end
noncomputable def discrete_field : discrete_field Cauchy :=
{ inv := has_inv.inv,
inv_mul_cancel := @cau_seq.completion.inv_mul_cancel,
mul_inv_cancel := λ x x0, by rw [mul_comm, cau_seq.completion.inv_mul_cancel x0],
zero_ne_one := zero_ne_one,
inv_zero := inv_zero,
has_decidable_eq := by apply_instance,
..cau_seq.completion.comm_ring }
local attribute [instance] discrete_field
theorem of_rat_inv (x : β) : of_rat (x⁻¹) = ((of_rat x)⁻¹ : Cauchy) :=
congr_arg mk $ by split_ifs with h; try {simp [const_lim_zero.1 h]}; refl
theorem of_rat_div (x y : β) : of_rat (x / y) = (of_rat x / of_rat y : Cauchy) :=
by simp only [div_eq_inv_mul, of_rat_inv, of_rat_mul]
end
end cau_seq.completion
variables {α : Type*} [discrete_linear_ordered_field α]
namespace cau_seq
section
variables (β : Type*) [ring β] (abv : β → α) [is_absolute_value abv]
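-- `is_complete β abv` states that `β` is complete with respect to the absolute value `abv`:
-- every Cauchy sequence is equivalent to a constant sequence, i.e. has a limit in `β`.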
class is_complete :=
(is_complete : ∀ s : cau_seq β abv, ∃ b : β, s ≈ const abv b)
end
section
variables {β : Type*} [ring β] {abv : β → α} [is_absolute_value abv]
variable [is_complete β abv]
lemma complete : ∀ s : cau_seq β abv, ∃ b : β, s ≈ const abv b :=
is_complete.is_complete
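-- The limit of a Cauchy sequence, extracted (noncomputably, via `classical.some`) from the
-- completeness assumption; `equiv_lim` below records its defining property.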
noncomputable def lim (s : cau_seq β abv) := classical.some (complete s)
lemma equiv_lim (s : cau_seq β abv) : s ≈ const abv (lim s) :=
classical.some_spec (complete s)
lemma eq_lim_of_const_equiv {f : cau_seq β abv} {x : β} (h : cau_seq.const abv x ≈ f) : x = lim f :=
const_equiv.mp $ setoid.trans h $ equiv_lim f
lemma lim_eq_of_equiv_const {f : cau_seq β abv} {x : β} (h : f ≈ cau_seq.const abv x) : lim f = x :=
(eq_lim_of_const_equiv $ setoid.symm h).symm
lemma lim_eq_lim_of_equiv {f g : cau_seq β abv} (h : f ≈ g) : lim f = lim g :=
lim_eq_of_equiv_const $ setoid.trans h $ equiv_lim g
@[simp] lemma lim_const (x : β) : lim (const abv x) = x :=
lim_eq_of_equiv_const $ setoid.refl _
lemma lim_add (f g : cau_seq β abv) : lim f + lim g = lim (f + g) :=
eq_lim_of_const_equiv $ show lim_zero (const abv (lim f + lim g) - (f + g)),
by rw [const_add, add_sub_comm];
exact add_lim_zero (setoid.symm (equiv_lim f)) (setoid.symm (equiv_lim g))
lemma lim_mul_lim (f g : cau_seq β abv) : lim f * lim g = lim (f * g) :=
eq_lim_of_const_equiv $ show lim_zero (const abv (lim f * lim g) - f * g),
from have h : const abv (lim f * lim g) - f * g = (const abv (lim f) - f) * g
+ const abv (lim f) * (const abv (lim g) - g) :=
by simp [const_mul (lim f), mul_add, add_mul],
by rw h; exact add_lim_zero (mul_lim_zero_left _ (setoid.symm (equiv_lim _)))
(mul_lim_zero_right _ (setoid.symm (equiv_lim _)))
lemma lim_mul (f : cau_seq β abv) (x : β) : lim f * x = lim (f * const abv x) :=
by rw [← lim_mul_lim, lim_const]
lemma lim_neg (f : cau_seq β abv) : lim (-f) = -lim f :=
lim_eq_of_equiv_const (show lim_zero (-f - const abv (-lim f)),
by rw [const_neg, sub_neg_eq_add, add_comm];
exact setoid.symm (equiv_lim f))
lemma lim_eq_zero_iff (f : cau_seq β abv) : lim f = 0 ↔ lim_zero f :=
⟨assume h,
by have hf := equiv_lim f;
rw h at hf;
exact (lim_zero_congr hf).mpr (const_lim_zero.mpr rfl),
assume h,
have h₁ : f = (f - const abv 0) := ext (λ n, by simp [sub_apply, const_apply]),
by rw h₁ at h; exact lim_eq_of_equiv_const h ⟩
end
section
variables {β : Type*} [discrete_field β] {abv : β → α} [is_absolute_value abv] [is_complete β abv]
lemma lim_inv {f : cau_seq β abv} (hf : ¬ lim_zero f) : lim (inv f hf) = (lim f)⁻¹ :=
have hl : lim f ≠ 0 := by rwa ← lim_eq_zero_iff at hf,
lim_eq_of_equiv_const $ show lim_zero (inv f hf - const abv (lim f)⁻¹),
from have h₁ : ∀ (g f : cau_seq β abv) (hf : ¬ lim_zero f), lim_zero (g - f * inv f hf * g) :=
λ g f hf, by rw [← one_mul g, ← mul_assoc, ← sub_mul, mul_one, mul_comm, mul_comm f];
exact mul_lim_zero_right _ (setoid.symm (cau_seq.inv_mul_cancel _)),
have h₂ : lim_zero ((inv f hf - const abv (lim f)⁻¹) - (const abv (lim f) - f) *
(inv f hf * const abv (lim f)⁻¹)) :=
by rw [sub_mul, ← sub_add, sub_sub, sub_add_eq_sub_sub, sub_right_comm, sub_add];
exact show lim_zero (inv f hf - const abv (lim f) * (inv f hf * const abv (lim f)⁻¹)
- (const abv (lim f)⁻¹ - f * (inv f hf * const abv (lim f)⁻¹))),
from sub_lim_zero
(by rw [← mul_assoc, mul_right_comm, const_inv hl]; exact h₁ _ _ _)
(by rw [← mul_assoc]; exact h₁ _ _ _),
(lim_zero_congr h₂).mpr $ mul_lim_zero_left _ (setoid.symm (equiv_lim f))
end
section
variables [is_complete α abs]
lemma lim_le {f : cau_seq α abs} {x : α}
(h : f ≤ cau_seq.const abs x) : lim f ≤ x :=
cau_seq.const_le.1 $ cau_seq.le_of_eq_of_le (setoid.symm (equiv_lim f)) h
lemma le_lim {f : cau_seq α abs} {x : α}
(h : cau_seq.const abs x ≤ f) : x ≤ lim f :=
cau_seq.const_le.1 $ cau_seq.le_of_le_of_eq h (equiv_lim f)
lemma lt_lim {f : cau_seq α abs} {x : α}
(h : cau_seq.const abs x < f) : x < lim f :=
cau_seq.const_lt.1 $ cau_seq.lt_of_lt_of_eq h (equiv_lim f)
lemma lim_lt {f : cau_seq α abs} {x : α}
(h : f < cau_seq.const abs x) : lim f < x :=
cau_seq.const_lt.1 $ cau_seq.lt_of_eq_of_lt (setoid.symm (equiv_lim f)) h
end
end cau_seq
|
ea851515c1e8a809a8c99620b275e4e67b525125
|
367134ba5a65885e863bdc4507601606690974c1
|
/src/tactic/omega/int/main.lean
|
3f4a19eda0a36c2ad0016d3578524007e96fa9f2
|
[
"Apache-2.0"
] |
permissive
|
kodyvajjha/mathlib
|
9bead00e90f68269a313f45f5561766cfd8d5cad
|
b98af5dd79e13a38d84438b850a2e8858ec21284
|
refs/heads/master
| 1,624,350,366,310
| 1,615,563,062,000
| 1,615,563,062,000
| 162,666,963
| 0
| 0
|
Apache-2.0
| 1,545,367,651,000
| 1,545,367,651,000
| null |
UTF-8
|
Lean
| false
| false
| 6,288
|
lean
|
/-
Copyright (c) 2019 Seul Baek. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Seul Baek
-/
/-
Main procedure for linear integer arithmetic.
-/
import tactic.omega.prove_unsats
import tactic.omega.int.dnf
open tactic
namespace omega
namespace int
open_locale omega.int
run_cmd mk_simp_attr `sugar
attribute [sugar]
ne not_le not_lt
int.lt_iff_add_one_le
or_false false_or
and_true true_and
ge gt mul_add add_mul
one_mul mul_one
mul_comm sub_eq_add_neg
imp_iff_not_or
iff_iff_not_or_and_or_not
meta def desugar := `[try {simp only with sugar}]
lemma univ_close_of_unsat_clausify (m : nat) (p : preform) :
clauses.unsat (dnf (¬* p)) → univ_close p (λ x, 0) m | h1 :=
begin
apply univ_close_of_valid,
apply valid_of_unsat_not,
apply unsat_of_clauses_unsat,
exact h1
end
/-- Given a (p : preform), return the expr of a (t : univ_close m p) -/
meta def prove_univ_close (m : nat) (p : preform) : tactic expr :=
do x ← prove_unsats (dnf (¬*p)),
return `(univ_close_of_unsat_clausify %%`(m) %%`(p) %%x)
/-- Reification to an intermediate shadow syntax that retains exprs -/
meta def to_exprterm : expr → tactic exprterm
| `(- %%x) := --return (exprterm.exp (-1 : int) x)
( do z ← eval_expr' int x,
return (exprterm.cst (-z : int)) ) <|>
( return $ exprterm.exp (-1 : int) x )
| `(%%mx * %%zx) :=
do z ← eval_expr' int zx,
return (exprterm.exp z mx)
| `(%%t1x + %%t2x) :=
do t1 ← to_exprterm t1x,
t2 ← to_exprterm t2x,
return (exprterm.add t1 t2)
| x :=
( do z ← eval_expr' int x,
return (exprterm.cst z) ) <|>
( return $ exprterm.exp 1 x )
/-- Reification to an intermediate shadow syntax that retains exprs -/
meta def to_exprform : expr → tactic exprform
| `(%%tx1 = %%tx2) :=
do t1 ← to_exprterm tx1,
t2 ← to_exprterm tx2,
return (exprform.eq t1 t2)
| `(%%tx1 ≤ %%tx2) :=
do t1 ← to_exprterm tx1,
t2 ← to_exprterm tx2,
return (exprform.le t1 t2)
| `(¬ %%px) := do p ← to_exprform px, return (exprform.not p)
| `(%%px ∨ %%qx) :=
do p ← to_exprform px,
q ← to_exprform qx,
return (exprform.or p q)
| `(%%px ∧ %%qx) :=
do p ← to_exprform px,
q ← to_exprform qx,
return (exprform.and p q)
| `(_ → %%px) := to_exprform px
| x := trace "Cannot reify expr : " >> trace x >> failed
/-- List of all unreified exprs -/
meta def exprterm.exprs : exprterm → list expr
| (exprterm.cst _) := []
| (exprterm.exp _ x) := [x]
| (exprterm.add t s) := list.union t.exprs s.exprs
/-- List of all unreified exprs -/
meta def exprform.exprs : exprform → list expr
| (exprform.eq t s) := list.union t.exprs s.exprs
| (exprform.le t s) := list.union t.exprs s.exprs
| (exprform.not p) := p.exprs
| (exprform.or p q) := list.union p.exprs q.exprs
| (exprform.and p q) := list.union p.exprs q.exprs
/-- Reification to an intermediate shadow syntax which eliminates exprs,
but still includes non-canonical terms -/
meta def exprterm.to_preterm (xs : list expr) : exprterm → tactic preterm
| (exprterm.cst k) := return & k
| (exprterm.exp k x) :=
let m := xs.index_of x in
if m < xs.length
then return (k ** m)
else failed
| (exprterm.add xa xb) :=
do a ← xa.to_preterm,
b ← xb.to_preterm,
return (a +* b)
/-- Reification to an intermediate shadow syntax which eliminates exprs,
but still includes non-canonical terms -/
meta def exprform.to_preform (xs : list expr) : exprform → tactic preform
| (exprform.eq xa xb) :=
do a ← xa.to_preterm xs,
b ← xb.to_preterm xs,
return (a =* b)
| (exprform.le xa xb) :=
do a ← xa.to_preterm xs,
b ← xb.to_preterm xs,
return (a ≤* b)
| (exprform.not xp) :=
do p ← xp.to_preform,
return ¬* p
| (exprform.or xp xq) :=
do p ← xp.to_preform,
q ← xq.to_preform,
return (p ∨* q)
| (exprform.and xp xq) :=
do p ← xp.to_preform,
q ← xq.to_preform,
return (p ∧* q)
/-- Reification to an intermediate shadow syntax which eliminates exprs,
but still includes non-canonical terms. -/
meta def to_preform (x : expr) : tactic (preform × nat) :=
do xf ← to_exprform x,
let xs := xf.exprs,
f ← xf.to_preform xs,
return (f, xs.length)
/-- Return expr of proof of current LIA goal -/
meta def prove : tactic expr :=
do (p,m) ← target >>= to_preform,
trace_if_enabled `omega p,
prove_univ_close m p
/-- Succeed iff argument is the expr of ℤ -/
meta def eq_int (x : expr) : tactic unit :=
if x = `(int) then skip else failed
/-- Check whether the argument is the expr of a well-formed formula of LIA -/
meta def wff : expr → tactic unit
| `(¬ %%px) := wff px
| `(%%px ∨ %%qx) := wff px >> wff qx
| `(%%px ∧ %%qx) := wff px >> wff qx
| `(%%px ↔ %%qx) := wff px >> wff qx
| `(%%(expr.pi _ _ px qx)) :=
monad.cond
(if expr.has_var px then return tt else is_prop px)
(wff px >> wff qx)
(eq_int px >> wff qx)
| `(@has_lt.lt %%dx %%h _ _) := eq_int dx
| `(@has_le.le %%dx %%h _ _) := eq_int dx
| `(@eq %%dx _ _) := eq_int dx
| `(@ge %%dx %%h _ _) := eq_int dx
| `(@gt %%dx %%h _ _) := eq_int dx
| `(@ne %%dx _ _) := eq_int dx
| `(true) := skip
| `(false) := skip
| _ := failed
/-- Succeed iff the argument is the expr of a term whose type is a well-formed formula
(i.e. satisfies `wff`) -/
meta def wfx (x : expr) : tactic unit :=
infer_type x >>= wff
/-- Intro all universal quantifiers over ℤ -/
meta def intro_ints_core : tactic unit :=
do x ← target,
match x with
| (expr.pi _ _ `(int) _) := intro_fresh >> intro_ints_core
| _ := skip
end
meta def intro_ints : tactic unit :=
do (expr.pi _ _ `(int) _) ← target,
intro_ints_core
/-- If the goal has universal quantifiers over integers, introduce all of them.
Otherwise, revert all hypotheses that are formulas of linear integer arithmetic. -/
meta def preprocess : tactic unit :=
intro_ints <|> (revert_cond_all wfx >> desugar)
end int
end omega
open omega.int
/-- The core omega tactic for integers. -/
meta def omega_int (is_manual : bool) : tactic unit :=
desugar ; (if is_manual then skip else preprocess) ; prove >>= apply >> skip
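-- Note: this tactic is not normally invoked directly; the user-facing `omega` front end
-- (defined in `tactic.omega.main`, not in this file) is expected to dispatch goals over ℤ to
-- `omega_int` and goals over ℕ to its `omega_nat` counterpart.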
|
bc93e86b09cbf474fedff06bc39f385f0bf8469c
|
359199d7253811b032ab92108191da7336eba86e
|
/src/homework/q_5_f.lean
|
94aa3b1012172ae746cb38785d6a34e65f72eaea
|
[] |
no_license
|
arte-et-marte/my_cs2120f21
|
0bc6215cb5018a3b7c90d9d399a173233f587064
|
91609c3609ad81fda895bee8b97cc76813241e17
|
refs/heads/main
| 1,693,298,928,348
| 1,634,931,202,000
| 1,634,931,202,000
| 399,946,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 399
|
lean
|
theorem q_5_f : ∀ (P Q: Prop), ¬(P ∧ Q) → ¬P ∨ ¬Q :=
begin
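  -- Proof strategy: this direction of De Morgan's law is not intuitionistically provable,
  -- so we case-split on P and on Q using the law of the excluded middle (`classical.em`).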
assume P Q,
assume h,
have p_or_np := classical.em P,
have q_or_nq := classical.em Q,
cases p_or_np with p np,
cases q_or_nq with q nq,
-- case 1:
have pandq := and.intro p q,
have f := h pandq,
exact false.elim f,
-- case 2:
exact or.intro_right (¬P) nq,
-- case 3:
exact or.intro_left (¬Q) np,
end
|
5470967e24834a6023e7e03832cbc66eab86ce9e
|
00de0c30dd1b090ed139f65c82ea6deb48c3f4c2
|
/src/linear_algebra/projection.lean
|
e0de3fe415614cea330d922d525a90af1115c392
|
[
"Apache-2.0"
] |
permissive
|
paulvanwamelen/mathlib
|
4b9c5c19eec71b475f3dd515cd8785f1c8515f26
|
79e296bdc9f83b9447dc1b81730d36f63a99f72d
|
refs/heads/master
| 1,667,766,172,625
| 1,590,239,595,000
| 1,590,239,595,000
| 266,392,625
| 0
| 0
|
Apache-2.0
| 1,590,257,277,000
| 1,590,257,277,000
| null |
UTF-8
|
Lean
| false
| false
| 7,698
|
lean
|
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Yury Kudryashov
-/
import linear_algebra.basic
/-!
# Projection to a subspace
In this file we define
* `linear_proj_of_is_compl (p q : submodule R E) (h : is_compl p q)`: the projection of a module `E`
to a submodule `p` along its complement `q`; it is the unique linear map `f : E → p` such that
`f x = x` for `x ∈ p` and `f x = 0` for `x ∈ q`.
* `is_compl_equiv_proj p`: equivalence between submodules `q` such that `is_compl p q` and
projections `f : E → p`, `∀ x ∈ p, f x = x`.
We also provide some lemmas justifying correctness of our definitions.
## Tags
projection, complement subspace
-/
variables {R : Type*} {E : Type*} [ring R] [add_comm_group E] [module R E] (p q : submodule R E)
noncomputable theory
namespace submodule
open linear_map
/-- If `q` is a complement of `p`, then the quotient `E/p` is isomorphic to `q`. -/
def quotient_equiv_of_is_compl (h : is_compl p q) : p.quotient ≃ₗ[R] q :=
linear_equiv.symm $ linear_equiv.of_bijective (p.mkq.comp q.subtype)
(by simp only [ker_comp, ker_mkq, disjoint_iff_comap_eq_bot.1 h.symm.disjoint])
(by simp only [range_comp, range_subtype, map_mkq_eq_top, h.sup_eq_top])
@[simp] lemma quotient_equiv_of_is_compl_symm_apply (h : is_compl p q) (x : q) :
(quotient_equiv_of_is_compl p q h).symm x = quotient.mk x := rfl
@[simp] lemma quotient_equiv_of_is_compl_apply_mk_coe (h : is_compl p q) (x : q) :
quotient_equiv_of_is_compl p q h (quotient.mk x) = x :=
(quotient_equiv_of_is_compl p q h).apply_symm_apply x
@[simp] lemma mk_quotient_equiv_of_is_compl_apply (h : is_compl p q) (x : p.quotient) :
(quotient.mk (quotient_equiv_of_is_compl p q h x) : p.quotient) = x :=
(quotient_equiv_of_is_compl p q h).symm_apply_apply x
/-- If `q` is a complement of `p`, then `p × q` is isomorphic to `E`, via `(x, y) ↦ x + y`. -/
def prod_equiv_of_is_compl (h : is_compl p q) : (p × q) ≃ₗ[R] E :=
begin
apply linear_equiv.of_bijective (p.subtype.coprod q.subtype),
{ simp only [ker_eq_bot', prod.forall, subtype_apply, prod.mk_eq_zero, coprod_apply],
-- TODO: if I add `submodule.forall`, it unfolds the outer `∀` but not the inner one.
rintros ⟨x, hx⟩ ⟨y, hy⟩,
simp only [coe_mk, mk_eq_zero, ← eq_neg_iff_add_eq_zero],
rintro rfl,
rw [neg_mem_iff] at hx,
simp [disjoint_def.1 h.disjoint y hx hy] },
{ rw [← sup_eq_range, h.sup_eq_top] }
end
@[simp] lemma coe_prod_equiv_of_is_compl (h : is_compl p q) :
(prod_equiv_of_is_compl p q h : (p × q) →ₗ[R] E) = p.subtype.coprod q.subtype := rfl
@[simp] lemma coe_prod_equiv_of_is_compl' (h : is_compl p q) (x : p × q) :
prod_equiv_of_is_compl p q h x = x.1 + x.2 := rfl
@[simp] lemma prod_equiv_of_is_compl_symm_apply_left (h : is_compl p q) (x : p) :
(prod_equiv_of_is_compl p q h).symm x = (x, 0) :=
(prod_equiv_of_is_compl p q h).symm_apply_eq.2 $ by simp
@[simp] lemma prod_equiv_of_is_compl_symm_apply_right (h : is_compl p q) (x : q) :
(prod_equiv_of_is_compl p q h).symm x = (0, x) :=
(prod_equiv_of_is_compl p q h).symm_apply_eq.2 $ by simp
@[simp] lemma prod_equiv_of_is_compl_symm_apply_fst_eq_zero (h : is_compl p q) {x : E} :
((prod_equiv_of_is_compl p q h).symm x).1 = 0 ↔ x ∈ q :=
begin
conv_rhs { rw [← (prod_equiv_of_is_compl p q h).apply_symm_apply x] },
rw [coe_prod_equiv_of_is_compl', submodule.add_mem_iff_left _ (submodule.coe_mem _),
mem_right_iff_eq_zero_of_disjoint h.disjoint]
end
@[simp] lemma prod_equiv_of_is_compl_symm_apply_snd_eq_zero (h : is_compl p q) {x : E} :
((prod_equiv_of_is_compl p q h).symm x).2 = 0 ↔ x ∈ p :=
begin
conv_rhs { rw [← (prod_equiv_of_is_compl p q h).apply_symm_apply x] },
rw [coe_prod_equiv_of_is_compl', submodule.add_mem_iff_right _ (submodule.coe_mem _),
mem_left_iff_eq_zero_of_disjoint h.disjoint]
end
/-- Projection to a submodule along its complement. -/
def linear_proj_of_is_compl (h : is_compl p q) :
E →ₗ[R] p :=
(linear_map.fst R p q).comp $ (prod_equiv_of_is_compl p q h).symm
variables {p q}
@[simp] lemma linear_proj_of_is_compl_apply_left (h : is_compl p q) (x : p) :
linear_proj_of_is_compl p q h x = x :=
by simp [linear_proj_of_is_compl]
@[simp] lemma linear_proj_of_is_compl_range (h : is_compl p q) :
(linear_proj_of_is_compl p q h).range = ⊤ :=
by simp [linear_proj_of_is_compl, range_comp]
@[simp] lemma linear_proj_of_is_compl_apply_eq_zero_iff (h : is_compl p q) {x : E} :
linear_proj_of_is_compl p q h x = 0 ↔ x ∈ q:=
by simp [linear_proj_of_is_compl]
lemma linear_proj_of_is_compl_apply_right' (h : is_compl p q) (x : E) (hx : x ∈ q) :
linear_proj_of_is_compl p q h x = 0 :=
(linear_proj_of_is_compl_apply_eq_zero_iff h).2 hx
@[simp] lemma linear_proj_of_is_compl_apply_right (h : is_compl p q) (x : q) :
linear_proj_of_is_compl p q h x = 0 :=
linear_proj_of_is_compl_apply_right' h x x.2
@[simp] lemma linear_proj_of_is_compl_ker (h : is_compl p q) :
(linear_proj_of_is_compl p q h).ker = q :=
ext $ λ x, mem_ker.trans (linear_proj_of_is_compl_apply_eq_zero_iff h)
lemma linear_proj_of_is_compl_comp_subtype (h : is_compl p q) :
(linear_proj_of_is_compl p q h).comp p.subtype = id :=
linear_map.ext $ linear_proj_of_is_compl_apply_left h
lemma linear_proj_of_is_compl_idempotent (h : is_compl p q) (x : E) :
linear_proj_of_is_compl p q h (linear_proj_of_is_compl p q h x) =
linear_proj_of_is_compl p q h x :=
linear_proj_of_is_compl_apply_left h _
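/- A quick sanity check (a sketch relying on the `simp` lemmas above): the projection onto `p`
along `q` keeps the `p`-component of a decomposed vector and kills its `q`-component. -/
example (h : is_compl p q) (x : p) (y : q) :
  linear_proj_of_is_compl p q h ((x : E) + (y : E)) = x :=
by simp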
end submodule
namespace linear_map
variable {p}
open submodule
lemma ker_id_sub_eq_of_proj (f : E →ₗ[R] p) (hf : ∀ x : p, f x = x) :
ker (id - p.subtype.comp f) = p :=
begin
ext x,
simp only [comp_apply, mem_ker, subtype_apply, sub_apply, id_apply, sub_eq_zero],
exact ⟨λ h, h.symm ▸ submodule.coe_mem _, λ hx, by erw [hf ⟨x, hx⟩, subtype.coe_mk]⟩
end
lemma is_compl_of_proj (f : E →ₗ[R] p) (hf : ∀ x : p, f x = x) :
is_compl p f.ker :=
begin
split,
{ rintros x ⟨hpx, hfx⟩,
erw [mem_coe, mem_ker, hf ⟨x, hpx⟩, mk_eq_zero] at hfx,
simp only [hfx, mem_coe, zero_mem] },
{ intros x hx,
rw [mem_sup'],
refine ⟨f x, ⟨x - f x, _⟩, add_sub_cancel'_right _ _⟩,
rw [mem_ker, linear_map.map_sub, hf, sub_self] }
end
@[simp] lemma linear_proj_of_is_compl_of_proj (f : E →ₗ[R] p) (hf : ∀ x : p, f x = x) :
p.linear_proj_of_is_compl f.ker (f.is_compl_of_proj hf) = f :=
begin
ext x,
have : x ∈ p ⊔ f.ker,
{ simp only [(f.is_compl_of_proj hf).sup_eq_top, mem_top] },
rcases mem_sup'.1 this with ⟨x, y, rfl⟩,
simp [hf]
end
end linear_map
namespace submodule
/-- Equivalence between submodules `q` such that `is_compl p q` and linear maps `f : E →ₗ[R] p`
such that `∀ x : p, f x = x`. -/
def is_compl_equiv_proj :
{q // is_compl p q} ≃ {f : E →ₗ[R] p // ∀ x : p, f x = x} :=
{ to_fun := λ q, ⟨linear_proj_of_is_compl p q q.2, linear_proj_of_is_compl_apply_left q.2⟩,
inv_fun := λ f, ⟨(f : E →ₗ[R] p).ker, f.1.is_compl_of_proj f.2⟩,
left_inv := λ ⟨q, hq⟩, by simp only [linear_proj_of_is_compl_ker, subtype.coe_mk],
right_inv := λ ⟨f, hf⟩, subtype.eq $ f.linear_proj_of_is_compl_of_proj hf }
@[simp] lemma coe_is_compl_equiv_proj_apply (q : {q // is_compl p q}) :
(p.is_compl_equiv_proj q : E →ₗ[R] p) = linear_proj_of_is_compl p q q.2 := rfl
@[simp] lemma coe_is_compl_equiv_proj_symm_apply (f : {f : E →ₗ[R] p // ∀ x : p, f x = x}) :
(p.is_compl_equiv_proj.symm f : submodule R E) = (f : E →ₗ[R] p).ker := rfl
end submodule
blob_id: 24e8a383639a5efac8710290db467620eb9506ed | directory_id: bd12a817ba941113eb7fdb7ddf0979d9ed9386a0 | path: /src/category_theory/category.lean | content_id: 74fece41d1b6c3a5b239f9b867eeaebb01835efa
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: flypitch/mathlib | snapshot_id: 563d9c3356c2885eb6cefaa704d8d86b89b74b15 | revision_id: 70cd00bc20ad304f2ac0886b2291b44261787607 | branch_name: refs/heads/master
visit_date: 1,590,167,818,658 | revision_date: 1,557,762,121,000 | committer_date: 1,557,762,121,000 | github_id: 186,450,076 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 1,557,762,289,000 | gha_created_at: 1,557,762,288,000 | gha_language: null | src_encoding: UTF-8 | language: Lean
is_vendor: false | is_generated: false | length_bytes: 6,273 | extension: lean
content:
/-
Copyright (c) 2017 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Stephen Morgan, Scott Morrison, Johannes Hölzl, Reid Barton
Defines a category, as a typeclass parametrised by the type of objects.
Introduces notations
`X ⟶ Y` for the morphism spaces,
`f ≫ g` for composition in the 'arrows' convention.
Users may like to add `f ⊚ g` for composition in the standard convention, using
```
local notation f ` ⊚ `:80 g:80 := category.comp g f -- type as \oo
```
-/
import tactic.basic
import tactic.tidy
universes v u -- The order in this declaration matters: v often needs to be explicitly specified while u often can be omitted
namespace category_theory
/-
The propositional fields of `category` are annotated with the auto_param `obviously`,
which is defined here as a
[`replacer` tactic](https://github.com/leanprover/mathlib/blob/master/docs/tactics.md#def_replacer).
We then immediately set up `obviously` to call `tidy`. Later, this can be replaced with more
powerful tactics.
-/
def_replacer obviously
@[obviously] meta def obviously' := tactic.tidy
class has_hom (obj : Sort u) : Sort (max u (v+1)) :=
(hom : obj → obj → Sort v)
infixr ` ⟶ `:10 := has_hom.hom -- type as \h
class category_struct (obj : Sort u)
extends has_hom.{v} obj : Sort (max u (v+1)) :=
(id : Π X : obj, hom X X)
(comp : Π {X Y Z : obj}, (X ⟶ Y) → (Y ⟶ Z) → (X ⟶ Z))
notation `𝟙` := category_struct.id -- type as \b1
infixr ` ≫ `:80 := category_struct.comp -- type as \gg
/--
The typeclass `category C` describes morphisms associated to objects of type `C`.
The universe levels of the objects and morphisms are unconstrained, and will often need to be
specified explicitly, as `category.{v} C`. (See also `large_category` and `small_category`.)
-/
class category (obj : Sort u)
extends category_struct.{v} obj : Sort (max u (v+1)) :=
(id_comp' : ∀ {X Y : obj} (f : hom X Y), 𝟙 X ≫ f = f . obviously)
(comp_id' : ∀ {X Y : obj} (f : hom X Y), f ≫ 𝟙 Y = f . obviously)
(assoc' : ∀ {W X Y Z : obj} (f : hom W X) (g : hom X Y) (h : hom Y Z),
(f ≫ g) ≫ h = f ≫ (g ≫ h) . obviously)
-- `restate_axiom` is a command that creates a lemma from a structure field,
-- discarding any auto_param wrappers from the type.
-- (It removes a backtick from the name, if it finds one, and otherwise adds "_lemma".)
restate_axiom category.id_comp'
restate_axiom category.comp_id'
restate_axiom category.assoc'
attribute [simp] category.id_comp category.comp_id category.assoc
attribute [trans] category_struct.comp
lemma category.assoc_symm {C : Type u} [category.{v} C] {W X Y Z : C} (f : W ⟶ X) (g : X ⟶ Y) (h : Y ⟶ Z) :
f ≫ (g ≫ h) = (f ≫ g) ≫ h :=
by rw ←category.assoc
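/- A quick sanity check (a sketch): with the `simp` attributes just installed, `simp` can strip
identity morphisms from a composite. -/
example {C : Sort u} [category.{v} C] {X Y : C} (f : X ⟶ Y) : 𝟙 X ≫ f ≫ 𝟙 Y = f :=
by simp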
/--
A `large_category` has objects in one universe level higher than the universe level of
the morphisms. It is useful for examples such as the category of types, or the category
of groups, etc.
-/
abbreviation large_category (C : Sort (u+1)) : Sort (u+1) := category.{u} C
/--
A `small_category` has objects and morphisms in the same universe level.
-/
abbreviation small_category (C : Sort u) : Sort (u+1) := category.{u} C
section
variables {C : Sort u} [𝒞 : category.{v} C] {X Y Z : C}
include 𝒞
lemma eq_of_comp_left_eq {f g : X ⟶ Y} (w : ∀ {Z : C} (h : Y ⟶ Z), f ≫ h = g ≫ h) : f = g :=
by { convert w (𝟙 Y), tidy }
lemma eq_of_comp_right_eq {f g : Y ⟶ Z} (w : ∀ {X : C} (h : X ⟶ Y), h ≫ f = h ≫ g) : f = g :=
by { convert w (𝟙 Y), tidy }
lemma eq_of_comp_left_eq' (f g : X ⟶ Y) (w : (λ {Z : C} (h : Y ⟶ Z), f ≫ h) = (λ {Z : C} (h : Y ⟶ Z), g ≫ h)) : f = g :=
eq_of_comp_left_eq (λ Z h, by convert congr_fun (congr_fun w Z) h)
lemma eq_of_comp_right_eq' (f g : Y ⟶ Z) (w : (λ {X : C} (h : X ⟶ Y), h ≫ f) = (λ {X : C} (h : X ⟶ Y), h ≫ g)) : f = g :=
eq_of_comp_right_eq (λ X h, by convert congr_fun (congr_fun w X) h)
lemma id_of_comp_left_id (f : X ⟶ X) (w : ∀ {Y : C} (g : X ⟶ Y), f ≫ g = g) : f = 𝟙 X :=
by { convert w (𝟙 X), tidy }
lemma id_of_comp_right_id (f : X ⟶ X) (w : ∀ {Y : C} (g : Y ⟶ X), g ≫ f = g) : f = 𝟙 X :=
by { convert w (𝟙 X), tidy }
class epi (f : X ⟶ Y) : Prop :=
(left_cancellation : Π {Z : C} (g h : Y ⟶ Z) (w : f ≫ g = f ≫ h), g = h)
class mono (f : X ⟶ Y) : Prop :=
(right_cancellation : Π {Z : C} (g h : Z ⟶ X) (w : g ≫ f = h ≫ f), g = h)
@[simp] lemma cancel_epi (f : X ⟶ Y) [epi f] (g h : Y ⟶ Z) : (f ≫ g = f ≫ h) ↔ g = h :=
⟨ λ p, epi.left_cancellation g h p, begin intro a, subst a end ⟩
@[simp] lemma cancel_mono (f : X ⟶ Y) [mono f] (g h : Z ⟶ X) : (g ≫ f = h ≫ f) ↔ g = h :=
⟨ λ p, mono.right_cancellation g h p, begin intro a, subst a end ⟩
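/- For instance (a sketch), `cancel_epi` lets us cancel an epimorphism on the left of a
composition: -/
example (f : X ⟶ Y) [epi f] (g h : Y ⟶ Z) (w : f ≫ g = f ≫ h) : g = h :=
(cancel_epi f g h).mp w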
end
section
variable (C : Type u)
variable [category.{v} C]
universe u'
instance ulift_category : category.{v} (ulift.{u'} C) :=
{ hom := λ X Y, (X.down ⟶ Y.down),
id := λ X, 𝟙 X.down,
comp := λ _ _ _ f g, f ≫ g }
-- We verify that this previous instance can lift small categories to large categories.
example (D : Type u) [small_category D] : large_category (ulift.{u+1} D) := by apply_instance
end
section
variables {C : Type u}
def End [has_hom.{v} C] (X : C) := X ⟶ X
instance End.has_one [category_struct.{v+1} C] {X : C} : has_one (End X) := by refine { one := 𝟙 X }
instance End.has_mul [category_struct.{v+1} C] {X : C} : has_mul (End X) := by refine { mul := λ x y, x ≫ y }
instance End.monoid [category.{v+1} C] {X : C} : monoid (End X) :=
by refine { .. End.has_one, .. End.has_mul, .. }; dsimp [has_mul.mul,has_one.one]; obviously
@[simp] lemma End.one_def {C : Type u} [category_struct.{v+1} C] {X : C} : (1 : End X) = 𝟙 X := rfl
@[simp] lemma End.mul_def {C : Type u} [category_struct.{v+1} C] {X : C} (xs ys : End X) : xs * ys = xs ≫ ys := rfl
end
end category_theory
open category_theory
namespace preorder
variables (α : Type u)
instance small_category [preorder α] : small_category α :=
{ hom := λ U V, ulift (plift (U ≤ V)),
id := λ X, ⟨ ⟨ le_refl X ⟩ ⟩,
comp := λ X Y Z f g, ⟨ ⟨ le_trans _ _ _ f.down.down g.down.down ⟩ ⟩ }
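/- For instance (a sketch): a morphism `U ⟶ V` in this category is just a wrapped proof of
`U ≤ V`. -/
example [preorder α] {U V : α} (h : U ≤ V) : U ⟶ V := ulift.up (plift.up h)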
end preorder
blob_id: 81a39a3e8aaa680994c4760ba8065c8c51b01a71 | directory_id: 31f556cdeb9239ffc2fad8f905e33987ff4feab9 | path: /stage0/src/Init/Data/Int/Basic.lean | content_id: b06174ea6a84beae37cd0fe8ebce749c9f9274a8
detected_licenses: ["Apache-2.0", "LLVM-exception", "NCSA", "LGPL-3.0-only", "LicenseRef-scancode-inner-net-2.0", "BSD-3-Clause", "LGPL-2.0-or-later", "Spencer-94", "LGPL-2.1-or-later", "HPND", "LicenseRef-scancode-pcre", "ISC", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "SunPro", "CMU-Mach"] | license_type: permissive
repo_name: tobiasgrosser/lean4 | snapshot_id: ce0fd9cca0feba1100656679bf41f0bffdbabb71 | revision_id: ebdbdc10436a4d9d6b66acf78aae7a23f5bd073f | branch_name: refs/heads/master
visit_date: 1,673,103,412,948 | revision_date: 1,664,930,501,000 | committer_date: 1,664,930,501,000 | github_id: 186,870,185 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 1,665,129,237,000 | gha_created_at: 1,557,939,901,000 | gha_language: Lean | src_encoding: UTF-8 | language: Lean
is_vendor: false | is_generated: false | length_bytes: 4,656 | extension: lean
content:
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura
The integers, with addition, multiplication, and subtraction.
-/
prelude
import Init.Coe
import Init.Data.Nat.Div
import Init.Data.List.Basic
open Nat
/-! # the Type, coercions, and notation -/
inductive Int : Type where
| ofNat : Nat → Int
| negSucc : Nat → Int
attribute [extern "lean_nat_to_int"] Int.ofNat
attribute [extern "lean_int_neg_succ_of_nat"] Int.negSucc
instance : Coe Nat Int := ⟨Int.ofNat⟩
instance : OfNat Int n where
ofNat := Int.ofNat n
namespace Int
instance : Inhabited Int := ⟨ofNat 0⟩
def negOfNat : Nat → Int
| 0 => 0
| succ m => negSucc m
set_option bootstrap.genMatcherCode false in
@[extern "lean_int_neg"]
protected def neg (n : @& Int) : Int :=
match n with
| ofNat n => negOfNat n
| negSucc n => succ n
def subNatNat (m n : Nat) : Int :=
match (n - m : Nat) with
| 0 => ofNat (m - n) -- m ≥ n
| (succ k) => negSucc k
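/- Two quick sanity checks for `subNatNat` (a sketch): when the first argument is at least the
second the result is nonnegative, otherwise it is negative. -/
example : subNatNat 5 2 = ofNat 3 := rfl
example : subNatNat 2 5 = negSucc 2 := rfl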
set_option bootstrap.genMatcherCode false in
@[extern "lean_int_add"]
protected def add (m n : @& Int) : Int :=
match m, n with
| ofNat m, ofNat n => ofNat (m + n)
| ofNat m, negSucc n => subNatNat m (succ n)
| negSucc m, ofNat n => subNatNat n (succ m)
| negSucc m, negSucc n => negSucc (succ (m + n))
set_option bootstrap.genMatcherCode false in
@[extern "lean_int_mul"]
protected def mul (m n : @& Int) : Int :=
match m, n with
| ofNat m, ofNat n => ofNat (m * n)
| ofNat m, negSucc n => negOfNat (m * succ n)
| negSucc m, ofNat n => negOfNat (succ m * n)
| negSucc m, negSucc n => ofNat (succ m * succ n)
/--
The `Neg Int` default instance must have priority higher than `low` since
the default instance `OfNat Nat n` has `low` priority.
```
#check -42
```
-/
@[defaultInstance mid]
instance : Neg Int where
neg := Int.neg
instance : Add Int where
add := Int.add
instance : Mul Int where
mul := Int.mul
@[extern "lean_int_sub"]
protected def sub (m n : @& Int) : Int :=
m + (- n)
instance : Sub Int where
sub := Int.sub
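/- A few numeric sanity checks for the arithmetic above (a sketch; each holds by `rfl` since the
operations compute directly on the `ofNat`/`negSucc` representation). -/
example : (2 : Int) + 3 = 5 := rfl
example : (2 : Int) - 5 = -3 := rfl
example : (-2 : Int) * 3 = -6 := rfl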
inductive NonNeg : Int → Prop where
| mk (n : Nat) : NonNeg (ofNat n)
protected def le (a b : Int) : Prop := NonNeg (b - a)
instance : LE Int where
le := Int.le
protected def lt (a b : Int) : Prop := (a + 1) ≤ b
instance : LT Int where
lt := Int.lt
set_option bootstrap.genMatcherCode false in
@[extern "lean_int_dec_eq"]
protected def decEq (a b : @& Int) : Decidable (a = b) :=
match a, b with
| ofNat a, ofNat b => match decEq a b with
| isTrue h => isTrue <| h ▸ rfl
| isFalse h => isFalse <| fun h' => Int.noConfusion h' (fun h' => absurd h' h)
| negSucc a, negSucc b => match decEq a b with
| isTrue h => isTrue <| h ▸ rfl
| isFalse h => isFalse <| fun h' => Int.noConfusion h' (fun h' => absurd h' h)
| ofNat _, negSucc _ => isFalse <| fun h => Int.noConfusion h
| negSucc _, ofNat _ => isFalse <| fun h => Int.noConfusion h
instance : DecidableEq Int := Int.decEq
set_option bootstrap.genMatcherCode false in
@[extern "lean_int_dec_nonneg"]
private def decNonneg (m : @& Int) : Decidable (NonNeg m) :=
match m with
| ofNat m => isTrue <| NonNeg.mk m
| negSucc _ => isFalse <| fun h => nomatch h
@[extern "lean_int_dec_le"]
instance decLe (a b : @& Int) : Decidable (a ≤ b) :=
decNonneg _
@[extern "lean_int_dec_lt"]
instance decLt (a b : @& Int) : Decidable (a < b) :=
decNonneg _
set_option bootstrap.genMatcherCode false in
@[extern "lean_nat_abs"]
def natAbs (m : @& Int) : Nat :=
match m with
| ofNat m => m
| negSucc m => m.succ
@[extern "lean_int_div"]
def div : (@& Int) → (@& Int) → Int
| ofNat m, ofNat n => ofNat (m / n)
| ofNat m, negSucc n => -ofNat (m / succ n)
| negSucc m, ofNat n => -ofNat (succ m / n)
| negSucc m, negSucc n => ofNat (succ m / succ n)
@[extern "lean_int_mod"]
def mod : (@& Int) → (@& Int) → Int
| ofNat m, ofNat n => ofNat (m % n)
| ofNat m, negSucc n => ofNat (m % succ n)
| negSucc m, ofNat n => -ofNat (succ m % n)
| negSucc m, negSucc n => -ofNat (succ m % succ n)
instance : Div Int where
div := Int.div
instance : Mod Int where
mod := Int.mod
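/- Note that this `div`/`mod` pair truncates toward zero, as the sanity checks below illustrate
(a sketch). -/
example : (-7 : Int) / 2 = -3 := rfl
example : (-7 : Int) % 2 = -1 := rfl
example : (7 : Int) / (-2) = -3 := rfl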
def toNat : Int → Nat
| ofNat n => n
| negSucc _ => 0
def natMod (m n : Int) : Nat := (m % n).toNat
protected def pow (m : Int) : Nat → Int
| 0 => 1
| succ n => Int.pow m n * m
instance : HPow Int Nat Int where
hPow := Int.pow
instance : LawfulBEq Int where
eq_of_beq h := by simp [BEq.beq] at h; assumption
rfl := by simp [BEq.beq]
end Int
blob_id: 1852ad34301e829c134d6649539e56142f49b99e | directory_id: fa02ed5a3c9c0adee3c26887a16855e7841c668b | path: /src/ring_theory/ideal/basic.lean | content_id: 6ac7c326b72ff180f037b87f2c197d4d27b60de0
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: jjgarzella/mathlib | snapshot_id: 96a345378c4e0bf26cf604aed84f90329e4896a2 | revision_id: 395d8716c3ad03747059d482090e2bb97db612c8 | branch_name: refs/heads/master
visit_date: 1,686,480,124,379 | revision_date: 1,625,163,323,000 | committer_date: 1,625,163,323,000 | github_id: 281,190,421 | star_events_count: 2 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 1,595,268,170,000 | gha_created_at: 1,595,268,169,000 | gha_language: null | src_encoding: UTF-8 | language: Lean
is_vendor: false | is_generated: false | length_bytes: 36,098 | extension: lean
content:
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau, Chris Hughes, Mario Carneiro
-/
import algebra.associated
import linear_algebra.basic
import order.zorn
import order.atoms
import order.compactly_generated
/-!
# Ideals over a ring
This file defines `ideal R`, the type of ideals over a commutative ring `R`.
## Implementation notes
`ideal R` is implemented using `submodule R R`, where `•` is interpreted as `*`.
## TODO
Support one-sided ideals, and ideals over non-commutative rings.
See `algebra.ring_quot` for quotients of non-commutative rings.
-/
universes u v w
variables {α : Type u} {β : Type v}
open set function
open_locale classical big_operators
/-- A (left) ideal in a semiring `R` is an additive submonoid `s` such that
`a * b ∈ s` whenever `b ∈ s`. If `R` is a ring, then `s` is an additive subgroup. -/
@[reducible] def ideal (R : Type u) [semiring R] := submodule R R
section semiring
namespace ideal
variables [semiring α] (I : ideal α) {a b : α}
protected lemma zero_mem : (0 : α) ∈ I := I.zero_mem
protected lemma add_mem : a ∈ I → b ∈ I → a + b ∈ I := I.add_mem
variables (a)
lemma mul_mem_left : b ∈ I → a * b ∈ I := I.smul_mem a
variables {a}
@[ext] lemma ext {I J : ideal α} (h : ∀ x, x ∈ I ↔ x ∈ J) : I = J :=
submodule.ext h
theorem eq_top_of_unit_mem
(x y : α) (hx : x ∈ I) (h : y * x = 1) : I = ⊤ :=
eq_top_iff.2 $ λ z _, calc
z = z * (y * x) : by simp [h]
... = (z * y) * x : eq.symm $ mul_assoc z y x
... ∈ I : I.mul_mem_left _ hx
theorem eq_top_of_is_unit_mem {x} (hx : x ∈ I) (h : is_unit x) : I = ⊤ :=
let ⟨y, hy⟩ := h.exists_left_inv in eq_top_of_unit_mem I x y hx hy
theorem eq_top_iff_one : I = ⊤ ↔ (1:α) ∈ I :=
⟨by rintro rfl; trivial,
λ h, eq_top_of_unit_mem _ _ 1 h (by simp)⟩
theorem ne_top_iff_one : I ≠ ⊤ ↔ (1:α) ∉ I :=
not_congr I.eq_top_iff_one
@[simp]
theorem unit_mul_mem_iff_mem {x y : α} (hy : is_unit y) : y * x ∈ I ↔ x ∈ I :=
begin
refine ⟨λ h, _, λ h, I.mul_mem_left y h⟩,
obtain ⟨y', hy'⟩ := hy.exists_left_inv,
have := I.mul_mem_left y' h,
rwa [← mul_assoc, hy', one_mul] at this,
end
/-- The ideal generated by a subset of a ring -/
def span (s : set α) : ideal α := submodule.span α s
lemma subset_span {s : set α} : s ⊆ span s := submodule.subset_span
lemma span_le {s : set α} {I} : span s ≤ I ↔ s ⊆ I := submodule.span_le
lemma span_mono {s t : set α} : s ⊆ t → span s ≤ span t := submodule.span_mono
@[simp] lemma span_eq : span (I : set α) = I := submodule.span_eq _
@[simp] lemma span_singleton_one : span ({1} : set α) = ⊤ :=
(eq_top_iff_one _).2 $ subset_span $ mem_singleton _
lemma mem_span_insert {s : set α} {x y} :
x ∈ span (insert y s) ↔ ∃ a (z ∈ span s), x = a * y + z := submodule.mem_span_insert
lemma mem_span_singleton' {x y : α} :
x ∈ span ({y} : set α) ↔ ∃ a, a * y = x := submodule.mem_span_singleton
lemma span_eq_bot {s : set α} : span s = ⊥ ↔ ∀ x ∈ s, (x:α) = 0 := submodule.span_eq_bot
@[simp] lemma span_singleton_eq_bot {x} : span ({x} : set α) = ⊥ ↔ x = 0 :=
submodule.span_singleton_eq_bot
@[simp] lemma span_zero : span (0 : set α) = ⊥ := by rw [←set.singleton_zero, span_singleton_eq_bot]
@[simp] lemma span_one : span (1 : set α) = ⊤ := by rw [←set.singleton_one, span_singleton_one]
/--
The ideal generated by an arbitrary binary relation.
-/
def of_rel (r : α → α → Prop) : ideal α :=
submodule.span α { x | ∃ (a b) (h : r a b), x + b = a }
/-- An ideal `P` of a ring `R` is prime if `P ≠ R` and `xy ∈ P → x ∈ P ∨ y ∈ P` -/
class is_prime (I : ideal α) : Prop :=
(ne_top' : I ≠ ⊤)
(mem_or_mem' : ∀ {x y : α}, x * y ∈ I → x ∈ I ∨ y ∈ I)
theorem is_prime_iff {I : ideal α} :
is_prime I ↔ I ≠ ⊤ ∧ ∀ {x y : α}, x * y ∈ I → x ∈ I ∨ y ∈ I :=
⟨λ h, ⟨h.1, h.2⟩, λ h, ⟨h.1, h.2⟩⟩
theorem is_prime.ne_top {I : ideal α} (hI : I.is_prime) : I ≠ ⊤ := hI.1
theorem is_prime.mem_or_mem {I : ideal α} (hI : I.is_prime) :
∀ {x y : α}, x * y ∈ I → x ∈ I ∨ y ∈ I := hI.2
theorem is_prime.mem_or_mem_of_mul_eq_zero {I : ideal α} (hI : I.is_prime)
{x y : α} (h : x * y = 0) : x ∈ I ∨ y ∈ I :=
hI.mem_or_mem (h.symm ▸ I.zero_mem)
theorem is_prime.mem_of_pow_mem {I : ideal α} (hI : I.is_prime)
{r : α} (n : ℕ) (H : r^n ∈ I) : r ∈ I :=
begin
induction n with n ih,
{ rw pow_zero at H, exact (mt (eq_top_iff_one _).2 hI.1).elim H },
{ rw pow_succ at H, exact or.cases_on (hI.mem_or_mem H) id ih }
end
lemma not_is_prime_iff {I : ideal α} : ¬ I.is_prime ↔ I = ⊤ ∨ ∃ (x ∉ I) (y ∉ I), x * y ∈ I :=
begin
simp_rw [ideal.is_prime_iff, not_and_distrib, ne.def, not_not, not_forall, not_or_distrib],
exact or_congr iff.rfl
⟨λ ⟨x, y, hxy, hx, hy⟩, ⟨x, hx, y, hy, hxy⟩, λ ⟨x, hx, y, hy, hxy⟩, ⟨x, y, hxy, hx, hy⟩⟩
end
theorem zero_ne_one_of_proper {I : ideal α} (h : I ≠ ⊤) : (0:α) ≠ 1 :=
λ hz, I.ne_top_iff_one.1 h $ hz ▸ I.zero_mem
lemma bot_prime {R : Type*} [integral_domain R] : (⊥ : ideal R).is_prime :=
⟨λ h, one_ne_zero (by rwa [ideal.eq_top_iff_one, submodule.mem_bot] at h),
λ x y h, mul_eq_zero.mp (by simpa only [submodule.mem_bot] using h)⟩
/-- An ideal is maximal if it is maximal in the collection of proper ideals. -/
class is_maximal (I : ideal α) : Prop := (out : is_coatom I)
theorem is_maximal_def {I : ideal α} : I.is_maximal ↔ is_coatom I := ⟨λ h, h.1, λ h, ⟨h⟩⟩
theorem is_maximal.ne_top {I : ideal α} (h : I.is_maximal) : I ≠ ⊤ := (is_maximal_def.1 h).1
theorem is_maximal_iff {I : ideal α} : I.is_maximal ↔
(1:α) ∉ I ∧ ∀ (J : ideal α) x, I ≤ J → x ∉ I → x ∈ J → (1:α) ∈ J :=
is_maximal_def.trans $ and_congr I.ne_top_iff_one $ forall_congr $ λ J,
by rw [lt_iff_le_not_le]; exact
⟨λ H x h hx₁ hx₂, J.eq_top_iff_one.1 $
H ⟨h, not_subset.2 ⟨_, hx₂, hx₁⟩⟩,
λ H ⟨h₁, h₂⟩, let ⟨x, xJ, xI⟩ := not_subset.1 h₂ in
J.eq_top_iff_one.2 $ H x h₁ xI xJ⟩
theorem is_maximal.eq_of_le {I J : ideal α}
(hI : I.is_maximal) (hJ : J ≠ ⊤) (IJ : I ≤ J) : I = J :=
eq_iff_le_not_lt.2 ⟨IJ, λ h, hJ (hI.1.2 _ h)⟩
instance : is_coatomic (ideal α) :=
begin
apply complete_lattice.coatomic_of_top_compact,
rw ←span_singleton_one,
exact submodule.singleton_span_is_compact_element 1,
end
/-- Krull's theorem: if `I` is an ideal that is not the whole ring, then it is included in some
maximal ideal. -/
theorem exists_le_maximal (I : ideal α) (hI : I ≠ ⊤) :
∃ M : ideal α, M.is_maximal ∧ I ≤ M :=
let ⟨m, hm⟩ := (eq_top_or_exists_le_coatom I).resolve_left hI in ⟨m, ⟨⟨hm.1⟩, hm.2⟩⟩
variables (α)
/-- Krull's theorem: a nontrivial ring has a maximal ideal. -/
theorem exists_maximal [nontrivial α] : ∃ M : ideal α, M.is_maximal :=
let ⟨I, ⟨hI, _⟩⟩ := exists_le_maximal (⊥ : ideal α) bot_ne_top in ⟨I, hI⟩
variables {α}
instance [nontrivial α] : nontrivial (ideal α) :=
begin
rcases @exists_maximal α _ _ with ⟨M, hM, _⟩,
exact nontrivial_of_ne M ⊤ hM
end
/-- If P is not properly contained in any maximal ideal then it is not properly contained
in any proper ideal -/
lemma maximal_of_no_maximal {R : Type u} [comm_semiring R] {P : ideal R}
(hmax : ∀ m : ideal R, P < m → ¬is_maximal m) (J : ideal R) (hPJ : P < J) : J = ⊤ :=
begin
by_contradiction hnonmax,
rcases exists_le_maximal J hnonmax with ⟨M, hM1, hM2⟩,
exact hmax M (lt_of_lt_of_le hPJ hM2) hM1,
end
theorem mem_span_pair {x y z : α} :
z ∈ span ({x, y} : set α) ↔ ∃ a b, a * x + b * y = z :=
by simp [mem_span_insert, mem_span_singleton', @eq_comm _ _ z]
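/- For instance (a sketch): the sum of two elements lies in the ideal they generate. -/
example {x y : α} : x + y ∈ span ({x, y} : set α) :=
mem_span_pair.2 ⟨1, 1, by rw [one_mul, one_mul]⟩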
theorem is_maximal.exists_inv {I : ideal α}
(hI : I.is_maximal) {x} (hx : x ∉ I) : ∃ y, ∃ i ∈ I, y * x + i = 1 :=
begin
cases is_maximal_iff.1 hI with H₁ H₂,
rcases mem_span_insert.1 (H₂ (span (insert x I)) x
(set.subset.trans (subset_insert _ _) subset_span)
hx (subset_span (mem_insert _ _))) with ⟨y, z, hz, hy⟩,
refine ⟨y, z, _, hy.symm⟩,
rwa ← span_eq I,
end
section lattice
variables {R : Type u} [semiring R]
lemma mem_sup_left {S T : ideal R} : ∀ {x : R}, x ∈ S → x ∈ S ⊔ T :=
show S ≤ S ⊔ T, from le_sup_left
lemma mem_sup_right {S T : ideal R} : ∀ {x : R}, x ∈ T → x ∈ S ⊔ T :=
show T ≤ S ⊔ T, from le_sup_right
lemma mem_supr_of_mem {ι : Type*} {S : ι → ideal R} (i : ι) :
∀ {x : R}, x ∈ S i → x ∈ supr S :=
show S i ≤ supr S, from le_supr _ _
lemma mem_Sup_of_mem {S : set (ideal R)} {s : ideal R}
(hs : s ∈ S) : ∀ {x : R}, x ∈ s → x ∈ Sup S :=
show s ≤ Sup S, from le_Sup hs
theorem mem_Inf {s : set (ideal R)} {x : R} :
x ∈ Inf s ↔ ∀ ⦃I⦄, I ∈ s → x ∈ I :=
⟨λ hx I his, hx I ⟨I, infi_pos his⟩, λ H I ⟨J, hij⟩, hij ▸ λ S ⟨hj, hS⟩, hS ▸ H hj⟩
@[simp] lemma mem_inf {I J : ideal R} {x : R} : x ∈ I ⊓ J ↔ x ∈ I ∧ x ∈ J := iff.rfl
@[simp] lemma mem_infi {ι : Type*} {I : ι → ideal R} {x : R} : x ∈ infi I ↔ ∀ i, x ∈ I i :=
submodule.mem_infi _
@[simp] lemma mem_bot {x : R} : x ∈ (⊥ : ideal R) ↔ x = 0 :=
submodule.mem_bot _
end lattice
section pi
variables (ι : Type v)
/-- `I^n` as an ideal of `R^n`. -/
def pi : ideal (ι → α) :=
{ carrier := { x | ∀ i, x i ∈ I },
zero_mem' := λ i, I.zero_mem,
add_mem' := λ a b ha hb i, I.add_mem (ha i) (hb i),
smul_mem' := λ a b hb i, I.mul_mem_left (a i) (hb i) }
lemma mem_pi (x : ι → α) : x ∈ I.pi ι ↔ ∀ i, x i ∈ I := iff.rfl
end pi
end ideal
end semiring
section comm_semiring
variables {a b : α}
-- A separate namespace definition is needed because the variables were historically in a different
-- order.
namespace ideal
variables [comm_semiring α] (I : ideal α)
@[simp]
theorem mul_unit_mem_iff_mem {x y : α} (hy : is_unit y) : x * y ∈ I ↔ x ∈ I :=
mul_comm y x ▸ unit_mul_mem_iff_mem I hy
lemma mem_span_singleton {x y : α} :
x ∈ span ({y} : set α) ↔ y ∣ x :=
mem_span_singleton'.trans $ exists_congr $ λ _, by rw [eq_comm, mul_comm]
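/- For instance (a sketch): membership in a principal ideal is exactly divisibility by the
generator. -/
example {x y : α} (h : y ∣ x) : x ∈ span ({y} : set α) :=
mem_span_singleton.2 h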
lemma span_singleton_le_span_singleton {x y : α} :
span ({x} : set α) ≤ span ({y} : set α) ↔ y ∣ x :=
span_le.trans $ singleton_subset_iff.trans mem_span_singleton
lemma span_singleton_eq_span_singleton {α : Type u} [integral_domain α] {x y : α} :
span ({x} : set α) = span ({y} : set α) ↔ associated x y :=
begin
rw [←dvd_dvd_iff_associated, le_antisymm_iff, and_comm],
apply and_congr;
rw span_singleton_le_span_singleton,
end
lemma span_singleton_mul_right_unit {a : α} (h2 : is_unit a) (x : α) :
span ({x * a} : set α) = span {x} :=
begin
apply le_antisymm,
{ rw span_singleton_le_span_singleton, use a},
{ rw span_singleton_le_span_singleton, rw is_unit.mul_right_dvd h2}
end
lemma span_singleton_mul_left_unit {a : α} (h2 : is_unit a) (x : α) :
span ({a * x} : set α) = span {x} := by rw [mul_comm, span_singleton_mul_right_unit h2]
lemma span_singleton_eq_top {x} : span ({x} : set α) = ⊤ ↔ is_unit x :=
by rw [is_unit_iff_dvd_one, ← span_singleton_le_span_singleton, span_singleton_one,
eq_top_iff]
theorem span_singleton_prime {p : α} (hp : p ≠ 0) :
is_prime (span ({p} : set α)) ↔ prime p :=
by simp [is_prime_iff, prime, span_singleton_eq_top, hp, mem_span_singleton]
theorem is_maximal.is_prime {I : ideal α} (H : I.is_maximal) : I.is_prime :=
⟨H.1.1, λ x y hxy, or_iff_not_imp_left.2 $ λ hx, begin
let J : ideal α := submodule.span α (insert x ↑I),
have IJ : I ≤ J := (set.subset.trans (subset_insert _ _) subset_span),
have xJ : x ∈ J := ideal.subset_span (set.mem_insert x I),
cases is_maximal_iff.1 H with _ oJ,
specialize oJ J x IJ hx xJ,
rcases submodule.mem_span_insert.mp oJ with ⟨a, b, h, oe⟩,
obtain (F : y * 1 = y * (a • x + b)) := congr_arg (λ g : α, y * g) oe,
rw [← mul_one y, F, mul_add, mul_comm, smul_eq_mul, mul_assoc],
refine submodule.add_mem I (I.mul_mem_left a hxy) (submodule.smul_mem I y _),
rwa submodule.span_eq at h,
end⟩
@[priority 100] -- see Note [lower instance priority]
instance is_maximal.is_prime' (I : ideal α) : ∀ [H : I.is_maximal], I.is_prime :=
is_maximal.is_prime
lemma span_singleton_lt_span_singleton [integral_domain β] {x y : β} :
span ({x} : set β) < span ({y} : set β) ↔ dvd_not_unit y x :=
by rw [lt_iff_le_not_le, span_singleton_le_span_singleton, span_singleton_le_span_singleton,
dvd_and_not_dvd_iff]
lemma factors_decreasing [integral_domain β] (b₁ b₂ : β) (h₁ : b₁ ≠ 0) (h₂ : ¬ is_unit b₂) :
span ({b₁ * b₂} : set β) < span {b₁} :=
lt_of_le_not_le (ideal.span_le.2 $ singleton_subset_iff.2 $
ideal.mem_span_singleton.2 ⟨b₂, rfl⟩) $ λ h,
h₂ $ is_unit_of_dvd_one _ $ (mul_dvd_mul_iff_left h₁).1 $
by rwa [mul_one, ← ideal.span_singleton_le_span_singleton]
variables (b)
lemma mul_mem_right (h : a ∈ I) : a * b ∈ I := mul_comm b a ▸ I.mul_mem_left b h
variables {b}
lemma pow_mem_of_mem (ha : a ∈ I) (n : ℕ) (hn : 0 < n) : a ^ n ∈ I :=
nat.cases_on n (not.elim dec_trivial) (λ m hm, (pow_succ a m).symm ▸ I.mul_mem_right (a^m) ha) hn
theorem is_prime.mul_mem_iff_mem_or_mem {I : ideal α} (hI : I.is_prime) :
∀ {x y : α}, x * y ∈ I ↔ x ∈ I ∨ y ∈ I :=
λ x y, ⟨hI.mem_or_mem, by { rintro (h | h), exacts [I.mul_mem_right y h, I.mul_mem_left x h] }⟩
theorem is_prime.pow_mem_iff_mem {I : ideal α} (hI : I.is_prime)
{r : α} (n : ℕ) (hn : 0 < n) : r ^ n ∈ I ↔ r ∈ I :=
⟨hI.mem_of_pow_mem n, (λ hr, I.pow_mem_of_mem hr n hn)⟩
end ideal
end comm_semiring
section ring
namespace ideal
variables [ring α] (I : ideal α) {a b : α}
lemma neg_mem_iff : -a ∈ I ↔ a ∈ I := I.neg_mem_iff
lemma add_mem_iff_left : b ∈ I → (a + b ∈ I ↔ a ∈ I) := I.add_mem_iff_left
lemma add_mem_iff_right : a ∈ I → (a + b ∈ I ↔ b ∈ I) := I.add_mem_iff_right
protected lemma sub_mem : a ∈ I → b ∈ I → a - b ∈ I := I.sub_mem
lemma mem_span_insert' {s : set α} {x y} :
x ∈ span (insert y s) ↔ ∃a, x + a * y ∈ span s := submodule.mem_span_insert'
end ideal
end ring
section division_ring
variables {K : Type u} [division_ring K] (I : ideal K)
namespace ideal
/-- All ideals in a division ring are trivial. -/
lemma eq_bot_or_top : I = ⊥ ∨ I = ⊤ :=
begin
rw or_iff_not_imp_right,
change _ ≠ _ → _,
rw ideal.ne_top_iff_one,
intro h1,
rw eq_bot_iff,
intros r hr,
by_cases H : r = 0, {simpa},
simpa [H, h1] using I.mul_mem_left r⁻¹ hr,
end
lemma eq_bot_of_prime [h : I.is_prime] : I = ⊥ :=
or_iff_not_imp_right.mp I.eq_bot_or_top h.1
lemma bot_is_maximal : is_maximal (⊥ : ideal K) :=
⟨⟨λ h, absurd ((eq_top_iff_one (⊤ : ideal K)).mp rfl) (by rw ← h; simp),
λ I hI, or_iff_not_imp_left.mp (eq_bot_or_top I) (ne_of_gt hI)⟩⟩
end ideal
end division_ring
section comm_ring
namespace ideal
variables [comm_ring α] (I : ideal α) {a b : α}
/-- The quotient `R/I` of a ring `R` by an ideal `I`.
The ideal quotient of `I` is defined to equal the quotient of `I` as an `R`-submodule of `R`.
This definition is marked `reducible` so that typeclass instances can be shared between
`ideal.quotient I` and `submodule.quotient I`.
-/
-- Note that at present `ideal` means a left-ideal,
-- so this quotient is only useful in a commutative ring.
-- We should develop quotients by two-sided ideals as well.
@[reducible]
def quotient (I : ideal α) := I.quotient
namespace quotient
variables {I} {x y : α}
instance (I : ideal α) : has_one I.quotient := ⟨submodule.quotient.mk 1⟩
instance (I : ideal α) : has_mul I.quotient :=
⟨λ a b, quotient.lift_on₂' a b (λ a b, submodule.quotient.mk (a * b)) $
λ a₁ a₂ b₁ b₂ h₁ h₂, quot.sound $ begin
have F := I.add_mem (I.mul_mem_left a₂ h₁) (I.mul_mem_right b₁ h₂),
have : a₁ * a₂ - b₁ * b₂ = a₂ * (a₁ - b₁) + (a₂ - b₂) * b₁,
{ rw [mul_sub, sub_mul, sub_add_sub_cancel, mul_comm, mul_comm b₁] },
rw ← this at F,
change _ ∈ _, convert F,
end⟩
instance (I : ideal α) : comm_ring I.quotient :=
{ mul := (*),
one := 1,
mul_assoc := λ a b c, quotient.induction_on₃' a b c $
λ a b c, congr_arg submodule.quotient.mk (mul_assoc a b c),
mul_comm := λ a b, quotient.induction_on₂' a b $
λ a b, congr_arg submodule.quotient.mk (mul_comm a b),
one_mul := λ a, quotient.induction_on' a $
λ a, congr_arg submodule.quotient.mk (one_mul a),
mul_one := λ a, quotient.induction_on' a $
λ a, congr_arg submodule.quotient.mk (mul_one a),
left_distrib := λ a b c, quotient.induction_on₃' a b c $
λ a b c, congr_arg submodule.quotient.mk (left_distrib a b c),
right_distrib := λ a b c, quotient.induction_on₃' a b c $
λ a b c, congr_arg submodule.quotient.mk (right_distrib a b c),
..submodule.quotient.add_comm_group I }
/-- The ring homomorphism from a ring `R` to a quotient ring `R/I`. -/
def mk (I : ideal α) : α →+* I.quotient :=
⟨λ a, submodule.quotient.mk a, rfl, λ _ _, rfl, rfl, λ _ _, rfl⟩
instance : inhabited (quotient I) := ⟨mk I 37⟩
protected theorem eq : mk I x = mk I y ↔ x - y ∈ I := submodule.quotient.eq I
@[simp] theorem mk_eq_mk (x : α) : (submodule.quotient.mk x : quotient I) = mk I x := rfl
lemma eq_zero_iff_mem {I : ideal α} : mk I a = 0 ↔ a ∈ I :=
by conv {to_rhs, rw ← sub_zero a }; exact quotient.eq'
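/- For instance (a sketch): elements of `I` are sent to `0` in the quotient. -/
example (h : a ∈ I) : mk I a = 0 :=
eq_zero_iff_mem.2 h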
theorem zero_eq_one_iff {I : ideal α} : (0 : I.quotient) = 1 ↔ I = ⊤ :=
eq_comm.trans $ eq_zero_iff_mem.trans (eq_top_iff_one _).symm
theorem zero_ne_one_iff {I : ideal α} : (0 : I.quotient) ≠ 1 ↔ I ≠ ⊤ :=
not_congr zero_eq_one_iff
protected theorem nontrivial {I : ideal α} (hI : I ≠ ⊤) : nontrivial I.quotient :=
⟨⟨0, 1, zero_ne_one_iff.2 hI⟩⟩
lemma mk_surjective : function.surjective (mk I) :=
λ y, quotient.induction_on' y (λ x, exists.intro x rfl)
/-- If `I` is an ideal of a commutative ring `R`, if `q : R → R/I` is the quotient map, and if
`s ⊆ R` is a subset, then `q⁻¹(q(s)) = ⋃ᵢ(i + s)`, the union running over all `i ∈ I`. -/
lemma quotient_ring_saturate (I : ideal α) (s : set α) :
mk I ⁻¹' (mk I '' s) = (⋃ x : I, (λ y, x.1 + y) '' s) :=
begin
ext x,
simp only [mem_preimage, mem_image, mem_Union, ideal.quotient.eq],
exact ⟨λ ⟨a, a_in, h⟩, ⟨⟨_, I.neg_mem h⟩, a, a_in, by simp⟩,
λ ⟨⟨i, hi⟩, a, ha, eq⟩,
⟨a, ha, by rw [← eq, sub_add_eq_sub_sub_swap, sub_self, zero_sub]; exact I.neg_mem hi⟩⟩
end
instance (I : ideal α) [hI : I.is_prime] : integral_domain I.quotient :=
{ eq_zero_or_eq_zero_of_mul_eq_zero := λ a b,
quotient.induction_on₂' a b $ λ a b hab,
(hI.mem_or_mem (eq_zero_iff_mem.1 hab)).elim
(or.inl ∘ eq_zero_iff_mem.2)
(or.inr ∘ eq_zero_iff_mem.2),
.. quotient.comm_ring I,
.. quotient.nontrivial hI.1 }
lemma is_integral_domain_iff_prime (I : ideal α) : is_integral_domain I.quotient ↔ I.is_prime :=
⟨ λ ⟨h1, h2, h3⟩, ⟨zero_ne_one_iff.1 $ @zero_ne_one _ _ ⟨h1⟩, λ x y h,
by { simp only [←eq_zero_iff_mem, (mk I).map_mul] at ⊢ h, exact h3 _ _ h}⟩,
λ h, by exactI integral_domain.to_is_integral_domain I.quotient⟩
lemma exists_inv {I : ideal α} [hI : I.is_maximal] :
∀ {a : I.quotient}, a ≠ 0 → ∃ b : I.quotient, a * b = 1 :=
begin
rintro ⟨a⟩ h,
rcases hI.exists_inv (mt eq_zero_iff_mem.2 h) with ⟨b, c, hc, abc⟩,
rw [mul_comm] at abc,
refine ⟨mk _ b, quot.sound _⟩, --quot.sound hb
rw ← eq_sub_iff_add_eq' at abc,
rw [abc, ← neg_mem_iff, neg_sub] at hc,
convert hc,
end
/-- The quotient of a commutative ring by a maximal ideal is a field. This is a `def` rather than
an `instance`, since users will have computable inverses in some applications. -/
protected noncomputable def field (I : ideal α) [hI : I.is_maximal] : field I.quotient :=
{ inv := λ a, if ha : a = 0 then 0 else classical.some (exists_inv ha),
mul_inv_cancel := λ a (ha : a ≠ 0), show a * dite _ _ _ = _,
by rw dif_neg ha;
exact classical.some_spec (exists_inv ha),
inv_zero := dif_pos rfl,
..quotient.integral_domain I }
/-- If the quotient by an ideal is a field, then the ideal is maximal. -/
theorem maximal_of_is_field (I : ideal α)
(hqf : is_field I.quotient) : I.is_maximal :=
begin
apply ideal.is_maximal_iff.2,
split,
{ intro h,
rcases hqf.exists_pair_ne with ⟨⟨x⟩, ⟨y⟩, hxy⟩,
exact hxy (ideal.quotient.eq.2 (mul_one (x - y) ▸ I.mul_mem_left _ h)) },
{ intros J x hIJ hxnI hxJ,
rcases hqf.mul_inv_cancel (mt ideal.quotient.eq_zero_iff_mem.1 hxnI) with ⟨⟨y⟩, hy⟩,
rw [← zero_add (1 : α), ← sub_self (x * y), sub_add],
refine J.sub_mem (J.mul_mem_right _ hxJ) (hIJ (ideal.quotient.eq.1 hy)) }
end
/-- The quotient of a ring by an ideal is a field iff the ideal is maximal. -/
theorem maximal_ideal_iff_is_field_quotient (I : ideal α) :
I.is_maximal ↔ is_field I.quotient :=
⟨λ h, @field.to_is_field I.quotient (@ideal.quotient.field _ _ I h),
λ h, maximal_of_is_field I h⟩
variable [comm_ring β]
/-- Given a ring homomorphism `f : α →+* β` sending all elements of an ideal to zero,
lift it to the quotient by this ideal. -/
def lift (S : ideal α) (f : α →+* β) (H : ∀ (a : α), a ∈ S → f a = 0) :
quotient S →+* β :=
{ to_fun := λ x, quotient.lift_on' x f $ λ (a b) (h : _ ∈ _),
eq_of_sub_eq_zero $ by rw [← f.map_sub, H _ h],
map_one' := f.map_one,
map_zero' := f.map_zero,
map_add' := λ a₁ a₂, quotient.induction_on₂' a₁ a₂ f.map_add,
map_mul' := λ a₁ a₂, quotient.induction_on₂' a₁ a₂ f.map_mul }
@[simp] lemma lift_mk (S : ideal α) (f : α →+* β) (H : ∀ (a : α), a ∈ S → f a = 0) :
lift S f H (mk S a) = f a := rfl
end quotient
/-- Quotienting by equal ideals gives equivalent rings.
See also `submodule.quot_equiv_of_eq`.
-/
def quot_equiv_of_eq {R : Type*} [comm_ring R] {I J : ideal R} (h : I = J) :
I.quotient ≃+* J.quotient :=
{ map_mul' := by { rintro ⟨x⟩ ⟨y⟩, refl },
.. submodule.quot_equiv_of_eq I J h }
section pi
variables (ι : Type v)
/-- `R^n/I^n` is an `R/I`-module. -/
instance module_pi : module (I.quotient) (I.pi ι).quotient :=
begin
refine { smul := λ c m, quotient.lift_on₂' c m (λ r m, submodule.quotient.mk $ r • m) _, .. },
{ intros c₁ m₁ c₂ m₂ hc hm,
change c₁ - c₂ ∈ I at hc,
change m₁ - m₂ ∈ (I.pi ι) at hm,
apply ideal.quotient.eq.2,
have : c₁ • (m₂ - m₁) ∈ I.pi ι,
{ rw ideal.mem_pi,
intro i,
simp only [smul_eq_mul, pi.smul_apply, pi.sub_apply],
apply ideal.mul_mem_left,
rw ←ideal.neg_mem_iff,
simpa only [neg_sub] using hm i },
rw [←ideal.add_mem_iff_left (I.pi ι) this, sub_eq_add_neg, add_comm, ←add_assoc, ←smul_add,
sub_add_cancel, ←sub_eq_add_neg, ←sub_smul, ideal.mem_pi],
exact λ i, I.mul_mem_right _ hc },
all_goals { rintro ⟨a⟩ ⟨b⟩ ⟨c⟩ <|> rintro ⟨a⟩,
simp only [(•), submodule.quotient.quot_mk_eq_mk, ideal.quotient.mk_eq_mk],
change ideal.quotient.mk _ _ = ideal.quotient.mk _ _,
congr' with i, simp [mul_assoc, mul_add, add_mul] }
end
/-- `R^n/I^n` is isomorphic to `(R/I)^n` as an `R/I`-module. -/
noncomputable def pi_quot_equiv : (I.pi ι).quotient ≃ₗ[I.quotient] (ι → I.quotient) :=
{ to_fun := λ x, quotient.lift_on' x (λ f i, ideal.quotient.mk I (f i)) $
λ a b hab, funext (λ i, ideal.quotient.eq.2 (hab i)),
map_add' := by { rintros ⟨_⟩ ⟨_⟩, refl },
map_smul' := by { rintros ⟨_⟩ ⟨_⟩, refl },
inv_fun := λ x, ideal.quotient.mk (I.pi ι) $ λ i, quotient.out' (x i),
left_inv :=
begin
rintro ⟨x⟩,
exact ideal.quotient.eq.2 (λ i, ideal.quotient.eq.1 (quotient.out_eq' _))
end,
right_inv :=
begin
intro x,
ext i,
obtain ⟨r, hr⟩ := @quot.exists_rep _ _ (x i),
simp_rw ←hr,
convert quotient.out_eq' _
end }
/-- If `f : R^n → R^m` is an `R`-linear map and `I ⊆ R` is an ideal, then the image of `I^n` is
contained in `I^m`. -/
lemma map_pi {ι} [fintype ι] {ι' : Type w} (x : ι → α) (hi : ∀ i, x i ∈ I)
(f : (ι → α) →ₗ[α] (ι' → α)) (i : ι') : f x i ∈ I :=
begin
rw pi_eq_sum_univ x,
simp only [finset.sum_apply, smul_eq_mul, linear_map.map_sum, pi.smul_apply, linear_map.map_smul],
exact I.sum_mem (λ j hj, I.mul_mem_right _ (hi j))
end
end pi
end ideal
end comm_ring
namespace ring
variables {R : Type*} [comm_ring R]
lemma not_is_field_of_subsingleton {R : Type*} [ring R] [subsingleton R] : ¬ is_field R :=
λ ⟨⟨x, y, hxy⟩, _, _⟩, hxy (subsingleton.elim x y)
lemma exists_not_is_unit_of_not_is_field [nontrivial R] (hf : ¬ is_field R) :
∃ x ≠ (0 : R), ¬ is_unit x :=
begin
have : ¬ _ := λ h, hf ⟨exists_pair_ne R, mul_comm, h⟩,
simp_rw is_unit_iff_exists_inv,
push_neg at ⊢ this,
obtain ⟨x, hx, not_unit⟩ := this,
exact ⟨x, hx, not_unit⟩
end
lemma not_is_field_iff_exists_ideal_bot_lt_and_lt_top [nontrivial R] :
¬ is_field R ↔ ∃ I : ideal R, ⊥ < I ∧ I < ⊤ :=
begin
split,
{ intro h,
obtain ⟨x, nz, nu⟩ := exists_not_is_unit_of_not_is_field h,
use ideal.span {x},
rw [bot_lt_iff_ne_bot, lt_top_iff_ne_top],
exact ⟨mt ideal.span_singleton_eq_bot.mp nz, mt ideal.span_singleton_eq_top.mp nu⟩ },
{ rintros ⟨I, bot_lt, lt_top⟩ hf,
obtain ⟨x, mem, ne_zero⟩ := set_like.exists_of_lt bot_lt,
rw submodule.mem_bot at ne_zero,
obtain ⟨y, hy⟩ := hf.mul_inv_cancel ne_zero,
rw [lt_top_iff_ne_top, ne.def, ideal.eq_top_iff_one, ← hy] at lt_top,
exact lt_top (I.mul_mem_right _ mem), }
end
lemma not_is_field_iff_exists_prime [nontrivial R] :
¬ is_field R ↔ ∃ p : ideal R, p ≠ ⊥ ∧ p.is_prime :=
not_is_field_iff_exists_ideal_bot_lt_and_lt_top.trans
⟨λ ⟨I, bot_lt, lt_top⟩, let ⟨p, hp, le_p⟩ := I.exists_le_maximal (lt_top_iff_ne_top.mp lt_top) in
⟨p, bot_lt_iff_ne_bot.mp (lt_of_lt_of_le bot_lt le_p), hp.is_prime⟩,
λ ⟨p, ne_bot, prime⟩, ⟨p, bot_lt_iff_ne_bot.mpr ne_bot, lt_top_iff_ne_top.mpr prime.1⟩⟩
/-- When a ring is not a field, the maximal ideals are nontrivial. -/
lemma ne_bot_of_is_maximal_of_not_is_field [nontrivial R] {M : ideal R} (max : M.is_maximal)
(not_field : ¬ is_field R) : M ≠ ⊥ :=
begin
rintros h,
rw h at max,
rcases max with ⟨⟨h1, h2⟩⟩,
obtain ⟨I, hIbot, hItop⟩ := not_is_field_iff_exists_ideal_bot_lt_and_lt_top.mp not_field,
exact ne_of_lt hItop (h2 I hIbot),
end
end ring
namespace ideal
/-- Maximal ideals in a non-field are nontrivial. -/
variables {R : Type u} [comm_ring R] [nontrivial R]
lemma bot_lt_of_maximal (M : ideal R) [hm : M.is_maximal] (non_field : ¬ is_field R) : ⊥ < M :=
begin
rcases (ring.not_is_field_iff_exists_ideal_bot_lt_and_lt_top.1 non_field)
with ⟨I, Ibot, Itop⟩,
split, finish,
intro mle,
apply @irrefl _ (<) _ (⊤ : ideal R),
have : M = ⊥ := eq_bot_iff.mpr mle,
rw this at *,
rwa hm.1.2 I Ibot at Itop,
end
end ideal
variables {a b : α}
/-- The set of non-invertible elements of a monoid. -/
def nonunits (α : Type u) [monoid α] : set α := { a | ¬is_unit a }
@[simp] theorem mem_nonunits_iff [monoid α] : a ∈ nonunits α ↔ ¬ is_unit a := iff.rfl
theorem mul_mem_nonunits_right [comm_monoid α] :
b ∈ nonunits α → a * b ∈ nonunits α :=
mt is_unit_of_mul_is_unit_right
theorem mul_mem_nonunits_left [comm_monoid α] :
a ∈ nonunits α → a * b ∈ nonunits α :=
mt is_unit_of_mul_is_unit_left
theorem zero_mem_nonunits [semiring α] : 0 ∈ nonunits α ↔ (0:α) ≠ 1 :=
not_congr is_unit_zero_iff
@[simp] theorem one_not_mem_nonunits [monoid α] : (1:α) ∉ nonunits α :=
not_not_intro is_unit_one
theorem coe_subset_nonunits [semiring α] {I : ideal α} (h : I ≠ ⊤) :
(I : set α) ⊆ nonunits α :=
λ x hx hu, h $ I.eq_top_of_is_unit_mem hx hu
lemma exists_max_ideal_of_mem_nonunits [comm_semiring α] (h : a ∈ nonunits α) :
∃ I : ideal α, I.is_maximal ∧ a ∈ I :=
begin
have : ideal.span ({a} : set α) ≠ ⊤,
{ intro H, rw ideal.span_singleton_eq_top at H, contradiction },
rcases ideal.exists_le_maximal _ this with ⟨I, Imax, H⟩,
use [I, Imax], apply H, apply ideal.subset_span, exact set.mem_singleton a
end
/-- A commutative ring is local if it has a unique maximal ideal. Note that
`local_ring` is a predicate. -/
class local_ring (α : Type u) [comm_ring α] extends nontrivial α : Prop :=
(is_local : ∀ (a : α), (is_unit a) ∨ (is_unit (1 - a)))
namespace local_ring
variables [comm_ring α] [local_ring α]
lemma is_unit_or_is_unit_one_sub_self (a : α) :
(is_unit a) ∨ (is_unit (1 - a)) :=
is_local a
lemma is_unit_of_mem_nonunits_one_sub_self (a : α) (h : (1 - a) ∈ nonunits α) :
is_unit a :=
or_iff_not_imp_right.1 (is_local a) h
lemma is_unit_one_sub_self_of_mem_nonunits (a : α) (h : a ∈ nonunits α) :
is_unit (1 - a) :=
or_iff_not_imp_left.1 (is_local a) h
lemma nonunits_add {x y} (hx : x ∈ nonunits α) (hy : y ∈ nonunits α) :
x + y ∈ nonunits α :=
begin
rintros ⟨u, hu⟩,
apply hy,
suffices : is_unit ((↑u⁻¹ : α) * y),
{ rcases this with ⟨s, hs⟩,
use u * s,
convert congr_arg (λ z, (u : α) * z) hs,
rw ← mul_assoc, simp },
rw show (↑u⁻¹ * y) = (1 - ↑u⁻¹ * x),
{ rw eq_sub_iff_add_eq,
replace hu := congr_arg (λ z, (↑u⁻¹ : α) * z) hu.symm,
simpa [mul_add, add_comm] using hu },
apply is_unit_one_sub_self_of_mem_nonunits,
exact mul_mem_nonunits_right hx
end
variable (α)
/-- The ideal of elements that are not units. -/
def maximal_ideal : ideal α :=
{ carrier := nonunits α,
zero_mem' := zero_mem_nonunits.2 $ zero_ne_one,
add_mem' := λ x y hx hy, nonunits_add hx hy,
smul_mem' := λ a x, mul_mem_nonunits_right }
instance maximal_ideal.is_maximal : (maximal_ideal α).is_maximal :=
begin
rw ideal.is_maximal_iff,
split,
{ intro h, apply h, exact is_unit_one },
{ intros I x hI hx H,
erw not_not at hx,
rcases hx with ⟨u,rfl⟩,
simpa using I.mul_mem_left ↑u⁻¹ H }
end
lemma maximal_ideal_unique :
∃! I : ideal α, I.is_maximal :=
⟨maximal_ideal α, maximal_ideal.is_maximal α,
λ I hI, hI.eq_of_le (maximal_ideal.is_maximal α).1.1 $
λ x hx, hI.1.1 ∘ I.eq_top_of_is_unit_mem hx⟩
variable {α}
lemma eq_maximal_ideal {I : ideal α} (hI : I.is_maximal) : I = maximal_ideal α :=
unique_of_exists_unique (maximal_ideal_unique α) hI $ maximal_ideal.is_maximal α
lemma le_maximal_ideal {J : ideal α} (hJ : J ≠ ⊤) : J ≤ maximal_ideal α :=
begin
rcases ideal.exists_le_maximal J hJ with ⟨M, hM1, hM2⟩,
rwa ←eq_maximal_ideal hM1
end
@[simp] lemma mem_maximal_ideal (x) :
x ∈ maximal_ideal α ↔ x ∈ nonunits α := iff.rfl
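/- For instance (a sketch): no unit belongs to the maximal ideal. -/
example {x : α} (hx : is_unit x) : x ∉ maximal_ideal α :=
λ h, mem_nonunits_iff.1 ((mem_maximal_ideal x).1 h) hx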
end local_ring
lemma local_of_nonunits_ideal [comm_ring α] (hnze : (0:α) ≠ 1)
(h : ∀ x y ∈ nonunits α, x + y ∈ nonunits α) : local_ring α :=
{ exists_pair_ne := ⟨0, 1, hnze⟩,
is_local := λ x, or_iff_not_imp_left.mpr $ λ hx,
begin
by_contra H,
apply h _ _ hx H,
simp [-sub_eq_add_neg, add_sub_cancel'_right]
end }
lemma local_of_unique_max_ideal [comm_ring α] (h : ∃! I : ideal α, I.is_maximal) :
local_ring α :=
local_of_nonunits_ideal
(let ⟨I, Imax, _⟩ := h in (λ (H : 0 = 1), Imax.1.1 $ I.eq_top_iff_one.2 $ H ▸ I.zero_mem))
$ λ x y hx hy H,
let ⟨I, Imax, Iuniq⟩ := h in
let ⟨Ix, Ixmax, Hx⟩ := exists_max_ideal_of_mem_nonunits hx in
let ⟨Iy, Iymax, Hy⟩ := exists_max_ideal_of_mem_nonunits hy in
have xmemI : x ∈ I, from ((Iuniq Ix Ixmax) ▸ Hx),
have ymemI : y ∈ I, from ((Iuniq Iy Iymax) ▸ Hy),
Imax.1.1 $ I.eq_top_of_is_unit_mem (I.add_mem xmemI ymemI) H
lemma local_of_unique_nonzero_prime (R : Type u) [comm_ring R]
(h : ∃! P : ideal R, P ≠ ⊥ ∧ ideal.is_prime P) : local_ring R :=
local_of_unique_max_ideal begin
rcases h with ⟨P, ⟨hPnonzero, hPnot_top, _⟩, hPunique⟩,
refine ⟨P, ⟨⟨hPnot_top, _⟩⟩, λ M hM, hPunique _ ⟨_, ideal.is_maximal.is_prime hM⟩⟩,
{ refine ideal.maximal_of_no_maximal (λ M hPM hM, ne_of_lt hPM _),
exact (hPunique _ ⟨ne_bot_of_gt hPM, ideal.is_maximal.is_prime hM⟩).symm },
{ rintro rfl,
exact hPnot_top (hM.1.2 P (bot_lt_iff_ne_bot.2 hPnonzero)) },
end
lemma local_of_surjective {A B : Type*} [comm_ring A] [local_ring A] [comm_ring B] [nontrivial B]
(f : A →+* B) (hf : function.surjective f) :
local_ring B :=
{ is_local :=
begin
intros b,
obtain ⟨a, rfl⟩ := hf b,
apply (local_ring.is_unit_or_is_unit_one_sub_self a).imp f.is_unit_map _,
rw [← f.map_one, ← f.map_sub],
apply f.is_unit_map,
end,
.. ‹nontrivial B› }
/-- A local ring homomorphism is a homomorphism between local rings
such that the image of the maximal ideal of the source is contained within
the maximal ideal of the target. -/
class is_local_ring_hom [semiring α] [semiring β] (f : α →+* β) : Prop :=
(map_nonunit : ∀ a, is_unit (f a) → is_unit a)
instance is_local_ring_hom_id (A : Type*) [semiring A] : is_local_ring_hom (ring_hom.id A) :=
{ map_nonunit := λ a, id }
@[simp] lemma is_unit_map_iff {A B : Type*} [semiring A] [semiring B] (f : A →+* B)
[is_local_ring_hom f] (a) :
is_unit (f a) ↔ is_unit a :=
⟨is_local_ring_hom.map_nonunit a, f.is_unit_map⟩
instance is_local_ring_hom_comp {A B C : Type*} [semiring A] [semiring B] [semiring C]
(g : B →+* C) (f : A →+* B) [is_local_ring_hom g] [is_local_ring_hom f] :
is_local_ring_hom (g.comp f) :=
{ map_nonunit := λ a, is_local_ring_hom.map_nonunit a ∘ is_local_ring_hom.map_nonunit (f a) }
@[simp] lemma is_unit_of_map_unit [semiring α] [semiring β] (f : α →+* β) [is_local_ring_hom f]
(a) (h : is_unit (f a)) : is_unit a :=
is_local_ring_hom.map_nonunit a h
theorem of_irreducible_map [semiring α] [semiring β] (f : α →+* β) [h : is_local_ring_hom f] {x : α}
(hfx : irreducible (f x)) : irreducible x :=
⟨λ h, hfx.not_unit $ is_unit.map f.to_monoid_hom h, λ p q hx, let ⟨H⟩ := h in
or.imp (H p) (H q) $ hfx.is_unit_or_is_unit $ f.map_mul p q ▸ congr_arg f hx⟩
section
open local_ring
variables [comm_ring α] [local_ring α] [comm_ring β] [local_ring β]
variables (f : α →+* β) [is_local_ring_hom f]
lemma map_nonunit (a) (h : a ∈ maximal_ideal α) : f a ∈ maximal_ideal β :=
λ H, h $ is_unit_of_map_unit f a H
end
namespace local_ring
variables [comm_ring α] [local_ring α] [comm_ring β] [local_ring β]
variable (α)
/-- The residue field of a local ring is the quotient of the ring by its maximal ideal. -/
def residue_field := (maximal_ideal α).quotient
noncomputable instance residue_field.field : field (residue_field α) :=
ideal.quotient.field (maximal_ideal α)
noncomputable instance : inhabited (residue_field α) := ⟨37⟩
/-- The quotient map from a local ring to its residue field. -/
def residue : α →+* (residue_field α) :=
ideal.quotient.mk _
namespace residue_field
variables {α β}
/-- The map on residue fields induced by a local homomorphism between local rings -/
noncomputable def map (f : α →+* β) [is_local_ring_hom f] :
residue_field α →+* residue_field β :=
ideal.quotient.lift (maximal_ideal α) ((ideal.quotient.mk _).comp f) $
λ a ha,
begin
erw ideal.quotient.eq_zero_iff_mem,
exact map_nonunit f a ha
end
end residue_field
end local_ring
namespace field
variables [field α]
@[priority 100] -- see Note [lower instance priority]
instance : local_ring α :=
{ is_local := λ a,
if h : a = 0
then or.inr (by rw [h, sub_zero]; exact is_unit_one)
else or.inl $ is_unit.mk0 a h }
end field
blob_id: 5e9beeb27697b67fd147999363db3b566e550c79 | directory_id: 53f7fd7840fe4979de24f9611c89f2ee1b4fc5dc | path: /src/tactic.lean | content_id: 9e589ab6255482e5d48f1dcfe6693a8b7a70582e
detected_licenses: [] | license_type: no_license | repo_name: cipher1024/search-trees | snapshot_id: 6d1925283c6ce47d84bdf41ca40d8317ff2e64d7 | revision_id: 0e0ea0ee59f0b0499ebad1ef6f34f09ec9666cde | branch_name: refs/heads/master
visit_date: 1,673,300,158,759 | revision_date: 1,603,984,112,000 | committer_date: 1,603,984,112,000 | github_id: 308,132,998 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Lean
is_vendor: false | is_generated: false | length_bytes: 8,678 | extension: lean
content:
import tactic.default
universes u v
-- #check @option.elim
-- def option.elim {α β} (x : β) (f : α → β) : option α → β
-- | none := x
-- | (some y) := f y
-- #exit
namespace list
def mfilter_map {m} [applicative m] {α β} (f : α → m (option β)) : list α → m (list.{v} β)
| [] := pure []
| (x :: xs) :=
(λ a, option.elim a id (::)) <$> f x <*> mfilter_map xs
def mfilter_map' {m} [applicative m] [alternative m] {α β} (f : α → m β) : list α → m (list.{v} β)
| [] := pure []
| (x :: xs) :=
((::) <$> f x <|> pure id) <*> mfilter_map' xs
end list
namespace tactic
open native
meta def rename_many' (renames : name_map name) (strict := tt) (use_unique_names := ff)
: tactic (list (name × expr)) :=
do let hyp_name : expr → name :=
if use_unique_names then expr.local_uniq_name else expr.local_pp_name,
ctx ← revertible_local_context,
-- The part of the context after (but including) the first hypthesis that
-- must be renamed.
let ctx_suffix := ctx.drop_while (λ h, (renames.find $ hyp_name h).is_none),
when strict $ do {
let ctx_names := rb_map.set_of_list (ctx_suffix.map hyp_name),
let invalid_renames :=
(renames.to_list.map prod.fst).filter (λ h, ¬ ctx_names.contains h),
when ¬ invalid_renames.empty $ fail $ format.join
[ "Cannot rename these hypotheses:\n"
, format.join $ (invalid_renames.map to_fmt).intersperse ", "
, format.line
, "This is because these hypotheses either do not occur in the\n"
, "context or they occur before a frozen local instance.\n"
, "In the latter case, try `tactic.unfreeze_local_instances`."
]
},
-- The new names for all hypotheses in ctx_suffix.
let new_names :=
ctx_suffix.map $ λ h,
(renames.find $ hyp_name h).get_or_else h.local_pp_name,
revert_lst ctx_suffix,
-- trace_state,
xs ← intro_lst new_names,
xs' ← xs.mmap infer_type,
-- trace $ ctx_suffix.zip $ xs.zip xs',
pure $ (ctx_suffix.map expr.local_uniq_name).zip xs
meta def find_hyp (pat : pexpr) (f : expr → tactic unit) : tactic unit := do
ls ← local_context,
pat ← pexpr_to_pattern pat,
ls.mfirst $ λ h, do
t ← infer_type h,
match_pattern pat t,
f h
meta def find_all_hyps (pat : pexpr) (f : expr → tactic unit) : tactic unit := do
ls ← local_context,
pat ← pexpr_to_pattern pat,
ls.mmap' $ λ h, try $ do
t ← infer_type h,
match_pattern pat t,
f h
namespace interactive
setup_tactic_parser
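/-- `find_hyp h := pat then { tac }` finds the first hypothesis whose type matches the pattern
`pat`, temporarily renames it to `h`, runs `tac`, and then restores the original name. -/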
meta def find_hyp (id : parse ident) (_ : parse (tk ":=")) (pat : parse texpr)
(_ : parse (tk "then")) (tac : itactic) : tactic unit := do
ls ← local_context,
pat ← pexpr_to_pattern pat,
ls.mfirst $ λ h, do
t ← infer_type h,
match_pattern pat t,
rename_many (native.rb_map.of_list [(h.local_uniq_name, id)]) tt tt,
tac,
rename_many $ native.rb_map.of_list [(id, h.local_pp_name)]
-- #exit
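/-- `find_all_hyps h := pat then { tac }` is the variant of `find_hyp` that visits every
hypothesis matching `pat` in turn: each one is renamed to `h`, `tac` is run, and the original
name is restored afterwards. -/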
meta def find_all_hyps (id : parse ident) (_ : parse (tk ":=")) (pat : parse texpr)
(_ : parse (tk "then")) (tac : itactic) : tactic unit := do
ls ← local_context,
pat ← pexpr_to_pattern pat,
ls.reverse.mmap' (λ h,
-- trace σ,
-- let h := h.instantiate_locals $ list.reverse σ,
try_or_report_error $ do {
t ← infer_type h,
match_pattern pat t,
-- trace!"{h} : {t}",
n ← tactic.revert h,
intro id,
intron $ n-1,
-- xs ← rename_many' (native.rb_map.of_list [(h.local_uniq_name, id)]) tt tt,
tac,
h' ← get_local id,
n ← tactic.revert h',
intro h.local_pp_name,
intron $ n-1,
skip }),
skip
meta def match_le_or_lt : expr → option (expr × expr)
| `(%%x < %%y) := pure (x, y)
| `(%%x ≤ %%y) := pure (x, y)
| `(%%x > %%y) := pure (y, x)
| `(%%x ≥ %%y) := pure (y, x)
| _ := none
meta def match_le : expr → option (expr × expr)
| `(%%x ≤ %%y) := pure (x, y)
| `(%%x ≥ %%y) := pure (y, x)
| _ := none
meta def match_lt : expr → option (expr × expr)
| `(%%x < %%y) := pure (x, y)
| `(%%x > %%y) := pure (y, x)
| _ := none
#print list.filter_map
inductive edge
| lt | le
instance : has_to_string edge :=
⟨ λ e, match e with
| edge.lt := "lt"
| edge.le := "le"
end ⟩
meta instance edge.has_to_format : has_to_format edge :=
⟨ λ e, to_fmt $ to_string e ⟩
meta instance rb_lmap.has_to_format {α β} [has_to_tactic_format α] [has_to_tactic_format β] : has_to_tactic_format (native.rb_lmap α β) :=
by delta native.rb_lmap; apply_instance
open native
meta def graph := (native.rb_lmap expr (expr × edge × expr))
meta instance graph.has_to_format : has_to_tactic_format graph :=
by delta graph; apply_instance
meta def dfs_trans' (g : graph) (r : ref expr_set) (v : expr) : edge → expr → expr → tactic expr
| e x h := do
x ← instantiate_mvars x,
-- trace!"visit {x}, going to {v}",
vs ← read_ref r,
-- trace!"seen: {vs}",
if vs.contains x then failed
else if v = x then pure h
else do
write_ref r $ vs.insert x,
-- trace (g.find x),
(g.find x).mfirst $ λ ⟨h',e',y⟩, do
-- trace!"try: {x}, {y}",
(e,h) ← match e, e' with
| edge.lt, edge.lt := prod.mk edge.lt <$> mk_app ``lt_trans [h, h']
| edge.lt, edge.le := prod.mk edge.lt <$> mk_app ``lt_of_lt_of_le [h, h']
| edge.le, edge.lt := prod.mk edge.lt <$> mk_app ``lt_of_le_of_lt [h, h']
| edge.le, edge.le := prod.mk edge.le <$> mk_app ``le_trans [h, h']
end,
-- trace"ok",
dfs_trans' e y h
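/-- Search the hypothesis graph `g` for a chain of `≤`/`<` facts connecting `v` to `v'`,
composing the proofs along the path with `le_trans`, `lt_trans` and the mixed variants; the
resulting proof establishes `v ≤ v'` or `v < v'`. -/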
meta def dfs_trans (g : graph) (v v' : expr) : tactic expr :=
using_new_ref mk_expr_set $ λ r, do
h ← mk_mapp ``le_refl [none, none, v],
dfs_trans' g r v' edge.le v h
#check cc_state
lemma lt_of_eq_of_lt_of_eq {α} {R : α → α → Prop} {x x' y' y : α} (h₀ : x = x') (h₁ : R x' y') (h₂ : y' = y) :
R x y := by subst_vars; exact h₁
-- lemma t_of_eq_of_lt_of_eq {α} [has_lt α] {x x' y' y : α} (h₀ : x = x') (h₁ : x' < y') (h₂ : y' = y) :
-- x < y := by subst_vars; exact h₁
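/-- `chain_trans` tries to close a goal of the form `x ≤ y`, `x < y`, `x ≥ y` or `x > y` by
chaining the `≤`/`<` hypotheses of the local context (with endpoints identified up to the
equalities known to congruence closure), using `dfs_trans` to find a suitable path. -/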
meta def chain_trans : tactic unit := do
tgt ← target,
(x, y) ← match_le_or_lt tgt,
α ← infer_type x,
s ← cc_state.mk_using_hs,
ls ← local_context >>= list.mfilter_map'
(λ h, do t ← infer_type h,
do { (e, x, y) ← prod.mk edge.le <$> match_le t <|>
prod.mk edge.lt <$> match_lt t,
let x' := s.root x,
let y' := s.root y,
x_pr ← s.eqv_proof x' x,
y_pr ← s.eqv_proof y y',
h' ← mk_app ``lt_of_eq_of_lt_of_eq [x_pr, h, y_pr],
-- trace!"h : {infer_type h}",
-- trace!"h' : {infer_type h'}",
infer_type x >>= is_def_eq α,
pure [(h', e, x', y')] }),
-- <|>
-- do { (x, y) ← match_eq t,
-- infer_type x >>= is_def_eq α,
-- h₀ ← mk_eq_symm h >>= mk_app ``le_of_eq ∘ list.ret,
-- h₁ ← mk_app ``le_of_eq [h],
-- pure [(h₀, edge.le, y, x), (h₁, edge.le, x, y)] }),
let m := list.foldl (λ (m : graph) (e : _ × _ × _ × _),
let ⟨pr,e,x,y⟩ := e in
m.insert x (pr,e,y)) (native.rb_lmap.mk expr (expr × edge × expr)) ls.join,
x ← whnf x,
y ← whnf y,
pr ← dfs_trans m x y,
tactic.apply pr <|>
mk_app ``le_of_lt [pr] >>= tactic.apply,
skip
end interactive
setup_tactic_parser
precedence `=?`:0
import_private set_cases_tags
import_private cases_postprocess
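/-- `trichotomy x =? y with h` (the `with h` part is optional and defaults to `h`) case-splits on
`cmp x y` for `x`, `y` in a `linear_order`, producing three goals in which the hypothesis `h`
asserts `x < y`, `x = y` and `x > y` respectively. -/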
meta def interactive.trichotomy (x : parse texpr) (_ : parse $ tk "=?") (y : parse texpr) (hyp : parse $ tk "with" *> ident <|> pure `h) : tactic unit := do
x ← to_expr x,
y ← to_expr y,
α ← infer_type x,
inst ← mk_app ``linear_order [α] >>= mk_instance,
h' ← mk_mapp ``cmp_compares [α, inst, x, y] >>= note hyp none,
e ← mk_app ``cmp [x, y],
n ← revert_kdependencies e,
x ← get_unused_name,
tactic.generalize e x,
h ← tactic.intro1,
s ← simp_lemmas.mk.add_simp ``ordering.compares,
in_tag ← get_main_tag,
focus1 $ do
hs ← cases_core h,
set_cases_tags in_tag $ cases_postprocess hs,
gs ← get_goals,
gs ← (gs.zip hs).mmap $ λ ⟨g,h,_,σ⟩, do
{ set_goals [g],
interactive.propagate_tags $ do
{ dsimp_target (some s) [] { fail_if_unchanged := ff },
intron_no_renames (n) },
get_goals },
set_goals gs.join
end tactic
example {x y : ℤ} (h : x ≤ y) (h : x < y) : true :=
begin
find_hyp hh := x ≤ _ then { replace hh : x = y, admit },
trivial
end
example {x y z w u v : ℕ} {a b : ℤ} (h₀ : x ≤ y) (h₁ : y < z) (h : y < u) (h₂ : z < w) (h₃ : w = u) (h₄ : u < v) : x ≤ u :=
begin
chain_trans,
end
blob_id: 6f9e1e920c1cc290f93370f06112706dcb3bce5c | directory_id: 4727251e0cd73359b15b664c3170e5d754078599 | path: /src/linear_algebra/matrix/to_lin.lean | content_id: bfceaddb01a3af4208933931b54f3afd943bf862
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: Vierkantor/mathlib | snapshot_id: 0ea59ac32a3a43c93c44d70f441c4ee810ccceca | revision_id: 83bc3b9ce9b13910b57bda6b56222495ebd31c2f | branch_name: refs/heads/master
visit_date: 1,658,323,012,449 | revision_date: 1,652,256,003,000 | committer_date: 1,652,256,003,000 | github_id: 209,296,341 | star_events_count: 0 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 1,568,807,655,000 | gha_created_at: 1,568,807,655,000 | gha_language: null | src_encoding: UTF-8 | language: Lean
is_vendor: false | is_generated: false | length_bytes: 29,179 | extension: lean
content:
/-
Copyright (c) 2019 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Patrick Massot, Casper Putz, Anne Baanen
-/
import data.matrix.block
import linear_algebra.matrix.finite_dimensional
import linear_algebra.std_basis
import ring_theory.algebra_tower
import algebra.module.algebra
/-!
# Linear maps and matrices
This file defines the maps to send matrices to a linear map,
and to send linear maps between modules with finite bases
to matrices. This defines a linear equivalence between linear maps
between finite-dimensional vector spaces and matrices indexed by
the respective bases.
## Main definitions
In the list below, and throughout this file, `R` is a commutative ring (semiring
is sometimes enough), `M` and its variations are `R`-modules, `ι`, `κ`, `n` and `m` are finite
types used for indexing.
* `linear_map.to_matrix`: given bases `v₁ : ι → M₁` and `v₂ : κ → M₂`,
the `R`-linear equivalence from `M₁ →ₗ[R] M₂` to `matrix κ ι R`
* `matrix.to_lin`: the inverse of `linear_map.to_matrix`
* `linear_map.to_matrix'`: the `R`-linear equivalence from `(m → R) →ₗ[R] (n → R)`
to `matrix m n R` (with the standard basis on `m → R` and `n → R`)
* `matrix.to_lin'`: the inverse of `linear_map.to_matrix'`
* `alg_equiv_matrix`: given a basis indexed by `n`, the `R`-algebra equivalence between
`R`-endomorphisms of `M` and `matrix n n R`
## Tags
linear_map, matrix, linear_equiv, diagonal, det, trace
-/
noncomputable theory
open linear_map matrix set submodule
open_locale big_operators
open_locale matrix
universes u v w
section to_matrix'
instance {n m} [fintype m] [decidable_eq m] [fintype n] [decidable_eq n] (R) [fintype R] :
fintype (matrix m n R) := by unfold matrix; apply_instance
variables {R : Type*} [comm_ring R]
variables {l m n : Type*}
/-- `matrix.mul_vec M` is a linear map. -/
def matrix.mul_vec_lin [fintype n] (M : matrix m n R) : (n → R) →ₗ[R] (m → R) :=
{ to_fun := M.mul_vec,
map_add' := λ v w, funext (λ i, dot_product_add _ _ _),
map_smul' := λ c v, funext (λ i, dot_product_smul _ _ _) }
@[simp] lemma matrix.mul_vec_lin_apply [fintype n] (M : matrix m n R) (v : n → R) :
matrix.mul_vec_lin M v = M.mul_vec v := rfl
variables [fintype n] [decidable_eq n]
@[simp] lemma matrix.mul_vec_std_basis (M : matrix m n R) (i j) :
M.mul_vec (std_basis R (λ _, R) j 1) i = M i j :=
begin
have : (∑ j', M i j' * if j = j' then 1 else 0) = M i j,
{ simp_rw [mul_boole, finset.sum_ite_eq, finset.mem_univ, if_true] },
convert this,
ext,
split_ifs with h; simp only [std_basis_apply],
{ rw [h, function.update_same] },
{ rw [function.update_noteq (ne.symm h), pi.zero_apply] }
end
/-- Linear maps `(n → R) →ₗ[R] (m → R)` are linearly equivalent to `matrix m n R`. -/
def linear_map.to_matrix' : ((n → R) →ₗ[R] (m → R)) ≃ₗ[R] matrix m n R :=
{ to_fun := λ f i j, f (std_basis R (λ _, R) j 1) i,
inv_fun := matrix.mul_vec_lin,
right_inv := λ M, by { ext i j, simp only [matrix.mul_vec_std_basis, matrix.mul_vec_lin_apply] },
left_inv := λ f, begin
apply (pi.basis_fun R n).ext,
intro j, ext i,
simp only [pi.basis_fun_apply, matrix.mul_vec_std_basis, matrix.mul_vec_lin_apply]
end,
map_add' := λ f g, by { ext i j, simp only [pi.add_apply, linear_map.add_apply] },
map_smul' := λ c f, by { ext i j, simp only [pi.smul_apply, linear_map.smul_apply,
ring_hom.id_apply] } }
/-- A `matrix m n R` is linearly equivalent to a linear map `(n → R) →ₗ[R] (m → R)`. -/
def matrix.to_lin' : matrix m n R ≃ₗ[R] ((n → R) →ₗ[R] (m → R)) :=
linear_map.to_matrix'.symm
@[simp] lemma linear_map.to_matrix'_symm :
(linear_map.to_matrix'.symm : matrix m n R ≃ₗ[R] _) = matrix.to_lin' :=
rfl
@[simp] lemma matrix.to_lin'_symm :
(matrix.to_lin'.symm : ((n → R) →ₗ[R] (m → R)) ≃ₗ[R] _) = linear_map.to_matrix' :=
rfl
@[simp] lemma linear_map.to_matrix'_to_lin' (M : matrix m n R) :
linear_map.to_matrix' (matrix.to_lin' M) = M :=
linear_map.to_matrix'.apply_symm_apply M
@[simp] lemma matrix.to_lin'_to_matrix' (f : (n → R) →ₗ[R] (m → R)) :
matrix.to_lin' (linear_map.to_matrix' f) = f :=
matrix.to_lin'.apply_symm_apply f
@[simp] lemma linear_map.to_matrix'_apply (f : (n → R) →ₗ[R] (m → R)) (i j) :
linear_map.to_matrix' f i j = f (λ j', if j' = j then 1 else 0) i :=
begin
simp only [linear_map.to_matrix', linear_equiv.coe_mk],
congr,
ext j',
split_ifs with h,
{ rw [h, std_basis_same] },
apply std_basis_ne _ _ _ _ h
end
@[simp] lemma matrix.to_lin'_apply (M : matrix m n R) (v : n → R) :
matrix.to_lin' M v = M.mul_vec v := rfl
@[simp] lemma matrix.to_lin'_one :
matrix.to_lin' (1 : matrix n n R) = id :=
by { ext, simp [linear_map.one_apply, std_basis_apply] }
@[simp] lemma linear_map.to_matrix'_id :
(linear_map.to_matrix' (linear_map.id : (n → R) →ₗ[R] (n → R))) = 1 :=
by { ext, rw [matrix.one_apply, linear_map.to_matrix'_apply, id_apply] }
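/- A small concrete sanity check (an added sketch, assuming the `ℤ` notation and the
`fin 2` instances are available through the imports above): with the standard basis on
`fin 2 → ℤ`, the identity map is represented by the identity matrix. This is just
`linear_map.to_matrix'_id` specialised to a concrete index type. -/
example : linear_map.to_matrix' (linear_map.id : (fin 2 → ℤ) →ₗ[ℤ] (fin 2 → ℤ)) = 1 :=
linear_map.to_matrix'_id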
@[simp] lemma matrix.to_lin'_mul [fintype m] [decidable_eq m] (M : matrix l m R)
(N : matrix m n R) : matrix.to_lin' (M ⬝ N) = (matrix.to_lin' M).comp (matrix.to_lin' N) :=
by { ext, simp }
/-- Shortcut lemma for `matrix.to_lin'_mul` and `linear_map.comp_apply` -/
lemma matrix.to_lin'_mul_apply [fintype m] [decidable_eq m] (M : matrix l m R)
(N : matrix m n R) (x) : matrix.to_lin' (M ⬝ N) x = (matrix.to_lin' M (matrix.to_lin' N x)) :=
by rw [matrix.to_lin'_mul, linear_map.comp_apply]
lemma linear_map.to_matrix'_comp [fintype l] [decidable_eq l]
(f : (n → R) →ₗ[R] (m → R)) (g : (l → R) →ₗ[R] (n → R)) :
(f.comp g).to_matrix' = f.to_matrix' ⬝ g.to_matrix' :=
suffices (f.comp g) = (f.to_matrix' ⬝ g.to_matrix').to_lin',
by rw [this, linear_map.to_matrix'_to_lin'],
by rw [matrix.to_lin'_mul, matrix.to_lin'_to_matrix', matrix.to_lin'_to_matrix']
lemma linear_map.to_matrix'_mul [fintype m] [decidable_eq m]
(f g : (m → R) →ₗ[R] (m → R)) :
(f * g).to_matrix' = f.to_matrix' ⬝ g.to_matrix' :=
linear_map.to_matrix'_comp f g
@[simp] lemma linear_map.to_matrix'_algebra_map (x : R) :
linear_map.to_matrix' (algebra_map R (module.End R (n → R)) x) = scalar n x :=
by simp [module.algebra_map_End_eq_smul_id]
lemma matrix.ker_to_lin'_eq_bot_iff {M : matrix n n R} :
M.to_lin'.ker = ⊥ ↔ ∀ v, M.mul_vec v = 0 → v = 0 :=
by simp only [submodule.eq_bot_iff, linear_map.mem_ker, matrix.to_lin'_apply]
/-- If `M` and `M'` are each other's inverse matrices, they provide an equivalence between `m → A`
and `n → A` corresponding to `M.mul_vec` and `M'.mul_vec`. -/
@[simps]
def matrix.to_lin'_of_inv [fintype m] [decidable_eq m]
{M : matrix m n R} {M' : matrix n m R}
(hMM' : M ⬝ M' = 1) (hM'M : M' ⬝ M = 1) :
(m → R) ≃ₗ[R] (n → R) :=
{ to_fun := matrix.to_lin' M',
inv_fun := M.to_lin',
left_inv := λ x, by rw [← matrix.to_lin'_mul_apply, hMM', matrix.to_lin'_one, id_apply],
right_inv := λ x, by rw [← matrix.to_lin'_mul_apply, hM'M, matrix.to_lin'_one, id_apply],
.. matrix.to_lin' M' }
/-- Linear maps `(n → R) →ₗ[R] (n → R)` are algebra equivalent to `matrix n n R`. -/
def linear_map.to_matrix_alg_equiv' : ((n → R) →ₗ[R] (n → R)) ≃ₐ[R] matrix n n R :=
alg_equiv.of_linear_equiv linear_map.to_matrix' linear_map.to_matrix'_mul
linear_map.to_matrix'_algebra_map
/-- A `matrix n n R` is algebra equivalent to a linear map `(n → R) →ₗ[R] (n → R)`. -/
def matrix.to_lin_alg_equiv' : matrix n n R ≃ₐ[R] ((n → R) →ₗ[R] (n → R)) :=
linear_map.to_matrix_alg_equiv'.symm
@[simp] lemma linear_map.to_matrix_alg_equiv'_symm :
(linear_map.to_matrix_alg_equiv'.symm : matrix n n R ≃ₐ[R] _) = matrix.to_lin_alg_equiv' :=
rfl
@[simp] lemma matrix.to_lin_alg_equiv'_symm :
(matrix.to_lin_alg_equiv'.symm : ((n → R) →ₗ[R] (n → R)) ≃ₐ[R] _) =
linear_map.to_matrix_alg_equiv' :=
rfl
@[simp] lemma linear_map.to_matrix_alg_equiv'_to_lin_alg_equiv' (M : matrix n n R) :
linear_map.to_matrix_alg_equiv' (matrix.to_lin_alg_equiv' M) = M :=
linear_map.to_matrix_alg_equiv'.apply_symm_apply M
@[simp] lemma matrix.to_lin_alg_equiv'_to_matrix_alg_equiv' (f : (n → R) →ₗ[R] (n → R)) :
matrix.to_lin_alg_equiv' (linear_map.to_matrix_alg_equiv' f) = f :=
matrix.to_lin_alg_equiv'.apply_symm_apply f
@[simp] lemma linear_map.to_matrix_alg_equiv'_apply (f : (n → R) →ₗ[R] (n → R)) (i j) :
linear_map.to_matrix_alg_equiv' f i j = f (λ j', if j' = j then 1 else 0) i :=
by simp [linear_map.to_matrix_alg_equiv']
@[simp] lemma matrix.to_lin_alg_equiv'_apply (M : matrix n n R) (v : n → R) :
matrix.to_lin_alg_equiv' M v = M.mul_vec v := rfl
@[simp] lemma matrix.to_lin_alg_equiv'_one :
matrix.to_lin_alg_equiv' (1 : matrix n n R) = id :=
by { ext, simp [matrix.one_apply, std_basis_apply] }
@[simp] lemma linear_map.to_matrix_alg_equiv'_id :
(linear_map.to_matrix_alg_equiv' (linear_map.id : (n → R) →ₗ[R] (n → R))) = 1 :=
by { ext, rw [matrix.one_apply, linear_map.to_matrix_alg_equiv'_apply, id_apply] }
@[simp] lemma matrix.to_lin_alg_equiv'_mul (M N : matrix n n R) :
matrix.to_lin_alg_equiv' (M ⬝ N) =
(matrix.to_lin_alg_equiv' M).comp (matrix.to_lin_alg_equiv' N) :=
by { ext, simp }
lemma linear_map.to_matrix_alg_equiv'_comp (f g : (n → R) →ₗ[R] (n → R)) :
(f.comp g).to_matrix_alg_equiv' = f.to_matrix_alg_equiv' ⬝ g.to_matrix_alg_equiv' :=
suffices (f.comp g) = (f.to_matrix_alg_equiv' ⬝ g.to_matrix_alg_equiv').to_lin_alg_equiv',
by rw [this, linear_map.to_matrix_alg_equiv'_to_lin_alg_equiv'],
by rw [matrix.to_lin_alg_equiv'_mul, matrix.to_lin_alg_equiv'_to_matrix_alg_equiv',
matrix.to_lin_alg_equiv'_to_matrix_alg_equiv']
lemma linear_map.to_matrix_alg_equiv'_mul
(f g : (n → R) →ₗ[R] (n → R)) :
(f * g).to_matrix_alg_equiv' = f.to_matrix_alg_equiv' ⬝ g.to_matrix_alg_equiv' :=
linear_map.to_matrix_alg_equiv'_comp f g
lemma matrix.rank_vec_mul_vec {K m n : Type u} [field K] [fintype n] [decidable_eq n]
(w : m → K) (v : n → K) :
rank (vec_mul_vec w v).to_lin' ≤ 1 :=
begin
rw [vec_mul_vec_eq, matrix.to_lin'_mul],
refine le_trans (rank_comp_le1 _ _) _,
refine (rank_le_domain _).trans_eq _,
rw [dim_fun', fintype.card_unit, nat.cast_one]
end
end to_matrix'
section to_matrix
variables {R : Type*} [comm_ring R]
variables {l m n : Type*} [fintype n] [fintype m] [decidable_eq n]
variables {M₁ M₂ : Type*} [add_comm_group M₁] [add_comm_group M₂] [module R M₁] [module R M₂]
variables (v₁ : basis n R M₁) (v₂ : basis m R M₂)
/-- Given bases of two modules `M₁` and `M₂` over a commutative ring `R`, we get a linear
equivalence between linear maps `M₁ →ₗ M₂` and matrices over `R` indexed by the bases. -/
def linear_map.to_matrix : (M₁ →ₗ[R] M₂) ≃ₗ[R] matrix m n R :=
linear_equiv.trans (linear_equiv.arrow_congr v₁.equiv_fun v₂.equiv_fun) linear_map.to_matrix'
/-- `linear_map.to_matrix'` is a particular case of `linear_map.to_matrix`, for the standard basis
`pi.basis_fun R n`. -/
lemma linear_map.to_matrix_eq_to_matrix' :
linear_map.to_matrix (pi.basis_fun R n) (pi.basis_fun R n) = linear_map.to_matrix' :=
rfl
/-- Given bases of two modules `M₁` and `M₂` over a commutative ring `R`, we get a linear
equivalence between matrices over `R` indexed by the bases and linear maps `M₁ →ₗ M₂`. -/
def matrix.to_lin : matrix m n R ≃ₗ[R] (M₁ →ₗ[R] M₂) :=
(linear_map.to_matrix v₁ v₂).symm
/-- `matrix.to_lin'` is a particular case of `matrix.to_lin`, for the standard basis
`pi.basis_fun R n`. -/
lemma matrix.to_lin_eq_to_lin' :
matrix.to_lin (pi.basis_fun R n) (pi.basis_fun R m) = matrix.to_lin' :=
rfl
@[simp] lemma linear_map.to_matrix_symm :
(linear_map.to_matrix v₁ v₂).symm = matrix.to_lin v₁ v₂ :=
rfl
@[simp] lemma matrix.to_lin_symm :
(matrix.to_lin v₁ v₂).symm = linear_map.to_matrix v₁ v₂ :=
rfl
@[simp] lemma matrix.to_lin_to_matrix (f : M₁ →ₗ[R] M₂) :
matrix.to_lin v₁ v₂ (linear_map.to_matrix v₁ v₂ f) = f :=
by rw [← matrix.to_lin_symm, linear_equiv.apply_symm_apply]
@[simp] lemma linear_map.to_matrix_to_lin (M : matrix m n R) :
linear_map.to_matrix v₁ v₂ (matrix.to_lin v₁ v₂ M) = M :=
by rw [← matrix.to_lin_symm, linear_equiv.symm_apply_apply]
lemma linear_map.to_matrix_apply (f : M₁ →ₗ[R] M₂) (i : m) (j : n) :
linear_map.to_matrix v₁ v₂ f i j = v₂.repr (f (v₁ j)) i :=
begin
rw [linear_map.to_matrix, linear_equiv.trans_apply, linear_map.to_matrix'_apply,
linear_equiv.arrow_congr_apply, basis.equiv_fun_symm_apply, finset.sum_eq_single j,
if_pos rfl, one_smul, basis.equiv_fun_apply],
{ intros j' _ hj',
rw [if_neg hj', zero_smul] },
{ intro hj,
have := finset.mem_univ j,
contradiction }
end
lemma linear_map.to_matrix_transpose_apply (f : M₁ →ₗ[R] M₂) (j : n) :
(linear_map.to_matrix v₁ v₂ f)ᵀ j = v₂.repr (f (v₁ j)) :=
funext $ λ i, f.to_matrix_apply _ _ i j
lemma linear_map.to_matrix_apply' (f : M₁ →ₗ[R] M₂) (i : m) (j : n) :
linear_map.to_matrix v₁ v₂ f i j = v₂.repr (f (v₁ j)) i :=
linear_map.to_matrix_apply v₁ v₂ f i j
lemma linear_map.to_matrix_transpose_apply' (f : M₁ →ₗ[R] M₂) (j : n) :
(linear_map.to_matrix v₁ v₂ f)ᵀ j = v₂.repr (f (v₁ j)) :=
linear_map.to_matrix_transpose_apply v₁ v₂ f j
lemma matrix.to_lin_apply (M : matrix m n R) (v : M₁) :
matrix.to_lin v₁ v₂ M v = ∑ j, M.mul_vec (v₁.repr v) j • v₂ j :=
show v₂.equiv_fun.symm (matrix.to_lin' M (v₁.repr v)) = _,
by rw [matrix.to_lin'_apply, v₂.equiv_fun_symm_apply]
@[simp] lemma matrix.to_lin_self (M : matrix m n R) (i : n) :
matrix.to_lin v₁ v₂ M (v₁ i) = ∑ j, M j i • v₂ j :=
begin
rw [matrix.to_lin_apply, finset.sum_congr rfl (λ j hj, _)],
rw [basis.repr_self, matrix.mul_vec, dot_product, finset.sum_eq_single i,
finsupp.single_eq_same, mul_one],
{ intros i' _ i'_ne, rw [finsupp.single_eq_of_ne i'_ne.symm, mul_zero] },
{ intros,
have := finset.mem_univ i,
contradiction },
end
/-- This will be a special case of `linear_map.to_matrix_id_eq_basis_to_matrix`. -/
lemma linear_map.to_matrix_id : linear_map.to_matrix v₁ v₁ id = 1 :=
begin
ext i j,
simp [linear_map.to_matrix_apply, matrix.one_apply, finsupp.single, eq_comm]
end
lemma linear_map.to_matrix_one : linear_map.to_matrix v₁ v₁ 1 = 1 :=
linear_map.to_matrix_id v₁
@[simp]
lemma matrix.to_lin_one : matrix.to_lin v₁ v₁ 1 = id :=
by rw [← linear_map.to_matrix_id v₁, matrix.to_lin_to_matrix]
theorem linear_map.to_matrix_reindex_range [decidable_eq M₁] [decidable_eq M₂]
(f : M₁ →ₗ[R] M₂) (k : m) (i : n) :
linear_map.to_matrix v₁.reindex_range v₂.reindex_range f
⟨v₂ k, mem_range_self k⟩ ⟨v₁ i, mem_range_self i⟩ =
linear_map.to_matrix v₁ v₂ f k i :=
by simp_rw [linear_map.to_matrix_apply, basis.reindex_range_self, basis.reindex_range_repr]
variables {M₃ : Type*} [add_comm_group M₃] [module R M₃] (v₃ : basis l R M₃)
lemma linear_map.to_matrix_comp [fintype l] [decidable_eq m] (f : M₂ →ₗ[R] M₃) (g : M₁ →ₗ[R] M₂) :
linear_map.to_matrix v₁ v₃ (f.comp g) =
linear_map.to_matrix v₂ v₃ f ⬝ linear_map.to_matrix v₁ v₂ g :=
by simp_rw [linear_map.to_matrix, linear_equiv.trans_apply,
linear_equiv.arrow_congr_comp _ v₂.equiv_fun, linear_map.to_matrix'_comp]
lemma linear_map.to_matrix_mul (f g : M₁ →ₗ[R] M₁) :
linear_map.to_matrix v₁ v₁ (f * g) =
linear_map.to_matrix v₁ v₁ f ⬝ linear_map.to_matrix v₁ v₁ g :=
by { rw [show (@has_mul.mul (M₁ →ₗ[R] M₁) _) = linear_map.comp, from rfl,
linear_map.to_matrix_comp v₁ v₁ v₁ f g] }
@[simp] lemma linear_map.to_matrix_algebra_map (x : R) :
linear_map.to_matrix v₁ v₁ (algebra_map R (module.End R M₁) x) = scalar n x :=
by simp [module.algebra_map_End_eq_smul_id, linear_map.to_matrix_id]
lemma linear_map.to_matrix_mul_vec_repr (f : M₁ →ₗ[R] M₂) (x : M₁) :
(linear_map.to_matrix v₁ v₂ f).mul_vec (v₁.repr x) = v₂.repr (f x) :=
by { ext i,
rw [← matrix.to_lin'_apply, linear_map.to_matrix, linear_equiv.trans_apply,
matrix.to_lin'_to_matrix', linear_equiv.arrow_congr_apply, v₂.equiv_fun_apply],
congr,
exact v₁.equiv_fun.symm_apply_apply x }
lemma matrix.to_lin_mul [fintype l] [decidable_eq m] (A : matrix l m R) (B : matrix m n R) :
matrix.to_lin v₁ v₃ (A ⬝ B) =
(matrix.to_lin v₂ v₃ A).comp (matrix.to_lin v₁ v₂ B) :=
begin
apply (linear_map.to_matrix v₁ v₃).injective,
haveI : decidable_eq l := λ _ _, classical.prop_decidable _,
rw linear_map.to_matrix_comp v₁ v₂ v₃,
repeat { rw linear_map.to_matrix_to_lin },
end
/-- Shortcut lemma for `matrix.to_lin_mul` and `linear_map.comp_apply`. -/
lemma matrix.to_lin_mul_apply [fintype l] [decidable_eq m]
(A : matrix l m R) (B : matrix m n R) (x) :
matrix.to_lin v₁ v₃ (A ⬝ B) x =
(matrix.to_lin v₂ v₃ A) (matrix.to_lin v₁ v₂ B x) :=
by rw [matrix.to_lin_mul v₁ v₂, linear_map.comp_apply]
/-- If `M` and `M'` are each other's inverse matrices, `matrix.to_lin M` and `matrix.to_lin M'`
form a linear equivalence. -/
@[simps]
def matrix.to_lin_of_inv [decidable_eq m]
{M : matrix m n R} {M' : matrix n m R}
(hMM' : M ⬝ M' = 1) (hM'M : M' ⬝ M = 1) :
M₁ ≃ₗ[R] M₂ :=
{ to_fun := matrix.to_lin v₁ v₂ M,
inv_fun := matrix.to_lin v₂ v₁ M',
left_inv := λ x, by rw [← matrix.to_lin_mul_apply, hM'M, matrix.to_lin_one, id_apply],
right_inv := λ x, by rw [← matrix.to_lin_mul_apply, hMM', matrix.to_lin_one, id_apply],
.. matrix.to_lin v₁ v₂ M }
/-- Given a basis of a module `M₁` over a commutative ring `R`, we get an algebra
equivalence between linear maps `M₁ →ₗ M₁` and square matrices over `R` indexed by the basis. -/
def linear_map.to_matrix_alg_equiv :
(M₁ →ₗ[R] M₁) ≃ₐ[R] matrix n n R :=
alg_equiv.of_linear_equiv (linear_map.to_matrix v₁ v₁) (linear_map.to_matrix_mul v₁)
(linear_map.to_matrix_algebra_map v₁)
/-- Given a basis of a module `M₁` over a commutative ring `R`, we get an algebra
equivalence between square matrices over `R` indexed by the basis and linear maps `M₁ →ₗ M₁`. -/
def matrix.to_lin_alg_equiv : matrix n n R ≃ₐ[R] (M₁ →ₗ[R] M₁) :=
(linear_map.to_matrix_alg_equiv v₁).symm
@[simp] lemma linear_map.to_matrix_alg_equiv_symm :
(linear_map.to_matrix_alg_equiv v₁).symm = matrix.to_lin_alg_equiv v₁ :=
rfl
@[simp] lemma matrix.to_lin_alg_equiv_symm :
(matrix.to_lin_alg_equiv v₁).symm = linear_map.to_matrix_alg_equiv v₁ :=
rfl
@[simp] lemma matrix.to_lin_alg_equiv_to_matrix_alg_equiv (f : M₁ →ₗ[R] M₁) :
matrix.to_lin_alg_equiv v₁ (linear_map.to_matrix_alg_equiv v₁ f) = f :=
by rw [← matrix.to_lin_alg_equiv_symm, alg_equiv.apply_symm_apply]
@[simp] lemma linear_map.to_matrix_alg_equiv_to_lin_alg_equiv (M : matrix n n R) :
linear_map.to_matrix_alg_equiv v₁ (matrix.to_lin_alg_equiv v₁ M) = M :=
by rw [← matrix.to_lin_alg_equiv_symm, alg_equiv.symm_apply_apply]
lemma linear_map.to_matrix_alg_equiv_apply (f : M₁ →ₗ[R] M₁) (i j : n) :
linear_map.to_matrix_alg_equiv v₁ f i j = v₁.repr (f (v₁ j)) i :=
by simp [linear_map.to_matrix_alg_equiv, linear_map.to_matrix_apply]
lemma linear_map.to_matrix_alg_equiv_transpose_apply (f : M₁ →ₗ[R] M₁) (j : n) :
(linear_map.to_matrix_alg_equiv v₁ f)ᵀ j = v₁.repr (f (v₁ j)) :=
funext $ λ i, f.to_matrix_apply _ _ i j
lemma linear_map.to_matrix_alg_equiv_apply' (f : M₁ →ₗ[R] M₁) (i j : n) :
linear_map.to_matrix_alg_equiv v₁ f i j = v₁.repr (f (v₁ j)) i :=
linear_map.to_matrix_alg_equiv_apply v₁ f i j
lemma linear_map.to_matrix_alg_equiv_transpose_apply' (f : M₁ →ₗ[R] M₁) (j : n) :
(linear_map.to_matrix_alg_equiv v₁ f)ᵀ j = v₁.repr (f (v₁ j)) :=
linear_map.to_matrix_alg_equiv_transpose_apply v₁ f j
lemma matrix.to_lin_alg_equiv_apply (M : matrix n n R) (v : M₁) :
matrix.to_lin_alg_equiv v₁ M v = ∑ j, M.mul_vec (v₁.repr v) j • v₁ j :=
show v₁.equiv_fun.symm (matrix.to_lin_alg_equiv' M (v₁.repr v)) = _,
by rw [matrix.to_lin_alg_equiv'_apply, v₁.equiv_fun_symm_apply]
@[simp] lemma matrix.to_lin_alg_equiv_self (M : matrix n n R) (i : n) :
matrix.to_lin_alg_equiv v₁ M (v₁ i) = ∑ j, M j i • v₁ j :=
matrix.to_lin_self _ _ _ _
lemma linear_map.to_matrix_alg_equiv_id : linear_map.to_matrix_alg_equiv v₁ id = 1 :=
by simp_rw [linear_map.to_matrix_alg_equiv, alg_equiv.of_linear_equiv_apply,
linear_map.to_matrix_id]
@[simp]
lemma matrix.to_lin_alg_equiv_one : matrix.to_lin_alg_equiv v₁ 1 = id :=
by rw [← linear_map.to_matrix_alg_equiv_id v₁, matrix.to_lin_alg_equiv_to_matrix_alg_equiv]
theorem linear_map.to_matrix_alg_equiv_reindex_range [decidable_eq M₁]
(f : M₁ →ₗ[R] M₁) (k i : n) :
linear_map.to_matrix_alg_equiv v₁.reindex_range f
⟨v₁ k, mem_range_self k⟩ ⟨v₁ i, mem_range_self i⟩ =
linear_map.to_matrix_alg_equiv v₁ f k i :=
by simp_rw [linear_map.to_matrix_alg_equiv_apply,
basis.reindex_range_self, basis.reindex_range_repr]
lemma linear_map.to_matrix_alg_equiv_comp (f g : M₁ →ₗ[R] M₁) :
linear_map.to_matrix_alg_equiv v₁ (f.comp g) =
linear_map.to_matrix_alg_equiv v₁ f ⬝ linear_map.to_matrix_alg_equiv v₁ g :=
by simp [linear_map.to_matrix_alg_equiv, linear_map.to_matrix_comp v₁ v₁ v₁ f g]
lemma linear_map.to_matrix_alg_equiv_mul (f g : M₁ →ₗ[R] M₁) :
linear_map.to_matrix_alg_equiv v₁ (f * g) =
linear_map.to_matrix_alg_equiv v₁ f ⬝ linear_map.to_matrix_alg_equiv v₁ g :=
by { rw [show (@has_mul.mul (M₁ →ₗ[R] M₁) _) = linear_map.comp, from rfl,
linear_map.to_matrix_alg_equiv_comp v₁ f g] }
lemma matrix.to_lin_alg_equiv_mul (A B : matrix n n R) :
matrix.to_lin_alg_equiv v₁ (A ⬝ B) =
(matrix.to_lin_alg_equiv v₁ A).comp (matrix.to_lin_alg_equiv v₁ B) :=
by convert matrix.to_lin_mul v₁ v₁ v₁ A B
end to_matrix
namespace algebra
section lmul
variables {R S T : Type*} [comm_ring R] [comm_ring S] [comm_ring T]
variables [algebra R S] [algebra S T] [algebra R T] [is_scalar_tower R S T]
variables {m n : Type*} [fintype m] [decidable_eq m] [decidable_eq n]
variables (b : basis m R S) (c : basis n S T)
open algebra
lemma to_matrix_lmul' (x : S) (i j) :
linear_map.to_matrix b b (lmul R S x) i j = b.repr (x * b j) i :=
by rw [linear_map.to_matrix_apply', lmul_apply]
@[simp] lemma to_matrix_lsmul (x : R) (i j) :
linear_map.to_matrix b b (algebra.lsmul R S x) i j = if i = j then x else 0 :=
by { rw [linear_map.to_matrix_apply', algebra.lsmul_coe, linear_equiv.map_smul, finsupp.smul_apply,
b.repr_self_apply, smul_eq_mul, mul_boole],
congr' 1; simp only [eq_comm] }
/-- `left_mul_matrix b x` is the matrix corresponding to the linear map `λ y, x * y`.
`left_mul_matrix_eq_repr_mul` gives a formula for the entries of `left_mul_matrix`.
This definition is useful for doing (more) explicit computations with `algebra.lmul`,
such as the trace form or norm map for algebras.
-/
noncomputable def left_mul_matrix : S →ₐ[R] matrix m m R :=
{ to_fun := λ x, linear_map.to_matrix b b (algebra.lmul R S x),
map_zero' := by rw [alg_hom.map_zero, linear_equiv.map_zero],
map_one' := by rw [alg_hom.map_one, linear_map.to_matrix_one],
map_add' := λ x y, by rw [alg_hom.map_add, linear_equiv.map_add],
map_mul' := λ x y, by rw [alg_hom.map_mul, linear_map.to_matrix_mul, matrix.mul_eq_mul],
commutes' := λ r, by { ext, rw [lmul_algebra_map, to_matrix_lsmul,
algebra_map_matrix_apply, id.map_eq_self] } }
lemma left_mul_matrix_apply (x : S) :
left_mul_matrix b x = linear_map.to_matrix b b (lmul R S x) := rfl
lemma left_mul_matrix_eq_repr_mul (x : S) (i j) :
left_mul_matrix b x i j = b.repr (x * b j) i :=
-- This is defeq to just `to_matrix_lmul' b x i j`,
-- but the unfolding goes a lot faster with this explicit `rw`.
by rw [left_mul_matrix_apply, to_matrix_lmul' b x i j]
lemma left_mul_matrix_mul_vec_repr (x y : S) :
(left_mul_matrix b x).mul_vec (b.repr y) = b.repr (x * y) :=
linear_map.to_matrix_mul_vec_repr b b (algebra.lmul R S x) y
@[simp] lemma to_matrix_lmul_eq (x : S) :
linear_map.to_matrix b b (lmul R S x) = left_mul_matrix b x :=
rfl
lemma left_mul_matrix_injective : function.injective (left_mul_matrix b) :=
λ x x' h, calc x = algebra.lmul R S x 1 : (mul_one x).symm
... = algebra.lmul R S x' 1 : by rw (linear_map.to_matrix b b).injective h
... = x' : mul_one x'
variable [fintype n]
lemma smul_left_mul_matrix (x) (ik jk) :
left_mul_matrix (b.smul c) x ik jk =
left_mul_matrix b (left_mul_matrix c x ik.2 jk.2) ik.1 jk.1 :=
by simp only [left_mul_matrix_apply, linear_map.to_matrix_apply, mul_comm, basis.smul_apply,
basis.smul_repr, finsupp.smul_apply, algebra.lmul_apply, id.smul_eq_mul,
linear_equiv.map_smul, mul_smul_comm]
lemma smul_left_mul_matrix_algebra_map (x : S) :
left_mul_matrix (b.smul c) (algebra_map _ _ x) = block_diagonal (λ k, left_mul_matrix b x) :=
begin
ext ⟨i, k⟩ ⟨j, k'⟩,
rw [smul_left_mul_matrix, alg_hom.commutes, block_diagonal_apply, algebra_map_matrix_apply],
split_ifs with h; simp [h],
end
lemma smul_left_mul_matrix_algebra_map_eq (x : S) (i j k) :
left_mul_matrix (b.smul c) (algebra_map _ _ x) (i, k) (j, k) = left_mul_matrix b x i j :=
by rw [smul_left_mul_matrix_algebra_map, block_diagonal_apply_eq]
lemma smul_left_mul_matrix_algebra_map_ne (x : S) (i j) {k k'}
(h : k ≠ k') : left_mul_matrix (b.smul c) (algebra_map _ _ x) (i, k) (j, k') = 0 :=
by rw [smul_left_mul_matrix_algebra_map, block_diagonal_apply_ne _ _ _ h]
end lmul
end algebra
namespace linear_map
section finite_dimensional
open_locale classical
variables {K : Type*} [field K]
variables {V : Type*} [add_comm_group V] [module K V] [finite_dimensional K V]
variables {W : Type*} [add_comm_group W] [module K W] [finite_dimensional K W]
instance finite_dimensional : finite_dimensional K (V →ₗ[K] W) :=
linear_equiv.finite_dimensional
(linear_map.to_matrix (basis.of_vector_space K V) (basis.of_vector_space K W)).symm
section
variables {A : Type*} [ring A] [algebra K A] [module A V] [is_scalar_tower K A V]
[module A W] [is_scalar_tower K A W]
/-- Linear maps over a `K`-algebra are finite dimensional (over `K`) if both the source and
target are, since they form a subspace of all `K`-linear maps. -/
instance finite_dimensional' : finite_dimensional K (V →ₗ[A] W) :=
finite_dimensional.of_injective (restrict_scalars_linear_map K A V W)
(restrict_scalars_injective _)
end
/--
The dimension of the space of linear transformations is the product of the dimensions of the
domain and codomain.
-/
@[simp] lemma finrank_linear_map :
finite_dimensional.finrank K (V →ₗ[K] W) =
(finite_dimensional.finrank K V) * (finite_dimensional.finrank K W) :=
begin
let hbV := basis.of_vector_space K V,
let hbW := basis.of_vector_space K W,
rw [linear_equiv.finrank_eq (linear_map.to_matrix hbV hbW), matrix.finrank_matrix,
finite_dimensional.finrank_eq_card_basis hbV, finite_dimensional.finrank_eq_card_basis hbW,
mul_comm],
end
end finite_dimensional
end linear_map
section
variables {R : Type v} [comm_ring R] {n : Type*} [decidable_eq n]
variables {M M₁ M₂ : Type*} [add_comm_group M] [module R M]
variables [add_comm_group M₁] [module R M₁] [add_comm_group M₂] [module R M₂]
/-- The natural equivalence between linear endomorphisms of finite free modules and square matrices
is compatible with the algebra structures. -/
def alg_equiv_matrix' [fintype n] : module.End R (n → R) ≃ₐ[R] matrix n n R :=
{ map_mul' := linear_map.to_matrix'_comp,
map_add' := linear_map.to_matrix'.map_add,
commutes' := λ r, by { change (r • (linear_map.id : module.End R _)).to_matrix' = r • 1,
rw ←linear_map.to_matrix'_id, refl, apply_instance },
..linear_map.to_matrix' }
/-- A linear equivalence of two modules induces an equivalence of algebras of their
endomorphisms. -/
def linear_equiv.alg_conj (e : M₁ ≃ₗ[R] M₂) :
module.End R M₁ ≃ₐ[R] module.End R M₂ :=
{ map_mul' := λ f g, by apply e.arrow_congr_comp,
map_add' := e.conj.map_add,
commutes' := λ r, by { change e.conj (r • linear_map.id) = r • linear_map.id,
rw [linear_equiv.map_smul, linear_equiv.conj_id], },
..e.conj }
/-- A basis of a module induces an equivalence of algebras from the endomorphisms of the module to
square matrices. -/
def alg_equiv_matrix [fintype n] (h : basis n R M) : module.End R M ≃ₐ[R] matrix n n R :=
h.equiv_fun.alg_conj.trans alg_equiv_matrix'
end
|
2dfe961faab73323e9f1fa02335be7ebaef79ac5
|
e4a7c8ab8b68ca0e53d2c21397320ea590fa01c6
|
/test/field.lean
|
5ff7d282fef2ad6260e520e23bac4b8be6737828
|
[] |
no_license
|
lean-forward/field
|
3ff5dc5f43de40f35481b375f8c871cd0a07c766
|
7e2127ad485aec25e58a1b9c82a6bb74a599467a
|
refs/heads/master
| 1,590,947,010,909
| 1,563,811,881,000
| 1,563,811,881,000
| 190,415,651
| 1
| 0
| null | 1,563,643,371,000
| 1,559,746,688,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 1,140
|
lean
|
import tactic.polya.field
open polya.field tactic tactic.polya.field
def i : num := 0
def j : num := 1
def t1 : nterm ℚ := i * (1 / 2 : ℚ) + i * (1 / 2 : ℚ) - i * (1 : ℚ)
def t2 : nterm ℚ := i * 1 - i
--def t : nterm ℚ := ( i * (1 / 2) + i * (1 / 2) ) * i⁻¹
def t : nterm ℚ := i * i⁻¹
run_cmd ( trace t.norm )
theorem slow : t1.norm = 0 := rfl
theorem fast : t2.norm = 0 := rfl
meta def test_on (e : expr) : tactic unit :=
do
(t, s) ← (term_of_expr e).run ∅,
let nt := norm γ t,
trace nt,
nterm_to_expr s nt >>= trace
constants x y z : ℚ
run_cmd test_on `(x - y + z)
--theorem test : x * (1 / 10) + x * (1 / 10) - x * (1 / 5) = 0 :=
--by field1
example : x * (1 / 4) + x * (1 / 4) = x * (1 / 2):=
by field1
example (h : x = y) : x * (1 / 4) + x * (1 / 4) = y * (1 / 2) :=
begin
field1,
rw h,
end
run_cmd ( do
e ← to_expr ``( (x * (1 / 2) + x * (1 / 2)) * x ⁻¹ ),
(new_e, pr, mv, l, s) ← norm_expr e ∅,
trace new_e
)
example : true :=
by do
e ← to_expr ``(x * x⁻¹),
--this should create no new goals
(new_e, pr, mv, mvs, s) ← norm_expr e ∅,
exact `(trivial)
|
c9c145a2a1d475175f5d61bf506a7300ed3b51fc
|
c5b07d17b3c9fb19e4b302465d237fd1d988c14f
|
/src/demo.lean
|
5daeb11b79c9bc5992c0bbac313c53cd5c22e4ba
|
[
"MIT"
] |
permissive
|
skaslev/papers
|
acaec61602b28c33d6115e53913b2002136aa29b
|
f15b379f3c43bbd0a37ac7bb75f4278f7e901389
|
refs/heads/master
| 1,665,505,770,318
| 1,660,378,602,000
| 1,660,378,602,000
| 14,101,547
| 0
| 1
|
MIT
| 1,595,414,522,000
| 1,383,542,702,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 1,448
|
lean
|
import isos.bool
import isos.option
import isos.list
import sampler
def out {A} [has_repr A] (a : io A) : io unit :=
do
x <- a,
io.put_str_ln $ repr x
open sample
#eval out $ gen_fseq 50 $ fin 40000
#eval out $ gen_fseqₛ 50 $ bounded_ogf_iso₁ bool.ogf_iso 0
#eval out $ gen_fseqₛ 50 $ X.sized_ogf list bool 3
#eval out $ gen_fseqₛ 50 $ X.bounded_ogf list bool 3
#eval out $ gen_fseqₛ 50 $ X.sized_ogf₁ nat 10
#eval out $ gen_fseqₛ 50 $ X.bounded_ogf₁ nat 10
#eval out $ gen_fseqₛ 50 $ sized_ogf_iso (@option.ogf_iso bool) 1
#eval out $ gen_fseqₛ 50 $ bounded_ogf_iso (@option.ogf_iso bool) 1
#eval out $ gen_fseqₛ 50 $ @sized_ogf_iso _ _ _ (bounded_ogf_iso option.ogf_iso 1) (@list.ogf_iso (option bool)) 3
#eval out $ gen_fseqₛ 50 $ @bounded_ogf_iso _ _ _ (bounded_ogf_iso option.ogf_iso 1) (@list.ogf_iso (option bool)) 3
#eval out $ gen_fseqₛ 50 $ bounded_ogf (delta 2) bool 20
#eval out $ genₛ $ bounded_ogf (const 500) bool 10
#eval out $ genₛ $ bounded_ogf (const 500) bool 10
#eval out $ genₛ $ bounded_ogf (const 500) bool 10
#eval out $ gen_fseqₛ 50 $ bounded_ogf (const 2) bool 10
#eval take 50 $ delta 10
#eval take 50 $ option.cf
#eval take 50 $ ogf.cmul (delta 10) option.cf
#eval out $ genₛ $ sized_ogf (ogf.cmul (delta 10) option.cf) bool 10
#eval out $ genₛ $ sized_ogf (ogf.cmul (delta 10) option.cf) bool 11
#eval out $ genₛ $ sized_ogf (ogf.cmul (delta 10) option.cf) bool 12
|
d1e81f10b0580d3dc0a6c187d2ca414ccc841811
|
367134ba5a65885e863bdc4507601606690974c1
|
/src/data/multiset/functor.lean
|
47c8193f8b6ac9c94bc5bb758a3470c41d3e5a31
|
[
"Apache-2.0"
] |
permissive
|
kodyvajjha/mathlib
|
9bead00e90f68269a313f45f5561766cfd8d5cad
|
b98af5dd79e13a38d84438b850a2e8858ec21284
|
refs/heads/master
| 1,624,350,366,310
| 1,615,563,062,000
| 1,615,563,062,000
| 162,666,963
| 0
| 0
|
Apache-2.0
| 1,545,367,651,000
| 1,545,367,651,000
| null |
UTF-8
|
Lean
| false
| false
| 4,265
|
lean
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Johannes Hölzl, Simon Hudon, Kenny Lau
-/
import data.multiset.basic
import control.traversable.lemmas
import control.traversable.instances
/-!
# Functoriality of `multiset`.
-/
universes u
namespace multiset
open list
instance : functor multiset :=
{ map := @map }
@[simp] lemma fmap_def {α' β'} {s : multiset α'} (f : α' → β') : f <$> s = s.map f := rfl
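/- A small concrete check of `fmap_def` (an added sketch using only numerals and `::ₘ`):
mapping `x ↦ x + 1` over the multiset `1 ::ₘ 2 ::ₘ 0` gives `2 ::ₘ 3 ::ₘ 0`. -/
example : ((λ x, x + 1) <$> (1 ::ₘ 2 ::ₘ 0 : multiset ℕ)) = 2 ::ₘ 3 ::ₘ 0 := rfl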
instance : is_lawful_functor multiset :=
by refine { .. }; intros; simp
open is_lawful_traversable is_comm_applicative
variables {F : Type u → Type u} [applicative F] [is_comm_applicative F]
variables {α' β' : Type u} (f : α' → F β')
def traverse : multiset α' → F (multiset β') :=
quotient.lift (functor.map coe ∘ traversable.traverse f)
begin
introv p, unfold function.comp,
induction p,
case perm.nil { refl },
case perm.cons {
have : multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₁) =
multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₂),
{ rw [p_ih] },
simpa with functor_norm },
case perm.swap {
have : (λa b (l:list β'), (↑(a :: b :: l) : multiset β')) <$> f p_y <*> f p_x =
(λa b l, ↑(a :: b :: l)) <$> f p_x <*> f p_y,
{ rw [is_comm_applicative.commutative_map],
congr, funext a b l, simpa [flip] using perm.swap b a l },
simp [(∘), this] with functor_norm },
case perm.trans { simp [*] }
end
instance : monad multiset :=
{ pure := λ α x, x ::ₘ 0,
bind := @bind,
.. multiset.functor }
@[simp] lemma pure_def {α} : (pure : α → multiset α) = (λ x, x ::ₘ 0) := rfl
@[simp] lemma bind_def {α β} : (>>=) = @bind α β := rfl
instance : is_lawful_monad multiset :=
{ bind_pure_comp_eq_map := λ α β f s, multiset.induction_on s rfl $ λ a s ih, by simp,
pure_bind := λ α β x f, by simp,
bind_assoc := @bind_assoc }
open functor
open traversable is_lawful_traversable
@[simp]
lemma lift_coe {α β : Type*} (x : list α) (f : list α → β)
(h : ∀ a b : list α, a ≈ b → f a = f b) :
quotient.lift f h (x : multiset α) = f x :=
quotient.lift_mk _ _ _
@[simp]
lemma map_comp_coe {α β} (h : α → β) :
functor.map h ∘ coe = (coe ∘ functor.map h : list α → multiset β) :=
by funext; simp [functor.map]
lemma id_traverse {α : Type*} (x : multiset α) :
traverse id.mk x = x :=
quotient.induction_on x begin intro, simp [traverse], refl end
lemma comp_traverse {G H : Type* → Type*}
[applicative G] [applicative H]
[is_comm_applicative G] [is_comm_applicative H]
{α β γ : Type*}
(g : α → G β) (h : β → H γ) (x : multiset α) :
traverse (comp.mk ∘ functor.map h ∘ g) x =
comp.mk (functor.map (traverse h) (traverse g x)) :=
quotient.induction_on x
(by intro;
simp [traverse,comp_traverse] with functor_norm;
simp [(<$>),(∘)] with functor_norm)
lemma map_traverse {G : Type* → Type*}
[applicative G] [is_comm_applicative G]
{α β γ : Type*}
(g : α → G β) (h : β → γ)
(x : multiset α) :
functor.map (functor.map h) (traverse g x) =
traverse (functor.map h ∘ g) x :=
quotient.induction_on x
(by intro; simp [traverse] with functor_norm;
rw [is_lawful_functor.comp_map, map_traverse])
lemma traverse_map {G : Type* → Type*}
[applicative G] [is_comm_applicative G]
{α β γ : Type*}
(g : α → β) (h : β → G γ)
(x : multiset α) :
traverse h (map g x) =
traverse (h ∘ g) x :=
quotient.induction_on x
(by intro; simp [traverse];
rw [← traversable.traverse_map h g];
[ refl, apply_instance ])
lemma naturality {G H : Type* → Type*}
[applicative G] [applicative H]
[is_comm_applicative G] [is_comm_applicative H]
(eta : applicative_transformation G H)
{α β : Type*} (f : α → G β) (x : multiset α) :
eta (traverse f x) = traverse (@eta _ ∘ f) x :=
quotient.induction_on x
(by intro; simp [traverse,is_lawful_traversable.naturality] with functor_norm)
end multiset
|
6a7ee6930b8988e934890310a84a65ea9f3af271
|
3bd26f8e9c7eeb6ae77ac4ba709b5b3c65b8d7cf
|
/mono_epi.lean
|
43214a93e4cfd6e5b5884914283e02519247d4c9
|
[] |
no_license
|
koba-e964/lean-work
|
afac5677efef6905fce29cac44f36f309c3bcd62
|
6ab0506b9bd4e5a2e1ba6312d4ac6bdaf6ae1594
|
refs/heads/master
| 1,659,773,150,740
| 1,659,289,453,000
| 1,659,289,453,000
| 100,273,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,852
|
lean
|
namespace mono_epi
variables {A B: Type}
def is_inj (f: A -> B) :=
forall x1 x2, f x1 = f x2 -> x1 = x2
def is_surj (f: A -> B) :=
forall y, exists x, y = f x
def is_mono (f: A -> B) :=
forall (X: Type) (g1 g2: X -> A), f ∘ g1 = f ∘ g2 -> g1 = g2
def is_epi (f: A -> B) :=
forall (X: Type) (g1 g2: B -> X), g1 ∘ f = g2 ∘ f -> g1 = g2
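-- Two quick sanity checks for the definitions above (an added sketch):
-- the identity map on ℕ is both injective and surjective, definitionally.
example : is_inj (@id ℕ) := λ x1 x2 h, h
example : is_surj (@id ℕ) := λ y, ⟨y, rfl⟩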
lemma inj_iff_mono: forall (f: A -> B), is_inj f <-> is_mono f :=
begin
intro f,
split,
{
intros finj X g1 g2 fg,
apply funext,
intro x,
apply finj,
change (f ∘ g1) x = (f ∘ g2) x,
rw fg,
},
{
intros fmono x1 x2 feq,
change (fun _, x1) unit.star = (fun _, x2) unit.star,
apply congr_fun,
apply fmono,
apply funext,
intro x,
exact feq,
},
end
lemma surj_iff_epi: forall (f: A -> B), is_surj f <-> is_epi f :=
begin
intro f,
split,
{
intros fsurj X g1 g2 gf,
apply funext,
intro y,
cases (fsurj y) with x eq,
rw eq,
change (g1 ∘ f) x = (g2 ∘ f) x,
rw gf,
},
{
intros fepi y,
let g1: B -> set B := λx, {y: B | y = x /\ exists x, y = f x},
let g2: B -> set B := λx, {x},
have h1 := fepi (set B) g1 g2,
have : g1 ∘ f = g2 ∘ f,
{
apply funext,
intro x,
change g1 (f x) = g2 (f x),
simp [g1, g2],
apply funext,
intro y,
apply iff.to_eq,
split,
{
intro h,
cases h with h1 h2,
exact h1,
},
{
intro h,
split,
{
exact h,
},
{
existsi x,
exact h,
}
},
},
have h1 := congr_fun (h1 this) y,
simp [g1, g2] at h1,
have : y ∈ {y_1 : B | y_1 = y /\ ∃ (x : A), y_1 = f x},
{
rw h1,
simp [singleton],
},
simp at this,
exact this,
},
end
end mono_epi
|
21540e7184e43bacc57202e1fda40bc83749b4c7
|
5c7fe6c4a9d4079b5457ffa5f061797d42a1cd65
|
/src/library/src_ordered_field_lemmas.lean
|
95c1e58a8b52a5f593435ed88113b2cd0ed2a440
|
[] |
no_license
|
gihanmarasingha/mth1001_tutorial
|
8e0817feeb96e7c1bb3bac49b63e3c9a3a329061
|
bb277eebd5013766e1418365b91416b406275130
|
refs/heads/master
| 1,675,008,746,310
| 1,607,993,443,000
| 1,607,993,443,000
| 321,511,270
| 3
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 14,856
|
lean
|
import .src_ordered_field tactic
namespace mth1001
namespace myreal
section ordered
variables {R : Type} [myordered_field R]
open_locale classical
open myordered_field
lemma pos_one : pos (1 : R) :=
begin
rcases trichotomy (1 : R) with ⟨hpo, _, _⟩ | ⟨_, hoe, _⟩ | ⟨hnpo, hnoe, hpno⟩,
{ exact hpo, },
{ exact absurd hoe zero_ne_one.symm, },
{ exfalso, apply hnpo,
convert pos_mul_of_pos_of_pos _ _ hpno hpno,
rw [neg_mul_neg_self, one_mul], },
end
lemma pos_nat (n : ℕ) : n ≠ 0 → pos (n : R) :=
begin
induction n with k hk,
{ intro _, contradiction, },
{ intro _,
rw coe_nat_succ,
by_cases h₁ : k = 0,
{ rw h₁,
change pos((0 : R) + (1 : R)),
rw zero_add,
exact pos_one, },
{ exact pos_add_of_pos_of_pos _ _ (hk h₁) pos_one }, },
end
lemma lt_iff_pos_sub (x y : R) : x < y ↔ pos (y - x) := by refl
lemma lt_iff_pos_neg (x y : R) : x < y ↔ pos (y + -x) := by refl
lemma zero_lt_one' : (0 : R) < (1 : R) :=
begin
rw [lt_iff_pos_sub, sub_zero],
exact pos_one,
end
lemma gt_zero_mul_of_gt_zero_of_gt_zero {a b : R} (h₁ : 0 < a) (h₂ : 0 < b) : 0 < a * b :=
begin
rw [lt_iff_pos_sub, sub_zero] at *,
exact pos_mul_of_pos_of_pos a b h₁ h₂,
end
lemma mul_pos (a b : R) : 0 < a → 0 < b → 0 < a * b :=
begin
intros h₁ h₂,
rw [lt_iff_pos_sub, sub_zero] at *,
exact pos_mul_of_pos_of_pos a b h₁ h₂,
end
lemma neg_pos {x : R} : 0 < -x ↔ x < 0:=
begin
repeat {rw lt_iff_pos_neg},
rw zero_add,
have : -(0 : R) = (0 : R),
{ rw [←add_zero (-(0 : R) : R), neg_add], },
rw [this, add_zero],
end
lemma trichotomy' (x y: R) : x < y ∧ ¬x = y ∧ ¬y < x ∨
¬x < y ∧ x = y ∧ ¬y < x ∨
¬x < y ∧ ¬x = y ∧ y < x :=
begin
repeat {rw lt_iff_pos_sub},
have : x - y = -(y - x),
{ rw [sub_eq_add_neg', sub_eq_add_neg', neg_add_eq_neg_add_neg', neg_neg], },
rw this,
rw [@eq_comm _ x y, (sub_eq_zero_iff_eq y x).symm],
exact trichotomy (y + -x),
end
lemma lt_trans {x y z : R} : x < y → y < z → x < z :=
begin
repeat {rw lt_iff_pos_sub},
intros pyx pzy,
have : z - x = (z - y) + (y - x),
{ repeat {rw sub_eq_add_neg'},
rw [←add_assoc, add_assoc z _ _, neg_add, add_zero], },
rw this,
exact pos_add_of_pos_of_pos _ _ pzy pyx,
end
lemma add_lt_add_iff_right_mpr {x y : R} (z : R) : x < y → x + z < y + z :=
begin
repeat {rw lt_iff_pos_sub},
apply eq.substr,
rw [sub_eq_add_neg', neg_add_eq_neg_add_neg', ←add_assoc],
rw [add_assoc y, add_neg', add_zero, sub_eq_add_neg'],
end
lemma add_lt_add_iff_right_mp {x y : R} (z : R) : x + z < y + z → x < y :=
begin
intro h,
convert add_lt_add_iff_right_mpr (-z) h;
rw [add_assoc, add_neg', add_zero],
end
lemma add_lt_add_iff_right {x y : R} (z : R) : x + z < y + z ↔ x < y :=
iff.intro (add_lt_add_iff_right_mp z) (add_lt_add_iff_right_mpr z)
theorem neg_lt_neg_iff {a b : R} : -a < -b ↔ b < a :=
begin
have h₁ : -a < -b ↔ -a + a < -b + a, from (add_lt_add_iff_right a).symm,
have h₂ : b < a ↔ b + -a < a + -a, from (add_lt_add_iff_right (-a)).symm,
have h₃ : -b + a = - (b + -a),
{ rw [neg_add_eq_neg_add_neg', neg_neg, add_comm], },
rw [h₁, h₂, neg_add, add_neg', h₃, neg_pos],
end
lemma mul_lt_mul_left_mpr {x y z : R} : 0 < z → x < y → z * x < z * y :=
begin
repeat {rw lt_iff_pos_sub},
rw [←mul_sub, sub_zero],
exact pos_mul_of_pos_of_pos _ _,
end
theorem add_lt_add {a b c d : R} : a < b → c < d → a + c < b + d :=
begin
repeat {rw lt_iff_pos_sub},
intros h₁ h₂,
convert pos_add_of_pos_of_pos _ _ h₁ h₂,
repeat {rw sub_eq_add_neg'},
rw neg_add_eq_neg_add_neg',
rw [add_assoc, add_comm (-c) (-a), ←add_assoc d, add_comm d (-a), add_assoc (-a), ←add_assoc],
end
lemma lt_irrefl {x : R} : ¬x < x :=
begin
rcases (trichotomy' x x) with ⟨_, _, nxx⟩ | ⟨nxx, _⟩ | ⟨nxx, _⟩;
exact nxx,
end
theorem ne_of_gt {a b : R} (h : b < a) : a ≠ b :=
λ k, lt_irrefl (@eq.subst _ (λ x, b < x) a b k h)
lemma le_iff_lt_or_eq {x y : R} : x ≤ y ↔ ((x < y) ∨ x = y) := by refl
lemma le_refl (x : R) : x ≤ x := or.inr rfl
lemma not_le_iff_lt (x y : R) : ¬(x ≤ y) ↔ (y < x) :=
begin
rw le_iff_lt_or_eq,
push_neg,
rcases trichotomy' x y with ⟨hxlty, _, _⟩ | ⟨_, hxy, hnyltx ⟩ | ⟨hnxlty, hnxy, hxlty ⟩ ,
{ split,
{ rintro ⟨hnxy, _⟩,
contradiction, },
{ intros hyltx, exfalso,
exact lt_irrefl (lt_trans hxlty hyltx), }, },
{ split,
{ rintro ⟨_, hnxy⟩,
contradiction, },
{ intro hyltx, contradiction, }, },
{ split,
{ intro _, exact hxlty, },
{ intro _, exact ⟨hnxlty, hnxy⟩, }, },
end
lemma not_lt_iff_le (x y : R) : ¬(x < y) ↔ (y ≤ x) :=
by rw [←not_le_iff_lt, not_not]
lemma lt_iff_le_not_le (a b : R) : a < b ↔ a ≤ b ∧ ¬b ≤ a :=
begin
rw [not_le_iff_lt, le_iff_lt_or_eq, or_and_distrib_right, and_self],
exact ⟨λ h, or.inl h, λ h, or.elim h id (λ k, k.2)⟩,
end
lemma neg_nonneg {x : R} : 0 ≤ -x ↔ x ≤ 0 :=
begin
repeat {rw le_iff_lt_or_eq},
have k : 0 < -x ↔ x < 0, from neg_pos,
split,
{ rintro (h₁ | h₂),
{ left, rwa ←k, },
{ right, rw [←neg_neg x, ←h₂, neg_zero], } },
{ rintro (h | rfl),
{ left, rwa k, },
{ right, rw neg_zero, }, },
end
lemma le_trans (x y z : R) : x ≤ y → y ≤ z → x ≤ z :=
begin
rintro (h₁ | rfl) (h₂ | rfl),
{ left, exact lt_trans h₁ h₂},
{ left, exact h₁ },
{ left, exact h₂, },
{ right, refl, },
end
lemma lt_of_le_of_lt {a b c : R} (h₁ : a ≤ b) (h₂ : b < c) : a < c :=
begin
cases h₁ with altb aeqb,
{ exact lt_trans altb h₂, },
{ rw aeqb, exact h₂, },
end
lemma le_total (x y : R) : x ≤ y ∨ y ≤ x :=
begin
rcases trichotomy' x y with ⟨xltx, _⟩ | ⟨_, xeqy, _⟩ | ⟨_, _, yltx⟩,
{ left, left, exact xltx, },
{ left, right, exact xeqy, },
{ right, left, exact yltx, },
end
lemma anti_symm (x y : R) : x ≤ y → y ≤ x → x = y :=
begin
intros h₁ h₂,
have h : (x ≠ y → false), -- Could do this using `by_contra`, but that's slow.
{ intro h,
have h₃ : y < x := or.elim h₂ id (λ k, absurd k.symm h),
have h₄ : x < y := or.elim h₁ id (λ k, absurd k h),
exact lt_irrefl (lt_trans h₃ h₄), },
rw [←(@not_not (x=y))],
exact h,
end
theorem neg_le_neg_iff {a b : R} : -a ≤ -b ↔ b ≤ a :=
begin
repeat {rw le_iff_lt_or_eq},
split,
{ rintro (hlt | heq),
{ left, rwa ←neg_lt_neg_iff, },
{ right, rw [←neg_neg a, heq, neg_neg], }, },
{ rintro (hlt | heq),
{ left, rwa neg_lt_neg_iff, },
{ right, rw heq, }, },
end
theorem add_le_add {a b c d : R} : a ≤ b → c ≤ d → a + c ≤ b + d :=
begin
rintro (h₁| rfl) (h₂ | rfl),
{ left, exact add_lt_add h₁ h₂ },
{ left, exact add_lt_add_iff_right_mpr c h₁, },
{ left, repeat {rw add_comm a _},
exact add_lt_add_iff_right_mpr a h₂, },
{ exact le_refl _, },
end
theorem add_le_add_left (a b: R) : a ≤ b → ∀ (c : R), c + a ≤ c + b :=
λ aleb c, add_le_add (le_refl c) aleb
theorem mul_self_non_neg (a : R) : 0 ≤ a * a:=
begin
rcases trichotomy' 0 a with ⟨posa, _⟩ | ⟨_, eq0, _⟩ | ⟨_, _, nega⟩,
{ left,
convert mul_lt_mul_left_mpr posa posa,
rw mul_zero, },
{ right,
rw [←eq0, mul_zero], },
{ left,
rw ←neg_pos at nega,
rw ←neg_mul_neg_self,
convert mul_lt_mul_left_mpr nega nega,
rw mul_zero, },
end
lemma non_neg_mul_of_non_neg_of_non_neg {a b : R} (h₁ : 0 ≤ a) (h₂ : 0 ≤ b) : 0 ≤ a * b :=
begin
cases h₁ with apos aeq0,
{ cases h₂ with bpos beq0,
{ left, exact gt_zero_mul_of_gt_zero_of_gt_zero apos bpos, },
{ right, rw [←beq0, mul_zero], }, },
{ right,
rw [←aeq0, zero_mul], },
end
lemma non_neg_of_non_neg_mul_of_pos {x y : R} (h₁ : 0 ≤ x * y) (h₂ : 0 < x) : 0 ≤ y :=
begin
cases h₁ with xlty xyeq0,
{ by_contra h₃,
rw [not_le_iff_lt, ←neg_pos] at h₃,
have h₄ : 0 < x * -y, from gt_zero_mul_of_gt_zero_of_gt_zero h₂ h₃,
rw [←neg_mul_eq_mul_neg, neg_pos] at h₄,
exact lt_irrefl (lt_trans h₄ xlty), },
{ right,
rw mul_comm at xyeq0,
symmetry,
apply eq_zero_of_not_eq_zero_of_mul_not_eq_zero _ _ (ne_of_gt h₂) (xyeq0.symm), },
end
lemma non_neg_mul_iff_non_neg_and_non_neg_or_non_pos_and_non_pos (a b : R)
: 0 ≤ a * b ↔ (0 ≤ a ∧ 0 ≤ b) ∨ (a ≤ 0 ∧ b ≤ 0) :=
begin
split,
{ intro h₁,
by_cases h₂ : 0 ≤ a,
{ by_cases h₃ : a = 0,
{ rw h₃,
exact or.elim (le_total b 0) (λ h₄, or.inr ⟨le_refl 0, h₄⟩) (λ h₄, or.inl ⟨le_refl 0, h₄⟩), },
{ have h₄ : 0 < a, from or.elim h₂ id (λ aeq0, absurd aeq0.symm h₃),
have h₅ : 0 ≤ b, from non_neg_of_non_neg_mul_of_pos h₁ h₄,
exact or.inl ⟨or.inl h₄, h₅⟩, }, },
{ rw not_le_iff_lt at h₂,
right,
have k : b ≤ 0,
{ by_contra h₃,
rw not_le_iff_lt at h₃,
rw ←neg_pos at h₂,
have h₄ : 0 < b * -a, from gt_zero_mul_of_gt_zero_of_gt_zero h₃ h₂,
rw [←neg_mul_eq_mul_neg, mul_comm, neg_pos] at h₄,
exact lt_irrefl (lt_of_le_of_lt h₁ h₄), },
exact ⟨or.inl h₂, k⟩, }, },
{ rintro (⟨h₁, h₂⟩ | ⟨h₁, h₂⟩),
{ exact non_neg_mul_of_non_neg_of_non_neg h₁ h₂, },
{ rw ←neg_mul_neg a b,
rw ←neg_nonneg at h₁ h₂,
exact non_neg_mul_of_non_neg_of_non_neg h₁ h₂, }, },
end
theorem inv_pos {a : R} (h : a ≠ 0) : 0 < a⁻¹ ↔ 0 < a :=
begin
split,
{ intro k,
have h₂ : 0 ≤ a * a, from mul_self_non_neg a,
cases h₂ with posaa eq0,
{ convert mul_lt_mul_left_mpr k posaa,
{ rw mul_zero, },
{ rw [←mul_assoc, inv_mul a h, one_mul], }, },
{ have h₃ : a = 0,
{ cases eq_zero_or_eq_zero_of_mul_eq_zero a a eq0.symm;
assumption, },
exact absurd h₃ h, }, },
{ intro k,
have h₂ : 0 ≤ (a⁻¹ * a⁻¹), from mul_self_non_neg a⁻¹,
cases h₂ with posainvsq eq0,
{ convert mul_lt_mul_left_mpr k posainvsq,
{ rw mul_zero, },
{ rw [←mul_assoc, mul_inv a h, one_mul], }, },
{ have h₃ : a⁻¹ = 0,
{ cases eq_zero_or_eq_zero_of_mul_eq_zero _ _ eq0.symm;
assumption, },
exact absurd h₃ (inv_ne_zero h), }, },
end
theorem inv_lt_inv {a b : R} (h₁ : 0 < a) (h₂ : 0 < b) : a⁻¹ < b⁻¹ ↔ b < a :=
begin
split,
{ intro h₃,
have h₄ : a * a⁻¹ < a * b⁻¹, from mul_lt_mul_left_mpr h₁ h₃,
have h₅ : a ≠ 0, from ne_of_gt h₁,
rw (mul_inv a h₅) at h₄,
have h₆ : b * 1 < b * (a * b⁻¹), from mul_lt_mul_left_mpr h₂ h₄,
have h₇ : b ≠ 0, from ne_of_gt h₂,
rw [mul_one, mul_comm, mul_assoc, inv_mul b h₇, mul_one] at h₆,
exact h₆, },
{ intro h₃,
have h₅ : a ≠ 0, from ne_of_gt h₁,
have k₁ : a⁻¹ > 0, from (inv_pos h₅).mpr h₁,
have h₄ : a⁻¹ * b < a⁻¹ * a, from mul_lt_mul_left_mpr k₁ h₃,
rw inv_mul a h₅ at h₄,
have h₇ : b ≠ 0, from ne_of_gt h₂,
have k₂ : b⁻¹ > 0, from (inv_pos h₇).mpr h₂,
have h₆ : b⁻¹ * (a⁻¹ * b) < b⁻¹ * 1, from mul_lt_mul_left_mpr k₂ h₄,
rw [mul_comm, mul_assoc, mul_inv b h₇, mul_one, mul_one] at h₆,
exact h₆, },
end
end ordered
section max_abs
variables {R : Type} [myordered_field R]
open_locale classical
open myordered_field
lemma le_max_right (a b : R) : b ≤ max a b :=
begin
unfold max,
by_cases h : b ≤ a,
{ rwa (if_pos h), },
{ rw (if_neg h),
exact le_refl b, },
end
lemma le_max_left (a b : R) : a ≤ max a b :=
begin
unfold max,
by_cases h : b ≤ a,
{ rw (if_pos h),
exact le_refl a, },
{ rw (if_neg h),
rw not_le_iff_lt at h,
left, exact h, },
end
lemma max_choice (a b : R) : max a b = a ∨ max a b = b :=
begin
unfold max,
by_cases h : b ≤ a,
{ rw (if_pos h), left, refl, },
{ rw (if_neg h),
right, refl, },
end
lemma neg_le_abs (a : R) : -a ≤ abs a :=
begin
unfold abs max,
by_cases h : -a ≤ a,
{ rw (if_pos h), exact h, },
{ rw (if_neg h), exact le_refl (-a), },
end
lemma le_abs_self (a : R) : a ≤ abs a :=
begin
unfold abs max,
by_cases h : -a ≤ a,
{ rw (if_pos h), right, refl, },
{ rw (if_neg h), left,
rw le_iff_lt_or_eq at h,
push_neg at h,
rw [not_lt_iff_le, le_iff_lt_or_eq, or_and_distrib_right] at h,
rcases h with ⟨haltma, _⟩ | ⟨hama, hnama⟩,
{ exact haltma, },
{ exact absurd hama hnama.symm, }, },
end
theorem triangle_inequality (x y : R) : abs (x + y) ≤ abs x + abs y :=
begin
by_cases h : -(x+y) ≤ x+y,
{ have : abs (x+y) = x + y,
{ unfold abs max,
rw (if_pos h), },
rw this,
have h₁ : x ≤ abs x, from le_abs_self x,
have h₂ : y ≤ abs y, from le_abs_self y,
exact add_le_add h₁ h₂, },
{ have : abs (x+y) = -(x+y),
{ unfold abs max,
rw (if_neg h), },
rw this,
rw [neg_add_eq_neg_add_neg', add_comm],
have h₁ : -x ≤ abs x, from neg_le_abs x,
have h₂ : -y ≤ abs y, from neg_le_abs y,
exact add_le_add h₁ h₂, },
end
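-- An illustrative corollary (an added sketch, using only lemmas proved in this file):
-- two applications of the triangle inequality bound a three-term sum.
example (a b c : R) : abs ((a + b) + c) ≤ (abs a + abs b) + abs c :=
le_trans _ _ _ (triangle_inequality (a + b) c)
  (add_le_add (triangle_inequality a b) (le_refl (abs c)))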
end max_abs
section upper_bounds
variables {R : Type} [myordered_field R]
theorem sup_uniqueness (S : set R) (a b : R) (h₁ : is_sup a S) (h₂ : is_sup b S) : a = b :=
anti_symm _ _ (h₁.right b h₂.left) (h₂.right a h₁.left)
theorem empty_set_upper_bound (u : R) : upper_bound u ∅ :=
λ s, (set.mem_empty_eq s) ▸ false.elim
end upper_bounds
section instance_linear_ordered_comm_ring
variables {R : Type} [myordered_field R]
instance : linear_ordered_comm_ring R :=
{ add := comm_group.add,
add_assoc := comm_group.add_assoc,
zero := comm_group.zero,
zero_add := zero_add,
add_zero := add_zero,
neg := comm_group.neg,
add_left_neg := neg_add,
add_comm := comm_group.add_comm,
mul := myfield.mul,
mul_assoc := myfield.mul_assoc,
one := myfield.one,
one_mul := one_mul,
mul_one := myfield.mul_one,
left_distrib := myfield.mul_add,
right_distrib := add_mul,
le := le,
lt := lt,
lt_iff_le_not_le := lt_iff_le_not_le,
le_refl := le_refl,
le_trans := le_trans,
le_antisymm := anti_symm,
add_le_add_left := add_le_add_left,
zero_ne_one := myfield.zero_ne_one,
mul_pos := mul_pos,
le_total := le_total,
zero_lt_one := zero_lt_one',
mul_comm := myfield.mul_comm,
}
end instance_linear_ordered_comm_ring
end myreal
end mth1001
|
44f9182f7b7a396fc88172d7f062da1aea305783
|
097294e9b80f0d9893ac160b9c7219aa135b51b9
|
/instructor/types/option/dm_option_preliminary.lean
|
c1721d6e7075af1522d88e3067c80e10d497b92d
|
[] |
no_license
|
AbigailCastro17/CS2102-Discrete-Math
|
cf296251be9418ce90206f5e66bde9163e21abf9
|
d741e4d2d6a9b2e0c8380e51706218b8f608cee4
|
refs/heads/main
| 1,682,891,087,358
| 1,621,401,341,000
| 1,621,401,341,000
| 368,749,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 405
|
lean
|
namespace hidden
inductive dm_option (α : Type): Type
| none : dm_option
| some (a : α) : dm_option
/-
Here's an example of a representation in Lean of
a partial function, p, from ℕ to ℕ, such that
p(n) = 0 if n=0 and p(n) is undefined otherwise.
-/
def p : ℕ → dm_option ℕ
| nat.zero := dm_option.some nat.zero
| _ := dm_option.none ℕ -- needs explicit type!
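-- Two sanity checks (an added sketch): `p` is defined at zero and is `none` on any
-- successor, matching the informal description above.
example : p nat.zero = dm_option.some nat.zero := rfl
example : p (nat.succ nat.zero) = dm_option.none ℕ := rfl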
end hidden
|
47d55e63217902c5cee29491970ee38f1de6b006
|
f4bff2062c030df03d65e8b69c88f79b63a359d8
|
/src/game/series/tempLevel04.lean
|
02c093c10e0f69edcfd491ccadebb77b17da5aa9
|
[
"Apache-2.0"
] |
permissive
|
adastra7470/real-number-game
|
776606961f52db0eb824555ed2f8e16f92216ea3
|
f9dcb7d9255a79b57e62038228a23346c2dc301b
|
refs/heads/master
| 1,669,221,575,893
| 1,594,669,800,000
| 1,594,669,800,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 359
|
lean
|
import game.series.L01defs
variable X : Type --hide
/-
Idea 04: root test
-/
/- Lemma
If $A$ and $B$ are sets of any type $X$, then
$$ A \subseteq A\cup B.$$
-/
theorem subset_union_left (A B : set X) : A ⊆ A ∪ B :=
begin
--change ∀ (x : X), x ∈ A → x ∈ A ∪ B, --they may want to do this
intros x hx,
left, exact hx, done
end
|
fa36fa16394f9482cbe08c5c78faf7c8a08a473c
|
9be442d9ec2fcf442516ed6e9e1660aa9071b7bd
|
/src/Lean/Elab/BuiltinTerm.lean
|
6f005ac32eca4284b59d3d7ff8ea9ed2437cc317
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
EdAyers/lean4
|
57ac632d6b0789cb91fab2170e8c9e40441221bd
|
37ba0df5841bde51dbc2329da81ac23d4f6a4de4
|
refs/heads/master
| 1,676,463,245,298
| 1,660,619,433,000
| 1,660,619,433,000
| 183,433,437
| 1
| 0
|
Apache-2.0
| 1,657,612,672,000
| 1,556,196,574,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 13,517
|
lean
|
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Elab.Term
import Lean.Elab.Eval
namespace Lean.Elab.Term
open Meta
@[builtinTermElab «prop»] def elabProp : TermElab := fun _ _ =>
return mkSort levelZero
private def elabOptLevel (stx : Syntax) : TermElabM Level :=
if stx.isNone then
pure levelZero
else
elabLevel stx[0]
@[builtinTermElab «sort»] def elabSort : TermElab := fun stx _ =>
return mkSort (← elabOptLevel stx[1])
@[builtinTermElab «type»] def elabTypeStx : TermElab := fun stx _ =>
return mkSort (mkLevelSucc (← elabOptLevel stx[1]))
/-!
If `stx[0]` is an identifier, the method `resolveName` adds a completion point for it using the given
expected type. Thus, we propagate the expected type if `stx[0]` is an identifier.
It doesn't "hurt" if the identifier can be resolved because the expected type is not used in this case.
Recall that if the name resolution fails, a synthetic sorry is returned. -/
@[builtinTermElab «pipeCompletion»] def elabPipeCompletion : TermElab := fun stx expectedType? => do
let e ← elabTerm stx[0] none
unless e.isSorry do
addDotCompletionInfo stx e expectedType?
throwErrorAt stx[1] "invalid field notation, identifier or numeral expected"
@[builtinTermElab «completion»] def elabCompletion : TermElab := fun stx expectedType? => do
  /- `ident.` is ambiguous in Lean: we may be completing a declaration name or accessing a "field". -/
if stx[0].isIdent then
/- If we can elaborate the identifier successfully, we assume it is a dot-completion. Otherwise, we treat it as
identifier completion with a dangling `.`.
Recall that the server falls back to identifier completion when dot-completion fails. -/
let s ← saveState
try
let e ← elabTerm stx[0] none
addDotCompletionInfo stx e expectedType?
catch _ =>
s.restore
addCompletionInfo <| CompletionInfo.id stx stx[0].getId (danglingDot := true) (← getLCtx) expectedType?
throwErrorAt stx[1] "invalid field notation, identifier or numeral expected"
else
elabPipeCompletion stx expectedType?
@[builtinTermElab «hole»] def elabHole : TermElab := fun stx expectedType? => do
let mvar ← mkFreshExprMVar expectedType?
registerMVarErrorHoleInfo mvar.mvarId! stx
pure mvar
@[builtinTermElab «syntheticHole»] def elabSyntheticHole : TermElab := fun stx expectedType? => do
let arg := stx[1]
let userName := if arg.isIdent then arg.getId else Name.anonymous
let mkNewHole : Unit → TermElabM Expr := fun _ => do
let kind := if (← read).inPattern then MetavarKind.natural else MetavarKind.syntheticOpaque
let mvar ← mkFreshExprMVar expectedType? kind userName
registerMVarErrorHoleInfo mvar.mvarId! stx
return mvar
if userName.isAnonymous || (← read).inPattern then
mkNewHole ()
else
match (← getMCtx).findUserName? userName with
| none => mkNewHole ()
| some mvarId =>
let mvar := mkMVar mvarId
let mvarDecl ← getMVarDecl mvarId
let lctx ← getLCtx
if mvarDecl.lctx.isSubPrefixOf lctx then
return mvar
else match (← getExprMVarAssignment? mvarId) with
| some val =>
let val ← instantiateMVars val
if (← MetavarContext.isWellFormed lctx val) then
return val
else
withLCtx mvarDecl.lctx mvarDecl.localInstances do
throwError "synthetic hole has already been defined and assigned to value incompatible with the current context{indentExpr val}"
| none =>
if (← mvarId.isDelayedAssigned) then
-- We can try to improve this case if needed.
throwError "synthetic hole has already beend defined and delayed assigned with an incompatible local context"
else if lctx.isSubPrefixOf mvarDecl.lctx then
let mvarNew ← mkNewHole ()
mvarId.assign mvarNew
return mvarNew
else
throwError "synthetic hole has already been defined with an incompatible local context"
@[builtinTermElab «letMVar»] def elabLetMVar : TermElab := fun stx expectedType? => do
match stx with
| `(let_mvar% ? $n := $e; $b) =>
match (← getMCtx).findUserName? n.getId with
| some _ => throwError "invalid 'let_mvar%', metavariable '?{n.getId}' has already been used"
| none =>
let e ← elabTerm e none
let mvar ← mkFreshExprMVar (← inferType e) MetavarKind.syntheticOpaque n.getId
mvar.mvarId!.assign e
-- We use `mkSaveInfoAnnotation` to make sure the info trees for `e` are saved even if `b` is a metavariable.
return mkSaveInfoAnnotation (← elabTerm b expectedType?)
| _ => throwUnsupportedSyntax
private def getMVarFromUserName (ident : Syntax) : MetaM Expr := do
match (← getMCtx).findUserName? ident.getId with
| none => throwError "unknown metavariable '?{ident.getId}'"
| some mvarId => instantiateMVars (mkMVar mvarId)
@[builtinTermElab «waitIfTypeMVar»] def elabWaitIfTypeMVar : TermElab := fun stx expectedType? => do
match stx with
| `(wait_if_type_mvar% ? $n; $b) =>
tryPostponeIfMVar (← inferType (← getMVarFromUserName n))
elabTerm b expectedType?
| _ => throwUnsupportedSyntax
@[builtinTermElab «waitIfTypeContainsMVar»] def elabWaitIfTypeContainsMVar : TermElab := fun stx expectedType? => do
match stx with
| `(wait_if_type_contains_mvar% ? $n; $b) =>
if (← instantiateMVars (← inferType (← getMVarFromUserName n))).hasExprMVar then
tryPostpone
elabTerm b expectedType?
| _ => throwUnsupportedSyntax
@[builtinTermElab «waitIfContainsMVar»] def elabWaitIfContainsMVar : TermElab := fun stx expectedType? => do
match stx with
| `(wait_if_contains_mvar% ? $n; $b) =>
if (← getMVarFromUserName n).hasExprMVar then
tryPostpone
elabTerm b expectedType?
| _ => throwUnsupportedSyntax
private def mkTacticMVar (type : Expr) (tacticCode : Syntax) : TermElabM Expr := do
let mvar ← mkFreshExprMVar type MetavarKind.syntheticOpaque
let mvarId := mvar.mvarId!
let ref ← getRef
registerSyntheticMVar ref mvarId <| SyntheticMVarKind.tactic tacticCode (← saveContext)
return mvar
@[builtinTermElab byTactic] def elabByTactic : TermElab := fun stx expectedType? => do
match expectedType? with
| some expectedType => mkTacticMVar expectedType stx
| none =>
tryPostpone
throwError ("invalid 'by' tactic, expected type has not been provided")
@[builtinTermElab noImplicitLambda] def elabNoImplicitLambda : TermElab := fun stx expectedType? =>
elabTerm stx[1] (mkNoImplicitLambdaAnnotation <$> expectedType?)
@[builtinTermElab cdot] def elabBadCDot : TermElab := fun _ _ =>
throwError "invalid occurrence of `·` notation, it must be surrounded by parentheses (e.g. `(· + 1)`)"
@[builtinTermElab str] def elabStrLit : TermElab := fun stx _ => do
match stx.isStrLit? with
| some val => pure $ mkStrLit val
| none => throwIllFormedSyntax
private def mkFreshTypeMVarFor (expectedType? : Option Expr) : TermElabM Expr := do
let typeMVar ← mkFreshTypeMVar MetavarKind.synthetic
match expectedType? with
| some expectedType => discard <| isDefEq expectedType typeMVar
| _ => pure ()
return typeMVar
@[builtinTermElab num] def elabNumLit : TermElab := fun stx expectedType? => do
let val ← match stx.isNatLit? with
| some val => pure val
| none => throwIllFormedSyntax
let typeMVar ← mkFreshTypeMVarFor expectedType?
let u ← getDecLevel typeMVar
let mvar ← mkInstMVar (mkApp2 (Lean.mkConst ``OfNat [u]) typeMVar (mkRawNatLit val))
let r := mkApp3 (Lean.mkConst ``OfNat.ofNat [u]) typeMVar (mkRawNatLit val) mvar
registerMVarErrorImplicitArgInfo mvar.mvarId! stx r
return r
@[builtinTermElab rawNatLit] def elabRawNatLit : TermElab := fun stx _ => do
match stx[1].isNatLit? with
| some val => return mkRawNatLit val
| none => throwIllFormedSyntax
@[builtinTermElab scientific]
def elabScientificLit : TermElab := fun stx expectedType? => do
match stx.isScientificLit? with
| none => throwIllFormedSyntax
| some (m, sign, e) =>
let typeMVar ← mkFreshTypeMVarFor expectedType?
let u ← getDecLevel typeMVar
let mvar ← mkInstMVar (mkApp (Lean.mkConst ``OfScientific [u]) typeMVar)
let r := mkApp5 (Lean.mkConst ``OfScientific.ofScientific [u]) typeMVar mvar (mkRawNatLit m) (toExpr sign) (mkRawNatLit e)
registerMVarErrorImplicitArgInfo mvar.mvarId! stx r
return r
@[builtinTermElab char] def elabCharLit : TermElab := fun stx _ => do
match stx.isCharLit? with
| some val => return mkApp (Lean.mkConst ``Char.ofNat) (mkRawNatLit val.toNat)
| none => throwIllFormedSyntax
@[builtinTermElab quotedName] def elabQuotedName : TermElab := fun stx _ =>
match stx[0].isNameLit? with
| some val => pure $ toExpr val
| none => throwIllFormedSyntax
@[builtinTermElab doubleQuotedName] def elabDoubleQuotedName : TermElab := fun stx _ =>
return toExpr (← resolveGlobalConstNoOverloadWithInfo stx[2])
@[builtinTermElab declName] def elabDeclName : TermElab := adaptExpander fun _ => do
let some declName ← getDeclName?
| throwError "invalid `decl_name%` macro, the declaration name is not available"
return (quote declName : Term)
@[builtinTermElab Parser.Term.withDeclName] def elabWithDeclName : TermElab := fun stx expectedType? => do
let id := stx[2].getId
let id := if stx[1].isNone then id else (← getCurrNamespace) ++ id
let e := stx[3]
withMacroExpansion stx e <| withDeclName id <| elabTerm e expectedType?
@[builtinTermElab typeOf] def elabTypeOf : TermElab := fun stx _ => do
inferType (← elabTerm stx[1] none)
/--
Recall that `mkTermInfo` does not create an `ofTermInfo` node in the info tree
if `e` corresponds to a hole that is going to be filled "later" by executing a tactic or resuming elaboration.
This behavior is problematic for auxiliary elaboration steps that are "almost" no-ops.
For example, consider the elaborator for
```
ensure_type_of% s msg e
```
It elaborates `s`, infers its type `t`, and then elaborates `e` ensuring the resulting type is `t`.
If the elaboration of `e` is postponed, then the result is just a metavariable, and an `ofTermInfo` would not be created.
This happens because `ensure_type_of%` is almost a no-op. The elaboration of `s` does not directly contribute to the
final result, just its type.
To make sure we don't miss any information in the `InfoTree`, we can just create a "silent" annotation to force
`mkTermInfo` to create a node for `ensure_type_of% s msg e` even if `e` has been postponed.
Another possible solution is to elaborate `ensure_type_of% s msg e` as `ensureType s e` where `ensureType` is defined as
```
ensureType (s e : α) := e
```
We decided to use the silent annotation because `ensure_type_of%` is heavily used in the `Do` elaborator, and the extra
overhead could be significant.
-/
private def mkSilentAnnotationIfHole (e : Expr) : TermElabM Expr := do
if (← isTacticOrPostponedHole? e).isSome then
return mkAnnotation `_silent e
else
return e
@[builtinTermElab ensureTypeOf] def elabEnsureTypeOf : TermElab := fun stx _ =>
match stx[2].isStrLit? with
| none => throwIllFormedSyntax
| some msg => do
let refTerm ← elabTerm stx[1] none
let refTermType ← inferType refTerm
-- See comment at `mkSilentAnnotationIfHole`
mkSilentAnnotationIfHole (← elabTermEnsuringType stx[3] refTermType (errorMsgHeader? := msg))
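-- Informal usage note (illustrative): given `ensure_type_of% s msg e`, the elaborator above first
-- elaborates `s`, infers its type, and then elaborates `e` insisting on that type, reporting any
-- mismatch under the message header `msg`; the silent annotation keeps an info node for this step
-- even when the elaboration of `e` is postponed (see the comment on `mkSilentAnnotationIfHole`).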
@[builtinTermElab ensureExpectedType] def elabEnsureExpectedType : TermElab := fun stx expectedType? =>
match stx[1].isStrLit? with
| none => throwIllFormedSyntax
| some msg => elabTermEnsuringType stx[2] expectedType? (errorMsgHeader? := msg)
@[builtinTermElab «open»] def elabOpen : TermElab := fun stx expectedType? => do
try
pushScope
let openDecls ← elabOpenDecl stx[1]
withTheReader Core.Context (fun ctx => { ctx with openDecls := openDecls }) do
elabTerm stx[3] expectedType?
finally
popScope
@[builtinTermElab «set_option»] def elabSetOption : TermElab := fun stx expectedType? => do
let options ← Elab.elabSetOption stx[1] stx[2]
withTheReader Core.Context (fun ctx => { ctx with maxRecDepth := maxRecDepth.get options, options := options }) do
elabTerm stx[4] expectedType?
@[builtinTermElab withAnnotateTerm] def elabWithAnnotateTerm : TermElab := fun stx expectedType? => do
match stx with
| `(with_annotate_term $stx $e) =>
withInfoContext' stx (elabTerm e expectedType?) (mkTermInfo .anonymous (expectedType? := expectedType?) stx)
| _ => throwUnsupportedSyntax
private unsafe def evalFilePathUnsafe (stx : Syntax) : TermElabM System.FilePath :=
evalTerm System.FilePath (Lean.mkConst ``System.FilePath) stx
@[implementedBy evalFilePathUnsafe]
private opaque evalFilePath (stx : Syntax) : TermElabM System.FilePath
@[builtinTermElab includeStr] def elabIncludeStr : TermElab
| `(include_str $path:term), _ => do
let path ← evalFilePath path
let ctx ← readThe Lean.Core.Context
let srcPath := System.FilePath.mk ctx.fileName
let some srcDir := srcPath.parent
| throwError "cannot compute parent directory of '{srcPath}'"
let path := srcDir / path
mkStrLit <$> IO.FS.readFile path
| _, _ => throwUnsupportedSyntax
end Lean.Elab.Term
|
81176f5c97c8dc2dcd095b0f1d1541b473f3462d
|
2fbe653e4bc441efde5e5d250566e65538709888
|
/src/linear_algebra/nonsingular_inverse.lean
|
c46991a428ff33990462b90c88aa93b2ca777df6
|
[
"Apache-2.0"
] |
permissive
|
aceg00/mathlib
|
5e15e79a8af87ff7eb8c17e2629c442ef24e746b
|
8786ea6d6d46d6969ac9a869eb818bf100802882
|
refs/heads/master
| 1,649,202,698,930
| 1,580,924,783,000
| 1,580,924,783,000
| 149,197,272
| 0
| 0
|
Apache-2.0
| 1,537,224,208,000
| 1,537,224,207,000
| null |
UTF-8
|
Lean
| false
| false
| 12,430
|
lean
|
/-
Copyright (c) 2019 Tim Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Tim Baanen.
Inverses for nonsingular square matrices.
-/
import algebra.big_operators
import data.matrix.basic
import linear_algebra.determinant
/-!
# Nonsingular inverses
In this file, we define an inverse for square matrices of invertible
determinant. For matrices that are not square or not of full rank, there is a
more general notion of pseudoinverses. Unfortunately, the definition of
pseudoinverses is typically in terms of inverses of nonsingular matrices, so we
need to define those first. The file also doesn't define a `has_inv` instance
for `matrix` so that it can be used for the pseudoinverse instead.
The definition of inverse used in this file is the adjugate divided by the determinant.
The adjugate is calculated with Cramer's rule, which we introduce first.
The vectors returned by Cramer's rule are given by the linear map `cramer`,
which sends a matrix `A` and vector `b` to the vector consisting of the
determinant of replacing the `i`th column of `A` with `b` at index `i`
(written as `(A.update_column i b).det`).
Using Cramer's rule, we can compute for each matrix `A` the matrix `adjugate A`.
The entries of the adjugate are the determinants of each minor of `A`.
Instead of defining a minor to be `A` with column `i` and row `j` deleted, we
replace the `i`th column of `A` with the `j`th basis vector; this has the same
determinant as the minor but more importantly equals Cramer's rule applied
to `A` and the `j`th basis vector, simplifying the subsequent proofs.
We prove the adjugate behaves like `det A • A⁻¹`. Finally, we show that dividing
the adjugate by `det A` (if possible), giving a matrix `nonsing_inv A`, will
result in a multiplicative inverse to `A`.
## References
* https://en.wikipedia.org/wiki/Cramer's_rule#Finding_inverse_matrix
## Tags
matrix inverse, cramer, cramer's rule, adjugate
-/
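/- A concrete illustration (informal, not relying on any definitions below): for the `2 × 2`
matrix `A` with rows `(a, b)` and `(c, d)` and the vector `v = (e, f)`, Cramer's rule solves
`A ⬝ x = v` by `x 0 = (e * d - b * f) / det A` and `x 1 = (a * f - e * c) / det A`, where
`det A = a * d - b * c`. The adjugate of `A` has rows `(d, -b)` and `(-c, a)`, so the inverse
constructed at the end of this file is `(a * d - b * c)⁻¹ • adjugate A`, which recovers the
usual closed formula for the inverse of a `2 × 2` matrix. -/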
namespace matrix
universes u v
variables {n : Type u} [fintype n] [decidable_eq n] {α : Type v}
open_locale matrix
open equiv equiv.perm finset
section update
/-- Update, i.e. replace the `i`th column of matrix `A` with the values in `b`. -/
def update_column (A : matrix n n α) (i : n) (b : n → α) : matrix n n α :=
function.update A i b
/-- Update, i.e. replace the `i`th row of matrix `A` with the values in `b`. -/
def update_row (A : matrix n n α) (j : n) (b : n → α) : matrix n n α :=
λ i, function.update (A i) j (b i)
variables {A : matrix n n α} {i j : n} {b : n → α}
@[simp] lemma update_column_self : update_column A i b i = b := function.update_same i b A
@[simp] lemma update_row_self : update_row A j b i j = b i := function.update_same j (b i) (A i)
@[simp] lemma update_column_ne {i' : n} (i_ne : i' ≠ i) : update_column A i b i' = A i' :=
function.update_noteq i_ne b A
@[simp] lemma update_row_ne {j' : n} (j_ne : j' ≠ j) : update_row A j b i j' = A i j' :=
function.update_noteq j_ne (b i) (A i)
lemma update_column_val {i' : n} : update_column A i b i' j = if i' = i then b j else A i' j :=
begin
by_cases i' = i,
{ rw [h, update_column_self, if_pos rfl] },
{ rw [update_column_ne h, if_neg h] }
end
lemma update_row_val {j' : n} : update_row A j b i j' = if j' = j then b i else A i j' :=
begin
by_cases j' = j,
{ rw [h, update_row_self, if_pos rfl] },
{ rw [update_row_ne h, if_neg h] }
end
lemma update_column_transpose : update_column Aᵀ i b = (update_row A i b)ᵀ :=
begin
ext i' j,
rw [transpose_val, update_column_val, update_row_val],
refl
end
end update
section cramer
/-!
### `cramer` section
Introduce the linear map `cramer` with values defined by `cramer_map`.
After defining `cramer_map` and showing it is linear,
we will restrict our proofs to using `cramer`.
-/
variables [comm_ring α] (A : matrix n n α) (b : n → α)
/--
`cramer_map A b i` is the determinant of the matrix `A` with column `i` replaced with `b`,
and thus `cramer_map A b` is the vector output by Cramer's rule on `A` and `b`.
If `A ⬝ x = b` has a unique solution in `x`, `cramer_map` sends a square matrix `A`
and vector `b` to the vector `x` such that `A ⬝ x = b`.
Otherwise, the outcome of `cramer_map` is well-defined but not necessarily useful.
-/
def cramer_map (i : n) : α := (A.update_column i b).det
lemma cramer_map_is_linear (i : n) : is_linear_map α (λ b, cramer_map A b i) :=
begin
have : Π {f : n → n} {i : n} (x : n → α),
finset.prod univ (λ (i' : n), (update_column A i x)ᵀ (f i') i')
= finset.prod univ (λ (i' : n), if i' = i then x (f i') else A i' (f i')),
{ intros, congr, ext i', rw [transpose_val, update_column_val] },
split,
{ intros x y,
repeat { rw [cramer_map, ←det_transpose, det] },
rw [←sum_add_distrib],
congr, ext σ,
rw [←mul_add ↑↑(sign σ)],
congr,
repeat { erw [this, finset.prod_ite _ _ (id : α → α)] },
erw [finset.filter_eq', if_pos (mem_univ i), prod_singleton, prod_singleton,
prod_singleton, ←add_mul],
refl },
{ intros c x,
repeat { rw [cramer_map, ←det_transpose, det] },
rw [smul_eq_mul, mul_sum],
congr, ext σ,
rw [←mul_assoc, mul_comm c, mul_assoc], congr,
repeat { erw [this, finset.prod_ite _ _ (id : α → α)] },
erw [finset.filter_eq', if_pos (mem_univ i),
prod_singleton, prod_singleton, mul_assoc],
refl }
end
lemma cramer_is_linear : is_linear_map α (cramer_map A) :=
begin
split; intros; ext i,
{ apply (cramer_map_is_linear A i).1 },
{ apply (cramer_map_is_linear A i).2 }
end
/-- The linear map of vectors associated to Cramer's rule.
To help the elaborator, we need to make the type `α` an explicit argument to
`cramer`. Otherwise, the coercion `⇑(cramer A) : (n → α) → (n → α)` gives an
error because it fails to infer the type (even though `α` can be inferred from
`A : matrix n n α`).
-/
def cramer (α : Type v) [comm_ring α] (A : matrix n n α) : (n → α) →ₗ[α] (n → α) :=
is_linear_map.mk' (cramer_map A) (cramer_is_linear A)
lemma cramer_apply (i : n) : cramer α A b i = (A.update_column i b).det := rfl
/-- Applying Cramer's rule to a column of the matrix gives a scaled basis vector. -/
lemma cramer_column_self (i : n) :
cramer α A (A i) = (λ j, if i = j then A.det else 0) :=
begin
ext j,
rw cramer_apply,
by_cases i = j,
{ -- i = j: this entry should be `A.det`
rw [if_pos h, ←h],
congr, ext i',
by_cases h : i' = i, { rw [h, update_column_self] }, { rw [update_column_ne h]} },
{ -- i ≠ j: this entry should be 0
rw [if_neg h],
apply det_zero_of_column_eq h,
rw [update_column_self, update_column_ne],
apply h }
end
/-- Use linearity of `cramer` to take it out of a summation. -/
lemma sum_cramer {β} (s : finset β) (f : β → n → α) :
s.sum (λ x, cramer α A (f x)) = cramer α A (sum s f) :=
(linear_map.map_sum (cramer α A)).symm
/-- Use linearity of `cramer` and vector evaluation to take `cramer A _ i` out of a summation. -/
lemma sum_cramer_apply {β} (s : finset β) (f : n → β → α) (i : n) :
s.sum (λ x, cramer α A (λ j, f j x) i) = cramer α A (λ (j : n), s.sum (f j)) i :=
calc s.sum (λ x, cramer α A (λ j, f j x) i)
= s.sum (λ x, cramer α A (λ j, f j x)) i : (pi.finset_sum_apply i s _).symm
... = cramer α A (λ (j : n), s.sum (f j)) i :
by { rw [sum_cramer, cramer_apply], congr, ext j, apply pi.finset_sum_apply }
end cramer
section adjugate
/-! ### `adjugate` section
Define the `adjugate` matrix and a few equations.
These will hold for any matrix over a commutative ring,
while the `inv` section is specifically for invertible matrices.
-/
variable [comm_ring α]
/-- The adjugate matrix is the transpose of the cofactor matrix.
Typically, the cofactor matrix is defined by taking the determinant of minors,
i.e. the matrix with a row and column removed.
However, the proof of `mul_adjugate` becomes a lot easier if we define the
minor as replacing a column with a basis vector, since it allows us to use
facts about the `cramer` map.
-/
def adjugate (A : matrix n n α) : matrix n n α := λ i, cramer α A (λ j, if i = j then 1 else 0)
lemma adjugate_def (A : matrix n n α) :
adjugate A = λ i, cramer α A (λ j, if i = j then 1 else 0) := rfl
lemma adjugate_val (A : matrix n n α) (i j : n) :
adjugate A i j = (A.update_column j (λ j, if i = j then 1 else 0)).det := rfl
lemma adjugate_transpose (A : matrix n n α) : (adjugate A)ᵀ = adjugate (Aᵀ) :=
begin
ext i j,
rw [transpose_val, adjugate_val, adjugate_val, update_column_transpose, det_transpose],
apply finset.sum_congr rfl,
intros σ _,
congr' 1,
by_cases i = σ j,
{ -- Everything except `(i , j)` (= `(σ j , j)`) is given by A, and the rest is a single `1`.
congr; ext j',
have := (@equiv.injective _ _ σ j j' : σ j = σ j' → j = j'),
rw [update_column_val, update_row_val],
finish },
{ -- Otherwise, we need to show that there is a `0` somewhere in the product.
have : univ.prod (λ (j' : n), update_row A j (λ (i' : n), ite (i = i') 1 0) (σ j') j') = 0,
{ apply prod_eq_zero (mem_univ j),
rw [update_row_self],
exact if_neg h },
rw this,
apply prod_eq_zero (mem_univ (σ⁻¹ i)),
erw [apply_symm_apply σ i, update_column_self],
apply if_neg,
intro h',
exact h ((symm_apply_eq σ).mp h'.symm) }
end
lemma mul_adjugate_val (A : matrix n n α) (i j k) :
A i k * adjugate A k j = cramer α A (λ j, if k = j then A i k else 0) j :=
begin
erw [←smul_eq_mul, ←pi.smul_apply, ←linear_map.smul],
congr, ext,
rw [pi.smul_apply, smul_eq_mul, mul_ite]
end
lemma mul_adjugate (A : matrix n n α) : A ⬝ adjugate A = A.det • 1 :=
begin
ext i j,
rw [mul_val, smul_val, one_val, mul_ite],
calc
sum univ (λ (k : n), A i k * adjugate A k j)
= sum univ (λ (k : n), cramer α A (λ j, if k = j then A i k else 0) j)
: by {congr, ext k, apply mul_adjugate_val A i j k}
... = cramer α A (λ j, sum univ (λ (k : n), if k = j then A i k else 0)) j
: sum_cramer_apply A univ (λ (j k : n), if k = j then A i k else 0) j
... = cramer α A (A i) j : by { rw [cramer_apply], congr, ext,
rw [sum_ite_eq' univ x (A i), if_pos (mem_univ _)] }
... = if i = j then det A else 0 : by rw cramer_column_self
end
lemma adjugate_mul (A : matrix n n α) : adjugate A ⬝ A = A.det • 1 :=
calc adjugate A ⬝ A = (Aᵀ ⬝ (adjugate Aᵀ))ᵀ :
by rw [←adjugate_transpose, ←transpose_mul, transpose_transpose]
... = A.det • 1 : by rw [mul_adjugate (Aᵀ), det_transpose, transpose_smul, transpose_one]
end adjugate
section inv
/-! ### `inv` section
Defines the matrix `nonsing_inv A` and proves it is the inverse matrix
of a square matrix `A` as long as `det A` has a multiplicative inverse.
-/
variables [comm_ring α] [has_inv α]
/-- The inverse of a nonsingular matrix.
This is not the most general possible definition, so we don't instantiate `has_inv` (yet).
-/
def nonsing_inv (A : matrix n n α) : matrix n n α := (A.det)⁻¹ • adjugate A
lemma nonsing_inv_val (A : matrix n n α) (i j : n) :
A.nonsing_inv i j = (A.det)⁻¹ * adjugate A i j := rfl
lemma transpose_nonsing_inv (A : matrix n n α) : (A.nonsing_inv)ᵀ = (Aᵀ).nonsing_inv :=
by {ext, simp [transpose_val, nonsing_inv_val, det_transpose, (adjugate_transpose A).symm]}
section
-- Increase max depth to allow inference of `mul_action α (matrix n n α)`.
set_option class.instance_max_depth 60
/-- The `nonsing_inv` of `A` is a right inverse. -/
theorem mul_nonsing_inv (A : matrix n n α) (inv_mul_cancel : A.det⁻¹ * A.det = 1) :
A ⬝ nonsing_inv A = 1 :=
by erw [mul_smul, mul_adjugate, smul_smul, inv_mul_cancel, @one_smul _ _ _ (pi.mul_action _)]
end
/-- The `nonsing_inv` of `A` is a left inverse. -/
theorem nonsing_inv_mul (A : matrix n n α) (inv_mul_cancel : A.det⁻¹ * A.det = 1) :
nonsing_inv A ⬝ A = 1 :=
have inv_mul_cancel' : (det Aᵀ)⁻¹ * det Aᵀ = 1 := by {rw det_transpose, assumption},
calc nonsing_inv A ⬝ A
= (Aᵀ ⬝ nonsing_inv (Aᵀ))ᵀ : by rw [transpose_mul, transpose_nonsing_inv, transpose_transpose]
... = 1ᵀ : by rw [mul_nonsing_inv Aᵀ inv_mul_cancel']
... = 1 : transpose_one
end inv
end matrix
|
59d95be38cb641b3b6c821d3748abdf736e19ee3
|
c777c32c8e484e195053731103c5e52af26a25d1
|
/src/algebra/category/Module/abelian.lean
|
ebb1f0aa591da704ba4fe8763cf68bc6887ae74d
|
[
"Apache-2.0"
] |
permissive
|
kbuzzard/mathlib
|
2ff9e85dfe2a46f4b291927f983afec17e946eb8
|
58537299e922f9c77df76cb613910914a479c1f7
|
refs/heads/master
| 1,685,313,702,744
| 1,683,974,212,000
| 1,683,974,212,000
| 128,185,277
| 1
| 0
| null | 1,522,920,600,000
| 1,522,920,600,000
| null |
UTF-8
|
Lean
| false
| false
| 4,129
|
lean
|
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import linear_algebra.isomorphisms
import algebra.category.Module.kernels
import algebra.category.Module.limits
import category_theory.abelian.exact
/-!
# The category of left R-modules is abelian.
Additionally, two linear maps are exact in the categorical sense iff `range f = ker g`.
-/
open category_theory
open category_theory.limits
noncomputable theory
universes w v u
namespace Module
variables {R : Type u} [ring R] {M N : Module.{v} R} (f : M ⟶ N)
/-- In the category of modules, every monomorphism is normal. -/
def normal_mono (hf : mono f) : normal_mono f :=
{ Z := of R (N ⧸ f.range),
g := f.range.mkq,
w := linear_map.range_mkq_comp _,
is_limit :=
is_kernel.iso_kernel _ _ (kernel_is_limit _)
/- The following [invalid Lean code](https://github.com/leanprover-community/lean/issues/341)
might help you understand what's going on here:
```
calc
M ≃ₗ[R] f.ker.quotient : (submodule.quot_equiv_of_eq_bot _ (ker_eq_bot_of_mono _)).symm
... ≃ₗ[R] f.range : linear_map.quot_ker_equiv_range f
    ... ≃ₗ[R] f.range.mkq.ker : linear_equiv.of_eq _ _ (submodule.ker_mkq _).symm
```
-/
(linear_equiv.to_Module_iso'
((submodule.quot_equiv_of_eq_bot _ (ker_eq_bot_of_mono _)).symm ≪≫ₗ
((linear_map.quot_ker_equiv_range f) ≪≫ₗ
(linear_equiv.of_eq _ _ (submodule.ker_mkq _).symm)))) $
by { ext, refl } }
/-- In the category of modules, every epimorphism is normal. -/
def normal_epi (hf : epi f) : normal_epi f :=
{ W := of R f.ker,
g := f.ker.subtype,
w := linear_map.comp_ker_subtype _,
is_colimit :=
is_cokernel.cokernel_iso _ _ (cokernel_is_colimit _)
(linear_equiv.to_Module_iso'
/- The following invalid Lean code might help you understand what's going on here:
```
calc f.ker.subtype.range.quotient
≃ₗ[R] f.ker.quotient : submodule.quot_equiv_of_eq _ _ (submodule.range_subtype _)
... ≃ₗ[R] f.range : linear_map.quot_ker_equiv_range f
... ≃ₗ[R] N : linear_equiv.of_top _ (range_eq_top_of_epi _)
```
-/
(((submodule.quot_equiv_of_eq _ _ (submodule.range_subtype _)) ≪≫ₗ
(linear_map.quot_ker_equiv_range f)) ≪≫ₗ
(linear_equiv.of_top _ (range_eq_top_of_epi _)))) $
by { ext, refl } }
/-- The category of R-modules is abelian. -/
instance abelian : abelian (Module R) :=
{ has_finite_products := ⟨λ n, limits.has_limits_of_shape_of_has_limits⟩,
has_kernels := limits.has_kernels_of_has_equalizers (Module R),
has_cokernels := has_cokernels_Module,
normal_mono_of_mono := λ X Y, normal_mono,
normal_epi_of_epi := λ X Y, normal_epi }
section reflects_limits
/- We need to put this in this weird spot because we need to know that the category of modules
is balanced. -/
instance forget_reflects_limits_of_size :
reflects_limits_of_size.{v v} (forget (Module.{max v w} R)) :=
reflects_limits_of_reflects_isomorphisms
instance forget₂_reflects_limits_of_size :
reflects_limits_of_size.{v v} (forget₂ (Module.{max v w} R) AddCommGroup.{max v w}) :=
reflects_limits_of_reflects_isomorphisms
instance forget_reflects_limits : reflects_limits (forget (Module.{v} R)) :=
Module.forget_reflects_limits_of_size.{v v}
instance forget₂_reflects_limits : reflects_limits (forget₂ (Module.{v} R) AddCommGroup.{v}) :=
Module.forget₂_reflects_limits_of_size.{v v}
end reflects_limits
variables {O : Module.{v} R} (g : N ⟶ O)
open linear_map
local attribute [instance] preadditive.has_equalizers_of_has_kernels
theorem exact_iff : exact f g ↔ f.range = g.ker :=
begin
rw abelian.exact_iff' f g (kernel_is_limit _) (cokernel_is_colimit _),
exact ⟨λ h, le_antisymm (range_le_ker_iff.2 h.1) (ker_le_range_iff.2 h.2),
λ h, ⟨range_le_ker_iff.1 $ le_of_eq h, ker_le_range_iff.1 $ le_of_eq h.symm⟩⟩
end
end Module
|
9b62d69d260b3b06aaa6998b9ec9d2b16364c7c6
|
ba4794a0deca1d2aaa68914cd285d77880907b5c
|
/src/game/world9/level1.lean
|
ca2eae6c1301a69b6022e293fab103a0d557f094
|
[
"Apache-2.0"
] |
permissive
|
ChrisHughes24/natural_number_game
|
c7c00aa1f6a95004286fd456ed13cf6e113159ce
|
9d09925424da9f6275e6cfe427c8bcf12bb0944f
|
refs/heads/master
| 1,600,715,773,528
| 1,573,910,462,000
| 1,573,910,462,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,513
|
lean
|
import game.world8.level13 -- hide
import game.world3.level9
namespace mynat -- hide
/-
# Advanced Multiplication World
## Level 1: `mul_pos`
Welcome to Advanced Multiplication World! Before attempting this
world you should have completed seven other worlds, including
Multiplication World and Advanced Addition World. There are four
levels in this world.
Recall that if `b : mynat` is a hypothesis and you do `cases b with n`,
your one goal will split into two goals,
namely the cases `b = 0` and `b = succ(n)`. So `cases` here is like
a weaker version of induction (you don't get the inductive hypothesis).
## Tricks
1) if your goal is `⊢ X ≠ Y` then `intro h` will give you `h : X = Y` and
a goal of `⊢ false`. This is because `X ≠ Y` *means* `(X = Y) → false`.
Conversely if your goal is `false` and you have `h : X ≠ Y` as a hypothesis
then `apply h` will turn the goal into `X = Y` (see the short worked example below).
2) if `hab : succ (3 * x + 2 * y + 1) = 0` is a hypothesis and your goal is `⊢ false`,
then `exact succ_ne_zero _ hab` will solve the goal, because Lean will figure
out that `_` is supposed to be `3 * x + 2 * y + 1`.
-/
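/- Here is one extra worked example of trick 1 (an illustration, not one of the levels; it uses
only tactics you have already unlocked): -/
example (x : mynat) (hx : x ≠ 0) : x ≠ 0 :=
begin
  intro h,   -- now `h : x = 0` and the goal is `false`
  apply hx,  -- the goal becomes `x = 0`
  exact h,
end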
/- Theorem
The product of two non-zero natural numbers is non-zero.
-/
theorem mul_pos (a b : mynat) : a ≠ 0 → b ≠ 0 → a * b ≠ 0 :=
begin [less_leaky]
intros ha hb,
intro hab,
cases b with b,
apply hb,
refl,
rw mul_succ at hab,
apply ha,
cases a with a,
refl,
rw add_succ at hab,
exfalso,
exact succ_ne_zero _ hab,
end
end mynat -- hide
|
25cf590e6fe024fc9c30d7be51465956b79a5c19
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/data/analysis/filter_auto.lean
|
ba1aa02e490b94734d3aa92475fe05ffd6f983b7
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 9,279
|
lean
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
Computational realization of filters (experimental).
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.order.filter.cofinite
import Mathlib.PostPort
universes u_1 u_2 l u_3 u_4 u_5
namespace Mathlib
/-- A `cfilter α σ` is a realization of a filter (base) on `α`,
represented by a type `σ` together with operations for the top element and
the binary inf operation. -/
structure cfilter (α : Type u_1) (σ : Type u_2) [partial_order α] where
f : σ → α
pt : σ
inf : σ → σ → σ
inf_le_left : ∀ (a b : σ), f (inf a b) ≤ f a
inf_le_right : ∀ (a b : σ), f (inf a b) ≤ f b
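-- Illustrative sketch (not a definition made in this file): the `at_top` filter on `ℕ` can be
-- realized by a `cfilter (set ℕ) ℕ` with `f n = {m | n ≤ m}`, `pt = 0` and `inf = max`; the two
-- `inf_le_*` fields then amount to the inclusions `{m | max a b ≤ m} ⊆ {m | a ≤ m}` and
-- `{m | max a b ≤ m} ⊆ {m | b ≤ m}`.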
namespace cfilter
protected instance has_coe_to_fun {α : Type u_1} {σ : Type u_3} [partial_order α] :
has_coe_to_fun (cfilter α σ) :=
has_coe_to_fun.mk (fun (x : cfilter α σ) => σ → α) f
@[simp] theorem coe_mk {α : Type u_1} {σ : Type u_3} [partial_order α] (f : σ → α) (pt : σ)
(inf : σ → σ → σ) (h₁ : ∀ (a b : σ), f (inf a b) ≤ f a) (h₂ : ∀ (a b : σ), f (inf a b) ≤ f b)
(a : σ) : coe_fn (mk f pt inf h₁ h₂) a = f a :=
rfl
/-- Map a cfilter to an equivalent representation type. -/
def of_equiv {α : Type u_1} {σ : Type u_3} {τ : Type u_4} [partial_order α] (E : σ ≃ τ) :
cfilter α σ → cfilter α τ :=
sorry
@[simp] theorem of_equiv_val {α : Type u_1} {σ : Type u_3} {τ : Type u_4} [partial_order α]
(E : σ ≃ τ) (F : cfilter α σ) (a : τ) :
coe_fn (of_equiv E F) a = coe_fn F (coe_fn (equiv.symm E) a) :=
sorry
/-- The filter represented by a `cfilter` is the collection of supersets of
elements of the filter base. -/
def to_filter {α : Type u_1} {σ : Type u_3} (F : cfilter (set α) σ) : filter α :=
filter.mk (set_of fun (a : set α) => ∃ (b : σ), coe_fn F b ⊆ a) sorry sorry sorry
@[simp] theorem mem_to_filter_sets {α : Type u_1} {σ : Type u_3} (F : cfilter (set α) σ)
{a : set α} : a ∈ to_filter F ↔ ∃ (b : σ), coe_fn F b ⊆ a :=
iff.rfl
end cfilter
/-- A realizer for filter `f` is a cfilter which generates `f`. -/
structure filter.realizer {α : Type u_1} (f : filter α) where
σ : Type u_5
F : cfilter (set α) σ
eq : cfilter.to_filter F = f
protected def cfilter.to_realizer {α : Type u_1} {σ : Type u_3} (F : cfilter (set α) σ) :
filter.realizer (cfilter.to_filter F) :=
filter.realizer.mk σ F sorry
namespace filter.realizer
theorem mem_sets {α : Type u_1} {f : filter α} (F : realizer f) {a : set α} :
a ∈ f ↔ ∃ (b : σ F), coe_fn (F F) b ⊆ a :=
sorry
-- Used because it has better definitional equalities than the eq.rec proof
def of_eq {α : Type u_1} {f : filter α} {g : filter α} (e : f = g) (F : realizer f) : realizer g :=
mk (σ F) (F F) sorry
/-- A filter realizes itself. -/
def of_filter {α : Type u_1} (f : filter α) : realizer f :=
mk (↥(sets f))
(cfilter.mk subtype.val { val := set.univ, property := univ_mem_sets }
(fun (_x : ↥(sets f)) => sorry) sorry sorry)
sorry
/-- Transfer a filter realizer to another realizer on a different base type. -/
def of_equiv {α : Type u_1} {τ : Type u_4} {f : filter α} (F : realizer f) (E : σ F ≃ τ) :
realizer f :=
mk τ (cfilter.of_equiv E (F F)) sorry
@[simp] theorem of_equiv_σ {α : Type u_1} {τ : Type u_4} {f : filter α} (F : realizer f)
(E : σ F ≃ τ) : σ (of_equiv F E) = τ :=
rfl
@[simp] theorem of_equiv_F {α : Type u_1} {τ : Type u_4} {f : filter α} (F : realizer f)
(E : σ F ≃ τ) (s : τ) : coe_fn (F (of_equiv F E)) s = coe_fn (F F) (coe_fn (equiv.symm E) s) :=
sorry
/-- `unit` is a realizer for the principal filter -/
protected def principal {α : Type u_1} (s : set α) : realizer (principal s) :=
mk Unit
(cfilter.mk (fun (_x : Unit) => s) Unit.unit (fun (_x _x : Unit) => Unit.unit) sorry sorry)
sorry
@[simp] theorem principal_σ {α : Type u_1} (s : set α) : σ (realizer.principal s) = Unit := rfl
@[simp] theorem principal_F {α : Type u_1} (s : set α) (u : Unit) :
coe_fn (F (realizer.principal s)) u = s :=
rfl
/-- `unit` is a realizer for the top filter -/
protected def top {α : Type u_1} : realizer ⊤ := of_eq principal_univ (realizer.principal set.univ)
@[simp] theorem top_σ {α : Type u_1} : σ realizer.top = Unit := rfl
@[simp] theorem top_F {α : Type u_1} (u : Unit) : coe_fn (F realizer.top) u = set.univ := rfl
/-- `unit` is a realizer for the bottom filter -/
protected def bot {α : Type u_1} : realizer ⊥ := of_eq principal_empty (realizer.principal ∅)
@[simp] theorem bot_σ {α : Type u_1} : σ realizer.bot = Unit := rfl
@[simp] theorem bot_F {α : Type u_1} (u : Unit) : coe_fn (F realizer.bot) u = ∅ := rfl
/-- Construct a realizer for `map m f` given a realizer for `f` -/
protected def map {α : Type u_1} {β : Type u_2} (m : α → β) {f : filter α} (F : realizer f) :
realizer (map m f) :=
mk (σ F)
(cfilter.mk (fun (s : σ F) => m '' coe_fn (F F) s) (cfilter.pt (F F)) (cfilter.inf (F F)) sorry
sorry)
sorry
@[simp] theorem map_σ {α : Type u_1} {β : Type u_2} (m : α → β) {f : filter α} (F : realizer f) :
σ (realizer.map m F) = σ F :=
rfl
@[simp] theorem map_F {α : Type u_1} {β : Type u_2} (m : α → β) {f : filter α} (F : realizer f)
(s : σ (realizer.map m F)) : coe_fn (F (realizer.map m F)) s = m '' coe_fn (F F) s :=
rfl
/-- Construct a realizer for `comap m f` given a realizer for `f` -/
protected def comap {α : Type u_1} {β : Type u_2} (m : α → β) {f : filter β} (F : realizer f) :
realizer (comap m f) :=
mk (σ F)
(cfilter.mk (fun (s : σ F) => m ⁻¹' coe_fn (F F) s) (cfilter.pt (F F)) (cfilter.inf (F F)) sorry
sorry)
sorry
/-- Construct a realizer for the sup of two filters -/
protected def sup {α : Type u_1} {f : filter α} {g : filter α} (F : realizer f) (G : realizer g) :
realizer (f ⊔ g) :=
mk (σ F × σ G)
(cfilter.mk (fun (_x : σ F × σ G) => sorry) (cfilter.pt (F F), cfilter.pt (F G))
(fun (_x : σ F × σ G) => sorry) sorry sorry)
sorry
/-- Construct a realizer for the inf of two filters -/
protected def inf {α : Type u_1} {f : filter α} {g : filter α} (F : realizer f) (G : realizer g) :
realizer (f ⊓ g) :=
mk (σ F × σ G)
(cfilter.mk (fun (_x : σ F × σ G) => sorry) (cfilter.pt (F F), cfilter.pt (F G))
(fun (_x : σ F × σ G) => sorry) sorry sorry)
sorry
/-- Construct a realizer for the cofinite filter -/
protected def cofinite {α : Type u_1} [DecidableEq α] : realizer cofinite :=
mk (finset α)
(cfilter.mk (fun (s : finset α) => set_of fun (a : α) => ¬a ∈ s) ∅ has_union.union sorry sorry)
sorry
/-- Construct a realizer for filter bind -/
protected def bind {α : Type u_1} {β : Type u_2} {f : filter α} {m : α → filter β} (F : realizer f)
(G : (i : α) → realizer (m i)) : realizer (bind f m) :=
mk (sigma fun (s : σ F) => (i : α) → i ∈ coe_fn (F F) s → σ (G i))
(cfilter.mk (fun (_x : sigma fun (s : σ F) => (i : α) → i ∈ coe_fn (F F) s → σ (G i)) => sorry)
(sigma.mk (cfilter.pt (F F))
fun (i : α) (H : i ∈ coe_fn (F F) (cfilter.pt (F F))) => cfilter.pt (F (G i)))
(fun (_x : sigma fun (s : σ F) => (i : α) → i ∈ coe_fn (F F) s → σ (G i)) => sorry) sorry
sorry)
sorry
/-- Construct a realizer for indexed supremum -/
protected def Sup {α : Type u_1} {β : Type u_2} {f : α → filter β} (F : (i : α) → realizer (f i)) :
realizer (supr fun (i : α) => f i) :=
let F' : realizer (supr fun (i : α) => f i) := of_eq sorry (realizer.bind realizer.top F);
of_equiv F'
((fun (this : (sigma fun (u : Unit) => (i : α) → True → σ (F i)) ≃ ((i : α) → σ (F i))) => this)
(equiv.mk (fun (_x : sigma fun (u : Unit) => (i : α) → True → σ (F i)) => sorry)
(fun (f_1 : (i : α) → σ (F i)) => sigma.mk Unit.unit fun (i : α) (_x : True) => f_1 i) sorry
sorry))
/-- Construct a realizer for the product of filters -/
protected def prod {α : Type u_1} {f : filter α} {g : filter α} (F : realizer f) (G : realizer g) :
realizer (filter.prod f g) :=
realizer.inf (realizer.comap prod.fst F) (realizer.comap prod.snd G)
theorem le_iff {α : Type u_1} {f : filter α} {g : filter α} (F : realizer f) (G : realizer g) :
f ≤ g ↔ ∀ (b : σ G), ∃ (a : σ F), coe_fn (F F) a ≤ coe_fn (F G) b :=
sorry
theorem tendsto_iff {α : Type u_1} {β : Type u_2} (f : α → β) {l₁ : filter α} {l₂ : filter β}
(L₁ : realizer l₁) (L₂ : realizer l₂) :
tendsto f l₁ l₂ ↔
∀ (b : σ L₂), ∃ (a : σ L₁), ∀ (x : α), x ∈ coe_fn (F L₁) a → f x ∈ coe_fn (F L₂) b :=
iff.trans (le_iff (realizer.map f L₁) L₂)
(forall_congr
fun (b : σ L₂) => exists_congr fun (a : σ (realizer.map f L₁)) => set.image_subset_iff)
theorem ne_bot_iff {α : Type u_1} {f : filter α} (F : realizer f) :
f ≠ ⊥ ↔ ∀ (a : σ F), set.nonempty (coe_fn (F F) a) :=
sorry
end Mathlib
|
b9637196e63b62ef742e09dd1162db9060373df3
|
5e3548e65f2c037cb94cd5524c90c623fbd6d46a
|
/src_icannos_totilas/aops/2012-USAMO-Problem_4.lean
|
0a1928fbc9a10a4b64f7145984a84186ba5c315c
|
[] |
no_license
|
ahayat16/lean_exos
|
d4f08c30adb601a06511a71b5ffb4d22d12ef77f
|
682f2552d5b04a8c8eb9e4ab15f875a91b03845c
|
refs/heads/main
| 1,693,101,073,585
| 1,636,479,336,000
| 1,636,479,336,000
| 415,000,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 170
|
lean
|
import data.nat.factorial
import data.pnat.basic
theorem USAMO_Problem_4_2012 (f : pnat -> pnat) :
(∀ n : pnat, f (pnat.factorial n) = nat.factorial (f n))
:= sorry
|
51b8a6743a31fbd421e99589ffce8101c2e346ac
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/order/complete_boolean_algebra.lean
|
01e1f01f043267ea03afc74176b237cf750f4bd2
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 14,306
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Yaël Dillies
-/
import order.complete_lattice
import order.directed
import logic.equiv.set
/-!
# Frames, completely distributive lattices and Boolean algebras
In this file we define and provide API for frames, completely distributive lattices and completely
distributive Boolean algebras.
## Typeclasses
* `order.frame`: Frame: A complete lattice whose `⊓` distributes over `⨆`.
* `order.coframe`: Coframe: A complete lattice whose `⊔` distributes over `⨅`.
* `complete_distrib_lattice`: Completely distributive lattices: A complete lattice whose `⊓` and `⊔`
distribute over `⨆` and `⨅` respectively.
* `complete_boolean_algebra`: Completely distributive Boolean algebra: A Boolean algebra whose `⊓`
and `⊔` distribute over `⨆` and `⨅` respectively.
A set of opens gives rise to a topological space precisely if it forms a frame. Such a frame is also
completely distributive, but not all frames are. `filter` is a coframe but not a completely
distributive lattice.
## TODO
Add instances for `prod`
## References
* [Wikipedia, *Complete Heyting algebra*](https://en.wikipedia.org/wiki/Complete_Heyting_algebra)
* [Francis Borceux, *Handbook of Categorical Algebra III*][borceux-vol3]
-/
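/- For intuition (an informal aside): in the frame `set α` (i.e. `α → Prop`), the inequality
`inf_Sup_le_supr_inf` specialises to the familiar distributivity of intersection over arbitrary
unions, `a ∩ ⋃₀ S ⊆ ⋃ b ∈ S, a ∩ b`, and the coframe inequality `infi_sup_le_sup_Inf` is the dual
statement for `∪` and `⋂`. -/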
set_option old_structure_cmd true
open function set
universes u v w
variables {α : Type u} {β : Type v} {ι : Sort w} {κ : ι → Sort*}
/-- A frame, aka complete Heyting algebra, is a complete lattice whose `⊓` distributes over `⨆`. -/
class order.frame (α : Type*) extends complete_lattice α :=
(inf_Sup_le_supr_inf (a : α) (s : set α) : a ⊓ Sup s ≤ ⨆ b ∈ s, a ⊓ b)
/-- A coframe, aka complete Brouwer algebra or complete co-Heyting algebra, is a complete lattice
whose `⊔` distributes over `⨅`. -/
class order.coframe (α : Type*) extends complete_lattice α :=
(infi_sup_le_sup_Inf (a : α) (s : set α) : (⨅ b ∈ s, a ⊔ b) ≤ a ⊔ Inf s)
open order
/-- A completely distributive lattice is a complete lattice whose `⊔` and `⊓` respectively
distribute over `⨅` and `⨆`. -/
class complete_distrib_lattice (α : Type*) extends frame α :=
(infi_sup_le_sup_Inf : ∀ a s, (⨅ b ∈ s, a ⊔ b) ≤ a ⊔ Inf s)
@[priority 100] -- See note [lower instance priority]
instance complete_distrib_lattice.to_coframe [complete_distrib_lattice α] : coframe α :=
{ .. ‹complete_distrib_lattice α› }
section frame
variables [frame α] {s t : set α} {a b : α}
instance order_dual.coframe : coframe αᵒᵈ :=
{ infi_sup_le_sup_Inf := frame.inf_Sup_le_supr_inf, ..order_dual.complete_lattice α }
lemma inf_Sup_eq : a ⊓ Sup s = ⨆ b ∈ s, a ⊓ b :=
(frame.inf_Sup_le_supr_inf _ _).antisymm supr_inf_le_inf_Sup
lemma Sup_inf_eq : Sup s ⊓ b = ⨆ a ∈ s, a ⊓ b :=
by simpa only [inf_comm] using @inf_Sup_eq α _ s b
lemma supr_inf_eq (f : ι → α) (a : α) : (⨆ i, f i) ⊓ a = ⨆ i, f i ⊓ a :=
by rw [supr, Sup_inf_eq, supr_range]
lemma inf_supr_eq (a : α) (f : ι → α) : a ⊓ (⨆ i, f i) = ⨆ i, a ⊓ f i :=
by simpa only [inf_comm] using supr_inf_eq f a
lemma bsupr_inf_eq {f : Π i, κ i → α} (a : α) : (⨆ i j, f i j) ⊓ a = ⨆ i j, f i j ⊓ a :=
by simp only [supr_inf_eq]
lemma inf_bsupr_eq {f : Π i, κ i → α} (a : α) : a ⊓ (⨆ i j, f i j) = ⨆ i j, a ⊓ f i j :=
by simp only [inf_supr_eq]
lemma supr_inf_supr {ι ι' : Type*} {f : ι → α} {g : ι' → α} :
(⨆ i, f i) ⊓ (⨆ j, g j) = ⨆ i : ι × ι', f i.1 ⊓ g i.2 :=
by simp only [inf_supr_eq, supr_inf_eq, supr_prod]
lemma bsupr_inf_bsupr {ι ι' : Type*} {f : ι → α} {g : ι' → α} {s : set ι} {t : set ι'} :
(⨆ i ∈ s, f i) ⊓ (⨆ j ∈ t, g j) = ⨆ p ∈ s ×ˢ t, f (p : ι × ι').1 ⊓ g p.2 :=
begin
simp only [supr_subtype', supr_inf_supr],
exact (equiv.surjective _).supr_congr (equiv.set.prod s t).symm (λ x, rfl)
end
lemma Sup_inf_Sup : Sup s ⊓ Sup t = ⨆ p ∈ s ×ˢ t, (p : α × α).1 ⊓ p.2 :=
by simp only [Sup_eq_supr, bsupr_inf_bsupr]
lemma supr_disjoint_iff {f : ι → α} : disjoint (⨆ i, f i) a ↔ ∀ i, disjoint (f i) a :=
by simp only [disjoint_iff, supr_inf_eq, supr_eq_bot]
lemma disjoint_supr_iff {f : ι → α} : disjoint a (⨆ i, f i) ↔ ∀ i, disjoint a (f i) :=
by simpa only [disjoint.comm] using supr_disjoint_iff
lemma supr₂_disjoint_iff {f : Π i, κ i → α} :
disjoint (⨆ i j, f i j) a ↔ ∀ i j, disjoint (f i j) a :=
by simp_rw supr_disjoint_iff
lemma disjoint_supr₂_iff {f : Π i, κ i → α} :
disjoint a (⨆ i j, f i j) ↔ ∀ i j, disjoint a (f i j) :=
by simp_rw disjoint_supr_iff
lemma Sup_disjoint_iff {s : set α} : disjoint (Sup s) a ↔ ∀ b ∈ s, disjoint b a :=
by simp only [disjoint_iff, Sup_inf_eq, supr_eq_bot]
lemma disjoint_Sup_iff {s : set α} : disjoint a (Sup s) ↔ ∀ b ∈ s, disjoint a b :=
by simpa only [disjoint.comm] using Sup_disjoint_iff
lemma supr_inf_of_monotone {ι : Type*} [preorder ι] [is_directed ι (≤)] {f g : ι → α}
(hf : monotone f) (hg : monotone g) :
(⨆ i, f i ⊓ g i) = (⨆ i, f i) ⊓ (⨆ i, g i) :=
begin
refine (le_supr_inf_supr f g).antisymm _,
rw [supr_inf_supr],
refine supr_mono' (λ i, _),
rcases directed_of (≤) i.1 i.2 with ⟨j, h₁, h₂⟩,
exact ⟨j, inf_le_inf (hf h₁) (hg h₂)⟩
end
lemma supr_inf_of_antitone {ι : Type*} [preorder ι] [is_directed ι (swap (≤))] {f g : ι → α}
(hf : antitone f) (hg : antitone g) :
(⨆ i, f i ⊓ g i) = (⨆ i, f i) ⊓ (⨆ i, g i) :=
@supr_inf_of_monotone α _ ιᵒᵈ _ _ f g hf.dual_left hg.dual_left
instance pi.frame {ι : Type*} {π : ι → Type*} [Π i, frame (π i)] : frame (Π i, π i) :=
{ inf_Sup_le_supr_inf := λ a s i,
by simp only [complete_lattice.Sup, Sup_apply, supr_apply, pi.inf_apply, inf_supr_eq,
← supr_subtype''],
..pi.complete_lattice }
@[priority 100] -- see Note [lower instance priority]
instance frame.to_distrib_lattice : distrib_lattice α :=
distrib_lattice.of_inf_sup_le $ λ a b c,
by rw [←Sup_pair, ←Sup_pair, inf_Sup_eq, ←Sup_image, image_pair]
end frame
section coframe
variables [coframe α] {s t : set α} {a b : α}
instance order_dual.frame : frame αᵒᵈ :=
{ inf_Sup_le_supr_inf := coframe.infi_sup_le_sup_Inf, ..order_dual.complete_lattice α }
lemma sup_Inf_eq : a ⊔ Inf s = ⨅ b ∈ s, a ⊔ b := @inf_Sup_eq αᵒᵈ _ _ _
lemma Inf_sup_eq : Inf s ⊔ b = ⨅ a ∈ s, a ⊔ b := @Sup_inf_eq αᵒᵈ _ _ _
lemma infi_sup_eq (f : ι → α) (a : α) : (⨅ i, f i) ⊔ a = ⨅ i, f i ⊔ a := @supr_inf_eq αᵒᵈ _ _ _ _
lemma sup_infi_eq (a : α) (f : ι → α) : a ⊔ (⨅ i, f i) = ⨅ i, a ⊔ f i := @inf_supr_eq αᵒᵈ _ _ _ _
lemma binfi_sup_eq {f : Π i, κ i → α} (a : α) : (⨅ i j, f i j) ⊔ a = ⨅ i j, f i j ⊔ a :=
@bsupr_inf_eq αᵒᵈ _ _ _ _ _
lemma sup_binfi_eq {f : Π i, κ i → α} (a : α) : a ⊔ (⨅ i j, f i j) = ⨅ i j, a ⊔ f i j :=
@inf_bsupr_eq αᵒᵈ _ _ _ _ _
lemma infi_sup_infi {ι ι' : Type*} {f : ι → α} {g : ι' → α} :
(⨅ i, f i) ⊔ (⨅ i, g i) = ⨅ i : ι × ι', f i.1 ⊔ g i.2 :=
@supr_inf_supr αᵒᵈ _ _ _ _ _
lemma binfi_sup_binfi {ι ι' : Type*} {f : ι → α} {g : ι' → α} {s : set ι} {t : set ι'} :
(⨅ i ∈ s, f i) ⊔ (⨅ j ∈ t, g j) = ⨅ p ∈ s ×ˢ t, f (p : ι × ι').1 ⊔ g p.2 :=
@bsupr_inf_bsupr αᵒᵈ _ _ _ _ _ _ _
theorem Inf_sup_Inf : Inf s ⊔ Inf t = (⨅ p ∈ s ×ˢ t, (p : α × α).1 ⊔ p.2) :=
@Sup_inf_Sup αᵒᵈ _ _ _
lemma infi_sup_of_monotone {ι : Type*} [preorder ι] [is_directed ι (swap (≤))] {f g : ι → α}
(hf : monotone f) (hg : monotone g) :
(⨅ i, f i ⊔ g i) = (⨅ i, f i) ⊔ (⨅ i, g i) :=
supr_inf_of_antitone hf.dual_right hg.dual_right
lemma infi_sup_of_antitone {ι : Type*} [preorder ι] [is_directed ι (≤)] {f g : ι → α}
(hf : antitone f) (hg : antitone g) :
(⨅ i, f i ⊔ g i) = (⨅ i, f i) ⊔ (⨅ i, g i) :=
supr_inf_of_monotone hf.dual_right hg.dual_right
instance pi.coframe {ι : Type*} {π : ι → Type*} [Π i, coframe (π i)] : coframe (Π i, π i) :=
{ Inf := Inf,
infi_sup_le_sup_Inf := λ a s i,
by simp only [←sup_infi_eq, Inf_apply, ←infi_subtype'', infi_apply, pi.sup_apply],
..pi.complete_lattice }
@[priority 100] -- see Note [lower instance priority]
instance coframe.to_distrib_lattice : distrib_lattice α :=
{ le_sup_inf := λ a b c, by rw [←Inf_pair, ←Inf_pair, sup_Inf_eq, ←Inf_image, image_pair],
..‹coframe α› }
end coframe
section complete_distrib_lattice
variables [complete_distrib_lattice α] {a b : α} {s t : set α}
instance : complete_distrib_lattice αᵒᵈ := { ..order_dual.frame, ..order_dual.coframe }
instance pi.complete_distrib_lattice {ι : Type*} {π : ι → Type*}
[Π i, complete_distrib_lattice (π i)] : complete_distrib_lattice (Π i, π i) :=
{ ..pi.frame, ..pi.coframe }
end complete_distrib_lattice
/-- A complete Boolean algebra is a completely distributive Boolean algebra. -/
class complete_boolean_algebra α extends boolean_algebra α, complete_distrib_lattice α
instance pi.complete_boolean_algebra {ι : Type*} {π : ι → Type*}
[∀ i, complete_boolean_algebra (π i)] : complete_boolean_algebra (Π i, π i) :=
{ .. pi.boolean_algebra, .. pi.complete_distrib_lattice }
instance Prop.complete_boolean_algebra : complete_boolean_algebra Prop :=
{ infi_sup_le_sup_Inf := λ p s, iff.mp $
by simp only [forall_or_distrib_left, complete_lattice.Inf, infi_Prop_eq, sup_Prop_eq],
inf_Sup_le_supr_inf := λ p s, iff.mp $
by simp only [complete_lattice.Sup, exists_and_distrib_left, inf_Prop_eq, supr_Prop_eq],
.. Prop.boolean_algebra, .. Prop.complete_lattice }
section complete_boolean_algebra
variables [complete_boolean_algebra α] {a b : α} {s : set α} {f : ι → α}
theorem compl_infi : (infi f)ᶜ = (⨆ i, (f i)ᶜ) :=
le_antisymm
(compl_le_of_compl_le $ le_infi $ λ i, compl_le_of_compl_le $ le_supr (compl ∘ f) i)
(supr_le $ λ i, compl_le_compl $ infi_le _ _)
theorem compl_supr : (supr f)ᶜ = (⨅ i, (f i)ᶜ) :=
compl_injective (by simp [compl_infi])
lemma compl_Inf : (Inf s)ᶜ = (⨆ i ∈ s, iᶜ) := by simp only [Inf_eq_infi, compl_infi]
lemma compl_Sup : (Sup s)ᶜ = (⨅ i ∈ s, iᶜ) := by simp only [Sup_eq_supr, compl_supr]
lemma compl_Inf' : (Inf s)ᶜ = Sup (compl '' s) := compl_Inf.trans Sup_image.symm
lemma compl_Sup' : (Sup s)ᶜ = Inf (compl '' s) := compl_Sup.trans Inf_image.symm
end complete_boolean_algebra
section lift
/-- Pullback an `order.frame` along an injection. -/
@[reducible] -- See note [reducible non-instances]
protected def function.injective.frame [has_sup α] [has_inf α] [has_Sup α] [has_Inf α] [has_top α]
[has_bot α] [frame β] (f : α → β) (hf : injective f) (map_sup : ∀ a b, f (a ⊔ b) = f a ⊔ f b)
(map_inf : ∀ a b, f (a ⊓ b) = f a ⊓ f b) (map_Sup : ∀ s, f (Sup s) = ⨆ a ∈ s, f a)
(map_Inf : ∀ s, f (Inf s) = ⨅ a ∈ s, f a) (map_top : f ⊤ = ⊤) (map_bot : f ⊥ = ⊥) :
frame α :=
{ inf_Sup_le_supr_inf := λ a s, begin
change f (a ⊓ Sup s) ≤ f _,
rw [←Sup_image, map_inf, map_Sup s, inf_bsupr_eq],
simp_rw ←map_inf,
exact ((map_Sup _).trans supr_image).ge,
end,
..hf.complete_lattice f map_sup map_inf map_Sup map_Inf map_top map_bot }
/-- Pullback an `order.coframe` along an injection. -/
@[reducible] -- See note [reducible non-instances]
protected def function.injective.coframe [has_sup α] [has_inf α] [has_Sup α] [has_Inf α] [has_top α]
[has_bot α] [coframe β] (f : α → β) (hf : injective f) (map_sup : ∀ a b, f (a ⊔ b) = f a ⊔ f b)
(map_inf : ∀ a b, f (a ⊓ b) = f a ⊓ f b) (map_Sup : ∀ s, f (Sup s) = ⨆ a ∈ s, f a)
(map_Inf : ∀ s, f (Inf s) = ⨅ a ∈ s, f a) (map_top : f ⊤ = ⊤) (map_bot : f ⊥ = ⊥) :
coframe α :=
{ infi_sup_le_sup_Inf := λ a s, begin
change f _ ≤ f (a ⊔ Inf s),
rw [←Inf_image, map_sup, map_Inf s, sup_binfi_eq],
simp_rw ←map_sup,
exact ((map_Inf _).trans infi_image).le,
end,
..hf.complete_lattice f map_sup map_inf map_Sup map_Inf map_top map_bot }
/-- Pullback a `complete_distrib_lattice` along an injection. -/
@[reducible] -- See note [reducible non-instances]
protected def function.injective.complete_distrib_lattice [has_sup α] [has_inf α] [has_Sup α]
[has_Inf α] [has_top α] [has_bot α] [complete_distrib_lattice β]
(f : α → β) (hf : function.injective f) (map_sup : ∀ a b, f (a ⊔ b) = f a ⊔ f b)
(map_inf : ∀ a b, f (a ⊓ b) = f a ⊓ f b) (map_Sup : ∀ s, f (Sup s) = ⨆ a ∈ s, f a)
(map_Inf : ∀ s, f (Inf s) = ⨅ a ∈ s, f a) (map_top : f ⊤ = ⊤) (map_bot : f ⊥ = ⊥) :
complete_distrib_lattice α :=
{ ..hf.frame f map_sup map_inf map_Sup map_Inf map_top map_bot,
..hf.coframe f map_sup map_inf map_Sup map_Inf map_top map_bot }
/-- Pullback a `complete_boolean_algebra` along an injection. -/
@[reducible] -- See note [reducible non-instances]
protected def function.injective.complete_boolean_algebra [has_sup α] [has_inf α] [has_Sup α]
[has_Inf α] [has_top α] [has_bot α] [has_compl α] [has_sdiff α] [complete_boolean_algebra β]
(f : α → β) (hf : function.injective f) (map_sup : ∀ a b, f (a ⊔ b) = f a ⊔ f b)
(map_inf : ∀ a b, f (a ⊓ b) = f a ⊓ f b) (map_Sup : ∀ s, f (Sup s) = ⨆ a ∈ s, f a)
(map_Inf : ∀ s, f (Inf s) = ⨅ a ∈ s, f a) (map_top : f ⊤ = ⊤) (map_bot : f ⊥ = ⊥)
(map_compl : ∀ a, f aᶜ = (f a)ᶜ) (map_sdiff : ∀ a b, f (a \ b) = f a \ f b) :
complete_boolean_algebra α :=
{ ..hf.complete_distrib_lattice f map_sup map_inf map_Sup map_Inf map_top map_bot,
..hf.boolean_algebra f map_sup map_inf map_top map_bot map_compl map_sdiff }
end lift
namespace punit
variables (s : set punit.{u+1}) (x y : punit.{u+1})
instance : complete_boolean_algebra punit :=
by refine_struct
{ Sup := λ _, star,
Inf := λ _, star,
..punit.boolean_algebra };
intros; trivial <|> simp only [eq_iff_true_of_subsingleton, not_true, and_false]
@[simp] lemma Sup_eq : Sup s = star := rfl
@[simp] lemma Inf_eq : Inf s = star := rfl
end punit
|
7ef76b3fec170ac5494fdec565ec0e7ccda82fba
|
4efff1f47634ff19e2f786deadd394270a59ecd2
|
/src/category_theory/monoidal/of_has_finite_products.lean
|
7a4575e527e21981436ce5b307ad90bb3e144ba4
|
[
"Apache-2.0"
] |
permissive
|
agjftucker/mathlib
|
d634cd0d5256b6325e3c55bb7fb2403548371707
|
87fe50de17b00af533f72a102d0adefe4a2285e8
|
refs/heads/master
| 1,625,378,131,941
| 1,599,166,526,000
| 1,599,166,526,000
| 160,748,509
| 0
| 0
|
Apache-2.0
| 1,544,141,789,000
| 1,544,141,789,000
| null |
UTF-8
|
Lean
| false
| false
| 11,757
|
lean
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Simon Hudon
-/
import category_theory.monoidal.braided
import category_theory.limits.shapes.binary_products
import category_theory.limits.shapes.terminal
/-!
# The natural monoidal structure on any category with finite (co)products.
A category with a monoidal structure provided in this way is sometimes called a (co)cartesian category,
although this is also sometimes used to mean a finitely complete category.
(See <https://ncatlab.org/nlab/show/cartesian+category>.)
As this works with either products or coproducts,
and sometimes we want to think of a different monoidal structure entirely,
we don't set up either construct as an instance.
## Implementation
For the sake of nicer definitional properties,
we rely on `has_terminal` and `has_binary_products` instead of `has_finite_products`,
so that if a particular category provides customised instances of these
we pick those up instead.
-/
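/- For example (an informal aside): taking `C := Type u`, which has a terminal object and binary
products, `monoidal_of_has_finite_products` equips `Type u` with the cartesian monoidal structure:
`X ⊗ Y` is the binary product `X ⨯ Y` and the tensor unit is the terminal object `⊤_ (Type u)`.
The `tensor_obj`/`tensor_hom` simp lemmas below make this identification available to `simp`. -/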
universes v u
namespace category_theory
variables (C : Type u) [category.{v} C] {X Y : C}
namespace limits
section
variables {C} [has_binary_products C]
/-- The braiding isomorphism which swaps a binary product. -/
@[simps] def prod.braiding (P Q : C) : P ⨯ Q ≅ Q ⨯ P :=
{ hom := prod.lift prod.snd prod.fst,
inv := prod.lift prod.snd prod.fst }
/-- The braiding isomorphism can be passed through a map by swapping the order. -/
@[reassoc] lemma braid_natural {W X Y Z : C} (f : X ⟶ Y) (g : Z ⟶ W) :
prod.map f g ≫ (prod.braiding _ _).hom = (prod.braiding _ _).hom ≫ prod.map g f :=
by tidy
@[simp, reassoc] lemma prod.symmetry' (P Q : C) :
prod.lift prod.snd prod.fst ≫ prod.lift prod.snd prod.fst = 𝟙 (P ⨯ Q) :=
by tidy
/-- The braiding isomorphism is symmetric. -/
@[reassoc] lemma prod.symmetry (P Q : C) :
(prod.braiding P Q).hom ≫ (prod.braiding Q P).hom = 𝟙 _ :=
by simp
/-- The associator isomorphism for binary products. -/
@[simps] def prod.associator
(P Q R : C) : (P ⨯ Q) ⨯ R ≅ P ⨯ (Q ⨯ R) :=
{ hom :=
prod.lift
(prod.fst ≫ prod.fst)
(prod.lift (prod.fst ≫ prod.snd) prod.snd),
inv :=
prod.lift
(prod.lift prod.fst (prod.snd ≫ prod.fst))
(prod.snd ≫ prod.snd) }
/-- The product functor can be decomposed. -/
def prod_functor_left_comp (X Y : C) :
prod_functor.obj (X ⨯ Y) ≅ prod_functor.obj Y ⋙ prod_functor.obj X :=
nat_iso.of_components (prod.associator _ _) (by tidy)
@[reassoc]
lemma prod.pentagon (W X Y Z : C) :
prod.map ((prod.associator W X Y).hom) (𝟙 Z) ≫
(prod.associator W (X ⨯ Y) Z).hom ≫ prod.map (𝟙 W) ((prod.associator X Y Z).hom) =
(prod.associator (W ⨯ X) Y Z).hom ≫ (prod.associator W X (Y ⨯ Z)).hom :=
by tidy
@[reassoc]
lemma prod.associator_naturality {X₁ X₂ X₃ Y₁ Y₂ Y₃ : C} (f₁ : X₁ ⟶ Y₁) (f₂ : X₂ ⟶ Y₂) (f₃ : X₃ ⟶ Y₃) :
prod.map (prod.map f₁ f₂) f₃ ≫ (prod.associator Y₁ Y₂ Y₃).hom =
(prod.associator X₁ X₂ X₃).hom ≫ prod.map f₁ (prod.map f₂ f₃) :=
by tidy
variables [has_terminal C]
/-- The left unitor isomorphism for binary products with the terminal object. -/
@[simps] def prod.left_unitor
(P : C) : ⊤_ C ⨯ P ≅ P :=
{ hom := prod.snd,
inv := prod.lift (terminal.from P) (𝟙 _) }
/-- The right unitor isomorphism for binary products with the terminal object. -/
@[simps] def prod.right_unitor
(P : C) : P ⨯ ⊤_ C ≅ P :=
{ hom := prod.fst,
inv := prod.lift (𝟙 _) (terminal.from P) }
@[reassoc]
lemma prod_left_unitor_hom_naturality (f : X ⟶ Y):
prod.map (𝟙 _) f ≫ (prod.left_unitor Y).hom = (prod.left_unitor X).hom ≫ f :=
prod.map_snd _ _
@[reassoc]
lemma prod_left_unitor_inv_naturality (f : X ⟶ Y):
(prod.left_unitor X).inv ≫ prod.map (𝟙 _) f = f ≫ (prod.left_unitor Y).inv :=
by rw [iso.inv_comp_eq, ← category.assoc, iso.eq_comp_inv, prod_left_unitor_hom_naturality]
@[reassoc]
lemma prod_right_unitor_hom_naturality (f : X ⟶ Y):
prod.map f (𝟙 _) ≫ (prod.right_unitor Y).hom = (prod.right_unitor X).hom ≫ f :=
prod.map_fst _ _
@[reassoc]
lemma prod_right_unitor_inv_naturality (f : X ⟶ Y):
(prod.right_unitor X).inv ≫ prod.map f (𝟙 _) = f ≫ (prod.right_unitor Y).inv :=
by rw [iso.inv_comp_eq, ← category.assoc, iso.eq_comp_inv, prod_right_unitor_hom_naturality]
lemma prod.triangle (X Y : C) :
(prod.associator X (⊤_ C) Y).hom ≫ prod.map (𝟙 X) ((prod.left_unitor Y).hom) =
prod.map ((prod.right_unitor X).hom) (𝟙 Y) :=
by tidy
end
section
variables {C} [has_binary_coproducts C]
/-- The braiding isomorphism which swaps a binary coproduct. -/
@[simps] def coprod.braiding (P Q : C) : P ⨿ Q ≅ Q ⨿ P :=
{ hom := coprod.desc coprod.inr coprod.inl,
inv := coprod.desc coprod.inr coprod.inl }
@[simp] lemma coprod.symmetry' (P Q : C) :
coprod.desc coprod.inr coprod.inl ≫ coprod.desc coprod.inr coprod.inl = 𝟙 (P ⨿ Q) :=
by tidy
/-- The braiding isomorphism is symmetric. -/
lemma coprod.symmetry (P Q : C) :
(coprod.braiding P Q).hom ≫ (coprod.braiding Q P).hom = 𝟙 _ :=
by simp
/-- The associator isomorphism for binary coproducts. -/
@[simps] def coprod.associator
(P Q R : C) : (P ⨿ Q) ⨿ R ≅ P ⨿ (Q ⨿ R) :=
{ hom :=
coprod.desc
(coprod.desc coprod.inl (coprod.inl ≫ coprod.inr))
(coprod.inr ≫ coprod.inr),
inv :=
coprod.desc
(coprod.inl ≫ coprod.inl)
(coprod.desc (coprod.inr ≫ coprod.inl) coprod.inr) }
lemma coprod.pentagon (W X Y Z : C) :
coprod.map ((coprod.associator W X Y).hom) (𝟙 Z) ≫
(coprod.associator W (X ⨿ Y) Z).hom ≫ coprod.map (𝟙 W) ((coprod.associator X Y Z).hom) =
(coprod.associator (W ⨿ X) Y Z).hom ≫ (coprod.associator W X (Y ⨿ Z)).hom :=
by tidy
lemma coprod.associator_naturality {X₁ X₂ X₃ Y₁ Y₂ Y₃ : C} (f₁ : X₁ ⟶ Y₁) (f₂ : X₂ ⟶ Y₂) (f₃ : X₃ ⟶ Y₃) :
coprod.map (coprod.map f₁ f₂) f₃ ≫ (coprod.associator Y₁ Y₂ Y₃).hom =
(coprod.associator X₁ X₂ X₃).hom ≫ coprod.map f₁ (coprod.map f₂ f₃) :=
by tidy
variables [has_initial C]
/-- The left unitor isomorphism for binary coproducts with the initial object. -/
@[simps] def coprod.left_unitor
(P : C) : ⊥_ C ⨿ P ≅ P :=
{ hom := coprod.desc (initial.to P) (𝟙 _),
inv := coprod.inr }
/-- The right unitor isomorphism for binary coproducts with the initial object. -/
@[simps] def coprod.right_unitor
(P : C) : P ⨿ ⊥_ C ≅ P :=
{ hom := coprod.desc (𝟙 _) (initial.to P),
inv := coprod.inl }
lemma coprod.triangle (X Y : C) :
(coprod.associator X (⊥_ C) Y).hom ≫ coprod.map (𝟙 X) ((coprod.left_unitor Y).hom) =
coprod.map ((coprod.right_unitor X).hom) (𝟙 Y) :=
by tidy
end
end limits
open category_theory.limits
section
local attribute [tidy] tactic.case_bash
/-- A category with a terminal object and binary products has a natural monoidal structure. -/
def monoidal_of_has_finite_products [has_terminal C] [has_binary_products C] : monoidal_category C :=
{ tensor_unit := ⊤_ C,
tensor_obj := λ X Y, X ⨯ Y,
tensor_hom := λ _ _ _ _ f g, limits.prod.map f g,
associator := prod.associator,
left_unitor := prod.left_unitor,
right_unitor := prod.right_unitor,
pentagon' := prod.pentagon,
triangle' := prod.triangle,
associator_naturality' := @prod.associator_naturality _ _ _, }
end
section
local attribute [instance] monoidal_of_has_finite_products
open monoidal_category
/--
The monoidal structure coming from finite products is symmetric.
-/
@[simps]
def symmetric_of_has_finite_products [has_terminal C] [has_binary_products C] :
symmetric_category C :=
{ braiding := limits.prod.braiding,
braiding_naturality' := λ X X' Y Y' f g,
by { dsimp [tensor_hom], ext; simp, },
hexagon_forward' := λ X Y Z,
by ext; { dsimp [monoidal_of_has_finite_products], simp; dsimp; simp, },
hexagon_reverse' := λ X Y Z,
by ext; { dsimp [monoidal_of_has_finite_products], simp; dsimp; simp, },
symmetry' := λ X Y, by { dsimp, simp, refl, }, }
end
namespace monoidal_of_has_finite_products
variables [has_terminal C] [has_binary_products C]
local attribute [instance] monoidal_of_has_finite_products
@[simp]
lemma tensor_obj (X Y : C) : X ⊗ Y = (X ⨯ Y) := rfl
@[simp]
lemma tensor_hom {W X Y Z : C} (f : W ⟶ X) (g : Y ⟶ Z) : f ⊗ g = limits.prod.map f g := rfl
@[simp]
lemma left_unitor_hom (X : C) : (λ_ X).hom = limits.prod.snd := rfl
@[simp]
lemma left_unitor_inv (X : C) : (λ_ X).inv = prod.lift (terminal.from X) (𝟙 _) := rfl
@[simp]
lemma right_unitor_hom (X : C) : (ρ_ X).hom = limits.prod.fst := rfl
@[simp]
lemma right_unitor_inv (X : C) : (ρ_ X).inv = prod.lift (𝟙 _) (terminal.from X) := rfl
-- We don't mark this as a simp lemma, even though in many particular
-- categories the right hand side will simplify significantly further.
-- For now, we'll plan to create specialised simp lemmas in each particular category.
lemma associator_hom (X Y Z : C) :
(α_ X Y Z).hom =
prod.lift
(limits.prod.fst ≫ limits.prod.fst)
(prod.lift (limits.prod.fst ≫ limits.prod.snd) limits.prod.snd) := rfl
end monoidal_of_has_finite_products
section
local attribute [tidy] tactic.case_bash
/-- A category with an initial object and binary coproducts has a natural monoidal structure. -/
def monoidal_of_has_finite_coproducts [has_initial C] [has_binary_coproducts C] : monoidal_category C :=
{ tensor_unit := ⊥_ C,
tensor_obj := λ X Y, X ⨿ Y,
tensor_hom := λ _ _ _ _ f g, limits.coprod.map f g,
associator := coprod.associator,
left_unitor := coprod.left_unitor,
right_unitor := coprod.right_unitor,
pentagon' := coprod.pentagon,
triangle' := coprod.triangle,
associator_naturality' := @coprod.associator_naturality _ _ _, }
end
section
local attribute [instance] monoidal_of_has_finite_coproducts
open monoidal_category
/--
The monoidal structure coming from finite coproducts is symmetric.
-/
@[simps]
def symmetric_of_has_finite_coproducts [has_initial C] [has_binary_coproducts C] :
symmetric_category C :=
{ braiding := limits.coprod.braiding,
braiding_naturality' := λ X X' Y Y' f g,
by { dsimp [tensor_hom], ext; simp, },
hexagon_forward' := λ X Y Z,
by ext; { dsimp [monoidal_of_has_finite_coproducts], simp; dsimp; simp, },
hexagon_reverse' := λ X Y Z,
by ext; { dsimp [monoidal_of_has_finite_coproducts], simp; dsimp; simp, },
symmetry' := λ X Y, by { dsimp, simp, refl, }, }
end
namespace monoidal_of_has_finite_coproducts
variables [has_initial C] [has_binary_coproducts C]
local attribute [instance] monoidal_of_has_finite_coproducts
@[simp]
lemma tensor_obj (X Y : C) : X ⊗ Y = (X ⨿ Y) := rfl
@[simp]
lemma tensor_hom {W X Y Z : C} (f : W ⟶ X) (g : Y ⟶ Z) : f ⊗ g = limits.coprod.map f g := rfl
@[simp]
lemma left_unitor_hom (X : C) : (λ_ X).hom = coprod.desc (initial.to X) (𝟙 _) := rfl
@[simp]
lemma right_unitor_hom (X : C) : (ρ_ X).hom = coprod.desc (𝟙 _) (initial.to X) := rfl
@[simp]
lemma left_unitor_inv (X : C) : (λ_ X).inv = limits.coprod.inr := rfl
@[simp]
lemma right_unitor_inv (X : C) : (ρ_ X).inv = limits.coprod.inl := rfl
-- We don't mark this as a simp lemma, even though in many particular
-- categories the right hand side will simplify significantly further.
-- For now, we'll plan to create specialised simp lemmas in each particular category.
lemma associator_hom (X Y Z : C) :
(α_ X Y Z).hom =
coprod.desc
(coprod.desc coprod.inl (coprod.inl ≫ coprod.inr))
(coprod.inr ≫ coprod.inr) := rfl
end monoidal_of_has_finite_coproducts
end category_theory
|
7fb20117684a32af3a42f0a748757835b5eb7615 | 80cc5bf14c8ea85ff340d1d747a127dcadeb966f | /src/topology/algebra/module.lean | 0d63cffad8e1ae47e939d8deca39993faa330c60 | ["Apache-2.0"] | permissive | lacker/mathlib | f2439c743c4f8eb413ec589430c82d0f73b2d539 | ddf7563ac69d42cfa4a1bfe41db1fed521bd795f | refs/heads/master | 1,671,948,326,773 | 1,601,479,268,000 | 1,601,479,268,000 | 298,686,743 | 0 | 0 | Apache-2.0 | 1,601,070,794,000 | 1,601,070,794,000 | null | UTF-8 | Lean | false | false | 47,608 | lean |
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jan-David Salchow, Sébastien Gouëzel, Jean Lo, Yury Kudryashov
-/
import topology.algebra.ring
import topology.uniform_space.uniform_embedding
import ring_theory.algebra
import linear_algebra.projection
/-!
# Theory of topological modules and continuous linear maps.
We define classes `topological_semimodule`, `topological_module` and `topological_vector_spaces`,
as extensions of the corresponding algebraic classes where the algebraic operations are continuous.
We also define continuous linear maps, as linear maps between topological modules which are
continuous. The set of continuous linear maps between the topological `R`-modules `M` and `M₂` is
denoted by `M →L[R] M₂`.
Continuous linear equivalences are denoted by `M ≃L[R] M₂`.
## Implementation notes
Topological vector spaces are defined as an `abbreviation` for topological modules,
if the base ring is a field. This has the advantage that topological vector spaces are completely
transparent for type class inference, which means that all instances for topological modules
are immediately picked up for vector spaces as well.
A cosmetic disadvantage is that one cannot extend topological vector spaces.
The solution is to extend `topological_module` instead.
-/
open filter
open_locale topological_space big_operators
universes u v w u'
/-- A topological semimodule, over a semiring which is also a topological space, is a
semimodule in which scalar multiplication is continuous. In applications, R will be a topological
semiring and M a topological additive semigroup, but this is not needed for the definition -/
class topological_semimodule (R : Type u) (M : Type v)
[semiring R] [topological_space R]
[topological_space M] [add_comm_monoid M]
[semimodule R M] : Prop :=
(continuous_smul : continuous (λp : R × M, p.1 • p.2))
section
variables {R : Type u} {M : Type v}
[semiring R] [topological_space R]
[topological_space M] [add_comm_monoid M]
[semimodule R M] [topological_semimodule R M]
lemma continuous_smul : continuous (λp:R×M, p.1 • p.2) :=
topological_semimodule.continuous_smul
@[continuity]
lemma continuous.smul {α : Type*} [topological_space α] {f : α → R} {g : α → M}
(hf : continuous f) (hg : continuous g) : continuous (λp, f p • g p) :=
continuous_smul.comp (hf.prod_mk hg)
lemma tendsto_smul {c : R} {x : M} : tendsto (λp:R×M, p.fst • p.snd) (𝓝 (c, x)) (𝓝 (c • x)) :=
continuous_smul.tendsto _
lemma filter.tendsto.smul {α : Type*} {l : filter α} {f : α → R} {g : α → M} {c : R} {x : M}
(hf : tendsto f l (𝓝 c)) (hg : tendsto g l (𝓝 x)) : tendsto (λ a, f a • g a) l (𝓝 (c • x)) :=
tendsto_smul.comp (hf.prod_mk_nhds hg)
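/- Editorial sketch (not part of the original file): a typical consequence of the
`continuous.smul` lemma above is that multiplication by a fixed scalar is continuous. -/
example (c : R) : continuous (λ x : M, c • x) :=
continuous_const.smul continuous_id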
end
instance topological_semiring.to_semimodule {R : Type*} [topological_space R]
[semiring R] [topological_semiring R] :
topological_semimodule R R :=
{ continuous_smul := continuous_mul }
/-- A topological module, over a ring which is also a topological space, is a module in which
scalar multiplication is continuous. In applications, `R` will be a topological ring and `M` a
topological additive group, but this is not needed for the definition -/
abbreviation topological_module (R : Type u) (M : Type v)
[ring R] [topological_space R]
[topological_space M] [add_comm_group M] [module R M] :=
topological_semimodule R M
/-- A topological vector space is a topological module over a field. -/
abbreviation topological_vector_space (R : Type u) (M : Type v)
[field R] [topological_space R]
[topological_space M] [add_comm_group M] [module R M] :=
topological_module R M
section
variables {R : Type*} {M : Type*}
[ring R] [topological_space R]
[topological_space M] [add_comm_group M]
[module R M] [topological_module R M]
/-- Scalar multiplication by a unit is a homeomorphism from a
topological module onto itself. -/
protected def homeomorph.smul_of_unit (a : units R) : M ≃ₜ M :=
{ to_fun := λ x, (a : R) • x,
inv_fun := λ x, ((a⁻¹ : units R) : R) • x,
right_inv := λ x, calc (a : R) • ((a⁻¹ : units R) : R) • x = x :
by rw [smul_smul, units.mul_inv, one_smul],
left_inv := λ x, calc ((a⁻¹ : units R) : R) • (a : R) • x = x :
by rw [smul_smul, units.inv_mul, one_smul],
continuous_to_fun := continuous_const.smul continuous_id,
continuous_inv_fun := continuous_const.smul continuous_id }
lemma is_open_map_smul_of_unit (a : units R) : is_open_map (λ (x : M), (a : R) • x) :=
(homeomorph.smul_of_unit a).is_open_map
lemma is_closed_map_smul_of_unit (a : units R) : is_closed_map (λ (x : M), (a : R) • x) :=
(homeomorph.smul_of_unit a).is_closed_map
/-- If `M` is a topological module over `R` and `0` is a limit of invertible elements of `R`, then
`⊤` is the only submodule of `M` with a nonempty interior.
This is the case, e.g., if `R` is a nondiscrete normed field. -/
lemma submodule.eq_top_of_nonempty_interior' [has_continuous_add M]
[ne_bot (𝓝[{x : R | is_unit x}] 0)]
(s : submodule R M) (hs : (interior (s:set M)).nonempty) :
s = ⊤ :=
begin
rcases hs with ⟨y, hy⟩,
refine (submodule.eq_top_iff'.2 $ λ x, _),
rw [mem_interior_iff_mem_nhds] at hy,
have : tendsto (λ c:R, y + c • x) (𝓝[{x : R | is_unit x}] 0) (𝓝 (y + (0:R) • x)),
from tendsto_const_nhds.add ((tendsto_nhds_within_of_tendsto_nhds tendsto_id).smul
tendsto_const_nhds),
rw [zero_smul, add_zero] at this,
rcases nonempty_of_mem_sets (inter_mem_sets (mem_map.1 (this hy)) self_mem_nhds_within)
with ⟨_, hu, u, rfl⟩,
have hy' : y ∈ ↑s := mem_of_nhds hy,
exact (s.smul_mem_iff' _).1 ((s.add_mem_iff_right hy').1 hu)
end
end
section
variables {R : Type*} {M : Type*} {a : R}
[field R] [topological_space R]
[topological_space M] [add_comm_group M]
[vector_space R M] [topological_vector_space R M]
/-- Scalar multiplication by a non-zero field element is a
homeomorphism from a topological vector space onto itself. -/
protected def homeomorph.smul_of_ne_zero (ha : a ≠ 0) : M ≃ₜ M :=
{.. homeomorph.smul_of_unit (units.mk0 a ha)}
lemma is_open_map_smul_of_ne_zero (ha : a ≠ 0) : is_open_map (λ (x : M), a • x) :=
(homeomorph.smul_of_ne_zero ha).is_open_map
lemma is_closed_map_smul_of_ne_zero (ha : a ≠ 0) : is_closed_map (λ (x : M), a • x) :=
(homeomorph.smul_of_ne_zero ha).is_closed_map
end
/-- Continuous linear maps between modules. We only put the type classes that are necessary for the
definition, although in applications `M` and `M₂` will be topological modules over the topological
ring `R`. -/
structure continuous_linear_map
(R : Type*) [semiring R]
(M : Type*) [topological_space M] [add_comm_monoid M]
(M₂ : Type*) [topological_space M₂] [add_comm_monoid M₂]
[semimodule R M] [semimodule R M₂]
extends linear_map R M M₂ :=
(cont : continuous to_fun . tactic.interactive.continuity')
notation M ` →L[`:25 R `] ` M₂ := continuous_linear_map R M M₂
/-- Continuous linear equivalences between modules. We only put the type classes that are necessary
for the definition, although in applications `M` and `M₂` will be topological modules over the
topological ring `R`. -/
@[nolint has_inhabited_instance]
structure continuous_linear_equiv
(R : Type*) [semiring R]
(M : Type*) [topological_space M] [add_comm_monoid M]
(M₂ : Type*) [topological_space M₂] [add_comm_monoid M₂]
[semimodule R M] [semimodule R M₂]
extends linear_equiv R M M₂ :=
(continuous_to_fun : continuous to_fun . tactic.interactive.continuity')
(continuous_inv_fun : continuous inv_fun . tactic.interactive.continuity')
notation M ` ≃L[`:50 R `] ` M₂ := continuous_linear_equiv R M M₂
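/- Editorial note (not part of the original file): with these notations, `f : M →L[R] M₂`
bundles a linear map together with a proof of continuity, and `e : M ≃L[R] M₂` additionally
bundles a continuous inverse.  The coercion instances defined below let both be applied to
elements directly, so that, e.g., `f (x + y) = f x + f y` is available as `f.map_add x y`. -/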
namespace continuous_linear_map
section semiring
/- Properties that hold for non-necessarily commutative semirings. -/
variables
{R : Type*} [semiring R]
{M : Type*} [topological_space M] [add_comm_monoid M]
{M₂ : Type*} [topological_space M₂] [add_comm_monoid M₂]
{M₃ : Type*} [topological_space M₃] [add_comm_monoid M₃]
{M₄ : Type*} [topological_space M₄] [add_comm_monoid M₄]
[semimodule R M] [semimodule R M₂] [semimodule R M₃] [semimodule R M₄]
/-- Coerce continuous linear maps to linear maps. -/
instance : has_coe (M →L[R] M₂) (M →ₗ[R] M₂) := ⟨to_linear_map⟩
/-- Coerce continuous linear maps to functions. -/
-- see Note [function coercion]
instance to_fun : has_coe_to_fun $ M →L[R] M₂ := ⟨λ _, M → M₂, λ f, f⟩
@[simp] lemma coe_mk (f : M →ₗ[R] M₂) (h) : (mk f h : M →ₗ[R] M₂) = f := rfl
@[simp] lemma coe_mk' (f : M →ₗ[R] M₂) (h) : (mk f h : M → M₂) = f := rfl
@[continuity]
protected lemma continuous (f : M →L[R] M₂) : continuous f := f.2
theorem coe_injective : function.injective (coe : (M →L[R] M₂) → (M →ₗ[R] M₂)) :=
by { intros f g H, cases f, cases g, congr' 1, exact H }
theorem coe_inj ⦃f g : M →L[R] M₂⦄ (H : (f : M → M₂) = g) : f = g :=
coe_injective $ linear_map.coe_inj H
@[ext] theorem ext {f g : M →L[R] M₂} (h : ∀ x, f x = g x) : f = g :=
coe_inj $ funext h
theorem ext_iff {f g : M →L[R] M₂} : f = g ↔ ∀ x, f x = g x :=
⟨λ h x, by rw h, by ext⟩
variables (c : R) (f g : M →L[R] M₂) (h : M₂ →L[R] M₃) (x y z : M)
-- make some straightforward lemmas available to `simp`.
@[simp] lemma map_zero : f (0 : M) = 0 := (to_linear_map _).map_zero
@[simp] lemma map_add : f (x + y) = f x + f y := (to_linear_map _).map_add _ _
@[simp] lemma map_smul : f (c • x) = c • f x := (to_linear_map _).map_smul _ _
lemma map_sum {ι : Type*} (s : finset ι) (g : ι → M) :
f (∑ i in s, g i) = ∑ i in s, f (g i) := f.to_linear_map.map_sum
@[simp, norm_cast] lemma coe_coe : ((f : M →ₗ[R] M₂) : (M → M₂)) = (f : M → M₂) := rfl
/-- The continuous map that is constantly zero. -/
instance: has_zero (M →L[R] M₂) := ⟨⟨0, continuous_const⟩⟩
instance : inhabited (M →L[R] M₂) := ⟨0⟩
@[simp] lemma zero_apply : (0 : M →L[R] M₂) x = 0 := rfl
@[simp, norm_cast] lemma coe_zero : ((0 : M →L[R] M₂) : M →ₗ[R] M₂) = 0 := rfl
/- no simp attribute on the next line as simp does not always simplify `0 x` to `0`
when `0` is the zero function, while it does for the zero continuous linear map,
and this is the most important property we care about. -/
@[norm_cast] lemma coe_zero' : ((0 : M →L[R] M₂) : M → M₂) = 0 := rfl
section
variables (R M)
/-- the identity map as a continuous linear map. -/
def id : M →L[R] M :=
⟨linear_map.id, continuous_id⟩
end
instance : has_one (M →L[R] M) := ⟨id R M⟩
lemma id_apply : id R M x = x := rfl
@[simp, norm_cast] lemma coe_id : (id R M : M →ₗ[R] M) = linear_map.id := rfl
@[simp, norm_cast] lemma coe_id' : (id R M : M → M) = _root_.id := rfl
@[simp] lemma one_apply : (1 : M →L[R] M) x = x := rfl
section add
variables [has_continuous_add M₂]
instance : has_add (M →L[R] M₂) :=
⟨λ f g, ⟨f + g, f.2.add g.2⟩⟩
@[simp] lemma add_apply : (f + g) x = f x + g x := rfl
@[simp, norm_cast] lemma coe_add : (((f + g) : M →L[R] M₂) : M →ₗ[R] M₂) = (f : M →ₗ[R] M₂) + g := rfl
@[norm_cast] lemma coe_add' : (((f + g) : M →L[R] M₂) : M → M₂) = (f : M → M₂) + g := rfl
instance : add_comm_monoid (M →L[R] M₂) :=
by { refine {zero := 0, add := (+), ..}; intros; ext;
apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm] }
lemma sum_apply {ι : Type*} (t : finset ι) (f : ι → M →L[R] M₂) (b : M) :
(∑ d in t, f d) b = ∑ d in t, f d b :=
begin
haveI : is_add_monoid_hom (λ (g : M →L[R] M₂), g b) :=
{ map_add := λ f g, continuous_linear_map.add_apply f g b, map_zero := by simp },
exact (finset.sum_hom t (λ g : M →L[R] M₂, g b)).symm
end
end add
/-- Composition of continuous linear maps. -/
def comp (g : M₂ →L[R] M₃) (f : M →L[R] M₂) : M →L[R] M₃ :=
⟨linear_map.comp g.to_linear_map f.to_linear_map, g.2.comp f.2⟩
@[simp, norm_cast] lemma coe_comp : ((h.comp f) : (M →ₗ[R] M₃)) = (h : M₂ →ₗ[R] M₃).comp f := rfl
@[simp, norm_cast] lemma coe_comp' : ((h.comp f) : (M → M₃)) = (h : M₂ → M₃) ∘ f := rfl
@[simp] theorem comp_id : f.comp (id R M) = f :=
ext $ λ x, rfl
@[simp] theorem id_comp : (id R M₂).comp f = f :=
ext $ λ x, rfl
@[simp] theorem comp_zero : f.comp (0 : M₃ →L[R] M) = 0 :=
by { ext, simp }
@[simp] theorem zero_comp : (0 : M₂ →L[R] M₃).comp f = 0 :=
by { ext, simp }
@[simp] lemma comp_add [has_continuous_add M₂] [has_continuous_add M₃]
(g : M₂ →L[R] M₃) (f₁ f₂ : M →L[R] M₂) :
g.comp (f₁ + f₂) = g.comp f₁ + g.comp f₂ :=
by { ext, simp }
@[simp] lemma add_comp [has_continuous_add M₃]
(g₁ g₂ : M₂ →L[R] M₃) (f : M →L[R] M₂) :
(g₁ + g₂).comp f = g₁.comp f + g₂.comp f :=
by { ext, simp }
theorem comp_assoc (h : M₃ →L[R] M₄) (g : M₂ →L[R] M₃) (f : M →L[R] M₂) :
(h.comp g).comp f = h.comp (g.comp f) :=
rfl
instance : has_mul (M →L[R] M) := ⟨comp⟩
lemma mul_def (f g : M →L[R] M) : f * g = f.comp g := rfl
@[simp] lemma coe_mul (f g : M →L[R] M) : ⇑(f * g) = f ∘ g := rfl
lemma mul_apply (f g : M →L[R] M) (x : M) : (f * g) x = f (g x) := rfl
/-- The cartesian product of two continuous linear maps, as a continuous linear map. -/
protected def prod (f₁ : M →L[R] M₂) (f₂ : M →L[R] M₃) : M →L[R] (M₂ × M₃) :=
{ cont := f₁.2.prod_mk f₂.2,
..f₁.to_linear_map.prod f₂.to_linear_map }
@[simp, norm_cast] lemma coe_prod (f₁ : M →L[R] M₂) (f₂ : M →L[R] M₃) :
(f₁.prod f₂ : M →ₗ[R] M₂ × M₃) = linear_map.prod f₁ f₂ :=
rfl
@[simp, norm_cast] lemma prod_apply (f₁ : M →L[R] M₂) (f₂ : M →L[R] M₃) (x : M) :
f₁.prod f₂ x = (f₁ x, f₂ x) :=
rfl
/-- Kernel of a continuous linear map. -/
def ker (f : M →L[R] M₂) : submodule R M := (f : M →ₗ[R] M₂).ker
@[norm_cast] lemma ker_coe : (f : M →ₗ[R] M₂).ker = f.ker := rfl
@[simp] lemma mem_ker {f : M →L[R] M₂} {x} : x ∈ f.ker ↔ f x = 0 := linear_map.mem_ker
lemma is_closed_ker [t1_space M₂] : is_closed (f.ker : set M) :=
continuous_iff_is_closed.1 f.cont _ is_closed_singleton
@[simp] lemma apply_ker (x : f.ker) : f x = 0 := mem_ker.1 x.2
lemma is_complete_ker {M' : Type*} [uniform_space M'] [complete_space M'] [add_comm_monoid M']
[semimodule R M'] [t1_space M₂] (f : M' →L[R] M₂) :
is_complete (f.ker : set M') :=
f.is_closed_ker.is_complete
instance complete_space_ker {M' : Type*} [uniform_space M'] [complete_space M'] [add_comm_monoid M']
[semimodule R M'] [t1_space M₂] (f : M' →L[R] M₂) :
complete_space f.ker :=
f.is_closed_ker.complete_space_coe
@[simp] lemma ker_prod (f : M →L[R] M₂) (g : M →L[R] M₃) :
ker (f.prod g) = ker f ⊓ ker g :=
linear_map.ker_prod f g
/-- Range of a continuous linear map. -/
def range (f : M →L[R] M₂) : submodule R M₂ := (f : M →ₗ[R] M₂).range
lemma range_coe : (f.range : set M₂) = set.range f := linear_map.range_coe _
lemma mem_range {f : M →L[R] M₂} {y} : y ∈ f.range ↔ ∃ x, f x = y := linear_map.mem_range
lemma range_prod_le (f : M →L[R] M₂) (g : M →L[R] M₃) :
range (f.prod g) ≤ (range f).prod (range g) :=
(f : M →ₗ[R] M₂).range_prod_le g
/-- Restrict codomain of a continuous linear map. -/
def cod_restrict (f : M →L[R] M₂) (p : submodule R M₂) (h : ∀ x, f x ∈ p) :
M →L[R] p :=
{ cont := continuous_subtype_mk h f.continuous,
to_linear_map := (f : M →ₗ[R] M₂).cod_restrict p h}
@[norm_cast] lemma coe_cod_restrict (f : M →L[R] M₂) (p : submodule R M₂) (h : ∀ x, f x ∈ p) :
(f.cod_restrict p h : M →ₗ[R] p) = (f : M →ₗ[R] M₂).cod_restrict p h :=
rfl
@[simp] lemma coe_cod_restrict_apply (f : M →L[R] M₂) (p : submodule R M₂) (h : ∀ x, f x ∈ p) (x) :
(f.cod_restrict p h x : M₂) = f x :=
rfl
@[simp] lemma ker_cod_restrict (f : M →L[R] M₂) (p : submodule R M₂) (h : ∀ x, f x ∈ p) :
ker (f.cod_restrict p h) = ker f :=
(f : M →ₗ[R] M₂).ker_cod_restrict p h
/-- Embedding of a submodule into the ambient space as a continuous linear map. -/
def subtype_val (p : submodule R M) : p →L[R] M :=
{ cont := continuous_subtype_val,
to_linear_map := p.subtype }
@[simp, norm_cast] lemma coe_subtype_val (p : submodule R M) :
(subtype_val p : p →ₗ[R] M) = p.subtype :=
rfl
@[simp, norm_cast] lemma subtype_val_apply (p : submodule R M) (x : p) :
(subtype_val p : p → M) x = x :=
rfl
variables (R M M₂)
/-- `prod.fst` as a `continuous_linear_map`. -/
def fst : M × M₂ →L[R] M :=
{ cont := continuous_fst, to_linear_map := linear_map.fst R M M₂ }
/-- `prod.snd` as a `continuous_linear_map`. -/
def snd : M × M₂ →L[R] M₂ :=
{ cont := continuous_snd, to_linear_map := linear_map.snd R M M₂ }
variables {R M M₂}
@[simp, norm_cast] lemma coe_fst : (fst R M M₂ : M × M₂ →ₗ[R] M) = linear_map.fst R M M₂ := rfl
@[simp, norm_cast] lemma coe_fst' : (fst R M M₂ : M × M₂ → M) = prod.fst := rfl
@[simp, norm_cast] lemma coe_snd : (snd R M M₂ : M × M₂ →ₗ[R] M₂) = linear_map.snd R M M₂ := rfl
@[simp, norm_cast] lemma coe_snd' : (snd R M M₂ : M × M₂ → M₂) = prod.snd := rfl
@[simp] lemma fst_prod_snd : (fst R M M₂).prod (snd R M M₂) = id R (M × M₂) := ext $ λ ⟨x, y⟩, rfl
/-- `prod.map` of two continuous linear maps. -/
def prod_map (f₁ : M →L[R] M₂) (f₂ : M₃ →L[R] M₄) : (M × M₃) →L[R] (M₂ × M₄) :=
(f₁.comp (fst R M M₃)).prod (f₂.comp (snd R M M₃))
@[simp, norm_cast] lemma coe_prod_map (f₁ : M →L[R] M₂) (f₂ : M₃ →L[R] M₄) :
(f₁.prod_map f₂ : (M × M₃) →ₗ[R] (M₂ × M₄)) = ((f₁ : M →ₗ[R] M₂).prod_map (f₂ : M₃ →ₗ[R] M₄)) :=
rfl
@[simp, norm_cast] lemma coe_prod_map' (f₁ : M →L[R] M₂) (f₂ : M₃ →L[R] M₄) :
⇑(f₁.prod_map f₂) = prod.map f₁ f₂ :=
rfl
/-- The continuous linear map given by `(x, y) ↦ f₁ x + f₂ y`. -/
def coprod [has_continuous_add M₃] (f₁ : M →L[R] M₃) (f₂ : M₂ →L[R] M₃) :
(M × M₂) →L[R] M₃ :=
⟨linear_map.coprod f₁ f₂, (f₁.cont.comp continuous_fst).add (f₂.cont.comp continuous_snd)⟩
@[norm_cast, simp] lemma coe_coprod [has_continuous_add M₃]
(f₁ : M →L[R] M₃) (f₂ : M₂ →L[R] M₃) :
(f₁.coprod f₂ : (M × M₂) →ₗ[R] M₃) = linear_map.coprod f₁ f₂ :=
rfl
@[simp] lemma coprod_apply [has_continuous_add M₃] (f₁ : M →L[R] M₃) (f₂ : M₂ →L[R] M₃) (x) :
f₁.coprod f₂ x = f₁ x.1 + f₂ x.2 := rfl
variables [topological_space R] [topological_semimodule R M₂]
/-- The linear map `λ x, c x • f`. Associates to a scalar-valued linear map and an element of
`M₂` the `M₂`-valued linear map obtained by multiplying the two (a.k.a. tensoring by `M₂`).
See also `continuous_linear_map.smul_rightₗ` and `continuous_linear_map.smul_rightL`. -/
def smul_right (c : M →L[R] R) (f : M₂) : M →L[R] M₂ :=
{ cont := c.2.smul continuous_const,
..c.to_linear_map.smul_right f }
@[simp]
lemma smul_right_apply {c : M →L[R] R} {f : M₂} {x : M} :
(smul_right c f : M → M₂) x = (c : M → R) x • f :=
rfl
@[simp]
lemma smul_right_one_one (c : R →L[R] M₂) : smul_right 1 ((c : R → M₂) 1) = c :=
by ext; simp [-continuous_linear_map.map_smul, (continuous_linear_map.map_smul _ _ _).symm]
@[simp]
lemma smul_right_one_eq_iff {f f' : M₂} :
smul_right (1 : R →L[R] R) f = smul_right 1 f' ↔ f = f' :=
⟨λ h, have (smul_right (1 : R →L[R] R) f : R → M₂) 1 = (smul_right (1 : R →L[R] R) f' : R → M₂) 1,
by rw h,
by simp at this; assumption,
by cc⟩
lemma smul_right_comp [topological_semimodule R R] {x : M₂} {c : R} :
(smul_right 1 x : R →L[R] M₂).comp (smul_right 1 c : R →L[R] R) = smul_right 1 (c • x) :=
by { ext, simp [mul_smul] }
end semiring
section pi
variables
{R : Type*} [semiring R]
{M : Type*} [topological_space M] [add_comm_monoid M] [semimodule R M]
{M₂ : Type*} [topological_space M₂] [add_comm_monoid M₂] [semimodule R M₂]
{ι : Type*} {φ : ι → Type*} [∀i, topological_space (φ i)] [∀i, add_comm_monoid (φ i)] [∀i, semimodule R (φ i)]
/-- `pi` construction for continuous linear functions. From a family of continuous linear functions
it produces a continuous linear function into a family of topological modules. -/
def pi (f : Πi, M →L[R] φ i) : M →L[R] (Πi, φ i) :=
⟨linear_map.pi (λ i, (f i : M →ₗ[R] φ i)),
continuous_pi (λ i, (f i).continuous)⟩
@[simp] lemma pi_apply (f : Πi, M →L[R] φ i) (c : M) (i : ι) :
pi f c i = f i c := rfl
lemma pi_eq_zero (f : Πi, M →L[R] φ i) : pi f = 0 ↔ (∀i, f i = 0) :=
by simp only [ext_iff, pi_apply, function.funext_iff]; exact ⟨λh a b, h b a, λh a b, h b a⟩
lemma pi_zero : pi (λi, 0 : Πi, M →L[R] φ i) = 0 := by ext; refl
lemma pi_comp (f : Πi, M →L[R] φ i) (g : M₂ →L[R] M) : (pi f).comp g = pi (λi, (f i).comp g) := rfl
/-- The projections from a family of topological modules are continuous linear maps. -/
def proj (i : ι) : (Πi, φ i) →L[R] φ i :=
⟨linear_map.proj i, continuous_apply _⟩
@[simp] lemma proj_apply (i : ι) (b : Πi, φ i) : (proj i : (Πi, φ i) →L[R] φ i) b = b i := rfl
lemma proj_pi (f : Πi, M₂ →L[R] φ i) (i : ι) : (proj i).comp (pi f) = f i :=
ext $ assume c, rfl
lemma infi_ker_proj : (⨅i, ker (proj i) : submodule R (Πi, φ i)) = ⊥ :=
linear_map.infi_ker_proj
variables (R φ)
/-- If `I` and `J` are complementary index sets, the product of the kernels of the projections of
`φ` indexed by `J` is linearly equivalent to the product over `I`. -/
def infi_ker_proj_equiv {I J : set ι} [decidable_pred (λi, i ∈ I)]
(hd : disjoint I J) (hu : set.univ ⊆ I ∪ J) :
(⨅i ∈ J, ker (proj i) : submodule R (Πi, φ i)) ≃L[R] (Πi:I, φ i) :=
⟨ linear_map.infi_ker_proj_equiv R φ hd hu,
continuous_pi (λ i, begin
have := @continuous_subtype_coe _ _ (λ x, x ∈ (⨅i ∈ J, ker (proj i) : submodule R (Πi, φ i))),
have := continuous.comp (by exact continuous_apply i) this,
exact this
end),
continuous_subtype_mk _ (continuous_pi (λ i, begin
dsimp, split_ifs; [apply continuous_apply, exact continuous_const]
end)) ⟩
end pi
section ring
variables
{R : Type*} [ring R]
{M : Type*} [topological_space M] [add_comm_group M]
{M₂ : Type*} [topological_space M₂] [add_comm_group M₂]
{M₃ : Type*} [topological_space M₃] [add_comm_group M₃]
{M₄ : Type*} [topological_space M₄] [add_comm_group M₄]
[semimodule R M] [semimodule R M₂] [semimodule R M₃] [semimodule R M₄]
variables (c : R) (f g : M →L[R] M₂) (h : M₂ →L[R] M₃) (x y z : M)
@[simp] lemma map_neg : f (-x) = - (f x) := (to_linear_map _).map_neg _
@[simp] lemma map_sub : f (x - y) = f x - f y := (to_linear_map _).map_sub _ _
@[simp] lemma sub_apply' (x : M) : ((f : M →ₗ[R] M₂) - g) x = f x - g x := rfl
lemma range_prod_eq {f : M →L[R] M₂} {g : M →L[R] M₃} (h : ker f ⊔ ker g = ⊤) :
range (f.prod g) = (range f).prod (range g) :=
linear_map.range_prod_eq h
section
variables [topological_add_group M₂]
instance : has_neg (M →L[R] M₂) := ⟨λ f, ⟨-f, f.2.neg⟩⟩
@[simp] lemma neg_apply : (-f) x = - (f x) := rfl
@[simp, norm_cast] lemma coe_neg : (((-f) : M →L[R] M₂) : M →ₗ[R] M₂) = -(f : M →ₗ[R] M₂) := rfl
@[norm_cast] lemma coe_neg' : (((-f) : M →L[R] M₂) : M → M₂) = -(f : M → M₂) := rfl
instance : add_comm_group (M →L[R] M₂) :=
by { refine {zero := 0, add := (+), neg := has_neg.neg, ..}; intros; ext;
apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm] }
lemma sub_apply (x : M) : (f - g) x = f x - g x := rfl
@[simp, norm_cast] lemma coe_sub : (((f - g) : M →L[R] M₂) : M →ₗ[R] M₂) = (f : M →ₗ[R] M₂) - g := rfl
@[simp, norm_cast] lemma coe_sub' : (((f - g) : M →L[R] M₂) : M → M₂) = (f : M → M₂) - g := rfl
end
instance [topological_add_group M] : ring (M →L[R] M) :=
{ mul := (*),
one := 1,
mul_one := λ _, ext $ λ _, rfl,
one_mul := λ _, ext $ λ _, rfl,
mul_assoc := λ _ _ _, ext $ λ _, rfl,
left_distrib := λ _ _ _, ext $ λ _, map_add _ _ _,
right_distrib := λ _ _ _, ext $ λ _, linear_map.add_apply _ _ _,
..continuous_linear_map.add_comm_group }
lemma smul_right_one_pow [topological_space R]
[topological_add_group R] [topological_semimodule R R] (c : R) (n : ℕ) :
(smul_right 1 c : R →L[R] R)^n = smul_right 1 (c^n) :=
begin
induction n with n ihn,
{ ext, simp },
{ rw [pow_succ, ihn, mul_def, smul_right_comp, smul_eq_mul, pow_succ'] }
end
/-- Given a right inverse `f₂ : M₂ →L[R] M` to `f₁ : M →L[R] M₂`,
`proj_ker_of_right_inverse f₁ f₂ h` is the projection `M →L[R] f₁.ker` along `f₂.range`. -/
def proj_ker_of_right_inverse [topological_add_group M] (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : function.right_inverse f₂ f₁) :
M →L[R] f₁.ker :=
(id R M - f₂.comp f₁).cod_restrict f₁.ker $ λ x, by simp [h (f₁ x)]
@[simp] lemma coe_proj_ker_of_right_inverse_apply [topological_add_group M]
(f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h : function.right_inverse f₂ f₁) (x : M) :
(f₁.proj_ker_of_right_inverse f₂ h x : M) = x - f₂ (f₁ x) :=
rfl
@[simp] lemma proj_ker_of_right_inverse_apply_idem [topological_add_group M]
(f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h : function.right_inverse f₂ f₁) (x : f₁.ker) :
f₁.proj_ker_of_right_inverse f₂ h x = x :=
subtype.ext_iff_val.2 $ by simp
@[simp] lemma proj_ker_of_right_inverse_comp_inv [topological_add_group M]
(f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h : function.right_inverse f₂ f₁) (y : M₂) :
f₁.proj_ker_of_right_inverse f₂ h (f₂ y) = 0 :=
subtype.ext_iff_val.2 $ by simp [h y]
end ring
section comm_ring
variables
{R : Type*} [comm_ring R] [topological_space R]
{M : Type*} [topological_space M] [add_comm_group M]
{M₂ : Type*} [topological_space M₂] [add_comm_group M₂]
{M₃ : Type*} [topological_space M₃] [add_comm_group M₃]
[module R M] [module R M₂] [module R M₃] [topological_module R M₃]
instance : has_scalar R (M →L[R] M₃) :=
⟨λ c f, ⟨c • f, continuous_const.smul f.2⟩⟩
variables (c : R) (h : M₂ →L[R] M₃) (f g : M →L[R] M₂) (x y z : M)
@[simp] lemma smul_comp : (c • h).comp f = c • (h.comp f) := rfl
variable [topological_module R M₂]
@[simp] lemma smul_apply : (c • f) x = c • (f x) := rfl
@[simp, norm_cast] lemma coe_apply : (((c • f) : M →L[R] M₂) : M →ₗ[R] M₂) = c • (f : M →ₗ[R] M₂) := rfl
@[norm_cast] lemma coe_apply' : (((c • f) : M →L[R] M₂) : M → M₂) = c • (f : M → M₂) := rfl
@[simp] lemma comp_smul : h.comp (c • f) = c • (h.comp f) := by { ext, simp }
variable [topological_add_group M₂]
instance : module R (M →L[R] M₂) :=
{ smul_zero := λ _, ext $ λ _, smul_zero _,
zero_smul := λ _, ext $ λ _, zero_smul _ _,
one_smul := λ _, ext $ λ _, one_smul _ _,
mul_smul := λ _ _ _, ext $ λ _, mul_smul _ _ _,
add_smul := λ _ _ _, ext $ λ _, add_smul _ _ _,
smul_add := λ _ _ _, ext $ λ _, smul_add _ _ _ }
instance : algebra R (M₂ →L[R] M₂) :=
algebra.of_semimodule' (λ c f, ext $ λ x, rfl) (λ c f, ext $ λ x, f.map_smul c x)
/-- Given `c : M →L[R] R`, `c.smul_rightₗ` is the linear map from `M₂` to `M →L[R] M₂`
sending `f` to `λ e, c e • f`. See also `continuous_linear_map.smul_rightL`. -/
def smul_rightₗ (c : M →L[R] R) : M₂ →ₗ[R] (M →L[R] M₂) :=
{ to_fun := c.smul_right,
map_add' := λ x y, by { ext e, simp [smul_add] },
map_smul' := λ a x, by { ext e, simp [smul_comm] } }
end comm_ring
end continuous_linear_map
namespace continuous_linear_equiv
section add_comm_monoid
variables {R : Type*} [semiring R]
{M : Type*} [topological_space M] [add_comm_monoid M]
{M₂ : Type*} [topological_space M₂] [add_comm_monoid M₂]
{M₃ : Type*} [topological_space M₃] [add_comm_monoid M₃]
{M₄ : Type*} [topological_space M₄] [add_comm_monoid M₄]
[semimodule R M] [semimodule R M₂] [semimodule R M₃] [semimodule R M₄]
/-- A continuous linear equivalence induces a continuous linear map. -/
def to_continuous_linear_map (e : M ≃L[R] M₂) : M →L[R] M₂ :=
{ cont := e.continuous_to_fun,
..e.to_linear_equiv.to_linear_map }
/-- Coerce continuous linear equivs to continuous linear maps. -/
instance : has_coe (M ≃L[R] M₂) (M →L[R] M₂) := ⟨to_continuous_linear_map⟩
/-- Coerce continuous linear equivs to maps. -/
-- see Note [function coercion]
instance : has_coe_to_fun (M ≃L[R] M₂) := ⟨λ _, M → M₂, λ f, f⟩
@[simp] theorem coe_def_rev (e : M ≃L[R] M₂) : e.to_continuous_linear_map = e := rfl
@[simp] theorem coe_apply (e : M ≃L[R] M₂) (b : M) : (e : M →L[R] M₂) b = e b := rfl
@[norm_cast] lemma coe_coe (e : M ≃L[R] M₂) : ((e : M →L[R] M₂) : M → M₂) = e := rfl
@[ext] lemma ext {f g : M ≃L[R] M₂} (h : (f : M → M₂) = g) : f = g :=
begin
cases f; cases g,
simp only,
ext x,
induction h,
refl
end
/-- A continuous linear equivalence induces a homeomorphism. -/
def to_homeomorph (e : M ≃L[R] M₂) : M ≃ₜ M₂ := { ..e }
-- Make some straightforward lemmas available to `simp`.
@[simp] lemma map_zero (e : M ≃L[R] M₂) : e (0 : M) = 0 := (e : M →L[R] M₂).map_zero
@[simp] lemma map_add (e : M ≃L[R] M₂) (x y : M) : e (x + y) = e x + e y :=
(e : M →L[R] M₂).map_add x y
@[simp] lemma map_smul (e : M ≃L[R] M₂) (c : R) (x : M) : e (c • x) = c • (e x) :=
(e : M →L[R] M₂).map_smul c x
@[simp] lemma map_eq_zero_iff (e : M ≃L[R] M₂) {x : M} : e x = 0 ↔ x = 0 :=
e.to_linear_equiv.map_eq_zero_iff
attribute [continuity]
continuous_linear_equiv.continuous_to_fun continuous_linear_equiv.continuous_inv_fun
@[continuity]
protected lemma continuous (e : M ≃L[R] M₂) : continuous (e : M → M₂) :=
e.continuous_to_fun
protected lemma continuous_on (e : M ≃L[R] M₂) {s : set M} : continuous_on (e : M → M₂) s :=
e.continuous.continuous_on
protected lemma continuous_at (e : M ≃L[R] M₂) {x : M} : continuous_at (e : M → M₂) x :=
e.continuous.continuous_at
protected lemma continuous_within_at (e : M ≃L[R] M₂) {s : set M} {x : M} :
continuous_within_at (e : M → M₂) s x :=
e.continuous.continuous_within_at
lemma comp_continuous_on_iff
{α : Type*} [topological_space α] (e : M ≃L[R] M₂) (f : α → M) (s : set α) :
continuous_on (e ∘ f) s ↔ continuous_on f s :=
e.to_homeomorph.comp_continuous_on_iff _ _
lemma comp_continuous_iff
{α : Type*} [topological_space α] (e : M ≃L[R] M₂) (f : α → M) :
continuous (e ∘ f) ↔ continuous f :=
e.to_homeomorph.comp_continuous_iff _
/-- An extensionality lemma for `R ≃L[R] M`. -/
lemma ext₁ [topological_space R] {f g : R ≃L[R] M} (h : f 1 = g 1) : f = g :=
ext $ funext $ λ x, mul_one x ▸ by rw [← smul_eq_mul, map_smul, h, map_smul]
section
variables (R M)
/-- The identity map as a continuous linear equivalence. -/
@[refl] protected def refl : M ≃L[R] M :=
{ continuous_to_fun := continuous_id,
continuous_inv_fun := continuous_id,
.. linear_equiv.refl R M }
end
@[simp, norm_cast] lemma coe_refl :
(continuous_linear_equiv.refl R M : M →L[R] M) = continuous_linear_map.id R M := rfl
@[simp, norm_cast] lemma coe_refl' :
(continuous_linear_equiv.refl R M : M → M) = id := rfl
/-- The inverse of a continuous linear equivalence, as a continuous linear equivalence. -/
@[symm] protected def symm (e : M ≃L[R] M₂) : M₂ ≃L[R] M :=
{ continuous_to_fun := e.continuous_inv_fun,
continuous_inv_fun := e.continuous_to_fun,
.. e.to_linear_equiv.symm }
@[simp] lemma symm_to_linear_equiv (e : M ≃L[R] M₂) :
e.symm.to_linear_equiv = e.to_linear_equiv.symm :=
by { ext, refl }
/-- The composition of two continuous linear equivalences as a continuous linear equivalence. -/
@[trans] protected def trans (e₁ : M ≃L[R] M₂) (e₂ : M₂ ≃L[R] M₃) : M ≃L[R] M₃ :=
{ continuous_to_fun := e₂.continuous_to_fun.comp e₁.continuous_to_fun,
continuous_inv_fun := e₁.continuous_inv_fun.comp e₂.continuous_inv_fun,
.. e₁.to_linear_equiv.trans e₂.to_linear_equiv }
@[simp] lemma trans_to_linear_equiv (e₁ : M ≃L[R] M₂) (e₂ : M₂ ≃L[R] M₃) :
(e₁.trans e₂).to_linear_equiv = e₁.to_linear_equiv.trans e₂.to_linear_equiv :=
by { ext, refl }
/-- Product of two continuous linear equivalences. The map comes from `equiv.prod_congr`. -/
def prod (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) : (M × M₃) ≃L[R] (M₂ × M₄) :=
{ continuous_to_fun := e.continuous_to_fun.prod_map e'.continuous_to_fun,
continuous_inv_fun := e.continuous_inv_fun.prod_map e'.continuous_inv_fun,
.. e.to_linear_equiv.prod e'.to_linear_equiv }
@[simp, norm_cast] lemma prod_apply (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (x) :
e.prod e' x = (e x.1, e' x.2) := rfl
@[simp, norm_cast] lemma coe_prod (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) :
(e.prod e' : (M × M₃) →L[R] (M₂ × M₄)) = (e : M →L[R] M₂).prod_map (e' : M₃ →L[R] M₄) :=
rfl
theorem bijective (e : M ≃L[R] M₂) : function.bijective e := e.to_linear_equiv.to_equiv.bijective
theorem injective (e : M ≃L[R] M₂) : function.injective e := e.to_linear_equiv.to_equiv.injective
theorem surjective (e : M ≃L[R] M₂) : function.surjective e := e.to_linear_equiv.to_equiv.surjective
@[simp] theorem trans_apply (e₁ : M ≃L[R] M₂) (e₂ : M₂ ≃L[R] M₃) (c : M) :
(e₁.trans e₂) c = e₂ (e₁ c) :=
rfl
@[simp] theorem apply_symm_apply (e : M ≃L[R] M₂) (c : M₂) : e (e.symm c) = c := e.1.6 c
@[simp] theorem symm_apply_apply (e : M ≃L[R] M₂) (b : M) : e.symm (e b) = b := e.1.5 b
@[simp] theorem symm_trans_apply (e₁ : M₂ ≃L[R] M) (e₂ : M₃ ≃L[R] M₂) (c : M) :
(e₂.trans e₁).symm c = e₂.symm (e₁.symm c) :=
rfl
@[simp, norm_cast]
lemma comp_coe (f : M ≃L[R] M₂) (f' : M₂ ≃L[R] M₃) :
(f' : M₂ →L[R] M₃).comp (f : M →L[R] M₂) = (f.trans f' : M →L[R] M₃) :=
rfl
@[simp] theorem coe_comp_coe_symm (e : M ≃L[R] M₂) :
(e : M →L[R] M₂).comp (e.symm : M₂ →L[R] M) = continuous_linear_map.id R M₂ :=
continuous_linear_map.ext e.apply_symm_apply
@[simp] theorem coe_symm_comp_coe (e : M ≃L[R] M₂) :
(e.symm : M₂ →L[R] M).comp (e : M →L[R] M₂) = continuous_linear_map.id R M :=
continuous_linear_map.ext e.symm_apply_apply
lemma symm_comp_self (e : M ≃L[R] M₂) :
(e.symm : M₂ → M) ∘ (e : M → M₂) = id :=
by{ ext x, exact symm_apply_apply e x }
lemma self_comp_symm (e : M ≃L[R] M₂) :
(e : M → M₂) ∘ (e.symm : M₂ → M) = id :=
by{ ext x, exact apply_symm_apply e x }
@[simp] lemma symm_comp_self' (e : M ≃L[R] M₂) :
((e.symm : M₂ →L[R] M) : M₂ → M) ∘ ((e : M →L[R] M₂) : M → M₂) = id :=
symm_comp_self e
@[simp] lemma self_comp_symm' (e : M ≃L[R] M₂) :
((e : M →L[R] M₂) : M → M₂) ∘ ((e.symm : M₂ →L[R] M) : M₂ → M) = id :=
self_comp_symm e
@[simp] theorem symm_symm (e : M ≃L[R] M₂) : e.symm.symm = e :=
by { ext x, refl }
@[simp] lemma refl_symm :
(continuous_linear_equiv.refl R M).symm = continuous_linear_equiv.refl R M :=
rfl
theorem symm_symm_apply (e : M ≃L[R] M₂) (x : M) : e.symm.symm x = e x :=
rfl
lemma symm_apply_eq (e : M ≃L[R] M₂) {x y} : e.symm x = y ↔ x = e y :=
e.to_linear_equiv.symm_apply_eq
lemma eq_symm_apply (e : M ≃L[R] M₂) {x y} : y = e.symm x ↔ e y = x :=
e.to_linear_equiv.eq_symm_apply
/-- Create a `continuous_linear_equiv` from two `continuous_linear_map`s that are
inverse of each other. -/
def equiv_of_inverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h₁ : function.left_inverse f₂ f₁)
(h₂ : function.right_inverse f₂ f₁) :
M ≃L[R] M₂ :=
{ to_fun := f₁,
continuous_to_fun := f₁.continuous,
inv_fun := f₂,
continuous_inv_fun := f₂.continuous,
left_inv := h₁,
right_inv := h₂,
.. f₁ }
@[simp] lemma equiv_of_inverse_apply (f₁ : M →L[R] M₂) (f₂ h₁ h₂ x) :
equiv_of_inverse f₁ f₂ h₁ h₂ x = f₁ x :=
rfl
@[simp] lemma symm_equiv_of_inverse (f₁ : M →L[R] M₂) (f₂ h₁ h₂) :
(equiv_of_inverse f₁ f₂ h₁ h₂).symm = equiv_of_inverse f₂ f₁ h₂ h₁ :=
rfl
variable (M)
/-- The continuous linear equivalences from `M` to itself form a group under composition. -/
instance automorphism_group : group (M ≃L[R] M) :=
{ mul := λ f g, g.trans f,
one := continuous_linear_equiv.refl R M,
inv := λ f, f.symm,
mul_assoc := λ f g h, by {ext, refl},
mul_one := λ f, by {ext, refl},
one_mul := λ f, by {ext, refl},
mul_left_inv := λ f, by {ext, exact f.left_inv x} }
end add_comm_monoid
section add_comm_group
variables {R : Type*} [semiring R]
{M : Type*} [topological_space M] [add_comm_group M]
{M₂ : Type*} [topological_space M₂] [add_comm_group M₂]
{M₃ : Type*} [topological_space M₃] [add_comm_group M₃]
{M₄ : Type*} [topological_space M₄] [add_comm_group M₄]
[semimodule R M] [semimodule R M₂] [semimodule R M₃] [semimodule R M₄]
variables [topological_add_group M₄]
/-- Equivalence given by a block lower diagonal matrix. `e` and `e'` are diagonal square blocks,
and `f` is a rectangular block below the diagonal. -/
def skew_prod (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (f : M →L[R] M₄) :
(M × M₃) ≃L[R] M₂ × M₄ :=
{ continuous_to_fun := (e.continuous_to_fun.comp continuous_fst).prod_mk
((e'.continuous_to_fun.comp continuous_snd).add $ f.continuous.comp continuous_fst),
continuous_inv_fun := (e.continuous_inv_fun.comp continuous_fst).prod_mk
(e'.continuous_inv_fun.comp $ continuous_snd.sub $ f.continuous.comp $
e.continuous_inv_fun.comp continuous_fst),
.. e.to_linear_equiv.skew_prod e'.to_linear_equiv ↑f }
@[simp] lemma skew_prod_apply (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (f : M →L[R] M₄) (x) :
e.skew_prod e' f x = (e x.1, e' x.2 + f x.1) := rfl
@[simp] lemma skew_prod_symm_apply (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (f : M →L[R] M₄) (x) :
(e.skew_prod e' f).symm x = (e.symm x.1, e'.symm (x.2 - f (e.symm x.1))) := rfl
end add_comm_group
section ring
variables {R : Type*} [ring R]
{M : Type*} [topological_space M] [add_comm_group M] [semimodule R M]
{M₂ : Type*} [topological_space M₂] [add_comm_group M₂] [semimodule R M₂]
@[simp] lemma map_sub (e : M ≃L[R] M₂) (x y : M) : e (x - y) = e x - e y :=
(e : M →L[R] M₂).map_sub x y
@[simp] lemma map_neg (e : M ≃L[R] M₂) (x : M) : e (-x) = -e x := (e : M →L[R] M₂).map_neg x
section
/-! The next theorems cover the identification between `M ≃L[R] M` and the group of units of the
ring `M →L[R] M`. -/
variables [topological_add_group M]
/-- An invertible continuous linear map `f` determines a continuous equivalence from `M` to itself.
-/
def of_unit (f : units (M →L[R] M)) : (M ≃L[R] M) :=
{ to_linear_equiv :=
{ to_fun := f.val,
map_add' := by simp,
map_smul' := by simp,
inv_fun := f.inv,
left_inv := λ x, show (f.inv * f.val) x = x, by {rw f.inv_val, simp},
right_inv := λ x, show (f.val * f.inv) x = x, by {rw f.val_inv, simp}, },
continuous_to_fun := f.val.continuous,
continuous_inv_fun := f.inv.continuous }
/-- A continuous equivalence from `M` to itself determines an invertible continuous linear map. -/
def to_unit (f : (M ≃L[R] M)) : units (M →L[R] M) :=
{ val := f,
inv := f.symm,
val_inv := by {ext, simp},
inv_val := by {ext, simp} }
variables (R M)
/-- The group of units of the algebra of continuous `R`-linear endomorphisms of `M` is
multiplicatively equivalent to the type of continuous linear equivalences between `M` and itself. -/
def units_equiv : units (M →L[R] M) ≃* (M ≃L[R] M) :=
{ to_fun := of_unit,
inv_fun := to_unit,
left_inv := λ f, by {ext, refl},
right_inv := λ f, by {ext, refl},
map_mul' := λ x y, by {ext, refl} }
@[simp] lemma units_equiv_apply (f : units (M →L[R] M)) (x : M) :
(units_equiv R M f) x = f x := rfl
end
section
variables (R) [topological_space R] [topological_module R R]
/-- Continuous linear equivalences `R ≃L[R] R` are enumerated by `units R`. -/
def units_equiv_aut : units R ≃ (R ≃L[R] R) :=
{ to_fun := λ u, equiv_of_inverse
(continuous_linear_map.smul_right 1 ↑u)
(continuous_linear_map.smul_right 1 ↑u⁻¹)
(λ x, by simp) (λ x, by simp),
inv_fun := λ e, ⟨e 1, e.symm 1,
by rw [← smul_eq_mul, ← map_smul, smul_eq_mul, mul_one, symm_apply_apply],
by rw [← smul_eq_mul, ← map_smul, smul_eq_mul, mul_one, apply_symm_apply]⟩,
left_inv := λ u, units.ext $ by simp,
right_inv := λ e, ext₁ $ by simp }
variable {R}
@[simp] lemma units_equiv_aut_apply (u : units R) (x : R) : units_equiv_aut R u x = x * u := rfl
@[simp] lemma units_equiv_aut_apply_symm (u : units R) (x : R) :
(units_equiv_aut R u).symm x = x * ↑u⁻¹ := rfl
@[simp] lemma units_equiv_aut_symm_apply (e : R ≃L[R] R) :
↑((units_equiv_aut R).symm e) = e 1 :=
rfl
end
variables [topological_add_group M]
open continuous_linear_map (id fst snd subtype_val mem_ker)
/-- A pair of continuous linear maps such that `f₁ ∘ f₂ = id` generates a continuous
linear equivalence `e` between `M` and `M₂ × f₁.ker` such that `(e x).2 = x` for `x ∈ f₁.ker`,
`(e x).1 = f₁ x`, and `(e (f₂ y)).2 = 0`. The map is given by `e x = (f₁ x, x - f₂ (f₁ x))`. -/
def equiv_of_right_inverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h : function.right_inverse f₂ f₁) :
M ≃L[R] M₂ × f₁.ker :=
equiv_of_inverse (f₁.prod (f₁.proj_ker_of_right_inverse f₂ h)) (f₂.coprod (subtype_val f₁.ker))
(λ x, by simp)
(λ ⟨x, y⟩, by simp [h x])
@[simp] lemma fst_equiv_of_right_inverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : function.right_inverse f₂ f₁) (x : M) :
(equiv_of_right_inverse f₁ f₂ h x).1 = f₁ x := rfl
@[simp] lemma snd_equiv_of_right_inverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : function.right_inverse f₂ f₁) (x : M) :
((equiv_of_right_inverse f₁ f₂ h x).2 : M) = x - f₂ (f₁ x) := rfl
@[simp] lemma equiv_of_right_inverse_symm_apply (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : function.right_inverse f₂ f₁) (y : M₂ × f₁.ker) :
(equiv_of_right_inverse f₁ f₂ h).symm y = f₂ y.1 + y.2 := rfl
end ring
end continuous_linear_equiv
namespace continuous_linear_map
open_locale classical
variables {R : Type*} {M : Type*} {M₂ : Type*} [topological_space M] [topological_space M₂]
section
variables [semiring R]
variables [add_comm_monoid M₂] [semimodule R M₂]
variables [add_comm_monoid M] [semimodule R M]
/-- Introduce a function `inverse` from `M →L[R] M₂` to `M₂ →L[R] M`, which sends `f` to `f.symm` if
`f` is a continuous linear equivalence and to `0` otherwise. This definition is somewhat ad hoc,
but one needs a fully (rather than partially) defined inverse function for some purposes, including
for calculus. -/
noncomputable def inverse : (M →L[R] M₂) → (M₂ →L[R] M) :=
λ f, if h : ∃ (e : M ≃L[R] M₂), (e : M →L[R] M₂) = f then ((classical.some h).symm : M₂ →L[R] M) else 0
/-- By definition, if `f` is invertible then `inverse f = f.symm`. -/
@[simp] lemma inverse_equiv (e : M ≃L[R] M₂) : inverse (e : M →L[R] M₂) = e.symm :=
begin
have h : ∃ (e' : M ≃L[R] M₂), (e' : M →L[R] M₂) = ↑e := ⟨e, rfl⟩,
simp only [inverse, dif_pos h],
congr,
ext x,
have h' := classical.some_spec h,
simpa using continuous_linear_map.ext_iff.1 (h') x -- for some reason `h'` cannot be substituted here
end
/-- By definition, if `f` is not invertible then `inverse f = 0`. -/
@[simp] lemma inverse_non_equiv (f : M →L[R] M₂) (h : ¬∃ (e' : M ≃L[R] M₂), ↑e' = f) :
inverse f = 0 :=
dif_neg h
end
section
variables [ring R]
variables [add_comm_group M] [topological_add_group M] [module R M]
variables [add_comm_group M₂] [module R M₂]
@[simp] lemma ring_inverse_equiv (e : M ≃L[R] M) :
ring.inverse ↑e = inverse (e : M →L[R] M) :=
begin
suffices :
ring.inverse ((((continuous_linear_equiv.units_equiv _ _).symm e) : M →L[R] M)) = inverse ↑e,
{ convert this },
simp,
refl,
end
/-- The function `continuous_linear_map.inverse` can be written in terms of `ring.inverse` for the
ring of self-maps of the domain. -/
lemma to_ring_inverse (e : M ≃L[R] M₂) (f : M →L[R] M₂) :
inverse f = (ring.inverse ((e.symm : (M₂ →L[R] M)).comp f)).comp e.symm :=
begin
by_cases h₁ : ∃ (e' : M ≃L[R] M₂), ↑e' = f,
{ obtain ⟨e', he'⟩ := h₁,
rw ← he',
ext,
simp },
{ suffices : ¬is_unit ((e.symm : M₂ →L[R] M).comp f),
{ simp [this, h₁] },
revert h₁,
contrapose!,
rintros ⟨F, hF⟩,
use (continuous_linear_equiv.units_equiv _ _ F).trans e,
ext,
simp [hF] }
end
lemma ring_inverse_eq_map_inverse : ring.inverse = @inverse R M M _ _ _ _ _ _ _ :=
begin
ext,
simp [to_ring_inverse (continuous_linear_equiv.refl R M)],
end
end
end continuous_linear_map
namespace submodule
variables
{R : Type*} [ring R]
{M : Type*} [topological_space M] [add_comm_group M] [module R M]
{M₂ : Type*} [topological_space M₂] [add_comm_group M₂] [module R M₂]
open continuous_linear_map
/-- A submodule `p` is called *complemented* if there exists a continuous linear projection `M →L[R] p`. -/
def closed_complemented (p : submodule R M) : Prop := ∃ f : M →L[R] p, ∀ x : p, f x = x
lemma closed_complemented.has_closed_complement {p : submodule R M} [t1_space p]
(h : closed_complemented p) :
∃ (q : submodule R M) (hq : is_closed (q : set M)), is_compl p q :=
exists.elim h $ λ f hf, ⟨f.ker, f.is_closed_ker, linear_map.is_compl_of_proj hf⟩
protected lemma closed_complemented.is_closed [topological_add_group M] [t1_space M]
{p : submodule R M} (h : closed_complemented p) :
is_closed (p : set M) :=
begin
rcases h with ⟨f, hf⟩,
have : ker (id R M - (subtype_val p).comp f) = p := linear_map.ker_id_sub_eq_of_proj hf,
exact this ▸ (is_closed_ker _)
end
@[simp] lemma closed_complemented_bot : closed_complemented (⊥ : submodule R M) :=
⟨0, λ x, by simp only [zero_apply, eq_zero_of_bot_submodule x]⟩
@[simp] lemma closed_complemented_top : closed_complemented (⊤ : submodule R M) :=
⟨(id R M).cod_restrict ⊤ (λ x, trivial), λ x, subtype.ext_iff_val.2 $ by simp⟩
end submodule
lemma continuous_linear_map.closed_complemented_ker_of_right_inverse {R : Type*} [ring R]
{M : Type*} [topological_space M] [add_comm_group M]
{M₂ : Type*} [topological_space M₂] [add_comm_group M₂] [module R M] [module R M₂]
[topological_add_group M] (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : function.right_inverse f₂ f₁) :
f₁.ker.closed_complemented :=
⟨f₁.proj_ker_of_right_inverse f₂ h, f₁.proj_ker_of_right_inverse_apply_idem f₂ h⟩
|
92c4f5f7cf2130b0880c2080c7565d84e60b5a84 | acc85b4be2c618b11fc7cb3005521ae6858a8d07 | /tactic/rcases.lean | 31f5f5c16977887903f76f185e837a454618c9db | ["Apache-2.0"] | permissive | linpingchuan/mathlib | d49990b236574df2a45d9919ba43c923f693d341 | 5ad8020f67eb13896a41cc7691d072c9331b1f76 | refs/heads/master | 1,626,019,377,808 | 1,508,048,784,000 | 1,508,048,784,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 3,404 | lean |
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.dlist
open lean lean.parser
namespace tactic
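/- Editorial comment (added for readability; not in the original source): an `rcases`
pattern is either a single name to introduce, or a list of alternatives (one per
constructor), each alternative being the list of patterns for that constructor's fields. -/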
inductive rcases_patt : Type
| one : name → rcases_patt
| many : list (list rcases_patt) → rcases_patt
#print instances has_reflect
instance rcases_patt.inhabited : inhabited rcases_patt :=
⟨rcases_patt.one `_⟩
def rcases_patt.name : rcases_patt → name
| (rcases_patt.one n) := n
| _ := `_
meta instance rcases_patt.has_reflect : has_reflect rcases_patt
| (rcases_patt.one n) := `(_)
| (rcases_patt.many l) := `(λ l, rcases_patt.many l).subst $
by have := rcases_patt.has_reflect; exact list.reflect l
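/- Editorial comment (not in the original source): given the arity of a constructor and the
user-supplied patterns for it, produce the names handed to `cases_core` together with the
patterns to recurse on, padding with `_` or regrouping excess patterns as needed. -/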
meta def rcases.process_constructor :
nat → list rcases_patt → list name × list rcases_patt
| 0 ids := ([], [])
| 1 [] := ([`_], [default _])
| 1 [id] := ([id.name], [id])
| 1 ids := ([`_], [rcases_patt.many [ids]])
| (n+1) ids :=
let (ns, ps) := rcases.process_constructor n ids.tail,
p := ids.head in
(p.name :: ns, p :: ps)
meta def rcases.process_constructors (params : nat) :
list name → list (list rcases_patt) →
tactic (dlist name × list (name × list rcases_patt))
| [] ids := pure (dlist.empty, [])
| (c::cs) ids := do
fn ← mk_const c,
n ← get_arity fn,
let (h, t) := by from match cs, ids.tail with
| [], _::_ := ([rcases_patt.many ids], [])
| _, _ := (ids.head, ids.tail)
end,
let (ns, ps) := rcases.process_constructor (n - params) h,
(l, r) ← rcases.process_constructors cs t,
pure (dlist.of_list ns ++ l, (c, ps) :: r)
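/- Editorial comment (not in the original source): `align p as bs` walks both lists in
parallel, pairing the current heads when `p` holds and otherwise discarding the head of the
first list. -/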
private def align {α β} (p : α → β → Prop) [∀ a b, decidable (p a b)] :
list α → list β → list (α × β)
| (a::as) (b::bs) :=
if p a b then (a, b) :: align as bs else align as (b::bs)
| _ _ := []
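/- Editorial comment (not in the original source): `rcases.continue` processes the remaining
(pattern, hypothesis) pairs of the current goal: nested `many` patterns recurse via the
supplied `rcases_core`, plain name patterns are skipped (their names were already used by
`cases_core`), and at the end the remaining goals are collected. -/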
meta def rcases.continue
(rcases_core : list (list rcases_patt) → expr → tactic (list expr))
(n : nat) : list (rcases_patt × expr) → tactic (list expr)
| [] := intron n >> get_goals
| ((rcases_patt.many ids, e) :: l) := do
gs ← rcases_core ids e,
list.join <$> gs.mmap (λ g, set_goals [g] >> rcases.continue l)
| (_ :: l) := rcases.continue l
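/- Editorial comment (not in the original source): the main worker.  It checks that the type
of `e` is an inductive datatype, runs `cases_core` with the names extracted from the patterns,
and then recursively processes the sub-patterns in each generated goal. -/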
meta def rcases_core (n : nat) : list (list rcases_patt) → expr → tactic (list expr)
| ids e := do
t ← infer_type e >>= whnf,
env ← get_env,
let I := t.get_app_fn.const_name,
when (¬env.is_inductive I) $
fail format!"rcases tactic failed, {e} is not an inductive datatype",
let params := env.inductive_num_params I,
let c := env.constructors_of I,
(ids, r) ← rcases.process_constructors params c ids,
l ← cases_core e ids.to_list,
gs ← get_goals,
list.join <$> (align (λ (a : _ × _) (b : _ × _ × _), a.1 = b.2.1) r (gs.zip l)).mmap
(λ⟨⟨_, ps⟩, g, _, hs, _⟩,
set_goals [g] >> rcases.continue rcases_core n (ps.zip hs))
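/- Editorial comment (not in the original source): the user-facing entry point.  The
expression is elaborated first; if it is not a local hypothesis it is generalized (or
asserted) so that `rcases_core` can perform case analysis on a local constant. -/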
meta def rcases (p : pexpr) (ids : list (list rcases_patt)) : tactic unit :=
do e ← i_to_expr p,
if e.is_local_constant then
focus1 (rcases_core 0 ids e >>= set_goals)
else do
x ← mk_fresh_name,
n ← revert_kdependencies e semireducible,
(tactic.generalize e x)
<|>
(do t ← infer_type e,
tactic.assertv x t e,
get_local x >>= tactic.revert,
return ()),
h ← tactic.intro1,
focus1 (rcases_core 0 ids h >>= set_goals)
end tactic
|
faf834852054d74bfc94ad535aaecc599e2b5e70 | 9be442d9ec2fcf442516ed6e9e1660aa9071b7bd | /tests/lean/run/subtype_inj.lean | 28582904d88b8b90d6f8e7de49646a2d499fbae1 | ["Apache-2.0", "LLVM-exception", "NCSA", "LGPL-3.0-only", "LicenseRef-scancode-inner-net-2.0", "BSD-3-Clause", "LGPL-2.0-or-later", "Spencer-94", "LGPL-2.1-or-later", "HPND", "LicenseRef-scancode-pcre", "ISC", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "SunPro", "CMU-Mach"] | permissive | EdAyers/lean4 | 57ac632d6b0789cb91fab2170e8c9e40441221bd | 37ba0df5841bde51dbc2329da81ac23d4f6a4de4 | refs/heads/master | 1,676,463,245,298 | 1,660,619,433,000 | 1,660,619,433,000 | 183,433,437 | 1 | 0 | Apache-2.0 | 1,657,612,672,000 | 1,556,196,574,000 | Lean | UTF-8 | Lean | false | false | 182 | lean |
theorem subtype_inj (A: Type) (p: A → Prop) (a b: A) (pa: p a) (pb: p b) : (⟨a, pa⟩: {a//p a}) = (⟨b, pb⟩: {b//p b}) → a = b := by
intro eq
injection eq
assumption
|
ce8935912274a01ad21b9d8e02223c0d39b93bc3 | e09201d437062e1f95e6e5360aab0c9f947901aa | /src/regular/pumping_lemma.lean | 605ff674598d272d5150ffccca9bde628fae975f | [] | no_license | VArtem/lean-regular-languages | 34f4b093f28ef2f09ba7e684e642a0f97c901560 | e877243188253d0ac17ccf0ae2da7bf608686ff0 | refs/heads/master | 1,683,590,111,306 | 1,622,307,234,000 | 1,622,307,234,000 | 284,232,653 | 7 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 3,015 | lean |
import automata.dfa
import regular.regex
import data.list.basic
import regular.list_lemmas
open DFA list
namespace pumping
variables {S : Type} {Q : Type} {L : set (list S)} [fintype S] [fintype Q] [decidable_eq Q]
variables {w : list S}
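/- Editorial comment (not in the original source): if a word is at least as long as the number
of states, the run of the DFA on it must visit some state twice, yielding a decomposition
`w = x ++ y ++ z` with a non-empty middle part `y` that loops on that state. -/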
lemma dfa_word_split (d : DFA S Q) (st : Q) (w : list S):
(fintype.card Q) ≤ length w →
∃ (x y z : list S) (t : Q), x ++ y ++ z = w ∧ (x ++ y).length ≤ (fintype.card Q) ∧ y ≠ [] ∧ go d st x = t ∧ go d t y = t :=
begin
rintro hlen,
have tmp2 : (finset.univ : finset Q).card < (finset.range (fintype.card Q + 1)).card, from by {
simp only [hlen, finset.card_range],
rw nat.lt_succ_iff,
refl,
},
have tmp3 := finset.exists_ne_map_eq_of_card_lt_of_maps_to tmp2,
specialize tmp3 (λ a _, finset.mem_univ (go d st (take a w))),
rcases tmp3 with ⟨x, hx, y, hy, x_ne_y, go_xy_eq⟩,
rw finset.mem_range at hx hy,
replace hx := nat.le_of_lt_succ hx,
replace hy := nat.le_of_lt_succ hy,
wlog x_lt_y : x ≤ y,
replace x_lt_y := nat.lt_of_le_and_ne x_lt_y x_ne_y,
use [take x $ take y w, drop x $ take y w, drop y w, go d st (take x w)],
simp only [true_and, take_append_drop, eq_self_iff_true],
refine ⟨_, _, _, _⟩, {
rwa [length_take, min_eq_left (le_trans hy hlen)],
}, {
exact drop_of_take_of_lt_ne_nil x_lt_y (le_trans hy hlen),
}, {
rw [take_take, min_eq_left_of_lt x_lt_y],
}, {
rw [← dfa_go_append', go_xy_eq],
congr,
exact take_append_drop_of_lt x_lt_y,
}
end
lemma dfa_go_repeat {d : DFA S Q} {st : Q} {w: list S} {k : ℕ} :
go d st w = st → go d st (repeat w k).join = st :=
begin
intro go_base,
induction k, {
simp only [join, go_finish, repeat],
}, {
simp only [join, repeat_succ],
rwa [dfa_go_append', go_base],
}
end
lemma pumping_lemma :
dfa_lang L →
(∃ (n : ℕ), ∀ w, w ∈ L → n ≤ length w →
(∃ (x y z : list S), x ++ y ++ z = w ∧ y ≠ [] ∧ (x ++ y).length ≤ n ∧
∀ (k : ℕ), x ++ (repeat y k).join ++ z ∈ L)) :=
begin
rintro ⟨Q, _, _, dfa, rfl⟩,
resetI,
use fintype.card Q,
rintro w w_dfa w_len,
rcases dfa_word_split dfa dfa.start w w_len with ⟨x, y, z, t, xyz, xy_len, ynil, hx, hy⟩,
refine ⟨x, y, z, xyz, ynil, xy_len, λ k, _⟩,
simp only [lang_of_dfa, dfa_accepts_word, set.mem_set_of_eq] at w_dfa ⊢,
rw ← xyz at w_dfa,
rw [append_assoc, dfa_go_append', hx, dfa_go_append'] at w_dfa ⊢,
rw dfa_go_repeat hy,
rwa hy at w_dfa,
end
lemma pumping_lemma_negation {L : set (list S)} :
(∀ n : ℕ, ∃ (w : list S), w ∈ L ∧ n ≤ length w ∧
∀ (x y z : list S), x ++ y ++ z = w → y ≠ [] → (x ++ y).length ≤ n →
∃ k : ℕ, x ++ (repeat y k).join ++ z ∉ L) → ¬dfa_lang L:=
begin
contrapose,
push_neg,
refine pumping_lemma,
end
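/- Editorial usage sketch (not in the original source): to show that a concrete language is not
accepted by any DFA, apply `pumping_lemma_negation` and, for each pumping length `n`, exhibit a
witness word together with an exponent `k` for which the pumped word leaves the language. -/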
end pumping
|
b97664b5d9f660f633bf7face9a5070490e7ce84 | 0c6b99c0daded009943e570c13367c8cc7d3bcae | /chapter6/chapter6.lean | e8cbcb1735680fd8453167ff8eec4e4a19dadc1d | [] | no_license | TateKennington/logic-and-proof-exercises | e3c5c5b12b6238b47b0c5acf8717923bd471a535 | acca8882026e7b643453eb096d3021cd043005bd | refs/heads/main | 1,687,638,489,616 | 1,626,858,474,000 | 1,626,858,474,000 | 371,871,374 | 4 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 492 | lean |
open classical
variables A B P Q: Prop
example: ¬(¬A ∨ B) → A :=
assume h,
have ¬A → false, from
assume nA,
have ¬A ∨ B, from or.inl nA,
h this,
by_contradiction this
example: (¬P ∧ ¬Q) → ¬(P ∨ Q) :=
assume h,
assume h_or,
show false, from
have nP: ¬P, from and.left h,
have nQ: ¬Q, from and.right h,
have h₁: P → false, from assume :P, nP this,
have h₂: Q → false, from assume :Q, nQ this,
or.elim h_or h₁ h₂
|
f2803c83e9aff4c8bed86700661ba07fe2743c93 | 63abd62053d479eae5abf4951554e1064a4c45b4 | /src/algebra/group/prod.lean | 2afe2225d5021fec2d7078820e623334da54cdc3 | ["Apache-2.0"] | permissive | Lix0120/mathlib | 0020745240315ed0e517cbf32e738d8f9811dd80 | e14c37827456fc6707f31b4d1d16f1f3a3205e91 | refs/heads/master | 1,673,102,855,024 | 1,604,151,044,000 | 1,604,151,044,000 | 308,930,245 | 0 | 0 | Apache-2.0 | 1,604,164,710,000 | 1,604,163,547,000 | null | UTF-8 | Lean | false | false | 10,017 | lean |
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon, Patrick Massot, Yury Kudryashov
-/
import algebra.group.hom
import data.equiv.mul_add
import data.prod
/-!
# Monoid, group etc structures on `M × N`
In this file we define one-binop (`monoid`, `group` etc) structures on `M × N`. We also prove
trivial `simp` lemmas, and define the following operations on `monoid_hom`s:
* `fst M N : M × N →* M`, `snd M N : M × N →* N`: projections `prod.fst` and `prod.snd`
as `monoid_hom`s;
* `inl M N : M →* M × N`, `inr M N : N →* M × N`: inclusions of first/second monoid
into the product;
* `f.prod g : M →* N × P`: sends `x` to `(f x, g x)`;
* `f.coprod g : M × N →* P`: sends `(x, y)` to `f x * g y`;
* `f.prod_map g : M × N → M' × N'`: `prod.map f g` as a `monoid_hom`,
sends `(x, y)` to `(f x, g y)`.
-/
variables {A : Type*} {B : Type*} {G : Type*} {H : Type*} {M : Type*} {N : Type*} {P : Type*}
namespace prod
@[to_additive]
instance [has_mul M] [has_mul N] : has_mul (M × N) := ⟨λ p q, ⟨p.1 * q.1, p.2 * q.2⟩⟩
@[simp, to_additive]
lemma fst_mul [has_mul M] [has_mul N] (p q : M × N) : (p * q).1 = p.1 * q.1 := rfl
@[simp, to_additive]
lemma snd_mul [has_mul M] [has_mul N] (p q : M × N) : (p * q).2 = p.2 * q.2 := rfl
@[simp, to_additive]
lemma mk_mul_mk [has_mul M] [has_mul N] (a₁ a₂ : M) (b₁ b₂ : N) :
(a₁, b₁) * (a₂, b₂) = (a₁ * a₂, b₁ * b₂) := rfl
@[to_additive]
instance [has_one M] [has_one N] : has_one (M × N) := ⟨(1, 1)⟩
@[simp, to_additive]
lemma fst_one [has_one M] [has_one N] : (1 : M × N).1 = 1 := rfl
@[simp, to_additive]
lemma snd_one [has_one M] [has_one N] : (1 : M × N).2 = 1 := rfl
@[to_additive]
lemma one_eq_mk [has_one M] [has_one N] : (1 : M × N) = (1, 1) := rfl
@[simp, to_additive]
lemma mk_eq_one [has_one M] [has_one N] {x : M} {y : N} : (x, y) = 1 ↔ x = 1 ∧ y = 1 :=
mk.inj_iff
@[to_additive]
lemma fst_mul_snd [monoid M] [monoid N] (p : M × N) :
(p.fst, 1) * (1, p.snd) = p :=
ext (mul_one p.1) (one_mul p.2)
@[to_additive]
instance [has_inv M] [has_inv N] : has_inv (M × N) := ⟨λp, (p.1⁻¹, p.2⁻¹)⟩
@[simp, to_additive]
lemma fst_inv [has_inv G] [has_inv H] (p : G × H) : (p⁻¹).1 = (p.1)⁻¹ := rfl
@[simp, to_additive]
lemma snd_inv [has_inv G] [has_inv H] (p : G × H) : (p⁻¹).2 = (p.2)⁻¹ := rfl
@[simp, to_additive]
lemma inv_mk [has_inv G] [has_inv H] (a : G) (b : H) : (a, b)⁻¹ = (a⁻¹, b⁻¹) := rfl
@[to_additive]
instance [semigroup M] [semigroup N] : semigroup (M × N) :=
{ mul_assoc := assume a b c, mk.inj_iff.mpr ⟨mul_assoc _ _ _, mul_assoc _ _ _⟩,
.. prod.has_mul }
@[to_additive]
instance [monoid M] [monoid N] : monoid (M × N) :=
{ one_mul := assume a, prod.rec_on a $ λa b, mk.inj_iff.mpr ⟨one_mul _, one_mul _⟩,
mul_one := assume a, prod.rec_on a $ λa b, mk.inj_iff.mpr ⟨mul_one _, mul_one _⟩,
.. prod.semigroup, .. prod.has_one }
@[to_additive]
instance [group G] [group H] : group (G × H) :=
{ mul_left_inv := assume a, mk.inj_iff.mpr ⟨mul_left_inv _, mul_left_inv _⟩,
.. prod.monoid, .. prod.has_inv }
@[simp] lemma fst_sub [add_group A] [add_group B] (a b : A × B) : (a - b).1 = a.1 - b.1 := rfl
@[simp] lemma snd_sub [add_group A] [add_group B] (a b : A × B) : (a - b).2 = a.2 - b.2 := rfl
@[simp] lemma mk_sub_mk [add_group A] [add_group B] (x₁ x₂ : A) (y₁ y₂ : B) :
(x₁, y₁) - (x₂, y₂) = (x₁ - x₂, y₁ - y₂) := rfl
@[to_additive]
instance [comm_semigroup G] [comm_semigroup H] : comm_semigroup (G × H) :=
{ mul_comm := assume a b, mk.inj_iff.mpr ⟨mul_comm _ _, mul_comm _ _⟩,
.. prod.semigroup }
@[to_additive]
instance [left_cancel_semigroup G] [left_cancel_semigroup H] :
left_cancel_semigroup (G × H) :=
{ mul_left_cancel := λ a b c h, prod.ext (mul_left_cancel (prod.ext_iff.1 h).1)
(mul_left_cancel (prod.ext_iff.1 h).2),
.. prod.semigroup }
@[to_additive]
instance [right_cancel_semigroup G] [right_cancel_semigroup H] :
right_cancel_semigroup (G × H) :=
{ mul_right_cancel := λ a b c h, prod.ext (mul_right_cancel (prod.ext_iff.1 h).1)
(mul_right_cancel (prod.ext_iff.1 h).2),
.. prod.semigroup }
@[to_additive]
instance [comm_monoid M] [comm_monoid N] : comm_monoid (M × N) :=
{ .. prod.comm_semigroup, .. prod.monoid }
@[to_additive]
instance [comm_group G] [comm_group H] : comm_group (G × H) :=
{ .. prod.comm_semigroup, .. prod.group }
end prod
namespace monoid_hom
variables (M N) [monoid M] [monoid N]
/-- Given monoids `M`, `N`, the natural projection homomorphism from `M × N` to `M`.-/
@[to_additive "Given additive monoids `A`, `B`, the natural projection homomorphism
from `A × B` to `A`"]
def fst : M × N →* M := ⟨prod.fst, rfl, λ _ _, rfl⟩
/-- Given monoids `M`, `N`, the natural projection homomorphism from `M × N` to `N`.-/
@[to_additive "Given additive monoids `A`, `B`, the natural projection homomorphism
from `A × B` to `B`"]
def snd : M × N →* N := ⟨prod.snd, rfl, λ _ _, rfl⟩
/-- Given monoids `M`, `N`, the natural inclusion homomorphism from `M` to `M × N`. -/
@[to_additive "Given additive monoids `A`, `B`, the natural inclusion homomorphism
from `A` to `A × B`."]
def inl : M →* M × N :=
⟨λ x, (x, 1), rfl, λ _ _, prod.ext rfl (one_mul 1).symm⟩
/-- Given monoids `M`, `N`, the natural inclusion homomorphism from `N` to `M × N`. -/
@[to_additive "Given additive monoids `A`, `B`, the natural inclusion homomorphism
from `B` to `A × B`."]
def inr : N →* M × N :=
⟨λ y, (1, y), rfl, λ _ _, prod.ext (one_mul 1).symm rfl⟩
variables {M N}
@[simp, to_additive] lemma coe_fst : ⇑(fst M N) = prod.fst := rfl
@[simp, to_additive] lemma coe_snd : ⇑(snd M N) = prod.snd := rfl
@[simp, to_additive] lemma inl_apply (x) : inl M N x = (x, 1) := rfl
@[simp, to_additive] lemma inr_apply (y) : inr M N y = (1, y) := rfl
@[simp, to_additive] lemma fst_comp_inl : (fst M N).comp (inl M N) = id M := rfl
@[simp, to_additive] lemma snd_comp_inl : (snd M N).comp (inl M N) = 1 := rfl
@[simp, to_additive] lemma fst_comp_inr : (fst M N).comp (inr M N) = 1 := rfl
@[simp, to_additive] lemma snd_comp_inr : (snd M N).comp (inr M N) = id N := rfl
section prod
variable [monoid P]
/-- Combine two `monoid_hom`s `f : M →* N`, `g : M →* P` into `f.prod g : M →* N × P`
given by `(f.prod g) x = (f x, g x)` -/
@[to_additive prod "Combine two `add_monoid_hom`s `f : M →+ N`, `g : M →+ P` into
`f.prod g : M →+ N × P` given by `(f.prod g) x = (f x, g x)`"]
protected def prod (f : M →* N) (g : M →* P) : M →* N × P :=
{ to_fun := λ x, (f x, g x),
map_one' := prod.ext f.map_one g.map_one,
map_mul' := λ x y, prod.ext (f.map_mul x y) (g.map_mul x y) }
@[simp, to_additive prod_apply]
lemma prod_apply (f : M →* N) (g : M →* P) (x) : f.prod g x = (f x, g x) := rfl
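-- For illustration (not a lemma of this file): the first component of `f.prod g x`
-- should reduce to `f x` definitionally, the pointwise form of `fst_comp_prod` below.
example (f : M →* N) (g : M →* P) (x : M) : (f.prod g x).1 = f x := rfl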
@[simp, to_additive fst_comp_prod]
lemma fst_comp_prod (f : M →* N) (g : M →* P) : (fst N P).comp (f.prod g) = f :=
ext $ λ x, rfl
@[simp, to_additive snd_comp_prod]
lemma snd_comp_prod (f : M →* N) (g : M →* P) : (snd N P).comp (f.prod g) = g :=
ext $ λ x, rfl
@[simp, to_additive prod_unique]
lemma prod_unique (f : M →* N × P) :
((fst N P).comp f).prod ((snd N P).comp f) = f :=
ext $ λ x, by simp only [prod_apply, coe_fst, coe_snd, comp_apply, prod.mk.eta]
end prod
section prod_map
variables {M' : Type*} {N' : Type*} [monoid M'] [monoid N'] [monoid P]
(f : M →* M') (g : N →* N')
/-- `prod.map` as a `monoid_hom`. -/
@[to_additive prod_map "`prod.map` as an `add_monoid_hom`"]
def prod_map : M × N →* M' × N' := (f.comp (fst M N)).prod (g.comp (snd M N))
@[to_additive prod_map_def]
lemma prod_map_def : prod_map f g = (f.comp (fst M N)).prod (g.comp (snd M N)) := rfl
@[simp, to_additive coe_prod_map]
lemma coe_prod_map : ⇑(prod_map f g) = prod.map f g := rfl
@[to_additive prod_comp_prod_map]
lemma prod_comp_prod_map (f : P →* M) (g : P →* N) (f' : M →* M') (g' : N →* N') :
(f'.prod_map g').comp (f.prod g) = (f'.comp f).prod (g'.comp g) :=
rfl
end prod_map
section coprod
variables [comm_monoid P] (f : M →* P) (g : N →* P)
/-- Coproduct of two `monoid_hom`s with the same codomain:
`f.coprod g (p : M × N) = f p.1 * g p.2`. -/
@[to_additive "Coproduct of two `add_monoid_hom`s with the same codomain:
`f.coprod g (p : M × N) = f p.1 + g p.2`."]
def coprod : M × N →* P := f.comp (fst M N) * g.comp (snd M N)
@[simp, to_additive]
lemma coprod_apply (p : M × N) : f.coprod g p = f p.1 * g p.2 := rfl
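-- For illustration (not a lemma of this file): `coprod_apply` evaluated on a literal pair.
example (x : M) (y : N) : f.coprod g (x, y) = f x * g y := coprod_apply f g (x, y)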
@[simp, to_additive]
lemma coprod_comp_inl : (f.coprod g).comp (inl M N) = f :=
ext $ λ x, by simp [coprod_apply]
@[simp, to_additive]
lemma coprod_comp_inr : (f.coprod g).comp (inr M N) = g :=
ext $ λ x, by simp [coprod_apply]
@[simp, to_additive] lemma coprod_unique (f : M × N →* P) :
(f.comp (inl M N)).coprod (f.comp (inr M N)) = f :=
ext $ λ x, by simp [coprod_apply, inl_apply, inr_apply, ← map_mul]
@[simp, to_additive] lemma coprod_inl_inr {M N : Type*} [comm_monoid M] [comm_monoid N] :
(inl M N).coprod (inr M N) = id (M × N) :=
coprod_unique (id $ M × N)
lemma comp_coprod {Q : Type*} [comm_monoid Q] (h : P →* Q) (f : M →* P) (g : N →* P) :
h.comp (f.coprod g) = (h.comp f).coprod (h.comp g) :=
ext $ λ x, by simp
end coprod
end monoid_hom
namespace mul_equiv
variables (M N) [monoid M] [monoid N]
/-- The equivalence between `M × N` and `N × M` given by swapping the components is multiplicative. -/
@[to_additive prod_comm "The equivalence between `M × N` and `N × M` given by swapping the components is
additive."]
def prod_comm : M × N ≃* N × M :=
{ map_mul' := λ ⟨x₁, y₁⟩ ⟨x₂, y₂⟩, rfl, ..equiv.prod_comm M N }
@[simp, to_additive coe_prod_comm] lemma coe_prod_comm : ⇑(prod_comm M N) = prod.swap := rfl
@[simp, to_additive coe_prod_comm_symm] lemma coe_prod_comm_symm :
⇑((prod_comm M N).symm) = prod.swap := rfl
end mul_equiv
|
721591d2661f2a523b63d01a0a2b9802c08ea09d
|
737dc4b96c97368cb66b925eeea3ab633ec3d702
|
/stage0/src/Lean/Data.lean
|
de84c8f7331178a9aa07d9a41b1005b40f273811
|
[
"Apache-2.0"
] |
permissive
|
Bioye97/lean4
|
1ace34638efd9913dc5991443777b01a08983289
|
bc3900cbb9adda83eed7e6affeaade7cfd07716d
|
refs/heads/master
| 1,690,589,820,211
| 1,631,051,000,000
| 1,631,067,598,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 594
|
lean
|
/-
Copyright (c) 2020 Sebastian Ullrich. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
-/
import Lean.Data.Format
import Lean.Data.Parsec
import Lean.Data.Json
import Lean.Data.Xml
import Lean.Data.JsonRpc
import Lean.Data.KVMap
import Lean.Data.LBool
import Lean.Data.LOption
import Lean.Data.Lsp
import Lean.Data.Name
import Lean.Data.Occurrences
import Lean.Data.OpenDecl
import Lean.Data.Options
import Lean.Data.Position
import Lean.Data.SMap
import Lean.Data.Trie
import Lean.Data.PrefixTree
import Lean.Data.NameTrie
|
9f57c70e19b2c3ec45f8ba0b3597cdbd8bec778a
|
15457aa8f007f15bd4343ddc208925e54b867944
|
/src/a.lean
|
67dd9e9dab4a3ce2968b177ac757d2514c99b92d
|
[] |
no_license
|
alexjbest/pole-test
|
d7c599f96fbf879abd6ac562c55a46036f743c9d
|
a8458dea5dfa337d85bf50c4698eae668759a487
|
refs/heads/master
| 1,691,966,250,606
| 1,632,236,826,000
| 1,632,236,826,000
| 408,866,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 122
|
lean
|
open tactic
lemma a1 : true :=
begin
sleep 20000,
trivial
end
lemma a2 : true :=
begin
sleep 10000,
trivial
end
|
d405d6929ea777401932d2f614944ff17a8cdfef
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/tactic/suggest.lean
|
e17723827fe58ab252c4d4d32967cccd3a428807
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 9,841
|
lean
|
/-
Copyright (c) 2019 Lucas Allen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Lucas Allen and Scott Morrison
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.mllist
import Mathlib.tactic.solve_by_elim
import Mathlib.PostPort
universes l
namespace Mathlib
/-!
# `suggest` and `library_search`
`suggest` and `library_search` are a pair of tactics for applying lemmas from the library to the
current goal.
* `suggest` prints a list of `exact ...` or `refine ...` statements, which may produce new goals
* `library_search` prints a single `exact ...` which closes the goal, or fails
-/
namespace tactic
namespace suggest
/-- Map a name (typically a head symbol) to a "canonical" definitional synonym.
Given a name `n`, we want a name `n'` such that a sufficiently applied
expression with head symbol `n` is always definitionally equal to an expression
with head symbol `n'`.
Thus, we can search through all lemmas with a result type of `n'`
to solve a goal with head symbol `n`.
For example, `>` is mapped to `<` because `a > b` is definitionally equal to `b < a`,
and `not` is mapped to `false` because `¬ a` is definitionally equal to `p → false`
The default is that the original argument is returned, so `<` is just mapped to `<`.
`normalize_synonym` is called for every lemma in the library, so it needs to be fast.
-/
-- TODO this is a hack; if you suspect more cases here would help, please report them
/--
Compute the head symbol of an expression, then normalise synonyms.
This is only used when analysing the goal, so it is okay to do more expensive analysis here.
-/
-- We may want to tweak this further?
-- We first have a various "customisations":
-- Because in `ℕ` `a.succ ≤ b` is definitionally `a < b`,
-- we add some special cases to allow looking for `<` lemmas even when the goal has a `≤`.
-- Note we only do this in the `ℕ` case, for performance.
-- And then the generic cases:
/--
A declaration can match the head symbol of the current goal in four possible ways:
* `ex` : an exact match
* `mp` : the declaration returns an `iff`, and the right hand side matches the goal
* `mpr` : the declaration returns an `iff`, and the left hand side matches the goal
* `both`: the declaration returns an `iff`, and both sides match the goal
-/
inductive head_symbol_match
where
| ex : head_symbol_match
| mp : head_symbol_match
| mpr : head_symbol_match
| both : head_symbol_match
/-- a textual representation of a `head_symbol_match`, for trace debugging. -/
def head_symbol_match.to_string : head_symbol_match → string :=
sorry
/-- Determine if, and in which way, a given expression matches the specified head symbol. -/
/-- A package of `declaration` metadata, including the way in which its type matches the head symbol
which we are searching for. -/
/--
Generate a `decl_data` from the given declaration if
it matches the head symbol `hs` for the current goal.
-/
-- We used to check here for private declarations, or declarations with certain suffixes.
-- It turns out `apply` is so fast, it's better to just try them all.
/-- Retrieve all library definitions with a given head symbol. -/
/--
We unpack any element of a list of `decl_data` corresponding to an `↔` statement that could apply
in both directions into two separate elements.
This ensures that both directions can be independently returned by `suggest`,
and avoids a problem where the application of one direction prevents
the application of the other direction. (See `exp_le_exp` in the tests.)
-/
/--
Apply the lemma `e`, then attempt to close all goals using
`solve_by_elim opt`, failing if `close_goals = tt`
and there are any goals remaining.
Returns the number of subgoals which were closed using `solve_by_elim`.
-/
-- Implementation note: as this is used by both `library_search` and `suggest`,
-- we first run `solve_by_elim` separately on the independent goals,
-- whether or not `close_goals` is set,
-- and then run `solve_by_elim { all_goals := tt }`,
-- requiring that it succeeds if `close_goals = tt`.
/--
Apply the declaration `d` (or the forward and backward implications separately, if it is an `iff`),
and then attempt to solve the subgoal using `apply_and_solve`.
Returns the number of subgoals successfully closed.
-/
/-- An `application` records the result of a successful application of a library lemma. -/
end suggest
-- Call `apply_declaration`, then prepare the tactic script and
-- count the number of local hypotheses used.
-- (This tactic block is only executed when we evaluate the mllist,
-- so we need to do the `focus1` here.)
-- implementation note: we produce a `tactic (mllist tactic application)` first,
-- because it's easier to work in the tactic monad, but in a moment we squash this
-- down to an `mllist tactic application`.
/--
The core `suggest` tactic.
It attempts to apply a declaration from the library,
then solve new goals using `solve_by_elim`.
It returns a list of `application`s consisting of fields:
* `state`, a tactic state resulting from the successful application of a declaration from
the library,
* `script`, a string of the form `Try this: refine ...` or `Try this: exact ...` which will
reproduce that tactic state,
* `decl`, an `option declaration` indicating the declaration that was applied
(or none, if `solve_by_elim` succeeded),
* `num_goals`, the number of remaining goals, and
* `hyps_used`, the number of local hypotheses used in the solution.
-/
/--
See `suggest_core`.
Returns a list of at most `limit` `application`s,
sorted by number of goals, and then (reverse) number of hypotheses used.
-/
/--
Returns a list of at most `limit` strings, of the form `Try this: exact ...` or
`Try this: refine ...`, which make progress on the current goal using a declaration
from the library.
-/
/--
Returns a string of the form `Try this: exact ...`, which closes the current goal.
-/
namespace interactive
/--
`suggest` tries to apply suitable theorems/defs from the library, and generates
a list of `exact ...` or `refine ...` scripts that could be used at this step.
It leaves the tactic state unchanged. It is intended as a complement of the search
function in your editor, the `#find` tactic, and `library_search`.
`suggest` takes an optional natural number `num` as input and returns the first `num`
(or less, if all possibilities are exhausted) possibilities ordered by length of lemma names.
The default for `num` is `50`.
For performance reasons `suggest` uses monadic lazy lists (`mllist`). This means that
`suggest` might miss some results if `num` is not large enough. However, because
`suggest` uses monadic lazy lists, smaller values of `num` run faster than larger values.
You can add additional lemmas to be used along with local hypotheses
after the application of a library lemma,
using the same syntax as for `solve_by_elim`, e.g.
```
example {a b c d: nat} (h₁ : a < c) (h₂ : b < d) : max (c + d) (a + b) = (c + d) :=
begin
suggest [add_lt_add], -- Says: `Try this: exact max_eq_left_of_lt (add_lt_add h₁ h₂)`
end
```
You can also use `suggest with attr` to include all lemmas with the attribute `attr`.
-/
/--
`suggest` lists possible usages of the `refine` tactic and leaves the tactic state unchanged.
It is intended as a complement of the search function in your editor, the `#find` tactic, and
`library_search`.
`suggest` takes an optional natural number `num` as input and returns the first `num` (or less, if
all possibilities are exhausted) possibilities ordered by length of lemma names.
The default for `num` is `50`.
For performance reasons `suggest` uses monadic lazy lists (`mllist`). This means that `suggest`
might miss some results if `num` is not large enough. However, because `suggest` uses monadic
lazy lists, smaller values of `num` run faster than larger values.
An example of `suggest` in action,
```lean
example (n : nat) : n < n + 1 :=
begin suggest, sorry end
```
prints the list,
```lean
Try this: exact nat.lt.base n
Try this: exact nat.lt_succ_self n
Try this: refine not_le.mp _
Try this: refine gt_iff_lt.mp _
Try this: refine nat.lt.step _
Try this: refine lt_of_not_ge _
...
```
-/
-- Turn off `Try this: exact ...` trace message for `library_search`
/--
`library_search` is a tactic to identify existing lemmas in the library. It tries to close the
current goal by applying a lemma from the library, then discharging any new goals using
`solve_by_elim`.
If it succeeds, it prints a trace message `exact ...` which can replace the invocation
of `library_search`.
Typical usage is:
```lean
example (n m k : ℕ) : n * (m - k) = n * m - n * k :=
by library_search -- Try this: exact nat.mul_sub_left_distrib n m k
```
By default `library_search` only unfolds `reducible` definitions
when attempting to match lemmas against the goal.
Previously, it would unfold most definitions, sometimes giving surprising answers, or slow answers.
The old behaviour is still available via `library_search!`.
You can add additional lemmas to be used along with local hypotheses
after the application of a library lemma,
using the same syntax as for `solve_by_elim`, e.g.
```
example {a b c d: nat} (h₁ : a < c) (h₂ : b < d) : max (c + d) (a + b) = (c + d) :=
begin
library_search [add_lt_add], -- Says: `Try this: exact max_eq_left_of_lt (add_lt_add h₁ h₂)`
end
```
You can also use `library_search with attr` to include all lemmas with the attribute `attr`.
-/
end interactive
/-- Invoking the hole command `library_search` ("Use `library_search` to complete the goal") calls
the tactic `library_search` to produce a proof term with the type of the hole.
Running it on
```lean
example : 0 < 1 :=
{!!}
```
produces
```lean
example : 0 < 1 :=
nat.one_pos
```
-/
|
20b94c9fde7be9137cfb6e064a6a8d2c69478c0b
|
32317185abf7e7c963f4c67c190aec61af6b3628
|
/library/algebra/ring_power.lean
|
bcde5a2eafdacfda35b2dbf76d2f2e3c22acb3e3
|
[
"Apache-2.0"
] |
permissive
|
Andrew-Zipperer-unorganized/lean
|
198a2317f21198cd8d26e7085e484b86277f17f7
|
dcb35008e1474a0abebe632b1dced120e5f8c009
|
refs/heads/master
| 1,622,526,520,945
| 1,453,576,559,000
| 1,454,612,842,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 4,856
|
lean
|
/-
Copyright (c) 2015 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Jeremy Avigad
Properties of the power operation in various structures, including ordered rings and fields.
-/
import .group_power .ordered_field
open nat
variable {A : Type}
section semiring
variable [s : semiring A]
include s
definition semiring_has_pow_nat [reducible] [instance] : has_pow_nat A :=
monoid_has_pow_nat
theorem zero_pow {m : ℕ} (mpos : m > 0) : 0^m = (0 : A) :=
have h₁ : ∀ m : nat, (0 : A)^(succ m) = (0 : A),
begin
intro m, induction m,
rewrite pow_one,
apply zero_mul
end,
obtain m' (h₂ : m = succ m'), from exists_eq_succ_of_pos mpos,
show 0^m = 0, by rewrite h₂; apply h₁
end semiring
section integral_domain
variable [s : integral_domain A]
include s
definition integral_domain_has_pow_nat [reducible] [instance] : has_pow_nat A :=
monoid_has_pow_nat
theorem eq_zero_of_pow_eq_zero {a : A} {m : ℕ} (H : a^m = 0) : a = 0 :=
or.elim (eq_zero_or_pos m)
(suppose m = 0,
by rewrite [`m = 0` at H, pow_zero at H]; apply absurd H (ne.symm zero_ne_one))
(suppose m > 0,
have h₁ : ∀ m, a^succ m = 0 → a = 0,
begin
intro m,
induction m with m ih,
{rewrite pow_one; intros; assumption},
rewrite pow_succ,
intro H,
cases eq_zero_or_eq_zero_of_mul_eq_zero H with h₃ h₄,
assumption,
exact ih h₄
end,
obtain m' (h₂ : m = succ m'), from exists_eq_succ_of_pos `m > 0`,
show a = 0, by rewrite h₂ at H; apply h₁ m' H)
theorem pow_ne_zero_of_ne_zero {a : A} {m : ℕ} (H : a ≠ 0) : a^m ≠ 0 :=
assume H', H (eq_zero_of_pow_eq_zero H')
end integral_domain
section division_ring
variable [s : division_ring A]
include s
theorem division_ring.pow_ne_zero_of_ne_zero {a : A} {m : ℕ} (H : a ≠ 0) : a^m ≠ 0 :=
or.elim (eq_zero_or_pos m)
(suppose m = 0,
by rewrite [`m = 0`, pow_zero]; exact (ne.symm zero_ne_one))
(suppose m > 0,
have h₁ : ∀ m, a^succ m ≠ 0,
begin
intro m,
induction m with m ih,
{rewrite pow_one; assumption},
rewrite pow_succ,
apply division_ring.mul_ne_zero H ih
end,
obtain m' (h₂ : m = succ m'), from exists_eq_succ_of_pos `m > 0`,
show a^m ≠ 0, by rewrite h₂; apply h₁ m')
end division_ring
section linear_ordered_semiring
variable [s : linear_ordered_semiring A]
include s
theorem pow_pos_of_pos {x : A} (i : ℕ) (H : x > 0) : x^i > 0 :=
begin
induction i with [j, ih],
{show (1 : A) > 0, from zero_lt_one},
{show x^(succ j) > 0, from mul_pos H ih}
end
theorem pow_nonneg_of_nonneg {x : A} (i : ℕ) (H : x ≥ 0) : x^i ≥ 0 :=
begin
induction i with j ih,
{show (1 : A) ≥ 0, from le_of_lt zero_lt_one},
{show x^(succ j) ≥ 0, from mul_nonneg H ih}
end
theorem pow_le_pow_of_le {x y : A} (i : ℕ) (H₁ : 0 ≤ x) (H₂ : x ≤ y) : x^i ≤ y^i :=
begin
induction i with i ih,
{rewrite *pow_zero, apply le.refl},
rewrite *pow_succ,
have H : 0 ≤ x^i, from pow_nonneg_of_nonneg i H₁,
apply mul_le_mul H₂ ih H (le.trans H₁ H₂)
end
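-- Powers of an element that is at least `1` stay at least `1`; powers of an element strictly
-- greater than `1` are strictly greater than `1` once the exponent is positive.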
theorem pow_ge_one {x : A} (i : ℕ) (xge1 : x ≥ 1) : x^i ≥ 1 :=
assert H : x^i ≥ 1^i, from pow_le_pow_of_le i (le_of_lt zero_lt_one) xge1,
by rewrite one_pow at H; exact H
theorem pow_gt_one {x : A} {i : ℕ} (xgt1 : x > 1) (ipos : i > 0) : x^i > 1 :=
assert xpos : x > 0, from lt.trans zero_lt_one xgt1,
begin
induction i with [i, ih],
{exfalso, exact !lt.irrefl ipos},
have xige1 : x^i ≥ 1, from pow_ge_one _ (le_of_lt xgt1),
rewrite [pow_succ, -mul_one 1],
apply mul_lt_mul xgt1 xige1 zero_lt_one,
apply le_of_lt xpos
end
end linear_ordered_semiring
section decidable_linear_ordered_comm_ring
variable [s : decidable_linear_ordered_comm_ring A]
include s
definition decidable_linear_ordered_comm_ring_has_pow_nat [reducible] [instance] : has_pow_nat A :=
monoid_has_pow_nat
theorem abs_pow (a : A) (n : ℕ) : abs (a^n) = abs a^n :=
begin
induction n with n ih,
rewrite [*pow_zero, (abs_of_nonneg zero_le_one : abs (1 : A) = 1)],
rewrite [*pow_succ, abs_mul, ih]
end
end decidable_linear_ordered_comm_ring
section field
variable [s : field A]
include s
theorem field.div_pow (a : A) {b : A} {n : ℕ} (bnz : b ≠ 0) : (a / b)^n = a^n / b^n :=
begin
induction n with n ih,
rewrite [*pow_zero, div_one],
have bnnz : b^n ≠ 0, from division_ring.pow_ne_zero_of_ne_zero bnz,
rewrite [*pow_succ, ih, !field.div_mul_div bnz bnnz]
end
end field
section discrete_field
variable [s : discrete_field A]
include s
theorem div_pow (a : A) {b : A} {n : ℕ} : (a / b)^n = a^n / b^n :=
begin
induction n with n ih,
rewrite [*pow_zero, div_one],
rewrite [*pow_succ, ih, div_mul_div]
end
end discrete_field
|
f180f0a69b5ff863d7c02d5cfa2803f28637bbee
|
bb31430994044506fa42fd667e2d556327e18dfe
|
/src/algebra/smul_with_zero.lean
|
e37fcce9ff05c9b41e84d17fb6128931ba0bb42a
|
[
"Apache-2.0"
] |
permissive
|
sgouezel/mathlib
|
0cb4e5335a2ba189fa7af96d83a377f83270e503
|
00638177efd1b2534fc5269363ebf42a7871df9a
|
refs/heads/master
| 1,674,527,483,042
| 1,673,665,568,000
| 1,673,665,568,000
| 119,598,202
| 0
| 0
| null | 1,517,348,647,000
| 1,517,348,646,000
| null |
UTF-8
|
Lean
| false
| false
| 7,516
|
lean
|
/-
Copyright (c) 2021 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/
import algebra.group_power.basic
import algebra.ring.opposite
import group_theory.group_action.opposite
import group_theory.group_action.prod
/-!
# Introduce `smul_with_zero`
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In analogy with the usual monoid action on a Type `M`, we introduce an action of a
`monoid_with_zero` on a Type with `0`.
In particular, for Types `R` and `M`, both containing `0`, we define `smul_with_zero R M` to
be the typeclass where the products `r • 0` and `0 • m` vanish for all `r : R` and all `m : M`.
Moreover, in the case in which `R` is a `monoid_with_zero`, we introduce the typeclass
`mul_action_with_zero R M`, mimicking group actions and having an absorbing `0` in `R`.
Thus, the action is required to be compatible with
* the unit of the monoid, acting as the identity;
* the zero of the monoid_with_zero, acting as zero;
* associativity of the monoid.
We also add an `instance`:
* any `monoid_with_zero` has a `mul_action_with_zero R R` acting on itself.
## Main declarations
* `smul_monoid_with_zero_hom`: Scalar multiplication bundled as a morphism of monoids with zero.
-/
variables {R R' M M' : Type*}
section has_zero
variables (R M)
/-- `smul_with_zero` is a class consisting of a Type `R` with `0 ∈ R` and a scalar multiplication
of `R` on a Type `M` with `0`, such that the equality `r • m = 0` holds if at least one among `r`
or `m` equals `0`. -/
class smul_with_zero [has_zero R] [has_zero M] extends smul_zero_class R M :=
(zero_smul : ∀ m : M, (0 : R) • m = 0)
instance mul_zero_class.to_smul_with_zero [mul_zero_class R] : smul_with_zero R R :=
{ smul := (*),
smul_zero := mul_zero,
zero_smul := zero_mul }
/-- Like `mul_zero_class.to_smul_with_zero`, but multiplies on the right. -/
instance mul_zero_class.to_opposite_smul_with_zero [mul_zero_class R] : smul_with_zero Rᵐᵒᵖ R :=
{ smul := (•),
smul_zero := λ r, zero_mul _,
zero_smul := mul_zero }
variables (R) {M} [has_zero R] [has_zero M] [smul_with_zero R M]
@[simp] lemma zero_smul (m : M) : (0 : R) • m = 0 := smul_with_zero.zero_smul m
variables {R} {a : R} {b : M}
lemma smul_eq_zero_of_left (h : a = 0) (b : M) : a • b = 0 := h.symm ▸ zero_smul _ b
lemma smul_eq_zero_of_right (a : R) (h : b = 0) : a • b = 0 := h.symm ▸ smul_zero a
lemma left_ne_zero_of_smul : a • b ≠ 0 → a ≠ 0 := mt $ λ h, smul_eq_zero_of_left h b
lemma right_ne_zero_of_smul : a • b ≠ 0 → b ≠ 0 := mt $ smul_eq_zero_of_right a
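-- For illustration (not a lemma of this file): both absorption laws, packaged together.
example (r : R) (m : M) : (0 : R) • m = 0 ∧ r • (0 : M) = 0 := ⟨zero_smul R m, smul_zero r⟩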
variables {R M} [has_zero R'] [has_zero M'] [has_smul R M']
/-- Pullback a `smul_with_zero` structure along an injective zero-preserving homomorphism.
See note [reducible non-instances]. -/
@[reducible]
protected def function.injective.smul_with_zero
(f : zero_hom M' M) (hf : function.injective f) (smul : ∀ (a : R) b, f (a • b) = a • f b) :
smul_with_zero R M' :=
{ smul := (•),
zero_smul := λ a, hf $ by simp [smul],
smul_zero := λ a, hf $ by simp [smul]}
/-- Pushforward a `smul_with_zero` structure along a surjective zero-preserving homomorphism.
See note [reducible non-instances]. -/
@[reducible]
protected def function.surjective.smul_with_zero
(f : zero_hom M M') (hf : function.surjective f) (smul : ∀ (a : R) b, f (a • b) = a • f b) :
smul_with_zero R M' :=
{ smul := (•),
zero_smul := λ m, by { rcases hf m with ⟨x, rfl⟩, simp [←smul] },
smul_zero := λ c, by simp only [← f.map_zero, ← smul, smul_zero] }
variables (M)
/-- Compose a `smul_with_zero` with a `zero_hom`, with action `f r' • m` -/
def smul_with_zero.comp_hom (f : zero_hom R' R) : smul_with_zero R' M :=
{ smul := (•) ∘ f,
smul_zero := λ m, by simp,
zero_smul := λ m, by simp }
end has_zero
instance add_monoid.nat_smul_with_zero [add_monoid M] : smul_with_zero ℕ M :=
{ smul_zero := nsmul_zero,
zero_smul := zero_nsmul }
instance add_group.int_smul_with_zero [add_group M] : smul_with_zero ℤ M :=
{ smul_zero := zsmul_zero,
zero_smul := zero_zsmul }
section monoid_with_zero
variables [monoid_with_zero R] [monoid_with_zero R'] [has_zero M]
variables (R M)
/-- An action of a monoid with zero `R` on a Type `M`, also with `0`, extends `mul_action` and
is compatible with `0` (both in `R` and in `M`), with `1 ∈ R`, and with associativity of
multiplication on the monoid `M`. -/
class mul_action_with_zero extends mul_action R M :=
-- these fields are copied from `smul_with_zero`, as `extends` behaves poorly
(smul_zero : ∀ r : R, r • (0 : M) = 0)
(zero_smul : ∀ m : M, (0 : R) • m = 0)
@[priority 100] -- see Note [lower instance priority]
instance mul_action_with_zero.to_smul_with_zero [m : mul_action_with_zero R M] :
smul_with_zero R M :=
{..m}
/-- See also `semiring.to_module` -/
instance monoid_with_zero.to_mul_action_with_zero : mul_action_with_zero R R :=
{ ..mul_zero_class.to_smul_with_zero R,
..monoid.to_mul_action R }
/-- Like `monoid_with_zero.to_mul_action_with_zero`, but multiplies on the right. See also
`semiring.to_opposite_module` -/
instance monoid_with_zero.to_opposite_mul_action_with_zero : mul_action_with_zero Rᵐᵒᵖ R :=
{ ..mul_zero_class.to_opposite_smul_with_zero R,
..monoid.to_opposite_mul_action R }
variables {R M} [mul_action_with_zero R M] [has_zero M'] [has_smul R M']
/-- Pullback a `mul_action_with_zero` structure along an injective zero-preserving homomorphism.
See note [reducible non-instances]. -/
@[reducible]
protected def function.injective.mul_action_with_zero
(f : zero_hom M' M) (hf : function.injective f) (smul : ∀ (a : R) b, f (a • b) = a • f b) :
mul_action_with_zero R M' :=
{ ..hf.mul_action f smul, ..hf.smul_with_zero f smul }
/-- Pushforward a `mul_action_with_zero` structure along a surjective zero-preserving homomorphism.
See note [reducible non-instances]. -/
@[reducible]
protected def function.surjective.mul_action_with_zero
(f : zero_hom M M') (hf : function.surjective f) (smul : ∀ (a : R) b, f (a • b) = a • f b) :
mul_action_with_zero R M' :=
{ ..hf.mul_action f smul, ..hf.smul_with_zero f smul }
variables (M)
/-- Compose a `mul_action_with_zero` with a `monoid_with_zero_hom`, with action `f r' • m` -/
def mul_action_with_zero.comp_hom (f : R' →*₀ R) : mul_action_with_zero R' M :=
{ smul := (•) ∘ f,
mul_smul := λ r s m, by simp [mul_smul],
one_smul := λ m, by simp,
.. smul_with_zero.comp_hom M f.to_zero_hom}
end monoid_with_zero
section group_with_zero
variables {α β : Type*} [group_with_zero α] [group_with_zero β] [mul_action_with_zero α β]
lemma smul_inv₀ [smul_comm_class α β β] [is_scalar_tower α β β] (c : α) (x : β) :
(c • x)⁻¹ = c⁻¹ • x⁻¹ :=
begin
obtain rfl | hc := eq_or_ne c 0,
{ simp only [inv_zero, zero_smul] },
obtain rfl | hx := eq_or_ne x 0,
{ simp only [inv_zero, smul_zero] },
{ refine inv_eq_of_mul_eq_one_left _,
rw [smul_mul_smul, inv_mul_cancel hc, inv_mul_cancel hx, one_smul] }
end
end group_with_zero
/-- Scalar multiplication as a monoid homomorphism with zero. -/
@[simps]
def smul_monoid_with_zero_hom {α β : Type*} [monoid_with_zero α] [mul_zero_one_class β]
[mul_action_with_zero α β] [is_scalar_tower α β β] [smul_comm_class α β β] :
α × β →*₀ β :=
{ map_zero' := smul_zero _,
.. smul_monoid_hom }
|
615ad0d565d81ab44c5cf7a1f3f9388753e76c0a
|
ee8cdbabf07f77e7be63a449b8483ce308d37218
|
/lean/src/valid/mathd-numbertheory-200.lean
|
10b7238abf08900aa1c758993fbe065317b6c32f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
zeta1999/miniF2F
|
6d66c75d1c18152e224d07d5eed57624f731d4b7
|
c1ba9629559c5273c92ec226894baa0c1ce27861
|
refs/heads/main
| 1,681,897,460,642
| 1,620,646,361,000
| 1,620,646,361,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 238
|
lean
|
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng
-/
import data.nat.basic
import data.real.basic
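-- Since 139 = 12 * 11 + 7, the remainder is 7; `norm_num` checks this directly.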
example : 139 % 11 = 7 :=
begin
norm_num,
end
|
5b1eb686dafc7fe030c5befa59b2e4aa2dba2d55
|
367134ba5a65885e863bdc4507601606690974c1
|
/src/category_theory/monoidal/functor.lean
|
9c15f78506d26987b16233b3f7c19f8fbb97b499
|
[
"Apache-2.0"
] |
permissive
|
kodyvajjha/mathlib
|
9bead00e90f68269a313f45f5561766cfd8d5cad
|
b98af5dd79e13a38d84438b850a2e8858ec21284
|
refs/heads/master
| 1,624,350,366,310
| 1,615,563,062,000
| 1,615,563,062,000
| 162,666,963
| 0
| 0
|
Apache-2.0
| 1,545,367,651,000
| 1,545,367,651,000
| null |
UTF-8
|
Lean
| false
| false
| 12,529
|
lean
|
/-
Copyright (c) 2018 Michael Jendrusch. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Jendrusch, Scott Morrison, Bhavik Mehta
-/
import category_theory.monoidal.category
import category_theory.adjunction.basic
/-!
# (Lax) monoidal functors
A lax monoidal functor `F` between monoidal categories `C` and `D`
is a functor between the underlying categories equipped with morphisms
* `ε : 𝟙_ D ⟶ F.obj (𝟙_ C)` (called the unit morphism)
* `μ X Y : (F.obj X) ⊗ (F.obj Y) ⟶ F.obj (X ⊗ Y)` (called the tensorator, or strength).
satisfying various axioms.
A monoidal functor is a lax monoidal functor for which `ε` and `μ` are isomorphisms.
We show that the composition of (lax) monoidal functors gives a (lax) monoidal functor.
See also `category_theory.monoidal.functorial` for a typeclass decorating an object-level
function with the additional data of a monoidal functor.
This is useful when stating that a pre-existing functor is monoidal.
See `category_theory.monoidal.natural_transformation` for monoidal natural transformations.
We show in `category_theory.monoidal.Mon_` that lax monoidal functors take monoid objects
to monoid objects.
## Future work
* Oplax monoidal functors.
## References
See https://stacks.math.columbia.edu/tag/0FFL.
-/
open category_theory
universes v₁ v₂ v₃ u₁ u₂ u₃
open category_theory.category
open category_theory.functor
namespace category_theory
section
open monoidal_category
variables (C : Type u₁) [category.{v₁} C] [monoidal_category.{v₁} C]
(D : Type u₂) [category.{v₂} D] [monoidal_category.{v₂} D]
/-- A lax monoidal functor is a functor `F : C ⥤ D` between monoidal categories,
equipped with morphisms `ε : 𝟙_ D ⟶ F.obj (𝟙_ C)` and `μ X Y : F.obj X ⊗ F.obj Y ⟶ F.obj (X ⊗ Y)`,
satisfying the appropriate coherences. -/
structure lax_monoidal_functor extends C ⥤ D :=
-- unit morphism
(ε : 𝟙_ D ⟶ obj (𝟙_ C))
-- tensorator
(μ : Π X Y : C, (obj X) ⊗ (obj Y) ⟶ obj (X ⊗ Y))
(μ_natural' : ∀ {X Y X' Y' : C}
(f : X ⟶ Y) (g : X' ⟶ Y'),
((map f) ⊗ (map g)) ≫ μ Y Y' = μ X X' ≫ map (f ⊗ g)
. obviously)
-- associativity of the tensorator
(associativity' : ∀ (X Y Z : C),
(μ X Y ⊗ 𝟙 (obj Z)) ≫ μ (X ⊗ Y) Z ≫ map (α_ X Y Z).hom
= (α_ (obj X) (obj Y) (obj Z)).hom ≫ (𝟙 (obj X) ⊗ μ Y Z) ≫ μ X (Y ⊗ Z)
. obviously)
-- unitality
(left_unitality' : ∀ X : C,
(λ_ (obj X)).hom
= (ε ⊗ 𝟙 (obj X)) ≫ μ (𝟙_ C) X ≫ map (λ_ X).hom
. obviously)
(right_unitality' : ∀ X : C,
(ρ_ (obj X)).hom
= (𝟙 (obj X) ⊗ ε) ≫ μ X (𝟙_ C) ≫ map (ρ_ X).hom
. obviously)
restate_axiom lax_monoidal_functor.μ_natural'
attribute [simp, reassoc] lax_monoidal_functor.μ_natural
restate_axiom lax_monoidal_functor.left_unitality'
attribute [simp] lax_monoidal_functor.left_unitality
restate_axiom lax_monoidal_functor.right_unitality'
attribute [simp] lax_monoidal_functor.right_unitality
restate_axiom lax_monoidal_functor.associativity'
attribute [simp, reassoc] lax_monoidal_functor.associativity
-- When `rewrite_search` lands, add @[search] attributes to
-- lax_monoidal_functor.μ_natural lax_monoidal_functor.left_unitality
-- lax_monoidal_functor.right_unitality lax_monoidal_functor.associativity
/--
A monoidal functor is a lax monoidal functor for which the unit morphism `ε` and the tensorator `μ` are isomorphisms.
See https://stacks.math.columbia.edu/tag/0FFL.
-/
structure monoidal_functor
extends lax_monoidal_functor.{v₁ v₂} C D :=
(ε_is_iso : is_iso ε . tactic.apply_instance)
(μ_is_iso : Π X Y : C, is_iso (μ X Y) . tactic.apply_instance)
attribute [instance] monoidal_functor.ε_is_iso monoidal_functor.μ_is_iso
variables {C D}
/--
The unit morphism of a (strong) monoidal functor as an isomorphism.
-/
def monoidal_functor.ε_iso (F : monoidal_functor.{v₁ v₂} C D) :
tensor_unit D ≅ F.obj (tensor_unit C) :=
as_iso F.ε
/--
The tensorator of a (strong) monoidal functor as an isomorphism.
-/
def monoidal_functor.μ_iso (F : monoidal_functor.{v₁ v₂} C D) (X Y : C) :
(F.obj X) ⊗ (F.obj Y) ≅ F.obj (X ⊗ Y) :=
as_iso (F.μ X Y)
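-- For illustration (not a lemma of this file): the forward direction of `μ_iso` should be
-- the tensorator itself, definitionally.
example (F : monoidal_functor.{v₁ v₂} C D) (X Y : C) : (F.μ_iso X Y).hom = F.μ X Y := rfl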
end
open monoidal_category
namespace lax_monoidal_functor
variables (C : Type u₁) [category.{v₁} C] [monoidal_category.{v₁} C]
/-- The identity lax monoidal functor. -/
@[simps] def id : lax_monoidal_functor.{v₁ v₁} C C :=
{ ε := 𝟙 _,
μ := λ X Y, 𝟙 _,
.. 𝟭 C }
instance : inhabited (lax_monoidal_functor C C) := ⟨id C⟩
end lax_monoidal_functor
namespace monoidal_functor
section
variables {C : Type u₁} [category.{v₁} C] [monoidal_category.{v₁} C]
variables {D : Type u₂} [category.{v₂} D] [monoidal_category.{v₂} D]
lemma map_tensor (F : monoidal_functor.{v₁ v₂} C D) {X Y X' Y' : C} (f : X ⟶ Y) (g : X' ⟶ Y') :
F.map (f ⊗ g) = inv (F.μ X X') ≫ ((F.map f) ⊗ (F.map g)) ≫ F.μ Y Y' :=
by simp
lemma map_left_unitor (F : monoidal_functor.{v₁ v₂} C D) (X : C) :
F.map (λ_ X).hom = inv (F.μ (𝟙_ C) X) ≫ (inv F.ε ⊗ 𝟙 (F.obj X)) ≫ (λ_ (F.obj X)).hom :=
begin
simp only [lax_monoidal_functor.left_unitality],
slice_rhs 2 3 { rw ←comp_tensor_id, simp, },
simp,
end
lemma map_right_unitor (F : monoidal_functor.{v₁ v₂} C D) (X : C) :
F.map (ρ_ X).hom = inv (F.μ X (𝟙_ C)) ≫ (𝟙 (F.obj X) ⊗ inv F.ε) ≫ (ρ_ (F.obj X)).hom :=
begin
simp only [lax_monoidal_functor.right_unitality],
slice_rhs 2 3 { rw ←id_tensor_comp, simp, },
simp,
end
/-- The tensorator as a natural isomorphism. -/
def μ_nat_iso (F : monoidal_functor.{v₁ v₂} C D) :
(functor.prod F.to_functor F.to_functor) ⋙ (tensor D) ≅ (tensor C) ⋙ F.to_functor :=
nat_iso.of_components
(by { intros, apply F.μ_iso })
(by { intros, apply F.to_lax_monoidal_functor.μ_natural })
end
section
variables (C : Type u₁) [category.{v₁} C] [monoidal_category.{v₁} C]
/-- The identity monoidal functor. -/
@[simps] def id : monoidal_functor.{v₁ v₁} C C :=
{ ε := 𝟙 _,
μ := λ X Y, 𝟙 _,
.. 𝟭 C }
instance : inhabited (monoidal_functor C C) := ⟨id C⟩
end
end monoidal_functor
variables {C : Type u₁} [category.{v₁} C] [monoidal_category.{v₁} C]
variables {D : Type u₂} [category.{v₂} D] [monoidal_category.{v₂} D]
variables {E : Type u₃} [category.{v₃} E] [monoidal_category.{v₃} E]
namespace lax_monoidal_functor
variables (F : lax_monoidal_functor.{v₁ v₂} C D) (G : lax_monoidal_functor.{v₂ v₃} D E)
-- The proofs here are horrendous; rewrite_search helps a lot.
/-- The composition of two lax monoidal functors is again lax monoidal. -/
@[simps] def comp : lax_monoidal_functor.{v₁ v₃} C E :=
{ ε := G.ε ≫ (G.map F.ε),
μ := λ X Y, G.μ (F.obj X) (F.obj Y) ≫ G.map (F.μ X Y),
μ_natural' := λ _ _ _ _ f g,
begin
simp only [functor.comp_map, assoc],
rw [←category.assoc, lax_monoidal_functor.μ_natural, category.assoc, ←map_comp, ←map_comp,
←lax_monoidal_functor.μ_natural]
end,
associativity' := λ X Y Z,
begin
dsimp,
rw id_tensor_comp,
slice_rhs 3 4 { rw [← G.to_functor.map_id, G.μ_natural], },
slice_rhs 1 3 { rw ←G.associativity, },
rw comp_tensor_id,
slice_lhs 2 3 { rw [← G.to_functor.map_id, G.μ_natural], },
rw [category.assoc, category.assoc, category.assoc, category.assoc, category.assoc,
←G.to_functor.map_comp, ←G.to_functor.map_comp, ←G.to_functor.map_comp,
←G.to_functor.map_comp, F.associativity],
end,
left_unitality' := λ X,
begin
dsimp,
rw [G.left_unitality, comp_tensor_id, category.assoc, category.assoc],
apply congr_arg,
rw [F.left_unitality, map_comp, ←nat_trans.id_app, ←category.assoc,
←lax_monoidal_functor.μ_natural, nat_trans.id_app, map_id, ←category.assoc, map_comp],
end,
right_unitality' := λ X,
begin
dsimp,
rw [G.right_unitality, id_tensor_comp, category.assoc, category.assoc],
apply congr_arg,
rw [F.right_unitality, map_comp, ←nat_trans.id_app, ←category.assoc,
←lax_monoidal_functor.μ_natural, nat_trans.id_app, map_id, ←category.assoc, map_comp],
end,
.. (F.to_functor) ⋙ (G.to_functor) }.
infixr ` ⊗⋙ `:80 := comp
end lax_monoidal_functor
namespace monoidal_functor
variables (F : monoidal_functor.{v₁ v₂} C D) (G : monoidal_functor.{v₂ v₃} D E)
/-- The composition of two monoidal functors is again monoidal. -/
@[simps]
def comp : monoidal_functor.{v₁ v₃} C E :=
{ ε_is_iso := by { dsimp, apply_instance },
μ_is_iso := by { dsimp, apply_instance },
.. (F.to_lax_monoidal_functor).comp (G.to_lax_monoidal_functor) }.
infixr ` ⊗⋙ `:80 := comp -- We overload notation; potentially dangerous, but it seems to work.
end monoidal_functor
/--
If we have a right adjoint functor `G` to a monoidal functor `F`, then `G` has a lax monoidal
structure as well.
-/
@[simps]
def monoidal_adjoint (F : monoidal_functor C D) {G : D ⥤ C} (h : F.to_functor ⊣ G) :
lax_monoidal_functor D C :=
{ to_functor := G,
ε := h.hom_equiv _ _ (inv F.ε),
μ := λ X Y,
h.hom_equiv _ (X ⊗ Y) (inv (F.μ (G.obj X) (G.obj Y)) ≫ (h.counit.app X ⊗ h.counit.app Y)),
μ_natural' := λ X Y X' Y' f g,
begin
rw [←h.hom_equiv_naturality_left, ←h.hom_equiv_naturality_right, equiv.apply_eq_iff_eq, assoc,
is_iso.eq_inv_comp, ←F.to_lax_monoidal_functor.μ_natural_assoc, is_iso.hom_inv_id_assoc,
←tensor_comp, adjunction.counit_naturality, adjunction.counit_naturality, tensor_comp],
end,
associativity' := λ X Y Z,
begin
rw [←h.hom_equiv_naturality_right, ←h.hom_equiv_naturality_left, ←h.hom_equiv_naturality_left,
←h.hom_equiv_naturality_left, equiv.apply_eq_iff_eq,
← cancel_epi (F.to_lax_monoidal_functor.μ (G.obj X ⊗ G.obj Y) (G.obj Z)),
← cancel_epi (F.to_lax_monoidal_functor.μ (G.obj X) (G.obj Y) ⊗ 𝟙 (F.obj (G.obj Z))),
F.to_lax_monoidal_functor.associativity_assoc (G.obj X) (G.obj Y) (G.obj Z),
←F.to_lax_monoidal_functor.μ_natural_assoc, assoc, is_iso.hom_inv_id_assoc,
←F.to_lax_monoidal_functor.μ_natural_assoc, is_iso.hom_inv_id_assoc, ←tensor_comp,
←tensor_comp, id_comp, functor.map_id, functor.map_id, id_comp, ←tensor_comp_assoc,
←tensor_comp_assoc, id_comp, id_comp, h.hom_equiv_unit, h.hom_equiv_unit, functor.map_comp,
assoc, assoc, h.counit_naturality, h.left_triangle_components_assoc, is_iso.hom_inv_id_assoc,
functor.map_comp, assoc, h.counit_naturality, h.left_triangle_components_assoc,
is_iso.hom_inv_id_assoc],
exact associator_naturality (h.counit.app X) (h.counit.app Y) (h.counit.app Z),
end,
left_unitality' := λ X,
begin
rw [←h.hom_equiv_naturality_right, ←h.hom_equiv_naturality_left, ←equiv.symm_apply_eq,
h.hom_equiv_counit, F.map_left_unitor, h.hom_equiv_unit, assoc, assoc, assoc, F.map_tensor,
assoc, assoc, is_iso.hom_inv_id_assoc, ←tensor_comp_assoc, functor.map_id, id_comp,
functor.map_comp, assoc, h.counit_naturality, h.left_triangle_components_assoc,
←left_unitor_naturality, ←tensor_comp_assoc, id_comp, comp_id],
end,
right_unitality' := λ X,
begin
rw [←h.hom_equiv_naturality_right, ←h.hom_equiv_naturality_left, ←equiv.symm_apply_eq,
h.hom_equiv_counit, F.map_right_unitor, assoc, assoc, ←right_unitor_naturality,
←tensor_comp_assoc, comp_id, id_comp, h.hom_equiv_unit, F.map_tensor, assoc, assoc, assoc,
is_iso.hom_inv_id_assoc, functor.map_comp, functor.map_id, ←tensor_comp_assoc, assoc,
h.counit_naturality, h.left_triangle_components_assoc, id_comp],
end }.
/-- If a monoidal functor `F` is an equivalence of categories then its inverse is also monoidal. -/
def monoidal_inverse (F : monoidal_functor C D) [is_equivalence F.to_functor] :
monoidal_functor D C :=
{ to_lax_monoidal_functor := monoidal_adjoint F (as_equivalence _).to_adjunction,
ε_is_iso := by { dsimp [equivalence.to_adjunction], apply_instance },
μ_is_iso := λ X Y, by { dsimp [equivalence.to_adjunction], apply_instance } }
@[simp]
lemma monoidal_inverse_to_functor (F : monoidal_functor C D) [is_equivalence F.to_functor] :
(monoidal_inverse F).to_functor = F.to_functor.inv := rfl
end category_theory
|
ed1ca50097767b825d9988f48ff8f86e3bcb9456
|
957a80ea22c5abb4f4670b250d55534d9db99108
|
/tests/lean/run/simp_univ_metavars.lean
|
74aa45569eab99351809b87d86d70657b9a69bbd
|
[
"Apache-2.0"
] |
permissive
|
GaloisInc/lean
|
aa1e64d604051e602fcf4610061314b9a37ab8cd
|
f1ec117a24459b59c6ff9e56a1d09d9e9e60a6c0
|
refs/heads/master
| 1,592,202,909,807
| 1,504,624,387,000
| 1,504,624,387,000
| 75,319,626
| 2
| 1
|
Apache-2.0
| 1,539,290,164,000
| 1,480,616,104,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 2,608
|
lean
|
meta def blast : tactic unit := using_smt $ return ()
structure { u v } Category :=
(Obj : Type u )
(Hom : Obj -> Obj -> Type v)
(identity : Π X : Obj, Hom X X)
(compose : Π ⦃X Y Z : Obj⦄, Hom X Y → Hom Y Z → Hom X Z)
(left_identity : ∀ ⦃X Y : Obj⦄ (f : Hom X Y), compose (identity _) f = f)
structure Functor (C : Category) (D : Category) :=
(onObjects : C^.Obj → D^.Obj)
(onMorphisms : Π ⦃X Y : C^.Obj⦄,
C^.Hom X Y → D^.Hom (onObjects X) (onObjects Y))
structure NaturalTransformation { C D : Category } ( F G : Functor C D ) :=
(components: Π X : C^.Obj, D^.Hom (F^.onObjects X) (G^.onObjects X))
definition IdentityNaturalTransformation { C D : Category } (F : Functor C D) : NaturalTransformation F F :=
{
components := λ X, D^.identity (F^.onObjects X)
}
definition vertical_composition_of_NaturalTransformations
{ C D : Category }
{ F G H : Functor C D }
( α : NaturalTransformation F G )
( β : NaturalTransformation G H ) : NaturalTransformation F H :=
{
components := λ X, D^.compose (α^.components X) (β^.components X)
}
-- We'll want to be able to prove that two natural transformations are equal if they are componentwise equal.
lemma NaturalTransformations_componentwise_equal
{ C D : Category }
{ F G : Functor C D }
( α β : NaturalTransformation F G )
( w : ∀ X : C^.Obj, α^.components X = β^.components X ) : α = β :=
begin
induction α with αc,
induction β with βc,
have hc : αc = βc := funext w,
subst hc
end
@[simp]
lemma vertical_composition_of_NaturalTransformations_components
{ C D : Category }
{ F G H : Functor C D }
{ α : NaturalTransformation F G }
{ β : NaturalTransformation G H }
{ X : C^.Obj } :
(vertical_composition_of_NaturalTransformations α β)^.components X = D^.compose (α^.components X) (β^.components X) :=
by blast
@[simp]
lemma IdentityNaturalTransformation_components
{ C D : Category }
{ F : Functor C D }
{ X : C^.Obj } :
(IdentityNaturalTransformation F)^.components X = D^.identity (F^.onObjects X) :=
by blast
definition FunctorCategory ( C D : Category ) : Category :=
{
Obj := Functor C D,
Hom := λ F G, NaturalTransformation F G,
identity := λ F, IdentityNaturalTransformation F,
compose := @vertical_composition_of_NaturalTransformations C D,
left_identity := begin
intros F G f,
apply NaturalTransformations_componentwise_equal,
intros,
simp [ D^.left_identity ]
end
}
|
ae7e589c198b3a3740b90fa0301d011d5fcdc5b0
|
5d95c8513fa8592ce314d1f40c23ad5eecfe1e34
|
/src/util/meta/exceptional.lean
|
360f2d4a31d8634c248900851a5ed7cf7ce1098d
|
[
"Apache-2.0"
] |
permissive
|
solovay/lean-universal
|
6b792513ced2fe82218e7828400743375dd59e24
|
417ed5e1b030e547912cbfefe34df9d3d01c2b65
|
refs/heads/master
| 1,598,052,603,315
| 1,565,981,123,000
| 1,565,981,123,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 476
|
lean
|
-- Copyright © 2019 François G. Dorais. All rights reserved.
-- Released under Apache 2.0 license as described in the file LICENSE.
namespace exceptional
meta def orelse (α : Type) : exceptional α → exceptional α → exceptional α
| (success x) _ := success x
| (exception _ _) (success x) := success x
| (exception _ m₁) (exception _ m₂) := exception α (λ o, m₁ o ++ "; " ++ m₂ o)
meta instance : has_orelse exceptional := ⟨orelse⟩
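-- With this instance, `e₁ <|> e₂` keeps the first success; when both computations fail,
-- the error messages are concatenated, separated by "; ".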
end exceptional
|
9ecccfd4d8ccbcbe8c263f2685e6336a724d9d3b
|
f618aea02cb4104ad34ecf3b9713065cc0d06103
|
/src/data/equiv/basic.lean
|
b3d89e07f747bcc26979c9236b62dc8e59f77c1e
|
[
"Apache-2.0"
] |
permissive
|
joehendrix/mathlib
|
84b6603f6be88a7e4d62f5b1b0cbb523bb82b9a5
|
c15eab34ad754f9ecd738525cb8b5a870e834ddc
|
refs/heads/master
| 1,589,606,591,630
| 1,555,946,393,000
| 1,555,946,393,000
| 182,813,854
| 0
| 0
| null | 1,555,946,309,000
| 1,555,946,308,000
| null |
UTF-8
|
Lean
| false
| false
| 33,432
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Mario Carneiro
In the standard library we cannot assume the univalence axiom.
We say two types are equivalent if they are isomorphic.
Two equivalent types have the same cardinality.
-/
import logic.function logic.unique data.set.basic data.bool data.quot
open function
universes u v w
variables {α : Sort u} {β : Sort v} {γ : Sort w}
/-- `α ≃ β` is the type of functions from `α → β` with a two-sided inverse. -/
structure equiv (α : Sort*) (β : Sort*) :=
(to_fun : α → β)
(inv_fun : β → α)
(left_inv : left_inverse inv_fun to_fun)
(right_inv : right_inverse inv_fun to_fun)
namespace equiv
/-- `perm α` is the type of bijections from `α` to itself. -/
@[reducible] def perm (α : Sort*) := equiv α α
infix ` ≃ `:50 := equiv
instance : has_coe_to_fun (α ≃ β) :=
⟨_, to_fun⟩
@[simp] theorem coe_fn_mk (f : α → β) (g l r) : (equiv.mk f g l r : α → β) = f :=
rfl
theorem eq_of_to_fun_eq : ∀ {e₁ e₂ : equiv α β}, (e₁ : α → β) = e₂ → e₁ = e₂
| ⟨f₁, g₁, l₁, r₁⟩ ⟨f₂, g₂, l₂, r₂⟩ h :=
have f₁ = f₂, from h,
have g₁ = g₂, from funext $ assume x,
have f₁ (g₁ x) = f₂ (g₂ x), from (r₁ x).trans (r₂ x).symm,
have f₁ (g₁ x) = f₁ (g₂ x), by subst f₂; exact this,
show g₁ x = g₂ x, from injective_of_left_inverse l₁ this,
by simp *
@[extensionality] lemma ext (f g : equiv α β) (H : ∀ x, f x = g x) : f = g :=
eq_of_to_fun_eq (funext H)
@[extensionality] lemma perm.ext (σ τ : equiv.perm α) (H : ∀ x, σ x = τ x) : σ = τ :=
equiv.ext _ _ H
@[refl] protected def refl (α : Sort*) : α ≃ α := ⟨id, id, λ x, rfl, λ x, rfl⟩
@[symm] protected def symm (e : α ≃ β) : β ≃ α := ⟨e.inv_fun, e.to_fun, e.right_inv, e.left_inv⟩
@[trans] protected def trans (e₁ : α ≃ β) (e₂ : β ≃ γ) : α ≃ γ :=
⟨e₂.to_fun ∘ e₁.to_fun, e₁.inv_fun ∘ e₂.inv_fun,
e₂.left_inv.comp e₁.left_inv, e₂.right_inv.comp e₁.right_inv⟩
protected theorem injective : ∀ f : α ≃ β, injective f
| ⟨f, g, h₁, h₂⟩ := injective_of_left_inverse h₁
protected theorem surjective : ∀ f : α ≃ β, surjective f
| ⟨f, g, h₁, h₂⟩ := surjective_of_has_right_inverse ⟨_, h₂⟩
protected theorem bijective (f : α ≃ β) : bijective f :=
⟨f.injective, f.surjective⟩
protected theorem subsingleton (e : α ≃ β) : ∀ [subsingleton β], subsingleton α
| ⟨H⟩ := ⟨λ a b, e.injective (H _ _)⟩
protected def decidable_eq (e : α ≃ β) [H : decidable_eq β] : decidable_eq α
| a b := decidable_of_iff _ e.injective.eq_iff
protected def cast {α β : Sort*} (h : α = β) : α ≃ β :=
⟨cast h, cast h.symm, λ x, by cases h; refl, λ x, by cases h; refl⟩
@[simp] theorem coe_fn_symm_mk (f : α → β) (g l r) : ((equiv.mk f g l r).symm : β → α) = g :=
rfl
@[simp] theorem refl_apply (x : α) : equiv.refl α x = x := rfl
@[simp] theorem trans_apply (f : α ≃ β) (g : β ≃ γ) (a : α) : (f.trans g) a = g (f a) := rfl
@[simp] theorem apply_symm_apply : ∀ (e : α ≃ β) (x : β), e (e.symm x) = x
| ⟨f₁, g₁, l₁, r₁⟩ x := by simp [equiv.symm]; rw r₁
@[simp] theorem symm_apply_apply : ∀ (e : α ≃ β) (x : α), e.symm (e x) = x
| ⟨f₁, g₁, l₁, r₁⟩ x := by simp [equiv.symm]; rw l₁
@[simp] lemma symm_trans_apply (f : α ≃ β) (g : β ≃ γ) (a : γ) :
(f.trans g).symm a = f.symm (g.symm a) := rfl
@[simp] theorem apply_eq_iff_eq : ∀ (f : α ≃ β) (x y : α), f x = f y ↔ x = y
| ⟨f₁, g₁, l₁, r₁⟩ x y := (injective_of_left_inverse l₁).eq_iff
@[simp] theorem cast_apply {α β} (h : α = β) (x : α) : equiv.cast h x = cast h x := rfl
lemma symm_apply_eq {α β} (e : α ≃ β) {x y} : e.symm x = y ↔ x = e y :=
⟨λ H, by simp [H.symm], λ H, by simp [H]⟩
lemma eq_symm_apply {α β} (e : α ≃ β) {x y} : y = e.symm x ↔ e y = x :=
(eq_comm.trans e.symm_apply_eq).trans eq_comm
@[simp] theorem symm_symm (e : α ≃ β) : e.symm.symm = e := by cases e; refl
@[simp] theorem trans_refl (e : α ≃ β) : e.trans (equiv.refl β) = e := by cases e; refl
@[simp] theorem refl_trans (e : α ≃ β) : (equiv.refl α).trans e = e := by cases e; refl
@[simp] theorem symm_trans (e : α ≃ β) : e.symm.trans e = equiv.refl β := ext _ _ (by simp)
@[simp] theorem trans_symm (e : α ≃ β) : e.trans e.symm = equiv.refl α := ext _ _ (by simp)
lemma trans_assoc {δ} (ab : α ≃ β) (bc : β ≃ γ) (cd : γ ≃ δ) :
(ab.trans bc).trans cd = ab.trans (bc.trans cd) :=
equiv.ext _ _ $ assume a, rfl
theorem left_inverse_symm (f : equiv α β) : left_inverse f.symm f := f.left_inv
theorem right_inverse_symm (f : equiv α β) : function.right_inverse f.symm f := f.right_inv
def equiv_congr {δ} (ab : α ≃ β) (cd : γ ≃ δ) : (α ≃ γ) ≃ (β ≃ δ) :=
⟨ λac, (ab.symm.trans ac).trans cd, λbd, ab.trans $ bd.trans $ cd.symm,
assume ac, begin simp [trans_assoc], rw [← trans_assoc], simp end,
assume ac, begin simp [trans_assoc], rw [← trans_assoc], simp end, ⟩
def perm_congr {α : Type*} {β : Type*} (e : α ≃ β) : perm α ≃ perm β :=
equiv_congr e e
protected lemma image_eq_preimage {α β} (e : α ≃ β) (s : set α) : e '' s = e.symm ⁻¹' s :=
set.ext $ assume x, set.mem_image_iff_of_inverse e.left_inv e.right_inv
protected lemma subset_image {α β} (e : α ≃ β) (s : set α) (t : set β) : t ⊆ e '' s ↔ e.symm '' t ⊆ s :=
by rw [set.image_subset_iff, e.image_eq_preimage]
lemma symm_image_image {α β} (f : equiv α β) (s : set α) : f.symm '' (f '' s) = s :=
by { rw [← set.image_comp], simp }
protected lemma image_compl {α β} (f : equiv α β) (s : set α) :
f '' -s = -(f '' s) :=
set.image_compl_eq f.bijective
/- The group of permutations (self-equivalences) of a type `α` -/
namespace perm
instance perm_group {α : Type u} : group (perm α) :=
begin
refine { mul := λ f g, equiv.trans g f, one := equiv.refl α, inv:= equiv.symm, ..};
intros; apply equiv.ext; try { apply trans_apply },
apply symm_apply_apply
end
@[simp] theorem mul_apply {α : Type u} (f g : perm α) (x) : (f * g) x = f (g x) :=
equiv.trans_apply _ _ _
@[simp] theorem one_apply {α : Type u} (x) : (1 : perm α) x = x := rfl
@[simp] lemma inv_apply_self {α : Type u} (f : perm α) (x) :
f⁻¹ (f x) = x := equiv.symm_apply_apply _ _
@[simp] lemma apply_inv_self {α : Type u} (f : perm α) (x) :
f (f⁻¹ x) = x := equiv.apply_symm_apply _ _
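-- For illustration (not a lemma of this file): a permutation composed with its inverse
-- fixes every point; `simp` closes this via `mul_apply` and `apply_inv_self`.
example {α : Type u} (f : perm α) (x : α) : (f * f⁻¹) x = x := by simp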
lemma one_def {α : Type u} : (1 : perm α) = equiv.refl α := rfl
lemma mul_def {α : Type u} (f g : perm α) : f * g = g.trans f := rfl
lemma inv_def {α : Type u} (f : perm α) : f⁻¹ = f.symm := rfl
end perm
def equiv_empty (h : α → false) : α ≃ empty :=
⟨λ x, (h x).elim, λ e, e.rec _, λ x, (h x).elim, λ e, e.rec _⟩
def false_equiv_empty : false ≃ empty :=
equiv_empty _root_.id
def equiv_pempty (h : α → false) : α ≃ pempty :=
⟨λ x, (h x).elim, λ e, e.rec _, λ x, (h x).elim, λ e, e.rec _⟩
def false_equiv_pempty : false ≃ pempty :=
equiv_pempty _root_.id
def empty_equiv_pempty : empty ≃ pempty :=
equiv_pempty $ empty.rec _
def pempty_equiv_pempty : pempty.{v} ≃ pempty.{w} :=
equiv_pempty pempty.elim
def empty_of_not_nonempty {α : Sort*} (h : ¬ nonempty α) : α ≃ empty :=
equiv_empty $ assume a, h ⟨a⟩
def pempty_of_not_nonempty {α : Sort*} (h : ¬ nonempty α) : α ≃ pempty :=
equiv_pempty $ assume a, h ⟨a⟩
def prop_equiv_punit {p : Prop} (h : p) : p ≃ punit :=
⟨λ x, (), λ x, h, λ _, rfl, λ ⟨⟩, rfl⟩
def true_equiv_punit : true ≃ punit := prop_equiv_punit trivial
protected def ulift {α : Type u} : ulift α ≃ α :=
⟨ulift.down, ulift.up, ulift.up_down, λ a, rfl⟩
protected def plift : plift α ≃ α :=
⟨plift.down, plift.up, plift.up_down, plift.down_up⟩
@[congr] def arrow_congr {α₁ β₁ α₂ β₂ : Sort*} : α₁ ≃ α₂ → β₁ ≃ β₂ → (α₁ → β₁) ≃ (α₂ → β₂)
| ⟨f₁, g₁, l₁, r₁⟩ ⟨f₂, g₂, l₂, r₂⟩ :=
⟨λ (h : α₁ → β₁) (a : α₂), f₂ (h (g₁ a)),
λ (h : α₂ → β₂) (a : α₁), g₂ (h (f₁ a)),
λ h, by funext a; dsimp; rw [l₁, l₂],
λ h, by funext a; dsimp; rw [r₁, r₂]⟩
def punit_equiv_punit : punit.{v} ≃ punit.{w} :=
⟨λ _, punit.star, λ _, punit.star, λ u, by cases u; refl, λ u, by cases u; reflexivity⟩
section
@[simp] def arrow_punit_equiv_punit (α : Sort*) : (α → punit.{v}) ≃ punit.{w} :=
⟨λ f, punit.star, λ u f, punit.star, λ f, by funext x; cases f x; refl, λ u, by cases u; reflexivity⟩
@[simp] def punit_arrow_equiv (α : Sort*) : (punit.{u} → α) ≃ α :=
⟨λ f, f punit.star, λ a u, a, λ f, by funext x; cases x; refl, λ u, rfl⟩
@[simp] def empty_arrow_equiv_punit (α : Sort*) : (empty → α) ≃ punit.{u} :=
⟨λ f, punit.star, λ u e, e.rec _, λ f, funext $ λ x, x.rec _, λ u, by cases u; refl⟩
@[simp] def pempty_arrow_equiv_punit (α : Sort*) : (pempty → α) ≃ punit.{u} :=
⟨λ f, punit.star, λ u e, e.rec _, λ f, funext $ λ x, x.rec _, λ u, by cases u; refl⟩
@[simp] def false_arrow_equiv_punit (α : Sort*) : (false → α) ≃ punit.{u} :=
calc (false → α) ≃ (empty → α) : arrow_congr false_equiv_empty (equiv.refl _)
... ≃ punit : empty_arrow_equiv_punit _
end
@[congr] def prod_congr {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ :β₁ ≃ β₂) : (α₁ × β₁) ≃ (α₂ × β₂) :=
⟨λp, (e₁ p.1, e₂ p.2), λp, (e₁.symm p.1, e₂.symm p.2),
λ ⟨a, b⟩, show (e₁.symm (e₁ a), e₂.symm (e₂ b)) = (a, b), by rw [symm_apply_apply, symm_apply_apply],
λ ⟨a, b⟩, show (e₁ (e₁.symm a), e₂ (e₂.symm b)) = (a, b), by rw [apply_symm_apply, apply_symm_apply]⟩
@[simp] theorem prod_congr_apply {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) (a : α₁) (b : β₁) :
prod_congr e₁ e₂ (a, b) = (e₁ a, e₂ b) :=
rfl
@[simp] def prod_comm (α β : Sort*) : (α × β) ≃ (β × α) :=
⟨λ p, (p.2, p.1), λ p, (p.2, p.1), λ⟨a, b⟩, rfl, λ⟨a, b⟩, rfl⟩
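/- Illustrative sanity check: `prod_comm` literally swaps the two components, so concrete
instances hold by `rfl`. -/
example : prod_comm ℕ bool (2, tt) = (tt, 2) := rfl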
@[simp] def prod_assoc (α β γ : Sort*) : ((α × β) × γ) ≃ (α × (β × γ)) :=
⟨λ p, ⟨p.1.1, ⟨p.1.2, p.2⟩⟩, λp, ⟨⟨p.1, p.2.1⟩, p.2.2⟩, λ ⟨⟨a, b⟩, c⟩, rfl, λ ⟨a, ⟨b, c⟩⟩, rfl⟩
@[simp] theorem prod_assoc_apply {α β γ : Sort*} (p : (α × β) × γ) :
prod_assoc α β γ p = ⟨p.1.1, ⟨p.1.2, p.2⟩⟩ := rfl
section
@[simp] def prod_punit (α : Sort*) : (α × punit.{u+1}) ≃ α :=
⟨λ p, p.1, λ a, (a, punit.star), λ ⟨_, punit.star⟩, rfl, λ a, rfl⟩
@[simp] theorem prod_punit_apply {α : Sort*} (a : α × punit.{u+1}) : prod_punit α a = a.1 := rfl
@[simp] def punit_prod (α : Sort*) : (punit.{u+1} × α) ≃ α :=
calc (punit × α) ≃ (α × punit) : prod_comm _ _
... ≃ α : prod_punit _
@[simp] theorem punit_prod_apply {α : Sort*} (a : punit.{u+1} × α) : punit_prod α a = a.2 := rfl
@[simp] def prod_empty (α : Sort*) : (α × empty) ≃ empty :=
equiv_empty (λ ⟨_, e⟩, e.rec _)
@[simp] def empty_prod (α : Sort*) : (empty × α) ≃ empty :=
equiv_empty (λ ⟨e, _⟩, e.rec _)
@[simp] def prod_pempty (α : Sort*) : (α × pempty) ≃ pempty :=
equiv_pempty (λ ⟨_, e⟩, e.rec _)
@[simp] def pempty_prod (α : Sort*) : (pempty × α) ≃ pempty :=
equiv_pempty (λ ⟨e, _⟩, e.rec _)
end
section
open sum
def psum_equiv_sum (α β : Sort*) : psum α β ≃ (α ⊕ β) :=
⟨λ s, psum.cases_on s inl inr,
λ s, sum.cases_on s psum.inl psum.inr,
λ s, by cases s; refl,
λ s, by cases s; refl⟩
def sum_congr {α₁ β₁ α₂ β₂ : Sort*} : α₁ ≃ α₂ → β₁ ≃ β₂ → (α₁ ⊕ β₁) ≃ (α₂ ⊕ β₂)
| ⟨f₁, g₁, l₁, r₁⟩ ⟨f₂, g₂, l₂, r₂⟩ :=
⟨λ s, match s with inl a₁ := inl (f₁ a₁) | inr b₁ := inr (f₂ b₁) end,
λ s, match s with inl a₂ := inl (g₁ a₂) | inr b₂ := inr (g₂ b₂) end,
λ s, match s with inl a := congr_arg inl (l₁ a) | inr a := congr_arg inr (l₂ a) end,
λ s, match s with inl a := congr_arg inl (r₁ a) | inr a := congr_arg inr (r₂ a) end⟩
@[simp] theorem sum_congr_apply_inl {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) (a : α₁) :
sum_congr e₁ e₂ (inl a) = inl (e₁ a) :=
by cases e₁; cases e₂; refl
@[simp] theorem sum_congr_apply_inr {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) (b : β₁) :
sum_congr e₁ e₂ (inr b) = inr (e₂ b) :=
by cases e₁; cases e₂; refl
def bool_equiv_punit_sum_punit : bool ≃ (punit.{u+1} ⊕ punit.{v+1}) :=
⟨λ b, cond b (inr punit.star) (inl punit.star),
λ s, sum.rec_on s (λ_, ff) (λ_, tt),
λ b, by cases b; refl,
λ s, by rcases s with ⟨⟨⟩⟩ | ⟨⟨⟩⟩; refl⟩
noncomputable def Prop_equiv_bool : Prop ≃ bool :=
⟨λ p, @to_bool p (classical.prop_decidable _),
λ b, b, λ p, by simp, λ b, by simp⟩
@[simp] def sum_comm (α β : Sort*) : (α ⊕ β) ≃ (β ⊕ α) :=
⟨λ s, match s with inl a := inr a | inr b := inl b end,
λ s, match s with inl b := inr b | inr a := inl a end,
λ s, by cases s; refl,
λ s, by cases s; refl⟩
@[simp] def sum_assoc (α β γ : Sort*) : ((α ⊕ β) ⊕ γ) ≃ (α ⊕ (β ⊕ γ)) :=
⟨λ s, match s with inl (inl a) := inl a | inl (inr b) := inr (inl b) | inr c := inr (inr c) end,
λ s, match s with inl a := inl (inl a) | inr (inl b) := inl (inr b) | inr (inr c) := inr c end,
λ s, by rcases s with ⟨_ | _⟩ | _; refl,
λ s, by rcases s with _ | _ | _; refl⟩
@[simp] theorem sum_assoc_apply_in1 {α β γ} (a) : sum_assoc α β γ (inl (inl a)) = inl a := rfl
@[simp] theorem sum_assoc_apply_in2 {α β γ} (b) : sum_assoc α β γ (inl (inr b)) = inr (inl b) := rfl
@[simp] theorem sum_assoc_apply_in3 {α β γ} (c) : sum_assoc α β γ (inr c) = inr (inr c) := rfl
@[simp] def sum_empty (α : Sort*) : (α ⊕ empty) ≃ α :=
⟨λ s, match s with inl a := a | inr e := empty.rec _ e end,
inl,
λ s, by rcases s with _ | ⟨⟨⟩⟩; refl,
λ a, rfl⟩
@[simp] def empty_sum (α : Sort*) : (empty ⊕ α) ≃ α :=
(sum_comm _ _).trans $ sum_empty _
@[simp] def sum_pempty (α : Sort*) : (α ⊕ pempty) ≃ α :=
⟨λ s, match s with inl a := a | inr e := pempty.rec _ e end,
inl,
λ s, by rcases s with _ | ⟨⟨⟩⟩; refl,
λ a, rfl⟩
@[simp] def pempty_sum (α : Sort*) : (pempty ⊕ α) ≃ α :=
(sum_comm _ _).trans $ sum_pempty _
@[simp] def option_equiv_sum_punit (α : Sort*) : option α ≃ (α ⊕ punit.{u+1}) :=
⟨λ o, match o with none := inr punit.star | some a := inl a end,
λ s, match s with inr _ := none | inl a := some a end,
λ o, by cases o; refl,
λ s, by rcases s with _ | ⟨⟨⟩⟩; refl⟩
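/- Illustrative sanity checks: under `option_equiv_sum_punit`, `some a` lands in the left summand
and `none` becomes the extra `punit` point. -/
example : option_equiv_sum_punit ℕ (some 3) = inl 3 := rfl
example : option_equiv_sum_punit ℕ none = inr punit.star := rfl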
def sum_equiv_sigma_bool (α β : Sort*) : (α ⊕ β) ≃ (Σ b: bool, cond b α β) :=
⟨λ s, match s with inl a := ⟨tt, a⟩ | inr b := ⟨ff, b⟩ end,
λ s, match s with ⟨tt, a⟩ := inl a | ⟨ff, b⟩ := inr b end,
λ s, by cases s; refl,
λ s, by rcases s with ⟨_|_, _⟩; refl⟩
def equiv_fib {α β : Type*} (f : α → β) :
α ≃ Σ y : β, {x // f x = y} :=
⟨λ x, ⟨f x, x, rfl⟩, λ x, x.2.1, λ x, rfl, λ ⟨y, x, rfl⟩, rfl⟩
end
section
def Pi_congr_right {α} {β₁ β₂ : α → Sort*} (F : ∀ a, β₁ a ≃ β₂ a) : (Π a, β₁ a) ≃ (Π a, β₂ a) :=
⟨λ H a, F a (H a), λ H a, (F a).symm (H a),
λ H, funext $ by simp, λ H, funext $ by simp⟩
end
section
def psigma_equiv_sigma {α} (β : α → Sort*) : psigma β ≃ sigma β :=
⟨λ ⟨a, b⟩, ⟨a, b⟩, λ ⟨a, b⟩, ⟨a, b⟩, λ ⟨a, b⟩, rfl, λ ⟨a, b⟩, rfl⟩
def sigma_congr_right {α} {β₁ β₂ : α → Sort*} (F : ∀ a, β₁ a ≃ β₂ a) : sigma β₁ ≃ sigma β₂ :=
⟨λ ⟨a, b⟩, ⟨a, F a b⟩, λ ⟨a, b⟩, ⟨a, (F a).symm b⟩,
λ ⟨a, b⟩, congr_arg (sigma.mk a) $ symm_apply_apply (F a) b,
λ ⟨a, b⟩, congr_arg (sigma.mk a) $ apply_symm_apply (F a) b⟩
def sigma_congr_left {α₁ α₂} {β : α₂ → Sort*} : ∀ f : α₁ ≃ α₂, (Σ a:α₁, β (f a)) ≃ (Σ a:α₂, β a)
| ⟨f, g, l, r⟩ :=
⟨λ ⟨a, b⟩, ⟨f a, b⟩, λ ⟨a, b⟩, ⟨g a, @@eq.rec β b (r a).symm⟩,
λ ⟨a, b⟩, match g (f a), l a : ∀ a' (h : a' = a),
@sigma.mk _ (β ∘ f) _ (@@eq.rec β b (congr_arg f h.symm)) = ⟨a, b⟩ with
| _, rfl := rfl end,
λ ⟨a, b⟩, match f (g a), _ : ∀ a' (h : a' = a), sigma.mk a' (@@eq.rec β b h.symm) = ⟨a, b⟩ with
| _, rfl := rfl end⟩
def sigma_equiv_prod (α β : Sort*) : (Σ_:α, β) ≃ (α × β) :=
⟨λ ⟨a, b⟩, ⟨a, b⟩, λ ⟨a, b⟩, ⟨a, b⟩, λ ⟨a, b⟩, rfl, λ ⟨a, b⟩, rfl⟩
def sigma_equiv_prod_of_equiv {α β} {β₁ : α → Sort*} (F : ∀ a, β₁ a ≃ β) : sigma β₁ ≃ (α × β) :=
(sigma_congr_right F).trans (sigma_equiv_prod α β)
end
section
def arrow_prod_equiv_prod_arrow (α β γ : Type*) : (γ → α × β) ≃ ((γ → α) × (γ → β)) :=
⟨λ f, (λ c, (f c).1, λ c, (f c).2),
λ p c, (p.1 c, p.2 c),
λ f, funext $ λ c, prod.mk.eta,
λ p, by cases p; refl⟩
def arrow_arrow_equiv_prod_arrow (α β γ : Sort*) : (α → β → γ) ≃ (α × β → γ) :=
⟨λ f, λ p, f p.1 p.2,
λ f, λ a b, f (a, b),
λ f, rfl,
λ f, by funext p; cases p; refl⟩
open sum
def sum_arrow_equiv_prod_arrow (α β γ : Type*) : ((α ⊕ β) → γ) ≃ ((α → γ) × (β → γ)) :=
⟨λ f, (f ∘ inl, f ∘ inr),
λ p s, sum.rec_on s p.1 p.2,
λ f, by funext s; cases s; refl,
λ p, by cases p; refl⟩
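/- Illustrative sanity check: the forward direction of `sum_arrow_equiv_prod_arrow` just restricts
a function along the two injections. -/
example (f : ℕ ⊕ bool → ℕ) :
  sum_arrow_equiv_prod_arrow ℕ bool ℕ f = (f ∘ inl, f ∘ inr) := rfl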
def sum_prod_distrib (α β γ : Sort*) : ((α ⊕ β) × γ) ≃ ((α × γ) ⊕ (β × γ)) :=
⟨λ p, match p with (inl a, c) := inl (a, c) | (inr b, c) := inr (b, c) end,
λ s, match s with inl (a, c) := (inl a, c) | inr (b, c) := (inr b, c) end,
λ p, by rcases p with ⟨_ | _, _⟩; refl,
λ s, by rcases s with ⟨_, _⟩ | ⟨_, _⟩; refl⟩
@[simp] theorem sum_prod_distrib_apply_left {α β γ} (a : α) (c : γ) :
sum_prod_distrib α β γ (sum.inl a, c) = sum.inl (a, c) := rfl
@[simp] theorem sum_prod_distrib_apply_right {α β γ} (b : β) (c : γ) :
sum_prod_distrib α β γ (sum.inr b, c) = sum.inr (b, c) := rfl
def prod_sum_distrib (α β γ : Sort*) : (α × (β ⊕ γ)) ≃ ((α × β) ⊕ (α × γ)) :=
calc (α × (β ⊕ γ)) ≃ ((β ⊕ γ) × α) : prod_comm _ _
... ≃ ((β × α) ⊕ (γ × α)) : sum_prod_distrib _ _ _
... ≃ ((α × β) ⊕ (α × γ)) : sum_congr (prod_comm _ _) (prod_comm _ _)
@[simp] theorem prod_sum_distrib_apply_left {α β γ} (a : α) (b : β) :
prod_sum_distrib α β γ (a, sum.inl b) = sum.inl (a, b) := rfl
@[simp] theorem prod_sum_distrib_apply_right {α β γ} (a : α) (c : γ) :
prod_sum_distrib α β γ (a, sum.inr c) = sum.inr (a, c) := rfl
def bool_prod_equiv_sum (α : Type u) : (bool × α) ≃ (α ⊕ α) :=
calc (bool × α) ≃ ((unit ⊕ unit) × α) : prod_congr bool_equiv_punit_sum_punit (equiv.refl _)
... ≃ (α × (unit ⊕ unit)) : prod_comm _ _
... ≃ ((α × unit) ⊕ (α × unit)) : prod_sum_distrib _ _ _
... ≃ (α ⊕ α) : sum_congr (prod_punit _) (prod_punit _)
end
section
open sum nat
def nat_equiv_nat_sum_punit : ℕ ≃ (ℕ ⊕ punit.{u+1}) :=
⟨λ n, match n with zero := inr punit.star | succ a := inl a end,
λ s, match s with inl n := succ n | inr punit.star := zero end,
λ n, begin cases n, repeat { refl } end,
λ s, begin cases s with a u, { refl }, {cases u, { refl }} end⟩
@[simp] def nat_sum_punit_equiv_nat : (ℕ ⊕ punit.{u+1}) ≃ ℕ :=
nat_equiv_nat_sum_punit.symm
def int_equiv_nat_sum_nat : ℤ ≃ (ℕ ⊕ ℕ) :=
by refine ⟨_, _, _, _⟩; intro z; {cases z; [left, right]; assumption} <|> {cases z; refl}
end
def list_equiv_of_equiv {α β : Type*} : α ≃ β → list α ≃ list β
| ⟨f, g, l, r⟩ :=
by refine ⟨list.map f, list.map g, λ x, _, λ x, _⟩;
simp [id_of_left_inverse l, id_of_right_inverse r]
def fin_equiv_subtype (n : ℕ) : fin n ≃ {m // m < n} :=
⟨λ x, ⟨x.1, x.2⟩, λ x, ⟨x.1, x.2⟩, λ ⟨a, b⟩, rfl,λ ⟨a, b⟩, rfl⟩
def decidable_eq_of_equiv [decidable_eq β] (e : α ≃ β) : decidable_eq α
| a₁ a₂ := decidable_of_iff (e a₁ = e a₂) e.injective.eq_iff
def inhabited_of_equiv [inhabited β] (e : α ≃ β) : inhabited α :=
⟨e.symm (default _)⟩
def unique_of_equiv (e : α ≃ β) (h : unique β) : unique α :=
unique.of_surjective e.symm.surjective
def unique_congr (e : α ≃ β) : unique α ≃ unique β :=
{ to_fun := e.symm.unique_of_equiv,
inv_fun := e.unique_of_equiv,
left_inv := λ _, subsingleton.elim _ _,
right_inv := λ _, subsingleton.elim _ _ }
section
open subtype
def subtype_congr {p : α → Prop} {q : β → Prop}
(e : α ≃ β) (h : ∀ a, p a ↔ q (e a)) : {a : α // p a} ≃ {b : β // q b} :=
⟨λ x, ⟨e x.1, (h _).1 x.2⟩,
λ y, ⟨e.symm y.1, (h _).2 (by simp; exact y.2)⟩,
λ ⟨x, h⟩, subtype.eq' $ by simp,
λ ⟨y, h⟩, subtype.eq' $ by simp⟩
def subtype_congr_right {p q : α → Prop} (e : ∀x, p x ↔ q x) : subtype p ≃ subtype q :=
subtype_congr (equiv.refl _) e
@[simp] lemma subtype_congr_right_mk {p q : α → Prop} (e : ∀x, p x ↔ q x)
{x : α} (h : p x) : subtype_congr_right e ⟨x, h⟩ = ⟨x, (e x).1 h⟩ := rfl
def subtype_equiv_of_subtype' {p : α → Prop} (e : α ≃ β) :
{a : α // p a} ≃ {b : β // p (e.symm b)} :=
subtype_congr e $ by simp
def subtype_congr_prop {α : Type*} {p q : α → Prop} (h : p = q) : subtype p ≃ subtype q :=
subtype_congr (equiv.refl α) (assume a, h ▸ iff.refl _)
def set_congr {α : Type*} {s t : set α} (h : s = t) : s ≃ t :=
subtype_congr_prop h
def subtype_subtype_equiv_subtype {α : Type u} (p : α → Prop) (q : subtype p → Prop) :
subtype q ≃ {a : α // ∃h:p a, q ⟨a, h⟩ } :=
⟨λ⟨⟨a, ha⟩, ha'⟩, ⟨a, ha, ha'⟩,
λ⟨a, ha⟩, ⟨⟨a, ha.cases_on $ assume h _, h⟩, by cases ha; exact ha_h⟩,
assume ⟨⟨a, ha⟩, h⟩, rfl, assume ⟨a, h₁, h₂⟩, rfl⟩
/-- aka coimage -/
def equiv_sigma_subtype {α : Type u} {β : Type v} (f : α → β) : α ≃ Σ b, {x : α // f x = b} :=
⟨λ x, ⟨f x, x, rfl⟩, λ x, x.2.1, λ x, rfl, λ ⟨b, x, H⟩, sigma.eq H $ eq.drec_on H $ subtype.eq rfl⟩
def pi_equiv_subtype_sigma (ι : Type*) (π : ι → Type*) :
(Πi, π i) ≃ {f : ι → Σi, π i | ∀i, (f i).1 = i } :=
⟨ λf, ⟨λi, ⟨i, f i⟩, assume i, rfl⟩, λf i, begin rw ← f.2 i, exact (f.1 i).2 end,
assume f, funext $ assume i, rfl,
assume ⟨f, hf⟩, subtype.eq $ funext $ assume i, sigma.eq (hf i).symm $
eq_of_heq $ rec_heq_of_heq _ $ rec_heq_of_heq _ $ heq.refl _⟩
def subtype_pi_equiv_pi {α : Sort u} {β : α → Sort v} {p : Πa, β a → Prop} :
{f : Πa, β a // ∀a, p a (f a) } ≃ Πa, { b : β a // p a b } :=
⟨λf a, ⟨f.1 a, f.2 a⟩, λf, ⟨λa, (f a).1, λa, (f a).2⟩,
by rintro ⟨f, h⟩; refl,
by rintro f; funext a; exact subtype.eq' rfl⟩
end
section
local attribute [elab_with_expected_type] quot.lift
def quot_equiv_of_quot' {r : α → α → Prop} {s : β → β → Prop} (e : α ≃ β)
(h : ∀ a a', r a a' ↔ s (e a) (e a')) : quot r ≃ quot s :=
⟨quot.lift (λ a, quot.mk _ (e a)) (λ a a' H, quot.sound ((h a a').mp H)),
quot.lift (λ b, quot.mk _ (e.symm b)) (λ b b' H, quot.sound ((h _ _).mpr (by convert H; simp))),
quot.ind $ by simp,
quot.ind $ by simp⟩
def quot_equiv_of_quot {r : α → α → Prop} (e : α ≃ β) :
quot r ≃ quot (λ b b', r (e.symm b) (e.symm b')) :=
quot_equiv_of_quot' e (by simp)
end
namespace set
open set
protected def univ (α) : @univ α ≃ α :=
⟨subtype.val, λ a, ⟨a, trivial⟩, λ ⟨a, _⟩, rfl, λ a, rfl⟩
protected def empty (α) : (∅ : set α) ≃ empty :=
equiv_empty $ λ ⟨x, h⟩, not_mem_empty x h
protected def pempty (α) : (∅ : set α) ≃ pempty :=
equiv_pempty $ λ ⟨x, h⟩, not_mem_empty x h
protected def union' {α} {s t : set α}
(p : α → Prop) [decidable_pred p]
(hs : ∀ x ∈ s, p x)
(ht : ∀ x ∈ t, ¬ p x) : (s ∪ t : set α) ≃ (s ⊕ t) :=
⟨λ ⟨x, h⟩, if hp : p x
then sum.inl ⟨_, h.resolve_right (λ xt, ht _ xt hp)⟩
else sum.inr ⟨_, h.resolve_left (λ xs, hp (hs _ xs))⟩,
λ o, match o with
| (sum.inl ⟨x, h⟩) := ⟨x, or.inl h⟩
| (sum.inr ⟨x, h⟩) := ⟨x, or.inr h⟩
end,
λ ⟨x, h'⟩, by by_cases p x; simp [union'._match_1, union'._match_2, h]; congr,
λ o, by rcases o with ⟨x, h⟩ | ⟨x, h⟩; simp [union'._match_1, union'._match_2, h];
[simp [hs _ h], simp [ht _ h]]⟩
protected def union {α} {s t : set α} [decidable_pred s] (H : s ∩ t = ∅) :
(s ∪ t : set α) ≃ (s ⊕ t) :=
set.union' s (λ _, id) (λ x xt xs, subset_empty_iff.2 H ⟨xs, xt⟩)
protected def singleton {α} (a : α) : ({a} : set α) ≃ punit.{u} :=
⟨λ _, punit.star, λ _, ⟨a, mem_singleton _⟩,
λ ⟨x, h⟩, by simp at h; subst x,
λ ⟨⟩, rfl⟩
protected def insert {α} {s : set.{u} α} [decidable_pred s] {a : α} (H : a ∉ s) :
(insert a s : set α) ≃ (s ⊕ punit.{u+1}) :=
by rw ← union_singleton; exact
(set.union $ inter_singleton_eq_empty.2 H).trans
(sum_congr (equiv.refl _) (set.singleton _))
protected def sum_compl {α} (s : set α) [decidable_pred s] :
(s ⊕ (-s : set α)) ≃ α :=
(set.union (inter_compl_self _)).symm.trans
(by rw union_compl_self; exact set.univ _)
protected def union_sum_inter {α : Type u} (s t : set α) [decidable_pred s] :
((s ∪ t : set α) ⊕ (s ∩ t : set α)) ≃ (s ⊕ t) :=
calc ((s ∪ t : set α) ⊕ (s ∩ t : set α))
≃ ((s ∪ t \ s : set α) ⊕ (s ∩ t : set α)) : by rw [union_diff_self]
... ≃ ((s ⊕ (t \ s : set α)) ⊕ (s ∩ t : set α)) :
sum_congr (set.union (inter_diff_self _ _)) (equiv.refl _)
... ≃ (s ⊕ (t \ s : set α) ⊕ (s ∩ t : set α)) : sum_assoc _ _ _
... ≃ (s ⊕ (t \ s ∪ s ∩ t : set α)) : sum_congr (equiv.refl _) begin
refine (set.union' (∉ s) _ _).symm,
exacts [λ x hx, hx.2, λ x hx, not_not_intro hx.1]
end
... ≃ (s ⊕ t) : by rw (_ : t \ s ∪ s ∩ t = t);
rw [union_comm, inter_comm, inter_union_diff]
protected def prod {α β} (s : set α) (t : set β) :
(s.prod t) ≃ (s × t) :=
⟨λp, ⟨⟨p.1.1, p.2.1⟩, ⟨p.1.2, p.2.2⟩⟩,
λp, ⟨⟨p.1.1, p.2.1⟩, ⟨p.1.2, p.2.2⟩⟩,
λ ⟨⟨x, y⟩, ⟨h₁, h₂⟩⟩, rfl,
λ ⟨⟨x, h₁⟩, ⟨y, h₂⟩⟩, rfl⟩
protected noncomputable def image {α β} (f : α → β) (s : set α) (H : injective f) :
s ≃ (f '' s) :=
⟨λ ⟨x, h⟩, ⟨f x, mem_image_of_mem _ h⟩,
λ ⟨y, h⟩, ⟨classical.some h, (classical.some_spec h).1⟩,
λ ⟨x, h⟩, subtype.eq (H (classical.some_spec (mem_image_of_mem f h)).2),
λ ⟨y, h⟩, subtype.eq (classical.some_spec h).2⟩
@[simp] theorem image_apply {α β} (f : α → β) (s : set α) (H : injective f) (a h) :
set.image f s H ⟨a, h⟩ = ⟨f a, mem_image_of_mem _ h⟩ := rfl
protected noncomputable def range {α β} (f : α → β) (H : injective f) :
α ≃ range f :=
(set.univ _).symm.trans $ (set.image f univ H).trans (equiv.cast $ by rw image_univ)
@[simp] theorem range_apply {α β} (f : α → β) (H : injective f) (a) :
set.range f H a = ⟨f a, set.mem_range_self _⟩ :=
by dunfold equiv.set.range equiv.set.univ;
simp [set_coe_cast, -image_univ, image_univ.symm]
protected def congr {α β : Type*} (e : α ≃ β) : set α ≃ set β :=
⟨λ s, e '' s, λ t, e.symm '' t, symm_image_image e, symm_image_image e.symm⟩
end set
noncomputable def of_bijective {α β} {f : α → β} (hf : bijective f) : α ≃ β :=
⟨f, λ x, classical.some (hf.2 x), λ x, hf.1 (classical.some_spec (hf.2 (f x))),
λ x, classical.some_spec (hf.2 x)⟩
@[simp] theorem of_bijective_to_fun {α β} {f : α → β} (hf : bijective f) : (of_bijective hf : α → β) = f := rfl
lemma subtype_quotient_equiv_quotient_subtype (p₁ : α → Prop) [s₁ : setoid α]
[s₂ : setoid (subtype p₁)] (p₂ : quotient s₁ → Prop) (hp₂ : ∀ a, p₁ a ↔ p₂ ⟦a⟧)
(h : ∀ x y : subtype p₁, @setoid.r _ s₂ x y ↔ (x : α) ≈ y) :
{x // p₂ x} ≃ quotient s₂ :=
{ to_fun := λ a, quotient.hrec_on a.1 (λ a h, ⟦⟨a, (hp₂ _).2 h⟩⟧)
(λ a b hab, hfunext (by rw quotient.sound hab)
(λ h₁ h₂ _, heq_of_eq (quotient.sound ((h _ _).2 hab)))) a.2,
inv_fun := λ a, quotient.lift_on a (λ a, (⟨⟦a.1⟧, (hp₂ _).1 a.2⟩ : {x // p₂ x}))
(λ a b hab, subtype.eq' (quotient.sound ((h _ _).1 hab))),
left_inv := λ ⟨a, ha⟩, quotient.induction_on a (λ a ha, rfl) ha,
right_inv := λ a, quotient.induction_on a (λ ⟨a, ha⟩, rfl) }
section swap
variable [decidable_eq α]
open decidable
def swap_core (a b r : α) : α :=
if r = a then b
else if r = b then a
else r
theorem swap_core_self (r a : α) : swap_core a a r = r :=
by unfold swap_core; split_ifs; cc
theorem swap_core_swap_core (r a b : α) : swap_core a b (swap_core a b r) = r :=
by unfold swap_core; split_ifs; cc
theorem swap_core_comm (r a b : α) : swap_core a b r = swap_core b a r :=
by unfold swap_core; split_ifs; cc
/-- `swap a b` is the permutation that swaps `a` and `b` and
leaves other values as is. -/
def swap (a b : α) : perm α :=
⟨swap_core a b, swap_core a b, λr, swap_core_swap_core r a b, λr, swap_core_swap_core r a b⟩
theorem swap_self (a : α) : swap a a = equiv.refl _ :=
eq_of_to_fun_eq $ funext $ λ r, swap_core_self r a
theorem swap_comm (a b : α) : swap a b = swap b a :=
eq_of_to_fun_eq $ funext $ λ r, swap_core_comm r _ _
theorem swap_apply_def (a b x : α) : swap a b x = if x = a then b else if x = b then a else x :=
rfl
@[simp] theorem swap_apply_left (a b : α) : swap a b a = b :=
if_pos rfl
@[simp] theorem swap_apply_right (a b : α) : swap a b b = a :=
by by_cases b = a; simp [swap_apply_def, *]
theorem swap_apply_of_ne_of_ne {a b x : α} : x ≠ a → x ≠ b → swap a b x = x :=
by simp [swap_apply_def] {contextual := tt}
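/- Illustrative sanity checks: `swap` exchanges its two arguments and fixes every other value. -/
example : swap (1 : ℕ) 2 1 = 2 := swap_apply_left 1 2
example : swap (1 : ℕ) 2 2 = 1 := swap_apply_right 1 2
example : swap (1 : ℕ) 2 5 = 5 := swap_apply_of_ne_of_ne dec_trivial dec_trivial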
@[simp] theorem swap_swap (a b : α) : (swap a b).trans (swap a b) = equiv.refl _ :=
eq_of_to_fun_eq $ funext $ λ x, swap_core_swap_core _ _ _
theorem swap_comp_apply {a b x : α} (π : perm α) :
π.trans (swap a b) x = if π x = a then b else if π x = b then a else π x :=
by cases π; refl
@[simp] lemma swap_inv {α : Type*} [decidable_eq α] (x y : α) :
(swap x y)⁻¹ = swap x y := rfl
@[simp] lemma symm_trans_swap_trans [decidable_eq α] [decidable_eq β] (a b : α)
(e : α ≃ β) : (e.symm.trans (swap a b)).trans e = swap (e a) (e b) :=
equiv.ext _ _ (λ x, begin
have : ∀ a, e.symm x = a ↔ x = e a :=
λ a, by rw @eq_comm _ (e.symm x); split; intros; simp * at *,
simp [swap_apply_def, this],
split_ifs; simp
end)
@[simp] lemma swap_mul_self {α : Type*} [decidable_eq α] (i j : α) : swap i j * swap i j = 1 :=
equiv.swap_swap i j
@[simp] lemma swap_apply_self {α : Type*} [decidable_eq α] (i j a : α) : swap i j (swap i j a) = a :=
by rw [← perm.mul_apply, swap_mul_self, perm.one_apply]
/-- Augment an equivalence with a prescribed mapping `f a = b` -/
def set_value (f : α ≃ β) (a : α) (b : β) : α ≃ β :=
(swap a (f.symm b)).trans f
@[simp] theorem set_value_eq (f : α ≃ β) (a : α) (b : β) : set_value f a b a = b :=
by dsimp [set_value]; simp [swap_apply_left]
end swap
end equiv
instance {α} [subsingleton α] : subsingleton (ulift α) := equiv.ulift.subsingleton
instance {α} [subsingleton α] : subsingleton (plift α) := equiv.plift.subsingleton
instance {α} [decidable_eq α] : decidable_eq (ulift α) := equiv.ulift.decidable_eq
instance {α} [decidable_eq α] : decidable_eq (plift α) := equiv.plift.decidable_eq
def unique_unique_equiv : unique (unique α) ≃ unique α :=
{ to_fun := λ h, h.default,
inv_fun := λ h, { default := h, uniq := λ _, subsingleton.elim _ _ },
left_inv := λ _, subsingleton.elim _ _,
right_inv := λ _, subsingleton.elim _ _ }
def equiv_of_unique_of_unique [unique α] [unique β] : α ≃ β :=
{ to_fun := λ _, default β,
inv_fun := λ _, default α,
left_inv := λ _, subsingleton.elim _ _,
right_inv := λ _, subsingleton.elim _ _ }
def equiv_punit_of_unique [unique α] : α ≃ punit.{v} :=
equiv_of_unique_of_unique
namespace quot
/-- Two relations that agree pointwise give rise to equivalent quotients.
An alternative is just to rewrite along an `eq` between the relations, but then computational
proofs get stuck. -/
protected def congr {α} {r r' : α → α → Prop} (eq : ∀a b, r a b ↔ r' a b) : quot r ≃ quot r' :=
⟨quot.map r r' (assume a b, (eq a b).1), quot.map r' r (assume a b, (eq a b).2),
by rintros ⟨a⟩; refl, by rintros ⟨a⟩; refl⟩
end quot
namespace quotient
protected def congr {α} {r r' : setoid α} (eq : ∀a b, @setoid.r α r a b ↔ @setoid.r α r' a b) :
quotient r ≃ quotient r' :=
quot.congr eq
end quotient
144a00ba66aff09ad78fdcc6953f1559e31038d2 | 33340b3a23ca62ef3c8a7f6a2d4e14c07c6d3354 | /lia/qfree_sqe.lean | 060c34ca881679f5ed9e7356461b6dfad9550fa8 | [] | no_license | lclem/cooper | 79554e72ced343c64fed24b2d892d24bf9447dfe | 812afc6b158821f2e7dac9c91d3b6123c7a19faf | refs/heads/master | 1,607,554,257,488 | 1,578,694,133,000 | 1,578,694,133,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 2,816 | lean |
import .sqe .qfree
open list
lemma nqfree_map_unify {z} :
∀ {p : formula}, nqfree p → nqfree (p.map (atom.unify z))
| ⊤' h := by unfold formula.map
| ⊥' h := by unfold formula.map
| (A' _) h := by unfold formula.map
| (p ∧' q) h :=
begin
unfold formula.map, cases h, constructor;
apply nqfree_map_unify; assumption
end
| (p ∨' q) h :=
begin
unfold formula.map, cases h, constructor;
apply nqfree_map_unify; assumption
end
| (¬' _) h := by cases h
| (∃' _) h := by unfold formula.map
lemma nqfree_unify :
∀ {p}, nqfree p → nqfree p.unify :=
begin
intros p h, unfold formula.unify,
apply and.intro, trivial,
apply nqfree_map_unify h
end
lemma qfree_subst_of_qfree (i ks) :
∀ p, qfree p → qfree (subst i ks p)
| ⊤' h := by unfold subst
| ⊥' h := by unfold subst
| (A' a) h := by unfold subst
| (p ∧' q) h :=
begin
cases h with hp hq, apply and.intro,
apply qfree_subst_of_qfree p hp,
apply qfree_subst_of_qfree q hq
end
| (p ∨' q) h :=
begin
cases h with hp hq, apply and.intro,
apply qfree_subst_of_qfree p hp,
apply qfree_subst_of_qfree q hq
end
| (¬' p) h := qfree_subst_of_qfree p h
| (∃' p) h := by unfold subst
lemma qfree_inf_minus_of_qfree :
∀ p, qfree p → qfree (inf_minus p)
| ⊤' h := by trivial
| ⊥' h := by trivial
| (A' a) h :=
begin
cases a with k ks; try {trivial},
cases ks with k' ks, trivial, simp [inf_minus],
apply ite.rec; intro h, trivial,
apply ite.rec; intro h; trivial
end
| (p ∧' q) h :=
begin
cases h with h1 h2, apply cases_and_o; try {trivial};
try {apply qfree_inf_minus_of_qfree, assumption},
constructor; {apply qfree_inf_minus_of_qfree, assumption}
end
| (p ∨' q) h :=
begin
cases h with h1 h2, apply cases_or_o; try {trivial};
try {apply qfree_inf_minus_of_qfree, assumption},
constructor; {apply qfree_inf_minus_of_qfree, assumption}
end
| (¬' p) h := by assumption
| (∃' p) h := by assumption
lemma qfree_sqe_core : ∀ {φ}, nqfree φ → qfree (sqe_core φ) :=
begin
intros p hp, simp [sqe_core, sqe_inf, sqe_bnd],
apply qfree_or_o; apply qfree_disj,
{ intros q hq, rw mem_map at hq,
cases hq with z hz, cases hz with hz1 hz2,
subst hz2, apply qfree_subst_of_qfree,
apply qfree_inf_minus_of_qfree,
apply qfree_of_nqfree hp },
{ intros q hq, rw mem_map at hq,
cases hq with z hz, cases hz with hz1 hz2,
subst hz2, apply qfree_disj,
intros a ha, rw mem_map at ha,
cases ha with k hk, cases hk with hk1 hk2,
subst hk2, apply qfree_subst_of_qfree,
apply qfree_of_nqfree hp }
end
lemma qfree_sqe : ∀ {φ}, nqfree φ → qfree (sqe φ) :=
begin
unfold sqe, intros p h, apply qfree_sqe_core,
apply nqfree_unify, apply h
end
ae29e511470a03c3e732853b181ffae79bee05bb | 367134ba5a65885e863bdc4507601606690974c1 | /src/geometry/manifold/instances/real.lean | f080d84ec789d7cefaf2db0e57572b7657845714 | ["Apache-2.0"] | permissive | kodyvajjha/mathlib | 9bead00e90f68269a313f45f5561766cfd8d5cad | b98af5dd79e13a38d84438b850a2e8858ec21284 | refs/heads/master | 1,624,350,366,310 | 1,615,563,062,000 | 1,615,563,062,000 | 162,666,963 | 0 | 0 | Apache-2.0 | 1,545,367,651,000 | 1,545,367,651,000 | null | UTF-8 | Lean | false | false | 15,930 | lean |
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import geometry.manifold.algebra.smooth_functions
import linear_algebra.finite_dimensional
import analysis.normed_space.inner_product
/-!
# Constructing examples of manifolds over ℝ
We introduce the necessary bits to be able to define manifolds modelled over `ℝ^n`, boundaryless
or with boundary or with corners. As a concrete example, we construct explicitly the manifold with
boundary structure on the real interval `[x, y]`.
More specifically, we introduce
* `model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n)` for the model space used
to define `n`-dimensional real manifolds with boundary
* `model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_quadrant n)` for the model space used
to define `n`-dimensional real manifolds with corners
## Notations
In the locale `manifold`, we introduce the notations
* `𝓡 n` for the identity model with corners on `euclidean_space ℝ (fin n)`
* `𝓡∂ n` for `model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n)`.
For instance, if a manifold `M` is boundaryless, smooth and modelled on `euclidean_space ℝ (fin m)`,
and `N` is smooth with boundary modelled on `euclidean_half_space n`, and `f : M → N` is a smooth
map, then the derivative of `f` can be written simply as `mfderiv (𝓡 m) (𝓡∂ n) f` (as to why the
model with corners can not be implicit, see the discussion in `smooth_manifold_with_corners.lean`).
## Implementation notes
The manifold structure on the interval `[x, y] = Icc x y` requires the assumption `x < y` as a
typeclass. We provide it as `[fact (x < y)]`.
-/
noncomputable theory
open set
open_locale manifold
/--
The half-space in `ℝ^n`, used to model manifolds with boundary. We only define it when
`1 ≤ n`, as the definition only makes sense in this case.
-/
def euclidean_half_space (n : ℕ) [has_zero (fin n)] : Type :=
{x : euclidean_space ℝ (fin n) // 0 ≤ x 0}
/--
The quadrant in `ℝ^n`, used to model manifolds with corners, made of all vectors with nonnegative
coordinates.
-/
def euclidean_quadrant (n : ℕ) : Type := {x : euclidean_space ℝ (fin n) // ∀i:fin n, 0 ≤ x i}
section
/- Register class instances for the euclidean half-space and quadrant. These instances can not be
found by type class resolution without the following reducibility attribute (which is only set in
this section). -/
local attribute [reducible] euclidean_half_space euclidean_quadrant
variable {n : ℕ}
instance [has_zero (fin n)] : topological_space (euclidean_half_space n) := by apply_instance
instance : topological_space (euclidean_quadrant n) := by apply_instance
instance [has_zero (fin n)] : inhabited (euclidean_half_space n) := ⟨⟨0, le_refl _⟩⟩
instance : inhabited (euclidean_quadrant n) := ⟨⟨0, λ i, le_refl _⟩⟩
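/- Illustrative examples (relying on the reducibility attribute above): the origin gives an
explicit point of the half-space and of the quadrant. -/
example : euclidean_half_space 1 := ⟨0, le_refl _⟩
example : euclidean_quadrant 2 := ⟨0, λ i, le_refl _⟩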
lemma range_half_space (n : ℕ) [has_zero (fin n)] :
range (λx : euclidean_half_space n, x.val) = {y | 0 ≤ y 0} :=
by simp
lemma range_quadrant (n : ℕ) :
range (λx : euclidean_quadrant n, x.val) = {y | ∀i:fin n, 0 ≤ y i} :=
by simp
end
/--
Definition of the model with corners `(euclidean_space ℝ (fin n), euclidean_half_space n)`, used as a
model for manifolds with boundary. In the locale `manifold`, use the shortcut `𝓡∂ n`.
-/
def model_with_corners_euclidean_half_space (n : ℕ) [has_zero (fin n)] :
model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n) :=
{ to_fun := λx, x.val,
inv_fun := λx, ⟨λi, if h : i = 0 then max (x i) 0 else x i, by simp [le_refl]⟩,
source := univ,
target := range (λx : euclidean_half_space n, x.val),
map_source' := λx hx, by simpa only [subtype.range_val] using x.property,
map_target' := λx hx, mem_univ _,
left_inv' := λ⟨xval, xprop⟩ hx, begin
rw subtype.mk_eq_mk,
ext1 i,
by_cases hi : i = 0,
{ rw hi, simp only [xprop, dif_pos, max_eq_left] },
{ simp only [hi, dif_neg, not_false_iff] }
end,
right_inv' := λx hx, begin
simp only [mem_set_of_eq, subtype.range_val_subtype] at hx,
ext1 i,
by_cases hi : i = 0,
{ rw hi, simp only [hx, dif_pos, max_eq_left] } ,
{ simp only [hi, dif_neg, not_false_iff] }
end,
source_eq := rfl,
unique_diff' := begin
/- To check that the half-space has the unique differentiability property, we use the criterion
`unique_diff_on_convex`: it suffices to check that it is convex and with nonempty interior. -/
rw range_half_space,
apply unique_diff_on_convex,
show convex {y : euclidean_space ℝ (fin n) | 0 ≤ y 0},
{ assume x y hx hy a b ha hb hab,
simpa only [add_zero] using add_le_add (mul_nonneg ha hx) (mul_nonneg hb hy) },
show (interior {y : euclidean_space ℝ (fin n) | 0 ≤ y 0}).nonempty,
{ use (λi, 1),
rw mem_interior,
refine ⟨(pi (univ : set (fin n)) (λi, (Ioi 0 : set ℝ))), _,
is_open_set_pi finite_univ (λa ha, is_open_Ioi), _⟩,
{ assume x hx,
simp only [pi, forall_prop_of_true, mem_univ, mem_Ioi] at hx,
exact le_of_lt (hx 0) },
{ simp only [pi, forall_prop_of_true, mem_univ, mem_Ioi],
assume i,
exact zero_lt_one } }
end,
continuous_to_fun := continuous_subtype_val,
continuous_inv_fun := begin
apply continuous_subtype_mk,
apply continuous_pi,
assume i,
by_cases h : i = 0,
{ rw h,
simp only [dif_pos],
have : continuous (λx:ℝ, max x 0) := continuous_id.max continuous_const,
exact this.comp (continuous_apply 0) },
{ simp only [h, dif_neg, not_false_iff],
exact continuous_apply i }
end }
/--
Definition of the model with corners `(euclidean_space ℝ (fin n), euclidean_quadrant n)`, used as a
model for manifolds with corners -/
def model_with_corners_euclidean_quadrant (n : ℕ) :
model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_quadrant n) :=
{ to_fun := λx, x.val,
inv_fun := λx, ⟨λi, max (x i) 0, λi, by simp only [le_refl, or_true, le_max_iff]⟩,
source := univ,
target := range (λx : euclidean_quadrant n, x.val),
map_source' := λx hx, by simpa only [subtype.range_val] using x.property,
map_target' := λx hx, mem_univ _,
left_inv' := λ⟨xval, xprop⟩ hx, begin
rw subtype.mk_eq_mk,
ext1 i,
simp only [xprop i, max_eq_left]
end,
right_inv' := λx hx, begin
rw range_quadrant at hx,
ext1 i,
simp only [hx i, max_eq_left]
end,
source_eq := rfl,
unique_diff' := begin
/- To check that the quadrant has the unique differentiability property, we use the criterion
`unique_diff_on_convex`: it suffices to check that it is convex and with nonempty interior. -/
rw range_quadrant,
apply unique_diff_on_convex,
show convex {y : euclidean_space ℝ (fin n) | ∀ (i : fin n), 0 ≤ y i},
{ assume x y hx hy a b ha hb hab i,
simpa only [add_zero] using add_le_add (mul_nonneg ha (hx i)) (mul_nonneg hb (hy i)) },
show (interior {y : euclidean_space ℝ (fin n) | ∀ (i : fin n), 0 ≤ y i}).nonempty,
{ use (λi, 1),
rw mem_interior,
refine ⟨(pi (univ : set (fin n)) (λi, (Ioi 0 : set ℝ))), _,
is_open_set_pi finite_univ (λa ha, is_open_Ioi), _⟩,
{ assume x hx i,
simp only [pi, forall_prop_of_true, mem_univ, mem_Ioi] at hx,
exact le_of_lt (hx i) },
{ simp only [pi, forall_prop_of_true, mem_univ, mem_Ioi],
assume i,
exact zero_lt_one } }
end,
continuous_to_fun := continuous_subtype_val,
continuous_inv_fun := begin
apply continuous_subtype_mk,
apply continuous_pi,
assume i,
have : continuous (λx:ℝ, max x 0) := continuous.max continuous_id continuous_const,
exact this.comp (continuous_apply i)
end }
localized "notation `𝓡 `n := model_with_corners_self ℝ (euclidean_space ℝ (fin n))" in manifold
localized "notation `𝓡∂ `n := model_with_corners_euclidean_half_space n" in manifold
/--
The left chart for the topological space `[x, y]`, defined on `[x,y)` and sending `x` to `0` in
`euclidean_half_space 1`.
-/
def Icc_left_chart (x y : ℝ) [fact (x < y)] :
local_homeomorph (Icc x y) (euclidean_half_space 1) :=
{ source := {z : Icc x y | z.val < y},
target := {z : euclidean_half_space 1 | z.val 0 < y - x},
to_fun := λ(z : Icc x y), ⟨λi, z.val - x, sub_nonneg.mpr z.property.1⟩,
inv_fun := λz, ⟨min (z.val 0 + x) y, by simp [le_refl, z.prop, le_of_lt ‹x < y›]⟩,
map_source' := by simp only [imp_self, sub_lt_sub_iff_right, mem_set_of_eq, forall_true_iff],
map_target' :=
by { simp only [min_lt_iff, mem_set_of_eq], assume z hz, left,
dsimp [-subtype.val_eq_coe] at hz, linarith },
left_inv' := begin
rintros ⟨z, hz⟩ h'z,
simp only [mem_set_of_eq, mem_Icc] at hz h'z,
simp only [hz, min_eq_left, sub_add_cancel]
end,
right_inv' := begin
rintros ⟨z, hz⟩ h'z,
rw subtype.mk_eq_mk,
funext,
dsimp at hz h'z,
have A : x + z 0 ≤ y, by linarith,
rw subsingleton.elim i 0,
simp only [A, add_comm, add_sub_cancel', min_eq_left],
end,
open_source := begin
have : is_open {z : ℝ | z < y} := is_open_Iio,
exact this.preimage continuous_subtype_val
end,
open_target := begin
have : is_open {z : ℝ | z < y - x} := is_open_Iio,
have : is_open {z : euclidean_space ℝ (fin 1) | z 0 < y - x} :=
this.preimage (@continuous_apply (fin 1) (λ _, ℝ) _ 0),
exact this.preimage continuous_subtype_val
end,
continuous_to_fun := begin
apply continuous.continuous_on,
apply continuous_subtype_mk,
have : continuous (λ (z : ℝ) (i : fin 1), z - x) :=
continuous.sub (continuous_pi $ λi, continuous_id) continuous_const,
exact this.comp continuous_subtype_val,
end,
continuous_inv_fun := begin
apply continuous.continuous_on,
apply continuous_subtype_mk,
have A : continuous (λ z : ℝ, min (z + x) y) :=
(continuous_id.add continuous_const).min continuous_const,
have B : continuous (λz : euclidean_space ℝ (fin 1), z 0) := continuous_apply 0,
exact (A.comp B).comp continuous_subtype_val
end }
/--
The right chart for the topological space `[x, y]`, defined on `(x,y]` and sending `y` to `0` in
`euclidean_half_space 1`.
-/
def Icc_right_chart (x y : ℝ) [fact (x < y)] :
local_homeomorph (Icc x y) (euclidean_half_space 1) :=
{ source := {z : Icc x y | x < z.val},
target := {z : euclidean_half_space 1 | z.val 0 < y - x},
to_fun := λ(z : Icc x y), ⟨λi, y - z.val, sub_nonneg.mpr z.property.2⟩,
inv_fun := λz,
⟨max (y - z.val 0) x, by simp [le_refl, z.prop, le_of_lt ‹x < y›, sub_eq_add_neg]⟩,
map_source' := by simp only [imp_self, mem_set_of_eq, sub_lt_sub_iff_left, forall_true_iff],
map_target' :=
by { simp only [lt_max_iff, mem_set_of_eq], assume z hz, left,
dsimp [-subtype.val_eq_coe] at hz, linarith },
left_inv' := begin
rintros ⟨z, hz⟩ h'z,
simp only [mem_set_of_eq, mem_Icc] at hz h'z,
simp only [hz, sub_eq_add_neg, max_eq_left, add_add_neg_cancel'_right, neg_add_rev, neg_neg]
end,
right_inv' := begin
rintros ⟨z, hz⟩ h'z,
rw subtype.mk_eq_mk,
funext,
dsimp at hz h'z,
have A : x ≤ y - z 0, by linarith,
rw subsingleton.elim i 0,
simp only [A, sub_sub_cancel, max_eq_left],
end,
open_source := begin
have : is_open {z : ℝ | x < z} := is_open_Ioi,
exact this.preimage continuous_subtype_val
end,
open_target := begin
have : is_open {z : ℝ | z < y - x} := is_open_Iio,
have : is_open {z : euclidean_space ℝ (fin 1) | z 0 < y - x} :=
this.preimage (@continuous_apply (fin 1) (λ _, ℝ) _ 0),
exact this.preimage continuous_subtype_val
end,
continuous_to_fun := begin
apply continuous.continuous_on,
apply continuous_subtype_mk,
have : continuous (λ (z : ℝ) (i : fin 1), y - z) :=
continuous_const.sub (continuous_pi (λi, continuous_id)),
exact this.comp continuous_subtype_val,
end,
continuous_inv_fun := begin
apply continuous.continuous_on,
apply continuous_subtype_mk,
have A : continuous (λ z : ℝ, max (y - z) x) :=
(continuous_const.sub continuous_id).max continuous_const,
have B : continuous (λz : euclidean_space ℝ (fin 1), z 0) := continuous_apply 0,
exact (A.comp B).comp continuous_subtype_val
end }
/--
Charted space structure on `[x, y]`, using only two charts taking values in `euclidean_half_space 1`.
-/
instance Icc_manifold (x y : ℝ) [fact (x < y)] : charted_space (euclidean_half_space 1) (Icc x y) :=
{ atlas := {Icc_left_chart x y, Icc_right_chart x y},
chart_at := λz, if z.val < y then Icc_left_chart x y else Icc_right_chart x y,
mem_chart_source := λz, begin
by_cases h' : z.val < y,
{ simp only [h', if_true],
exact h' },
{ simp only [h', if_false],
apply lt_of_lt_of_le ‹x < y›,
simpa only [not_lt] using h'}
end,
chart_mem_atlas := λz, by { by_cases h' : z.val < y; simp [h'] } }
/--
The manifold structure on `[x, y]` is smooth.
-/
instance Icc_smooth_manifold (x y : ℝ) [fact (x < y)] :
smooth_manifold_with_corners (𝓡∂ 1) (Icc x y) :=
begin
have M : times_cont_diff_on ℝ ∞ (λz : euclidean_space ℝ (fin 1), - z + (λi, y - x)) univ,
{ rw times_cont_diff_on_univ,
exact times_cont_diff_id.neg.add times_cont_diff_const },
apply smooth_manifold_with_corners_of_times_cont_diff_on,
assume e e' he he',
simp only [atlas, mem_singleton_iff, mem_insert_iff] at he he',
/- We need to check that any composition of two charts gives a `C^∞` function. Each chart can be
either the left chart or the right chart, leaving 4 possibilities that we handle successively.
-/
rcases he with rfl | rfl; rcases he' with rfl | rfl,
{ -- `e = left chart`, `e' = left chart`
exact (mem_groupoid_of_pregroupoid.mpr (symm_trans_mem_times_cont_diff_groupoid _ _ _)).1 },
{ -- `e = left chart`, `e' = right chart`
apply M.congr_mono _ (subset_univ _),
assume z hz,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart, dif_pos,
lt_add_iff_pos_left, max_lt_iff, lt_min_iff, sub_pos, lt_max_iff, subtype.range_val]
with mfld_simps at hz,
have A : 0 ≤ z 0 := hz.2,
have B : z 0 + x ≤ y, by { dsimp only at hz ⊢, linarith, },
ext i,
rw subsingleton.elim i 0,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart, A, B,
pi_Lp.add_apply, dif_pos, min_eq_left, max_eq_left, pi_Lp.neg_apply] with mfld_simps,
ring },
{ -- `e = right chart`, `e' = left chart`
apply M.congr_mono _ (subset_univ _),
assume z hz,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart, dif_pos,
max_lt_iff, sub_pos, subtype.range_val] with mfld_simps at hz,
have A : 0 ≤ z 0 := hz.2,
have B : x ≤ y - z 0, by { have := hz.1.1.1, dsimp at this, linarith },
ext i,
rw subsingleton.elim i 0,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart, A, B,
pi_Lp.add_apply, dif_pos, max_eq_left, pi_Lp.neg_apply] with mfld_simps,
ring },
{ -- `e = right chart`, `e' = right chart`
exact (mem_groupoid_of_pregroupoid.mpr (symm_trans_mem_times_cont_diff_groupoid _ _ _)).1 }
end
/-! Register the manifold structure on `Icc 0 1`, and also its zero and one. -/
section
lemma fact_zero_lt_one : fact ((0 : ℝ) < 1) := zero_lt_one
local attribute [instance] fact_zero_lt_one
instance : charted_space (euclidean_half_space 1) (Icc (0 : ℝ) 1) := by apply_instance
instance : smooth_manifold_with_corners (𝓡∂ 1) (Icc (0 : ℝ) 1) := by apply_instance
end
276ce14573fc5c2166398ca2e4ec6b1f235868fe | 4d2583807a5ac6caaffd3d7a5f646d61ca85d532 | /src/ring_theory/adjoin_root.lean | dbe6d765ad5f17a7e7621b0f06b57fadc5407373 | ["Apache-2.0"] | permissive | AntoineChambert-Loir/mathlib | 64aabb896129885f12296a799818061bc90da1ff | 07be904260ab6e36a5769680b6012f03a4727134 | refs/heads/master | 1,693,187,631,771 | 1,636,719,886,000 | 1,636,719,886,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 16,339 | lean |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Chris Hughes
-/
import data.polynomial.field_division
import linear_algebra.finite_dimensional
import ring_theory.adjoin.basic
import ring_theory.power_basis
import ring_theory.principal_ideal_domain
/-!
# Adjoining roots of polynomials
This file defines the commutative ring `adjoin_root f`, the ring R[X]/(f) obtained from a
commutative ring `R` and a polynomial `f : R[X]`. If furthermore `R` is a field and `f` is
irreducible, the field structure on `adjoin_root f` is constructed.
## Main definitions and results
The main definitions are in the `adjoin_root` namespace.
* `mk f : polynomial R →+* adjoin_root f`, the natural ring homomorphism.
* `of f : R →+* adjoin_root f`, the natural ring homomorphism.
* `root f : adjoin_root f`, the image of X in R[X]/(f).
* `lift (i : R →+* S) (x : S) (h : f.eval₂ i x = 0) : (adjoin_root f) →+* S`, the ring
homomorphism from R[X]/(f) to S extending `i : R →+* S` and sending `X` to `x`.
* `lift_hom (x : S) (hfx : aeval x f = 0) : adjoin_root f →ₐ[R] S`, the algebra
homomorphism from R[X]/(f) to S extending `algebra_map R S` and sending `X` to `x`.
* `equiv : (adjoin_root f →ₐ[F] E) ≃ {x // x ∈ (f.map (algebra_map F E)).roots}`, a
bijection between algebra homomorphisms from `adjoin_root f` and roots of `f` in `E`.
-/
noncomputable theory
open_locale classical
open_locale big_operators
universes u v w
variables {R : Type u} {S : Type v} {K : Type w}
open polynomial ideal
/-- Adjoin a root of a polynomial `f` to a commutative ring `R`. We define the new ring
as the quotient of `polynomial R` by the principal ideal generated by `f`. -/
def adjoin_root [comm_ring R] (f : polynomial R) : Type u :=
ideal.quotient (span {f} : ideal (polynomial R))
namespace adjoin_root
section comm_ring
variables [comm_ring R] (f : polynomial R)
instance : comm_ring (adjoin_root f) := ideal.quotient.comm_ring _
instance : inhabited (adjoin_root f) := ⟨0⟩
instance : decidable_eq (adjoin_root f) := classical.dec_eq _
/-- Ring homomorphism from `R[x]` to `adjoin_root f` sending `X` to the `root`. -/
def mk : polynomial R →+* adjoin_root f := ideal.quotient.mk _
@[elab_as_eliminator]
theorem induction_on {C : adjoin_root f → Prop} (x : adjoin_root f)
(ih : ∀ p : polynomial R, C (mk f p)) : C x :=
quotient.induction_on' x ih
/-- Embedding of the original ring `R` into `adjoin_root f`. -/
def of : R →+* adjoin_root f := (mk f).comp C
instance [comm_ring S] [algebra S R] : algebra S (adjoin_root f) :=
ideal.quotient.algebra S
instance [comm_ring S] [comm_ring K] [has_scalar S K] [algebra S R] [algebra K R]
[is_scalar_tower S K R] :
is_scalar_tower S K (adjoin_root f) :=
submodule.quotient.is_scalar_tower _ _
instance [comm_ring S] [comm_ring K] [algebra S R] [algebra K R] [smul_comm_class S K R] :
smul_comm_class S K (adjoin_root f) :=
submodule.quotient.smul_comm_class _ _
@[simp] lemma algebra_map_eq : algebra_map R (adjoin_root f) = of f := rfl
variables (S)
lemma algebra_map_eq' [comm_ring S] [algebra S R] :
algebra_map S (adjoin_root f) = (of f).comp (algebra_map S R) := rfl
variables {S}
/-- The adjoined root. -/
def root : adjoin_root f := mk f X
variables {f}
instance adjoin_root.has_coe_t : has_coe_t R (adjoin_root f) := ⟨of f⟩
@[simp] lemma mk_eq_mk {g h : polynomial R} : mk f g = mk f h ↔ f ∣ g - h :=
ideal.quotient.eq.trans ideal.mem_span_singleton
@[simp] lemma mk_self : mk f f = 0 :=
quotient.sound' (mem_span_singleton.2 $ by simp)
@[simp] lemma mk_C (x : R) : mk f (C x) = x := rfl
@[simp] lemma mk_X : mk f X = root f := rfl
@[simp] lemma aeval_eq (p : polynomial R) : aeval (root f) p = mk f p :=
polynomial.induction_on p (λ x, by { rw aeval_C, refl })
(λ p q ihp ihq, by rw [alg_hom.map_add, ring_hom.map_add, ihp, ihq])
(λ n x ih, by { rw [alg_hom.map_mul, aeval_C, alg_hom.map_pow, aeval_X,
ring_hom.map_mul, mk_C, ring_hom.map_pow, mk_X], refl })
theorem adjoin_root_eq_top : algebra.adjoin R ({root f} : set (adjoin_root f)) = ⊤ :=
algebra.eq_top_iff.2 $ λ x, induction_on f x $ λ p,
(algebra.adjoin_singleton_eq_range_aeval R (root f)).symm ▸ ⟨p, aeval_eq p⟩
@[simp] lemma eval₂_root (f : polynomial R) : f.eval₂ (of f) (root f) = 0 :=
by rw [← algebra_map_eq, ← aeval_def, aeval_eq, mk_self]
lemma is_root_root (f : polynomial R) : is_root (f.map (of f)) (root f) :=
by rw [is_root, eval_map, eval₂_root]
lemma is_algebraic_root (hf : f ≠ 0) : is_algebraic R (root f) :=
⟨f, hf, eval₂_root f⟩
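/- Illustrative restatement in terms of `aeval`: the adjoined root is indeed a root of `f`. -/
example (f : polynomial R) : aeval (root f) f = 0 :=
by rw [aeval_eq, mk_self]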
variables [comm_ring S]
/-- Lift a ring homomorphism `i : R →+* S` to `adjoin_root f →+* S`. -/
def lift (i : R →+* S) (x : S) (h : f.eval₂ i x = 0) : (adjoin_root f) →+* S :=
begin
apply ideal.quotient.lift _ (eval₂_ring_hom i x),
intros g H,
rcases mem_span_singleton.1 H with ⟨y, hy⟩,
rw [hy, ring_hom.map_mul, coe_eval₂_ring_hom, h, zero_mul]
end
variables {i : R →+* S} {a : S} (h : f.eval₂ i a = 0)
@[simp] lemma lift_mk (g : polynomial R) : lift i a h (mk f g) = g.eval₂ i a :=
ideal.quotient.lift_mk _ _ _
@[simp] lemma lift_root : lift i a h (root f) = a := by rw [root, lift_mk, eval₂_X]
@[simp] lemma lift_of {x : R} : lift i a h x = i x :=
by rw [← mk_C x, lift_mk, eval₂_C]
@[simp] lemma lift_comp_of : (lift i a h).comp (of f) = i :=
ring_hom.ext $ λ _, @lift_of _ _ _ _ _ _ _ h _
variables (f) [algebra R S]
/-- Produce an algebra homomorphism `adjoin_root f →ₐ[R] S` sending `root f` to
a root of `f` in `S`. -/
def lift_hom (x : S) (hfx : aeval x f = 0) : adjoin_root f →ₐ[R] S :=
{ commutes' := λ r, show lift _ _ hfx r = _, from lift_of hfx,
.. lift (algebra_map R S) x hfx }
@[simp] lemma coe_lift_hom (x : S) (hfx : aeval x f = 0) :
(lift_hom f x hfx : adjoin_root f →+* S) = lift (algebra_map R S) x hfx := rfl
@[simp] lemma aeval_alg_hom_eq_zero (ϕ : adjoin_root f →ₐ[R] S) : aeval (ϕ (root f)) f = 0 :=
begin
have h : ϕ.to_ring_hom.comp (of f) = algebra_map R S := ring_hom.ext_iff.mpr (ϕ.commutes),
rw [aeval_def, ←h, ←ring_hom.map_zero ϕ.to_ring_hom, ←eval₂_root f, hom_eval₂],
refl,
end
@[simp] lemma lift_hom_eq_alg_hom (f : polynomial R) (ϕ : adjoin_root f →ₐ[R] S) :
lift_hom f (ϕ (root f)) (aeval_alg_hom_eq_zero f ϕ) = ϕ :=
begin
suffices : ϕ.equalizer (lift_hom f (ϕ (root f)) (aeval_alg_hom_eq_zero f ϕ)) = ⊤,
{ exact (alg_hom.ext (λ x, (set_like.ext_iff.mp (this) x).mpr algebra.mem_top)).symm },
rw [eq_top_iff, ←adjoin_root_eq_top, algebra.adjoin_le_iff, set.singleton_subset_iff],
exact (@lift_root _ _ _ _ _ _ _ (aeval_alg_hom_eq_zero f ϕ)).symm,
end
variables (hfx : aeval a f = 0)
@[simp] lemma lift_hom_mk {g : polynomial R} : lift_hom f a hfx (mk f g) = aeval a g :=
lift_mk hfx g
@[simp] lemma lift_hom_root : lift_hom f a hfx (root f) = a :=
lift_root hfx
@[simp] lemma lift_hom_of {x : R} : lift_hom f a hfx (of f x) = algebra_map _ _ x :=
lift_of hfx
end comm_ring
section irreducible
variables [field K] {f : polynomial K} [irreducible f]
instance is_maximal_span : is_maximal (span {f} : ideal (polynomial K)) :=
principal_ideal_ring.is_maximal_of_irreducible ‹irreducible f›
noncomputable instance field : field (adjoin_root f) :=
{ ..adjoin_root.comm_ring f,
..ideal.quotient.field (span {f} : ideal (polynomial K)) }
lemma coe_injective : function.injective (coe : K → adjoin_root f) :=
(of f).injective
variable (f)
lemma mul_div_root_cancel :
((X - C (root f)) * (f.map (of f) / (X - C (root f))) : polynomial (adjoin_root f)) =
f.map (of f) :=
mul_div_eq_iff_is_root.2 $ is_root_root _
end irreducible
section power_basis
variables [comm_ring R] {g : polynomial R}
lemma is_integral_root' (hg : g.monic) : is_integral R (root g) :=
⟨g, hg, eval₂_root g⟩
/-- `adjoin_root.mod_by_monic_hom` sends the equivalence class of `f` mod `g` to `f %ₘ g`.
This is a well-defined right inverse to `adjoin_root.mk`, see `adjoin_root.mk_left_inverse`. -/
def mod_by_monic_hom [nontrivial R] (hg : g.monic) :
adjoin_root g →ₗ[R] polynomial R :=
(submodule.liftq _ (polynomial.mod_by_monic_hom hg)
(λ f (hf : f ∈ (ideal.span {g}).restrict_scalars R),
(mem_ker_mod_by_monic hg).mpr (ideal.mem_span_singleton.mp hf))).comp $
(submodule.quotient.restrict_scalars_equiv R (ideal.span {g} : ideal (polynomial R)))
.symm.to_linear_map
@[simp] lemma mod_by_monic_hom_mk [nontrivial R] (hg : g.monic) (f : polynomial R) :
mod_by_monic_hom hg (mk g f) = f %ₘ g := rfl
lemma mk_left_inverse [nontrivial R] (hg : g.monic) :
function.left_inverse (mk g) (mod_by_monic_hom hg) :=
λ f, induction_on g f $ λ f, begin
rw [mod_by_monic_hom_mk hg, mk_eq_mk, mod_by_monic_eq_sub_mul_div _ hg,
sub_sub_cancel_left, dvd_neg],
apply dvd_mul_right
end
lemma mk_surjective [nontrivial R] (hg : g.monic) : function.surjective (mk g) :=
(mk_left_inverse hg).surjective
/-- The elements `1, root g, ..., root g ^ (d - 1)` form a basis for `adjoin_root g`,
where `g` is a monic polynomial of degree `d`. -/
@[simps] def power_basis_aux' [nontrivial R] (hg : g.monic) :
basis (fin g.nat_degree) R (adjoin_root g) :=
basis.of_equiv_fun
{ to_fun := λ f i, (mod_by_monic_hom hg f).coeff i,
inv_fun := λ c, mk g $ ∑ (i : fin g.nat_degree), monomial i (c i),
map_add' := λ f₁ f₂, funext $ λ i,
by simp only [(mod_by_monic_hom hg).map_add, coeff_add, pi.add_apply],
map_smul' := λ f₁ f₂, funext $ λ i,
by simp only [(mod_by_monic_hom hg).map_smul, coeff_smul, pi.smul_apply, ring_hom.id_apply],
left_inv := λ f, induction_on g f (λ f, eq.symm $ mk_eq_mk.mpr $
by { simp only [mod_by_monic_hom_mk, sum_mod_by_monic_coeff hg degree_le_nat_degree],
rw [mod_by_monic_eq_sub_mul_div _ hg, sub_sub_cancel],
exact dvd_mul_right _ _ }),
right_inv := λ x, funext $ λ i, begin
simp only [mod_by_monic_hom_mk],
rw [(mod_by_monic_eq_self_iff hg).mpr, finset_sum_coeff, finset.sum_eq_single i];
try { simp only [coeff_monomial, eq_self_iff_true, if_true] },
{ intros j _ hj, exact if_neg (fin.coe_injective.ne hj) },
{ intros, have := finset.mem_univ i, contradiction },
{ refine (degree_sum_le _ _).trans_lt ((finset.sup_lt_iff _).mpr (λ j _, _)),
{ exact bot_lt_iff_ne_bot.mpr (mt degree_eq_bot.mp hg.ne_zero) },
{ refine (degree_monomial_le _ _).trans_lt _,
rw [degree_eq_nat_degree hg.ne_zero, with_bot.coe_lt_coe],
exact j.2 } },
end}
/-- The power basis `1, root g, ..., root g ^ (d - 1)` for `adjoin_root g`,
where `g` is a monic polynomial of degree `d`. -/
@[simps] def power_basis' [nontrivial R] (hg : g.monic) :
power_basis R (adjoin_root g) :=
{ gen := root g,
dim := g.nat_degree,
basis := power_basis_aux' hg,
basis_eq_pow := λ i, begin
simp only [power_basis_aux', basis.coe_of_equiv_fun, linear_equiv.coe_symm_mk],
rw finset.sum_eq_single i,
{ rw [function.update_same, monomial_one_right_eq_X_pow, (mk g).map_pow, mk_X] },
{ intros j _ hj,
rw ← monomial_zero_right _,
convert congr_arg _ (function.update_noteq hj _ _) }, -- Fix `decidable_eq` mismatch
{ intros, have := finset.mem_univ i, contradiction },
end}
variables [field K] {f : polynomial K}
lemma is_integral_root (hf : f ≠ 0) : is_integral K (root f) :=
(is_algebraic_iff_is_integral _).mp (is_algebraic_root hf)
lemma minpoly_root (hf : f ≠ 0) : minpoly K (root f) = f * C (f.leading_coeff⁻¹) :=
begin
have f'_monic : monic _ := monic_mul_leading_coeff_inv hf,
refine (minpoly.unique K _ f'_monic _ _).symm,
{ rw [alg_hom.map_mul, aeval_eq, mk_self, zero_mul] },
intros q q_monic q_aeval,
have commutes : (lift (algebra_map K (adjoin_root f)) (root f) q_aeval).comp (mk q) = mk f,
{ ext,
{ simp only [ring_hom.comp_apply, mk_C, lift_of], refl },
{ simp only [ring_hom.comp_apply, mk_X, lift_root] } },
rw [degree_eq_nat_degree f'_monic.ne_zero, degree_eq_nat_degree q_monic.ne_zero,
with_bot.coe_le_coe, nat_degree_mul hf, nat_degree_C, add_zero],
apply nat_degree_le_of_dvd,
{ have : mk f q = 0, by rw [←commutes, ring_hom.comp_apply, mk_self, ring_hom.map_zero],
rwa [←ideal.mem_span_singleton, ←ideal.quotient.eq_zero_iff_mem] },
{ exact q_monic.ne_zero },
{ rwa [ne.def, C_eq_zero, inv_eq_zero, leading_coeff_eq_zero] },
end
/-- The elements `1, root f, ..., root f ^ (d - 1)` form a basis for `adjoin_root f`,
where `f` is an irreducible polynomial over a field of degree `d`. -/
def power_basis_aux (hf : f ≠ 0) : basis (fin f.nat_degree) K (adjoin_root f) :=
begin
set f' := f * C (f.leading_coeff⁻¹) with f'_def,
have deg_f' : f'.nat_degree = f.nat_degree,
{ rw [nat_degree_mul hf, nat_degree_C, add_zero],
{ rwa [ne.def, C_eq_zero, inv_eq_zero, leading_coeff_eq_zero] } },
have minpoly_eq : minpoly K (root f) = f' := minpoly_root hf,
apply @basis.mk _ _ _ (λ (i : fin f.nat_degree), (root f ^ i.val)),
{ rw [← deg_f', ← minpoly_eq],
exact (is_integral_root hf).linear_independent_pow },
{ rw _root_.eq_top_iff,
rintros y -,
rw [← deg_f', ← minpoly_eq],
apply (is_integral_root hf).mem_span_pow,
obtain ⟨g⟩ := y,
use g,
rw aeval_eq,
refl }
end
/-- The power basis `1, root f, ..., root f ^ (d - 1)` for `adjoin_root f`,
where `f` is an irreducible polynomial over a field of degree `d`. -/
@[simps] def power_basis (hf : f ≠ 0) :
power_basis K (adjoin_root f) :=
{ gen := root f,
dim := f.nat_degree,
basis := power_basis_aux hf,
basis_eq_pow := basis.mk_apply _ _ }
lemma minpoly_power_basis_gen (hf : f ≠ 0) :
minpoly K (power_basis hf).gen = f * C (f.leading_coeff⁻¹) :=
by rw [power_basis_gen, minpoly_root hf]
lemma minpoly_power_basis_gen_of_monic (hf : f.monic) (hf' : f ≠ 0 := hf.ne_zero) :
minpoly K (power_basis hf').gen = f :=
by rw [minpoly_power_basis_gen hf', hf.leading_coeff, inv_one, C.map_one, mul_one]
end power_basis
section equiv
section is_domain
variables [comm_ring R] [is_domain R] [comm_ring S] [is_domain S] [algebra R S]
variables (g : polynomial R) (pb : _root_.power_basis R S)
/-- If `S` is an extension of `R` with power basis `pb` and `g` is a monic polynomial over `R`
such that `pb.gen` has a minimal polynomial `g`, then `S` is isomorphic to `adjoin_root g`.
Compare `power_basis.equiv_of_root`, which would require
`h₂ : aeval pb.gen (minpoly R (root g)) = 0`; that minimal polynomial is not
guaranteed to be identical to `g`. -/
@[simps {fully_applied := ff}]
def equiv' (h₁ : aeval (root g) (minpoly R pb.gen) = 0) (h₂ : aeval pb.gen g = 0) :
adjoin_root g ≃ₐ[R] S :=
{ to_fun := adjoin_root.lift_hom g pb.gen h₂,
inv_fun := pb.lift (root g) h₁,
left_inv := λ x, induction_on g x $ λ f, by rw [lift_hom_mk, pb.lift_aeval, aeval_eq],
right_inv := λ x, begin
obtain ⟨f, hf, rfl⟩ := pb.exists_eq_aeval x,
rw [pb.lift_aeval, aeval_eq, lift_hom_mk]
end,
.. adjoin_root.lift_hom g pb.gen h₂ }
@[simp] lemma equiv'_to_alg_hom
(h₁ : aeval (root g) (minpoly R pb.gen) = 0) (h₂ : aeval pb.gen g = 0) :
(equiv' g pb h₁ h₂).to_alg_hom = adjoin_root.lift_hom g pb.gen h₂ :=
rfl
@[simp] lemma equiv'_symm_to_alg_hom
(h₁ : aeval (root g) (minpoly R pb.gen) = 0) (h₂ : aeval pb.gen g = 0) :
(equiv' g pb h₁ h₂).symm.to_alg_hom = pb.lift (root g) h₁ :=
rfl
end is_domain
section field
variables (K) (L F : Type*) [field F] [field K] [field L] [algebra F K] [algebra F L]
variables (pb : _root_.power_basis F K)
/-- If `L` is a field extension of `F` and `f` is a polynomial over `F` then the set
of maps from `F[x]/(f)` into `L` is in bijection with the set of roots of `f` in `L`. -/
def equiv (f : polynomial F) (hf : f ≠ 0) :
(adjoin_root f →ₐ[F] L) ≃ {x // x ∈ (f.map (algebra_map F L)).roots} :=
(power_basis hf).lift_equiv'.trans ((equiv.refl _).subtype_equiv (λ x,
begin
rw [power_basis_gen, minpoly_root hf, polynomial.map_mul, roots_mul,
polynomial.map_C, roots_C, add_zero, equiv.refl_apply],
{ rw ← polynomial.map_mul, exact map_monic_ne_zero (monic_mul_leading_coeff_inv hf) }
end))
end field
end equiv
end adjoin_root
blob_id: 0eeb42e787acffd921447dd1239439efa830a6c5
directory_id: 4727251e0cd73359b15b664c3170e5d754078599
path: /src/ring_theory/witt_vector/verschiebung.lean
content_id: 164eb82bbd13d90ba41819b72754fb9a1df9b7e6
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: Vierkantor/mathlib | branch_name: refs/heads/master
snapshot_id: 0ea59ac32a3a43c93c44d70f441c4ee810ccceca | revision_id: 83bc3b9ce9b13910b57bda6b56222495ebd31c2f
visit_date: 1,658,323,012,449 | revision_date: 1,652,256,003,000 | committer_date: 1,652,256,003,000
github_id: 209,296,341 | star_events_count: 0 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 1,568,807,655,000 | gha_created_at: 1,568,807,655,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false
length_bytes: 5,941 | extension: lean
content:
/-
Copyright (c) 2020 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import ring_theory.witt_vector.basic
import ring_theory.witt_vector.is_poly
/-!
## The Verschiebung operator
## References
* [Hazewinkel, *Witt Vectors*][Haze09]
* [Commelin and Lewis, *Formalizing the Ring of Witt Vectors*][CL21]
-/
namespace witt_vector
open mv_polynomial
variables {p : ℕ} {R S : Type*} [hp : fact p.prime] [comm_ring R] [comm_ring S]
local notation `𝕎` := witt_vector p -- type as `\bbW`
noncomputable theory
/--
`verschiebung_fun x` shifts the coefficients of `x` up by one,
by inserting 0 as the 0th coefficient.
`x.coeff i` then becomes `(verschiebung_fun x).coeff (i + 1)`.
`verschiebung_fun` is the underlying function of the additive monoid hom `witt_vector.verschiebung`.
-/
def verschiebung_fun (x : 𝕎 R) : 𝕎 R :=
mk p $ λ n, if n = 0 then 0 else x.coeff (n - 1)
lemma verschiebung_fun_coeff (x : 𝕎 R) (n : ℕ) :
(verschiebung_fun x).coeff n = if n = 0 then 0 else x.coeff (n - 1) :=
by rw [verschiebung_fun, coeff_mk]
lemma verschiebung_fun_coeff_zero (x : 𝕎 R) :
(verschiebung_fun x).coeff 0 = 0 :=
by rw [verschiebung_fun_coeff, if_pos rfl]
@[simp] lemma verschiebung_fun_coeff_succ (x : 𝕎 R) (n : ℕ) :
(verschiebung_fun x).coeff n.succ = x.coeff n := rfl
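/- Hedged illustration (not in the original file): `verschiebung_fun` inserts a `0` in position `0`
   and shifts every other coefficient up by one, exactly as described above. -/
example (x : 𝕎 R) (n : ℕ) :
  (verschiebung_fun x).coeff 0 = 0 ∧ (verschiebung_fun x).coeff (n + 1) = x.coeff n :=
⟨verschiebung_fun_coeff_zero x, verschiebung_fun_coeff_succ x n⟩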
include hp
@[ghost_simps] lemma ghost_component_zero_verschiebung_fun (x : 𝕎 R) :
ghost_component 0 (verschiebung_fun x) = 0 :=
by rw [ghost_component_apply, aeval_witt_polynomial, finset.range_one, finset.sum_singleton,
verschiebung_fun_coeff_zero, pow_zero, pow_zero, pow_one, one_mul]
@[ghost_simps] lemma ghost_component_verschiebung_fun (x : 𝕎 R) (n : ℕ) :
ghost_component (n + 1) (verschiebung_fun x) = p * ghost_component n x :=
begin
simp only [ghost_component_apply, aeval_witt_polynomial],
rw [finset.sum_range_succ', verschiebung_fun_coeff, if_pos rfl, zero_pow (pow_pos hp.1.pos _),
mul_zero, add_zero, finset.mul_sum, finset.sum_congr rfl],
rintro i -,
simp only [pow_succ, mul_assoc, verschiebung_fun_coeff, if_neg (nat.succ_ne_zero i),
nat.succ_sub_succ, tsub_zero]
end
omit hp
/--
The 0th Verschiebung polynomial is 0. For `n > 0`, the `n`th Verschiebung polynomial is the
variable `X (n-1)`.
-/
def verschiebung_poly (n : ℕ) : mv_polynomial ℕ ℤ :=
if n = 0 then 0 else X (n-1)
@[simp] lemma verschiebung_poly_zero :
verschiebung_poly 0 = 0 := rfl
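/- Hedged illustration (not in the original file): for the index `1`, the Verschiebung polynomial
   is the variable `X 0`, matching the description above. -/
example : verschiebung_poly 1 = X 0 := rfl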
lemma aeval_verschiebung_poly' (x : 𝕎 R) (n : ℕ) :
aeval x.coeff (verschiebung_poly n) = (verschiebung_fun x).coeff n :=
begin
cases n,
{ simp only [verschiebung_poly, verschiebung_fun_coeff_zero, if_pos rfl, alg_hom.map_zero] },
{ rw [verschiebung_poly, verschiebung_fun_coeff_succ, if_neg (n.succ_ne_zero),
aeval_X, nat.succ_eq_add_one, add_tsub_cancel_right] }
end
variable (p)
/--
`witt_vector.verschiebung` has polynomial structure given by `witt_vector.verschiebung_poly`.
-/
@[is_poly] lemma verschiebung_fun_is_poly : is_poly p (λ R _Rcr, @verschiebung_fun p R _Rcr) :=
begin
use verschiebung_poly,
simp only [aeval_verschiebung_poly', eq_self_iff_true, forall_3_true_iff]
end
variable {p}
include hp
/--
`verschiebung x` shifts the coefficients of `x` up by one, by inserting 0 as the 0th coefficient.
`x.coeff i` then becomes `(verschiebung x).coeff (i + 1)`.
This is an additive monoid hom with underlying function `verschiebung_fun`.
-/
noncomputable
def verschiebung : 𝕎 R →+ 𝕎 R :=
{ to_fun := verschiebung_fun,
map_zero' :=
by ext ⟨⟩; rw [verschiebung_fun_coeff]; simp only [if_true, eq_self_iff_true, zero_coeff, if_t_t],
map_add' := by { ghost_calc _ _, rintro ⟨⟩; ghost_simp } }
omit hp
/-- `witt_vector.verschiebung` is a polynomial function. -/
@[is_poly] lemma verschiebung_is_poly : is_poly p (λ R _Rcr, @verschiebung p R hp _Rcr) :=
verschiebung_fun_is_poly p
include hp
/-- verschiebung is a natural transformation -/
@[simp] lemma map_verschiebung (f : R →+* S) (x : 𝕎 R) :
map f (verschiebung x) = verschiebung (map f x) :=
by { ext ⟨-, -⟩, exact f.map_zero, refl }
@[ghost_simps] lemma ghost_component_zero_verschiebung (x : 𝕎 R) :
ghost_component 0 (verschiebung x) = 0 :=
ghost_component_zero_verschiebung_fun _
@[ghost_simps] lemma ghost_component_verschiebung (x : 𝕎 R) (n : ℕ) :
ghost_component (n + 1) (verschiebung x) = p * ghost_component n x :=
ghost_component_verschiebung_fun _ _
@[simp] lemma verschiebung_coeff_zero (x : 𝕎 R) :
(verschiebung x).coeff 0 = 0 := rfl
-- simp_nf complains if this is simp
lemma verschiebung_coeff_add_one (x : 𝕎 R) (n : ℕ) :
(verschiebung x).coeff (n + 1) = x.coeff n := rfl
@[simp] lemma verschiebung_coeff_succ (x : 𝕎 R) (n : ℕ) :
(verschiebung x).coeff n.succ = x.coeff n := rfl
lemma aeval_verschiebung_poly (x : 𝕎 R) (n : ℕ) :
aeval x.coeff (verschiebung_poly n) = (verschiebung x).coeff n :=
aeval_verschiebung_poly' x n
@[simp]
lemma bind₁_verschiebung_poly_witt_polynomial (n : ℕ) :
bind₁ verschiebung_poly (witt_polynomial p ℤ n) =
if n = 0 then 0 else p * witt_polynomial p ℤ (n-1) :=
begin
apply mv_polynomial.funext,
intro x,
split_ifs with hn,
{ simp only [hn, verschiebung_poly_zero, witt_polynomial_zero, bind₁_X_right] },
{ obtain ⟨n, rfl⟩ := nat.exists_eq_succ_of_ne_zero hn,
rw [nat.succ_eq_add_one, add_tsub_cancel_right, ring_hom.map_mul,
map_nat_cast, hom_bind₁],
calc _
= ghost_component (n + 1) (verschiebung $ mk p x) : _
... = _ : _,
{ apply eval₂_hom_congr (ring_hom.ext_int _ _) _ rfl,
simp only [←aeval_verschiebung_poly, coeff_mk],
funext k,
exact eval₂_hom_congr (ring_hom.ext_int _ _) rfl rfl },
{ rw [ghost_component_verschiebung],
congr' 1,
exact eval₂_hom_congr (ring_hom.ext_int _ _) rfl rfl } }
end
end witt_vector
blob_id: 589931076e0ebc29a5c0e967893ce0f3b8df29ca
directory_id: cf39355caa609c0f33405126beee2739aa3cb77e
path: /library/smt/default.lean
content_id: b8b2afb7bd81dba6b6147867d3063a8cdacbb05d
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: leanprover-community/lean | branch_name: refs/heads/master
snapshot_id: 12b87f69d92e614daea8bcc9d4de9a9ace089d0e | revision_id: cce7990ea86a78bdb383e38ed7f9b5ba93c60ce0
visit_date: 1,687,508,156,644 | revision_date: 1,684,951,104,000 | committer_date: 1,684,951,104,000
github_id: 169,960,991 | star_events_count: 457 | fork_events_count: 107
gha_license_id: Apache-2.0 | gha_event_created_at: 1,686,744,372,000 | gha_created_at: 1,549,790,268,000 | gha_language: C++
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false
length_bytes: 197 | extension: lean
content:
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Daniel Selsam
-/
import smt.arith smt.array smt.prove
blob_id: 1968e18e33422afe7cee16eeb600c0e26c83d37e
directory_id: 853df553b1d6ca524e3f0a79aedd32dde5d27ec3
path: /src/order/filter/basic.lean
content_id: 1f948f34a5455f243b9cb77df255a02cd6a34e2c
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: DanielFabian/mathlib | branch_name: refs/heads/master
snapshot_id: efc3a50b5dde303c59eeb6353ef4c35a345d7112 | revision_id: f520d07eba0c852e96fe26da71d85bf6d40fcc2a
visit_date: 1,668,739,922,971 | revision_date: 1,595,201,756,000 | committer_date: 1,595,201,756,000
github_id: 279,469,476 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 1,594,696,604,000 | gha_created_at: 1,594,696,604,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false
length_bytes: 95,374 | extension: lean
content:
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Jeremy Avigad
-/
import order.zorn
import order.copy
import data.set.finite
import tactic.monotonicity
/-!
# Theory of filters on sets
## Main definitions
* `filter` : filters on a set;
* `at_top`, `at_bot`, `cofinite`, `principal` : specific filters;
* `map`, `comap`, `prod` : operations on filters;
* `tendsto` : limit with respect to filters;
* `eventually` : `f.eventually p` means `{x | p x} ∈ f`;
* `frequently` : `f.frequently p` means `{x | ¬p x} ∉ f`.
* `filter_upwards [h₁, ..., hₙ]` : takes a list of proofs `hᵢ : sᵢ ∈ f`, and replaces a goal `s ∈ f`
with `∀ x, x ∈ s₁ → ... → x ∈ sₙ → x ∈ s`;
Filters on a type `X` are sets of sets of `X` satisfying three conditions. They are mostly used to
abstract two related kinds of ideas:
* *limits*, including finite or infinite limits of sequences, finite or infinite limits of functions
at a point or at infinity, etc...
* *things happening eventually*, including things happening for large enough `n : ℕ`, or near enough
a point `x`, or for close enough pairs of points, or things happening almost everywhere in the
sense of measure theory. Dually, filters can also express the idea of *things happening often*:
for arbitrarily large `n`, or at points in any neighborhood of a given point, etc...
In this file, we define the type `filter X` of filters on `X`, and endow it with a complete lattice
structure. This structure is lifted from the lattice structure on `set (set X)` using the Galois
insertion which maps a filter to its elements in one direction, and an arbitrary set of sets to
the smallest filter containing it in the other direction.
We also prove `filter` is a monadic functor, with a push-forward operation
`filter.map` and a pull-back operation `filter.comap` that form a Galois connection for the
order on filters.
Finally we describe a product operation `filter X → filter Y → filter (X × Y)`.
The examples of filters appearing in the description of the two motivating ideas are:
* `(at_top : filter ℕ)` : made of sets of `ℕ` containing `{n | n ≥ N}` for some `N`
* `𝓝 x` : made of neighborhoods of `x` in a topological space (defined in topology.basic)
* `𝓤 X` : made of entourages of a uniform space (those spaces are generalizations of metric spaces
defined in topology.uniform_space.basic)
* `μ.a_e` : made of sets whose complement has zero measure with respect to `μ` (defined in
measure_theory.measure_space)
The general notion of limit of a map with respect to filters on the source and target types
is `filter.tendsto`. It is defined in terms of the order and the push-forward operation.
The predicate "happening eventually" is `filter.eventually`, and "happening often" is
`filter.frequently`, whose definitions are immediate after `filter` is defined (but they come
rather late in this file in order to immediately relate them to the lattice structure).
For instance, anticipating on topology.basic, the statement: "if a sequence `u` converges to
some `x` and `u n` belongs to a set `M` for `n` large enough then `x` is in the closure of
`M`" is formalized as: `tendsto u at_top (𝓝 x) → (∀ᶠ n in at_top, u n ∈ M) → x ∈ closure M`,
which is a special case of `mem_closure_of_tendsto` from topology.basic.
## Notations
* `∀ᶠ x in f, p x` : `f.eventually p`;
* `∃ᶠ x in f, p x` : `f.frequently p`;
* `f =ᶠ[l] g` : `∀ᶠ x in l, f x = g x`;
* `f ≤ᶠ[l] g` : `∀ᶠ x in l, f x ≤ g x`;
* `f ×ᶠ g` : `filter.prod f g`, localized in `filter`;
* `𝓟 s` : `principal s`, localized in `filter`.
## References
* [N. Bourbaki, *General Topology*][bourbaki1966]
Important note: Bourbaki requires that a filter on `X` cannot contain all sets of `X`, which
we do *not* require. This gives `filter X` better formal properties, in particular a bottom element
`⊥` for its lattice structure, at the cost of including the assumption
`f ≠ ⊥` in a number of lemmas and definitions.
-/
open set
universes u v w x y
open_locale classical
/-- A filter `F` on a type `α` is a collection of sets of `α` which contains the whole `α`,
is upwards-closed, and is stable under intersection. We do not forbid this collection to be
all sets of `α`. -/
structure filter (α : Type*) :=
(sets : set (set α))
(univ_sets : set.univ ∈ sets)
(sets_of_superset {x y} : x ∈ sets → x ⊆ y → y ∈ sets)
(inter_sets {x y} : x ∈ sets → y ∈ sets → x ∩ y ∈ sets)
/-- If `F` is a filter on `α`, and `U` a subset of `α` then we can write `U ∈ F` as on paper. -/
@[reducible]
instance {α : Type*}: has_mem (set α) (filter α) := ⟨λ U F, U ∈ F.sets⟩
namespace filter
variables {α : Type u} {f g : filter α} {s t : set α}
lemma filter_eq : ∀{f g : filter α}, f.sets = g.sets → f = g
| ⟨a, _, _, _⟩ ⟨._, _, _, _⟩ rfl := rfl
lemma filter_eq_iff : f = g ↔ f.sets = g.sets :=
⟨congr_arg _, filter_eq⟩
protected lemma ext_iff : f = g ↔ ∀ s, s ∈ f ↔ s ∈ g :=
by rw [filter_eq_iff, ext_iff]
@[ext]
protected lemma ext : (∀ s, s ∈ f ↔ s ∈ g) → f = g :=
filter.ext_iff.2
lemma univ_mem_sets : univ ∈ f :=
f.univ_sets
lemma mem_sets_of_superset : ∀{x y : set α}, x ∈ f → x ⊆ y → y ∈ f :=
f.sets_of_superset
lemma inter_mem_sets : ∀{s t}, s ∈ f → t ∈ f → s ∩ t ∈ f :=
f.inter_sets
lemma univ_mem_sets' (h : ∀ a, a ∈ s) : s ∈ f :=
mem_sets_of_superset univ_mem_sets (assume x _, h x)
lemma mp_sets (hs : s ∈ f) (h : {x | x ∈ s → x ∈ t} ∈ f) : t ∈ f :=
mem_sets_of_superset (inter_mem_sets hs h) $ assume x ⟨h₁, h₂⟩, h₂ h₁
lemma congr_sets (h : {x | x ∈ s ↔ x ∈ t} ∈ f) : s ∈ f ↔ t ∈ f :=
⟨λ hs, mp_sets hs (mem_sets_of_superset h (λ x, iff.mp)),
λ hs, mp_sets hs (mem_sets_of_superset h (λ x, iff.mpr))⟩
lemma Inter_mem_sets {β : Type v} {s : β → set α} {is : set β} (hf : finite is) :
(∀i∈is, s i ∈ f) → (⋂i∈is, s i) ∈ f :=
finite.induction_on hf
(assume hs, by simp only [univ_mem_sets, mem_empty_eq, Inter_neg, Inter_univ, not_false_iff])
(assume i is _ hf hi hs,
have h₁ : s i ∈ f, from hs i (by simp),
have h₂ : (⋂x∈is, s x) ∈ f, from hi $ assume a ha, hs _ $ by simp only [ha, mem_insert_iff, or_true],
by simp [inter_mem_sets h₁ h₂])
lemma sInter_mem_sets_of_finite {s : set (set α)} (hfin : finite s) (h_in : ∀ U ∈ s, U ∈ f) :
⋂₀ s ∈ f :=
by { rw sInter_eq_bInter, exact Inter_mem_sets hfin h_in }
lemma Inter_mem_sets_of_fintype {β : Type v} {s : β → set α} [fintype β] (h : ∀i, s i ∈ f) :
(⋂i, s i) ∈ f :=
by simpa using Inter_mem_sets finite_univ (λi hi, h i)
lemma exists_sets_subset_iff : (∃t ∈ f, t ⊆ s) ↔ s ∈ f :=
⟨assume ⟨t, ht, ts⟩, mem_sets_of_superset ht ts, assume hs, ⟨s, hs, subset.refl _⟩⟩
lemma monotone_mem_sets {f : filter α} : monotone (λs, s ∈ f) :=
assume s t hst h, mem_sets_of_superset h hst
end filter
namespace tactic.interactive
open tactic interactive
/-- `filter_upwards [h1, ⋯, hn]` replaces a goal of the form `s ∈ f`
and terms `h1 : t1 ∈ f, ⋯, hn : tn ∈ f` with `∀x, x ∈ t1 → ⋯ → x ∈ tn → x ∈ s`.
`filter_upwards [h1, ⋯, hn] e` is a short form for `{ filter_upwards [h1, ⋯, hn], exact e }`.
-/
meta def filter_upwards
(s : parse types.pexpr_list)
(e' : parse $ optional types.texpr) : tactic unit :=
do
s.reverse.mmap (λ e, eapplyc `filter.mp_sets >> eapply e),
eapplyc `filter.univ_mem_sets',
match e' with
| some e := interactive.exact e
| none := skip
end
end tactic.interactive
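/- Hedged usage sketch (not in the original file) for `filter_upwards`, following the behaviour
   described in its doc string: the goal `s ∩ t ∈ f` is reduced to a pointwise implication. -/
example {α : Type*} {f : filter α} {s t : set α} (hs : s ∈ f) (ht : t ∈ f) : s ∩ t ∈ f :=
by filter_upwards [hs, ht] assume x hxs hxt, ⟨hxs, hxt⟩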
namespace filter
variables {α : Type u} {β : Type v} {γ : Type w} {ι : Sort x}
section principal
/-- The principal filter of `s` is the collection of all supersets of `s`. -/
def principal (s : set α) : filter α :=
{ sets := {t | s ⊆ t},
univ_sets := subset_univ s,
sets_of_superset := assume x y hx hy, subset.trans hx hy,
inter_sets := assume x y, subset_inter }
localized "notation `𝓟` := filter.principal" in filter
instance : inhabited (filter α) :=
⟨𝓟 ∅⟩
@[simp] lemma mem_principal_sets {s t : set α} : s ∈ 𝓟 t ↔ t ⊆ s := iff.rfl
lemma mem_principal_self (s : set α) : s ∈ 𝓟 s := subset.refl _
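/- Hedged illustration (not in the original file): the principal filter `𝓟 s` consists exactly of
   the supersets of `s`. -/
example {s t : set α} (h : s ⊆ t) : t ∈ 𝓟 s := mem_principal_sets.2 h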
end principal
open_locale filter
section join
/-- The join of a filter of filters is defined by the relation `s ∈ join f ↔ {t | s ∈ t} ∈ f`. -/
def join (f : filter (filter α)) : filter α :=
{ sets := {s | {t : filter α | s ∈ t} ∈ f},
univ_sets := by simp only [univ_mem_sets, mem_set_of_eq]; exact univ_mem_sets,
sets_of_superset := assume x y hx xy,
mem_sets_of_superset hx $ assume f h, mem_sets_of_superset h xy,
inter_sets := assume x y hx hy,
mem_sets_of_superset (inter_mem_sets hx hy) $ assume f ⟨h₁, h₂⟩, inter_mem_sets h₁ h₂ }
@[simp] lemma mem_join_sets {s : set α} {f : filter (filter α)} :
s ∈ join f ↔ {t | s ∈ t} ∈ f := iff.rfl
end join
section lattice
instance : partial_order (filter α) :=
{ le := λf g, ∀ ⦃U : set α⦄, U ∈ g → U ∈ f,
le_antisymm := assume a b h₁ h₂, filter_eq $ subset.antisymm h₂ h₁,
le_refl := assume a, subset.refl _,
le_trans := assume a b c h₁ h₂, subset.trans h₂ h₁ }
theorem le_def {f g : filter α} : f ≤ g ↔ ∀ x ∈ g, x ∈ f := iff.rfl
/-- `generate_sets g s`: `s` is in the filter closure of `g`. -/
inductive generate_sets (g : set (set α)) : set α → Prop
| basic {s : set α} : s ∈ g → generate_sets s
| univ : generate_sets univ
| superset {s t : set α} : generate_sets s → s ⊆ t → generate_sets t
| inter {s t : set α} : generate_sets s → generate_sets t → generate_sets (s ∩ t)
/-- `generate g` is the smallest filter containing the sets `g`. -/
def generate (g : set (set α)) : filter α :=
{ sets := generate_sets g,
univ_sets := generate_sets.univ,
sets_of_superset := assume x y, generate_sets.superset,
inter_sets := assume s t, generate_sets.inter }
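/- Hedged illustration (not in the original file): every set of the generating family belongs to
   the generated filter. -/
example {g : set (set α)} {s : set α} (h : s ∈ g) : s ∈ generate g := generate_sets.basic h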
lemma sets_iff_generate {s : set (set α)} {f : filter α} : f ≤ filter.generate s ↔ s ⊆ f.sets :=
iff.intro
(assume h u hu, h $ generate_sets.basic $ hu)
(assume h u hu, hu.rec_on h univ_mem_sets
(assume x y _ hxy hx, mem_sets_of_superset hx hxy)
(assume x y _ _ hx hy, inter_mem_sets hx hy))
lemma mem_generate_iff (s : set $ set α) {U : set α} : U ∈ generate s ↔ ∃ t ⊆ s, finite t ∧ ⋂₀ t ⊆ U :=
begin
split ; intro h,
{ induction h with V V_in V W V_in hVW hV V W V_in W_in hV hW,
{ use {V},
simp [V_in] },
{ use ∅,
simp [subset.refl, univ] },
{ rcases hV with ⟨t, hts, htfin, hinter⟩,
exact ⟨t, hts, htfin, subset.trans hinter hVW⟩ },
{ rcases hV with ⟨t, hts, htfin, htinter⟩,
rcases hW with ⟨z, hzs, hzfin, hzinter⟩,
refine ⟨t ∪ z, union_subset hts hzs, htfin.union hzfin, _⟩,
rw sInter_union,
exact inter_subset_inter htinter hzinter } },
{ rcases h with ⟨t, ts, tfin, h⟩,
apply generate_sets.superset _ h,
revert ts,
apply finite.induction_on tfin,
{ intro h,
rw sInter_empty,
exact generate_sets.univ },
{ intros V r hV rfin hinter h,
cases insert_subset.mp h with V_in r_sub,
rw [insert_eq V r, sInter_union],
apply generate_sets.inter _ (hinter r_sub),
rw sInter_singleton,
exact generate_sets.basic V_in } },
end
/-- `mk_of_closure s hs` constructs a filter on `α` whose elements set is exactly
`s : set (set α)`, provided one gives the assumption `hs : (generate s).sets = s`. -/
protected def mk_of_closure (s : set (set α)) (hs : (generate s).sets = s) : filter α :=
{ sets := s,
univ_sets := hs ▸ (univ_mem_sets : univ ∈ generate s),
sets_of_superset := assume x y, hs ▸ (mem_sets_of_superset : x ∈ generate s → x ⊆ y → y ∈ generate s),
inter_sets := assume x y, hs ▸ (inter_mem_sets : x ∈ generate s → y ∈ generate s → x ∩ y ∈ generate s) }
lemma mk_of_closure_sets {s : set (set α)} {hs : (generate s).sets = s} :
filter.mk_of_closure s hs = generate s :=
filter.ext $ assume u,
show u ∈ (filter.mk_of_closure s hs).sets ↔ u ∈ (generate s).sets, from hs.symm ▸ iff.rfl
/-- Galois insertion from sets of sets into filters. -/
def gi_generate (α : Type*) :
@galois_insertion (set (set α)) (order_dual (filter α)) _ _ filter.generate filter.sets :=
{ gc := assume s f, sets_iff_generate,
le_l_u := assume f u h, generate_sets.basic h,
choice := λs hs, filter.mk_of_closure s (le_antisymm hs $ sets_iff_generate.1 $ le_refl _),
choice_eq := assume s hs, mk_of_closure_sets }
/-- The infimum of filters is the filter generated by intersections
of elements of the two filters. -/
instance : has_inf (filter α) := ⟨λf g : filter α,
{ sets := {s | ∃ (a ∈ f) (b ∈ g), a ∩ b ⊆ s },
univ_sets := ⟨_, univ_mem_sets, _, univ_mem_sets, inter_subset_left _ _⟩,
sets_of_superset := assume x y ⟨a, ha, b, hb, h⟩ xy, ⟨a, ha, b, hb, subset.trans h xy⟩,
inter_sets := assume x y ⟨a, ha, b, hb, hx⟩ ⟨c, hc, d, hd, hy⟩,
⟨_, inter_mem_sets ha hc, _, inter_mem_sets hb hd,
calc a ∩ c ∩ (b ∩ d) = (a ∩ b) ∩ (c ∩ d) : by ac_refl
... ⊆ x ∩ y : inter_subset_inter hx hy⟩ }⟩
@[simp] lemma mem_inf_sets {f g : filter α} {s : set α} :
s ∈ f ⊓ g ↔ ∃t₁∈f, ∃t₂∈g, t₁ ∩ t₂ ⊆ s := iff.rfl
lemma mem_inf_sets_of_left {f g : filter α} {s : set α} (h : s ∈ f) : s ∈ f ⊓ g :=
⟨s, h, univ, univ_mem_sets, inter_subset_left _ _⟩
lemma mem_inf_sets_of_right {f g : filter α} {s : set α} (h : s ∈ g) : s ∈ f ⊓ g :=
⟨univ, univ_mem_sets, s, h, inter_subset_right _ _⟩
lemma inter_mem_inf_sets {α : Type u} {f g : filter α} {s t : set α}
(hs : s ∈ f) (ht : t ∈ g) : s ∩ t ∈ f ⊓ g :=
inter_mem_sets (mem_inf_sets_of_left hs) (mem_inf_sets_of_right ht)
instance : has_top (filter α) :=
⟨{ sets := {s | ∀x, x ∈ s},
univ_sets := assume x, mem_univ x,
sets_of_superset := assume x y hx hxy a, hxy (hx a),
inter_sets := assume x y hx hy a, mem_inter (hx _) (hy _) }⟩
lemma mem_top_sets_iff_forall {s : set α} : s ∈ (⊤ : filter α) ↔ (∀x, x ∈ s) :=
iff.rfl
@[simp] lemma mem_top_sets {s : set α} : s ∈ (⊤ : filter α) ↔ s = univ :=
by rw [mem_top_sets_iff_forall, eq_univ_iff_forall]
section complete_lattice
/- We lift the complete lattice along the Galois connection `generate` / `sets`. Unfortunately,
we want to have different definitional equalities for the lattice operations. So we define them
upfront and change the lattice operations for the complete lattice instance. -/
private def original_complete_lattice : complete_lattice (filter α) :=
@order_dual.complete_lattice _ (gi_generate α).lift_complete_lattice
local attribute [instance] original_complete_lattice
instance : complete_lattice (filter α) := original_complete_lattice.copy
/- le -/ filter.partial_order.le rfl
/- top -/ (filter.has_top).1
(top_unique $ assume s hs, by have := univ_mem_sets ; finish)
/- bot -/ _ rfl
/- sup -/ _ rfl
/- inf -/ (filter.has_inf).1
begin
ext f g : 2,
exact le_antisymm
(le_inf (assume s, mem_inf_sets_of_left) (assume s, mem_inf_sets_of_right))
(assume s ⟨a, ha, b, hb, hs⟩, show s ∈ complete_lattice.inf f g, from
mem_sets_of_superset (inter_mem_sets
(@inf_le_left (filter α) _ _ _ _ ha)
(@inf_le_right (filter α) _ _ _ _ hb)) hs)
end
/- Sup -/ (join ∘ 𝓟) (by ext s x; exact (@mem_bInter_iff _ _ s filter.sets x).symm)
/- Inf -/ _ rfl
end complete_lattice
lemma bot_sets_eq : (⊥ : filter α).sets = univ := rfl
lemma sup_sets_eq {f g : filter α} : (f ⊔ g).sets = f.sets ∩ g.sets :=
(gi_generate α).gc.u_inf
lemma Sup_sets_eq {s : set (filter α)} : (Sup s).sets = (⋂f∈s, (f:filter α).sets) :=
(gi_generate α).gc.u_Inf
lemma supr_sets_eq {f : ι → filter α} : (supr f).sets = (⋂i, (f i).sets) :=
(gi_generate α).gc.u_infi
lemma generate_empty : filter.generate ∅ = (⊤ : filter α) :=
(gi_generate α).gc.l_bot
lemma generate_univ : filter.generate univ = (⊥ : filter α) :=
mk_of_closure_sets.symm
lemma generate_union {s t : set (set α)} :
filter.generate (s ∪ t) = filter.generate s ⊓ filter.generate t :=
(gi_generate α).gc.l_sup
lemma generate_Union {s : ι → set (set α)} :
filter.generate (⋃ i, s i) = (⨅ i, filter.generate (s i)) :=
(gi_generate α).gc.l_supr
@[simp] lemma mem_bot_sets {s : set α} : s ∈ (⊥ : filter α) :=
trivial
@[simp] lemma mem_sup_sets {f g : filter α} {s : set α} :
s ∈ f ⊔ g ↔ s ∈ f ∧ s ∈ g :=
iff.rfl
@[simp] lemma mem_Sup_sets {x : set α} {s : set (filter α)} :
x ∈ Sup s ↔ (∀f∈s, x ∈ (f:filter α)) :=
iff.rfl
@[simp] lemma mem_supr_sets {x : set α} {f : ι → filter α} :
x ∈ supr f ↔ (∀i, x ∈ f i) :=
by simp only [supr_sets_eq, iff_self, mem_Inter]
lemma infi_eq_generate (s : ι → filter α) : infi s = generate (⋃ i, (s i).sets) :=
show generate _ = generate _, from congr_arg _ supr_range
lemma mem_infi_iff {ι} {s : ι → filter α} {U : set α} : (U ∈ ⨅ i, s i) ↔
∃ I : set ι, finite I ∧ ∃ V : {i | i ∈ I} → set α, (∀ i, V i ∈ s i) ∧ (⋂ i, V i) ⊆ U :=
begin
rw [infi_eq_generate, mem_generate_iff],
split,
{ rintro ⟨t, tsub, tfin, tinter⟩,
rcases eq_finite_Union_of_finite_subset_Union tfin tsub with ⟨I, Ifin, σ, σfin, σsub, rfl⟩,
rw sInter_Union at tinter,
let V := λ i, ⋂₀ σ i,
have V_in : ∀ i, V i ∈ s i,
{ rintro ⟨i, i_in⟩,
apply sInter_mem_sets_of_finite (σfin _),
apply σsub },
exact ⟨I, Ifin, V, V_in, tinter⟩ },
{ rintro ⟨I, Ifin, V, V_in, h⟩,
refine ⟨range V, _, _, h⟩,
{ rintro _ ⟨i, rfl⟩,
rw mem_Union,
use [i, V_in i] },
{ haveI : fintype {i : ι | i ∈ I} := finite.fintype Ifin,
exact finite_range _ } },
end
@[simp] lemma le_principal_iff {s : set α} {f : filter α} : f ≤ 𝓟 s ↔ s ∈ f :=
show (∀{t}, s ⊆ t → t ∈ f) ↔ s ∈ f,
from ⟨assume h, h (subset.refl s), assume hs t ht, mem_sets_of_superset hs ht⟩
lemma principal_mono {s t : set α} : 𝓟 s ≤ 𝓟 t ↔ s ⊆ t :=
by simp only [le_principal_iff, iff_self, mem_principal_sets]
@[mono] lemma monotone_principal : monotone (𝓟 : set α → filter α) :=
λ _ _, principal_mono.2
@[simp] lemma principal_eq_iff_eq {s t : set α} : 𝓟 s = 𝓟 t ↔ s = t :=
by simp only [le_antisymm_iff, le_principal_iff, mem_principal_sets]; refl
@[simp] lemma join_principal_eq_Sup {s : set (filter α)} : join (𝓟 s) = Sup s := rfl
lemma principal_univ : 𝓟 (univ : set α) = ⊤ :=
top_unique $ by simp only [le_principal_iff, mem_top_sets, eq_self_iff_true]
lemma principal_empty : 𝓟 (∅ : set α) = ⊥ :=
bot_unique $ assume s _, empty_subset _
/-! ### Lattice equations -/
lemma empty_in_sets_eq_bot {f : filter α} : ∅ ∈ f ↔ f = ⊥ :=
⟨assume h, bot_unique $ assume s _, mem_sets_of_superset h (empty_subset s),
assume : f = ⊥, this.symm ▸ mem_bot_sets⟩
lemma nonempty_of_mem_sets {f : filter α} (hf : f ≠ ⊥) {s : set α} (hs : s ∈ f) :
s.nonempty :=
s.eq_empty_or_nonempty.elim (λ h, absurd hs (h.symm ▸ mt empty_in_sets_eq_bot.mp hf)) id
lemma nonempty_of_ne_bot {f : filter α} (hf : f ≠ ⊥) : nonempty α :=
nonempty_of_exists $ nonempty_of_mem_sets hf univ_mem_sets
lemma filter_eq_bot_of_not_nonempty (f : filter α) (ne : ¬ nonempty α) : f = ⊥ :=
empty_in_sets_eq_bot.mp $ univ_mem_sets' $ assume x, false.elim (ne ⟨x⟩)
lemma forall_sets_nonempty_iff_ne_bot {f : filter α} :
(∀ (s : set α), s ∈ f → s.nonempty) ↔ f ≠ ⊥ :=
⟨λ h hf, empty_not_nonempty (h ∅ $ hf.symm ▸ mem_bot_sets), nonempty_of_mem_sets⟩
lemma mem_sets_of_eq_bot {f : filter α} {s : set α} (h : f ⊓ 𝓟 sᶜ = ⊥) : s ∈ f :=
have ∅ ∈ f ⊓ 𝓟 sᶜ, from h.symm ▸ mem_bot_sets,
let ⟨s₁, hs₁, s₂, (hs₂ : sᶜ ⊆ s₂), (hs : s₁ ∩ s₂ ⊆ ∅)⟩ := this in
by filter_upwards [hs₁] assume a ha, classical.by_contradiction $ assume ha', hs ⟨ha, hs₂ ha'⟩
lemma inf_ne_bot_iff {f g : filter α} :
f ⊓ g ≠ ⊥ ↔ ∀ {U V}, U ∈ f → V ∈ g → set.nonempty (U ∩ V) :=
begin
rw ← forall_sets_nonempty_iff_ne_bot,
simp_rw mem_inf_sets,
split ; intro h,
{ intros U V U_in V_in,
exact h (U ∩ V) ⟨U, U_in, V, V_in, subset.refl _⟩ },
{ rintros S ⟨U, U_in, V, V_in, hUV⟩,
cases h U_in V_in with a ha,
use [a, hUV ha] }
end
lemma inf_principal_ne_bot_iff {f : filter α} {s : set α} :
f ⊓ 𝓟 s ≠ ⊥ ↔ ∀ U ∈ f, (U ∩ s).nonempty :=
begin
rw inf_ne_bot_iff,
apply forall_congr,
intros U,
split,
{ intros h U_in,
exact h U_in (mem_principal_self s) },
{ intros h V U_in V_in,
rw mem_principal_sets at V_in,
cases h U_in with x hx,
exact ⟨x, hx.1, V_in hx.2⟩ },
end
lemma inf_eq_bot_iff {f g : filter α} :
f ⊓ g = ⊥ ↔ ∃ U V, (U ∈ f) ∧ (V ∈ g) ∧ U ∩ V = ∅ :=
begin
rw ← not_iff_not,
simp only [not_exists, not_and, ← ne.def, inf_ne_bot_iff, ne_empty_iff_nonempty]
end
protected lemma disjoint_iff {f g : filter α} :
disjoint f g ↔ ∃ U V, (U ∈ f) ∧ (V ∈ g) ∧ U ∩ V = ∅ :=
disjoint_iff.trans inf_eq_bot_iff
lemma eq_Inf_of_mem_sets_iff_exists_mem {S : set (filter α)} {l : filter α}
(h : ∀ {s}, s ∈ l ↔ ∃ f ∈ S, s ∈ f) : l = Inf S :=
le_antisymm (le_Inf $ λ f hf s hs, h.2 ⟨f, hf, hs⟩)
(λ s hs, let ⟨f, hf, hs⟩ := h.1 hs in (Inf_le hf : Inf S ≤ f) hs)
lemma eq_infi_of_mem_sets_iff_exists_mem {f : ι → filter α} {l : filter α}
(h : ∀ {s}, s ∈ l ↔ ∃ i, s ∈ f i) :
l = infi f :=
eq_Inf_of_mem_sets_iff_exists_mem $ λ s, h.trans exists_range_iff.symm
lemma eq_binfi_of_mem_sets_iff_exists_mem {f : ι → filter α} {p : ι → Prop} {l : filter α}
(h : ∀ {s}, s ∈ l ↔ ∃ i (_ : p i), s ∈ f i) :
l = ⨅ i (_ : p i), f i :=
begin
rw [infi_subtype'],
apply eq_infi_of_mem_sets_iff_exists_mem,
intro s,
exact h.trans ⟨λ ⟨i, pi, si⟩, ⟨⟨i, pi⟩, si⟩, λ ⟨⟨i, pi⟩, si⟩, ⟨i, pi, si⟩⟩
end
lemma infi_sets_eq {f : ι → filter α} (h : directed (≥) f) (ne : nonempty ι) :
(infi f).sets = (⋃ i, (f i).sets) :=
let ⟨i⟩ := ne, u := { filter .
sets := (⋃ i, (f i).sets),
univ_sets := by simp only [mem_Union]; exact ⟨i, univ_mem_sets⟩,
sets_of_superset := by simp only [mem_Union, exists_imp_distrib];
intros x y i hx hxy; exact ⟨i, mem_sets_of_superset hx hxy⟩,
inter_sets :=
begin
simp only [mem_Union, exists_imp_distrib],
assume x y a hx b hy,
rcases h a b with ⟨c, ha, hb⟩,
exact ⟨c, inter_mem_sets (ha hx) (hb hy)⟩
end } in
have u = infi f, from eq_infi_of_mem_sets_iff_exists_mem (λ s, by simp only [mem_Union]),
congr_arg filter.sets this.symm
lemma mem_infi {f : ι → filter α} (h : directed (≥) f) (ne : nonempty ι) (s) :
s ∈ infi f ↔ ∃ i, s ∈ f i :=
by simp only [infi_sets_eq h ne, mem_Union]
@[nolint ge_or_gt] -- Intentional use of `≥`
lemma binfi_sets_eq {f : β → filter α} {s : set β}
(h : directed_on (f ⁻¹'o (≥)) s) (ne : s.nonempty) :
(⨅ i∈s, f i).sets = (⋃ i ∈ s, (f i).sets) :=
let ⟨i, hi⟩ := ne in
calc (⨅ i ∈ s, f i).sets = (⨅ t : {t // t ∈ s}, (f t.val)).sets : by rw [infi_subtype]; refl
... = (⨆ t : {t // t ∈ s}, (f t.val).sets) : infi_sets_eq
(assume ⟨x, hx⟩ ⟨y, hy⟩, match h x hx y hy with ⟨z, h₁, h₂, h₃⟩ := ⟨⟨z, h₁⟩, h₂, h₃⟩ end)
⟨⟨i, hi⟩⟩
... = (⨆ t ∈ {t | t ∈ s}, (f t).sets) : by rw [supr_subtype]; refl
@[nolint ge_or_gt] -- Intentional use of `≥`
lemma mem_binfi {f : β → filter α} {s : set β}
(h : directed_on (f ⁻¹'o (≥)) s) (ne : s.nonempty) {t : set α} :
t ∈ (⨅ i∈s, f i) ↔ ∃ i ∈ s, t ∈ f i :=
by simp only [binfi_sets_eq h ne, mem_bUnion_iff]
lemma infi_sets_eq_finite (f : ι → filter α) :
(⨅i, f i).sets = (⋃t:finset (plift ι), (⨅i∈t, f (plift.down i)).sets) :=
begin
rw [infi_eq_infi_finset, infi_sets_eq],
exact (directed_of_sup $ λs₁ s₂ hs, infi_le_infi $ λi, infi_le_infi_const $ λh, hs h),
apply_instance
end
lemma mem_infi_finite {f : ι → filter α} (s) :
s ∈ infi f ↔ s ∈ ⋃t:finset (plift ι), (⨅i∈t, f (plift.down i)).sets :=
show s ∈ (infi f).sets ↔ s ∈ ⋃t:finset (plift ι), (⨅i∈t, f (plift.down i)).sets,
by rw infi_sets_eq_finite
@[simp] lemma sup_join {f₁ f₂ : filter (filter α)} : (join f₁ ⊔ join f₂) = join (f₁ ⊔ f₂) :=
filter_eq $ set.ext $ assume x,
by simp only [supr_sets_eq, join, mem_sup_sets, iff_self, mem_set_of_eq]
@[simp] lemma supr_join {ι : Sort w} {f : ι → filter (filter α)} :
(⨆x, join (f x)) = join (⨆x, f x) :=
filter_eq $ set.ext $ assume x,
by simp only [supr_sets_eq, join, iff_self, mem_Inter, mem_set_of_eq]
instance : bounded_distrib_lattice (filter α) :=
{ le_sup_inf :=
begin
assume x y z s,
simp only [and_assoc, mem_inf_sets, mem_sup_sets, exists_prop, exists_imp_distrib, and_imp],
intros hs t₁ ht₁ t₂ ht₂ hts,
exact ⟨s ∪ t₁,
x.sets_of_superset hs $ subset_union_left _ _,
y.sets_of_superset ht₁ $ subset_union_right _ _,
s ∪ t₂,
x.sets_of_superset hs $ subset_union_left _ _,
z.sets_of_superset ht₂ $ subset_union_right _ _,
subset.trans (@le_sup_inf (set α) _ _ _ _) (union_subset (subset.refl _) hts)⟩
end,
..filter.complete_lattice }
/- the complementary version with ⨆i, f ⊓ g i does not hold! -/
lemma infi_sup_eq {f : filter α} {g : ι → filter α} : (⨅ x, f ⊔ g x) = f ⊔ infi g :=
begin
refine le_antisymm _ (le_infi $ assume i, sup_le_sup_left (infi_le _ _) _),
rintros t ⟨h₁, h₂⟩,
rw [infi_sets_eq_finite] at h₂,
simp only [mem_Union, (finset.inf_eq_infi _ _).symm] at h₂,
rcases h₂ with ⟨s, hs⟩,
suffices : (⨅i, f ⊔ g i) ≤ f ⊔ s.inf (λi, g i.down), { exact this ⟨h₁, hs⟩ },
refine finset.induction_on s _ _,
{ exact le_sup_right_of_le le_top },
{ rintros ⟨i⟩ s his ih,
rw [finset.inf_insert, sup_inf_left],
exact le_inf (infi_le _ _) ih }
end
lemma mem_infi_sets_finset {s : finset α} {f : α → filter β} :
∀t, t ∈ (⨅a∈s, f a) ↔ (∃p:α → set β, (∀a∈s, p a ∈ f a) ∧ (⋂a∈s, p a) ⊆ t) :=
show ∀t, t ∈ (⨅a∈s, f a) ↔ (∃p:α → set β, (∀a∈s, p a ∈ f a) ∧ (⨅a∈s, p a) ≤ t),
begin
simp only [(finset.inf_eq_infi _ _).symm],
refine finset.induction_on s _ _,
{ simp only [finset.not_mem_empty, false_implies_iff, finset.inf_empty, top_le_iff,
imp_true_iff, mem_top_sets, true_and, exists_const],
intros; refl },
{ intros a s has ih t,
simp only [ih, finset.forall_mem_insert, finset.inf_insert, mem_inf_sets,
exists_prop, iff_iff_implies_and_implies, exists_imp_distrib, and_imp, and_assoc] {contextual := tt},
split,
{ intros t₁ ht₁ t₂ p hp ht₂ ht,
existsi function.update p a t₁,
have : ∀a'∈s, function.update p a t₁ a' = p a',
from assume a' ha',
have a' ≠ a, from assume h, has $ h ▸ ha',
function.update_noteq this _ _,
have eq : s.inf (λj, function.update p a t₁ j) = s.inf (λj, p j) :=
finset.inf_congr rfl this,
simp only [this, ht₁, hp, function.update_same, true_and, imp_true_iff, eq] {contextual := tt},
exact subset.trans (inter_subset_inter (subset.refl _) ht₂) ht },
assume p hpa hp ht,
exact ⟨p a, hpa, (s.inf p), ⟨⟨p, hp, le_refl _⟩, ht⟩⟩ }
end
/-- If `f : ι → filter α` is directed, `ι` is not empty, and `∀ i, f i ≠ ⊥`, then `infi f ≠ ⊥`.
See also `infi_ne_bot_of_directed` for a version assuming `nonempty α` instead of `nonempty ι`. -/
lemma infi_ne_bot_of_directed' {f : ι → filter α} (hn : nonempty ι)
(hd : directed (≥) f) (hb : ∀i, f i ≠ ⊥) : (infi f) ≠ ⊥ :=
begin
intro h,
have he: ∅ ∈ (infi f), from h.symm ▸ (mem_bot_sets : ∅ ∈ (⊥ : filter α)),
obtain ⟨i, hi⟩ : ∃i, ∅ ∈ f i,
from (mem_infi hd hn ∅).1 he,
exact hb i (empty_in_sets_eq_bot.1 hi)
end
/-- If `f : ι → filter α` is directed, `α` is not empty, and `∀ i, f i ≠ ⊥`, then `infi f ≠ ⊥`.
See also `infi_ne_bot_of_directed'` for a version assuming `nonempty ι` instead of `nonempty α`. -/
lemma infi_ne_bot_of_directed {f : ι → filter α}
(hn : nonempty α) (hd : directed (≥) f) (hb : ∀i, f i ≠ ⊥) : (infi f) ≠ ⊥ :=
if hι : nonempty ι then infi_ne_bot_of_directed' hι hd hb else
assume h : infi f = ⊥,
have univ ⊆ (∅ : set α),
begin
rw [←principal_mono, principal_univ, principal_empty, ←h],
exact (le_infi $ assume i, false.elim $ hι ⟨i⟩)
end,
let ⟨x⟩ := hn in this (mem_univ x)
lemma infi_ne_bot_iff_of_directed' {f : ι → filter α}
(hn : nonempty ι) (hd : directed (≥) f) : (infi f) ≠ ⊥ ↔ (∀i, f i ≠ ⊥) :=
⟨assume ne_bot i, ne_bot_of_le_ne_bot ne_bot (infi_le _ i),
infi_ne_bot_of_directed' hn hd⟩
lemma infi_ne_bot_iff_of_directed {f : ι → filter α}
(hn : nonempty α) (hd : directed (≥) f) : (infi f) ≠ ⊥ ↔ (∀i, f i ≠ ⊥) :=
⟨assume ne_bot i, ne_bot_of_le_ne_bot ne_bot (infi_le _ i),
infi_ne_bot_of_directed hn hd⟩
lemma mem_infi_sets {f : ι → filter α} (i : ι) : ∀{s}, s ∈ f i → s ∈ ⨅i, f i :=
show (⨅i, f i) ≤ f i, from infi_le _ _
@[elab_as_eliminator]
lemma infi_sets_induct {f : ι → filter α} {s : set α} (hs : s ∈ infi f) {p : set α → Prop}
(uni : p univ)
(ins : ∀{i s₁ s₂}, s₁ ∈ f i → p s₂ → p (s₁ ∩ s₂))
(upw : ∀{s₁ s₂}, s₁ ⊆ s₂ → p s₁ → p s₂) : p s :=
begin
rw [mem_infi_finite] at hs,
simp only [mem_Union, (finset.inf_eq_infi _ _).symm] at hs,
rcases hs with ⟨is, his⟩,
revert s,
refine finset.induction_on is _ _,
{ assume s hs, rwa [mem_top_sets.1 hs] },
{ rintros ⟨i⟩ js his ih s hs,
rw [finset.inf_insert, mem_inf_sets] at hs,
rcases hs with ⟨s₁, hs₁, s₂, hs₂, hs⟩,
exact upw hs (ins hs₁ (ih hs₂)) }
end
/- principal equations -/
@[simp] lemma inf_principal {s t : set α} : 𝓟 s ⊓ 𝓟 t = 𝓟 (s ∩ t) :=
le_antisymm
(by simp; exact ⟨s, subset.refl s, t, subset.refl t, by simp⟩)
(by simp [le_inf_iff, inter_subset_left, inter_subset_right])
@[simp] lemma sup_principal {s t : set α} : 𝓟 s ⊔ 𝓟 t = 𝓟 (s ∪ t) :=
filter_eq $ set.ext $
by simp only [union_subset_iff, union_subset_iff, mem_sup_sets, forall_const, iff_self, mem_principal_sets]
@[simp] lemma supr_principal {ι : Sort w} {s : ι → set α} : (⨆x, 𝓟 (s x)) = 𝓟 (⋃i, s i) :=
filter_eq $ set.ext $ assume x, by simp only [supr_sets_eq, mem_principal_sets, mem_Inter];
exact (@supr_le_iff (set α) _ _ _ _).symm
@[simp] lemma principal_eq_bot_iff {s : set α} : 𝓟 s = ⊥ ↔ s = ∅ :=
empty_in_sets_eq_bot.symm.trans $ mem_principal_sets.trans subset_empty_iff
lemma principal_ne_bot_iff {s : set α} : 𝓟 s ≠ ⊥ ↔ s.nonempty :=
(not_congr principal_eq_bot_iff).trans ne_empty_iff_nonempty
lemma is_compl_principal (s : set α) : is_compl (𝓟 s) (𝓟 sᶜ) :=
⟨by simp only [inf_principal, inter_compl_self, principal_empty, le_refl],
by simp only [sup_principal, union_compl_self, principal_univ, le_refl]⟩
lemma inf_principal_eq_bot {f : filter α} {s : set α} (hs : sᶜ ∈ f) : f ⊓ 𝓟 s = ⊥ :=
empty_in_sets_eq_bot.mp ⟨_, hs, s, mem_principal_self s, assume x ⟨h₁, h₂⟩, h₁ h₂⟩
theorem mem_inf_principal (f : filter α) (s t : set α) :
s ∈ f ⊓ 𝓟 t ↔ {x | x ∈ t → x ∈ s} ∈ f :=
begin
simp only [← le_principal_iff, (is_compl_principal s).le_left_iff, disjoint, inf_assoc,
inf_principal, imp_iff_not_or],
rw [← disjoint, ← (is_compl_principal (t ∩ sᶜ)).le_right_iff, compl_inter, compl_compl],
refl
end
lemma mem_iff_inf_principal_compl {f : filter α} {V : set α} :
V ∈ f ↔ f ⊓ 𝓟 Vᶜ = ⊥ :=
begin
rw inf_eq_bot_iff,
split,
{ intro h,
use [V, Vᶜ],
simp [h, subset.refl] },
{ rintros ⟨U, W, U_in, W_in, UW⟩,
rw [mem_principal_sets, compl_subset_comm] at W_in,
apply mem_sets_of_superset U_in,
intros x x_in,
apply W_in,
intro H,
have : x ∈ U ∩ W := ⟨x_in, H⟩,
rwa UW at this },
end
lemma le_iff_forall_inf_principal_compl {f g : filter α} :
f ≤ g ↔ ∀ V ∈ g, f ⊓ 𝓟 Vᶜ = ⊥ :=
begin
change (∀ V ∈ g, V ∈ f) ↔ _,
simp_rw [mem_iff_inf_principal_compl],
end
lemma principal_le_iff {s : set α} {f : filter α} :
𝓟 s ≤ f ↔ ∀ V ∈ f, s ⊆ V :=
begin
change (∀ V, V ∈ f → V ∈ _) ↔ _,
simp_rw mem_principal_sets,
end
@[simp] lemma infi_principal_finset {ι : Type w} (s : finset ι) (f : ι → set α) :
(⨅i∈s, 𝓟 (f i)) = 𝓟 (⋂i∈s, f i) :=
begin
ext t,
simp [mem_infi_sets_finset],
split,
{ rintros ⟨p, hp, ht⟩,
calc (⋂ (i : ι) (H : i ∈ s), f i) ≤ (⋂ (i : ι) (H : i ∈ s), p i) :
infi_le_infi (λi, infi_le_infi (λhi, mem_principal_sets.1 (hp i hi)))
... ≤ t : ht },
{ assume h,
exact ⟨f, λi hi, subset.refl _, h⟩ }
end
@[simp] lemma infi_principal_fintype {ι : Type w} [fintype ι] (f : ι → set α) :
(⨅i, 𝓟 (f i)) = 𝓟 (⋂i, f i) :=
by simpa using infi_principal_finset finset.univ f
end lattice
@[mono] lemma join_mono {f₁ f₂ : filter (filter α)} (h : f₁ ≤ f₂) :
join f₁ ≤ join f₂ :=
λ s hs, h hs
/-! ### Eventually -/
/-- `f.eventually p` or `∀ᶠ x in f, p x` mean that `{x | p x} ∈ f`. E.g., `∀ᶠ x in at_top, p x`
means that `p` holds true for sufficiently large `x`. -/
protected def eventually (p : α → Prop) (f : filter α) : Prop := {x | p x} ∈ f
notation `∀ᶠ` binders ` in ` f `, ` r:(scoped p, filter.eventually p f) := r
lemma eventually_iff {f : filter α} {P : α → Prop} : (∀ᶠ x in f, P x) ↔ {x | P x} ∈ f :=
iff.rfl
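/- Hedged illustration (not in the original file): for a principal filter, "eventually" just means
   "at every point of the set". -/
example {s : set α} {p : α → Prop} (h : ∀ x ∈ s, p x) : ∀ᶠ x in 𝓟 s, p x :=
eventually_iff.2 (mem_principal_sets.2 h)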
protected lemma ext' {f₁ f₂ : filter α}
(h : ∀ p : α → Prop, (∀ᶠ x in f₁, p x) ↔ (∀ᶠ x in f₂, p x)) :
f₁ = f₂ :=
filter.ext h
lemma eventually.filter_mono {f₁ f₂ : filter α} (h : f₁ ≤ f₂) {p : α → Prop}
(hp : ∀ᶠ x in f₂, p x) :
∀ᶠ x in f₁, p x :=
h hp
lemma eventually_of_mem {f : filter α} {P : α → Prop} {U : set α} (hU : U ∈ f) (h : ∀ x ∈ U, P x) :
∀ᶠ x in f, P x :=
mem_sets_of_superset hU h
protected lemma eventually.and {p q : α → Prop} {f : filter α} :
f.eventually p → f.eventually q → ∀ᶠ x in f, p x ∧ q x :=
inter_mem_sets
@[simp]
lemma eventually_true (f : filter α) : ∀ᶠ x in f, true := univ_mem_sets
lemma eventually_of_forall {p : α → Prop} {f : filter α} (hp : ∀ x, p x) :
∀ᶠ x in f, p x :=
univ_mem_sets' hp
@[simp] lemma eventually_false_iff_eq_bot {f : filter α} :
(∀ᶠ x in f, false) ↔ f = ⊥ :=
empty_in_sets_eq_bot
@[simp] lemma eventually_const {f : filter α} (hf : f ≠ ⊥) {p : Prop} :
(∀ᶠ x in f, p) ↔ p :=
classical.by_cases (λ h : p, by simp [h]) (λ h, by simp [h, hf])
lemma eventually_iff_exists_mem {p : α → Prop} {f : filter α} :
(∀ᶠ x in f, p x) ↔ ∃ v ∈ f, ∀ y ∈ v, p y :=
exists_sets_subset_iff.symm
lemma eventually.exists_mem {p : α → Prop} {f : filter α} (hp : ∀ᶠ x in f, p x) :
∃ v ∈ f, ∀ y ∈ v, p y :=
eventually_iff_exists_mem.1 hp
lemma eventually.mp {p q : α → Prop} {f : filter α} (hp : ∀ᶠ x in f, p x)
(hq : ∀ᶠ x in f, p x → q x) :
∀ᶠ x in f, q x :=
mp_sets hp hq
lemma eventually.mono {p q : α → Prop} {f : filter α} (hp : ∀ᶠ x in f, p x)
(hq : ∀ x, p x → q x) :
∀ᶠ x in f, q x :=
hp.mp (eventually_of_forall hq)
@[simp] lemma eventually_and {p q : α → Prop} {f : filter α} :
(∀ᶠ x in f, p x ∧ q x) ↔ (∀ᶠ x in f, p x) ∧ (∀ᶠ x in f, q x) :=
⟨λ h, ⟨h.mono $ λ _, and.left, h.mono $ λ _, and.right⟩, λ h, h.1.and h.2⟩
lemma eventually.congr {f : filter α} {p q : α → Prop} (h' : ∀ᶠ x in f, p x)
(h : ∀ᶠ x in f, p x ↔ q x) : ∀ᶠ x in f, q x :=
h'.mp (h.mono $ λ x hx, hx.mp)
lemma eventually_congr {f : filter α} {p q : α → Prop} (h : ∀ᶠ x in f, p x ↔ q x) :
(∀ᶠ x in f, p x) ↔ (∀ᶠ x in f, q x) :=
⟨λ hp, hp.congr h, λ hq, hq.congr $ by simpa only [iff.comm] using h⟩
@[simp] lemma eventually_or_distrib_left {f : filter α} {p : Prop} {q : α → Prop} :
(∀ᶠ x in f, p ∨ q x) ↔ (p ∨ ∀ᶠ x in f, q x) :=
classical.by_cases (λ h : p, by simp [h]) (λ h, by simp [h])
@[simp] lemma eventually_or_distrib_right {f : filter α} {p : α → Prop} {q : Prop} :
(∀ᶠ x in f, p x ∨ q) ↔ ((∀ᶠ x in f, p x) ∨ q) :=
by simp only [or_comm _ q, eventually_or_distrib_left]
@[simp] lemma eventually_imp_distrib_left {f : filter α} {p : Prop} {q : α → Prop} :
(∀ᶠ x in f, p → q x) ↔ (p → ∀ᶠ x in f, q x) :=
by simp only [imp_iff_not_or, eventually_or_distrib_left]
@[simp]
lemma eventually_bot {p : α → Prop} : ∀ᶠ x in ⊥, p x := ⟨⟩
@[simp]
lemma eventually_top {p : α → Prop} : (∀ᶠ x in ⊤, p x) ↔ (∀ x, p x) :=
iff.rfl
lemma eventually_sup {p : α → Prop} {f g : filter α} :
(∀ᶠ x in f ⊔ g, p x) ↔ (∀ᶠ x in f, p x) ∧ (∀ᶠ x in g, p x) :=
iff.rfl
@[simp]
lemma eventually_Sup {p : α → Prop} {fs : set (filter α)} :
(∀ᶠ x in Sup fs, p x) ↔ (∀ f ∈ fs, ∀ᶠ x in f, p x) :=
iff.rfl
@[simp]
lemma eventually_supr {p : α → Prop} {fs : β → filter α} :
(∀ᶠ x in (⨆ b, fs b), p x) ↔ (∀ b, ∀ᶠ x in fs b, p x) :=
mem_supr_sets
@[simp]
lemma eventually_principal {a : set α} {p : α → Prop} :
(∀ᶠ x in 𝓟 a, p x) ↔ (∀ x ∈ a, p x) :=
iff.rfl
/-! ### Frequently -/
/-- `f.frequently p` or `∃ᶠ x in f, p x` mean that `{x | ¬p x} ∉ f`. E.g., `∃ᶠ x in at_top, p x`
means that there exist arbitrarily large `x` for which `p` holds true. -/
protected def frequently (p : α → Prop) (f : filter α) : Prop := ¬∀ᶠ x in f, ¬p x
notation `∃ᶠ` binders ` in ` f `, ` r:(scoped p, filter.frequently p f) := r
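/- Hedged illustration (not in the original file): `∃ᶠ x in f, p x` is, by definition, the negation
   of `∀ᶠ x in f, ¬p x`. -/
example {p : α → Prop} {f : filter α} : (∃ᶠ x in f, p x) ↔ ¬∀ᶠ x in f, ¬p x := iff.rfl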
lemma eventually.frequently {f : filter α} (hf : f ≠ ⊥) {p : α → Prop} (h : ∀ᶠ x in f, p x) :
∃ᶠ x in f, p x :=
begin
assume h',
have := h.and h',
simp only [and_not_self, eventually_false_iff_eq_bot] at this,
exact hf this
end
lemma frequently_of_forall {f : filter α} (hf : f ≠ ⊥) {p : α → Prop} (h : ∀ x, p x) :
∃ᶠ x in f, p x :=
eventually.frequently hf (eventually_of_forall h)
lemma frequently.mp {p q : α → Prop} {f : filter α} (h : ∃ᶠ x in f, p x)
(hpq : ∀ᶠ x in f, p x → q x) :
∃ᶠ x in f, q x :=
mt (λ hq, hq.mp $ hpq.mono $ λ x, mt) h
lemma frequently.mono {p q : α → Prop} {f : filter α} (h : ∃ᶠ x in f, p x)
(hpq : ∀ x, p x → q x) :
∃ᶠ x in f, q x :=
h.mp (eventually_of_forall hpq)
lemma frequently.and_eventually {p q : α → Prop} {f : filter α}
(hp : ∃ᶠ x in f, p x) (hq : ∀ᶠ x in f, q x) :
∃ᶠ x in f, p x ∧ q x :=
begin
refine mt (λ h, hq.mp $ h.mono _) hp,
assume x hpq hq hp,
exact hpq ⟨hp, hq⟩
end
lemma frequently.exists {p : α → Prop} {f : filter α} (hp : ∃ᶠ x in f, p x) : ∃ x, p x :=
begin
by_contradiction H,
replace H : ∀ᶠ x in f, ¬ p x, from eventually_of_forall (not_exists.1 H),
exact hp H
end
lemma eventually.exists {p : α → Prop} {f : filter α} (hp : ∀ᶠ x in f, p x) (hf : f ≠ ⊥) :
∃ x, p x :=
(hp.frequently hf).exists
lemma frequently_iff_forall_eventually_exists_and {p : α → Prop} {f : filter α} :
(∃ᶠ x in f, p x) ↔ ∀ {q : α → Prop}, (∀ᶠ x in f, q x) → ∃ x, p x ∧ q x :=
⟨assume hp q hq, (hp.and_eventually hq).exists,
assume H hp, by simpa only [and_not_self, exists_false] using H hp⟩
lemma frequently_iff {f : filter α} {P : α → Prop} :
(∃ᶠ x in f, P x) ↔ ∀ {U}, U ∈ f → ∃ x ∈ U, P x :=
begin
rw frequently_iff_forall_eventually_exists_and,
split ; intro h,
{ intros U U_in,
simpa [exists_prop, and_comm] using h U_in },
{ intros H H',
simpa [and_comm] using h H' },
end
@[simp] lemma not_eventually {p : α → Prop} {f : filter α} :
(¬ ∀ᶠ x in f, p x) ↔ (∃ᶠ x in f, ¬ p x) :=
by simp [filter.frequently]
@[simp] lemma not_frequently {p : α → Prop} {f : filter α} :
(¬ ∃ᶠ x in f, p x) ↔ (∀ᶠ x in f, ¬ p x) :=
by simp only [filter.frequently, not_not]
@[simp] lemma frequently_true_iff_ne_bot (f : filter α) : (∃ᶠ x in f, true) ↔ f ≠ ⊥ :=
by simp [filter.frequently, -not_eventually, eventually_false_iff_eq_bot]
@[simp] lemma frequently_false (f : filter α) : ¬ ∃ᶠ x in f, false := by simp
@[simp] lemma frequently_const {f : filter α} (hf : f ≠ ⊥) {p : Prop} :
(∃ᶠ x in f, p) ↔ p :=
classical.by_cases (λ h : p, by simp [*]) (λ h, by simp [*])
@[simp] lemma frequently_or_distrib {f : filter α} {p q : α → Prop} :
(∃ᶠ x in f, p x ∨ q x) ↔ (∃ᶠ x in f, p x) ∨ (∃ᶠ x in f, q x) :=
by simp only [filter.frequently, ← not_and_distrib, not_or_distrib, eventually_and]
lemma frequently_or_distrib_left {f : filter α} (hf : f ≠ ⊥) {p : Prop} {q : α → Prop} :
(∃ᶠ x in f, p ∨ q x) ↔ (p ∨ ∃ᶠ x in f, q x) :=
by simp [hf]
lemma frequently_or_distrib_right {f : filter α} (hf : f ≠ ⊥) {p : α → Prop} {q : Prop} :
(∃ᶠ x in f, p x ∨ q) ↔ (∃ᶠ x in f, p x) ∨ q :=
by simp [hf]
@[simp] lemma frequently_imp_distrib {f : filter α} {p q : α → Prop} :
(∃ᶠ x in f, p x → q x) ↔ ((∀ᶠ x in f, p x) → ∃ᶠ x in f, q x) :=
by simp [imp_iff_not_or, not_eventually, frequently_or_distrib]
lemma frequently_imp_distrib_left {f : filter α} (hf : f ≠ ⊥) {p : Prop} {q : α → Prop} :
(∃ᶠ x in f, p → q x) ↔ (p → ∃ᶠ x in f, q x) :=
by simp [hf]
lemma frequently_imp_distrib_right {f : filter α} (hf : f ≠ ⊥) {p : α → Prop} {q : Prop} :
(∃ᶠ x in f, p x → q) ↔ ((∀ᶠ x in f, p x) → q) :=
by simp [hf]
@[simp] lemma eventually_imp_distrib_right {f : filter α} {p : α → Prop} {q : Prop} :
(∀ᶠ x in f, p x → q) ↔ ((∃ᶠ x in f, p x) → q) :=
by simp only [imp_iff_not_or, eventually_or_distrib_right, not_frequently]
@[simp] lemma frequently_bot {p : α → Prop} : ¬ ∃ᶠ x in ⊥, p x := by simp
@[simp]
lemma frequently_top {p : α → Prop} : (∃ᶠ x in ⊤, p x) ↔ (∃ x, p x) :=
by simp [filter.frequently]
lemma inf_ne_bot_iff_frequently_left {f g : filter α} :
f ⊓ g ≠ ⊥ ↔ ∀ {p : α → Prop}, (∀ᶠ x in f, p x) → ∃ᶠ x in g, p x :=
begin
rw filter.inf_ne_bot_iff,
split ; intro h,
{ intros U U_in H,
rcases h U_in H with ⟨x, hx, hx'⟩,
exact hx' hx},
{ intros U V U_in V_in,
classical,
by_contra H,
exact h U_in (mem_sets_of_superset V_in $ λ v v_in v_in', H ⟨v, v_in', v_in⟩) }
end
lemma inf_ne_bot_iff_frequently_right {f g : filter α} :
f ⊓ g ≠ ⊥ ↔ ∀ {p : α → Prop}, (∀ᶠ x in g, p x) → ∃ᶠ x in f, p x :=
by { rw inf_comm, exact filter.inf_ne_bot_iff_frequently_left }
@[simp]
lemma frequently_principal {a : set α} {p : α → Prop} :
(∃ᶠ x in 𝓟 a, p x) ↔ (∃ x ∈ a, p x) :=
by simp [filter.frequently, not_forall]
lemma frequently_sup {p : α → Prop} {f g : filter α} :
(∃ᶠ x in f ⊔ g, p x) ↔ (∃ᶠ x in f, p x) ∨ (∃ᶠ x in g, p x) :=
by simp only [filter.frequently, eventually_sup, not_and_distrib]
@[simp]
lemma frequently_Sup {p : α → Prop} {fs : set (filter α)} :
(∃ᶠ x in Sup fs, p x) ↔ (∃ f ∈ fs, ∃ᶠ x in f, p x) :=
by simp [filter.frequently, -not_eventually, not_forall]
@[simp]
lemma frequently_supr {p : α → Prop} {fs : β → filter α} :
(∃ᶠ x in (⨆ b, fs b), p x) ↔ (∃ b, ∃ᶠ x in fs b, p x) :=
by simp [filter.frequently, -not_eventually, not_forall]
/-!
### Relation “eventually equal”
-/
/-- Two functions `f` and `g` are *eventually equal* along a filter `l` if the set of `x` such that
`f x = g x` belongs to `l`. -/
def eventually_eq (l : filter α) (f g : α → β) : Prop := ∀ᶠ x in l, f x = g x
notation f ` =ᶠ[`:50 l:50 `] `:0 g:50 := eventually_eq l f g
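/- Hedged illustration (not in the original file): functions that are equal everywhere are
   eventually equal along any filter. -/
example {l : filter α} {f g : α → β} (h : ∀ x, f x = g x) : f =ᶠ[l] g := eventually_of_forall h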
lemma eventually_eq.rw {l : filter α} {f g : α → β} (h : f =ᶠ[l] g) (p : α → β → Prop)
(hf : ∀ᶠ x in l, p x (f x)) :
∀ᶠ x in l, p x (g x) :=
hf.congr $ h.mono $ λ x hx, hx ▸ iff.rfl
lemma eventually_eq.exists_mem {l : filter α} {f g : α → β} (h : f =ᶠ[l] g) :
∃ s ∈ l, ∀ x ∈ s, f x = g x :=
filter.eventually.exists_mem h
lemma eventually_eq_of_mem {l : filter α} {f g : α → β} {s : set α}
(hs : s ∈ l) (h : ∀ x ∈ s, f x = g x) : f =ᶠ[l] g :=
eventually_of_mem hs h
lemma eventually_eq_iff_exists_mem {l : filter α} {f g : α → β} :
(f =ᶠ[l] g) ↔ ∃ s ∈ l, ∀ x ∈ s, f x = g x :=
eventually_iff_exists_mem
@[refl] lemma eventually_eq.refl (l : filter α) (f : α → β) :
f =ᶠ[l] f :=
eventually_of_forall $ λ x, rfl
@[symm] lemma eventually_eq.symm {f g : α → β} {l : filter α} (H : f =ᶠ[l] g) :
g =ᶠ[l] f :=
H.mono $ λ _, eq.symm
@[trans] lemma eventually_eq.trans {f g h : α → β} {l : filter α}
(H₁ : f =ᶠ[l] g) (H₂ : g =ᶠ[l] h) :
f =ᶠ[l] h :=
H₂.rw (λ x y, f x = y) H₁
lemma eventually_eq.fun_comp {f g : α → β} {l : filter α} (H : f =ᶠ[l] g) (h : β → γ) :
(h ∘ f) =ᶠ[l] (h ∘ g) :=
H.mono $ λ x hx, congr_arg h hx
lemma eventually_eq.comp₂ {δ} {f f' : α → β} {g g' : α → γ} {l} (Hf : f =ᶠ[l] f') (h : β → γ → δ)
(Hg : g =ᶠ[l] g') :
(λ x, h (f x) (g x)) =ᶠ[l] (λ x, h (f' x) (g' x)) :=
Hf.mp $ Hg.mono $ by { intros, simp only * }
@[to_additive]
lemma eventually_eq.mul [has_mul β] {f f' g g' : α → β} {l : filter α} (h : f =ᶠ[l] g)
(h' : f' =ᶠ[l] g') :
((λ x, f x * f' x) =ᶠ[l] (λ x, g x * g' x)) :=
h.comp₂ (*) h'
@[to_additive]
lemma eventually_eq.inv [has_inv β] {f g : α → β} {l : filter α} (h : f =ᶠ[l] g) :
((λ x, (f x)⁻¹) =ᶠ[l] (λ x, (g x)⁻¹)) :=
h.fun_comp has_inv.inv
lemma eventually_eq.div [group_with_zero β] {f f' g g' : α → β} {l : filter α} (h : f =ᶠ[l] g)
(h' : f' =ᶠ[l] g') :
((λ x, f x / f' x) =ᶠ[l] (λ x, g x / g' x)) :=
h.mul h'.inv
lemma eventually_eq.sub [add_group β] {f f' g g' : α → β} {l : filter α} (h : f =ᶠ[l] g)
(h' : f' =ᶠ[l] g') :
((λ x, f x - f' x) =ᶠ[l] (λ x, g x - g' x)) :=
h.add h'.neg
section has_le
variables [has_le β] {l : filter α}
/-- A function `f` is eventually less than or equal to a function `g` at a filter `l`. -/
def eventually_le (l : filter α) (f g : α → β) : Prop := ∀ᶠ x in l, f x ≤ g x
notation f ` ≤ᶠ[`:50 l:50 `] `:0 g:50 := eventually_le l f g
lemma eventually_le.congr {f f' g g' : α → β} (H : f ≤ᶠ[l] g) (hf : f =ᶠ[l] f') (hg : g =ᶠ[l] g') :
f' ≤ᶠ[l] g' :=
H.mp $ hg.mp $ hf.mono $ λ x hf hg H, by rwa [hf, hg] at H
lemma eventually_le_congr {f f' g g' : α → β} (hf : f =ᶠ[l] f') (hg : g =ᶠ[l] g') :
f ≤ᶠ[l] g ↔ f' ≤ᶠ[l] g' :=
⟨λ H, H.congr hf hg, λ H, H.congr hf.symm hg.symm⟩
end has_le
section preorder
variables [preorder β] {l : filter α} {f g h : α → β}
lemma eventually_eq.le (h : f =ᶠ[l] g) : f ≤ᶠ[l] g := h.mono $ λ x, le_of_eq
@[refl] lemma eventually_le.refl (l : filter α) (f : α → β) :
f ≤ᶠ[l] f :=
(eventually_eq.refl l f).le
@[trans] lemma eventually_le.trans (H₁ : f ≤ᶠ[l] g) (H₂ : g ≤ᶠ[l] h) : f ≤ᶠ[l] h :=
H₂.mp $ H₁.mono $ λ x, le_trans
@[trans] lemma eventually_eq.trans_le (H₁ : f =ᶠ[l] g) (H₂ : g ≤ᶠ[l] h) : f ≤ᶠ[l] h :=
H₁.le.trans H₂
@[trans] lemma eventually_le.trans_eq (H₁ : f ≤ᶠ[l] g) (H₂ : g =ᶠ[l] h) : f ≤ᶠ[l] h :=
H₁.trans H₂.le
end preorder
lemma eventually_le.antisymm [partial_order β] {l : filter α} {f g : α → β}
(h₁ : f ≤ᶠ[l] g) (h₂ : g ≤ᶠ[l] f) :
f =ᶠ[l] g :=
h₂.mp $ h₁.mono $ λ x, le_antisymm
lemma join_le {f : filter (filter α)} {l : filter α} (h : ∀ᶠ m in f, m ≤ l) : join f ≤ l :=
λ s hs, h.mono $ λ m hm, hm hs
/-! ### Push-forwards, pull-backs, and the monad structure -/
section map
/-- The forward map of a filter -/
def map (m : α → β) (f : filter α) : filter β :=
{ sets := preimage m ⁻¹' f.sets,
univ_sets := univ_mem_sets,
sets_of_superset := assume s t hs st, mem_sets_of_superset hs $ preimage_mono st,
inter_sets := assume s t hs ht, inter_mem_sets hs ht }
@[simp] lemma map_principal {s : set α} {f : α → β} :
map f (𝓟 s) = 𝓟 (set.image f s) :=
filter_eq $ set.ext $ assume a, image_subset_iff.symm
variables {f : filter α} {m : α → β} {m' : β → γ} {s : set α} {t : set β}
@[simp] lemma eventually_map {P : β → Prop} :
(∀ᶠ b in map m f, P b) ↔ ∀ᶠ a in f, P (m a) :=
iff.rfl
@[simp] lemma frequently_map {P : β → Prop} :
(∃ᶠ b in map m f, P b) ↔ ∃ᶠ a in f, P (m a) :=
iff.rfl
@[simp] lemma mem_map : t ∈ map m f ↔ {x | m x ∈ t} ∈ f := iff.rfl
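/- Hedged illustration (not in the original file): membership in the pushforward `map m f` amounts
   to membership of the preimage in `f`. -/
example {u : set β} : u ∈ map m f ↔ m ⁻¹' u ∈ f := iff.rfl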
lemma image_mem_map (hs : s ∈ f) : m '' s ∈ map m f :=
f.sets_of_superset hs $ subset_preimage_image m s
lemma range_mem_map : range m ∈ map m f :=
by rw ←image_univ; exact image_mem_map univ_mem_sets
lemma mem_map_sets_iff : t ∈ map m f ↔ (∃s∈f, m '' s ⊆ t) :=
iff.intro
(assume ht, ⟨set.preimage m t, ht, image_preimage_subset _ _⟩)
(assume ⟨s, hs, ht⟩, mem_sets_of_superset (image_mem_map hs) ht)
@[simp] lemma map_id : filter.map id f = f :=
filter_eq $ rfl
@[simp] lemma map_compose : filter.map m' ∘ filter.map m = filter.map (m' ∘ m) :=
funext $ assume _, filter_eq $ rfl
@[simp] lemma map_map : filter.map m' (filter.map m f) = filter.map (m' ∘ m) f :=
congr_fun (@@filter.map_compose m m') f
/-- If functions `m₁` and `m₂` are eventually equal at a filter `f`, then
they map this filter to the same filter. -/
lemma map_congr {m₁ m₂ : α → β} {f : filter α} (h : m₁ =ᶠ[f] m₂) :
map m₁ f = map m₂ f :=
filter.ext' $ λ p,
by { simp only [eventually_map], exact eventually_congr (h.mono $ λ x hx, hx ▸ iff.rfl) }
end map
section comap
/-- The inverse map of a filter -/
def comap (m : α → β) (f : filter β) : filter α :=
{ sets := { s | ∃t∈ f, m ⁻¹' t ⊆ s },
univ_sets := ⟨univ, univ_mem_sets, by simp only [subset_univ, preimage_univ]⟩,
sets_of_superset := assume a b ⟨a', ha', ma'a⟩ ab,
⟨a', ha', subset.trans ma'a ab⟩,
inter_sets := assume a b ⟨a', ha₁, ha₂⟩ ⟨b', hb₁, hb₂⟩,
⟨a' ∩ b', inter_mem_sets ha₁ hb₁, inter_subset_inter ha₂ hb₂⟩ }
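/- Hedged illustration (not in the original file): preimages of sets of a filter always belong to
   its pullback under `comap`. -/
example {m : α → β} {g : filter β} {t : set β} (ht : t ∈ g) : m ⁻¹' t ∈ comap m g :=
⟨t, ht, subset.refl _⟩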
@[simp] lemma eventually_comap {f : filter β} {φ : α → β} {P : α → Prop} :
(∀ᶠ a in comap φ f, P a) ↔ ∀ᶠ b in f, ∀ a, φ a = b → P a :=
begin
split ; intro h,
{ rcases h with ⟨t, t_in, ht⟩,
apply mem_sets_of_superset t_in,
rintros y y_in _ rfl,
apply ht y_in },
{ exact ⟨_, h, λ _ x_in, x_in _ rfl⟩ }
end
@[simp] lemma frequently_comap {f : filter β} {φ : α → β} {P : α → Prop} :
(∃ᶠ a in comap φ f, P a) ↔ ∃ᶠ b in f, ∃ a, φ a = b ∧ P a :=
begin
classical,
erw [← not_iff_not, not_not, not_not, filter.eventually_comap],
simp only [not_exists, not_and],
end
end comap
/-- The monadic bind operation on filter is defined the usual way in terms of `map` and `join`.
Unfortunately, this `bind` does not result in the expected applicative. See `filter.seq` for the
applicative instance. -/
def bind (f : filter α) (m : α → filter β) : filter β := join (map m f)
/-- The applicative sequencing operation. This is not induced by the bind operation. -/
def seq (f : filter (α → β)) (g : filter α) : filter β :=
⟨{ s | ∃u∈ f, ∃t∈ g, (∀m∈u, ∀x∈t, (m : α → β) x ∈ s) },
⟨univ, univ_mem_sets, univ, univ_mem_sets, by simp only [forall_prop_of_true, mem_univ, forall_true_iff]⟩,
assume s₀ s₁ ⟨t₀, t₁, h₀, h₁, h⟩ hst, ⟨t₀, t₁, h₀, h₁, assume x hx y hy, hst $ h _ hx _ hy⟩,
assume s₀ s₁ ⟨t₀, ht₀, t₁, ht₁, ht⟩ ⟨u₀, hu₀, u₁, hu₁, hu⟩,
⟨t₀ ∩ u₀, inter_mem_sets ht₀ hu₀, t₁ ∩ u₁, inter_mem_sets ht₁ hu₁,
assume x ⟨hx₀, hx₁⟩ x ⟨hy₀, hy₁⟩, ⟨ht _ hx₀ _ hy₀, hu _ hx₁ _ hy₁⟩⟩⟩
/-- `pure x` is the set of sets that contain `x`. It is equal to `𝓟 {x}` but
with this definition we have `s ∈ pure a` defeq `a ∈ s`. -/
instance : has_pure filter :=
⟨λ (α : Type u) x,
{ sets := {s | x ∈ s},
inter_sets := λ s t, and.intro,
sets_of_superset := λ s t hs hst, hst hs,
univ_sets := trivial }⟩
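/- Hedged illustration (not in the original file): as stated above, `s ∈ pure a` is definitionally
   `a ∈ s`. -/
example {a : α} {s : set α} (h : a ∈ s) : s ∈ (pure a : filter α) := h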
instance : has_bind filter := ⟨@filter.bind⟩
instance : has_seq filter := ⟨@filter.seq⟩
instance : functor filter := { map := @filter.map }
lemma pure_sets (a : α) : (pure a : filter α).sets = {s | a ∈ s} := rfl
@[simp] lemma mem_pure_sets {a : α} {s : set α} : s ∈ (pure a : filter α) ↔ a ∈ s := iff.rfl
lemma pure_eq_principal (a : α) : (pure a : filter α) = 𝓟 {a} :=
filter.ext $ λ s, by simp only [mem_pure_sets, mem_principal_sets, singleton_subset_iff]
@[simp] lemma map_pure (f : α → β) (a : α) : map f (pure a) = pure (f a) :=
filter.ext $ λ s, iff.rfl
@[simp] lemma join_pure (f : filter α) : join (pure f) = f := filter.ext $ λ s, iff.rfl
@[simp] lemma pure_bind (a : α) (m : α → filter β) :
bind (pure a) m = m a :=
by simp only [has_bind.bind, bind, map_pure, join_pure]
section
-- this section needs to be before applicative, otherwise the wrong instance will be chosen
/-- The monad structure on filters. -/
protected def monad : monad filter := { map := @filter.map }
local attribute [instance] filter.monad
protected lemma is_lawful_monad : is_lawful_monad filter :=
{ id_map := assume α f, filter_eq rfl,
pure_bind := assume α β, pure_bind,
bind_assoc := assume α β γ f m₁ m₂, filter_eq rfl,
bind_pure_comp_eq_map := assume α β f x, filter.ext $ λ s,
by simp only [has_bind.bind, bind, functor.map, mem_map, mem_join_sets, mem_set_of_eq,
function.comp, mem_pure_sets] }
end
instance : applicative filter := { map := @filter.map, seq := @filter.seq }
instance : alternative filter :=
{ failure := λα, ⊥,
orelse := λα x y, x ⊔ y }
@[simp] lemma map_def {α β} (m : α → β) (f : filter α) : m <$> f = map m f := rfl
@[simp] lemma bind_def {α β} (f : filter α) (m : α → filter β) : f >>= m = bind f m := rfl
/- map and comap equations -/
section map
variables {f f₁ f₂ : filter α} {g g₁ g₂ : filter β} {m : α → β} {m' : β → γ} {s : set α} {t : set β}
@[simp] theorem mem_comap_sets : s ∈ comap m g ↔ ∃t∈ g, m ⁻¹' t ⊆ s := iff.rfl
theorem preimage_mem_comap (ht : t ∈ g) : m ⁻¹' t ∈ comap m g :=
⟨t, ht, subset.refl _⟩
lemma comap_id : comap id f = f :=
le_antisymm (assume s, preimage_mem_comap) (assume s ⟨t, ht, hst⟩, mem_sets_of_superset ht hst)
lemma comap_const_of_not_mem {x : α} {f : filter α} {V : set α} (hV : V ∈ f) (hx : x ∉ V) :
comap (λ y : α, x) f = ⊥ :=
begin
ext W,
suffices : ∃ t ∈ f, (λ (y : α), x) ⁻¹' t ⊆ W, by simpa,
use [V, hV],
simp [preimage_const_of_not_mem hx],
end
lemma comap_const_of_mem {x : α} {f : filter α} (h : ∀ V ∈ f, x ∈ V) : comap (λ y : α, x) f = ⊤ :=
begin
ext W,
suffices : (∃ (t : set α), t ∈ f.sets ∧ (λ (y : α), x) ⁻¹' t ⊆ W) ↔ W = univ,
by simpa,
split,
{ rintros ⟨V, V_in, hW⟩,
simpa [preimage_const_of_mem (h V V_in), univ_subset_iff] using hW },
{ rintro rfl,
use univ,
simp [univ_mem_sets] },
end
lemma comap_comap {m : γ → β} {n : β → α} : comap m (comap n f) = comap (n ∘ m) f :=
le_antisymm
(assume c ⟨b, hb, (h : preimage (n ∘ m) b ⊆ c)⟩, ⟨preimage n b, preimage_mem_comap hb, h⟩)
(assume c ⟨b, ⟨a, ha, (h₁ : preimage n a ⊆ b)⟩, (h₂ : preimage m b ⊆ c)⟩,
⟨a, ha, show preimage m (preimage n a) ⊆ c, from subset.trans (preimage_mono h₁) h₂⟩)
@[simp] theorem comap_principal {t : set β} : comap m (𝓟 t) = 𝓟 (m ⁻¹' t) :=
filter_eq $ set.ext $ assume s,
⟨assume ⟨u, (hu : t ⊆ u), (b : preimage m u ⊆ s)⟩, subset.trans (preimage_mono hu) b,
assume : preimage m t ⊆ s, ⟨t, subset.refl t, this⟩⟩
lemma map_le_iff_le_comap : map m f ≤ g ↔ f ≤ comap m g :=
⟨assume h s ⟨t, ht, hts⟩, mem_sets_of_superset (h ht) hts, assume h s ht, h ⟨_, ht, subset.refl _⟩⟩
lemma gc_map_comap (m : α → β) : galois_connection (map m) (comap m) :=
assume f g, map_le_iff_le_comap
@[mono] lemma map_mono : monotone (map m) := (gc_map_comap m).monotone_l
@[mono] lemma comap_mono : monotone (comap m) := (gc_map_comap m).monotone_u
@[simp] lemma map_bot : map m ⊥ = ⊥ := (gc_map_comap m).l_bot
@[simp] lemma map_sup : map m (f₁ ⊔ f₂) = map m f₁ ⊔ map m f₂ := (gc_map_comap m).l_sup
@[simp] lemma map_supr {f : ι → filter α} : map m (⨆i, f i) = (⨆i, map m (f i)) :=
(gc_map_comap m).l_supr
@[simp] lemma comap_top : comap m ⊤ = ⊤ := (gc_map_comap m).u_top
@[simp] lemma comap_inf : comap m (g₁ ⊓ g₂) = comap m g₁ ⊓ comap m g₂ := (gc_map_comap m).u_inf
@[simp] lemma comap_infi {f : ι → filter β} : comap m (⨅i, f i) = (⨅i, comap m (f i)) :=
(gc_map_comap m).u_infi
lemma le_comap_top (f : α → β) (l : filter α) : l ≤ comap f ⊤ :=
by rw [comap_top]; exact le_top
lemma map_comap_le : map m (comap m g) ≤ g := (gc_map_comap m).l_u_le _
lemma le_comap_map : f ≤ comap m (map m f) := (gc_map_comap m).le_u_l _
@[simp] lemma comap_bot : comap m ⊥ = ⊥ :=
bot_unique $ assume s _, ⟨∅, by simp only [mem_bot_sets], by simp only [empty_subset, preimage_empty]⟩
lemma comap_supr {ι} {f : ι → filter β} {m : α → β} :
comap m (supr f) = (⨆i, comap m (f i)) :=
le_antisymm
(assume s hs,
have ∀i, ∃t, t ∈ f i ∧ m ⁻¹' t ⊆ s, by simpa only [mem_comap_sets, exists_prop, mem_supr_sets] using mem_supr_sets.1 hs,
let ⟨t, ht⟩ := classical.axiom_of_choice this in
⟨⋃i, t i, mem_supr_sets.2 $ assume i, (f i).sets_of_superset (ht i).1 (subset_Union _ _),
begin
rw [preimage_Union, Union_subset_iff],
assume i,
exact (ht i).2
end⟩)
(supr_le $ assume i, comap_mono $ le_supr _ _)
lemma comap_Sup {s : set (filter β)} {m : α → β} : comap m (Sup s) = (⨆f∈s, comap m f) :=
by simp only [Sup_eq_supr, comap_supr, eq_self_iff_true]
lemma comap_sup : comap m (g₁ ⊔ g₂) = comap m g₁ ⊔ comap m g₂ :=
le_antisymm
(assume s ⟨⟨t₁, ht₁, hs₁⟩, ⟨t₂, ht₂, hs₂⟩⟩,
⟨t₁ ∪ t₂,
⟨g₁.sets_of_superset ht₁ (subset_union_left _ _), g₂.sets_of_superset ht₂ (subset_union_right _ _)⟩,
union_subset hs₁ hs₂⟩)
((@comap_mono _ _ m).le_map_sup _ _)
lemma map_comap {f : filter β} {m : α → β} (hf : range m ∈ f) : (f.comap m).map m = f :=
le_antisymm
map_comap_le
(assume t' ⟨t, ht, sub⟩, by filter_upwards [ht, hf]; rintros x hxt ⟨y, rfl⟩; exact sub hxt)
lemma comap_map {f : filter α} {m : α → β} (h : ∀ x y, m x = m y → x = y) :
comap m (map m f) = f :=
have ∀s, preimage m (image m s) = s,
from assume s, preimage_image_eq s h,
le_antisymm
(assume s hs, ⟨
image m s,
f.sets_of_superset hs $ by simp only [this, subset.refl],
by simp only [this, subset.refl]⟩)
le_comap_map
lemma le_of_map_le_map_inj' {f g : filter α} {m : α → β} {s : set α}
(hsf : s ∈ f) (hsg : s ∈ g) (hm : ∀x∈s, ∀y∈s, m x = m y → x = y)
(h : map m f ≤ map m g) : f ≤ g :=
assume t ht, by filter_upwards [hsf, h $ image_mem_map (inter_mem_sets hsg ht)]
assume a has ⟨b, ⟨hbs, hb⟩, h⟩,
have b = a, from hm _ hbs _ has h,
this ▸ hb
lemma le_of_map_le_map_inj_iff {f g : filter α} {m : α → β} {s : set α}
(hsf : s ∈ f) (hsg : s ∈ g) (hm : ∀x∈s, ∀y∈s, m x = m y → x = y) :
map m f ≤ map m g ↔ f ≤ g :=
iff.intro (le_of_map_le_map_inj' hsf hsg hm) (λ h, map_mono h)
lemma eq_of_map_eq_map_inj' {f g : filter α} {m : α → β} {s : set α}
(hsf : s ∈ f) (hsg : s ∈ g) (hm : ∀x∈s, ∀y∈s, m x = m y → x = y)
(h : map m f = map m g) : f = g :=
le_antisymm
(le_of_map_le_map_inj' hsf hsg hm $ le_of_eq h)
(le_of_map_le_map_inj' hsg hsf hm $ le_of_eq h.symm)
lemma map_inj {f g : filter α} {m : α → β} (hm : ∀ x y, m x = m y → x = y) (h : map m f = map m g) :
f = g :=
have comap m (map m f) = comap m (map m g), by rw h,
by rwa [comap_map hm, comap_map hm] at this
theorem le_map_comap_of_surjective' {f : α → β} {l : filter β} {u : set β} (ul : u ∈ l)
(hf : ∀ y ∈ u, ∃ x, f x = y) :
l ≤ map f (comap f l) :=
assume s ⟨t, tl, ht⟩,
have t ∩ u ⊆ s, from
assume x ⟨xt, xu⟩,
exists.elim (hf x xu) $ λ a faeq,
by { rw ←faeq, apply ht, change f a ∈ t, rw faeq, exact xt },
mem_sets_of_superset (inter_mem_sets tl ul) this
theorem map_comap_of_surjective' {f : α → β} {l : filter β} {u : set β} (ul : u ∈ l)
(hf : ∀ y ∈ u, ∃ x, f x = y) :
map f (comap f l) = l :=
le_antisymm map_comap_le (le_map_comap_of_surjective' ul hf)
theorem le_map_comap_of_surjective {f : α → β} (hf : function.surjective f) (l : filter β) :
l ≤ map f (comap f l) :=
le_map_comap_of_surjective' univ_mem_sets (λ y _, hf y)
theorem map_comap_of_surjective {f : α → β} (hf : function.surjective f) (l : filter β) :
map f (comap f l) = l :=
le_antisymm map_comap_le (le_map_comap_of_surjective hf l)
lemma subtype_coe_map_comap (s : set α) (f : filter α) :
map (coe : s → α) (comap (coe : s → α) f) = f ⊓ 𝓟 s :=
begin
apply le_antisymm,
{ rw [map_le_iff_le_comap, comap_inf, comap_principal],
have : (coe : s → α) ⁻¹' s = univ, by { ext x, simp },
rw [this, principal_univ],
simp [le_refl _] },
{ intros V V_in,
rcases V_in with ⟨W, W_in, H⟩,
rw mem_inf_sets,
use [W, W_in, s, mem_principal_self s],
erw [← image_subset_iff, subtype.image_preimage_coe] at H,
exact H }
end
lemma subtype_coe_map_comap_prod (s : set α) (f : filter (α × α)) :
map (coe : s × s → α × α) (comap (coe : s × s → α × α) f) = f ⊓ 𝓟 (s.prod s) :=
let φ (x : s × s) : s.prod s := ⟨⟨x.1.1, x.2.1⟩, ⟨x.1.2, x.2.2⟩⟩ in
begin
rw show (coe : s × s → α × α) = coe ∘ φ, by ext x; cases x; refl,
rw [← filter.map_map, ← filter.comap_comap],
rw map_comap_of_surjective,
exact subtype_coe_map_comap _ _,
exact λ ⟨⟨a, b⟩, ⟨ha, hb⟩⟩, ⟨⟨⟨a, ha⟩, ⟨b, hb⟩⟩, rfl⟩
end
lemma comap_ne_bot_iff {f : filter β} {m : α → β} : comap m f ≠ ⊥ ↔ ∀ t ∈ f, ∃ a, m a ∈ t :=
begin
rw ← forall_sets_nonempty_iff_ne_bot,
exact ⟨λ h t t_in, h (m ⁻¹' t) ⟨t, t_in, subset.refl _⟩,
λ h s ⟨u, u_in, hu⟩, let ⟨x, hx⟩ := h u u_in in ⟨x, hu hx⟩⟩,
end
lemma comap_ne_bot {f : filter β} {m : α → β} (hm : ∀t∈ f, ∃a, m a ∈ t) : comap m f ≠ ⊥ :=
comap_ne_bot_iff.mpr hm
lemma comap_ne_bot_of_range_mem {f : filter β} {m : α → β}
(hf : f ≠ ⊥) (hm : range m ∈ f) : comap m f ≠ ⊥ :=
comap_ne_bot $ assume t ht,
let ⟨_, ha, a, rfl⟩ := nonempty_of_mem_sets hf (inter_mem_sets ht hm)
in ⟨a, ha⟩
lemma comap_inf_principal_ne_bot_of_image_mem {f : filter β} {m : α → β}
(hf : f ≠ ⊥) {s : set α} (hs : m '' s ∈ f) : (comap m f ⊓ 𝓟 s) ≠ ⊥ :=
begin
refine compl_compl s ▸ mt mem_sets_of_eq_bot _,
rintros ⟨t, ht, hts⟩,
rcases nonempty_of_mem_sets hf (inter_mem_sets hs ht) with ⟨_, ⟨x, hxs, rfl⟩, hxt⟩,
exact absurd hxs (hts hxt)
end
lemma comap_ne_bot_of_surj {f : filter β} {m : α → β}
(hf : f ≠ ⊥) (hm : function.surjective m) : comap m f ≠ ⊥ :=
comap_ne_bot_of_range_mem hf $ univ_mem_sets' hm
lemma comap_ne_bot_of_image_mem {f : filter β} {m : α → β} (hf : f ≠ ⊥)
{s : set α} (hs : m '' s ∈ f) : comap m f ≠ ⊥ :=
ne_bot_of_le_ne_bot (comap_inf_principal_ne_bot_of_image_mem hf hs) inf_le_left
@[simp] lemma map_eq_bot_iff : map m f = ⊥ ↔ f = ⊥ :=
⟨by rw [←empty_in_sets_eq_bot, ←empty_in_sets_eq_bot]; exact id,
assume h, by simp only [h, eq_self_iff_true, map_bot]⟩
lemma map_ne_bot (hf : f ≠ ⊥) : map m f ≠ ⊥ :=
assume h, hf $ by rwa [map_eq_bot_iff] at h
lemma map_ne_bot_iff (f : α → β) {F : filter α} : map f F ≠ ⊥ ↔ F ≠ ⊥ :=
by rw [not_iff_not, map_eq_bot_iff]
lemma sInter_comap_sets (f : α → β) (F : filter β) :
⋂₀(comap f F).sets = ⋂ U ∈ F, f ⁻¹' U :=
begin
ext x,
suffices : (∀ (A : set α) (B : set β), B ∈ F → f ⁻¹' B ⊆ A → x ∈ A) ↔
∀ (B : set β), B ∈ F → f x ∈ B,
by simp only [mem_sInter, mem_Inter, mem_comap_sets, this, and_imp, mem_comap_sets, exists_prop, mem_sInter,
iff_self, mem_Inter, mem_preimage, exists_imp_distrib],
split,
{ intros h U U_in,
simpa only [set.subset.refl, forall_prop_of_true, mem_preimage] using h (f ⁻¹' U) U U_in },
{ intros h V U U_in f_U_V,
exact f_U_V (h U U_in) },
end
end map
-- this is a generic rule for monotone functions:
lemma map_infi_le {f : ι → filter α} {m : α → β} :
map m (infi f) ≤ (⨅ i, map m (f i)) :=
le_infi $ assume i, map_mono $ infi_le _ _
lemma map_infi_eq {f : ι → filter α} {m : α → β} (hf : directed (≥) f) (hι : nonempty ι) :
map m (infi f) = (⨅ i, map m (f i)) :=
le_antisymm
map_infi_le
(assume s (hs : preimage m s ∈ infi f),
have ∃i, preimage m s ∈ f i,
by simp only [infi_sets_eq hf hι, mem_Union] at hs; assumption,
let ⟨i, hi⟩ := this in
have (⨅ i, map m (f i)) ≤ 𝓟 s, from
infi_le_of_le i $ by simp only [le_principal_iff, mem_map]; assumption,
by simp only [filter.le_principal_iff] at this; assumption)
lemma map_binfi_eq {ι : Type w} {f : ι → filter α} {m : α → β} {p : ι → Prop}
(h : directed_on (f ⁻¹'o (≥)) {x | p x}) (ne : ∃i, p i) :
map m (⨅i (h : p i), f i) = (⨅i (h: p i), map m (f i)) :=
let ⟨i, hi⟩ := ne in
calc map m (⨅i (h : p i), f i) = map m (⨅i:subtype p, f i.val) : by simp only [infi_subtype, eq_self_iff_true]
... = (⨅i:subtype p, map m (f i.val)) : map_infi_eq
(assume ⟨x, hx⟩ ⟨y, hy⟩, match h x hx y hy with ⟨z, h₁, h₂, h₃⟩ := ⟨⟨z, h₁⟩, h₂, h₃⟩ end)
⟨⟨i, hi⟩⟩
... = (⨅i (h : p i), map m (f i)) : by simp only [infi_subtype, eq_self_iff_true]
lemma map_inf_le {f g : filter α} {m : α → β} : map m (f ⊓ g) ≤ map m f ⊓ map m g :=
(@map_mono _ _ m).map_inf_le f g
lemma map_inf' {f g : filter α} {m : α → β} {t : set α} (htf : t ∈ f) (htg : t ∈ g)
(h : ∀x∈t, ∀y∈t, m x = m y → x = y) : map m (f ⊓ g) = map m f ⊓ map m g :=
begin
refine le_antisymm map_inf_le (assume s hs, _),
simp only [map, mem_inf_sets, exists_prop, mem_map, mem_preimage, mem_inf_sets] at hs ⊢,
rcases hs with ⟨t₁, h₁, t₂, h₂, hs⟩,
refine ⟨m '' (t₁ ∩ t), _, m '' (t₂ ∩ t), _, _⟩,
{ filter_upwards [h₁, htf] assume a h₁ h₂, mem_image_of_mem _ ⟨h₁, h₂⟩ },
{ filter_upwards [h₂, htg] assume a h₁ h₂, mem_image_of_mem _ ⟨h₁, h₂⟩ },
{ rw [image_inter_on],
{ refine image_subset_iff.2 _,
exact λ x ⟨⟨h₁, _⟩, h₂, _⟩, hs ⟨h₁, h₂⟩ },
{ exact λ x ⟨_, hx⟩ y ⟨_, hy⟩, h x hx y hy } }
end
lemma map_inf {f g : filter α} {m : α → β} (h : function.injective m) :
map m (f ⊓ g) = map m f ⊓ map m g :=
map_inf' univ_mem_sets univ_mem_sets (assume x _ y _ hxy, h hxy)
lemma map_eq_comap_of_inverse {f : filter α} {m : α → β} {n : β → α}
(h₁ : m ∘ n = id) (h₂ : n ∘ m = id) : map m f = comap n f :=
le_antisymm
(assume b ⟨a, ha, (h : preimage n a ⊆ b)⟩, f.sets_of_superset ha $
calc a = preimage (n ∘ m) a : by simp only [h₂, preimage_id, eq_self_iff_true]
... ⊆ preimage m b : preimage_mono h)
(assume b (hb : preimage m b ∈ f),
⟨preimage m b, hb, show preimage (m ∘ n) b ⊆ b, by simp only [h₁]; apply subset.refl⟩)
lemma map_swap_eq_comap_swap {f : filter (α × β)} : prod.swap <$> f = comap prod.swap f :=
map_eq_comap_of_inverse prod.swap_swap_eq prod.swap_swap_eq
lemma le_map {f : filter α} {m : α → β} {g : filter β} (h : ∀s∈ f, m '' s ∈ g) :
g ≤ f.map m :=
assume s hs, mem_sets_of_superset (h _ hs) $ image_preimage_subset _ _
protected lemma push_pull (f : α → β) (F : filter α) (G : filter β) :
map f (F ⊓ comap f G) = map f F ⊓ G :=
begin
apply le_antisymm,
{ calc map f (F ⊓ comap f G) ≤ map f F ⊓ (map f $ comap f G) : map_inf_le
... ≤ map f F ⊓ G : inf_le_inf_left (map f F) map_comap_le },
{ rintros U ⟨V, V_in, W, ⟨Z, Z_in, hZ⟩, h⟩,
rw ← image_subset_iff at h,
use [f '' V, image_mem_map V_in, Z, Z_in],
refine subset.trans _ h,
have : f '' (V ∩ f ⁻¹' Z) ⊆ f '' (V ∩ W),
from image_subset _ (inter_subset_inter_right _ ‹_›),
rwa set.push_pull at this }
end
protected lemma push_pull' (f : α → β) (F : filter α) (G : filter β) :
map f (comap f G ⊓ F) = G ⊓ map f F :=
by simp only [filter.push_pull, inf_comm]
section applicative
lemma singleton_mem_pure_sets {a : α} : {a} ∈ (pure a : filter α) :=
mem_singleton a
lemma pure_injective : function.injective (pure : α → filter α) :=
assume a b hab, (filter.ext_iff.1 hab {x | a = x}).1 rfl
@[simp] lemma pure_ne_bot {α : Type u} {a : α} : pure a ≠ (⊥ : filter α) :=
mt empty_in_sets_eq_bot.2 $ not_mem_empty a
@[simp] lemma le_pure_iff {f : filter α} {a : α} : f ≤ pure a ↔ {a} ∈ f :=
⟨λ h, h singleton_mem_pure_sets,
λ h s hs, mem_sets_of_superset h $ singleton_subset_iff.2 hs⟩
lemma mem_seq_sets_def {f : filter (α → β)} {g : filter α} {s : set β} :
s ∈ f.seq g ↔ (∃u ∈ f, ∃t ∈ g, ∀x∈u, ∀y∈t, (x : α → β) y ∈ s) :=
iff.rfl
lemma mem_seq_sets_iff {f : filter (α → β)} {g : filter α} {s : set β} :
s ∈ f.seq g ↔ (∃u ∈ f, ∃t ∈ g, set.seq u t ⊆ s) :=
by simp only [mem_seq_sets_def, seq_subset, exists_prop, iff_self]
lemma mem_map_seq_iff {f : filter α} {g : filter β} {m : α → β → γ} {s : set γ} :
s ∈ (f.map m).seq g ↔ (∃t u, t ∈ g ∧ u ∈ f ∧ ∀x∈u, ∀y∈t, m x y ∈ s) :=
iff.intro
(assume ⟨t, ht, s, hs, hts⟩, ⟨s, m ⁻¹' t, hs, ht, assume a, hts _⟩)
(assume ⟨t, s, ht, hs, hts⟩, ⟨m '' s, image_mem_map hs, t, ht, assume f ⟨a, has, eq⟩, eq ▸ hts _ has⟩)
lemma seq_mem_seq_sets {f : filter (α → β)} {g : filter α} {s : set (α → β)} {t : set α}
(hs : s ∈ f) (ht : t ∈ g) : s.seq t ∈ f.seq g :=
⟨s, hs, t, ht, assume f hf a ha, ⟨f, hf, a, ha, rfl⟩⟩
lemma le_seq {f : filter (α → β)} {g : filter α} {h : filter β}
(hh : ∀t ∈ f, ∀u ∈ g, set.seq t u ∈ h) : h ≤ seq f g :=
assume s ⟨t, ht, u, hu, hs⟩, mem_sets_of_superset (hh _ ht _ hu) $
assume b ⟨m, hm, a, ha, eq⟩, eq ▸ hs _ hm _ ha
@[mono] lemma seq_mono {f₁ f₂ : filter (α → β)} {g₁ g₂ : filter α}
(hf : f₁ ≤ f₂) (hg : g₁ ≤ g₂) : f₁.seq g₁ ≤ f₂.seq g₂ :=
le_seq $ assume s hs t ht, seq_mem_seq_sets (hf hs) (hg ht)
@[simp] lemma pure_seq_eq_map (g : α → β) (f : filter α) : seq (pure g) f = f.map g :=
begin
refine le_antisymm (le_map $ assume s hs, _) (le_seq $ assume s hs t ht, _),
{ rw ← singleton_seq, apply seq_mem_seq_sets _ hs,
exact singleton_mem_pure_sets },
{ refine sets_of_superset (map g f) (image_mem_map ht) _,
rintros b ⟨a, ha, rfl⟩, exact ⟨g, hs, a, ha, rfl⟩ }
end
@[simp] lemma seq_pure (f : filter (α → β)) (a : α) : seq f (pure a) = map (λg:α → β, g a) f :=
begin
refine le_antisymm (le_map $ assume s hs, _) (le_seq $ assume s hs t ht, _),
{ rw ← seq_singleton,
exact seq_mem_seq_sets hs singleton_mem_pure_sets },
{ refine sets_of_superset (map (λg:α→β, g a) f) (image_mem_map hs) _,
rintros b ⟨g, hg, rfl⟩, exact ⟨g, hg, a, ht, rfl⟩ }
end
@[simp] lemma seq_assoc (x : filter α) (g : filter (α → β)) (h : filter (β → γ)) :
seq h (seq g x) = seq (seq (map (∘) h) g) x :=
begin
refine le_antisymm (le_seq $ assume s hs t ht, _) (le_seq $ assume s hs t ht, _),
{ rcases mem_seq_sets_iff.1 hs with ⟨u, hu, v, hv, hs⟩,
rcases mem_map_sets_iff.1 hu with ⟨w, hw, hu⟩,
refine mem_sets_of_superset _
(set.seq_mono (subset.trans (set.seq_mono hu (subset.refl _)) hs) (subset.refl _)),
rw ← set.seq_seq,
exact seq_mem_seq_sets hw (seq_mem_seq_sets hv ht) },
{ rcases mem_seq_sets_iff.1 ht with ⟨u, hu, v, hv, ht⟩,
refine mem_sets_of_superset _ (set.seq_mono (subset.refl _) ht),
rw set.seq_seq,
exact seq_mem_seq_sets (seq_mem_seq_sets (image_mem_map hs) hu) hv }
end
lemma prod_map_seq_comm (f : filter α) (g : filter β) :
(map prod.mk f).seq g = seq (map (λb a, (a, b)) g) f :=
begin
refine le_antisymm (le_seq $ assume s hs t ht, _) (le_seq $ assume s hs t ht, _),
{ rcases mem_map_sets_iff.1 hs with ⟨u, hu, hs⟩,
refine mem_sets_of_superset _ (set.seq_mono hs (subset.refl _)),
rw ← set.prod_image_seq_comm,
exact seq_mem_seq_sets (image_mem_map ht) hu },
{ rcases mem_map_sets_iff.1 hs with ⟨u, hu, hs⟩,
refine mem_sets_of_superset _ (set.seq_mono hs (subset.refl _)),
rw set.prod_image_seq_comm,
exact seq_mem_seq_sets (image_mem_map ht) hu }
end
instance : is_lawful_functor (filter : Type u → Type u) :=
{ id_map := assume α f, map_id,
comp_map := assume α β γ f g a, map_map.symm }
instance : is_lawful_applicative (filter : Type u → Type u) :=
{ pure_seq_eq_map := assume α β, pure_seq_eq_map,
map_pure := assume α β, map_pure,
seq_pure := assume α β, seq_pure,
seq_assoc := assume α β γ, seq_assoc }
instance : is_comm_applicative (filter : Type u → Type u) :=
⟨assume α β f g, prod_map_seq_comm f g⟩
lemma {l} seq_eq_filter_seq {α β : Type l} (f : filter (α → β)) (g : filter α) :
f <*> g = seq f g := rfl
end applicative
/- bind equations -/
section bind
@[simp] lemma eventually_bind {f : filter α} {m : α → filter β} {p : β → Prop} :
(∀ᶠ y in bind f m, p y) ↔ ∀ᶠ x in f, ∀ᶠ y in m x, p y :=
iff.rfl
@[simp] lemma eventually_eq_bind {f : filter α} {m : α → filter β} {g₁ g₂ : β → γ} :
(g₁ =ᶠ[bind f m] g₂) ↔ ∀ᶠ x in f, g₁ =ᶠ[m x] g₂ :=
iff.rfl
@[simp] lemma eventually_le_bind [has_le γ] {f : filter α} {m : α → filter β} {g₁ g₂ : β → γ} :
(g₁ ≤ᶠ[bind f m] g₂) ↔ ∀ᶠ x in f, g₁ ≤ᶠ[m x] g₂ :=
iff.rfl
lemma mem_bind_sets' {s : set β} {f : filter α} {m : α → filter β} :
s ∈ bind f m ↔ {a | s ∈ m a} ∈ f :=
iff.rfl
@[simp] lemma mem_bind_sets {s : set β} {f : filter α} {m : α → filter β} :
s ∈ bind f m ↔ ∃t ∈ f, ∀x ∈ t, s ∈ m x :=
calc s ∈ bind f m ↔ {a | s ∈ m a} ∈ f : iff.rfl
... ↔ (∃t ∈ f, t ⊆ {a | s ∈ m a}) : exists_sets_subset_iff.symm
... ↔ (∃t ∈ f, ∀x ∈ t, s ∈ m x) : iff.rfl
lemma bind_le {f : filter α} {g : α → filter β} {l : filter β} (h : ∀ᶠ x in f, g x ≤ l) :
f.bind g ≤ l :=
join_le $ eventually_map.2 h
@[mono] lemma bind_mono {f₁ f₂ : filter α} {g₁ g₂ : α → filter β} (hf : f₁ ≤ f₂)
(hg : g₁ ≤ᶠ[f₁] g₂) :
bind f₁ g₁ ≤ bind f₂ g₂ :=
begin
refine le_trans (λ s hs, _) (join_mono $ map_mono hf),
simp only [mem_join_sets, mem_bind_sets', mem_map] at hs ⊢,
filter_upwards [hg, hs],
exact λ x hx hs, hx hs
end
lemma bind_inf_principal {f : filter α} {g : α → filter β} {s : set β} :
f.bind (λ x, g x ⊓ 𝓟 s) = (f.bind g) ⊓ 𝓟 s :=
filter.ext $ λ s, by simp only [mem_bind_sets, mem_inf_principal]
lemma sup_bind {f g : filter α} {h : α → filter β} :
bind (f ⊔ g) h = bind f h ⊔ bind g h :=
by simp only [bind, sup_join, map_sup, eq_self_iff_true]
lemma principal_bind {s : set α} {f : α → filter β} :
(bind (𝓟 s) f) = (⨆x ∈ s, f x) :=
show join (map f (𝓟 s)) = (⨆x ∈ s, f x),
by simp only [Sup_image, join_principal_eq_Sup, map_principal, eq_self_iff_true]
end bind
section list_traverse
/- This is a separate section in order to open `list`, but mostly because of universe
equality requirements in `traverse` -/
open list
lemma sequence_mono :
∀(as bs : list (filter α)), forall₂ (≤) as bs → sequence as ≤ sequence bs
| [] [] forall₂.nil := le_refl _
| (a::as) (b::bs) (forall₂.cons h hs) := seq_mono (map_mono h) (sequence_mono as bs hs)
variables {α' β' γ' : Type u} {f : β' → filter α'} {s : γ' → set α'}
lemma mem_traverse_sets :
∀(fs : list β') (us : list γ'),
forall₂ (λb c, s c ∈ f b) fs us → traverse s us ∈ traverse f fs
| [] [] forall₂.nil := mem_pure_sets.2 $ mem_singleton _
| (f::fs) (u::us) (forall₂.cons h hs) := seq_mem_seq_sets (image_mem_map h) (mem_traverse_sets fs us hs)
lemma mem_traverse_sets_iff (fs : list β') (t : set (list α')) :
t ∈ traverse f fs ↔
(∃us:list (set α'), forall₂ (λb (s : set α'), s ∈ f b) fs us ∧ sequence us ⊆ t) :=
begin
split,
{ induction fs generalizing t,
case nil { simp only [sequence, mem_pure_sets, imp_self, forall₂_nil_left_iff,
exists_eq_left, set.pure_def, singleton_subset_iff, traverse_nil] },
case cons : b fs ih t {
assume ht,
rcases mem_seq_sets_iff.1 ht with ⟨u, hu, v, hv, ht⟩,
rcases mem_map_sets_iff.1 hu with ⟨w, hw, hwu⟩,
rcases ih v hv with ⟨us, hus, hu⟩,
exact ⟨w :: us, forall₂.cons hw hus, subset.trans (set.seq_mono hwu hu) ht⟩ } },
{ rintros ⟨us, hus, hs⟩,
exact mem_sets_of_superset (mem_traverse_sets _ _ hus) hs }
end
end list_traverse
/-! ### Limits -/
/-- `tendsto` is the generic "limit of a function" predicate.
`tendsto f l₁ l₂` asserts that for every `l₂` neighborhood `a`,
the `f`-preimage of `a` is an `l₁` neighborhood. -/
def tendsto (f : α → β) (l₁ : filter α) (l₂ : filter β) := l₁.map f ≤ l₂
lemma tendsto_def {f : α → β} {l₁ : filter α} {l₂ : filter β} :
tendsto f l₁ l₂ ↔ ∀ s ∈ l₂, f ⁻¹' s ∈ l₁ := iff.rfl
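/- Sketch: `tendsto_def` lets one build a `tendsto` proof directly from preimage
membership; the hypothesis name `h` is illustrative. -/
example {f : α → β} {l₁ : filter α} {l₂ : filter β}
  (h : ∀ s ∈ l₂, f ⁻¹' s ∈ l₁) : tendsto f l₁ l₂ :=
tendsto_def.mpr h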
lemma tendsto.eventually {f : α → β} {l₁ : filter α} {l₂ : filter β} {p : β → Prop}
(hf : tendsto f l₁ l₂) (h : ∀ᶠ y in l₂, p y) :
∀ᶠ x in l₁, p (f x) :=
hf h
@[simp] lemma tendsto_bot {f : α → β} {l : filter β} : tendsto f ⊥ l := by simp [tendsto]
lemma tendsto_of_not_nonempty {f : α → β} {la : filter α} {lb : filter β} (h : ¬nonempty α) :
tendsto f la lb :=
by simp only [filter_eq_bot_of_not_nonempty la h, tendsto_bot]
lemma eventually_eq_of_left_inv_of_right_inv {f : α → β} {g₁ g₂ : β → α} {fa : filter α}
{fb : filter β} (hleft : ∀ᶠ x in fa, g₁ (f x) = x) (hright : ∀ᶠ y in fb, f (g₂ y) = y)
(htendsto : tendsto g₂ fb fa) :
g₁ =ᶠ[fb] g₂ :=
(htendsto.eventually hleft).mp $ hright.mono $ λ y hr hl, (congr_arg g₁ hr.symm).trans hl
lemma tendsto_iff_comap {f : α → β} {l₁ : filter α} {l₂ : filter β} :
tendsto f l₁ l₂ ↔ l₁ ≤ l₂.comap f :=
map_le_iff_le_comap
lemma tendsto_congr' {f₁ f₂ : α → β} {l₁ : filter α} {l₂ : filter β} (hl : f₁ =ᶠ[l₁] f₂) :
tendsto f₁ l₁ l₂ ↔ tendsto f₂ l₁ l₂ :=
by rw [tendsto, tendsto, map_congr hl]
lemma tendsto.congr' {f₁ f₂ : α → β} {l₁ : filter α} {l₂ : filter β}
(hl : f₁ =ᶠ[l₁] f₂) (h : tendsto f₁ l₁ l₂) : tendsto f₂ l₁ l₂ :=
(tendsto_congr' hl).1 h
theorem tendsto_congr {f₁ f₂ : α → β} {l₁ : filter α} {l₂ : filter β}
(h : ∀ x, f₁ x = f₂ x) : tendsto f₁ l₁ l₂ ↔ tendsto f₂ l₁ l₂ :=
tendsto_congr' (univ_mem_sets' h)
theorem tendsto.congr {f₁ f₂ : α → β} {l₁ : filter α} {l₂ : filter β}
(h : ∀ x, f₁ x = f₂ x) : tendsto f₁ l₁ l₂ → tendsto f₂ l₁ l₂ :=
(tendsto_congr h).1
lemma tendsto_id' {x y : filter α} : x ≤ y → tendsto id x y :=
by simp only [tendsto, map_id, forall_true_iff] {contextual := tt}
lemma tendsto_id {x : filter α} : tendsto id x x := tendsto_id' $ le_refl x
lemma tendsto.comp {f : α → β} {g : β → γ} {x : filter α} {y : filter β} {z : filter γ}
(hg : tendsto g y z) (hf : tendsto f x y) : tendsto (g ∘ f) x z :=
calc map (g ∘ f) x = map g (map f x) : by rw [map_map]
... ≤ map g y : map_mono hf
... ≤ z : hg
lemma tendsto_le_left {f : α → β} {x y : filter α} {z : filter β}
(h : y ≤ x) : tendsto f x z → tendsto f y z :=
le_trans (map_mono h)
lemma tendsto_le_right {f : α → β} {x : filter α} {y z : filter β}
(h₁ : y ≤ z) (h₂ : tendsto f x y) : tendsto f x z :=
le_trans h₂ h₁
lemma tendsto.ne_bot {f : α → β} {x : filter α} {y : filter β} (h : tendsto f x y) (hx : x ≠ ⊥) :
y ≠ ⊥ :=
ne_bot_of_le_ne_bot (map_ne_bot hx) h
lemma tendsto_map {f : α → β} {x : filter α} : tendsto f x (map f x) := le_refl (map f x)
lemma tendsto_map' {f : β → γ} {g : α → β} {x : filter α} {y : filter γ}
(h : tendsto (f ∘ g) x y) : tendsto f (map g x) y :=
by rwa [tendsto, map_map]
lemma tendsto_map'_iff {f : β → γ} {g : α → β} {x : filter α} {y : filter γ} :
tendsto f (map g x) y ↔ tendsto (f ∘ g) x y :=
by rw [tendsto, map_map]; refl
lemma tendsto_comap {f : α → β} {x : filter β} : tendsto f (comap f x) x :=
map_comap_le
lemma tendsto_comap_iff {f : α → β} {g : β → γ} {a : filter α} {c : filter γ} :
tendsto f a (c.comap g) ↔ tendsto (g ∘ f) a c :=
⟨assume h, tendsto_comap.comp h, assume h, map_le_iff_le_comap.mp $ by rwa [map_map]⟩
lemma tendsto_comap'_iff {m : α → β} {f : filter α} {g : filter β} {i : γ → α}
(h : range i ∈ f) : tendsto (m ∘ i) (comap i f) g ↔ tendsto m f g :=
by rw [tendsto, ← map_compose]; simp only [(∘), map_comap h, tendsto]
lemma comap_eq_of_inverse {f : filter α} {g : filter β} {φ : α → β} (ψ : β → α)
(eq : ψ ∘ φ = id) (hφ : tendsto φ f g) (hψ : tendsto ψ g f) : comap φ g = f :=
begin
refine le_antisymm (le_trans (comap_mono $ map_le_iff_le_comap.1 hψ) _) (map_le_iff_le_comap.1 hφ),
rw [comap_comap, eq, comap_id],
exact le_refl _
end
lemma map_eq_of_inverse {f : filter α} {g : filter β} {φ : α → β} (ψ : β → α)
(eq : φ ∘ ψ = id) (hφ : tendsto φ f g) (hψ : tendsto ψ g f) : map φ f = g :=
begin
refine le_antisymm hφ (le_trans _ (map_mono hψ)),
rw [map_map, eq, map_id],
exact le_refl _
end
lemma tendsto_inf {f : α → β} {x : filter α} {y₁ y₂ : filter β} :
tendsto f x (y₁ ⊓ y₂) ↔ tendsto f x y₁ ∧ tendsto f x y₂ :=
by simp only [tendsto, le_inf_iff, iff_self]
lemma tendsto_inf_left {f : α → β} {x₁ x₂ : filter α} {y : filter β}
(h : tendsto f x₁ y) : tendsto f (x₁ ⊓ x₂) y :=
le_trans (map_mono inf_le_left) h
lemma tendsto_inf_right {f : α → β} {x₁ x₂ : filter α} {y : filter β}
(h : tendsto f x₂ y) : tendsto f (x₁ ⊓ x₂) y :=
le_trans (map_mono inf_le_right) h
lemma tendsto.inf {f : α → β} {x₁ x₂ : filter α} {y₁ y₂ : filter β}
(h₁ : tendsto f x₁ y₁) (h₂ : tendsto f x₂ y₂) : tendsto f (x₁ ⊓ x₂) (y₁ ⊓ y₂) :=
tendsto_inf.2 ⟨tendsto_inf_left h₁, tendsto_inf_right h₂⟩
lemma tendsto_infi {f : α → β} {x : filter α} {y : ι → filter β} :
tendsto f x (⨅i, y i) ↔ ∀i, tendsto f x (y i) :=
by simp only [tendsto, iff_self, le_infi_iff]
lemma tendsto_infi' {f : α → β} {x : ι → filter α} {y : filter β} (i : ι) :
tendsto f (x i) y → tendsto f (⨅i, x i) y :=
tendsto_le_left (infi_le _ _)
lemma tendsto_principal {f : α → β} {l : filter α} {s : set β} :
tendsto f l (𝓟 s) ↔ ∀ᶠ a in l, f a ∈ s :=
by simp only [tendsto, le_principal_iff, mem_map, iff_self, filter.eventually]
lemma tendsto_principal_principal {f : α → β} {s : set α} {t : set β} :
tendsto f (𝓟 s) (𝓟 t) ↔ ∀a∈s, f a ∈ t :=
by simp only [tendsto, image_subset_iff, le_principal_iff, map_principal, mem_principal_sets]; refl
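/- A worked instance of the lemma above: any map sends the principal filter of `s` into
the principal filter of its image (assuming the ambient `mem_image_of_mem`). -/
example {s : set α} {f : α → β} : tendsto f (𝓟 s) (𝓟 (f '' s)) :=
tendsto_principal_principal.2 (λ a ha, mem_image_of_mem f ha)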
lemma tendsto_pure {f : α → β} {a : filter α} {b : β} :
tendsto f a (pure b) ↔ ∀ᶠ x in a, f x = b :=
by simp only [tendsto, le_pure_iff, mem_map, mem_singleton_iff, filter.eventually]
lemma tendsto_pure_pure (f : α → β) (a : α) :
tendsto f (pure a) (pure (f a)) :=
tendsto_pure.2 rfl
lemma tendsto_const_pure {a : filter α} {b : β} : tendsto (λx, b) a (pure b) :=
tendsto_pure.2 $ univ_mem_sets' $ λ _, rfl
/-- If two filters are disjoint, then a function cannot tend to both of them along a non-trivial
filter. -/
lemma tendsto.not_tendsto {f : α → β} {a : filter α} {b₁ b₂ : filter β} (hf : tendsto f a b₁)
(ha : a ≠ ⊥) (hb : disjoint b₁ b₂) :
¬ tendsto f a b₂ :=
λ hf', (tendsto_inf.2 ⟨hf, hf'⟩).ne_bot ha hb.eq_bot
lemma tendsto_if {l₁ : filter α} {l₂ : filter β}
{f g : α → β} {p : α → Prop} [decidable_pred p]
(h₀ : tendsto f (l₁ ⊓ 𝓟 p) l₂)
(h₁ : tendsto g (l₁ ⊓ 𝓟 { x | ¬ p x }) l₂) :
tendsto (λ x, if p x then f x else g x) l₁ l₂ :=
begin
revert h₀ h₁, simp only [tendsto_def, mem_inf_principal],
intros h₀ h₁ s hs,
apply mem_sets_of_superset (inter_mem_sets (h₀ s hs) (h₁ s hs)),
rintros x ⟨hp₀, hp₁⟩, simp only [mem_preimage],
by_cases h : p x,
{ rw if_pos h, exact hp₀ h },
rw if_neg h, exact hp₁ h
end
/-! ### Products of filters -/
section prod
variables {s : set α} {t : set β} {f : filter α} {g : filter β}
/- The product filter cannot be defined using the monad structure on filters. For example:
F := do {x ← seq, y ← top, return (x, y)}
hence:
s ∈ F ↔ ∃n, [n..∞] × univ ⊆ s
G := do {y ← top, x ← seq, return (x, y)}
hence:
s ∈ G ↔ ∀i:ℕ, ∃n, [n..∞] × {i} ⊆ s
Now ⋃i, [i..∞] × {i} is in G but not in F.
For the product filter we want F as the result.
-/
/-- Product of filters. This is the filter generated by cartesian products
of elements of the component filters. -/
protected def prod (f : filter α) (g : filter β) : filter (α × β) :=
f.comap prod.fst ⊓ g.comap prod.snd
localized "infix ` ×ᶠ `:60 := filter.prod" in filter
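/- Minimal sketch tying the definition to the discussion above: the product filter
unfolds, by definition, to an infimum of two `comap`s. -/
example : f ×ᶠ g = f.comap prod.fst ⊓ g.comap prod.snd := rfl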
lemma prod_mem_prod {s : set α} {t : set β} {f : filter α} {g : filter β}
(hs : s ∈ f) (ht : t ∈ g) : set.prod s t ∈ f ×ᶠ g :=
inter_mem_inf_sets (preimage_mem_comap hs) (preimage_mem_comap ht)
lemma mem_prod_iff {s : set (α×β)} {f : filter α} {g : filter β} :
s ∈ f ×ᶠ g ↔ (∃ t₁ ∈ f, ∃ t₂ ∈ g, set.prod t₁ t₂ ⊆ s) :=
begin
simp only [filter.prod],
split,
exact assume ⟨t₁, ⟨s₁, hs₁, hts₁⟩, t₂, ⟨s₂, hs₂, hts₂⟩, h⟩,
⟨s₁, hs₁, s₂, hs₂, subset.trans (inter_subset_inter hts₁ hts₂) h⟩,
exact assume ⟨t₁, ht₁, t₂, ht₂, h⟩,
⟨prod.fst ⁻¹' t₁, ⟨t₁, ht₁, subset.refl _⟩, prod.snd ⁻¹' t₂, ⟨t₂, ht₂, subset.refl _⟩, h⟩
end
lemma comap_prod (f : α → β × γ) (b : filter β) (c : filter γ) :
comap f (b ×ᶠ c) = (comap (prod.fst ∘ f) b) ⊓ (comap (prod.snd ∘ f) c) :=
by erw [comap_inf, filter.comap_comap, filter.comap_comap]
lemma eventually_prod_iff {p : α × β → Prop} {f : filter α} {g : filter β} :
(∀ᶠ x in f ×ᶠ g, p x) ↔ ∃ (pa : α → Prop) (ha : ∀ᶠ x in f, pa x)
(pb : β → Prop) (hb : ∀ᶠ y in g, pb y), ∀ {x}, pa x → ∀ {y}, pb y → p (x, y) :=
by simpa only [set.prod_subset_iff] using @mem_prod_iff α β p f g
lemma tendsto_fst {f : filter α} {g : filter β} : tendsto prod.fst (f ×ᶠ g) f :=
tendsto_inf_left tendsto_comap
lemma tendsto_snd {f : filter α} {g : filter β} : tendsto prod.snd (f ×ᶠ g) g :=
tendsto_inf_right tendsto_comap
lemma tendsto.prod_mk {f : filter α} {g : filter β} {h : filter γ} {m₁ : α → β} {m₂ : α → γ}
(h₁ : tendsto m₁ f g) (h₂ : tendsto m₂ f h) : tendsto (λx, (m₁ x, m₂ x)) f (g ×ᶠ h) :=
tendsto_inf.2 ⟨tendsto_comap_iff.2 h₁, tendsto_comap_iff.2 h₂⟩
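/- Sketch combining the three lemmas above: reassembling the two projections gives a
map of the product filter into itself. -/
example : tendsto (λ p : α × β, (p.1, p.2)) (f ×ᶠ g) (f ×ᶠ g) :=
tendsto_fst.prod_mk tendsto_snd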
lemma eventually.prod_inl {la : filter α} {p : α → Prop} (h : ∀ᶠ x in la, p x) (lb : filter β) :
∀ᶠ x in la ×ᶠ lb, p (x : α × β).1 :=
tendsto_fst.eventually h
lemma eventually.prod_inr {lb : filter β} {p : β → Prop} (h : ∀ᶠ x in lb, p x) (la : filter α) :
∀ᶠ x in la ×ᶠ lb, p (x : α × β).2 :=
tendsto_snd.eventually h
lemma eventually.prod_mk {la : filter α} {pa : α → Prop} (ha : ∀ᶠ x in la, pa x)
{lb : filter β} {pb : β → Prop} (hb : ∀ᶠ y in lb, pb y) :
∀ᶠ p in la ×ᶠ lb, pa (p : α × β).1 ∧ pb p.2 :=
(ha.prod_inl lb).and (hb.prod_inr la)
lemma eventually.curry {la : filter α} {lb : filter β} {p : α × β → Prop}
(h : ∀ᶠ x in la.prod lb, p x) :
∀ᶠ x in la, ∀ᶠ y in lb, p (x, y) :=
begin
rcases eventually_prod_iff.1 h with ⟨pa, ha, pb, hb, h⟩,
exact ha.mono (λ a ha, hb.mono $ λ b hb, h ha hb)
end
lemma prod_infi_left {f : ι → filter α} {g : filter β} (i : ι) :
(⨅i, f i) ×ᶠ g = (⨅i, (f i) ×ᶠ g) :=
by rw [filter.prod, comap_infi, infi_inf i]; simp only [filter.prod, eq_self_iff_true]
lemma prod_infi_right {f : filter α} {g : ι → filter β} (i : ι) :
f ×ᶠ (⨅i, g i) = (⨅i, f ×ᶠ (g i)) :=
by rw [filter.prod, comap_infi, inf_infi i]; simp only [filter.prod, eq_self_iff_true]
@[mono] lemma prod_mono {f₁ f₂ : filter α} {g₁ g₂ : filter β} (hf : f₁ ≤ f₂) (hg : g₁ ≤ g₂) :
f₁ ×ᶠ g₁ ≤ f₂ ×ᶠ g₂ :=
inf_le_inf (comap_mono hf) (comap_mono hg)
lemma prod_comap_comap_eq {α₁ : Type u} {α₂ : Type v} {β₁ : Type w} {β₂ : Type x}
{f₁ : filter α₁} {f₂ : filter α₂} {m₁ : β₁ → α₁} {m₂ : β₂ → α₂} :
(comap m₁ f₁) ×ᶠ (comap m₂ f₂) = comap (λp:β₁×β₂, (m₁ p.1, m₂ p.2)) (f₁ ×ᶠ f₂) :=
by simp only [filter.prod, comap_comap, eq_self_iff_true, comap_inf]
lemma prod_comm' : f ×ᶠ g = comap (prod.swap) (g ×ᶠ f) :=
by simp only [filter.prod, comap_comap, (∘), inf_comm, prod.fst_swap,
eq_self_iff_true, prod.snd_swap, comap_inf]
lemma prod_comm : f ×ᶠ g = map (λp:β×α, (p.2, p.1)) (g ×ᶠ f) :=
by rw [prod_comm', ← map_swap_eq_comap_swap]; refl
lemma prod_map_map_eq {α₁ : Type u} {α₂ : Type v} {β₁ : Type w} {β₂ : Type x}
{f₁ : filter α₁} {f₂ : filter α₂} {m₁ : α₁ → β₁} {m₂ : α₂ → β₂} :
(map m₁ f₁) ×ᶠ (map m₂ f₂) = map (λp:α₁×α₂, (m₁ p.1, m₂ p.2)) (f₁ ×ᶠ f₂) :=
le_antisymm
(assume s hs,
let ⟨s₁, hs₁, s₂, hs₂, h⟩ := mem_prod_iff.mp hs in
filter.sets_of_superset _ (prod_mem_prod (image_mem_map hs₁) (image_mem_map hs₂)) $
calc set.prod (m₁ '' s₁) (m₂ '' s₂) = (λp:α₁×α₂, (m₁ p.1, m₂ p.2)) '' set.prod s₁ s₂ :
set.prod_image_image_eq
... ⊆ _ : by rwa [image_subset_iff])
((tendsto.comp (le_refl _) tendsto_fst).prod_mk (tendsto.comp (le_refl _) tendsto_snd))
lemma tendsto.prod_map {δ : Type*} {f : α → γ} {g : β → δ} {a : filter α} {b : filter β}
{c : filter γ} {d : filter δ} (hf : tendsto f a c) (hg : tendsto g b d) :
tendsto (prod.map f g) (a ×ᶠ b) (c ×ᶠ d) :=
begin
erw [tendsto, ← prod_map_map_eq],
exact filter.prod_mono hf hg,
end
lemma map_prod (m : α × β → γ) (f : filter α) (g : filter β) :
map m (f.prod g) = (f.map (λa b, m (a, b))).seq g :=
begin
simp [filter.ext_iff, mem_prod_iff, mem_map_seq_iff],
assume s,
split,
exact assume ⟨t, ht, s, hs, h⟩, ⟨s, hs, t, ht, assume x hx y hy, @h ⟨x, y⟩ ⟨hx, hy⟩⟩,
exact assume ⟨s, hs, t, ht, h⟩, ⟨t, ht, s, hs, assume ⟨x, y⟩ ⟨hx, hy⟩, h x hx y hy⟩
end
lemma prod_eq {f : filter α} {g : filter β} : f.prod g = (f.map prod.mk).seq g :=
have h : _ := map_prod id f g, by rwa [map_id] at h
lemma prod_inf_prod {f₁ f₂ : filter α} {g₁ g₂ : filter β} :
(f₁ ×ᶠ g₁) ⊓ (f₂ ×ᶠ g₂) = (f₁ ⊓ f₂) ×ᶠ (g₁ ⊓ g₂) :=
by simp only [filter.prod, comap_inf, inf_comm, inf_assoc, inf_left_comm]
@[simp] lemma prod_bot {f : filter α} : f ×ᶠ (⊥ : filter β) = ⊥ := by simp [filter.prod]
@[simp] lemma bot_prod {g : filter β} : (⊥ : filter α) ×ᶠ g = ⊥ := by simp [filter.prod]
@[simp] lemma prod_principal_principal {s : set α} {t : set β} :
(𝓟 s) ×ᶠ (𝓟 t) = 𝓟 (set.prod s t) :=
by simp only [filter.prod, comap_principal, principal_eq_iff_eq, comap_principal, inf_principal]; refl
@[simp] lemma prod_pure_pure {a : α} {b : β} : (pure a) ×ᶠ (pure b) = pure (a, b) :=
by simp [pure_eq_principal]
lemma prod_eq_bot {f : filter α} {g : filter β} : f ×ᶠ g = ⊥ ↔ (f = ⊥ ∨ g = ⊥) :=
begin
split,
{ assume h,
rcases mem_prod_iff.1 (empty_in_sets_eq_bot.2 h) with ⟨s, hs, t, ht, hst⟩,
rw [subset_empty_iff, set.prod_eq_empty_iff] at hst,
cases hst with s_eq t_eq,
{ left, exact empty_in_sets_eq_bot.1 (s_eq ▸ hs) },
{ right, exact empty_in_sets_eq_bot.1 (t_eq ▸ ht) } },
{ rintros (rfl | rfl),
exact bot_prod,
exact prod_bot }
end
lemma prod_ne_bot {f : filter α} {g : filter β} : f ×ᶠ g ≠ ⊥ ↔ (f ≠ ⊥ ∧ g ≠ ⊥) :=
by rw [(≠), prod_eq_bot, not_or_distrib]
lemma tendsto_prod_iff {f : α × β → γ} {x : filter α} {y : filter β} {z : filter γ} :
filter.tendsto f (x ×ᶠ y) z ↔
∀ W ∈ z, ∃ U ∈ x, ∃ V ∈ y, ∀ x y, x ∈ U → y ∈ V → f (x, y) ∈ W :=
by simp only [tendsto_def, mem_prod_iff, prod_sub_preimage_iff, exists_prop, iff_self]
end prod
end filter
| 1eea1d7d0351119e6b36362cb324ad7bd9b1d07b | cf798a5faaa43a993adcc42d1a99d5eab647e00b | /Induction.lean | a9e1153ab9d7fffd539b0bf87c37a92ddd7998eb | [] | no_license | myuon/lean-software-foundations | dbbcd37e3552b58c6e139370b16b25c69a42799b | a1a08810f2664493c920742c2d66a3131fb3ae75 | refs/heads/master | 1,610,261,785,986 | 1,459,922,839,000 | 1,459,922,839,000 | 50,269,716 | 4 | 1 | null | null | null | null | UTF-8 | Lean | false | false | 3,480 | lean |
import Basics
open bool nat
-- Exercise: 2 stars (andb_true_elim2)
theorem andb_true_elim2 : ∀ (b c : bool), b && c = tt → c = tt
| tt c := λp, by rewrite tt_band at p; apply p
| ff c := λp, by contradiction
theorem plus_0_l : ∀ (n : nat), 0 + n = n
| zero := rfl
| (succ n) := calc
0 + succ n = succ (0 + n) : rfl
... = succ n : plus_0_l
theorem minus_diag : ∀ (n : nat), n - n = 0
| zero := rfl
| (succ n) := calc
succ n - succ n = n - n : succ_sub_succ
... = 0 : minus_diag
-- Exercise: 2 stars (basic_induction)
theorem mult_0_l : ∀ (n : nat), 0 * n = 0
| zero := rfl
| (succ n) := calc
0 * succ n = 0 * n + 0 : rfl
... = 0 * n : rfl
... = 0 : mult_0_l
theorem plus_1_l : ∀ (n : nat), 1 + n = succ n
| zero := rfl
| (succ n) := calc
1 + succ n = succ (1 + n) : rfl
... = succ (succ n) : plus_1_l
theorem plus_n_Sm : ∀ (n m : nat), succ (n + m) = n + succ m :=
take n m, rfl
theorem plus_Sn_m : ∀ (n m : nat), succ (n + m) = succ n + m :=
take n m, by rewrite succ_add
print plus_0_r
theorem plus_comm : ∀ (n m : nat), n + m = m + n
| n zero := calc
n + 0 = n : plus_0_r n
... = 0 + n : plus_0_l n
| n (succ m) := calc
n + succ m = succ (n + m) : plus_n_Sm
... = succ (m + n) : plus_comm
... = succ m + n : plus_Sn_m
theorem plus_assoc : ∀ (n m p : nat), n + (m + p) = (n + m) + p := by rec_simp
-- Exercise: 2 stars (double_plus)
definition double : nat → nat
| zero := zero
| (succ n) := succ (succ (double n))
lemma double_plus : ∀ (n : nat), double n = n + n
| zero := rfl
| (succ n) := calc
double (succ n) = succ (succ (double n)) : rfl
... = succ (succ (n + n)) : double_plus
... = succ (succ n + n) : plus_Sn_m
... = succ n + succ n : rfl
-- Exercise: 1 star (destruct_induction)
-- Exercise: 4 stars (mult_comm)
theorem plus_swap : ∀ (n m p : nat), n + (m + p) = m + (n + p) := by simp
-- Exercise: 2 stars, optional (evenb_n__oddb_Sn)
lemma evenb_SS (n : nat) : evenb (succ (succ n)) = evenb n := rfl
theorem evenb_n_oddb_Sn : ∀ (n : nat), evenb n = bnot (evenb (succ n))
| zero := rfl
| (succ zero) := rfl
| (succ (succ n)) := calc
evenb (succ (succ n)) = evenb n : evenb_SS
... = bnot (evenb (succ n)) : evenb_n_oddb_Sn
... = bnot (evenb (succ (succ (succ n)))) : evenb_SS
-- Exercise: 3 stars, optional (more_exercises)
-- Exercise: 2 stars, optional (beq_nat_refl)
-- Exercise: 2 stars, optional (plus_swap')
-- Exercise: 3 stars (binary_commute)
theorem bin_to_nat_pres_incr : ∀ (b : bin), bin_nat (incr b) = succ (bin_nat b)
| bin.zero := rfl
| (bin.twice b) := rfl
| (bin.twice_one b) := calc
bin_nat (incr (bin.twice_one b)) = bin_nat (bin.twice (incr b)) : rfl
... = 2 * bin_nat (incr b) : rfl
... = 2 * succ (bin_nat b) : bin_to_nat_pres_incr
... = succ (bin_nat (bin.twice_one b)) : rfl
-- Exercise: 5 stars, advanced (binary_inverse)
definition nat_bin : nat → bin
| zero := bin.zero
| (succ n) := incr (nat_bin n)
theorem nat_bin_nat_id : ∀ (n : nat), bin_nat (nat_bin n) = n
| zero := rfl
| (succ n) := calc
bin_nat (nat_bin (succ n)) = bin_nat (incr (nat_bin n)) : rfl
... = succ (bin_nat (nat_bin n)) : bin_to_nat_pres_incr
... = succ n : nat_bin_nat_id
definition normalize n := nat_bin (bin_nat n)
theorem normalize_idp : ∀ (b : bin), normalize (normalize b) = normalize b := λb, by unfold normalize; rewrite nat_bin_nat_id
-- Exercise: 2 stars, advanced (plus_comm_informal)
-- Exercise: 2 stars, optional (beq_nat_refl_informal)
| 568e5b1abfd422b67e7b21760e9c1f6a77883e82 | 947b78d97130d56365ae2ec264df196ce769371a | /tests/lean/file_not_found.lean | 20f87dc7da03a8997872839b4e30e44617ca7b6c | ["Apache-2.0"] | permissive | shyamalschandra/lean4 | 27044812be8698f0c79147615b1d5090b9f4b037 | 6e7a883b21eaf62831e8111b251dc9b18f40e604 | refs/heads/master | 1,671,417,126,371 | 1,601,859,995,000 | 1,601,860,020,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 568 | lean |
prelude
import Init.System.IO
new_frontend
open IO.FS
def usingIO {α} (x : IO α) := x
#eval (discard $ IO.FS.Handle.mk "non-existent-file.txt" Mode.read : IO Unit)
#eval usingIO do
condM (IO.fileExists "readonly.txt")
(pure ())
(IO.FS.withFile "readonly.txt" Mode.write $ fun _ => pure ());
IO.setAccessRights "readonly.txt" { user := { read := true } };
pure ()
#eval (discard $ IO.FS.Handle.mk "readonly.txt" Mode.write : IO Unit)
#eval usingIO do
let h ← IO.FS.Handle.mk "readonly.txt" Mode.read;
h.putStr "foo";
IO.println "foo";
pure ()
| 2c603cb047f05144143e342b68ae6f1967ee5d4c | 80cc5bf14c8ea85ff340d1d747a127dcadeb966f | /src/tactic/omega/nat/neg_elim.lean | 56825206d10d7314c72257053bab77752762dcf8 | ["Apache-2.0"] | permissive | lacker/mathlib | f2439c743c4f8eb413ec589430c82d0f73b2d539 | ddf7563ac69d42cfa4a1bfe41db1fed521bd795f | refs/heads/master | 1,671,948,326,773 | 1,601,479,268,000 | 1,601,479,268,000 | 298,686,743 | 0 | 0 | Apache-2.0 | 1,601,070,794,000 | 1,601,070,794,000 | null | UTF-8 | Lean | false | false | 4,161 | lean |
/- Copyright (c) 2019 Seul Baek. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Seul Baek
Negation elimination. -/
import tactic.omega.nat.form
namespace omega
namespace nat
open_locale omega.nat
/-- `push_neg p` returns the result of normalizing `¬* p` by
pushing the outermost negation all the way down,
until it reaches either a negation or an atom. -/
@[simp] def push_neg : preform → preform
| (p ∨* q) := (push_neg p) ∧* (push_neg q)
| (p ∧* q) := (push_neg p) ∨* (push_neg q)
| (¬*p) := p
| p := ¬* p
lemma push_neg_equiv :
∀ {p : preform}, preform.equiv (push_neg p) (¬* p) :=
begin
preform.induce `[intros v; try {refl}],
{ simp only [not_not, preform.holds, push_neg] },
{ simp only [preform.holds, push_neg, not_or_distrib, ihp v, ihq v] },
{ simp only [preform.holds, push_neg, not_and_distrib, ihp v, ihq v] }
end
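/- A quick sketch of how the correctness statement above is consumed downstream
(e.g. by `nnf_equiv`): it can be instantiated at any preform. -/
example {p : preform} : preform.equiv (push_neg p) (¬* p) := push_neg_equiv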
/-- NNF transformation -/
def nnf : preform → preform
| (¬* p) := push_neg (nnf p)
| (p ∨* q) := (nnf p) ∨* (nnf q)
| (p ∧* q) := (nnf p) ∧* (nnf q)
| a := a
/-- Asserts that the given preform is in NNF -/
def is_nnf : preform → Prop
| (t =* s) := true
| (t ≤* s) := true
| ¬*(t =* s) := true
| ¬*(t ≤* s) := true
| (p ∨* q) := is_nnf p ∧ is_nnf q
| (p ∧* q) := is_nnf p ∧ is_nnf q
| _ := false
lemma is_nnf_push_neg : ∀ p : preform, is_nnf p → is_nnf (push_neg p) :=
begin
preform.induce `[intro h1; try {trivial}],
{ cases p; try {cases h1}; trivial },
{ cases h1, constructor; [{apply ihp}, {apply ihq}]; assumption },
{ cases h1, constructor; [{apply ihp}, {apply ihq}]; assumption }
end
lemma is_nnf_nnf : ∀ p : preform, is_nnf (nnf p) :=
begin
preform.induce `[try {trivial}],
{ apply is_nnf_push_neg _ ih },
{ constructor; assumption },
{ constructor; assumption }
end
lemma nnf_equiv : ∀ {p : preform}, preform.equiv (nnf p) p :=
begin
preform.induce `[intros v; try {refl}; simp only [nnf]],
{ rw push_neg_equiv,
apply not_iff_not_of_iff, apply ih },
{ apply pred_mono_2' (ihp v) (ihq v) },
{ apply pred_mono_2' (ihp v) (ihq v) }
end
@[simp] def neg_elim_core : preform → preform
| (¬* (t =* s)) := (t.add_one ≤* s) ∨* (s.add_one ≤* t)
| (¬* (t ≤* s)) := s.add_one ≤* t
| (p ∨* q) := (neg_elim_core p) ∨* (neg_elim_core q)
| (p ∧* q) := (neg_elim_core p) ∧* (neg_elim_core q)
| p := p
lemma neg_free_neg_elim_core : ∀ p, is_nnf p → (neg_elim_core p).neg_free :=
begin
preform.induce `[intro h1, try {simp only [neg_free, neg_elim_core]}, try {trivial}],
{ cases p; try {cases h1}; try {trivial},
constructor; trivial },
{ cases h1, constructor; [{apply ihp}, {apply ihq}]; assumption },
{ cases h1, constructor; [{apply ihp}, {apply ihq}]; assumption }
end
lemma le_and_le_iff_eq {α : Type} [partial_order α] {a b : α} :
(a ≤ b ∧ b ≤ a) ↔ a = b :=
begin
constructor; intro h1,
{ cases h1, apply le_antisymm; assumption },
{ constructor; apply le_of_eq; rw h1 }
end
lemma implies_neg_elim_core : ∀ {p : preform},
preform.implies p (neg_elim_core p) :=
begin
preform.induce `[intros v h, try {apply h}],
{ cases p with t s t s; try {apply h},
{ have : preterm.val v (preterm.add_one t) ≤ preterm.val v s ∨
preterm.val v (preterm.add_one s) ≤ preterm.val v t,
{ rw or.comm,
simpa only [preform.holds, le_and_le_iff_eq.symm,
not_and_distrib, not_le] using h },
simpa only [form.holds, neg_elim_core, int.add_one_le_iff] },
simpa only [preform.holds, not_le, int.add_one_le_iff] using h },
{ simp only [neg_elim_core], cases h;
[{left, apply ihp}, {right, apply ihq}];
assumption },
apply and.imp (ihp _) (ihq _) h
end
/-- Eliminate all negations in a preform -/
def neg_elim : preform → preform := neg_elim_core ∘ nnf
lemma neg_free_neg_elim {p : preform} : (neg_elim p).neg_free :=
neg_free_neg_elim_core _ (is_nnf_nnf _)
lemma implies_neg_elim {p : preform} : preform.implies p (neg_elim p) :=
begin
intros v h1, apply implies_neg_elim_core,
apply (nnf_equiv v).elim_right h1
end
end nat
end omega
| 48162b383ef5c5c522c5fb48c805e5b51ccef235 | ae1e94c332e17c7dc7051ce976d5a9eebe7ab8a5 | /tests/lean/server/edits.lean | a02855da212cba1d50f919dc2369a1dd21379bc2 | ["Apache-2.0"] | permissive | dupuisf/lean4 | d082d13b01243e1de29ae680eefb476961221eef | 6a39c65bd28eb0e28c3870188f348c8914502718 | refs/heads/master | 1,676,948,755,391 | 1,610,665,114,000 | 1,610,665,114,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 1,313 | lean |
import Lean.Data.Lsp
open IO Lean Lsp
#exit
#eval (do
Ipc.runWith (←IO.appPath) #["--server"] do
let hIn ← Ipc.stdin
hIn.write (←FS.readBinFile "init_vscode_1_47_2.log")
hIn.flush
discard $ Ipc.readResponseAs 0 InitializeResult
Ipc.writeNotification ⟨"initialized", InitializedParams.mk⟩
hIn.write (←FS.readBinFile "open_content.log")
hIn.flush
discard <| Ipc.collectDiagnostics 1 "file:///test.lean"
hIn.write (←FS.readBinFile "content_changes.log")
hIn.flush
let diags ← Ipc.collectDiagnostics 2 "file:///test.lean"
if diags.isEmpty then
throw $ userError "Test failed, no diagnostics received."
else
let diag := diags.getLast!
FS.writeFile "edits_diag.json.produced" (toString <| toJson (diag : JsonRpc.Message))
if let some (refDiag : JsonRpc.Notification PublishDiagnosticsParams) :=
(Json.parse $ ←FS.readFile "edits_diag.json").toOption >>= fromJson?
then
assert! (diag == refDiag)
else
throw $ userError "Failed parsing test file."
Ipc.writeRequest ⟨3, "shutdown", Json.null⟩
let shutResp ← Ipc.readResponseAs 3 Json
assert! shutResp.result.isNull
Ipc.writeNotification ⟨"exit", Json.null⟩
discard $ Ipc.waitForExit
: IO Unit)
| 6a80d414abe9982b050238ebdbefe1ae9d2df3b6 | 6e9cd8d58e550c481a3b45806bd34a3514c6b3e0 | /src/field_theory/finite.lean | 2eb69f049e8e1dca5077f32c1cb200308695e51e | ["Apache-2.0"] | permissive | sflicht/mathlib | 220fd16e463928110e7b0a50bbed7b731979407f | 1b2048d7195314a7e34e06770948ee00f0ac3545 | refs/heads/master | 1,665,934,056,043 | 1,591,373,803,000 | 1,591,373,803,000 | 269,815,267 | 0 | 0 | Apache-2.0 | 1,591,402,068,000 | 1,591,402,067,000 | null | UTF-8 | Lean | false | false | 10,068 | lean |
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Joey van Langen, Casper Putz
-/
import data.equiv.ring
import data.zmod.basic
import linear_algebra.basis
import ring_theory.integral_domain
/-!
# Finite fields
This file contains basic results about finite fields.
Throughout most of this file, `K` denotes a finite field
and `q` is notation for the cardinality of `K`.
## Main results
1. Every finite integral domain is a field (`field_of_integral_domain`).
2. The unit group of a finite field is a cyclic group of order `q - 1`.
(`finite_field.is_cyclic` and `card_units`)
3. `sum_pow_units`: The sum of `x^i`, where `x` ranges over the units of `K`, is
- `q-1` if `q-1 ∣ i`
- `0` otherwise
4. `finite_field.card`: The cardinality `q` is a power of the characteristic of `K`.
See `card'` for a variant.
## Notation
Throughout most of this file, `K` denotes a finite field
and `q` is notation for the cardinality of `K`.
-/
variables {K : Type*} [field K] [fintype K]
variables {R : Type*} [integral_domain R]
local notation `q` := fintype.card K
open_locale big_operators
namespace finite_field
open finset function
section polynomial
open polynomial
/-- The cardinality of a field is at most `n` times the cardinality of the image of a degree `n`
polynomial -/
lemma card_image_polynomial_eval [fintype R] [decidable_eq R] {p : polynomial R} (hp : 0 < p.degree) :
fintype.card R ≤ nat_degree p * (univ.image (λ x, eval x p)).card :=
finset.card_le_mul_card_image _ _
(λ a _, calc _ = (p - C a).roots.card : congr_arg card
(by simp [finset.ext, mem_roots_sub_C hp, -sub_eq_add_neg])
... ≤ _ : card_roots_sub_C' hp)
/-- If `f` and `g` are quadratic polynomials, then the equation `f.eval a + g.eval b = 0` has a solution. -/
lemma exists_root_sum_quadratic [fintype R] {f g : polynomial R} (hf2 : degree f = 2)
(hg2 : degree g = 2) (hR : fintype.card R % 2 = 1) : ∃ a b, f.eval a + g.eval b = 0 :=
by letI := classical.dec_eq R; exact
suffices ¬ disjoint (univ.image (λ x : R, eval x f)) (univ.image (λ x : R, eval x (-g))),
begin
simp only [disjoint_left, mem_image] at this,
push_neg at this,
rcases this with ⟨x, ⟨a, _, ha⟩, ⟨b, _, hb⟩⟩,
exact ⟨a, b, by rw [ha, ← hb, eval_neg, neg_add_self]⟩
end,
assume hd : disjoint _ _,
lt_irrefl (2 * ((univ.image (λ x : R, eval x f)) ∪ (univ.image (λ x : R, eval x (-g)))).card) $
calc 2 * ((univ.image (λ x : R, eval x f)) ∪ (univ.image (λ x : R, eval x (-g)))).card
≤ 2 * fintype.card R : nat.mul_le_mul_left _ (finset.card_le_of_subset (subset_univ _))
... = fintype.card R + fintype.card R : two_mul _
... < nat_degree f * (univ.image (λ x : R, eval x f)).card +
nat_degree (-g) * (univ.image (λ x : R, eval x (-g))).card :
add_lt_add_of_lt_of_le
(lt_of_le_of_ne
(card_image_polynomial_eval (by rw hf2; exact dec_trivial))
(mt (congr_arg (%2)) (by simp [nat_degree_eq_of_degree_eq_some hf2, hR])))
(card_image_polynomial_eval (by rw [degree_neg, hg2]; exact dec_trivial))
... = 2 * (univ.image (λ x : R, eval x f) ∪ univ.image (λ x : R, eval x (-g))).card :
by rw [card_disjoint_union hd]; simp [nat_degree_eq_of_degree_eq_some hf2,
nat_degree_eq_of_degree_eq_some hg2, bit0, mul_add]
end polynomial
lemma card_units : fintype.card (units K) = fintype.card K - 1 :=
begin
classical,
rw [eq_comm, nat.sub_eq_iff_eq_add (fintype.card_pos_iff.2 ⟨(0 : K)⟩)],
haveI := set_fintype {a : K | a ≠ 0},
haveI := set_fintype (@set.univ K),
rw [fintype.card_congr (equiv.units_equiv_ne_zero _),
← @set.card_insert _ _ {a : K | a ≠ 0} _ (not_not.2 (eq.refl (0 : K)))
(set.fintype_insert _ _), fintype.card_congr (equiv.set.univ K).symm],
congr; simp [set.ext_iff, classical.em]
end
lemma prod_univ_units_id_eq_neg_one :
univ.prod (λ x, x) = (-1 : units K) :=
begin
classical,
have : ((@univ (units K) _).erase (-1)).prod (λ x, x) = 1,
from prod_involution (λ x _, x⁻¹) (by simp)
(λ a, by simp [units.inv_eq_self_iff] {contextual := tt})
(λ a, by simp [@inv_eq_iff_inv_eq _ _ a, eq_comm] {contextual := tt})
(by simp),
rw [← insert_erase (mem_univ (-1 : units K)), prod_insert (not_mem_erase _ _),
this, mul_one]
end
lemma pow_card_sub_one_eq_one (a : K) (ha : a ≠ 0) :
a ^ (fintype.card K - 1) = 1 :=
calc a ^ (fintype.card K - 1) = (units.mk0 a ha ^ (fintype.card K - 1) : units K) :
by rw [units.coe_pow, units.coe_mk0]
... = 1 : by { classical, rw [← card_units, pow_card_eq_one], refl }
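/- An illustrative consequence (a sketch, assuming the ambient `pow_mul` and `one_pow`):
powers whose exponent is a multiple of `q - 1` collapse to `1`. -/
example (a : K) (ha : a ≠ 0) (n : ℕ) : a ^ (n * (q - 1)) = 1 :=
by rw [mul_comm, pow_mul, pow_card_sub_one_eq_one a ha, one_pow]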
variable (K)
theorem card (p : ℕ) [char_p K p] : ∃ (n : ℕ+), nat.prime p ∧ q = p^(n : ℕ) :=
begin
haveI hp : fact p.prime := char_p.char_is_prime K p,
letI : vector_space (zmod p) K := { .. (zmod.cast_hom (dvd_refl _) K).to_semimodule },
obtain ⟨n, h⟩ := vector_space.card_fintype (zmod p) K,
rw zmod.card at h,
refine ⟨⟨n, _⟩, hp, h⟩,
apply or.resolve_left (nat.eq_zero_or_pos n),
rintro rfl,
rw nat.pow_zero at h,
have : (0 : K) = 1, { apply fintype.card_le_one_iff.mp (le_of_eq h) },
exact absurd this zero_ne_one,
end
theorem card' : ∃ (p : ℕ) (n : ℕ+), nat.prime p ∧ q = p^(n : ℕ) :=
let ⟨p, hc⟩ := char_p.exists K in ⟨p, @finite_field.card K _ _ p hc⟩
@[simp] lemma cast_card_eq_zero : (q : K) = 0 :=
begin
rcases char_p.exists K with ⟨p, _char_p⟩, resetI,
rcases card K p with ⟨n, hp, hn⟩,
simp only [char_p.cast_eq_zero_iff K p, hn],
conv { congr, rw [← nat.pow_one p] },
exact nat.pow_dvd_pow _ n.2,
end
lemma forall_pow_eq_one_iff (i : ℕ) :
(∀ x : units K, x ^ i = 1) ↔ q - 1 ∣ i :=
begin
obtain ⟨x, hx⟩ := is_cyclic.exists_generator (units K),
classical,
rw [← card_units, ← order_of_eq_card_of_forall_mem_gpowers hx, order_of_dvd_iff_pow_eq_one],
split,
{ intro h, apply h },
{ intros h y,
rw ← powers_eq_gpowers at hx,
rcases hx y with ⟨j, rfl⟩,
rw [← pow_mul, mul_comm, pow_mul, h, one_pow], }
end
/-- The sum of `x ^ i` as `x` ranges over the units of a finite field of cardinality `q`
is equal to `0` unless `(q - 1) ∣ i`, in which case the sum is `q - 1`. -/
lemma sum_pow_units (i : ℕ) :
∑ x : units K, (x ^ i : K) = if (q - 1) ∣ i then -1 else 0 :=
begin
let φ : units K →* K :=
{ to_fun := λ x, x ^ i,
map_one' := by rw [units.coe_one, one_pow],
map_mul' := by { intros, rw [units.coe_mul, mul_pow] } },
haveI : decidable (φ = 1) := by { classical, apply_instance },
calc ∑ x : units K, φ x = if φ = 1 then fintype.card (units K) else 0 : sum_hom_units φ
... = if (q - 1) ∣ i then -1 else 0 : _,
suffices : (q - 1) ∣ i ↔ φ = 1,
{ simp only [this],
split_ifs with h h, swap, refl,
rw [card_units, nat.cast_sub, cast_card_eq_zero, nat.cast_one, zero_sub],
show 1 ≤ q, from fintype.card_pos_iff.mpr ⟨0⟩ },
rw [← forall_pow_eq_one_iff, monoid_hom.ext_iff],
apply forall_congr, intro x,
rw [units.ext_iff, units.coe_pow, units.coe_one, monoid_hom.one_apply],
refl,
end
/-- The sum of `x ^ i` as `x` ranges over a finite field of cardinality `q`
is equal to `0` if `i < q - 1`. -/
lemma sum_pow_lt_card_sub_one (i : ℕ) (h : i < q - 1) :
∑ x : K, x ^ i = 0 :=
begin
by_cases hi : i = 0,
{ simp only [hi, nsmul_one, sum_const, pow_zero, card_univ, cast_card_eq_zero], },
classical,
have hiq : ¬ (q - 1) ∣ i, { contrapose! h, exact nat.le_of_dvd (nat.pos_of_ne_zero hi) h },
let φ : units K ↪ K := ⟨coe, units.ext⟩,
have : univ.map φ = univ \ {0},
{ ext x,
simp only [true_and, embedding.coe_fn_mk, mem_sdiff, units.exists_iff_ne_zero,
mem_univ, mem_map, exists_prop_of_true, mem_singleton] },
calc ∑ x : K, x ^ i = ∑ x in univ \ {(0 : K)}, x ^ i :
by rw [← sum_sdiff ({0} : finset K).subset_univ, sum_singleton,
zero_pow (nat.pos_of_ne_zero hi), add_zero]
... = ∑ x : units K, x ^ i : by { rw [← this, univ.sum_map φ], refl }
... = 0 : by { rw [sum_pow_units K i, if_neg], exact hiq, }
end
end finite_field
namespace zmod
open finite_field polynomial
lemma sum_two_squares (p : ℕ) [hp : fact p.prime] (x : zmod p) :
∃ a b : zmod p, a^2 + b^2 = x :=
begin
cases hp.eq_two_or_odd with hp2 hp_odd,
{ unfreezeI, subst p, revert x, exact dec_trivial },
let f : polynomial (zmod p) := X^2,
let g : polynomial (zmod p) := X^2 - C x,
obtain ⟨a, b, hab⟩ : ∃ a b, f.eval a + g.eval b = 0 :=
@exists_root_sum_quadratic _ _ _ f g
(degree_X_pow 2) (degree_X_pow_sub_C dec_trivial _) (by rw [zmod.card, hp_odd]),
refine ⟨a, b, _⟩,
rw ← sub_eq_zero,
simpa only [eval_C, eval_X, eval_pow, eval_sub, ← add_sub_assoc] using hab,
end
end zmod
namespace char_p
lemma sum_two_squares (R : Type*) [integral_domain R] (p : ℕ) [fact (0 < p)] [char_p R p] (x : ℤ) :
∃ a b : ℕ, (a^2 + b^2 : R) = x :=
begin
haveI := char_is_prime_of_pos R p,
obtain ⟨a, b, hab⟩ := zmod.sum_two_squares p x,
refine ⟨a.val, b.val, _⟩,
simpa using congr_arg (zmod.cast_hom (dvd_refl _) R) hab
end
end char_p
open_locale nat
open zmod
/-- The Fermat-Euler totient theorem. `nat.modeq.pow_totient` is an alternative statement
of the same theorem. -/
@[simp] lemma zmod.pow_totient {n : ℕ} [fact (0 < n)] (x : units (zmod n)) : x ^ φ n = 1 :=
by rw [← card_units_eq_totient, pow_card_eq_one]
/-- The Fermat-Euler totient theorem. `zmod.pow_totient` is an alternative statement
of the same theorem. -/
lemma nat.modeq.pow_totient {x n : ℕ} (h : nat.coprime x n) : x ^ φ n ≡ 1 [MOD n] :=
begin
cases n, {simp},
rw ← zmod.eq_iff_modeq_nat,
let x' : units (zmod (n+1)) := zmod.unit_of_coprime _ h,
have := zmod.pow_totient x',
apply_fun (coe : units (zmod (n+1)) → zmod (n+1)) at this,
simpa only [-zmod.pow_totient, nat.succ_eq_add_one, nat.cast_pow, units.coe_one,
nat.cast_one, cast_unit_of_coprime, units.coe_pow],
end
| 6d106f33ae8a15f5c26095c755c3a4ef5893c097 | a9d0fb7b0e4f802bd3857b803e6c5c23d87fef91 | /tests/lean/run/help_cmd.lean | 40e247fee6f5b9b7f3960c73f58562b01d98aa99 | ["Apache-2.0"] | permissive | soonhokong/lean-osx | 4a954262c780e404c1369d6c06516161d07fcb40 | 3670278342d2f4faa49d95b46d86642d7875b47c | refs/heads/master | 1,611,410,334,552 | 1,474,425,686,000 | 1,474,425,686,000 | 12,043,103 | 5 | 1 | null | null | null | null | UTF-8 | Lean | false | false | 43 | lean |
help options
help commands
print options
| 64bad0bf9f4bf3953575994b08f26b87966b0014 | 4727251e0cd73359b15b664c3170e5d754078599 | /src/control/applicative.lean | ffa4df13e1e840d5e7cb548d2841b08382be9442 | ["Apache-2.0"] | permissive | Vierkantor/mathlib | 0ea59ac32a3a43c93c44d70f441c4ee810ccceca | 83bc3b9ce9b13910b57bda6b56222495ebd31c2f | refs/heads/master | 1,658,323,012,449 | 1,652,256,003,000 | 1,652,256,003,000 | 209,296,341 | 0 | 1 | Apache-2.0 | 1,568,807,655,000 | 1,568,807,655,000 | null | UTF-8 | Lean | false | false | 4,862 | lean |
/-
Copyright (c) 2017 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
-/
import algebra.group.defs
import control.functor
/-!
# `applicative` instances
This file provides `applicative` instances for concrete functors:
* `id`
* `functor.comp`
* `functor.const`
* `functor.add_const`
-/
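/- For intuition (a sketch only; the `comp` applicative instance is defined elsewhere and
the equalities below are not machine-checked here): composing applicative functors is again
applicative, so with `F := option` and `G := list` one expects
  `(pure 3 : functor.comp option list ℕ) = comp.mk (some [3])` and
  `comp.mk (some [(+ 1), (* 2)]) <*> comp.mk (some [10]) = comp.mk (some [11, 20])`;
the instances in this file supply the corresponding lawfulness proofs. -/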
universes u v w
section lemmas
open function
variables {F : Type u → Type v}
variables [applicative F] [is_lawful_applicative F]
variables {α β γ σ : Type u}
lemma applicative.map_seq_map (f : α → β → γ) (g : σ → β) (x : F α) (y : F σ) :
(f <$> x) <*> (g <$> y) = (flip (∘) g ∘ f) <$> x <*> y :=
by simp [flip] with functor_norm
lemma applicative.pure_seq_eq_map' (f : α → β) : (<*>) (pure f : F (α → β)) = (<$>) f :=
by ext; simp with functor_norm
theorem applicative.ext {F} : ∀ {A1 : applicative F} {A2 : applicative F}
[@is_lawful_applicative F A1] [@is_lawful_applicative F A2]
(H1 : ∀ {α : Type u} (x : α),
@has_pure.pure _ A1.to_has_pure _ x = @has_pure.pure _ A2.to_has_pure _ x)
(H2 : ∀ {α β : Type u} (f : F (α → β)) (x : F α),
@has_seq.seq _ A1.to_has_seq _ _ f x = @has_seq.seq _ A2.to_has_seq _ _ f x),
A1 = A2
| {to_functor := F1, seq := s1, pure := p1, seq_left := sl1, seq_right := sr1}
{to_functor := F2, seq := s2, pure := p2, seq_left := sl2, seq_right := sr2} L1 L2 H1 H2 :=
begin
obtain rfl : @p1 = @p2, {funext α x, apply H1},
obtain rfl : @s1 = @s2, {funext α β f x, apply H2},
cases L1, cases L2,
obtain rfl : F1 = F2,
{ resetI, apply functor.ext, intros,
exact (L1_pure_seq_eq_map _ _).symm.trans (L2_pure_seq_eq_map _ _) },
congr; funext α β x y,
{ exact (L1_seq_left_eq _ _).trans (L2_seq_left_eq _ _).symm },
{ exact (L1_seq_right_eq _ _).trans (L2_seq_right_eq _ _).symm }
end
end lemmas
instance : is_comm_applicative id :=
by refine { .. }; intros; refl
namespace functor
namespace comp
open function (hiding comp)
open functor
variables {F : Type u → Type w} {G : Type v → Type u}
variables [applicative F] [applicative G]
variables [is_lawful_applicative F] [is_lawful_applicative G]
variables {α β γ : Type v}
lemma map_pure (f : α → β) (x : α) : (f <$> pure x : comp F G β) = pure (f x) :=
comp.ext $ by simp
lemma seq_pure (f : comp F G (α → β)) (x : α) :
f <*> pure x = (λ g : α → β, g x) <$> f :=
comp.ext $ by simp [(∘)] with functor_norm
lemma seq_assoc (x : comp F G α) (f : comp F G (α → β)) (g : comp F G (β → γ)) :
g <*> (f <*> x) = (@function.comp α β γ <$> g) <*> f <*> x :=
comp.ext $ by simp [(∘)] with functor_norm
lemma pure_seq_eq_map (f : α → β) (x : comp F G α) :
pure f <*> x = f <$> x :=
comp.ext $ by simp [applicative.pure_seq_eq_map'] with functor_norm
instance : is_lawful_applicative (comp F G) :=
{ pure_seq_eq_map := @comp.pure_seq_eq_map F G _ _ _ _,
map_pure := @comp.map_pure F G _ _ _ _,
seq_pure := @comp.seq_pure F G _ _ _ _,
seq_assoc := @comp.seq_assoc F G _ _ _ _ }
theorem applicative_id_comp {F} [AF : applicative F] [LF : is_lawful_applicative F] :
@comp.applicative id F _ _ = AF :=
@applicative.ext F _ _ (@comp.is_lawful_applicative id F _ _ _ _) _
(λ α x, rfl) (λ α β f x, rfl)
theorem applicative_comp_id {F} [AF : applicative F] [LF : is_lawful_applicative F] :
@comp.applicative F id _ _ = AF :=
@applicative.ext F _ _ (@comp.is_lawful_applicative F id _ _ _ _) _
(λ α x, rfl) (λ α β f x, show id <$> f <*> x = f <*> x, by rw id_map)
open is_comm_applicative
instance {f : Type u → Type w} {g : Type v → Type u}
[applicative f] [applicative g]
[is_comm_applicative f] [is_comm_applicative g] :
is_comm_applicative (comp f g) :=
by { refine { .. @comp.is_lawful_applicative f g _ _ _ _, .. },
intros, casesm* comp _ _ _, simp! [map,has_seq.seq] with functor_norm,
rw [commutative_map],
simp [comp.mk,flip,(∘)] with functor_norm,
congr, funext, rw [commutative_map], congr }
end comp
end functor
open functor
@[functor_norm]
lemma comp.seq_mk {α β : Type w}
{f : Type u → Type v} {g : Type w → Type u}
[applicative f] [applicative g]
(h : f (g (α → β))) (x : f (g α)) :
comp.mk h <*> comp.mk x = comp.mk (has_seq.seq <$> h <*> x) := rfl
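/- The next two instances make `const α` (resp. `add_const α`) applicative by ignoring the
value-level types entirely and accumulating in `α`: `pure` is the unit `1` (resp. `0`) and
`<*>` is multiplication (resp. addition). Informally, traversing a structure in `const α`
just combines the `α`-labels of its elements. -/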
instance {α} [has_one α] [has_mul α] : applicative (const α) :=
{ pure := λ β x, (1 : α),
seq := λ β γ f x, (f * x : α) }
instance {α} [monoid α] : is_lawful_applicative (const α) :=
by refine { .. }; intros; simp [mul_assoc, (<$>), (<*>), pure]
instance {α} [has_zero α] [has_add α] : applicative (add_const α) :=
{ pure := λ β x, (0 : α),
seq := λ β γ f x, (f + x : α) }
instance {α} [add_monoid α] : is_lawful_applicative (add_const α) :=
by refine { .. }; intros; simp [add_assoc, (<$>), (<*>), pure]
|
882ea3f1e21fb9c688f95b1942905ace9c9cf03f
|
ce6917c5bacabee346655160b74a307b4a5ab620
|
/src/ch3/ex0201.lean
|
fbcd65f215bc2ae6d0d8c6bdbd0959d963902b24
|
[] |
no_license
|
Ailrun/Theorem_Proving_in_Lean
|
ae6a23f3c54d62d401314d6a771e8ff8b4132db2
|
2eb1b5caf93c6a5a555c79e9097cf2ba5a66cf68
|
refs/heads/master
| 1,609,838,270,467
| 1,586,846,743,000
| 1,586,846,743,000
| 240,967,761
| 1
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 77
|
lean
|
constants p q : Prop
theorem t1 : p → q → p := λ hp : p, λ hq : q, hp
|
e4e35fc9fe67ce6706690cb6b12dd5314b8b4df1
|
31f556cdeb9239ffc2fad8f905e33987ff4feab9
|
/src/Lean/ParserCompiler.lean
|
2811a8386d36f6bf69e661057885be5d7bc298bc
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
tobiasgrosser/lean4
|
ce0fd9cca0feba1100656679bf41f0bffdbabb71
|
ebdbdc10436a4d9d6b66acf78aae7a23f5bd073f
|
refs/heads/master
| 1,673,103,412,948
| 1,664,930,501,000
| 1,664,930,501,000
| 186,870,185
| 0
| 0
|
Apache-2.0
| 1,665,129,237,000
| 1,557,939,901,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 7,515
|
lean
|
/-
Copyright (c) 2020 Sebastian Ullrich. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
-/
import Lean.Meta.ReduceEval
import Lean.KeyedDeclsAttribute
import Lean.ParserCompiler.Attribute
import Lean.Parser.Extension
/-!
Gadgets for compiling parser declarations into other programs, such as pretty printers.
-/
namespace Lean
namespace ParserCompiler
structure Context (α : Type) where
varName : Name
categoryAttr : KeyedDeclsAttribute α
combinatorAttr : CombinatorAttribute
def Context.tyName {α} (ctx : Context α) : Name := ctx.categoryAttr.defn.valueTypeName
/-- Replace all references of `Parser` with `tyName` -/
def replaceParserTy {α} (ctx : Context α) (e : Expr) : Expr :=
e.replace fun e =>
-- strip `optParam`
let e := if e.isOptParam then e.appFn!.appArg! else e
if e.isConstOf `Lean.Parser.Parser then mkConst ctx.tyName else none
open Meta Parser in
/-- Takes an expression of type `Parser`, and determines the syntax kind of the root node it produces. -/
partial def parserNodeKind? (e : Expr) : MetaM (Option Name) := do
let reduceEval? e : MetaM (Option Name) := do
try pure <| some (← reduceEval e) catch _ => pure none
let e ← whnfCore e
if e matches Expr.lam .. then
lambdaLetTelescope e fun _ e => parserNodeKind? e
else if e.isAppOfArity ``nodeWithAntiquot 4 then
reduceEval? (e.getArg! 1)
else if e.isAppOfArity ``withAntiquot 2 then
parserNodeKind? (e.getArg! 1)
else if e.isAppOfArity ``leadingNode 3 || e.isAppOfArity ``trailingNode 4 || e.isAppOfArity ``node 2 then
reduceEval? (e.getArg! 0)
else
return none
section
open Meta
variable {α} (ctx : Context α) (builtin : Bool) (force : Bool) in
/--
Translate an expression of type `Parser` into one of type `tyName`, tagging intermediary constants with
`ctx.combinatorAttr`. If `force` is `false`, refuse to do so for imported constants. -/
partial def compileParserExpr (e : Expr) : MetaM Expr := do
let e ← whnfCore e
match e with
| .lam .. => lambdaLetTelescope e fun xs b => compileParserExpr b >>= mkLambdaFVars xs
| .fvar .. => return e
| _ => do
let fn := e.getAppFn
let .const c .. := fn | throwError "call of unknown parser at '{e}'"
-- call the translated `p` with (a prefix of) the arguments of `e`, recursing for arguments
-- of type `ty` (i.e. formerly `Parser`)
let mkCall (p : Name) := do
let ty ← inferType (mkConst p)
forallTelescope ty fun params _ => do
let mut p := mkConst p
let args := e.getAppArgs
for i in [:Nat.min params.size args.size] do
let param := params[i]!
let arg := args[i]!
let paramTy ← inferType param
let resultTy ← forallTelescope paramTy fun _ b => pure b
let arg ← if resultTy.isConstOf ctx.tyName then compileParserExpr arg else pure arg
p := mkApp p arg
return p
let env ← getEnv
match ctx.combinatorAttr.getDeclFor? env c with
| some p => mkCall p
| none =>
let c' := c ++ ctx.varName
let cinfo ← getConstInfo c
let resultTy ← forallTelescope cinfo.type fun _ b => pure b
if resultTy.isConstOf `Lean.Parser.TrailingParser || resultTy.isConstOf `Lean.Parser.Parser then do
-- synthesize a new `[combinatorAttr c]`
let some value ← pure cinfo.value?
| throwError "don't know how to generate {ctx.varName} for non-definition '{e}'"
unless (env.getModuleIdxFor? c).isNone || force do
throwError "refusing to generate code for imported parser declaration '{c}'; use `@[runParserAttributeHooks]` on its definition instead."
let value ← compileParserExpr <| replaceParserTy ctx value
let ty ← forallTelescope cinfo.type fun params _ =>
params.foldrM (init := mkConst ctx.tyName) fun param ty => do
let paramTy ← replaceParserTy ctx <$> inferType param
return mkForall `_ BinderInfo.default paramTy ty
let decl := Declaration.defnDecl {
name := c', levelParams := []
type := ty, value := value, hints := ReducibilityHints.opaque, safety := DefinitionSafety.safe
}
let env ← getEnv
let env ← match env.addAndCompile {} decl with
| Except.ok env => pure env
| Except.error kex => do throwError (← (kex.toMessageData {}).toString)
setEnv <| ctx.combinatorAttr.setDeclFor env c c'
if cinfo.type.isConst then
if let some kind ← parserNodeKind? cinfo.value! then
-- If the parser is parameter-less and produces a node of kind `kind`,
-- then tag the compiled definition as `[(builtin)Parenthesizer kind]`
-- (or `[(builtin)Formatter kind]`, resp.)
let attrName := if builtin then ctx.categoryAttr.defn.builtinName else ctx.categoryAttr.defn.name
-- Create syntax node for a simple attribute of the form
-- `def simple := leading_parser ident >> optional (ident <|> priorityParser)`
let stx := mkNode `Lean.Parser.Attr.simple #[mkIdent attrName, mkNullNode #[mkIdent kind]]
Attribute.add c' attrName stx
mkCall c'
else
-- if this is a generic function, e.g. `AndThen.andthen`, it's easier to just unfold it until we are
-- back to parser combinators
let some e' ← unfoldDefinition? e
| throwError "don't know how to generate {ctx.varName} for non-parser combinator '{e}'"
compileParserExpr e'
end
variable {α} (ctx : Context α) (builtin : Bool) in
def compileEmbeddedParsers : ParserDescr → MetaM Unit
| ParserDescr.const _ => pure ()
| ParserDescr.unary _ d => compileEmbeddedParsers d
| ParserDescr.binary _ d₁ d₂ => compileEmbeddedParsers d₁ *> compileEmbeddedParsers d₂
| ParserDescr.parser constName => discard <| compileParserExpr ctx (mkConst constName) (builtin := builtin) (force := false)
| ParserDescr.node _ _ d => compileEmbeddedParsers d
| ParserDescr.nodeWithAntiquot _ _ d => compileEmbeddedParsers d
| ParserDescr.sepBy p _ psep _ => compileEmbeddedParsers p *> compileEmbeddedParsers psep
| ParserDescr.sepBy1 p _ psep _ => compileEmbeddedParsers p *> compileEmbeddedParsers psep
| ParserDescr.trailingNode _ _ _ d => compileEmbeddedParsers d
| ParserDescr.symbol _ => pure ()
| ParserDescr.nonReservedSymbol _ _ => pure ()
| ParserDescr.cat _ _ => pure ()
/-- Precondition: `α` must match `ctx.tyName`. -/
unsafe def registerParserCompiler {α} (ctx : Context α) : IO Unit := do
Parser.registerParserAttributeHook {
postAdd := fun catName constName builtin => do
let info ← getConstInfo constName
if info.type.isConstOf `Lean.ParserDescr || info.type.isConstOf `Lean.TrailingParserDescr then
let d ← evalConstCheck ParserDescr `Lean.ParserDescr constName <|>
evalConstCheck TrailingParserDescr `Lean.TrailingParserDescr constName
compileEmbeddedParsers ctx d (builtin := builtin) |>.run'
else
-- `[runBuiltinParserAttributeHooks]` => force compilation even if imported, do not apply `ctx.categoryAttr`.
let force := catName.isAnonymous
discard (compileParserExpr ctx (mkConst constName) (builtin := builtin) (force := force)).run'
}
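/- Typical usage (a sketch; the names in the snippet are illustrative, modeled on the
pretty printer's parenthesizer, and are not defined in this file): a consumer builds a
`ParserCompiler.Context` from its own category and combinator attributes and registers it
once at initialization, e.g.

  builtin_initialize
    registerParserCompiler ⟨`parenthesizer, parenthesizerAttribute, combinatorParenthesizerAttribute⟩

after which every declaration tagged with a parser attribute is automatically compiled by
the hook above. -/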
end ParserCompiler
end Lean
|
3d173b440630b1b5ac2fec1bc4b1f584fe95994e
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/category_theory/adhesive.lean
|
e9762d9c6f4e490f6fa7780acb32dada08fde081
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 12,548
|
lean
|
/-
Copyright (c) 2022 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import category_theory.extensive
import category_theory.limits.shapes.kernel_pair
/-!
# Adhesive categories
## Main definitions
- `category_theory.is_pushout.is_van_kampen`: A convenience formulation for a pushout being
a van Kampen colimit.
- `category_theory.adhesive`: A category is adhesive if it has pushouts and pullbacks along
monomorphisms, and such pushouts are van Kampen.
## Main Results
- `category_theory.type.adhesive`: The category of `Type` is adhesive.
- `category_theory.adhesive.is_pullback_of_is_pushout_of_mono_left`: In adhesive categories,
pushouts along monomorphisms are pullbacks.
- `category_theory.adhesive.mono_of_is_pushout_of_mono_left`: In adhesive categories,
monomorphisms are stable under pushouts.
- `category_theory.adhesive.to_regular_mono_category`: Monomorphisms in adhesive categories are
regular (this implies that adhesive categories are balanced).
## TODO
Show that the following are adhesive:
- functor categories into adhesive categories
- the categories of sheaves over a site
## References
- https://ncatlab.org/nlab/show/adhesive+category
- [Stephen Lack and Paweł Sobociński, Adhesive Categories][adhesive2004]
-/
namespace category_theory
open limits
universes v' u' v u
variables {J : Type v'} [category.{u'} J] {C : Type u} [category.{v} C]
variables {W X Y Z : C} {f : W ⟶ X} {g : W ⟶ Y} {h : X ⟶ Z} {i : Y ⟶ Z}
/-- A convenience formulation for a pushout being a van Kampen colimit.
See `is_pushout.is_van_kampen_iff` below. -/
@[nolint unused_arguments] -- This only makes sense when the original diagram is a pushout.
def is_pushout.is_van_kampen (H : is_pushout f g h i) : Prop :=
∀ ⦃W' X' Y' Z' : C⦄ (f' : W' ⟶ X') (g' : W' ⟶ Y') (h' : X' ⟶ Z') (i' : Y' ⟶ Z')
(αW : W' ⟶ W) (αX : X' ⟶ X) (αY : Y' ⟶ Y) (αZ : Z' ⟶ Z)
(hf : is_pullback f' αW αX f) (hg : is_pullback g' αW αY g)
(hh : comm_sq h' αX αZ h) (hi : comm_sq i' αY αZ i)
(w : comm_sq f' g' h' i'),
is_pushout f' g' h' i' ↔ is_pullback h' αX αZ h ∧ is_pullback i' αY αZ i
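/- Intuitively (following the nlab page cited in the module docstring): the square
`W X Y Z` is van Kampen when, for every commutative cube having it as its bottom face and
whose two back faces (over `f` and `g`) are pullbacks, the top face is a pushout precisely
if the two front faces (over `h` and `i`) are pullbacks. The data quantified above is
exactly such a cube. -/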
lemma is_pushout.is_van_kampen.flip {H : is_pushout f g h i} (H' : H.is_van_kampen) :
H.flip.is_van_kampen :=
begin
introv W' hf hg hh hi w,
simpa only [is_pushout.flip_iff, is_pullback.flip_iff, and_comm] using
H' g' f' i' h' αW αY αX αZ hg hf hi hh w.flip,
end
lemma is_pushout.is_van_kampen_iff (H : is_pushout f g h i) :
H.is_van_kampen ↔ is_van_kampen_colimit (pushout_cocone.mk h i H.w) :=
begin
split,
{ intros H F' c' α fα eα hα,
refine iff.trans _ ((H (F'.map walking_span.hom.fst) (F'.map walking_span.hom.snd)
(c'.ι.app _) (c'.ι.app _) (α.app _) (α.app _) (α.app _) fα
(by convert hα walking_span.hom.fst) (by convert hα walking_span.hom.snd)
_ _ _).trans _),
{ have : F'.map walking_span.hom.fst ≫ c'.ι.app walking_span.left =
F'.map walking_span.hom.snd ≫ c'.ι.app walking_span.right := by simp only [cocone.w],
rw (is_colimit.equiv_of_nat_iso_of_iso (diagram_iso_span F') c'
(pushout_cocone.mk _ _ this) _).nonempty_congr,
{ exact ⟨λ h, ⟨⟨this⟩, h⟩, λ h, h.2⟩ },
{ refine cocones.ext (iso.refl c'.X) _, rintro (_|_|_); dsimp;
simp only [c'.w, category.assoc, category.id_comp, category.comp_id] } },
{ exact ⟨nat_trans.congr_app eα.symm _⟩ },
{ exact ⟨nat_trans.congr_app eα.symm _⟩ },
{ exact ⟨by simp⟩ },
split,
{ rintros ⟨h₁, h₂⟩ (_|_|_),
{ rw ← c'.w walking_span.hom.fst, exact (hα walking_span.hom.fst).paste_horiz h₁ },
exacts [h₁, h₂] },
{ intro h, exact ⟨h _, h _⟩ } },
{ introv H W' hf hg hh hi w,
refine (iff.trans _
((H w.cocone ⟨by { rintros (_|_|_), exacts [αW, αX, αY] }, _⟩ αZ _ _).trans _)),
rotate,
{ rintros i _ (_|_|_),
{ dsimp, simp only [functor.map_id, category.comp_id, category.id_comp] },
exacts [hf.w, hg.w] },
{ ext (_|_|_),
{ dsimp, rw pushout_cocone.condition_zero, erw [category.assoc, hh.w, hf.w_assoc] },
exacts [hh.w.symm, hi.w.symm] },
{ rintros i _ (_|_|_),
{ dsimp, simp_rw functor.map_id,
exact is_pullback.of_horiz_is_iso ⟨by rw [category.comp_id, category.id_comp]⟩ },
exacts [hf, hg] },
{ split,
{ intro h, exact ⟨h walking_cospan.left, h walking_cospan.right⟩ },
{ rintro ⟨h₁, h₂⟩ (_|_|_),
{ dsimp, rw pushout_cocone.condition_zero, exact hf.paste_horiz h₁ },
exacts [h₁, h₂] } },
{ exact ⟨λ h, h.2, λ h, ⟨_, h⟩⟩ } }
end
.
lemma is_coprod_iff_is_pushout {X E Y YE : C} (c : binary_cofan X E)
(hc : is_colimit c) {f : X ⟶ Y} {iY : Y ⟶ YE} {fE : c.X ⟶ YE}
(H : comm_sq f c.inl iY fE) :
nonempty (is_colimit (binary_cofan.mk (c.inr ≫ fE) iY)) ↔ is_pushout f c.inl iY fE :=
begin
split,
{ rintro ⟨h⟩,
refine ⟨H, ⟨limits.pushout_cocone.is_colimit_aux' _ _⟩⟩,
intro s,
dsimp,
refine ⟨h.desc (binary_cofan.mk (c.inr ≫ s.inr) s.inl), h.fac _ ⟨walking_pair.right⟩, _, _⟩,
{ apply binary_cofan.is_colimit.hom_ext hc,
{ rw ← H.w_assoc, erw h.fac _ ⟨walking_pair.right⟩, exact s.condition },
{ rw ← category.assoc, exact h.fac _ ⟨walking_pair.left⟩ } },
{ intros m e₁ e₂,
apply binary_cofan.is_colimit.hom_ext h,
{ dsimp, rw [category.assoc, e₂, eq_comm], exact h.fac _ ⟨walking_pair.left⟩ },
{ refine e₁.trans (eq.symm _), exact h.fac _ _ } } },
{ refine λ H, ⟨_⟩,
fapply limits.binary_cofan.is_colimit_mk,
{ exact λ s, H.is_colimit.desc (pushout_cocone.mk s.inr _ $
(hc.fac (binary_cofan.mk (f ≫ s.inr) s.inl) ⟨walking_pair.left⟩).symm) },
{ intro s,
erw [category.assoc, H.is_colimit.fac _ walking_span.right, hc.fac], refl },
{ intro s, exact H.is_colimit.fac _ walking_span.left },
{ intros s m e₁ e₂,
apply pushout_cocone.is_colimit.hom_ext H.is_colimit,
{ symmetry, exact (H.is_colimit.fac _ walking_span.left).trans e₂.symm },
{ erw H.is_colimit.fac _ walking_span.right,
apply binary_cofan.is_colimit.hom_ext hc,
{ dsimp, erw [hc.fac, ← H.w_assoc, e₂], refl },
{ refine ((category.assoc _ _ _).symm.trans e₁).trans _, symmetry, exact hc.fac _ _ } } } }
end
lemma is_pushout.is_van_kampen_inl {W E X Z : C} (c : binary_cofan W E)
[finitary_extensive C]
[has_pullbacks C]
(hc : is_colimit c) (f : W ⟶ X) (h : X ⟶ Z) (i : c.X ⟶ Z)
(H : is_pushout f c.inl h i) : H.is_van_kampen :=
begin
obtain ⟨hc₁⟩ := (is_coprod_iff_is_pushout c hc H.1).mpr H,
introv W' hf hg hh hi w,
obtain ⟨hc₂⟩ := ((binary_cofan.is_van_kampen_iff _).mp (finitary_extensive.van_kampen c hc)
(binary_cofan.mk _ pullback.fst) _ _ _ hg.w.symm pullback.condition.symm).mpr
⟨hg, is_pullback.of_has_pullback αY c.inr⟩,
refine (is_coprod_iff_is_pushout _ hc₂ w).symm.trans _,
refine ((binary_cofan.is_van_kampen_iff _).mp (finitary_extensive.van_kampen _ hc₁)
(binary_cofan.mk _ _) pullback.snd _ _ _ hh.w.symm).trans _,
{ dsimp, rw [← pullback.condition_assoc, category.assoc, hi.w] },
split,
{ rintro ⟨hc₃, hc₄⟩,
refine ⟨hc₄, _⟩,
let Y'' := pullback αZ i,
let cmp : Y' ⟶ Y'' := pullback.lift i' αY hi.w,
have e₁ : (g' ≫ cmp) ≫ pullback.snd = αW ≫ c.inl :=
by rw [category.assoc, pullback.lift_snd, hg.w],
have e₂ : (pullback.fst ≫ cmp : pullback αY c.inr ⟶ _) ≫ pullback.snd =
pullback.snd ≫ c.inr :=
by rw [category.assoc, pullback.lift_snd, pullback.condition],
obtain ⟨hc₄⟩ := ((binary_cofan.is_van_kampen_iff _).mp (finitary_extensive.van_kampen c hc)
(binary_cofan.mk _ _) αW _ _ e₁.symm e₂.symm).mpr ⟨_, _⟩,
{ rw [← category.id_comp αZ, ← show cmp ≫ pullback.snd = αY, from pullback.lift_snd _ _ _],
apply is_pullback.paste_vert _ (is_pullback.of_has_pullback αZ i),
have : cmp = (hc₂.cocone_point_unique_up_to_iso hc₄).hom,
{ apply binary_cofan.is_colimit.hom_ext hc₂,
exacts [(hc₂.comp_cocone_point_unique_up_to_iso_hom hc₄ ⟨walking_pair.left⟩).symm,
(hc₂.comp_cocone_point_unique_up_to_iso_hom hc₄ ⟨walking_pair.right⟩).symm] },
rw this,
exact is_pullback.of_vert_is_iso ⟨by rw [← this, category.comp_id, pullback.lift_fst]⟩ },
{ apply is_pullback.of_right _ e₁ (is_pullback.of_has_pullback _ _),
rw [category.assoc, pullback.lift_fst, ← H.w, ← w.w], exact hf.paste_horiz hc₄ },
{ apply is_pullback.of_right _ e₂ (is_pullback.of_has_pullback _ _),
rw [category.assoc, pullback.lift_fst], exact hc₃ } },
{ rintros ⟨hc₃, hc₄⟩,
exact ⟨(is_pullback.of_has_pullback αY c.inr).paste_horiz hc₄, hc₃⟩ }
end
lemma is_pushout.is_van_kampen.is_pullback_of_mono_left [mono f]
{H : is_pushout f g h i} (H' : H.is_van_kampen) :
is_pullback f g h i :=
((H' (𝟙 _) g g (𝟙 Y) (𝟙 _) f (𝟙 _) i
(is_kernel_pair.id_of_mono f) (is_pullback.of_vert_is_iso ⟨by simp⟩) H.1.flip ⟨rfl⟩
⟨by simp⟩).mp (is_pushout.of_horiz_is_iso ⟨by simp⟩)).1.flip
lemma is_pushout.is_van_kampen.is_pullback_of_mono_right [mono g]
{H : is_pushout f g h i} (H' : H.is_van_kampen) :
is_pullback f g h i :=
((H' f (𝟙 _) (𝟙 _) f (𝟙 _) (𝟙 _) g h (is_pullback.of_vert_is_iso ⟨by simp⟩)
(is_kernel_pair.id_of_mono g) ⟨rfl⟩ H.1
⟨by simp⟩).mp (is_pushout.of_vert_is_iso ⟨by simp⟩)).2
lemma is_pushout.is_van_kampen.mono_of_mono_left [mono f]
{H : is_pushout f g h i} (H' : H.is_van_kampen) :
mono i :=
is_kernel_pair.mono_of_is_iso_fst
(((H' (𝟙 _) g g (𝟙 Y) (𝟙 _) f (𝟙 _) i
(is_kernel_pair.id_of_mono f) (is_pullback.of_vert_is_iso ⟨by simp⟩) H.1.flip ⟨rfl⟩
⟨by simp⟩).mp (is_pushout.of_horiz_is_iso ⟨by simp⟩)).2)
lemma is_pushout.is_van_kampen.mono_of_mono_right [mono g]
{H : is_pushout f g h i} (H' : H.is_van_kampen) :
mono h :=
is_kernel_pair.mono_of_is_iso_fst
((H' f (𝟙 _) (𝟙 _) f (𝟙 _) (𝟙 _) g h (is_pullback.of_vert_is_iso ⟨by simp⟩)
(is_kernel_pair.id_of_mono g) ⟨rfl⟩ H.1
⟨by simp⟩).mp (is_pushout.of_vert_is_iso ⟨by simp⟩)).1
/-- A category is adhesive if it has pushouts and pullbacks along monomorphisms,
and such pushouts are van Kampen. -/
class adhesive (C : Type u) [category.{v} C] : Prop :=
[has_pullback_of_mono_left : ∀ {X Y S : C} (f : X ⟶ S) (g : Y ⟶ S) [mono f], has_pullback f g]
[has_pushout_of_mono_left : ∀ {X Y S : C} (f : S ⟶ X) (g : S ⟶ Y) [mono f], has_pushout f g]
(van_kampen : ∀ {W X Y Z : C} {f : W ⟶ X} {g : W ⟶ Y} {h : X ⟶ Z} {i : Y ⟶ Z} [mono f]
(H : is_pushout f g h i), H.is_van_kampen)
attribute [priority 100, instance]
adhesive.has_pullback_of_mono_left adhesive.has_pushout_of_mono_left
lemma adhesive.van_kampen' [adhesive C] [mono g] (H : is_pushout f g h i) : H.is_van_kampen :=
(adhesive.van_kampen H.flip).flip
lemma adhesive.is_pullback_of_is_pushout_of_mono_left [adhesive C]
(H : is_pushout f g h i) [mono f] : is_pullback f g h i :=
(adhesive.van_kampen H).is_pullback_of_mono_left
lemma adhesive.is_pullback_of_is_pushout_of_mono_right [adhesive C]
(H : is_pushout f g h i) [mono g] : is_pullback f g h i :=
(adhesive.van_kampen' H).is_pullback_of_mono_right
lemma adhesive.mono_of_is_pushout_of_mono_left [adhesive C]
(H : is_pushout f g h i) [mono f] : mono i :=
(adhesive.van_kampen H).mono_of_mono_left
lemma adhesive.mono_of_is_pushout_of_mono_right [adhesive C]
(H : is_pushout f g h i) [mono g] : mono h :=
(adhesive.van_kampen' H).mono_of_mono_right
instance type.adhesive : adhesive (Type u) :=
begin
constructor,
intros,
exactI (is_pushout.is_van_kampen_inl _ (types.is_coprod_of_mono f) _ _ _ H.flip).flip
end
@[priority 100] noncomputable
instance adhesive.to_regular_mono_category [adhesive C] : regular_mono_category C :=
⟨λ X Y f hf, by exactI
{ Z := pushout f f,
left := pushout.inl,
right := pushout.inr,
w := pushout.condition,
is_limit := (adhesive.is_pullback_of_is_pushout_of_mono_left
(is_pushout.of_has_pushout f f)).is_limit_fork }⟩
-- This then implies that adhesive categories are balanced
example [adhesive C] : balanced C := infer_instance
end category_theory
|
e30827696129dde1c65c5c6d9af7011530d592b5
|
57c233acf9386e610d99ed20ef139c5f97504ba3
|
/src/order/jordan_holder.lean
|
f0110da75c6b08f4836d17bb60bab8974426a8c2
|
[
"Apache-2.0"
] |
permissive
|
robertylewis/mathlib
|
3d16e3e6daf5ddde182473e03a1b601d2810952c
|
1d13f5b932f5e40a8308e3840f96fc882fae01f0
|
refs/heads/master
| 1,651,379,945,369
| 1,644,276,960,000
| 1,644,276,960,000
| 98,875,504
| 0
| 0
|
Apache-2.0
| 1,644,253,514,000
| 1,501,495,700,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 30,889
|
lean
|
/-
Copyright (c) 2021 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes
-/
import order.lattice
import data.list.sort
import data.equiv.fin
import data.equiv.functor
/-!
# Jordan-Hölder Theorem
This file proves the Jordan Hölder theorem for a `jordan_holder_lattice`, a class also defined in
this file. Examples of `jordan_holder_lattice` include `subgroup G` if `G` is a group, and
`submodule R M` if `M` is an `R`-module. Using this approach the theorem need not be proved
separately for both groups and modules; the proof in this file can be applied to both.
## Main definitions
The main definitions in this file are `jordan_holder_lattice` and `composition_series`,
and the relation `equivalent` on `composition_series`
A `jordan_holder_lattice` is the class for which the Jordan Hölder theorem is proved. A
Jordan Hölder lattice is a lattice equipped with a notion of maximality, `is_maximal`, and a notion
of isomorphism of pairs `iso`. In the example of subgroups of a group, `is_maximal H K` means that
`H` is a maximal normal subgroup of `K`, and `iso (H₁, K₁) (H₂, K₂)` means that the quotient
`K₁ / H₁` is isomorphic to the quotient `K₂ / H₂`. `iso` must be symmetric and transitive and must
satisfy the second isomorphism theorem `iso (H, H ⊔ K) (H ⊓ K, K)`.
A `composition_series X` is a finite nonempty series of elements of the lattice `X` such that
each element is maximal inside the next. The length of a `composition_series X` is
one less than the number of elements in the series. Note that there is no stipulation
that a series start from the bottom of the lattice and finish at the top.
For a composition series `s`, `s.top` is the largest element of the series,
and `s.bot` is the least element.
Two `composition_series X`, `s₁` and `s₂` are equivalent if there is a bijection
`e : fin s₁.length ≃ fin s₂.length` such that for any `i`,
`iso (s₁ i, s₁ i.succ) (s₂ (e i), s₂ (e i.succ))`
## Main theorems
The main theorem is `composition_series.jordan_holder`, which says that if two composition
series have the same least element and the same largest element,
then they are `equivalent`.
## TODO
Provide instances of `jordan_holder_lattice` for both submodules and subgroups, and potentially
for modular lattices.
It is not entirely clear how this should be done. Possibly there should be no global instances
of `jordan_holder_lattice`, and the instances should only be defined locally in order to prove
the Jordan-Hölder theorem for modules/groups and the API should be transferred because many of the
theorems in this file will have stronger versions for modules. There will also need to be an API for
mapping composition series across homomorphisms. It is also probably possible to
provide an instance of `jordan_holder_lattice` for any `modular_lattice`, and in this case the
Jordan-Hölder theorem will say that there is a well defined notion of length of a modular lattice.
However, an instance of `jordan_holder_lattice` for a modular lattice will not be able to contain
the correct notion of isomorphism for modules, so a separate instance for modules will still be
required; this will clash with the instance for modular lattices, and so at least one of these
instances should not be a global instance.
-/
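/- Motivating example (informal; no group-theoretic instance is provided in this file, see
the TODO above): the cyclic group of order `6` has the two composition series
`1 ◃ C₂ ◃ C₆` and `1 ◃ C₃ ◃ C₆`, with factor lists `(C₂, C₃)` and `(C₃, C₂)` respectively.
They are `equivalent` in the sense defined below via the bijection swapping the two
indices, which is what the Jordan-Hölder theorem guarantees once the subgroup lattice is
given a `jordan_holder_lattice` structure. -/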
universe u
open set
/--
A `jordan_holder_lattice` is the class for which the Jordan Hölder theorem is proved. A
Jordan Hölder lattice is a lattice equipped with a notion of maximality, `is_maximal`, and a notion
of isomorphism of pairs `iso`. In the example of subgroups of a group, `is_maximal H K` means that
`H` is a maximal normal subgroup of `K`, and `iso (H₁, K₁) (H₂, K₂)` means that the quotient
`K₁ / H₁` is isomorphic to the quotient `K₂ / H₂`. `iso` must be symmetric and transitive and must
satisfy the second isomorphism theorem `iso (H, H ⊔ K) (H ⊓ K, K)`.
Examples include `subgroup G` if `G` is a group, and `submodule R M` if `M` is an `R`-module.
-/
class jordan_holder_lattice (X : Type u) [lattice X] :=
(is_maximal : X → X → Prop)
(lt_of_is_maximal : ∀ {x y}, is_maximal x y → x < y)
(sup_eq_of_is_maximal : ∀ {x y z}, is_maximal x z → is_maximal y z →
x ≠ y → x ⊔ y = z)
(is_maximal_inf_left_of_is_maximal_sup : ∀ {x y}, is_maximal x (x ⊔ y) → is_maximal y (x ⊔ y) →
is_maximal (x ⊓ y) x)
(iso : (X × X) → (X × X) → Prop)
(iso_symm : ∀ {x y}, iso x y → iso y x)
(iso_trans : ∀ {x y z}, iso x y → iso y z → iso x z)
(second_iso : ∀ {x y}, is_maximal x (x ⊔ y) → iso (x, x ⊔ y) (x ⊓ y, y))
namespace jordan_holder_lattice
variables {X : Type u} [lattice X] [jordan_holder_lattice X]
lemma is_maximal_inf_right_of_is_maximal_sup {x y : X}
(hxz : is_maximal x (x ⊔ y)) (hyz : is_maximal y (x ⊔ y)) :
is_maximal (x ⊓ y) y :=
begin
rw [inf_comm],
rw [sup_comm] at hxz hyz,
exact is_maximal_inf_left_of_is_maximal_sup hyz hxz
end
lemma is_maximal_of_eq_inf (x b : X) {a y : X}
(ha : x ⊓ y = a) (hxy : x ≠ y) (hxb : is_maximal x b) (hyb : is_maximal y b) :
is_maximal a y :=
begin
have hb : x ⊔ y = b,
from sup_eq_of_is_maximal hxb hyb hxy,
substs a b,
exact is_maximal_inf_right_of_is_maximal_sup hxb hyb
end
lemma second_iso_of_eq {x y a b : X} (hm : is_maximal x a) (ha : x ⊔ y = a) (hb : x ⊓ y = b) :
iso (x, a) (b, y) :=
by substs a b; exact second_iso hm
lemma is_maximal.iso_refl {x y : X} (h : is_maximal x y) : iso (x, y) (x, y) :=
second_iso_of_eq h
(sup_eq_right.2 (le_of_lt (lt_of_is_maximal h)))
(inf_eq_left.2 (le_of_lt (lt_of_is_maximal h)))
end jordan_holder_lattice
open jordan_holder_lattice
attribute [symm] iso_symm
attribute [trans] iso_trans
/--
A `composition_series X` is a finite nonempty series of elements of a
`jordan_holder_lattice` such that each element is maximal inside the next. The length of a
`composition_series X` is one less than the number of elements in the series.
Note that there is no stipulation that a series start from the bottom of the lattice and finish at
the top. For a composition series `s`, `s.top` is the largest element of the series,
and `s.bot` is the least element.
-/
structure composition_series (X : Type u) [lattice X] [jordan_holder_lattice X] : Type u :=
(length : ℕ)
(series : fin (length + 1) → X)
(step' : ∀ i : fin length, is_maximal (series i.cast_succ) (series i.succ))
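/- Sketch of how a concrete series is packaged (hypothetical values; the `![a, b, c]`
vector notation is only available when `data.matrix.notation` is imported, otherwise use
`fin.cons` or a pattern match): a chain `a < b < c` with `is_maximal a b` and
`is_maximal b c` becomes `{ length := 2, series := ![a, b, c], step' := ... }`, and then
`s.bot = a` and `s.top = c` for the `bot`/`top` defined further below. -/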
namespace composition_series
variables {X : Type u} [lattice X] [jordan_holder_lattice X]
instance : has_coe_to_fun (composition_series X) (λ x, fin (x.length + 1) → X) :=
{ coe := composition_series.series }
instance [inhabited X] : inhabited (composition_series X) :=
⟨{ length := 0,
series := λ _, default,
step' := λ x, x.elim0 }⟩
variables {X}
lemma step (s : composition_series X) : ∀ i : fin s.length,
is_maximal (s i.cast_succ) (s i.succ) := s.step'
@[simp] lemma coe_fn_mk (length : ℕ) (series step) :
(@composition_series.mk X _ _ length series step : fin length.succ → X) = series := rfl
theorem lt_succ (s : composition_series X) (i : fin s.length) :
s i.cast_succ < s i.succ :=
lt_of_is_maximal (s.step _)
protected theorem strict_mono (s : composition_series X) : strict_mono s :=
fin.strict_mono_iff_lt_succ.2 (λ i h, s.lt_succ ⟨i, nat.lt_of_succ_lt_succ h⟩)
protected theorem injective (s : composition_series X) : function.injective s :=
s.strict_mono.injective
@[simp] protected theorem inj (s : composition_series X) {i j : fin s.length.succ} :
s i = s j ↔ i = j :=
s.injective.eq_iff
instance : has_mem X (composition_series X) :=
⟨λ x s, x ∈ set.range s⟩
lemma mem_def {x : X} {s : composition_series X} : x ∈ s ↔ x ∈ set.range s := iff.rfl
lemma total {s : composition_series X} {x y : X} (hx : x ∈ s) (hy : y ∈ s) : x ≤ y ∨ y ≤ x :=
begin
rcases set.mem_range.1 hx with ⟨i, rfl⟩,
rcases set.mem_range.1 hy with ⟨j, rfl⟩,
rw [s.strict_mono.le_iff_le, s.strict_mono.le_iff_le],
exact le_total i j
end
/-- The ordered `list X` of elements of a `composition_series X`. -/
def to_list (s : composition_series X) : list X := list.of_fn s
/-- Two `composition_series` are equal if they are the same length and
have the same `i`th element for every `i` -/
lemma ext_fun {s₁ s₂ : composition_series X}
(hl : s₁.length = s₂.length)
(h : ∀ i, s₁ i = s₂ (fin.cast (congr_arg nat.succ hl) i)) :
s₁ = s₂ :=
begin
cases s₁, cases s₂,
dsimp at *,
subst hl,
simpa [function.funext_iff] using h
end
@[simp] lemma length_to_list (s : composition_series X) : s.to_list.length = s.length + 1 :=
by rw [to_list, list.length_of_fn]
lemma to_list_ne_nil (s : composition_series X) : s.to_list ≠ [] :=
by rw [← list.length_pos_iff_ne_nil, length_to_list]; exact nat.succ_pos _
lemma to_list_injective : function.injective (@composition_series.to_list X _ _) :=
λ s₁ s₂ (h : list.of_fn s₁ = list.of_fn s₂),
have h₁ : s₁.length = s₂.length,
from nat.succ_injective
((list.length_of_fn s₁).symm.trans $
(congr_arg list.length h).trans $
list.length_of_fn s₂),
have h₂ : ∀ i : fin s₁.length.succ, (s₁ i) = s₂ (fin.cast (congr_arg nat.succ h₁) i),
begin
assume i,
rw [← list.nth_le_of_fn s₁ i, ← list.nth_le_of_fn s₂],
simp [h]
end,
begin
cases s₁, cases s₂,
dsimp at *,
subst h₁,
simp only [heq_iff_eq, eq_self_iff_true, true_and],
simp only [fin.cast_refl] at h₂,
exact funext h₂
end
lemma chain'_to_list (s : composition_series X) :
list.chain' is_maximal s.to_list :=
list.chain'_iff_nth_le.2
begin
assume i hi,
simp only [to_list, list.nth_le_of_fn'],
rw [length_to_list] at hi,
exact s.step ⟨i, hi⟩
end
lemma to_list_sorted (s : composition_series X) : s.to_list.sorted (<) :=
list.pairwise_iff_nth_le.2 (λ i j hi hij,
begin
dsimp [to_list],
rw [list.nth_le_of_fn', list.nth_le_of_fn'],
exact s.strict_mono hij
end)
lemma to_list_nodup (s : composition_series X) : s.to_list.nodup :=
list.nodup_iff_nth_le_inj.2
(λ i j hi hj,
begin
delta to_list,
rw [list.nth_le_of_fn', list.nth_le_of_fn', s.injective.eq_iff, fin.ext_iff,
fin.coe_mk, fin.coe_mk],
exact id
end)
@[simp] lemma mem_to_list {s : composition_series X} {x : X} : x ∈ s.to_list ↔ x ∈ s :=
by rw [to_list, list.mem_of_fn, mem_def]
/-- Make a `composition_series X` from the ordered list of its elements. -/
def of_list (l : list X) (hl : l ≠ []) (hc : list.chain' is_maximal l) :
composition_series X :=
{ length := l.length - 1,
series := λ i, l.nth_le i begin
conv_rhs { rw ← tsub_add_cancel_of_le (nat.succ_le_of_lt (list.length_pos_of_ne_nil hl)) },
exact i.2
end,
step' := λ ⟨i, hi⟩, list.chain'_iff_nth_le.1 hc i hi }
lemma length_of_list (l : list X) (hl : l ≠ []) (hc : list.chain' is_maximal l) :
(of_list l hl hc).length = l.length - 1 := rfl
lemma of_list_to_list (s : composition_series X) :
of_list s.to_list s.to_list_ne_nil s.chain'_to_list = s :=
begin
refine ext_fun _ _,
{ rw [length_of_list, length_to_list, nat.succ_sub_one] },
{ rintros ⟨i, hi⟩,
dsimp [of_list, to_list],
rw [list.nth_le_of_fn'] }
end
@[simp] lemma of_list_to_list' (s : composition_series X) :
of_list s.to_list s.to_list_ne_nil s.chain'_to_list = s :=
of_list_to_list s
@[simp] lemma to_list_of_list (l : list X) (hl : l ≠ []) (hc : list.chain' is_maximal l) :
to_list (of_list l hl hc) = l :=
begin
refine list.ext_le _ _,
{ rw [length_to_list, length_of_list,
tsub_add_cancel_of_le (nat.succ_le_of_lt $ list.length_pos_of_ne_nil hl)] },
{ assume i hi hi',
dsimp [of_list, to_list],
rw [list.nth_le_of_fn'],
refl }
end
/-- Two `composition_series` are equal if they have the same elements. See also `ext_fun`. -/
@[ext] lemma ext {s₁ s₂ : composition_series X} (h : ∀ x, x ∈ s₁ ↔ x ∈ s₂) : s₁ = s₂ :=
to_list_injective $ list.eq_of_perm_of_sorted
(by classical; exact list.perm_of_nodup_nodup_to_finset_eq
s₁.to_list_nodup
s₂.to_list_nodup
(finset.ext $ by simp *))
s₁.to_list_sorted s₂.to_list_sorted
/-- The largest element of a `composition_series` -/
def top (s : composition_series X) : X := s (fin.last _)
lemma top_mem (s : composition_series X) : s.top ∈ s :=
mem_def.2 (set.mem_range.2 ⟨fin.last _, rfl⟩)
@[simp] lemma le_top {s : composition_series X} (i : fin (s.length + 1)) : s i ≤ s.top :=
s.strict_mono.monotone (fin.le_last _)
lemma le_top_of_mem {s : composition_series X} {x : X} (hx : x ∈ s) : x ≤ s.top :=
let ⟨i, hi⟩ := set.mem_range.2 hx in hi ▸ le_top _
/-- The smallest element of a `composition_series` -/
def bot (s : composition_series X) : X := s 0
lemma bot_mem (s : composition_series X) : s.bot ∈ s :=
mem_def.2 (set.mem_range.2 ⟨0, rfl⟩)
@[simp] lemma bot_le {s : composition_series X} (i : fin (s.length + 1)) : s.bot ≤ s i :=
s.strict_mono.monotone (fin.zero_le _)
lemma bot_le_of_mem {s : composition_series X} {x : X} (hx : x ∈ s) : s.bot ≤ x :=
let ⟨i, hi⟩ := set.mem_range.2 hx in hi ▸ bot_le _
lemma length_pos_of_mem_ne {s : composition_series X}
{x y : X} (hx : x ∈ s) (hy : y ∈ s) (hxy : x ≠ y) :
0 < s.length :=
let ⟨i, hi⟩ := hx, ⟨j, hj⟩ := hy in
have hij : i ≠ j, from mt s.inj.2 $ λ h, hxy (hi ▸ hj ▸ h),
hij.lt_or_lt.elim
(λ hij, (lt_of_le_of_lt (zero_le i)
(lt_of_lt_of_le hij (nat.le_of_lt_succ j.2))))
(λ hji, (lt_of_le_of_lt (zero_le j)
(lt_of_lt_of_le hji (nat.le_of_lt_succ i.2))))
lemma forall_mem_eq_of_length_eq_zero {s : composition_series X}
(hs : s.length = 0) {x y} (hx : x ∈ s) (hy : y ∈ s) : x = y :=
by_contradiction (λ hxy, pos_iff_ne_zero.1 (length_pos_of_mem_ne hx hy hxy) hs)
/-- Remove the largest element from a `composition_series`. If the series `s`
has length zero, then `s.erase_top = s` -/
@[simps] def erase_top (s : composition_series X) : composition_series X :=
{ length := s.length - 1,
series := λ i, s ⟨i, lt_of_lt_of_le i.2 (nat.succ_le_succ tsub_le_self)⟩,
step' := λ i, begin
have := s.step ⟨i, lt_of_lt_of_le i.2 tsub_le_self⟩,
cases i,
exact this
end }
lemma top_erase_top (s : composition_series X) :
s.erase_top.top = s ⟨s.length - 1, lt_of_le_of_lt tsub_le_self (nat.lt_succ_self _)⟩ :=
show s _ = s _, from congr_arg s
begin
ext,
simp only [erase_top_length, fin.coe_last, fin.coe_cast_succ, fin.coe_of_nat_eq_mod,
fin.coe_mk, coe_coe]
end
lemma erase_top_top_le (s : composition_series X) : s.erase_top.top ≤ s.top :=
by simp [erase_top, top, s.strict_mono.le_iff_le, fin.le_iff_coe_le_coe, tsub_le_self]
@[simp] lemma bot_erase_top (s : composition_series X) : s.erase_top.bot = s.bot := rfl
lemma mem_erase_top_of_ne_of_mem {s : composition_series X} {x : X}
(hx : x ≠ s.top) (hxs : x ∈ s) : x ∈ s.erase_top :=
begin
{ rcases hxs with ⟨i, rfl⟩,
have hi : (i : ℕ) < (s.length - 1).succ,
{ conv_rhs { rw [← nat.succ_sub (length_pos_of_mem_ne ⟨i, rfl⟩ s.top_mem hx),
nat.succ_sub_one] },
exact lt_of_le_of_ne
(nat.le_of_lt_succ i.2)
(by simpa [top, s.inj, fin.ext_iff] using hx) },
refine ⟨i.cast_succ, _⟩,
simp [fin.ext_iff, nat.mod_eq_of_lt hi] }
end
lemma mem_erase_top {s : composition_series X} {x : X}
(h : 0 < s.length) : x ∈ s.erase_top ↔ x ≠ s.top ∧ x ∈ s :=
begin
simp only [mem_def],
dsimp only [erase_top, coe_fn_mk],
split,
{ rintros ⟨i, rfl⟩,
have hi : (i : ℕ) < s.length,
{ conv_rhs { rw [← nat.succ_sub_one s.length, nat.succ_sub h] },
exact i.2 },
simp [top, fin.ext_iff, (ne_of_lt hi)] },
{ intro h,
exact mem_erase_top_of_ne_of_mem h.1 h.2 }
end
lemma lt_top_of_mem_erase_top {s : composition_series X} {x : X} (h : 0 < s.length)
(hx : x ∈ s.erase_top) : x < s.top :=
lt_of_le_of_ne
(le_top_of_mem ((mem_erase_top h).1 hx).2)
((mem_erase_top h).1 hx).1
lemma is_maximal_erase_top_top {s : composition_series X} (h : 0 < s.length) :
is_maximal s.erase_top.top s.top :=
have s.length - 1 + 1 = s.length,
by conv_rhs { rw [← nat.succ_sub_one s.length] }; rw nat.succ_sub h,
begin
rw [top_erase_top, top],
convert s.step ⟨s.length - 1, nat.sub_lt h zero_lt_one⟩;
ext; simp [this]
end
lemma append_cast_add_aux
{s₁ s₂ : composition_series X}
(i : fin s₁.length) :
fin.append (nat.add_succ _ _).symm (s₁ ∘ fin.cast_succ) s₂
(fin.cast_add s₂.length i).cast_succ = s₁ i.cast_succ :=
by { cases i, simp [fin.append, *] }
lemma append_succ_cast_add_aux
{s₁ s₂ : composition_series X}
(i : fin s₁.length)
(h : s₁ (fin.last _) = s₂ 0) :
fin.append (nat.add_succ _ _).symm (s₁ ∘ fin.cast_succ) s₂
(fin.cast_add s₂.length i).succ = s₁ i.succ :=
begin
cases i with i hi,
simp only [fin.append, hi, fin.succ_mk, function.comp_app, fin.cast_succ_mk,
fin.coe_mk, fin.cast_add_mk],
split_ifs,
{ refl },
{ have : i + 1 = s₁.length, from le_antisymm hi (le_of_not_gt h_1),
calc s₂ ⟨i + 1 - s₁.length, by simp [this]⟩
= s₂ 0 : congr_arg s₂ (by simp [fin.ext_iff, this])
... = s₁ (fin.last _) : h.symm
... = _ : congr_arg s₁ (by simp [fin.ext_iff, this]) }
end
lemma append_nat_add_aux
{s₁ s₂ : composition_series X}
(i : fin s₂.length) :
fin.append (nat.add_succ _ _).symm (s₁ ∘ fin.cast_succ) s₂
(fin.nat_add s₁.length i).cast_succ = s₂ i.cast_succ :=
begin
cases i,
simp only [fin.append, nat.not_lt_zero, fin.nat_add_mk, add_lt_iff_neg_left,
add_tsub_cancel_left, dif_neg, fin.cast_succ_mk, not_false_iff, fin.coe_mk]
end
lemma append_succ_nat_add_aux
{s₁ s₂ : composition_series X}
(i : fin s₂.length) :
fin.append (nat.add_succ _ _).symm (s₁ ∘ fin.cast_succ) s₂
(fin.nat_add s₁.length i).succ = s₂ i.succ :=
begin
cases i with i hi,
simp only [fin.append, add_assoc, nat.not_lt_zero, fin.nat_add_mk, add_lt_iff_neg_left,
add_tsub_cancel_left, fin.succ_mk, dif_neg, not_false_iff, fin.coe_mk]
end
/-- Append two composition series `s₁` and `s₂` such that
the least element of `s₁` is the maximum element of `s₂`. -/
@[simps length] def append (s₁ s₂ : composition_series X)
(h : s₁.top = s₂.bot) : composition_series X :=
{ length := s₁.length + s₂.length,
series := fin.append (nat.add_succ _ _).symm (s₁ ∘ fin.cast_succ) s₂,
step' := λ i, begin
refine fin.add_cases _ _ i,
{ intro i,
rw [append_succ_cast_add_aux _ h, append_cast_add_aux],
exact s₁.step i },
{ intro i,
rw [append_nat_add_aux, append_succ_nat_add_aux],
exact s₂.step i }
end }
@[simp] lemma append_cast_add {s₁ s₂ : composition_series X}
(h : s₁.top = s₂.bot) (i : fin s₁.length) :
append s₁ s₂ h (fin.cast_add s₂.length i).cast_succ = s₁ i.cast_succ :=
append_cast_add_aux i
@[simp] lemma append_succ_cast_add {s₁ s₂ : composition_series X}
(h : s₁.top = s₂.bot) (i : fin s₁.length) :
append s₁ s₂ h (fin.cast_add s₂.length i).succ = s₁ i.succ :=
append_succ_cast_add_aux i h
@[simp] lemma append_nat_add {s₁ s₂ : composition_series X}
(h : s₁.top = s₂.bot) (i : fin s₂.length) :
append s₁ s₂ h (fin.nat_add s₁.length i).cast_succ = s₂ i.cast_succ :=
append_nat_add_aux i
@[simp] lemma append_succ_nat_add {s₁ s₂ : composition_series X}
(h : s₁.top = s₂.bot) (i : fin s₂.length) :
append s₁ s₂ h (fin.nat_add s₁.length i).succ = s₂ i.succ :=
append_succ_nat_add_aux i
/-- Add an element to the top of a `composition_series` -/
@[simps length] def snoc (s : composition_series X) (x : X)
(hsat : is_maximal s.top x) : composition_series X :=
{ length := s.length + 1,
series := fin.snoc s x,
step' := λ i, begin
refine fin.last_cases _ _ i,
{ rwa [fin.snoc_cast_succ, fin.succ_last, fin.snoc_last, ← top] },
{ intro i,
rw [fin.snoc_cast_succ, ← fin.cast_succ_fin_succ, fin.snoc_cast_succ],
exact s.step _ }
end }
@[simp] lemma top_snoc (s : composition_series X) (x : X)
(hsat : is_maximal s.top x) : (snoc s x hsat).top = x :=
fin.snoc_last _ _
@[simp] lemma snoc_last (s : composition_series X) (x : X) (hsat : is_maximal s.top x) :
snoc s x hsat (fin.last (s.length + 1)) = x :=
fin.snoc_last _ _
@[simp] lemma snoc_cast_succ (s : composition_series X) (x : X) (hsat : is_maximal s.top x)
(i : fin (s.length + 1)) : snoc s x hsat (i.cast_succ) = s i :=
fin.snoc_cast_succ _ _ _
@[simp] lemma bot_snoc (s : composition_series X) (x : X) (hsat : is_maximal s.top x) :
(snoc s x hsat).bot = s.bot :=
by rw [bot, bot, ← fin.cast_succ_zero, snoc_cast_succ]
lemma mem_snoc {s : composition_series X} {x y: X}
{hsat : is_maximal s.top x} : y ∈ snoc s x hsat ↔ y ∈ s ∨ y = x :=
begin
simp only [snoc, mem_def],
split,
{ rintros ⟨i, rfl⟩,
refine fin.last_cases _ (λ i, _) i,
{ right, simp },
{ left, simp } },
{ intro h,
rcases h with ⟨i, rfl⟩ | rfl,
{ use i.cast_succ, simp },
{ use (fin.last _), simp } }
end
lemma eq_snoc_erase_top {s : composition_series X} (h : 0 < s.length) :
s = snoc (erase_top s) s.top (is_maximal_erase_top_top h) :=
begin
ext x,
simp [mem_snoc, mem_erase_top h],
by_cases h : x = s.top; simp [*, s.top_mem]
end
@[simp] lemma snoc_erase_top_top {s : composition_series X}
(h : is_maximal s.erase_top.top s.top) : s.erase_top.snoc s.top h = s :=
have h : 0 < s.length,
from nat.pos_of_ne_zero begin
assume hs,
refine ne_of_gt (lt_of_is_maximal h) _,
simp [top, fin.ext_iff, hs]
end,
(eq_snoc_erase_top h).symm
/-- Two `composition_series X`, `s₁` and `s₂` are equivalent if there is a bijection
`e : fin s₁.length ≃ fin s₂.length` such that for any `i`,
`iso (s₁ i, s₁ i.succ) (s₂ (e i), s₂ (e i.succ))` -/
def equivalent (s₁ s₂ : composition_series X) : Prop :=
∃ f : fin s₁.length ≃ fin s₂.length,
∀ i : fin s₁.length,
iso (s₁ i.cast_succ, s₁ i.succ)
(s₂ (f i).cast_succ, s₂ (f i).succ)
namespace equivalent
@[refl] lemma refl (s : composition_series X) : equivalent s s :=
⟨equiv.refl _, λ _, (s.step _).iso_refl⟩
@[symm] lemma symm {s₁ s₂ : composition_series X} (h : equivalent s₁ s₂) :
equivalent s₂ s₁ :=
⟨h.some.symm, λ i, iso_symm (by simpa using h.some_spec (h.some.symm i))⟩
@[trans] lemma trans {s₁ s₂ s₃ : composition_series X}
(h₁ : equivalent s₁ s₂)
(h₂ : equivalent s₂ s₃) :
equivalent s₁ s₃ :=
⟨h₁.some.trans h₂.some, λ i, iso_trans (h₁.some_spec i) (h₂.some_spec (h₁.some i))⟩
lemma append
{s₁ s₂ t₁ t₂ : composition_series X}
(hs : s₁.top = s₂.bot)
(ht : t₁.top = t₂.bot)
(h₁ : equivalent s₁ t₁)
(h₂ : equivalent s₂ t₂) :
equivalent (append s₁ s₂ hs) (append t₁ t₂ ht) :=
let e : fin (s₁.length + s₂.length) ≃ fin (t₁.length + t₂.length) :=
calc fin (s₁.length + s₂.length) ≃ fin s₁.length ⊕ fin s₂.length : fin_sum_fin_equiv.symm
... ≃ fin t₁.length ⊕ fin t₂.length : equiv.sum_congr h₁.some h₂.some
... ≃ fin (t₁.length + t₂.length) : fin_sum_fin_equiv in
⟨e, begin
assume i,
refine fin.add_cases _ _ i,
{ assume i,
simpa [top, bot] using h₁.some_spec i },
{ assume i,
simpa [top, bot] using h₂.some_spec i }
end⟩
protected lemma snoc
{s₁ s₂ : composition_series X}
{x₁ x₂ : X}
{hsat₁ : is_maximal s₁.top x₁}
{hsat₂ : is_maximal s₂.top x₂}
(hequiv : equivalent s₁ s₂)
(htop : iso (s₁.top, x₁) (s₂.top, x₂)) :
equivalent (s₁.snoc x₁ hsat₁) (s₂.snoc x₂ hsat₂) :=
let e : fin s₁.length.succ ≃ fin s₂.length.succ :=
calc fin (s₁.length + 1) ≃ option (fin s₁.length) : fin_succ_equiv_last
... ≃ option (fin s₂.length) : functor.map_equiv option hequiv.some
... ≃ fin (s₂.length + 1) : fin_succ_equiv_last.symm in
⟨e, λ i, begin
refine fin.last_cases _ _ i,
{ simpa [top] using htop },
{ assume i,
simpa [fin.succ_cast_succ] using hequiv.some_spec i }
end⟩
lemma length_eq {s₁ s₂ : composition_series X} (h : equivalent s₁ s₂) : s₁.length = s₂.length :=
by simpa using fintype.card_congr h.some
lemma snoc_snoc_swap
{s : composition_series X}
{x₁ x₂ y₁ y₂ : X}
{hsat₁ : is_maximal s.top x₁}
{hsat₂ : is_maximal s.top x₂}
{hsaty₁ : is_maximal (snoc s x₁ hsat₁).top y₁}
{hsaty₂ : is_maximal (snoc s x₂ hsat₂).top y₂}
(hr₁ : iso (s.top, x₁) (x₂, y₂))
(hr₂ : iso (x₁, y₁) (s.top, x₂)) :
equivalent
(snoc (snoc s x₁ hsat₁) y₁ hsaty₁)
(snoc (snoc s x₂ hsat₂) y₂ hsaty₂) :=
let e : fin (s.length + 1 + 1) ≃ fin (s.length + 1 + 1) :=
equiv.swap (fin.last _) (fin.cast_succ (fin.last _)) in
have h1 : ∀ {i : fin s.length},
i.cast_succ.cast_succ ≠ (fin.last _).cast_succ,
from λ _, ne_of_lt (by simp [fin.cast_succ_lt_last]),
have h2 : ∀ {i : fin s.length},
i.cast_succ.cast_succ ≠ (fin.last _),
from λ _, ne_of_lt (by simp [fin.cast_succ_lt_last]),
⟨e, begin
intro i,
dsimp only [e],
refine fin.last_cases _ (λ i, _) i,
{ erw [equiv.swap_apply_left, snoc_cast_succ, snoc_last, fin.succ_last, snoc_last,
snoc_cast_succ, snoc_cast_succ, fin.succ_cast_succ, snoc_cast_succ,
fin.succ_last, snoc_last],
exact hr₂ },
{ refine fin.last_cases _ (λ i, _) i,
{ erw [equiv.swap_apply_right, snoc_cast_succ, snoc_cast_succ,
snoc_cast_succ, fin.succ_cast_succ, snoc_cast_succ,
fin.succ_last, snoc_last, snoc_last, fin.succ_last, snoc_last],
exact hr₁ },
{ erw [equiv.swap_apply_of_ne_of_ne h2 h1, snoc_cast_succ, snoc_cast_succ,
snoc_cast_succ, snoc_cast_succ, fin.succ_cast_succ, snoc_cast_succ,
fin.succ_cast_succ, snoc_cast_succ, snoc_cast_succ, snoc_cast_succ],
exact (s.step i).iso_refl } }
end⟩
end equivalent
lemma length_eq_zero_of_bot_eq_bot_of_top_eq_top_of_length_eq_zero
{s₁ s₂ : composition_series X}
(hb : s₁.bot = s₂.bot) (ht : s₁.top = s₂.top)
(hs₁ : s₁.length = 0) : s₂.length = 0 :=
begin
have : s₁.bot = s₁.top,
from congr_arg s₁ (fin.ext (by simp [hs₁])),
have : (fin.last s₂.length) = (0 : fin s₂.length.succ),
from s₂.injective (hb.symm.trans (this.trans ht)).symm,
simpa [fin.ext_iff]
end
lemma length_pos_of_bot_eq_bot_of_top_eq_top_of_length_pos
{s₁ s₂ : composition_series X}
(hb : s₁.bot = s₂.bot) (ht : s₁.top = s₂.top) :
0 < s₁.length → 0 < s₂.length :=
not_imp_not.1 begin
simp only [pos_iff_ne_zero, ne.def, not_iff_not, not_not],
exact length_eq_zero_of_bot_eq_bot_of_top_eq_top_of_length_eq_zero hb.symm ht.symm
end
lemma eq_of_bot_eq_bot_of_top_eq_top_of_length_eq_zero
{s₁ s₂ : composition_series X}
(hb : s₁.bot = s₂.bot) (ht : s₁.top = s₂.top)
(hs₁0 : s₁.length = 0) :
s₁ = s₂ :=
have ∀ x, x ∈ s₁ ↔ x = s₁.top,
from λ x, ⟨λ hx, forall_mem_eq_of_length_eq_zero hs₁0 hx s₁.top_mem, λ hx, hx.symm ▸ s₁.top_mem⟩,
have ∀ x, x ∈ s₂ ↔ x = s₂.top,
from λ x, ⟨λ hx, forall_mem_eq_of_length_eq_zero
(length_eq_zero_of_bot_eq_bot_of_top_eq_top_of_length_eq_zero hb ht hs₁0)
hx s₂.top_mem, λ hx, hx.symm ▸ s₂.top_mem⟩,
by { ext, simp * }
/-- Given a `composition_series`, `s`, and an element `x`
such that `x` is maximal inside `s.top` there is a series, `t`,
such that `t.top = x`, `t.bot = s.bot`
and `snoc t s.top _` is equivalent to `s`. -/
lemma exists_top_eq_snoc_equivalant (s : composition_series X) (x : X)
(hm : is_maximal x s.top) (hb : s.bot ≤ x) :
∃ t : composition_series X, t.bot = s.bot ∧ t.length + 1 = s.length ∧
∃ htx : t.top = x, equivalent s (snoc t s.top (htx.symm ▸ hm)) :=
begin
induction hn : s.length with n ih generalizing s x,
{ exact (ne_of_gt (lt_of_le_of_lt hb (lt_of_is_maximal hm))
(forall_mem_eq_of_length_eq_zero hn s.top_mem s.bot_mem)).elim },
{ have h0s : 0 < s.length, from hn.symm ▸ nat.succ_pos _,
by_cases hetx : s.erase_top.top = x,
{ use s.erase_top,
simp [← hetx, hn] },
{ have imxs : is_maximal (x ⊓ s.erase_top.top) s.erase_top.top,
from is_maximal_of_eq_inf x s.top rfl (ne.symm hetx) hm
(is_maximal_erase_top_top h0s),
have := ih _ _ imxs (le_inf (by simpa) (le_top_of_mem s.erase_top.bot_mem)) (by simp [hn]),
rcases this with ⟨t, htb, htl, htt, hteqv⟩,
have hmtx : is_maximal t.top x,
from is_maximal_of_eq_inf s.erase_top.top s.top
(by rw [inf_comm, htt]) hetx
(is_maximal_erase_top_top h0s) hm,
use snoc t x hmtx,
refine ⟨by simp [htb], by simp [htl], by simp, _⟩,
have : s.equivalent ((snoc t s.erase_top.top (htt.symm ▸ imxs)).snoc s.top
(by simpa using is_maximal_erase_top_top h0s)),
{ conv_lhs { rw eq_snoc_erase_top h0s },
exact equivalent.snoc hteqv
(by simpa using (is_maximal_erase_top_top h0s).iso_refl) },
refine this.trans _,
refine equivalent.snoc_snoc_swap _ _,
{ exact iso_symm (second_iso_of_eq hm
(sup_eq_of_is_maximal hm
(is_maximal_erase_top_top h0s)
(ne.symm hetx))
htt.symm) },
{ exact second_iso_of_eq (is_maximal_erase_top_top h0s)
(sup_eq_of_is_maximal
(is_maximal_erase_top_top h0s)
hm hetx)
(by rw [inf_comm, htt]) } } }
end
/-- The **Jordan-Hölder** theorem, stated for any `jordan_holder_lattice`.
If two composition series start and finish at the same place, they are equivalent. -/
theorem jordan_holder (s₁ s₂ : composition_series X)
(hb : s₁.bot = s₂.bot) (ht : s₁.top = s₂.top) :
equivalent s₁ s₂ :=
begin
induction hle : s₁.length with n ih generalizing s₁ s₂,
{ rw [eq_of_bot_eq_bot_of_top_eq_top_of_length_eq_zero hb ht hle] },
{ have h0s₂ : 0 < s₂.length,
from length_pos_of_bot_eq_bot_of_top_eq_top_of_length_pos hb ht (hle.symm ▸ nat.succ_pos _),
rcases exists_top_eq_snoc_equivalant s₁ s₂.erase_top.top
(ht.symm ▸ is_maximal_erase_top_top h0s₂)
(hb.symm ▸ s₂.bot_erase_top ▸ bot_le_of_mem (top_mem _)) with ⟨t, htb, htl, htt, hteq⟩,
have := ih t s₂.erase_top (by simp [htb, ← hb]) htt (nat.succ_inj'.1 (htl.trans hle)),
refine hteq.trans _,
conv_rhs { rw [eq_snoc_erase_top h0s₂] },
simp only [ht],
exact equivalent.snoc this
(by simp [htt, (is_maximal_erase_top_top h0s₂).iso_refl]) }
end
end composition_series
|
4b549e0b5f050691a7ca4c8c8e9d79f5d1b236d7
|
7cdf3413c097e5d36492d12cdd07030eb991d394
|
/world_experiments/world7/level12.lean
|
f9a9248df2e1f9fd3a95891066544e41a62a1e81
|
[] |
no_license
|
alreadydone/natural_number_game
|
3135b9385a9f43e74cfbf79513fc37e69b99e0b3
|
1a39e693df4f4e871eb449890d3c7715a25c2ec9
|
refs/heads/master
| 1,599,387,390,105
| 1,573,200,587,000
| 1,573,200,691,000
| 220,397,084
| 0
| 0
| null | 1,573,192,734,000
| 1,573,192,733,000
| null |
UTF-8
|
Lean
| false
| false
| 974
|
lean
|
import mynat.definition -- hide
import mynat.add -- hide
import game.world2.level11 -- hide
namespace mynat -- hide
/-
# World 2 -- Addition World
## Level 12 -- `eq_zero_of_add_right_eq_self`
You have: the usual stuff.
* `succ_inj (a b : mynat) : succ(a) = succ(b) → a = b`
will be useful for this one.
You might want to read about how `rw zero_add at h` works in the
<a href="http://wwwf.imperial.ac.uk/~buzzard/xena/html/source/tactics/tacticindex.html" target="blank">tactic guide</a>.
The lemma you're about to prove will be useful when we want to prove that $\leq$ is antisymmetric.
-/
/- Lemma
If $a$ and $b$ are natural numbers such that
$$ a + b = a, $$
then $b = 0$.
-/
lemma eq_zero_of_add_right_eq_self {{a b : mynat}} : a + b = a → b = 0 :=
begin [less_leaky]
intro h,
induction a with a ha,
{
rw zero_add at h,
assumption
},
{ apply ha,
apply succ_inj,
rw succ_add at h,
assumption,
}
end
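-- In the base case above, `h : 0 + b = 0`, so `rw zero_add at h` turns the hypothesis into
-- `h : b = 0`, which is exactly the goal and is closed by `assumption`; the inductive step
-- reduces the goal with `apply ha` and `apply succ_inj`, then rewrites `h` with `succ_add`
-- so that `assumption` closes it.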
end mynat -- hide
|
b2b46d4314dad1dfcceb56f236b71e9b18449156
|
5fbbd711f9bfc21ee168f46a4be146603ece8835
|
/lean/natural_number_game/advanced_addition/10.lean
|
eea79fadeec8f882ce78711d6c599ea3a50ce048
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
goedel-gang/maths
|
22596f71e3fde9c088e59931f128a3b5efb73a2c
|
a20a6f6a8ce800427afd595c598a5ad43da1408d
|
refs/heads/master
| 1,623,055,941,960
| 1,621,599,441,000
| 1,621,599,441,000
| 169,335,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 157
|
lean
|
lemma add_left_eq_zero {a b : mynat} (H : a + b = 0) : b = 0 :=
begin
cases b with d,
refl,
rw add_succ at H,
exfalso,
exact succ_ne_zero _ H,
end
|
9c6fde16692ba11c8744735b892c904f8261376f
|
680b0d1592ce164979dab866b232f6fa743f2cc8
|
/hott/types/fiber.hlean
|
24a9f8e6c857ed24074942598c813b8e212e4b9e
|
[
"Apache-2.0"
] |
permissive
|
syohex/lean
|
657428ab520f8277fc18cf04bea2ad200dbae782
|
081ad1212b686780f3ff8a6d0e5f8a1d29a7d8bc
|
refs/heads/master
| 1,611,274,838,635
| 1,452,668,188,000
| 1,452,668,188,000
| 49,562,028
| 0
| 0
| null | 1,452,675,604,000
| 1,452,675,602,000
| null |
UTF-8
|
Lean
| false
| false
| 4,613
|
hlean
|
/-
Copyright (c) 2015 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Floris van Doorn
Ported from Coq HoTT
Theorems about fibers
-/
import .sigma .eq .pi .pointed
structure fiber {A B : Type} (f : A → B) (b : B) :=
(point : A)
(point_eq : f point = b)
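/- Informally, `fiber f b` packages a point of `A` together with a path witnessing that `f`
sends it to `b`. For instance the fiber of `nat.succ` over `0` is empty, while its fiber
over `succ n` contains exactly `n` (and, `nat` being a set, is contractible). The
equivalences below (`sigma_char`, `fiber_pr1`, `sigma_fiber_equiv`, ...) make this
sigma-type description precise. -/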
open equiv sigma sigma.ops eq pi
namespace fiber
variables {A B : Type} {f : A → B} {b : B}
protected definition sigma_char [constructor]
(f : A → B) (b : B) : fiber f b ≃ (Σ(a : A), f a = b) :=
begin
fapply equiv.MK,
{intro x, exact ⟨point x, point_eq x⟩},
{intro x, exact (fiber.mk x.1 x.2)},
{intro x, cases x, apply idp},
{intro x, cases x, apply idp},
end
definition fiber_eq_equiv (x y : fiber f b)
: (x = y) ≃ (Σ(p : point x = point y), point_eq x = ap f p ⬝ point_eq y) :=
begin
apply equiv.trans,
apply eq_equiv_fn_eq_of_equiv, apply fiber.sigma_char,
apply equiv.trans,
apply sigma_eq_equiv,
apply sigma_equiv_sigma_id,
intro p,
apply pathover_eq_equiv_Fl,
end
definition fiber_eq {x y : fiber f b} (p : point x = point y)
(q : point_eq x = ap f p ⬝ point_eq y) : x = y :=
to_inv !fiber_eq_equiv ⟨p, q⟩
open is_trunc
definition fiber_pr1 (B : A → Type) (a : A) : fiber (pr1 : (Σa, B a) → A) a ≃ B a :=
calc
fiber pr1 a ≃ Σu, u.1 = a : fiber.sigma_char
... ≃ Σa' (b : B a'), a' = a : sigma_assoc_equiv
... ≃ Σa' (p : a' = a), B a' : sigma_equiv_sigma_id (λa', !comm_equiv_nondep)
... ≃ Σu, B u.1 : sigma_assoc_equiv
... ≃ B a : !sigma_equiv_of_is_contr_left
definition sigma_fiber_equiv (f : A → B) : (Σb, fiber f b) ≃ A :=
calc
(Σb, fiber f b) ≃ Σb a, f a = b : sigma_equiv_sigma_id (λb, !fiber.sigma_char)
... ≃ Σa b, f a = b : sigma_comm_equiv
... ≃ A : sigma_equiv_of_is_contr_right
definition is_pointed_fiber [instance] [constructor] (f : A → B) (a : A)
: pointed (fiber f (f a)) :=
pointed.mk (fiber.mk a idp)
definition pointed_fiber [constructor] (f : A → B) (a : A) : Type* :=
Pointed.mk (fiber.mk a (idpath (f a)))
definition is_trunc_fun [reducible] (n : trunc_index) (f : A → B) :=
Π(b : B), is_trunc n (fiber f b)
definition is_contr_fun [reducible] (f : A → B) := is_trunc_fun -2 f
end fiber
open unit is_trunc
namespace fiber
definition fiber_star_equiv (A : Type) : fiber (λx : A, star) star ≃ A :=
begin
fapply equiv.MK,
{ intro f, cases f with a H, exact a },
{ intro a, apply fiber.mk a, reflexivity },
{ intro a, reflexivity },
{ intro f, cases f with a H, change fiber.mk a (refl star) = fiber.mk a H,
rewrite [is_hset.elim H (refl star)] }
end
definition fiber_const_equiv (A : Type) (a₀ : A) (a : A)
: fiber (λz : unit, a₀) a ≃ a₀ = a :=
calc
fiber (λz : unit, a₀) a
≃ Σz : unit, a₀ = a : fiber.sigma_char
... ≃ a₀ = a : sigma_unit_left
end fiber
open function is_equiv
namespace fiber
/- Theorem 4.7.6 -/
variables {A : Type} {P Q : A → Type}
variable (f : Πa, P a → Q a)
definition fiber_total_equiv {a : A} (q : Q a)
: fiber (total f) ⟨a , q⟩ ≃ fiber (f a) q :=
calc
fiber (total f) ⟨a , q⟩
≃ Σ(w : Σx, P x), ⟨w.1 , f w.1 w.2 ⟩ = ⟨a , q⟩
: fiber.sigma_char
... ≃ Σ(x : A), Σ(p : P x), ⟨x , f x p⟩ = ⟨a , q⟩
: sigma_assoc_equiv
... ≃ Σ(x : A), Σ(p : P x), Σ(H : x = a), f x p =[H] q
:
begin
apply sigma_equiv_sigma_id, intro x,
apply sigma_equiv_sigma_id, intro p,
apply sigma_eq_equiv
end
... ≃ Σ(x : A), Σ(H : x = a), Σ(p : P x), f x p =[H] q
:
begin
apply sigma_equiv_sigma_id, intro x,
apply sigma_comm_equiv
end
... ≃ Σ(w : Σx, x = a), Σ(p : P w.1), f w.1 p =[w.2] q
: sigma_assoc_equiv
... ≃ Σ(p : P (center (Σx, x=a)).1), f (center (Σx, x=a)).1 p =[(center (Σx, x=a)).2] q
: sigma_equiv_of_is_contr_left
... ≃ Σ(p : P a), f a p =[idpath a] q
: equiv_of_eq idp
... ≃ Σ(p : P a), f a p = q
:
begin
apply sigma_equiv_sigma_id, intro p,
apply pathover_idp
end
... ≃ fiber (f a) q
: fiber.sigma_char
end fiber
|
c37d0976d35a81b79bab25e2e912cc0bccb3f165
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/measure_theory/pi_auto.lean
|
ac1a05264ea602bc55ca95fc6529602a1f7a97cf
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 21,452
|
lean
|
/-
Copyright (c) 2020 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.measure_theory.prod
import Mathlib.PostPort
universes u_1 u_2 u_3 u_4
namespace Mathlib
/-!
# Product measures
In this file we define and prove properties about finite products of measures
(and at some point, countable products of measures).
## Main definition
* `measure_theory.measure.pi`: The product of finitely many σ-finite measures.
Given `μ : Π i : ι, measure (α i)` for `[fintype ι]` it has type `measure (Π i : ι, α i)`.
## Implementation Notes
We define `measure_theory.outer_measure.pi`, the product of finitely many outer measures, as the
maximal outer measure `n` with the property that `n (pi univ s) ≤ ∏ i, m i (s i)`,
where `pi univ s` is the product of the sets `{ s i | i : ι }`.
We then show that this induces a product of measures, called `measure_theory.measure.pi`.
For a collection of σ-finite measures `μ` and a collection of measurable sets `s` we show that
`measure.pi μ (pi univ s) = ∏ i, μ i (s i)`. To do this, we follow these steps:
* We know that there is some ordering on `ι`, given by an element of `[encodable ι]`.
* Using this, we have an equivalence `measurable_equiv.pi_measurable_equiv_tprod` between
  `Π i, α i` and an iterated product of `α i`, called `list.tprod α l` for some list `l`.
* On this iterated product we can easily define a product measure `measure_theory.measure.tprod`
by iterating `measure_theory.measure.prod`
* Using the previous two steps we construct `measure_theory.measure.pi'` on `Π i, α i` for encodable
`ι`.
* We know that `measure_theory.measure.pi'` sends products of sets to products of measures, and
since `measure_theory.measure.pi` is the maximal such measure (or at least, it comes from an outer
measure which is the maximal such outer measure), we get the same rule for
`measure_theory.measure.pi`.
## Tags
finitary product measure
-/
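/-
A hedged worked instance of the rule proved below (an informal sketch, not checked
against this auto-ported snapshot): for a two-element index type and measurable
sets `s 0`, `s 1`, the statement `pi_pi` specialises to
  coe_fn (measure.pi μ) (set.pi set.univ s) = coe_fn (μ 0) (s 0) * coe_fn (μ 1) (s 1),
i.e. the measure of a rectangle is the product of the measures of its sides.
-/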
namespace measure_theory
/-- An upper bound for the measure in a finite product space.
It is defined by taking the image of the set under all projections, and taking the product
of the measures of these images.
For measurable boxes it is equal to the correct measure. -/
@[simp] def pi_premeasure {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
(m : (i : ι) → outer_measure (α i)) (s : set ((i : ι) → α i)) : ennreal :=
finset.prod finset.univ fun (i : ι) => coe_fn (m i) (function.eval i '' s)
theorem pi_premeasure_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
{m : (i : ι) → outer_measure (α i)} {s : (i : ι) → set (α i)}
(hs : set.nonempty (set.pi set.univ s)) :
pi_premeasure m (set.pi set.univ s) =
finset.prod finset.univ fun (i : ι) => coe_fn (m i) (s i) :=
sorry
theorem pi_premeasure_pi' {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
{m : (i : ι) → outer_measure (α i)} [Nonempty ι] {s : (i : ι) → set (α i)} :
pi_premeasure m (set.pi set.univ s) =
finset.prod finset.univ fun (i : ι) => coe_fn (m i) (s i) :=
sorry
theorem pi_premeasure_pi_mono {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
{m : (i : ι) → outer_measure (α i)} {s : set ((i : ι) → α i)} {t : set ((i : ι) → α i)}
(h : s ⊆ t) : pi_premeasure m s ≤ pi_premeasure m t :=
finset.prod_le_prod'
fun (i : ι) (_x : i ∈ finset.univ) =>
outer_measure.mono' (m i) (set.image_subset (function.eval i) h)
theorem pi_premeasure_pi_eval {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
{m : (i : ι) → outer_measure (α i)} [Nonempty ι] {s : set ((i : ι) → α i)} :
pi_premeasure m (set.pi set.univ fun (i : ι) => function.eval i '' s) = pi_premeasure m s :=
sorry
namespace outer_measure
/-- `outer_measure.pi m` is the finite product of the outer measures `{m i | i : ι}`.
It is defined to be the maximal outer measure `n` with the property that
`n (pi univ s) ≤ ∏ i, m i (s i)`, where `pi univ s` is the product of the sets
`{ s i | i : ι }`. -/
protected def pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} (m : (i : ι) → outer_measure (α i)) :
outer_measure ((i : ι) → α i) :=
bounded_by (pi_premeasure m)
theorem pi_pi_le {ι : Type u_1} [fintype ι] {α : ι → Type u_2} (m : (i : ι) → outer_measure (α i))
(s : (i : ι) → set (α i)) :
coe_fn (outer_measure.pi m) (set.pi set.univ s) ≤
finset.prod finset.univ fun (i : ι) => coe_fn (m i) (s i) :=
sorry
theorem le_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} {m : (i : ι) → outer_measure (α i)}
{n : outer_measure ((i : ι) → α i)} :
n ≤ outer_measure.pi m ↔
∀ (s : (i : ι) → set (α i)),
set.nonempty (set.pi set.univ s) →
coe_fn n (set.pi set.univ s) ≤
finset.prod finset.univ fun (i : ι) => coe_fn (m i) (s i) :=
sorry
end outer_measure
namespace measure
/-- A product of measures in `tprod α l`. -/
-- for some reason the equation compiler doesn't like this definition
protected def tprod {δ : Type u_3} {π : δ → Type u_4} [(x : δ) → measurable_space (π x)]
(l : List δ) (μ : (i : δ) → measure (π i)) : measure (list.tprod π l) :=
List.rec (dirac PUnit.unit)
(fun (i : δ) (l : List δ) (ih : measure (list.tprod π l)) => measure.prod (μ i) ih) l
@[simp] theorem tprod_nil {δ : Type u_3} {π : δ → Type u_4} [(x : δ) → measurable_space (π x)]
(μ : (i : δ) → measure (π i)) : measure.tprod [] μ = dirac PUnit.unit :=
rfl
@[simp] theorem tprod_cons {δ : Type u_3} {π : δ → Type u_4} [(x : δ) → measurable_space (π x)]
(i : δ) (l : List δ) (μ : (i : δ) → measure (π i)) :
measure.tprod (i :: l) μ = measure.prod (μ i) (measure.tprod l μ) :=
rfl
protected instance sigma_finite_tprod {δ : Type u_3} {π : δ → Type u_4}
[(x : δ) → measurable_space (π x)] (l : List δ) (μ : (i : δ) → measure (π i))
[∀ (i : δ), sigma_finite (μ i)] : sigma_finite (measure.tprod l μ) :=
List.rec
(eq.mpr (id (Eq._oldrec (Eq.refl (sigma_finite (measure.tprod [] μ))) (tprod_nil μ)))
(finite_measure.to_sigma_finite (dirac PUnit.unit)))
(fun (i : δ) (l : List δ) (ih : sigma_finite (measure.tprod l μ)) =>
eq.mpr
(id (Eq._oldrec (Eq.refl (sigma_finite (measure.tprod (i :: l) μ))) (tprod_cons i l μ)))
prod.sigma_finite)
l
theorem tprod_tprod {δ : Type u_3} {π : δ → Type u_4} [(x : δ) → measurable_space (π x)]
(l : List δ) (μ : (i : δ) → measure (π i)) [∀ (i : δ), sigma_finite (μ i)]
{s : (i : δ) → set (π i)} (hs : ∀ (i : δ), is_measurable (s i)) :
coe_fn (measure.tprod l μ) (set.tprod l s) =
list.prod (list.map (fun (i : δ) => coe_fn (μ i) (s i)) l) :=
sorry
theorem tprod_tprod_le {δ : Type u_3} {π : δ → Type u_4} [(x : δ) → measurable_space (π x)]
(l : List δ) (μ : (i : δ) → measure (π i)) [∀ (i : δ), sigma_finite (μ i)]
(s : (i : δ) → set (π i)) :
coe_fn (measure.tprod l μ) (set.tprod l s) ≤
list.prod (list.map (fun (i : δ) => coe_fn (μ i) (s i)) l) :=
sorry
/-- The product measure on an encodable finite type, defined by mapping `measure.tprod` along the
equivalence `measurable_equiv.pi_measurable_equiv_tprod`.
The definition `measure_theory.measure.pi` should be used instead of this one. -/
def pi' {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
(μ : (i : ι) → measure (α i)) [encodable ι] : measure ((i : ι) → α i) :=
coe_fn (map (list.tprod.elim' encodable.mem_sorted_univ))
(measure.tprod (encodable.sorted_univ ι) μ)
theorem pi'_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
(μ : (i : ι) → measure (α i)) [encodable ι] [∀ (i : ι), sigma_finite (μ i)]
{s : (i : ι) → set (α i)} (hs : ∀ (i : ι), is_measurable (s i)) :
coe_fn (pi' μ) (set.pi set.univ s) =
finset.prod finset.univ fun (i : ι) => coe_fn (μ i) (s i) :=
sorry
theorem pi'_pi_le {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
(μ : (i : ι) → measure (α i)) [encodable ι] [∀ (i : ι), sigma_finite (μ i)]
{s : (i : ι) → set (α i)} :
coe_fn (pi' μ) (set.pi set.univ s) ≤
finset.prod finset.univ fun (i : ι) => coe_fn (μ i) (s i) :=
sorry
theorem pi_caratheodory {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] (μ : (i : ι) → measure (α i)) :
measurable_space.pi ≤
outer_measure.caratheodory (outer_measure.pi fun (i : ι) => to_outer_measure (μ i)) :=
sorry
/-- `measure.pi μ` is the finite product of the measures `{μ i | i : ι}`.
  It is defined to be the measure corresponding to `measure_theory.outer_measure.pi`. -/
protected def pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
(μ : (i : ι) → measure (α i)) : measure ((i : ι) → α i) :=
outer_measure.to_measure (outer_measure.pi fun (i : ι) => to_outer_measure (μ i)) sorry
theorem pi_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
(μ : (i : ι) → measure (α i)) [∀ (i : ι), sigma_finite (μ i)] (s : (i : ι) → set (α i))
(hs : ∀ (i : ι), is_measurable (s i)) :
coe_fn (measure.pi μ) (set.pi set.univ s) =
finset.prod finset.univ fun (i : ι) => coe_fn (μ i) (s i) :=
sorry
theorem pi_eval_preimage_null {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] (μ : (i : ι) → measure (α i)) [∀ (i : ι), sigma_finite (μ i)]
{i : ι} {s : set (α i)} (hs : coe_fn (μ i) s = 0) :
coe_fn (measure.pi μ) (function.eval i ⁻¹' s) = 0 :=
sorry
theorem pi_hyperplane {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] (μ : (i : ι) → measure (α i)) [∀ (i : ι), sigma_finite (μ i)]
(i : ι) [has_no_atoms (μ i)] (x : α i) :
coe_fn (measure.pi μ) (set_of fun (f : (i : ι) → α i) => f i = x) = 0 :=
(fun (this : coe_fn (measure.pi μ) (function.eval i ⁻¹' singleton x) = 0) => this)
(pi_eval_preimage_null μ (measure_singleton x))
theorem ae_eval_ne {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
(μ : (i : ι) → measure (α i)) [∀ (i : ι), sigma_finite (μ i)] (i : ι) [has_no_atoms (μ i)]
(x : α i) : filter.eventually (fun (y : (i : ι) → α i) => y i ≠ x) (ae (measure.pi μ)) :=
iff.mpr compl_mem_ae_iff (pi_hyperplane μ i x)
theorem tendsto_eval_ae_ae {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
{i : ι} : filter.tendsto (function.eval i) (ae (measure.pi μ)) (ae (μ i)) :=
fun (s : set (α i)) (hs : s ∈ ae (μ i)) => pi_eval_preimage_null μ hs
-- TODO: should we introduce `filter.pi` and prove some basic facts about it?
-- The same combinator appears here and in `nhds_pi`
theorem ae_pi_le_infi_comap {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)}
[∀ (i : ι), sigma_finite (μ i)] :
ae (measure.pi μ) ≤ infi fun (i : ι) => filter.comap (function.eval i) (ae (μ i)) :=
le_infi fun (i : ι) => filter.tendsto.le_comap tendsto_eval_ae_ae
theorem ae_eq_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
{μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)] {β : ι → Type u_3}
{f : (i : ι) → α i → β i} {f' : (i : ι) → α i → β i}
(h : ∀ (i : ι), filter.eventually_eq (ae (μ i)) (f i) (f' i)) :
filter.eventually_eq (ae (measure.pi μ)) (fun (x : (i : ι) → α i) (i : ι) => f i (x i))
fun (x : (i : ι) → α i) (i : ι) => f' i (x i) :=
filter.eventually.mono
(iff.mpr filter.eventually_all
fun (i : ι) => filter.tendsto.eventually tendsto_eval_ae_ae (h i))
fun (x : (x : ι) → α x) (hx : ∀ (i : ι), f i (function.eval i x) = f' i (function.eval i x)) =>
funext hx
theorem ae_le_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measurable_space (α i)]
{μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)] {β : ι → Type u_3}
[(i : ι) → preorder (β i)] {f : (i : ι) → α i → β i} {f' : (i : ι) → α i → β i}
(h : ∀ (i : ι), filter.eventually_le (ae (μ i)) (f i) (f' i)) :
filter.eventually_le (ae (measure.pi μ)) (fun (x : (i : ι) → α i) (i : ι) => f i (x i))
fun (x : (i : ι) → α i) (i : ι) => f' i (x i) :=
filter.eventually.mono
(iff.mpr filter.eventually_all
fun (i : ι) => filter.tendsto.eventually tendsto_eval_ae_ae (h i))
fun (x : (x : ι) → α x) (hx : ∀ (i : ι), f i (function.eval i x) ≤ f' i (function.eval i x)) =>
hx
theorem ae_le_set_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
{I : set ι} {s : (i : ι) → set (α i)} {t : (i : ι) → set (α i)}
(h : ∀ (i : ι), i ∈ I → filter.eventually_le (ae (μ i)) (s i) (t i)) :
filter.eventually_le (ae (measure.pi μ)) (set.pi I s) (set.pi I t) :=
sorry
theorem ae_eq_set_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
{I : set ι} {s : (i : ι) → set (α i)} {t : (i : ι) → set (α i)}
(h : ∀ (i : ι), i ∈ I → filter.eventually_eq (ae (μ i)) (s i) (t i)) :
filter.eventually_eq (ae (measure.pi μ)) (set.pi I s) (set.pi I t) :=
filter.eventually_le.antisymm
(ae_le_set_pi fun (i : ι) (hi : i ∈ I) => filter.eventually_eq.le (h i hi))
(ae_le_set_pi
fun (i : ι) (hi : i ∈ I) => filter.eventually_eq.le (filter.eventually_eq.symm (h i hi)))
theorem pi_Iio_ae_eq_pi_Iic {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {s : set ι}
{f : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi s fun (i : ι) => set.Iio (f i))
(set.pi s fun (i : ι) => set.Iic (f i)) :=
ae_eq_set_pi fun (i : ι) (hi : i ∈ s) => Iio_ae_eq_Iic
theorem pi_Ioi_ae_eq_pi_Ici {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {s : set ι}
{f : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi s fun (i : ι) => set.Ioi (f i))
(set.pi s fun (i : ι) => set.Ici (f i)) :=
ae_eq_set_pi fun (i : ι) (hi : i ∈ s) => Ioi_ae_eq_Ici
theorem univ_pi_Iio_ae_eq_Iic {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {f : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi set.univ fun (i : ι) => set.Iio (f i))
(set.Iic f) :=
sorry
theorem univ_pi_Ioi_ae_eq_Ici {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {f : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi set.univ fun (i : ι) => set.Ioi (f i))
(set.Ici f) :=
sorry
theorem pi_Ioo_ae_eq_pi_Icc {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {s : set ι} {f : (i : ι) → α i}
{g : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi s fun (i : ι) => set.Ioo (f i) (g i))
(set.pi s fun (i : ι) => set.Icc (f i) (g i)) :=
ae_eq_set_pi fun (i : ι) (hi : i ∈ s) => Ioo_ae_eq_Icc
theorem univ_pi_Ioo_ae_eq_Icc {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {f : (i : ι) → α i}
{g : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi set.univ fun (i : ι) => set.Ioo (f i) (g i))
(set.Icc f g) :=
sorry
theorem pi_Ioc_ae_eq_pi_Icc {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {s : set ι} {f : (i : ι) → α i}
{g : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi s fun (i : ι) => set.Ioc (f i) (g i))
(set.pi s fun (i : ι) => set.Icc (f i) (g i)) :=
ae_eq_set_pi fun (i : ι) (hi : i ∈ s) => Ioc_ae_eq_Icc
theorem univ_pi_Ioc_ae_eq_Icc {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {f : (i : ι) → α i}
{g : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi set.univ fun (i : ι) => set.Ioc (f i) (g i))
(set.Icc f g) :=
sorry
theorem pi_Ico_ae_eq_pi_Icc {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {s : set ι} {f : (i : ι) → α i}
{g : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi s fun (i : ι) => set.Ico (f i) (g i))
(set.pi s fun (i : ι) => set.Icc (f i) (g i)) :=
ae_eq_set_pi fun (i : ι) (hi : i ∈ s) => Ico_ae_eq_Icc
theorem univ_pi_Ico_ae_eq_Icc {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[(i : ι) → partial_order (α i)] [∀ (i : ι), has_no_atoms (μ i)] {f : (i : ι) → α i}
{g : (i : ι) → α i} :
filter.eventually_eq (ae (measure.pi μ)) (set.pi set.univ fun (i : ι) => set.Ico (f i) (g i))
(set.Icc f g) :=
sorry
/-- If one of the measures `μ i` has no atoms, then `measure.pi μ`
has no atoms. The instance below assumes that all `μ i` have no atoms. -/
theorem pi_has_no_atoms {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
(i : ι) [has_no_atoms (μ i)] : has_no_atoms (measure.pi μ) :=
has_no_atoms.mk
fun (x : (i : ι) → α i) =>
flip measure_mono_null (pi_hyperplane μ i (x i)) (iff.mpr set.singleton_subset_iff rfl)
protected instance pi.measure_theory.has_no_atoms {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)} [∀ (i : ι), sigma_finite (μ i)]
[h : Nonempty ι] [∀ (i : ι), has_no_atoms (μ i)] : has_no_atoms (measure.pi μ) :=
nonempty.elim h fun (i : ι) => pi_has_no_atoms i
protected instance pi.measure_theory.locally_finite_measure {ι : Type u_1} [fintype ι]
{α : ι → Type u_2} [(i : ι) → measurable_space (α i)] {μ : (i : ι) → measure (α i)}
[∀ (i : ι), sigma_finite (μ i)] [(i : ι) → topological_space (α i)]
[∀ (i : ι), opens_measurable_space (α i)] [∀ (i : ι), locally_finite_measure (μ i)] :
locally_finite_measure (measure.pi μ) :=
sorry
end measure
protected instance measure_space.pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2}
[(i : ι) → measure_space (α i)] : measure_space ((i : ι) → α i) :=
measure_space.mk (measure.pi fun (i : ι) => volume)
theorem volume_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measure_space (α i)] :
volume = measure.pi fun (i : ι) => volume :=
rfl
theorem volume_pi_pi {ι : Type u_1} [fintype ι] {α : ι → Type u_2} [(i : ι) → measure_space (α i)]
    [∀ (i : ι), sigma_finite volume] (s : (i : ι) → set (α i)) (hs : ∀ (i : ι), is_measurable (s i)) :
coe_fn volume (set.pi set.univ s) =
finset.prod finset.univ fun (i : ι) => coe_fn volume (s i) :=
measure.pi_pi (fun (i : ι) => volume) s hs
end Mathlib
|
eecec0790e8983b3ac452eab26858c7d74b80398
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/algebra/category/Group/colimits.lean
|
eba8807d36ce57db5022ae4934f98b5a9b28e842
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 9,723
|
lean
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import algebra.category.Group.preadditive
import group_theory.quotient_group
import category_theory.limits.concrete_category
import category_theory.limits.shapes.kernels
import category_theory.limits.shapes.concrete_category
/-!
# The category of additive commutative groups has all colimits.
This file uses a "pre-automated" approach, just as for `Mon/colimits.lean`.
It is a very uniform approach, that conceivably could be synthesised directly
by a tactic that analyses the shape of `add_comm_group` and `monoid_hom`.
TODO:
In fact, in `AddCommGroup` there is a much nicer model of colimits as quotients
of finitely supported functions, and we really should implement this as well (or instead).
-/
universes u v
open category_theory
open category_theory.limits
-- [ROBOT VOICE]:
-- You should pretend for now that this file was automatically generated.
-- It follows the same template as colimits in Mon.
namespace AddCommGroup.colimits
/-!
We build the colimit of a diagram in `AddCommGroup` by constructing the
free group on the disjoint union of all the abelian groups in the diagram,
then taking the quotient by the abelian group laws within each abelian group,
and the identifications given by the morphisms in the diagram.
-/
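/-
A hedged sanity check of the construction (informal, not compiled here): for the
one-object diagram consisting of a single abelian group `A` and its identity
morphism only, the `map` relation is trivial, while the `zero`, `neg` and `add`
relations identify every formal expression with `of _ a` for an actual element
`a : A`; hence `colimit_type` recovers `A` itself, as the colimit of this diagram
should.
-/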
variables {J : Type v} [small_category J] (F : J ⥤ AddCommGroup.{v})
/--
An inductive type representing all group expressions (without relations)
on a collection of types indexed by the objects of `J`.
-/
inductive prequotient
-- There's always `of`
| of : Π (j : J) (x : F.obj j), prequotient
-- Then one generator for each operation
| zero : prequotient
| neg : prequotient → prequotient
| add : prequotient → prequotient → prequotient
instance : inhabited (prequotient F) := ⟨prequotient.zero⟩
open prequotient
/--
The relation on `prequotient` saying when two expressions are equal
because of the abelian group laws, or
because one element is mapped to another by a morphism in the diagram.
-/
inductive relation : prequotient F → prequotient F → Prop
-- Make it an equivalence relation:
| refl : Π (x), relation x x
| symm : Π (x y) (h : relation x y), relation y x
| trans : Π (x y z) (h : relation x y) (k : relation y z), relation x z
-- There's always a `map` relation
| map : Π (j j' : J) (f : j ⟶ j') (x : F.obj j), relation (of j' (F.map f x)) (of j x)
-- Then one relation per operation, describing the interaction with `of`
| zero : Π (j), relation (of j 0) zero
| neg : Π (j) (x : F.obj j), relation (of j (-x)) (neg (of j x))
| add : Π (j) (x y : F.obj j), relation (of j (x + y)) (add (of j x) (of j y))
-- Then one relation per argument of each operation
| neg_1 : Π (x x') (r : relation x x'), relation (neg x) (neg x')
| add_1 : Π (x x' y) (r : relation x x'), relation (add x y) (add x' y)
| add_2 : Π (x y y') (r : relation y y'), relation (add x y) (add x y')
-- And one relation per axiom
| zero_add : Π (x), relation (add zero x) x
| add_zero : Π (x), relation (add x zero) x
| add_left_neg : Π (x), relation (add (neg x) x) zero
| add_comm : Π (x y), relation (add x y) (add y x)
| add_assoc : Π (x y z), relation (add (add x y) z) (add x (add y z))
/--
The setoid corresponding to group expressions modulo abelian group relations and identifications.
-/
def colimit_setoid : setoid (prequotient F) :=
{ r := relation F, iseqv := ⟨relation.refl, relation.symm, relation.trans⟩ }
attribute [instance] colimit_setoid
/--
The underlying type of the colimit of a diagram in `AddCommGroup`.
-/
@[derive inhabited]
def colimit_type : Type v := quotient (colimit_setoid F)
instance : add_comm_group (colimit_type F) :=
{ zero :=
begin
exact quot.mk _ zero
end,
neg :=
begin
fapply @quot.lift,
{ intro x,
exact quot.mk _ (neg x) },
{ intros x x' r,
apply quot.sound,
exact relation.neg_1 _ _ r },
end,
add :=
begin
fapply @quot.lift _ _ ((colimit_type F) → (colimit_type F)),
{ intro x,
fapply @quot.lift,
{ intro y,
exact quot.mk _ (add x y) },
{ intros y y' r,
apply quot.sound,
exact relation.add_2 _ _ _ r } },
{ intros x x' r,
funext y,
induction y,
dsimp,
apply quot.sound,
{ exact relation.add_1 _ _ _ r },
{ refl } },
end,
zero_add := λ x,
begin
induction x,
dsimp,
apply quot.sound,
apply relation.zero_add,
refl,
end,
add_zero := λ x,
begin
induction x,
dsimp,
apply quot.sound,
apply relation.add_zero,
refl,
end,
add_left_neg := λ x,
begin
induction x,
dsimp,
apply quot.sound,
apply relation.add_left_neg,
refl,
end,
add_comm := λ x y,
begin
induction x,
induction y,
dsimp,
apply quot.sound,
apply relation.add_comm,
refl,
refl,
end,
add_assoc := λ x y z,
begin
induction x,
induction y,
induction z,
dsimp,
apply quot.sound,
apply relation.add_assoc,
refl,
refl,
refl,
end, }
@[simp] lemma quot_zero : quot.mk setoid.r zero = (0 : colimit_type F) := rfl
@[simp] lemma quot_neg (x) :
quot.mk setoid.r (neg x) = (-(quot.mk setoid.r x) : colimit_type F) := rfl
@[simp] lemma quot_add (x y) :
quot.mk setoid.r (add x y) = ((quot.mk setoid.r x) + (quot.mk setoid.r y) : colimit_type F) := rfl
/-- The bundled abelian group giving the colimit of a diagram. -/
def colimit : AddCommGroup := AddCommGroup.of (colimit_type F)
/-- The function from a given abelian group in the diagram to the colimit abelian group. -/
def cocone_fun (j : J) (x : F.obj j) : colimit_type F :=
quot.mk _ (of j x)
/-- The group homomorphism from a given abelian group in the diagram to the colimit abelian
group. -/
def cocone_morphism (j : J) : F.obj j ⟶ colimit F :=
{ to_fun := cocone_fun F j,
map_zero' := by apply quot.sound; apply relation.zero,
map_add' := by intros; apply quot.sound; apply relation.add }
@[simp] lemma cocone_naturality {j j' : J} (f : j ⟶ j') :
F.map f ≫ (cocone_morphism F j') = cocone_morphism F j :=
begin
ext,
apply quot.sound,
apply relation.map,
end
@[simp] lemma cocone_naturality_components (j j' : J) (f : j ⟶ j') (x : F.obj j):
(cocone_morphism F j') (F.map f x) = (cocone_morphism F j) x :=
by { rw ←cocone_naturality F f, refl }
/-- The cocone over the proposed colimit abelian group. -/
def colimit_cocone : cocone F :=
{ X := colimit F,
ι :=
{ app := cocone_morphism F } }.
/-- The function from the free abelian group on the diagram to the cone point of any other
cocone. -/
@[simp] def desc_fun_lift (s : cocone F) : prequotient F → s.X
| (of j x) := (s.ι.app j) x
| zero := 0
| (neg x) := -(desc_fun_lift x)
| (add x y) := desc_fun_lift x + desc_fun_lift y
/-- The function from the colimit abelian group to the cone point of any other cocone. -/
def desc_fun (s : cocone F) : colimit_type F → s.X :=
begin
fapply quot.lift,
{ exact desc_fun_lift F s },
{ intros x y r,
induction r; try { dsimp },
-- refl
{ refl },
-- symm
{ exact r_ih.symm },
-- trans
{ exact eq.trans r_ih_h r_ih_k },
-- map
{ simp, },
-- zero
{ simp, },
-- neg
{ simp, },
-- add
{ simp, },
-- neg_1
{ rw r_ih, },
-- add_1
{ rw r_ih, },
-- add_2
{ rw r_ih, },
-- zero_add
{ rw zero_add, },
-- add_zero
{ rw add_zero, },
-- add_left_neg
{ rw add_left_neg, },
-- add_comm
{ rw add_comm, },
-- add_assoc
{ rw add_assoc, } }
end
/-- The group homomorphism from the colimit abelian group to the cone point of any other cocone. -/
def desc_morphism (s : cocone F) : colimit F ⟶ s.X :=
{ to_fun := desc_fun F s,
map_zero' := rfl,
map_add' := λ x y, by { induction x; induction y; refl }, }
/-- Evidence that the proposed colimit is the colimit. -/
def colimit_cocone_is_colimit : is_colimit (colimit_cocone F) :=
{ desc := λ s, desc_morphism F s,
uniq' := λ s m w,
begin
ext,
induction x,
induction x,
{ have w' := congr_fun (congr_arg (λ f : F.obj x_j ⟶ s.X, (f : F.obj x_j → s.X)) (w x_j)) x_x,
erw w',
refl, },
{ simp *, },
{ simp *, },
{ simp *, },
refl
end }.
instance has_colimits_AddCommGroup : has_colimits AddCommGroup :=
{ has_colimits_of_shape := λ J 𝒥, by exactI
{ has_colimit := λ F, has_colimit.mk
{ cocone := colimit_cocone F,
is_colimit := colimit_cocone_is_colimit F } } }
end AddCommGroup.colimits
namespace AddCommGroup
open quotient_add_group
/--
The categorical cokernel of a morphism in `AddCommGroup`
agrees with the usual group-theoretical quotient.
-/
noncomputable def cokernel_iso_quotient {G H : AddCommGroup.{u}} (f : G ⟶ H) :
cokernel f ≅ AddCommGroup.of (H ⧸ (add_monoid_hom.range f)) :=
{ hom := cokernel.desc f (mk' _)
(by { ext, apply quotient.sound, apply left_rel_apply.mpr, fsplit, exact -x,
simp only [add_zero, add_monoid_hom.map_neg], }),
inv := quotient_add_group.lift _ (cokernel.π f)
(by { intros x H_1, cases H_1, induction H_1_h,
simp only [cokernel.condition_apply, zero_apply]}),
  -- `obviously` can take care of the next goals, but it is really slow
hom_inv_id' := begin
ext1, simp only [coequalizer_as_cokernel, category.comp_id, cokernel.π_desc_assoc], ext1, refl,
end,
inv_hom_id' := begin
ext x : 2,
simp only [colimit.ι_desc_apply, id_apply, lift_mk, mk'_apply,
cofork.of_π_ι_app, comp_apply, add_monoid_hom.comp_apply],
end, }
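-- A hedged concrete instance (informal, not compiled here): for the morphism
-- "multiplication by `n`" on `AddCommGroup.of ℤ`, the range is `nℤ`, so
-- `cokernel_iso_quotient` identifies the categorical cokernel with the familiar
-- quotient group `ℤ ⧸ nℤ`.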
end AddCommGroup
|
b119a0657f5657c87b7b893b07e60de3d0937839
|
d1a52c3f208fa42c41df8278c3d280f075eb020c
|
/stage0/src/Lean/Elab/Quotation.lean
|
5eda0123019d5af77fff98db58ce1c7c7c3b7ee1
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
cipher1024/lean4
|
6e1f98bb58e7a92b28f5364eb38a14c8d0aae393
|
69114d3b50806264ef35b57394391c3e738a9822
|
refs/heads/master
| 1,642,227,983,603
| 1,642,011,696,000
| 1,642,011,696,000
| 228,607,691
| 0
| 0
|
Apache-2.0
| 1,576,584,269,000
| 1,576,584,268,000
| null |
UTF-8
|
Lean
| false
| false
| 24,974
|
lean
|
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
Elaboration of syntax quotations as terms and patterns (in `match_syntax`). See also `./Hygiene.lean` for the basic
hygiene workings and data types.
-/
import Lean.Syntax
import Lean.ResolveName
import Lean.Elab.Term
import Lean.Elab.Quotation.Util
import Lean.Elab.Quotation.Precheck
import Lean.Parser.Term
namespace Lean.Elab.Term.Quotation
open Lean.Parser.Term
open Lean.Syntax
open Meta
/-- `C[$(e)]` ~> `let a := e; C[$a]`. Used in the implementation of antiquot splices. -/
private partial def floatOutAntiquotTerms : Syntax → StateT (Syntax → TermElabM Syntax) TermElabM Syntax
| stx@(Syntax.node i k args) => do
if isAntiquot stx && !isEscapedAntiquot stx then
let e := getAntiquotTerm stx
if !e.isIdent || !e.getId.isAtomic then
return ← withFreshMacroScope do
let a ← `(a)
modify (fun cont stx => (`(let $a:ident := $e; $stx) : TermElabM _))
stx.setArg 2 a
Syntax.node i k (← args.mapM floatOutAntiquotTerms)
| stx => pure stx
private def getSepFromSplice (splice : Syntax) : Syntax :=
if let Syntax.atom _ sep := getAntiquotSpliceSuffix splice then
Syntax.mkStrLit (sep.dropRight 1)
else
unreachable!
partial def mkTuple : Array Syntax → TermElabM Syntax
| #[] => `(Unit.unit)
| #[e] => e
| es => do
let stx ← mkTuple (es.eraseIdx 0)
`(Prod.mk $(es[0]) $stx)
def resolveSectionVariable (sectionVars : NameMap Name) (id : Name) : List (Name × List String) :=
-- decode macro scopes from name before recursion
let extractionResult := extractMacroScopes id
let rec loop : Name → List String → List (Name × List String)
| id@(Name.str p s _), projs =>
-- NOTE: we assume that macro scopes always belong to the projected constant, not the projections
let id := { extractionResult with name := id }.review
match sectionVars.find? id with
| some newId => [(newId, projs)]
| none => loop p (s::projs)
| _, _ => []
loop extractionResult.name []
/-- Transform sequence of pushes and appends into acceptable code -/
def ArrayStxBuilder := Sum (Array Syntax) Syntax
namespace ArrayStxBuilder
def empty : ArrayStxBuilder := Sum.inl #[]
def build : ArrayStxBuilder → Syntax
| Sum.inl elems => quote elems
| Sum.inr arr => arr
def push (b : ArrayStxBuilder) (elem : Syntax) : ArrayStxBuilder :=
match b with
| Sum.inl elems => Sum.inl <| elems.push elem
| Sum.inr arr => Sum.inr <| mkCApp ``Array.push #[arr, elem]
def append (b : ArrayStxBuilder) (arr : Syntax) (appendName := ``Array.append) : ArrayStxBuilder :=
Sum.inr <| mkCApp appendName #[b.build, arr]
end ArrayStxBuilder
-- Elaborate the content of a syntax quotation term
private partial def quoteSyntax : Syntax → TermElabM Syntax
| Syntax.ident info rawVal val preresolved => do
if !hygiene.get (← getOptions) then
return ← `(Syntax.ident info $(quote rawVal) $(quote val) $(quote preresolved))
-- Add global scopes at compilation time (now), add macro scope at runtime (in the quotation).
-- See the paper for details.
let r ← resolveGlobalName val
-- extension of the paper algorithm: also store unique section variable names as top-level scopes
-- so they can be captured and used inside the section, but not outside
let r' := resolveSectionVariable (← read).sectionVars val
let preresolved := r ++ r' ++ preresolved
let val := quote val
-- `scp` is bound in stxQuot.expand
`(Syntax.ident info $(quote rawVal) (addMacroScope mainModule $val scp) $(quote preresolved))
-- if antiquotation, insert contents as-is, else recurse
| stx@(Syntax.node _ k _) => do
if isAntiquot stx && !isEscapedAntiquot stx then
getAntiquotTerm stx
else if isTokenAntiquot stx && !isEscapedAntiquot stx then
match stx[0] with
| Syntax.atom _ val => `(Syntax.atom (Option.getD (getHeadInfo? $(getAntiquotTerm stx)) info) $(quote val))
| _ => throwErrorAt stx "expected token"
else if isAntiquotSuffixSplice stx && !isEscapedAntiquot stx then
-- splices must occur in a `many` node
throwErrorAt stx "unexpected antiquotation splice"
else if isAntiquotSplice stx && !isEscapedAntiquot stx then
throwErrorAt stx "unexpected antiquotation splice"
else
-- if escaped antiquotation, decrement by one escape level
let stx := unescapeAntiquot stx
let mut args := ArrayStxBuilder.empty
let appendName := if (← getEnv).contains ``Array.append then ``Array.append else ``Array.appendCore
for arg in stx.getArgs do
if k == nullKind && isAntiquotSuffixSplice arg then
let antiquot := getAntiquotSuffixSpliceInner arg
args := args.append (appendName := appendName) <| ←
match antiquotSuffixSplice? arg with
| `optional => `(match $(getAntiquotTerm antiquot):term with
| some x => Array.empty.push x
| none => Array.empty)
| `many => getAntiquotTerm antiquot
| `sepBy => `(@SepArray.elemsAndSeps $(getSepFromSplice arg) $(getAntiquotTerm antiquot))
| k => throwErrorAt arg "invalid antiquotation suffix splice kind '{k}'"
else if k == nullKind && isAntiquotSplice arg then
let k := antiquotSpliceKind? arg
let (arg, bindLets) ← floatOutAntiquotTerms arg |>.run pure
let inner ← (getAntiquotSpliceContents arg).mapM quoteSyntax
let ids ← getAntiquotationIds arg
if ids.isEmpty then
throwErrorAt stx "antiquotation splice must contain at least one antiquotation"
let arr ← match k with
| `optional => `(match $[$ids:ident],* with
| $[some $ids:ident],* => $(quote inner)
| none => Array.empty)
| _ =>
let arr ← ids[:ids.size-1].foldrM (fun id arr => `(Array.zip $id $arr)) ids.back
`(Array.map (fun $(← mkTuple ids) => $(inner[0])) $arr)
let arr ←
if k == `sepBy then
`(mkSepArray $arr (mkAtom $(getSepFromSplice arg)))
else arr
let arr ← bindLets arr
args := args.append arr
else do
let arg ← quoteSyntax arg
args := args.push arg
`(Syntax.node SourceInfo.none $(quote k) $(args.build))
| Syntax.atom _ val =>
`(Syntax.atom info $(quote val))
| Syntax.missing => throwUnsupportedSyntax
def stxQuot.expand (stx : Syntax) : TermElabM Syntax := do
/- Syntax quotations are monadic values depending on the current macro scope. For efficiency, we bind
the macro scope once for each quotation, then build the syntax tree in a completely pure computation
depending on this binding. Note that regular function calls do not introduce a new macro scope (i.e.
we preserve referential transparency), so we can refer to this same `scp` inside `quoteSyntax` by
including it literally in a syntax quotation. -/
-- TODO: simplify to `(do scp ← getCurrMacroScope; pure $(quoteSyntax quoted))
let stx ← quoteSyntax stx.getQuotContent;
`(Bind.bind MonadRef.mkInfoFromRefPos (fun info =>
Bind.bind getCurrMacroScope (fun scp =>
Bind.bind getMainModule (fun mainModule => Pure.pure $stx))))
/- NOTE: It may seem like the newly introduced binding `scp` may accidentally
capture identifiers in an antiquotation introduced by `quoteSyntax`. However,
note that the syntax quotation above enjoys the same hygiene guarantees as
anywhere else in Lean; that is, we implement hygienic quotations by making
use of the hygienic quotation support of the bootstrapped Lean compiler!
Aside: While this might sound "dangerous", it is in fact less reliant on a
"chain of trust" than other bootstrapping parts of Lean: because this
implementation itself never uses `scp` (or any other identifier) both inside
and outside quotations, it can actually correctly be compiled by an
unhygienic (but otherwise correct) implementation of syntax quotations. As
long as it is then compiled again with the resulting executable (i.e. up to
stage 2), the result is a correct hygienic implementation. In this sense the
implementation is "self-stabilizing". It was in fact originally compiled
by an unhygienic prototype implementation. -/
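/- A hedged sketch of the shape of the expansion (names abridged, not taken from
an actual compiler trace): a quotation `(x) whose content is a single identifier
`x` elaborates roughly to
  Bind.bind MonadRef.mkInfoFromRefPos fun info =>
  Bind.bind getCurrMacroScope fun scp =>
  Bind.bind getMainModule fun mainModule =>
  Pure.pure (Syntax.ident info "x" (addMacroScope mainModule `x scp) preresolved)
where `preresolved` records the global resolutions computed at compile time by
`quoteSyntax`. -/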
macro "elab_stx_quot" kind:ident : command =>
`(@[builtinTermElab $kind:ident] def elabQuot : TermElab := adaptExpander stxQuot.expand)
--
elab_stx_quot Parser.Level.quot
elab_stx_quot Parser.Term.quot
elab_stx_quot Parser.Term.funBinder.quot
elab_stx_quot Parser.Term.bracketedBinder.quot
elab_stx_quot Parser.Term.matchDiscr.quot
elab_stx_quot Parser.Tactic.quot
elab_stx_quot Parser.Tactic.quotSeq
elab_stx_quot Parser.Term.stx.quot
elab_stx_quot Parser.Term.prec.quot
elab_stx_quot Parser.Term.attr.quot
elab_stx_quot Parser.Term.prio.quot
elab_stx_quot Parser.Term.doElem.quot
elab_stx_quot Parser.Term.dynamicQuot
/- match -/
-- an "alternative" of patterns plus right-hand side
private abbrev Alt := List Syntax × Syntax
/--
In a single match step, we match the first discriminant against the "head" of the first pattern of the first
alternative. This datatype describes what kind of check this involves, which helps other patterns decide if
they are covered by the same check and don't have to be checked again (see also `MatchResult`). -/
inductive HeadCheck where
-- match step that always succeeds: _, x, `($x), ...
| unconditional
-- match step based on kind and, optionally, arity of discriminant
-- If `arity` is given, that number of new discriminants is introduced. `covered` patterns should then introduce the
-- same number of new patterns.
  -- We actually check the arity at run time only in the case of `null` nodes since it should otherwise be implied by
-- the node kind.
-- without arity: `($x:k)
-- with arity: any quotation without an antiquotation head pattern
| shape (k : SyntaxNodeKind) (arity : Option Nat)
-- Match step that succeeds on `null` nodes of arity at least `numPrefix + numSuffix`, introducing discriminants
-- for the first `numPrefix` children, one `null` node for those in between, and for the `numSuffix` last children.
-- example: `([$x, $xs,*, $y]) is `slice 2 2`
| slice (numPrefix numSuffix : Nat)
-- other, complicated match step that will probably only cover identical patterns
-- example: antiquotation splices `($[...]*)
| other (pat : Syntax)
open HeadCheck
/-- Describe whether a pattern is covered by a head check (induced by the pattern itself or a different pattern). -/
inductive MatchResult where
-- Pattern agrees with head check, remove and transform remaining alternative.
-- If `exhaustive` is `false`, *also* include unchanged alternative in the "no" branch.
| covered (f : Alt → TermElabM Alt) (exhaustive : Bool)
-- Pattern disagrees with head check, include in "no" branch only
| uncovered
-- Pattern is not quite sure yet; include unchanged in both branches
| undecided
open MatchResult
/-- All necessary information on a pattern head. -/
structure HeadInfo where
-- check induced by the pattern
check : HeadCheck
-- compute compatibility of pattern with given head check
onMatch (taken : HeadCheck) : MatchResult
-- actually run the specified head check, with the discriminant bound to `discr`
doMatch (yes : (newDiscrs : List Syntax) → TermElabM Syntax) (no : TermElabM Syntax) : TermElabM Syntax
/-- Adapt alternatives that do not introduce new discriminants in `doMatch`, but are covered by those that do so. -/
private def noOpMatchAdaptPats : HeadCheck → Alt → Alt
| shape k (some sz), (pats, rhs) => (List.replicate sz (Unhygienic.run `(_)) ++ pats, rhs)
| slice p s, (pats, rhs) => (List.replicate (p + 1 + s) (Unhygienic.run `(_)) ++ pats, rhs)
| _, alt => alt
private def adaptRhs (fn : Syntax → TermElabM Syntax) : Alt → TermElabM Alt
| (pats, rhs) => do (pats, ← fn rhs)
private partial def getHeadInfo (alt : Alt) : TermElabM HeadInfo :=
let pat := alt.fst.head!
let unconditionally (rhsFn) := pure {
check := unconditional,
doMatch := fun yes no => yes [],
onMatch := fun taken => covered (adaptRhs rhsFn ∘ noOpMatchAdaptPats taken) (match taken with | unconditional => true | _ => false)
}
-- quotation pattern
if isQuot pat then
let quoted := getQuotContent pat
if quoted.isAtom then
-- We assume that atoms are uniquely determined by the node kind and never have to be checked
unconditionally pure
else if quoted.isTokenAntiquot then
unconditionally (`(let $(quoted.getAntiquotTerm) := discr; $(·)))
else if isAntiquot quoted && !isEscapedAntiquot quoted then
-- quotation contains a single antiquotation
let k := antiquotKind? quoted |>.get!
let rhsFn := match getAntiquotTerm quoted with
| `(_) => pure
| `($id:ident) => fun stx => `(let $id := discr; $(stx))
| anti => fun _ => throwErrorAt anti "unsupported antiquotation kind in pattern"
-- Antiquotation kinds like `$id:ident` influence the parser, but also need to be considered by
-- `match` (but not by quotation terms). For example, `($id:ident) and `($e) are not
-- distinguishable without checking the kind of the node to be captured. Note that some
-- antiquotations like the latter one for terms do not correspond to any actual node kind
-- (signified by `k == Name.anonymous`), so we would only check for `ident` here.
--
-- if stx.isOfKind `ident then
-- let id := stx; let e := stx; ...
-- else
-- let e := stx; ...
if k == Name.anonymous then unconditionally rhsFn else pure {
check := shape k none,
onMatch := fun
| other _ => undecided
| taken@(shape k' sz) =>
if k' == k then
covered (adaptRhs rhsFn ∘ noOpMatchAdaptPats taken) (exhaustive := sz.isNone)
else uncovered
| _ => uncovered,
doMatch := fun yes no => do `(cond (Syntax.isOfKind discr $(quote k)) $(← yes []) $(← no)),
}
else if isAntiquotSuffixSplice quoted then throwErrorAt quoted "unexpected antiquotation splice"
else if isAntiquotSplice quoted then throwErrorAt quoted "unexpected antiquotation splice"
else if quoted.getArgs.size == 1 && isAntiquotSuffixSplice quoted[0] then
let anti := getAntiquotTerm (getAntiquotSuffixSpliceInner quoted[0])
unconditionally fun rhs => match antiquotSuffixSplice? quoted[0] with
| `optional => `(let $anti := Syntax.getOptional? discr; $rhs)
| `many => `(let $anti := Syntax.getArgs discr; $rhs)
| `sepBy => `(let $anti := @SepArray.mk $(getSepFromSplice quoted[0]) (Syntax.getArgs discr); $rhs)
| k => throwErrorAt quoted "invalid antiquotation suffix splice kind '{k}'"
else if quoted.getArgs.size == 1 && isAntiquotSplice quoted[0] then pure {
check := other pat,
onMatch := fun
| other pat' => if pat' == pat then covered pure (exhaustive := true) else undecided
| _ => undecided,
doMatch := fun yes no => do
let splice := quoted[0]
let k := antiquotSpliceKind? splice
let contents := getAntiquotSpliceContents splice
let ids ← getAntiquotationIds splice
let yes ← yes []
let no ← no
match k with
| `optional =>
let nones := mkArray ids.size (← `(none))
`(let_delayed yes _ $ids* := $yes;
if discr.isNone then yes () $[ $nones]*
else match discr with
| `($(mkNullNode contents)) => yes () $[ (some $ids)]*
| _ => $no)
| _ =>
let mut discrs ← `(Syntax.getArgs discr)
if k == `sepBy then
discrs ← `(Array.getSepElems $discrs)
let tuple ← mkTuple ids
let mut yes := yes
let resId ← match ids with
| #[id] => id
| _ =>
for id in ids do
yes ← `(let $id := tuples.map (fun $tuple => $id); $yes)
`(tuples)
let contents := if contents.size == 1
then contents[0]
else mkNullNode contents
`(match OptionM.run ($(discrs).sequenceMap fun
| `($contents) => some $tuple
| _ => none) with
| some $resId => $yes
| none => $no)
}
else if let some idx := quoted.getArgs.findIdx? (fun arg => isAntiquotSuffixSplice arg || isAntiquotSplice arg) then do
/-
pattern of the form `match discr, ... with | `(pat_0 ... pat_(idx-1) $[...]* pat_(idx+1) ...), ...`
transform to
```
if discr.getNumArgs >= $quoted.getNumArgs - 1 then
match discr[0], ..., discr[idx-1], mkNullNode (discr.getArgs.extract idx (discr.getNumArgs - $numSuffix))), ..., discr[quoted.getNumArgs - 1] with
| `(pat_0), ... `(pat_(idx-1)), `($[...])*, `(pat_(idx+1)), ...
```
-/
let numSuffix := quoted.getNumArgs - 1 - idx
pure {
check := slice idx numSuffix
onMatch := fun
| other _ => undecided
| slice p s =>
if p == idx && s == numSuffix then
let argPats := quoted.getArgs.mapIdx fun i arg =>
let arg := if (i : Nat) == idx then mkNullNode #[arg] else arg
Unhygienic.run `(`($(arg)))
covered (fun (pats, rhs) => (argPats.toList ++ pats, rhs)) (exhaustive := true)
else uncovered
| _ => uncovered
doMatch := fun yes no => do
let prefixDiscrs ← (List.range idx).mapM (`(Syntax.getArg discr $(quote ·)))
let sliceDiscr ← `(mkNullNode (discr.getArgs.extract $(quote idx) (discr.getNumArgs - $(quote numSuffix))))
let suffixDiscrs ← (List.range numSuffix).mapM fun i =>
`(Syntax.getArg discr (discr.getNumArgs - $(quote (numSuffix - i))))
`(ite (GE.ge discr.getNumArgs $(quote (quoted.getNumArgs - 1)))
$(← yes (prefixDiscrs ++ sliceDiscr :: suffixDiscrs))
$(← no))
}
else
-- not an antiquotation, or an escaped antiquotation: match head shape
let quoted := unescapeAntiquot quoted
let kind := quoted.getKind
let argPats := quoted.getArgs.map fun arg => Unhygienic.run `(`($(arg)))
pure {
check :=
if quoted.isIdent then
-- identifiers only match identical identifiers
-- NOTE: We could make this case more precise by including the matched identifier,
-- if any, in the `shape` constructor, but matching on literal identifiers is quite
-- rare.
other quoted
else
shape kind argPats.size,
onMatch := fun
| other stx' =>
if quoted.isIdent && quoted == stx' then
covered pure (exhaustive := true)
else
uncovered
| shape k' sz =>
if k' == kind && sz == argPats.size then
covered (fun (pats, rhs) => (argPats.toList ++ pats, rhs)) (exhaustive := true)
else
uncovered
| _ => uncovered,
doMatch := fun yes no => do
let cond ← match kind with
| `null => `(Syntax.matchesNull discr $(quote argPats.size))
| `ident => `(Syntax.matchesIdent discr $(quote quoted.getId))
| _ => `(Syntax.isOfKind discr $(quote kind))
let newDiscrs ← (List.range argPats.size).mapM fun i => `(Syntax.getArg discr $(quote i))
`(ite (Eq $cond true) $(← yes newDiscrs) $(← no))
}
else match pat with
| `(_) => unconditionally pure
| `($id:ident) => unconditionally (`(let $id := discr; $(·)))
| `($id:ident@$pat) => do
let info ← getHeadInfo (pat::alt.1.tail!, alt.2)
{ info with onMatch := fun taken => match info.onMatch taken with
| covered f exh => covered (fun alt => f alt >>= adaptRhs (`(let $id := discr; $(·)))) exh
| r => r }
| _ => throwErrorAt pat "match (syntax) : unexpected pattern kind {pat}"
-- Bind right-hand side to new `let_delayed` decl in order to prevent code duplication
private def deduplicate (floatedLetDecls : Array Syntax) : Alt → TermElabM (Array Syntax × Alt)
-- NOTE: new macro scope so that introduced bindings do not collide
| (pats, rhs) => do
if let `($f:ident $[ $args:ident]*) := rhs then
-- looks simple enough/created by this function, skip
return (floatedLetDecls, (pats, rhs))
withFreshMacroScope do
match (← getPatternsVars pats.toArray) with
| #[] =>
-- no antiquotations => introduce Unit parameter to preserve evaluation order
let rhs' ← `(rhs Unit.unit)
(floatedLetDecls.push (← `(letDecl|rhs _ := $rhs)), (pats, rhs'))
| vars =>
let rhs' ← `(rhs $vars*)
(floatedLetDecls.push (← `(letDecl|rhs $vars:ident* := $rhs)), (pats, rhs'))
private partial def compileStxMatch (discrs : List Syntax) (alts : List Alt) : TermElabM Syntax := do
trace[Elab.match_syntax] "match {discrs} with {alts}"
match discrs, alts with
| [], ([], rhs)::_ => pure rhs -- nothing left to match
| _, [] =>
logError "non-exhaustive 'match' (syntax)"
pure Syntax.missing
| discr::discrs, alt::alts => do
let info ← getHeadInfo alt
let pat := alt.1.head!
let alts ← (alt::alts).mapM fun alt => do ((← getHeadInfo alt).onMatch info.check, alt)
let mut yesAlts := #[]
let mut undecidedAlts := #[]
let mut nonExhaustiveAlts := #[]
let mut floatedLetDecls := #[]
for alt in alts do
let mut alt := alt
match alt with
| (covered f exh, alt') =>
-- we can only factor out a common check if there are no undecided patterns in between;
-- otherwise we would change the order of alternatives
if undecidedAlts.isEmpty then
yesAlts ← yesAlts.push <$> f (alt'.1.tail!, alt'.2)
if !exh then
nonExhaustiveAlts := nonExhaustiveAlts.push alt'
else
(floatedLetDecls, alt) ← deduplicate floatedLetDecls alt'
undecidedAlts := undecidedAlts.push alt
nonExhaustiveAlts := nonExhaustiveAlts.push alt
| (undecided, alt') =>
(floatedLetDecls, alt) ← deduplicate floatedLetDecls alt'
undecidedAlts := undecidedAlts.push alt
nonExhaustiveAlts := nonExhaustiveAlts.push alt
| (uncovered, alt') =>
nonExhaustiveAlts := nonExhaustiveAlts.push alt'
let mut stx ← info.doMatch
(yes := fun newDiscrs => do
let mut yesAlts := yesAlts
if !undecidedAlts.isEmpty then
-- group undecided alternatives in a new default case `| discr2, ... => match discr, discr2, ... with ...`
let vars ← discrs.mapM fun _ => withFreshMacroScope `(discr)
let pats := List.replicate newDiscrs.length (Unhygienic.run `(_)) ++ vars
let alts ← undecidedAlts.mapM fun alt => `(matchAltExpr| | $(alt.1.toArray),* => $(alt.2))
let rhs ← `(match discr, $[$(vars.toArray):term],* with $alts:matchAlt*)
yesAlts := yesAlts.push (pats, rhs)
withFreshMacroScope $ compileStxMatch (newDiscrs ++ discrs) yesAlts.toList)
(no := withFreshMacroScope $ compileStxMatch (discr::discrs) nonExhaustiveAlts.toList)
for d in floatedLetDecls do
stx ← `(let_delayed $d:letDecl; $stx)
`(let discr := $discr; $stx)
| _, _ => unreachable!
def match_syntax.expand (stx : Syntax) : TermElabM Syntax := do
match stx with
| `(match $[$discrs:term],* with $[| $[$patss],* => $rhss]*) => do
if !patss.any (·.any (fun
| `($id@$pat) => pat.isQuot
| pat => pat.isQuot)) then
-- no quotations => fall back to regular `match`
throwUnsupportedSyntax
let stx ← compileStxMatch discrs.toList (patss.map (·.toList) |>.zip rhss).toList
trace[Elab.match_syntax.result] "{stx}"
stx
| _ => throwUnsupportedSyntax
/-- Syntactic pattern match. Matches a `Syntax` value against quotations, pattern variables, or `_`. -/
@[builtinTermElab «match»] def elabMatchSyntax : TermElab :=
adaptExpander match_syntax.expand
builtin_initialize
registerTraceClass `Elab.match_syntax
registerTraceClass `Elab.match_syntax.result
end Lean.Elab.Term.Quotation
|
889114abb14b9864d79315e9ccb12e3b665b748c
|
35677d2df3f081738fa6b08138e03ee36bc33cad
|
/src/category_theory/limits/shapes/kernels.lean
|
6d2bb59ea823d027f11ac454a492204c6c16da72
|
[
"Apache-2.0"
] |
permissive
|
gebner/mathlib
|
eab0150cc4f79ec45d2016a8c21750244a2e7ff0
|
cc6a6edc397c55118df62831e23bfbd6e6c6b4ab
|
refs/heads/master
| 1,625,574,853,976
| 1,586,712,827,000
| 1,586,712,827,000
| 99,101,412
| 1
| 0
|
Apache-2.0
| 1,586,716,389,000
| 1,501,667,958,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 9,704
|
lean
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Markus Himmel
-/
import category_theory.limits.shapes.zero
import category_theory.limits.shapes.equalizers
/-!
# Kernels and cokernels
In a category with zero morphisms, the kernel of a morphism `f : X ⟶ Y` is just the equalizer of `f`
and `0 : X ⟶ Y`. (Similarly the cokernel is the coequalizer.)
We don't yet prove much here, just provide
* `kernel : (X ⟶ Y) → C`
* `kernel.ι : kernel f ⟶ X`
* `kernel.condition : kernel.ι f ≫ f = 0` and
* `kernel.lift (k : W ⟶ X) (h : k ≫ f = 0) : W ⟶ kernel f` (as well as the dual versions)
## Main statements
Besides the definition and lifts,
* `kernel.ι_zero_is_iso`: a kernel map of a zero morphism is an isomorphism
* `kernel.is_limit_cone_zero_cone`: if our category has a zero object, then the map from the zero
  object is a kernel map of any monomorphism
## Future work
* TODO: images and coimages, and then abelian categories.
* TODO: connect this with existing work in the group theory and ring theory libraries.
## Implementation notes
As with the other special shapes in the limits library, all the definitions here are given as
`abbreviation`s of the general statements for limits, so all the `simp` lemmas and theorems about
general limits can be used.
## References
* [F. Borceux, *Handbook of Categorical Algebra 2*][borceux-vol2]
-/
universes v u
open category_theory
open category_theory.limits.walking_parallel_pair
namespace category_theory.limits
variables {C : Type u} [𝒞 : category.{v} C]
include 𝒞
variables {X Y : C} (f : X ⟶ Y)
section
variables [has_zero_morphisms.{v} C]
/-- A kernel fork is just a fork where the second morphism is a zero morphism. -/
abbreviation kernel_fork := fork f 0
variables {f}
@[simp, reassoc] lemma kernel_fork.condition (s : kernel_fork f) : fork.ι s ≫ f = 0 :=
by erw [fork.condition, has_zero_morphisms.comp_zero]
@[simp] lemma kernel_fork.app_one (s : kernel_fork f) : s.π.app one = 0 :=
by erw [←cone_parallel_pair_left, kernel_fork.condition]; refl
/-- A morphism `ι` satisfying `ι ≫ f = 0` determines a kernel fork over `f`. -/
abbreviation kernel_fork.of_ι {Z : C} (ι : Z ⟶ X) (w : ι ≫ f = 0) : kernel_fork f :=
fork.of_ι ι $ by rw [w, has_zero_morphisms.comp_zero]
end
section
variables [has_zero_morphisms.{v} C] [has_limit (parallel_pair f 0)]
/-- The kernel of a morphism, expressed as the equalizer with the 0 morphism. -/
abbreviation kernel : C := equalizer f 0
/-- The map from `kernel f` into the source of `f`. -/
abbreviation kernel.ι : kernel f ⟶ X := equalizer.ι f 0
@[simp, reassoc] lemma kernel.condition : kernel.ι f ≫ f = 0 :=
kernel_fork.condition _
/-- Given any morphism `k` so `k ≫ f = 0`, `k` factors through `kernel f`. -/
abbreviation kernel.lift {W : C} (k : W ⟶ X) (h : k ≫ f = 0) : W ⟶ kernel f :=
limit.lift (parallel_pair f 0) (kernel_fork.of_ι k h)
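-- Usage sketch (added for illustration; `W`, `k` and `h` are hypothetical names): any
-- morphism `k : W ⟶ X` that vanishes after composing with `f` factors through the kernel.
example {W : C} (k : W ⟶ X) (h : k ≫ f = 0) : W ⟶ kernel f := kernel.lift f k h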
/-- Every kernel of the zero morphism is an isomorphism -/
def kernel.ι_zero_is_iso [has_limit (parallel_pair (0 : X ⟶ Y) 0)] :
is_iso (kernel.ι (0 : X ⟶ Y)) :=
by { apply limit_cone_parallel_pair_self_is_iso, apply limit.is_limit }
end
section has_zero_object
variables [has_zero_object.{v} C]
local attribute [instance] has_zero_object.has_zero
variables [has_zero_morphisms.{v} C]
/-- The morphism from the zero object determines a cone on a kernel diagram -/
def kernel.zero_cone : cone (parallel_pair f 0) :=
{ X := 0,
π := { app := λ j, 0 }}
/-- The map from the zero object is a kernel of a monomorphism -/
def kernel.is_limit_cone_zero_cone [mono f] : is_limit (kernel.zero_cone f) :=
fork.is_limit.mk _ (λ s, 0)
(λ s, by { erw has_zero_morphisms.zero_comp,
convert (@zero_of_comp_mono _ _ _ _ _ _ _ f _ _).symm,
exact kernel_fork.condition _ })
(λ _ _ _, has_zero_object.zero_of_to_zero _)
/-- The kernel of a monomorphism is isomorphic to the zero object -/
def kernel.of_mono [has_limit (parallel_pair f 0)] [mono f] : kernel f ≅ 0 :=
functor.map_iso (cones.forget _) $ is_limit.unique_up_to_iso
(limit.is_limit (parallel_pair f 0)) (kernel.is_limit_cone_zero_cone f)
/-- The kernel morphism of a monomorphism is a zero morphism -/
lemma kernel.ι_of_mono [has_limit (parallel_pair f 0)] [mono f] : kernel.ι f = 0 :=
by rw [←category.id_comp (kernel.ι f), ←iso.hom_inv_id (kernel.of_mono f), category.assoc,
has_zero_object.zero_of_to_zero (kernel.of_mono f).hom, has_zero_morphisms.zero_comp]
end has_zero_object
section
variables (X) (Y) [has_zero_morphisms.{v} C]
/-- The kernel morphism of a zero morphism is an isomorphism -/
def kernel.ι_of_zero [has_limit (parallel_pair (0 : X ⟶ Y) 0)] : is_iso (kernel.ι (0 : X ⟶ Y)) :=
equalizer.ι_of_self _
end
section
variables [has_zero_morphisms.{v} C]
/-- A cokernel cofork is just a cofork where the second morphism is a zero morphism. -/
abbreviation cokernel_cofork := cofork f 0
variables {f}
@[simp, reassoc] lemma cokernel_cofork.condition (s : cokernel_cofork f) : f ≫ cofork.π s = 0 :=
by erw [cofork.condition, has_zero_morphisms.zero_comp]
@[simp] lemma cokernel_cofork.app_zero (s : cokernel_cofork f) : s.ι.app zero = 0 :=
by erw [←cocone_parallel_pair_left, cokernel_cofork.condition]; refl
/-- A morphism `π` satisfying `f ≫ π = 0` determines a cokernel cofork on `f`. -/
abbreviation cokernel_cofork.of_π {Z : C} (π : Y ⟶ Z) (w : f ≫ π = 0) : cokernel_cofork f :=
cofork.of_π π $ by rw [w, has_zero_morphisms.zero_comp]
end
section
variables [has_zero_morphisms.{v} C] [has_colimit (parallel_pair f 0)]
/-- The cokernel of a morphism, expressed as the coequalizer with the 0 morphism. -/
abbreviation cokernel : C := coequalizer f 0
/-- The map from the target of `f` to `cokernel f`. -/
abbreviation cokernel.π : Y ⟶ cokernel f := coequalizer.π f 0
@[simp, reassoc] lemma cokernel.condition : f ≫ cokernel.π f = 0 :=
cokernel_cofork.condition _
/-- Given any morphism `k` so `f ≫ k = 0`, `k` factors through `cokernel f`. -/
abbreviation cokernel.desc {W : C} (k : Y ⟶ W) (h : f ≫ k = 0) : cokernel f ⟶ W :=
colimit.desc (parallel_pair f 0) (cokernel_cofork.of_π k h)
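-- Dual usage sketch (added for illustration; `W`, `k` and `h` are hypothetical names): a
-- morphism `k : Y ⟶ W` killed by precomposition with `f` factors through the cokernel.
example {W : C} (k : Y ⟶ W) (h : f ≫ k = 0) : cokernel f ⟶ W := cokernel.desc f k h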
end
section has_zero_object
variables [has_zero_object.{v} C]
local attribute [instance] has_zero_object.has_zero
variable [has_zero_morphisms.{v} C]
/-- The morphism to the zero object determines a cocone on a cokernel diagram -/
def cokernel.zero_cocone : cocone (parallel_pair f 0) :=
{ X := 0,
ι := { app := λ j, 0 } }
/-- The morphism to the zero object is a cokernel of an epimorphism -/
def cokernel.is_colimit_cocone_zero_cocone [epi f] : is_colimit (cokernel.zero_cocone f) :=
cofork.is_colimit.mk _ (λ s, 0)
(λ s, by { erw has_zero_morphisms.zero_comp,
convert (@zero_of_comp_epi _ _ _ _ _ _ f _ _ _).symm,
exact cokernel_cofork.condition _ })
(λ _ _ _, has_zero_object.zero_of_from_zero _)
/-- The cokernel of an epimorphism is isomorphic to the zero object -/
def cokernel.of_epi [has_colimit (parallel_pair f 0)] [epi f] : cokernel f ≅ 0 :=
functor.map_iso (cocones.forget _) $ is_colimit.unique_up_to_iso
(colimit.is_colimit (parallel_pair f 0)) (cokernel.is_colimit_cocone_zero_cocone f)
/-- The cokernel morphism of an epimorphism is a zero morphism -/
lemma cokernel.π_of_epi [has_colimit (parallel_pair f 0)] [epi f] : cokernel.π f = 0 :=
by rw [←category.comp_id (cokernel.π f), ←iso.hom_inv_id (cokernel.of_epi f), ←category.assoc,
has_zero_object.zero_of_from_zero (cokernel.of_epi f).inv, has_zero_morphisms.comp_zero]
end has_zero_object
section
variables (X) (Y) [has_zero_morphisms.{v} C]
/-- The cokernel morphism of a zero morphism is an isomorphism -/
def cokernel.π_of_zero [has_colimit (parallel_pair (0 : X ⟶ Y) 0)] :
is_iso (cokernel.π (0 : X ⟶ Y)) :=
coequalizer.π_of_self _
end
section has_zero_object
variables [has_zero_object.{v} C]
local attribute [instance] has_zero_object.has_zero
variables [has_zero_morphisms.{v} C]
/-- The kernel of the cokernel of an epimorphism is an isomorphism -/
instance kernel.of_cokernel_of_epi [has_colimit (parallel_pair f 0)]
[has_limit (parallel_pair (cokernel.π f) 0)] [epi f] : is_iso (kernel.ι (cokernel.π f)) :=
equalizer.ι_of_self' _ _ $ cokernel.π_of_epi f
/-- The cokernel of the kernel of a monomorphism is an isomorphism -/
instance cokernel.of_kernel_of_mono [has_limit (parallel_pair f 0)]
[has_colimit (parallel_pair (kernel.ι f) 0)] [mono f] : is_iso (cokernel.π (kernel.ι f)) :=
coequalizer.π_of_self' _ _ $ kernel.ι_of_mono f
end has_zero_object
end category_theory.limits
namespace category_theory.limits
variables (C : Type u) [𝒞 : category.{v} C]
include 𝒞
variables [has_zero_morphisms.{v} C]
/-- `has_kernels` represents a choice of kernel for every morphism -/
class has_kernels :=
(has_limit : Π {X Y : C} (f : X ⟶ Y), has_limit (parallel_pair f 0))
/-- `has_cokernels` represents a choice of cokernel for every morphism -/
class has_cokernels :=
(has_colimit : Π {X Y : C} (f : X ⟶ Y), has_colimit (parallel_pair f 0))
attribute [instance] has_kernels.has_limit has_cokernels.has_colimit
/-- Kernels are finite limits, so if `C` has all finite limits, it also has all kernels -/
def has_kernels_of_has_finite_limits [has_finite_limits.{v} C] : has_kernels.{v} C :=
{ has_limit := infer_instance }
/-- Cokernels are finite colimits, so if `C` has all finite colimits, it also has all cokernels -/
def has_cokernels_of_has_finite_colimits [has_finite_colimits.{v} C] : has_cokernels.{v} C :=
{ has_colimit := infer_instance }
end category_theory.limits
|
6c2d0eb6d2743abb54d5ffa02c7ca1e3103d8ddb
|
dd0f5513e11c52db157d2fcc8456d9401a6cd9da
|
/04_Quantifiers_and_Equality.org.19.lean
|
b3f2ffd176db4ba8480f1a303cdf849ad447744e
|
[] |
no_license
|
cjmazey/lean-tutorial
|
ba559a49f82aa6c5848b9bf17b7389bf7f4ba645
|
381f61c9fcac56d01d959ae0fa6e376f2c4e3b34
|
refs/heads/master
| 1,610,286,098,832
| 1,447,124,923,000
| 1,447,124,923,000
| 43,082,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 981
|
lean
|
/- page 54 -/
import standard
import data.nat
open nat
-- BEGIN
example (x y : ℕ) : (x + y) * (x + y) = x * x + y * x + x * y + y * y :=
calc
(x + y) * (x + y) = (x + y) * x + (x + y) * y : mul.left_distrib
... = x * x + y * x + (x + y) * y : mul.right_distrib
... = x * x + y * x + (x * y + y * y) : mul.right_distrib
... = x * x + y * x + x * y + y * y : add.assoc
-- END
check mul.left_distrib
check mul.comm
check add.comm
check mul_sub_right_distrib
check add_sub_add_left
example (x y : ℕ) : (x - y) * (x + y) = x * x - y * y :=
calc
(x - y) * (x + y) = x * (x + y) - y * (x + y) : mul_sub_right_distrib
... = x * x + x * y - y * (x + y) : mul.left_distrib
... = x * x + x * y - (y * x + y * y) : mul.left_distrib
... = x * y + x * x - (y * x + y * y) : add.comm
... = x * y + x * x - (x * y + y * y) : mul.comm
... = x * x - y * y : add_sub_add_left
|
c0ab351dc0b32dadd1a19a9c442808a138570176
|
556aeb81a103e9e0ac4e1fe0ce1bc6e6161c3c5e
|
/src/starkware/cairo/common/cairo_secp/verification/verification/signature_recover_public_key_ec_mul_inner_soundness.lean
|
095ab61e98373cf0df5254cf786defef39671ac7
|
[] |
permissive
|
starkware-libs/formal-proofs
|
d6b731604461bf99e6ba820e68acca62a21709e8
|
f5fa4ba6a471357fd171175183203d0b437f6527
|
refs/heads/master
| 1,691,085,444,753
| 1,690,507,386,000
| 1,690,507,386,000
| 410,476,629
| 32
| 9
|
Apache-2.0
| 1,690,506,773,000
| 1,632,639,790,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 27,534
|
lean
|
/-
File: signature_recover_public_key_ec_mul_inner_soundness.lean
Autogenerated file.
-/
import starkware.cairo.lean.semantics.soundness.hoare
import .signature_recover_public_key_code
import ..signature_recover_public_key_spec
import .signature_recover_public_key_fast_ec_add_soundness
import .signature_recover_public_key_ec_double_soundness
open tactic
open starkware.cairo.common.cairo_secp.ec
open starkware.cairo.common.cairo_secp.bigint
open starkware.cairo.common.cairo_secp.field
variables {F : Type} [field F] [decidable_eq F] [prelude_hyps F]
variable mem : F → F
variable σ : register_state F
/- starkware.cairo.common.cairo_secp.ec.ec_mul_inner autogenerated soundness theorem -/
theorem auto_sound_ec_mul_inner
-- arguments
(range_check_ptr : F) (point : EcPoint F) (scalar m : F)
-- code is in memory at σ.pc
(h_mem : mem_at mem code_ec_mul_inner σ.pc)
-- all dependencies are in memory
(h_mem_4 : mem_at mem code_nondet_bigint3 (σ.pc - 460))
(h_mem_5 : mem_at mem code_unreduced_mul (σ.pc - 448))
(h_mem_6 : mem_at mem code_unreduced_sqr (σ.pc - 428))
(h_mem_7 : mem_at mem code_verify_zero (σ.pc - 412))
(h_mem_12 : mem_at mem code_compute_doubling_slope (σ.pc - 284))
(h_mem_13 : mem_at mem code_compute_slope (σ.pc - 240))
(h_mem_14 : mem_at mem code_ec_double (σ.pc - 216))
(h_mem_15 : mem_at mem code_fast_ec_add (σ.pc - 143))
-- input arguments on the stack
(hin_range_check_ptr : range_check_ptr = mem (σ.fp - 11))
(hin_point : point = cast_EcPoint mem (σ.fp - 10))
(hin_scalar : scalar = mem (σ.fp - 4))
(hin_m : m = mem (σ.fp - 3))
-- conclusion
: ensures_ret mem σ (λ κ τ,
∃ μ ≤ κ, rc_ensures mem (rc_bound F) μ (mem (σ.fp - 11)) (mem $ τ.ap - 13)
(spec_ec_mul_inner mem κ range_check_ptr point scalar m (mem (τ.ap - 13)) (cast_EcPoint mem (τ.ap - 12)) (cast_EcPoint mem (τ.ap - 6)))) :=
begin
apply ensures_of_ensuresb, intro νbound,
revert σ range_check_ptr point scalar m h_mem h_mem_4 h_mem_5 h_mem_6 h_mem_7 h_mem_12 h_mem_13 h_mem_14 h_mem_15 hin_range_check_ptr hin_point hin_scalar hin_m,
induction νbound with νbound νih,
{ intros, intros n nlt, apply absurd nlt (nat.not_lt_zero _) },
intros σ range_check_ptr point scalar m h_mem h_mem_4 h_mem_5 h_mem_6 h_mem_7 h_mem_12 h_mem_13 h_mem_14 h_mem_15 hin_range_check_ptr hin_point hin_scalar hin_m,
dsimp at νih,
have h_mem_rec := h_mem,
unpack_memory code_ec_mul_inner at h_mem with ⟨hpc0, hpc1, hpc2, hpc3, hpc4, hpc5, hpc6, hpc7, hpc8, hpc9, hpc10, hpc11, hpc12, hpc13, hpc14, hpc15, hpc16, hpc17, hpc18, hpc19, hpc20, hpc21, hpc22, hpc23, hpc24, hpc25, hpc26, hpc27, hpc28, hpc29, hpc30, hpc31, hpc32, hpc33, hpc34, hpc35, hpc36, hpc37, hpc38, hpc39, hpc40, hpc41, hpc42, hpc43, hpc44, hpc45, hpc46, hpc47, hpc48, hpc49, hpc50, hpc51, hpc52, hpc53, hpc54, hpc55, hpc56, hpc57, hpc58, hpc59, hpc60, hpc61, hpc62, hpc63, hpc64, hpc65, hpc66, hpc67, hpc68, hpc69, hpc70, hpc71, hpc72, hpc73, hpc74, hpc75, hpc76, hpc77, hpc78, hpc79, hpc80, hpc81, hpc82, hpc83, hpc84, hpc85, hpc86, hpc87, hpc88, hpc89, hpc90, hpc91, hpc92, hpc93, hpc94, hpc95, hpc96, hpc97, hpc98, hpc99, hpc100⟩,
-- if statement
step_jnz hpc0 hpc1 with hcond hcond,
{
-- if: positive branch
have a0 : m = 0, {
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [hcond] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },
},
try { dsimp at a0 }, try { arith_simps at a0 },
clear hcond,
-- assert eq
step_assert_eq hpc2 hpc3 with temp0,
have a2: scalar = 0, {
apply assert_eq_reduction temp0,
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },
},
try { dsimp at a2 }, try { arith_simps at a2 },
clear temp0,
-- let
generalize' hl_rev_ZERO_POINT: ({
x := { d0 := 0, d1 := 0, d2 := 0 },
y := { d0 := 0, d1 := 0, d2 := 0 }
} : EcPoint F) = ZERO_POINT,
have hl_ZERO_POINT := hl_rev_ZERO_POINT.symm, clear hl_rev_ZERO_POINT,
try { dsimp at hl_ZERO_POINT }, try { arith_simps at hl_ZERO_POINT },
-- return
step_assert_eq hpc4 with hret0,
step_assert_eq hpc5 with hret1,
step_assert_eq hpc6 with hret2,
step_assert_eq hpc7 with hret3,
step_assert_eq hpc8 with hret4,
step_assert_eq hpc9 with hret5,
step_assert_eq hpc10 with hret6,
step_assert_eq hpc11 hpc12 with hret7,
step_assert_eq hpc13 hpc14 with hret8,
step_assert_eq hpc15 hpc16 with hret9,
step_assert_eq hpc17 hpc18 with hret10,
step_assert_eq hpc19 hpc20 with hret11,
step_assert_eq hpc21 hpc22 with hret12,
step_ret hpc23,
-- finish
step_done, use_only [rfl, rfl],
-- range check condition
use_only (0+0), split,
linarith [],
split,
{ arith_simps, try { simp only [hret0 ,hret1 ,hret2 ,hret3 ,hret4 ,hret5 ,hret6 ,hret7 ,hret8 ,hret9 ,hret10 ,hret11 ,hret12] },
try { arith_simps, refl <|> norm_cast }, try { refl } },
intro rc_h_range_check_ptr, repeat { rw [add_assoc] at rc_h_range_check_ptr },
have rc_h_range_check_ptr' := range_checked_add_right rc_h_range_check_ptr,
-- Final Proof
-- user-provided reduction
suffices auto_spec: auto_spec_ec_mul_inner mem _ range_check_ptr point scalar m _ _ _,
{ apply sound_ec_mul_inner, apply auto_spec },
-- prove the auto generated assertion
dsimp [auto_spec_ec_mul_inner],
try { norm_num1 }, try { arith_simps },
left,
use_only [a0],
use_only [a2],
use_only [ZERO_POINT, hl_ZERO_POINT],
try { split, linarith },
try { ensures_simps; try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, hl_ZERO_POINT] }, },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [hret0, hret1, hret2, hret3, hret4, hret5, hret6, hret7, hret8, hret9, hret10, hret11, hret12] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },
},
{
-- if: negative branch
have a0 : m ≠ 0, {
try { simp only [ne.def] },
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [hcond] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },
},
try { dsimp at a0 }, try { arith_simps at a0 },
clear hcond,
-- ap += 6
step_advance_ap hpc24 hpc25,
-- function call
step_assert_eq hpc26 with arg0,
step_assert_eq hpc27 with arg1,
step_assert_eq hpc28 with arg2,
step_assert_eq hpc29 with arg3,
step_assert_eq hpc30 with arg4,
step_assert_eq hpc31 with arg5,
step_assert_eq hpc32 with arg6,
step_sub hpc33 (auto_sound_ec_double mem _ range_check_ptr point _ _ _ _ _ _ _ _),
{ rw hpc34, norm_num2, exact h_mem_14 },
{ rw hpc34, norm_num2, exact h_mem_4 },
{ rw hpc34, norm_num2, exact h_mem_5 },
{ rw hpc34, norm_num2, exact h_mem_6 },
{ rw hpc34, norm_num2, exact h_mem_7 },
{ rw hpc34, norm_num2, exact h_mem_12 },
{ try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
{ try { ext } ; {
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },}, },
intros κ_call35 ap35 h_call35,
rcases h_call35 with ⟨rc_m35, rc_mle35, hl_range_check_ptr₁, h_call35⟩,
generalize' hr_rev_range_check_ptr₁: mem (ap35 - 7) = range_check_ptr₁,
have htv_range_check_ptr₁ := hr_rev_range_check_ptr₁.symm, clear hr_rev_range_check_ptr₁,
generalize' hr_rev_double_point: cast_EcPoint mem (ap35 - 6) = double_point,
simp only [hr_rev_double_point] at h_call35,
have htv_double_point := hr_rev_double_point.symm, clear hr_rev_double_point,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6] at hl_range_check_ptr₁ },
rw [←htv_range_check_ptr₁, ←hin_range_check_ptr] at hl_range_check_ptr₁,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6] at h_call35 },
rw [hin_range_check_ptr] at h_call35,
clear arg0 arg1 arg2 arg3 arg4 arg5 arg6,
-- jnz
apply of_register_state,
intros regstate35 regstate35eq,
have regstateapeq_a35 := congr_arg register_state.ap regstate35eq,
try { dsimp at regstateapeq_a35 },
step_jnz hpc35 hpc36 with a35 a35,
{
-- jnz: positive branch
rw ←regstateapeq_a35 at a35,
-- tail recursive function call
step_assert_eq hpc37 with arg0,
step_assert_eq hpc38 with arg1,
step_assert_eq hpc39 with arg2,
step_assert_eq hpc40 with arg3,
step_assert_eq hpc41 with arg4,
step_assert_eq hpc42 with arg5,
step_assert_eq hpc43 with arg6,
step_assert_eq hpc44 hpc45 with arg7,
step_assert_eq hpc46 hpc47 with arg8,
have h_δ37_c0 : ∀ x : F, x / (2 : ℤ) = x * (-1809251394333065606848661391547535052811553607665798349986546028067936010240 : ℤ),
{ intro x, apply div_eq_mul_inv', apply PRIME.int_cast_mul_eq_one, rw [PRIME], try { simp_int_casts }, norm_num1 },
have h_δ37_c0_fz : ∀ x : F, x / 2 = x / (2 : ℤ), { intro x, norm_cast },
step_rec_sub hpc48 (νih _ range_check_ptr₁ double_point (scalar / (2 : ℤ)) (m - 1) _ _ _ _ _ _ _ _ _ _ _ _ _),
{ rw hpc49, norm_num, exact h_mem_rec },
{ rw hpc49, norm_num2, exact h_mem_4 },
{ rw hpc49, norm_num2, exact h_mem_5 },
{ rw hpc49, norm_num2, exact h_mem_6 },
{ rw hpc49, norm_num2, exact h_mem_7 },
{ rw hpc49, norm_num2, exact h_mem_12 },
{ rw hpc49, norm_num2, exact h_mem_13 },
{ rw hpc49, norm_num2, exact h_mem_14 },
{ rw hpc49, norm_num2, exact h_mem_15 },
{ try { simp only [h_δ37_c0_fz, h_δ37_c0] }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
{ try { simp only [h_δ37_c0_fz, h_δ37_c0] }, try { ext } ; {
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },}, },
{ try { simp only [h_δ37_c0_fz, h_δ37_c0] }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
{ try { simp only [h_δ37_c0_fz, h_δ37_c0] }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
intros κ_call50 ap50 h_call50,
rcases h_call50 with ⟨rc_m50, rc_mle50, hl_range_check_ptr₂, h_call50⟩,
step_ret hpc50,
generalize' hr_rev_range_check_ptr₂: mem (ap50 - 13) = range_check_ptr₂,
have htv_range_check_ptr₂ := hr_rev_range_check_ptr₂.symm, clear hr_rev_range_check_ptr₂,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6 ,arg7 ,arg8] at hl_range_check_ptr₂ },
rw [←htv_range_check_ptr₂, ←htv_range_check_ptr₁] at hl_range_check_ptr₂,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6 ,arg7 ,arg8] at h_call50 },
rw [←htv_range_check_ptr₁, hl_range_check_ptr₁, hin_range_check_ptr] at h_call50,
clear arg0 arg1 arg2 arg3 arg4 arg5 arg6 arg7 arg8,
-- finish
step_done, use_only [rfl, rfl],
-- range check condition
use_only (rc_m35+rc_m50+0+0), split,
linarith [rc_mle35, rc_mle50],
split,
{ arith_simps,
rw [←htv_range_check_ptr₂, hl_range_check_ptr₂, hl_range_check_ptr₁, hin_range_check_ptr],
try { arith_simps, refl <|> norm_cast }, try { refl } },
intro rc_h_range_check_ptr, repeat { rw [add_assoc] at rc_h_range_check_ptr },
have rc_h_range_check_ptr' := range_checked_add_right rc_h_range_check_ptr,
-- Final Proof
-- user-provided reduction
suffices auto_spec: auto_spec_ec_mul_inner mem _ range_check_ptr point scalar m _ _ _,
{ apply sound_ec_mul_inner, apply auto_spec },
-- prove the auto generated assertion
dsimp [auto_spec_ec_mul_inner],
try { norm_num1 }, try { arith_simps },
right,
use_only [a0],
use_only [κ_call35],
use_only [range_check_ptr₁],
use_only [double_point],
have rc_h_range_check_ptr₁ := range_checked_offset' rc_h_range_check_ptr,
have rc_h_range_check_ptr₁' := range_checked_add_right rc_h_range_check_ptr₁, try { norm_cast at rc_h_range_check_ptr₁' },
have spec35 := h_call35 rc_h_range_check_ptr',
rw [←hin_range_check_ptr, ←htv_range_check_ptr₁] at spec35,
try { dsimp at spec35, arith_simps at spec35 },
use_only [spec35],
use_only (mem regstate35.ap),
left,
use_only [a35],
use_only [κ_call50],
have rc_h_range_check_ptr₂ := range_checked_offset' rc_h_range_check_ptr₁,
have rc_h_range_check_ptr₂' := range_checked_add_right rc_h_range_check_ptr₂, try { norm_cast at rc_h_range_check_ptr₂' },
have spec50 := h_call50 rc_h_range_check_ptr₁',
rw [←hin_range_check_ptr, ←hl_range_check_ptr₁] at spec50,
try { dsimp at spec50, arith_simps at spec50 },
use_only [spec50],
try { linarith },
},
{
-- jnz: negative branch
rw ←regstateapeq_a35 at a35,
-- recursive function call
step_assert_eq hpc51 hpc52 with arg0,
step_assert_eq hpc53 with arg1,
step_assert_eq hpc54 with arg2,
step_assert_eq hpc55 with arg3,
step_assert_eq hpc56 with arg4,
step_assert_eq hpc57 with arg5,
step_assert_eq hpc58 with arg6,
step_assert_eq hpc59 with arg7,
step_assert_eq hpc60 hpc61 with arg8,
step_assert_eq hpc62 hpc63 with arg9,
have h_δ51_c0 : ∀ x : F, x / (2 : ℤ) = x * (-1809251394333065606848661391547535052811553607665798349986546028067936010240 : ℤ),
{ intro x, apply div_eq_mul_inv', apply PRIME.int_cast_mul_eq_one, rw [PRIME], try { simp_int_casts }, norm_num1 },
have h_δ51_c0_fz : ∀ x : F, x / 2 = x / (2 : ℤ), { intro x, norm_cast },
step_rec_sub hpc64 (νih _ range_check_ptr₁ double_point ((scalar - 1) / (2 : ℤ)) (m - 1) _ _ _ _ _ _ _ _ _ _ _ _ _),
{ rw hpc65, norm_num, exact h_mem_rec },
{ rw hpc65, norm_num2, exact h_mem_4 },
{ rw hpc65, norm_num2, exact h_mem_5 },
{ rw hpc65, norm_num2, exact h_mem_6 },
{ rw hpc65, norm_num2, exact h_mem_7 },
{ rw hpc65, norm_num2, exact h_mem_12 },
{ rw hpc65, norm_num2, exact h_mem_13 },
{ rw hpc65, norm_num2, exact h_mem_14 },
{ rw hpc65, norm_num2, exact h_mem_15 },
{ try { simp only [h_δ51_c0_fz, h_δ51_c0] }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
{ try { simp only [h_δ51_c0_fz, h_δ51_c0] }, try { ext } ; {
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },}, },
{ try { simp only [h_δ51_c0_fz, h_δ51_c0] }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
{ try { simp only [h_δ51_c0_fz, h_δ51_c0] }, try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
intros κ_call66 ap66 h_call66,
rcases h_call66 with ⟨rc_m66, rc_mle66, hl_range_check_ptr₂, h_call66⟩,
generalize' hr_rev_range_check_ptr₂: mem (ap66 - 13) = range_check_ptr₂,
have htv_range_check_ptr₂ := hr_rev_range_check_ptr₂.symm, clear hr_rev_range_check_ptr₂,
generalize' hr_rev_inner_pow2: cast_EcPoint mem (ap66 - 12) = inner_pow2,
simp only [hr_rev_inner_pow2] at h_call66,
have htv_inner_pow2 := hr_rev_inner_pow2.symm, clear hr_rev_inner_pow2,
generalize' hr_rev_inner_res: cast_EcPoint mem (ap66 - 6) = inner_res,
simp only [hr_rev_inner_res] at h_call66,
have htv_inner_res := hr_rev_inner_res.symm, clear hr_rev_inner_res,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6 ,arg7 ,arg8 ,arg9] at hl_range_check_ptr₂ },
rw [←htv_range_check_ptr₂, ←htv_range_check_ptr₁] at hl_range_check_ptr₂,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6 ,arg7 ,arg8 ,arg9] at h_call66 },
rw [←htv_range_check_ptr₁, hl_range_check_ptr₁, hin_range_check_ptr] at h_call66,
clear arg0 arg1 arg2 arg3 arg4 arg5 arg6 arg7 arg8 arg9,
-- local var
step_assert_eq hpc66 with temp0,
step_assert_eq hpc67 with temp1,
step_assert_eq hpc68 with temp2,
step_assert_eq hpc69 with temp3,
step_assert_eq hpc70 with temp4,
step_assert_eq hpc71 with temp5,
have lc_inner_pow2: inner_pow2 = cast_EcPoint mem σ.fp, {
try { ext } ; {
try { simp only [htv_inner_pow2] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [temp0, temp1, temp2, temp3, temp4, temp5] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },}, },
clear temp0 temp1 temp2 temp3 temp4 temp5,
-- function call
step_assert_eq hpc72 with arg0,
step_assert_eq hpc73 with arg1,
step_assert_eq hpc74 with arg2,
step_assert_eq hpc75 with arg3,
step_assert_eq hpc76 with arg4,
step_assert_eq hpc77 with arg5,
step_assert_eq hpc78 with arg6,
step_assert_eq hpc79 with arg7,
step_assert_eq hpc80 with arg8,
step_assert_eq hpc81 with arg9,
step_assert_eq hpc82 with arg10,
step_assert_eq hpc83 with arg11,
step_assert_eq hpc84 with arg12,
step_sub hpc85 (auto_sound_fast_ec_add mem _ range_check_ptr₂ point inner_res _ _ _ _ _ _ _ _ _),
{ rw hpc86, norm_num2, exact h_mem_15 },
{ rw hpc86, norm_num2, exact h_mem_4 },
{ rw hpc86, norm_num2, exact h_mem_5 },
{ rw hpc86, norm_num2, exact h_mem_6 },
{ rw hpc86, norm_num2, exact h_mem_7 },
{ rw hpc86, norm_num2, exact h_mem_13 },
{ try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point, htv_range_check_ptr₂, htv_inner_pow2, htv_inner_res, lc_inner_pow2] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } }, },
{ try { ext } ; {
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point, htv_range_check_ptr₂, htv_inner_pow2, htv_inner_res, lc_inner_pow2] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },}, },
{ try { ext } ; {
try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point, htv_range_check_ptr₂, htv_inner_pow2, htv_inner_res, lc_inner_pow2] },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },}, },
intros κ_call87 ap87 h_call87,
rcases h_call87 with ⟨rc_m87, rc_mle87, hl_range_check_ptr₃, h_call87⟩,
generalize' hr_rev_range_check_ptr₃: mem (ap87 - 7) = range_check_ptr₃,
have htv_range_check_ptr₃ := hr_rev_range_check_ptr₃.symm, clear hr_rev_range_check_ptr₃,
generalize' hr_rev_res: cast_EcPoint mem (ap87 - 6) = res,
simp only [hr_rev_res] at h_call87,
have htv_res := hr_rev_res.symm, clear hr_rev_res,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6 ,arg7 ,arg8 ,arg9 ,arg10 ,arg11 ,arg12] at hl_range_check_ptr₃ },
rw [←htv_range_check_ptr₃, ←htv_range_check_ptr₂] at hl_range_check_ptr₃,
try { simp only [arg0 ,arg1 ,arg2 ,arg3 ,arg4 ,arg5 ,arg6 ,arg7 ,arg8 ,arg9 ,arg10 ,arg11 ,arg12] at h_call87 },
rw [←htv_range_check_ptr₂, hl_range_check_ptr₂, hl_range_check_ptr₁, hin_range_check_ptr] at h_call87,
clear arg0 arg1 arg2 arg3 arg4 arg5 arg6 arg7 arg8 arg9 arg10 arg11 arg12,
-- return
step_assert_eq hpc87 with hret0,
step_assert_eq hpc88 with hret1,
step_assert_eq hpc89 with hret2,
step_assert_eq hpc90 with hret3,
step_assert_eq hpc91 with hret4,
step_assert_eq hpc92 with hret5,
step_assert_eq hpc93 with hret6,
step_assert_eq hpc94 with hret7,
step_assert_eq hpc95 with hret8,
step_assert_eq hpc96 with hret9,
step_assert_eq hpc97 with hret10,
step_assert_eq hpc98 with hret11,
step_assert_eq hpc99 with hret12,
step_ret hpc100,
-- finish
step_done, use_only [rfl, rfl],
-- range check condition
use_only (rc_m35+rc_m66+rc_m87+0+0), split,
linarith [rc_mle35, rc_mle66, rc_mle87],
split,
{ arith_simps, try { simp only [hret0 ,hret1 ,hret2 ,hret3 ,hret4 ,hret5 ,hret6 ,hret7 ,hret8 ,hret9 ,hret10 ,hret11 ,hret12] },
rw [←htv_range_check_ptr₃, hl_range_check_ptr₃, hl_range_check_ptr₂, hl_range_check_ptr₁, hin_range_check_ptr],
try { arith_simps, refl <|> norm_cast }, try { refl } },
intro rc_h_range_check_ptr, repeat { rw [add_assoc] at rc_h_range_check_ptr },
have rc_h_range_check_ptr' := range_checked_add_right rc_h_range_check_ptr,
-- Final Proof
-- user-provided reduction
suffices auto_spec: auto_spec_ec_mul_inner mem _ range_check_ptr point scalar m _ _ _,
{ apply sound_ec_mul_inner, apply auto_spec },
-- prove the auto generated assertion
dsimp [auto_spec_ec_mul_inner],
try { norm_num1 }, try { arith_simps },
right,
use_only [a0],
use_only [κ_call35],
use_only [range_check_ptr₁],
use_only [double_point],
have rc_h_range_check_ptr₁ := range_checked_offset' rc_h_range_check_ptr,
have rc_h_range_check_ptr₁' := range_checked_add_right rc_h_range_check_ptr₁, try { norm_cast at rc_h_range_check_ptr₁' },
have spec35 := h_call35 rc_h_range_check_ptr',
rw [←hin_range_check_ptr, ←htv_range_check_ptr₁] at spec35,
try { dsimp at spec35, arith_simps at spec35 },
use_only [spec35],
use_only (mem regstate35.ap),
right,
use_only [a35],
use_only [κ_call66],
use_only [range_check_ptr₂],
use_only [inner_pow2],
use_only [inner_res],
have rc_h_range_check_ptr₂ := range_checked_offset' rc_h_range_check_ptr₁,
have rc_h_range_check_ptr₂' := range_checked_add_right rc_h_range_check_ptr₂, try { norm_cast at rc_h_range_check_ptr₂' },
have spec66 := h_call66 rc_h_range_check_ptr₁',
rw [←hin_range_check_ptr, ←hl_range_check_ptr₁, ←htv_range_check_ptr₂] at spec66,
try { dsimp at spec66, arith_simps at spec66 },
use_only [spec66],
use_only [κ_call87],
use_only [range_check_ptr₃],
use_only [res],
have rc_h_range_check_ptr₃ := range_checked_offset' rc_h_range_check_ptr₂,
have rc_h_range_check_ptr₃' := range_checked_add_right rc_h_range_check_ptr₃, try { norm_cast at rc_h_range_check_ptr₃' },
have spec87 := h_call87 rc_h_range_check_ptr₂',
rw [←hin_range_check_ptr, ←hl_range_check_ptr₁, ←hl_range_check_ptr₂, ←htv_range_check_ptr₃] at spec87,
try { dsimp at spec87, arith_simps at spec87 },
use_only [spec87],
try { split, linarith },
try { ensures_simps; try { simp only [add_neg_eq_sub, hin_range_check_ptr, hin_point, hin_scalar, hin_m, htv_range_check_ptr₁, htv_double_point, htv_range_check_ptr₂, htv_inner_pow2, htv_inner_res, lc_inner_pow2, htv_range_check_ptr₃, htv_res] }, },
try { dsimp [cast_EcPoint, cast_BigInt3] },
try { arith_simps }, try { simp only [hret0, hret1, hret2, hret3, hret4, hret5, hret6, hret7, hret8, hret9, hret10, hret11, hret12] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },
}
}
end
|
b00dfa3133983b77234f0e87f243deda6e0357a7
|
a721fe7446524f18ba361625fc01033d9c8b7a78
|
/src/principia/mylist/sorting.lean
|
0541f993979568aa8eac14d60e757643c8a1d879
|
[] |
no_license
|
Sterrs/leaning
|
8fd80d1f0a6117a220bb2e57ece639b9a63deadc
|
3901cc953694b33adda86cb88ca30ba99594db31
|
refs/heads/master
| 1,627,023,822,744
| 1,616,515,221,000
| 1,616,515,221,000
| 245,512,190
| 2
| 0
| null | 1,616,429,050,000
| 1,583,527,118,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 18,585
|
lean
|
import .mylist
import ..mynat.nat_sub
-- Sorting algorithms and permutations on lists of naturals
-- can probably be generalised to some type class thing
-- TODO: encode/state criterion for a sorting algorithm to
-- be stable, and prove it
-- more algorithms, obviously
namespace hidden
namespace mylist
open mynat
universe u
variable {T: Sort u}
variables {a b c d m n k x y z: mynat}
variables {lst lst1 lst2 lst3 xs ys zs: mylist mynat}
variables {alg: mylist mynat → mylist mynat}
def is_sorted (lst: mylist mynat): Prop :=
∀ a b: mynat,
∀ hbl: b < len lst,
∀ hab: a < b,
get a lst (lt_trans hab hbl) ≤
get b lst hbl
def count [decidable_eq T]: T → mylist T → mynat
| _ empty := 0
| x (y :: ys) := if x = y then
succ (count x ys) else
count x ys
-- this seems more straightforward than using bijections or something.
-- correct me if I'm wrong
def is_perm (lst1 lst2: mylist mynat) :=
∀ m: mynat,
count m lst1 = count m lst2
def sort_alg_correct (alg: mylist mynat → mylist mynat) :=
∀ lst: mylist mynat,
is_sorted (alg lst) ∧ is_perm lst (alg lst)
private theorem succ_lt_impl_lt: succ a < n → a < n :=
@lt_cancel_strong a n 1
theorem adjacent_sorted_implies_sorted:
(∀ a,
∀ hsal: succ a < len lst,
get a lst (succ_lt_impl_lt hsal) ≤
get (succ a) lst hsal)
→ is_sorted lst :=
begin
assume hadj,
intros a b hbl hab,
rw lt_iff_succ_le at hab,
cases hab with d hd,
induction d with d d_ih generalizing b, {
conv in b {rw hd},
simp,
apply hadj,
}, {
conv in b {rw hd},
conv in (succ a + succ d) {rw add_succ},
have := d_ih _ _ _ rfl,
apply le_trans this (hadj _ _),
rw lt_iff_succ_le,
from le_to_add,
},
end
theorem empty_sorted: is_sorted empty :=
begin
intros a b,
assume hbl hab,
exfalso,
from lt_nzero hbl,
end
theorem singleton_sorted: is_sorted (singleton x) :=
begin
intros a b,
assume hbl hab,
exfalso,
change b < 1 at hbl,
rw [←one_eq_succ_zero, ←le_iff_lt_succ] at hbl,
rw le_zero hbl at hab,
from lt_nzero hab,
end
theorem cons_sorted:
x ≤ y → is_sorted (y :: ys) → is_sorted (x :: y :: ys) :=
begin
assume hxy hstd_yys,
apply adjacent_sorted_implies_sorted,
intro m,
assume hsal,
cases m, {
from hxy,
}, {
have hsm := lt_succ_cancel hsal,
have hmsm := @lt_to_add_succ m 0,
have := hstd_yys m (succ m) hsm hmsm,
from this,
},
end
theorem duo_sorted: x ≤ y → is_sorted (x :: y :: empty) :=
(λ h, cons_sorted h singleton_sorted)
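-- Tiny illustration (added): a two-element list whose entries are in order is sorted.
example (h : x ≤ y) : is_sorted (x :: y :: empty) := duo_sorted h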
theorem tail_sorted: is_sorted (x :: xs) → is_sorted xs :=
begin
assume hstd_xxs,
intros a b,
assume hbl hab,
have := hstd_xxs (succ a) (succ b)
(@lt_add _ _ 1 hbl)
(@lt_add _ _ 1 hab),
from this, -- too lazy to change indentation
end
@[refl]
theorem perm_refl: is_perm lst lst := (λ _, rfl)
theorem perm_symm:
is_perm lst1 lst2 → is_perm lst2 lst1 :=
(λ hp12 m, (hp12 m).symm)
theorem cons_perm:
is_perm xs ys → is_perm (x :: xs) (x :: ys) :=
begin
assume hpxsys,
intro m,
dsimp [count],
rw hpxsys m,
end
theorem perm_trans:
is_perm lst1 lst2 → is_perm lst2 lst3 → is_perm lst1 lst3 :=
begin
assume hp12 hp23,
intro m,
from eq.trans (hp12 m) (hp23 m),
end
theorem count_empty: count x empty = 0 := rfl
theorem empty_perm_is_empty:
is_perm lst empty → lst = empty :=
begin
assume hple,
cases lst, {
refl,
}, {
exfalso,
have := hple lst_head,
dsimp [count] at this,
rw if_pos rfl at this,
from succ_ne_zero this,
},
end
theorem count_concat:
count m (lst1 ++ lst2)
= count m lst1 + count m lst2 :=
begin
induction lst1 with x xs h_ih, {
rw [count_empty, zero_add, empty_concat],
}, {
dsimp [count],
by_cases (m = x), {
repeat {rw if_pos h},
rw [h_ih, succ_add],
}, {
repeat {rw if_neg h},
from h_ih,
},
},
end
theorem perm_concat:
is_perm xs ys → is_perm lst1 lst2
→ is_perm (xs ++ lst1) (ys ++ lst2) :=
begin
assume hpxsys hp12,
intro m,
rw [count_concat, count_concat, hpxsys m, hp12 m],
end
theorem duo_perm:
is_perm (x :: y :: empty) (y :: x :: empty) :=
begin
intro m,
dsimp [count],
by_cases hmx: m = x, {
repeat {rw if_pos hmx},
by_cases hmy:m = y, {
repeat {rw if_pos hmy},
}, {
repeat {rw if_neg hmy},
refl,
},
}, {
repeat {rw if_neg hmx},
by_cases hmy: m = y, {
repeat {rw if_pos hmy},
refl,
}, {
repeat {rw if_neg hmy},
},
},
end
-- insertion sort
def insert_aux: mynat → mylist mynat → mylist mynat
| x empty := singleton x
| x (y :: ys) := if x ≤ y then
x :: y :: ys else
y :: insert_aux x ys
def insertion_sort: mylist mynat → mylist mynat
| empty := empty
| (x :: xs) := insert_aux x (insertion_sort xs)
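-- Sanity checks (added for illustration): the base cases of `insert_aux` and
-- `insertion_sort` compute definitionally, in the same way as `count_empty` above.
example : insert_aux x empty = singleton x := rfl
example : insertion_sort empty = empty := rfl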
-- ew
theorem insertion_preserves_sorted:
is_sorted lst → is_sorted (insert_aux x lst) :=
begin
induction lst with y ys h_ih, {
assume _,
from singleton_sorted,
}, {
assume h,
dsimp [insert_aux],
by_cases hxy: x ≤ y, {
rw if_pos hxy,
from cons_sorted hxy h,
}, {
rw if_neg hxy,
cases hz: insert_aux x ys with z zs, {
from singleton_sorted,
}, {
apply cons_sorted, {
cases ys with y' ys, {
have: x = z, {
dsimp [insert_aux] at hz,
from cons_injective_1 hz,
},
rw ←this,
from lt_impl_le hxy,
}, {
dsimp [insert_aux] at hz,
by_cases hxy': x ≤ y', {
rw if_pos hxy' at hz,
rw ←(cons_injective_1 hz),
from lt_impl_le hxy,
}, {
rw if_neg hxy' at hz,
rw ←(cons_injective_1 hz),
apply h 0 1 _ _, {
dsimp [len],
apply @lt_add _ _ 1,
from zero_lt_succ,
}, {
from zero_lt_succ,
},
},
},
}, {
rw ←hz,
apply h_ih,
from tail_sorted h,
},
},
},
},
end
theorem insertion_sort_is_sorted:
is_sorted (insertion_sort lst) :=
begin
induction lst with head tail h_ih, {
dsimp [insertion_sort],
from empty_sorted,
}, {
dsimp [insertion_sort],
from insertion_preserves_sorted h_ih,
},
end
theorem insertion_is_perm:
is_perm (x :: xs) (insert_aux x xs) :=
begin
induction xs with x' xs h_ih, {
refl,
}, {
dsimp [insert_aux],
by_cases hxx': x ≤ x', {
rw if_pos hxx',
}, {
rw if_neg hxx',
apply perm_trans _ (cons_perm h_ih),
from perm_concat duo_perm perm_refl,
},
},
end
theorem insertion_sort_is_perm:
is_perm lst (insertion_sort lst) :=
begin
induction lst with head tail h_ih, {
refl,
}, {
dsimp [insertion_sort],
have := @cons_perm head _ _ h_ih,
from perm_trans this insertion_is_perm,
},
end
theorem insertion_sort_correct:
sort_alg_correct insertion_sort :=
(λ lst,
and.intro
insertion_sort_is_sorted insertion_sort_is_perm)
-- more theory about permutations/sorted lists
theorem perm_concat_swap:
is_perm (xs ++ ys) (ys ++ xs) :=
begin
intro m,
rw count_concat,
rw count_concat,
rw add_comm,
end
theorem rev_perm: is_perm lst (rev lst) :=
begin
induction lst with head tail h_ih, {
refl,
}, {
from perm_trans
(cons_perm h_ih)
(@perm_concat_swap (singleton head) (rev tail)),
},
end
-- is switching away from recursive definitions a good idea? who knows
def slice:
Π m n: mynat,
Π lst: mylist T,
m ≤ n →
n ≤ len lst → mylist T :=
(λ m n lst hmn hnl,
-- really I just want to use the witness to m ≤ n here
take (n - m)
(drop m lst (le_trans hmn hnl))
begin
cases hmn with d hd,
conv in n {rw hd},
rw add_comm,
rw add_sub,
apply @le_cancel _ _ m,
rw len_drop,
rw add_comm,
rw ←hd,
from hnl,
end)
-- swap elements at two indices (m and n resp.) in a list.
-- Cannot be the same index.
-- Require one index less than the other for convenience.
-- maybe define wrapper function?? but makes proofs harder
def swap_elems:
Π (m n: mynat) (lst: mylist T), m < n → n < len lst → mylist T
:= (λ m n lst hmn hnl,
take m lst (lt_impl_le (lt_trans hmn hnl))
++ singleton (get n lst hnl)
++ slice (succ m) n lst (lt_iff_succ_le.mp hmn)
(lt_impl_le hnl)
++ singleton (get m lst (lt_trans hmn hnl))
++ drop (succ n) lst (lt_iff_succ_le.mp hnl))
theorem len_slice
(hmn: m ≤ n)
(hnl: n ≤ len lst):
len (slice m n lst hmn hnl) + m = n :=
begin
unfold slice,
rw len_take,
cases hmn with d hd,
rw hd,
rw add_comm m d,
rw add_sub,
end
theorem len_swap
(hmn: m < n)
(hnl: n < len lst):
len (swap_elems m n lst hmn hnl) = len lst :=
begin
unfold swap_elems,
repeat {rw len_concat_add},
rw len_take,
rw len_singleton,
rw len_singleton,
conv {
congr,
congr,
congr,
rw add_one_succ,
rw add_comm,
rw len_slice,
},
rw add_one_succ,
rw add_comm,
rw len_drop,
end
-- TODO: all this hypothesis-slinging is getting a bit ugly
-- good god working with equivalence relations is a pain
theorem swap_perm
(hmn: m < n)
(hnl: n < len lst):
is_perm lst (swap_elems m n lst hmn hnl) :=
begin
have hml := lt_trans hmn hnl,
have hml_ns := lt_impl_le hml,
have hnl_ns := lt_impl_le hnl,
unfold swap_elems,
conv {
congr,
rw ←take_concat_drop hml_ns,
},
repeat {rw concat_assoc},
apply perm_concat perm_refl,
have hdmlne: drop m lst hml_ns ≠ empty, {
assume h,
have h2: len lst = m, {
rw ←@len_drop _ _ m,
rw h,
from zero_add m,
},
rw h2 at hml,
from lt_nrefl hml,
},
have hrw := @cons_head_tail _ (drop m lst hml_ns) hdmlne,
have hrw2 := get_head_drop hml,
conv {
congr,
rw ←hrw,
rw ←hrw2,
}, clear hrw hrw2,
apply perm_trans _ perm_concat_swap,
apply
@perm_trans _
(tail (drop m lst hml_ns) hdmlne ++ singleton (get m lst hml))
_ perm_concat_swap,
unfold slice,
rw ←@drop_one_tail _ (drop m lst hml_ns)
begin
cases drop m lst hml_ns, {
contradiction,
}, {
apply succ_le_succ,
from zero_le,
},
end,
rw @drop_drop _ _ m 1,
conv in (m + 1) {rw add_one_succ},
have hdl:
∀ hsml: succ m ≤ len lst,
n - succ m ≤ len (drop (succ m) lst hsml), {
assume _,
cases (lt_iff_succ_le.mp hmn) with k hk,
rw hk,
rw add_comm,
rw add_sub,
apply @le_cancel _ _ (succ m),
rw len_drop,
rw add_comm,
rw ←hk,
from lt_impl_le hnl,
},
conv {
congr,
congr,
rw ←@take_concat_drop _ (drop (succ m) lst _) (n - succ m) (hdl _),
},
rw concat_assoc,
rw concat_assoc,
apply perm_concat perm_refl,
have hrw:
succ m + (n - succ m) = n, {
cases (lt_iff_succ_le.mp hmn) with k hk,
rw hk,
rw add_comm _ k,
rw add_sub,
rw add_comm,
},
rw @drop_drop _ lst (succ m) (n - succ m),
conv {
congr,
congr,
congr,
rw hrw,
},
have hdnlne: drop n lst hnl_ns ≠ empty, {
assume h,
have h2: len lst = n, {
rw ←@len_drop _ _ n,
rw h,
from zero_add n,
},
rw h2 at hnl,
from lt_nrefl hnl,
},
have hrw2 := @cons_head_tail _ (drop n lst hnl_ns) hdnlne,
have hrw3 := @get_head_drop _ lst n hnl,
conv {
congr,
congr,
rw ←hrw2,
rw ←hrw3,
},
apply perm_trans perm_concat_swap,
rw concat_assoc,
apply perm_concat perm_refl,
apply perm_trans _ perm_concat_swap,
apply @perm_concat _ _ (singleton (get n lst hnl)) _ perm_refl,
rw ←@drop_one_tail _ (drop n lst hnl_ns)
begin
cases hl: drop n lst hnl_ns, {
rw hl at hdnlne,
contradiction,
}, {
apply succ_le_succ,
from zero_le,
},
end,
rw @drop_drop _ _ n 1 _ _,
refl,
end
-- TODO: swap involutive, get_swap
theorem count_head_eq:
count x (x :: xs) = succ (count x xs) :=
begin
unfold count,
rw if_pos rfl,
end
-- again should be typeclass
-- also this kind of signals how useless ∈ was
def index:
Π (n: mynat) (lst: mylist mynat), count n lst ≠ 0 → mynat
| n empty h := absurd count_empty h
| n (x :: xs) h := if hnx: n = x then
0 else
succ (index n xs
begin
unfold count at h,
rw if_neg hnx at h,
from h,
end)
theorem index_head_eq
(hc: count x (x :: xs) ≠ 0):
index x (x :: xs) hc = 0 := dif_pos rfl
theorem index_valid
(hc: count n lst ≠ 0):
index n lst hc < len lst :=
begin
induction lst with head tail h_ih, {
contradiction,
}, {
unfold index,
by_cases hnh: n = head, {
rw dif_pos hnh,
from zero_lt_succ,
}, {
rw dif_neg hnh,
from succ_lt_succ (h_ih _),
},
},
end
@[simp]
theorem get_index
(hc: count n lst ≠ 0)
(hil: index n lst hc < len lst):
get (index n lst hc) lst hil = n :=
begin
induction lst with head tail h_ih, {
contradiction,
}, {
unfold index,
by_cases hnh: n = head, {
conv {
congr,
congr,
rw dif_pos hnh,
},
rw get_zero_cons,
from hnh.symm,
}, {
conv {
congr,
congr,
rw dif_neg hnh,
},
rw get_succ_cons,
apply h_ih,
},
},
end
theorem perm_head_cancel:
is_perm (x :: xs) (x :: ys) → is_perm xs ys :=
begin
assume hpxsys,
intro m,
have := hpxsys m,
unfold count at this,
by_cases hmx: m = x, {
repeat {rw if_pos hmx at this},
from succ_inj this,
}, {
repeat {rw if_neg hmx at this},
from this,
},
end
theorem perm_concat_cancel:
is_perm (xs ++ lst1) (xs ++ lst2) → is_perm lst1 lst2 :=
begin
induction xs with x xs h_ih, {
assume h,
from h,
}, {
assume h,
apply h_ih,
from perm_head_cancel h,
},
end
theorem perm_len:
is_perm lst1 lst2 → len lst1 = len lst2 :=
begin
induction lst1 with x xs h_ih generalizing lst2, {
assume h,
rw empty_perm_is_empty (perm_symm h),
}, {
assume h,
cases lst2 with y ys, {
cases empty_perm_is_empty h,
}, {
by_cases hxy: x = y, {
rw hxy at h,
-- can't work out direct way to "cancel" succs in goal
simp,
apply h_ih,
from perm_head_cancel h,
}, {
have h_aux_1: count x (y :: ys) ≠ 0, {
rw ←h x,
rw count_head_eq,
from succ_ne_zero,
},
have h_aux_2: 0 < index x (y :: ys) h_aux_1, {
unfold index,
rw dif_neg hxy,
from zero_lt_succ,
},
-- world record for biggest "have" type-to-value ratio
-- thank your maker that I split away some of the
-- auxiliary hypotheses
-- also I had to in order to do "cases" with hypothesis later,
-- so couldn't afford to have any _s lying around
have hpswap:
is_perm
(y :: ys)
(swap_elems
0
(index x (y :: ys) h_aux_1)
(y :: ys)
h_aux_2
(index_valid h_aux_1)) := swap_perm _ _,
have hxswap := perm_trans h hpswap,
clear hpswap,
cases
hswap: swap_elems 0 (index x (y :: ys) h_aux_1) (y :: ys) h_aux_2 (index_valid h_aux_1)
with swap_head swap_tail, {
rw hswap at hxswap,
cases (empty_perm_is_empty hxswap),
}, {
rw hswap at hxswap,
have hxsh: swap_head = x, {
unfold swap_elems at hswap,
symmetry,
rw [take_zero, empty_concat, get_index] at hswap,
from cons_injective_1 hswap,
},
rw hxsh at hxswap, clear hxsh,
have := len_of_refl hswap,
rw len_swap at this,
rw this,
simp,
apply h_ih,
from perm_head_cancel hxswap,
},
},
},
},
end
theorem is_perm_sorted_eq:
is_perm lst1 lst2 → is_sorted lst1 → is_sorted lst2
→ lst1 = lst2 :=
begin
assume hp12 hs1 hs2,
have hl12 := perm_len hp12,
induction hl: len lst1 with n hn generalizing lst1 lst2, {
rw [zz, ←empty_iff_len_zero] at hl,
rw hl,
rw hl at hp12,
symmetry,
from empty_perm_is_empty (perm_symm hp12),
}, {
cases lst1 with x xs, {
contradiction,
}, {
cases lst2 with y ys, {
contradiction,
}, {
have hxy: x = y, {
-- note this is not use of classical reasoning, since
-- we know = is decidable
by_contradiction hxy',
-- not sure if it would be worth it to use wlog or not
-- is_sorted (y :: xs) seems like a pain to prove
by_cases hxy'': x ≤ y, {
have hxly := lt_iff_le_and_neq.mpr ⟨hxy'', hxy'⟩,
have h :=
hs2 0 (index x (y :: ys)
begin
rw ←hp12 x,
rw count_head_eq,
from succ_ne_zero,
end)
(index_valid _)
begin
unfold index,
rw dif_neg hxy',
from zero_lt_succ,
end,
rw get_zero_cons at h,
rw get_index at h,
contradiction,
}, {
have h :=
hs1 0 (index y (x :: xs)
begin
rw hp12 y,
rw count_head_eq,
from succ_ne_zero,
end)
(index_valid _)
begin
unfold index,
have: ¬y = x, {
assume h,
from hxy' h.symm,
},
rw dif_neg this,
from zero_lt_succ,
end,
rw get_zero_cons at h,
rw get_index at h,
contradiction,
},
},
rw hxy,
simp,
apply hn _ (tail_sorted hs1) (tail_sorted hs2), {
simp at hl12,
assumption,
}, {
simp at hl,
assumption,
}, {
rw hxy at hp12,
from perm_head_cancel hp12,
},
},
},
},
end
end mylist
end hidden
|
bf6795808ac5d4038f05b2baa502422be74a6eb0
|
bb31430994044506fa42fd667e2d556327e18dfe
|
/src/data/finset/n_ary.lean
|
2ce67aac5c1c99e14748cffb7a864347ffe187c2
|
[
"Apache-2.0"
] |
permissive
|
sgouezel/mathlib
|
0cb4e5335a2ba189fa7af96d83a377f83270e503
|
00638177efd1b2534fc5269363ebf42a7871df9a
|
refs/heads/master
| 1,674,527,483,042
| 1,673,665,568,000
| 1,673,665,568,000
| 119,598,202
| 0
| 0
| null | 1,517,348,647,000
| 1,517,348,646,000
| null |
UTF-8
|
Lean
| false
| false
| 16,572
|
lean
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import data.finset.prod
/-!
# N-ary images of finsets
This file defines `finset.image₂`, the binary image of finsets. This is the finset version of
`set.image2`. This is mostly useful to define pointwise operations.
## Notes
This file is very similar to `data.set.n_ary`, `order.filter.n_ary` and `data.option.n_ary`. Please
keep them in sync.
We do not define `finset.image₃` as its only purpose would be to prove properties of `finset.image₂`,
and `set.image2` already fulfills this task.
-/
open function set
namespace finset
variables {α α' β β' γ γ' δ δ' ε ε' : Type*}
[decidable_eq α'] [decidable_eq β'] [decidable_eq γ] [decidable_eq γ'] [decidable_eq δ]
[decidable_eq δ'] [decidable_eq ε] [decidable_eq ε']
{f f' : α → β → γ} {g g' : α → β → γ → δ} {s s' : finset α} {t t' : finset β} {u u' : finset γ}
{a a' : α} {b b' : β} {c : γ}
/-- The image of a binary function `f : α → β → γ` as a function `finset α → finset β → finset γ`.
Mathematically this should be thought of as the image of the corresponding function `α × β → γ`. -/
def image₂ (f : α → β → γ) (s : finset α) (t : finset β) : finset γ :=
(s ×ˢ t).image $ uncurry f
@[simp] lemma mem_image₂ : c ∈ image₂ f s t ↔ ∃ a b, a ∈ s ∧ b ∈ t ∧ f a b = c :=
by simp [image₂, and_assoc]
@[simp, norm_cast] lemma coe_image₂ (f : α → β → γ) (s : finset α) (t : finset β) :
(image₂ f s t : set γ) = set.image2 f s t :=
set.ext $ λ _, mem_image₂
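-- Concrete illustration (added; checked by `dec_trivial` under the standard `decidable_eq ℕ`):
example : image₂ (+) ({0, 1} : finset ℕ) ({2} : finset ℕ) = {2, 3} := dec_trivial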
lemma card_image₂_le (f : α → β → γ) (s : finset α) (t : finset β) :
(image₂ f s t).card ≤ s.card * t.card :=
card_image_le.trans_eq $ card_product _ _
lemma card_image₂_iff :
(image₂ f s t).card = s.card * t.card ↔ (s ×ˢ t : set (α × β)).inj_on (λ x, f x.1 x.2) :=
by { rw [←card_product, ←coe_product], exact card_image_iff }
lemma card_image₂ (hf : injective2 f) (s : finset α) (t : finset β) :
(image₂ f s t).card = s.card * t.card :=
(card_image_of_injective _ hf.uncurry).trans $ card_product _ _
lemma mem_image₂_of_mem (ha : a ∈ s) (hb : b ∈ t) : f a b ∈ image₂ f s t :=
mem_image₂.2 ⟨a, b, ha, hb, rfl⟩
lemma mem_image₂_iff (hf : injective2 f) : f a b ∈ image₂ f s t ↔ a ∈ s ∧ b ∈ t :=
by rw [←mem_coe, coe_image₂, mem_image2_iff hf, mem_coe, mem_coe]
lemma image₂_subset (hs : s ⊆ s') (ht : t ⊆ t') : image₂ f s t ⊆ image₂ f s' t' :=
by { rw [←coe_subset, coe_image₂, coe_image₂], exact image2_subset hs ht }
lemma image₂_subset_left (ht : t ⊆ t') : image₂ f s t ⊆ image₂ f s t' := image₂_subset subset.rfl ht
lemma image₂_subset_right (hs : s ⊆ s') : image₂ f s t ⊆ image₂ f s' t :=
image₂_subset hs subset.rfl
lemma image_subset_image₂_left (hb : b ∈ t) : (λ a, f a b) '' s ⊆ image₂ f s t :=
ball_image_of_ball $ λ a ha, mem_image₂_of_mem ha hb
lemma image_subset_image₂_right (ha : a ∈ s) : f a '' t ⊆ image₂ f s t :=
ball_image_of_ball $ λ b, mem_image₂_of_mem ha
lemma forall_image₂_iff {p : γ → Prop} : (∀ z ∈ image₂ f s t, p z) ↔ ∀ (x ∈ s) (y ∈ t), p (f x y) :=
by simp_rw [←mem_coe, coe_image₂, forall_image2_iff]
@[simp] lemma image₂_subset_iff : image₂ f s t ⊆ u ↔ ∀ (x ∈ s) (y ∈ t), f x y ∈ u :=
forall_image₂_iff
@[simp] lemma image₂_nonempty_iff : (image₂ f s t).nonempty ↔ s.nonempty ∧ t.nonempty :=
by { rw [←coe_nonempty, coe_image₂], exact image2_nonempty_iff }
lemma nonempty.image₂ (hs : s.nonempty) (ht : t.nonempty) : (image₂ f s t).nonempty :=
image₂_nonempty_iff.2 ⟨hs, ht⟩
lemma nonempty.of_image₂_left (h : (image₂ f s t).nonempty) : s.nonempty :=
(image₂_nonempty_iff.1 h).1
lemma nonempty.of_image₂_right (h : (image₂ f s t).nonempty) : t.nonempty :=
(image₂_nonempty_iff.1 h).2
@[simp] lemma image₂_empty_left : image₂ f ∅ t = ∅ := coe_injective $ by simp
@[simp] lemma image₂_empty_right : image₂ f s ∅ = ∅ := coe_injective $ by simp
@[simp] lemma image₂_eq_empty_iff : image₂ f s t = ∅ ↔ s = ∅ ∨ t = ∅ :=
by simp_rw [←not_nonempty_iff_eq_empty, image₂_nonempty_iff, not_and_distrib]
@[simp] lemma image₂_singleton_left : image₂ f {a} t = t.image (λ b, f a b) := ext $ λ x, by simp
@[simp] lemma image₂_singleton_right : image₂ f s {b} = s.image (λ a, f a b) := ext $ λ x, by simp
lemma image₂_singleton_left' : image₂ f {a} t = t.image (f a) := image₂_singleton_left
lemma image₂_singleton : image₂ f {a} {b} = {f a b} := by simp
lemma image₂_union_left [decidable_eq α] : image₂ f (s ∪ s') t = image₂ f s t ∪ image₂ f s' t :=
coe_injective $ by { push_cast, exact image2_union_left }
lemma image₂_union_right [decidable_eq β] : image₂ f s (t ∪ t') = image₂ f s t ∪ image₂ f s t' :=
coe_injective $ by { push_cast, exact image2_union_right }
lemma image₂_inter_left [decidable_eq α] (hf : injective2 f) :
image₂ f (s ∩ s') t = image₂ f s t ∩ image₂ f s' t :=
coe_injective $ by { push_cast, exact image2_inter_left hf }
lemma image₂_inter_right [decidable_eq β] (hf : injective2 f) :
image₂ f s (t ∩ t') = image₂ f s t ∩ image₂ f s t' :=
coe_injective $ by { push_cast, exact image2_inter_right hf }
lemma image₂_inter_subset_left [decidable_eq α] :
image₂ f (s ∩ s') t ⊆ image₂ f s t ∩ image₂ f s' t :=
coe_subset.1 $ by { push_cast, exact image2_inter_subset_left }
lemma image₂_inter_subset_right [decidable_eq β] :
image₂ f s (t ∩ t') ⊆ image₂ f s t ∩ image₂ f s t' :=
coe_subset.1 $ by { push_cast, exact image2_inter_subset_right }
lemma image₂_congr (h : ∀ (a ∈ s) (b ∈ t), f a b = f' a b) : image₂ f s t = image₂ f' s t :=
coe_injective $ by { push_cast, exact image2_congr h }
/-- A common special case of `image₂_congr` -/
lemma image₂_congr' (h : ∀ a b, f a b = f' a b) : image₂ f s t = image₂ f' s t :=
image₂_congr $ λ a _ b _, h a b
lemma subset_image₂ {s : set α} {t : set β} (hu : ↑u ⊆ image2 f s t) :
∃ (s' : finset α) (t' : finset β), ↑s' ⊆ s ∧ ↑t' ⊆ t ∧ u ⊆ image₂ f s' t' :=
begin
apply finset.induction_on' u,
{ exact ⟨∅, ∅, set.empty_subset _, set.empty_subset _, empty_subset _⟩ },
rintro a u ha _ _ ⟨s', t', hs, hs', h⟩,
obtain ⟨x, y, hx, hy, ha⟩ := hu ha,
haveI := classical.dec_eq α,
haveI := classical.dec_eq β,
refine ⟨insert x s', insert y t', _⟩,
simp_rw [coe_insert, set.insert_subset],
exact ⟨⟨hx, hs⟩, ⟨hy, hs'⟩, insert_subset.2 ⟨mem_image₂.2 ⟨x, y, mem_insert_self _ _,
mem_insert_self _ _, ha⟩, h.trans $ image₂_subset (subset_insert _ _) $ subset_insert _ _⟩⟩,
end
variables (s t)
lemma card_image₂_singleton_left (hf : injective (f a)) : (image₂ f {a} t).card = t.card :=
by rw [image₂_singleton_left, card_image_of_injective _ hf]
lemma card_image₂_singleton_right (hf : injective (λ a, f a b)) : (image₂ f s {b}).card = s.card :=
by rw [image₂_singleton_right, card_image_of_injective _ hf]
lemma image₂_singleton_inter [decidable_eq β] (t₁ t₂ : finset β) (hf : injective (f a)) :
image₂ f {a} (t₁ ∩ t₂) = image₂ f {a} t₁ ∩ image₂ f {a} t₂ :=
by simp_rw [image₂_singleton_left, image_inter _ _ hf]
lemma image₂_inter_singleton [decidable_eq α] (s₁ s₂ : finset α) (hf : injective (λ a, f a b)) :
image₂ f (s₁ ∩ s₂) {b} = image₂ f s₁ {b} ∩ image₂ f s₂ {b} :=
by simp_rw [image₂_singleton_right, image_inter _ _ hf]
lemma card_le_card_image₂_left {s : finset α} (hs : s.nonempty) (hf : ∀ a, injective (f a)) :
t.card ≤ (image₂ f s t).card :=
begin
obtain ⟨a, ha⟩ := hs,
rw ←card_image₂_singleton_left _ (hf a),
exact card_le_of_subset (image₂_subset_right $ singleton_subset_iff.2 ha),
end
lemma card_le_card_image₂_right {t : finset β} (ht : t.nonempty)
(hf : ∀ b, injective (λ a, f a b)) :
s.card ≤ (image₂ f s t).card :=
begin
obtain ⟨b, hb⟩ := ht,
rw ←card_image₂_singleton_right _ (hf b),
exact card_le_of_subset (image₂_subset_left $ singleton_subset_iff.2 hb),
end
variables {s t}
lemma bUnion_image_left : s.bUnion (λ a, t.image $ f a) = image₂ f s t :=
coe_injective $ by { push_cast, exact set.Union_image_left _ }
lemma bUnion_image_right : t.bUnion (λ b, s.image $ λ a, f a b) = image₂ f s t :=
coe_injective $ by { push_cast, exact set.Union_image_right _ }
/-!
### Algebraic replacement rules
A collection of lemmas to transfer associativity, commutativity, distributivity, ... of operations
to the associativity, commutativity, distributivity, ... of `finset.image₂` of those operations.
The proof pattern is `image₂_lemma operation_lemma`. For example, `image₂_comm mul_comm` proves that
`image₂ (*) f g = image₂ (*) g f` in a `comm_semigroup`.
-/
lemma image_image₂ (f : α → β → γ) (g : γ → δ) :
(image₂ f s t).image g = image₂ (λ a b, g (f a b)) s t :=
coe_injective $ by { push_cast, exact image_image2 _ _ }
lemma image₂_image_left (f : γ → β → δ) (g : α → γ) :
image₂ f (s.image g) t = image₂ (λ a b, f (g a) b) s t :=
coe_injective $ by { push_cast, exact image2_image_left _ _ }
lemma image₂_image_right (f : α → γ → δ) (g : β → γ) :
image₂ f s (t.image g) = image₂ (λ a b, f a (g b)) s t :=
coe_injective $ by { push_cast, exact image2_image_right _ _ }
lemma image₂_swap (f : α → β → γ) (s : finset α) (t : finset β) :
image₂ f s t = image₂ (λ a b, f b a) t s :=
coe_injective $ by { push_cast, exact image2_swap _ _ _ }
@[simp] lemma image₂_mk_eq_product [decidable_eq α] [decidable_eq β] (s : finset α) (t : finset β) :
image₂ prod.mk s t = s ×ˢ t :=
by ext; simp [prod.ext_iff]
@[simp] lemma image₂_curry (f : α × β → γ) (s : finset α) (t : finset β) :
image₂ (curry f) s t = (s ×ˢ t).image f :=
by { classical, rw [←image₂_mk_eq_product, image_image₂, curry] }
@[simp] lemma image_uncurry_product (f : α → β → γ) (s : finset α) (t : finset β) :
(s ×ˢ t).image (uncurry f) = image₂ f s t := by rw [←image₂_curry, curry_uncurry]
@[simp] lemma image₂_left [decidable_eq α] (h : t.nonempty) : image₂ (λ x y, x) s t = s :=
coe_injective $ by { push_cast, exact image2_left h }
@[simp] lemma image₂_right [decidable_eq β] (h : s.nonempty) : image₂ (λ x y, y) s t = t :=
coe_injective $ by { push_cast, exact image2_right h }
lemma image₂_assoc {γ : Type*} {u : finset γ} {f : δ → γ → ε} {g : α → β → δ} {f' : α → ε' → ε}
{g' : β → γ → ε'} (h_assoc : ∀ a b c, f (g a b) c = f' a (g' b c)) :
image₂ f (image₂ g s t) u = image₂ f' s (image₂ g' t u) :=
coe_injective $ by { push_cast, exact image2_assoc h_assoc }
lemma image₂_comm {g : β → α → γ} (h_comm : ∀ a b, f a b = g b a) : image₂ f s t = image₂ g t s :=
(image₂_swap _ _ _).trans $ by simp_rw h_comm
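/- Added illustration of the replacement pattern described in the section docstring
(a sketch; `M`, `m₁`, `m₂` are hypothetical names, not part of the original file):
`image₂_comm mul_comm` transfers commutativity of `(*)` to `image₂ (*)`. -/
example {M : Type*} [comm_semigroup M] [decidable_eq M] (m₁ m₂ : finset M) :
  image₂ (*) m₁ m₂ = image₂ (*) m₂ m₁ :=
image₂_comm mul_comm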
lemma image₂_left_comm {γ : Type*} {u : finset γ} {f : α → δ → ε} {g : β → γ → δ} {f' : α → γ → δ'}
{g' : β → δ' → ε} (h_left_comm : ∀ a b c, f a (g b c) = g' b (f' a c)) :
image₂ f s (image₂ g t u) = image₂ g' t (image₂ f' s u) :=
coe_injective $ by { push_cast, exact image2_left_comm h_left_comm }
lemma image₂_right_comm {γ : Type*} {u : finset γ} {f : δ → γ → ε} {g : α → β → δ} {f' : α → γ → δ'}
{g' : δ' → β → ε} (h_right_comm : ∀ a b c, f (g a b) c = g' (f' a c) b) :
image₂ f (image₂ g s t) u = image₂ g' (image₂ f' s u) t :=
coe_injective $ by { push_cast, exact image2_right_comm h_right_comm }
lemma image_image₂_distrib {g : γ → δ} {f' : α' → β' → δ} {g₁ : α → α'} {g₂ : β → β'}
(h_distrib : ∀ a b, g (f a b) = f' (g₁ a) (g₂ b)) :
(image₂ f s t).image g = image₂ f' (s.image g₁) (t.image g₂) :=
coe_injective $ by { push_cast, exact image_image2_distrib h_distrib }
/-- Symmetric statement to `finset.image₂_image_left_comm`. -/
lemma image_image₂_distrib_left {g : γ → δ} {f' : α' → β → δ} {g' : α → α'}
(h_distrib : ∀ a b, g (f a b) = f' (g' a) b) :
(image₂ f s t).image g = image₂ f' (s.image g') t :=
coe_injective $ by { push_cast, exact image_image2_distrib_left h_distrib }
/-- Symmetric statement to `finset.image_image₂_right_comm`. -/
lemma image_image₂_distrib_right {g : γ → δ} {f' : α → β' → δ} {g' : β → β'}
(h_distrib : ∀ a b, g (f a b) = f' a (g' b)) :
(image₂ f s t).image g = image₂ f' s (t.image g') :=
coe_injective $ by { push_cast, exact image_image2_distrib_right h_distrib }
/-- Symmetric statement to `finset.image_image₂_distrib_left`. -/
lemma image₂_image_left_comm {f : α' → β → γ} {g : α → α'} {f' : α → β → δ} {g' : δ → γ}
(h_left_comm : ∀ a b, f (g a) b = g' (f' a b)) :
image₂ f (s.image g) t = (image₂ f' s t).image g' :=
(image_image₂_distrib_left $ λ a b, (h_left_comm a b).symm).symm
/-- Symmetric statement to `finset.image_image₂_distrib_right`. -/
lemma image_image₂_right_comm {f : α → β' → γ} {g : β → β'} {f' : α → β → δ} {g' : δ → γ}
(h_right_comm : ∀ a b, f a (g b) = g' (f' a b)) :
image₂ f s (t.image g) = (image₂ f' s t).image g' :=
(image_image₂_distrib_right $ λ a b, (h_right_comm a b).symm).symm
/-- The other direction does not hold because of the `s`-`s` cross terms on the RHS. -/
lemma image₂_distrib_subset_left {γ : Type*} {u : finset γ} {f : α → δ → ε} {g : β → γ → δ}
{f₁ : α → β → β'} {f₂ : α → γ → γ'} {g' : β' → γ' → ε}
(h_distrib : ∀ a b c, f a (g b c) = g' (f₁ a b) (f₂ a c)) :
image₂ f s (image₂ g t u) ⊆ image₂ g' (image₂ f₁ s t) (image₂ f₂ s u) :=
coe_subset.1 $ by { push_cast, exact set.image2_distrib_subset_left h_distrib }
/-- The other direction does not hold because of the `u`-`u` cross terms on the RHS. -/
lemma image₂_distrib_subset_right {γ : Type*} {u : finset γ} {f : δ → γ → ε} {g : α → β → δ}
{f₁ : α → γ → α'} {f₂ : β → γ → β'} {g' : α' → β' → ε}
(h_distrib : ∀ a b c, f (g a b) c = g' (f₁ a c) (f₂ b c)) :
image₂ f (image₂ g s t) u ⊆ image₂ g' (image₂ f₁ s u) (image₂ f₂ t u) :=
coe_subset.1 $ by { push_cast, exact set.image2_distrib_subset_right h_distrib }
lemma image_image₂_antidistrib {g : γ → δ} {f' : β' → α' → δ} {g₁ : β → β'} {g₂ : α → α'}
(h_antidistrib : ∀ a b, g (f a b) = f' (g₁ b) (g₂ a)) :
(image₂ f s t).image g = image₂ f' (t.image g₁) (s.image g₂) :=
by { rw image₂_swap f, exact image_image₂_distrib (λ _ _, h_antidistrib _ _) }
/-- Symmetric statement to `finset.image₂_image_left_anticomm`. -/
lemma image_image₂_antidistrib_left {g : γ → δ} {f' : β' → α → δ} {g' : β → β'}
(h_antidistrib : ∀ a b, g (f a b) = f' (g' b) a) :
(image₂ f s t).image g = image₂ f' (t.image g') s :=
coe_injective $ by { push_cast, exact image_image2_antidistrib_left h_antidistrib }
/-- Symmetric statement to `finset.image_image₂_right_anticomm`. -/
lemma image_image₂_antidistrib_right {g : γ → δ} {f' : β → α' → δ} {g' : α → α'}
(h_antidistrib : ∀ a b, g (f a b) = f' b (g' a)) :
(image₂ f s t).image g = image₂ f' t (s.image g') :=
coe_injective $ by { push_cast, exact image_image2_antidistrib_right h_antidistrib }
/-- Symmetric statement to `finset.image_image₂_antidistrib_left`. -/
lemma image₂_image_left_anticomm {f : α' → β → γ} {g : α → α'} {f' : β → α → δ} {g' : δ → γ}
(h_left_anticomm : ∀ a b, f (g a) b = g' (f' b a)) :
image₂ f (s.image g) t = (image₂ f' t s).image g' :=
(image_image₂_antidistrib_left $ λ a b, (h_left_anticomm b a).symm).symm
/-- Symmetric statement to `finset.image_image₂_antidistrib_right`. -/
lemma image_image₂_right_anticomm {f : α → β' → γ} {g : β → β'} {f' : β → α → δ} {g' : δ → γ}
(h_right_anticomm : ∀ a b, f a (g b) = g' (f' b a)) :
image₂ f s (t.image g) = (image₂ f' t s).image g' :=
(image_image₂_antidistrib_right $ λ a b, (h_right_anticomm b a).symm).symm
end finset
|
bd60aa2d90d1852d616c53256c36185b1160c777
|
d450724ba99f5b50b57d244eb41fef9f6789db81
|
/src/instructor/lectures/lecture_22.lean
|
38b8ec969473f39f9efa477490803ff93982784e
|
[] |
no_license
|
jakekauff/CS2120F21
|
4f009adeb4ce4a148442b562196d66cc6c04530c
|
e69529ec6f5d47a554291c4241a3d8ec4fe8f5ad
|
refs/heads/main
| 1,693,841,880,030
| 1,637,604,848,000
| 1,637,604,848,000
| 399,946,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 3,115
|
lean
|
import tactic.ring
/-
State formally and prove the proposition
that congruence mod n is an equivalence
relation. Follow the steps below.
-/
/-
First, we need to define congruence mod n.
Technically it is defined not only on the
natural numbers but on the integers.
Here's the English language definition:
Given a natural number, n, greater than 1
(a "modulus"), two natural numbers, a and
b, are "congruent modulo n", if n is a
divisor of their difference: that is, if
there is some integer k such that
a − b = kn.
Your first task is to define cong_mod,
formally, stating that for any value,
n, cong_mod n is a binary relation
on natural numbers, as defined above.
-/
def cong_mod (n a b : ℤ) : Prop :=
∃ k, a - b = k * n
/-
Second, formally state the proposition that
for each integer n, "cong_mod n" is
an equivalence relation. You should use the
"equivalence" predicate on binary relations
defined in Lean's library (which is the same
as our definition from the last lecture) in
writing this propopsition.
-/
def cong_mod_n_is_equiv_relation (n : ℤ) : Prop :=
equivalence (cong_mod n)
/-
Note that partial evaluation makes
cong_mod n into a binary relation: it
is waiting for two more integer
arguments, let's say a and b,
and when applied to such arguments,
it yields the proposition that the
two numbers are congruent as defined.
-/
#reduce cong_mod (4:ℤ)
#reduce cong_mod (4:ℤ) (6:ℤ) (10:ℤ)
-- First, translate the goal into ordinary notation
-- Now what must you choose as a witness for a proof?
-- Let's check a concrete instance: 6 ≡ 14 (mod 4), with witness k = -2.
example : cong_mod (4:ℤ) (6:ℤ) (14:ℤ) :=
begin
unfold cong_mod,
apply exists.intro (-2:ℤ),
apply rfl,
end
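-- Another worked instance (added for illustration): 12 ≡ 2 (mod 5), with witness
-- k = 2, since 12 - 2 = 2 * 5.
example : cong_mod (5:ℤ) (12:ℤ) (2:ℤ) :=
begin
  unfold cong_mod,
  apply exists.intro (2:ℤ),
  apply rfl,
end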
/-
Now assert and prove this proposition to be
a theorem, i.e., to have a proof.
-/
example (n : ℤ) : cong_mod_n_is_equiv_relation n :=
begin
unfold cong_mod_n_is_equiv_relation,
unfold equivalence,
split, -- chooses to apply and.elim
-- reflexive
unfold reflexive,
assume k,
unfold cong_mod,
apply exists.intro (0:ℤ),
ring,
-- symmetric
split,
unfold symmetric cong_mod,
assume x y h,
cases h with v pf,
apply exists.intro (-v),
ring,
rw <-pf,
ring,
-- transitive
-- you prove it
unfold transitive cong_mod,
assume x y z h1 h2,
cases h1 with h1v h1pf,
cases h2 with h2v h2pf,
apply exists.intro (h1v+h2v),
rw int.distrib_right _ _ _, -- LIBRARY LOOKUP!
rw <-h2pf,
rw <-h1pf,
ring,
end
/-
A version of congruence mod n restricted to the
natural (non-negative whole) numbers.
-/
/-
Previous problem requires access to negative
numbers because it involves the term a - b, which
in ℤ can be negative. If it is negative in ℤ, it
will simply be truncated to 0 in ℕ, losing
critical information.
-/
#reduce (6:ℤ) - (11:ℤ)
#reduce 6-10 -- oops
#reduce 6-11 -- oops
#reduce 6-12 -- oops
def cong_mod_nat (n a b : ℕ) :=
a%n = b%n
example : cong_mod_nat 4 3 7 :=
begin
unfold cong_mod_nat,
exact rfl,
end
-- You prove it
example : ∀ n, equivalence (cong_mod_nat n) :=
begin
  assume n,  -- equality of remainders inherits refl/symm/trans from `=`
  unfold equivalence reflexive symmetric transitive cong_mod_nat,
  exact ⟨λ x, rfl, λ x y h, h.symm, λ x y z h1 h2, h1.trans h2⟩,
end
|
3b2ee2ff7db752608ff2f5b33e887f099a58e8b8
|
947fa6c38e48771ae886239b4edce6db6e18d0fb
|
/src/data/nat/factorization/prime_pow.lean
|
ba7ee36067bb911d6faf94473a263d7f9bd0022d
|
[
"Apache-2.0"
] |
permissive
|
ramonfmir/mathlib
|
c5dc8b33155473fab97c38bd3aa6723dc289beaa
|
14c52e990c17f5a00c0cc9e09847af16fabbed25
|
refs/heads/master
| 1,661,979,343,526
| 1,660,830,384,000
| 1,660,830,384,000
| 182,072,989
| 0
| 0
| null | 1,555,585,876,000
| 1,555,585,876,000
| null |
UTF-8
|
Lean
| false
| false
| 5,471
|
lean
|
/-
Copyright (c) 2022 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import algebra.is_prime_pow
import data.nat.factorization.basic
/-!
# Prime powers and factorizations
This file deals with factorizations of prime powers.
-/
variables {R : Type*} [comm_monoid_with_zero R] (n p : R) (k : ℕ)
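/- Added illustration (a sketch; `q` is a hypothetical name): any positive power of a
prime is a prime power in the sense used throughout this file. -/
example {q : ℕ} (hq : q.prime) : is_prime_pow (q ^ 2) :=
⟨q, 2, nat.prime_iff.mp hq, two_pos, rfl⟩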
lemma is_prime_pow.min_fac_pow_factorization_eq {n : ℕ} (hn : is_prime_pow n) :
n.min_fac ^ n.factorization n.min_fac = n :=
begin
obtain ⟨p, k, hp, hk, rfl⟩ := hn,
rw ←nat.prime_iff at hp,
rw [hp.pow_min_fac hk.ne', hp.factorization_pow, finsupp.single_eq_same],
end
lemma is_prime_pow_of_min_fac_pow_factorization_eq {n : ℕ}
(h : n.min_fac ^ n.factorization n.min_fac = n) (hn : n ≠ 1) :
is_prime_pow n :=
begin
rcases eq_or_ne n 0 with rfl | hn',
{ simpa using h },
refine ⟨_, _, nat.prime_iff.1 (nat.min_fac_prime hn), _, h⟩,
rw [pos_iff_ne_zero, ←finsupp.mem_support_iff, nat.factor_iff_mem_factorization,
nat.mem_factors_iff_dvd hn' (nat.min_fac_prime hn)],
apply nat.min_fac_dvd
end
lemma is_prime_pow_iff_min_fac_pow_factorization_eq {n : ℕ} (hn : n ≠ 1) :
is_prime_pow n ↔ n.min_fac ^ n.factorization n.min_fac = n :=
⟨λ h, h.min_fac_pow_factorization_eq, λ h, is_prime_pow_of_min_fac_pow_factorization_eq h hn⟩
lemma is_prime_pow_iff_factorization_eq_single {n : ℕ} :
is_prime_pow n ↔ ∃ p k : ℕ, 0 < k ∧ n.factorization = finsupp.single p k :=
begin
rw is_prime_pow_nat_iff,
refine exists₂_congr (λ p k, _),
split,
{ rintros ⟨hp, hk, hn⟩,
exact ⟨hk, by rw [←hn, nat.prime.factorization_pow hp]⟩ },
{ rintros ⟨hk, hn⟩,
have hn0 : n ≠ 0,
{ rintro rfl,
simpa only [finsupp.single_eq_zero, eq_comm, nat.factorization_zero, hk.ne'] using hn },
rw nat.eq_pow_of_factorization_eq_single hn0 hn,
exact ⟨nat.prime_of_mem_factorization
(by simp [hn, hk.ne'] : p ∈ n.factorization.support), hk, rfl⟩ }
end
lemma is_prime_pow_iff_card_support_factorization_eq_one {n : ℕ} :
is_prime_pow n ↔ n.factorization.support.card = 1 :=
by simp_rw [is_prime_pow_iff_factorization_eq_single, finsupp.card_support_eq_one', exists_prop,
pos_iff_ne_zero]
/-- An equivalent definition for prime powers: `n` is a prime power iff there is a unique prime
dividing it. -/
lemma is_prime_pow_iff_unique_prime_dvd {n : ℕ} :
is_prime_pow n ↔ ∃! p : ℕ, p.prime ∧ p ∣ n :=
begin
rw is_prime_pow_nat_iff,
split,
{ rintro ⟨p, k, hp, hk, rfl⟩,
refine ⟨p, ⟨hp, dvd_pow_self _ hk.ne'⟩, _⟩,
rintro q ⟨hq, hq'⟩,
exact (nat.prime_dvd_prime_iff_eq hq hp).1 (hq.dvd_of_dvd_pow hq') },
rintro ⟨p, ⟨hp, hn⟩, hq⟩,
-- Take care of the n = 0 case
rcases eq_or_ne n 0 with rfl | hn₀,
{ obtain ⟨q, hq', hq''⟩ := nat.exists_infinite_primes (p + 1),
cases hq q ⟨hq'', by simp⟩,
simpa using hq' },
-- So assume 0 < n
refine ⟨p, n.factorization p, hp, hp.factorization_pos_of_dvd hn₀ hn, _⟩,
simp only [and_imp] at hq,
apply nat.dvd_antisymm (nat.ord_proj_dvd _ _),
-- We need to show n ∣ p ^ n.factorization p
apply nat.dvd_of_factors_subperm hn₀,
rw [hp.factors_pow, list.subperm_ext_iff],
intros q hq',
rw nat.mem_factors hn₀ at hq',
cases hq _ hq'.1 hq'.2,
simp,
end
lemma is_prime_pow_pow_iff {n k : ℕ} (hk : k ≠ 0) :
is_prime_pow (n ^ k) ↔ is_prime_pow n :=
begin
simp only [is_prime_pow_iff_unique_prime_dvd],
apply exists_unique_congr,
simp only [and.congr_right_iff],
intros p hp,
exact ⟨hp.dvd_of_dvd_pow, λ t, t.trans (dvd_pow_self _ hk)⟩,
end
lemma nat.coprime.is_prime_pow_dvd_mul {n a b : ℕ} (hab : nat.coprime a b) (hn : is_prime_pow n) :
n ∣ a * b ↔ n ∣ a ∨ n ∣ b :=
begin
rcases eq_or_ne a 0 with rfl | ha,
{ simp only [nat.coprime_zero_left] at hab,
simp [hab, finset.filter_singleton, not_is_prime_pow_one] },
rcases eq_or_ne b 0 with rfl | hb,
{ simp only [nat.coprime_zero_right] at hab,
simp [hab, finset.filter_singleton, not_is_prime_pow_one] },
refine ⟨_, λ h, or.elim h (λ i, i.trans (dvd_mul_right _ _)) (λ i, i.trans (dvd_mul_left _ _))⟩,
obtain ⟨p, k, hp, hk, rfl⟩ := (is_prime_pow_nat_iff _).1 hn,
simp only [hp.pow_dvd_iff_le_factorization (mul_ne_zero ha hb),
nat.factorization_mul ha hb, hp.pow_dvd_iff_le_factorization ha,
hp.pow_dvd_iff_le_factorization hb, pi.add_apply, finsupp.coe_add],
have : a.factorization p = 0 ∨ b.factorization p = 0,
{ rw [←finsupp.not_mem_support_iff, ←finsupp.not_mem_support_iff, ←not_and_distrib,
←finset.mem_inter],
exact λ t, nat.factorization_disjoint_of_coprime hab t },
cases this;
simp [this, imp_or_distrib],
end
lemma nat.mul_divisors_filter_prime_pow {a b : ℕ} (hab : a.coprime b) :
(a * b).divisors.filter is_prime_pow = (a.divisors ∪ b.divisors).filter is_prime_pow :=
begin
rcases eq_or_ne a 0 with rfl | ha,
{ simp only [nat.coprime_zero_left] at hab,
simp [hab, finset.filter_singleton, not_is_prime_pow_one] },
rcases eq_or_ne b 0 with rfl | hb,
{ simp only [nat.coprime_zero_right] at hab,
simp [hab, finset.filter_singleton, not_is_prime_pow_one] },
ext n,
simp only [ha, hb, finset.mem_union, finset.mem_filter, nat.mul_eq_zero, and_true, ne.def,
and.congr_left_iff, not_false_iff, nat.mem_divisors, or_self],
apply hab.is_prime_pow_dvd_mul,
end
|
e51078a3f7fdca58776858b3e046690f04cb0e0d
|
ae1e94c332e17c7dc7051ce976d5a9eebe7ab8a5
|
/tests/lean/run/forInPArray.lean
|
17e8a1078b0b70f1d1754099f9cbea4a4c01de21
|
[
"Apache-2.0"
] |
permissive
|
dupuisf/lean4
|
d082d13b01243e1de29ae680eefb476961221eef
|
6a39c65bd28eb0e28c3870188f348c8914502718
|
refs/heads/master
| 1,676,948,755,391
| 1,610,665,114,000
| 1,610,665,114,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 840
|
lean
|
#lang lean4
import Std
def check (x : IO Nat) (expected : IO Nat) : IO Unit := do
unless (← x) == (← expected) do
throw $ IO.userError "unexpected result"
def f1 (xs : Std.PArray Nat) (top : Nat) : IO Nat := do
let mut sum := 0
for x in xs do
if x % 2 == 0 then
IO.println s!"x: {x}"
sum := sum + x
if sum > top then
return sum
IO.println s!"sum: {sum}"
return sum
#eval f1 [1, 2, 3, 4, 5, 10, 20].toPersistentArray 10
#eval check (f1 [1, 2, 3, 4, 5, 10, 20].toPersistentArray 10) (pure 16)
def f2 (xs : Std.PArray Nat) (top : Nat) : IO Nat := do
let mut sum := 0
for x in xs do
if x % 2 == 0 then
IO.println s!"x: {x}"
sum := sum + x
if sum > top then
break
IO.println s!"sum: {sum}"
return sum
#eval check (f1 (List.iota 100).toPersistentArray 1000) (f2 (List.iota 100).toPersistentArray 1000)
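-- Added check (illustrative): on the small example above, `f2` should produce the
-- same result as `f1`, namely 16.
#eval check (f2 [1, 2, 3, 4, 5, 10, 20].toPersistentArray 10) (pure 16)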
|
7533cd2ed289806a6ccf8569b5e8a8e5aa2dcb2f
|
57c233acf9386e610d99ed20ef139c5f97504ba3
|
/src/category_theory/category/Cat.lean
|
cf80ad14a584608cf8a1392d03022436ef23a1d1
|
[
"Apache-2.0"
] |
permissive
|
robertylewis/mathlib
|
3d16e3e6daf5ddde182473e03a1b601d2810952c
|
1d13f5b932f5e40a8308e3840f96fc882fae01f0
|
refs/heads/master
| 1,651,379,945,369
| 1,644,276,960,000
| 1,644,276,960,000
| 98,875,504
| 0
| 0
|
Apache-2.0
| 1,644,253,514,000
| 1,501,495,700,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 3,188
|
lean
|
/-
Copyright (c) 2019 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import category_theory.concrete_category.bundled
import category_theory.discrete_category
import category_theory.types
import category_theory.bicategory.strict
/-!
# Category of categories
This file contains the definition of the category `Cat` of all categories.
In this category objects are categories and
morphisms are functors between these categories.
## Implementation notes
Though `Cat` is not a concrete category, we use `bundled` to define
its carrier type.
-/
universes v u
namespace category_theory
/-- Category of categories. -/
@[nolint check_univs] -- intended to be used with explicit universe parameters
def Cat := bundled category.{v u}
namespace Cat
instance : inhabited Cat := ⟨⟨Type u, category_theory.types⟩⟩
instance : has_coe_to_sort Cat (Type u) := ⟨bundled.α⟩
instance str (C : Cat.{v u}) : category.{v u} C := C.str
/-- Construct a bundled `Cat` from the underlying type and the typeclass. -/
def of (C : Type u) [category.{v} C] : Cat.{v u} := bundled.of C
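/- Added illustration (a sketch): `Type u` with its usual category structure bundles
into `Cat`, mirroring the `inhabited` instance above. -/
example : Cat.{u (u+1)} := Cat.of (Type u)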
/-- Bicategory structure on `Cat` -/
instance bicategory : bicategory.{(max v u) (max v u)} Cat.{v u} :=
{ hom := λ C D, C ⥤ D,
id := λ C, 𝟭 C,
comp := λ C D E F G, F ⋙ G,
hom_category := λ C D, functor.category C D,
whisker_left := λ C D E F G H η, whisker_left F η,
whisker_right := λ C D E F G η H, whisker_right η H,
associator := λ A B C D, functor.associator,
left_unitor := λ A B, functor.left_unitor,
right_unitor := λ A B, functor.right_unitor,
pentagon' := λ A B C D E, functor.pentagon,
triangle' := λ A B C, functor.triangle }
/-- `Cat` is a strict bicategory. -/
instance bicategory.strict : bicategory.strict Cat.{v u} :=
{ id_comp' := λ C D F, by cases F; refl,
comp_id' := λ C D F, by cases F; refl,
assoc' := by intros; refl }
/-- Category structure on `Cat` -/
instance category : large_category.{max v u} Cat.{v u} := strict_bicategory.category Cat.{v u}
/-- Functor that gets the set of objects of a category. It is not
called `forget`, because it is not a faithful functor. -/
def objects : Cat.{v u} ⥤ Type u :=
{ obj := λ C, C,
map := λ C D F, F.obj }
/-- Any isomorphism in `Cat` induces an equivalence of the underlying categories. -/
def equiv_of_iso {C D : Cat} (γ : C ≅ D) : C ≌ D :=
{ functor := γ.hom,
inverse := γ.inv,
unit_iso := eq_to_iso $ eq.symm γ.hom_inv_id,
counit_iso := eq_to_iso γ.inv_hom_id }
end Cat
/--
Embedding `Type` into `Cat` as discrete categories.
This ought to be modelled as a 2-functor!
-/
@[simps]
def Type_to_Cat : Type u ⥤ Cat :=
{ obj := λ X, Cat.of (discrete X),
map := λ X Y f, discrete.functor f,
map_id' := λ X, begin apply functor.ext, tidy, end,
map_comp' := λ X Y Z f g, begin apply functor.ext, tidy, end }
instance : faithful Type_to_Cat.{u} := {}
instance : full Type_to_Cat.{u} :=
{ preimage := λ X Y F, F.obj,
witness' :=
begin
intros X Y F,
apply functor.ext,
{ intros x y f, dsimp, ext, },
{ intros x, refl, }
end }
end category_theory
|
5bda0b2f9787edca8a7405d5d9af7157dbbd9139
|
a0e23cfdd129a671bf3154ee1a8a3a72bf4c7940
|
/stage0/src/Init/Data/Option/Basic.lean
|
9952cf42695210ed0dd6db334235b25617e3cb80
|
[
"Apache-2.0"
] |
permissive
|
WojciechKarpiel/lean4
|
7f89706b8e3c1f942b83a2c91a3a00b05da0e65b
|
f6e1314fa08293dea66a329e05b6c196a0189163
|
refs/heads/master
| 1,686,633,402,214
| 1,625,821,189,000
| 1,625,821,258,000
| 384,640,886
| 0
| 0
|
Apache-2.0
| 1,625,903,617,000
| 1,625,903,026,000
| null |
UTF-8
|
Lean
| false
| false
| 2,106
|
lean
|
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Core
import Init.Control.Basic
import Init.Coe
namespace Option
def toMonad [Monad m] [Alternative m] : Option α → m α
| none => failure
| some a => pure a
@[inline] def toBool : Option α → Bool
| some _ => true
| none => false
@[inline] def isSome : Option α → Bool
| some _ => true
| none => false
@[inline] def isNone : Option α → Bool
| some _ => false
| none => true
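-- Added sanity checks (illustrative, not part of the original file): the two
-- predicates above are complementary on `some` and `none`.
example : (some 1).isSome = true := rfl
example : (none : Option Nat).isNone = true := rfl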
@[inline] protected def bind : Option α → (α → Option β) → Option β
| none, b => none
| some a, b => b a
@[inline] protected def map (f : α → β) (o : Option α) : Option β :=
Option.bind o (some ∘ f)
theorem mapId : (Option.map id : Option α → Option α) = id :=
funext (fun o => match o with | none => rfl | some x => rfl)
instance : Functor Option where
map := Option.map
@[inline] protected def filter (p : α → Bool) : Option α → Option α
| some a => if p a then some a else none
| none => none
@[inline] protected def all (p : α → Bool) : Option α → Bool
| some a => p a
| none => true
@[inline] protected def any (p : α → Bool) : Option α → Bool
| some a => p a
| none => false
@[macroInline] protected def orElse : Option α → Option α → Option α
| some a, _ => some a
| none, b => b
instance : OrElse (Option α) where
orElse := Option.orElse
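-- Added sanity checks (illustrative): `orElse` keeps the first `some` and otherwise
-- falls back to its second argument.
example : Option.orElse (some 1) none = some 1 := rfl
example : Option.orElse none (some 2) = some 2 := rfl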
@[inline] protected def lt (r : α → α → Prop) : Option α → Option α → Prop
| none, some x => True
| some x, some y => r x y
| _, _ => False
instance (r : α → α → Prop) [s : DecidableRel r] : DecidableRel (Option.lt r)
| none, some y => isTrue trivial
| some x, some y => s x y
| some x, none => isFalse notFalse
| none, none => isFalse notFalse
end Option
deriving instance DecidableEq for Option
deriving instance BEq for Option
instance [LT α] : LT (Option α) where
lt := Option.lt (· < ·)
|
6bbaa0bb647ef038596f7de9f255ade22db9d93d
|
8cae430f0a71442d02dbb1cbb14073b31048e4b0
|
/src/measure_theory/group/geometry_of_numbers.lean
|
63dea545779a21c77e60564b3ccaa40cec97ab17
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathlib
|
56a2cadd17ac88caf4ece0a775932fa26327ba0e
|
442a83d738cb208d3600056c489be16900ba701d
|
refs/heads/master
| 1,693,584,102,358
| 1,693,471,902,000
| 1,693,471,902,000
| 97,922,418
| 1,595
| 352
|
Apache-2.0
| 1,694,693,445,000
| 1,500,624,130,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 4,091
|
lean
|
/-
Copyright (c) 2021 Alex J. Best. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alex J. Best
-/
import analysis.convex.measure
import measure_theory.group.fundamental_domain
import measure_theory.measure.lebesgue.eq_haar
/-!
# Geometry of numbers
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we prove some of the fundamental theorems in the geometry of numbers, as studied by
Hermann Minkowski.
## Main results
* `exists_pair_mem_lattice_not_disjoint_vadd`: Blichfeldt's principle, existence of two distinct
points in a subgroup such that the translates of a set by these two points are not disjoint when
  the covolume of the subgroup is smaller than the volume of the set.
* `exists_ne_zero_mem_lattice_of_measure_mul_two_pow_lt_measure`: Minkowski's theorem, existence of
a non-zero lattice point inside a convex symmetric domain of large enough volume.
## TODO
* Calculate the volume of the fundamental domain of a finite index subgroup
* Voronoi diagrams
* See [Pete L. Clark, *Abstract Geometry of Numbers: Linear Forms* (arXiv)](https://arxiv.org/abs/1405.2119)
for some more ideas.
## References
* [Pete L. Clark, *Geometry of Numbers with Applications to Number Theory*][clark_gon] p.28
-/
namespace measure_theory
open ennreal finite_dimensional measure_theory measure_theory.measure set
open_locale pointwise
variables {E L : Type*} [measurable_space E] {μ : measure E} {F s : set E}
/-- **Blichfeldt's Theorem**. If the volume of the set `s` is larger than the covolume of the
countable subgroup `L` of `E`, then there exist two distinct points `x, y ∈ L` such that `(x + s)`
and `(y + s)` are not disjoint. -/
lemma exists_pair_mem_lattice_not_disjoint_vadd [add_comm_group L] [countable L]
[add_action L E] [measurable_space L] [has_measurable_vadd L E] [vadd_invariant_measure L E μ]
(fund : is_add_fundamental_domain L F μ) (hS : null_measurable_set s μ) (h : μ F < μ s) :
∃ x y : L, x ≠ y ∧ ¬ disjoint (x +ᵥ s) (y +ᵥ s) :=
begin
contrapose! h,
exact ((fund.measure_eq_tsum _).trans (measure_Union₀ (pairwise.mono h $ λ i j hij,
(hij.mono inf_le_left inf_le_left).ae_disjoint) $ λ _,
(hS.vadd _).inter fund.null_measurable_set).symm).trans_le
(measure_mono $ Union_subset $ λ _, inter_subset_right _ _),
end
/-- The **Minkowski Convex Body Theorem**. If `s` is a convex symmetric domain of `E` whose volume
is large enough compared to the covolume of a lattice `L` of `E`, then it contains a non-zero
lattice point of `L`. -/
lemma exists_ne_zero_mem_lattice_of_measure_mul_two_pow_lt_measure [normed_add_comm_group E]
[normed_space ℝ E] [borel_space E] [finite_dimensional ℝ E] [is_add_haar_measure μ]
{L : add_subgroup E} [countable L] (fund : is_add_fundamental_domain L F μ)
(h : μ F * 2 ^ finrank ℝ E < μ s) (h_symm : ∀ x ∈ s, -x ∈ s) (h_conv : convex ℝ s) :
∃ x ≠ 0, ((x : L) : E) ∈ s :=
begin
have h_vol : μ F < μ ((2⁻¹ : ℝ) • s),
{ rwa [add_haar_smul_of_nonneg μ (by norm_num : 0 ≤ (2 : ℝ)⁻¹) s, ←mul_lt_mul_right
(pow_ne_zero (finrank ℝ E) (two_ne_zero' _)) (pow_ne_top two_ne_top), mul_right_comm,
of_real_pow (by norm_num : 0 ≤ (2 : ℝ)⁻¹), ←of_real_inv_of_pos zero_lt_two, of_real_bit0,
of_real_one, ←mul_pow, ennreal.inv_mul_cancel two_ne_zero two_ne_top, one_pow, one_mul] },
obtain ⟨x, y, hxy, h⟩ := exists_pair_mem_lattice_not_disjoint_vadd fund
((h_conv.smul _).null_measurable_set _) h_vol,
obtain ⟨_, ⟨v, hv, rfl⟩, w, hw, hvw⟩ := not_disjoint_iff.mp h,
refine ⟨x - y, sub_ne_zero.2 hxy, _⟩,
rw mem_inv_smul_set_iff₀ (two_ne_zero' ℝ) at hv hw,
simp_rw [add_subgroup.vadd_def, vadd_eq_add, add_comm _ w, ←sub_eq_sub_iff_add_eq_add,
←add_subgroup.coe_sub] at hvw,
rw [←hvw, ←inv_smul_smul₀ (two_ne_zero' ℝ) (_ - _), smul_sub, sub_eq_add_neg, smul_add],
refine h_conv hw (h_symm _ hv) _ _ _; norm_num,
end
end measure_theory
|
6f472ef0466bf1f596c85b42c881aaee6aa5775d
|
59a4b050600ed7b3d5826a8478db0a9bdc190252
|
/src/category_theory/universal/limits_are_limit_cones.lean
|
43cf01828c21a94f882cebcd14ac01a28c3b570a
|
[] |
no_license
|
rwbarton/lean-category-theory
|
f720268d800b62a25d69842ca7b5d27822f00652
|
00df814d463406b7a13a56f5dcda67758ba1b419
|
refs/heads/master
| 1,585,366,296,767
| 1,536,151,349,000
| 1,536,151,349,000
| 147,652,096
| 0
| 0
| null | 1,536,226,960,000
| 1,536,226,960,000
| null |
UTF-8
|
Lean
| false
| false
| 1,013
|
lean
|
import category_theory.universal.cones
import category_theory.limits.terminal
open category_theory
namespace category_theory.limits
universes u v
variables {J : Type v} [small_category J] {C : Type u} [𝒞 : category.{u v} C]
include 𝒞
variable {F : J ⥤ C}
def limit_cone_of_limit {t : cone F} (L : is_limit t) : is_terminal.{(max u v) v} t :=
{ lift := λ s, { hom := is_limit.lift t s, },
uniq' := begin tidy, apply is_limit.uniq t, tidy, end } -- TODO uniq is marked @[back'], but the unifier fails to apply it
def limit_of_limit_cone {t : cone F} (L : is_terminal.{(max u v) v} t) : is_limit t :=
{ lift := λ s, (@is_terminal.lift _ _ t L s).hom,
uniq' := begin tidy, have p := @is_terminal.uniq _ _ t L s { hom := m }, rw ← p, end }
def limits_are_limit_cones {t : cone F} : equiv (is_limit t) (is_terminal.{(max u v) v} t) :=
{ to_fun := limit_cone_of_limit,
inv_fun := limit_of_limit_cone,
left_inv := by obviously,
right_inv := by obviously }
end category_theory.limits
|
0655020cf58067944b59821e973631da05fbaf41
|
ac1c2a2f522b0fdf854095ba00f882ca849669e7
|
/library/init/meta/environment.lean
|
55dd3539e5dc8500c207d997a54a9905ea22c99c
|
[
"Apache-2.0"
] |
permissive
|
abliss/lean
|
b8b336abc8d50dbb0726dcff9dd16793c23bfbe1
|
fb24cc99573c153f97a1951ee94bbbdda300b6be
|
refs/heads/master
| 1,611,536,584,520
| 1,497,811,981,000
| 1,497,811,981,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 6,049
|
lean
|
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.meta.declaration init.meta.exceptional init.data.option.basic
import init.meta.rb_map
meta constant environment : Type
namespace environment
/--
Information for a projection declaration
- `cname` is the name of the constructor associated with the projection.
- `nparams` is the number of constructor parameters
- `idx` is the parameter being projected by this projection
- `is_class` is tt iff this is a class projection.
-/
structure projection_info :=
(cname : name)
(nparams : nat)
(idx : nat)
(is_class : bool)
/-- Create a standard environment using the given trust level -/
meta constant mk_std : nat → environment
/-- Return the trust level of the given environment -/
meta constant trust_lvl : environment → nat
/-- Add a new declaration to the environment -/
meta constant add : environment → declaration → exceptional environment
/-- Retrieve a declaration from the environment -/
meta constant get : environment → name → exceptional declaration
meta def contains (env : environment) (d : name) : bool :=
match env.get d with
| exceptional.success _ := tt
| exceptional.exception ._ _ := ff
end
/-- Return tt iff the given name is a namespace -/
meta constant is_namespace : environment → name → bool
/-- Add a new inductive datatype to the environment
name, universe parameters, number of parameters, type, constructors (name and type), is_meta -/
meta constant add_inductive : environment → name → list name → nat → expr → list (name × expr) → bool →
exceptional environment
/-- Return tt iff the given name is an inductive datatype -/
meta constant is_inductive : environment → name → bool
/-- Return tt iff the given name is a constructor -/
meta constant is_constructor : environment → name → bool
/-- Return tt iff the given name is a recursor -/
meta constant is_recursor : environment → name → bool
/-- Return tt iff the given name is a recursive inductive datatype -/
meta constant is_recursive : environment → name → bool
/-- Return the name of the inductive datatype of the given constructor. -/
meta constant inductive_type_of : environment → name → option name
/-- Return the constructors of the inductive datatype with the given name -/
meta constant constructors_of : environment → name → list name
/-- Return the recursor of the given inductive datatype -/
meta constant recursor_of : environment → name → option name
/-- Return the number of parameters of the inductive datatype -/
meta constant inductive_num_params : environment → name → nat
/-- Return the number of indices of the inductive datatype -/
meta constant inductive_num_indices : environment → name → nat
/-- Return tt iff the inductive datatype recursor supports dependent elimination -/
meta constant inductive_dep_elim : environment → name → bool
/-- Return tt iff the given name is a generalized inductive datatype -/
meta constant is_ginductive : environment → name → bool
meta constant is_projection : environment → name → option projection_info
/-- Fold over declarations in the environment -/
meta constant fold {α :Type} : environment → α → (declaration → α → α) → α
/-- `relation_info env n` returns some value if n is marked as a relation in the given environment.
The tuple contains: the total number of arguments of the relation, the lhs position and the rhs position. -/
meta constant relation_info : environment → name → option (nat × nat × nat)
/-- `refl_for env R` returns the name of the reflexivity theorem for the relation R -/
meta constant refl_for : environment → name → option name
/-- `symm_for env R` returns the name of the symmetry theorem for the relation R -/
meta constant symm_for : environment → name → option name
/-- `trans_for env R` returns the name of the transitivity theorem for the relation R -/
meta constant trans_for : environment → name → option name
/-- `decl_olean env d` returns the name of the .olean file where d was defined.
The result is none if d was not defined in an imported file. -/
meta constant decl_olean : environment → name → option string
/-- `decl_pos env d` returns the source location of d if available. -/
meta constant decl_pos : environment → name → option pos
/-- Return the fields of the structure with the given name, or `none` if it is not a structure -/
meta constant structure_fields : environment → name → option (list name)
/-- `get_class_attribute_symbols env attr_name` returns symbols
occurring in instances of type classes tagged with the attribute `attr_name`.
Example: [algebra] -/
meta constant get_class_attribute_symbols : environment → name → name_set
meta constant fingerprint : environment → nat
open expr
meta constant unfold_untrusted_macros : environment → expr → expr
meta def is_constructor_app (env : environment) (e : expr) : bool :=
is_constant (get_app_fn e) && is_constructor env (const_name (get_app_fn e))
meta def is_refl_app (env : environment) (e : expr) : option (name × expr × expr) :=
match (refl_for env (const_name (get_app_fn e))) with
| (some n) :=
if get_app_num_args e ≥ 2
then some (n, app_arg (app_fn e), app_arg e)
else none
| none := none
end
/-- Return true if 'n' has been declared in the current file -/
meta def in_current_file (env : environment) (n : name) : bool :=
(env.decl_olean n).is_none && env.contains n
meta def is_definition (env : environment) (n : name) : bool :=
match env.get n with
| exceptional.success (declaration.defn _ _ _ _ _ _) := tt
| _ := ff
end
end environment
meta instance : has_to_string environment :=
⟨λ e, "[environment]"⟩
meta instance : inhabited environment :=
⟨environment.mk_std 0⟩
|
1c4da65838deb07b7c57bc833c6c9151f96457cd
|
57c233acf9386e610d99ed20ef139c5f97504ba3
|
/src/order/liminf_limsup.lean
|
6d1d199cf67ccaea219ea697fad09e9947d01897
|
[
"Apache-2.0"
] |
permissive
|
robertylewis/mathlib
|
3d16e3e6daf5ddde182473e03a1b601d2810952c
|
1d13f5b932f5e40a8308e3840f96fc882fae01f0
|
refs/heads/master
| 1,651,379,945,369
| 1,644,276,960,000
| 1,644,276,960,000
| 98,875,504
| 0
| 0
|
Apache-2.0
| 1,644,253,514,000
| 1,501,495,700,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 25,521
|
lean
|
/-
Copyright (c) 2018 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Johannes Hölzl, Rémy Degenne
-/
import order.filter.cofinite
/-!
# liminfs and limsups of functions and filters
Defines the Liminf/Limsup of a function taking values in a conditionally complete lattice, with
respect to an arbitrary filter.
We define `f.Limsup` (`f.Liminf`) where `f` is a filter taking values in a conditionally complete
lattice. `f.Limsup` is the smallest element `a` such that, eventually, `u ≤ a` (and vice versa for
`f.Liminf`). To work with the Limsup along a function `u` use `(f.map u).Limsup`.
Usually, one defines the Limsup as `Inf (Sup s)` where the Inf is taken over all sets in the filter.
For instance, in ℕ along a function `u`, this is `Inf_n (Sup_{k ≥ n} u k)` (and the latter quantity
decreases with `n`, so this is in fact a limit). There is however a difficulty: it is entirely
possible that `u` is not bounded on the whole space, only eventually (think of `Limsup (λx, 1/x)` on
ℝ). Then there is no guarantee that the quantity above really decreases (the value of the `Sup`
beforehand is not really well defined, as one cannot use ∞), so the Inf could be anything. So one
cannot
use this `Inf Sup ...` definition in conditionally complete lattices, and one has to use a less
tractable definition.
In conditionally complete lattices, the definition is only useful for filters which are eventually
bounded above (otherwise, the Limsup would morally be +∞, which does not belong to the space) and
which are frequently bounded below (otherwise, the Limsup would morally be -∞, which is not in the
space either). We start with definitions of these concepts for arbitrary filters, before turning to
the definitions of Limsup and Liminf.
In complete lattices, however, it coincides with the `Inf Sup` definition.
-/
open filter set
open_locale filter
variables {α β ι : Type*}
namespace filter
section relation
/-- `f.is_bounded (≺)`: the filter `f` is eventually bounded w.r.t. the relation `≺`, i.e.
eventually, it is bounded by some uniform bound.
`r` will be usually instantiated with `≤` or `≥`. -/
def is_bounded (r : α → α → Prop) (f : filter α) := ∃ b, ∀ᶠ x in f, r x b
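/- Added illustration (a small sketch, not in the original file): on ℕ, the filter
`at_top` is bounded w.r.t. `(≥)`, with `0` as the uniform bound. -/
example : (at_top : filter ℕ).is_bounded (≥) :=
⟨0, eventually_of_forall (λ n, nat.zero_le n)⟩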
/-- `f.is_bounded_under (≺) u`: the image of the filter `f` under `u` is eventually bounded w.r.t.
the relation `≺`, i.e. eventually, it is bounded by some uniform bound. -/
def is_bounded_under (r : α → α → Prop) (f : filter β) (u : β → α) := (f.map u).is_bounded r
variables {r : α → α → Prop} {f g : filter α}
/-- `f` is eventually bounded if and only if, there exists an admissible set on which it is
bounded. -/
lemma is_bounded_iff : f.is_bounded r ↔ (∃s∈f.sets, ∃b, s ⊆ {x | r x b}) :=
iff.intro
(assume ⟨b, hb⟩, ⟨{a | r a b}, hb, b, subset.refl _⟩)
(assume ⟨s, hs, b, hb⟩, ⟨b, mem_of_superset hs hb⟩)
/-- A bounded function `u` is in particular eventually bounded. -/
lemma is_bounded_under_of {f : filter β} {u : β → α} :
(∃b, ∀x, r (u x) b) → f.is_bounded_under r u
| ⟨b, hb⟩ := ⟨b, show ∀ᶠ x in f, r (u x) b, from eventually_of_forall hb⟩
lemma is_bounded_bot : is_bounded r ⊥ ↔ nonempty α :=
by simp [is_bounded, exists_true_iff_nonempty]
lemma is_bounded_top : is_bounded r ⊤ ↔ (∃t, ∀x, r x t) :=
by simp [is_bounded, eq_univ_iff_forall]
lemma is_bounded_principal (s : set α) : is_bounded r (𝓟 s) ↔ (∃t, ∀x∈s, r x t) :=
by simp [is_bounded, subset_def]
lemma is_bounded_sup [is_trans α r] (hr : ∀b₁ b₂, ∃b, r b₁ b ∧ r b₂ b) :
is_bounded r f → is_bounded r g → is_bounded r (f ⊔ g)
| ⟨b₁, h₁⟩ ⟨b₂, h₂⟩ := let ⟨b, rb₁b, rb₂b⟩ := hr b₁ b₂ in
⟨b, eventually_sup.mpr ⟨h₁.mono (λ x h, trans h rb₁b), h₂.mono (λ x h, trans h rb₂b)⟩⟩
lemma is_bounded.mono (h : f ≤ g) : is_bounded r g → is_bounded r f
| ⟨b, hb⟩ := ⟨b, h hb⟩
lemma is_bounded_under.mono {f g : filter β} {u : β → α} (h : f ≤ g) :
g.is_bounded_under r u → f.is_bounded_under r u :=
λ hg, hg.mono (map_mono h)
lemma is_bounded.is_bounded_under {q : β → β → Prop} {u : α → β}
(hf : ∀a₀ a₁, r a₀ a₁ → q (u a₀) (u a₁)) : f.is_bounded r → f.is_bounded_under q u
| ⟨b, h⟩ := ⟨u b, show ∀ᶠ x in f, q (u x) (u b), from h.mono (λ x, hf x b)⟩
lemma not_is_bounded_under_of_tendsto_at_top [preorder β] [no_max_order β] {f : α → β}
{l : filter α} [l.ne_bot] (hf : tendsto f l at_top) :
¬ is_bounded_under (≤) l f :=
begin
rintro ⟨b, hb⟩,
rw eventually_map at hb,
obtain ⟨b', h⟩ := exists_gt b,
have hb' := (tendsto_at_top.mp hf) b',
have : {x : α | f x ≤ b} ∩ {x : α | b' ≤ f x} = ∅ :=
eq_empty_of_subset_empty (λ x hx, (not_le_of_lt h) (le_trans hx.2 hx.1)),
exact (nonempty_of_mem (hb.and hb')).ne_empty this
end
lemma not_is_bounded_under_of_tendsto_at_bot [preorder β] [no_min_order β] {f : α → β}
  {l : filter α} [l.ne_bot] (hf : tendsto f l at_bot) :
¬ is_bounded_under (≥) l f :=
@not_is_bounded_under_of_tendsto_at_top α (order_dual β) _ _ _ _ _ hf
lemma is_bounded_under.bdd_above_range_of_cofinite [semilattice_sup β] {f : α → β}
(hf : is_bounded_under (≤) cofinite f) : bdd_above (range f) :=
begin
rcases hf with ⟨b, hb⟩,
haveI : nonempty β := ⟨b⟩,
rw [← image_univ, ← union_compl_self {x | f x ≤ b}, image_union, bdd_above_union],
exact ⟨⟨b, ball_image_iff.2 $ λ x, id⟩, (hb.image f).bdd_above⟩
end
lemma is_bounded_under.bdd_below_range_of_cofinite [semilattice_inf β] {f : α → β}
(hf : is_bounded_under (≥) cofinite f) : bdd_below (range f) :=
@is_bounded_under.bdd_above_range_of_cofinite α (order_dual β) _ _ hf
lemma is_bounded_under.bdd_above_range [semilattice_sup β] {f : ℕ → β}
(hf : is_bounded_under (≤) at_top f) : bdd_above (range f) :=
by { rw ← nat.cofinite_eq_at_top at hf, exact hf.bdd_above_range_of_cofinite }
lemma is_bounded_under.bdd_below_range [semilattice_inf β] {f : ℕ → β}
(hf : is_bounded_under (≥) at_top f) : bdd_below (range f) :=
@is_bounded_under.bdd_above_range (order_dual β) _ _ hf
/-- `is_cobounded (≺) f` states that the filter `f` does not tend to infinity w.r.t. `≺`. This is
also called frequently bounded. Will be usually instantiated with `≤` or `≥`.
There is a subtlety in this definition: we want `f.is_cobounded` to hold for any `f` in the case of
complete lattices. This will be relevant to deduce theorems on complete lattices from their
versions on conditionally complete lattices with additional assumptions. We have to be careful in
the edge case of the trivial filter containing the empty set: the other natural definition
`¬ ∀ a, ∀ᶠ n in f, a ≤ n`
would not work as well in this case.
-/
def is_cobounded (r : α → α → Prop) (f : filter α) := ∃b, ∀a, (∀ᶠ x in f, r x a) → r b a
/-- `is_cobounded_under (≺) f u` states that the image of the filter `f` under the map `u` does not
tend to infinity w.r.t. `≺`. This is also called frequently bounded. Will be usually instantiated
with `≤` or `≥`. -/
def is_cobounded_under (r : α → α → Prop) (f : filter β) (u : β → α) := (f.map u).is_cobounded r
/-- To check that a filter is frequently bounded, it suffices to have a witness
which bounds `f` at some point for every admissible set.
This is only an implication, as the other direction is wrong for the trivial filter.-/
lemma is_cobounded.mk [is_trans α r] (a : α) (h : ∀s∈f, ∃x∈s, r a x) : f.is_cobounded r :=
⟨a, assume y s, let ⟨x, h₁, h₂⟩ := h _ s in trans h₂ h₁⟩
/-- A filter which is eventually bounded is in particular frequently bounded (in the opposite
direction). At least if the filter is not trivial. -/
lemma is_bounded.is_cobounded_flip [is_trans α r] [ne_bot f] :
f.is_bounded r → f.is_cobounded (flip r)
| ⟨a, ha⟩ := ⟨a, assume b hb,
let ⟨x, rxa, rbx⟩ := (ha.and hb).exists in
show r b a, from trans rbx rxa⟩
lemma is_bounded.is_cobounded_ge [preorder α] [ne_bot f] (h : f.is_bounded (≤)) :
f.is_cobounded (≥) :=
h.is_cobounded_flip
lemma is_bounded.is_cobounded_le [preorder α] [ne_bot f] (h : f.is_bounded (≥)) :
f.is_cobounded (≤) :=
h.is_cobounded_flip
lemma is_cobounded_bot : is_cobounded r ⊥ ↔ (∃b, ∀x, r b x) :=
by simp [is_cobounded]
lemma is_cobounded_top : is_cobounded r ⊤ ↔ nonempty α :=
by simp [is_cobounded, eq_univ_iff_forall, exists_true_iff_nonempty] {contextual := tt}
lemma is_cobounded_principal (s : set α) :
(𝓟 s).is_cobounded r ↔ (∃b, ∀a, (∀x∈s, r x a) → r b a) :=
by simp [is_cobounded, subset_def]
lemma is_cobounded.mono (h : f ≤ g) : f.is_cobounded r → g.is_cobounded r
| ⟨b, hb⟩ := ⟨b, assume a ha, hb a (h ha)⟩
end relation
lemma is_cobounded_le_of_bot [preorder α] [order_bot α] {f : filter α} : f.is_cobounded (≤) :=
⟨⊥, assume a h, bot_le⟩
lemma is_cobounded_ge_of_top [preorder α] [order_top α] {f : filter α} : f.is_cobounded (≥) :=
⟨⊤, assume a h, le_top⟩
lemma is_bounded_le_of_top [preorder α] [order_top α] {f : filter α} : f.is_bounded (≤) :=
⟨⊤, eventually_of_forall $ λ _, le_top⟩
lemma is_bounded_ge_of_bot [preorder α] [order_bot α] {f : filter α} : f.is_bounded (≥) :=
⟨⊥, eventually_of_forall $ λ _, bot_le⟩
lemma is_bounded_under_sup [semilattice_sup α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≤) u → f.is_bounded_under (≤) v → f.is_bounded_under (≤) (λa, u a ⊔ v a)
| ⟨bu, (hu : ∀ᶠ x in f, u x ≤ bu)⟩ ⟨bv, (hv : ∀ᶠ x in f, v x ≤ bv)⟩ :=
⟨bu ⊔ bv, show ∀ᶠ x in f, u x ⊔ v x ≤ bu ⊔ bv,
by filter_upwards [hu, hv] with _ using sup_le_sup⟩
lemma is_bounded_under_inf [semilattice_inf α] {f : filter β} {u v : β → α} :
f.is_bounded_under (≥) u → f.is_bounded_under (≥) v → f.is_bounded_under (≥) (λa, u a ⊓ v a)
| ⟨bu, (hu : ∀ᶠ x in f, u x ≥ bu)⟩ ⟨bv, (hv : ∀ᶠ x in f, v x ≥ bv)⟩ :=
⟨bu ⊓ bv, show ∀ᶠ x in f, u x ⊓ v x ≥ bu ⊓ bv,
by filter_upwards [hu, hv] with _ using inf_le_inf⟩
/-- Filters are automatically bounded or cobounded in complete lattices. To use the same statements
in complete and conditionally complete lattices but let automation fill automatically the
boundedness proofs in complete lattices, we use the tactic `is_bounded_default` in the statements,
in the form `(hf : f.is_bounded (≥) . is_bounded_default)`. -/
meta def is_bounded_default : tactic unit :=
tactic.applyc ``is_cobounded_le_of_bot <|>
tactic.applyc ``is_cobounded_ge_of_top <|>
tactic.applyc ``is_bounded_le_of_top <|>
tactic.applyc ``is_bounded_ge_of_bot
section conditionally_complete_lattice
variables [conditionally_complete_lattice α]
/-- The `Limsup` of a filter `f` is the infimum of the `a` such that, eventually for `f`,
holds `x ≤ a`. -/
def Limsup (f : filter α) : α := Inf { a | ∀ᶠ n in f, n ≤ a }
/-- The `Liminf` of a filter `f` is the supremum of the `a` such that, eventually for `f`,
holds `x ≥ a`. -/
def Liminf (f : filter α) : α := Sup { a | ∀ᶠ n in f, a ≤ n }
/-- The `limsup` of a function `u` along a filter `f` is the infimum of the `a` such that,
eventually for `f`, holds `u x ≤ a`. -/
def limsup (f : filter β) (u : β → α) : α := (f.map u).Limsup
/-- The `liminf` of a function `u` along a filter `f` is the supremum of the `a` such that,
eventually for `f`, holds `u x ≥ a`. -/
def liminf (f : filter β) (u : β → α) : α := (f.map u).Liminf
section
variables {f : filter β} {u : β → α}
theorem limsup_eq : f.limsup u = Inf { a | ∀ᶠ n in f, u n ≤ a } := rfl
theorem liminf_eq : f.liminf u = Sup { a | ∀ᶠ n in f, a ≤ u n } := rfl
end
theorem Limsup_le_of_le {f : filter α} {a}
(hf : f.is_cobounded (≤) . is_bounded_default) (h : ∀ᶠ n in f, n ≤ a) : f.Limsup ≤ a :=
cInf_le hf h
theorem le_Liminf_of_le {f : filter α} {a}
(hf : f.is_cobounded (≥) . is_bounded_default) (h : ∀ᶠ n in f, a ≤ n) : a ≤ f.Liminf :=
le_cSup hf h
theorem le_Limsup_of_le {f : filter α} {a}
(hf : f.is_bounded (≤) . is_bounded_default) (h : ∀ b, (∀ᶠ n in f, n ≤ b) → a ≤ b) :
a ≤ f.Limsup :=
le_cInf hf h
theorem Liminf_le_of_le {f : filter α} {a}
(hf : f.is_bounded (≥) . is_bounded_default) (h : ∀ b, (∀ᶠ n in f, b ≤ n) → b ≤ a) :
f.Liminf ≤ a :=
cSup_le hf h
theorem Liminf_le_Limsup {f : filter α} [ne_bot f]
(h₁ : f.is_bounded (≤) . is_bounded_default) (h₂ : f.is_bounded (≥) . is_bounded_default) :
f.Liminf ≤ f.Limsup :=
Liminf_le_of_le h₂ $ assume a₀ ha₀, le_Limsup_of_le h₁ $ assume a₁ ha₁,
show a₀ ≤ a₁, from let ⟨b, hb₀, hb₁⟩ := (ha₀.and ha₁).exists in le_trans hb₀ hb₁
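/- Added illustration (a sketch; `γ` and `l` are hypothetical names): in a complete
lattice the boundedness side conditions always hold, via `is_bounded_le_of_top` and
`is_bounded_ge_of_bot` above, so `Liminf_le_Limsup` applies with no extra work. -/
example {γ : Type*} [complete_lattice γ] (l : filter γ) [ne_bot l] :
  l.Liminf ≤ l.Limsup :=
Liminf_le_Limsup is_bounded_le_of_top is_bounded_ge_of_bot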
lemma Liminf_le_Liminf {f g : filter α}
(hf : f.is_bounded (≥) . is_bounded_default) (hg : g.is_cobounded (≥) . is_bounded_default)
(h : ∀ a, (∀ᶠ n in f, a ≤ n) → ∀ᶠ n in g, a ≤ n) : f.Liminf ≤ g.Liminf :=
cSup_le_cSup hg hf h
lemma Limsup_le_Limsup {f g : filter α}
(hf : f.is_cobounded (≤) . is_bounded_default) (hg : g.is_bounded (≤) . is_bounded_default)
(h : ∀ a, (∀ᶠ n in g, n ≤ a) → ∀ᶠ n in f, n ≤ a) : f.Limsup ≤ g.Limsup :=
cInf_le_cInf hf hg h
lemma Limsup_le_Limsup_of_le {f g : filter α} (h : f ≤ g)
(hf : f.is_cobounded (≤) . is_bounded_default) (hg : g.is_bounded (≤) . is_bounded_default) :
f.Limsup ≤ g.Limsup :=
Limsup_le_Limsup hf hg (assume a ha, h ha)
lemma Liminf_le_Liminf_of_le {f g : filter α} (h : g ≤ f)
(hf : f.is_bounded (≥) . is_bounded_default) (hg : g.is_cobounded (≥) . is_bounded_default) :
f.Liminf ≤ g.Liminf :=
Liminf_le_Liminf hf hg (assume a ha, h ha)
lemma limsup_le_limsup {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : u ≤ᶠ[f] v)
(hu : f.is_cobounded_under (≤) u . is_bounded_default)
(hv : f.is_bounded_under (≤) v . is_bounded_default) :
f.limsup u ≤ f.limsup v :=
Limsup_le_Limsup hu hv $ assume b, h.trans
lemma liminf_le_liminf {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a ≤ v a)
(hu : f.is_bounded_under (≥) u . is_bounded_default)
(hv : f.is_cobounded_under (≥) v . is_bounded_default) :
f.liminf u ≤ f.liminf v :=
@limsup_le_limsup (order_dual β) α _ _ _ _ h hv hu
lemma limsup_le_limsup_of_le {α β} [conditionally_complete_lattice β] {f g : filter α} (h : f ≤ g)
{u : α → β} (hf : f.is_cobounded_under (≤) u . is_bounded_default)
(hg : g.is_bounded_under (≤) u . is_bounded_default) :
f.limsup u ≤ g.limsup u :=
Limsup_le_Limsup_of_le (map_mono h) hf hg
lemma liminf_le_liminf_of_le {α β} [conditionally_complete_lattice β] {f g : filter α} (h : g ≤ f)
{u : α → β} (hf : f.is_bounded_under (≥) u . is_bounded_default)
(hg : g.is_cobounded_under (≥) u . is_bounded_default) :
f.liminf u ≤ g.liminf u :=
Liminf_le_Liminf_of_le (map_mono h) hf hg
theorem Limsup_principal {s : set α} (h : bdd_above s) (hs : s.nonempty) :
(𝓟 s).Limsup = Sup s :=
by simp [Limsup]; exact cInf_upper_bounds_eq_cSup h hs
theorem Liminf_principal {s : set α} (h : bdd_below s) (hs : s.nonempty) :
(𝓟 s).Liminf = Inf s :=
@Limsup_principal (order_dual α) _ s h hs
lemma limsup_congr {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a = v a) : limsup f u = limsup f v :=
begin
rw limsup_eq,
congr' with b,
exact eventually_congr (h.mono $ λ x hx, by simp [hx])
end
lemma liminf_congr {α : Type*} [conditionally_complete_lattice β] {f : filter α} {u v : α → β}
(h : ∀ᶠ a in f, u a = v a) : liminf f u = liminf f v :=
@limsup_congr (order_dual β) _ _ _ _ _ h
lemma limsup_const {α : Type*} [conditionally_complete_lattice β] {f : filter α} [ne_bot f]
(b : β) : limsup f (λ x, b) = b :=
by simpa only [limsup_eq, eventually_const] using cInf_Ici
lemma liminf_const {α : Type*} [conditionally_complete_lattice β] {f : filter α} [ne_bot f]
(b : β) : liminf f (λ x, b) = b :=
@limsup_const (order_dual β) α _ f _ b
lemma liminf_le_limsup {f : filter β} [ne_bot f] {u : β → α}
(h : f.is_bounded_under (≤) u . is_bounded_default)
(h' : f.is_bounded_under (≥) u . is_bounded_default) :
liminf f u ≤ limsup f u :=
Liminf_le_Limsup h h'
end conditionally_complete_lattice
section complete_lattice
variables [complete_lattice α]
@[simp] theorem Limsup_bot : (⊥ : filter α).Limsup = ⊥ :=
bot_unique $ Inf_le $ by simp
@[simp] theorem Liminf_bot : (⊥ : filter α).Liminf = ⊤ :=
top_unique $ le_Sup $ by simp
@[simp] theorem Limsup_top : (⊤ : filter α).Limsup = ⊤ :=
top_unique $ le_Inf $
by simp [eq_univ_iff_forall]; exact assume b hb, (top_unique $ hb _)
@[simp] theorem Liminf_top : (⊤ : filter α).Liminf = ⊥ :=
bot_unique $ Sup_le $
by simp [eq_univ_iff_forall]; exact assume b hb, (bot_unique $ hb _)
/-- Same as limsup_const applied to `⊥` but without the `ne_bot f` assumption -/
lemma limsup_const_bot {f : filter β} : limsup f (λ x : β, (⊥ : α)) = (⊥ : α) :=
begin
rw [limsup_eq, eq_bot_iff],
exact Inf_le (eventually_of_forall (λ x, le_rfl)),
end
/-- Same as limsup_const applied to `⊤` but without the `ne_bot f` assumption -/
lemma liminf_const_top {f : filter β} : liminf f (λ x : β, (⊤ : α)) = (⊤ : α) :=
@limsup_const_bot (order_dual α) β _ _
theorem has_basis.Limsup_eq_infi_Sup {ι} {p : ι → Prop} {s} {f : filter α} (h : f.has_basis p s) :
f.Limsup = ⨅ i (hi : p i), Sup (s i) :=
le_antisymm
(le_binfi $ λ i hi, Inf_le $ h.eventually_iff.2 ⟨i, hi, λ x, le_Sup⟩)
(le_Inf $ assume a ha, let ⟨i, hi, ha⟩ := h.eventually_iff.1 ha in
infi_le_of_le _ $ infi_le_of_le hi $ Sup_le ha)
theorem has_basis.Liminf_eq_supr_Inf {p : ι → Prop} {s : ι → set α} {f : filter α}
(h : f.has_basis p s) : f.Liminf = ⨆ i (hi : p i), Inf (s i) :=
@has_basis.Limsup_eq_infi_Sup (order_dual α) _ _ _ _ _ h
theorem Limsup_eq_infi_Sup {f : filter α} : f.Limsup = ⨅ s ∈ f, Sup s :=
f.basis_sets.Limsup_eq_infi_Sup
theorem Liminf_eq_supr_Inf {f : filter α} : f.Liminf = ⨆ s ∈ f, Inf s :=
@Limsup_eq_infi_Sup (order_dual α) _ _
/-- In a complete lattice, the limsup of a function is the infimum over sets `s` in the filter
of the supremum of the function over `s` -/
theorem limsup_eq_infi_supr {f : filter β} {u : β → α} : f.limsup u = ⨅ s ∈ f, ⨆ a ∈ s, u a :=
(f.basis_sets.map u).Limsup_eq_infi_Sup.trans $
by simp only [Sup_image, id]
lemma limsup_eq_infi_supr_of_nat {u : ℕ → α} : limsup at_top u = ⨅ n : ℕ, ⨆ i ≥ n, u i :=
(at_top_basis.map u).Limsup_eq_infi_Sup.trans $
by simp only [Sup_image, infi_const]; refl
lemma limsup_eq_infi_supr_of_nat' {u : ℕ → α} : limsup at_top u = ⨅ n : ℕ, ⨆ i : ℕ, u (i + n) :=
by simp only [limsup_eq_infi_supr_of_nat, supr_ge_eq_supr_nat_add]
theorem has_basis.limsup_eq_infi_supr {p : ι → Prop} {s : ι → set β} {f : filter β} {u : β → α}
(h : f.has_basis p s) : f.limsup u = ⨅ i (hi : p i), ⨆ a ∈ s i, u a :=
(h.map u).Limsup_eq_infi_Sup.trans $ by simp only [Sup_image, id]
/-- In a complete lattice, the liminf of a function is the infimum over sets `s` in the filter
of the supremum of the function over `s` -/
theorem liminf_eq_supr_infi {f : filter β} {u : β → α} : f.liminf u = ⨆ s ∈ f, ⨅ a ∈ s, u a :=
@limsup_eq_infi_supr (order_dual α) β _ _ _
lemma liminf_eq_supr_infi_of_nat {u : ℕ → α} : liminf at_top u = ⨆ n : ℕ, ⨅ i ≥ n, u i :=
@limsup_eq_infi_supr_of_nat (order_dual α) _ u
lemma liminf_eq_supr_infi_of_nat' {u : ℕ → α} : liminf at_top u = ⨆ n : ℕ, ⨅ i : ℕ, u (i + n) :=
@limsup_eq_infi_supr_of_nat' (order_dual α) _ _
theorem has_basis.liminf_eq_supr_infi {p : ι → Prop} {s : ι → set β} {f : filter β} {u : β → α}
(h : f.has_basis p s) : f.liminf u = ⨆ i (hi : p i), ⨅ a ∈ s i, u a :=
@has_basis.limsup_eq_infi_supr (order_dual α) _ _ _ _ _ _ _ h
@[simp] lemma liminf_nat_add (f : ℕ → α) (k : ℕ) :
at_top.liminf (λ i, f (i + k)) = at_top.liminf f :=
by { simp_rw liminf_eq_supr_infi_of_nat, exact supr_infi_ge_nat_add f k }
@[simp] lemma limsup_nat_add (f : ℕ → α) (k : ℕ) :
at_top.limsup (λ i, f (i + k)) = at_top.limsup f :=
@liminf_nat_add (order_dual α) _ f k
lemma liminf_le_of_frequently_le' {α β} [complete_lattice β]
{f : filter α} {u : α → β} {x : β} (h : ∃ᶠ a in f, u a ≤ x) :
f.liminf u ≤ x :=
begin
rw liminf_eq,
refine Sup_le (λ b hb, _),
have hbx : ∃ᶠ a in f, b ≤ x,
{ revert h,
rw [←not_imp_not, not_frequently, not_frequently],
exact λ h, hb.mp (h.mono (λ a hbx hba hax, hbx (hba.trans hax))), },
exact hbx.exists.some_spec,
end
lemma le_limsup_of_frequently_le' {α β} [complete_lattice β]
{f : filter α} {u : α → β} {x : β} (h : ∃ᶠ a in f, x ≤ u a) :
x ≤ f.limsup u :=
@liminf_le_of_frequently_le' _ (order_dual β) _ _ _ _ h
end complete_lattice
section conditionally_complete_linear_order
lemma eventually_lt_of_lt_liminf {f : filter α} [conditionally_complete_linear_order β]
{u : α → β} {b : β} (h : b < liminf f u) (hu : f.is_bounded_under (≥) u . is_bounded_default) :
∀ᶠ a in f, b < u a :=
begin
obtain ⟨c, hc, hbc⟩ : ∃ (c : β) (hc : c ∈ {c : β | ∀ᶠ (n : α) in f, c ≤ u n}), b < c :=
exists_lt_of_lt_cSup hu h,
exact hc.mono (λ x hx, lt_of_lt_of_le hbc hx)
end
lemma eventually_lt_of_limsup_lt {f : filter α} [conditionally_complete_linear_order β]
{u : α → β} {b : β} (h : limsup f u < b) (hu : f.is_bounded_under (≤) u . is_bounded_default) :
∀ᶠ a in f, u a < b :=
@eventually_lt_of_lt_liminf _ (order_dual β) _ _ _ _ h hu
lemma le_limsup_of_frequently_le {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β} (hu_le : ∃ᶠ x in f, b ≤ u x)
(hu : f.is_bounded_under (≤) u . is_bounded_default) :
b ≤ f.limsup u :=
begin
revert hu_le,
rw [←not_imp_not, not_frequently],
simp_rw ←lt_iff_not_ge,
exact λ h, eventually_lt_of_limsup_lt h hu,
end
lemma liminf_le_of_frequently_le {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β} (hu_le : ∃ᶠ x in f, u x ≤ b)
(hu : f.is_bounded_under (≥) u . is_bounded_default) :
f.liminf u ≤ b :=
@le_limsup_of_frequently_le _ (order_dual β) _ f u b hu_le hu
lemma frequently_lt_of_lt_limsup {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β}
(hu : f.is_cobounded_under (≤) u . is_bounded_default) (h : b < f.limsup u) :
∃ᶠ x in f, b < u x :=
begin
contrapose! h,
apply Limsup_le_of_le hu,
simpa using h,
end
lemma frequently_lt_of_liminf_lt {α β} [conditionally_complete_linear_order β] {f : filter α}
{u : α → β} {b : β}
(hu : f.is_cobounded_under (≥) u . is_bounded_default) (h : f.liminf u < b) :
∃ᶠ x in f, u x < b :=
@frequently_lt_of_lt_limsup _ (order_dual β) _ f u b hu h
end conditionally_complete_linear_order
end filter
section order
open filter
lemma galois_connection.l_limsup_le {α β γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {v : α → β}
{l : β → γ} {u : γ → β} (gc : galois_connection l u)
(hlv : f.is_bounded_under (≤) (λ x, l (v x)) . is_bounded_default)
(hv_co : f.is_cobounded_under (≤) v . is_bounded_default) :
l (f.limsup v) ≤ f.limsup (λ x, l (v x)) :=
begin
refine le_Limsup_of_le hlv (λ c hc, _),
rw filter.eventually_map at hc,
simp_rw (gc _ _) at hc ⊢,
exact Limsup_le_of_le hv_co hc,
end
lemma order_iso.limsup_apply {γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {u : α → β} (g : β ≃o γ)
(hu : f.is_bounded_under (≤) u . is_bounded_default)
(hu_co : f.is_cobounded_under (≤) u . is_bounded_default)
(hgu : f.is_bounded_under (≤) (λ x, g (u x)) . is_bounded_default)
(hgu_co : f.is_cobounded_under (≤) (λ x, g (u x)) . is_bounded_default) :
g (f.limsup u) = f.limsup (λ x, g (u x)) :=
begin
refine le_antisymm (g.to_galois_connection.l_limsup_le hgu hu_co) _,
rw [←(g.symm.symm_apply_apply (f.limsup (λ (x : α), g (u x)))), g.symm_symm],
refine g.monotone _,
have hf : u = λ i, g.symm (g (u i)), from funext (λ i, (g.symm_apply_apply (u i)).symm),
nth_rewrite 0 hf,
refine g.symm.to_galois_connection.l_limsup_le _ hgu_co,
simp_rw g.symm_apply_apply,
exact hu,
end
lemma order_iso.liminf_apply {γ} [conditionally_complete_lattice β]
[conditionally_complete_lattice γ] {f : filter α} {u : α → β} (g : β ≃o γ)
(hu : f.is_bounded_under (≥) u . is_bounded_default)
(hu_co : f.is_cobounded_under (≥) u . is_bounded_default)
(hgu : f.is_bounded_under (≥) (λ x, g (u x)) . is_bounded_default)
(hgu_co : f.is_cobounded_under (≥) (λ x, g (u x)) . is_bounded_default) :
g (f.liminf u) = f.liminf (λ x, g (u x)) :=
@order_iso.limsup_apply α (order_dual β) (order_dual γ) _ _ f u g.dual hu hu_co hgu hgu_co
end order
| 8566def2cc4d6afe6ad73ea6f632fcb7aebc2c1b | 735bb6d9c54e20a6bdc031c27bff1717e68886b9 | /data/seq/computation.lean | 809f30e8db62dbd31bb22bcce8efb55ace80e6d3 | [] | no_license | digama0/library_dev | 3ea441564c4d7eca54a562b701febaa4db6a1061 | 56520d5d1dda46d87c98bf3acdf850672fdab00f | refs/heads/master | 1,611,047,574,219 | 1,500,469,648,000 | 1,500,469,648,000 | 87,738,883 | 0 | 0 | null | 1,491,771,880,000 | 1,491,771,879,000 | null | UTF-8 | Lean | false | false | 34,512 | lean |
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
Coinductive formalization of unbounded computations.
-/
import data.stream pending
universes u v w
/-
coinductive computation (α : Type u) : Type u
| return : α → computation α
| think : computation α → computation α
-/
def computation (α : Type u) : Type u :=
{ f : stream (option α) // ∀ {n a}, f n = some a → f (n+1) = some a }
namespace computation
variables {α : Type u} {β : Type v} {γ : Type w}
-- constructors
def return (a : α) : computation α := ⟨stream.const (some a), λn a', id⟩
instance : has_coe α (computation α) := ⟨return⟩
def think (c : computation α) : computation α :=
⟨none :: c.1, λn a h, by {cases n with n, contradiction, exact c.2 h}⟩
def thinkN (c : computation α) : ℕ → computation α
| 0 := c
| (n+1) := think (thinkN n)
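-- A small unfolding example: `thinkN c 2 = think (think c)`, i.e. `c` padded with two
-- extra "think" (delay) steps before it can produce a result.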
-- check for immediate result
def head (c : computation α) : option α := c.1.head
-- one step of computation
def tail (c : computation α) : computation α :=
⟨c.1.tail, λ n a, let t := c.2 in t⟩
def empty (α) : computation α := ⟨stream.const none, λn a', id⟩
def run_for : computation α → ℕ → option α := subtype.val
def destruct (s : computation α) : α ⊕ computation α :=
match s.1 0 with
| none := sum.inr (tail s)
| some a := sum.inl a
end
meta def run : computation α → α | c :=
match destruct c with
| sum.inl a := a
| sum.inr ca := run ca
end
theorem destruct_eq_ret {s : computation α} {a : α} :
destruct s = sum.inl a → s = return a :=
begin
dsimp [destruct],
ginduction s.1 0 with f0; intro h,
{ contradiction },
{ apply subtype.eq, apply funext,
dsimp [return], intro n,
induction n with n IH,
{ injection h, rwa h at f0 },
{ exact s.2 IH } }
end
theorem destruct_eq_think {s : computation α} {s'} :
destruct s = sum.inr s' → s = think s' :=
begin
dsimp [destruct],
ginduction s.1 0 with f0 a'; intro h,
{ injection h, rw ←h,
cases s with f al,
apply subtype.eq, dsimp [think, tail],
rw ←f0, exact (stream.eta f).symm },
{ contradiction }
end
@[simp] theorem destruct_ret (a : α) : destruct (return a) = sum.inl a := rfl
@[simp] theorem destruct_think : ∀ s : computation α, destruct (think s) = sum.inr s
| ⟨f, al⟩ := rfl
@[simp] theorem destruct_empty : destruct (empty α) = sum.inr (empty α) := rfl
@[simp] theorem head_ret (a : α) : head (return a) = some a := rfl
@[simp] theorem head_think (s : computation α) : head (think s) = none := rfl
@[simp] theorem head_empty : head (empty α) = none := rfl
@[simp] theorem tail_ret (a : α) : tail (return a) = return a := rfl
@[simp] theorem tail_think (s : computation α) : tail (think s) = s :=
by cases s with f al; apply subtype.eq; dsimp [tail, think]; rw [stream.tail_cons]
@[simp] theorem tail_empty : tail (empty α) = empty α := rfl
theorem think_empty : empty α = think (empty α) :=
destruct_eq_think destruct_empty
def cases_on {C : computation α → Sort v} (s : computation α)
(h1 : ∀ a, C (return a)) (h2 : ∀ s, C (think s)) : C s := begin
ginduction destruct s with H,
{ rw destruct_eq_ret H, apply h1 },
{ cases a with a s', rw destruct_eq_think H, apply h2 }
end
def corec.F (f : β → α ⊕ β) : α ⊕ β → option α × (α ⊕ β)
| (sum.inl a) := (some a, sum.inl a)
| (sum.inr b) := (match f b with
| sum.inl a := some a
| sum.inr b' := none
end, f b)
def corec (f : β → α ⊕ β) (b : β) : computation α :=
begin
refine ⟨stream.corec' (corec.F f) (sum.inr b), λn a' h, _⟩,
rw stream.corec'_eq,
change stream.corec' (corec.F f) (corec.F f (sum.inr b)).2 n = some a',
revert h, generalize (sum.inr b) o,
induction n with n IH; intro o,
{ change (corec.F f o).1 = some a' → (corec.F f (corec.F f o).2).1 = some a',
cases o with a b; intro h, { exact h },
dsimp [corec.F] at h, dsimp [corec.F],
cases f b with a b', { exact h },
{ contradiction } },
{ rw [stream.corec'_eq (corec.F f) (corec.F f o).2,
stream.corec'_eq (corec.F f) o],
exact IH (corec.F f o).2 }
end
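/- A sketch of a typical use of `corec` (assuming a hypothetical decidable predicate
`p : ℕ → Prop`): the computation
  `corec (λ n, if p n then sum.inl n else sum.inr (n + 1)) 0`
searches upwards from `0`, emitting one `think` step per rejected candidate; it returns
the least `n` satisfying `p` if one exists, and runs forever otherwise. -/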
def lmap (f : α → β) : α ⊕ γ → β ⊕ γ
| (sum.inl a) := sum.inl (f a)
| (sum.inr b) := sum.inr b
def rmap (f : β → γ) : α ⊕ β → α ⊕ γ
| (sum.inl a) := sum.inl a
| (sum.inr b) := sum.inr (f b)
attribute [simp] lmap rmap
@[simp] def corec_eq (f : β → α ⊕ β) (b : β) :
destruct (corec f b) = rmap (corec f) (f b) :=
begin
dsimp [corec, destruct],
change stream.corec' (corec.F f) (sum.inr b) 0 with corec.F._match_1 (f b),
ginduction f b with h a b', { refl },
dsimp [corec.F, destruct],
apply congr_arg, apply subtype.eq,
dsimp [corec, tail],
rw [stream.corec'_eq, stream.tail_cons],
dsimp [corec.F], rw h
end
section bisim
variable (R : computation α → computation α → Prop)
local infix ~ := R
def bisim_o : α ⊕ computation α → α ⊕ computation α → Prop
| (sum.inl a) (sum.inl a') := a = a'
| (sum.inr s) (sum.inr s') := R s s'
| _ _ := false
attribute [simp] bisim_o
def is_bisimulation := ∀ ⦃s₁ s₂⦄, s₁ ~ s₂ → bisim_o R (destruct s₁) (destruct s₂)
-- If two computations are bisimilar, then they are equal
lemma eq_of_bisim (bisim : is_bisimulation R) {s₁ s₂} (r : s₁ ~ s₂) : s₁ = s₂ :=
begin
apply subtype.eq,
apply stream.eq_of_bisim (λx y, ∃ s s' : computation α, s.1 = x ∧ s'.1 = y ∧ R s s'),
dsimp [stream.is_bisimulation],
intros t₁ t₂ e,
exact match t₁, t₂, e with ._, ._, ⟨s, s', rfl, rfl, r⟩ :=
suffices head s = head s' ∧ R (tail s) (tail s'), from
and.imp id (λr, ⟨tail s, tail s',
by cases s; refl, by cases s'; refl, r⟩) this,
begin
have := bisim r, revert r this,
apply cases_on s _ _; intros; apply cases_on s' _ _; intros; intros r this,
{ constructor, dsimp at this, rw this, assumption },
{ rw [destruct_ret, destruct_think] at this,
exact false.elim this },
{ rw [destruct_ret, destruct_think] at this,
exact false.elim this },
{ simp at this, simp [*] }
end
end,
exact ⟨s₁, s₂, rfl, rfl, r⟩
end
end bisim
-- It's more of a stretch to use ∈ for this relation, but it
-- asserts that the computation converges to the given value.
protected def mem (a : α) (s : computation α) := some a ∈ s.1
instance : has_mem α (computation α) :=
⟨computation.mem⟩
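-- Concretely, `a ∈ s` says that the underlying stream of `s` is `some a` at some index
-- (and hence, by the stability field of `computation`, at all later indices). Membership
-- thus ignores leading `think` steps: both `a ∈ return a` and `a ∈ think (return a)` hold
-- (see `ret_mem` and `think_mem` below).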
theorem le_stable (s : computation α) {a m n} (h : m ≤ n) :
s.1 m = some a → s.1 n = some a :=
by {cases s with f al, induction h with n h IH, exacts [id, λ h2, al (IH h2)]}
theorem mem_unique :
relator.left_unique ((∈) : α → computation α → Prop) :=
λa s b ⟨m, ha⟩ ⟨n, hb⟩, by injection
(le_stable s (le_max_left m n) ha.symm).symm.trans
(le_stable s (le_max_right m n) hb.symm)
@[class] def terminates (s : computation α) : Prop := ∃ a, a ∈ s
theorem terminates_of_mem {s : computation α} {a : α} : a ∈ s → terminates s :=
exists.intro a
theorem terminates_def (s : computation α) : terminates s ↔ ∃ n, (s.1 n).is_some :=
⟨λ⟨a, n, h⟩, ⟨n, by {dsimp [stream.nth] at h, rw ←h, exact rfl}⟩,
λ⟨n, h⟩, ⟨option.get h, n, (option.eq_some_of_is_some h).symm⟩⟩
theorem ret_mem (a : α) : a ∈ return a :=
exists.intro 0 rfl
theorem eq_of_ret_mem {a a' : α} (h : a' ∈ return a) : a' = a :=
mem_unique h (ret_mem _)
instance ret_terminates (a : α) : terminates (return a) :=
terminates_of_mem (ret_mem _)
theorem think_mem {s : computation α} {a} : a ∈ s → a ∈ think s
| ⟨n, h⟩ := ⟨n+1, h⟩
instance think_terminates (s : computation α) :
∀ [terminates s], terminates (think s)
| ⟨a, n, h⟩ := ⟨a, n+1, h⟩
theorem of_think_mem {s : computation α} {a} : a ∈ think s → a ∈ s
| ⟨n, h⟩ := by {cases n with n', contradiction, exact ⟨n', h⟩}
theorem of_think_terminates {s : computation α} :
terminates (think s) → terminates s
| ⟨a, h⟩ := ⟨a, of_think_mem h⟩
theorem not_mem_empty (a : α) : a ∉ empty α :=
λ ⟨n, h⟩, by clear _fun_match; contradiction
theorem not_terminates_empty : ¬ terminates (empty α) :=
λ ⟨a, h⟩, not_mem_empty a h
theorem eq_empty_of_not_terminates {s} (H : ¬ terminates s) : s = empty α :=
begin
apply subtype.eq, apply funext, intro n,
ginduction s.val n with h, {refl},
refine absurd _ H, exact ⟨_, _, h.symm⟩
end
theorem thinkN_mem {s : computation α} {a} : ∀ n, a ∈ thinkN s n ↔ a ∈ s
| 0 := iff.rfl
| (n+1) := iff.trans ⟨of_think_mem, think_mem⟩ (thinkN_mem n)
instance thinkN_terminates (s : computation α) :
∀ [terminates s] n, terminates (thinkN s n)
| ⟨a, h⟩ n := ⟨a, (thinkN_mem n).2 h⟩
theorem of_thinkN_terminates (s : computation α) (n) :
terminates (thinkN s n) → terminates s
| ⟨a, h⟩ := ⟨a, (thinkN_mem _).1 h⟩
def promises (s : computation α) (a : α) : Prop := ∀ ⦃a'⦄, a' ∈ s → a = a'
infix ` ~> `:50 := promises
theorem mem_promises {s : computation α} {a : α} : a ∈ s → s ~> a :=
λ h a', mem_unique h
theorem empty_promises (a : α) : empty α ~> a :=
λ a' h, absurd h (not_mem_empty _)
section get
variables (s : computation α) [h : terminates s]
include s h
def length : ℕ := nat.find ((terminates_def _).1 h)
-- If a computation has a result, we can retrieve it
def get : α := option.get (nat.find_spec $ (terminates_def _).1 h)
def get_mem : get s ∈ s :=
exists.intro (length s) (option.eq_some_of_is_some _).symm
def get_eq_of_mem {a} : a ∈ s → get s = a :=
mem_unique (get_mem _)
def mem_of_get_eq {a} : get s = a → a ∈ s :=
by intro h; rw ←h; apply get_mem
@[simp] theorem get_think : get (think s) = get s :=
get_eq_of_mem _ $ let ⟨n, h⟩ := get_mem s in ⟨n+1, h⟩
@[simp] theorem get_thinkN (n) : get (thinkN s n) = get s :=
get_eq_of_mem _ $ (thinkN_mem _).2 (get_mem _)
def get_promises : s ~> get s := λ a, get_eq_of_mem _
def mem_of_promises {a} (p : s ~> a) : a ∈ s :=
by cases h with a' h; rw p h; exact h
def get_eq_of_promises {a} : s ~> a → get s = a :=
get_eq_of_mem _ ∘ mem_of_promises _
end get
def results (s : computation α) (a : α) (n : ℕ) :=
∃ (h : a ∈ s), @length _ s (terminates_of_mem h) = n
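-- Informally, `results s a n` says that `s` terminates with value `a` after exactly `n`
-- `think` steps; for instance `results (thinkN (return a) n) a n`, which is
-- `results_thinkN_ret` below.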
def results_of_terminates (s : computation α) [T : terminates s] :
results s (get s) (length s) :=
⟨get_mem _, rfl⟩
def results_of_terminates' (s : computation α) [T : terminates s] {a} (h : a ∈ s) :
results s a (length s) :=
by rw ←get_eq_of_mem _ h; apply results_of_terminates
def results.mem {s : computation α} {a n} : results s a n → a ∈ s
| ⟨m, _⟩ := m
def results.terminates {s : computation α} {a n} (h : results s a n) : terminates s :=
terminates_of_mem h.mem
def results.length {s : computation α} {a n} [T : terminates s] :
results s a n → length s = n
| ⟨_, h⟩ := h
def results.val_unique {s : computation α} {a b m n}
(h1 : results s a m) (h2 : results s b n) : a = b :=
mem_unique h1.mem h2.mem
def results.len_unique {s : computation α} {a b m n}
(h1 : results s a m) (h2 : results s b n) : m = n :=
by have := h1.terminates; have := h2.terminates; rw [←h1.length, h2.length]
def exists_results_of_mem {s : computation α} {a} (h : a ∈ s) : ∃ n, results s a n :=
by have := terminates_of_mem h; have := results_of_terminates' s h; exact ⟨_, this⟩
@[simp] theorem get_ret (a : α) : get (return a) = a :=
get_eq_of_mem _ ⟨0, rfl⟩
@[simp] theorem length_ret (a : α) : length (return a) = 0 :=
let h := computation.ret_terminates a in
nat.eq_zero_of_le_zero $ nat.find_min' ((terminates_def (return a)).1 h) rfl
theorem results_ret (a : α) : results (return a) a 0 :=
⟨_, length_ret _⟩
@[simp] theorem length_think (s : computation α) [h : terminates s] :
length (think s) = length s + 1 :=
begin
apply le_antisymm,
{ exact nat.find_min' _ (nat.find_spec ((terminates_def _).1 h)) },
{ have : (option.is_some ((think s).val (length (think s))) : Prop) :=
nat.find_spec ((terminates_def _).1 s.think_terminates),
cases length (think s) with n,
{ contradiction },
{ apply nat.succ_le_succ, apply nat.find_min', apply this } }
end
theorem results_think {s : computation α} {a n}
(h : results s a n) : results (think s) a (n + 1) :=
by have := h.terminates; exact ⟨think_mem h.mem, by rw [length_think, h.length]⟩
theorem of_results_think {s : computation α} {a n}
(h : results (think s) a n) : ∃ m, results s a m ∧ n = m + 1 :=
begin
have := of_think_terminates h.terminates,
have := results_of_terminates' _ (of_think_mem h.mem),
exact ⟨_, this, results.len_unique h (results_think this)⟩,
end
@[simp] theorem results_think_iff {s : computation α} {a n} :
results (think s) a (n + 1) ↔ results s a n :=
⟨λ h, let ⟨n', r, e⟩ := of_results_think h in by injection e; rwa h,
results_think⟩
theorem results_thinkN {s : computation α} {a m} :
∀ n, results s a m → results (thinkN s n) a (m + n)
| 0 h := h
| (n+1) h := results_think (results_thinkN n h)
theorem results_thinkN_ret (a : α) (n) : results (thinkN (return a) n) a n :=
by have := results_thinkN n (results_ret a); rwa zero_add at this
@[simp] theorem length_thinkN (s : computation α) [h : terminates s] (n) :
length (thinkN s n) = length s + n :=
(results_thinkN n (results_of_terminates _)).length
def eq_thinkN {s : computation α} {a n} (h : results s a n) :
s = thinkN (return a) n :=
begin
revert s,
induction n with n IH; intro s;
apply cases_on s (λ a', _) (λ s, _); intro h,
{ rw ←eq_of_ret_mem h.mem, refl },
{ cases of_results_think h with n h, cases h, contradiction },
{ have := h.len_unique (results_ret _), contradiction },
{ rw IH (results_think_iff.1 h), refl }
end
def eq_thinkN' (s : computation α) [h : terminates s] :
s = thinkN (return (get s)) (length s) :=
eq_thinkN (results_of_terminates _)
def mem_rec_on {C : computation α → Sort v} {a s} (M : a ∈ s)
(h1 : C (return a)) (h2 : ∀ s, C s → C (think s)) : C s :=
begin
have T := terminates_of_mem M,
rw [eq_thinkN' s, get_eq_of_mem s M],
generalize (length s) n, intro n,
induction n with n IH, exacts [h1, h2 _ IH]
end
def terminates_rec_on {C : computation α → Sort v} (s) [terminates s]
(h1 : ∀ a, C (return a)) (h2 : ∀ s, C s → C (think s)) : C s :=
mem_rec_on (get_mem s) (h1 _) h2
def map (f : α → β) : computation α → computation β
| ⟨s, al⟩ := ⟨s.map (λo, option.cases_on o none (some ∘ f)),
λn b, begin
dsimp [stream.map, stream.nth],
ginduction s n with e a; intro h,
{ contradiction }, { rw [al e, ←h] }
end⟩
def bind.G : β ⊕ computation β → β ⊕ computation α ⊕ computation β
| (sum.inl b) := sum.inl b
| (sum.inr cb') := sum.inr $ sum.inr cb'
def bind.F (f : α → computation β) :
computation α ⊕ computation β → β ⊕ computation α ⊕ computation β
| (sum.inl ca) :=
match destruct ca with
| sum.inl a := bind.G $ destruct (f a)
| sum.inr ca' := sum.inr $ sum.inl ca'
end
| (sum.inr cb) := bind.G $ destruct cb
def bind (c : computation α) (f : α → computation β) : computation β :=
corec (bind.F f) (sum.inl c)
instance : has_bind computation := ⟨@bind⟩
theorem has_bind_eq_bind {β} (c : computation α) (f : α → computation β) :
c >>= f = bind c f := rfl
def join (c : computation (computation α)) : computation α := c >>= id
@[simp] lemma map_ret (f : α → β) (a) : map f (return a) = return (f a) := rfl
@[simp] lemma map_think (f : α → β) : ∀ s, map f (think s) = think (map f s)
| ⟨s, al⟩ := by apply subtype.eq; dsimp [think, map]; rw stream.map_cons
@[simp] lemma destruct_map (f : α → β) (s) : destruct (map f s) = lmap f (rmap (map f) (destruct s)) :=
by apply s.cases_on; intro; simp
@[simp] theorem map_id : ∀ (s : computation α), map id s = s
| ⟨f, al⟩ := begin
apply subtype.eq; simp [map, function.comp],
have e : (@option.rec α (λ_, option α) none some) = id,
{ apply funext, intro, cases x; refl },
simp [e, stream.map_id]
end
lemma map_comp (f : α → β) (g : β → γ) :
∀ (s : computation α), map (g ∘ f) s = map g (map f s)
| ⟨s, al⟩ := begin
apply subtype.eq; dsimp [map],
rw stream.map_map,
apply congr_arg (λ f : _ → option γ, stream.map f s),
apply funext, intro, cases x with x; refl
end
@[simp] lemma ret_bind (a) (f : α → computation β) :
bind (return a) f = f a :=
begin
apply eq_of_bisim (λc1 c2,
c1 = bind (return a) f ∧ c2 = f a ∨
c1 = corec (bind.F f) (sum.inr c2)),
{ intros c1 c2 h,
exact match c1, c2, h with
| ._, ._, or.inl ⟨rfl, rfl⟩ := begin
simp [bind, bind.F],
cases destruct (f a) with b cb; simp [bind.G]
end
| ._, c, or.inr rfl := begin
simp [bind.F],
cases destruct c with b cb; simp [bind.G]
end end },
{ simp }
end
@[simp] lemma think_bind (c) (f : α → computation β) :
bind (think c) f = think (bind c f) :=
destruct_eq_think $ by simp [bind, bind.F]
@[simp] theorem bind_ret (f : α → β) (s) : bind s (return ∘ f) = map f s :=
begin
apply eq_of_bisim (λc1 c2, c1 = c2 ∨
∃ s, c1 = bind s (return ∘ f) ∧ c2 = map f s),
{ intros c1 c2 h,
exact match c1, c2, h with
| c, ._, or.inl rfl := by cases destruct c with b cb; simp
| ._, ._, or.inr ⟨s, rfl, rfl⟩ := begin
apply cases_on s; intros s; simp,
exact or.inr ⟨s, rfl, rfl⟩
end end },
{ exact or.inr ⟨s, rfl, rfl⟩ }
end
@[simp] theorem bind_ret' (s : computation α) : bind s return = s :=
by rw bind_ret; change (λ x : α, x) with @id α; rw map_id
@[simp] theorem bind_assoc (s : computation α) (f : α → computation β) (g : β → computation γ) :
bind (bind s f) g = bind s (λ (x : α), bind (f x) g) :=
begin
apply eq_of_bisim (λc1 c2, c1 = c2 ∨
∃ s, c1 = bind (bind s f) g ∧ c2 = bind s (λ (x : α), bind (f x) g)),
{ intros c1 c2 h,
exact match c1, c2, h with
| c, ._, or.inl rfl := by cases destruct c with b cb; simp
| ._, ._, or.inr ⟨s, rfl, rfl⟩ := begin
apply cases_on s; intros s; simp,
{ generalize (f s) fs, intro fs,
apply cases_on fs; intros t; simp,
{ cases destruct (g t) with b cb; simp } },
{ exact or.inr ⟨s, rfl, rfl⟩ }
end end },
{ exact or.inr ⟨s, rfl, rfl⟩ }
end
theorem results_bind {s : computation α} {f : α → computation β} {a b m n}
(h1 : results s a m) (h2 : results (f a) b n) : results (bind s f) b (n + m) :=
begin
have := h1.mem, revert m,
apply mem_rec_on this _ (λ s IH, _); intros m h1,
{ rw [ret_bind], rw h1.len_unique (results_ret _), exact h2 },
{ rw [think_bind], cases of_results_think h1 with m' h, cases h with h1 e,
rw e, exact results_think (IH h1) }
end
theorem mem_bind {s : computation α} {f : α → computation β} {a b}
(h1 : a ∈ s) (h2 : b ∈ f a) : b ∈ bind s f :=
let ⟨m, h1⟩ := exists_results_of_mem h1,
⟨n, h2⟩ := exists_results_of_mem h2 in (results_bind h1 h2).mem
instance terminates_bind (s : computation α) (f : α → computation β)
[terminates s] [terminates (f (get s))] :
terminates (bind s f) :=
terminates_of_mem (mem_bind (get_mem s) (get_mem (f (get s))))
@[simp] theorem get_bind (s : computation α) (f : α → computation β)
[terminates s] [terminates (f (get s))] :
get (bind s f) = get (f (get s)) :=
get_eq_of_mem _ (mem_bind (get_mem s) (get_mem (f (get s))))
@[simp] theorem length_bind (s : computation α) (f : α → computation β)
[T1 : terminates s] [T2 : terminates (f (get s))] :
length (bind s f) = length (f (get s)) + length s :=
(results_of_terminates _).len_unique $
results_bind (results_of_terminates _) (results_of_terminates _)
theorem of_results_bind {s : computation α} {f : α → computation β} {b k} :
results (bind s f) b k →
∃ a m n, results s a m ∧ results (f a) b n ∧ k = n + m :=
begin
revert s, induction k with n IH; intro s;
apply cases_on s (λ a, _) (λ s', _); intro e,
{ simp [thinkN] at e, refine ⟨a, _, _, results_ret _, e, rfl⟩ },
{ have := congr_arg head (eq_thinkN e), contradiction },
{ simp at e, refine ⟨a, _, n+1, results_ret _, e, rfl⟩ },
{ simp at e, exact let ⟨a, m, n', h1, h2, e'⟩ := IH e in
by rw e'; exact ⟨a, m.succ, n', results_think h1, h2, rfl⟩ }
end
theorem exists_of_mem_bind {s : computation α} {f : α → computation β} {b}
(h : b ∈ bind s f) : ∃ a ∈ s, b ∈ f a :=
let ⟨k, h⟩ := exists_results_of_mem h,
⟨a, m, n, h1, h2, e⟩ := of_results_bind h in ⟨a, h1.mem, h2.mem⟩
theorem bind_promises {s : computation α} {f : α → computation β} {a b}
(h1 : s ~> a) (h2 : f a ~> b) : bind s f ~> b :=
λ b' bB, begin
cases exists_of_mem_bind bB with a' a's, cases a's with a's ba',
rw ←h1 a's at ba', exact h2 ba'
end
instance : monad computation :=
{ map := @map,
pure := @return,
bind := @bind,
id_map := @map_id,
bind_pure_comp_eq_map := @bind_ret,
pure_bind := @ret_bind,
bind_assoc := @bind_assoc }
theorem has_map_eq_map {β} (f : α → β) (c : computation α) : f <$> c = map f c := rfl
@[simp] lemma return_def (a) : (_root_.return a : computation α) = return a := rfl
@[simp] lemma map_ret' {α β} : ∀ (f : α → β) (a), f <$> return a = return (f a) := map_ret
@[simp] lemma map_think' {α β} : ∀ (f : α → β) s, f <$> think s = think (f <$> s) := map_think
theorem mem_map (f : α → β) {a} {s : computation α} (m : a ∈ s) : f a ∈ map f s :=
by rw ←bind_ret; apply mem_bind m; apply ret_mem
theorem exists_of_mem_map {f : α → β} {b : β} {s : computation α} (h : b ∈ map f s) :
∃ a, a ∈ s ∧ f a = b :=
by rw ←bind_ret at h; exact
let ⟨a, as, fb⟩ := exists_of_mem_bind h in ⟨a, as, mem_unique (ret_mem _) fb⟩
instance terminates_map (f : α → β) (s : computation α) [terminates s] : terminates (map f s) :=
by rw ←bind_ret; apply_instance
theorem terminates_map_iff (f : α → β) (s : computation α) :
terminates (map f s) ↔ terminates s :=
⟨λ⟨a, h⟩, let ⟨b, h1, _⟩ := exists_of_mem_map h in ⟨_, h1⟩, @computation.terminates_map _ _ _ _⟩
-- Parallel computation
def orelse (c1 c2 : computation α) : computation α :=
@computation.corec α (computation α × computation α)
(λ⟨c1, c2⟩, match destruct c1 with
| sum.inl a := sum.inl a
| sum.inr c1' := match destruct c2 with
| sum.inl a := sum.inl a
| sum.inr c2' := sum.inr (c1', c2')
end
end) (c1, c2)
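/- `orelse` runs the two computations in lock-step and returns whichever result shows up
first, so a diverging argument does not spoil the race: for instance
`(think (return a) <|> empty α)` still terminates with value `a`, even though `empty α`
never returns (compare `ret_orelse`, `orelse_ret` and `empty_orelse` below). -/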
instance : alternative computation :=
{ computation.monad with
orelse := @orelse,
failure := @empty }
@[simp] theorem ret_orelse (a : α) (c2 : computation α) :
(return a <|> c2) = return a :=
destruct_eq_ret $ by unfold has_orelse.orelse; simp [orelse]
@[simp] theorem orelse_ret (c1 : computation α) (a : α) :
(think c1 <|> return a) = return a :=
destruct_eq_ret $ by unfold has_orelse.orelse; simp [orelse]
@[simp] theorem orelse_think (c1 c2 : computation α) :
(think c1 <|> think c2) = think (c1 <|> c2) :=
destruct_eq_think $ by unfold has_orelse.orelse; simp [orelse]
@[simp] theorem empty_orelse (c) : (empty α <|> c) = c :=
begin
apply eq_of_bisim (λc1 c2, (empty α <|> c2) = c1) _ rfl,
intros s' s h, rw ←h,
apply cases_on s; intros s; rw think_empty; simp,
rw ←think_empty,
end
@[simp] theorem orelse_empty (c : computation α) : (c <|> empty α) = c :=
begin
apply eq_of_bisim (λc1 c2, (c2 <|> empty α) = c1) _ rfl,
intros s' s h, rw ←h,
apply cases_on s; intros s; rw think_empty; simp,
rw←think_empty,
end
def equiv (c1 c2 : computation α) : Prop := ∀ a, a ∈ c1 ↔ a ∈ c2
infix ~ := equiv
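-- Since a computation has at most one result (`mem_unique`), `c1 ~ c2` says precisely
-- that `c1` and `c2` either both diverge or both terminate with the same value; the
-- number of `think` steps is irrelevant (see `think_equiv` and `thinkN_equiv` below).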
@[refl] theorem equiv.refl (s : computation α) : s ~ s := λ_, iff.rfl
@[symm] theorem equiv.symm {s t : computation α} : s ~ t → t ~ s :=
λh a, (h a).symm
@[trans] theorem equiv.trans {s t u : computation α} : s ~ t → t ~ u → s ~ u :=
λh1 h2 a, (h1 a).trans (h2 a)
theorem equiv.equivalence : equivalence (@equiv α) :=
⟨@equiv.refl _, @equiv.symm _, @equiv.trans _⟩
theorem equiv_of_mem {s t : computation α} {a} (h1 : a ∈ s) (h2 : a ∈ t) : s ~ t :=
λa', ⟨λma, by rw mem_unique ma h1; exact h2,
λma, by rw mem_unique ma h2; exact h1⟩
theorem terminates_congr {c1 c2 : computation α}
(h : c1 ~ c2) : terminates c1 ↔ terminates c2 :=
exists_congr h
theorem promises_congr {c1 c2 : computation α}
(h : c1 ~ c2) (a) : c1 ~> a ↔ c2 ~> a :=
forall_congr (λa', imp_congr (h a') iff.rfl)
theorem get_equiv {c1 c2 : computation α} (h : c1 ~ c2)
[terminates c1] [terminates c2] : get c1 = get c2 :=
get_eq_of_mem _ $ (h _).2 $ get_mem _
theorem think_equiv (s : computation α) : think s ~ s :=
λ a, ⟨of_think_mem, think_mem⟩
theorem thinkN_equiv (s : computation α) (n) : thinkN s n ~ s :=
λ a, thinkN_mem n
theorem bind_congr {s1 s2 : computation α} {f1 f2 : α → computation β}
(h1 : s1 ~ s2) (h2 : ∀ a, f1 a ~ f2 a) : bind s1 f1 ~ bind s2 f2 :=
λ b, ⟨λh, let ⟨a, ha, hb⟩ := exists_of_mem_bind h in
mem_bind ((h1 a).1 ha) ((h2 a b).1 hb),
λh, let ⟨a, ha, hb⟩ := exists_of_mem_bind h in
mem_bind ((h1 a).2 ha) ((h2 a b).2 hb)⟩
theorem equiv_ret_of_mem {s : computation α} {a} (h : a ∈ s) : s ~ return a :=
equiv_of_mem h (ret_mem _)
def lift_rel (R : α → β → Prop) (ca : computation α) (cb : computation β) : Prop :=
(∀ {a}, a ∈ ca → ∃ {b}, b ∈ cb ∧ R a b) ∧
∀ {b}, b ∈ cb → ∃ {a}, a ∈ ca ∧ R a b
theorem lift_rel.swap (R : α → β → Prop) (ca : computation α) (cb : computation β) :
lift_rel (function.swap R) cb ca ↔ lift_rel R ca cb :=
and_comm _ _
theorem lift_eq_iff_equiv (c1 c2 : computation α) : lift_rel (=) c1 c2 ↔ c1 ~ c2 :=
⟨λ⟨h1, h2⟩ a,
⟨λ a1, let ⟨b, b2, ab⟩ := h1 a1 in by rwa ab,
λ a2, let ⟨b, b1, ab⟩ := h2 a2 in by rwa ←ab⟩,
λe, ⟨λ a a1, ⟨a, (e _).1 a1, rfl⟩, λ a a2, ⟨a, (e _).2 a2, rfl⟩⟩⟩
def lift_rel.refl (R : α → α → Prop) (H : reflexive R) : reflexive (lift_rel R) :=
λ s, ⟨λ a as, ⟨a, as, H a⟩, λ b bs, ⟨b, bs, H b⟩⟩
def lift_rel.symm (R : α → α → Prop) (H : symmetric R) : symmetric (lift_rel R) :=
λ s1 s2 ⟨l, r⟩,
⟨λ a a2, let ⟨b, b1, ab⟩ := r a2 in ⟨b, b1, H ab⟩,
λ a a1, let ⟨b, b2, ab⟩ := l a1 in ⟨b, b2, H ab⟩⟩
def lift_rel.trans (R : α → α → Prop) (H : transitive R) : transitive (lift_rel R) :=
λ s1 s2 s3 ⟨l1, r1⟩ ⟨l2, r2⟩,
⟨λ a a1, let ⟨b, b2, ab⟩ := l1 a1, ⟨c, c3, bc⟩ := l2 b2 in ⟨c, c3, H ab bc⟩,
λ c c3, let ⟨b, b2, bc⟩ := r2 c3, ⟨a, a1, ab⟩ := r1 b2 in ⟨a, a1, H ab bc⟩⟩
def lift_rel.equiv (R : α → α → Prop) : equivalence R → equivalence (lift_rel R)
| ⟨refl, symm, trans⟩ :=
⟨lift_rel.refl R refl, lift_rel.symm R symm, lift_rel.trans R trans⟩
def lift_rel.imp {R S : α → β → Prop} (H : ∀ {a b}, R a b → S a b) (s t) :
lift_rel R s t → lift_rel S s t | ⟨l, r⟩ :=
⟨λ a as, let ⟨b, bt, ab⟩ := l as in ⟨b, bt, H ab⟩,
λ b bt, let ⟨a, as, ab⟩ := r bt in ⟨a, as, H ab⟩⟩
def terminates_of_lift_rel {R : α → β → Prop} {s t} :
lift_rel R s t → (terminates s ↔ terminates t) | ⟨l, r⟩ :=
⟨λ ⟨a, as⟩, let ⟨b, bt, ab⟩ := l as in ⟨b, bt⟩,
λ ⟨b, bt⟩, let ⟨a, as, ab⟩ := r bt in ⟨a, as⟩⟩
def rel_of_lift_rel {R : α → β → Prop} {ca cb} :
lift_rel R ca cb → ∀ {a b}, a ∈ ca → b ∈ cb → R a b
| ⟨l, r⟩ a b ma mb :=
let ⟨b', mb', ab'⟩ := l ma in by rw mem_unique mb mb'; exact ab'
theorem lift_rel_of_mem {R : α → β → Prop} {a b ca cb}
(ma : a ∈ ca) (mb : b ∈ cb) (ab : R a b) : lift_rel R ca cb :=
⟨λ a' ma', by rw mem_unique ma' ma; exact ⟨b, mb, ab⟩,
λ b' mb', by rw mem_unique mb' mb; exact ⟨a, ma, ab⟩⟩
theorem exists_of_lift_rel_left {R : α → β → Prop} {ca cb}
(H : lift_rel R ca cb) {a} (h : a ∈ ca) : ∃ {b}, b ∈ cb ∧ R a b :=
H.left h
theorem exists_of_lift_rel_right {R : α → β → Prop} {ca cb}
(H : lift_rel R ca cb) {b} (h : b ∈ cb) : ∃ {a}, a ∈ ca ∧ R a b :=
H.right h
theorem lift_rel_def {R : α → β → Prop} {ca cb} : lift_rel R ca cb ↔
(terminates ca ↔ terminates cb) ∧ ∀ {a b}, a ∈ ca → b ∈ cb → R a b :=
⟨λh, ⟨terminates_of_lift_rel h, λ a b ma mb,
let ⟨b', mb', ab⟩ := h.left ma in by rwa mem_unique mb mb'⟩,
λ⟨l, r⟩,
⟨λ a ma, let ⟨b, mb⟩ := l.1 ⟨_, ma⟩ in ⟨b, mb, r ma mb⟩,
λ b mb, let ⟨a, ma⟩ := l.2 ⟨_, mb⟩ in ⟨a, ma, r ma mb⟩⟩⟩
theorem lift_rel_bind {δ} (R : α → β → Prop) (S : γ → δ → Prop)
{s1 : computation α} {s2 : computation β}
{f1 : α → computation γ} {f2 : β → computation δ}
(h1 : lift_rel R s1 s2) (h2 : ∀ {a b}, R a b → lift_rel S (f1 a) (f2 b))
: lift_rel S (bind s1 f1) (bind s2 f2) :=
let ⟨l1, r1⟩ := h1 in
⟨λ c cB,
let ⟨a, a1, c1⟩ := exists_of_mem_bind cB,
⟨b, b2, ab⟩ := l1 a1,
⟨l2, r2⟩ := h2 ab,
⟨d, d2, cd⟩ := l2 c1 in
⟨_, mem_bind b2 d2, cd⟩,
λ d dB,
let ⟨b, b1, d1⟩ := exists_of_mem_bind dB,
⟨a, a2, ab⟩ := r1 b1,
⟨l2, r2⟩ := h2 ab,
⟨c, c2, cd⟩ := r2 d1 in
⟨_, mem_bind a2 c2, cd⟩⟩
@[simp] theorem lift_rel_return_left (R : α → β → Prop) (a : α) (cb : computation β) :
lift_rel R (return a) cb ↔ ∃ {b}, b ∈ cb ∧ R a b :=
⟨λ⟨l, r⟩, l (ret_mem _),
λ⟨b, mb, ab⟩,
⟨λ a' ma', by rw eq_of_ret_mem ma'; exact ⟨b, mb, ab⟩,
λ b' mb', ⟨_, ret_mem _, by rw mem_unique mb' mb; exact ab⟩⟩⟩
@[simp] theorem lift_rel_return_right (R : α → β → Prop) (ca : computation α) (b : β) :
lift_rel R ca (return b) ↔ ∃ {a}, a ∈ ca ∧ R a b :=
by rw [lift_rel.swap, lift_rel_return_left]
@[simp] theorem lift_rel_return (R : α → β → Prop) (a : α) (b : β) :
lift_rel R (return a) (return b) ↔ R a b :=
by rw [lift_rel_return_left]; exact
⟨λ⟨b', mb', ab'⟩, by rwa eq_of_ret_mem mb' at ab',
λab, ⟨_, ret_mem _, ab⟩⟩
@[simp] theorem lift_rel_think_left (R : α → β → Prop) (ca : computation α) (cb : computation β) :
lift_rel R (think ca) cb ↔ lift_rel R ca cb :=
and_congr (forall_congr $ λb, imp_congr ⟨of_think_mem, think_mem⟩ iff.rfl)
(forall_congr $ λb, imp_congr iff.rfl $
exists_congr $ λ b, and_congr ⟨of_think_mem, think_mem⟩ iff.rfl)
@[simp] theorem lift_rel_think_right (R : α → β → Prop) (ca : computation α) (cb : computation β) :
lift_rel R ca (think cb) ↔ lift_rel R ca cb :=
by rw [←lift_rel.swap R, ←lift_rel.swap R]; apply lift_rel_think_left
theorem lift_rel_mem_cases {R : α → β → Prop} {ca cb}
(Ha : ∀ a ∈ ca, lift_rel R ca cb)
(Hb : ∀ b ∈ cb, lift_rel R ca cb) : lift_rel R ca cb :=
⟨λ a ma, (Ha _ ma).left ma, λ b mb, (Hb _ mb).right mb⟩
theorem lift_rel_congr {R : α → β → Prop} {ca ca' : computation α} {cb cb' : computation β}
(ha : ca ~ ca') (hb : cb ~ cb') : lift_rel R ca cb ↔ lift_rel R ca' cb' :=
and_congr
(forall_congr $ λ a, imp_congr (ha _) $ exists_congr $ λ b, and_congr (hb _) iff.rfl)
(forall_congr $ λ b, imp_congr (hb _) $ exists_congr $ λ a, and_congr (ha _) iff.rfl)
theorem lift_rel_map {δ} (R : α → β → Prop) (S : γ → δ → Prop)
{s1 : computation α} {s2 : computation β}
{f1 : α → γ} {f2 : β → δ}
(h1 : lift_rel R s1 s2) (h2 : ∀ {a b}, R a b → S (f1 a) (f2 b))
: lift_rel S (map f1 s1) (map f2 s2) :=
by rw [←bind_ret, ←bind_ret]; apply lift_rel_bind _ _ h1; simp; exact @h2
theorem map_congr (R : α → α → Prop) (S : β → β → Prop)
{s1 s2 : computation α} {f : α → β}
(h1 : s1 ~ s2) : map f s1 ~ map f s2 :=
by rw [←lift_eq_iff_equiv];
exact lift_rel_map eq _ ((lift_eq_iff_equiv _ _).2 h1) (λ a b, congr_arg _)
def lift_rel_aux (R : α → β → Prop)
(C : computation α → computation β → Prop) :
α ⊕ computation α → β ⊕ computation β → Prop
| (sum.inl a) (sum.inl b) := R a b
| (sum.inl a) (sum.inr cb) := ∃ {b}, b ∈ cb ∧ R a b
| (sum.inr ca) (sum.inl b) := ∃ {a}, a ∈ ca ∧ R a b
| (sum.inr ca) (sum.inr cb) := C ca cb
attribute [simp] lift_rel_aux
@[simp] def lift_rel_aux.ret_left (R : α → β → Prop)
(C : computation α → computation β → Prop) (a cb) :
lift_rel_aux R C (sum.inl a) (destruct cb) ↔ ∃ {b}, b ∈ cb ∧ R a b :=
begin
apply cb.cases_on (λ b, _) (λ cb, _),
{ exact ⟨λ h, ⟨_, ret_mem _, h⟩, λ ⟨b', mb, h⟩,
by rw [mem_unique (ret_mem _) mb]; exact h⟩ },
{ rw [destruct_think],
exact ⟨λ ⟨b, h, r⟩, ⟨b, think_mem h, r⟩,
λ ⟨b, h, r⟩, ⟨b, of_think_mem h, r⟩⟩ }
end
theorem lift_rel_aux.swap (R : α → β → Prop) (C) (a b) :
lift_rel_aux (function.swap R) (function.swap C) b a = lift_rel_aux R C a b :=
by cases a with a ca; cases b with b cb; simp only [lift_rel_aux]
@[simp] def lift_rel_aux.ret_right (R : α → β → Prop)
(C : computation α → computation β → Prop) (b ca) :
lift_rel_aux R C (destruct ca) (sum.inl b) ↔ ∃ {a}, a ∈ ca ∧ R a b :=
by rw [←lift_rel_aux.swap, lift_rel_aux.ret_left]
lemma lift_rel_rec.lem {R : α → β → Prop} (C : computation α → computation β → Prop)
(H : ∀ {ca cb}, C ca cb → lift_rel_aux R C (destruct ca) (destruct cb))
(ca cb) (Hc : C ca cb) (a) (ha : a ∈ ca) : lift_rel R ca cb :=
begin
revert cb, refine mem_rec_on ha _ (λ ca' IH, _);
intros cb Hc; have h := H Hc,
{ simp at h, simp [h] },
{ have h := H Hc, simp, revert h, apply cb.cases_on (λ b, _) (λ cb', _);
intro h; simp at h; simp [h], exact IH _ h }
end
theorem lift_rel_rec {R : α → β → Prop} (C : computation α → computation β → Prop)
(H : ∀ {ca cb}, C ca cb → lift_rel_aux R C (destruct ca) (destruct cb))
(ca cb) (Hc : C ca cb) : lift_rel R ca cb :=
lift_rel_mem_cases (lift_rel_rec.lem C @H ca cb Hc) (λ b hb,
(lift_rel.swap _ _ _).2 $
lift_rel_rec.lem (function.swap C)
(λ cb ca h, cast (lift_rel_aux.swap _ _ _ _).symm $ H h)
cb ca Hc b hb)
end computation
| 709f41d517a4e0422eaf858706b48eee4858f650 | c777c32c8e484e195053731103c5e52af26a25d1 | /src/ring_theory/discriminant.lean | 21a559720dab7f00fc6662ae2cfa9e185730a746 | ["Apache-2.0"] | permissive | kbuzzard/mathlib | 2ff9e85dfe2a46f4b291927f983afec17e946eb8 | 58537299e922f9c77df76cb613910914a479c1f7 | refs/heads/master | 1,685,313,702,744 | 1,683,974,212,000 | 1,683,974,212,000 | 128,185,277 | 1 | 0 | null | 1,522,920,600,000 | 1,522,920,600,000 | null | UTF-8 | Lean | false | false | 16,422 | lean |
/-
Copyright (c) 2021 Riccardo Brasca. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Riccardo Brasca
-/
import ring_theory.trace
import ring_theory.norm
import number_theory.number_field.basic
/-!
# Discriminant of a family of vectors
Given an `A`-algebra `B` and `b`, an `ι`-indexed family of elements of `B`, we define the
*discriminant* of `b` as the determinant of the matrix whose `(i j)`-th element is the trace of
`b i * b j`.
## Main definition
* `algebra.discr A b` : the discriminant of `b : ι → B`.
## Main results
* `algebra.discr_zero_of_not_linear_independent` : if `b` is not linearly independent, then
`algebra.discr A b = 0`.
* `algebra.discr_of_matrix_vec_mul` and `discr_of_matrix_mul_vec` : formulas relating
`algebra.discr A ι b` with `algebra.discr A ((P.map (algebra_map A B)).vec_mul b)` and
`algebra.discr A ((P.map (algebra_map A B)).mul_vec b)`.
* `algebra.discr_not_zero_of_basis` : over a field, if `b` is a basis, then
`algebra.discr K b ≠ 0`.
* `algebra.discr_eq_det_embeddings_matrix_reindex_pow_two` : if `L/K` is a field extension and
`b : ι → L`, then `discr K b` is the square of the determinant of the matrix whose `(i, j)`
coefficient is `σⱼ (b i)`, where `σⱼ : L →ₐ[K] E` is the embedding in an algebraically closed
field `E` corresponding to `j : ι` via a bijection `e : ι ≃ (L →ₐ[K] E)`.
* `algebra.discr_of_power_basis_eq_prod` : the discriminant of a power basis.
* `discr_is_integral` : if `K` and `L` are fields and `is_scalar_tower R K L`, and `b : ι → L`
satisfies `∀ i, is_integral R (b i)`, then `is_integral R (discr K b)`.
* `discr_mul_is_integral_mem_adjoin` : let `K` be the fraction field of an integrally closed domain
`R` and let `L` be a finite separable extension of `K`. Let `B : power_basis K L` be such that
`is_integral R B.gen`. Then for all `z : L` that are integral over `R`, we have
`(discr K B.basis) • z ∈ adjoin R ({B.gen} : set L)`.
## Implementation details
Our definition works for any `A`-algebra `B`, but note that if `B` is not free as an `A`-module,
then `trace A B = 0` by definition, so `discr A b = 0` for any `b`.
-/
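/- As a concrete illustration of the definition: for a family `b` indexed by a
two-element type, unfolding `trace_matrix` gives the `2 × 2` determinant
  discr A b = Tr (b 0 * b 0) * Tr (b 1 * b 1) - Tr (b 0 * b 1) * Tr (b 1 * b 0)
            = Tr (b 0 ^ 2) * Tr (b 1 ^ 2) - Tr (b 0 * b 1) ^ 2,
where `Tr` abbreviates `algebra.trace A B` (an abbreviation used only in this comment). -/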
universes u v w z
open_locale matrix big_operators
open matrix finite_dimensional fintype polynomial finset intermediate_field
namespace algebra
variables (A : Type u) {B : Type v} (C : Type z) {ι : Type w}
variables [comm_ring A] [comm_ring B] [algebra A B] [comm_ring C] [algebra A C]
section discr
/-- Given an `A`-algebra `B` and `b`, an `ι`-indexed family of elements of `B`, we define
`discr A ι b` as the determinant of `trace_matrix A ι b`. -/
noncomputable
def discr (A : Type u) {B : Type v} [comm_ring A] [comm_ring B] [algebra A B] [fintype ι]
(b : ι → B) := by { classical, exact (trace_matrix A b).det }
lemma discr_def [decidable_eq ι] [fintype ι] (b : ι → B) :
discr A b = (trace_matrix A b).det := by convert rfl
variables {ι' : Type*} [fintype ι'] [fintype ι]
section basic
@[simp] lemma discr_reindex (b : basis ι A B) (f : ι ≃ ι') :
discr A (b ∘ ⇑(f.symm)) = discr A b :=
begin
classical,
rw [← basis.coe_reindex, discr_def, trace_matrix_reindex, det_reindex_self, ← discr_def]
end
/-- If `b` is not linearly independent, then `algebra.discr A b = 0`. -/
lemma discr_zero_of_not_linear_independent [is_domain A] {b : ι → B}
(hli : ¬linear_independent A b) : discr A b = 0 :=
begin
classical,
obtain ⟨g, hg, i, hi⟩ := fintype.not_linear_independent_iff.1 hli,
have : (trace_matrix A b).mul_vec g = 0,
{ ext i,
have : ∀ j, (trace A B) (b i * b j) * g j = (trace A B) (((g j) • (b j)) * b i),
{ intro j, simp [mul_comm], },
simp only [mul_vec, dot_product, trace_matrix_apply, pi.zero_apply, trace_form_apply,
λ j, this j, ← linear_map.map_sum, ← sum_mul, hg, zero_mul, linear_map.map_zero] },
by_contra h,
rw discr_def at h,
simpa [matrix.eq_zero_of_mul_vec_eq_zero h this] using hi,
end
variable {A}
/-- Relation between `algebra.discr A ι b` and
`algebra.discr A ((P.map (algebra_map A B)).vec_mul b)`. -/
lemma discr_of_matrix_vec_mul [decidable_eq ι] (b : ι → B) (P : matrix ι ι A) :
discr A ((P.map (algebra_map A B)).vec_mul b) = P.det ^ 2 * discr A b :=
by rw [discr_def, trace_matrix_of_matrix_vec_mul, det_mul, det_mul, det_transpose, mul_comm,
← mul_assoc, discr_def, pow_two]
/-- Relation between `algebra.discr A ι b` and
`algebra.discr A ((P.map (algebra_map A B)).mul_vec b)`. -/
lemma discr_of_matrix_mul_vec [decidable_eq ι] (b : ι → B) (P : matrix ι ι A) :
discr A ((P.map (algebra_map A B)).mul_vec b) = P.det ^ 2 * discr A b :=
by rw [discr_def, trace_matrix_of_matrix_mul_vec, det_mul, det_mul, det_transpose,
mul_comm, ← mul_assoc, discr_def, pow_two]
end basic
section field
variables (K : Type u) {L : Type v} (E : Type z) [field K] [field L] [field E]
variables [algebra K L] [algebra K E]
variables [module.finite K L] [is_alg_closed E]
/-- Over a field, if `b` is a basis, then `algebra.discr K b ≠ 0`. -/
lemma discr_not_zero_of_basis [is_separable K L] (b : basis ι K L) : discr K b ≠ 0 :=
begin
casesI is_empty_or_nonempty ι,
{ simp [discr] },
{ have := span_eq_top_of_linear_independent_of_card_eq_finrank b.linear_independent
(finrank_eq_card_basis b).symm,
classical,
rw [discr_def, trace_matrix],
simp_rw [← basis.mk_apply b.linear_independent this.ge],
rw [← trace_matrix, trace_matrix_of_basis, ← bilin_form.nondegenerate_iff_det_ne_zero],
exact trace_form_nondegenerate _ _ },
end
/-- Over a field, if `b` is a basis, then `algebra.discr K b` is a unit. -/
lemma discr_is_unit_of_basis [is_separable K L] (b : basis ι K L) : is_unit (discr K b) :=
is_unit.mk0 _ (discr_not_zero_of_basis _ _)
variables (b : ι → L) (pb : power_basis K L)
/-- If `L/K` is a field extension and `b : ι → L`, then `discr K b` is the square of the
determinant of the matrix whose `(i, j)` coefficient is `σⱼ (b i)`, where `σⱼ : L →ₐ[K] E` is the
embedding in an algebraically closed field `E` corresponding to `j : ι` via a bijection
`e : ι ≃ (L →ₐ[K] E)`. -/
lemma discr_eq_det_embeddings_matrix_reindex_pow_two [decidable_eq ι] [is_separable K L]
(e : ι ≃ (L →ₐ[K] E)) : algebra_map K E (discr K b) =
(embeddings_matrix_reindex K E b e).det ^ 2 :=
by rw [discr_def, ring_hom.map_det, ring_hom.map_matrix_apply,
trace_matrix_eq_embeddings_matrix_reindex_mul_trans, det_mul, det_transpose, pow_two]
/-- The discriminant of a power basis. -/
lemma discr_power_basis_eq_prod (e : fin pb.dim ≃ (L →ₐ[K] E)) [is_separable K L] :
algebra_map K E (discr K pb.basis) =
∏ i : fin pb.dim, ∏ j in Ioi i, (e j pb.gen- (e i pb.gen)) ^ 2 :=
begin
rw [discr_eq_det_embeddings_matrix_reindex_pow_two K E pb.basis e,
embeddings_matrix_reindex_eq_vandermonde, det_transpose, det_vandermonde, ← prod_pow],
congr, ext i,
rw [← prod_pow]
end
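/- An informal classical example: for `K = ℚ`, `L = ℚ(√d)` with `d` squarefree and power
basis generator `√d`, the two embeddings send `√d` to `√d` and `-√d`, so the product above
is `(√d - (-√d))^2 = 4 * d`, the classical discriminant of the basis `{1, √d}`. -/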
/-- A variation of `discr_power_basis_eq_prod`. -/
lemma discr_power_basis_eq_prod' [is_separable K L] (e : fin pb.dim ≃ (L →ₐ[K] E)) :
algebra_map K E (discr K pb.basis) =
∏ i : fin pb.dim, ∏ j in Ioi i, -((e j pb.gen - e i pb.gen) * (e i pb.gen - e j pb.gen)) :=
begin
rw [discr_power_basis_eq_prod _ _ _ e],
congr, ext i, congr, ext j,
ring
end
local notation `n` := finrank K L
/-- A variation of `discr_power_basis_eq_prod`. -/
lemma discr_power_basis_eq_prod'' [is_separable K L] (e : fin pb.dim ≃ (L →ₐ[K] E)) :
algebra_map K E (discr K pb.basis) =
(-1) ^ (n * (n - 1) / 2) * ∏ i : fin pb.dim, ∏ j in Ioi i,
(e j pb.gen - e i pb.gen) * (e i pb.gen - e j pb.gen) :=
begin
rw [discr_power_basis_eq_prod' _ _ _ e],
simp_rw [λ i j, neg_eq_neg_one_mul ((e j pb.gen- (e i pb.gen)) * (e i pb.gen- (e j pb.gen))),
prod_mul_distrib],
congr,
simp only [prod_pow_eq_pow_sum, prod_const],
congr,
rw [← @nat.cast_inj ℚ, nat.cast_sum],
have : ∀ (x : fin pb.dim), (↑x + 1) ≤ pb.dim := by simp [nat.succ_le_iff, fin.is_lt],
simp_rw [fin.card_Ioi, nat.sub_sub, add_comm 1],
simp only [nat.cast_sub, this, finset.card_fin, nsmul_eq_mul, sum_const, sum_sub_distrib,
nat.cast_add, nat.cast_one, sum_add_distrib, mul_one],
rw [← nat.cast_sum, ← @finset.sum_range ℕ _ pb.dim (λ i, i), sum_range_id ],
have hn : n = pb.dim,
{ rw [← alg_hom.card K L E, ← fintype.card_fin pb.dim],
exact card_congr (equiv.symm e) },
have h₂ : 2 ∣ (pb.dim * (pb.dim - 1)) := even_iff_two_dvd.1 (nat.even_mul_self_pred _),
have hne : ((2 : ℕ) : ℚ) ≠ 0 := by simp,
have hle : 1 ≤ pb.dim,
{ rw [← hn, nat.one_le_iff_ne_zero, ← zero_lt_iff, finite_dimensional.finrank_pos_iff],
apply_instance },
rw [hn, nat.cast_div h₂ hne, nat.cast_mul, nat.cast_sub hle],
field_simp,
ring,
end
/-- Formula for the discriminant of a power basis using the norm of the field extension. -/
lemma discr_power_basis_eq_norm [is_separable K L] : discr K pb.basis =
(-1) ^ (n * (n - 1) / 2) * (norm K (aeval pb.gen (minpoly K pb.gen).derivative)) :=
begin
let E := algebraic_closure L,
letI := λ (a b : E), classical.prop_decidable (eq a b),
have e : fin pb.dim ≃ (L →ₐ[K] E),
{ refine equiv_of_card_eq _,
rw [fintype.card_fin, alg_hom.card],
exact (power_basis.finrank pb).symm },
have hnodup : (map (algebra_map K E) (minpoly K pb.gen)).roots.nodup :=
nodup_roots (separable.map (is_separable.separable K pb.gen)),
have hroots : ∀ σ : L →ₐ[K] E, σ pb.gen ∈ (map (algebra_map K E) (minpoly K pb.gen)).roots,
{ intro σ,
rw [mem_roots, is_root.def, eval_map, ← aeval_def, aeval_alg_hom_apply],
repeat { simp [minpoly.ne_zero (is_separable.is_integral K pb.gen)] } },
apply (algebra_map K E).injective,
rw [ring_hom.map_mul, ring_hom.map_pow, ring_hom.map_neg, ring_hom.map_one,
discr_power_basis_eq_prod'' _ _ _ e],
congr,
rw [norm_eq_prod_embeddings, prod_prod_Ioi_mul_eq_prod_prod_off_diag],
conv_rhs { congr, skip, funext,
rw [← aeval_alg_hom_apply, aeval_root_derivative_of_splits (minpoly.monic
(is_separable.is_integral K pb.gen)) (is_alg_closed.splits_codomain _) (hroots σ),
← finset.prod_mk _ (hnodup.erase _)] },
rw [prod_sigma', prod_sigma'],
refine prod_bij (λ i hi, ⟨e i.2, e i.1 pb.gen⟩) (λ i hi, _) (λ i hi, by simp at hi)
(λ i j hi hj hij, _) (λ σ hσ, _),
{ simp only [true_and, finset.mem_mk, mem_univ, mem_sigma],
rw [multiset.mem_erase_of_ne (λ h, _)],
{ exact hroots _ },
{ simp only [true_and, mem_univ, ne.def, mem_sigma, mem_compl, mem_singleton] at hi,
rw [← power_basis.lift_equiv_apply_coe, ← power_basis.lift_equiv_apply_coe] at h,
exact hi (e.injective $ pb.lift_equiv.injective $ subtype.eq h.symm) } },
{ simp only [equiv.apply_eq_iff_eq, heq_iff_eq] at hij,
have h := hij.2,
rw [← power_basis.lift_equiv_apply_coe, ← power_basis.lift_equiv_apply_coe] at h,
refine sigma.eq (equiv.injective e (equiv.injective _ (subtype.eq h))) (by simp [hij.1]) },
{ simp only [true_and, finset.mem_mk, mem_univ, mem_sigma] at ⊢ hσ,
simp only [sigma.exists, exists_prop, mem_compl, mem_singleton, ne.def],
refine ⟨e.symm (power_basis.lift pb σ.2 _), e.symm σ.1, ⟨λ h, _, sigma.eq _ _⟩⟩,
{ rw [aeval_def, eval₂_eq_eval_map, ← is_root.def, ← mem_roots],
{ exact multiset.erase_subset _ _ hσ },
{ simp [minpoly.ne_zero (is_separable.is_integral K pb.gen)] } },
{ replace h := alg_hom.congr_fun (equiv.injective _ h) pb.gen,
rw [power_basis.lift_gen] at h,
rw [← h] at hσ,
exact hnodup.not_mem_erase hσ },
all_goals { simp } }
end
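/- Continuing the informal `ℚ(√d)` example: here `n = 2`, `minpoly ℚ √d = X ^ 2 - d`, its
derivative evaluated at `√d` is `2 * √d`, and `norm ℚ (2 * √d) = -(4 * d)`; together with
the sign `(-1) ^ (2 * 1 / 2) = -1` this again gives `4 * d`. -/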
section integral
variables {R : Type z} [comm_ring R] [algebra R K] [algebra R L] [is_scalar_tower R K L]
/-- If `K` and `L` are fields and `is_scalar_tower R K L`, and `b : ι → L` satisfies
`∀ i, is_integral R (b i)`, then `is_integral R (discr K b)`. -/
lemma discr_is_integral {b : ι → L} (h : ∀ i, is_integral R (b i)) :
is_integral R (discr K b) :=
begin
classical,
rw [discr_def],
exact is_integral.det (λ i j, is_integral_trace (is_integral_mul (h i) (h j)))
end
/-- If `b` and `b'` are `ℚ`-bases of a number field `K` such that
`∀ i j, is_integral ℤ (b.to_matrix b' i j)` and `∀ i j, is_integral ℤ (b'.to_matrix b i j)` then
`discr ℚ b = discr ℚ b'`. -/
lemma discr_eq_discr_of_to_matrix_coeff_is_integral [number_field K] {b : basis ι ℚ K}
{b' : basis ι' ℚ K} (h : ∀ i j, is_integral ℤ (b.to_matrix b' i j))
(h' : ∀ i j, is_integral ℤ (b'.to_matrix b i j)) :
discr ℚ b = discr ℚ b' :=
begin
replace h' : ∀ i j, is_integral ℤ (b'.to_matrix ((b.reindex (b.index_equiv b'))) i j),
{ intros i j,
convert h' i ((b.index_equiv b').symm j),
simpa },
classical,
rw [← (b.reindex (b.index_equiv b')).to_matrix_map_vec_mul b', discr_of_matrix_vec_mul,
← one_mul (discr ℚ b), basis.coe_reindex, discr_reindex],
congr,
have hint : is_integral ℤ (((b.reindex (b.index_equiv b')).to_matrix b').det) :=
is_integral.det (λ i j, h _ _),
obtain ⟨r, hr⟩ := is_integrally_closed.is_integral_iff.1 hint,
have hunit : is_unit r,
{ have : is_integral ℤ ((b'.to_matrix (b.reindex (b.index_equiv b'))).det) :=
is_integral.det (λ i j, h' _ _),
obtain ⟨r', hr'⟩ := is_integrally_closed.is_integral_iff.1 this,
refine is_unit_iff_exists_inv.2 ⟨r', _⟩,
suffices : algebra_map ℤ ℚ (r * r') = 1,
{ rw [← ring_hom.map_one (algebra_map ℤ ℚ)] at this,
exact (is_fraction_ring.injective ℤ ℚ) this },
rw [ring_hom.map_mul, hr, hr', ← det_mul, basis.to_matrix_mul_to_matrix_flip, det_one] },
rw [← ring_hom.map_one (algebra_map ℤ ℚ), ← hr],
cases int.is_unit_iff.1 hunit with hp hm,
{ simp [hp] },
{ simp [hm] }
end
/-- Let `K` be the fraction field of an integrally closed domain `R` and let `L` be a finite
separable extension of `K`. Let `B : power_basis K L` be such that `is_integral R B.gen`.
Then for all `z : L` that are integral over `R`, we have
`(discr K B.basis) • z ∈ adjoin R ({B.gen} : set L)`. -/
lemma discr_mul_is_integral_mem_adjoin [is_domain R] [is_separable K L] [is_integrally_closed R]
[is_fraction_ring R K] {B : power_basis K L} (hint : is_integral R B.gen) {z : L}
(hz : is_integral R z) : (discr K B.basis) • z ∈ adjoin R ({B.gen} : set L) :=
begin
have hinv : is_unit (trace_matrix K B.basis).det :=
by simpa [← discr_def] using discr_is_unit_of_basis _ B.basis,
have H : (trace_matrix K B.basis).det • (trace_matrix K B.basis).mul_vec (B.basis.equiv_fun z) =
(trace_matrix K B.basis).det • (λ i, trace K L (z * B.basis i)),
{ congr, exact trace_matrix_of_basis_mul_vec _ _ },
have cramer := mul_vec_cramer (trace_matrix K B.basis) (λ i, trace K L (z * B.basis i)),
suffices : ∀ i, ((trace_matrix K B.basis).det • (B.basis.equiv_fun z)) i ∈ (⊥ : subalgebra R K),
{ rw [← B.basis.sum_repr z, finset.smul_sum],
refine subalgebra.sum_mem _ (λ i hi, _),
replace this := this i,
rw [← discr_def, pi.smul_apply, mem_bot] at this,
obtain ⟨r, hr⟩ := this,
rw [basis.equiv_fun_apply] at hr,
rw [← smul_assoc, ← hr, algebra_map_smul],
refine subalgebra.smul_mem _ _ _,
rw [B.basis_eq_pow i],
refine subalgebra.pow_mem _ (subset_adjoin (set.mem_singleton _)) _},
intro i,
rw [← H, ← mul_vec_smul] at cramer,
replace cramer := congr_arg (mul_vec (trace_matrix K B.basis)⁻¹) cramer,
rw [mul_vec_mul_vec, nonsing_inv_mul _ hinv, mul_vec_mul_vec, nonsing_inv_mul _ hinv,
one_mul_vec, one_mul_vec] at cramer,
rw [← congr_fun cramer i, cramer_apply, det_apply],
refine subalgebra.sum_mem _ (λ σ _, subalgebra.zsmul_mem _ (subalgebra.prod_mem _ (λ j _, _)) _),
by_cases hji : j = i,
{ simp only [update_column_apply, hji, eq_self_iff_true, power_basis.coe_basis],
exact mem_bot.2 (is_integrally_closed.is_integral_iff.1 $ is_integral_trace $
is_integral_mul hz $ is_integral.pow hint _) },
{ simp only [update_column_apply, hji, power_basis.coe_basis],
exact mem_bot.2 (is_integrally_closed.is_integral_iff.1 $ is_integral_trace
$ is_integral_mul (is_integral.pow hint _) (is_integral.pow hint _)) }
end
end integral
end field
end discr
end algebra
| 6d38cccaeec65e2179442295b2f195bfc6d5a430 | 8cae430f0a71442d02dbb1cbb14073b31048e4b0 | /src/data/multiset/bind.lean | 57946c5c01ecb383c5b4138afdb61490d2029620 | ["Apache-2.0"] | permissive | leanprover-community/mathlib | 56a2cadd17ac88caf4ece0a775932fa26327ba0e | 442a83d738cb208d3600056c489be16900ba701d | refs/heads/master | 1,693,584,102,358 | 1,693,471,902,000 | 1,693,471,902,000 | 97,922,418 | 1,595 | 352 | Apache-2.0 | 1,694,693,445,000 | 1,500,624,130,000 | Lean | UTF-8 | Lean | false | false | 9,679 | lean |
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import algebra.big_operators.multiset.basic
/-!
# Bind operation for multisets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines a few basic operations on `multiset`, notably the monadic bind.
## Main declarations
* `multiset.join`: The join, aka union or sum, of multisets.
* `multiset.bind`: The bind of a multiset-indexed family of multisets.
* `multiset.product`: Cartesian product of two multisets.
* `multiset.sigma`: Disjoint sum of multisets in a sigma type.
-/
variables {α β γ δ : Type*}
namespace multiset
/-! ### Join -/
/-- `join S`, where `S` is a multiset of multisets, is the lift of the list join
operation, that is, the union of all the sets.
join {{1, 2}, {1, 2}, {0, 1}} = {0, 1, 1, 1, 2, 2} -/
def join : multiset (multiset α) → multiset α := sum
lemma coe_join : ∀ L : list (list α),
join (L.map (@coe _ (multiset α) _) : multiset (multiset α)) = L.join
| [] := rfl
| (l :: L) := congr_arg (λ s : multiset α, ↑l + s) (coe_join L)
@[simp] lemma join_zero : @join α 0 = 0 := rfl
@[simp] lemma join_cons (s S) : @join α (s ::ₘ S) = s + join S := sum_cons _ _
@[simp] lemma join_add (S T) : @join α (S + T) = join S + join T := sum_add _ _
@[simp] lemma singleton_join (a) : join ({a} : multiset (multiset α)) = a := sum_singleton _
@[simp] lemma mem_join {a S} : a ∈ @join α S ↔ ∃ s ∈ S, a ∈ s :=
multiset.induction_on S (by simp) $
by simp [or_and_distrib_right, exists_or_distrib] {contextual := tt}
@[simp] lemma card_join (S) : card (@join α S) = sum (map card S) :=
multiset.induction_on S (by simp) (by simp)
lemma rel_join {r : α → β → Prop} {s t} (h : rel (rel r) s t) : rel r s.join t.join :=
begin
induction h,
case rel.zero { simp },
case rel.cons : a b s t hab hst ih { simpa using hab.add ih }
end
/-! ### Bind -/
section bind
variables (a : α) (s t : multiset α) (f g : α → multiset β)
/-- `s.bind f` is the monad bind operation, defined as `(s.map f).join`. It is the union of `f a` as
`a` ranges over `s`. -/
def bind (s : multiset α) (f : α → multiset β) : multiset β := (s.map f).join
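-- For example:
--   bind {1, 2} (λ n, {n, n}) = join {{1, 1}, {2, 2}} = {1, 1, 2, 2}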
@[simp] lemma coe_bind (l : list α) (f : α → list β) : @bind α β l (λ a, f a) = l.bind f :=
by rw [list.bind, ←coe_join, list.map_map]; refl
@[simp] lemma zero_bind : bind 0 f = 0 := rfl
@[simp] lemma cons_bind : (a ::ₘ s).bind f = f a + s.bind f := by simp [bind]
@[simp] lemma singleton_bind : bind {a} f = f a := by simp [bind]
@[simp] lemma add_bind : (s + t).bind f = s.bind f + t.bind f := by simp [bind]
@[simp] lemma bind_zero : s.bind (λ a, 0 : α → multiset β) = 0 := by simp [bind, join, nsmul_zero]
@[simp] lemma bind_add : s.bind (λ a, f a + g a) = s.bind f + s.bind g := by simp [bind, join]
@[simp] lemma bind_cons (f : α → β) (g : α → multiset β) :
s.bind (λ a, f a ::ₘ g a) = map f s + s.bind g :=
multiset.induction_on s (by simp) (by simp [add_comm, add_left_comm] {contextual := tt})
@[simp] lemma bind_singleton (f : α → β) : s.bind (λ x, ({f x} : multiset β)) = map f s :=
multiset.induction_on s (by rw [zero_bind, map_zero]) (by simp [singleton_add])
@[simp] lemma mem_bind {b s} {f : α → multiset β} : b ∈ bind s f ↔ ∃ a ∈ s, b ∈ f a :=
by simp [bind]; simp [-exists_and_distrib_right, exists_and_distrib_right.symm];
rw exists_swap; simp [and_assoc]
@[simp] lemma card_bind : (s.bind f).card = (s.map (card ∘ f)).sum := by simp [bind]
lemma bind_congr {f g : α → multiset β} {m : multiset α} :
(∀ a ∈ m, f a = g a) → bind m f = bind m g :=
by simp [bind] {contextual := tt}
lemma bind_hcongr {β' : Type*} {m : multiset α} {f : α → multiset β} {f' : α → multiset β'}
(h : β = β') (hf : ∀a ∈ m, f a == f' a) :
bind m f == bind m f' :=
begin subst h, simp at hf, simp [bind_congr hf] end
lemma map_bind (m : multiset α) (n : α → multiset β) (f : β → γ) :
map f (bind m n) = bind m (λ a, map f (n a)) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
lemma bind_map (m : multiset α) (n : β → multiset γ) (f : α → β) :
bind (map f m) n = bind m (λ a, n (f a)) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
lemma bind_assoc {s : multiset α} {f : α → multiset β} {g : β → multiset γ} :
(s.bind f).bind g = s.bind (λ a, (f a).bind g) :=
multiset.induction_on s (by simp) (by simp {contextual := tt})
lemma bind_bind (m : multiset α) (n : multiset β) {f : α → β → multiset γ} :
(bind m $ λ a, bind n $ λ b, f a b) = (bind n $ λ b, bind m $ λ a, f a b) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
lemma bind_map_comm (m : multiset α) (n : multiset β) {f : α → β → γ} :
(bind m $ λ a, n.map $ λ b, f a b) = (bind n $ λ b, m.map $ λ a, f a b) :=
multiset.induction_on m (by simp) (by simp {contextual := tt})
@[simp, to_additive]
lemma prod_bind [comm_monoid β] (s : multiset α) (t : α → multiset β) :
(s.bind t).prod = (s.map $ λ a, (t a).prod).prod :=
multiset.induction_on s (by simp) (assume a s ih, by simp [ih, cons_bind])
lemma rel_bind {r : α → β → Prop} {p : γ → δ → Prop} {s t} {f : α → multiset γ} {g : β → multiset δ}
(h : (r ⇒ rel p) f g) (hst : rel r s t) :
rel p (s.bind f) (t.bind g) :=
by { apply rel_join, rw rel_map, exact hst.mono (λ a ha b hb hr, h hr) }
lemma count_sum [decidable_eq α] {m : multiset β} {f : β → multiset α} {a : α} :
count a (map f m).sum = sum (m.map $ λ b, count a $ f b) :=
multiset.induction_on m (by simp) (by simp)
lemma count_bind [decidable_eq α] {m : multiset β} {f : β → multiset α} {a : α} :
count a (bind m f) = sum (m.map $ λ b, count a $ f b) := count_sum
lemma le_bind {α β : Type*} {f : α → multiset β} (S : multiset α) {x : α} (hx : x ∈ S) :
f x ≤ S.bind f :=
begin
classical,
rw le_iff_count, intro a,
rw count_bind, apply le_sum_of_mem,
rw mem_map, exact ⟨x, hx, rfl⟩
end
@[simp] theorem attach_bind_coe (s : multiset α) (f : α → multiset β) :
s.attach.bind (λ i, f i) = s.bind f :=
congr_arg join $ attach_map_coe' _ _
end bind
/-! ### Product of two multisets -/
section product
variables (a : α) (b : β) (s : multiset α) (t : multiset β)
/-- The multiplicity of `(a, b)` in `s ×ˢ t` is
the product of the multiplicity of `a` in `s` and that of `b` in `t`. -/
def product (s : multiset α) (t : multiset β) : multiset (α × β) := s.bind $ λ a, t.map $ prod.mk a
/- This notation binds more strongly than (pre)images, unions and intersections. -/
infixr (name := multiset.product) ` ×ˢ `:82 := multiset.product
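/- A quick sanity check (an illustrative example added here, not part of the original file):
every element of `s` is paired with every element of `t`. -/
example : ({0, 1} : multiset ℕ) ×ˢ ({5} : multiset ℕ) = {(0, 5), (1, 5)} := dec_trivial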
@[simp] lemma coe_product (l₁ : list α) (l₂ : list β) : @product α β l₁ l₂ = l₁.product l₂ :=
by { rw [product, list.product, ←coe_bind], simp }
@[simp] lemma zero_product : @product α β 0 t = 0 := rfl
@[simp] lemma cons_product : (a ::ₘ s) ×ˢ t = map (prod.mk a) t + s ×ˢ t := by simp [product]
@[simp] lemma product_zero : s ×ˢ (0 : multiset β) = 0 := by simp [product]
@[simp] lemma product_cons : s ×ˢ (b ::ₘ t) = s.map (λ a, (a, b)) + s ×ˢ t := by simp [product]
@[simp] lemma product_singleton : ({a} : multiset α) ×ˢ ({b} : multiset β) = {(a, b)} :=
by simp only [product, bind_singleton, map_singleton]
@[simp] lemma add_product (s t : multiset α) (u : multiset β) : (s + t) ×ˢ u = s ×ˢ u + t ×ˢ u :=
by simp [product]
@[simp] lemma product_add (s : multiset α) : ∀ t u : multiset β, s ×ˢ (t + u) = s ×ˢ t + s ×ˢ u :=
multiset.induction_on s (λ t u, rfl) $ λ a s IH t u,
by rw [cons_product, IH]; simp; cc
@[simp] lemma mem_product {s t} : ∀ {p : α × β}, p ∈ @product α β s t ↔ p.1 ∈ s ∧ p.2 ∈ t
| (a, b) := by simp [product, and.left_comm]
@[simp] lemma card_product : (s ×ˢ t).card = s.card * t.card := by simp [product]
end product
/-! ### Disjoint sum of multisets -/
section sigma
variables {σ : α → Type*} (a : α) (s : multiset α) (t : Π a, multiset (σ a))
/-- `sigma s t` is the dependent version of `product`. It is the multiset of
dependent pairs `⟨a, b⟩` as `a` ranges over `s` and `b` ranges over `t a`. -/
protected def sigma (s : multiset α) (t : Π a, multiset (σ a)) : multiset (Σ a, σ a) :=
s.bind $ λ a, (t a).map $ sigma.mk a
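/- For instance (an illustrative remark added here, not part of the original file):
`({0, 1} : multiset ℕ).sigma (λ a, ({a} : multiset ℕ))` is the multiset of dependent pairs
`{⟨0, 0⟩, ⟨1, 1⟩}`. -/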
@[simp] lemma coe_sigma (l₁ : list α) (l₂ : Π a, list (σ a)) :
@multiset.sigma α σ l₁ (λ a, l₂ a) = l₁.sigma l₂ :=
by rw [multiset.sigma, list.sigma, ←coe_bind]; simp
@[simp] lemma zero_sigma : @multiset.sigma α σ 0 t = 0 := rfl
@[simp] lemma cons_sigma : (a ::ₘ s).sigma t = (t a).map (sigma.mk a) + s.sigma t :=
by simp [multiset.sigma]
@[simp] lemma sigma_singleton (b : α → β) :
({a} : multiset α).sigma (λ a, ({b a} : multiset β)) = {⟨a, b a⟩} := rfl
@[simp] lemma add_sigma (s t : multiset α) (u : Π a, multiset (σ a)) :
(s + t).sigma u = s.sigma u + t.sigma u :=
by simp [multiset.sigma]
@[simp] lemma sigma_add : ∀ t u : Π a, multiset (σ a),
s.sigma (λ a, t a + u a) = s.sigma t + s.sigma u :=
multiset.induction_on s (λ t u, rfl) $ λ a s IH t u,
by rw [cons_sigma, IH]; simp; cc
@[simp] lemma mem_sigma {s t} : ∀ {p : Σ a, σ a},
p ∈ @multiset.sigma α σ s t ↔ p.1 ∈ s ∧ p.2 ∈ t p.1
| ⟨a, b⟩ := by simp [multiset.sigma, and_assoc, and.left_comm]
@[simp] lemma card_sigma :
card (s.sigma t) = sum (map (λ a, card (t a)) s) :=
by simp [multiset.sigma, (∘)]
end sigma
end multiset
|
9bc8b4107ee6ec78f34f650505cc9dbe83a2f77a
|
ac076ebc286fa9b7a67171f6cd11eb98b263d6ef
|
/src/list_equiv.lean
|
9fdeb3c231ca74038c683c512aa968ceb50d5391
|
[] |
no_license
|
Shamrock-Frost/jordan-holder
|
e891e489d00f8ff9e29c47b3083f22cac7804efb
|
bab3daccd70a4f3c5b25731b899a2cd72d7b8376
|
refs/heads/master
| 1,594,962,465,041
| 1,576,197,432,000
| 1,576,197,432,000
| 205,951,913
| 1
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 9,375
|
lean
|
import .category_theory .SES
universe u
def equiv_group_lists (L₁ L₂ : list Group) :=
list.length L₁ = list.length L₂ ∧
∃ L, list.perm L L₁
∧ (list.foldr (∧) true
$ list.zip_with (λ K K' : Group, nonempty (K ≅ K')) L L₂)
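/- In words (an explanatory comment added here, not part of the original file): two lists of groups
are equivalent when they have the same length and one of them can be permuted into a list whose
entries are termwise isomorphic to those of the other. -/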
def termwise_iso_symm
: ∀ {xs ys : list Group},
list.length xs = list.length ys
→ list.foldr (∧) true (list.zip_with (λ K K' : Group, nonempty (K ≅ K')) xs ys)
→ list.foldr (∧) true (list.zip_with (λ K K' : Group, nonempty (K ≅ K')) ys xs)
| [] [] hlen h := trivial
| [] (_::_) hlen h := by cases hlen
| (_::_) [] hlen h := by cases hlen
| (x::xs) (y::ys) hlen h :=
match h.left with
| ⟨iso⟩ := ⟨⟨iso.symm⟩, termwise_iso_symm (nat.succ.inj hlen) h.right⟩
end
@[symm]
def equiv_group_lists.symm {xs ys : list Group}
(heqv : equiv_group_lists xs ys) : equiv_group_lists ys xs :=
begin
cases heqv with hlen heqv, cases heqv with L hL,
cases hL with hperm hL, revert ys, induction hperm; intros ys hlen heqv,
{ simp at hlen, have := iff.mp list.length_eq_zero hlen.symm,
rw this, obviously },
case list.perm.skip : x xs L hperm ih
{ constructor, exact hlen.symm, cases ys with y ys, cases hlen,
cases ih (nat.succ_inj hlen) heqv.right with _ h,
cases h with L' h, existsi list.cons y L',
constructor, apply list.perm.skip, exact h.left,
constructor, cases heqv.left, constructor, exact val.symm,
exact h.right, },
case list.perm.swap : x x' xs ys
{ constructor, exact hlen.symm,
cases ys with y ys, cases hlen, cases ys with y' ys, cases hlen,
existsi list.cons y' (list.cons y ys), constructor,
apply list.perm.swap, dsimp [list.zip_with] at heqv,
cases heqv.left with h, cases heqv.right.left with h',
exact ⟨⟨h'.symm⟩, ⟨h.symm⟩, termwise_iso_symm (nat.succ.inj $ nat.succ.inj hlen) heqv.right.right⟩ },
case list.perm.trans : xs zs ws hxz hzw ih ih'
{ have : list.length ys = list.length zs,
symmetry, transitivity list.length ws, apply list.perm_length, assumption',
cases ih this.symm heqv with _ h,
cases h with L h, cases @ih' L _ _,
tactic.swap, transitivity list.length ys, assumption,
apply list.perm_length h.left.symm, tactic.swap,
apply termwise_iso_symm, transitivity list.length ys,
apply list.perm_length h.left, assumption,
exact h.right,
cases right with L' h', apply and.intro hlen.symm,
existsi L', refine and.intro _ h'.right, transitivity L,
exact h'.left, exact h.left }
end
inductive list.perm_seq {α : Type u} : list α → list α → Type (u+1)
| nil : list.perm_seq [] []
| skip : Π (x : α) {l₁ l₂ : list α}, list.perm_seq l₁ l₂ → list.perm_seq (x::l₁) (x::l₂)
| swap : Π (x y : α) (l : list α), list.perm_seq (y::x::l) (x::y::l)
| trans : Π {l₁ l₂ l₃ : list α}, list.perm_seq l₁ l₂ → list.perm_seq l₂ l₃ → list.perm_seq l₁ l₃
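/- An explanatory comment (added here, not part of the original file): `list.perm_seq` is a
`Type`-valued, proof-relevant version of `list.perm`. Carrying the permutation as data is what
allows `apply_perm` below to recurse on it and produce a rearranged list as output. -/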
lemma list.perm_iff_perm_seq {α} {xs ys : list α} : xs ~ ys ↔ nonempty (list.perm_seq xs ys) :=
begin
constructor; intro hperm,
{ induction hperm,
{ constructor, constructor },
{ cases hperm_ih, constructor, constructor, assumption },
{ constructor, constructor },
{ cases hperm_ih_a, cases hperm_ih_a_1, constructor, constructor; assumption } },
{ cases hperm, induction hperm,
{ constructor },
{ constructor, assumption },
{ constructor },
{ constructor; assumption } }
end
lemma list.perm_seq_length {α} {xs xs' : list α}
: list.perm_seq xs xs' → list.length xs = list.length xs'
:= by { intro h, replace h := nonempty.intro h,
rw ← list.perm_iff_perm_seq at h, apply list.perm_length h }
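/- An explanatory comment (added here, not part of the original file): given a permutation of `ys`
into `ys'` as `perm_seq` data and a list `xs` of the same length as `ys`, `apply_perm` rearranges
`xs` in the same way, returning the rearranged list together with a `perm_seq xs xs'`. -/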
def apply_perm {α β} : ∀ {ys ys' : list α},
list.perm_seq ys ys'
→ ∀ (xs : list β), xs.length = ys.length → Σ (xs' : list β), list.perm_seq xs xs'
| [] [] list.perm_seq.nil := λ xs,
match xs with
| [] := λ _, ⟨[], list.perm_seq.nil⟩
| _::_ := λ hlen, false.elim (nat.succ_ne_zero _ hlen)
end
| (._::ys) (._::ys') (list.perm_seq.skip y hperm) := λ xs,
match xs with
| [] := λ hlen, false.elim $ nat.succ_ne_zero _ hlen.symm
| (x::xs) := λ hlen,
let ⟨xs', h⟩ := apply_perm hperm xs (nat.succ.inj hlen)
in ⟨x::xs', list.perm_seq.skip x h⟩
end
| ._ ._ (list.perm_seq.swap a b ys) := λ xs,
match xs with
| [] := λ hlen, false.elim $ nat.succ_ne_zero _ hlen.symm
| [_] := λ hlen, false.elim $ nat.succ_ne_zero _ (nat.succ.inj hlen.symm)
| (x::x'::xs) :=
λ _, ⟨x' :: x :: xs, list.perm_seq.swap x' x xs⟩
end
| ys ys'' (@list.perm_seq.trans _ _ ys' _ hperm hperm') := λ xs hlen,
let ⟨xs', h⟩ := apply_perm hperm xs hlen,
⟨xs'', h'⟩ := apply_perm hperm' xs'
$ calc list.length xs' = list.length xs : eq.symm (list.perm_seq_length h)
... = list.length ys : hlen
... = list.length ys' : list.perm_seq_length hperm
in ⟨xs'', list.perm_seq.trans h h'⟩
lemma apply_perm_spec {α α' β γ} {f : α → α' → β} (c : β → γ → γ) (n : γ)
(f_comm : left_commutative c)
{xs : list α} {ys L : list α'}
(hlen : list.length xs = list.length ys)
(hperm : list.perm_seq ys L)
: list.foldr c n (list.zip_with f xs ys)
= list.foldr c n (list.zip_with f (apply_perm hperm xs hlen).fst L) :=
begin
revert xs, induction hperm; intros,
{ dsimp at hlen, rw list.length_eq_zero at hlen, subst hlen, refl },
case list.perm_seq.skip : y ys ys' hperm ih {
cases xs with x xs, { exfalso, exact nat.succ_ne_zero _ (eq.symm hlen), },
dsimp [apply_perm], destruct (apply_perm hperm xs (nat.succ_inj hlen)),
intros, rw a, dsimp [apply_perm._match_3, list.zip_with, list.foldr],
apply congr_arg, rw (_ : fst = (apply_perm hperm xs (nat.succ_inj hlen)).fst),
apply ih, rw a,
},
case list.perm_seq.swap : y' y ys xs hlen ih {
cases xs with x xs, contradiction,
cases xs with x' xs, have := nat.succ_inj hlen, contradiction,
apply f_comm,
},
case list.perm_seq.trans : ys ys' ys'' hperm hperm' ih ih' {
dsimp [apply_perm], destruct apply_perm hperm xs hlen, intros,
rw a, dsimp [apply_perm._match_6],
have hlen' : list.length fst = list.length ys',
{ transitivity list.length xs, symmetry, apply list.perm_seq_length, assumption,
transitivity list.length ys, assumption, apply list.perm_seq_length, assumption, },
destruct apply_perm hperm' fst hlen', intros,
rw a_1, dsimp [apply_perm._match_5],
rw (_ : fst_1 = (apply_perm hperm' fst hlen').fst),
rw ← ih', rw (_ : fst = (apply_perm hperm xs hlen).fst),
apply ih, rw a, rw a_1,
}
end
@[trans]
lemma equiv_group_lists.trans {xs ys zs : list Group}
(hxy : equiv_group_lists xs ys) (hyz : equiv_group_lists ys zs)
: equiv_group_lists xs zs :=
begin
cases hxy with hlen hxy, cases hxy with L h, cases h with hperm h,
replace hyz := equiv_group_lists.symm hyz,
cases hyz with hlen' hyz, cases hyz with L' h', cases h' with hperm' h',
constructor, exact eq.trans hlen hlen'.symm,
have : list.foldr and true (list.zip_with (λ (K K' : Group), nonempty (K ≅ K')) L L'),
{ replace hlen := eq.trans (list.perm_length hperm) hlen,
replace hlen' := eq.trans (list.perm_length hperm') hlen',
clear hperm, clear hperm', clear xs, clear zs,
revert L L', induction ys with y ys ih; intros xs zs h h' hlen hlen';
cases xs with x xs; cases zs with z zs; try { contradiction },
case list.nil { constructor },
case list.cons {
constructor,
{ cases h.left, cases h'.left, constructor,
transitivity y, assumption, symmetry, assumption },
{ apply ih, exact h.right, exact h'.right,
exact nat.succ.inj hlen, exact nat.succ.inj hlen' } } },
replace hlen := eq.trans hlen (eq.symm hlen'),
clear h, clear h', clear hlen', clear ys,
rw list.perm_iff_perm_seq at hperm hperm',
cases hperm, cases hperm',
have hlen' : list.length L = list.length L'
:= calc list.length L = list.length xs : list.perm_seq_length hperm
... = list.length zs : hlen
... = list.length L' : eq.symm (list.perm_seq_length hperm'),
existsi (apply_perm hperm' L hlen').fst, constructor,
{ have hperm := iff.mpr list.perm_iff_perm_seq ⟨hperm⟩,
have hperm'' := iff.mpr list.perm_iff_perm_seq ⟨(apply_perm hperm' L hlen').snd⟩,
transitivity L, symmetry, assumption, assumption },
rw ← apply_perm_spec, assumption,
{ intros p q r, rw [← and_assoc p q, and_comm p q, and_assoc q p], }
end
lemma equiv_group_lists.skip {xs ys : list Group} (x : Group)
(heqv : equiv_group_lists xs ys) : equiv_group_lists (x::xs) (x::ys) :=
begin
cases heqv with hlen heqv, apply and.intro (congr_arg nat.succ hlen),
cases heqv with L h, cases h with hperm h, existsi (list.cons x L),
constructor, apply list.perm.skip, assumption, refine ⟨_, h⟩, constructor, refl
end
lemma equiv_group_lists.swap' {xs ys : list Group} (a b : Group)
(heqv : equiv_group_lists xs ys) : equiv_group_lists (a::b::xs) (b::a::ys) :=
begin
cases heqv with hlen heqv, apply and.intro (congr_arg nat.succ (congr_arg nat.succ hlen)),
cases heqv with L h, cases h with hperm h, existsi (list.cons b (list.cons a L)),
constructor, apply list.perm.swap', assumption, refine ⟨_, _, h⟩; constructor; refl
end
|
884181ee2f6184da2677524da29924be488a6c15
|
2eab05920d6eeb06665e1a6df77b3157354316ad
|
/src/set_theory/ordinal_notation.lean
|
f1668831052839b8d692e2fb561b3ef68a1baf22
|
[
"Apache-2.0"
] |
permissive
|
ayush1801/mathlib
|
78949b9f789f488148142221606bf15c02b960d2
|
ce164e28f262acbb3de6281b3b03660a9f744e3c
|
refs/heads/master
| 1,692,886,907,941
| 1,635,270,866,000
| 1,635,270,866,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 35,964
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import set_theory.ordinal_arithmetic
/-!
# Ordinal notation
Constructive ordinal arithmetic for ordinals below `ε₀`.
We define a type `onote`, with constructors `0 : onote` and `onote.oadd e n a` representing
`ω ^ e * n + a`.
We say that `o` is in Cantor normal form - `onote.NF o` - if either `o = 0` or
`o = ω ^ e * n + a` with `a < ω ^ e` and `a` in Cantor normal form.
The type `nonote` is the type of ordinals below `ε₀` in Cantor normal form.
Various operations (addition, subtraction, multiplication, power function)
are defined on `onote` and `nonote`.
-/
open ordinal
open_locale ordinal -- get notation for `ω`
/-- Recursive definition of an ordinal notation. `zero` denotes the
ordinal 0, and `oadd e n a` is intended to refer to `ω^e * n + a`.
For this to be valid Cantor normal form, we must have the exponents
decrease to the right, but we can't state this condition until we've
defined `repr`, so it is a separate definition `NF`. -/
@[derive decidable_eq]
inductive onote : Type
| zero : onote
| oadd : onote → ℕ+ → onote → onote
namespace onote
/-- Notation for 0 -/
instance : has_zero onote := ⟨zero⟩
@[simp] theorem zero_def : zero = 0 := rfl
instance : inhabited onote := ⟨0⟩
/-- Notation for 1 -/
instance : has_one onote := ⟨oadd 0 1 0⟩
/-- Notation for ω -/
def omega : onote := oadd 1 1 0
/-- The ordinal denoted by a notation -/
@[simp] noncomputable def repr : onote → ordinal.{0}
| 0 := 0
| (oadd e n a) := ω ^ repr e * n + repr a
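/- For example (an illustrative remark added here, not part of the original file):
`oadd (oadd 0 1 0) 2 (oadd 0 3 0)` has `repr` equal to `ω ^ 1 * 2 + (ω ^ 0 * 3 + 0) = ω * 2 + 3`. -/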
/-- Auxiliary definition to print an ordinal notation -/
def to_string_aux1 (e : onote) (n : ℕ) (s : string) : string :=
if e = 0 then _root_.to_string n else
(if e = 1 then "ω" else "ω^(" ++ s ++ ")") ++
if n = 1 then "" else "*" ++ _root_.to_string n
/-- Print an ordinal notation -/
def to_string : onote → string
| zero := "0"
| (oadd e n 0) := to_string_aux1 e n (to_string e)
| (oadd e n a) := to_string_aux1 e n (to_string e) ++ " + " ++ to_string a
/-- Print an ordinal notation in terms of the raw `oadd` constructors -/
def repr' : onote → string
| zero := "0"
| (oadd e n a) := "(oadd " ++ repr' e ++ " " ++ _root_.to_string (n:ℕ) ++ " " ++ repr' a ++ ")"
instance : has_to_string onote := ⟨to_string⟩
instance : has_repr onote := ⟨repr'⟩
instance : preorder onote :=
{ le := λ x y, repr x ≤ repr y,
lt := λ x y, repr x < repr y,
le_refl := λ a, @le_refl ordinal _ _,
le_trans := λ a b c, @le_trans ordinal _ _ _ _,
lt_iff_le_not_le := λ a b, @lt_iff_le_not_le ordinal _ _ _ }
theorem lt_def {x y : onote} : x < y ↔ repr x < repr y := iff.rfl
theorem le_def {x y : onote} : x ≤ y ↔ repr x ≤ repr y := iff.rfl
/-- Convert a `nat` into an ordinal -/
@[simp] def of_nat : ℕ → onote
| 0 := 0
| (nat.succ n) := oadd 0 n.succ_pnat 0
@[simp] theorem of_nat_one : of_nat 1 = 1 := rfl
@[simp] theorem repr_of_nat (n : ℕ) : repr (of_nat n) = n :=
by cases n; simp
@[simp] theorem repr_one : repr 1 = 1 :=
by simpa using repr_of_nat 1
theorem omega_le_oadd (e n a) : ω ^ repr e ≤ repr (oadd e n a) :=
begin
unfold repr,
refine le_trans _ (le_add_right _ _),
simpa using (mul_le_mul_iff_left $ power_pos (repr e) omega_pos).2 (nat_cast_le.2 n.2)
end
theorem oadd_pos (e n a) : 0 < oadd e n a :=
@lt_of_lt_of_le _ _ _ _ _ (power_pos _ omega_pos)
(omega_le_oadd _ _ _)
/-- Compare ordinal notations -/
def cmp : onote → onote → ordering
| 0 0 := ordering.eq
| _ 0 := ordering.gt
| 0 _ := ordering.lt
| o₁@(oadd e₁ n₁ a₁) o₂@(oadd e₂ n₂ a₂) :=
(cmp e₁ e₂).or_else $ (_root_.cmp (n₁:ℕ) n₂).or_else (cmp a₁ a₂)
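/- An explanatory comment (added here, not part of the original file): the comparison is
lexicographic, first on the leading exponents, then on the leading coefficients, then on the tails.
That it agrees with the order on the denoted ordinals is guaranteed for notations in normal form;
see `cmp_compares` below. -/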
theorem eq_of_cmp_eq : ∀ {o₁ o₂}, cmp o₁ o₂ = ordering.eq → o₁ = o₂
| 0 0 h := rfl
| (oadd e n a) 0 h := by injection h
| 0 (oadd e n a) h := by injection h
| o₁@(oadd e₁ n₁ a₁) o₂@(oadd e₂ n₂ a₂) h := begin
revert h, simp [cmp],
cases h₁ : cmp e₁ e₂; intro h; try {cases h},
have := eq_of_cmp_eq h₁, subst e₂,
revert h, cases h₂ : _root_.cmp (n₁:ℕ) n₂; intro h; try {cases h},
have := eq_of_cmp_eq h, subst a₂,
rw [_root_.cmp, cmp_using_eq_eq] at h₂,
have := subtype.eq (eq_of_incomp h₂), subst n₂, simp
end
theorem zero_lt_one : (0 : onote) < 1 :=
by rw [lt_def, repr, repr_one]; exact zero_lt_one
/-- `NF_below o b` says that `o` is a normal form ordinal notation
satisfying `repr o < ω ^ b`. -/
inductive NF_below : onote → ordinal.{0} → Prop
| zero {b} : NF_below 0 b
| oadd' {e n a eb b} : NF_below e eb →
NF_below a (repr e) → repr e < b → NF_below (oadd e n a) b
/-- A normal form ordinal notation has the form
ω ^ a₁ * n₁ + ω ^ a₂ * n₂ + ... ω ^ aₖ * nₖ
where `a₁ > a₂ > ... > aₖ` and all the `aᵢ` are
also in normal form.
We will essentially only be interested in normal form
ordinal notations, but to avoid complicating the algorithms
we define everything over general ordinal notations and
only prove correctness with normal form as an invariant. -/
class NF (o : onote) : Prop := (out : Exists (NF_below o))
attribute [pp_nodot] NF
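/- For instance (an illustrative remark added here, not part of the original file):
`oadd 1 1 (oadd 0 2 0)`, denoting `ω + 2`, is in normal form, whereas `oadd 0 1 (oadd 1 1 0)` is
not, since its tail denotes `ω`, which is not below `ω ^ 0 = 1`. -/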
instance NF.zero : NF 0 := ⟨⟨0, NF_below.zero⟩⟩
theorem NF_below.oadd {e n a b} : NF e →
NF_below a (repr e) → repr e < b → NF_below (oadd e n a) b
| ⟨⟨eb, h⟩⟩ := NF_below.oadd' h
theorem NF_below.fst {e n a b} (h : NF_below (oadd e n a) b) : NF e :=
by cases h with _ _ _ _ eb _ h₁ h₂ h₃; exact ⟨⟨_, h₁⟩⟩
theorem NF.fst {e n a} : NF (oadd e n a) → NF e
| ⟨⟨b, h⟩⟩ := h.fst
theorem NF_below.snd {e n a b} (h : NF_below (oadd e n a) b) : NF_below a (repr e) :=
by cases h with _ _ _ _ eb _ h₁ h₂ h₃; exact h₂
theorem NF.snd' {e n a} : NF (oadd e n a) → NF_below a (repr e)
| ⟨⟨b, h⟩⟩ := h.snd
theorem NF.snd {e n a} (h : NF (oadd e n a)) : NF a :=
⟨⟨_, h.snd'⟩⟩
theorem NF.oadd {e a} (h₁ : NF e) (n)
(h₂ : NF_below a (repr e)) : NF (oadd e n a) :=
⟨⟨_, NF_below.oadd h₁ h₂ (ordinal.lt_succ_self _)⟩⟩
instance NF.oadd_zero (e n) [h : NF e] : NF (oadd e n 0) :=
h.oadd _ NF_below.zero
theorem NF_below.lt {e n a b} (h : NF_below (oadd e n a) b) : repr e < b :=
by cases h with _ _ _ _ eb _ h₁ h₂ h₃; exact h₃
theorem NF_below_zero : ∀ {o}, NF_below o 0 ↔ o = 0
| 0 := ⟨λ _, rfl, λ _, NF_below.zero⟩
| (oadd e n a) := ⟨λ h, (not_le_of_lt h.lt).elim (ordinal.zero_le _),
λ e, e.symm ▸ NF_below.zero⟩
theorem NF.zero_of_zero {e n a} (h : NF (oadd e n a)) (e0 : e = 0) : a = 0 :=
by simpa [e0, NF_below_zero] using h.snd'
theorem NF_below.repr_lt {o b} (h : NF_below o b) : repr o < ω ^ b :=
begin
induction h with _ e n a eb b h₁ h₂ h₃ _ IH,
{ exact power_pos _ omega_pos },
{ rw repr,
refine lt_of_lt_of_le ((ordinal.add_lt_add_iff_left _).2 IH) _,
rw ← mul_succ,
refine le_trans (mul_le_mul_left _ $ ordinal.succ_le.2 $ nat_lt_omega _) _,
rw ← power_succ,
exact power_le_power_right omega_pos (ordinal.succ_le.2 h₃) }
end
theorem NF_below.mono {o b₁ b₂} (bb : b₁ ≤ b₂) (h : NF_below o b₁) : NF_below o b₂ :=
begin
induction h with _ e n a eb b h₁ h₂ h₃ _ IH; constructor,
exacts [h₁, h₂, lt_of_lt_of_le h₃ bb]
end
theorem NF.below_of_lt {e n a b} (H : repr e < b) : NF (oadd e n a) → NF_below (oadd e n a) b
| ⟨⟨b', h⟩⟩ := by cases h with _ _ _ _ eb _ h₁ h₂ h₃;
exact NF_below.oadd' h₁ h₂ H
theorem NF.below_of_lt' : ∀ {o b}, repr o < ω ^ b → NF o → NF_below o b
| 0 b H _ := NF_below.zero
| (oadd e n a) b H h := h.below_of_lt $ (power_lt_power_iff_right one_lt_omega).1 $
(lt_of_le_of_lt (omega_le_oadd _ _ _) H)
theorem NF_below_of_nat : ∀ n, NF_below (of_nat n) 1
| 0 := NF_below.zero
| (nat.succ n) := NF_below.oadd NF.zero NF_below.zero ordinal.zero_lt_one
instance NF_of_nat (n) : NF (of_nat n) := ⟨⟨_, NF_below_of_nat n⟩⟩
instance NF_one : NF 1 := by rw ← of_nat_one; apply_instance
theorem oadd_lt_oadd_1 {e₁ n₁ o₁ e₂ n₂ o₂} (h₁ : NF (oadd e₁ n₁ o₁)) (h : e₁ < e₂) :
oadd e₁ n₁ o₁ < oadd e₂ n₂ o₂ :=
@lt_of_lt_of_le _ _ _ _ _ ((h₁.below_of_lt h).repr_lt) (omega_le_oadd _ _ _)
theorem oadd_lt_oadd_2 {e o₁ o₂ : onote} {n₁ n₂ : ℕ+}
(h₁ : NF (oadd e n₁ o₁)) (h : (n₁:ℕ) < n₂) : oadd e n₁ o₁ < oadd e n₂ o₂ :=
begin
simp [lt_def],
refine lt_of_lt_of_le ((ordinal.add_lt_add_iff_left _).2 h₁.snd'.repr_lt)
(le_trans _ (le_add_right _ _)),
rwa [← mul_succ, mul_le_mul_iff_left (power_pos _ omega_pos),
ordinal.succ_le, nat_cast_lt]
end
theorem oadd_lt_oadd_3 {e n a₁ a₂} (h : a₁ < a₂) :
oadd e n a₁ < oadd e n a₂ :=
begin
rw lt_def, unfold repr,
exact (ordinal.add_lt_add_iff_left _).2 h
end
theorem cmp_compares : ∀ (a b : onote) [NF a] [NF b], (cmp a b).compares a b
| 0 0 h₁ h₂ := rfl
| (oadd e n a) 0 h₁ h₂ := oadd_pos _ _ _
| 0 (oadd e n a) h₁ h₂ := oadd_pos _ _ _
| o₁@(oadd e₁ n₁ a₁) o₂@(oadd e₂ n₂ a₂) h₁ h₂ := begin
rw cmp,
have IHe := @cmp_compares _ _ h₁.fst h₂.fst,
cases cmp e₁ e₂,
case ordering.lt { exact oadd_lt_oadd_1 h₁ IHe },
case ordering.gt { exact oadd_lt_oadd_1 h₂ IHe },
change e₁ = e₂ at IHe, subst IHe,
unfold _root_.cmp, cases nh : cmp_using (<) (n₁:ℕ) n₂,
case ordering.lt {
rw cmp_using_eq_lt at nh, exact oadd_lt_oadd_2 h₁ nh },
case ordering.gt {
rw cmp_using_eq_gt at nh, exact oadd_lt_oadd_2 h₂ nh },
rw cmp_using_eq_eq at nh,
have := subtype.eq (eq_of_incomp nh), subst n₂,
have IHa := @cmp_compares _ _ h₁.snd h₂.snd,
cases cmp a₁ a₂,
case ordering.lt { exact oadd_lt_oadd_3 IHa },
case ordering.gt { exact oadd_lt_oadd_3 IHa },
change a₁ = a₂ at IHa, subst IHa, exact rfl
end
theorem repr_inj {a b} [NF a] [NF b] : repr a = repr b ↔ a = b :=
⟨match cmp a b, cmp_compares a b with
| ordering.lt, (h : repr a < repr b), e := (ne_of_lt h e).elim
| ordering.gt, (h : repr a > repr b), e := (ne_of_gt h e).elim
| ordering.eq, h, e := h
end, congr_arg _⟩
theorem NF.of_dvd_omega_power {b e n a} (h : NF (oadd e n a)) (d : ω ^ b ∣ repr (oadd e n a)) :
b ≤ repr e ∧ ω ^ b ∣ repr a :=
begin
have := mt repr_inj.1 (λ h, by injection h : oadd e n a ≠ 0),
have L := le_of_not_lt (λ l, not_le_of_lt (h.below_of_lt l).repr_lt (le_of_dvd this d)),
simp at d,
exact ⟨L, (dvd_add_iff $ (power_dvd_power _ L).mul_right _).1 d⟩
end
theorem NF.of_dvd_omega {e n a} (h : NF (oadd e n a)) :
ω ∣ repr (oadd e n a) → repr e ≠ 0 ∧ ω ∣ repr a :=
by rw [← power_one ω, ← one_le_iff_ne_zero]; exact h.of_dvd_omega_power
/-- `top_below b o` asserts that the largest exponent in `o`, if
it exists, is less than `b`. This is an auxiliary definition
for decidability of `NF`. -/
def top_below (b) : onote → Prop
| 0 := true
| (oadd e n a) := cmp e b = ordering.lt
instance decidable_top_below : decidable_rel top_below :=
by intros b o; cases o; delta top_below; apply_instance
theorem NF_below_iff_top_below {b} [NF b] : ∀ {o},
NF_below o (repr b) ↔ NF o ∧ top_below b o
| 0 := ⟨λ h, ⟨⟨⟨_, h⟩⟩, trivial⟩, λ _, NF_below.zero⟩
| (oadd e n a) :=
⟨λ h, ⟨⟨⟨_, h⟩⟩, (@cmp_compares _ b h.fst _).eq_lt.2 h.lt⟩,
λ ⟨h₁, h₂⟩, h₁.below_of_lt $ (@cmp_compares _ b h₁.fst _).eq_lt.1 h₂⟩
instance decidable_NF : decidable_pred NF
| 0 := is_true NF.zero
| (oadd e n a) := begin
have := decidable_NF e,
have := decidable_NF a, resetI,
apply decidable_of_iff (NF e ∧ NF a ∧ top_below e a),
abstract {
rw ← and_congr_right (λ h, @NF_below_iff_top_below _ h _),
exact ⟨λ ⟨h₁, h₂⟩, NF.oadd h₁ n h₂, λ h, ⟨h.fst, h.snd'⟩⟩ },
end
/-- Addition of ordinal notations (correct only for normal input) -/
def add : onote → onote → onote
| 0 o := o
| (oadd e n a) o := match add a o with
| 0 := oadd e n 0
| o'@(oadd e' n' a') := match cmp e e' with
| ordering.lt := o'
| ordering.eq := oadd e (n + n') a'
| ordering.gt := oadd e n o'
end
end
instance : has_add onote := ⟨add⟩
@[simp] theorem zero_add (o : onote) : 0 + o = o := rfl
theorem oadd_add (e n a o) : oadd e n a + o = add._match_1 e n (a + o) := rfl
/-- Subtraction of ordinal notations (correct only for normal input) -/
def sub : onote → onote → onote
| 0 o := 0
| o 0 := o
| o₁@(oadd e₁ n₁ a₁) (oadd e₂ n₂ a₂) := match cmp e₁ e₂ with
| ordering.lt := 0
| ordering.gt := o₁
| ordering.eq := match (n₁:ℕ) - n₂ with
| 0 := if n₁ = n₂ then sub a₁ a₂ else 0
| (nat.succ k) := oadd e₁ k.succ_pnat a₁
end
end
instance : has_sub onote := ⟨sub⟩
theorem add_NF_below {b} : ∀ {o₁ o₂}, NF_below o₁ b → NF_below o₂ b → NF_below (o₁ + o₂) b
| 0 o h₁ h₂ := h₂
| (oadd e n a) o h₁ h₂ := begin
have h' := add_NF_below (h₁.snd.mono $ le_of_lt h₁.lt) h₂,
simp [oadd_add], cases a + o with e' n' a',
{ exact NF_below.oadd h₁.fst NF_below.zero h₁.lt },
simp [add], have := @cmp_compares _ _ h₁.fst h'.fst,
cases cmp e e'; simp [add],
{ exact h' },
{ simp at this, subst e',
exact NF_below.oadd h'.fst h'.snd h'.lt },
{ exact NF_below.oadd h₁.fst (NF.below_of_lt this ⟨⟨_, h'⟩⟩) h₁.lt }
end
instance add_NF (o₁ o₂) : ∀ [NF o₁] [NF o₂], NF (o₁ + o₂)
| ⟨⟨b₁, h₁⟩⟩ ⟨⟨b₂, h₂⟩⟩ := ⟨(b₁.le_total b₂).elim
(λ h, ⟨b₂, add_NF_below (h₁.mono h) h₂⟩)
(λ h, ⟨b₁, add_NF_below h₁ (h₂.mono h)⟩)⟩
@[simp] theorem repr_add : ∀ o₁ o₂ [NF o₁] [NF o₂], repr (o₁ + o₂) = repr o₁ + repr o₂
| 0 o h₁ h₂ := by simp
| (oadd e n a) o h₁ h₂ := begin
haveI := h₁.snd, have h' := repr_add a o,
conv at h' in (_+o) {simp [(+)]},
have nf := onote.add_NF a o,
conv at nf in (_+o) {simp [(+)]},
conv in (_+o) {simp [(+), add]},
cases add a o with e' n' a'; simp [add, h'.symm, add_assoc],
have := h₁.fst, haveI := nf.fst, have ee := cmp_compares e e',
cases cmp e e'; simp [add],
{ rw [← add_assoc, @add_absorp _ (repr e') (ω ^ repr e' * (n':ℕ))],
{ have := (h₁.below_of_lt ee).repr_lt, unfold repr at this,
exact lt_of_le_of_lt (le_add_right _ _) this },
{ simpa using (mul_le_mul_iff_left $
power_pos (repr e') omega_pos).2 (nat_cast_le.2 n'.pos) } },
{ change e = e' at ee, substI e',
rw [← add_assoc, ← ordinal.mul_add, ← nat.cast_add] }
end
theorem sub_NF_below : ∀ {o₁ o₂ b}, NF_below o₁ b → NF o₂ → NF_below (o₁ - o₂) b
| 0 o b h₁ h₂ := by cases o; exact NF_below.zero
| (oadd e n a) 0 b h₁ h₂ := h₁
| (oadd e₁ n₁ a₁) (oadd e₂ n₂ a₂) b h₁ h₂ := begin
have h' := sub_NF_below h₁.snd h₂.snd,
simp [has_sub.sub, sub] at h' ⊢,
have := @cmp_compares _ _ h₁.fst h₂.fst,
cases cmp e₁ e₂; simp [sub],
{ apply NF_below.zero },
{ simp at this, subst e₂,
cases mn : (n₁:ℕ) - n₂; simp [sub],
{ by_cases en : n₁ = n₂; simp [en],
{ exact h'.mono (le_of_lt h₁.lt) },
{ exact NF_below.zero } },
{ exact NF_below.oadd h₁.fst h₁.snd h₁.lt } },
{ exact h₁ }
end
instance sub_NF (o₁ o₂) : ∀ [NF o₁] [NF o₂], NF (o₁ - o₂)
| ⟨⟨b₁, h₁⟩⟩ h₂ := ⟨⟨b₁, sub_NF_below h₁ h₂⟩⟩
@[simp] theorem repr_sub : ∀ o₁ o₂ [NF o₁] [NF o₂], repr (o₁ - o₂) = repr o₁ - repr o₂
| 0 o h₁ h₂ := by cases o; exact (ordinal.zero_sub _).symm
| (oadd e n a) 0 h₁ h₂ := (ordinal.sub_zero _).symm
| (oadd e₁ n₁ a₁) (oadd e₂ n₂ a₂) h₁ h₂ := begin
haveI := h₁.snd, haveI := h₂.snd, have h' := repr_sub a₁ a₂,
conv at h' in (a₁-a₂) {simp [has_sub.sub]},
have nf := onote.sub_NF a₁ a₂,
conv at nf in (a₁-a₂) {simp [has_sub.sub]},
conv in (_-oadd _ _ _) {simp [has_sub.sub, sub]},
have ee := @cmp_compares _ _ h₁.fst h₂.fst,
cases cmp e₁ e₂,
{ rw [ordinal.sub_eq_zero_iff_le.2], {refl},
exact le_of_lt (oadd_lt_oadd_1 h₁ ee) },
{ change e₁ = e₂ at ee, substI e₂, unfold sub._match_1,
cases mn : (n₁:ℕ) - n₂; dsimp only [sub._match_2],
{ by_cases en : n₁ = n₂,
{ simp [en], rwa [add_sub_add_cancel] },
{ simp [en, -repr],
exact (ordinal.sub_eq_zero_iff_le.2 $ le_of_lt $ oadd_lt_oadd_2 h₁ $
lt_of_le_of_ne (tsub_eq_zero_iff_le.1 mn) (mt pnat.eq en)).symm } },
{ simp [nat.succ_pnat, -nat.cast_succ],
rw [(tsub_eq_iff_eq_add_of_le $ le_of_lt $ nat.lt_of_sub_eq_succ mn).1 mn,
add_comm, nat.cast_add, ordinal.mul_add, add_assoc, add_sub_add_cancel],
refine (ordinal.sub_eq_of_add_eq $ add_absorp h₂.snd'.repr_lt $
le_trans _ (le_add_right _ _)).symm,
simpa using mul_le_mul_left _ (nat_cast_le.2 $ nat.succ_pos _) } },
{ exact (ordinal.sub_eq_of_add_eq $ add_absorp (h₂.below_of_lt ee).repr_lt $
omega_le_oadd _ _ _).symm }
end
/-- Multiplication of ordinal notations (correct only for normal input) -/
def mul : onote → onote → onote
| 0 _ := 0
| _ 0 := 0
| o₁@(oadd e₁ n₁ a₁) (oadd e₂ n₂ a₂) :=
if e₂ = 0 then oadd e₁ (n₁ * n₂) a₁ else
oadd (e₁ + e₂) n₂ (mul o₁ a₂)
instance : has_mul onote := ⟨mul⟩
@[simp] theorem zero_mul (o : onote) : 0 * o = 0 := by cases o; refl
@[simp] theorem mul_zero (o : onote) : o * 0 = 0 := by cases o; refl
theorem oadd_mul (e₁ n₁ a₁ e₂ n₂ a₂) : oadd e₁ n₁ a₁ * oadd e₂ n₂ a₂ =
if e₂ = 0 then oadd e₁ (n₁ * n₂) a₁ else
oadd (e₁ + e₂) n₂ (oadd e₁ n₁ a₁ * a₂) := rfl
theorem oadd_mul_NF_below {e₁ n₁ a₁ b₁} (h₁ : NF_below (oadd e₁ n₁ a₁) b₁) :
∀ {o₂ b₂}, NF_below o₂ b₂ → NF_below (oadd e₁ n₁ a₁ * o₂) (repr e₁ + b₂)
| 0 b₂ h₂ := NF_below.zero
| (oadd e₂ n₂ a₂) b₂ h₂ := begin
have IH := oadd_mul_NF_below h₂.snd,
by_cases e0 : e₂ = 0; simp [e0, oadd_mul],
{ apply NF_below.oadd h₁.fst h₁.snd,
simpa using (add_lt_add_iff_left (repr e₁)).2
(lt_of_le_of_lt (ordinal.zero_le _) h₂.lt) },
{ haveI := h₁.fst, haveI := h₂.fst,
apply NF_below.oadd, apply_instance,
{ rwa repr_add },
{ rw [repr_add, ordinal.add_lt_add_iff_left], exact h₂.lt } }
end
instance mul_NF : ∀ o₁ o₂ [NF o₁] [NF o₂], NF (o₁ * o₂)
| 0 o h₁ h₂ := by cases o; exact NF.zero
| (oadd e n a) o ⟨⟨b₁, hb₁⟩⟩ ⟨⟨b₂, hb₂⟩⟩ :=
⟨⟨_, oadd_mul_NF_below hb₁ hb₂⟩⟩
@[simp] theorem repr_mul : ∀ o₁ o₂ [NF o₁] [NF o₂], repr (o₁ * o₂) = repr o₁ * repr o₂
| 0 o h₁ h₂ := by cases o; exact (ordinal.zero_mul _).symm
| (oadd e₁ n₁ a₁) 0 h₁ h₂ := (ordinal.mul_zero _).symm
| (oadd e₁ n₁ a₁) (oadd e₂ n₂ a₂) h₁ h₂ := begin
have IH : repr (mul _ _) = _ := @repr_mul _ _ h₁ h₂.snd,
conv {to_lhs, simp [(*)]},
have ao : repr a₁ + ω ^ repr e₁ * (n₁:ℕ) = ω ^ repr e₁ * (n₁:ℕ),
{ apply add_absorp h₁.snd'.repr_lt,
simpa using (mul_le_mul_iff_left $ power_pos _ omega_pos).2
(nat_cast_le.2 n₁.2) },
by_cases e0 : e₂ = 0; simp [e0, mul],
{ cases nat.exists_eq_succ_of_ne_zero n₂.ne_zero with x xe,
simp [h₂.zero_of_zero e0, xe, -nat.cast_succ],
rw [← nat_cast_succ x, add_mul_succ _ ao, mul_assoc] },
{ haveI := h₁.fst, haveI := h₂.fst,
simp [IH, repr_add, power_add, ordinal.mul_add],
rw ← mul_assoc, congr' 2,
have := mt repr_inj.1 e0,
rw [add_mul_limit ao (power_is_limit_left omega_is_limit this),
mul_assoc, mul_omega_dvd (nat_cast_pos.2 n₁.pos) (nat_lt_omega _)],
simpa using power_dvd_power ω (one_le_iff_ne_zero.2 this) },
end
/-- Calculate division and remainder of `o` mod ω.
`split' o = (a, n)` means `o = ω * a + n`. -/
def split' : onote → onote × ℕ
| 0 := (0, 0)
| (oadd e n a) := if e = 0 then (0, n) else
let (a', m) := split' a in (oadd (e - 1) n a', m)
/-- Calculate division and remainder of `o` mod ω.
`split o = (a, n)` means `o = a + n`, where `ω ∣ a`. -/
def split : onote → onote × ℕ
| 0 := (0, 0)
| (oadd e n a) := if e = 0 then (0, n) else
let (a', m) := split a in (oadd e n a', m)
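/- For example (an illustrative remark added here, not part of the original file): for
`o = oadd 1 2 (oadd 0 3 0)`, i.e. `ω * 2 + 3`, we get `split' o = (oadd 0 2 0, 3)` (so that
`repr o = ω * 2 + 3`) and `split o = (oadd 1 2 0, 3)` (the `ω`-divisible part plus the remainder). -/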
/-- `scale x o` is the ordinal notation for `ω ^ x * o`. -/
def scale (x : onote) : onote → onote
| 0 := 0
| (oadd e n a) := oadd (x + e) n (scale a)
/-- `mul_nat o n` is the ordinal notation for `o * n`. -/
def mul_nat : onote → ℕ → onote
| 0 m := 0
| _ 0 := 0
| (oadd e n a) (m+1) := oadd e (n * m.succ_pnat) a
/-- Auxiliary definition to compute the ordinal notation for the ordinal
exponentiation in `power` -/
def power_aux (e a0 a : onote) : ℕ → ℕ → onote
| _ 0 := 0
| 0 (m+1) := oadd e m.succ_pnat 0
| (k+1) m := scale (e + mul_nat a0 k) a + power_aux k m
/-- `power o₁ o₂` calculates the ordinal notation for
the ordinal exponential `o₁ ^ o₂`. -/
def power (o₁ o₂ : onote) : onote :=
match split o₁ with
| (0, 0) := if o₂ = 0 then 1 else 0
| (0, 1) := 1
| (0, m+1) := let (b', k) := split' o₂ in
oadd b' (@has_pow.pow ℕ+ _ _ m.succ_pnat k) 0
| (a@(oadd a0 _ _), m) := match split o₂ with
| (b, 0) := oadd (a0 * b) 1 0
| (b, k+1) := let eb := a0*b in
scale (eb + mul_nat a0 k) a + power_aux eb a0 (mul_nat a m) k m
end
end
instance : has_pow onote onote := ⟨power⟩
theorem power_def (o₁ o₂ : onote) : o₁ ^ o₂ = power._match_1 o₂ (split o₁) := rfl
theorem split_eq_scale_split' : ∀ {o o' m} [NF o], split' o = (o', m) → split o = (scale 1 o', m)
| 0 o' m h p := by injection p; substs o' m; refl
| (oadd e n a) o' m h p := begin
by_cases e0 : e = 0; simp [e0, split, split'] at p ⊢,
{ rcases p with ⟨rfl, rfl⟩, exact ⟨rfl, rfl⟩ },
{ revert p, cases h' : split' a with a' m',
haveI := h.fst, haveI := h.snd,
simp [split_eq_scale_split' h', split, split'],
have : 1 + (e - 1) = e,
{ refine repr_inj.1 _, simp,
have := mt repr_inj.1 e0,
exact ordinal.add_sub_cancel_of_le (one_le_iff_ne_zero.2 this) },
intros, substs o' m, simp [scale, this] }
end
theorem NF_repr_split' : ∀ {o o' m} [NF o], split' o = (o', m) → NF o' ∧ repr o = ω * repr o' + m
| 0 o' m h p := by injection p; substs o' m; simp [NF.zero]
| (oadd e n a) o' m h p := begin
by_cases e0 : e = 0; simp [e0, split, split'] at p ⊢,
{ rcases p with ⟨rfl, rfl⟩,
simp [h.zero_of_zero e0, NF.zero] },
{ revert p, cases h' : split' a with a' m',
haveI := h.fst, haveI := h.snd,
cases NF_repr_split' h' with IH₁ IH₂,
simp [IH₂, split'],
intros, substs o' m,
have : ω ^ repr e = ω ^ (1 : ordinal.{0}) * ω ^ (repr e - 1),
{ have := mt repr_inj.1 e0,
rw [← power_add, ordinal.add_sub_cancel_of_le (one_le_iff_ne_zero.2 this)] },
refine ⟨NF.oadd (by apply_instance) _ _, _⟩,
{ simp at this ⊢,
refine IH₁.below_of_lt' ((mul_lt_mul_iff_left omega_pos).1 $
lt_of_le_of_lt (le_add_right _ m') _),
rw [← this, ← IH₂], exact h.snd'.repr_lt },
{ rw this, simp [ordinal.mul_add, mul_assoc, add_assoc] } }
end
theorem scale_eq_mul (x) [NF x] : ∀ o [NF o], scale x o = oadd x 1 0 * o
| 0 h := rfl
| (oadd e n a) h := begin
simp [(*)], simp [mul, scale],
haveI := h.snd,
by_cases e0 : e = 0,
{ rw scale_eq_mul, simp [e0, h.zero_of_zero, show x + 0 = x, from repr_inj.1 (by simp)] },
{ simp [e0, scale_eq_mul, (*)] }
end
instance NF_scale (x) [NF x] (o) [NF o] : NF (scale x o) :=
by rw scale_eq_mul; apply_instance
@[simp] theorem repr_scale (x) [NF x] (o) [NF o] : repr (scale x o) = ω ^ repr x * repr o :=
by simp [scale_eq_mul]
theorem NF_repr_split {o o' m} [NF o] (h : split o = (o', m)) : NF o' ∧ repr o = repr o' + m :=
begin
cases e : split' o with a n,
cases NF_repr_split' e with s₁ s₂, resetI,
rw split_eq_scale_split' e at h,
injection h, substs o' n,
simp [repr_scale, s₂.symm],
apply_instance
end
theorem split_dvd {o o' m} [NF o] (h : split o = (o', m)) : ω ∣ repr o' :=
begin
cases e : split' o with a n,
rw split_eq_scale_split' e at h,
injection h, subst o',
cases NF_repr_split' e, resetI, simp
end
theorem split_add_lt {o e n a m} [NF o] (h : split o = (oadd e n a, m)) : repr a + m < ω ^ repr e :=
begin
cases NF_repr_split h with h₁ h₂,
cases h₁.of_dvd_omega (split_dvd h) with e0 d,
have := h₁.fst, have := h₁.snd,
refine add_lt_omega_power h₁.snd'.repr_lt (lt_of_lt_of_le (nat_lt_omega _) _),
simpa using power_le_power_right omega_pos (one_le_iff_ne_zero.2 e0),
end
@[simp] theorem mul_nat_eq_mul (n o) : mul_nat o n = o * of_nat n :=
by cases o; cases n; refl
instance NF_mul_nat (o) [NF o] (n) : NF (mul_nat o n) :=
by simp; apply_instance
instance NF_power_aux (e a0 a) [NF e] [NF a0] [NF a] : ∀ k m, NF (power_aux e a0 a k m)
| k 0 := by cases k; exact NF.zero
| 0 (m+1) := NF.oadd_zero _ _
| (k+1) (m+1) := by haveI := NF_power_aux k;
simp [power_aux, nat.succ_ne_zero]; apply_instance
instance NF_power (o₁ o₂) [NF o₁] [NF o₂] : NF (o₁ ^ o₂) :=
begin
cases e₁ : split o₁ with a m,
have na := (NF_repr_split e₁).1,
cases e₂ : split' o₂ with b' k,
haveI := (NF_repr_split' e₂).1,
casesI a with a0 n a',
{ cases m with m,
{ by_cases o₂ = 0; simp [pow, power, *]; apply_instance },
{ by_cases m = 0,
{ simp only [pow, power, *, zero_def], apply_instance },
{ simp [pow, power, *, - npow_eq_pow], apply_instance } } },
{ simp [pow, power, e₁, e₂, split_eq_scale_split' e₂],
have := na.fst,
cases k with k; simp [succ_eq_add_one, power]; resetI; apply_instance }
end
theorem scale_power_aux (e a0 a : onote) [NF e] [NF a0] [NF a] :
∀ k m, repr (power_aux e a0 a k m) = ω ^ repr e * repr (power_aux 0 a0 a k m)
| 0 m := by cases m; simp [power_aux]
| (k+1) m := by by_cases m = 0; simp [h, power_aux,
ordinal.mul_add, power_add, mul_assoc, scale_power_aux]
theorem repr_power_aux₁ {e a} [Ne : NF e] [Na : NF a] {a' : ordinal}
(e0 : repr e ≠ 0) (h : a' < ω ^ repr e) (aa : repr a = a') (n : ℕ+) :
(ω ^ repr e * (n:ℕ) + a') ^ ω = (ω ^ repr e) ^ ω :=
begin
subst aa,
have No := Ne.oadd n (Na.below_of_lt' h),
have := omega_le_oadd e n a, unfold repr at this,
refine le_antisymm _ (power_le_power_left _ this),
apply (power_le_of_limit
(ne_of_gt $ lt_of_lt_of_le (power_pos _ omega_pos) this) omega_is_limit).2,
intros b l,
have := (No.below_of_lt (lt_succ_self _)).repr_lt, unfold repr at this,
apply le_trans (power_le_power_left b $ le_of_lt this),
rw [← power_mul, ← power_mul],
apply power_le_power_right omega_pos,
cases le_or_lt ω (repr e) with h h,
{ apply le_trans (mul_le_mul_left _ $ le_of_lt $ lt_succ_self _),
rw [succ, add_mul_succ _ (one_add_of_omega_le h), ← succ,
succ_le, mul_lt_mul_iff_left (ordinal.pos_iff_ne_zero.2 e0)],
exact omega_is_limit.2 _ l },
{ refine le_trans (le_of_lt $ mul_lt_omega (omega_is_limit.2 _ h) l) _,
simpa using mul_le_mul_right ω (one_le_iff_ne_zero.2 e0) }
end
section
local infixr ^ := @pow ordinal.{0} ordinal ordinal.has_pow
theorem repr_power_aux₂ {a0 a'} [N0 : NF a0] [Na' : NF a'] (m : ℕ)
(d : ω ∣ repr a')
(e0 : repr a0 ≠ 0) (h : repr a' + m < ω ^ repr a0) (n : ℕ+) (k : ℕ) :
let R := repr (power_aux 0 a0 (oadd a0 n a' * of_nat m) k m) in
(k ≠ 0 → R < (ω ^ repr a0) ^ succ k) ∧
(ω ^ repr a0) ^ k * (ω ^ repr a0 * (n:ℕ) + repr a') + R =
(ω ^ repr a0 * (n:ℕ) + repr a' + m) ^ succ k :=
begin
intro,
haveI No : NF (oadd a0 n a') :=
N0.oadd n (Na'.below_of_lt' $ lt_of_le_of_lt (le_add_right _ _) h),
induction k with k IH, {cases m; simp [power_aux, R]},
rename R R', let R := repr (power_aux 0 a0 (oadd a0 n a' * of_nat m) k m),
let ω0 := ω ^ repr a0, let α' := ω0 * n + repr a',
change (k ≠ 0 → R < ω0 ^ succ k) ∧ ω0 ^ k * α' + R = (α' + m) ^ succ k at IH,
have RR : R' = ω0 ^ k * (α' * m) + R,
{ by_cases m = 0; simp [h, R', power_aux, R, power_mul],
{ cases k; simp [power_aux] }, { refl } },
have α0 : 0 < α', {simpa [α', lt_def, repr] using oadd_pos a0 n a'},
have ω00 : 0 < ω0 ^ k := power_pos _ (power_pos _ omega_pos),
have Rl : R < ω ^ (repr a0 * succ ↑k),
{ by_cases k0 : k = 0,
{ simp [k0],
refine lt_of_lt_of_le _ (power_le_power_right omega_pos (one_le_iff_ne_zero.2 e0)),
cases m with m; simp [k0, R, power_aux, omega_pos],
rw [← nat.cast_succ], apply nat_lt_omega },
{ rw power_mul, exact IH.1 k0 } },
refine ⟨λ_, _, _⟩,
{ rw [RR, ← power_mul _ _ (succ k.succ)],
have e0 := ordinal.pos_iff_ne_zero.2 e0,
have rr0 := lt_of_lt_of_le e0 (le_add_left _ _),
apply add_lt_omega_power,
{ simp [power_mul, ω0, power_add, mul_assoc],
rw [mul_lt_mul_iff_left ω00, ← ordinal.power_add],
have := (No.below_of_lt _).repr_lt, unfold repr at this,
refine mul_lt_omega_power rr0 this (nat_lt_omega _),
simpa using (add_lt_add_iff_left (repr a0)).2 e0 },
{ refine lt_of_lt_of_le Rl (power_le_power_right omega_pos $
mul_le_mul_left _ $ succ_le_succ.2 $ nat_cast_le.2 $ le_of_lt k.lt_succ_self) } },
refine calc
ω0 ^ k.succ * α' + R'
= ω0 ^ succ k * α' + (ω0 ^ k * α' * m + R) : by rw [nat_cast_succ, RR, ← mul_assoc]
... = (ω0 ^ k * α' + R) * α' + (ω0 ^ k * α' + R) * m : _
... = (α' + m) ^ succ k.succ : by rw [← ordinal.mul_add, ← nat_cast_succ, power_succ, IH.2],
congr' 1,
{ have αd : ω ∣ α' := dvd_add (dvd_mul_of_dvd_left
(by simpa using power_dvd_power ω (one_le_iff_ne_zero.2 e0)) _) d,
rw [ordinal.mul_add (ω0 ^ k), add_assoc, ← mul_assoc, ← power_succ,
add_mul_limit _ (is_limit_iff_omega_dvd.2 ⟨ne_of_gt α0, αd⟩), mul_assoc,
@mul_omega_dvd n (nat_cast_pos.2 n.pos) (nat_lt_omega _) _ αd],
apply @add_absorp _ (repr a0 * succ k),
{ refine add_lt_omega_power _ Rl,
rw [power_mul, power_succ, mul_lt_mul_iff_left ω00],
exact No.snd'.repr_lt },
{ have := mul_le_mul_left (ω0 ^ succ k) (one_le_iff_pos.2 $ nat_cast_pos.2 n.pos),
rw power_mul, simpa [-power_succ] } },
{ cases m,
{ have : R = 0, {cases k; simp [R, power_aux]}, simp [this] },
{ rw [← nat_cast_succ, add_mul_succ],
apply add_absorp Rl,
rw [power_mul, power_succ],
apply ordinal.mul_le_mul_left,
simpa [α', repr] using omega_le_oadd a0 n a' } }
end
end
theorem repr_power (o₁ o₂) [NF o₁] [NF o₂] : repr (o₁ ^ o₂) = repr o₁ ^ repr o₂ :=
begin
cases e₁ : split o₁ with a m,
cases NF_repr_split e₁ with N₁ r₁,
cases a with a0 n a',
{ cases m with m,
{ by_cases o₂ = 0; simp [power_def, power, e₁, h, r₁],
have := mt repr_inj.1 h, rw zero_power this },
{ cases e₂ : split' o₂ with b' k,
cases NF_repr_split' e₂ with _ r₂,
by_cases m = 0; simp [power_def, power, e₁, h, r₁, e₂, r₂, -nat.cast_succ],
rw [power_add, power_mul, power_omega _ (nat_lt_omega _)],
simpa using nat_cast_lt.2 (nat.succ_lt_succ $ pos_iff_ne_zero.2 h) } },
{ haveI := N₁.fst, haveI := N₁.snd,
cases N₁.of_dvd_omega (split_dvd e₁) with a00 ad,
have al := split_add_lt e₁,
have aa : repr (a' + of_nat m) = repr a' + m, {simp},
cases e₂ : split' o₂ with b' k,
cases NF_repr_split' e₂ with _ r₂,
simp [power_def, power, e₁, r₁, split_eq_scale_split' e₂],
cases k with k; resetI,
{ simp [power, r₂, power_mul, repr_power_aux₁ a00 al aa, add_assoc] },
{ simp [succ_eq_add_one, power, r₂, power_add, power_mul, mul_assoc, add_assoc],
rw [repr_power_aux₁ a00 al aa, scale_power_aux], simp [power_mul],
rw [← ordinal.mul_add, ← add_assoc (ω ^ repr a0 * (n:ℕ))], congr' 1,
rw [← power_succ],
exact (repr_power_aux₂ _ ad a00 al _ _).2 } }
end
end onote
/-- The type of normal ordinal notations. (It would have been
nicer to define this right in the inductive type, but `NF o`
requires `repr` which requires `onote`, so all these things
would have to be defined at once, which messes up the VM
representation.) -/
def nonote := {o : onote // o.NF}
instance : decidable_eq nonote := by unfold nonote; apply_instance
namespace nonote
open onote
instance NF (o : nonote) : NF o.1 := o.2
/-- Construct a `nonote` from an ordinal notation
(and infer normality) -/
def mk (o : onote) [h : NF o] : nonote := ⟨o, h⟩
/-- The ordinal represented by an ordinal notation.
(This function is noncomputable because ordinal
arithmetic is noncomputable. In computational applications
`nonote` can be used exclusively without reference
to `ordinal`, but this function allows for correctness
results to be stated.) -/
noncomputable def repr (o : nonote) : ordinal := o.1.repr
instance : has_to_string nonote := ⟨λ x, x.1.to_string⟩
instance : has_repr nonote := ⟨λ x, x.1.repr'⟩
instance : preorder nonote :=
{ le := λ x y, repr x ≤ repr y,
lt := λ x y, repr x < repr y,
le_refl := λ a, @le_refl ordinal _ _,
le_trans := λ a b c, @le_trans ordinal _ _ _ _,
lt_iff_le_not_le := λ a b, @lt_iff_le_not_le ordinal _ _ _ }
instance : has_zero nonote := ⟨⟨0, NF.zero⟩⟩
instance : inhabited nonote := ⟨0⟩
/-- Convert a natural number to an ordinal notation -/
def of_nat (n : ℕ) : nonote := ⟨of_nat n, ⟨⟨_, NF_below_of_nat _⟩⟩⟩
/-- Compare ordinal notations -/
def cmp (a b : nonote) : ordering :=
cmp a.1 b.1
theorem cmp_compares : ∀ a b : nonote, (cmp a b).compares a b
| ⟨a, ha⟩ ⟨b, hb⟩ := begin
resetI,
dsimp [cmp], have := onote.cmp_compares a b,
cases onote.cmp a b; try {exact this},
exact subtype.mk_eq_mk.2 this
end
instance : linear_order nonote := linear_order_of_compares cmp cmp_compares
/-- Asserts that `repr a < ω ^ repr b`. Used in `nonote.rec_on` -/
def below (a b : nonote) : Prop := NF_below a.1 (repr b)
/-- The `oadd` pseudo-constructor for `nonote` -/
def oadd (e : nonote) (n : ℕ+) (a : nonote) (h : below a e) : nonote := ⟨_, NF.oadd e.2 n h⟩
/-- This is a recursor-like theorem for `nonote` suggesting an
inductive definition, which can't actually be defined this
way due to conflicting dependencies. -/
@[elab_as_eliminator] def rec_on {C : nonote → Sort*} (o : nonote)
(H0 : C 0)
(H1 : ∀ e n a h, C e → C a → C (oadd e n a h)) : C o :=
begin
cases o with o h, induction o with e n a IHe IHa,
{ exact H0 },
{ exact H1 ⟨e, h.fst⟩ n ⟨a, h.snd⟩ h.snd' (IHe _) (IHa _) }
end
/-- Addition of ordinal notations -/
instance : has_add nonote := ⟨λ x y, mk (x.1 + y.1)⟩
theorem repr_add (a b) : repr (a + b) = repr a + repr b :=
onote.repr_add a.1 b.1
/-- Subtraction of ordinal notations -/
instance : has_sub nonote := ⟨λ x y, mk (x.1 - y.1)⟩
theorem repr_sub (a b) : repr (a - b) = repr a - repr b :=
onote.repr_sub a.1 b.1
/-- Multiplication of ordinal notations -/
instance : has_mul nonote := ⟨λ x y, mk (x.1 * y.1)⟩
theorem repr_mul (a b) : repr (a * b) = repr a * repr b :=
onote.repr_mul a.1 b.1
/-- Exponentiation of ordinal notations -/
def power (x y : nonote) := mk (x.1.power y.1)
theorem repr_power (a b) : repr (power a b) = (repr a).power (repr b) :=
onote.repr_power a.1 b.1
end nonote
|
3191b9c8058409943d144228980c9f3f7975db54
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/ring_theory/ore_localization/ore_set.lean
|
10fecbb77e85c677bca396ba24f894fd168dce99
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 3,861
|
lean
|
/-
Copyright (c) 2022 Jakob von Raumer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jakob von Raumer, Kevin Klinge
-/
import group_theory.subgroup.basic
/-!
# (Right) Ore sets
This defines right Ore sets on arbitrary monoids.
## References
* https://ncatlab.org/nlab/show/Ore+set
-/
namespace ore_localization
section monoid
/-- A submonoid `S` of a monoid `R` is (right) Ore if common factors on the left can be turned
into common factors on the right, and if each pair of `r : R` and `s : S` admits an Ore numerator
`v : R` and an Ore denominator `u : S` such that `r * u = s * v`. -/
class ore_set {R : Type*} [monoid R] (S : submonoid R) :=
(ore_left_cancel : ∀ (r₁ r₂ : R) (s : S), ↑s * r₁ = s * r₂ → ∃ s' : S, r₁ * s' = r₂ * s')
(ore_num : R → S → R)
(ore_denom : R → S → S)
(ore_eq : ∀ (r : R) (s : S), r * ore_denom r s = s * ore_num r s)
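/- For instance (an illustrative remark added here, not part of the original file): in a commutative
monoid the Ore condition holds with the trivial choices `ore_num r s = r` and `ore_denom r s = s`,
since `r * s = s * r`; this is the instance `ore_set_comm` below. -/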
variables {R : Type*} [monoid R] {S : submonoid R} [ore_set S]
/-- Common factors on the left can be turned into common factors on the right, a weak form of
cancellability. -/
lemma ore_left_cancel (r₁ r₂ : R) (s : S) (h : ↑s * r₁ = s * r₂) : ∃ s' : S, r₁ * s' = r₂ * s' :=
ore_set.ore_left_cancel r₁ r₂ s h
/-- The Ore numerator of a fraction. -/
def ore_num (r : R) (s : S) : R := ore_set.ore_num r s
/-- The Ore denominator of a fraction. -/
def ore_denom (r : R) (s : S) : S := ore_set.ore_denom r s
/-- The Ore condition of a fraction, expressed in terms of `ore_num` and `ore_denom`. -/
lemma ore_eq (r : R) (s : S) : r * (ore_denom r s) = s * (ore_num r s) := ore_set.ore_eq r s
/-- The Ore condition bundled in a sigma type. This is useful in situations where we want to obtain
both witnesses and the condition for a given fraction. -/
def ore_condition (r : R) (s : S) : Σ' r' : R, Σ' s' : S, r * s' = s * r' :=
⟨ore_num r s, ore_denom r s, ore_eq r s⟩
/-- The trivial submonoid is an Ore set. -/
instance ore_set_bot : ore_set (⊥ : submonoid R) :=
{ ore_left_cancel := λ _ _ s h,
⟨s, begin
rcases s with ⟨s, hs⟩,
rw submonoid.mem_bot at hs,
subst hs,
rw [set_like.coe_mk, one_mul, one_mul] at h,
subst h
end⟩,
ore_num := λ r _, r,
ore_denom := λ _ s, s,
ore_eq := λ _ s, by { rcases s with ⟨s, hs⟩, rw submonoid.mem_bot at hs, simp [hs] } }
/-- Every submonoid of a commutative monoid is an Ore set. -/
@[priority 100]
instance ore_set_comm {R} [comm_monoid R] (S : submonoid R) : ore_set S :=
{ ore_left_cancel := λ m n s h, ⟨s, by rw [mul_comm n s, mul_comm m s, h]⟩,
ore_num := λ r _, r,
ore_denom := λ _ s, s,
ore_eq := λ r s, by rw mul_comm }
end monoid
/-- Cancellability in monoids with zeros can act as a replacement for the `ore_left_cancel`
condition of an Ore set. -/
def ore_set_of_cancel_monoid_with_zero
{R : Type*} [cancel_monoid_with_zero R] {S : submonoid R}
(ore_num : R → S → R) (ore_denom : R → S → S)
(ore_eq : ∀ (r : R) (s : S), r * (ore_denom r s) = s * (ore_num r s)) :
ore_set S :=
{ ore_left_cancel := λ r₁ r₂ s h, ⟨s, mul_eq_mul_right_iff.mpr (mul_eq_mul_left_iff.mp h)⟩,
ore_num := ore_num,
ore_denom := ore_denom,
ore_eq := ore_eq }
/-- In rings without zero divisors, the first (cancellability) condition is always fulfilled,
so it suffices to give a proof for the Ore condition itself. -/
def ore_set_of_no_zero_divisors
{R : Type*} [ring R] [no_zero_divisors R] {S : submonoid R}
(ore_num : R → S → R) (ore_denom : R → S → S)
(ore_eq : ∀ (r : R) (s : S), r * (ore_denom r s) = s * (ore_num r s)) :
ore_set S :=
begin
letI : cancel_monoid_with_zero R := no_zero_divisors.to_cancel_monoid_with_zero,
exact ore_set_of_cancel_monoid_with_zero ore_num ore_denom ore_eq
end
end ore_localization
|
6f8268f24b4065bdd4e7e2950a57fcd8af187cee
|
b7f22e51856f4989b970961f794f1c435f9b8f78
|
/tests/lean/run/matrix2.lean
|
024135ce62d69e0f08b26d9ad500c0b532b1d276
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean
|
cb8aa01055ffe2af0fb99a16b4cda8463b882cd1
|
38607e3eb57f57f77c0ac114ad169e9e4262e24f
|
refs/heads/master
| 1,611,187,284,081
| 1,450,766,737,000
| 1,476,122,547,000
| 11,513,992
| 2
| 0
| null | 1,401,763,102,000
| 1,374,182,235,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 833
|
lean
|
import logic
constant matrix.{l} : Type.{l} → Type.{l}
constant same_dim {A : Type} : matrix A → matrix A → Prop
constant add1 {A : Type} (m1 m2 : matrix A) {H : same_dim m1 m2} : matrix A
open eq
theorem same_dim_eq_args {A : Type} {m1 m2 m1' m2' : matrix A} (H1 : m1 = m1') (H2 : m2 = m2') (H : same_dim m1 m2) : same_dim m1' m2' :=
subst H1 (subst H2 H)
theorem add1_congr {A : Type} (m1 m2 m1' m2' : matrix A) (H1 : m1 = m1') (H2 : m2 = m2') (H : same_dim m1 m2) : @add1 A m1 m2 H = @add1 A m1' m2' (same_dim_eq_args H1 H2 H) :=
have base : ∀ (H1 : m1 = m1) (H2 : m2 = m2), @add1 A m1 m2 H = @add1 A m1 m2 (eq.rec (eq.rec H H1) H2), from
assume H1 H2, rfl,
have general : ∀ (H1 : m1 = m1') (H2 : m2 = m2'), @add1 A m1 m2 H = @add1 A m1' m2' (eq.rec (eq.rec H H1) H2), from
subst H1 (subst H2 base),
general H1 H2
|
5ee6d76c00f6e515d14977c7950a68bb97852a7e
|
94e33a31faa76775069b071adea97e86e218a8ee
|
/src/group_theory/p_group.lean
|
465062b3829f7526ffea5f006a7c8f5e5d569051
|
[
"Apache-2.0"
] |
permissive
|
urkud/mathlib
|
eab80095e1b9f1513bfb7f25b4fa82fa4fd02989
|
6379d39e6b5b279df9715f8011369a301b634e41
|
refs/heads/master
| 1,658,425,342,662
| 1,658,078,703,000
| 1,658,078,703,000
| 186,910,338
| 0
| 0
|
Apache-2.0
| 1,568,512,083,000
| 1,557,958,709,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 12,012
|
lean
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Thomas Browning
-/
import data.zmod.basic
import group_theory.index
import group_theory.group_action.conj_act
import group_theory.group_action.quotient
import group_theory.perm.cycle.type
/-!
# p-groups
This file contains a proof that if `G` is a `p`-group acting on a finite set `α`,
then the number of fixed points of the action is congruent mod `p` to the cardinality of `α`.
It also contains proofs of some corollaries of this lemma about existence of fixed points.
-/
open_locale big_operators
open fintype mul_action
variables (p : ℕ) (G : Type*) [group G]
/-- A p-group is a group in which every element has prime power order -/
def is_p_group : Prop := ∀ g : G, ∃ k : ℕ, g ^ (p ^ k) = 1
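/- For instance (an illustrative remark added here, not part of the original file): any finite group
of cardinality `p ^ n` is a `p`-group (`is_p_group.of_card` below), and the trivial subgroup is a
`p`-group for every `p` (`is_p_group.of_bot`). -/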
variables {p} {G}
namespace is_p_group
lemma iff_order_of [hp : fact p.prime] :
is_p_group p G ↔ ∀ g : G, ∃ k : ℕ, order_of g = p ^ k :=
forall_congr (λ g, ⟨λ ⟨k, hk⟩, exists_imp_exists (by exact λ j, Exists.snd)
((nat.dvd_prime_pow hp.out).mp (order_of_dvd_of_pow_eq_one hk)),
exists_imp_exists (λ k hk, by rw [←hk, pow_order_of_eq_one])⟩)
lemma of_card [fintype G] {n : ℕ} (hG : card G = p ^ n) : is_p_group p G :=
λ g, ⟨n, by rw [←hG, pow_card_eq_one]⟩
lemma of_bot : is_p_group p (⊥ : subgroup G) :=
of_card (subgroup.card_bot.trans (pow_zero p).symm)
lemma iff_card [fact p.prime] [fintype G] :
is_p_group p G ↔ ∃ n : ℕ, card G = p ^ n :=
begin
have hG : card G ≠ 0 := card_ne_zero,
refine ⟨λ h, _, λ ⟨n, hn⟩, of_card hn⟩,
suffices : ∀ q ∈ nat.factors (card G), q = p,
{ use (card G).factors.length,
rw [←list.prod_repeat, ←list.eq_repeat_of_mem this, nat.prod_factors hG] },
intros q hq,
obtain ⟨hq1, hq2⟩ := (nat.mem_factors hG).mp hq,
haveI : fact q.prime := ⟨hq1⟩,
obtain ⟨g, hg⟩ := exists_prime_order_of_dvd_card q hq2,
obtain ⟨k, hk⟩ := (iff_order_of.mp h) g,
exact (hq1.pow_eq_iff.mp (hg.symm.trans hk).symm).1.symm,
end
section G_is_p_group
variables (hG : is_p_group p G)
include hG
lemma of_injective {H : Type*} [group H] (ϕ : H →* G) (hϕ : function.injective ϕ) :
is_p_group p H :=
begin
simp_rw [is_p_group, ←hϕ.eq_iff, ϕ.map_pow, ϕ.map_one],
exact λ h, hG (ϕ h),
end
lemma to_subgroup (H : subgroup G) : is_p_group p H :=
hG.of_injective H.subtype subtype.coe_injective
lemma of_surjective {H : Type*} [group H] (ϕ : G →* H) (hϕ : function.surjective ϕ) :
is_p_group p H :=
begin
refine λ h, exists.elim (hϕ h) (λ g hg, exists_imp_exists (λ k hk, _) (hG g)),
rw [←hg, ←ϕ.map_pow, hk, ϕ.map_one],
end
lemma to_quotient (H : subgroup G) [H.normal] :
is_p_group p (G ⧸ H) :=
hG.of_surjective (quotient_group.mk' H) quotient.surjective_quotient_mk'
lemma of_equiv {H : Type*} [group H] (ϕ : G ≃* H) : is_p_group p H :=
hG.of_surjective ϕ.to_monoid_hom ϕ.surjective
variables [hp : fact p.prime]
include hp
lemma index (H : subgroup G) [fintype (G ⧸ H)] :
∃ n : ℕ, H.index = p ^ n :=
begin
obtain ⟨n, hn⟩ := iff_card.mp (hG.to_quotient H.normal_core),
obtain ⟨k, hk1, hk2⟩ := (nat.dvd_prime_pow hp.out).mp ((congr_arg _
(H.normal_core.index_eq_card.trans hn)).mp (subgroup.index_dvd_of_le H.normal_core_le)),
exact ⟨k, hk2⟩,
end
variables {α : Type*} [mul_action G α]
lemma card_orbit (a : α) [fintype (orbit G a)] :
∃ n : ℕ, card (orbit G a) = p ^ n :=
begin
let ϕ := orbit_equiv_quotient_stabilizer G a,
haveI := fintype.of_equiv (orbit G a) ϕ,
rw [card_congr ϕ, ←subgroup.index_eq_card],
exact hG.index (stabilizer G a),
end
variables (α) [fintype α] [fintype (fixed_points G α)]
/-- If `G` is a `p`-group acting on a finite set `α`, then the number of fixed points
of the action is congruent mod `p` to the cardinality of `α` -/
lemma card_modeq_card_fixed_points : card α ≡ card (fixed_points G α) [MOD p] :=
begin
classical,
calc card α = card (Σ y : quotient (orbit_rel G α), {x // quotient.mk' x = y}) :
card_congr (equiv.sigma_fiber_equiv (@quotient.mk' _ (orbit_rel G α))).symm
... = ∑ a : quotient (orbit_rel G α), card {x // quotient.mk' x = a} : card_sigma _
... ≡ ∑ a : fixed_points G α, 1 [MOD p] : _
... = _ : by simp; refl,
rw [←zmod.eq_iff_modeq_nat p, nat.cast_sum, nat.cast_sum],
have key : ∀ x, card {y // (quotient.mk' y : quotient (orbit_rel G α)) = quotient.mk' x} =
card (orbit G x) := λ x, by simp only [quotient.eq']; congr,
refine eq.symm (finset.sum_bij_ne_zero (λ a _ _, quotient.mk' a.1) (λ _ _ _, finset.mem_univ _)
(λ a₁ a₂ _ _ _ _ h, subtype.eq ((mem_fixed_points' α).mp a₂.2 a₁.1 (quotient.exact' h)))
(λ b, quotient.induction_on' b (λ b _ hb, _)) (λ a ha _, by
{ rw [key, mem_fixed_points_iff_card_orbit_eq_one.mp a.2] })),
obtain ⟨k, hk⟩ := hG.card_orbit b,
have : k = 0 := nat.le_zero_iff.1 (nat.le_of_lt_succ (lt_of_not_ge (mt (pow_dvd_pow p)
(by rwa [pow_one, ←hk, ←nat.modeq_zero_iff_dvd, ←zmod.eq_iff_modeq_nat, ←key,
nat.cast_zero])))),
exact ⟨⟨b, mem_fixed_points_iff_card_orbit_eq_one.2 $ by rw [hk, this, pow_zero]⟩,
finset.mem_univ _, (ne_of_eq_of_ne nat.cast_one one_ne_zero), rfl⟩,
end
/-- If a p-group acts on `α` and the cardinality of `α` is not a multiple
of `p` then the action has a fixed point. -/
lemma nonempty_fixed_point_of_prime_not_dvd_card (hpα : ¬ p ∣ card α) :
(fixed_points G α).nonempty :=
@set.nonempty_of_nonempty_subtype _ _ begin
rw [←card_pos_iff, pos_iff_ne_zero],
contrapose! hpα,
rw [←nat.modeq_zero_iff_dvd, ←hpα],
exact hG.card_modeq_card_fixed_points α,
end
/-- If a `p`-group acts on `α`, the cardinality of `α` is a multiple
of `p`, and the action has a fixed point, then it has another fixed point. -/
lemma exists_fixed_point_of_prime_dvd_card_of_fixed_point
(hpα : p ∣ card α) {a : α} (ha : a ∈ fixed_points G α) :
∃ b, b ∈ fixed_points G α ∧ a ≠ b :=
have hpf : p ∣ card (fixed_points G α) :=
nat.modeq_zero_iff_dvd.mp ((hG.card_modeq_card_fixed_points α).symm.trans hpα.modeq_zero_nat),
have hα : 1 < card (fixed_points G α) :=
(fact.out p.prime).one_lt.trans_le (nat.le_of_dvd (card_pos_iff.2 ⟨⟨a, ha⟩⟩) hpf),
let ⟨⟨b, hb⟩, hba⟩ := exists_ne_of_one_lt_card hα ⟨a, ha⟩ in
⟨b, hb, λ hab, hba (by simp_rw [hab])⟩
lemma center_nontrivial [nontrivial G] [fintype G] : nontrivial (subgroup.center G) :=
begin
classical,
have := (hG.of_equiv conj_act.to_conj_act).exists_fixed_point_of_prime_dvd_card_of_fixed_point G,
rw conj_act.fixed_points_eq_center at this,
obtain ⟨g, hg⟩ := this _ (subgroup.center G).one_mem,
{ exact ⟨⟨1, ⟨g, hg.1⟩, mt subtype.ext_iff.mp hg.2⟩⟩ },
{ obtain ⟨n, hn⟩ := is_p_group.iff_card.mp hG,
rw hn,
apply dvd_pow_self,
rintro rfl,
exact (fintype.one_lt_card).ne' hn },
end
lemma bot_lt_center [nontrivial G] [fintype G] : ⊥ < subgroup.center G :=
begin
haveI := center_nontrivial hG,
classical,
exact bot_lt_iff_ne_bot.mpr ((subgroup.center G).one_lt_card_iff_ne_bot.mp fintype.one_lt_card),
end
end G_is_p_group
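/-- A subgroup contained in a `p`-subgroup is a `p`-group. -/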
lemma to_le {H K : subgroup G} (hK : is_p_group p K) (hHK : H ≤ K) : is_p_group p H :=
hK.of_injective (subgroup.inclusion hHK) (λ a b h, subtype.ext (show _, from subtype.ext_iff.mp h))
lemma to_inf_left {H K : subgroup G} (hH : is_p_group p H) : is_p_group p (H ⊓ K : subgroup G) :=
hH.to_le inf_le_left
lemma to_inf_right {H K : subgroup G} (hK : is_p_group p K) : is_p_group p (H ⊓ K : subgroup G) :=
hK.to_le inf_le_right
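/-- The image of a `p`-subgroup under a group homomorphism is a `p`-group. -/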
lemma map {H : subgroup G} (hH : is_p_group p H) {K : Type*} [group K]
(ϕ : G →* K) : is_p_group p (H.map ϕ) :=
begin
rw [←H.subtype_range, monoid_hom.map_range],
exact hH.of_surjective (ϕ.restrict H).range_restrict (ϕ.restrict H).range_restrict_surjective,
end
lemma comap_of_ker_is_p_group {H : subgroup G} (hH : is_p_group p H) {K : Type*} [group K]
(ϕ : K →* G) (hϕ : is_p_group p ϕ.ker) : is_p_group p (H.comap ϕ) :=
begin
intro g,
obtain ⟨j, hj⟩ := hH ⟨ϕ g.1, g.2⟩,
rw [subtype.ext_iff, H.coe_pow, subtype.coe_mk, ←ϕ.map_pow] at hj,
obtain ⟨k, hk⟩ := hϕ ⟨g.1 ^ p ^ j, hj⟩,
rwa [subtype.ext_iff, ϕ.ker.coe_pow, subtype.coe_mk, ←pow_mul, ←pow_add] at hk,
exact ⟨j + k, by rwa [subtype.ext_iff, (H.comap ϕ).coe_pow]⟩,
end
lemma ker_is_p_group_of_injective {K : Type*} [group K] {ϕ : K →* G} (hϕ : function.injective ϕ) :
is_p_group p ϕ.ker :=
(congr_arg (λ Q : subgroup K, is_p_group p Q) (ϕ.ker_eq_bot_iff.mpr hϕ)).mpr is_p_group.of_bot
lemma comap_of_injective {H : subgroup G} (hH : is_p_group p H) {K : Type*} [group K]
(ϕ : K →* G) (hϕ : function.injective ϕ) : is_p_group p (H.comap ϕ) :=
hH.comap_of_ker_is_p_group ϕ (ker_is_p_group_of_injective hϕ)
lemma comap_subtype {H : subgroup G} (hH : is_p_group p H) {K : subgroup G} :
is_p_group p (H.comap K.subtype) :=
hH.comap_of_injective K.subtype subtype.coe_injective
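/-- The join of a `p`-subgroup and a normal `p`-subgroup is a `p`-group. -/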
lemma to_sup_of_normal_right {H K : subgroup G} (hH : is_p_group p H) (hK : is_p_group p K)
[K.normal] : is_p_group p (H ⊔ K : subgroup G) :=
begin
rw [←quotient_group.ker_mk K, ←subgroup.comap_map_eq],
apply (hH.map (quotient_group.mk' K)).comap_of_ker_is_p_group,
rwa quotient_group.ker_mk,
end
lemma to_sup_of_normal_left {H K : subgroup G} (hH : is_p_group p H) (hK : is_p_group p K)
[H.normal] : is_p_group p (H ⊔ K : subgroup G) :=
(congr_arg (λ H : subgroup G, is_p_group p H) sup_comm).mp (to_sup_of_normal_right hK hH)
lemma to_sup_of_normal_right' {H K : subgroup G} (hH : is_p_group p H) (hK : is_p_group p K)
(hHK : H ≤ K.normalizer) : is_p_group p (H ⊔ K : subgroup G) :=
let hHK' := to_sup_of_normal_right (hH.of_equiv (subgroup.comap_subtype_equiv_of_le hHK).symm)
(hK.of_equiv (subgroup.comap_subtype_equiv_of_le subgroup.le_normalizer).symm) in
((congr_arg (λ H : subgroup K.normalizer, is_p_group p H)
(subgroup.sup_subgroup_of_eq hHK subgroup.le_normalizer)).mp hHK').of_equiv
(subgroup.comap_subtype_equiv_of_le (sup_le hHK subgroup.le_normalizer))
lemma to_sup_of_normal_left' {H K : subgroup G} (hH : is_p_group p H) (hK : is_p_group p K)
(hHK : K ≤ H.normalizer) : is_p_group p (H ⊔ K : subgroup G) :=
(congr_arg (λ H : subgroup G, is_p_group p H) sup_comm).mp (to_sup_of_normal_right' hK hH hHK)
/-- Finite `p`-groups with different `p` have coprime orders. -/
lemma coprime_card_of_ne {G₂ : Type*} [group G₂]
(p₁ p₂ : ℕ) [hp₁ : fact p₁.prime] [hp₂ : fact p₂.prime] (hne : p₁ ≠ p₂)
(H₁ : subgroup G) (H₂ : subgroup G₂) [fintype H₁] [fintype H₂]
(hH₁ : is_p_group p₁ H₁) (hH₂ : is_p_group p₂ H₂) :
nat.coprime (fintype.card H₁) (fintype.card H₂) :=
begin
obtain ⟨n₁, heq₁⟩ := iff_card.mp hH₁, rw heq₁, clear heq₁,
obtain ⟨n₂, heq₂⟩ := iff_card.mp hH₂, rw heq₂, clear heq₂,
exact nat.coprime_pow_primes _ _ (hp₁.elim) (hp₂.elim) hne,
end
/-- `p`-groups with different `p` are disjoint. -/
lemma disjoint_of_ne (p₁ p₂ : ℕ) [hp₁ : fact p₁.prime] [hp₂ : fact p₂.prime] (hne : p₁ ≠ p₂)
(H₁ H₂ : subgroup G) (hH₁ : is_p_group p₁ H₁) (hH₂ : is_p_group p₂ H₂) :
disjoint H₁ H₂ :=
begin
rintro x ⟨hx₁, hx₂⟩,
rw subgroup.mem_bot,
obtain ⟨n₁, hn₁⟩ := iff_order_of.mp hH₁ ⟨x, hx₁⟩,
obtain ⟨n₂, hn₂⟩ := iff_order_of.mp hH₂ ⟨x, hx₂⟩,
rw [← order_of_subgroup, subgroup.coe_mk] at hn₁ hn₂,
have : p₁ ^ n₁ = p₂ ^ n₂, by rw [← hn₁, ← hn₂],
have : n₁ = 0,
{ contrapose! hne with h,
rw ← associated_iff_eq at this ⊢,
exact associated.of_pow_associated_of_prime
(nat.prime_iff.mp hp₁.elim) (nat.prime_iff.mp hp₂.elim) (ne.bot_lt h) this },
simpa [this] using hn₁,
end
end is_p_group
|
f67f06c2fc067c0e5afda3b1fe44e4ff2690adf8 | 6432ea7a083ff6ba21ea17af9ee47b9c371760f7 | /src/Init/Data/Nat/Linear.lean | 4fa179874e142f104d1f911e56c7909ecfb4bbd0 | ["Apache-2.0", "LLVM-exception", "NCSA", "LGPL-3.0-only", "LicenseRef-scancode-inner-net-2.0", "BSD-3-Clause", "LGPL-2.0-or-later", "Spencer-94", "LGPL-2.1-or-later", "HPND", "LicenseRef-scancode-pcre", "ISC", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "SunPro", "CMU-Mach"] | permissive | leanprover/lean4 | 4bdf9790294964627eb9be79f5e8f6157780b4cc | f1f9dc0f2f531af3312398999d8b8303fa5f096b | refs/heads/master | 1,693,360,665,786 | 1,693,350,868,000 | 1,693,350,868,000 | 129,571,436 | 2,827 | 311 | Apache-2.0 | 1,694,716,156,000 | 1,523,760,560,000 | Lean | UTF-8 | Lean | false | false | 29,995 | lean |
/-
Copyright (c) 2022 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Coe
import Init.Classical
import Init.SimpLemmas
import Init.Data.Nat.Basic
import Init.Data.List.Basic
import Init.Data.Prod
namespace Nat.Linear
/-!
Helper definitions and theorems for constructing linear arithmetic proofs.
-/
abbrev Var := Nat
abbrev Context := List Nat
/--
When encoding polynomials, we use `fixedVar` to represent numerals.
The denotation of `fixedVar` is always `1`. -/
def fixedVar := 100000000 -- Any big number should work here
def Var.denote (ctx : Context) (v : Var) : Nat :=
bif v == fixedVar then 1 else go ctx v
where
go : List Nat → Nat → Nat
| [], _ => 0
| a::_, 0 => a
| _::as, i+1 => go as i
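/-- Linear arithmetic expressions over `Nat`: numerals, variables, sums, and
multiplication by a numeral on either side. -/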
inductive Expr where
| num (v : Nat)
| var (i : Var)
| add (a b : Expr)
| mulL (k : Nat) (a : Expr)
| mulR (a : Expr) (k : Nat)
deriving Inhabited
def Expr.denote (ctx : Context) : Expr → Nat
| Expr.add a b => Nat.add (denote ctx a) (denote ctx b)
| Expr.num k => k
| Expr.var v => v.denote ctx
| Expr.mulL k e => Nat.mul k (denote ctx e)
| Expr.mulR e k => Nat.mul (denote ctx e) k
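/-
Illustrative sketch (not part of the original file): with context `ctx := [a, b]`,
the linear expression `2 * x₀ + x₁` is encoded as
  `Expr.add (Expr.mulL 2 (Expr.var 0)) (Expr.var 1)`
and `Expr.denote ctx` evaluates it to `2 * a + b`.
-/

/-- A linear polynomial, represented as a list of `(coefficient, variable)` pairs.
Numerals are encoded as monomials over `fixedVar`. -/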
abbrev Poly := List (Nat × Var)
def Poly.denote (ctx : Context) (p : Poly) : Nat :=
match p with
| [] => 0
| (k, v) :: p => Nat.add (Nat.mul k (v.denote ctx)) (denote ctx p)
def Poly.insertSorted (k : Nat) (v : Var) (p : Poly) : Poly :=
match p with
| [] => [(k, v)]
| (k', v') :: p => bif Nat.blt v v' then (k, v) :: (k', v') :: p else (k', v') :: insertSorted k v p
def Poly.sort (p : Poly) : Poly :=
let rec go (p : Poly) (r : Poly) : Poly :=
match p with
| [] => r
| (k, v) :: p => go p (r.insertSorted k v)
go p []
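/-- Merge adjacent monomials with the same variable by adding their coefficients
(intended to be applied to sorted polynomials). -/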
def Poly.fuse (p : Poly) : Poly :=
match p with
| [] => []
| (k, v) :: p =>
match fuse p with
| [] => [(k, v)]
| (k', v') :: p' => bif v == v' then (Nat.add k k', v)::p' else (k, v) :: (k', v') :: p'
def Poly.mul (k : Nat) (p : Poly) : Poly :=
bif k == 0 then
[]
else bif k == 1 then
p
else
go p
where
go : Poly → Poly
| [] => []
| (k', v) :: p => (Nat.mul k k', v) :: go p
def Poly.cancelAux (fuel : Nat) (m₁ m₂ r₁ r₂ : Poly) : Poly × Poly :=
match fuel with
| 0 => (r₁.reverse ++ m₁, r₂.reverse ++ m₂)
| fuel + 1 =>
match m₁, m₂ with
| m₁, [] => (r₁.reverse ++ m₁, r₂.reverse)
| [], m₂ => (r₁.reverse, r₂.reverse ++ m₂)
| (k₁, v₁) :: m₁, (k₂, v₂) :: m₂ =>
bif Nat.blt v₁ v₂ then
cancelAux fuel m₁ ((k₂, v₂) :: m₂) ((k₁, v₁) :: r₁) r₂
else bif Nat.blt v₂ v₁ then
cancelAux fuel ((k₁, v₁) :: m₁) m₂ r₁ ((k₂, v₂) :: r₂)
else bif Nat.blt k₁ k₂ then
cancelAux fuel m₁ m₂ r₁ ((Nat.sub k₂ k₁, v₁) :: r₂)
else bif Nat.blt k₂ k₁ then
cancelAux fuel m₁ m₂ ((Nat.sub k₁ k₂, v₁) :: r₁) r₂
else
cancelAux fuel m₁ m₂ r₁ r₂
def hugeFuel := 1000000 -- any big number should work
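/-- Cancel common monomials on the two sides of a constraint: matching coefficients are
subtracted, so each variable ends up on at most one side. -/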
def Poly.cancel (p₁ p₂ : Poly) : Poly × Poly :=
cancelAux hugeFuel p₁ p₂ [] []
def Poly.isNum? (p : Poly) : Option Nat :=
match p with
| [] => some 0
| [(k, v)] => bif v == fixedVar then some k else none
| _ => none
def Poly.isZero (p : Poly) : Bool :=
match p with
| [] => true
| _ => false
def Poly.isNonZero (p : Poly) : Bool :=
match p with
| [] => false
| (k, v) :: p => bif v == fixedVar then k > 0 else isNonZero p
def Poly.denote_eq (ctx : Context) (mp : Poly × Poly) : Prop := mp.1.denote ctx = mp.2.denote ctx
def Poly.denote_le (ctx : Context) (mp : Poly × Poly) : Prop := mp.1.denote ctx ≤ mp.2.denote ctx
def Poly.combineAux (fuel : Nat) (p₁ p₂ : Poly) : Poly :=
match fuel with
| 0 => p₁ ++ p₂
| fuel + 1 =>
match p₁, p₂ with
| p₁, [] => p₁
| [], p₂ => p₂
| (k₁, v₁) :: p₁, (k₂, v₂) :: p₂ =>
bif Nat.blt v₁ v₂ then
(k₁, v₁) :: combineAux fuel p₁ ((k₂, v₂) :: p₂)
else bif Nat.blt v₂ v₁ then
(k₂, v₂) :: combineAux fuel ((k₁, v₁) :: p₁) p₂
else
(Nat.add k₁ k₂, v₁) :: combineAux fuel p₁ p₂
def Poly.combine (p₁ p₂ : Poly) : Poly :=
combineAux hugeFuel p₁ p₂
def Expr.toPoly : Expr → Poly
| Expr.num k => bif k == 0 then [] else [ (k, fixedVar) ]
| Expr.var i => [(1, i)]
| Expr.add a b => a.toPoly ++ b.toPoly
| Expr.mulL k a => a.toPoly.mul k
| Expr.mulR a k => a.toPoly.mul k
def Poly.norm (p : Poly) : Poly :=
p.sort.fuse
def Expr.toNormPoly (e : Expr) : Poly :=
e.toPoly.norm
def Expr.inc (e : Expr) : Expr :=
Expr.add e (Expr.num 1)
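/-- A polynomial constraint: `lhs = rhs` when `eq` is `true`, and `lhs ≤ rhs` otherwise. -/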
structure PolyCnstr where
eq : Bool
lhs : Poly
rhs : Poly
deriving BEq
-- TODO: implement LawfulBEq generator companion for BEq
instance : LawfulBEq PolyCnstr where
eq_of_beq {a b} h := by
cases a; rename_i eq₁ lhs₁ rhs₁
cases b; rename_i eq₂ lhs₂ rhs₂
have h : eq₁ == eq₂ && lhs₁ == lhs₂ && rhs₁ == rhs₂ := h
simp at h
have ⟨⟨h₁, h₂⟩, h₃⟩ := h
rw [h₁, h₂, h₃]
rfl {a} := by
cases a; rename_i eq lhs rhs
show (eq == eq && lhs == lhs && rhs == rhs) = true
simp [LawfulBEq.rfl]
def PolyCnstr.mul (k : Nat) (c : PolyCnstr) : PolyCnstr :=
{ c with lhs := c.lhs.mul k, rhs := c.rhs.mul k }
def PolyCnstr.combine (c₁ c₂ : PolyCnstr) : PolyCnstr :=
let (lhs, rhs) := Poly.cancel (c₁.lhs.combine c₂.lhs) (c₁.rhs.combine c₂.rhs)
{ eq := c₁.eq && c₂.eq, lhs, rhs }
structure ExprCnstr where
eq : Bool
lhs : Expr
rhs : Expr
def PolyCnstr.denote (ctx : Context) (c : PolyCnstr) : Prop :=
bif c.eq then
Poly.denote_eq ctx (c.lhs, c.rhs)
else
Poly.denote_le ctx (c.lhs, c.rhs)
def PolyCnstr.norm (c : PolyCnstr) : PolyCnstr :=
let (lhs, rhs) := Poly.cancel c.lhs.sort.fuse c.rhs.sort.fuse
{ eq := c.eq, lhs, rhs }
def PolyCnstr.isUnsat (c : PolyCnstr) : Bool :=
bif c.eq then
(c.lhs.isZero && c.rhs.isNonZero) || (c.lhs.isNonZero && c.rhs.isZero)
else
c.lhs.isNonZero && c.rhs.isZero
def PolyCnstr.isValid (c : PolyCnstr) : Bool :=
bif c.eq then
c.lhs.isZero && c.rhs.isZero
else
c.lhs.isZero
def ExprCnstr.denote (ctx : Context) (c : ExprCnstr) : Prop :=
bif c.eq then
c.lhs.denote ctx = c.rhs.denote ctx
else
c.lhs.denote ctx ≤ c.rhs.denote ctx
def ExprCnstr.toPoly (c : ExprCnstr) : PolyCnstr :=
{ c with lhs := c.lhs.toPoly, rhs := c.rhs.toPoly }
def ExprCnstr.toNormPoly (c : ExprCnstr) : PolyCnstr :=
let (lhs, rhs) := Poly.cancel c.lhs.toNormPoly c.rhs.toNormPoly
{ c with lhs, rhs }
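/-- A list of `(coefficient, constraint)` pairs; when combined via `Certificate.combine`,
each constraint is scaled by `coefficient + 1`. -/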
abbrev Certificate := List (Nat × ExprCnstr)
def Certificate.combineHyps (c : PolyCnstr) (hs : Certificate) : PolyCnstr :=
match hs with
| [] => c
| (k, c') :: hs => combineHyps (PolyCnstr.combine c (c'.toNormPoly.mul (Nat.add k 1))) hs
def Certificate.combine (hs : Certificate) : PolyCnstr :=
match hs with
| [] => { eq := true, lhs := [], rhs := [] }
| (k, c) :: hs => combineHyps (c.toNormPoly.mul (Nat.add k 1)) hs
def Certificate.denote (ctx : Context) (c : Certificate) : Prop :=
match c with
| [] => False
| (_, c)::hs => c.denote ctx → denote ctx hs
def monomialToExpr (k : Nat) (v : Var) : Expr :=
bif v == fixedVar then
Expr.num k
else bif k == 1 then
Expr.var v
else
Expr.mulL k (Expr.var v)
def Poly.toExpr (p : Poly) : Expr :=
match p with
| [] => Expr.num 0
| (k, v) :: p => go (monomialToExpr k v) p
where
go (e : Expr) (p : Poly) : Expr :=
match p with
| [] => e
| (k, v) :: p => go (Expr.add e (monomialToExpr k v)) p
def PolyCnstr.toExpr (c : PolyCnstr) : ExprCnstr :=
{ c with lhs := c.lhs.toExpr, rhs := c.rhs.toExpr }
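/-!
The remaining theorems establish that the syntactic operations above agree with their
denotations (e.g. `Poly.denote_sort`, `Poly.denote_fuse`, `Poly.denote_eq_cancel_eq`),
which is what justifies using them to normalize linear arithmetic goals.
-/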
attribute [local simp] Nat.add_comm Nat.add_assoc Nat.add_left_comm Nat.right_distrib Nat.left_distrib Nat.mul_assoc Nat.mul_comm
attribute [local simp] Poly.denote Expr.denote Poly.insertSorted Poly.sort Poly.sort.go Poly.fuse Poly.cancelAux
attribute [local simp] Poly.mul Poly.mul.go
theorem Poly.denote_insertSorted (ctx : Context) (k : Nat) (v : Var) (p : Poly) : (p.insertSorted k v).denote ctx = p.denote ctx + k * v.denote ctx := by
match p with
| [] => simp
| (k', v') :: p => by_cases h : Nat.blt v v' <;> simp [h, denote_insertSorted]
attribute [local simp] Poly.denote_insertSorted
theorem Poly.denote_sort_go (ctx : Context) (p : Poly) (r : Poly) : (sort.go p r).denote ctx = p.denote ctx + r.denote ctx := by
match p with
| [] => simp
| (k, v):: p => simp [denote_sort_go]
attribute [local simp] Poly.denote_sort_go
theorem Poly.denote_sort (ctx : Context) (m : Poly) : m.sort.denote ctx = m.denote ctx := by
simp
attribute [local simp] Poly.denote_sort
theorem Poly.denote_append (ctx : Context) (p q : Poly) : (p ++ q).denote ctx = p.denote ctx + q.denote ctx := by
match p with
| [] => simp
| (k, v) :: p => simp [denote_append]
attribute [local simp] Poly.denote_append
theorem Poly.denote_cons (ctx : Context) (k : Nat) (v : Var) (p : Poly) : denote ctx ((k, v) :: p) = k * v.denote ctx + p.denote ctx := by
match p with
| [] => simp
| _ :: m => simp [denote_cons]
attribute [local simp] Poly.denote_cons
theorem Poly.denote_reverseAux (ctx : Context) (p q : Poly) : denote ctx (List.reverseAux p q) = denote ctx (p ++ q) := by
match p with
| [] => simp [List.reverseAux]
| (k, v) :: p => simp [List.reverseAux, denote_reverseAux]
attribute [local simp] Poly.denote_reverseAux
theorem Poly.denote_reverse (ctx : Context) (p : Poly) : denote ctx (List.reverse p) = denote ctx p := by
simp [List.reverse]
attribute [local simp] Poly.denote_reverse
theorem Poly.denote_fuse (ctx : Context) (p : Poly) : p.fuse.denote ctx = p.denote ctx := by
match p with
| [] => rfl
| (k, v) :: p =>
have ih := denote_fuse ctx p
simp
split
case _ h => simp [← ih, h]
case _ k' v' p' h => by_cases he : v == v' <;> simp [he, ← ih, h]; rw [eq_of_beq he]
attribute [local simp] Poly.denote_fuse
theorem Poly.denote_mul (ctx : Context) (k : Nat) (p : Poly) : (p.mul k).denote ctx = k * p.denote ctx := by
simp
by_cases h : k == 0 <;> simp [h]; simp [eq_of_beq h]
by_cases h : k == 1 <;> simp [h]; simp [eq_of_beq h]
induction p with
| nil => simp
| cons kv m ih => cases kv with | _ k' v => simp [ih]
private theorem eq_of_not_blt_eq_true (h₁ : ¬ (Nat.blt x y = true)) (h₂ : ¬ (Nat.blt y x = true)) : x = y :=
have h₁ : ¬ x < y := fun h => h₁ (Nat.blt_eq.mpr h)
have h₂ : ¬ y < x := fun h => h₂ (Nat.blt_eq.mpr h)
Nat.le_antisymm (Nat.ge_of_not_lt h₂) (Nat.ge_of_not_lt h₁)
attribute [local simp] Poly.denote_mul
theorem Poly.denote_eq_cancelAux (ctx : Context) (fuel : Nat) (m₁ m₂ r₁ r₂ : Poly)
(h : denote_eq ctx (r₁.reverse ++ m₁, r₂.reverse ++ m₂)) : denote_eq ctx (cancelAux fuel m₁ m₂ r₁ r₂) := by
induction fuel generalizing m₁ m₂ r₁ r₂ with
| zero => assumption
| succ fuel ih =>
simp
split <;> try (simp at h; try assumption)
rename_i k₁ v₁ m₁ k₂ v₂ m₂
by_cases hltv : Nat.blt v₁ v₂ <;> simp [hltv]
· apply ih; simp [denote_eq] at h |-; assumption
· by_cases hgtv : Nat.blt v₂ v₁ <;> simp [hgtv]
· apply ih; simp [denote_eq] at h |-; assumption
· have heqv : v₁ = v₂ := eq_of_not_blt_eq_true hltv hgtv; subst heqv
by_cases hltk : Nat.blt k₁ k₂ <;> simp [hltk]
· apply ih
simp [denote_eq] at h |-
have haux : k₁ * Var.denote ctx v₁ ≤ k₂ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hltk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux]
apply Eq.symm
apply Nat.sub_eq_of_eq_add
simp [h]
· by_cases hgtk : Nat.blt k₂ k₁ <;> simp [hgtk]
· apply ih
simp [denote_eq] at h |-
have haux : k₂ * Var.denote ctx v₁ ≤ k₁ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hgtk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux]
apply Nat.sub_eq_of_eq_add
simp [h]
· have heqk : k₁ = k₂ := eq_of_not_blt_eq_true hltk hgtk; subst heqk
apply ih
simp [denote_eq] at h |-
rw [← Nat.add_assoc, ← Nat.add_assoc] at h
exact Nat.add_right_cancel h
theorem Poly.of_denote_eq_cancelAux (ctx : Context) (fuel : Nat) (m₁ m₂ r₁ r₂ : Poly)
(h : denote_eq ctx (cancelAux fuel m₁ m₂ r₁ r₂)) : denote_eq ctx (r₁.reverse ++ m₁, r₂.reverse ++ m₂) := by
induction fuel generalizing m₁ m₂ r₁ r₂ with
| zero => assumption
| succ fuel ih =>
simp at h
split at h <;> (try simp; assumption)
rename_i k₁ v₁ m₁ k₂ v₂ m₂
by_cases hltv : Nat.blt v₁ v₂ <;> simp [hltv] at h
· have ih := ih (h := h); simp [denote_eq] at ih ⊢; assumption
· by_cases hgtv : Nat.blt v₂ v₁ <;> simp [hgtv] at h
· have ih := ih (h := h); simp [denote_eq] at ih ⊢; assumption
· have heqv : v₁ = v₂ := eq_of_not_blt_eq_true hltv hgtv; subst heqv
by_cases hltk : Nat.blt k₁ k₂ <;> simp [hltk] at h
· have ih := ih (h := h); simp [denote_eq] at ih ⊢
have haux : k₁ * Var.denote ctx v₁ ≤ k₂ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hltk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux] at ih
have ih := Nat.eq_add_of_sub_eq (Nat.le_trans haux (Nat.le_add_left ..)) ih.symm
simp at ih
rw [ih]
· by_cases hgtk : Nat.blt k₂ k₁ <;> simp [hgtk] at h
· have ih := ih (h := h); simp [denote_eq] at ih ⊢
have haux : k₂ * Var.denote ctx v₁ ≤ k₁ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hgtk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux] at ih
have ih := Nat.eq_add_of_sub_eq (Nat.le_trans haux (Nat.le_add_left ..)) ih
simp at ih
rw [ih]
· have heqk : k₁ = k₂ := eq_of_not_blt_eq_true hltk hgtk; subst heqk
have ih := ih (h := h); simp [denote_eq] at ih ⊢
rw [← Nat.add_assoc, ih, Nat.add_assoc]
theorem Poly.denote_eq_cancel {ctx : Context} {m₁ m₂ : Poly} (h : denote_eq ctx (m₁, m₂)) : denote_eq ctx (cancel m₁ m₂) := by
apply denote_eq_cancelAux; simp [h]
theorem Poly.of_denote_eq_cancel {ctx : Context} {m₁ m₂ : Poly} (h : denote_eq ctx (cancel m₁ m₂)) : denote_eq ctx (m₁, m₂) := by
have := Poly.of_denote_eq_cancelAux (h := h)
simp at this
assumption
theorem Poly.denote_eq_cancel_eq (ctx : Context) (m₁ m₂ : Poly) : denote_eq ctx (cancel m₁ m₂) = denote_eq ctx (m₁, m₂) :=
propext <| Iff.intro (fun h => of_denote_eq_cancel h) (fun h => denote_eq_cancel h)
attribute [local simp] Poly.denote_eq_cancel_eq
theorem Poly.denote_le_cancelAux (ctx : Context) (fuel : Nat) (m₁ m₂ r₁ r₂ : Poly)
(h : denote_le ctx (r₁.reverse ++ m₁, r₂.reverse ++ m₂)) : denote_le ctx (cancelAux fuel m₁ m₂ r₁ r₂) := by
induction fuel generalizing m₁ m₂ r₁ r₂ with
| zero => assumption
| succ fuel ih =>
simp
split <;> try (simp at h; assumption)
rename_i k₁ v₁ m₁ k₂ v₂ m₂
by_cases hltv : Nat.blt v₁ v₂ <;> simp [hltv]
· apply ih; simp [denote_le] at h |-; assumption
· by_cases hgtv : Nat.blt v₂ v₁ <;> simp [hgtv]
· apply ih; simp [denote_le] at h |-; assumption
· have heqv : v₁ = v₂ := eq_of_not_blt_eq_true hltv hgtv; subst heqv
by_cases hltk : Nat.blt k₁ k₂ <;> simp [hltk]
· apply ih
simp [denote_le] at h |-
have haux : k₁ * Var.denote ctx v₁ ≤ k₂ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hltk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux]
apply Nat.le_sub_of_add_le
simp [h]
· by_cases hgtk : Nat.blt k₂ k₁ <;> simp [hgtk]
· apply ih
simp [denote_le] at h |-
have haux : k₂ * Var.denote ctx v₁ ≤ k₁ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hgtk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux]
apply Nat.sub_le_of_le_add
simp [h]
· have heqk : k₁ = k₂ := eq_of_not_blt_eq_true hltk hgtk; subst heqk
apply ih
simp [denote_le] at h |-
rw [← Nat.add_assoc, ← Nat.add_assoc] at h
apply Nat.le_of_add_le_add_right h
done
theorem Poly.of_denote_le_cancelAux (ctx : Context) (fuel : Nat) (m₁ m₂ r₁ r₂ : Poly)
(h : denote_le ctx (cancelAux fuel m₁ m₂ r₁ r₂)) : denote_le ctx (r₁.reverse ++ m₁, r₂.reverse ++ m₂) := by
induction fuel generalizing m₁ m₂ r₁ r₂ with
| zero => assumption
| succ fuel ih =>
simp at h
split at h <;> try (simp; assumption)
rename_i k₁ v₁ m₁ k₂ v₂ m₂
by_cases hltv : Nat.blt v₁ v₂ <;> simp [hltv] at h
· have ih := ih (h := h); simp [denote_le] at ih ⊢; assumption
· by_cases hgtv : Nat.blt v₂ v₁ <;> simp [hgtv] at h
· have ih := ih (h := h); simp [denote_le] at ih ⊢; assumption
· have heqv : v₁ = v₂ := eq_of_not_blt_eq_true hltv hgtv; subst heqv
by_cases hltk : Nat.blt k₁ k₂ <;> simp [hltk] at h
· have ih := ih (h := h); simp [denote_le] at ih ⊢
have haux : k₁ * Var.denote ctx v₁ ≤ k₂ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hltk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux] at ih
have := Nat.add_le_of_le_sub (Nat.le_trans haux (Nat.le_add_left ..)) ih
simp at this
exact this
· by_cases hgtk : Nat.blt k₂ k₁ <;> simp [hgtk] at h
· have ih := ih (h := h); simp [denote_le] at ih ⊢
have haux : k₂ * Var.denote ctx v₁ ≤ k₁ * Var.denote ctx v₁ := Nat.mul_le_mul_right _ (Nat.le_of_lt (Nat.blt_eq.mp hgtk))
rw [Nat.mul_sub_right_distrib, ← Nat.add_assoc, ← Nat.add_sub_assoc haux] at ih
have := Nat.le_add_of_sub_le ih
simp at this
exact this
· have heqk : k₁ = k₂ := eq_of_not_blt_eq_true hltk hgtk; subst heqk
have ih := ih (h := h); simp [denote_le] at ih ⊢
have := Nat.add_le_add_right ih (k₁ * Var.denote ctx v₁)
simp at this
exact this
theorem Poly.denote_le_cancel {ctx : Context} {m₁ m₂ : Poly} (h : denote_le ctx (m₁, m₂)) : denote_le ctx (cancel m₁ m₂) := by
apply denote_le_cancelAux; simp [h]
theorem Poly.of_denote_le_cancel {ctx : Context} {m₁ m₂ : Poly} (h : denote_le ctx (cancel m₁ m₂)) : denote_le ctx (m₁, m₂) := by
have := Poly.of_denote_le_cancelAux (h := h)
simp at this
assumption
theorem Poly.denote_le_cancel_eq (ctx : Context) (m₁ m₂ : Poly) : denote_le ctx (cancel m₁ m₂) = denote_le ctx (m₁, m₂) :=
propext <| Iff.intro (fun h => of_denote_le_cancel h) (fun h => denote_le_cancel h)
attribute [local simp] Poly.denote_le_cancel_eq
theorem Poly.denote_combineAux (ctx : Context) (fuel : Nat) (p₁ p₂ : Poly) : (p₁.combineAux fuel p₂).denote ctx = p₁.denote ctx + p₂.denote ctx := by
induction fuel generalizing p₁ p₂ with simp [combineAux]
| succ fuel ih =>
split <;> simp
rename_i k₁ v₁ p₁ k₂ v₂ p₂
by_cases hltv : Nat.blt v₁ v₂ <;> simp [hltv, ih]
by_cases hgtv : Nat.blt v₂ v₁ <;> simp [hgtv, ih]
have heqv : v₁ = v₂ := eq_of_not_blt_eq_true hltv hgtv
simp [heqv]
theorem Poly.denote_combine (ctx : Context) (p₁ p₂ : Poly) : (p₁.combine p₂).denote ctx = p₁.denote ctx + p₂.denote ctx := by
simp [combine, denote_combineAux]
attribute [local simp] Poly.denote_combine
theorem Expr.denote_toPoly (ctx : Context) (e : Expr) : e.toPoly.denote ctx = e.denote ctx := by
induction e with
| num k => by_cases h : k == 0 <;> simp [toPoly, h, Var.denote]; simp [eq_of_beq h]
| var i => simp [toPoly]
| add a b iha ihb => simp [toPoly, iha, ihb]
| mulL k a ih => simp [toPoly, ih, -Poly.mul]
| mulR k a ih => simp [toPoly, ih, -Poly.mul]
attribute [local simp] Expr.denote_toPoly
theorem Expr.eq_of_toNormPoly (ctx : Context) (a b : Expr) (h : a.toNormPoly = b.toNormPoly) : a.denote ctx = b.denote ctx := by
simp [toNormPoly, Poly.norm] at h
have h := congrArg (Poly.denote ctx) h
simp at h
assumption
theorem Expr.of_cancel_eq (ctx : Context) (a b c d : Expr) (h : Poly.cancel a.toNormPoly b.toNormPoly = (c.toPoly, d.toPoly)) : (a.denote ctx = b.denote ctx) = (c.denote ctx = d.denote ctx) := by
have := Poly.denote_eq_cancel_eq ctx a.toNormPoly b.toNormPoly
rw [h] at this
simp [toNormPoly, Poly.norm, Poly.denote_eq] at this
exact this.symm
theorem Expr.of_cancel_le (ctx : Context) (a b c d : Expr) (h : Poly.cancel a.toNormPoly b.toNormPoly = (c.toPoly, d.toPoly)) : (a.denote ctx ≤ b.denote ctx) = (c.denote ctx ≤ d.denote ctx) := by
have := Poly.denote_le_cancel_eq ctx a.toNormPoly b.toNormPoly
rw [h] at this
simp [toNormPoly, Poly.norm,Poly.denote_le] at this
exact this.symm
theorem Expr.of_cancel_lt (ctx : Context) (a b c d : Expr) (h : Poly.cancel a.inc.toNormPoly b.toNormPoly = (c.inc.toPoly, d.toPoly)) : (a.denote ctx < b.denote ctx) = (c.denote ctx < d.denote ctx) :=
of_cancel_le ctx a.inc b c.inc d h
theorem ExprCnstr.toPoly_norm_eq (c : ExprCnstr) : c.toPoly.norm = c.toNormPoly :=
rfl
theorem ExprCnstr.denote_toPoly (ctx : Context) (c : ExprCnstr) : c.toPoly.denote ctx = c.denote ctx := by
cases c; rename_i eq lhs rhs
simp [ExprCnstr.denote, PolyCnstr.denote, ExprCnstr.toPoly];
by_cases h : eq = true <;> simp [h]
· simp [Poly.denote_eq, Expr.toPoly]
· simp [Poly.denote_le, Expr.toPoly]
attribute [local simp] ExprCnstr.denote_toPoly
theorem ExprCnstr.denote_toNormPoly (ctx : Context) (c : ExprCnstr) : c.toNormPoly.denote ctx = c.denote ctx := by
cases c; rename_i eq lhs rhs
simp [ExprCnstr.denote, PolyCnstr.denote, ExprCnstr.toNormPoly]
by_cases h : eq = true <;> simp [h]
· rw [Poly.denote_eq_cancel_eq]; simp [Poly.denote_eq, Expr.toNormPoly, Poly.norm]
· rw [Poly.denote_le_cancel_eq]; simp [Poly.denote_le, Expr.toNormPoly, Poly.norm]
attribute [local simp] ExprCnstr.denote_toNormPoly
theorem Poly.mul.go_denote (ctx : Context) (k : Nat) (p : Poly) : (Poly.mul.go k p).denote ctx = k * p.denote ctx := by
match p with
| [] => rfl
| (k', v) :: p => simp [Poly.mul.go, go_denote]
attribute [local simp] Poly.mul.go_denote
section
attribute [-simp] Nat.right_distrib Nat.left_distrib
theorem PolyCnstr.denote_mul (ctx : Context) (k : Nat) (c : PolyCnstr) : (c.mul (k+1)).denote ctx = c.denote ctx := by
cases c; rename_i eq lhs rhs
have : k ≠ 0 → k + 1 ≠ 1 := by intro h; match k with | 0 => contradiction | k+1 => simp; apply Nat.succ_ne_zero
have : ¬ (k == 0) → (k + 1 == 1) = false := fun h => beq_false_of_ne (this (ne_of_beq_false (Bool.of_not_eq_true h)))
have : ¬ ((k + 1 == 0) = true) := fun h => absurd (eq_of_beq h) (Nat.succ_ne_zero k)
have : (1 == (0 : Nat)) = false := rfl
have : (1 == (1 : Nat)) = true := rfl
by_cases he : eq = true <;> simp [he, PolyCnstr.mul, PolyCnstr.denote, Poly.denote_le, Poly.denote_eq]
<;> by_cases hk : k == 0 <;> (try simp [eq_of_beq hk]) <;> simp [*] <;> apply propext <;> apply Iff.intro <;> intro h
· exact Nat.eq_of_mul_eq_mul_left (Nat.zero_lt_succ _) h
· rw [h]
· exact Nat.le_of_mul_le_mul_left h (Nat.zero_lt_succ _)
· apply Nat.mul_le_mul_left _ h
end
attribute [local simp] PolyCnstr.denote_mul
theorem PolyCnstr.denote_combine {ctx : Context} {c₁ c₂ : PolyCnstr} (h₁ : c₁.denote ctx) (h₂ : c₂.denote ctx) : (c₁.combine c₂).denote ctx := by
cases c₁; cases c₂; rename_i eq₁ lhs₁ rhs₁ eq₂ lhs₂ rhs₂
simp [denote] at h₁ h₂
simp [PolyCnstr.combine, denote]
by_cases he₁ : eq₁ = true <;> by_cases he₂ : eq₂ = true <;> simp [he₁, he₂] at h₁ h₂ |-
· rw [Poly.denote_eq_cancel_eq]; simp [Poly.denote_eq] at h₁ h₂ |-; simp [h₁, h₂]
· rw [Poly.denote_le_cancel_eq]; simp [Poly.denote_eq, Poly.denote_le] at h₁ h₂ |-; rw [h₁]; apply Nat.add_le_add_left h₂
· rw [Poly.denote_le_cancel_eq]; simp [Poly.denote_eq, Poly.denote_le] at h₁ h₂ |-; rw [h₂]; apply Nat.add_le_add_right h₁
· rw [Poly.denote_le_cancel_eq]; simp [Poly.denote_eq, Poly.denote_le] at h₁ h₂ |-; apply Nat.add_le_add h₁ h₂
attribute [local simp] PolyCnstr.denote_combine
theorem Poly.isNum?_eq_some (ctx : Context) {p : Poly} {k : Nat} : p.isNum? = some k → p.denote ctx = k := by
simp [isNum?]
split
next => intro h; injection h
next k v => by_cases h : v == fixedVar <;> simp [h]; intros; simp [Var.denote, eq_of_beq h]; assumption
next => intros; contradiction
theorem Poly.of_isZero (ctx : Context) {p : Poly} (h : isZero p = true) : p.denote ctx = 0 := by
simp [isZero] at h
split at h
· simp
· contradiction
theorem Poly.of_isNonZero (ctx : Context) {p : Poly} (h : isNonZero p = true) : p.denote ctx > 0 := by
match p with
| [] => contradiction
| (k, v) :: p =>
by_cases he : v == fixedVar <;> simp [he, isNonZero] at h ⊢
· simp [eq_of_beq he, Var.denote]; apply Nat.lt_of_succ_le; exact Nat.le_trans h (Nat.le_add_right ..)
· have ih := of_isNonZero ctx h
exact Nat.le_trans ih (Nat.le_add_right ..)
theorem PolyCnstr.eq_false_of_isUnsat (ctx : Context) {c : PolyCnstr} : c.isUnsat → c.denote ctx = False := by
cases c; rename_i eq lhs rhs
simp [isUnsat]
by_cases he : eq = true <;> simp [he, denote, Poly.denote_eq, Poly.denote_le]
· intro
| Or.inl ⟨h₁, h₂⟩ => simp [Poly.of_isZero, h₁]; have := Nat.not_eq_zero_of_lt (Poly.of_isNonZero ctx h₂); simp [this.symm]
| Or.inr ⟨h₁, h₂⟩ => simp [Poly.of_isZero, h₂]; have := Nat.not_eq_zero_of_lt (Poly.of_isNonZero ctx h₁); simp [this]
· intro ⟨h₁, h₂⟩
simp [Poly.of_isZero, h₂]
have := Nat.not_eq_zero_of_lt (Poly.of_isNonZero ctx h₁)
simp [this]
done
theorem PolyCnstr.eq_true_of_isValid (ctx : Context) {c : PolyCnstr} : c.isValid → c.denote ctx = True := by
cases c; rename_i eq lhs rhs
simp [isValid]
by_cases he : eq = true <;> simp [he, denote, Poly.denote_eq, Poly.denote_le]
· intro ⟨h₁, h₂⟩
simp [Poly.of_isZero, h₁, h₂]
· intro h
simp [Poly.of_isZero, h]
theorem ExprCnstr.eq_false_of_isUnsat (ctx : Context) (c : ExprCnstr) (h : c.toNormPoly.isUnsat) : c.denote ctx = False := by
have := PolyCnstr.eq_false_of_isUnsat ctx h
simp at this
assumption
theorem ExprCnstr.eq_true_of_isValid (ctx : Context) (c : ExprCnstr) (h : c.toNormPoly.isValid) : c.denote ctx = True := by
have := PolyCnstr.eq_true_of_isValid ctx h
simp at this
assumption
theorem Certificate.of_combineHyps (ctx : Context) (c : PolyCnstr) (cs : Certificate) (h : (combineHyps c cs).denote ctx → False) : c.denote ctx → cs.denote ctx := by
match cs with
| [] => simp [combineHyps, denote] at *; exact h
| (k, c')::cs =>
intro h₁ h₂
have := PolyCnstr.denote_combine (ctx := ctx) (c₂ := PolyCnstr.mul (k + 1) (ExprCnstr.toNormPoly c')) h₁
simp at this
have := this h₂
have ih := of_combineHyps ctx (PolyCnstr.combine c (PolyCnstr.mul (k + 1) (ExprCnstr.toNormPoly c'))) cs
exact ih h this
theorem Certificate.of_combine (ctx : Context) (cs : Certificate) (h : cs.combine.denote ctx → False) : cs.denote ctx := by
match cs with
| [] => simp [combine, PolyCnstr.denote, Poly.denote_eq] at h
| (k, c)::cs =>
simp [denote, combine] at *
intro h'
apply of_combineHyps (h := h)
simp [h']
theorem Certificate.of_combine_isUnsat (ctx : Context) (cs : Certificate) (h : cs.combine.isUnsat) : cs.denote ctx :=
have h := PolyCnstr.eq_false_of_isUnsat ctx h
of_combine ctx cs (fun h' => Eq.mp h h')
theorem denote_monomialToExpr (ctx : Context) (k : Nat) (v : Var) : (monomialToExpr k v).denote ctx = k * v.denote ctx := by
simp [monomialToExpr]
by_cases h : v == fixedVar <;> simp [h, Expr.denote]
· simp [eq_of_beq h, Var.denote]
· by_cases h : k == 1 <;> simp [h, Expr.denote]; simp [eq_of_beq h]
attribute [local simp] denote_monomialToExpr
theorem Poly.denote_toExpr_go (ctx : Context) (e : Expr) (p : Poly) : (toExpr.go e p).denote ctx = e.denote ctx + p.denote ctx := by
induction p generalizing e with
| nil => simp [toExpr.go, Poly.denote]
| cons kv p ih => cases kv; simp [toExpr.go, ih, Expr.denote, Poly.denote]
attribute [local simp] Poly.denote_toExpr_go
theorem Poly.denote_toExpr (ctx : Context) (p : Poly) : p.toExpr.denote ctx = p.denote ctx := by
match p with
| [] => simp [toExpr, Expr.denote, Poly.denote]
| (k, v) :: p => simp [toExpr, Expr.denote, Poly.denote]
theorem ExprCnstr.eq_of_toNormPoly_eq (ctx : Context) (c d : ExprCnstr) (h : c.toNormPoly == d.toPoly) : c.denote ctx = d.denote ctx := by
have h := congrArg (PolyCnstr.denote ctx) (eq_of_beq h)
simp at h
assumption
theorem Expr.eq_of_toNormPoly_eq (ctx : Context) (e e' : Expr) (h : e.toNormPoly == e'.toPoly) : e.denote ctx = e'.denote ctx := by
have h := congrArg (Poly.denote ctx) (eq_of_beq h)
simp [Expr.toNormPoly, Poly.norm] at h
assumption
end Nat.Linear