source
stringlengths
17
118
lean4
stringlengths
0
335k
.lake/packages/mathlib/MathlibTest/CategoryTheory/Monoidal/Basic.lean
import Mathlib.Tactic.CategoryTheory.Monoidal.Basic open CategoryTheory Mathlib.Tactic BicategoryLike open MonoidalCategory universe v u variable {C : Type u} [Category.{v} C] [MonoidalCategory C] variable {X Y Z W : C} (f : X ⟶ Y) (g : Y ⟶ Z) example (f : U ⟶ V ⊗ (W ⊗ X)) (g : (V ⊗ W) ⊗ X ⟶ Y) : f ⊗≫ g = f ≫ (α_ _ _ _).inv ≫ g := by monoidal example (f : Z ⟶ W) : (X ⊗ Y) ◁ f = (α_ _ _ _).hom ≫ X ◁ Y ◁ f ≫ (α_ _ _ _).inv := by monoidal example : f ≫ g = f ≫ g := by monoidal example : (f ⊗ₘ g) ▷ X = (α_ _ _ _).hom ≫ (f ⊗ₘ g ▷ X) ≫ (α_ _ _ _).inv := by monoidal example {V₁ V₂ V₃ : C} (R : ∀ V₁ V₂ : C, V₁ ⊗ V₂ ⟶ V₂ ⊗ V₁) : R V₁ V₂ ▷ V₃ ⊗≫ V₂ ◁ R V₁ V₃ = R V₁ V₂ ▷ V₃ ≫ (α_ _ _ _).hom ⊗≫ 𝟙 _ ≫ V₂ ◁ R V₁ V₃ := by monoidal
.lake/packages/mathlib/MathlibTest/CategoryTheory/Monoidal/Normalize.lean
import Mathlib.Tactic.CategoryTheory.Monoidal.Normalize open CategoryTheory Mathlib.Tactic BicategoryLike open MonoidalCategory namespace CategoryTheory.MonoidalCategory /-- `normalize% η` is the normalization of the 2-morphism `η`. 1. The normalized 2-morphism is of the form `α₀ ≫ η₀ ≫ α₁ ≫ η₁ ≫ ... αₘ ≫ ηₘ ≫ αₘ₊₁` where each `αᵢ` is a structural 2-morphism (consisting of associators and unitors), 2. each `ηᵢ` is a non-structural 2-morphism of the form `f₁ ◁ ... ◁ fₘ ◁ θ`, and 3. `θ` is of the form `ι ▷ g₁ ▷ ... ▷ gₗ` -/ elab "normalize% " t:term:51 : term => do let e ← Lean.Elab.Term.elabTerm t none let ctx : Monoidal.Context ← BicategoryLike.mkContext e CoherenceM.run (ctx := ctx) do return (← BicategoryLike.eval `monoidal (← MkMor₂.ofExpr e)).expr.e.e universe v u variable {C : Type u} [Category.{v} C] [MonoidalCategory C] variable {X Y Z W : C} (f : X ⟶ Y) (g : Y ⟶ Z) #guard_expr normalize% X ◁ 𝟙 Y = (whiskerLeftIso X (Iso.refl Y)).hom #guard_expr normalize% 𝟙 X ▷ Y = (whiskerRightIso (Iso.refl X) Y).hom #guard_expr normalize% X ◁ (f ≫ g) = _ ≫ X ◁ f ≫ _ ≫ X ◁ g ≫ _ #guard_expr normalize% (f ≫ g) ▷ Y = _ ≫ f ▷ Y ≫ _ ≫ g ▷ Y ≫ _ #guard_expr normalize% 𝟙_ C ◁ f = _ ≫ f ≫ _ #guard_expr normalize% (X ⊗ Y) ◁ f = _ ≫ X ◁ Y ◁ f ≫ _ #guard_expr normalize% f ▷ 𝟙_ C = _ ≫ f ≫ _ #guard_expr normalize% f ▷ (X ⊗ Y) = _ ≫ f ▷ X ▷ Y ≫ _ #guard_expr normalize% (X ◁ f) ▷ Y = _ ≫ X ◁ f ▷ Y ≫ _ #guard_expr normalize% (λ_ X).hom = (λ_ X).hom #guard_expr normalize% (λ_ X).inv = ((λ_ X).symm).hom #guard_expr normalize% (ρ_ X).hom = (ρ_ X).hom #guard_expr normalize% (ρ_ X).inv = ((ρ_ X).symm).hom #guard_expr normalize% (α_ X Y Z).hom = (α_ _ _ _).hom #guard_expr normalize% (α_ X Y Z).inv = ((α_ X Y Z).symm).hom #guard_expr normalize% 𝟙 (X ⊗ Y) = (Iso.refl (X ⊗ Y)).hom #guard_expr normalize% f ⊗ₘ g = _ ≫ (f ⊗ₘ g) ≫ _ variable {V₁ V₂ V₃ : C} (R : ∀ V₁ V₂ : C, V₁ ⊗ V₂ ⟶ V₂ ⊗ V₁) in #guard_expr normalize% R V₁ V₂ ▷ V₃ ⊗≫ V₂ ◁ R V₁ V₃ = _ ≫ R V₁ V₂ ▷ V₃ ≫ _ ≫ V₂ ◁ R V₁ V₃ ≫ _ end 
CategoryTheory.MonoidalCategory
.lake/packages/mathlib/MathlibTest/CategoryTheory/Bicategory/Basic.lean
import Mathlib.Tactic.CategoryTheory.Bicategory.Basic open CategoryTheory Mathlib.Tactic BicategoryLike open Bicategory universe w v u variable {B : Type u} [Bicategory.{w, v} B] variable {a b c d : B} example {f j : a ⟶ d} {g : a ⟶ b} {h : b ⟶ c} {i : c ⟶ d} (η : f ⟶ g ≫ (h ≫ i)) (θ : (g ≫ h) ≫ i ⟶ j) : η ⊗≫ θ = η ≫ (α_ _ _ _).inv ≫ θ := by bicategory example {f : a ⟶ b} {g : b ⟶ c} {h i : c ⟶ d} (η : h ⟶ i) : (f ≫ g) ◁ η = (α_ _ _ _).hom ≫ f ◁ g ◁ η ≫ (α_ _ _ _).inv := by bicategory example {f g h : a ⟶ b} {η : f ⟶ g} {θ : g ⟶ h} : η ≫ θ = η ≫ θ := by bicategory
.lake/packages/mathlib/MathlibTest/CategoryTheory/Bicategory/Normalize.lean
import Mathlib.Tactic.CategoryTheory.Bicategory.Normalize open CategoryTheory Mathlib.Tactic BicategoryLike open Bicategory namespace CategoryTheory.Bicategory /-- `normalize% η` is the normalization of the 2-morphism `η`. 1. The normalized 2-morphism is of the form `α₀ ≫ η₀ ≫ α₁ ≫ η₁ ≫ ... αₘ ≫ ηₘ ≫ αₘ₊₁` where each `αᵢ` is a structural 2-morphism (consisting of associators and unitors), 2. each `ηᵢ` is a non-structural 2-morphism of the form `f₁ ◁ ... ◁ fₘ ◁ θ`, and 3. `θ` is of the form `ι ▷ g₁ ▷ ... ▷ gₗ` -/ elab "normalize% " t:term:51 : term => do let e ← Lean.Elab.Term.elabTerm t none let ctx : Bicategory.Context ← BicategoryLike.mkContext e CoherenceM.run (ctx := ctx) do return (← BicategoryLike.eval `bicategory (← MkMor₂.ofExpr e)).expr.e.e universe w v u variable {B : Type u} [Bicategory.{w, v} B] variable {a b c d e : B} variable {f : a ⟶ b} {g : b ⟶ c} in #guard_expr normalize% f ◁ 𝟙 g = (whiskerLeftIso f (Iso.refl g)).hom variable {f : a ⟶ b} {g : b ⟶ c} in #guard_expr normalize% 𝟙 f ▷ g = (whiskerRightIso (Iso.refl f) g).hom variable {f : a ⟶ b} {g h i : b ⟶ c} {η : g ⟶ h} {θ : h ⟶ i} in #guard_expr normalize% f ◁ (η ≫ θ) = _ ≫ f ◁ η ≫ _ ≫ f ◁ θ ≫ _ variable {f g h : a ⟶ b} {i : b ⟶ c} {η : f ⟶ g} {θ : g ⟶ h} in #guard_expr normalize% (η ≫ θ) ▷ i = _ ≫ η ▷ i ≫ _ ≫ θ ▷ i ≫ _ variable {η : 𝟙 a ⟶ 𝟙 a} in #guard_expr normalize% 𝟙 a ◁ η = _ ≫ η ≫ _ variable {f : a ⟶ b} {g : b ⟶ c} {h i : c ⟶ d} {η : h ⟶ i} in #guard_expr normalize% (f ≫ g) ◁ η = _ ≫ f ◁ g ◁ η ≫ _ variable {η : 𝟙 a ⟶ 𝟙 a} in #guard_expr normalize% η ▷ 𝟙 a = _ ≫ η ≫ _ variable {f g : a ⟶ b} {h : b ⟶ c} {i : c ⟶ d} {η : f ⟶ g} in #guard_expr normalize% η ▷ (h ≫ i) = _ ≫ η ▷ h ▷ i ≫ _ variable {f : a ⟶ b} {g h : b ⟶ c} {i : c ⟶ d} {η : g ⟶ h} in #guard_expr normalize% (f ◁ η) ▷ i = _ ≫ f ◁ η ▷ i ≫ _ variable {f : a ⟶ b} in #guard_expr normalize% (λ_ f).hom = (λ_ f).hom variable {f : a ⟶ b} in #guard_expr normalize% (λ_ f).inv = ((λ_ f).symm).hom variable {f : a ⟶ b} in #guard_expr normalize% 
(ρ_ f).hom = (ρ_ f).hom variable {f : a ⟶ b} in #guard_expr normalize% (ρ_ f).inv = ((ρ_ f).symm).hom variable {f : a ⟶ b} {g : b ⟶ c} {h : c ⟶ d} in #guard_expr normalize% (α_ f g h).hom = (α_ _ _ _).hom variable {f : a ⟶ b} {g : b ⟶ c} {h : c ⟶ d} in #guard_expr normalize% (α_ f g h).inv = ((α_ f g h).symm).hom variable {f : a ⟶ b} {g : b ⟶ c} in #guard_expr normalize% 𝟙 (f ≫ g) = (Iso.refl (f ≫ g)).hom end CategoryTheory.Bicategory
.lake/packages/mathlib/MathlibTest/search/BestFirst.lean
import Mathlib.Deprecated.MLList.BestFirst import Mathlib.Data.Nat.Basic /-! # Testing best first search and beam search. We check that `bestFirstSearch` can find its way around a wall. -/ set_option linter.deprecated false open Lean MLList Function def Point := Int × Int deriving Repr def wall : Point → Bool := fun ⟨x, y⟩ => x ≤ 3 || y ≤ 3 || x ≥ 20 || y ≥ 20 || (x ≥ 6 && y ≥ 6) def nbhd : Point → MLList MetaM Point := fun ⟨x, y⟩ => MLList.ofList ([(x+1,y), (x-1,y), (x,y+1), (x,y-1)].filter wall) def emb : Point → Nat ×ₗ (Int ×ₗ Int) | (x, y) => (x.natAbs^2 + y.natAbs^2, x, y) lemma emb_injective : Injective emb := fun ⟨x, y⟩ ⟨w, z⟩ h => by injection h instance : LinearOrder Point := LinearOrder.lift' _ emb_injective run_cmd Elab.Command.liftTermElabM do let r := (← bestFirstSearch nbhd (10, 10) (maxQueued := some 4) |>.takeUpToFirst (· = (0,0)) |>.force) if r ≠ [(10, 10), (11, 10), (9, 10), (8, 10), (7, 10), (6, 10), (6, 11), (6, 9), (7, 9), (6, 8), (7, 8), (6, 7), (7, 7), (6, 6), (7, 6), (8, 6), (8, 7), (9, 6), (9, 7), (8, 8), (10, 6), (9, 8), (8, 9), (10, 7), (11, 6), (9, 9), (11, 7), (10, 8), (12, 6), (10, 9), (11, 8), (13, 6), (12, 7), (11, 9), (12, 8), (13, 7), (12, 9), (13, 8), (14, 7), (13, 9), (12, 10), (14, 8), (13, 10), (12, 11), (15, 7), (14, 6), (15, 6), (15, 8), (14, 9), (16, 6), (15, 9), (14, 10), (16, 8), (17, 6), (16, 7), (15, 10), (14, 11), (17, 7), (15, 11), (13, 11), (13, 12), (14, 12), (12, 12), (11, 12), (10, 12), (9, 12), (8, 12), (7, 12), (6, 12), (6, 13), (7, 13), (7, 11), (8, 11), (9, 11), (10, 11), (6, 14), (11, 11), (7, 14), (6, 15), (8, 14), (7, 15), (9, 14), (8, 15), (8, 13), (9, 13), (10, 13), (6, 16), (11, 13), (10, 14), (12, 13), (11, 14), (7, 16), (6, 17), (10, 15), (8, 16), (7, 17), (9, 16), (8, 17), (6, 18), (10, 16), (9, 17), (9, 15), (8, 18), (11, 16), (10, 17), (12, 16), (11, 17), (11, 15), (12, 15), (13, 15), (12, 14), (13, 14), (14, 14), (13, 13), (14, 13), (15, 13), (16, 13), (15, 14), (15, 12), (16, 12), (17, 12), (16, 
11), (17, 11), (16, 10), (17, 10), (16, 9), (17, 9), (18, 9), (17, 8), (18, 8), (19, 8), (18, 7), (19, 7), (18, 6), (19, 6), (20, 6), (21, 6), (20, 7), (20, 5), (21, 5), (20, 4), (21, 4), (20, 3), (21, 3), (19, 3), (18, 3), (17, 3), (16, 3), (15, 3), (14, 3), (13, 3), (12, 3), (11, 3), (10, 3), (9, 3), (8, 3), (7, 3), (6, 3), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3), (-1, 3), (0, 4), (0, 2), (1, 2), (-1, 2), (0, 1), (1, 1), (-1, 1), (0, 0)] then throwError "Test failed!"
.lake/packages/mathlib/MathlibTest/GCongr/inequalities.lean
import Mathlib.Algebra.Order.BigOperators.Ring.Finset import Mathlib.Algebra.Order.Field.Basic import Mathlib.Data.Finset.Lattice.Fold import Mathlib.Tactic.Linarith import Mathlib.Tactic.GCongr import Mathlib.Tactic.SuccessIfFailWithMsg import Mathlib.Tactic.NormNum.OfScientific namespace GCongrTests private axiom test_sorry : ∀ {α}, α /-! # Inequality tests for the `gcongr` tactic -/ open Nat Finset -- We deliberately mock `ℝ` here so that we don't have to import the dependencies axiom Real : Type notation "ℝ" => Real @[instance] axiom Real.field : Field ℝ @[instance] axiom Real.linearOrder : LinearOrder ℝ @[instance] axiom Real.isStrictOrderedRing : IsStrictOrderedRing ℝ /-! ## Examples as a finishing tactic -/ example {x : ℤ} (hx : x ≥ 12) : x * x ^ 2 ≥ 12 * x ^ 2 := by gcongr example {x y : ℤ} (hx : x ≥ 12) : y + x * x ≥ y + 12 * x := by gcongr example {x y : ℤ} (hx : x ≥ 12) : y + x * x ≥ y + 12 * x := by rel [hx] example {x : ℤ} (hx : x > 12) : x * x ^ 2 > 12 * x ^ 2 := by gcongr example {x y : ℤ} (hx : x > 12) : y + x * x > y + 12 * x := by gcongr -- not solved by `nlinarith` because of the cube example {n m : ℤ} (hn : n ≥ 10) : n * n ^ 3 - m ≥ 10 * n ^ 3 - m := by gcongr example {k m n : ℤ} (hn : n ≥ 10) : m + 7 * n * n ^ 2 - k ≥ m + 7 * 10 * n ^ 2 - k := by gcongr example {k m n : ℤ} (hn : n ≥ 10) : m + 7 * n * n ^ 2 - k ≥ m + 7 * 10 * n ^ 2 - k := by rel [hn] example {x : ℤ} (hx : x ≥ 12) : x ≥ 12 := by gcongr example {x y : ℤ} (hx : x ≥ 12) : y + 8 * x ≥ y + 8 * 12 := by gcongr example {a b x c d : ℝ} (h1 : a ≤ b) (h2 : c ≤ d) : x ^ 2 * a + c ≤ x ^ 2 * b + d := by rel [h1, h2] -- not solved by `nlinarith` because of the cube and the absolute value example {a b c x y : ℤ} (hb : b ≥ 4) (hxy : x ≤ y) : c + (3 * |a| ^ 3 * b + b * 7 + 14) * x ≤ c + (3 * |a| ^ 3 * b + b * 7 + 14) * y := by gcongr example {x y : ℤ} (hy : 3 ≤ y) (hx : x ≥ 9) : y + 2 * x ≥ 3 + 2 * 9 := by gcongr example {b : ℤ} (h2 : b ≥ 3) : 2 * b + 5 ≥ 2 * 3 + 5 := by gcongr example {x : ℝ} 
(h1 : x ≤ 3) : 4 * x - 3 ≤ 4 * 3 - 3 := by gcongr example {x : ℝ} (h : x < 1) : 3 * x ≤ 3 * 1 := by gcongr example {x : ℝ} (h1 : x < 3) : 4 * x - 3 < 4 * 3 - 3 := by gcongr example {x : ℝ} (h : x < 1) : 3 * x < 3 * 1 := by gcongr example {x y : ℝ} (h1 : 1 ≤ y) (h2 : x < 2) : x * y ≤ 2 * y := by gcongr -- for this test to pass, need the check to ensure that leading function application is -- syntactically (not just definitionally) the same on both sides. example {a b c : ℚ} (h2 : 2 ≤ a + b) : 2 + c ≤ (a + b) + c := by gcongr -- for this test to pass, need to ensure it's treated as a division, not a multiplication example {a b : ℚ} (h1 : 3 ≤ a) (h2 : 4 ≤ b) : (3 + 4) / 2 ≤ (a + b) / 2 := by gcongr -- for this test to pass, need to ensure it's treated as a division, not a multiplication example {a : ℚ} (h1 : 3 ≤ a) : 3 / 2 ≤ a / 2 := by gcongr example {a : ℝ} (h1 : 3 ≤ a) : 3 / 2 ≤ a / 2 := by gcongr example {x y : ℝ} (h : 3 ≤ x) (h' : 1 ≤ y) : (3 + 1) / 2 ≤ (x + y) / 2 := by gcongr example {x : ℝ} (h : x ≤ 3) : 0.1 * x ≤ 0.1 * 3 := by gcongr example {x : ℝ} (h : x ≤ 3) : x / 10 ≤ 3 / 10 := by gcongr example {x : ℝ} (h : x ≤ 3) : 1 / 10 * x ≤ 1 / 10 * 3 := by gcongr example (a b c d : ℕ) (h1 : a ≤ b) (h2 : c ≤ d) : a * c ≤ b * d := by gcongr -- this tests that the tactic prioritizes applying hypotheses (such as, here, `0 ≤ a ^ 6`) over the -- greedy application of nonnegativity lemmas example {a b : ℚ} (h : 0 ≤ a ^ 6) : 0 + b ≤ a ^ 6 + b := by gcongr example {a b : ℚ} (h₁ : a ≤ b) (c : ℝ) : (a + c : ℝ) ≤ b + c := by gcongr example {a b : ℚ} (h₁ : a < b) (c : ℝ) : (a + c : ℝ) < b + c := by gcongr -- another priority test example {k m n : ℤ} (H : m ^ 2 ≤ n ^ 2) : k + m ^ 2 ≤ k + n ^ 2 := by gcongr -- test of behaviour when no lemmas are applicable example (n k : ℕ) (H : n % k + 1 ≤ k % n + 1) : n % k ≤ k % n := by success_if_fail_with_msg "gcongr did not make progress" (gcongr) linarith set_option linter.unusedVariables false in example {x : ℤ} (hx : x ≥ 12) (h : Even 
x) : Even x := by success_if_fail_with_msg "rel failed, goal not a relation" (rel [hx]) exact h example {a b x c d : ℝ} (h1 : a ≤ b) (h2 : c ≤ d) (h3 : 1 ≤ x + 1) : x * a + c ≤ x * b + d := by success_if_fail_with_msg "rel failed, cannot prove goal by 'substituting' the listed relationships. \ The steps which could not be automatically justified were:\n0 ≤ x\nc ≤ d" (rel [h1]) have : 0 ≤ x := by linarith rel [h1, h2] -- test for a missing `withContext` example {x y : ℚ} {n : ℕ} (hx : 0 ≤ x) (hn : 0 < n) : y ≤ x := by have h : x < y := test_sorry have _this : x ^ n < y ^ n := by rel [h] -- before bugfix: complained "unknown identifier 'h'" exact test_sorry /-! ## Non-finishing examples -/ example {a b x c d : ℝ} (h1 : a + 1 ≤ b + 1) (h2 : c + 2 ≤ d + 2) : x ^ 2 * a + c ≤ x ^ 2 * b + d := by gcongr <;> linarith example {a b c d x : ℝ} (h : a + c + 1 ≤ b + d + 1) : x ^ 2 * (a + c) + 5 ≤ x ^ 2 * (b + d) + 5 := by gcongr x ^ 2 * ?_ + 5 linarith example {x y z : ℝ} (h : 2 ≤ z) : z * |x + y| ≤ z * (|x| + |y|) := by gcongr; apply abs_add_le example (A B C : ℝ) : |A + B| + C ≤ |A| + |B| + C := by gcongr; apply abs_add_le example (A B C : ℝ) : |A + B| + C ≤ |A| + |B| + C := by gcongr ?_ + _; apply abs_add_le example (A B C : ℝ) : |A + B| + C ≤ |A| + |B| + C := by gcongr ?_ + (C : ℝ); apply abs_add_le example {n i : ℕ} (hi : i ∈ range n) : 2 ^ i ≤ 2 ^ n := by gcongr · norm_num · apply le_of_lt simpa using hi example {n' : ℕ} (hn' : 6 ≤ n') : 2 ^ ((n' + 1) * (n' + 1)) ≤ 2 ^ (n' * n' + 4 * n') := by gcongr · norm_num · linarith example {F : ℕ → ℕ} (le_sum : ∀ {N : ℕ}, 6 ≤ N → 15 ≤ F N) {n' : ℕ} (hn' : 6 ≤ n') : let A := F n'; A ! * (15 + 1) ^ n' ≤ A ! 
* (A + 1) ^ n' := by intro A gcongr exact le_sum hn' example {a : ℤ} {n : ℕ} (ha : ∀ i < n, 2 ^ i ≤ a) : ∏ i ∈ range n, (a - 2 ^ i) ≤ ∏ _i ∈ range n, a := by gcongr with i · intro i hi simp only [mem_range] at hi linarith [ha i hi] · have : 0 ≤ 2 ^ i := by positivity linarith -- this tests that the match goes only as deep as is indicated by the template example {a b c d e : ℝ} (_h1 : 0 ≤ b) (_h2 : 0 ≤ c) (hac : a * b + 1 ≤ c * d + 1) (_hbd : b ≤ d) : a * b + e ≤ c * d + e := by gcongr ?_ + _ guard_target =ₛ a * b ≤ c * d linarith -- test big operators example (f g : ℕ → ℕ) (s : Finset ℕ) (h : ∀ i ∈ s, f i ≤ g i) : ∑ i ∈ s, (3 + f i ^ 2) ≤ ∑ i ∈ s, (3 + g i ^ 2) := by gcongr with i hi exact h i hi -- this tests templates with binders example (f g : ℕ → ℕ) (s : Finset ℕ) (h : ∀ i ∈ s, f i ^ 2 + 1 ≤ g i ^ 2 + 1) : ∑ i ∈ s, f i ^ 2 ≤ ∑ i ∈ s, g i ^ 2 := by gcongr ∑ _i ∈ s, ?_ with i hi linarith [h i hi] -- this tests templates with binders example (f g : ℕ → ℕ) (s : Finset ℕ) (h : ∀ i ∈ s, f i ^ 2 + 1 ≤ g i ^ 2 + 1) : ∑ i ∈ s, (3 + f i ^ 2) ≤ ∑ i ∈ s, (3 + g i ^ 2) := by gcongr ∑ _i ∈ s, (3 + ?_) with i hi linarith [h i hi] example (f g : ℕ → ℕ) (s : Finset ℕ) (h : ∀ i ∈ s, f i ^ 2 + 1 ≤ g i ^ 2 + 1) : ∑ i ∈ s, (f i ^ 2 / 5) ≤ ∑ i ∈ s, (g i ^ 2 / 5) := by gcongr 2 with i hi linarith [h i hi] axiom f : ℕ → ℕ example {x y : ℕ} (h : f x ≤ f y) : f x ≤ f y := by success_if_fail_with_msg "Tactic `gcongr` failed: there is no `@[gcongr]` lemma for relation 'LE.le' and constant 'GCongrTests.f'. x y : ℕ h : GCongrTests.f x ≤ GCongrTests.f y ⊢ GCongrTests.f x ≤ GCongrTests.f y" (gcongr f ?a) exact h example {x y : ℕ} (h : f x ≤ f y) : f x ^ 2 ≤ f y ^ 2 := by success_if_fail_with_msg "Tactic `gcongr` failed: there is no `@[gcongr]` lemma for relation 'LE.le' and constant 'GCongrTests.f'. 
case hab x y : ℕ h : GCongrTests.f x ≤ GCongrTests.f y ⊢ GCongrTests.f x ≤ GCongrTests.f y" (gcongr (f ?a) ^ 2) gcongr example (s : Finset ℕ) (h : ∀ i ∈ s, f i ≤ f (2 * i)) : ∑ i ∈ s, f i ≤ ∑ i ∈ s, f (2 * i) := by gcongr apply h assumption def dontUnfoldMe : Nat → List Bool → Nat | 0, _ => 0 | n + 1, l => dontUnfoldMe n (true::l) + dontUnfoldMe n (false::l) -- times out if a certain reducibility setting in `gcongr`'s implementation is not correct example {x y : ℕ} (h : x ≤ y) (l) : dontUnfoldMe 14 l + x ≤ 0 + y := by gcongr guard_target = dontUnfoldMe 14 l ≤ 0 apply test_sorry /-! Test that `gcongr` works well with proof arguments -/ example {α β : Type*} [SemilatticeSup α] (f : β → α) {s₁ s₂ : Finset β} (h : s₁ ⊆ s₂) (h₁ : s₁.Nonempty) : s₁.sup' h₁ f ≤ s₂.sup' (h₁.mono h) f := by gcongr example {α β : Type*} [SemilatticeSup α] (f : β → α) {s₁ s₂ : Finset β} (h : s₁ ⊆ s₂) (h₁ : s₁.Nonempty) (h₂ : s₂.Nonempty) : s₁.sup' h₁ f ≤ s₂.sup' h₂ f := by gcongr /-! Test that `gcongr` can solve side goals of the form `∀ i, f i` when `f i` is in scope for `positivity` -/ example {ι : Type*} [Fintype ι] {f g : ι → ℝ} : ∏ i, f i ^ 2 ≤ ∏ i, g i ^ 2 := by gcongr with i _ i _ · guard_target = 0 ≤ f i exact test_sorry · guard_target = f i ≤ g i exact test_sorry /-! Test that `gcongr` can deal with `_ ≤ _ → _ ≤ _` and `_ < _ → _ < _` -/ example {a b : ℕ} (h1 : a ≤ 0) (h2 : 0 ≤ b) : b ≤ a + 1 → 0 ≤ 0 + 1 := by gcongr example {a b : ℕ} (h1 : a ≤ 0) (_h2 : 0 ≤ b) : b ≤ a + 1 → b ≤ 0 + 1 := by gcongr example {a b : ℕ} (_h1 : a ≤ 0) (h2 : 0 ≤ b) : b ≤ a + 1 → 0 ≤ a + 1 := by gcongr example {a b : ℕ} (h1 : a ≤ 0) (h2 : 0 ≤ b) : b < a + 1 → 0 < 0 + 1 := by gcongr example {a b : ℕ} (h1 : a ≤ 0) (_h2 : 0 ≤ b) : b < a + 1 → b < 0 + 1 := by gcongr example {a b : ℕ} (_h1 : a ≤ 0) (h2 : 0 ≤ b) : b < a + 1 → 0 < a + 1 := by gcongr /-! Test that `gcongr` with a pattern doesn't complain about type class inference problems. 
-/ example {a b : ℕ} (h1 : a ≤ 0) (h2 : 0 ≤ b) : b ≤ a + 1 → 0 ≤ 0 + 1 := by gcongr ?_ ≤ ?_ + _ /-! Test that `Monotone` and friends can be tagged with `@[gcongr]` -/ def myCons (_ : Nat) : Nat := 0 theorem myCons_monotone : Monotone myCons := fun _ _ _ => le_rfl theorem myCons_monotone' : Monotone (myCons ·) := fun _ _ _ => le_rfl theorem myCons_antitone : Antitone myCons := fun _ _ _ => le_rfl theorem myCons_monotoneOn : MonotoneOn myCons (Set.Ioi 0) := fun _ _ _ _ _ => le_rfl theorem myCons_antitoneOn : AntitoneOn myCons (Set.Ioi 0) := fun _ _ _ _ _ => le_rfl attribute [local gcongr] myCons_monotone in example : myCons 4 ≤ myCons 5 := by gcongr; simp attribute [local gcongr] myCons_monotone' in example : myCons 4 ≤ myCons 5 := by gcongr; simp attribute [local gcongr] myCons_antitone in example : myCons 6 ≤ myCons 5 := by gcongr; simp attribute [local gcongr] myCons_monotoneOn in example : myCons 4 ≤ myCons 5 := by gcongr <;> simp attribute [local gcongr] myCons_antitoneOn in example : myCons 6 ≤ myCons 5 := by gcongr <;> simp def myMono (n : Nat) : Nat := n theorem myMono_strictMono : StrictMono myMono := fun _ _ => id theorem myMono_strictMonoOn : StrictMonoOn myMono (Set.Ioi 0) := fun _ _ _ _ => id attribute [local gcongr] myMono_strictMono in example : myMono 4 < myMono 5 := by gcongr; simp attribute [local gcongr] myMono_strictMonoOn in example : myMono 4 < myMono 5 := by gcongr <;> simp def myAnti (n : Int) : Int := -n theorem myAnti_strictAnti : StrictAnti myAnti := fun _ _ => neg_lt_neg theorem myAnti_strictAntiOn : StrictAntiOn myAnti (Set.Ioi 0) := fun _ _ _ _ => neg_lt_neg attribute [local gcongr] myAnti_strictAnti in example : myAnti 6 < myAnti 5 := by gcongr; simp attribute [local gcongr] myAnti_strictAntiOn in example : myAnti 6 < myAnti 5 := by gcongr <;> simp end GCongrTests
.lake/packages/mathlib/MathlibTest/GCongr/implications.lean
import Mathlib.Tactic.GCongr variable {a b c d : Prop} example (h : a → b) : (a ∧ ¬b) ∨ c → (b ∧ ¬a) ∨ c := by gcongr example (h : a → b) : (a ∧ ¬b) ∨ c → (b ∧ ¬a) ∨ c := by gcongr ?_ ∧ ¬?_ ∨ c example (h : d → b) : (a ∨ b ∧ c → d) → (a ∨ d ∧ c → b) := by gcongr example (h : d → b) : (a ∨ b ∧ c → d) → (a ∨ d ∧ c → b) := by gcongr a ∨ ?_ ∧ c → ?_ example (h : a → b) : ¬ ¬ ¬ b → ¬ ¬ ¬ a := by gcongr example (h : a → b) : ¬ ¬ ¬ b → ¬ ¬ ¬ a := by gcongr ¬ ¬ ¬ ?_ example (h : a → b) : (∃ i, ∀ j, i ∧ b → j) → (∃ i, ∀ j, i ∧ a → j) := by gcongr example (h : a → b) : (∃ i, ∀ j, i ∧ b → j) → (∃ i, ∀ j, i ∧ a → j) := by gcongr ∃ i, ∀ j, i ∧ ?_ → j example (h : c → b) : (a → b → c) → (a → b → b) := by gcongr 1 guard_target =ₛ (b → c) → (b → b) gcongr 1 /-- error: gcongr did not make progress -/ #guard_msgs in example (h : ∀ n : Nat, 0 ≤ n) : ∀ n : Int, 0 ≤ n := by revert h gcongr
.lake/packages/mathlib/MathlibTest/GCongr/GCongr.lean
import Mathlib.MeasureTheory.Measure.MeasureSpace /-! # Testing for the `gcongr` tactic -/ namespace GCongrTests /- Test that `gcongr` lemmas are applied in the `reducible` transparency by default. Previously, `DFunLike.coe` would be unfolded when applying a `@[gcongr]` lemma. -/ section transparency open MeasureTheory variable {α : Type*} (a : Set α) {μ : OuterMeasure α} {μ' : OuterMeasure α} @[gcongr high] lemma mono_outerMeasure (h : μ ≤ μ') : μ a ≤ μ' a := h a example (h : μ ≤ μ') : μ a ≤ μ' a := by gcongr variable [MeasurableSpace α] {ν : Measure α} {ν' : Measure α} @[gcongr] lemma mono_measure (h : ν ≤ ν') : ν a ≤ ν' a := h a example (h : ν ≤ ν') : ν a ≤ ν' a := by gcongr end transparency /- Test that a more general `@[gcongr]` lemma always applies, and the resulting reflexive goals are closed with `rfl`. -/ section rfl axiom myAdd : Nat → Nat → Nat axiom rel : Nat → Nat → Prop local infix:50 "~" => rel variable {a b c d : Nat} @[gcongr] axiom myAdd_mono : a ~ c → b ~ d → myAdd a b ~ myAdd c d axiom myAdd_rfl : a ~ a /-- error: unsolved goals case a a b c d : ℕ h : b~d ⊢ a~a case a.a a b c d : ℕ h : b~d ⊢ c~c -/ #guard_msgs in example (h : b ~ d) : myAdd a (myAdd b c) ~ myAdd a (myAdd d c) := by gcongr /-- error: Tactic `gcongr` failed: subgoal a~a is not allowed by the provided pattern and is not closed by `rfl` case a a b c d : ℕ h : b~d ⊢ a~a -/ #guard_msgs in example (h : b ~ d) : myAdd a (myAdd b c) ~ myAdd a (myAdd d c) := by gcongr myAdd _ (myAdd ?_ _) attribute [refl] myAdd_rfl example (h : b ~ d) : myAdd a (myAdd b c) ~ myAdd a (myAdd d c) := by gcongr example (h : b ~ d) : myAdd a (myAdd b c) ~ myAdd a (myAdd d c) := by gcongr myAdd _ (myAdd ?_ _) end rfl end GCongrTests
.lake/packages/mathlib/MathlibTest/GCongr/mod.lean
import Mathlib.Data.Int.ModEq /-! # Modular arithmetic tests for the `gcongr` tactic -/ variable {a b n x : ℤ} example (ha : a ≡ 2 [ZMOD 4]) : a * b ^ 2 + a ^ 2 * b + 3 ≡ 2 * b ^ 2 + 2 ^ 2 * b + 3 [ZMOD 4] := by gcongr example (ha : a ≡ 4 [ZMOD 5]) (hb : b ≡ 3 [ZMOD 5]) : a * b + b ^ 3 + 3 ≡ 4 * 3 + 3 ^ 3 + 3 [ZMOD 5] := by gcongr example (hb : 3 ≡ b [ZMOD 5]) : b ^ 2 ≡ 3 ^ 2 [ZMOD 5] := by gcongr example (hb : 3 ≡ b [ZMOD 5]) : b ^ 2 ≡ 3 ^ 2 [ZMOD 5] := by gcongr ?_ ^ 2 example (hb : 3 ≡ b [ZMOD 5]) : b ^ 2 ≡ 3 ^ 2 [ZMOD 5] := by rel [hb] example (hx : x ≡ 0 [ZMOD 3]) : x ^ 3 ≡ 0 ^ 3 [ZMOD 3] := by gcongr example (hx : x ≡ 0 [ZMOD 3]) : x ^ 3 ≡ 0 ^ 3 [ZMOD 3] := by gcongr ?_ ^ 3 example (hx : x ≡ 2 [ZMOD 3]) : x ^ 3 ≡ 2 ^ 3 [ZMOD 3] := by gcongr example (hn : n ≡ 1 [ZMOD 3]) : n ^ 3 + 7 * n ≡ 1 ^ 3 + 7 * 1 [ZMOD 3] := by gcongr example (hn : n ≡ 1 [ZMOD 3]) : n ^ 3 + 7 * n ≡ 1 ^ 3 + 7 * 1 [ZMOD 3] := by gcongr ?_ ^ 3 + 7 * ?_ example (hn : n ≡ 1 [ZMOD 3]) : n ^ 3 + 7 * n ≡ 1 ^ 3 + 7 * 1 [ZMOD 3] := by rel [hn] example (hn : n ≡ 1 [ZMOD 2]) : 5 * n ^ 2 + 3 * n + 7 ≡ 5 * 1 ^ 2 + 3 * 1 + 7 [ZMOD 2] := by gcongr example (hx : x ≡ 3 [ZMOD 5]) : x ^ 5 ≡ 3 ^ 5 [ZMOD 5] := by gcongr /-! Test that `gcongr` can deal with `_ ≡ _ [ZMOD _] → _ ≡ _ [ZMOD _]` -/ variable {a b : ℤ} example (h1 : 0 ≡ a [ZMOD 7]) (h2 : 0 ≡ b [ZMOD 7]) : b ≡ a + 1 [ZMOD 7] → 0 ≡ 0 + 1 [ZMOD 7] := by gcongr example (h1 : 0 ≡ a [ZMOD 7]) (_h2 : 0 ≡ b [ZMOD 7]) : b ≡ a + 1 [ZMOD 7] → b ≡ 0 + 1 [ZMOD 7] := by gcongr example (_h1 : 0 ≡ a [ZMOD 7]) (h2 : 0 ≡ b [ZMOD 7]) : b ≡ a + 1 [ZMOD 7] → 0 ≡ a + 1 [ZMOD 7] := by gcongr example (h : True → a ^ 2 ≡ b ^ 2 [ZMOD 5]) : a ^ 2 * n + x ≡ b ^2 * n + x [ZMOD 5] := by gcongr 2 exact h trivial
.lake/packages/mathlib/MathlibTest/Bound/attribute.lean
import Mathlib.Algebra.Order.Field.Basic import Mathlib.Tactic.Bound /-! ## Tests for the `@bound` attribute Verify that our heuristic for the priority of a declaration produces the expected values. -/ open Mathlib.Tactic.Bound (declPriority) /-- info: 0 -/ #guard_msgs in #eval declPriority `le_refl /-- info: 0 -/ #guard_msgs in #eval declPriority `sq_nonneg /-- info: 11 -/ #guard_msgs in #eval declPriority `Bound.one_lt_div_of_pos_of_lt /-- info: 141 -/ #guard_msgs in #eval declPriority `Bound.pow_le_pow_right_of_le_one_or_one_le
.lake/packages/mathlib/MathlibTest/Bound/bound.lean
import Mathlib.Analysis.SpecialFunctions.Pow.Real import Mathlib.Tactic.Bound /-! ## Tests for the `bound` tactic -/ open scoped NNReal -- Tests that work with `bound`, but not `positivity`, `gcongr`, or `norm_num` section bound_only variable {a b c x y : ℝ} {z : ℂ} {n : ℕ} example (h : x < y) : y - x > 0 := by bound example (h : x < y) : Real.exp (y - x) > 1 := by bound example (h : x < y) (y0 : 0 < y) : x / y < 1 := by bound example (f : ℕ → ℝ) (h : ∀ n, f n ≥ 0) : f n ≥ 0 := by bound [h n] example (x y : ℝ≥0) (h : x < y) : (x : ℝ) < y := by bound example : dist a c ≤ dist a b + dist b c := by bound example {α : Type} {s : Finset α} {f g : α → ℂ} : -- An example that requires function inference ‖s.sum (fun x ↦ f x + g x)‖ ≤ s.sum (fun x ↦ ‖f x + g x‖) := by bound end bound_only -- Calc example: A weak lower bound for `z ← z^2 + c` example {c z : ℝ} (cz : ‖c‖ ≤ ‖z‖) (z3 : 3 ≤ ‖z‖) : 2 * ‖z‖ ≤ ‖z^2 + c‖ := by calc ‖z^2 + c‖ _ ≥ ‖z^2‖ - ‖c‖ := by bound _ ≥ ‖z^2‖ - ‖z‖ := by bound -- gcongr works here, not for the other two _ ≥ (‖z‖ - 1) * ‖z‖ := by rw [mul_comm, mul_sub_one, ← pow_two, ← norm_pow] _ ≥ 2 * ‖z‖ := by bound -- Testing branching functionality. None of these tests work with `positivity` or `bound`. 
section guess_tests variable {a b c : ℝ} {n m : ℕ} example (h : a ≤ b) : a ≤ max b c := by bound example (h : a ≤ c) : a ≤ max b c := by bound example (h : a ≤ c) : min a b ≤ c := by bound example (h : b ≤ c) : min a b ≤ c := by bound example (h : a < b) : a < max b c := by bound example (h : a < c) : a < max b c := by bound example (h : a < c) : min a b < c := by bound example (h : b < c) : min a b < c := by bound example (a1 : 1 ≤ a) (h : m ≤ n) : a^m ≤ a^n := by bound example (a0 : 0 ≤ a) (a1 : a ≤ 1) (h : n ≤ m) : a^m ≤ a^n := by bound example (a1 : 1 ≤ a) (h : b ≤ c) : a^b ≤ a^c := by bound example (a0 : 0 < a) (a1 : a ≤ 1) (h : c ≤ b) : a^b ≤ a^c := by bound end guess_tests section positive_tests variable {n : ℕ} {x y : ℝ} {u : ℝ≥0} {z : ℂ} example (h : 0 < x) : x^2 > 0 := by bound example (h : x > 0) : x^2 > 0 := by bound example (p : x > 0) (q : y > 0) : x * y > 0 := by bound example (p : x > 0) (q : y > 0) : x / y > 0 := by bound example : 0 < 4 := by bound example : 0 < 7 := by bound example : 0 < (4 : ℝ) := by bound example : 0 < (7 : ℝ) := by bound example : 0 < (1 : ℝ) := by bound example (h : u > 0) : 0 < (u : ℝ) := by bound example : 0 < 2^n := by bound example : 0 < (1 : ℝ)⁻¹ := by bound end positive_tests section nonneg_tests variable {n : ℕ} {x y : ℝ} {u : ℝ≥0} {z : ℂ} example : 0 ≤ ‖z‖ := by bound example : ‖z‖ ≥ 0 := by bound example : x^2 ≥ 0 := by bound example (p : x ≥ 0) (q : y ≥ 0) : x * y ≥ 0 := by bound example (p : x ≥ 0) (q : y ≥ 0) : x / y ≥ 0 := by bound example (p : x ≥ 0) (q : y ≥ 0) : x + y ≥ 0 := by bound example : (n : ℝ) ≥ 0 := by bound example : 0 ≤ 7 := by bound example : 0 ≤ (7 : ℝ) := by bound example : 0 ≤ (1 : ℝ) := by bound example : 0 ≤ (u : ℝ) := by bound example : 0 ≤ (0 : ℝ) := by bound example : 0 ≤ 2^n := by bound example : 0 ≤ (0 : ℝ)⁻¹ := by bound end nonneg_tests section bound_tests variable {a b c x y : ℝ} {z : ℂ} {n : ℕ} example : (1 : ℝ) < 4 := by bound example : (2 : ℝ) < 4 := by bound example (n : x ≥ 0) (h 
: x ≤ y) : x^2 ≤ y^2 := by bound example (n : x ≥ 0) (h : x ≤ y) : y^2 ≥ x^2 := by bound example (n : a ≥ 0) (h : x ≤ y) : a * x ≤ a * y := by bound example (n : a ≥ 0) (h : x ≤ y) : x * a ≤ y * a := by bound example (bp : b ≥ 0) (xp : x ≥ 0) (ab : a ≤ b) (xy : x ≤ y) : a * x ≤ b * y := by bound example (h : x ≤ y) : ‖z‖ * x ≤ ‖z‖ * y := by bound example (h : x ≤ y) : a + x ≤ a + y := by bound example (h : x ≤ y) : x + a ≤ y + a := by bound example (ab : a ≤ b) (xy : x ≤ y) : a + x ≤ b + y := by bound example (h : x ≥ y) : a - x ≤ a - y := by bound example (h : x ≤ y) : x - a ≤ y - a := by bound example (ab : a ≤ b) (xy : x ≥ y) : a - x ≤ b - y := by bound example (h : x > 0) : x ≥ 0 := by bound example (hc : c ≥ 0) (h : a ≤ b) : a / c ≤ b / c := by bound example (ha : a ≥ 0) (hc : c > 0) (h : b ≥ c) : a / b ≤ a / c := by bound example (x y : ℝ) (x0 : 0 < x) (h : x ≤ y) : x.log ≤ y.log := by bound end bound_tests /-- This broke without appropriate `g.withContext` use in an older implementation of `bound`. Leaving the test here just in case. -/ example {s : Set ℂ} (o : IsOpen s) (z) (h : z ∈ s) : ∃ r : ℝ, r > 0 := by rw [Metric.isOpen_iff] at o rcases o z h with ⟨t, tp, bs⟩ exists t/2 clear o h bs z s bound -- Test various elaboration issues example {f : ℂ → ℂ} {z w : ℂ} {s r c e : ℝ} (sc : ∀ {w}, ‖w - z‖ < s → ‖f w - f z‖ < e) (wz : ‖w - z‖ < s) (wr : ‖w‖ < r) (h : ∀ z : ℂ, ‖z‖ < r → ‖f z‖ ≤ c * ‖z‖) : ‖f z‖ ≤ c * ‖w‖ + e := by calc ‖f z‖ = ‖f w - (f w - f z)‖ := by ring_nf _ ≤ ‖f w‖ + ‖f w - f z‖ := by bound _ ≤ c * ‖w‖+ e := by bound [h w wr, sc wz] -- A test that requires reduction to weak head normal form to work (surfaced by `Hartogs.lean`) example (x y : ℝ) (h : x < y ∧ True) : x ≤ y := by bound [h.1] -- Used to fail with `unknown identifier n`, since I wasn't elaborating [] inside the goal theorem test_unknown_identifier {f : ℕ → ℝ} (le : ∀ n, f n ≤ n) : ∀ n : ℕ, f n ≤ n := by intro n; bound [le n]
.lake/packages/mathlib/MathlibTest/instances/CommRing_integralClosure.lean
import Mathlib -- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/category.20theory.20import.20breaks.20CommRing.20synthesis/near/449132250 variable (R : Type) (A : Type) [CommRing R] [CommRing A] [Algebra R A] /-- info: (integralClosure R A).toCommRing -/ #guard_msgs in #synth CommRing (integralClosure R A)
.lake/packages/mathlib/MathlibTest/instances/Ring_finiteness.lean
import Mathlib -- https://github.com/leanprover-community/mathlib4/pull/17557#issuecomment-2426920648 variable (R : Type*) [Ring R] [IsSemisimpleRing R] example : IsNoetherianRing R := inferInstance example : IsArtinianRing R := inferInstance
.lake/packages/mathlib/MathlibTest/DirectoryDependencyLinter/Test.lean
import Mathlib.Init
import Qq
import Mathlib.Util.AssertExists

/--
warning: Module MathlibTest.DirectoryDependencyLinter.Test depends on Mathlib.Util.AssertExists, but is only allowed to import modules starting with one of [Mathlib.Lean].

Note: module Mathlib.Util.AssertExists is directly imported by this module

Note: This linter can be disabled with `set_option linter.directoryDependency false`
---
warning: The module doc-string for a file should be the first command after the imports. Please, add a module doc-string before `/-!
# Tests for the `directoryDependency` linter
-/
`.

Note: This linter can be disabled with `set_option linter.style.header false`
-/
#guard_msgs in
set_option linter.style.header true in
/-!
# Tests for the `directoryDependency` linter
-/

-- Some unit-tests for internal functions.
-- `Lean.Name.isPrefixOf` must match whole name components, not string prefixes
-- (`Mathlib.Util` is not a prefix of `Mathlib.Utils`).
#guard Lean.Name.isPrefixOf `Mathlib.Util `Mathlib.Util.Basic == true
#guard Lean.Name.isPrefixOf `Mathlib.Util `Mathlib.Util.Nested.Basic == true
#guard Lean.Name.isPrefixOf `Mathlib.Util `Mathlib.Utils.Basic == false
#guard Lean.Name.isPrefixOf `Mathlib.Foo `Mathlib.Util.Foo == false
#guard Lean.Name.isPrefixOf `Mathlib.Util `Mathlib.Utils == false
.lake/packages/mathlib/MathlibTest/Delab/FinsetBuilder.lean
import Mathlib.Data.Fintype.Defs

-- Tests for the `Finset` set-builder delaborator: each `#check` must pretty-print
-- in the same builder notation it was written in, with and without `pp.funBinderTypes`.
variable {α : Type*} [Fintype α] {p : α → Prop} {s : Finset α} {a : α} [DecidablePred p]
  [DecidableEq α] [Singleton α (Finset α)] [HasCompl (Finset α)]

/-- info: {x | p x} : Finset α -/
#guard_msgs in
#check ({x | p x} : Finset α)

/-- info: {x ∈ s | p x} : Finset α -/
#guard_msgs in
#check ({x ∈ s | p x})

/-- info: {x ≠ a | p x} : Finset α -/
#guard_msgs in
#check ({x ≠ a | p x} : Finset α)

/-- info: {x ∉ s | p x} : Finset α -/
#guard_msgs in
#check ({x ∉ s | p x})

-- With `pp.funBinderTypes` the plain builder shows the binder type `{x : α | p x}`.
/-- info: {x : α | p x} : Finset α -/
#guard_msgs in
set_option pp.funBinderTypes true in
#check ({x | p x} : Finset α)

/-- info: {x ∈ s | p x} : Finset α -/
#guard_msgs in
set_option pp.funBinderTypes true in
#check ({x ∈ s | p x})

/-- info: {x ≠ a | p x} : Finset α -/
#guard_msgs in
set_option pp.funBinderTypes true in
#check ({x ≠ a | p x} : Finset α)

/-- info: {x ∉ s | p x} : Finset α -/
#guard_msgs in
set_option pp.funBinderTypes true in
#check ({x ∉ s | p x})
.lake/packages/mathlib/MathlibTest/Delab/SupInf.lean
import Mathlib

-- Tests for the `max`/`min` ↔ `⊔`/`⊓` delaborator: lattice-notation is used exactly
-- when the instance comes from `Lattice`, not from a linear order.

/-- info: max 1 2 : ℕ -/
#guard_msgs in
#check max (1 : ℕ) 2

/-- info: max 1 2 : ℝ -/
#guard_msgs in
#check max (1 : ℝ) 2

/-- info: ({0} ⊔ {1} ⊔ ({2} ⊔ {3})) ⊓ ({4} ⊔ {5}) ⊔ {6} ⊓ {7} ⊓ ({8} ⊓ {9}) : Set ℕ -/
#guard_msgs in
#check (max (min (max (max {0} {1}) (max {2} {3})) (max {4} {5}))
  (min (min {6} {7}) (min {8} {9})) : Set ℕ)

section
variable {α : Type*} (a b : α)

variable [Lattice α] in
/-- info: a ⊔ b : α -/
#guard_msgs in
#check max a b

variable [LinearOrder α] in
/-- info: max a b : α -/
#guard_msgs in
#check max a b

variable [CompleteLinearOrder α] in
/-- info: max a b : α -/
#guard_msgs in
#check max a b

variable [ConditionallyCompleteLinearOrder α] in
/-- info: max a b : α -/
#guard_msgs in
#check max a b

end

universe u

/-- info: fun α [Lattice α] a b => a ⊔ b : (α : Type u) → [Lattice α] → α → α → α -/
#guard_msgs in
#check fun (α : Type u) [Lattice α] (a b : α) => max a b

/-- info: fun α [LinearOrder α] a b => max a b : (α : Type u) → [LinearOrder α] → α → α → α -/
#guard_msgs in
#check fun (α : Type u) [LinearOrder α] (a b : α) => max a b

/--
info: fun α [CompleteLinearOrder α] a b => max a b : (α : Type u) → [CompleteLinearOrder α] → α → α → α
-/
#guard_msgs in
#check fun (α : Type u) [CompleteLinearOrder α] (a b : α) => max a b

/--
info: fun α [ConditionallyCompleteLinearOrder α] a b =>
  max a b : (α : Type u) → [ConditionallyCompleteLinearOrder α] → α → α → α
-/
#guard_msgs in
#check fun (α : Type u) [ConditionallyCompleteLinearOrder α] (a b : α) => max a b

-- In this section we check that the delaborator respects the options `pp.explicit` and `pp.notation`.
section
variable [Min α] [Max α] (a b c : α)

/-- info: (a ⊔ b) ⊓ c : α -/
#guard_msgs in
#check min (max a b) c

set_option pp.notation false in
/-- info: min (max a b) c : α -/
#guard_msgs in
#check min (max a b) c

set_option pp.explicit true in
/-- info: @min α inst✝¹ (@max α inst✝ a b) c : α -/
#guard_msgs in
#check min (max a b) c

end
.lake/packages/mathlib/MathlibTest/Delab/Scheme.lean
import Mathlib.AlgebraicGeometry.Restrict

-- Tests for how points of schemes pretty-print: the coercion arrows `↥`/`↑`
-- in the displayed type must match the expectations below.

universe u

open AlgebraicGeometry

variable (X : Scheme.{u}) (x : X) in
/-- info: x : ↥X -/
#guard_msgs in
#check x

variable (R : CommRingCat.{u}) (x : Spec R) in
/-- info: x : ↥(Spec R) -/
#guard_msgs in
#check x

variable (X : Scheme.{u}) (U : X.Opens) (x : U.toScheme) in
/-- info: x : ↥↑U -/
#guard_msgs in
#check x

variable (X : Scheme.{u}) (U : X.Opens) (x : U) in
/-- info: x : ↥U -/
#guard_msgs in
#check x
.lake/packages/mathlib/MathlibTest/Delab/Abs.lean
import Mathlib.Algebra.Order.Group.Unbundled.Abs

-- Tests that multiplicative `|·|ₘ` and additive `|·|` absolute-value notation
-- round-trip through the delaborator, including nested occurrences.
variable {α β : Type*} [Lattice α] [Lattice β] [Group α] [AddGroup β] {a : α} {b : β}

/-- info: |a|ₘ : α -/
#guard_msgs in
#check |a|ₘ

/-- info: |b| : β -/
#guard_msgs in
#check |b|

/-- info: |(|b|)| : β -/
#guard_msgs in
#check |(|b|)|

/-- info: |(|a|ₘ)|ₘ : α -/
#guard_msgs in
#check |(|a|ₘ)|ₘ

/-- info: |(-b)| : β -/
#guard_msgs in
#check |(-b)|
.lake/packages/mathlib/MathlibTest/instance_diamonds/normed.lean
import Mathlib.Analysis.Complex.Basic

-- Diamond test: the two `NormedAddCommGroup ℂ` instances must agree by `rfl`
-- at reducible transparency.
-- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/defeq.20of.20.60NormedAddCommGroup.20.E2.84.82.60/near/422248635
example :
    (NonUnitalNormedRing.toNormedAddCommGroup : NormedAddCommGroup ℂ) =
      Complex.instNormedAddCommGroup := by
  with_reducible_and_instances rfl
.lake/packages/mathlib/MathlibTest/instance_diamonds/algebra_rat.lean
import Mathlib.Algebra.Algebra.Rat

-- Diamond tests for the rational algebra instances.

/-- The two `Algebra ℚ≥0 ℚ≥0` instances should coincide. -/
example : DivisionSemiring.toNNRatAlgebra = Algebra.id ℚ≥0 := rfl

/-- The two `Algebra ℚ ℚ` instances should coincide. -/
example : DivisionRing.toRatAlgebra = Algebra.id ℚ := rfl
.lake/packages/mathlib/MathlibTest/instance_diamonds/FieldTheory/SplittingField/Construction.lean
import Mathlib.Algebra.Algebra.Rat
import Mathlib.FieldTheory.SplittingField.Construction

-- Diamond tests for module/algebra instances on `SplittingField f`.

universe u v w

open Polynomial

variable {F : Type u} {K : Type v} {L : Type w}
variable [Field K] [Field L] [Field F]
variable (f : K[X])

-- The algebra instance deriving from `K` should be definitionally equal to that
-- deriving from the field structure on `SplittingField f`.
example :
    (AddCommMonoid.toNatModule : Module ℕ (SplittingField f)) =
      @Algebra.toModule _ _ _ _ (SplittingField.instAlgebra f) := by
  with_reducible_and_instances rfl

example :
    (AddCommGroup.toIntModule _ : Module ℤ (SplittingField f)) =
      @Algebra.toModule _ _ _ _ (SplittingField.instAlgebra f) := by
  with_reducible_and_instances rfl

example [CharZero K] : SplittingField.instAlgebra f = DivisionRing.toRatAlgebra :=
  rfl -- TODO: by with_reducible_and_instances rfl fails

example {q : ℚ[X]} : Ring.toIntAlgebra (SplittingField q) = SplittingField.instAlgebra q :=
  rfl -- TODO: by with_reducible_and_instances rfl fails
.lake/packages/mathlib/MathlibTest/instance_diamonds/FieldTheory/IsAlgClosed/AlgebraicClosure.lean
import Mathlib.Algebra.Algebra.Rat
import Mathlib.FieldTheory.IsAlgClosed.AlgebraicClosure

-- Diamond tests for module/algebra instances on `AlgebraicClosure k`,
-- parallel to the `SplittingField` tests.

variable {k : Type*} [Field k]

example : (AddCommMonoid.toNatModule : Module ℕ (AlgebraicClosure k)) =
    @Algebra.toModule _ _ _ _ (AlgebraicClosure.instAlgebra k) := by
  with_reducible_and_instances rfl

example : (AddCommGroup.toIntModule _ : Module ℤ (AlgebraicClosure k)) =
    @Algebra.toModule _ _ _ _ (AlgebraicClosure.instAlgebra k) := by
  with_reducible_and_instances rfl

example [CharZero k] : AlgebraicClosure.instAlgebra k = DivisionRing.toRatAlgebra :=
  rfl -- TODO: by with_reducible_and_instances rfl fails

example : Ring.toIntAlgebra (AlgebraicClosure ℚ) = AlgebraicClosure.instAlgebra ℚ :=
  rfl -- TODO: by with_reducible_and_instances rfl fails
.lake/packages/mathlib/MathlibTest/instance_diamonds/Data/Complex/Module.lean
import Mathlib.Algebra.Algebra.Rat
import Mathlib.LinearAlgebra.Complex.Module

-- Test that the `SMul ℚ ℂ` instance is correct.
example : (Complex.SMul.instSMulRealComplex : SMul ℚ ℂ) = (Algebra.toSMul : SMul ℚ ℂ) := by
  with_reducible_and_instances rfl
.lake/packages/mathlib/MathlibTest/solve_by_elim/instances.lean
import Mathlib.Algebra.Order.GroupWithZero.Synonym
import Mathlib.Algebra.Order.Ring.Nat

-- Test that `apply_rules` can close the goal using the supplied lemma
-- together with the hypotheses.
lemma foo (a b : ℕ) (ha : a ≠ 0) (hb : b ≠ 0) : a * b ≠ 0 := by
  apply_rules [mul_ne_zero]
.lake/packages/mathlib/MathlibTest/solve_by_elim/basic.lean
import Lean.Meta.Tactic.SolveByElim
import Mathlib.Tactic.Constructor
import Batteries.Tactic.PermuteGoals
import MathlibTest.solve_by_elim.dummy_label_attr

-- Basic functionality tests for `solve_by_elim`, `solve_by_elim*`,
-- `apply_assumption`, and `apply_rules`.

example (h : Nat) : Nat := by solve_by_elim
example {α β : Type} (f : α → β) (a : α) : β := by solve_by_elim
example {α β : Type} (f : α → α → β) (a : α) : β := by solve_by_elim
example {α β γ : Type} (f : α → β) (g : β → γ) (a : α) : γ := by solve_by_elim
example {α β γ : Type} (_f : α → β) (g : β → γ) (b : β) : γ := by solve_by_elim
example {α : Nat → Type} (f : (n : Nat) → α n → α (n + 1)) (a : α 0) : α 4 := by solve_by_elim

-- The same goals, passing an (empty) extra-lemma list.
example (h : Nat) : Nat := by solve_by_elim []
example {α β : Type} (f : α → β) (a : α) : β := by solve_by_elim []
example {α β : Type} (f : α → α → β) (a : α) : β := by solve_by_elim []
example {α β γ : Type} (f : α → β) (g : β → γ) (a : α) : γ := by solve_by_elim []
example {α β γ : Type} (_f : α → β) (g : β → γ) (b : β) : γ := by solve_by_elim []
example {α : Nat → Type} (f : (n : Nat) → α n → α (n + 1)) (a : α 0) : α 4 := by solve_by_elim []

-- Removing a needed hypothesis with `[-h]` must make the search fail.
example {α β : Type} (f : α → β) (a : α) : β := by
  fail_if_success solve_by_elim [-f]
  fail_if_success solve_by_elim [-a]
  fail_if_success solve_by_elim only [f]
  solve_by_elim

set_option linter.unusedVariables false in
example {α β γ : Type} (f : α → β) (g : β → γ) (b : β) : γ := by
  fail_if_success solve_by_elim [-g]
  solve_by_elim [-f]

example (h : Nat) : Nat := by solve_by_elim only [h]
example {α β : Type} (f : α → β) (a : α) : β := by solve_by_elim only [f, a]
example {α β : Type} (f : α → α → β) (a : α) : β := by solve_by_elim only [f, a]
example {α β γ : Type} (f : α → β) (g : β → γ) (a : α) : γ := by solve_by_elim only [f, g, a]
example {α β γ : Type} (_f : α → β) (g : β → γ) (b : β) : γ := by solve_by_elim only [g, b]
example {α : Nat → Type} (f : (n : Nat) → α n → α (n + 1)) (a : α 0) : α 4 := by
  solve_by_elim only [f, a]

set_option linter.unusedVariables false in
example (h₁ h₂ : False) : Empty := by
  -- 'It doesn't make sense to remove local hypotheses when using `only` without `*`.'
  fail_if_success solve_by_elim only [-h₁]
  -- 'It does make sense to use `*` without `only`.'
  fail_if_success solve_by_elim [*, -h₁]
  solve_by_elim only [*, -h₁]

-- Verify that already assigned metavariables are skipped.
example (P₁ P₂ : α → Prop) (f : ∀ (a : α), P₁ a → P₂ a → β)
    (a : α) (ha₁ : P₁ a) (ha₂ : P₂ a) : β := by
  solve_by_elim

example {X : Type} (x : X) : x = x := by
  fail_if_success solve_by_elim -constructor only -- needs the `rfl` lemma
  solve_by_elim

-- Needs to apply `rfl` twice, with different implicit arguments each time.
-- A naive implementation of solve_by_elim would get stuck.
example {X : Type} (x y : X) (p : Prop) (h : x = x → y = y → p) : p := by solve_by_elim

example : True := by
  fail_if_success solve_by_elim -constructor only -- needs the `trivial` lemma
  solve_by_elim

example : True := by
  -- uses the `trivial` lemma, which should now be removed from the default set:
  solve_by_elim -constructor

example : True := by
  solve_by_elim only -- uses the constructor discharger.

-- Requires backtracking.
example (P₁ P₂ : α → Prop) (f : ∀ (a: α), P₁ a → P₂ a → β) (a : α) (_ha₁ : P₁ a)
    (a' : α) (ha'₁ : P₁ a') (ha'₂ : P₂ a') : β := by
  fail_if_success solve_by_elim (config := .noBackTracking)
  solve_by_elim

example {α : Type} {a b : α → Prop} (h₀ : b = a) (y : α) : a y = b y := by
  fail_if_success solve_by_elim -symm
  solve_by_elim

example (P : True → False) : 3 = 7 := by
  fail_if_success solve_by_elim -exfalso
  solve_by_elim

-- Verifying that `solve_by_elim` acts only on the main goal.
example (n : Nat) : Nat × Nat := by
  constructor
  solve_by_elim
  solve_by_elim

-- Verifying that `solve_by_elim*` acts on all remaining goals.
example (n : Nat) : Nat × Nat := by
  constructor
  solve_by_elim*

-- Verifying that `solve_by_elim*` backtracks when given multiple goals.
example (n m : Nat) (f : Nat → Nat → Prop) (h : f n m) : ∃ p : Nat × Nat, f p.1 p.2 := by
  fconstructor
  fconstructor
  solve_by_elim*

-- test that metavariables created for implicit arguments don't get stuck
example (P : Nat → Type) (f : {n : Nat} → P n) : P 2 × P 3 := by
  fconstructor
  solve_by_elim* only [f]

example : 6 = 6 ∧ [7] = [7] := by
  fconstructor
  solve_by_elim* only [@rfl _]

-- Test that `solve_by_elim*`, which works on multiple goals,
-- successfully uses the relevant local hypotheses for each goal.
example (f g : Nat → Prop) :
    (∃ k : Nat, f k) ∨ (∃ k : Nat, g k) ↔ ∃ k : Nat, f k ∨ g k := by
  fconstructor
  rintro (⟨n, fn⟩ | ⟨n, gn⟩)
  pick_goal 3
  rintro ⟨n, hf | hg⟩
  solve_by_elim* (maxDepth := 13) [Or.inl, Or.inr, Exists.intro]

-- Test that we can disable the `intro` discharger.
example (P : Prop) : P → P := by
  fail_if_success solve_by_elim -intro
  solve_by_elim

example (P Q : Prop) : P ∧ Q → P ∧ Q := by solve_by_elim

section apply_assumption

example {a b : Type} (h₀ : a → b) (h₁ : a) : b := by
  apply_assumption
  apply_assumption

example {α : Type} {p : α → Prop} (h₀ : ∀ x, p x) (y : α) : p y := by apply_assumption

-- Check that `apply_assumption` uses `symm`.
example (a b : α) (h : b = a) : a = b := by
  fail_if_success apply_assumption -symm
  apply_assumption

-- Check that `apply_assumption` uses `exfalso`.
example {P Q : Prop} (p : P) (q : Q) (h : P → ¬ Q) : Nat := by
  fail_if_success apply_assumption -exfalso
  apply_assumption <;> assumption

end apply_assumption

section «using»

@[dummy_label_attr]
axiom foo : 1 = 2

-- `using attr` restricts the lemma set to those tagged with the label attribute.
example : 1 = 2 := by
  fail_if_success solve_by_elim
  solve_by_elim using dummy_label_attr

end «using»

section issue1581

axiom mySorry {α} : α

@[dummy_label_attr]
theorem le_rfl [LE α] {b c : α} (_h : b = c) : b ≤ c := mySorry

example : 5 ≤ 7 := by
  apply_rules using dummy_label_attr
  guard_target = 5 = 7
  exact mySorry

example : 5 ≤ 7 := by
  apply_rules [le_rfl]
  guard_target = 5 = 7
  exact mySorry

end issue1581
.lake/packages/mathlib/MathlibTest/solve_by_elim/dummy_label_attr.lean
import Lean.LabelAttribute

-- Registers the label attribute used by the `solve_by_elim`/`apply_rules` tests
-- (imported by `MathlibTest/solve_by_elim/basic.lean`).
register_label_attr dummy_label_attr
.lake/packages/mathlib/MathlibTest/grind/ordered_ring.lean
import Mathlib.Algebra.Order.Ring.Defs

-- Tests that `grind` can use Mathlib's ordered-ring structure
-- (`CommRing` + `PartialOrder` + `IsStrictOrderedRing`).

example {K : Type _} [CommRing K] [PartialOrder K] [IsStrictOrderedRing K]
    (this : 0 ≤ (-1 : K)) : False := by grind

example {K : Type _} [CommRing K] [PartialOrder K] [IsStrictOrderedRing K]
    (this : 0 ≤ (-c - 1) + c) : b * b - 4 * 0 * c ≤ 0 := by grind
.lake/packages/mathlib/MathlibTest/grind/ring.lean
import Mathlib.Algebra.Order.Ring.Defs
import Mathlib.Algebra.Field.Defs

/-!
# Preliminary tests for `grind` using Mathlib typeclasses.

These are far from exhaustive tests, for now just testing the minimal functionality
for `grind` using Mathlib's `CommRing` typeclass.
-/

-- We mock ℝ here so that we don't have to import the dependencies.
axiom Real : Type
notation "ℝ" => Real
@[instance] axiom Real.field : Field ℝ
@[instance] axiom Real.linearOrder : LinearOrder ℝ
@[instance] axiom Real.isStrictOrderedRing : IsStrictOrderedRing ℝ

-- The two paths to `Grind.IntModule` from `Ring` must be definitionally equal.
example (R : Type) [I : Ring R] :
    @AddCommGroup.toGrindIntModule R (@Ring.toAddCommGroup R I) =
      @Lean.Grind.Ring.toIntModule R (@Ring.toGrindRing R I) := rfl

example {α} [CommRing α] (x y : α) : x + y + y - x = 2 * y := by grind

example (x y : ℝ) : (x + y) ^ 3 = x ^ 3 + y ^ 3 + 3 * (x * y ^ 2 + x ^ 2 * y) := by grind

example {α} [CommRing α] (x : α) : x ^ 2 = x * x := by grind

example {α} [Field α] [LinearOrder α] [IsStrictOrderedRing α] (a b c : α) :
    b ^ 2 - 4 * c * a = -(4 * c * a) + b ^ 2 := by grind

example {α} [Field α] [LinearOrder α] [IsStrictOrderedRing α] (a b c : α) :
    b ^ 2 - 4 * a * c = 4 * a * 0 + b * b - 4 * a * c := by grind

example {α} [CommRing α] (a b c d e : α) :
    (-(a * b) + c + d) * e = (c + (d + -a * b)) * e := by grind

example {α} [CommRing α] (x y z : α) (h₁ : x^2 = y) (h₂ : x^3 = z) : y^3 = z^2 := by grind

example (x y : ℝ) (h₁ : x^2 = x * y^3) (h₂ : x^3 * y^2 = y) : y^2 = x^4 := by grind

example {α} [CommSemiring α] [IsRightCancelAdd α] (x y z : α) (h : x + z = y + z) : x = y := by
  grind
.lake/packages/mathlib/MathlibTest/grind/pairwise_disjoint.lean
import Mathlib

-- A concrete family of pairwise-disjoint finsets in `Fin 4`, indexed by `Fin 3`.
abbrev S1 : Fin 3 → Finset (Fin 4)
  | 0 => {0}
  | 1 => {1}
  | 2 => {2, 3}

-- Equip `grind` with just enough `Finset` lemmas to decide disjointness of
-- insert/singleton/empty finsets, then unfold `Pairwise`.
attribute [grind _=_] LawfulSingleton.insert_emptyc_eq
attribute [grind =] Finset.mem_singleton
attribute [grind =] Finset.disjoint_insert_left
attribute [grind =] Finset.disjoint_insert_right
attribute [grind ←] Finset.disjoint_empty_left
attribute [grind ←] Finset.disjoint_empty_right
attribute [grind] Pairwise

example : Pairwise (Function.onFun Disjoint fun x ↦ S1 x) := by grind
.lake/packages/mathlib/MathlibTest/grind/trig.lean
import Mathlib

open Real

grind_pattern cos_sq_add_sin_sq => cos x
grind_pattern cos_sq_add_sin_sq => sin x

-- Whenever `grind` sees `cos` or `sin`, it adds `(cos x)^2 + (sin x)^2 = 1` to the blackboard.
-- That's a polynomial, so it is sent to the Grobner basis module.

-- And we can prove equalities modulo that relation!
example : (cos x + sin x)^2 = 2 * cos x * sin x + 1 := by grind

-- `grind` notices that the two arguments of `f` are equal,
-- and hence the function applications are too.
example (f : ℝ → ℕ) : f ((cos x + sin x)^2) = f (2 * cos x * sin x + 1) := by grind

-- After that, we can use basic modularity conditions:
-- this reduces to `4 * x ≠ 2 + x` for some `x : ℕ`
example (f : ℝ → ℕ) : 4 * f ((cos x + sin x)^2) ≠ 2 + f (2 * cos x * sin x + 1) := by grind

-- A bit of case splitting is also fine.
-- If `max = 3`, then `f _ = 0`, and we're done.
-- Otherwise, the previous argument applies.
example (f : ℝ → ℕ) : max 3 (4 * f ((cos x + sin x)^2)) ≠ 2 + f (2 * cos x * sin x + 1) := by
  grind
.lake/packages/mathlib/MathlibTest/grind/set.lean
import Mathlib

-- `grind` must refute an equality of set literals that differ in one element.
example (h : ({[], ['a']} : Set (List Char)) = {[], ['a'], ['b']}) : False := by grind
.lake/packages/mathlib/MathlibTest/grind/grobner.lean
import Mathlib

/-!
Mathlib used to have a tactic (`polyrith`) that communicated with a remote Sage server to do
Grobner basis calculations.

This test file is adapted from the test file for `polyrith`, but uses the `grobner` tactic
instead. (Recall `grobner` is a thin wrapper around `grind` disabling other modules.)
-/

/-! ### Standard Cases over ℤ, ℚ, and ℝ -/

example (x y : ℤ) (h1 : 3*x + 2*y = 10) : 3*x + 2*y = 10 := by grobner

example (x y : ℚ) (h1 : x*y + 2*x = 1) (h2 : x = y) : x*y = -2*y + 1 := by grobner

example (x y : ℝ) (h1 : x + 2 = -3) (h2 : y = 10) : -y + 2*x + 4 = -16 := by grobner

example (x y z : ℝ) (ha : x + 2*y - z = 4) (hb : 2*x + y + z = -2) (hc : x + 2*y + z = 2) :
    -3*x - 3*y - 4*z = 2 := by grobner

/-- warning: declaration uses 'sorry' -/
#guard_msgs in
example (w x y z : ℝ) (h1 : x + 2.1*y + 2*z = 2) (h2 : x + 8*z + 5*w = -6.5)
    (h3 : x + y + 5*z + 5*w = 3) : x + 2.2*y + 2*z - 5*w = -8.5 := by
  -- `grind` does not yet understand scientific notation:
  fail_if_success grobner
  sorry

example (a b c d : ℚ) (h1 : a = 4) (h2 : 3 = b) (h3 : c*3 = d) (h4 : -d = a) :
    2*a - 3 + 9*c + 3*d = 8 - b + 3*d - 3*a := by grobner

/-! ### Case with ambiguous identifiers -/

example («def evil» y : ℤ) (h1 : 3*«def evil» + 2*y = 10) : 3*«def evil» + 2*y = 10 := by grobner

example («¥» y : ℤ) (h1 : 3*«¥» + 2*y = 10) : «¥» * (3*«¥» + 2*y) = 10 * «¥» := by grobner

/-! ### Cases with arbitrary coefficients -/

example (a b : ℤ) (h : a = b) : a * a = a * b := by grobner

example (a b c : ℤ) (h : a = b) : a * c = b * c := by grobner

example (a b c : ℤ) (h1 : a = b) (h2 : b = 1) : c * a + b = c * b + 1 := by grobner

example (x y : ℚ) (h1 : x + y = 3) (h2 : 3*x = 7) : x*x*y + y*x*y + 6*x = 3*x*y + 14 := by grobner

example (x y z w : ℚ) (hzw : z = w) : x*z + 2*y*z = x*w + 2*y*w := by grobner

/-! ### Cases with non-hypothesis inputs/input restrictions -/

example (a b : ℝ) (ha : 2*a = 4) (hab : 2*b = a - b) (hignore : 3 = a + b) :
    b = 2 / 3 := by grobner

axiom term : ∀ a b : ℚ, a + b = 0

example (a b c d : ℚ) (h : a + b = 0) (_h2 : b + c = 0) : a + b + c + d = 0 := by
  have := term c d
  grobner

axiom qc : ℚ
axiom hqc : qc = 2*qc

example (a b : ℚ) (h : ∀ p q : ℚ, p = q) : 3*a + qc = 3*b + 2*qc := by
  have := hqc
  specialize h a b
  grobner

axiom bad (q : ℚ) : q = 0

example (a b : ℚ) : a + b^3 = 0 := by
  have := bad a
  have := bad (b^2)
  grobner

/-! ### Case over arbitrary field/ring -/

example {α} [h : CommRing α] {a b c d e f : α} (h1 : a*d = b*c) (h2 : c*f = e*d) :
    c * (a*f - b*e) = 0 := by grobner

example {K : Type*} [Field K] [Invertible 2] [Invertible 3] {ω p q r s t : K}
    (hp_nonzero : p ≠ 0) (hr : r ^ 2 = q ^ 2 + p ^ 3) (hs3 : s ^ 3 = q + r) (ht : t * s = p)
    (x : K) (H : 1 + ω + ω ^ 2 = 0) :
    x ^ 3 + 3 * p * x - 2 * q =
      (x - (s - t)) * (x - (s * ω - t * ω ^ 2)) * (x - (s * ω ^ 2 - t * ω)) := by
  have hs_nonzero : s ≠ 0 := by
    contrapose! hp_nonzero with hs_nonzero
    grobner
  have H' : 2 * q = s ^ 3 - t ^ 3 := by
    rw [← mul_left_inj' (pow_ne_zero 3 hs_nonzero)]
    grobner
  grobner

/-! ## Degenerate cases -/

example {K : Type*} [Field K] [CharZero K] {s : K} (hs : 3 * s + 1 = 4) : s = 1 := by grobner

example {x : ℤ} (h1 : x + 4 = 2) : x = -2 := by grobner

example {w : ℚ} (h1 : 3 * w + 1 = 4) : w = 1 := by grobner

example {x : ℤ} (h1 : 2 * x + 3 = x) : x = -3 := by grobner

example {c : ℚ} (h1 : 4 * c + 1 = 3 * c - 2) : c = -3 := by grobner

example (z : ℤ) (h1 : z + 1 = 2) (h2 : z + 2 = 2) : (1 : ℤ) = 2 := by grobner

example {R} [CommRing R] (x : R) (h2 : (2 : R) = 0) : x + x = 0 := by grobner

example {R} [CommRing R] (_x : R) (h : (2 : R) = 4) : (0 : R) = 2 := by grobner

/- ### Examples with exponent -/

example (x y z : ℚ) (h : x = y) (h2 : x * y = 0) : x + y*z = 0 := by grobner

example (K : Type) [Field K] [CharZero K] {x y z : K} (h₂ : y ^ 3 + x * (3 * z ^ 2) = 0)
    (h₁ : x ^ 3 + z * (3 * y ^ 2) = 0) (h₀ : y * (3 * x ^ 2) + z ^ 3 = 0)
    (h : x ^ 3 * y + y ^ 3 * z + z ^ 3 * x = 0) : x = 0 := by grobner

example (y a : ℤ) (k : ℕ) (h : a ^ k = 0) : a ^ k * y = 0 := by grobner
.lake/packages/mathlib/MathlibTest/grind/field.lean
import Mathlib

-- Tests that `grind` can handle field division guarded by a nonzero denominator.

example (x y : ℝ) (h : x ≠ y) : (x^2 - y^2)/(x - y) = x + y := by grind

example (x y : ℝ) (h : (x + y)^2 ≠ 0) : (x^2 - y^2)/(x + y) = x - y := by grind
.lake/packages/mathlib/MathlibTest/grind/panic.lean
import Mathlib.Algebra.Ring.GrindInstances
import Mathlib.Tactic.FastInstance

/-!
From `v4.23.0-rc2` through to `nightly-2025-09-02`, `grind` panicked here.
We keep this example as a regression test in Mathlib.
-/

set_option warn.sorry false

variable {R : Type} [CommRing R]

-- Local mock `DivisionRing`/`Field` classes (shadowing Mathlib's) — just enough
-- structure to reproduce the panic; all proofs are `sorry` by design.
class DivisionRing (K : Type) extends Ring K, DivInvMonoid K, Nontrivial K where
  protected mul_inv_cancel : ∀ (a : K), a ≠ 0 → a * a⁻¹ = 1
  protected inv_zero : (0 : K)⁻¹ = 0

class Field (K : Type) extends CommRing K, DivisionRing K

instance {K : Type} [Field K] : Lean.Grind.Field K :=
  { CommRing.toGrindCommRing K, ‹Field K› with
    zpow := ⟨fun a n => a^n⟩
    zpow_zero a := sorry
    zpow_succ a n := sorry
    zpow_neg a n := sorry
    zero_ne_one := sorry }

instance {K : Type} [Field K] : IsDomain K := sorry

instance [IsDomain R] : CancelCommMonoidWithZero R := sorry

noncomputable def normalizedFactors {α : Type} [CancelCommMonoidWithZero α] (a : α) : Set α :=
  sorry

structure Ideal (R : Type) [CommRing R] where

structure Ideal.Quotient (I : Ideal R) where

class Ideal.IsMaximal (I : Ideal R) : Prop where

namespace Ideal.Quotient

variable (I J : Ideal R)

instance commRing {R} [CommRing R] (I : Ideal R) : CommRing I.Quotient := sorry

protected noncomputable abbrev groupWithZero [hI : I.IsMaximal] : GroupWithZero (I.Quotient) :=
  { inv := sorry
    mul_inv_cancel := sorry
    inv_zero := sorry
    exists_pair_ne := sorry }

protected noncomputable abbrev divisionRing [I.IsMaximal] : DivisionRing (I.Quotient) :=
  fast_instance% -- The panic seems to rely on this specific `fast_instance%`.
  { __ := commRing _
    __ := Quotient.groupWithZero _ }

protected noncomputable abbrev field {R} [CommRing R] (I : Ideal R) [I.IsMaximal] :
    Field (I.Quotient) :=
  { __ := commRing _
    __ := Quotient.divisionRing I }

end Ideal.Quotient

attribute [local instance] Ideal.Quotient.field

open Classical in
theorem normalizedFactorsMapEquivNormalizedFactorsMinPolyMk_symm_apply_eq_span.extracted_1_3
    {R : Type} [CommRing R] {I : Ideal R} [I.IsMaximal] {mapQ mapMinpoly : I.Quotient}
    (_ : mapQ ∈ normalizedFactors mapMinpoly) : 0 = 0 := by grind
.lake/packages/mathlib/MathlibTest/grind/cc.lean
-- Congruence-closure tests for `grind` (ported from Lean 3 `cc` tests).

example (a₁ a₂ b₁ b₂ c d : Nat) : a₁ = c → a₂ = c → b₁ = d → d = b₂ →
    a₁ + b₁ + a₁ = a₂ + b₂ + c := by
  grind

example (a b c : Prop) : (a ↔ b) → ((a ∧ (c ∨ b)) ↔ (b ∧ (c ∨ a))) := by grind

example (a b c d : Prop) [d₁ : Decidable a] [d₂ : Decidable b] [d₃ : Decidable c]
    [d₄ : Decidable d] : (a ↔ b) → (c ↔ d) →
    ((if (a ∧ c) then True else False) ↔ (if (b ∧ d) then True else False)) := by
  grind

example (a b : Nat) : (a = b) = (b = a) := by grind

section Lean3Issue1442

def Rel : Int × Int → Int × Int → Prop
  | (n₁, d₁), (n₂, d₂) => n₁ * d₂ = n₂ * d₁

def mul' : Int × Int → Int × Int → Int × Int
  | (n₁, d₁), (n₂, d₂) => ⟨n₁ * n₂, d₁ * d₂⟩

-- Multiplication of pairs respects the cross-multiplication relation `Rel`.
example : ∀ (a b c d : Int × Int), Rel a c → Rel b d → Rel (mul' a b) (mul' c d) :=
  fun (n₁, d₁) (n₂, d₂) (n₃, d₃) (n₄, d₄) =>
    fun (h₁ : n₁ * d₃ = n₃ * d₁) (h₂ : n₂ * d₄ = n₄ * d₂) =>
      show (n₁ * n₂) * (d₃ * d₄) = (n₃ * n₄) * (d₁ * d₂) by grind

end Lean3Issue1442
.lake/packages/mathlib/MathlibTest/grind/ac.lean
-- Associativity/commutativity (AC) congruence tests for `grind`,
-- both for `Nat` addition and for an abstract AC operation.

section CCAC1

example (a b c : Nat) (f : Nat → Nat) : f (a + b + c) = f (c + b + a) := by grind

example (a b c : Nat) (f : Nat → Nat) : a + b = c → f (c + c) = f (a + b + c) := by grind

end CCAC1

section CCAC2

example (a b c d : Nat) (f : Nat → Nat → Nat) : b + a = d → f (a + b + c) a = f (c + d) a := by
  grind

end CCAC2

section CCAC3

example (a b c d e : Nat) (f : Nat → Nat → Nat) : b + a = d → b + c = e →
    f (a + b + c) (a + b + c) = f (c + d) (a + e) := by
  grind

example (a b c d e : Nat) (f : Nat → Nat → Nat) : b + a = d + d → b + c = e + e →
    f (a + b + c) (a + b + c) = f (c + d + d) (e + a + e) := by
  grind

section

universe u

-- An abstract binary operation known only to be associative and commutative.
variable {α : Type u}
variable (op : α → α → α)
variable [Std.Associative op]
variable [Std.Commutative op]

example (a b c d e : α) (f : α → α → α) : op b a = op d d → op b c = op e e →
    f (op a (op b c)) (op (op a b) c) = f (op (op c d) d) (op e (op a e)) := by
  grind [Std.Associative, Std.Commutative]

end

end CCAC3
.lake/packages/mathlib/MathlibTest/LibrarySearch/observe.lean
import Mathlib.Tactic.Observe
import Mathlib.Tactic.AdaptationNote

set_option linter.unusedVariables false

-- `observe?` must both suggest the closing `have` and add the hypothesis to the context.
/-- info: Try this: [apply] have h : x + y = y + x := Nat.add_comm x y -/
#guard_msgs in
example (x y : Nat) : True := by
  observe? h : x + y = y + x
  guard_hyp h : x + y = y + x
  trivial
.lake/packages/mathlib/MathlibTest/LibrarySearch/mathlib.lean
import Mathlib

-- We verify that `exact?` copes with all of Mathlib.
-- On `v4.7.0-rc1` this revealed a cache corruption problem.

/-- info: Try this: [apply] exact Nat.one_pos -/
#guard_msgs in
example : 0 < 1 := by exact?
.lake/packages/mathlib/MathlibTest/LibrarySearch/basic.lean
import Mathlib.Util.AssertNoSorry
import Mathlib.Algebra.Order.Ring.Canonical
import Mathlib.Data.Quot
import Mathlib.Data.Nat.Prime.Defs
import Mathlib.Data.Real.Basic

set_option autoImplicit true
set_option linter.style.setOption false

-- Enable this option for tracing:
-- set_option trace.Tactic.librarySearch true
-- And this option to trace all candidate lemmas before application.
-- set_option trace.Tactic.librarySearch.lemmas true
-- It may also be useful to enable
-- set_option trace.Meta.Tactic.solveByElim true

-- We need to set this here, as the lakefile does not enable this during testing.
-- https://github.com/leanprover-community/mathlib4/issues/6440
set_option pp.unicode.fun true

noncomputable section

-- `apply?` suggestions with a unique answer are pinned exactly via `#guard_msgs`;
-- tests whose answer may vary use `#guard_msgs (drop info)` and only check success.
/-- info: Try this: [apply] exact Nat.lt_add_one x -/
#guard_msgs in
example (x : Nat) : x ≠ x.succ := ne_of_lt (by apply?)

/-- info: Try this: [apply] exact Nat.zero_lt_succ 1 -/
#guard_msgs in
example : 0 ≠ 1 + 1 := ne_of_lt (by apply?)

/-- info: Try this: [apply] exact Nat.add_comm x y -/
#guard_msgs in
example (x y : Nat) : x + y = y + x := by apply?

/- info: Try this: exact fun a ↦ Nat.add_le_add_right a k -/
#guard_msgs (drop info) in
example (n m k : Nat) : n ≤ m → n + k ≤ m + k := by apply?

/- info: Try this: exact Nat.mul_dvd_mul_left a w -/
#guard_msgs (drop info) in
example (_ha : a > 0) (w : b ∣ c) : a * b ∣ a * c := by apply?

-- Could be any number of results (`Int.one`, `Int.zero`, etc)
#guard_msgs (drop info) in
example : Int := by apply?

-- `exact?%` is the term-mode variant of `exact?`.
/-- info: Try this: [apply] lt_add_one x -/
#guard_msgs in
example : x < x + 1 := exact?%

/-- info: Try this: [apply] exact p -/
#guard_msgs in
example (P : Prop) (p : P) : P := by apply?

/-- info: Try this: [apply] exact False.elim (np p) -/
#guard_msgs in
example (P : Prop) (p : P) (np : ¬P) : false := by apply?

/-- info: Try this: [apply] exact h x rfl -/
#guard_msgs in
example (X : Type) (P : Prop) (x : X) (h : ∀ x : X, x = x → P) : P := by apply?
-- Could be any number of results (`fun x ↦ x`, `id`, etc)
#guard_msgs (drop info) in
example (α : Prop) : α → α := by apply?

-- Note: these examples no longer work after we turned off lemmas with discrimination key `#[*]`.
-- example (p : Prop) : (¬¬p) → p := by apply? -- says: `exact not_not.mp`
-- example (a b : Prop) (h : a ∧ b) : a := by apply? -- says: `exact h.left`
-- example (P Q : Prop) : (¬ Q → ¬ P) → (P → Q) := by apply? -- say: `exact Function.mtr`

/-- info: Try this: [apply] exact Nat.add_comm a b -/
#guard_msgs in
example (a b : ℕ) : a + b = b + a := by apply?

/-- info: Try this: [apply] exact Nat.mul_sub_left_distrib n m k -/
#guard_msgs in
example (n m k : ℕ) : n * (m - k) = n * m - n * k := by apply?

-- Library search should also find lemmas applied via `Eq.symm`.
/-- info: Try this: [apply] exact Eq.symm (Nat.mul_sub_left_distrib n m k) -/
#guard_msgs in
example (n m k : ℕ) : n * m - n * k = n * (m - k) := by apply?

/- info: Try this: exact eq_comm -/
#guard_msgs (drop info) in
example {α : Type} (x y : α) : x = y ↔ y = x := by apply?

/- info: Try this: exact Nat.add_pos_left ha b -/
#guard_msgs (drop info) in
example (a b : ℕ) (_ha : 0 < a) (_hb : 0 < b) : 0 < a + b := by apply?

/- info: Try this: exact Nat.add_pos_left ha b -/
#guard_msgs (drop info) in
-- Verify that if maxHeartbeats is 0 we don't stop immediately.
set_option maxHeartbeats 0 in
example (a b : ℕ) (_ha : 0 < a) (_hb : 0 < b) : 0 < a + b := by apply?

-- Goals stated with synonymous relations (`>` vs `<`, `≥` vs `≤`) should find the same lemmas.
section synonym

/- info: Try this: exact Nat.add_pos_left ha b -/
#guard_msgs (drop info) in
example (a b : ℕ) (_ha : a > 0) (_hb : 0 < b) : 0 < a + b := by apply?

/-- info: Try this: [apply] exact Nat.le_of_dvd w h -/
#guard_msgs in
example (a b : ℕ) (h : a ∣ b) (w : b > 0) : a ≤ b := by apply?

/-- info: Try this: [apply] exact Nat.le_of_dvd w h -/
#guard_msgs in
example (a b : ℕ) (h : a ∣ b) (w : b > 0) : b ≥ a := by apply?
-- TODO: A lemma with head symbol `¬` can be used to prove `¬ p` or `⊥`
/-- info: Try this: [apply] exact Nat.not_lt_zero a -/
#guard_msgs in
example (a : ℕ) : ¬ (a < 0) := by apply?

/-- info: Try this: [apply] exact Nat.not_succ_le_zero a h -/
#guard_msgs in
example (a : ℕ) (h : a < 0) : False := by apply?

-- An inductive type hides the constructor's arguments enough
-- so that `apply?` doesn't accidentally close the goal.
inductive P : ℕ → Prop
  | gt_in_head {n : ℕ} : n < 0 → P n

-- This lemma with `>` as its head symbol should also be found for goals with head symbol `<`.
theorem lemma_with_gt_in_head (a : ℕ) (h : P a) : 0 > a := by cases h; assumption

-- This lemma with `false` as its head symbols should also be found for goals with head symbol `¬`.
theorem lemma_with_false_in_head (a b : ℕ) (_h1 : a < b) (h2 : P a) : False := by
  apply Nat.not_lt_zero; cases h2; assumption

/-- info: Try this: [apply] exact lemma_with_gt_in_head a h -/
#guard_msgs in
example (a : ℕ) (h : P a) : 0 > a := by apply?

/-- info: Try this: [apply] exact lemma_with_gt_in_head a h -/
#guard_msgs in
example (a : ℕ) (h : P a) : a < 0 := by apply?

/-- info: Try this: [apply] exact lemma_with_false_in_head a b h1 h2 -/
#guard_msgs in
example (a b : ℕ) (h1 : a < b) (h2 : P a) : False := by apply?

-- TODO this no longer works:
-- example (a b : ℕ) (h1 : a < b) : ¬ (P a) := by
--   apply?
--   -- says `exact lemma_with_false_in_head a b h1`

end synonym

/-- info: Try this: [apply] exact fun P ↦ iff_not_self -/
#guard_msgs in
example : ∀ P : Prop, ¬(P ↔ ¬P) := by apply?

-- We even find `iff` results:
/- info: Try this: exact (Nat.dvd_add_left h₁).mp h₂ -/
#guard_msgs (drop info) in
example {a b c : ℕ} (h₁ : a ∣ c) (h₂ : a ∣ b + c) : a ∣ b := by apply?

-- Note: these examples no longer work after we turned off lemmas with discrimination key `#[*]`.
-- example {α : Sort u} (h : Empty) : α := by apply? -- says `exact Empty.elim h`
-- example (f : A → C) (g : B → C) : (A ⊕ B) → C := by apply?
-- says `exact Sum.elim f g`
-- example (n : ℕ) (r : ℚ) : ℚ := by apply? using n, r -- exact nsmulRec n r

opaque f : ℕ → ℕ
axiom F (a b : ℕ) : f a ≤ f b ↔ a ≤ b

/-- info: Try this: [apply] exact (F a b).mpr h -/
#guard_msgs in
example (a b : ℕ) (h : a ≤ b) : f a ≤ f b := by apply?

-- `apply? using x, y` restricts the search to proofs mentioning the given terms.
/-- info: Try this: [apply] exact L.flatten -/
#guard_msgs in
example (L _M : List (List ℕ)) : List ℕ := by apply? using L

-- Could be any number of results
#guard_msgs (drop info) in
example (P _Q : List ℕ) (h : ℕ) : List ℕ := by apply? using h, P

-- Could be any number of results
#guard_msgs (drop info) in
example (l : List α) (f : α → β ⊕ γ) : List β × List γ := by apply? using f -- partitionMap f l

-- Could be any number of results (`Nat.mul n m`, `Nat.add n m`, etc)
#guard_msgs (drop info) in
example (n m : ℕ) : ℕ := by apply? using n, m

-- Could be any number of results
#guard_msgs (drop info) in
example (P Q : List ℕ) (_h : ℕ) : List ℕ := by apply? using P, Q

-- Check that we don't use sorryAx:
-- (see https://github.com/leanprover-community/mathlib4/issues/226)
theorem Bool_eq_iff {A B : Bool} : (A = B) = (A ↔ B) := by
  (cases A <;> cases B <;> simp)

/-- info: Try this: [apply] exact Bool_eq_iff -/
#guard_msgs in
theorem Bool_eq_iff2 {A B : Bool} : (A = B) = (A ↔ B) := by
  apply? -- exact Bool_eq_iff

assert_no_sorry Bool_eq_iff2

-- Example from https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/library_search.20regression/near/354025788
/-- info: Try this: [apply] exact Quot.mk_surjective -/
#guard_msgs in
example {r : α → α → Prop} : Function.Surjective (Quot.mk r) := by exact?

-- Example from https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/library_search.20failing.20to.20apply.20symm
/-- info: Try this: [apply] exact Iff.symm Nat.prime_iff -/
#guard_msgs in
lemma prime_of_prime (n : ℕ) : Prime n ↔ Nat.Prime n := by exact?
-- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/apply.3F.20failure/near/402534407
-- `exact? says ...` checks that the tactic produces exactly the stated suggestion.
example (P Q : Prop) (h : P → Q) (h' : ¬Q) : ¬P := by
  exact? says exact fun a ↦ h' (h a)

-- Removed until we come up with a way of handling nonspecific lemmas
-- that does not pollute the output or cause too much slow-down.
--
-- Example from https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/Exact.3F.20fails.20on.20le_antisymm/near/388993167
-- set_option linter.unreachableTactic false in
-- example {x y : ℝ} (hxy : x ≤ y) (hyx : y ≤ x) : x = y := by
--   -- This example non-deterministically picks between `le_antisymm hxy hyx` and
--   -- `ge_antisymm hyx hxy`.
--   first
--   | exact? says exact le_antisymm hxy hyx
--   | exact? says exact ge_antisymm hyx hxy

-- Check that adding `with_reducible` prevents expensive kernel reductions.
-- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/.60exact.3F.60.20failure.3A.20.22maximum.20recursion.20depth.20has.20been.20reached.22/near/417649319
/-- info: Try this: [apply] exact Nat.add_comm n m -/
#guard_msgs in
example (_h : List.range 10000 = List.range 10000) (n m : Nat) : n + m = m + n := by
  with_reducible exact?
.lake/packages/mathlib/MathlibTest/LibrarySearch/IsCompact.lean
import Mathlib.Topology.Instances.Real.Lemmas
import Mathlib.Topology.Order.Compact

-- `exact?` must fail here (no closed proof exists), while `apply?` succeeds by
-- producing suggestions with remaining goals — hence the 'sorry' warning below.
-- TODO: uses sorry, but is hidden behind the `apply?`
/-- warning: declaration uses 'sorry' -/
#guard_msgs(warning, drop info) in
example (f : ℝ → ℝ) {K : Set ℝ} (_hK : IsCompact K) : ∃ x ∈ K, ∀ y ∈ K, f x ≤ f y := by
  fail_if_success exact?
  apply?
-- Verify that this includes: `refine IsCompact.exists_forall_le _hK ?_ ?_`
.lake/packages/mathlib/MathlibTest/Algebra/Category/Grp/Injective.lean
import Mathlib.Algebra.Category.Grp.Injective
import Mathlib.Topology.Instances.AddCircle.Defs

open CategoryTheory

-- This instance used to have a specialized proof, but we can now find it with typeclass synthesis.
-- If this test fails, you should re-add this as a specialized instance.
instance AddCommGrpCat.injective_ratCircle : Injective <| of <| ULift.{u} <| AddCircle (1 : ℚ) :=
  inferInstance
-- Proof should be: injective_of_divisible _
.lake/packages/mathlib/docs/Conv/Guide.lean
/-! # Guide: Conversion mode tactic This is a curated guide to point you toward how `conv` mode works and what tactics are available. It is not meant to be comprehensive, but rather a "cheat sheet." See also the [`conv` introduction](https://leanprover-community.github.io/mathlib4_docs/docs/Conv/Introduction.html). ## Syntax The syntax for the `conv` tactic is ``` "conv" ("at" ident)? ("in" ("(occs :=" ("*" <|> num+) ")")? term)? "=>" convSeq ``` where `convSeq` is any sequence of "`conv` tactics", which are tactics specifically written for `conv` mode. The `in` clause is exactly the same as the arguments to the `conv` tactic `pattern`. ```lean conv in ...pattArgs... => ... ``` is short for ```lean conv => pattern ...patArgs... ... ``` Note that `conv in (occs := 1 2 3) pat => ...` starts with three goals (one for each occurrence), but `conv in (occs := *) pat => ...` starts with a single goal that converts in all occurrences simultaneously. Mathlib also provides `conv_lhs` and `conv_rhs` variants to immediately apply either the `lhs` or `rhs` tactic. ## What is `conv` mode? `conv` mode is essentially the normal tactic mode but with two differences. 1. Only "`conv` tactics" can appear in the `conv` block. These are tactics with syntax in the `conv` category. 2. The goals are all of the form `⊢ lhs = ?rhs` with `?rhs` a metavariable, but the goals are annotated in such a way that they display as `| lhs`. Each `conv` tactic is aware that the goal is of this form, and in addition to solving for the goal like normal, they also solve for the `?rhs` metavariable in some controlled way. For example, the `rfl` tactic uses `rfl` to solve the goal, which sets `?rhs := lhs`. Other tactics, like `congr`, partially solve for `?rhs` and create new goal metavariables for each unsolved-for hole. 
Once all the tactics have had a chance to run, `conv` mode itself uses `rfl` to solve any remaining goals (note that in `conv` mode, every goal can be solved for by `rfl`!), and then it uses the resulting `lhs = rhs` proof to rewrite the goal in the surrounding normal tactic mode. ## Conv tactics from Lean 4, Batteries, and Mathlib Unless they're annotated with "Batteries" or "Mathlib", the following tactics are defined in Lean 4 core. ### Control * `done` checks that there are no `conv` goals remaining. * `skip` does nothing. It can be used to be the single tactic in an otherwise empty `conv` block. It does *not* skip a `conv` goal. * `rfl` skips/closes a `conv` goal by using `rfl`. (Remember, the actual goal is `⊢ lhs = ?rhs`, so this sets `?rhs := lhs` and uses `rfl` to prove `lhs = lhs`.) * `conv => convSeq` is a nested `conv`. It uses `conv` to change the current goal without closing it. For example, this is how you can do a `conv`-targeted rewrite of the current expression and then apply `conv` tactics to the result. * `all_goals convSeq` runs the `conv` tactics on every `conv` goal, collecting all the produced subgoals (if any). * `any_goals convSeq` is like `all_goals` but succeeds if the tactic sequence succeeds for any of the goals. * `case tag => convSeq` focuses on a goal with a given tag, runs the tactic sequence, and then auto-closes the focused goal with `rfl`. Has the same syntax as the `case` tactic. * `case' tag => convSeq` is like `case` but does not auto-close the goal if the tactics do not close it. * `next => convSeq` and `next x1 ... xn => convSeq` are like the `next` tactic, but they auto-close the focused goal with `rfl`. * `· convSeq` focuses on the current goal and auto-closes it with `rfl`. * `focus => convSeq` focuses on the current goal. It does not auto-close the goal, unlike `next`. * `{ convSeq }` is like `next`. * `first | convSeq1 | convSeq2 | ...` tries each `conv` sequence one at a time until one of them succeeds, or else fails. 
* `try convSeq` runs the `conv` sequence and succeeds even if it fails.
  Same as `first | convSeq | skip`.
* `repeat convSeq` repeatedly runs `convSeq` until it fails.
* `( convSeq )` is for grouping. Useful when using `conv` tactic combinators.
* `conv1 <;> conv2` is for running `conv1` and then running `conv2` on every goal produced
  by `conv1`.
* `tactic => tacticSeq` converts the goal into `⊢ lhs = ?rhs` form and applies the tactic
  sequence. The tactic does not have to solve the goal completely, and remaining goals are
  turned back into `conv` goals.
  (Internal: there's also a `tactic' => tacticSeq` that does not remove the `conv`
  annotations from the goal before applying the tactic sequence.)
* `discharge => tacticSeq` takes a goal `| p` with `p` a proposition, uses the tactic
  sequence to prove `⊢ p`, and then closes the goal to convert `p` to `True`. (Mathlib)
* `with_reducible convSeq` changes the transparency settings to `reducible` while evaluating
  the `conv` sequence. (Mathlib)

### Navigation

* `congr` (synonym: `args`) creates subgoals for every immediate subexpression of the
  expression. You can use `rfl` to skip any of these subgoals.
* `lhs` (synonym: `left`) traverses into the second-to-last argument of the expression.
  (Implemented using `congr`.)
* `rhs` (synonym: `right`) traverses into the last argument of the expression.
  (Implemented using `congr`.)
* `arg i` (and `arg @i`) traverses into the `i`th explicit argument (resp. the `i`th
  argument) of the expression. (Implemented using `congr`.)
* `ext` (synonym: `intro`) traverses into lambda, forall, and `let` expressions.
  `ext x` gives the resulting binder the name `x`.
  `ext x y z ...` applies `ext` once for each provided binder.
* `enter [...]` is a compact way to describe a path to a subterm.
  * `enter [i]` (where `i` is a natural number) is equivalent to `arg i`.
  * `enter [@i]` is equivalent to `arg @i`.
  * `enter [x]` (where `x` is an identifier) is equivalent to `ext x`.
* `enter [a,b,c,...]` is `enter [a]; enter [b]; enter [c]; enter [...]`. * `pattern` is for navigating into subexpressions that match a given pattern * `pattern pat` traverses to the first subterm of the target that matches `pat`. * `pattern (occs := *) pat` traverses to every subterm of the target that matches `pat` which is not contained in another match of `pat`. It generates one subgoal. * `pattern (occs := 1 2 4) pat` matches occurrences `1, 2, 4` of `pat` and produces three subgoals. Occurrences are numbered left to right from the outside in. ### Manipulation * `change t` changes the expression to `t` if the expression and `t` are definitionally equal. * `equals t => tacticSeq` changes the current expression, say `e`, to `t`, and asks you to prove the equality `e = t`. (Batteries) * `rw [thms...]` rewrites the expression using the given theorems. The syntax is similar to `rw`. * `erw [thms...]` rewrites the expression using the given theorems. The syntax is similar to `erw`. * `simp [thms...]` applies `simp` to rewrite the expression. The syntax is similar to `simp`. * `dsimp [thms...]` applies `dsimp` to rewrite the expression. The syntax is similar to `dsimp`. * `simp_match` simplifies `match` expressions. * `apply e` applies `e` to the goal (which remember is `⊢ lhs = ?rhs`) using the `apply` tactic. Strange results may occur if the hypotheses of `e` are not equalities. * `refine e` applies `e` to the goal (which remember is `⊢ lhs = ?rhs`) using the `refine` tactic. Strange results may occur if the placeholders in `e` are not equalities. * `exact e` closes the goal, where `e : lhs = ?rhs`. (Batteries) * Mathlib provides a number of tactics as `conv` tactics: * `abel` and `abel_nf` * `ring` and `ring_nf` * `norm_cast` * `norm_num1` and `norm_num` * `push_neg` * `apply_congr` applies a relevant `@[congr]` lemma, which can be better suited for a function than the congruence lemma that the `congr` tactic might generate. 
(Mathlib) * `slice i j` (for category theory) reassociates a composition of morphisms to focus on the composition of morphisms `i` through `j`. (Mathlib) ### Reductions * `whnf` reduces the expression to weak-head normal form. * `zeta` applies zeta reduction to the expression (i.e., substitutes all `let` expressions and expands all local variables). * `reduce` reduces the expression like the `#reduce` command. (Documentation says "for debugging purposes only.") * `unfold id1 id2 ...` unfolds the definitions for the given constants using each definitions equational lemmas. For recursive definitions, only one layer of unfolding is performed. * `delta id1 id2 ...` applies delta reduction for the given constants (i.e., substitutes the values of each constant). It is primitive: it ignores definitional equations and uses the raw definition of each constant. Using `unfold` is preferred. ### Debugging, for internal use, or otherwise technical * `trace_state` prints the current goal state (runs the `trace_state` tactic) * `fail_if_success convSeq` fails if the `conv` sequence succeeds. * `guard_expr` and `guard_target` for asserting that certain expressions are equal to others. (Batteries) * `unreachable!`, which is the same as the `unreachable!` tactic. (Batteries) * `run_tac doSeq` evaluates a monadic value and runs it as a tactic using `tactic'`. (Mathlib) ## Tactics and commands related to `conv` * `conv_lhs ... => ...` and `conv_rhs ... => ...` are like `conv`, but they immediately use `lhs` or `rhs` (respectively). (Mathlib) * `conv' ... => ...` is like `conv` but assumes the goal is already annotated as a `conv` goal. Used internally to go back and forth between tactic mode and conv mode. * `#conv convTactic => e` is a command to apply the `convTactic` to the expression `e`, yielding the converted expression (and dropping the generated proof). This is used to implement `#simp`, `#whnf`, `#norm_num`, and `#push_neg`. (Mathlib) -/
.lake/packages/mathlib/docs/Conv/Introduction.lean
/-! # Introduction to the conversion mode tactic Inside a tactic block, one can use the `conv` tactic to enter conversion mode. This mode allows one to travel into subexpressions inside assumptions and goals, even inside lambda functions and foralls, to apply targeted rewrites, simplifications, and other tactics. This is similar to the conversion tacticals (tactic combinators) found in other theorem provers like HOL4, HOL Light or Isabelle. ## Basic navigation and rewriting As a first example, let us prove `example (a b c : ℕ) : a * (b * c) = a * (c * b)` (examples in this file are somewhat artificial since the `ring` tactic from `Mathlib/Tactic/Ring.lean` could finish them immediately). The naive first attempt is to enter tactic mode and try `rw [mul_comm]`. But this transforms the goal into `b * c * a = a * (c * b)`, after commuting the very first multiplication appearing in the term. There are several ways to fix this issue, and one way is to use a more precise tool: the conversion mode. The following code block shows the current target after each line. Note that the target is prefixed by `|` where normal tactic mode shows a goal prefixed by `⊢`. Both cases are still called "goals" though. ```lean example (a b c : ℕ) : a * (b * c) = a * (c * b) := by conv => -- `| a * (b * c) = a * (c * b)` lhs -- `| a * (b * c)` congr -- 2 goals : `| a` and `| b * c` rfl -- skip `| a` goal rw [mul_comm] -- `| c * b` ``` The above snippet show three navigation commands: * `lhs` navigates to the left-hand side of a relation (here equality), there is also a `rhs` navigating to the right-hand side. * `congr` creates as many targets as there are arguments to the current head function (here the head function is multiplication) * `rfl` goes to the next target Once we arrive to the relevant target, we can use `rw` as in normal tactic mode. At the end, `conv` will automatically use `rfl` to skip the last remaining target. 
The second main reason to use conversion mode is to rewrite subexpressions involving bound
variables ("rewrite under binders").

Suppose we want to prove `example : (fun x : ℕ => 0 + x) = (fun x => x)`.
The naive first attempt is to enter tactic mode and try `rw [zero_add]`. However, this fails
with a frustrating
```text
tactic 'rewrite' failed, did not find instance of the pattern in the target expression
  0 + ?a
⊢ (fun x ↦ 0 + x) = fun x ↦ x
```
The solution is:
```lean
example : (fun x : ℕ => 0 + x) = (fun x => x) := by
  conv =>         -- | (fun x ↦ 0 + x) = fun x ↦ x
    lhs           -- | fun x ↦ 0 + x
    ext x         -- | 0 + x
    rw [zero_add] -- | x
```
where `ext` is the navigation command entering inside the `fun` binder (the `x` argument is
optional). Note that this example is somewhat artificial, one could also do:
```lean
example : (fun x : ℕ => 0 + x) = (fun x => x) := by
  ext
  rw [zero_add]
```
All of this is also available for converting a hypothesis `H` in the local context by using
the syntax `conv at H => ...`.

Here are a few more ways to navigate expressions:
* `arg i` navigates to the `i`th explicit argument. It is like doing `congr` and the
  appropriate number of `rfl`s for all but the `i`th argument.
* `arg @i` navigates to the `i`th argument, counting both explicit and implicit arguments.
* `enter [...]` takes a list of arguments appropriate for `arg` or `ext` and runs the
  corresponding `arg` and `ext` commands.
  For example, `enter [1,@2,x,3]` is the same as `arg 1; arg @2; ext x; arg 3`.

## Pattern matching

Navigation using the above commands can be tedious.
One can shortcut it using pattern matching as follows: ```lean example (a b c : ℕ) : a * (b * c) = a * (c * b) := by conv in b * c => -- | b * c rw [mul_comm] -- | c * b ``` This `in` clause is short for ```lean example (a b c : ℕ) : a * (b * c) = a * (c * b) := by conv => -- | a * (b * c) = a * (c * b) pattern b * c -- | b * c rw [mul_comm] -- | c * b ``` As usual for `=>` block tactics, the body can be placed on a single line with tactics separated by semicolons. This yields a one-liner: ```lean example (a b c : ℕ) : a * (b * c) = a * (c * b) := by conv in b * c => rw [mul_comm] ``` Of course placeholders are allowed: ```lean example (a b c : ℕ) : a * (b * c) = a * (c * b) := by conv in _ * c => rw [mul_comm] ``` In all those cases, only the first match is affected. One can also specify which occurrences to convert using an `occs` clause, which creates goals for every matched occurrence. These can then all be handled at once using the `all_goals` combinator. The following performs rewriting only for the second and third occurrences of `b * c`: ```lean example (b c : ℕ) : (b * c) * (b * c) * (b * c) = (b * c) * (c * b) * (c * b) := by conv in (occs := 2 3) b * c => all_goals rw [mul_comm] ``` This can also be done using `pattern` and the `<;>` combinator, where, like in normal tactic mode, `t1 <;> t2` means to run `t1` and then run `t2` for every goal produced by it. ``` example (b c : ℕ) : (b * c) * (b * c) * (b * c) = (b * c) * (c * b) * (c * b) := by conv => pattern (occs := 2 3) b * c <;> rw [mul_comm] ``` ## Sub-conversions The `conv` tactic supports nested `conv` mode. This allows one to do a targeted rewrite using the power of `conv` mode and then return to the original position with the rewritten expression. 
```lean example (a b : ℕ) : a * b * (a * b) = b * a * (a * b) := by conv => -- | a * b * (a * b) = b * a * (a * b) conv => pattern (occs := 2) a * b; rw [mul_comm] -- | a * b * (b * a) = b * a * (a * b) rw [mul_comm] -- | b * a * (a * b) = b * a * (a * b) ``` ## Other tactics inside conversion mode Besides rewriting using `rw`, one can use `simp`, `dsimp`, `change`, `equals`, `ring`, `norm_num`, `push_neg`, `unfold`, among others. See the [`conv` guide](https://leanprover-community.github.io/mathlib4_docs/docs/Conv/Guide.html) for a more in-depth overview. -/
.lake/packages/mathlib/DownstreamTest/DownstreamTest.lean
import Mathlib
.lake/packages/mathlib/DownstreamTest/README.md
# DownstreamTest This directory is used for testing that a basic downstream project can be built using mathlib, and that the mathlib cache works. There is no `lean-toolchain` file, because CI will copy it from the main repo during testing.
.lake/packages/mathlib/scripts/lint-style.lean
import Lake.CLI.Main import Lean.Elab.ParseImportsFast import Batteries.Data.String.Basic import Mathlib.Tactic.Linter.TextBased import Cli.Basic /-! # Text-based style linters This file defines the `lint-style` executable which runs all text-based style linters. The linters themselves are defined in `Mathlib/Tactic/Linter/TextBased.lean`. In addition, this checks that - `Mathlib.Init` is (transitively) imported in all of mathlib, and - every file in `scripts` is documented in its top-level README. -/ open Cli Lean.Linter Mathlib.Linter.TextBased System.FilePath /-- Additional imports generated by `mk_all`. -/ def explicitImports : Array Lean.Name := #[`Batteries, `Std] /-- Remove the additional imports generated by `mk_all` so that only mathlib modules remain. -/ def eraseExplicitImports (names : Array Lean.Name) : Array Lean.Name := explicitImports.foldl Array.erase names /-- Get the root package of the Lake workspace we are running in. -/ def getWorkspaceRoot : IO Lake.Package := do let (elanInstall?, leanInstall?, lakeInstall?) ← Lake.findInstall? let config ← Lake.MonadError.runEIO <| Lake.mkLoadConfig { elanInstall?, leanInstall?, lakeInstall? } let some workspace ← Lake.loadWorkspace config |>.toBaseIO | throw <| IO.userError "failed to load Lake workspace" return workspace.root section LinterSetsElab open Lean instance [ToExpr α] : ToExpr (NameMap α) where toExpr s := mkApp4 (.const ``Std.TreeMap.ofArray [.zero, .zero]) (toTypeExpr Name) (toTypeExpr α) (toExpr s.toArray) (.const ``Lean.Name.quickCmp []) toTypeExpr := .const ``LinterSets [] instance : ToExpr LinterSets := inferInstanceAs <| ToExpr (NameMap _) /-- Return the linter sets defined at this point of elaborating the current file. -/ elab "linter_sets%" : term => do return toExpr <| linterSetsExt.getState (← getEnv) end LinterSetsElab /-- Convert the options that Lake knows into the option that Lean knows. 
-/ def toLeanOptions (opts : Lean.LeanOptions) : Lean.Options := Id.run do let mut out := Lean.Options.empty for ⟨name, value⟩ in opts.values do -- Strip off the `weak.` prefix, like Lean does when parsing command line arguments. if name.getRoot == `weak then out := out.insert (name.replacePrefix `weak Lean.Name.anonymous) value.toDataValue else out := out.insert name value.toDataValue return out /-- Determine the `Lean.Options` from the Lakefile of the current project. We have to do this since style linters do not run in the `CoreM`/`CommandElabM` monads, and so they do not get access to the options in scope. Please do not confuse this with the Lean options at the moment that `lint-style` was compiled. -/ def getLakefileLeanOptions : IO Lean.Options := do let root ← getWorkspaceRoot -- Some projects declare options in the root package. let rootOpts := root.leanOptions -- Other projects, like Mathlib, declare options in the targets. -- Here we use the default targets, since that probably contains the modules we'll be linting. let defaultOpts := root.defaultTargets.flatMap fun target ↦ if let some lib := root.findLeanLib? target then lib.config.leanOptions else if let some exe := root.findLeanExe? target then exe.config.leanOptions else #[] return toLeanOptions (rootOpts.appendArray defaultOpts) /-- Check that `Mathlib.Init` is transitively imported in all of Mathlib -/ register_option linter.checkInitImports : Bool := { defValue := false } /-- Check that `Mathlib.Init` is transitively imported in all of Mathlib. Moreover, every file imported in `Mathlib.Init` should in turn import the `Header` linter (except for the header linter itself, of course). Return the number of modules which violated one of these rules. -/ def missingInitImports (opts : LinterOptions) : IO Nat := do unless getLinterValue linter.checkInitImports opts do return 0 -- Find any file in the Mathlib directory which does not contain any Mathlib import. 
-- We simply parse `Mathlib.lean`, as CI ensures this file is up to date. let allModuleNames := eraseExplicitImports (← findImports "Mathlib.lean") let mut modulesWithoutMathlibImports := #[] let mut importsHeaderLinter := #[] for module in allModuleNames do let path := System.mkFilePath (module.components.map fun n ↦ n.toString)|>.addExtension "lean" let imports ← findImports path let hasNoMathlibImport := imports.all fun name ↦ name.getRoot != `Mathlib if hasNoMathlibImport then modulesWithoutMathlibImports := modulesWithoutMathlibImports.push module if imports.contains `Mathlib.Tactic.Linter.Header then importsHeaderLinter := importsHeaderLinter.push module -- Every file importing the `header` linter should be imported in `Mathlib/Init.lean` itself. -- (Downstream files should import `Mathlib.Init` and not the header linter.) -- The only exception are auto-generated import-only files. let initImports ← findImports ("Mathlib" / "Init.lean") let mismatch := importsHeaderLinter.filter (fun mod ↦ ![`Mathlib, `Mathlib.Tactic, `Mathlib.Init].contains mod && !initImports.contains mod) -- These files are transitively imported by `Mathlib.Init`. |>.erase `Mathlib.Tactic.DeclarationNames |>.erase `Mathlib.Lean.Elab.Tactic.Meta |>.erase `Mathlib.Lean.ContextInfo |>.erase `Mathlib.Tactic.Linter.DirectoryDependency if mismatch.size > 0 then IO.eprintln s!"error: the following {mismatch.size} module(s) import the `header` linter \ directly, but should import Mathlib.Init instead: {mismatch}\n\ The `header` linter is included in Mathlib.Init, and every file in Mathlib \ should import Mathlib.Init.\nPlease adjust the imports accordingly." return mismatch.size -- Now, it only remains to check that every module (except for the Header linter itself) -- imports some file in Mathlib. let missing := modulesWithoutMathlibImports.erase `Mathlib.Tactic.Linter.Header -- This file is imported by `Mathlib/Tactic/Linter/Header.lean`. 
|>.erase `Mathlib.Tactic.Linter.DirectoryDependency if missing.size > 0 then IO.eprintln s!"error: the following {missing.size} module(s) do not import Mathlib.Init: \ {missing}" return missing.size return 0 /-- Verify that every file in the `scripts` directory is documented in `scripts/README.md` -/ register_option linter.allScriptsDocumented : Bool := { defValue := false } /-- Verifies that every file in the `scripts` directory is documented in `scripts/README.md`. Return the number of undocumented scripts. -/ def undocumentedScripts (opts : LinterOptions) : IO Nat := do unless getLinterValue linter.allScriptsDocumented opts do return 0 -- Retrieve all top-level entries in scripts directory (not recursive). let entries ← System.FilePath.readDir "scripts" let allScripts := entries.filterMap fun entry ↦ -- Skip the bench directory and README if entry.fileName == "bench" || entry.fileName == "README.md" then none else some entry.fileName -- Check if the README text contains each file enclosed in backticks. let readme : String ← IO.FS.readFile ("scripts" / "README.md") -- These are data files for linter exceptions: don't complain about these *for now*. let dataFiles := #["noshake.json", "nolints-style.txt"] let undocumented := allScripts.filter fun script ↦ !readme.containsSubstr s!"`{script}`" && !dataFiles.contains script if undocumented.size > 0 then IO.println s!"error: found {undocumented.size} undocumented script(s): \ please describe the script(s) in 'scripts/README.md'\n \ {String.intercalate "," undocumented.toList}" return undocumented.size /-- Implementation of the `lint-style` command line program. 
-/
def lintStyleCli (args : Cli.Parsed) : IO UInt32 := do
  let opts : LinterOptions := {
    toOptions := ← getLakefileLeanOptions,
    linterSets := linter_sets%,
  }
  -- `--github` produces machine-readable output for GitHub problem matchers.
  let style : ErrorFormat := match args.hasFlag "github" with
    | true => ErrorFormat.github
    | false => ErrorFormat.humanReadable
  let fix := args.hasFlag "fix"
  -- If no modules are specified, use the defaults from the Lakefile.
  let originModules ← match args.variableArgsAs! String with
    | #[] =>
      -- If none are specified, lint the default Lake modules.
      let (elanInstall?, leanInstall?, lakeInstall?) ← Lake.findInstall?
      let config ← Lake.MonadError.runEIO <|
        Lake.mkLoadConfig { elanInstall?, leanInstall?, lakeInstall? }
      let some workspace ← Lake.loadWorkspace config |>.toBaseIO
        | throw <| IO.userError "failed to load Lake workspace"
      pure <| workspace.root.defaultTargets.flatMap fun target =>
        if let some lib := workspace.root.findLeanLib? target then
          lib.roots
        else if let some exe := workspace.root.findLeanExe? target then
          #[exe.config.root]
        else
          #[]
    | mods => do
      let mut result := #[]
      for mod in mods do
        let modParse := Lean.ParseImports.moduleIdent mod {}
        match modParse.error? with
        | none => result := result.append <| modParse.imports.map Lean.Import.module
        | some err => throw <| IO.userError s!"could not parse module name {mod}: {err}"
      pure result
  -- Smoke tests for accidentally disabling all the linters again:
  -- require a nonempty set of modules that get linted.
  if originModules.isEmpty then
    throw <| IO.userError s!"lint-style: no modules to lint.\n\
      \n\
      Note: by default, we lint all the default `lake build` targets in the Lakefile.\n\
      \n\
      Hint: specify modules to lint as command line arguments to `lake exe lint-style`."
  -- ensure the header linter is active if we're linting Mathlib.
  if `Mathlib ∈ originModules then
    if !getLinterValue linter.checkInitImports opts then
      throw <| IO.userError s!"lint-style selftest failed: header linter is not enabled in Mathlib.\n\
        \n\
        Hint: in a project downstream of Mathlib, remove `Mathlib` as an argument to \
        `lake exe style`, or remove `Mathlib` from the default `lake build` targets.\n\
        \n\
        Hint: in Mathlib, check that the `linter_sets%` elaborator still works.\n\
        \n\
        Note: we want to make sure that we do not accidentally turn off all the linters, \
        since such a change would not be noticed in CI otherwise. The header linter is an \
        arbitrarily chosen important Mathlib style linter."
  -- Get all the imports, but only those in the same package.
  let pkgs := originModules.map (·.components.head!)
  Lean.initSearchPath (← Lean.findSysroot)
  let searchPath ← Lean.getSrcSearchPath
  let allModuleNames ← originModules.flatMapM fun mod => do
    let imports ← match ← searchPath.findWithExt "lean" mod with
      | some file => findImports file
      | none => throw <| IO.userError s!"could not find module with name {mod}"
    pure <| imports.filter (·.components.head! ∈ pkgs)
  -- Read the `nolints` file, with manual exceptions for the linter.
  -- NB. We pass these lints to `lintModules` explicitly to prevent cache invalidation bugs:
  -- if the text-based linter read the file itself, replaying a cached build of that
  -- file could re-use an outdated version of the nolints file.
  -- (For syntax linters, such a bug actually occurred in mathlib.)
  -- This script is re-run each time, hence is immune to such issues.
  let filename : System.FilePath := ("scripts" / "nolints-style.txt")
  let nolints ← try
      IO.FS.lines filename
    catch e =>
      -- Report the actual I/O error instead of a useless "(unknown)" placeholder,
      -- then fall back to an empty exception list.
      IO.eprintln s!"warning: nolints file could not be read; treating as empty: {e}"
      pure #[]
  let numberErrors := (← lintModules opts nolints allModuleNames style fix)
    + (← missingInitImports opts).toUInt32
    + (← undocumentedScripts opts).toUInt32
    + (← modulesNotUpperCamelCase opts allModuleNames).toUInt32
    + (← modulesOSForbidden opts allModuleNames).toUInt32
  -- If run with the `--fix` argument, return a zero exit code.
  -- Otherwise, make sure to return an exit code of at most 125,
  -- so this return value can be used further in shell scripts.
  if args.hasFlag "fix" then
    return 0
  else
    return min numberErrors 125

/-- Setting up command line options and help text for `lake exe lint-style`. -/
-- so far, no help options or so: perhaps that is fine?
def lintStyle : Cmd := `[Cli|
  «lint-style» VIA lintStyleCli; ["0.0.1"]
  "Run text-based style linters on every Lean file in specified modules. \
  Print errors about any unexpected style errors to standard output."

  FLAGS:
    github; "Print errors in a format suitable for github problem matchers\n\
      otherwise, produce human-readable output"
    fix; "Automatically fix the style error, if possible"

  ARGS:
    ...modules : String; "Which modules, and their imports, will be linted.\n\
      If no modules are specified, the linter runs on the default Lake module(s)."
  ]

/-- The entry point to the `lake exe lint-style` command. -/
def main (args : List String) : IO UInt32 := do
  lintStyle.validate args
.lake/packages/mathlib/scripts/mk_all.lean
import Cli.Basic
import Lake.CLI.Main
import Mathlib.Util.GetAllModules

-- The `style.header` linter flags `import Lake.CLI.Main` as a potential performance issue.
set_option linter.style.header false

/-!
# Script to create a file importing all files from a folder

This file declares a command to gather all Lean files from a folder into a single Lean file.
-/

open Lean System.FilePath

open Lake in
/-- `getLeanLibs` returns the names (as an `Array` of `String`s) of all the libraries
on which the current project depends.
If the current project is `mathlib`, then it excludes the libraries `Cache` and `LongestPole`
and it includes `Mathlib/Tactic`. -/
def getLeanLibs : IO (Array String) := do
  let (elanInstall?, leanInstall?, lakeInstall?) ← findInstall?
  let config ← MonadError.runEIO <| mkLoadConfig { elanInstall?, leanInstall?, lakeInstall? }
  let some ws ← loadWorkspace config |>.toBaseIO
    | throw <| IO.userError "failed to load Lake workspace"
  let package := ws.root
  let libs := (package.leanLibs.map (·.name)).map (·.toString)
  return if package.name == `mathlib then
    -- Mathlib-specific adjustments: drop internal libraries, add the `Mathlib/Tactic` folder.
    libs.erase "Cache" |>.erase "LongestPole" |>.erase "MathlibTest"
      |>.push ("Mathlib".push pathSeparator ++ "Tactic")
  else
    libs

open IO.FS IO.Process Name Cli in
/-- Implementation of the `mk_all` command line program.
The exit code is the number of files that the command updates/creates. -/
def mkAllCLI (args : Parsed) : IO UInt32 := do
  -- Check whether the `--git` flag was set
  let git := (args.flag? "git").isSome
  -- Check whether we only verify the files, or update them in-place.
  let check := (args.flag? "check").isSome
  -- Check whether the `--lib` flag was set. If so, build the file corresponding to the library
  -- passed to `--lib`. Else build all the libraries of the package.
  -- If the package is `mathlib`, then it removes the libraries `Cache` and `LongestPole` and it
  -- adds `Mathlib/Tactic`.
  let libs := ← match args.flag? "lib" with
              | some lib => return #[lib.as! String]
              | none => getLeanLibs
  let mut updates := 0
  for d in libs.reverse do  -- reverse to create `Mathlib/Tactic.lean` before `Mathlib.lean`
    let fileName := addExtension d "lean"
    let mut allFiles ← getAllModulesSorted git d
    -- mathlib exception: manually import Std and Batteries in `Mathlib.lean`
    if d == "Mathlib" then
      allFiles := #["Std", "Batteries"] ++ allFiles
    let fileContent := ("\n".intercalate (allFiles.map ("import " ++ ·)).toList).push '\n'
    if !(← pathExists fileName) then
      if check then
        IO.println s!"File '{fileName}' does not exist"
      else
        IO.println s!"Creating '{fileName}'"
        IO.FS.writeFile fileName fileContent
      updates := updates + 1
    else if (← IO.FS.readFile fileName) != fileContent then
      if check then
        IO.println s!"The file '{fileName}' is out of date: \
          run `lake exe mk_all{if git then " --git" else ""}` to update it"
      else
        IO.println s!"Updating '{fileName}'"
        IO.FS.writeFile fileName fileContent
      updates := updates + 1
  if updates == 0 then
    IO.println "No update necessary"
  -- Make sure to return an exit code of at most 125, so this return value can be used further
  -- in shell scripts.
  return min updates 125

open Cli in
/-- Setting up command line options and help text for `lake exe mk_all`. -/
-- Note: the string gaps (`\` at end of line) swallow the newline *and* the next line's leading
-- whitespace, so each continued line must end with an explicit space before the `\`.
def mkAll : Cmd := `[Cli|
  mk_all VIA mkAllCLI; ["0.0.1"]
  "Generate a file importing all the files of a Lean folder. \
  By default, it generates the files for the Lean libraries of the package. \
  In the case of `Mathlib`, it removes the libraries `Cache` and `LongestPole` \
  and it adds `Mathlib/Tactic`. \
  If you are working in a project downstream of mathlib, use `lake exe mk_all --lib MyProject`."

  FLAGS:
    lib : String; "Create a folder importing all Lean files from the specified library/subfolder."
    git;          "Use the folder content information from git."
    check;        "Only check if the files are up-to-date; print an error if not"
  ]

/-- The entrypoint to the `lake exe mk_all` command. -/
def main (args : List String) : IO UInt32 := mkAll.validate args
.lake/packages/mathlib/scripts/nolints-style.txt
-- Manual exceptions for the text-based linters. -- The entries in this file could be necessary in the long term. -- The linter for the string "adaptation note" fires in the implementation of the linter, -- and in the implementation of the #adaptation_note tactic: this is as expected. Mathlib/Tactic/AdaptationNote.lean : line 9 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/AdaptationNote.lean : line 12 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/AdaptationNote.lean : line 21 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/AdaptationNote.lean : line 27 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/AdaptationNote.lean : line 39 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/AdaptationNote.lean : line 52 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/Linter/TextBased.lean : line 20 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/Linter/TextBased.lean : line 49 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/Linter/TextBased.lean : line 84 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/Linter/TextBased.lean : line 274 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/Linter/TextBased.lean : line 279 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command instead Mathlib/Tactic/Linter/TextBased.lean : line 280 : ERR_ADN : Found the string "Adaptation note:", please use the #adaptation_note command 
instead
.lake/packages/mathlib/scripts/check-yaml.lean
import Mathlib.Lean.CoreM
import Mathlib.Tactic.ToExpr

/-!
# Script to check `undergrad.yaml`, `overview.yaml`, `100.yaml` and `1000.yaml`

This assumes `yaml_check.py` has first translated these to `json` files.
It verifies that the referenced declarations exist, and prints an error otherwise.
-/

open IO.FS Lean Lean.Elab
open Lean Core Elab Command

/-- One database file: an array of human-readable keys paired with declaration names. -/
abbrev DBFile := Array (String × Name)

/-- Read the file at `path` and decode it as JSON into an `α`,
turning any parse or decode failure into an `IO.userError`. -/
def readJsonFile (α) [FromJson α] (path : System.FilePath) : IO α := do
  let _ : MonadExceptOf String IO := ⟨throw ∘ IO.userError, fun x _ => x⟩
  let raw ← IO.FS.readFile path
  let parsed ← liftExcept <| Json.parse raw
  liftExcept <| fromJson? parsed

/-- The basenames of the database files to be checked. -/
def databases : List String := ["undergrad", "overview", "100", "1000"]

/-- Check one database against the environment's declarations,
printing every entry whose declaration does not exist.
Returns `true` iff at least one referenced declaration is missing. -/
def processDb (decls : ConstMap) : String → IO Bool
  | file => do
    let entries ← readJsonFile DBFile s!"{file}.json"
    let dangling := entries.filter fun entry ↦ !decls.contains entry.2
    if dangling.size == 0 then
      return false
    IO.println s!"Entries in `docs/{file}.yaml` refer to {dangling.size} declaration(s) that don't exist. \
      Please correct the following:"
    for (key, declName) in dangling do
      IO.println s!"  {key}: {declName}"
    IO.println ""
    return true

/-- Load `Mathlib` and `Archive`, check every database, and exit with code 1 on any failure. -/
unsafe def main : IO Unit := do
  let searchPath ← addSearchPathFromEnv (← getBuiltinSearchPath (← findSysroot))
  CoreM.withImportModules #[`Mathlib, `Archive] (searchPath := searchPath) (trustLevel := 1024) do
    let decls := (← getEnv).constants
    let failures ← databases.mapM (processDb decls ·)
    if failures.contains true then
      IO.Process.exit 1
.lake/packages/mathlib/scripts/README.md
# Miscellaneous scripts for working on mathlib This directory contains miscellaneous scripts that are useful for working on or with mathlib. When adding a new script, please make sure to document it here, so other readers have a chance to learn about it as well! ## Current scripts and their purpose **Installation scripts** - `install_debian.sh`, `install_macos.sh` Installation scripts referenced from the leanprover community install pages. https://leanprover-community.github.io/install/debian.html https://leanprover-community.github.io/install/macos.html If these web pages are deprecated or removed, we should remove these scripts. **Repository analysis and reporting** - `user_activity_report.py` Generates a comprehensive report of all users with repository access and their last commit activity. Shows username, age of last commit, and access level, sorted by commit recency (most recent first). **Features:** - Fetches repository collaborators and organization members via GitHub API - Intelligent caching: user lists (24h TTL) and commit data (6h TTL) for performance - Access level filtering: `--admin` (admin users only), `--write` (write+ access) - Single user analysis: `--user USERNAME` for debugging specific users - Result limiting: `--limit N` for testing with smaller datasets - Inactive user cleanup: `--remove N` generates (but doesn't execute) gh commands to remove write access from non-admin users inactive for more than N days - Fallback to contributors API if collaborators access is restricted (`--contributors-only`) **Caching:** Results cached in `scripts/users_cache.json` and `scripts/commits_cache.json` (automatically added to .gitignore). Cache saved after each commit lookup to prevent data loss. **Requirements:** `gh` (GitHub CLI) installed and authenticated (`gh auth login`). **Tools for manual maintenance** - `fix_unused.py` Bulk processing of unused variable warnings, replacing them with `_`. 
- `add_deprecations.sh` is a text-based script that automatically adds deprecation statements. It assumes that the only difference between master and the current status of the PR consists of renames. More precisely, any change on a line that contains a declaration name and is not a rename, will likely confuse the script. - `create_deprecated_modules.lean` defines the `#create_deprecated_modules` command that automatically generates the `deprecated_module` entries, gathering information from `git`. The expectation is that this will be expanded to a fully automated process that happens in CI. - `migrate_to_fork.py` Helps contributors migrate from having direct write access to the main repository to using a fork-based workflow. This comprehensive script automates the entire migration process: * Validates the current branch (prevents migration of system branches like master, nightly-testing) * Checks GitHub CLI installation/authentication with OS-specific installation instructions * Creates or syncs a fork of mathlib4 automatically * Sets up git remotes correctly (`upstream` for leanprover-community/mathlib4, `origin` for user's fork) * Detects already-completed migration steps and skips them for efficiency * Migrates the current branch to the fork with proper upstream tracking * Intelligently handles existing PRs (migrates main repo PRs to fork-based PRs, detects existing fork PRs) * Uses fast delete/re-add approach for remote operations to avoid slow branch tracking updates * Provides comprehensive status reporting and next steps guidance Run with `python3 scripts/migrate_to_fork.py` (interactive) or `python3 scripts/migrate_to_fork.py -y` (auto-accept). Requires GitHub CLI (`gh`) installed and authenticated. Safe to run multiple times. - `githelper.py` The subcommand `githelper.py fix` helps contributors fix their git repository setup by step-by-step converting it from its current state to a well-defined target state. 
The target state mostly matches the state of a freshly cloned fork (`gh repo clone <fork>`)
  and looks like this:
  - The remote `upstream` points to `leanprover-community/mathlib4`
  - The remote `origin` points to the contributor's own fork
  - The `gh` default repo points to `leanprover-community/mathlib4`
  - `master`'s remote is `upstream` but its pushRemote is `origin`

  Other subcommands to automate git-related actions may be added in the future.

**Analyzing Mathlib's import structure**

- `unused_in_pole.sh` (followed by an optional `<target>`, defaulting to `Mathlib`) calls
  `lake exe pole --loc --to <target>` to compute the longest pole to a given target module,
  and then feeds this into `lake exe unused` to analyze transitively unused imports.
  Generates `unused.md` containing a markdown table showing the unused imports,
  and suggests `lake exe graph` commands to visualize the largest "rectangles" of unused imports.

**CI workflow**

- `lake-build-with-retry.sh` Runs `lake build` on a target until `lake build --no-build` succeeds.
  Used in the main build workflows.
- `lake-build-wrapper.py` A wrapper script for `lake build` which collapses normal build into
  log groups and saves a build summary JSON file. See file for usage.
- `mk_all.lean` run via `lake exe mk_all`, regenerates the import-only files `Mathlib.lean`,
  `Mathlib/Tactic.lean`, `Archive.lean` and `Counterexamples.lean`
- `lint-style.lean`, `lint-style.py`, `print-style-errors.sh` style linters, written in Python
  and Lean. Run via `lake exe lint-style`. Medium-term, the latter two scripts should be
  rewritten and incorporated in `lint-style.lean`.
- `lint-bib.sh` normalize the BibTeX file `docs/references.bib` using `bibtool`.
- `yaml_check.py`, `check-yaml.lean` Sanity checks for `undergrad.yaml`, `overview.yaml`,
  `100.yaml` and `1000.yaml`.
- `lean-pr-testing-comments.sh` Generate comments and labels on a Lean or Batteries PR after
  CI has finished on a `*-pr-testing-NNNN` branch.
- `assign_reviewers.py` is used to automatically assign a reviewer to each stale github PR on the review queue. This script downloads a .json file with proposed assignments and makes the corresponding github API calls. - `bench_summary.lean` Convert data retrieved from the speed center into a shorter, more accessible format, and post a comment with this summary on github. - `declarations_diff.sh` Attempts to find which declarations have been removed and which have been added in the current PR with respect to `master`, and posts a comment on github with the result. - `autolabel.lean` is the Lean script in charge of automatically adding a `t-`label on eligible PRs. Autolabelling is inferred by which directories the current PR modifies. **Managing nightly-testing and bump branches** - `create-adaptation-pr.sh` implements some of the steps in the workflow described at https://leanprover-community.github.io/contribute/tags_and_branches.html#mathlib-nightly-and-bump-branches Specifically, it will: - merge `master` into `bump/v4.x.y` - create a new branch from `bump/v4.x.y`, called `bump/nightly-YYYY-MM-DD` - merge `nightly-testing` into the new branch - open a PR to merge the new branch back into `bump/v4.x.y` - announce the PR on zulip - finally, merge the new branch back into `nightly-testing`, if conflict resolution was required. If there are merge conflicts, it pauses and asks for help from the human driver. - `merge-lean-testing-pr.sh` takes a PR number `NNNN` as argument, and attempts to merge the branch `lean-pr-testing-NNNN` into `master`. It will resolve conflicts in `lean-toolchain`, `lakefile.lean`, and `lake-manifest.json`. If there are more conflicts, it will bail. - `zulip_build_report.sh` is used to analyse the output from building the nightly-testing-green branch with additional linting enabled, and posts a summary of its findings on zulip. 
**Managing downstream repos** - `downstream_repos.yml` contains basic information about significant downstream repositories. - `downstream-tags.py` is a script to check whether a given tag exists on the downstream repositories listed in `downstream_repos.yml`. - `downstream_dashboard.py` inspects the CI infrastructure of each repository in `downstream_repos.yml` and makes actionable suggestions for improvement or automation. **Managing and tracking technical debt** - `technical-debt-metrics.sh` Prints information on certain kind of technical debt in Mathlib. This output is automatically posted to zulip once a week. - `long_file_report.sh` Prints the list of the 10 longest Lean files in `Mathlib`. This output is automatically posted to zulip once a week. **Data files with linter exceptions** - `nolints.json` contains exceptions for all `env_linter`s in mathlib. For permanent and deliberate exceptions, add a `@[nolint lintername]` in the .lean file instead. - `nolints_prime_decls.txt` contains temporary exceptions for the `docPrime` linter Both of these files should tend to zero over time; please do not add new entries to these files. PRs removing (the need for) entries are welcome. **API surrounding CI** - `parse_lake_manifest_changes.py` compares two versions of `lake-manifest.json` to report dependency changes in Zulip notifications. Used by the `update_dependencies_zulip.yml` workflow to show which dependencies were updated, added, or removed, with links to GitHub diffs. - `update_PR_comment.sh` is a script that edits an existing message (or creates a new one). It is used by the `PR_summary` workflow to maintain an up-to-date report with a searchable history. - `get_tlabel.sh` extracts the `t-`label that a PR has (assuming that there is exactly one). It is used by the `maintainer_merge` family of workflows to dispatch `maintainer merge` requests to the appropriate topic on zulip. 
- `count-trans-deps.py`, `import-graph-report.py` and `import_trans_difference.sh` produce
  various summaries of changes in transitive imports that the `PR_summary` message incorporates.
- `zulip_emoji_reactions.py` is called
  * every time a `bors d`, `bors merge` or `bors r` comment is added to a PR,
  * whenever bors merges a PR,
  * whenever a PR is closed or reopened
  * whenever a PR is labelled or unlabelled with `awaiting-author` or `maintainer-merge`

  It looks through all zulip posts containing a reference to the relevant PR and will post or
  update an emoji reaction corresponding to the current PR state to the message.
  This reaction is ✌️ (`:peace_sign:`) for delegated, `:bors:` for PRs sent to bors,
  `:merge:` for merged PRs, ✍️ (`:writing:`) for PRs awaiting-author,
  🔨 (`:hammer:`) for maintainer-merged PRs and `:closed-pr:` for closed PRs.
  PRs which were migrated to a fork (as indicated by the `migrated-to-fork` label) additionally
  receive a reaction ⏭️ (`:skip_forward:`). Two of these are custom emojis configured on zulip.
- `late_importers.sh` is the main script used by the `latest_import.yml` action: it formats the
  `linter.minImports` output, summarizing the data in a table. See the module docs of
  `late_importers.sh` for further details.
- `maintainer_merge_message.sh` contains a shell script that produces the Zulip message for a
  `maintainer merge`/`maintainer delegate` comment.

**Docker images**

- `docker_build.sh` builds the `lean4`, `gitpod4`, and `gitpod4-blueprint` Docker images.
  These are used by some CI workflows, as well as places such as Gitpod.
- `docker_push.sh` first runs `docker_build.sh`, and then pushes the images to Docker Hub,
  appropriately tagged with the date on which the images were built.
  This should be re-run after breaking changes to `elan`, so that CI and Gitpod have access
  to updated versions of `elan`.
.lake/packages/mathlib/scripts/autolabel.lean
import Lean.Elab.Command /-! # Automatic labelling of PRs This file contains the script to automatically assign a GitHub label to a PR. ## Label definition The mapping from GitHub labels to Mathlib folders is done in this file and needs to be updated here if necessary: * `AutoLabel.mathlibLabels` contains an assignment of GitHub labels to folders inside the mathlib repository. If no folder is specified, a label like `t-set-theory` will be interpreted as matching the folder `"Mathlib" / "SetTheory"`. * `AutoLabel.mathlibUnlabelled` contains subfolders of `Mathlib/` which are deliberately left without topic label. ## lake exe autolabel `lake exe autolabel` uses `git diff --name-only origin/master...HEAD` to determine which files have been modified and then finds all labels which should be added based on these changes. These are printed for testing purposes. `lake exe autolabel [NUMBER]` will further try to add the applicable labels to the PR specified. This requires the **GitHub CLI** `gh` to be installed! Example: `lake exe autolabel 10402` for PR https://github.com/leanprover-community/mathlib4/pull/10402. For the time being, the script only adds a label if it finds a **single unique label** which would apply. If multiple labels are found, nothing happens. ## Workflow There is a mathlib workflow `.github/workflows/add_label_from_diff.yaml` which executes this script automatically. Currently it is set to run only one time when a PR is created. ## Tests Additionally, the script does a few consistency checks: - it ensures all paths in specified in `AutoLabel.mathlibLabels` exist - It makes sure all subfolders of `Mathlib/` belong to at least one label. There is `AutoLabel.mathlibUnlabelled` to add exceptions for this test. -/ open Lean System namespace AutoLabel /-- A `Label` consists of the * The `label` field is the actual GitHub label name. 
* The `dirs` field is the array of all "root paths" such that a modification in a file contained in one of these paths should be labelled with `label`. * The `exclusions` field is the array of all "root paths" that are excluded, among the ones that start with the ones in `dirs`. Any modifications to a file in an excluded path is ignored for the purposes of labelling. -/ structure Label where /-- The label name as it appears on GitHub -/ label : String /-- Array of paths which fall under this label. e.g. `"Mathlib" / "Algebra"`. For a label of the form `t-set-theory` this defaults to `#["Mathlib" / "SetTheory"]`. -/ dirs : Array FilePath := if label.startsWith "t-" then #["Mathlib" / ("".intercalate (label.splitOn "-" |>.drop 1 |>.map .capitalize))] else #[] /-- Array of paths which should be excluded. Any modifications to a file in an excluded path are ignored for the purposes of labelling. -/ exclusions : Array FilePath := #[] deriving BEq, Hashable /-- Mathlib labels and their corresponding folders. Add new labels and folders here! 
-/ def mathlibLabels : Array Label := #[ { label := "t-algebra", dirs := #[ "Mathlib" / "Algebra", "Mathlib" / "FieldTheory", "Mathlib" / "RepresentationTheory", "Mathlib" / "LinearAlgebra"] }, { label := "t-algebraic-geometry", dirs := #[ "Mathlib" / "AlgebraicGeometry", "Mathlib" / "Geometry" / "RingedSpace"] }, { label := "t-algebraic-topology", dirs := #["Mathlib" / "AlgebraicTopology"] }, { label := "t-analysis" }, { label := "t-category-theory" }, { label := "t-combinatorics" }, { label := "t-computability" }, { label := "t-condensed" }, { label := "t-convex-geometry", dirs := #["Mathlib" / "Geometry" / "Convex"] }, { label := "t-data" dirs := #[ "Mathlib" / "Control", "Mathlib" / "Data",] }, { label := "t-differential-geometry", dirs := #["Mathlib" / "Geometry" / "Manifold"] }, { label := "t-dynamics" }, { label := "t-euclidean-geometry", dirs := #["Mathlib" / "Geometry" / "Euclidean"] }, { label := "t-geometric-group-theory", dirs := #["Mathlib" / "Geometry" / "Group"] }, { label := "t-group-theory", dirs := #["Mathlib" / "GroupTheory"] }, { label := "t-linter", dirs := #[ "Mathlib" / "Tactic" / "Linter", "scripts" / "lint-style.lean", "scripts" / "lint-style.py", ] }, { label := "t-logic", dirs := #[ "Mathlib" / "Logic", "Mathlib" / "ModelTheory"] }, { label := "t-measure-probability", dirs := #[ "Mathlib" / "MeasureTheory", "Mathlib" / "Probability", "Mathlib" / "InformationTheory"] }, { label := "t-meta", dirs := #[ "Mathlib" / "Lean", "Mathlib" / "Mathport", "Mathlib" / "Tactic", "Mathlib" / "Util"], exclusions := #["Mathlib" / "Tactic" / "Linter"] }, { label := "t-number-theory" }, { label := "t-order" }, { label := "t-ring-theory", dirs := #["Mathlib" / "RingTheory"] }, { label := "t-set-theory" }, { label := "t-topology", dirs := #["Mathlib" / "Topology"] }, { label := "CI", dirs := #[ ".github", "scripts" /"bench", "scripts", ], exclusions := #[ "scripts" / "lint-style.lean", "scripts" / "lint-style.py", "scripts" / "noshake.json", "scripts" / 
-- Tail of the `mathlibLabels` array (its opening entries lie above this chunk):
-- the remaining exclusions of the previous entry, plus the "IMO" and
-- "dependency-bump" labels.
    "nolints.json",
    "scripts" / "nolints-style.txt",
    "scripts" / "nolints_prime_decls.txt", ] },
  { label := "IMO",
    dirs := #["Archive" / "Imo"] },
  { label := "dependency-bump",
    dirs := #["lake-manifest.json"] }
]

/-- Exceptions inside `Mathlib/` which are not covered by any label. -/
def mathlibUnlabelled : Array FilePath := #[
    "Mathlib" / "Deprecated",
    "Mathlib" / "Init",
    "Mathlib" / "Testing",
    "Mathlib" / "Std" ]

/-- Checks if the folder `path` lies inside the folder `dir`. -/
def _root_.System.FilePath.isPrefixOf (dir path : FilePath) : Bool :=
  -- use `dir / ""` to prevent partial matching of folder names
  (dir / "").normalize.toString.isPrefixOf (path / "").normalize.toString

/--
Return all names of labels in `mathlibLabels` which match
at least one of the `files`.

* `files`: array of relative paths starting from the mathlib root directory.
-/
def getMatchingLabels (files : Array FilePath) : Array String :=
  let applicable := mathlibLabels.filter fun label ↦
    -- first exclude all files the label excludes,
    -- then see if any file remains included by the label
    let notExcludedFiles := files.filter fun file ↦
      label.exclusions.all (!·.isPrefixOf file)
    label.dirs.any (fun dir ↦ notExcludedFiles.any (dir.isPrefixOf ·))
  -- return sorted list of label names
  applicable.map (·.label) |>.qsort (· < ·)

/-!
Testing the functionality of the declarations defined in this script
-/

section Tests

-- Test `FilePath.isPrefixOf`
#guard ("Mathlib" / "Algebra" : FilePath).isPrefixOf ("Mathlib" / "Algebra" / "Basic.lean")

-- Test `FilePath.isPrefixOf` does not trigger on partial prefixes
#guard ! ("Mathlib" / "Algebra" : FilePath).isPrefixOf ("Mathlib" / "AlgebraicGeometry")

#guard getMatchingLabels #[] == #[]
-- Test default value for `label.dirs` works
#guard getMatchingLabels #["Mathlib" / "SetTheory" / "ZFC"] == #["t-set-theory"]

-- Test exclusion
#guard getMatchingLabels #["Mathlib" / "Tactic"/ "Abel.lean"] == #["t-meta"]
#guard getMatchingLabels #["Mathlib" / "Tactic"/ "Linter" / "Lint.lean"] == #["t-linter"]
#guard getMatchingLabels #[
  "Mathlib" / "Tactic"/ "Linter" / "Lint.lean",
  "Mathlib" / "Tactic" / "Abel.lean" ] == #["t-linter", "t-meta"]

-- Test targeting a file instead of a directory
#guard getMatchingLabels #["lake-manifest.json"] == #["dependency-bump"]

-- Test linting of specific changes touching linting and CI.
#guard getMatchingLabels #["scripts" / "add_deprecations.sh"] == #["CI"]
#guard getMatchingLabels #["scripts" / "lint-style.lean"] == #["t-linter"]
#guard getMatchingLabels #["Mathlib" / "Tactic" / "Linter" / "TextBased.lean",
  "scripts" / "lint-style.lean",
  "scripts" / "lint-style.py"] == #["t-linter"]
#guard getMatchingLabels #["scripts" / "noshake.json"] == #[]

/-- Testing function to ensure the labels defined in `mathlibLabels` cover all
subfolders of `Mathlib/`. -/
partial def findUncoveredPaths (path : FilePath) (exceptions : Array FilePath := #[]) :
    IO <| Array FilePath := do
  let mut notMatched : Array FilePath := #[]
  -- all directories inside `path`
  let subDirs ← (← path.readDir).map (·.path) |>.filterM (do FilePath.isDir ·)
  for dir in subDirs do
    -- if the sub directory is not matched by a label,
    -- we go recursively into it
    if (getMatchingLabels #[dir]).size == 0 then
      notMatched := notMatched ++ (← findUncoveredPaths dir exceptions)
  -- a directory should be flagged if none of its sub-directories is matched by a label
  -- note: we assume here the base directory, i.e. "Mathlib" is never matched by a label,
  -- therefore we skip this test.
  if notMatched.size == subDirs.size then
    if exceptions.contains path then
      return #[]
    else
      return #[path]
  else
    return notMatched

end Tests

/--
Create a message which GitHub CI parses as annotation and displays at the specified file.

Note: `file` is duplicated below so that it is also visible in the plain text output.

* `type`: "error" or "warning"
* `file`: file where the annotation should be displayed
* `title`: title of the annotation
* `message`: annotation message
-/
def githubAnnotation (type file title message : String) : String :=
  s!"::{type} file={file},title={title}::{file}: {message}"

end AutoLabel

open IO AutoLabel in

/-- `args` is expected to have length 0 or 1, where the first argument is the PR number.

If a PR number is provided, the script requires GitHub CLI `gh` to be installed in order
to add the label to the PR.

## Exit codes:

- `0`: success
- `1`: invalid arguments provided
- `2`: invalid labels defined
- `3`: ~labels do not cover all of `Mathlib/`~ (unused; only emitting warning)
-/
unsafe def main (args : List String): IO UInt32 := do
  if args.length > 1 then
    println s!"::error:: autolabel: invalid number of arguments ({args.length}), \
      expected at most 1. Please run without arguments or provide the target PR's \
      number as a single argument!"
    return 1
  -- `args[0]?` is `none` when the script is run without arguments (dry-run mode).
  let prNumber? := args[0]?

  -- test: validate that all paths in `mathlibLabels` actually exist
  let mut valid := true
  for label in mathlibLabels do
    for dir in label.dirs do
      unless ← FilePath.pathExists dir do
        -- print github annotation error
        println <| AutoLabel.githubAnnotation "error" "scripts/autolabel.lean"
          s!"Misformatted `{ ``AutoLabel.mathlibLabels }`"
          s!"directory '{dir}' does not exist but is included by label '{label.label}'. \
            Please update `{ ``AutoLabel.mathlibLabels }`!"
        valid := false
    for dir in label.exclusions do
      unless ← FilePath.pathExists dir do
        -- print github annotation error
        println <| AutoLabel.githubAnnotation "error" "scripts/autolabel.lean"
          s!"Misformatted `{ ``AutoLabel.mathlibLabels }`"
          s!"directory '{dir}' does not exist but is excluded by label '{label.label}'. \
            Please update `{ ``AutoLabel.mathlibLabels }`!"
        valid := false
  unless valid do
    return 2

  -- test: validate that the labels cover all of the `Mathlib/` folder
  let notMatchedPaths ← findUncoveredPaths "Mathlib" (exceptions := mathlibUnlabelled)
  if notMatchedPaths.size > 0 then
    -- print github annotation warning
    -- note: only emitting a warning because the workflow is only triggered on the first commit
    -- of a PR and could therefore lead to unexpected behaviour if a folder was created later.
    println <| AutoLabel.githubAnnotation "warning" "scripts/autolabel.lean"
      s!"Incomplete `{ ``AutoLabel.mathlibLabels }`"
      s!"the following paths inside `Mathlib/` are not covered \
        by any label: {notMatchedPaths} Please modify `AutoLabel.mathlibLabels` accordingly!"
    -- return 3

  -- get the modified files
  println "Computing 'git diff --name-only origin/master...HEAD'"
  let gitDiff ← IO.Process.run {
    cmd := "git",
    args := #["diff", "--name-only", "origin/master...HEAD"] }
  println s!"---\n{gitDiff}\n---"
  let modifiedFiles : Array FilePath := (gitDiff.splitOn "\n").toArray.map (⟨·⟩)

  -- find labels covering the modified files
  let labels := getMatchingLabels modifiedFiles

  println s!"::notice::Applicable labels: {labels}"

  -- A label is only added when exactly one label matches; zero or multiple
  -- matches are merely reported via workflow annotations.
  match labels with
  | #[] =>
    println s!"::warning::no label to add"
  | #[label] =>
    match prNumber? with
    | some n =>
      let labelsPresent ← IO.Process.run {
        cmd := "gh"
        args := #["pr", "view", n, "--json", "labels", "--jq", ".labels .[] .name"]}
      let labels := labelsPresent.split (· == '\n')
      let autoLabels := mathlibLabels.map (·.label)
      match labels.filter autoLabels.contains with
      | [] =>
        -- if the PR does not have a label that this script could add, then we add a label
        let _ ← IO.Process.run {
          cmd := "gh",
          args := #["pr", "edit", n, "--add-label", label] }
        println s!"::notice::added label: {label}"
      | t_labels_already_present =>
        println s!"::notice::Did not add label '{label}', since {t_labels_already_present} \
          were already present"
    | none =>
      println s!"::warning::no PR-number provided, not adding labels. \
        (call `lake exe autolabel 150602` to add the labels to PR `150602`)"
  | _ =>
    println s!"::notice::not adding multiple labels: {labels}"
  return 0
.lake/packages/mathlib/scripts/bench_summary.lean
import Lean.Elab.Command

/-!
# Summary of `!bench` results

This file contains a script that converts data retrieved from the speed-center into a
shorter, more accessible format, and post a comment with this summary on github.
-/

namespace BenchAction

open Lean

/-- `Bench` is a structure with the data used to compute the `!bench` summary.
It contains
* a string `file` (that could be `build`, `lint` or `~Mathlib.Path.To.File`);
* an integer `diff` representing the change in number of instructions for `file`;
* a float `reldiff` representing the percentage change in number of instructions for `file`.
-/
structure Bench where
  file    : String
  diff    : Int
  reldiff : Float
  deriving FromJson, ToJson, Inhabited

/-- `intDecs z exp prec` is a "generic" formatting of an integer `z`.
It writes `z` in the form `x.y * 10 ^ exp` (for non-negative integers `x`, `y` and `z`),
such that `y` has `prec` digits and returns
* the sign of `z` as a string (in fact, just either `+` or `-`);
* the integer `x`;
* the natural number `y` (that has `prec` digits).
-/
def intDecs (z : Int) (exp : Nat := 9) (prec : Nat := 3) : String × Int × Nat :=
  -- normalize `z` to be non-negative, remembering its sign for the output string
  let sgn := z.sign
  let z := sgn * z
  let p10 : Int := 10 ^ (exp - prec)
  let idec := z / p10
  (if sgn < 0 then "-" else "+", idec / (10 ^ prec), (idec % 10 ^ prec).toNat)

/-- `formatDiff z` uses `intDecs` to format an integer `z` as `±x.y⬝10⁹`. -/
def formatDiff (z : Int) : String :=
  let (sgn, intDigs, decDigs) := intDecs z
  s!"{sgn}{intDigs}.{decDigs}⬝10⁹"

/-- Convert a `Float` into a formatted string of the form `±z.w%`. -/
def formatPercent (reldiff : Float) : String :=
  -- shift by `2` twice: once to get a percentage, again for two decimal digits of precision
  let reldiff := reldiff * 10 ^ 4
  let sgn : Int := if reldiff < 0 then -1 else 1
  let reldiff := (.ofInt sgn) * reldiff
  -- the scaled absolute value is converted to an integer via `UInt32`
  -- NOTE(review): values ≥ 2³² would wrap here — presumably out of range for percentages;
  -- confirm against the data fed in by `benchOutput`.
  let (sgn, intDigs, decDigs) := intDecs (sgn * reldiff.toUInt32.toFin) 0 2
  -- the `if ... then ... else ...` makes sure that the output includes leading `0`s
  s!"({sgn}{intDigs}.{if decDigs < 10 then "0" else ""}{decDigs}%)"

/--
info: [(+0.00%), (+14.28%), (+0.20%), (-0.60%), (-0.08%), (+1.02%)]
---
info: [+0.0⬝10⁹, +1.0⬝10⁹, +30.200⬝10⁹, -0.460⬝10⁹]
-/
#guard_msgs in
run_cmd
  let floats : Array Float := #[0, 1/7, 0.002, -0.006, -8.253600406145226E-4, 0.0102]
  logInfo m!"{floats.map formatPercent}"
  let ints : Array Int := #[0, 10^9, 302*10^8, -460000000]
  logInfo m!"{ints.map formatDiff}"

/-- `formatFile file` converts a `String` into a formatted string of the form `` `file` ``,
removing leading non-letters. It is expected that `~` is the only leading non-letter. -/
def formatFile (file : String) : String := s!"`{file.dropWhile (!·.isAlpha)}`"

/-- `summary bc` converts a `Bench` into a formatted string of the form
``| `file` | ±x.y⬝10⁹ | ±z.w% |`` (technically, without the spaces). -/
def summary (bc : Bench) : String :=
  let middle := [formatFile bc.file, formatDiff bc.diff, formatPercent bc.reldiff]
  -- the empty strings at both ends produce the leading and trailing `|` of the table row
  "|".intercalate (""::middle ++ [""])

/-- `toTable bcs` formats an array of `Bench`es into a markdown table whose columns are
the file name, the absolute change in instruction counts and the relative change as a
percentage. A typical entry may look like ``|`Mathlib/Analysis/Seminorm.lean`|+2.509⬝10⁹|(+1.41%)|``. -/
def toTable (bcs : Array Bench) : String :=
  let header := "|File|Instructions|%|\n|-|-:|:-:|"
  "\n".intercalate (header :: (bcs.map summary).toList)

/-- `toCollapsibleTable bcs roundDiff` is similar to `toTable bcs`, except that
it returns output enclosed in a `<details><summary>` html-block.
The `<summary>` part tallies the number of entries in `bcs`
whose instructions increased resp. decreased by at least the amount `roundDiff`.
-/
def toCollapsibleTable (bcs : Array Bench) (roundDiff : Int) : String :=
  s!"<details><summary>{bcs.size} files, Instructions {formatDiff <| roundDiff * 10 ^ 9}\
    </summary>\n\n{toTable (bcs.qsort (·.diff > ·.diff))}\n</details>\n"

/-- Assuming that the input is a `json`-string formatted to produce an array of `Bench`,
`benchOutput` returns the "significant" changes in numbers of instructions as a string. -/
def benchOutput (jsonInput : String) : IO String := do
  let data ← IO.ofExcept (Json.parse jsonInput >>= fromJson? (α := Array Bench))
  -- `head` contains the changes for `build` and `lint`,
  -- `data` contains the instruction changes for individual files:
  -- each filename is prefixed by `~`.
  let (head, data) := data.partition (·.file.take 1 != "~")
  -- Partition the `Bench`es into "bins", i.e. the subsets of all `Bench`es whose difference
  -- in instructions lies in an interval `[n·10⁹, (n+1)·10⁹)`.
  -- The values `n` need not be consecutive: we only retain non-empty bins.
  let grouped := ((data.groupByKey (·.diff / (10 ^ 9))).toArray.qsort (·.1 > ·.1)).toList
  -- We consider `build` and `lint` as their own groups, in this order.
  let sortHead := (head.qsort (·.file < ·.file)).toList.map (0, #[·])
  let togetherSorted := sortHead ++ grouped
  -- For better formatting, we merge consecutive bins with just a single *file* into one.
  -- This covers the two steps `ts1` and `ts2`.
  -- For example, `[..., (bound₁, #[a₁]), (bound₂, #[a₂]), (bound₃, #[a₃]), ...]` gets collapsed to
  -- `[..., (none, #[a₁, a₂, a₃]), ...]`.
  -- The `boundᵢ` entry becomes `none` for the collapsed entries, so that we know that these
  -- should be printed individually instead of inside a `<details><summary>`-block.
  -- A single bin with just a single file is also marked with `none`, for the same reason.
  -- `splitBy` groups adjacent entries for which the relation holds, i.e. adjacent
  -- singleton bins end up in the same group here.
  let ts1 := togetherSorted.splitBy (·.2.size == 1 && ·.2.size == 1)

  let ts2 := List.flatten <| ts1.map fun l ↦
    if (l.getD 0 default).2.size == 1 then
      [(none, l.foldl (· ++ ·.2) #[])]
    else
      l.map fun (n, ar) ↦ (some n, ar)

  let mut overall := []
  for (bound, gs) in ts2 do
    overall := overall ++ [
      match bound with
      -- These entries are from "singleton" files in their range; we print them individually.
      | none => toTable gs
      -- These get a collapsible summary instead.
      | some roundedDiff => toCollapsibleTable gs roundedDiff]
  return "\n".intercalate overall

open Lean Elab Command in
/-- `addBenchSummaryComment PR repo tempFile` adds a summary of benchmarking results
as a comment to a pull request. It takes as input
* the number `PR` and the name `repo` as a `String` containing the relevant pull-request
  (it reads and posts comments there)
* the optional `jobID` numeral for reporting the action that produced the output
  (`jobID` is a natural number, even though it gets converted to a `String` -- this is
  mostly due to the fact that it is easier to get CI to pass a number, than a string with
  quotations)
* the `String` `tempFile` of a temporary file where the command stores transient information.

The code itself interfaces with the shell to retrieve and process json data and
eventually uses `benchOutput`.

Here is a summary of the steps:
* retrieve the last comment to the PR (using `gh pr view ...`),
* check if it was posted by `leanprover-bot`,
* try to retrieve the source and target commits from the url that the bot posts
  and store them in `source` and `target`,
* query the speed center for the benchmarking data (using `curl url`),
* format and filter the returned JSON data (various times),
  saving intermediate steps into `tempFile` (using `jq` multiple times),
* process the final string to produce a summary (using `benchOutput`),
* finally post the resulting output to the PR (using `gh pr comment ...`).
-/
def addBenchSummaryComment (PR : Nat) (repo : String)
    (jobID : Nat := 0) (author : String := "leanprover-bot")
    (tempFile : String := "benchOutput.json") :
    CommandElabM Unit := do
  let PR := s!"{PR}"
  let jq := s!".comments | last | select(.author.login==\"{author}\") | .body"

  -- retrieve the relevant comment
  let gh_pr_comments : IO.Process.SpawnArgs :=
    { cmd := "gh", args := #["pr", "view", PR, "--repo", repo, "--json", "comments", "--jq", jq] }
  -- This is the content of the last comment made by `leanprover-bot` to the PR `PR`.
  let output ← IO.Process.run gh_pr_comments
  -- URLs of benchmarking results have the form {something}/compare/source_sha/to/target_sha,
  -- where source_sha and target_sha are the commit hashes of the revisions being benchmarked.
  -- The comment contains such a URL (and only one); parse the revisions from the comment.
  let frags := output.split (· == '/')
  let some compIdx := frags.findIdx? (· == "compare") |
    logInfo "No 'compare' found in URL."
    return
  let source := frags.getD (compIdx + 1) ""
  let target := (frags.getD (compIdx + 3) "").takeWhile (· != ')')
  if (source.length, target.length) != (36, 36) then
    logInfo m!"Found\nsource: '{source}'\ntarget: '{target}'\ninstead of two commit hashes."
    return
  dbg_trace s!"Using commits\nsource: '{source}'\ntarget: '{target}'\n"

  let job_msg := s!"\n[CI run](https://github.com/{repo}/actions/runs/{jobID}) [Lakeprof report](https://speed.lean-lang.org/mathlib4-out/{target}/)"

  -- retrieve the data from the speed-center
  let curlSpeedCenter : IO.Process.SpawnArgs :=
    { cmd := "curl"
      args := #[s!"https://speed.lean-lang.org/mathlib4/api/compare/{source}/to/{target}?all_values=true"] }
  dbg_trace "\n#running\n\
    curl https://speed.lean-lang.org/mathlib4/api/compare/{source}/to/{target}?all_values=true > {tempFile}.src"
  let bench ← IO.Process.run curlSpeedCenter
  IO.FS.writeFile (tempFile ++ ".src") bench

  -- Extract all instruction changes whose magnitude is larger than `threshold`.
  let threshold := 10 ^ 9
  let jq1 : IO.Process.SpawnArgs :=
    { cmd := "jq"
      args := #["-r", "--arg", "thr", s!"{threshold}",
        ".differences | .[] | ($thr|tonumber) as $th | select(.dimension.metric == \"instructions\" and ((.diff >= $th) or (.diff <= -$th)))",
        (tempFile ++ ".src")] }
  dbg_trace "\n#running\n\
    jq -r --arg thr {threshold} '.differences | .[] | ($thr|tonumber) as $th |\n  \
    select(.dimension.metric == \"instructions\" and ((.diff >= $th) or (.diff <= -$th)))' \
    {tempFile}.src > {tempFile}"
  let firstFilter ← IO.Process.run jq1
  -- we leave `tempFile.src` unchanged and we switch to updating `tempfile`: this is useful for
  -- debugging, as it preserves the original data downloaded from the speed-center
  IO.FS.writeFile tempFile firstFilter

  -- Write these in compact form, in the format suitable for `benchOutput`.
  let jq2 : IO.Process.SpawnArgs :=
    { cmd := "jq"
      args := #["-c", "[{file: .dimension.benchmark, diff: .diff, reldiff: .reldiff}]", tempFile] }
  dbg_trace "\n#running\n\
    jq -c '[\{file: .dimension.benchmark, diff: .diff, reldiff: .reldiff}]' {tempFile} > \
    {tempFile}.2"
  let secondFilter ← IO.Process.run jq2
  if secondFilter == "" then
    let _ ← IO.Process.run
      { cmd := "gh", args := #["pr", "comment", PR, "--repo", repo, "--body",
        s!"No benchmark entry differed by at least {formatDiff threshold} instructions." ++ job_msg] }
  else
  IO.FS.writeFile tempFile secondFilter
  -- combine the filtered entries into a single JSON array
  let jq3 : IO.Process.SpawnArgs :=
    { cmd := "jq", args := #["-n", "reduce inputs as $in (null; . + $in)", tempFile] }
  dbg_trace "\n#running\n\
    jq -n 'reduce inputs as $in (null; . + $in)' {tempFile}.2 > {tempFile}.3"
  let thirdFilter ← IO.Process.run jq3
  let report ← benchOutput thirdFilter
  IO.println report
  -- Post the computed summary as a github comment.
  let add_comment : IO.Process.SpawnArgs :=
    { cmd := "gh", args := #["pr", "comment", PR, "--repo", repo, "--body", report ++ job_msg] }
  let _ ← IO.Process.run add_comment

end BenchAction

-- CI adds the following line, replacing `putPR` with the PR number:
--run_cmd BenchAction.addBenchSummaryComment putPR "leanprover-community/mathlib4"
.lake/packages/mathlib/scripts/nolints_prime_decls.txt
AbelRuffini.not_solvable_by_rad' abs_add' abs_le_of_sq_le_sq' abs_lt_of_sq_lt_sq' abs_norm' abs_norm_sub_norm_le' Absorbent.zero_mem' ack_strict_mono_left' Action.inhabited' Action.tensorUnit_ρ' Action.tensor_ρ' AddAction.orbitZMultiplesEquiv_symm_apply' AddChar.div_apply' AddChar.inv_apply' AddChar.neg_apply' AddChar.sub_apply' AddChar.zmodChar_apply' AddCircle.addOrderOf_div_of_gcd_eq_one' AddCircle.continuous_mk' AddCircle.measurable_mk' AddCircle.norm_eq' AddCommGroup.intCast_modEq_intCast' AddCommGroup.ModEq.add_left_cancel' AddCommGroup.ModEq.add_right_cancel' AddCommGroup.modEq_sub_iff_add_modEq' AddCommGroup.ModEq.sub_left_cancel' AddCommGroup.ModEq.sub_right_cancel' AddCommGroup.sub_modEq_iff_modEq_add' AddConstMapClass.map_add_int' AddConstMapClass.map_add_nat' AddConstMapClass.map_add_ofNat' AddConstMapClass.map_int_add' AddConstMapClass.map_nat' AddConstMapClass.map_nat_add' AddConstMapClass.map_ofNat' AddConstMapClass.map_ofNat_add' AddConstMapClass.map_sub_int' AddConstMapClass.map_sub_nat' AddConstMapClass.map_sub_ofNat' add_div' AddGroup.int_smulCommClass' Additive.isIsIsometricVAdd' Additive.isIsIsometricVAdd'' add_le_mul' AddMonoidAlgebra.lift_apply' AddMonoidAlgebra.lift_of' AddMonoidAlgebra.lift_unique' AddMonoidAlgebra.mem_grade_iff' AddMonoidHom.coe_smul' AddMonoidHom.coe_toMultiplicative' AddMonoidHom.coe_toMultiplicative'' AddMonoid.nat_smulCommClass' add_sq' AddSubgroup.torsionBy.mod_self_nsmul' AddValuation.map_add' AddValuation.map_lt_sum' ADEInequality.admissible_A' ADEInequality.admissible_D' ADEInequality.admissible_of_one_lt_sumInv_aux' AdjoinRoot.algebraMap_eq' AdjoinRoot.coe_injective' AdjoinRoot.isIntegral_root' AdjoinRoot.Minpoly.toAdjoin_apply' AEMeasurable.comp_aemeasurable' aemeasurable_const' AEMeasurable.const_smul' AEMeasurable.div' aemeasurable_id' aemeasurable_id'' AEMeasurable.inf' AEMeasurable.mono' AEMeasurable.mul' aemeasurable_of_tendsto_metrizable_ae' AEMeasurable.sup' AffineEquiv.coe_mk' AffineEquiv.linear_mk' 
AffineIsometryEquiv.coe_mk' AffineIsometryEquiv.coe_vaddConst' AffineIsometryEquiv.dist_pointReflection_self' AffineIsometryEquiv.linearIsometryEquiv_mk' AffineMap.coe_mk' AffineMap.lineMap_apply_module' AffineMap.lineMap_apply_ring' AffineSubspace.mem_perpBisector_iff_dist_eq' AkraBazziRecurrence.asympBound_def' AkraBazziRecurrence.dist_r_b' Algebra.adjoin_induction'' Algebra.algebraMap_eq_smul_one' Algebra.fg_trans' Algebra.FormallyUnramified.ext' Algebra.FormallyUnramified.lift_unique' Algebra.Generators.Cotangent.module' Algebra.Generators.Cotangent.val_smul' Algebra.Generators.Cotangent.val_smul'' Algebra.Generators.Cotangent.val_smul''' AlgebraicClosure.toStepOfLE' AlgebraicGeometry.basicOpen_eq_of_affine' AlgebraicGeometry.IsAffineOpen.fromSpec_preimage_basicOpen' AlgebraicGeometry.IsAffineOpen.isLocalization_stalk' AlgebraicGeometry.IsOpenImmersion.hasLimit_cospan_forget_of_left' AlgebraicGeometry.IsOpenImmersion.hasLimit_cospan_forget_of_right' AlgebraicGeometry.LocallyRingedSpace.Hom.ext' AlgebraicGeometry.LocallyRingedSpace.id_val' AlgebraicGeometry.LocallyRingedSpace.stalkMap_germ' AlgebraicGeometry.morphismRestrict_app' AlgebraicGeometry.PresheafedSpace.GlueData.opensImagePreimageMap_app' AlgebraicGeometry.PresheafedSpace.IsOpenImmersion.c_iso' AlgebraicGeometry.PresheafedSpace.stalkMap_germ' AlgebraicGeometry.ProjectiveSpectrum.StructureSheaf.SectionSubring.add_mem' AlgebraicGeometry.ProjectiveSpectrum.StructureSheaf.SectionSubring.mul_mem' AlgebraicGeometry.ProjectiveSpectrum.StructureSheaf.SectionSubring.neg_mem' AlgebraicGeometry.ProjectiveSpectrum.StructureSheaf.SectionSubring.one_mem' AlgebraicGeometry.ProjectiveSpectrum.StructureSheaf.SectionSubring.zero_mem' AlgebraicGeometry.ProjIsoSpecTopComponent.FromSpec.mem_carrier_iff' AlgebraicGeometry.Proj.stalkIso'_germ' AlgebraicGeometry.Scheme.Hom.appIso_hom' AlgebraicGeometry.Scheme.Hom.appLE_map' AlgebraicGeometry.Scheme.Hom.map_appLE' AlgebraicGeometry.Scheme.map_basicOpen' 
AlgebraicGeometry.Scheme.mem_basicOpen_top' AlgebraicGeometry.Scheme.Opens.germ_stalkIso_hom' AlgebraicGeometry.Scheme.stalkMap_germ' AlgebraicGeometry.SheafedSpace.comp_c_app' AlgebraicGeometry.SheafedSpace.IsOpenImmersion.hasLimit_cospan_forget_of_left' AlgebraicGeometry.SheafedSpace.IsOpenImmersion.hasLimit_cospan_forget_of_right' AlgebraicGeometry.Spec.locallyRingedSpaceObj_presheaf' AlgebraicGeometry.Spec.locallyRingedSpaceObj_presheaf_map' AlgebraicGeometry.Spec.locallyRingedSpaceObj_sheaf' AlgebraicGeometry.stalkToFiberRingHom_germ' AlgebraicGeometry.StructureSheaf.comap_id' AlgebraicGeometry.StructureSheaf.const_apply' AlgebraicGeometry.StructureSheaf.const_mul_cancel' AlgebraicGeometry.StructureSheaf.IsFraction.eq_mk' AlgebraicGeometry.StructureSheaf.localizationToStalk_mk' AlgebraicGeometry.StructureSheaf.res_const' AlgebraicGeometry.StructureSheaf.stalkToFiberRingHom_germ' AlgebraicGeometry.StructureSheaf.toBasicOpen_mk' AlgebraicGeometry.ΓSpec.adjunction_counit_app' AlgebraicGeometry.ΓSpec.locallyRingedSpaceAdjunction_counit_app' AlgebraicGeometry.ΓSpec.locallyRingedSpaceAdjunction_homEquiv_apply' AlgebraicGeometry.ΓSpec.toOpen_unit_app_val_c_app' algebraicIndependent_equiv' AlgebraicIndependent.map' AlgebraicIndependent.to_subtype_range' AlgebraicTopology.DoldKan.hσ'_eq' AlgebraicTopology.DoldKan.Γ₀.Obj.map_on_summand' AlgebraicTopology.DoldKan.Γ₀.Obj.map_on_summand₀' AlgebraicTopology.DoldKan.Γ₀.Obj.Termwise.mapMono_δ₀' Algebra.IsAlgebraic.bijective_of_isScalarTower' Algebra.TensorProduct.algebraMap_apply' Algebra.TensorProduct.basis_repr_symm_apply' Algebra.TensorProduct.ext' Algebra.TensorProduct.intCast_def' Algebra.TensorProduct.natCast_def' Algebra.toMatrix_lmul' AlgEquiv.apply_smulCommClass' AlgEquiv.coe_restrictScalars' AlgEquiv.coe_ringEquiv' AlgEquiv.mk_coe' AlgHom.coe_mk' AlgHom.coe_restrictScalars' AlgHom.toAddMonoidHom' AlgHom.toMonoidHom' AlternatingMap.domCoprod.summand_mk'' analyticOnNhd_congr' AnalyticOnNhd.congr' 
AnalyticOnNhd.eval_continuousLinearMap' AnalyticOnNhd.eval_linearMap' AntilipschitzWith.le_mul_nnnorm' AntilipschitzWith.le_mul_norm' AntilipschitzWith.to_rightInvOn' antisymm' Antitone.const_mul' Antitone.mul_const' AntitoneOn.const_mul' AntitoneOn.mul_const' Applicative.pure_seq_eq_map' ApplicativeTransformation.preserves_map' apply_abs_le_mul_of_one_le' ArithmeticFunction.mul_smul' ArithmeticFunction.one_smul' ArithmeticFunction.ppow_succ' ArithmeticFunction.sum_eq_iff_sum_smul_moebius_eq_on' Associated.dvd' Associated.of_pow_associated_of_prime' Associates.count_mul_of_coprime' Associates.dvd_of_mem_factors' Associates.map_subtype_coe_factors' Associates.mk_ne_zero' Associates.unique' Asymptotics.IsBigO.congr' Asymptotics.isBigO_const_mul_left_iff' Asymptotics.IsBigO.const_mul_right' Asymptotics.isBigO_const_mul_right_iff' Asymptotics.isBigO_fst_prod' Asymptotics.IsBigO.of_bound' Asymptotics.isBigO_of_le' Asymptotics.isBigO_self_const_mul' Asymptotics.isBigO_snd_prod' Asymptotics.IsBigOWith.congr' Asymptotics.IsBigOWith.const_mul_right' Asymptotics.isBigOWith_of_le' Asymptotics.IsBigOWith.pow' Asymptotics.isBigOWith_self_const_mul' Asymptotics.IsBigOWith.sup' Asymptotics.isBigOWith_zero' Asymptotics.isEquivalent_of_tendsto_one' Asymptotics.IsLittleO.congr' Asymptotics.isLittleO_const_mul_left_iff' Asymptotics.IsLittleO.const_mul_right' Asymptotics.isLittleO_const_mul_right_iff' Asymptotics.IsLittleO.def' Asymptotics.isLittleO_iff_nat_mul_le' Asymptotics.isLittleO_iff_tendsto' Asymptotics.isLittleO_irrefl' Asymptotics.IsLittleO.right_isBigO_add' Asymptotics.IsLittleO.right_isTheta_add' Asymptotics.isTheta_of_norm_eventuallyEq' Asymptotics.SuperpolynomialDecay.congr' ball_eq' Ballot.ballot_problem' Basis.det_map' Basis.det_reindex' Basis.mk_eq_rank' Basis.mk_eq_rank'' Basis.reindexRange_repr' Basis.repr_eq_iff' Basis.tensorProduct_apply' Behrend.bound_aux' Behrend.lower_bound_le_one' Behrend.map_succ' bernoulli'_def' bernoulli'_spec' bernoulli_spec' 
bernsteinPolynomial.flip' Besicovitch.SatelliteConfig.hlast' Besicovitch.SatelliteConfig.inter' bihimp_eq' biInf_congr' biInf_finsetSigma' biInf_sigma' Bimod.AssociatorBimod.hom_left_act_hom' Bimod.AssociatorBimod.hom_right_act_hom' Bimod.comp_hom' Bimod.id_hom' Bimod.LeftUnitorBimod.hom_left_act_hom' Bimod.LeftUnitorBimod.hom_right_act_hom' Bimod.RightUnitorBimod.hom_left_act_hom' Bimod.RightUnitorBimod.hom_right_act_hom' Bimod.TensorBimod.actRight_one' Bimod.TensorBimod.left_assoc' Bimod.TensorBimod.middle_assoc' Bimod.TensorBimod.one_act_left' Bimod.TensorBimod.right_assoc' Bimon.comp_hom' Bimon.id_hom' birkhoffAverage_congr_ring' birkhoffAverage_one' birkhoffAverage_zero' birkhoffSum_one' birkhoffSum_succ' birkhoffSum_zero' biSup_congr' biSup_finsetSigma' biSup_sigma' BooleanRing.add_eq_zero' Bool.eq_false_of_not_eq_true' Bool.eq_true_of_not_eq_false' Bornology.ext_iff' Bornology.IsBounded.exists_pos_norm_le' Bornology.IsBounded.exists_pos_norm_lt' bound' BoundedContinuousFunction.const_apply' BoundedContinuousFunction.dist_le_two_norm' BoundedContinuousFunction.dist_nonneg' BoundedContinuousFunction.extend_apply' BoundedContinuousFunction.instModule' BoundedContinuousFunction.instSMul' BoundedLatticeHom.coe_comp_inf_hom' BoundedLatticeHom.coe_comp_lattice_hom' BoundedLatticeHom.coe_comp_sup_hom' BoxIntegral.Box.coe_mk' BoxIntegral.Box.volume_apply' BoxIntegral.IntegrationParams.toFilterDistortioniUnion_neBot' BoxIntegral.Prepartition.iUnion_def' BoxIntegral.Prepartition.mem_restrict' BoxIntegral.Prepartition.mem_split_iff' BoxIntegral.TaggedPrepartition.IsSubordinate.mono' Bundle.TotalSpace.mk' calc_eval_z' card_dvd_exponent_pow_rank' Cardinal.add_eq_max' Cardinal.add_le_add' Cardinal.add_mk_eq_max' Cardinal.aleph0_le_aleph' Cardinal.aleph_eq_aleph' Cardinal.alephIdx_aleph' Cardinal.cantor' Cardinal.lift_lt_univ' Cardinal.lift_mk_shrink' Cardinal.lift_mk_shrink'' Cardinal.lt_univ' Cardinal.mk_eq_two_iff' Cardinal.mk_finsupp_lift_of_infinite' 
Cardinal.mk_finsupp_of_infinite' Cardinal.mul_comm' Cardinal.mul_eq_max' Cardinal.prod_const' Cardinal.sum_add_distrib' Cardinal.sum_const' Cardinal.two_le_iff' card_le_of_injective' card_le_of_surjective' catalan_succ' CategoryTheory.Abelian.coimageImageComparison_eq_coimageImageComparison' CategoryTheory.Abelian.epi_of_epi_of_epi_of_mono' CategoryTheory.Abelian.epi_of_mono_of_epi_of_mono' CategoryTheory.Abelian.Ext.add_hom' CategoryTheory.Abelian.Ext.neg_hom' CategoryTheory.Abelian.FunctorCategory.coimageImageComparison_app' CategoryTheory.Abelian.mono_of_epi_of_epi_mono' CategoryTheory.Abelian.mono_of_epi_of_mono_of_mono' CategoryTheory.Abelian.OfCoimageImageComparisonIsIso.imageMonoFactorisation_e' CategoryTheory.Abelian.Pseudoelement.pseudoApply_mk' CategoryTheory.Abelian.Pseudoelement.zero_eq_zero' CategoryTheory.Abelian.Pseudoelement.zero_morphism_ext' CategoryTheory.ActionCategory.cases' CategoryTheory.additive_coyonedaObj' CategoryTheory.additive_yonedaObj' CategoryTheory.Adhesive.van_kampen' CategoryTheory.Adjunction.he'' CategoryTheory.Arrow.iso_w' CategoryTheory.BicategoricalCoherence.assoc' CategoryTheory.BicategoricalCoherence.left' CategoryTheory.BicategoricalCoherence.right' CategoryTheory.BicategoricalCoherence.tensorRight' CategoryTheory.Bifunctor.diagonal' CategoryTheory.Biproduct.column_nonzero_of_iso' CategoryTheory.BraidedCategory.yang_baxter' CategoryTheory.BraidedFunctor.ext' CategoryTheory.CategoryOfElements.CreatesLimitsAux.π_liftedConeElement' CategoryTheory.CechNerveTerminalFrom.hasWidePullback' CategoryTheory.CommSq.HasLift.mk' CategoryTheory.Comonad.Coalgebra.Hom.ext' CategoryTheory.ComonadHom.ext' CategoryTheory.comp_apply' CategoryTheory.ComposableArrows.Exact.exact' CategoryTheory.ComposableArrows.IsComplex.zero' CategoryTheory.ComposableArrows.map'_inv_eq_inv_map' CategoryTheory.ComposableArrows.naturality' CategoryTheory.ComposableArrows.Precomp.map_zero_one' CategoryTheory.composePath_comp' CategoryTheory.conj_eqToHom_iff_heq' 
CategoryTheory.CosimplicialObject.δ_comp_δ' CategoryTheory.CosimplicialObject.δ_comp_δ'' CategoryTheory.CosimplicialObject.δ_comp_δ_self' CategoryTheory.CosimplicialObject.δ_comp_σ_of_gt' CategoryTheory.CosimplicialObject.δ_comp_σ_self' CategoryTheory.CosimplicialObject.δ_comp_σ_succ' CategoryTheory.DifferentialObject.eqToHom_f' CategoryTheory.e_assoc' CategoryTheory.Endofunctor.Algebra.Initial.left_inv' CategoryTheory.eq_of_comp_left_eq' CategoryTheory.eq_of_comp_right_eq' CategoryTheory.Equivalence.cancel_counitInv_right_assoc' CategoryTheory.Equivalence.cancel_unit_right_assoc' CategoryTheory.ExactPairing.coevaluation_evaluation'' CategoryTheory.ExactPairing.evaluation_coevaluation'' CategoryTheory.exists_zigzag' CategoryTheory.forgetEnrichment_id' CategoryTheory.Functor.commShiftIso_add' CategoryTheory.Functor.coreflective' CategoryTheory.Functor.HasRightDerivedFunctor.mk' CategoryTheory.Functor.inl_biprodComparison' CategoryTheory.Functor.inr_biprodComparison' CategoryTheory.Functor.isContinuous_comp' CategoryTheory.Functor.IsHomological.mk' CategoryTheory.Functor.IsLocalization.mk' CategoryTheory.Functor.Iteration.Hom.ext' CategoryTheory.Functor.map_comp_heq' CategoryTheory.Functor.postcomp_map_heq' CategoryTheory.Functor.reflective' CategoryTheory.Functor.relativelyRepresentable.map_fst' CategoryTheory.Functor.relativelyRepresentable.w' CategoryTheory.Functor.shiftIso_add' CategoryTheory.Functor.shiftMap_comp' CategoryTheory.FunctorToTypes.jointly_surjective' CategoryTheory.FunctorToTypes.prod_ext' CategoryTheory.Functor.uncurry_obj_curry_obj_flip_flip' CategoryTheory.Functor.ι_biproductComparison' CategoryTheory.Grothendieck.comp_fiber' CategoryTheory.Grothendieck.id_fiber' CategoryTheory.GrothendieckTopology.OneHypercoverFamily.IsSheafIff.fac' CategoryTheory.GrothendieckTopology.OneHypercover.mem_sieve₁' CategoryTheory.GrothendieckTopology.WEqualsLocallyBijective.mk' CategoryTheory.Grpd.str' CategoryTheory.HasPullbacksOfInclusions.hasPullbackInr' 
CategoryTheory.HasPullbacksOfInclusions.preservesPullbackInl' CategoryTheory.HasSheafify.mk' CategoryTheory.Injective.injective_iff_preservesEpimorphisms_preadditive_yoneda_obj' CategoryTheory.IsCoreflexivePair.mk' CategoryTheory.IsHomLift.fac' CategoryTheory.IsHomLift.of_fac' CategoryTheory.Iso.inv_ext' CategoryTheory.IsPullback.inl_snd' CategoryTheory.IsPullback.inr_fst' CategoryTheory.IsPullback.of_hasBinaryProduct' CategoryTheory.IsPullback.of_is_bilimit' CategoryTheory.IsPushout.inl_snd' CategoryTheory.IsPushout.inr_fst' CategoryTheory.IsPushout.of_hasBinaryCoproduct' CategoryTheory.IsPushout.of_is_bilimit' CategoryTheory.IsReflexivePair.mk' CategoryTheory.isSheaf_yoneda' CategoryTheory.LaxBraidedFunctor.ext' CategoryTheory.Limits.biprod.hom_ext' CategoryTheory.Limits.biprod.map_eq_map' CategoryTheory.Limits.biprod.symmetry' CategoryTheory.Limits.biproduct.hom_ext' CategoryTheory.Limits.biproduct.map_eq_map' CategoryTheory.Limits.Cofork.IsColimit.π_desc' CategoryTheory.Limits.colimit.pre_map' CategoryTheory.Limits.colimMap_epi' CategoryTheory.Limits.Concrete.widePullback_ext' CategoryTheory.Limits.Concrete.widePushout_exists_rep' CategoryTheory.Limits.coprod.symmetry' CategoryTheory.Limits.equalizerSubobject_arrow' CategoryTheory.Limits.Fork.IsLimit.lift_ι' CategoryTheory.Limits.ImageMap.mk.injEq' CategoryTheory.Limits.imageSubobject_arrow' CategoryTheory.Limits.kernelSubobject_arrow' CategoryTheory.Limits.limit.map_pre' CategoryTheory.Limits.limLax_obj' CategoryTheory.Limits.limMap_mono' CategoryTheory.Limits.MonoCoprod.mk' CategoryTheory.Limits.MonoCoprod.mono_binaryCofanSum_inl' CategoryTheory.Limits.MonoCoprod.mono_binaryCofanSum_inr' CategoryTheory.Limits.MonoCoprod.mono_of_injective' CategoryTheory.Limits.Multicoequalizer.multicofork_ι_app_right' CategoryTheory.Limits.Multicofork.ofSigmaCofork_ι_app_right' CategoryTheory.Limits.parallelPair_initial_mk' CategoryTheory.Limits.Pi.map'_comp_map' CategoryTheory.Limits.Pi.map_comp_map' 
CategoryTheory.Limits.prod.symmetry' CategoryTheory.Limits.Sigma.map'_comp_map' CategoryTheory.Limits.Sigma.map_comp_map' CategoryTheory.Limits.Sigma.ι_comp_map' CategoryTheory.Limits.Types.Colimit.w_apply' CategoryTheory.Limits.Types.Colimit.ι_desc_apply' CategoryTheory.Limits.Types.Colimit.ι_map_apply' CategoryTheory.Limits.Types.limit_ext' CategoryTheory.Limits.Types.limit_ext_iff' CategoryTheory.Limits.Types.Limit.lift_π_apply' CategoryTheory.Limits.Types.Limit.map_π_apply' CategoryTheory.Limits.Types.Limit.w_apply' CategoryTheory.Limits.Types.Pushout.equivalence_rel' CategoryTheory.Limits.WalkingParallelPairHom.id.sizeOf_spec' CategoryTheory.Limits.zero_of_source_iso_zero' CategoryTheory.Limits.zero_of_target_iso_zero' CategoryTheory.Localization.Preadditive.comp_add' CategoryTheory.Localization.Preadditive.zero_add' CategoryTheory.Localization.SmallShiftedHom.equiv_shift' CategoryTheory.LocalizerMorphism.guitartExact_of_isRightDerivabilityStructure' CategoryTheory.LocalizerMorphism.IsLocalizedEquivalence.mk' CategoryTheory.Mat_.additiveObjIsoBiproduct_naturality' CategoryTheory.Monad.Algebra.Hom.ext' CategoryTheory.MonadHom.ext' CategoryTheory.MonoidalCategory.hom_inv_id_tensor' CategoryTheory.MonoidalCategory.hom_inv_whiskerRight' CategoryTheory.MonoidalCategory.inv_hom_id_tensor' CategoryTheory.MonoidalCategory.inv_hom_whiskerRight' CategoryTheory.MonoidalCategory.leftUnitor_tensor_hom' CategoryTheory.MonoidalCategory.leftUnitor_tensor_hom'' CategoryTheory.MonoidalCategory.leftUnitor_tensor_inv' CategoryTheory.MonoidalCategory.tensorHom_def' CategoryTheory.MonoidalCategory.tensor_hom_inv_id' CategoryTheory.MonoidalCategory.tensor_inv_hom_id' CategoryTheory.MonoidalCategory.tensorIso_def' CategoryTheory.MonoidalCategory.whiskerLeft_hom_inv' CategoryTheory.MonoidalCategory.whiskerLeft_inv_hom' CategoryTheory.MonoidalCoherence.assoc' CategoryTheory.MonoidalCoherence.left' CategoryTheory.MonoidalCoherence.right' CategoryTheory.MonoidalCoherence.tensor_right' 
CategoryTheory.MonoidalNatTrans.ext' CategoryTheory.MonoOver.mk'_coe' CategoryTheory.NatIso.naturality_1' CategoryTheory.NatIso.naturality_2' CategoryTheory.NatTrans.ext' CategoryTheory.NatTrans.id_app' CategoryTheory.NatTrans.vcomp_app' CategoryTheory.NonPreadditiveAbelian.neg_sub' CategoryTheory.OplaxNatTrans.Modification.comp_app' CategoryTheory.OplaxNatTrans.Modification.id_app' CategoryTheory.Preadditive.epi_iff_surjective' CategoryTheory.Preadditive.epi_of_isZero_cokernel' CategoryTheory.Preadditive.mono_iff_injective' CategoryTheory.Preadditive.mono_of_isZero_kernel' CategoryTheory.Prefunctor.mapPath_comp' CategoryTheory.PreOneHypercover.sieve₁_eq_pullback_sieve₁' CategoryTheory.PreservesPullbacksOfInclusions.preservesPullbackInl' CategoryTheory.PreservesPullbacksOfInclusions.preservesPullbackInr' CategoryTheory.Presheaf.isLocallyInjective_toSheafify' CategoryTheory.Presheaf.isLocallySurjective_iff_imagePresheaf_sheafify_eq_top' CategoryTheory.Presheaf.isLocallySurjective_toSheafify' CategoryTheory.Pretriangulated.mem_distTriang_op_iff' CategoryTheory.Pretriangulated.Opposite.mem_distinguishedTriangles_iff' CategoryTheory.Projective.projective_iff_preservesEpimorphisms_preadditiveCoyoneda_obj' CategoryTheory.ProjectiveResolution.pOpcycles_comp_fromLeftDerivedZero' CategoryTheory.Quiv.str' CategoryTheory.Quotient.lift_unique' CategoryTheory.RanIsSheafOfIsCocontinuous.fac' CategoryTheory.RanIsSheafOfIsCocontinuous.liftAux_map' CategoryTheory.regularTopology.equalizerCondition_w' CategoryTheory.Sheaf.epi_of_isLocallySurjective' CategoryTheory.Sheaf.isLocallySurjective_iff_epi' CategoryTheory.SheafOfTypes.Hom.ext' CategoryTheory.shift_neg_shift' CategoryTheory.shift_shift' CategoryTheory.shift_shift_neg' CategoryTheory.shiftZero' CategoryTheory.ShortComplex.abLeftHomologyData_f' CategoryTheory.ShortComplex.epi_homologyMap_of_epi_cyclesMap' CategoryTheory.ShortComplex.Exact.desc' CategoryTheory.ShortComplex.Exact.epi_f' 
CategoryTheory.ShortComplex.exact_iff_epi_imageToKernel' CategoryTheory.ShortComplex.Exact.isIso_f' CategoryTheory.ShortComplex.Exact.isIso_g' CategoryTheory.ShortComplex.Exact.lift' CategoryTheory.ShortComplex.Exact.mono_g' CategoryTheory.ShortComplex.f'_cyclesMap' CategoryTheory.ShortComplex.HasHomology.mk' CategoryTheory.ShortComplex.hasHomology_of_epi_of_isIso_of_mono' CategoryTheory.ShortComplex.hasHomology_of_isIso_leftRightHomologyComparison' CategoryTheory.ShortComplex.hasHomology_of_preserves' CategoryTheory.ShortComplex.HasLeftHomology.mk' CategoryTheory.ShortComplex.hasLeftHomology_of_epi_of_isIso_of_mono' CategoryTheory.ShortComplex.hasLeftHomology_of_preserves' CategoryTheory.ShortComplex.HasRightHomology.mk' CategoryTheory.ShortComplex.hasRightHomology_of_epi_of_isIso_of_mono' CategoryTheory.ShortComplex.hasRightHomology_of_preserves' CategoryTheory.ShortComplex.HomologyData.exact_iff' CategoryTheory.ShortComplex.HomologyData.map_homologyMap' CategoryTheory.ShortComplex.isIso₂_of_shortExact_of_isIso₁₃' CategoryTheory.ShortComplex.isIso_cyclesMap_of_isIso_of_mono' CategoryTheory.ShortComplex.isIso_homologyMap_of_epi_of_isIso_of_mono' CategoryTheory.ShortComplex.isIso_leftRightHomologyComparison' CategoryTheory.ShortComplex.isIso_opcyclesMap_of_isIso_of_epi' CategoryTheory.ShortComplex.LeftHomologyData.exact_iff_epi_f' CategoryTheory.ShortComplex.LeftHomologyData.map_cyclesMap' CategoryTheory.ShortComplex.LeftHomologyData.map_f' CategoryTheory.ShortComplex.LeftHomologyData.map_leftHomologyMap' CategoryTheory.ShortComplex.LeftHomologyData.ofEpiOfIsIsoOfMono'_f' CategoryTheory.ShortComplex.LeftHomologyData.ofIsColimitCokernelCofork_f' CategoryTheory.ShortComplex.LeftHomologyData.ofIsLimitKernelFork_f' CategoryTheory.ShortComplex.LeftHomologyData.ofZeros_f' CategoryTheory.ShortComplex.LeftHomologyData.op_g' CategoryTheory.ShortComplex.LeftHomologyData.unop_g' CategoryTheory.ShortComplex.LeftHomologyData.τ₁_ofEpiOfIsIsoOfMono_f' 
CategoryTheory.ShortComplex.leftHomologyπ_naturality' CategoryTheory.ShortComplex.leftRightHomologyComparison'_eq_leftHomologpMap'_comp_iso_hom_comp_rightHomologyMap' CategoryTheory.ShortComplex.moduleCatLeftHomologyData_f' CategoryTheory.ShortComplex.mono_homologyMap_of_mono_opcyclesMap' CategoryTheory.ShortComplex.opcyclesMap'_g' CategoryTheory.ShortComplex.p_opcyclesMap' CategoryTheory.ShortComplex.quasiIso_iff_isIso_homologyMap' CategoryTheory.ShortComplex.quasiIso_iff_isIso_leftHomologyMap' CategoryTheory.ShortComplex.quasiIso_iff_isIso_rightHomologyMap' CategoryTheory.ShortComplex.RightHomologyData.exact_iff_mono_g' CategoryTheory.ShortComplex.RightHomologyData.map_g' CategoryTheory.ShortComplex.RightHomologyData.map_opcyclesMap' CategoryTheory.ShortComplex.RightHomologyData.map_rightHomologyMap' CategoryTheory.ShortComplex.RightHomologyData.ofEpiOfIsIsoOfMono_g' CategoryTheory.ShortComplex.RightHomologyData.ofIsColimitCokernelCofork_g' CategoryTheory.ShortComplex.RightHomologyData.ofIsLimitKernelFork_g' CategoryTheory.ShortComplex.RightHomologyData.ofZeros_g' CategoryTheory.ShortComplex.RightHomologyData.op_f' CategoryTheory.ShortComplex.RightHomologyData.p_g' CategoryTheory.ShortComplex.RightHomologyData.unop_f' CategoryTheory.ShortComplex.RightHomologyData.ι_g' CategoryTheory.ShortComplex.rightHomologyι_naturality' CategoryTheory.ShortComplex.ShortExact.mk' CategoryTheory.ShortComplex.ShortExact.δ_apply' CategoryTheory.ShortComplex.ShortExact.δ_eq' CategoryTheory.SimplicialObject.δ_comp_δ' CategoryTheory.SimplicialObject.δ_comp_δ'' CategoryTheory.SimplicialObject.δ_comp_δ_self' CategoryTheory.SimplicialObject.δ_comp_σ_of_gt' CategoryTheory.SimplicialObject.δ_comp_σ_self' CategoryTheory.SimplicialObject.δ_comp_σ_succ' CategoryTheory.SingleFunctors.shiftIso_add' CategoryTheory.StrongEpi.mk' CategoryTheory.StrongMono.mk' CategoryTheory.Subgroupoid.coe_inv_coe' CategoryTheory.Subgroupoid.IsNormal.conj' CategoryTheory.Subobject.inf_eq_map_pullback' 
CategoryTheory.Triangulated.Subcategory.ext₁' CategoryTheory.Triangulated.Subcategory.ext₃' CategoryTheory.Triangulated.Subcategory.W_iff' CategoryTheory.Triangulated.Subcategory.W.mk' CategoryTheory.TwoSquare.GuitartExact.vComp' CategoryTheory.whiskerLeft_id' CategoryTheory.whiskerRight_id' CategoryTheory.yonedaEquiv_naturality' Cauchy.comap' CauchyFilter.mem_uniformity' cauchy_iff' cauchy_iInf_uniformSpace' cauchy_map_iff' Cauchy.mono' cauchy_pi_iff' cauchySeq_iff' CauSeq.bounded' CauSeq.mul_equiv_zero' cfc_comp' cfcₙ_comp' CharP.exists' CharP.natCast_eq_natCast' charP_of_injective_algebraMap' CharP.pi' CharP.subring' ChartedSpaceCore.open_source' CharTwo.neg_eq' CharTwo.sub_eq_add' ciInf_le' ciInf_le_of_le' ciInf_subtype' ciInf_subtype'' CircleDeg1Lift.tendsto_translation_number' CircleDeg1Lift.tendsto_translation_number₀' CircleDeg1Lift.translationNumber_conj_eq' CircleDeg1Lift.translationNumber_eq_of_tendsto₀' circleIntegral.norm_integral_le_of_norm_le_const' circleMap_mem_sphere' ciSup_le' ciSup_le_iff' ciSup_mono' ciSup_or' ciSup_subtype' ciSup_subtype'' Classical.choose_eq' CliffordAlgebra.instAlgebra' CliffordAlgebra.star_def' ClosedIciTopology.isClosed_ge' ClosedIicTopology.isClosed_le' closedUnderRestriction' closure_smul₀' clusterPt_iff_lift'_closure' ClusterPt.of_le_nhds' cmp_div_one' cmp_mul_left' cmp_mul_right' CochainComplex.HomComplex.Cochain.shift_v' CochainComplex.mappingCone.d_fst_v' CochainComplex.mappingCone.d_snd_v' CochainComplex.shiftFunctorAdd'_hom_app_f' CochainComplex.shiftFunctorAdd'_inv_app_f' CochainComplex.shiftFunctor_map_f' CochainComplex.shiftFunctor_obj_d' CochainComplex.shiftFunctor_obj_X' Codisjoint.ne_bot_of_ne_top' Codisjoint.of_codisjoint_sup_of_le' Codisjoint.sup_left' Codisjoint.sup_right' coe_comp_nnnorm' coe_nnnorm' CofiniteTopology.isOpen_iff' comap_norm_atTop' comap_norm_nhdsWithin_Ioi_zero' CommGrpCat.coe_comp' CommGrpCat.coe_id' CommMon.comp' CommMon.id' commProb_def' CommRingCat.equalizer_ι_isLocalHom' 
CommRingCat.instCommRing' CommRingCat.instFunLike' CommRingCat.instFunLike'' CommRingCat.instFunLike''' CommSemiRingCat.instCommSemiring' Commute.mul_self_sub_mul_self_eq' Comon.comp_hom' Comon.id_hom' CompactIccSpace.mk' CompactIccSpace.mk'' CompHaus.toProfinite_obj' compl_beattySeq' iSupIndep.comp' iSupIndep_def' iSupIndep_def'' iSupIndep_of_dfinsuppSumAddHom_injective' iSupIndep.supIndep' CompleteLattice.inf_continuous' CompleteLattice.sSup_continuous' CompletelyDistribLattice.MinimalAxioms.iInf_iSup_eq' CompleteOrthogonalIdempotents.bijective_pi' CompleteSublattice.coe_sInf' CompleteSublattice.coe_sSup' Complex.abs_eq_one_iff' Complex.AbsTheory.abs_nonneg' Complex.affine_of_mapsTo_ball_of_exists_norm_dslope_eq_div' Complex.conj_mul' Complex.cos_eq_tsum' Complex.cos_sq' Complex.cos_two_mul' Complex.cpow_ofNat_mul' Complex.deriv_cos' Complex.equivRealProd_apply_le' Complex.exp_bound' Complex.hasStrictFDerivAt_cpow' Complex.hasSum_conj' Complex.hasSum_cos' Complex.hasSum_sin' Complex.mul_conj' Complex.ofReal_mul' Complex.rank_real_complex' Complex.restrictScalars_one_smulRight' ComplexShape.Embedding.not_boundaryGE_next' ComplexShape.Embedding.not_boundaryLE_prev' ComplexShape.next_add' ComplexShape.next_eq' ComplexShape.next_eq_self' ComplexShape.prev_eq' ComplexShape.prev_eq_self' Complex.sin_eq_tsum' Complex.stolzCone_subset_stolzSet_aux' Complex.tan_add' Complex.UnitDisc.instSMulCommClass_circle' Complex.UnitDisc.instSMulCommClass_closedBall' compl_sInf' compl_sSup' CompositionAsSet.lt_length' Composition.blocks_pos' Composition.mem_range_embedding_iff' Composition.one_le_blocks' Composition.sizeUpTo_succ' Computability.inhabitedΓ' ComputablePred.computable_iff_re_compl_re' Computable.vector_ofFn' Computation.bind_pure' Computation.eq_thinkN' Computation.map_pure' Computation.map_think' Computation.results_of_terminates' ConcaveOn.left_le_of_le_right' ConcaveOn.left_le_of_le_right'' ConcaveOn.left_lt_of_lt_right' ConcaveOn.lt_right_of_left_lt' ConcaveOn.mul' 
ConcaveOn.mul_convexOn' ConcaveOn.right_le_of_le_left' ConcaveOn.right_le_of_le_left'' ConcaveOn.smul' ConcaveOn.smul'' ConcaveOn.smul_convexOn' Concept.ext' Con.coe_mk' conformalFactorAt_inner_eq_mul_inner' CongruenceSubgroup.Gamma1_mem' CongruenceSubgroup.Gamma_mem' ConjAct.smulCommClass' ConjAct.smulCommClass₀' ConjAct.unitsSMulCommClass' conjneg_neg' conjugate_le_conjugate' conjugate_lt_conjugate' conjugate_nonneg' conjugate_pos' Con.mrange_mk' ConnectedComponents.coe_eq_coe' connectedComponents_lift_unique' ContDiffAt.comp' contDiffAt_id' contDiffAt_pi' contDiffAt_prod' ContDiff.comp' contDiff_id' ContDiff.iterate_deriv' ContDiffOn.div' contDiffOn_id' contDiffOn_pi' contDiffOn_prod' contDiff_pi' contDiff_prod' ContDiffWithinAt.congr' ContDiffWithinAt.congr_of_eventually_eq' ContDiffWithinAt.contDiffOn' contDiffWithinAt_inter' contDiffWithinAt_prod' ContinuousAlgHom.coe_comp' ContinuousAlgHom.coe_fst' ContinuousAlgHom.coe_id' ContinuousAlgHom.coe_mk' ContinuousAlgHom.coe_prodMap' ContinuousAlgHom.coe_restrictScalars' ContinuousAlgHom.coe_snd' ContinuousAt.comp' continuousAt_const_cpow' ContinuousAt.div' continuousAt_extChartAt' continuousAt_extChartAt_symm' continuousAt_extChartAt_symm'' ContinuousAt.finset_inf' ContinuousAt.finset_sup' continuousAt_id' continuousAt_iff_continuous_left'_right' ContinuousAt.inf' continuousAt_jacobiTheta₂' ContinuousAt.nnnorm' ContinuousAt.norm' continuousAt_pi' ContinuousAt.prod_map' ContinuousAt.sup' Continuous.comp' Continuous.comp_continuousOn' Continuous.div' continuous_div_left' continuous_div_right' Continuous.finset_inf' Continuous.finset_sup' continuous_id' continuous_if' Continuous.inf' ContinuousLinearEquiv.coe_refl' ContinuousLinearEquiv.comp_hasFDerivAt_iff' ContinuousLinearEquiv.comp_hasFDerivWithinAt_iff' ContinuousLinearEquiv.comp_right_hasFDerivAt_iff' ContinuousLinearEquiv.comp_right_hasFDerivWithinAt_iff' ContinuousLinearMap.apply_apply' ContinuousLinearMap.applySMulCommClass' ContinuousLinearMap.coe_add' 
ContinuousLinearMap.coe_comp' ContinuousLinearMap.coe_flipₗᵢ' ContinuousLinearMap.coeFn_compLp' ContinuousLinearMap.coe_fst' ContinuousLinearMap.coe_id' ContinuousLinearMap.coe_mk' ContinuousLinearMap.coe_neg' ContinuousLinearMap.coe_pi' ContinuousLinearMap.coe_prodMap' ContinuousLinearMap.coe_restrictScalars' ContinuousLinearMap.coe_restrict_scalarsL' ContinuousLinearMap.coe_smul' ContinuousLinearMap.coe_snd' ContinuousLinearMap.coe_sub' ContinuousLinearMap.coe_sum' ContinuousLinearMap.coe_zero' ContinuousLinearMap.compFormalMultilinearSeries_apply' ContinuousLinearMap.comp_memLp' ContinuousLinearMap.integral_comp_comm' ContinuousLinearMap.measurable_apply' ContinuousLinearMap.mul_apply' ContinuousLinearMap.norm_extendTo𝕜' ContinuousLinearMap.opNorm_le_of_shell' ContinuousLinearMap.sub_apply' ContinuousLinearMap.toSpanSingleton_smul' ContinuousMap.coe_const' ContinuousMap.coe_inf' ContinuousMap.coe_sup' ContinuousMap.comp_yonedaPresheaf' ContinuousMap.continuous.comp' ContinuousMap.continuous_const' ContinuousMap.instSMul' ContinuousMap.liftCover_coe' ContinuousMap.liftCover_restrict' ContinuousMap.module' ContinuousMap.unitsLift_symm_apply_apply_inv' ContinuousMapZero.instIsScalarTower' ContinuousMapZero.instSMulCommClass' Continuous.matrix_blockDiag' Continuous.matrix_blockDiagonal' continuousMultilinearCurryRightEquiv_apply' continuousMultilinearCurryRightEquiv_symm_apply' continuous_nnnorm' Continuous.nnnorm' continuous_norm' Continuous.norm' ContinuousOn.circleIntegrable' ContinuousOn.comp' ContinuousOn.comp'' ContinuousOn.div' ContinuousOn.finset_inf' ContinuousOn.finset_sup' continuousOn_id' ContinuousOn.if' continuousOn_iff' ContinuousOn.inf' ContinuousOn.nnnorm' ContinuousOn.norm' continuousOn_pi' ContinuousOn.piecewise' continuousOn_piecewise_ite' ContinuousOn.sup' Continuous.quotient_liftOn' Continuous.quotient_map' continuous_quotient_mk' Continuous.strictMono_of_inj_boundedOrder' Continuous.sup' ContinuousWithinAt.comp' ContinuousWithinAt.div' 
ContinuousWithinAt.finset_inf' ContinuousWithinAt.finset_sup' ContinuousWithinAt.inf' continuousWithinAt_inter' ContinuousWithinAt.nnnorm' ContinuousWithinAt.norm' ContinuousWithinAt.preimage_mem_nhdsWithin' ContinuousWithinAt.preimage_mem_nhdsWithin'' ContinuousWithinAt.sup' contMDiffAt_extChartAt' contMDiffAt_finset_prod' ContMDiffAt.prod_map' contMDiff_finset_prod' ContMDiffMap.mdifferentiable' contMDiffOn_finset_prod' contMDiffOn_iff_of_mem_maximalAtlas' ContMDiffSection.mdifferentiable' contMDiffWithinAt_finset_prod' contMDiffWithinAt_iff_of_mem_source' contMDiffWithinAt_inter' ContractingWith.apriori_edist_iterate_efixedPoint_le' ContractingWith.edist_efixedPoint_le' ContractingWith.edist_efixedPoint_lt_top' ContractingWith.efixedPoint_isFixedPt' ContractingWith.efixedPoint_mem' ContractingWith.fixedPoint_unique' ContractingWith.one_sub_K_pos' ContractingWith.tendsto_iterate_efixedPoint' ConvexBody.coe_smul' Convex.mem_toCone' ConvexOn.le_left_of_right_le' ConvexOn.le_left_of_right_le'' ConvexOn.le_right_of_left_le' ConvexOn.le_right_of_left_le'' ConvexOn.lt_left_of_right_lt' ConvexOn.lt_right_of_left_lt' ConvexOn.mul' ConvexOn.mul_concaveOn' ConvexOn.smul' ConvexOn.smul'' ConvexOn.smul_concaveOn' coord_norm' CovBy.ne' CoxeterSystem.alternatingWord_succ' CoxeterSystem.exists_reduced_word' CoxeterSystem.length_mul_ge_length_sub_length' CoxeterSystem.simple_mul_simple_pow' CPolynomialOn.congr' CPolynomialOn_congr' cpow_eq_nhds' cross_anticomm' csInf_le' csInf_le_csInf' csSup_le' csSup_le_csSup' csSup_le_iff' CStarAlgebra.conjugate_le_norm_smul' CStarAlgebra.instNonnegSpectrumClass' CStarRing.conjugate_le_norm_smul' CStarRing.instNonnegSpectrumClass' CStarRing.norm_star_mul_self' Ctop.Realizer.ext' Cubic.degree_of_a_eq_zero' Cubic.degree_of_a_ne_zero' Cubic.degree_of_b_eq_zero' Cubic.degree_of_b_ne_zero' Cubic.degree_of_c_eq_zero' Cubic.degree_of_c_ne_zero' Cubic.degree_of_d_eq_zero' Cubic.degree_of_d_ne_zero' Cubic.leadingCoeff_of_a_ne_zero' 
Cubic.leadingCoeff_of_b_ne_zero' Cubic.leadingCoeff_of_c_eq_zero' Cubic.leadingCoeff_of_c_ne_zero' Cubic.monic_of_a_eq_one' Cubic.monic_of_b_eq_one' Cubic.monic_of_c_eq_one' Cubic.monic_of_d_eq_one' Cubic.natDegree_of_a_eq_zero' Cubic.natDegree_of_a_ne_zero' Cubic.natDegree_of_b_eq_zero' Cubic.natDegree_of_b_ne_zero' Cubic.natDegree_of_c_eq_zero' Cubic.natDegree_of_c_ne_zero' Cubic.of_a_eq_zero' Cubic.of_b_eq_zero' Cubic.of_c_eq_zero' Cubic.of_d_eq_zero' Cycle.next_reverse_eq_prev' Cycle.prev_reverse_eq_next' CyclotomicField.algebra' dec_em' Decidable.mul_lt_mul'' Decidable.Partrec.const' decide_False' decide_True' DedekindDomain.ProdAdicCompletions.algebra' DedekindDomain.ProdAdicCompletions.algebraMap_apply' DedekindDomain.ProdAdicCompletions.IsFiniteAdele.algebraMap' IsDenseEmbedding.mk' Dense.exists_ge' Dense.exists_le' IsDenseInducing.extend_eq_at' IsDenseInducing.mk' Denumerable.lower_raise' Denumerable.raise_lower' deriv_add_const' Derivation.apply_aeval_eq' Derivation.coe_mk' deriv_const' deriv_const_add' deriv_const_mul_field' deriv_id' deriv_id'' deriv_inv' deriv_inv'' deriv_mul_const_field' deriv.neg' deriv_neg' deriv_neg'' deriv_pow' deriv_pow'' deriv_sqrt_mul_log' deriv.star' derivWithin_congr_set' derivWithin_inv' derivWithin_pow' deriv_zpow' det_traceMatrix_ne_zero' DFinsupp.coe_mk' DFinsupp.filter_ne_eq_erase' DFinsupp.le_iff' DFinsupp.Lex.wellFounded' DFinsupp.wellFoundedLT' DFunLike.ext' DiffContOnCl.differentiableAt' Diffeomorph.symm_trans' DifferentiableAt.comp' differentiableAt_id' differentiableAt_inv' DifferentiableAt.inv' differentiableAt_pi'' Differentiable.comp' differentiable_id' Differentiable.inv' DifferentiableOn.comp' differentiableOn_id' differentiableOn_inv' DifferentiableOn.inv' differentiableOn_pi'' differentiable_pi'' DifferentiableWithinAt.comp' differentiableWithinAt_congr_set' differentiableWithinAt_inter' differentiableWithinAt_inv' DifferentiableWithinAt.inv' differentiableWithinAt_pi'' DirectedOn.mono' directedOn_pair' 
DirectSum.Gmodule.mul_smul' DirectSum.Gmodule.one_smul' DirichletCharacter.level_one' DirichletCharacter.toUnitHom_eq_char' discreteTopology_iff_orderTopology_of_pred_succ' DiscreteTopology.of_forall_le_norm' DiscreteTopology.orderTopology_of_pred_succ' DiscreteValuationRing.addVal_def' Disjoint.inf_left' Disjoint.inf_right' Disjoint.inter_left' Disjoint.inter_right' Disjoint.of_disjoint_inf_of_le' dist_eq_norm_div' dist_le_norm_add_norm' dist_midpoint_midpoint_le' dist_norm_norm_le' dist_partial_sum' dist_pi_le_iff' DistribMulActionHom.coe_fn_coe' dite_eq_iff' div_add' div_div_cancel' div_div_cancel_left' div_div_div_cancel_left' div_div_self' div_eq_iff_eq_mul' div_eq_of_eq_mul' div_eq_of_eq_mul'' div_le_div'' div_le_div_iff' div_le_div_left' div_le_div_right' div_left_inj' div_le_iff₀' div_le_iff_le_mul' div_le_iff_of_neg' div_le_one' div_lt_div' div_lt_div₀' div_lt_div'' div_lt_div_iff' div_lt_div_left' div_lt_div_right' div_lt_iff' div_lt_iff_lt_mul' div_lt_iff_of_neg' div_lt_one' div_mul_div_cancel' div_mul_div_cancel₀' div_self' div_self_mul_self' div_sub' Doset.disjoint_out' Doset.out_eq' DoubleCentralizer.nnnorm_def' DoubleCentralizer.norm_def' dvd_antisymm' dvd_geom_sum₂_iff_of_dvd_sub' edist_zero_eq_enorm' EllipticCurve.coe_inv_map_Δ' EllipticCurve.coe_inv_variableChange_Δ' EllipticCurve.coe_map_Δ' EllipticCurve.coe_variableChange_Δ' em' Topology.IsEmbedding.mk' EMetric.diam_pos_iff' EMetric.diam_union' EMetric.mem_ball' EMetric.mem_closedBall' EMetric.totallyBounded_iff' ENat.add_biSup' ENat.biSup_add' ENat.biSup_add_biSup_le' ENat.sSup_eq_zero' Encodable.mem_decode₂' ENNReal.add_biSup' ENNReal.biSup_add' ENNReal.biSup_add_biSup_le' ENNReal.div_le_iff' ENNReal.div_le_of_le_mul' ENNReal.div_lt_of_lt_mul' ENNReal.exists_frequently_lt_of_liminf_ne_top' ENNReal.exists_pos_sum_of_countable' ENNReal.iInf_mul_left' ENNReal.iInf_mul_right' ENNReal.inv_le_inv' ENNReal.inv_lt_inv' ENNReal.log_pos_real' ENNReal.mul_le_of_le_div' ENNReal.mul_lt_mul_left' 
ENNReal.mul_lt_mul_right' ENNReal.mul_lt_of_lt_div' ENNReal.mul_top' ENNReal.nhds_top' ENNReal.ofReal_le_ofReal_iff' ENNReal.ofReal_lt_ofReal_iff' ENNReal.ofReal_mul' ENNReal.range_coe' ENNReal.some_eq_coe' ENNReal.toNNReal_eq_toNNReal_iff' ENNReal.top_mul' ENNReal.toReal_eq_toReal_iff' ENNReal.toReal_mono' ENNReal.toReal_ofReal' ENNReal.tsum_eq_iSup_nat' ENNReal.tsum_eq_iSup_sum' ENNReal.tsum_prod' ENNReal.tsum_sigma' Eq.cmp_eq_eq' eq_div_iff_mul_eq' eq_div_iff_mul_eq'' eq_div_of_mul_eq' eq_div_of_mul_eq'' eq_intCast' eq_mul_of_div_eq' eq_natCast' eq_of_forall_dvd' eq_of_prime_pow_eq' eqOn_closure₂' eq_one_of_inv_eq' eq_one_of_mul_left' eq_one_of_mul_right' eqRec_heq' Equiv.bijOn' Equiv.coe_piCongr' Equiv.exists_congr' Equiv.existsUnique_congr' Equiv.forall₂_congr' Equiv.forall₃_congr' Equiv.forall_congr' Equiv.inhabited' Equiv.lawfulFunctor' Equiv.left_inv' Equiv.Perm.cycleType_eq' Equiv.Perm.exists_fixed_point_of_prime' Equiv.Perm.isCycle_of_prime_order' Equiv.Perm.isCycle_of_prime_order'' Equiv.Perm.IsCycleOn.exists_pow_eq' Equiv.Perm.IsCycle.pow_eq_one_iff' Equiv.Perm.IsCycle.pow_eq_one_iff'' Equiv.Perm.mem_support_cycleOf_iff' Equiv.Perm.prod_comp' Equiv.Perm.SameCycle.exists_pow_eq' Equiv.Perm.SameCycle.exists_pow_eq'' Equiv.Perm.signAux_swap_zero_one' Equiv.Perm.sign_of_cycleType' Equiv.Perm.sign_swap' Equiv.right_inv' EReal.add_lt_add_of_lt_of_le' EReal.coe_neg' EReal.nhds_bot' EReal.nhds_top' EReal.sign_mul_inv_abs' essInf_const' essSup_const' essSup_mono_measure' estimator' EuclideanDomain.div_add_mod' EuclideanDomain.mod_add_div' EuclideanDomain.mul_div_cancel' EuclideanGeometry.center_eq_inversion' EuclideanGeometry.dist_center_eq_dist_center_of_mem_sphere' EuclideanGeometry.inversion_dist_center' EuclideanGeometry.inversion_eq_center' EuclideanGeometry.mem_sphere' EuclideanGeometry.Sphere.mem_coe' eventually_cobounded_le_norm' exists_apply_eq_apply' exists_apply_eq_apply2' exists_apply_eq_apply3' exists_associated_pow_of_mul_eq_pow' 
exists_Ico_subset_of_mem_nhds' exists_increasing_or_nonincreasing_subseq' exists_Ioc_subset_of_mem_nhds' exists_lt_of_lt_ciSup' exists_lt_of_lt_csSup' exists_maximal_independent' exists_one_lt' exists_one_lt_mul_of_lt' exists_reduced_fraction' exists_seq_strictAnti_tendsto' exists_seq_strictMono_tendsto' exists_square_le' exists_sum_eq_one_iff_pairwise_coprime' exists_unique_eq' existsUnique_zpow_near_of_one_lt' extChartAt_preimage_mem_nhds' extChartAt_source_mem_nhds' extChartAt_source_mem_nhdsWithin' extChartAt_target_mem_nhdsWithin' ext_nat' fderiv_continuousLinearEquiv_comp' fderiv_id' fderiv_list_prod' fderiv_mul' fderiv_mul_const' fderivWithin_congr' fderivWithin_congr_set' fderivWithin_eventually_congr_set' fderivWithin_id' fderivWithin_list_prod' fderivWithin_mul' fderivWithin_mul_const' FDRep.char_tensor' FermatLastTheoremWith.fermatLastTheoremWith' FiberBundleCore.open_source' Field.finInsepDegree_def' Field.primitive_element_iff_algHom_eq_of_eval' Filter.atBot_basis' Filter.atBot_basis_Iio' Filter.atTop_basis' Filter.atTop_basis_Ioi' Filter.bliminf_congr' Filter.blimsup_congr' Filter.comap_eq_lift' Filter.comap_eval_neBot_iff' Filter.comap_id' Filter.const_eventuallyEq' Filter.coprodᵢ_bot' Filter.coprodᵢ_eq_bot_iff' Filter.coprodᵢ_neBot_iff' Filter.countable_biInf_eq_iInf_seq' Filter.disjoint_comap_iff_map' Filter.eventually_atBot_prod_self' Filter.eventually_atTop_prod_self' Filter.eventuallyConst_pred' Filter.eventuallyConst_set' Filter.EventuallyEq.fderivWithin' Filter.EventuallyEq.iteratedFDerivWithin' Filter.EventuallyLE.mul_le_mul' Filter.eventually_smallSets' Filter.exists_forall_mem_of_hasBasis_mem_blimsup' Filter.ext' Filter.extraction_forall_of_eventually' Filter.frequently_atBot' Filter.frequently_atTop' Filter.Germ.coe_compTendsto' Filter.Germ.coe_smul' Filter.Germ.const_compTendsto' Filter.Germ.instDistribMulAction' Filter.Germ.instModule' Filter.Germ.instMulAction' Filter.Germ.instSMul' Filter.hasBasis_biInf_of_directed' 
Filter.hasBasis_biInf_principal' Filter.HasBasis.cauchySeq_iff' Filter.hasBasis_cobounded_norm' Filter.HasBasis.cobounded_of_norm' Filter.HasBasis.eventuallyConst_iff' Filter.hasBasis_iInf' Filter.hasBasis_iInf_of_directed' Filter.HasBasis.inf' Filter.HasBasis.lift' Filter.HasBasis.nhds' Filter.HasBasis.prod_nhds' Filter.HasBasis.sup' Filter.HasBasis.to_hasBasis' Filter.HasBasis.to_image_id' Filter.HasBasis.isUniformEmbedding_iff' Filter.iInf_neBot_iff_of_directed' Filter.iInf_sets_eq_finite' Filter.isScalarTower' Filter.isScalarTower'' Filter.le_lift' Filter.le_limsup_of_frequently_le' Filter.le_pure_iff' Filter.lift_lift'_same_eq_lift' Filter.lift_lift'_same_le_lift' Filter.lift'_mono' Filter.lift_mono' Filter.liminf_eq_iSup_iInf_of_nat' Filter.liminf_le_of_frequently_le' Filter.limsup_eq_iInf_iSup_of_nat' Filter.map_id' Filter.map_inf' Filter.map_inv' Filter.map_one' Filter.map_prod_eq_map₂' Filter.mem_bind' Filter.mem_coclosed_compact' Filter.mem_cocompact' Filter.mem_comap' Filter.mem_comap'' Filter.mem_iInf' Filter.mem_iInf_finite' Filter.mem_inf_principal' Filter.mem_lift' Filter.mem_map' Filter.mem_nhds_iff' Filter.mem_pi' Filter.mem_rcomap' Filter.mono_bliminf' Filter.mono_blimsup' Filter.monotone_lift' Filter.neBot_inf_comap_iff_map' Filter.nhds_eq' Filter.principal_le_lift' Filter.prod_comm' Filter.prod_lift'_lift' Filter.prod_map_map_eq' Filter.ptendsto_of_ptendsto' Filter.push_pull' Filter.rcomap'_rcomap' Filter.sInf_neBot_of_directed' Filter.smulCommClass_filter' Filter.smulCommClass_filter'' Filter.tendsto_atBot' Filter.tendsto_atBot_add_left_of_ge' Filter.tendsto_atBot_add_nonpos_left' Filter.tendsto_atBot_add_nonpos_right' Filter.tendsto_atBot_add_right_of_ge' Filter.tendsto_atBot_mono' Filter.tendsto_atBot_of_add_bdd_below_left' Filter.tendsto_atBot_of_add_bdd_below_right' Filter.tendsto_atTop' Filter.tendsto_atTop_add_left_of_le' Filter.tendsto_atTop_add_nonneg_left' Filter.tendsto_atTop_add_nonneg_right' Filter.tendsto_atTop_add_right_of_le' 
Filter.tendsto_atTop_mono' Filter.tendsto_atTop_of_add_bdd_above_left' Filter.tendsto_atTop_of_add_bdd_above_right' Filter.tendsto_congr' Filter.Tendsto.congr' Filter.Tendsto.const_div' Filter.Tendsto.div' Filter.Tendsto.div_const' Filter.Tendsto.eventually_ne_atTop' Filter.tendsto_id' Filter.Tendsto.if' Filter.tendsto_iff_rtendsto' Filter.tendsto_iInf' Filter.Tendsto.inf_nhds' Filter.tendsto_inv₀_cobounded' Filter.tendsto_lift' Filter.Tendsto.nnnorm' Filter.Tendsto.norm' Filter.tendsto_prod_iff' Filter.Tendsto.sup_nhds' Filter.unbounded_of_tendsto_atBot' Filter.unbounded_of_tendsto_atTop' Filter.univ_mem' Fin.card_filter_univ_succ' Fin.castPred_zero' Fin.cycleRange_zero' Fin.exists_fin_succ' Fin.find_min' Fin.forall_fin_succ' Fin.insertNth_last' Fin.insertNth_zero' Fin.isEmpty' FiniteDimensional.finiteDimensional_pi' FiniteField.card' Finite.Set.finite_biUnion' Fin.last_pos' Finmap.ext_iff' Fin.mem_piFinset_succ' Fin.mul_one' Fin.mul_zero' Fin.one_mul' Fin.one_pos' Fin.orderIso_subsingleton' Fin.partialProd_succ' Finpartition.IsEquipartition.card_biUnion_offDiag_le' Finpartition.IsEquipartition.sum_nonUniforms_lt' Fin.pred_one' Fin.preimage_apply_01_prod' Fin.prod_congr' finprod_emb_domain' finprod_mem_inter_mul_diff' finprod_mem_inter_mulSupport_eq' Fin.prod_univ_get' Fin.prod_univ_two' finrank_real_complex_fact' finRotate_last' Finset.abs_sum_of_nonneg' Finset.aemeasurable_prod' Finset.aestronglyMeasurable_prod' Finset.card_le_card_of_forall_subsingleton' Finset.card_mul_le_card_mul' Finset.coe_inf' Finset.coe_max' Finset.coe_min' Finset.coe_sup' Finset.Colex.toColex_sdiff_le_toColex_sdiff' Finset.Colex.toColex_sdiff_lt_toColex_sdiff' Finset.decidableMem' Finset.disjoint_filter_filter' Finset.eq_of_mem_uIcc_of_mem_uIcc' Finset.eq_prod_range_div' Finset.erase_injOn' Finset.exists_le_of_prod_le' Finset.exists_lt_of_prod_lt' Finset.exists_mem_eq_inf' Finset.exists_mem_eq_sup' Finset.exists_one_lt_of_prod_one_of_exists_ne_one' Finset.expect_boole_mul' 
Finset.expect_dite_eq' Finset.expect_ite_eq' Finset.extract_gcd' Finset.filter_attach' Finset.filter_inj' Finset.filter_ne' Finset.forall_mem_not_eq' Finset.Icc_mul_Icc_subset' Finset.Icc_mul_Ico_subset' Finset.Icc_subset_uIcc' Finset.Ici_mul_Ici_subset' Finset.Ici_mul_Ioi_subset' Finset.Ico_mul_Icc_subset' Finset.Ico_mul_Ioc_subset' Finset.Ico_union_Ico' Finset.Iic_mul_Iic_subset' Finset.Iic_mul_Iio_subset' Finset.Iio_mul_Iic_subset' Finset.image₂_singleton_left' Finset.image_id' Finset.image_mul_left' Finset.image_mul_right' Finset.inf'_sup_inf' Finset.insert_inj_on' Finset.insert_sdiff_insert' Finset.insert_val' Finset.Ioc_mul_Ico_subset' Finset.Ioi_mul_Ici_subset' Finset.isGreatest_max' Finset.isLeast_min' Finset.isScalarTower' Finset.isScalarTower'' Finset.le_inf' Finset.le_max' Finset.le_min' Finset.le_sum_condensed' Finset.le_sum_schlomilch' Finset.le_sup' Finset.lt_max'_of_mem_erase_max' Finset.map_filter' Finset.max'_eq_sup' Finset.measurable_prod' Finset.measurable_range_sup' Finset.measurable_range_sup'' Finset.measurable_sup' Finset.mem_finsuppAntidiag' Finset.mem_inv' Finset.mem_map' Finset.mem_range_iff_mem_finset_range_of_mod_eq' Finset.mem_uIcc' Finset.min'_eq_inf' Finset.min'_lt_max' Finset.min'_lt_of_mem_erase_min' Finset.mulEnergy_eq_sum_sq' Finset.Nat.antidiagonal_eq_image' Finset.Nat.antidiagonal_eq_map' Finset.Nat.antidiagonal_succ' Finset.Nat.antidiagonal_succ_succ' Finset.Nat.prod_antidiagonal_succ' Finset.Nat.sum_antidiagonal_succ' Finset.nnnorm_prod_le' Finset.noncommProd_cons' Finset.noncommProd_insert_of_not_mem' Finset.Nonempty.csInf_eq_min' Finset.Nonempty.csSup_eq_max' Finset.norm_prod_le' Finset.nsmul_inf' Finset.nsmul_sup' Finset.ofDual_inf' Finset.ofDual_max' Finset.ofDual_min' Finset.ofDual_sup' Finset.one_le_prod' Finset.one_le_prod'' Finset.one_lt_prod' Finset.pairwise_cons' Finset.pairwise_subtype_iff_pairwise_finset' Finset.piecewise_le_piecewise' Finset.piecewise_mem_Icc' Finset.PiFinsetCoe.canLift' 
Finset.preimage_mul_left_one' Finset.preimage_mul_right_one' Finset.prod_dite_eq' Finset.prod_eq_one_iff' Finset.prod_eq_one_iff_of_le_one' Finset.prod_eq_one_iff_of_one_le' Finset.prod_fiberwise' Finset.prod_fiberwise_eq_prod_filter' Finset.prod_fiberwise_le_prod_of_one_le_prod_fiber' Finset.prod_fiberwise_of_maps_to' Finset.prod_finset_product' Finset.prod_finset_product_right' Finset.prod_Ico_add' Finset.prod_image' Finset.prod_le_one' Finset.prod_le_prod_fiberwise_of_prod_fiber_le_one' Finset.prod_le_prod_of_ne_one' Finset.prod_le_prod_of_subset' Finset.prod_le_prod_of_subset_of_one_le' Finset.prod_le_univ_prod_of_one_le' Finset.prod_lt_one' Finset.prod_lt_prod' Finset.prod_lt_prod_of_subset' Finset.prod_mono_set' Finset.prod_mono_set_of_one_le' Finset.prod_pi_mulSingle' Finset.prod_preimage' Finset.prod_range_div' Finset.prod_range_succ' Finset.prod_sigma' Finset.range_add_one' Finset.sdiff_sdiff_left' Finset.single_le_prod' Finset.single_lt_prod' Finset.smulCommClass_finset' Finset.smulCommClass_finset'' Finset.smul_prod' Finset.smul_univ₀' Finset.sorted_last_eq_max' Finset.sorted_zero_eq_min' Finset.stronglyMeasurable_prod' Finset.subset_singleton_iff' Finset.sum_apply' Finset.sum_condensed_le' Finset.sum_pow' Finset.sum_schlomilch_le' Finset.sup'_inf_sup' Finset.toDual_inf' Finset.toDual_max' Finset.toDual_min' Finset.toDual_sup' Finset.tprod_subtype' Finset.uIcc_subset_uIcc_iff_le' Finset.untrop_sum' Fin.size_positive' Fin.succ_zero_eq_one' Finsupp.apply_single' Finsupp.card_support_eq_one' Finsupp.card_support_le_one' Finsupp.equivMapDomain_refl' Finsupp.equivMapDomain_trans' Finsupp.ext_iff' Finsupp.le_iff' Finsupp.le_weight_of_ne_zero' Finsupp.Lex.wellFounded' Finsupp.mapDomain_apply' Finsupp.mapRange_add' Finsupp.mapRange_neg' Finsupp.mapRange_sub' Finsupp.mem_supported' Finsupp.mulHom_ext' Finsupp.smul_single' Finsupp.subtypeDomain_eq_zero_iff' Finsupp.sum_apply' Finsupp.sum_cons' Finsupp.sum_ite_self_eq' Finsupp.sum_smul_index' 
Finsupp.sum_smul_index_linearMap' Finsupp.sum_sum_index' Finsupp.support_eq_singleton' Finsupp.support_subset_singleton' Finsupp.univ_sum_single_apply' Finsupp.wellFoundedLT' Fintype.card_congr' Fintype.card_of_finset' Fintype.card_subtype_eq' Fintype.expect_dite_eq' Fintype.expect_ite_eq' Fintype.prod_fiberwise' Fintype.prod_mono' Fintype.prod_strictMono' Fin.univ_image_get' Fin.univ_image_getElem' Fin.val_one' Fin.val_one'' Fin.zero_mul' Fin.zero_ne_one' FirstOrder.Language.addEmptyConstants_is_expansion_on' FirstOrder.Language.DirectLimit.cg' FirstOrder.Language.DirectLimit.funMap_quotient_mk'_sigma_mk' FirstOrder.Language.DirectLimit.lift_quotient_mk'_sigma_mk' FirstOrder.Language.DirectLimit.relMap_quotient_mk'_sigma_mk' FirstOrder.Language.Embedding.codRestrict_apply' FirstOrder.Language.funMap_quotient_mk' FirstOrder.Language.relMap_quotient_mk' FirstOrder.Language.Term.realize_quotient_mk' FixedPoints.minpoly.eval₂' FixedPoints.smulCommClass' forall_apply_eq_imp_iff' forall_eq_apply_imp_iff' forall_lt_iff_le' forall_prop_congr' forall_true_iff' FormalMultilinearSeries.apply_order_ne_zero' FormalMultilinearSeries.comp_coeff_zero' FormalMultilinearSeries.order_eq_find' FormalMultilinearSeries.order_eq_zero_iff' fourier_add' fourier_coe_apply' fourierIntegral_gaussian_innerProductSpace' fourierIntegral_gaussian_pi' fourier_neg' fourier_zero' four_ne_zero' FP.Float.sign' FractionalIdeal.absNorm_eq' FractionalIdeal.coeIdeal_eq_zero' FractionalIdeal.coeIdeal_inj' FractionalIdeal.coeIdeal_injective' FractionalIdeal.coeIdeal_le_coeIdeal' FractionalIdeal.coeIdeal_ne_zero' FractionalIdeal.inv_zero' FreeAbelianGroup.induction_on' FreeGroup.map.id' FreeMagma.lift_comp_of' FreeMagma.map_mul' FreeMagma.traverse_mul' FreeMagma.traverse_pure' FreeMonoid.countP_of' FreeSemigroup.lift_comp_of' FreeSemigroup.map_mul' FreeSemigroup.traverse_mul' FreeSemigroup.traverse_pure' frontier_closedBall' frontier_Ici' frontier_Iic' frontier_Iio' frontier_Ioi' frontier_sphere' 
Function.Antiperiodic.funext' Function.Antiperiodic.mul_const' Function.Antiperiodic.sub_eq' Function.Bijective.of_comp_iff' Function.Commute.iterate_pos_le_iff_map_le' Function.Commute.iterate_pos_lt_iff_map_lt' Function.Commute.iterate_pos_lt_of_map_lt' Function.Exact.of_ladder_addEquiv_of_exact' Function.Exact.split_tfae' Function.extend_apply' FunctionField.InftyValuation.map_add_le_max' FunctionField.InftyValuation.map_mul' FunctionField.InftyValuation.map_one' FunctionField.InftyValuation.map_zero' Function.Injective.eq_iff' Function.Injective.ne_iff' Function.Injective.of_comp_iff' Function.Injective.surjective_comp_right' Function.iterate_succ' Function.iterate_succ_apply' Function.minimalPeriod_iterate_eq_div_gcd' Function.mulSupport_add_one' Function.mulSupport_curry' Function.mulSupport_inv' Function.mulSupport_one' Function.mulSupport_one_add' Function.mulSupport_one_sub' Function.mulSupport_prod_mk' Function.mulSupport_subset_iff' Function.Periodic.mul_const' Function.periodicOrbit_chain' Function.Periodic.sub_eq' Function.support_div' Function.support_inv' Function.support_mul' Function.support_pow' Function.Surjective.of_comp_iff' Function.update_comp_eq_of_forall_ne' Function.update_comp_eq_of_injective' Function.update_comp_eq_of_not_mem_range' GaloisCoinsertion.isCoatom_iff' GaloisConnection.l_csSup' GaloisConnection.l_u_l_eq_l' GaloisConnection.u_csInf' GaloisConnection.u_l_u_eq_u' GaloisInsertion.isAtom_iff' gauge_gaugeRescale' gauge_lt_eq' gauge_zero' GaussianFourier.norm_cexp_neg_mul_sq_add_mul_I' GaussianInt.toComplex_def' gcd_assoc' gcd_comm' gcd_mul_left' gcd_mul_right' gcd_neg' gcd_one_left' gcd_one_right' gcd_zero_left' gcd_zero_right' GenContFract.of_convs_eq_convs' ge_of_tendsto' geom_sum_Ico' geom_sum_pos' geom_sum_succ' GradedTensorProduct.algebraMap_def' gradient_const' gradient_eq_deriv' gramSchmidt_def' gramSchmidt_def'' gramSchmidtNormed_unit_length' gramSchmidtOrthonormalBasis_inv_triangular' Group.conjugatesOfSet_subset' 
Group.fg_iff' GroupTopology.ext' GrpCat.coe_comp' GrpCat.coe_id' GrpCat.SurjectiveOfEpiAuxs.h_apply_fromCoset' GrpCat.SurjectiveOfEpiAuxs.τ_apply_fromCoset' HahnModule.mul_smul' HahnModule.one_smul' HahnModule.support_smul_subset_vadd_support' HahnModule.zero_smul' HahnSeries.add_coeff' HahnSeries.algebraMap_apply' HahnSeries.mul_assoc' HahnSeries.mul_coeff_left' HahnSeries.mul_coeff_right' HahnSeries.neg_coeff' HahnSeries.sub_coeff' HasCompactMulSupport.intro' HasCompactMulSupport.inv' HasCompactMulSupport.mono' HasDerivAt.complexToReal_fderiv' hasDerivAt_exp_smul_const' hasDerivAt_exp_smul_const_of_mem_ball' HasDerivAtFilter.hasGradientAtFilter' HasDerivAt.hasGradientAt' hasDerivAt_id' hasDerivAt_neg' HasDerivWithinAt.complexToReal_fderiv' hasDerivWithinAt_congr_set' hasDerivWithinAt_iff_tendsto_slope' hasDerivWithinAt_inter' HasDerivWithinAt.limsup_slope_le' hasFDerivAt_exp_smul_const' hasFDerivAt_exp_smul_const_of_mem_ball' hasFDerivAtFilter_pi' hasFDerivAt_list_prod' hasFDerivAt_list_prod_attach' hasFDerivAt_list_prod_finRange' HasFDerivAt.mul' HasFDerivAt.mul_const' hasFDerivAt_pi' hasFDerivAt_pi'' HasFDerivWithinAt.congr' hasFDerivWithinAt_congr_set' hasFDerivWithinAt_inter' HasFDerivWithinAt.list_prod' HasFDerivWithinAt.mul' HasFDerivWithinAt.mul_const' hasFDerivWithinAt_pi' hasFDerivWithinAt_pi'' HasFiniteFPowerSeriesOnBall.mk' hasFPowerSeriesAt_iff' HasFPowerSeriesOnBall.factorial_smul' hasFTaylorSeriesUpToOn_pi' HasFTaylorSeriesUpToOn.zero_eq' HasFTaylorSeriesUpTo.zero_eq' HasGradientAtFilter.hasDerivAtFilter' HasGradientAt.hasDerivAt' hasGradientWithinAt_congr_set' HasLineDerivWithinAt.congr' HasLineDerivWithinAt.hasLineDerivAt' HasMFDerivAt.mul' hasMFDerivWithinAt_inter' HasMFDerivWithinAt.mul' HasOrthogonalProjection.map_linearIsometryEquiv' hasProd_nat_add_iff' HasStrictDerivAt.complexToReal_fderiv' hasStrictDerivAt_exp_smul_const' hasStrictDerivAt_exp_smul_const_of_mem_ball' hasStrictFDerivAt_exp_smul_const' 
hasStrictFDerivAt_exp_smul_const_of_mem_ball' hasStrictFDerivAt_list_prod' HasStrictFDerivAt.list_prod' hasStrictFDerivAt_list_prod_attach' hasStrictFDerivAt_list_prod_finRange' HasStrictFDerivAt.mul' HasStrictFDerivAt.mul_const' hasStrictFDerivAt_pi' hasStrictFDerivAt_pi'' hasSum_choose_mul_geometric_of_norm_lt_one' hasSum_geometric_two' HasSum.matrix_blockDiag' HasSum.matrix_blockDiagonal' hasSum_sum_range_mul_of_summable_norm' Homeomorph.comp_continuousAt_iff' Homeomorph.comp_continuous_iff' Homeomorph.comp_isOpenMap_iff' HomogeneousIdeal.ext' HomologicalComplex₂.d₁_eq' HomologicalComplex₂.d₁_eq_zero' HomologicalComplex₂.d₂_eq' HomologicalComplex₂.d₂_eq_zero' HomologicalComplex₂.totalAux.d₁_eq' HomologicalComplex₂.totalAux.d₂_eq' HomologicalComplex.exactAt_iff' HomologicalComplex.extend.d_none_eq_zero' HomologicalComplex.homotopyCofiber.desc_f' HomologicalComplex.homotopyCofiber.ext_from_X' HomologicalComplex.homotopyCofiber.ext_to_X' HomologicalComplex.homotopyCofiber.inlX_d' HomologicalComplex.isZero_extend_X' HomologicalComplex.mapBifunctor.d₁_eq' HomologicalComplex.mapBifunctor.d₁_eq_zero' HomologicalComplex.mapBifunctor.d₂_eq' HomologicalComplex.mapBifunctor.d₂_eq_zero' HomologicalComplex.restrictionMap_f' HomotopyCategory.Pretriangulated.invRotate_distinguished_triangle' HomotopyCategory.Pretriangulated.rotate_distinguished_triangle' HurwitzZeta.jacobiTheta₂'_functional_equation' HurwitzZeta.oddKernel_def' Hyperreal.isSt_st' Icc_mem_nhdsWithin_Ici' Icc_mem_nhdsWithin_Iic' Icc_mem_nhdsWithin_Iio' Icc_mem_nhdsWithin_Ioi' Ico_mem_nhdsWithin_Ici' Ico_mem_nhdsWithin_Iio' Ico_mem_nhdsWithin_Ioi' Ideal.comap_map_of_surjective' Ideal.comap_sInf' Ideal.eq_jacobson_iff_sInf_maximal' Ideal.isJacobson_iff_sInf_maximal' Ideal.isJacobson_of_isIntegral' Ideal.isMaximal_comap_of_isIntegral_of_isMaximal' Ideal.IsMaximal.isPrime' Ideal.isMaximal_of_isIntegral_of_isMaximal_comap' Ideal.isPrime_ideal_prod_top' Ideal.IsPrime.inf_le' Ideal.isPrime_of_isPrime_prod_top' 
Ideal.mem_span_insert' Ideal.mem_span_singleton' Ideal.MvPolynomial.quotient_mk_comp_C_isIntegral_of_jacobson' Ideal.Polynomial.isMaximal_comap_C_of_isJacobson' Ideal.quotientInfToPiQuotient_mk' Ideal.Quotient.smulCommClass' Ideal.span_mul_span' Ideal.subset_union_prime' IfExpr.eval_ite_ite' iInf₂_mono' iInf_le' iInf_mono' iInf_prod' iInf_psigma' iInf_range' iInf_sigma' iInf_subtype' iInf_subtype'' imageSubobjectIso_imageToKernel' Imo1962Q1.ProblemPredicate' imo1962_q4' Imo1969Q1.not_prime_of_int_mul' Imo2001Q2.imo2001_q2' imp_or' induced_orderTopology' Topology.IsInducing.continuousAt_iff' Topology.IsInducing.isClosed_iff' inf_compl_eq_bot' inf_eq_half_smul_add_sub_abs_sub' inner_map_polarization' InnerProductSpaceable.add_left_aux2' InnerProductSpaceable.add_left_aux4' Inseparable.specializes' Int.add_le_zero_iff_le_neg' Int.add_nonnneg_iff_neg_le' Int.ceil_eq_on_Ioc' Int.coprime_of_sq_sum' Int.dist_eq' integrable_cexp_quadratic' integrableOn_Icc_iff_integrableOn_Ico' integrableOn_Icc_iff_integrableOn_Ioc' integrableOn_Icc_iff_integrableOn_Ioo' integrableOn_Ici_iff_integrableOn_Ioi' integrableOn_Ico_iff_integrableOn_Ioo' integrableOn_Iic_iff_integrableOn_Iio' integrableOn_Ioc_iff_integrableOn_Ioo' Int.eq_one_or_neg_one_of_mul_eq_neg_one' Int.eq_one_or_neg_one_of_mul_eq_one' interior_closedBall' interior_eq_nhds' interior_Ici' interior_Iic' interior_sphere' IntermediateField.algebra' IntermediateField.charP' IntermediateField.eq_of_le_of_finrank_le'' IntermediateField.exists_algHom_adjoin_of_splits'' IntermediateField.exists_algHom_of_splits' IntermediateField.exists_finset_of_mem_supr' IntermediateField.exists_finset_of_mem_supr'' IntermediateField.expChar' IntermediateField.finInsepDegree_bot' IntermediateField.finiteDimensional_iSup_of_finset' IntermediateField.finrank_bot' IntermediateField.finrank_top' IntermediateField.finSepDegree_bot' IntermediateField.insepDegree_bot' IntermediateField.lift_insepDegree_bot' IntermediateField.lift_sepDegree_bot' 
IntermediateField.module' IntermediateField.normalClosure_def' IntermediateField.normalClosure_def'' IntermediateField.normal_iff_forall_map_eq' IntermediateField.normal_iff_forall_map_le' IntermediateField.rank_bot' IntermediateField.rank_top' IntermediateField.sepDegree_bot' intermediate_value_Ico' intermediate_value_Ioc' intermediate_value_Ioo' IntervalIntegrable.aestronglyMeasurable' intervalIntegrable_iff' IntervalIntegrable.mono_fun' IntervalIntegrable.mono_set' intervalIntegral.continuous_parametric_intervalIntegral_of_continuous' intervalIntegral.integral_congr_ae' intervalIntegral.integral_const' intervalIntegral.integral_deriv_comp_mul_deriv' intervalIntegral.integral_deriv_comp_smul_deriv' intervalIntegral.integral_deriv_eq_sub' intervalIntegral.integral_interval_sub_interval_comm' Int.even_add' Int.even_or_odd' Int.even_pow' Int.even_sub' Int.even_xor'_odd' Int.exists_gcd_one' Int.floor_eq_on_Ico' Int.Matrix.exists_ne_zero_int_vec_norm_le' Int.ModEq.add_left_cancel' Int.ModEq.add_right_cancel' Int.ModEq.mul_left' Int.ModEq.mul_right' Int.natAbs_ofNat' Int.odd_add' Int.odd_pow' Int.odd_sub' Int.Prime.dvd_mul' Int.Prime.dvd_pow' Int.toNat_lt' Int.two_pow_sub_pow' inv_div' inv_le' inv_le_div_iff_le_mul' inv_le_iff_one_le_mul' inv_le_inv' inv_lt' inv_lt_div_iff_lt_mul' inv_lt_iff_one_lt_mul' inv_lt_inv' inv_mul' inv_mul_le_iff' inv_mul_le_iff_le_mul' inv_mul_lt_iff' inv_mul_lt_iff_lt_mul' inv_neg' inv_neg'' invOf_mul_cancel_left' invOf_mul_cancel_right' invOf_mul_self' invOf_one' inv_pos_le_iff_one_le_mul' inv_pos_lt_iff_one_lt_mul' inv_zpow' Ioc_mem_nhdsWithin_Iic' Ioc_mem_nhdsWithin_Iio' Ioc_mem_nhdsWithin_Ioi' Ioo_mem_nhdsWithin_Iio' Ioo_mem_nhdsWithin_Ioi' IsAbsoluteValue.abv_one' isAddFundamentalDomain_Ioc' isAdjointPair_toBilin' isAdjointPair_toLinearMap₂' IsAlgClosed.algebraMap_surjective_of_isIntegral' IsAntichain.eq' IsAntichain.interior_eq_empty' isArtinian_of_fg_of_artinian' isArtinian_submodule' IsBaseChange.algHom_ext' 
IsBoundedBilinearMap.isBigO' isBounded_iff_forall_norm_le' isBoundedUnder_ge_finset_inf' isBoundedUnder_le_finset_sup' IsCauSeq.bounded' isClosed_induced_iff' isCoboundedUnder_ge_finset_inf' isCoboundedUnder_le_finset_sup' IsCompact.elim_nhds_subcover' IsCompact.elim_nhds_subcover_nhdsSet' IsCompact.exists_bound_of_continuousOn' isCompact_iff_ultrafilter_le_nhds' IsCompact.tendsto_subseq' isComplete_iff_ultrafilter' IsCoprime.isUnit_of_dvd' IsCyclotomicExtension.neZero' IsCyclotomicExtension.Rat.discr_odd_prime' IsDedekindDomain.HeightOneSpectrum.adicCompletion.algebra' IsDedekindDomain.HeightOneSpectrum.adicCompletion.instIsScalarTower' IsDedekindDomain.HeightOneSpectrum.adicValued.has_uniform_continuous_const_smul' IsDedekindDomain.HeightOneSpectrum.algebraMap_adicCompletion' isField_of_isIntegral_of_isField' IsFractionRing.mk'_num_den' IsFractionRing.num_mul_den_eq_num_iff_eq' IsGLB.exists_between' IsGLB.exists_between_self_add' isGLB_inv' IsGroupHom.inv_iff_ker' IsGroupHom.inv_ker_one' IsGroupHom.map_mul' IsGroupHom.one_iff_ker_inv' IsGroupHom.one_ker_inv' IsIntegralClosure.algebraMap_mk' isIntegral_localization' IsIntegral.minpoly_splits_tower_top' IsIntegral.of_mem_closure'' IsInvariantSubring.coe_subtypeHom' IsKleinFour.card_four' IsLindelof.elim_nhds_subcover' IsLinearMap.isLinearMap_smul' IsLocalization.algebraMap_mk' IsLocalization.algEquiv_mk' IsLocalization.algEquiv_symm_mk' IsLocalization.map_id_mk' IsLocalization.map_mk' IsLocalization.mem_invSubmonoid_iff_exists_mk' IsLocalization.mk'_eq_iff_eq' IsLocalization.mk'_eq_of_eq' IsLocalization.mk'_mul_mk'_eq_one' IsLocalization.mk'_self' IsLocalization.mk'_self'' IsLocalization.mk'_spec' IsLocalization.ringEquivOfRingEquiv_mk' IsLocalization.smul_mk' IsLocalization.surj'' IsLocalization.toInvSubmonoid_eq_mk' IsLocalizedModule.iso_symm_apply' IsLocalizedModule.map_mk' IsLocalizedModule.mk'_add_mk' IsLocalizedModule.mk'_cancel' IsLocalizedModule.mk_eq_mk' IsLocalizedModule.mk'_eq_zero' 
IsLocalizedModule.mk'_mul_mk' IsLocalizedModule.mk'_sub_mk' IsLowerSet.cthickening' IsLowerSet.thickening' isLUB_csSup' IsLUB.exists_between' IsLUB.exists_between_sub_self' isLUB_hasProd' isLUB_inv' IsMax.not_isMin' IsMin.not_isMax' isNoetherian_iff' isNoetherian_submodule' IsometryEquiv.comp_continuous_iff' isOpen_extChartAt_preimage' isOpen_gt' isOpen_iff_ultrafilter' IsOpen.ite' isOpen_lt' isOpen_pi_iff' IsPathConnected.exists_path_through_family' IsPGroup.to_sup_of_normal_left' IsPGroup.to_sup_of_normal_right' IsPreconnected.union' IsPrimitiveRoot.card_rootsOfUnity' IsPrimitiveRoot.finite_quotient_span_sub_one' IsPrimitiveRoot.isPrimitiveRoot_iff' IsPrimitiveRoot.isUnit_unit' IsPrimitiveRoot.neZero' IsPrimitiveRoot.zmodEquivZPowers_symm_apply_pow' IsPrimitiveRoot.zmodEquivZPowers_symm_apply_zpow' isQuasiregular_iff_isUnit' isRegular_iff_ne_zero' isRegular_of_ne_zero' IsScalarTower.coe_toAlgHom' IsScalarTower.subalgebra' IsScalarTower.to_smulCommClass' IsSelfAdjoint.conjugate' isSemisimpleModule_of_isSemisimpleModule_submodule' IsUnifLocDoublingMeasure.eventually_measure_le_scaling_constant_mul' IsUnifLocDoublingMeasure.exists_measure_closedBall_le_mul' isUnit_iff_exists_inv' IsUnit.map' IsUnit.val_inv_unit' iSup₂_mono' iSup_mono' iSup_of_empty' IsUpperSet.cthickening' IsUpperSet.thickening' iSup_prod' iSup_psigma' iSup_range' iSup_sigma' iSup_subtype' iSup_subtype'' ite_eq_iff' iteratedFDeriv_add_apply' iteratedFDeriv_const_smul_apply' iteratedFDerivWithin_eventually_congr_set' iter_deriv_inv' iter_deriv_pow' iter_deriv_zpow' jacobiTheta₂'_add_left' KaehlerDifferential.isScalarTower' KaehlerDifferential.module' LatticeHom.coe_comp_inf_hom' LatticeHom.coe_comp_sup_hom' LawfulFix.fix_eq' lcm_assoc' lcm_comm' le_abs' le_add_tsub' Lean.Elab.Tactic.TacticM.runCore' le_ciInf_iff' le_ciSup_iff' le_csInf_iff' le_csInf_iff'' le_csSup_iff' le_div_iff₀' le_div_iff_mul_le' le_div_iff_of_neg' LeftOrdContinuous.map_sSup' Left.pow_lt_one_iff' legendreSym.eq_neg_one_iff' 
legendreSym.eq_one_iff' le_hasProd' le_iff_exists_mul' le_iff_forall_one_lt_lt_mul' le_inv' le_iSup' le_map_add_map_div' le_mul_iff_one_le_left' le_mul_iff_one_le_right' le_mul_of_le_of_one_le' le_mul_of_one_le_left' le_mul_of_one_le_right' le_nhdsAdjoint_iff' le_of_eq_of_le' le_of_forall_le' le_of_forall_lt' le_of_forall_one_lt_lt_mul' le_of_le_of_eq' le_of_mul_le_mul_left' le_of_mul_le_mul_right' le_of_pow_le_pow_left' le_of_tendsto' le_of_tendsto_of_tendsto' le_tprod' le_trans' Lex.instDistribMulAction' Lex.instDistribSMul' Lex.instIsScalarTower' Lex.instIsScalarTower'' Lex.instModule' Lex.instMulAction' Lex.instMulActionWithZero' Lex.instPow' Lex.instSMulCommClass' Lex.instSMulCommClass'' Lex.instSMulWithZero' LieAlgebra.IsKilling.apply_coroot_eq_cast' LieAlgebra.IsKilling.coe_corootSpace_eq_span_singleton' LieAlgebra.lieCharacter_apply_lie' LieAlgebra.mem_corootSpace' LieIdeal.map_sup_ker_eq_map' LieModule.chainTop_isNonZero' LieModule.coe_chainTop' LieModule.genWeightSpaceChain_def' LieModule.iSupIndep_genWeightSpace' LieModule.instIsTrivialOfSubsingleton' LieModule.isNilpotent_of_top_iff' LieModule.iSup_genWeightSpace_eq_top' LieModule.Weight.ext_iff' LieSubalgebra.coe_incl' LieSubalgebra.ext_iff' LieSubalgebra.mem_normalizer_iff' LieSubmodule.iSup_induction' LieSubmodule.lieIdeal_oper_eq_linear_span' LieSubmodule.mem_mk_iff' LieSubmodule.module' LieSubmodule.Quotient.mk_eq_zero' LieSubmodule.Quotient.module' LieSubmodule.Quotient.range_mk' LieSubmodule.Quotient.surjective_mk' LieSubmodule.Quotient.toEnd_comp_mk' LieSubmodule.sInf_coe_toSubmodule' LieSubmodule.sSup_coe_toSubmodule' liftOfDerivationToSquareZero_mk_apply' lift_rank_lt_rank_dual' LightProfinite.proj_comp_transitionMap' LightProfinite.proj_comp_transitionMapLE' liminf_finset_inf' limsup_finset_sup' linearDependent_comp_subtype' LinearEquiv.apply_smulCommClass' LinearEquiv.coe_toContinuousLinearEquiv' LinearEquiv.coe_toContinuousLinearEquiv_symm' LinearEquiv.isRegular_congr' 
LinearEquiv.isSMulRegular_congr' LinearEquiv.isWeaklyRegular_congr' LinearEquiv.mk_coe' linearIndependent_algHom_toLinearMap' LinearIndependent.cardinal_le_rank' linearIndependent_equiv' LinearIndependent.eq_zero_of_pair' linearIndependent_fin_succ' linearIndependent_iff' linearIndependent_iff'' linearIndependent_inl_union_inr' linearIndependent_insert' linearIndependent_le_span_aux' linearIndependent_option' LinearIndependent.span_eq_top_of_card_eq_finrank' LinearIndependent.to_subtype_range' LinearIsometry.completeSpace_map' LinearIsometryEquiv.coe_coe'' LinearIsometryEquiv.comp_fderiv' LinearIsometryEquiv.comp_hasFDerivAt_iff' LinearIsometryEquiv.comp_hasFDerivWithinAt_iff' LinearIsometry.isComplete_image_iff' LinearIsometry.isComplete_map_iff' LinearIsometry.map_orthogonalProjection' LinearMap.apply_smulCommClass' LinearMap.BilinForm.mul_toMatrix' LinearMap.BilinForm.nondegenerate_toBilin'_of_det_ne_zero' LinearMap.BilinForm.Nondegenerate.toMatrix' LinearMap.BilinForm.toMatrix'_toBilin' LinearMap.coe_toContinuousLinearMap' LinearMap.detAux_def'' LinearMap.det_toLin' LinearMap.det_toMatrix' LinearMap.det_zero' LinearMap.det_zero'' LinearMap.disjoint_ker' LinearMap.dualMap_apply' LinearMap.extendScalarsOfIsLocalization_apply' LinearMap.IsProj.eq_conj_prod_map' LinearMap.IsScalarTower.compatibleSMul' LinearMap.IsSymmetric.orthogonalComplement_iSup_eigenspaces_eq_bot' LinearMap.IsSymmetric.orthogonalFamily_eigenspaces' LinearMap.ker_eq_bot' LinearMap.ker_smul' LinearMap.lcomp_apply' LinearMap.llcomp_apply' LinearMap.map_le_map_iff' LinearMap.minpoly_toMatrix' LinearMap.mkContinuous₂_norm_le' LinearMap.mul_apply' LinearMap.mul_toMatrix' LinearMap.ofIsCompl_eq' LinearMap.range_smul' LinearMap.separatingLeft_toLinearMap₂'_of_det_ne_zero' LinearMap.SeparatingLeft.toMatrix₂' LinearMap.stdBasis_apply' LinearMap.toMatrixAlgEquiv_apply' LinearMap.toMatrixAlgEquiv'_toLinAlgEquiv' LinearMap.toMatrixAlgEquiv_transpose_apply' LinearMap.toMatrix_apply' 
LinearMap.toMatrix'_toLin' LinearMap.toMatrix'_toLinearMap₂' LinearMap.toMatrix'_toLinearMapₛₗ₂' LinearMap.toMatrix_transpose_apply' LinearMap.trace_comp_comm' LinearMap.trace_conj' LinearMap.trace_eq_sum_trace_restrict' LinearMap.trace_mul_cycle' LinearMap.trace_prodMap' LinearMap.trace_tensorProduct' LinearMap.trace_transpose' LinearOrderedCommGroup.mul_lt_mul_left' LinearPMap.closure_def' LinearPMap.ext' LinearPMap.mem_graph_iff' LinearPMap.mem_graph_snd_inj' LinearPMap.toFun' lineDerivWithin_congr' LipschitzOnWith.of_dist_le' LipschitzWith.const' LipschitzWith.integral_inv_smul_sub_mul_tendsto_integral_lineDeriv_mul' LipschitzWith.nnorm_le_mul' LipschitzWith.norm_le_mul' LipschitzWith.of_dist_le' lipschitzWith_one_nnnorm' lipschitzWith_one_norm' List.aemeasurable_prod' List.aestronglyMeasurable_prod' List.alternatingProd_cons' List.alternatingProd_cons_cons' list_casesOn' list_cons' List.cons_sublist_cons' List.count_cons' List.dedup_cons_of_mem' List.dedup_cons_of_not_mem' List.destutter_cons' List.destutter'_is_chain' List.destutter_is_chain' List.destutter_of_chain' List.drop_take_succ_join_eq_get' List.exists_le_of_prod_le' List.exists_lt_of_prod_lt' List.ext_get?' 
List.ext_get?_iff' List.filter_attach' List.filter_subset' list_foldl' List.foldl_eq_foldr' List.foldl_eq_of_comm' List.foldl_fixed' List.foldr_eq_of_comm' List.foldr_fixed' List.Forall₂.prod_le_prod' List.getLast_append' List.getLast_concat' List.getLast_singleton' List.get_reverse' List.get?_zipWith' List.inter_nil' List.isRotated_nil_iff' List.isRotated_singleton_iff' List.LE' List.left_unique_forall₂' List.le_maximum_of_mem' List.length_foldr_permutationsAux2' List.length_mergeSort' List.length_rotate' List.length_sublists' List.lookmap_id' List.LT' List.map₂Left_eq_map₂Left' List.map₂Right_eq_map₂Right' List.map_filter' List.map_mergeSort' List.map_permutations' List.map_permutationsAux2' List.measurable_prod' List.mem_destutter' List.mem_mergeSort' List.mem_permutations' List.mem_permutationsAux2' List.mem_sublists' List.minimum_le_of_mem' List.Nat.antidiagonal_succ' List.Nat.antidiagonal_succ_succ' List.next_cons_cons_eq' List.nnnorm_prod_le' List.nodup_sublists' List.norm_prod_le' List.not_maximum_lt_of_mem' List.not_lt_minimum_of_mem' List.ofFn_succ' List.Pairwise.chain' List.pairwise_map' List.Pairwise.sublists' List.perm_mergeSort' List.Perm.permutations' List.permutations_perm_permutations' List.prev_cons_cons_eq' List.prev_cons_cons_of_ne' List.prev_getLast_cons' List.prod_le_prod' List.prod_lt_prod' List.replicate_right_inj' List.replicate_succ' list_reverse' List.reverse_concat' List.reverse_cons' List.revzip_sublists' List.right_unique_forall₂' List.rotate_eq_rotate' List.rotate'_rotate' Lists' Lists.lt_sizeof_cons' Lists'.mem_of_subset' List.smul_prod' List.sorted_mergeSort' List.stronglyMeasurable_prod' List.SublistForall₂.prod_le_prod' List.sublists_eq_sublists' List.sublistsLen_sublist_sublists' List.sublists_perm_sublists' List.support_formPerm_le' List.support_formPerm_of_nodup' List.takeD_left' List.takeI_left' List.tendsto_insertNth' List.zipLeft_eq_zipLeft' List.zipRight_eq_zipRight' List.zipWith_swap_prod_support' Localization.algEquiv_mk' 
Localization.algEquiv_symm_mk' Localization.Away.mk_eq_monoidOf_mk' Localization.epi' Localization.liftOn₂_mk' Localization.liftOn_mk' Localization.localRingHom_mk' Localization.mk_eq_mk' Localization.mk_eq_mk_iff' Localization.mk_eq_monoidOf_mk' Localization.mulEquivOfQuotient_mk' Localization.mulEquivOfQuotient_symm_mk' localization_unit_isIso' LocalizedModule.add_assoc' LocalizedModule.add_comm' LocalizedModule.add_zero' LocalizedModule.algebra' LocalizedModule.algebraMap_mk' LocalizedModule.isModule' LocalizedModule.mul_smul' LocalizedModule.nsmul_succ' LocalizedModule.nsmul_zero' LocalizedModule.zero_add' LocallyFinite.continuous' LocallyFinite.continuousOn_iUnion' LocallyFinite.option_elim' IsLocalRing.of_surjective' logDeriv_id' lowerClosure_interior_subset' lp.eq_zero' lp.norm_le_of_forall_le' lp.norm_nonneg' lp.tsum_mul_le_mul_norm' LSeries.abscissaOfAbsConv_le_of_forall_lt_LSeriesSummable' lt_div_iff' lt_div_iff_mul_lt' lt_div_iff_of_neg' lt_iff_lt_of_le_iff_le' lt_inv' lt_inv_iff_mul_lt_one' LT.lt.ne' lt_mul_iff_one_lt_left' lt_mul_iff_one_lt_right' lt_mul_of_le_of_one_lt' lt_mul_of_lt_of_one_le' lt_mul_of_lt_of_one_lt' lt_mul_of_one_lt_left' lt_mul_of_one_lt_of_lt' lt_mul_of_one_lt_right' lt_of_eq_of_lt' lt_of_le_of_lt' lt_of_le_of_ne' lt_of_lt_of_eq' lt_of_lt_of_le' lt_of_mul_lt_mul_left' lt_of_mul_lt_mul_right' lt_of_pow_lt_pow_left' lt_trans' mabs_le' Magma.AssocQuotient.lift_comp_of' MapClusterPt.tendsto_comp' map_comp_div' map_comp_zpow' map_div' map_extChartAt_nhds' map_extChartAt_nhdsWithin' map_extChartAt_nhdsWithin_eq_image' map_extChartAt_symm_nhdsWithin' map_extChartAt_symm_nhdsWithin_range' map_finset_inf' map_finset_sup' map_natCast' map_ofNat' map_preNormEDS' mapsTo_omegaLimit' map_zpow' Mathlib.Meta.Finset.range_succ' Mathlib.Meta.Finset.range_zero' Mathlib.Meta.FunProp.StateList.toList' Mathlib.Meta.List.range_succ_eq_map' Mathlib.Meta.List.range_zero' Mathlib.Meta.Multiset.range_succ' Mathlib.Meta.Multiset.range_zero' 
Mathlib.Meta.NormNum.jacobiSymNat.qr₁' Mathlib.Meta.Positivity.lt_of_le_of_ne' Mathlib.Tactic.ComputeDegree.coeff_pow_of_natDegree_le_of_eq_ite' Mathlib.Tactic.ComputeDegree.degree_eq_of_le_of_coeff_ne_zero' Mathlib.Tactic.Group.zpow_trick_one' Mathlib.Tactic.Ring.atom_pf' Mathlib.Util.addAndCompile' List.Vector.eraseIdx_insertNth' List.Vector.prod_set' Mathlib.WhatsNew.mkHeader' Matrix.blockDiag'_blockDiagonal' Matrix.blockDiagonal'_apply' Matrix.blockDiagonal_apply' Matrix.blockTriangular_blockDiagonal' Matrix.blockTriangular_single' Matrix.blockTriangular_transvection' Matrix.cons_val' Matrix.cons_val_succ' Matrix.cons_val_zero' Matrix.det_apply' Matrix.det_units_conj' Matrix.det_updateCol_smul' Matrix.det_updateRow_smul' Matrix.diagonal_apply_ne' Matrix.diagonal_intCast' Matrix.diagonal_mul_diagonal' Matrix.diagonal_natCast' Matrix.diagonal_ofNat' Matrix.diagonal_toLin' dotProduct_diagonal' dotProduct_zero' Matrix.empty_val' Matrix.exists_mulVec_eq_zero_iff' Matrix.exp_blockDiagonal' Matrix.exp_conj' Matrix.exp_units_conj' Matrix.head_val' Matrix.induction_on' Matrix.inv_pow' Matrix.inv_smul' Matrix.inv_zpow' Matrix.isAdjointPair_equiv' Matrix.ker_diagonal_toLin' Matrix.kronecker_assoc' Matrix.kroneckerTMul_assoc' Matrix.map_id' Matrix.mem_orthogonalGroup_iff' Matrix.mem_unitaryGroup_iff' Matrix.minpoly_toLin' Matrix.mul_apply' Matrix.Nondegenerate.toBilin' Matrix.Nondegenerate.toLinearMap₂' Matrix.one_apply_ne' Matrix.PosDef.of_toQuadraticForm' Matrix.PosDef.toQuadraticForm' Matrix.pow_inv_comm' Matrix.pow_sub' Matrix.range_toLin' Matrix.represents_iff' Matrix.tail_val' Matrix.toBilin'_apply' Matrix.toBilin'_toMatrix' Matrix.toLinAlgEquiv'_toMatrixAlgEquiv' Matrix.toLin'_apply' Matrix.toLinearMap₂'_apply' Matrix.toLinearMap₂'_toMatrix' Matrix.toLinearMapₛₗ₂'_toMatrix' Matrix.toLin'_toMatrix' Matrix.trace_blockDiagonal' Matrix.trace_mul_cycle' Matrix.twoBlockTriangular_det' Matrix.vec2_dotProduct' Matrix.vec3_dotProduct' zero_dotProduct' Matrix.zpow_mul' 
Matroid.Base.exchange_base_of_indep' Matroid.base_restrict_iff' Matroid.Basis.basis' Matroid.basis_iff' Matroid.basis_iff_basis_closure_of_subset' Matroid.basis_restrict_iff' Matroid.closure_def' Matroid.coindep_iff_exists' Matroid.dual_base_iff' Matroid.dual_indep_iff_exists' Matroid.exists_basis' Matroid.Finitary.sum' Matroid.Indep.mem_closure_iff' Matroid.map_basis_iff' Matroid.mapSetEmbedding_indep_iff' Matroid.mem_closure_of_mem' Matroid.restrictSubtype_dual' Matroid.subset_closure_of_subset' Matroid.uniqueBaseOn_indep_iff' Matroid.uniqueBaseOn_restrict' max_def' max_div_div_left' max_div_div_right' max_div_min_eq_mabs' maximal_subset_iff' max_inv_inv' max_mul_mul_le_max_mul_max' max_rec' mdifferentiableWithinAt_iff' mdifferentiableWithinAt_inter' Measurable.comp' Measurable.comp_aemeasurable' Measurable.const_smul' Measurable.div' MeasurableEmbedding.withDensity_ofReal_comap_apply_eq_integral_abs_deriv_mul' Measurable.ennreal_tsum' MeasurableEquiv.withDensity_ofReal_map_symm_apply_eq_integral_abs_deriv_mul' measurable_findGreatest' measurable_id' measurable_id'' Measurable.inf' Measurable.iSup' Measurable.lintegral_kernel_prod_left' Measurable.lintegral_kernel_prod_right' Measurable.lintegral_kernel_prod_right'' Measurable.mul' measurable_of_isClosed' measurable_quotient_mk' measurable_quotient_mk'' measurableSet_eq_fun' MeasurableSet.image_inclusion' measurableSet_le' measurableSet_lt' Measurable.sup' measurable_to_countable' measurable_tProd_elim' MeasureTheory.abs_toReal_measure_sub_le_measure_symmDiff' MeasureTheory.adapted_predictablePart' MeasureTheory.addContent_union' MeasureTheory.AECover.integrable_of_lintegral_nnnorm_bounded' MeasureTheory.AECover.integrable_of_lintegral_nnnorm_tendsto' MeasureTheory.ae_eq_comp' MeasureTheory.ae_eq_dirac' MeasureTheory.ae_eq_of_forall_setIntegral_eq_of_sigmaFinite' MeasureTheory.ae_eq_trim_iff_of_aeStronglyMeasurable' MeasureTheory.ae_lt_top' MeasureTheory.aemeasurable_withDensity_ennreal_iff' 
MeasureTheory.ae_restrict_iff' MeasureTheory.AEStronglyMeasurable.comp_ae_measurable' MeasureTheory.AEStronglyMeasurable.const_smul' MeasureTheory.AEStronglyMeasurable.convolution_integrand' MeasureTheory.AEStronglyMeasurable.convolution_integrand_snd' MeasureTheory.AEStronglyMeasurable.convolution_integrand_swap_snd' MeasureTheory.AEStronglyMeasurable'.of_subsingleton' MeasureTheory.ae_withDensity_iff' MeasureTheory.ae_withDensity_iff_ae_restrict' MeasureTheory.average_eq' MeasureTheory.condExp_bot' MeasureTheory.condExpIndL1Fin_smul' MeasureTheory.condExpIndL1_smul' MeasureTheory.condExpInd_smul' MeasureTheory.condExpIndSMul_smul' MeasureTheory.condExpL1CLM_of_aestronglyMeasurable' MeasureTheory.condExpL1_of_aestronglyMeasurable' MeasureTheory.condExp_of_aestronglyMeasurable' MeasureTheory.Content.innerContent_mono' MeasureTheory.diracProba_toMeasure_apply' MeasureTheory.eLpNorm_add_le' MeasureTheory.eLpNorm'_const' MeasureTheory.eLpNorm_const' MeasureTheory.eLpNorm_eq_eLpNorm' MeasureTheory.eLpNorm'_eq_zero_of_ae_zero' MeasureTheory.eLpNorm_indicator_const' MeasureTheory.eLpNorm'_le_eLpNorm'_mul_eLpNorm' MeasureTheory.eLpNorm_nnreal_eq_eLpNorm' MeasureTheory.eLpNorm_one_le_of_le' MeasureTheory.eLpNorm'_smul_le_mul_eLpNorm' MeasureTheory.eLpNorm_sub_le' MeasureTheory.eLpNorm'_zero' MeasureTheory.eLpNorm_zero' MeasureTheory.exp_llr_of_ac' MeasureTheory.exp_neg_llr' MeasureTheory.Filtration.stronglyMeasurable_limit_process' MeasureTheory.hasFiniteIntegral_congr' MeasureTheory.HasFiniteIntegral.congr' MeasureTheory.HasFiniteIntegral.mono' MeasureTheory.hasFiniteIntegral_prod_iff' MeasureTheory.HasPDF.congr' MeasureTheory.Ico_ae_eq_Icc' MeasureTheory.Ico_ae_eq_Ioc' MeasureTheory.Iio_ae_eq_Iic' MeasureTheory.inducedOuterMeasure_eq' MeasureTheory.inducedOuterMeasure_eq_extend' MeasureTheory.Integrable.add' MeasureTheory.Integrable.bdd_mul' MeasureTheory.Integrable.comp_mul_left' MeasureTheory.Integrable.comp_mul_right' MeasureTheory.integrable_congr' 
MeasureTheory.Integrable.congr' MeasureTheory.Integrable.const_mul' MeasureTheory.integrable_finset_sum' MeasureTheory.Integrable.mono' MeasureTheory.Integrable.mul_const' MeasureTheory.integrable_of_forall_fin_meas_le' MeasureTheory.Integrable.simpleFunc_mul' MeasureTheory.Integrable.toL1_smul' MeasureTheory.integrable_withDensity_iff_integrable_smul' MeasureTheory.integral_add' MeasureTheory.integral_countable' MeasureTheory.integral_dirac' MeasureTheory.integral_Icc_eq_integral_Ico' MeasureTheory.integral_Icc_eq_integral_Ioc' MeasureTheory.integral_Icc_eq_integral_Ioo' MeasureTheory.integral_Ici_eq_integral_Ioi' MeasureTheory.integral_Ico_eq_integral_Ioo' MeasureTheory.integral_Iic_eq_integral_Iio' MeasureTheory.integral_Ioc_eq_integral_Ioo' MeasureTheory.integral_neg' MeasureTheory.integral_singleton' MeasureTheory.integral_sub' MeasureTheory.integral_zero' MeasureTheory.Ioc_ae_eq_Icc' MeasureTheory.Ioi_ae_eq_Ici' MeasureTheory.Ioo_ae_eq_Icc' MeasureTheory.Ioo_ae_eq_Ico' MeasureTheory.Ioo_ae_eq_Ioc' MeasureTheory.isClosed_aeStronglyMeasurable' MeasureTheory.isComplete_aeStronglyMeasurable' MeasureTheory.IsFundamentalDomain.integral_eq_tsum' MeasureTheory.IsFundamentalDomain.integral_eq_tsum'' MeasureTheory.IsFundamentalDomain.lintegral_eq_tsum' MeasureTheory.IsFundamentalDomain.lintegral_eq_tsum'' MeasureTheory.IsFundamentalDomain.measure_eq_tsum' MeasureTheory.IsFundamentalDomain.setIntegral_eq_tsum' MeasureTheory.IsFundamentalDomain.setLIntegral_eq_tsum' MeasureTheory.IsStoppingTime.measurableSet_eq' MeasureTheory.IsStoppingTime.measurableSet_eq_of_countable' MeasureTheory.IsStoppingTime.measurableSet_eq_of_countable_range' MeasureTheory.IsStoppingTime.measurableSet_ge' MeasureTheory.IsStoppingTime.measurableSet_ge_of_countable' MeasureTheory.IsStoppingTime.measurableSet_ge_of_countable_range' MeasureTheory.IsStoppingTime.measurableSet_gt' MeasureTheory.IsStoppingTime.measurableSet_le' MeasureTheory.IsStoppingTime.measurableSet_lt' 
MeasureTheory.IsStoppingTime.measurableSet_lt_of_countable' MeasureTheory.IsStoppingTime.measurableSet_lt_of_countable_range' MeasureTheory.IsStoppingTime.measurableSpace_le' MeasureTheory.L1.norm_setToL1_le' MeasureTheory.L1.norm_setToL1_le_mul_norm' MeasureTheory.L1.setToL1_add_left' MeasureTheory.L1.setToL1_congr_left' MeasureTheory.L1.setToL1_eq_setToL1' MeasureTheory.L1.setToL1_mono_left' MeasureTheory.L1.setToL1_smul_left' MeasureTheory.L1.setToL1_zero_left' MeasureTheory.L1.SimpleFunc.norm_setToL1SCLM_le' MeasureTheory.L1.SimpleFunc.setToL1S_add_left' MeasureTheory.L1.SimpleFunc.setToL1SCLM_add_left' MeasureTheory.L1.SimpleFunc.setToL1SCLM_congr_left' MeasureTheory.L1.SimpleFunc.setToL1SCLM_mono_left' MeasureTheory.L1.SimpleFunc.setToL1SCLM_smul_left' MeasureTheory.L1.SimpleFunc.setToL1SCLM_zero_left' MeasureTheory.L1.SimpleFunc.setToL1S_mono_left' MeasureTheory.L1.SimpleFunc.setToL1S_smul_left' MeasureTheory.L1.SimpleFunc.setToL1S_zero_left' MeasureTheory.L2.add_left' MeasureTheory.L2.smul_left' MeasureTheory.laverage_eq' MeasureTheory.lintegral_add_left' MeasureTheory.lintegral_add_right' MeasureTheory.lintegral_const_mul' MeasureTheory.lintegral_const_mul'' MeasureTheory.lintegral_count' MeasureTheory.lintegral_countable' MeasureTheory.lintegral_dirac' MeasureTheory.lintegral_eq_zero_iff' MeasureTheory.lintegral_finset_sum' MeasureTheory.lintegral_iInf' MeasureTheory.lintegral_map' MeasureTheory.lintegral_mono' MeasureTheory.lintegral_mono_fn' MeasureTheory.lintegral_mono_set' MeasureTheory.lintegral_mul_const' MeasureTheory.lintegral_mul_const'' MeasureTheory.lintegral_rpow_enorm_eq_rpow_eLpNorm' MeasureTheory.lintegral_singleton' MeasureTheory.lintegral_sub' MeasureTheory.lintegral_sub_le' MeasureTheory.lmarginal_union' MeasureTheory.locallyIntegrable_finset_sum' MeasureTheory.lowerCrossingTime_stabilize' MeasureTheory.Lp.ae_tendsto_of_cauchy_eLpNorm' MeasureTheory.Lp.eLpNorm'_lim_le_liminf_eLpNorm' 
MeasureTheory.Lp.eLpNorm'_sum_norm_sub_le_tsum_of_cauchy_eLpNorm' MeasureTheory.lpMeas.aestronglyMeasurable MeasureTheory.Lp.norm_const' MeasureTheory.Lp.simpleFunc.eq' MeasureTheory.Lp.tendsto_Lp_iff_tendsto_eLpNorm' MeasureTheory.Lp.tendsto_Lp_iff_tendsto_eLpNorm'' MeasureTheory.measurableSet_filtrationOfSet' MeasureTheory.measurableSet_sigmaFiniteSetWRT' MeasureTheory.Measure.ae_sum_iff' MeasureTheory.Measure.bind_zero_right' MeasureTheory.Measure.count_apply_eq_top' MeasureTheory.Measure.count_apply_finite' MeasureTheory.Measure.count_apply_finset' MeasureTheory.Measure.count_apply_lt_top' MeasureTheory.Measure.count_eq_zero_iff' MeasureTheory.Measure.count_injective_image' MeasureTheory.Measure.count_ne_zero' MeasureTheory.Measure.count_ne_zero'' MeasureTheory.Measure.count_singleton' MeasureTheory.measure_diff' MeasureTheory.measure_diff_null' MeasureTheory.Measure.dirac_apply' MeasureTheory.Measure.empty_of_count_eq_zero' MeasureTheory.Measure.ext_iff' MeasureTheory.Measure.haveLebesgueDecompositionSMul' MeasureTheory.Measure.InnerRegularWRT.map' MeasureTheory.Measure.integral_toReal_rnDeriv' MeasureTheory.measure_inter_conull' MeasureTheory.Measure.inv_rnDeriv' MeasureTheory.measure_iUnion_null_iff' MeasureTheory.Measure.LebesgueDecomposition.iSup_mem_measurableLE' MeasureTheory.Measure.LebesgueDecomposition.iSup_monotone' MeasureTheory.Measure.le_iff' MeasureTheory.Measure.lt_iff' MeasureTheory.Measure.map_id' MeasureTheory.Measure.measurable_bind' MeasureTheory.Measure.MeasureDense.nonempty' MeasureTheory.Measure.nonpos_iff_eq_zero' MeasureTheory.Measure.pi_noAtoms' MeasureTheory.MeasurePreserving.integral_comp' MeasureTheory.Measure.restrict_apply₀' MeasureTheory.Measure.restrict_apply_eq_zero' MeasureTheory.Measure.restrict_restrict' MeasureTheory.Measure.restrict_restrict₀' MeasureTheory.Measure.restrict_singleton' MeasureTheory.Measure.restrict_union' MeasureTheory.Measure.restrict_union_add_inter' MeasureTheory.Measure.rnDeriv_mul_rnDeriv' 
MeasureTheory.Measure.rnDeriv_pos' MeasureTheory.Measure.setIntegral_toReal_rnDeriv' MeasureTheory.Measure.setIntegral_toReal_rnDeriv_eq_withDensity' MeasureTheory.Measure.setLIntegral_rnDeriv' MeasureTheory.Measure.sum_apply_eq_zero' MeasureTheory.Measure.toSphere_apply' MeasureTheory.Measure.toSphere_apply_univ' MeasureTheory.measure_union' MeasureTheory.measure_union₀' MeasureTheory.measure_union_add_inter' MeasureTheory.measure_union_add_inter₀' MeasureTheory.memLp_finset_sum' MeasureTheory.MemLp.integrable_norm_rpow' MeasureTheory.MemLp.meas_ge_lt_top' MeasureTheory.mem_lpMeas_iff_aeStronglyMeasurable' MeasureTheory.MemLp.mono' MeasureTheory.norm_indicatorConstLp' MeasureTheory.norm_setIntegral_le_of_norm_le_const' MeasureTheory.norm_setIntegral_le_of_norm_le_const_ae' MeasureTheory.norm_setIntegral_le_of_norm_le_const_ae'' MeasureTheory.norm_setToFun_le' MeasureTheory.norm_setToFun_le_mul_norm' MeasureTheory.NullMeasurable.measurable' MeasureTheory.OuterMeasure.empty' MeasureTheory.OuterMeasure.isCaratheodory_iff_le' MeasureTheory.OuterMeasure.iUnion_null_iff' MeasureTheory.OuterMeasure.le_boundedBy' MeasureTheory.OuterMeasure.mono' MeasureTheory.OuterMeasure.mono'' MeasureTheory.OuterMeasure.top_apply' MeasureTheory.OuterMeasure.trim_eq_iInf' MeasureTheory.pdf.eq_of_map_eq_withDensity' MeasureTheory.pdf.quasiMeasurePreserving_hasPDF' MeasureTheory.piPremeasure_pi' MeasureTheory.ProbabilityMeasure.tendsto_measure_of_null_frontier_of_tendsto' MeasureTheory.ProgMeasurable.finset_prod' MeasureTheory.progMeasurable_of_tendsto' MeasureTheory.restrict_dirac' MeasureTheory.restrict_withDensity' MeasureTheory.setAverage_eq' MeasureTheory.setIntegral_dirac' MeasureTheory.setIntegral_tilted' MeasureTheory.setLaverage_eq' MeasureTheory.setLIntegral_dirac' MeasureTheory.setLIntegral_eq_zero_iff' MeasureTheory.setLIntegral_mono' MeasureTheory.setLIntegral_mono_ae' MeasureTheory.setLIntegral_tilted' MeasureTheory.setLIntegral_withDensity_eq_lintegral_mul₀' 
MeasureTheory.setLIntegral_withDensity_eq_setLIntegral_mul_non_measurable₀' MeasureTheory.setToFun_add_left' MeasureTheory.setToFun_congr_left' MeasureTheory.setToFun_finset_sum' MeasureTheory.setToFun_measure_zero' MeasureTheory.setToFun_mono_left' MeasureTheory.setToFun_smul_left' MeasureTheory.setToFun_zero_left' MeasureTheory.sigmaFinite_restrict_sigmaFiniteSetWRT' MeasureTheory.SigmaFinite.withDensity_of_ne_top' MeasureTheory.SignedMeasure.eq_singularPart' MeasureTheory.SignedMeasure.exists_subset_restrict_nonpos' MeasureTheory.SignedMeasure.haveLebesgueDecomposition_mk' MeasureTheory.SignedMeasure.restrictNonposSeq_disjoint' MeasureTheory.SignedMeasure.someExistsOneDivLT_subset' MeasureTheory.SimpleFunc.extend_apply' MeasureTheory.SimpleFunc.extend_comp_eq' MeasureTheory.SimpleFunc.lintegral_eq_of_subset' MeasureTheory.SimpleFunc.lintegral_map' MeasureTheory.SimpleFunc.setToSimpleFunc_add_left' MeasureTheory.SimpleFunc.setToSimpleFunc_congr' MeasureTheory.SimpleFunc.setToSimpleFunc_const' MeasureTheory.SimpleFunc.setToSimpleFunc_mono_left' MeasureTheory.SimpleFunc.setToSimpleFunc_nonneg' MeasureTheory.SimpleFunc.setToSimpleFunc_smul_left' MeasureTheory.SimpleFunc.setToSimpleFunc_zero' MeasureTheory.SimpleFunc.simpleFunc_bot' MeasureTheory.stoppedProcess_eq' MeasureTheory.stoppedProcess_eq'' MeasureTheory.stoppedValue_eq' MeasureTheory.stoppedValue_piecewise_const' MeasureTheory.stoppedValue_sub_eq_sum' MeasureTheory.StronglyMeasurable.aestronglyMeasurable MeasureTheory.StronglyMeasurable.const_smul' MeasureTheory.StronglyMeasurable.integral_kernel_prod_left' MeasureTheory.StronglyMeasurable.integral_kernel_prod_left'' MeasureTheory.StronglyMeasurable.integral_kernel_prod_right' MeasureTheory.StronglyMeasurable.integral_kernel_prod_right'' MeasureTheory.Submartingale.stoppedValue_leastGE_eLpNorm_le' MeasureTheory.Subsingleton.aestronglyMeasurable' MeasureTheory.Subsingleton.stronglyMeasurable' MeasureTheory.TendstoInMeasure.congr' 
MeasureTheory.TendstoInMeasure.exists_seq_tendsto_ae' MeasureTheory.tendsto_sum_indicator_atTop_iff' MeasureTheory.tilted_apply' MeasureTheory.tilted_apply_eq_ofReal_integral' MeasureTheory.tilted_const' MeasureTheory.tilted_neg_same' MeasureTheory.tilted_zero' MeasureTheory.upcrossingsBefore_zero' MeasureTheory.upperCrossingTime_stabilize' MeasureTheory.upperCrossingTime_zero' MeasureTheory.VectorMeasure.ext_iff' MeasureTheory.VectorMeasure.le_iff' MeasureTheory.weightedSMul_union' MeasureTheory.withDensity_apply' MeasureTheory.withDensity_apply_eq_zero' MeasureTheory.withDensity_smul' MeasureTheory.withDensityᵥ_add' MeasureTheory.withDensityᵥ_neg' MeasureTheory.withDensityᵥ_smul' MeasureTheory.withDensityᵥ_smul_eq_withDensityᵥ_withDensity' MeasureTheory.withDensityᵥ_sub' MeasureTheory.MemLp.zero' mem_ball_iff_norm'' mem_ball_iff_norm''' mem_closedBall_iff_norm'' mem_closedBall_iff_norm''' mem_closure_iff_nhds' mem_closure_iff_nhds_basis' mem_coclosed_Lindelof' mem_codiscrete' mem_coLindelof' memℓp_gen' mem_nhds_prod_iff' mem_pairSelfAdjointMatricesSubmodule' mem_rootsOfUnity' mem_rootsOfUnity_prime_pow_mul_iff' mem_selfAdjointMatricesSubmodule' mem_skewAdjointMatricesSubmodule' mem_sphere_iff_norm' Metric.ball_eq_ball' Metric.ball_subset_ball' Metric.closedBall_subset_ball' Metric.closedBall_subset_closedBall' Metric.closedBall_zero' Metric.continuousAt_iff' Metric.continuous_iff' Metric.continuousOn_iff' Metric.continuousWithinAt_iff' Metric.cthickening_eq_iInter_cthickening' Metric.cthickening_eq_iInter_thickening' Metric.cthickening_eq_iInter_thickening'' Metric.mem_ball' Metric.mem_closedBall' Metric.mem_of_closed' Metric.mem_sphere' midpoint_eq_iff' min_def' min_div_div_left' min_div_div_right' minimal_subset_iff' min_inv_inv' min_mul_distrib' min_mul_min_le_min_mul_mul' minpoly.dvd_map_of_isScalarTower' minpoly.eq_X_sub_C' minpoly.unique' min_rec' Miu.le_pow2_and_pow2_eq_mod3' mk_eq_mk_of_basis' Mod_.comp_hom' Mod_.id_hom' 
ModularCyclotomicCharacter.toFun_spec' ModularCyclotomicCharacter.toFun_spec'' ModularCyclotomicCharacter.toFun_unique' Module.Baer.ExtensionOfMaxAdjoin.extendIdealTo_wd' ModuleCat.CoextendScalars.smul_apply' ModuleCat.hasLimits' ModuleCat.restrictScalars.smul_def' Module.End_algebraMap_isUnit_inv_apply_eq_iff' Module.End.smulCommClass' Module.free_of_finite_type_torsion_free' Module.Free.of_subsingleton' Module.mem_support_iff' Module.not_mem_support_iff' Module.projective_def' Monad.mapM' Monad.sequence' Mon.comp_hom' Mon.id_hom' MonoidAlgebra.lift_apply' MonoidAlgebra.lift_unique' Monoid.CoprodI.lift_comp_of' Monoid.CoprodI.lift_of' Monoid.Coprod.induction_on' Monoid.exponent_eq_iSup_orderOf' Monoid.exponent_min' MonoidHom.coe_toAdditive' MonoidHom.coe_toAdditive'' MonoidHom.comap_bot' MonoidHom.map_zpow' MonoidHom.prod_map_comap_prod' Monoid.PushoutI.NormalWord.base_smul_def' Monoid.PushoutI.NormalWord.summand_smul_def' Monotone.const_mul' Monotone.mul_const' MonotoneOn.const_mul' MonotoneOn.mul_const' MulActionHom.comp_inverse' MulActionHom.inverse_eq_inverse' MulActionHom.inverse'_inverse' MulAction.mem_fixedPoints' MulAction.mem_stabilizer_finset' MulAction.mem_stabilizer_set' MulAction.orbitRel.quotient_eq_of_quotient_subgroup_eq' MulAction.orbitRel.Quotient.mem_subgroup_orbit_iff' MulAction.orbitZPowersEquiv_symm_apply' MulAction.Quotient.coe_smul_out' MulAction.Quotient.mk_smul_out' MulAction.right_quotientAction' MulChar.star_apply' mul_div_assoc' mul_div_cancel_of_imp' mul_eq_mul_iff_eq_and_eq_of_pos' mul_eq_of_eq_div' mul_eq_one' MulEquiv.mk_coe' MulHom.prod_map_comap_prod' mul_inv_le_iff' mul_inv_le_iff_le_mul' mul_inv_le_mul_inv_iff' mul_inv_lt_iff' mul_inv_lt_iff_le_mul' mul_inv_lt_mul_inv_iff' mul_invOf_cancel_left' mul_invOf_cancel_right' mul_invOf_self' mul_left_cancel'' mul_left_inj' mul_le_iff_le_one_left' mul_le_iff_le_one_right' mul_le_mul' mul_le_mul_left' mul_le_mul_of_nonneg' mul_le_mul_of_nonneg_of_nonpos' mul_le_mul_of_nonpos_of_nonneg' 
mul_le_mul_of_nonpos_of_nonpos' mul_le_mul_right' mul_le_of_le_of_le_one' mul_le_of_le_one_left' mul_le_of_le_one_of_le' mul_le_of_le_one_right' mul_lt_iff_lt_one_left' mul_lt_iff_lt_one_right' mul_lt_mul_left' mul_lt_mul_of_pos' mul_lt_mul_right' mul_lt_of_le_of_lt_one' mul_lt_of_le_one_of_lt' mul_lt_of_lt_of_le_one' mul_lt_of_lt_of_lt_one' mul_lt_of_lt_one_left' mul_lt_of_lt_one_of_le' mul_lt_of_lt_one_of_lt' mul_lt_of_lt_one_right' mul_ne_one' mul_right_cancel'' mul_right_inj' mul_rotate' MulSemiringActionHom.coe_fn_coe' MultilinearMap.mkContinuousLinear_norm_le' MultilinearMap.mkContinuousMultilinear_norm_le' Multipliable.sigma' Multiplicative.isIsometricSMul' Multiplicative.isIsIsometricVAdd'' multiplicity.is_greatest' multiplicity.mul' multiplicity.pow' multiplicity.unique' Multiset.add_le_add_iff_left' Multiset.aemeasurable_prod' Multiset.aestronglyMeasurable_prod' Multiset.antidiagonal_coe' Multiset.attach_map_val' Multiset.count_sum' Multiset.dedup_subset' Multiset.ext' Multiset.extract_gcd' Multiset.filter_attach' Multiset.filter_eq' Multiset.foldl_induction' Multiset.foldr_induction' Multiset.induction_on' Multiset.map_const' Multiset.map_filter' Multiset.map_id' Multiset.measurable_prod' Multiset.Nat.antidiagonal_succ' Multiset.Nat.antidiagonal_succ_succ' Multiset.noncommProd_cons' Multiset.powersetAux_perm_powersetAux' Multiset.powersetCard_coe' Multiset.powerset_coe' Multiset.prod_hom' Multiset.prod_lt_prod' Multiset.prod_lt_prod_of_nonempty' Multiset.prod_map_inv' Multiset.prod_X_add_C_coeff' Multiset.quot_mk_to_coe' Multiset.quot_mk_to_coe'' Multiset.revzip_powersetAux' Multiset.revzip_powersetAux_perm_aux' Multiset.smul_prod' Multiset.stronglyMeasurable_prod' Multiset.subset_dedup' MvFunctor.f' MvFunctor.g' MvFunctor.id_map' MvPFunctor.liftP_iff' MvPFunctor.M.bisim' MvPFunctor.M.dest_corec' MvPFunctor.M.dest'_eq_dest' MvPFunctor.M.dest_eq_dest' MvPFunctor.wDest'_wMk' MvPolynomial.aeval_zero' MvPolynomial.algHom_ext' MvPolynomial.C_mul' 
MvPolynomial.coeff_monomial_mul' MvPolynomial.coeff_mul_monomial' MvPolynomial.coeff_mul_X' MvPolynomial.coeff_X' MvPolynomial.coeff_X_mul' MvPolynomial.degrees_X' MvPolynomial.eval₂_eq' MvPolynomial.eval₂Hom_congr' MvPolynomial.eval₂Hom_X' MvPolynomial.eval₂Hom_zero' MvPolynomial.eval_eq' MvPolynomial.eval_eq_eval_mv_eval' MvPolynomial.eval_zero' MvPolynomial.finSuccEquiv_support' MvPolynomial.homogeneousComponent_eq_zero' MvPolynomial.isLocalization_C_mk' MvPolynomial.monomial_zero' MvPolynomial.support_esymm' MvPolynomial.support_esymm'' MvPolynomial.weightedHomogeneousComponent_eq_zero' MvPowerSeries.algebraMap_apply' MvPowerSeries.algebraMap_apply'' MvPowerSeries.invOfUnit_eq' MvQPF.Cofix.dest_corec' MvQPF.liftR_map_last' MvQPF.recF_eq' MvQPF.wEquiv.abs' Nat.add_descFactorial_eq_ascFactorial' Nat.ascFactorial_eq_factorial_mul_choose' Nat.bit_add' Nat.card_eq_two_iff' Nat.cauchy_induction' Nat.choose_eq_asc_factorial_div_factorial' Nat.choose_succ_succ' Nat.coprime_of_dvd' Nat.count_add' Nat.count_succ' Nat.decreasingInduction_succ' Nat.digits_def' Nat.digits_zero_succ' Nat.dist_tri_left' Nat.dist_tri_right' Nat.div_add_mod' Nat.div_le_of_le_mul' Nat.div_le_self' Nat.div_lt_iff_lt_mul' Nat.eq_sqrt' Nat.eq_sub_of_add_eq' Nat.equivProdNatFactoredNumbers_apply' Nat.equivProdNatSmoothNumbers_apply' Nat.even_add' Nat.even_or_odd' Nat.even_pow' Nat.even_sub' Nat.even_xor_odd' Nat.exists_mul_self' Nat.factorial_inj' Nat.find_min' Nat.floor_eq_iff' Nat.floor_eq_on_Ico' Nat.floor_lt' Nat.Icc_eq_range' Nat.Ico_eq_range' Nat.iInf_le_succ' Nat.iInf_lt_succ' Nat.Ioc_eq_range' Nat.Ioo_eq_range' Nat.iSup_le_succ' Nat.iSup_lt_succ' Nat.le_div_iff_mul_le' Nat.le_floor_iff' Nat.le_minFac' Nat.le_nth_count' Nat.leRecOn_succ' Nat.leRec_succ' Nat.le_sqrt' Nat.log_eq_one_iff' Nat.lt_sub_iff_add_lt' Nat.lt_succ_sqrt' Nat.mem_primeFactorsList' Nat.mod_add_div' Nat.ModEq.add_left_cancel' Nat.ModEq.add_right_cancel' Nat.ModEq.cancel_left_div_gcd' Nat.ModEq.cancel_right_div_gcd' 
Nat.ModEq.mul_left' Nat.ModEq.mul_left_cancel_iff' Nat.ModEq.mul_right' Nat.ModEq.mul_right_cancel_iff' Nat.monotone_primeCounting' Nat.mul_add_mod' Nat.mul_div_cancel_left' nat_mul_inj' Nat.mul_lt_mul'' Nat.not_exists_sq' Nat.nth_le_nth' Nat.nth_lt_nth' Nat.odd_add' Nat.odd_sub' Nat.ofDigits_modEq' Nat.ofDigits_zmodeq' Nat.one_le_pow' Nat.one_lt_pow' Nat.one_lt_two_pow' Nat.Partrec.Code.encode_lt_rfind' Nat.Partrec.Code.rec_prim' Nat.Partrec'.comp' Nat.Partrec.merge' Nat.Partrec.prec' Nat.Partrec.rfind' Nat.pow_lt_ascFactorial' Nat.pow_sub_lt_descFactorial' Nat.prime_def_lt' Nat.Prime.eq_two_or_odd' Nat.primeFactorsList_chain' Nat.Prime.not_prime_pow' Nat.Prime.one_lt' Nat.Primrec.casesOn' Nat.Primrec'.comp' Nat.Primrec'.prec' Nat.Primrec.swap' Nat.prod_divisorsAntidiagonal' Nat.rfind_dom' Nat.rfind_min' Nat.sInf_add' Nat.size_shiftLeft' Nat.sq_mul_squarefree_of_pos' Nat.sqrt_add_eq' Nat.sqrt_eq' Nat.sqrt_le' Nat.sqrt_lt' Nat.sqrt_mul_sqrt_lt_succ' Nat.sub_eq_of_eq_add' Nat.sub_lt_iff_lt_add' Nat.succ_le_succ_sqrt' Nat.succ_pos' Nat.sum_totient' Nat.surjective_primeCounting' Nat.tendsto_primeCounting' Nat.uIcc_eq_range' Ne.bot_lt' neg_div' neg_gcd' neg_of_smul_neg_left' neg_of_smul_neg_right' neg_pow' Ne.lt_of_le' Ne.lt_top' ne_of_irrefl' ne_of_ne_of_eq' newton_seq_dist_tendsto' NeZero.ne' NeZero.of_gt' ne_zero_of_irreducible_X_pow_sub_C' nhds_basis_Ioo' nhds_basis_uniformity' nhds_def' nhds_eq_comap_uniformity' nhds_eq_uniformity' nhds_left'_sup_nhds_right' nhds_left_sup_nhds_right' nhds_one_symm' nhdsWithin_eq_nhdsWithin' nhdsWithin_extChartAt_target_eq' nhdsWithin_Ici_basis' nhdsWithin_Ici_eq' nhdsWithin_Ici_eq'' nhdsWithin_Iic_basis' nhdsWithin_Iic_eq' nhdsWithin_Iic_eq'' nhdsWithin_Iio_basis' nhdsWithin_Iio_neBot' nhdsWithin_Iio_self_neBot' nhdsWithin_inter' nhdsWithin_inter_of_mem' nhdsWithin_Ioi_basis' nhdsWithin_Ioi_neBot' nhdsWithin_Ioi_self_neBot' nhdsWithin_pi_eq' nhdsWithin_restrict' nhdsWithin_restrict'' nndist_eq_nnnorm_vsub' 
nndist_midpoint_midpoint_le' nndist_nnnorm_nnnorm_le' nnnorm_algebraMap' nnnorm_eq_zero' nnnorm_inv' nnnorm_le_nnnorm_add_nnnorm_div' nnnorm_le_pi_nnnorm' nnnorm_map' nnnorm_mul_le' nnnorm_ne_zero_iff' nnnorm_one' nnnorm_pos' NNRat.instSMulCommClass' NNReal.ball_zero_eq_Ico' NNReal.closedBall_zero_eq_Icc' NNReal.div_le_iff' NNReal.div_le_of_le_mul' NNReal.div_lt_iff' NNReal.inner_le_Lp_mul_Lq_tsum' NNReal.le_div_iff' NNReal.list_prod_map_rpow' NNReal.Lp_add_le_tsum' NNReal.lt_div_iff' NNReal.nndist_zero_eq_val' NNReal.rpow_add' NNReal.rpow_add_intCast' NNReal.rpow_add_natCast' NNReal.rpow_add_one' NNReal.rpow_one_add' NNReal.rpow_one_sub' NNReal.rpow_sub' NNReal.rpow_sub_intCast' NNReal.rpow_sub_natCast' NNReal.rpow_sub_one' NNReal.tendsto_coe' NonUnitalAlgHom.coe_inverse' NonUnitalAlgHom.coe_restrictScalars' NonUnitalStarAlgebra.adjoin_induction' NonUnitalStarAlgHom.coe_mk' NonUnitalStarAlgHom.coe_restrictScalars' NonUnitalStarSubalgebra.instIsScalarTower' NonUnitalStarSubalgebra.instSMulCommClass' NonUnitalStarSubalgebra.module' NonUnitalSubalgebra.instIsScalarTower' NonUnitalSubalgebra.instModule' NonUnitalSubalgebra.instSMulCommClass' NonUnitalSubring.coe_mk' NonUnitalSubring.eq_top_iff' NonUnitalSubring.mem_mk' NonUnitalSubsemiring.coe_mk' NonUnitalSubsemiring.eq_top_iff' NonUnitalSubsemiring.mem_mk' normalClosure_eq_iSup_adjoin' norm_algebraMap' norm_map' NormedAddCommGroup.cauchy_series_of_le_geometric' NormedAddCommGroup.cauchy_series_of_le_geometric'' NormedAddGroupHom.coe_mkNormedAddGroupHom' NormedAddGroupHom.completion_coe' NormedAddGroupHom.norm_comp_le_of_le' NormedRing.inverse_one_sub_nth_order' NormedSpace.exp_conj' NormedSpace.expSeries_apply_eq' NormedSpace.expSeries_apply_eq_div' NormedSpace.exp_series_hasSum_exp' NormedSpace.expSeries_hasSum_exp_of_mem_ball' NormedSpace.expSeries_summable' NormedSpace.expSeries_summable_of_mem_ball' NormedSpace.exp_units_conj' NormedSpace.isVonNBounded_iff' NormedSpace.norm_expSeries_summable' 
NormedSpace.norm_expSeries_summable_of_mem_ball' norm_eq_of_mem_sphere' norm_eq_zero' norm_eq_zero'' norm_eq_zero''' norm_inv' norm_le_norm_add_const_of_dist_le' norm_le_norm_add_norm_div' norm_le_of_mem_closedBall' norm_le_pi_norm' norm_le_zero_iff' norm_le_zero_iff'' norm_le_zero_iff''' norm_lt_of_mem_ball' norm_ne_zero_iff' norm_nonneg' norm_of_subsingleton' norm_one' norm_pos_iff' norm_pos_iff' norm_pos_iff''' norm_sub_norm_le' norm_toNNReal' not_dvd_index_sylow' not_lt_zero' not_mem_of_lt_csInf' npow_mul' nsmul_eq_mul' nullMeasurableSet_lt' Num.add_ofNat' NumberField.InfinitePlace.orbitRelEquiv_apply_mk'' NumberField.mixedEmbedding.convexBodySumFun_apply' NumberField.mixedEmbedding.norm_eq_zero_iff' NumberField.Units.regulator_eq_det' Num.cast_sub' Num.cast_succ' Num.cast_zero' Num.mem_ofZNum' Num.of_to_nat' Num.succ_ofInt' odd_add_one_self' odd_add_self_one' ofReal_norm_eq_enorm' OmegaCompletePartialOrder.const_continuous' OmegaCompletePartialOrder.ContinuousHom.bind_continuous' OmegaCompletePartialOrder.ContinuousHom.forall_forall_merge' OmegaCompletePartialOrder.ContinuousHom.ite_continuous' OmegaCompletePartialOrder.ContinuousHom.map_continuous' OmegaCompletePartialOrder.ContinuousHom.seq_continuous' OmegaCompletePartialOrder.Continuous.of_bundled' OmegaCompletePartialOrder.flip₁_continuous' OmegaCompletePartialOrder.flip₂_continuous' OmegaCompletePartialOrder.id_continuous' OmegaCompletePartialOrder.ScottContinuous.continuous' one_le_div' one_le_finprod' one_le_pow_of_one_le' one_le_thickenedIndicator_apply' one_le_two' one_lt_div' one_lt_finprod' one_lt_pow' one_lt_zpow one_ne_zero' OnePoint.continuousAt_infty' OnePoint.isOpen_iff_of_mem' OnePoint.tendsto_nhds_infty' ONote.exists_lt_mul_omega0' ONote.exists_lt_omega0_opow' ONote.fastGrowing_zero' ONote.NF.below_of_lt' ONote.nf_repr_split' ONote.NF.snd' ONote.split_eq_scale_split' Topology.IsOpenEmbedding.tendsto_nhds_iff' openSegment_eq_image' openSegment_eq_Ioo' Option.bind_congr' Option.bind_eq_bind' 
Option.bind_eq_some' Option.guard_eq_some' Option.map_bind' Option.map_coe' Option.none_bind' Option.none_orElse' Option.orElse_eq_none' Option.orElse_eq_some' Option.orElse_none' Option.some_bind' Option.some_orElse' or_congr_left' or_congr_right' OrderDual.continuousConstSMul' OrderDual.instDistribMulAction' OrderDual.instDistribSMul' OrderDual.instIsScalarTower' OrderDual.instIsScalarTower'' OrderDual.instModule' OrderDual.instMulAction' OrderDual.instMulActionWithZero' OrderDual.instPow' OrderDual.instSMulCommClass' OrderDual.instSMulCommClass'' OrderDual.instSMulWithZero' Order.height_le_iff' Order.Ideal.IsMaximal.isCoatom' OrderIso.isGLB_image' OrderIso.isGLB_preimage' OrderIso.isLUB_image' OrderIso.isLUB_preimage' OrderIso.map_bot' OrderIso.map_csInf' OrderIso.map_csSup' OrderIso.map_top' OrderIso.subsingleton_of_wellFoundedGT' OrderIso.subsingleton_of_wellFoundedLT' Order.isPredPrelimitRecOn_pred' Order.isSuccPrelimitRecOn_succ' Order.not_isPredPrelimit_iff' Order.not_isSuccPrelimit_iff' orderOf_eq_zero_iff' orderOf_pow' Ordinal.add_lt_add_iff_left' Ordinal.blsub_eq_lsub' Ordinal.brange_bfamilyOfFamily' Ordinal.bsup_eq_sup' Ordinal.cof_eq' Ordinal.comp_bfamilyOfFamily' Ordinal.comp_familyOfBFamily' Ordinal.enum_le_enum' Ordinal.enum_zero_le' Ordinal.IsNormal.le_set' Ordinal.lift_down' Ordinal.lift.principalSeg_top' Ordinal.liftPrincipalSeg_top' Ordinal.lsub_eq_blsub' Ordinal.lt_nmul_iff₃' Ordinal.mul_eq_zero' Ordinal.nhds_right' Ordinal.nmul_le_iff₃' Ordinal.nmul_nadd_le₃' Ordinal.nmul_nadd_lt₃' Ordinal.pred_eq_iff_not_succ' Ordinal.range_familyOfBFamily' Ordinal.relIso_enum' Ordinal.succ_le_iff' Ordinal.sup_eq_bsup' Ordinal.toPGame_moveLeft' Ordinal.type_def' Ordinal.typein_le_typein' Ordinal.type_le_iff' Ordinal.zero_opow' Ordnode.all_balance' Ordnode.all_node' Ordnode.balance_eq_balance' Ordnode.balanceL_eq_balance' Ordnode.balanceR_eq_balance' Ordnode.dual_balance' Ordnode.dual_node' Ordnode.length_toList' Ordnode.Raised.dist_le' Ordnode.size_balance' 
Ordnode.Sized.balance' Ordnode.Sized.eq_node' Ordnode.Sized.node' Ordnode.Valid'.balance' Ordnode.Valid'.node' OreLocalization.add' OreLocalization.add'' OreLocalization.div_eq_one' OreLocalization.inv' OreLocalization.mul_cancel' OreLocalization.oreDiv_add_char' OreLocalization.smul' OreLocalization.smul_cancel' OreLocalization.zero_oreDiv' Orientation.inner_rightAngleRotation_swap' Orientation.kahler_comp_rightAngleRotation' Orientation.rightAngleRotation_map' Orientation.volumeForm_robust' Padic.complete' Padic.complete'' Padic.lim' padicNormE.eq_padic_norm' padicNormE.image' padicNorm.sum_le' padicNorm.sum_lt' Padic.rat_dense' padicValNat_def' padicValNat.div' PartENat.casesOn' PartENat.get_natCast' PartENat.get_ofNat' PartENat.toWithTop_natCast' PartENat.toWithTop_one' PartENat.toWithTop_top' PartENat.toWithTop_zero' Part.eq_none_iff' Part.Fix.approx_mono' Part.fix_def' PartialEquiv.image_source_inter_eq' PartialEquiv.symm_image_target_inter_eq' PartialEquiv.trans_refl_restr' PartialEquiv.trans_source' PartialEquiv.trans_source'' PartialEquiv.trans_target' PartialEquiv.trans_target'' PartialHomeomorph.contDiffWithinAt_extend_coord_change' PartialHomeomorph.continuousAt_extend_symm' PartialHomeomorph.eventually_left_inverse' PartialHomeomorph.eventually_nhds' PartialHomeomorph.eventually_nhdsWithin' PartialHomeomorph.eventually_right_inverse' PartialHomeomorph.extend_coord_change_source_mem_nhdsWithin' PartialHomeomorph.extend_target' PartialHomeomorph.image_source_inter_eq' PartialHomeomorph.IsImage.iff_preimage_eq' PartialHomeomorph.IsImage.iff_symm_preimage_eq' PartialHomeomorph.isOpen_extend_preimage' PartialHomeomorph.ofSet_trans' PartialHomeomorph.prod_eq_prod_of_nonempty' PartialHomeomorph.restr_source' PartialHomeomorph.restr_toPartialEquiv' PartialHomeomorph.trans_of_set' PartialHomeomorph.trans_source' PartialHomeomorph.trans_source'' PartialHomeomorph.trans_target' PartialHomeomorph.trans_target'' PartitionOfUnity.exists_finset_nhds' 
PartitionOfUnity.sum_finsupport' Part.map_id' Partrec₂.unpaired' Partrec.const' Partrec.merge' PathConnectedSpace.exists_path_through_family' Path.extend_extends' pcontinuous_iff' Pell.eq_of_xn_modEq' Perfection.coeff_iterate_frobenius' Perfection.coeff_pow_p' PerfectionMap.comp_equiv' PerfectionMap.comp_symm_equiv' PFunctor.Approx.head_succ' PFunctor.liftp_iff' PFunctor.M.agree_iff_agree' PFunctor.M.bisim' PFunctor.M.casesOn_mk' PFunctor.M.ext' PFunctor.M.head_eq_head' PFunctor.M.isPath_cons' Pi.compact_Icc_space' Pi.continuous_postcomp' Pi.continuous_precomp' Pi.cstarRing' Pi.distribMulAction' Pi.distribSMul' pi_Icc_mem_nhds' pi_Ici_mem_nhds' pi_Ico_mem_nhds' pi_Iic_mem_nhds' pi_Iio_mem_nhds' Pi.induced_precomp' Pi.infConvergenceClass' Pi.instIsBoundedSMul' pi_Ioc_mem_nhds' pi_Ioi_mem_nhds' pi_Ioo_mem_nhds' Pi.isIsometricSMul' Pi.isIsometricSMul'' Pi.isScalarTower' Pi.isScalarTower'' Pi.lawfulFix' Pi.Lex.noMaxOrder' Pi.module' Pi.mulAction' Pi.mulActionWithZero' Pi.mulDistribMulAction' pinGroup.star_eq_inv' pi_nnnorm_const' pi_nnnorm_const_le' Pi.nnnorm_def' pi_nnnorm_le_iff' pi_nnnorm_lt_iff' pi_norm_const' pi_norm_const_le' Pi.norm_def' pi_norm_le_iff_of_nonempty' Pi.orderClosedTopology' Pi.smul' Pi.smul_apply' Pi.smulCommClass' Pi.smulCommClass'' Pi.smul_def' Pi.smulWithZero' Pi.smulZeroClass' PiSubtype.canLift' Pi.supConvergenceClass' PiTensorProduct.add_tprodCoeff' PiTensorProduct.distribMulAction' PiTensorProduct.hasSMul' PiTensorProduct.isScalarTower' PiTensorProduct.lift.unique' PiTensorProduct.module' PiTensorProduct.smulCommClass' PiTensorProduct.smul_tprodCoeff' PiTensorProduct.zero_tprodCoeff' Pi.uniformContinuous_postcomp' Pi.uniformContinuous_precomp' Pi.uniformSpace_comap_precomp' PNat.coe_toPNat' PNat.div_add_mod' PNat.dvd_iff' PNat.factorMultiset_le_iff' PNat.find_min' PNat.gcd_rel_left' PNat.gcd_rel_right' PNat.mod_add_div' PNat.XgcdType.reduce_isReduced' PNat.XgcdType.reduce_isSpecial' pNilradical_eq_bot' Pointed.Hom.comp_toFun' 
Pointed.Hom.id_toFun' Polynomial.add' Polynomial.addHom_ext' Polynomial.aeval_apply_smul_mem_of_le_comap' Polynomial.aeval_eq_sum_range' Polynomial.as_sum_range' Polynomial.card_roots' Polynomial.card_roots_sub_C' Polynomial.card_support_eq' Polynomial.card_support_eraseLead' Polynomial.C_mul' Polynomial.coeff_expand_mul' Polynomial.coeff_mul_X_pow' Polynomial.coeff_restriction' Polynomial.coeff_toSubring' Polynomial.coeff_X_pow_mul' Polynomial.coeff_zero_eq_aeval_zero' Polynomial.degree_eq_card_roots' Polynomial.degree_mul' Polynomial.degree_pow' Polynomial.div_tendsto_atBot_of_degree_gt' Polynomial.div_tendsto_atTop_of_degree_gt' Polynomial.eq_zero_of_natDegree_lt_card_of_eval_eq_zero' Polynomial.eval₂_comp' Polynomial.eval₂_eq_sum_range' Polynomial.eval₂_mul' Polynomial.eval₂_mul_C' Polynomial.eval₂_pow' Polynomial.eval_eq_sum_range' Polynomial.eval_smul' Polynomial.exists_root_of_splits' Polynomial.expand_contract' Polynomial.hasseDeriv_one' Polynomial.hasseDeriv_zero' Polynomial.HasSeparableContraction.dvd_degree' Polynomial.hermite_eq_deriv_gaussian' Polynomial.isRoot_cyclotomic_iff' Polynomial.isUnit_iff' Polynomial.isUnitTrinomial_iff' Polynomial.isUnitTrinomial_iff'' Polynomial.leadingCoeff_add_of_degree_lt' Polynomial.leadingCoeff_map' Polynomial.leadingCoeff_mul' Polynomial.leadingCoeff_pow' Polynomial.leadingCoeff_sub_of_degree_lt' Polynomial.lhom_ext' Polynomial.lt_rootMultiplicity_iff_isRoot_iterate_derivative_of_mem_nonZeroDivisors' Polynomial.lt_rootMultiplicity_of_isRoot_iterate_derivative_of_mem_nonZeroDivisors' Polynomial.map_dvd_map' Polynomial.map_rootOfSplits' Polynomial.mem_aroots' Polynomial.mem_roots' Polynomial.mem_rootSet' Polynomial.mem_roots_sub_C' Polynomial.mkDerivation_one_eq_derivative' PolynomialModule.eval_map' PolynomialModule.isScalarTower' Polynomial.Monic.geom_sum' Polynomial.Monic.irreducible_iff_natDegree' Polynomial.Monic.natDegree_mul' Polynomial.monic_zero_iff_subsingleton' Polynomial.mul' Polynomial.mul_scaleRoots' 
Polynomial.natDegree_eq_card_roots' Polynomial.natDegree_eq_support_max' Polynomial.natDegree_mul' Polynomial.natDegree_pow' Polynomial.natDegree_removeFactor' Polynomial.natTrailingDegree_eq_support_min' Polynomial.natTrailingDegree_mul' Polynomial.neg' Polynomial.ringHom_ext' Polynomial.rootMultiplicity_mul' Polynomial.rootMultiplicity_pos' Polynomial.rootSet_maps_to' Polynomial.roots_ne_zero_of_splits' Polynomial.scaleRoots_dvd' Polynomial.separable_def' Polynomial.Separable.of_pow' Polynomial.separable_prod' Polynomial.separable_prod_X_sub_C_iff' polynomial_smul_apply' Polynomial.splits_of_splits_mul' Polynomial.SplittingField.algebra' Polynomial.SplittingFieldAux.algebra' Polynomial.SplittingFieldAux.algebra'' Polynomial.SplittingFieldAux.algebra''' Polynomial.SplittingFieldAux.scalar_tower' Polynomial.sum_add' Polynomial.sum_smul_index' Polynomial.support_binomial' Polynomial.support_C_mul_X' Polynomial.support_C_mul_X_pow' Polynomial.support_monomial' Polynomial.support_trinomial' Polynomial.taylor_zero' Polynomial.trailingDegree_mul' Polynomial.trinomial_leading_coeff' Polynomial.trinomial_trailing_coeff' PosNum.cast_one' PosNum.cast_sub' PosNum.of_to_nat' PosNum.one_sub' PosNum.pred'_succ' PosNum.succ'_pred' pow_add_pow_le' pow_card_eq_one' pow_eq_zero_iff' PowerBasis.exists_eq_aeval' PowerBasis.mem_span_pow' PowerSeries.algebraMap_apply' PowerSeries.algebraMap_apply'' PowerSeries.algebraPolynomial' PowerSeries.coeff_mul_X_pow' PowerSeries.coeff_X_pow_mul' PowerSeries.derivative_inv' PowerSeries.invOfUnit_eq' PowerSeries.trunc_derivative' PowerSeries.trunc_zero' pow_le_one' pow_le_pow_iff_right' pow_le_pow_left' pow_le_pow_right' pow_le_pow_right_of_le_one' pow_lt_one' pow_lt_pow_iff_right' pow_lt_pow_left' pow_lt_pow_right' pow_mul' pow_mul_comm' pow_right_strictMono' pow_succ' pow_three' ppow_mul' PProd.exists' PProd.forall' PredOrder.prelimitRecOn_pred' preimage_nhdsWithin_coinduced' PresheafOfModules.sheafificationHomEquiv_hom' 
Pretrivialization.apply_symm_apply' Pretrivialization.coe_fst' Pretrivialization.continuousLinearMap_symm_apply' Pretrivialization.ext' Pretrivialization.mk_proj_snd' Pretrivialization.proj_symm_apply' PrimeMultiset.prod_dvd_iff' PrimeSpectrum.iSup_basicOpen_eq_top_iff' Primrec₂.nat_iff' Primrec₂.unpaired' Primrec.nat_casesOn' Primrec.nat_omega_rec' Primrec.nat_rec' Primrec.vector_get' Primrec.vector_ofFn' PrincipalSeg.coe_coe_fn' ProbabilityTheory.centralMoment_one' ProbabilityTheory.cgf_const' ProbabilityTheory.cgf_zero' ProbabilityTheory.cond_apply' ProbabilityTheory.cond_cond_eq_cond_inter' ProbabilityTheory.uniformOn_inter' ProbabilityTheory.condExp_ae_eq_integral_condExpKernel' ProbabilityTheory.condExpKernel_ae_eq_condExp' ProbabilityTheory.CondIndepSets.condIndep' ProbabilityTheory.cond_mul_eq_inter' ProbabilityTheory.evariance_def' ProbabilityTheory.gaussianReal_absolutelyContinuous' ProbabilityTheory.hasFiniteIntegral_compProd_iff' ProbabilityTheory.iIndep.iIndepSets' ProbabilityTheory.IndepFun.integral_mul' ProbabilityTheory.IndepFun.mgf_add' ProbabilityTheory.IndepSets.indep' ProbabilityTheory.IsMarkovKernel.is_probability_measure' ProbabilityTheory.IsMeasurableRatCDF.stieltjesFunctionAux_def' ProbabilityTheory.Kernel.borelMarkovFromReal_apply' ProbabilityTheory.Kernel.comap_apply' ProbabilityTheory.Kernel.comap_id' ProbabilityTheory.Kernel.comapRight_apply' ProbabilityTheory.Kernel.comp_apply' ProbabilityTheory.Kernel.const_comp' ProbabilityTheory.Kernel.deterministic_apply' ProbabilityTheory.Kernel.ext_iff' ProbabilityTheory.Kernel.finset_sum_apply' ProbabilityTheory.Kernel.fst_apply' ProbabilityTheory.Kernel.iIndep.iIndepSets' ProbabilityTheory.Kernel.IndepSets.indep' ProbabilityTheory.Kernel.integral_deterministic' ProbabilityTheory.Kernel.integral_integral_add' ProbabilityTheory.Kernel.integral_integral_sub' ProbabilityTheory.Kernel.lintegral_deterministic' ProbabilityTheory.Kernel.map_apply' ProbabilityTheory.Kernel.map_id' 
ProbabilityTheory.Kernel.measurable_kernel_prod_mk_left' ProbabilityTheory.Kernel.measure_eq_zero_or_one_of_indepSet_self' ProbabilityTheory.Kernel.piecewise_apply' ProbabilityTheory.Kernel.prod_apply' ProbabilityTheory.Kernel.prodMkLeft_apply' ProbabilityTheory.Kernel.prodMkRight_apply' ProbabilityTheory.Kernel.restrict_apply' ProbabilityTheory.Kernel.rnDeriv_def' ProbabilityTheory.Kernel.rnDeriv_eq_top_iff' ProbabilityTheory.Kernel.setIntegral_deterministic' ProbabilityTheory.Kernel.setLIntegral_deterministic' ProbabilityTheory.Kernel.snd_apply' ProbabilityTheory.Kernel.sum_apply' ProbabilityTheory.Kernel.swapLeft_apply' ProbabilityTheory.Kernel.swapRight_apply' ProbabilityTheory.Kernel.withDensity_apply' ProbabilityTheory.Kernel.withDensity_one' ProbabilityTheory.Kernel.withDensity_zero' ProbabilityTheory.lintegral_mul_eq_lintegral_mul_lintegral_of_indepFun'' ProbabilityTheory.measurable_preCDF' ProbabilityTheory.mgf_const' ProbabilityTheory.mgf_pos' ProbabilityTheory.mgf_zero' ProbabilityTheory.variance_def' ProbabilityTheory.variance_smul' Prod.exists' Prod.forall' Prod.isIsometricSMul' Prod.isIsometricSMul'' Prod.map_apply' Prod.map_fst' Prod.map_id' Prod.map_snd' prod_mul_tprod_nat_mul' Profinite.NobelingProof.coe_πs' Profinite.NobelingProof.contained_C' Profinite.NobelingProof.injective_πs' Profinite.NobelingProof.Products.eval_πs' Profinite.NobelingProof.Products.eval_πs_image' Profinite.NobelingProof.Products.max_eq_o_cons_tail' Projectivization.submodule_mk'' Prop.countable' QPF.Cofix.bisim' QPF.liftp_iff' QPF.recF_eq' QPF.Wequiv.abs' quadraticChar_eq_pow_of_char_ne_two' QuadraticForm.equivalent_weightedSumSquares_units_of_nondegenerate' QuadraticForm.posDef_of_toMatrix' QuadraticForm.posDef_toMatrix' QuadraticMap.isSymm_toMatrix' QuadraticMap.map_sum' quasiIsoAt_iff' quasiIsoAt_iff_exactAt' QuaternionAlgebra.self_add_star' QuaternionAlgebra.star_add_self' Quaternion.normSq_def' Quaternion.self_add_star' Quaternion.star_add_self' Quiver.Hom.unop_op' 
Quiver.Path.comp_inj' QuotientAddGroup.btw_coe_iff' Quotient.eq' Quotient.eq'' Quotient.exact' QuotientGroup.coe_mk' QuotientGroup.congr_mk' QuotientGroup.kerLift_mk' QuotientGroup.ker_mk' QuotientGroup.lift_mk' QuotientGroup.map_mk' QuotientGroup.mk'_eq_mk' QuotientGroup.out_eq' Quotient.hrecOn₂'_mk'' Quotient.hrecOn'_mk'' Quotient.liftOn₂'_mk'' Quotient.liftOn'_mk'' Quotient.map₂'_mk'' Quotient.map'_mk'' isQuotientMap_quotient_mk' Quotient.mk_out' Quotient.out_eq' Quotient.sound' Quotient.surjective_liftOn' range_pow_padicValNat_subset_divisors' rank_finsupp' rank_fun' rank_lt_rank_dual' Rat.add_def'' Rat.add_num_den' Rat.cast_mk' Rat.div_def' Rat.divInt_mul_divInt' Rat.divInt_self' Rat.floor_def' RatFunc.liftAlgHom_apply_div' RatFunc.liftMonoidWithZeroHom_apply_div' RatFunc.liftRingHom_apply_div' RatFunc.mk_eq_div' RatFunc.mk_eq_mk' RatFunc.mk_one' RatFunc.num_div' RatFunc.ofFractionRing_mk' Rat.instSMulCommClass' Rat.inv_def' Rat.inv_divInt' Rat.le_toNNRat_iff_coe_le' Rat.mk'_mul_mk' Rat.mul_num_den' Rat.normalize_eq_mk' Rat.sub_def'' Rat.substr_num_den' Rat.toNNRat_div' Rat.toNNRat_lt_toNNRat_iff' RCLike.hasSum_conj' RCLike.I_im' RCLike.normSq_eq_def' Real.arcsin_le_iff_le_sin' Real.arcsin_lt_iff_lt_sin' Real.arcsin_sin' Real.binEntropy_eq_negMulLog_add_negMulLog_one_sub' Real.b_ne_one' Real.coe_toNNReal' Real.continuousAt_const_rpow' Real.continuous_log' Real.cosh_sq' Real.cos_sq' Real.cos_two_mul' Real.deriv_cos' Real.deriv_log' Real.deriv_rpow_const' Real.eulerMascheroniConstant_lt_eulerMascheroniSeq' Real.eulerMascheroniSeq_lt_eulerMascheroniSeq' Real.exp_approx_end' Real.exp_bound' Real.exp_bound_div_one_sub_of_interval' Real.fourierIntegral_continuousLinearMap_apply' Real.fourierIntegral_continuousMultilinearMap_apply' Real.fourierIntegral_eq' Real.fourierIntegralInv_eq' Real.hasDerivAt_arctan' Real.inner_le_Lp_mul_Lq_tsum_of_nonneg' Real.le_arcsin_iff_sin_le' Real.le_def' Real.le_sqrt' Real.le_toNNReal_iff_coe_le' Real.list_prod_map_rpow' 
Real.logb_nonpos_iff' Real.log_nonpos_iff' Real.Lp_add_le_tsum_of_nonneg' Real.lt_arcsin_iff_sin_lt' Real.natCastle_toNNReal' Real.nndist_eq' Real.rpow_add' Real.rpow_add_intCast' Real.rpow_add_natCast' Real.rpow_add_one' Real.rpow_le_rpow_of_exponent_ge' Real.rpow_lt_one_iff' Real.rpow_one_add' Real.rpow_one_sub' Real.rpow_sub' Real.rpow_sub_intCast' Real.rpow_sub_natCast' Real.rpow_sub_one' Real.sin_arcsin' Real.sqrt_div' Real.sqrt_div_self' Real.sqrt_eq_zero' Real.sqrt_le_sqrt_iff' Real.sqrt_lt' Real.sqrt_mul' Real.sqrt_ne_zero' Real.strictAnti_eulerMascheroniSeq' Real.surjOn_log' Real.surjOn_logb' Real.tan_add' Real.tan_eq_zero_iff' Real.tendsto_eulerMascheroniSeq' Real.tendsto_integral_gaussian_smul' Real.toNNReal_div' Real.toNNReal_le_toNNReal_iff' Real.toNNReal_lt_natCast' Real.toNNReal_lt_toNNReal_iff' RegularExpression.rmatch_iff_matches' Relation.ReflTransGen.lift' Relation.TransGen.closed' Relation.TransGen.head' Relation.TransGen.lift' Relation.TransGen.tail' RelSeries.last_snoc' RelSeries.toList_chain' RightOrdContinuous.map_sInf' Ring.choose_one_right' Ring.choose_zero_right' RingCon.smulCommClass' RingEquiv.mk_coe' RingHom.eq_intCast' RingHom.surjectiveOnStalks_iff_forall_maximal' Ring.inverse_eq_inv' Ring.mul_inverse_rev' Ring.multichoose_one_right' Ring.multichoose_zero_right' RingQuot.ringQuot_ext' RingTheory.Sequence.IsRegular.cons' RingTheory.Sequence.isRegular_cons_iff' RingTheory.Sequence.isWeaklyRegular_append_iff' RingTheory.Sequence.IsWeaklyRegular.cons' RingTheory.Sequence.isWeaklyRegular_cons_iff' RootPairing.coroot_eq_coreflection_of_root_eq' RootPairing.ne_zero' rootsOfUnity.integer_power_of_ringEquiv' root_X_pow_sub_C_ne_zero' SameRay.of_subsingleton' schnirelmannDensity_congr' sdiff_eq_self_iff_disjoint' sdiff_le' sdiff_le_iff' sdiff_sdiff_left' sdiff_sdiff_right' sdiff_sdiff_sup_sdiff' sdiff_sup_self' sdiff_symmDiff' segment_eq_Icc' segment_eq_image' Semigroup.opposite_smulCommClass' Seminorm.ball_finset_sup' Seminorm.ball_zero' 
Seminorm.closedBall_finset_sup' Seminorm.closedBall_zero' Seminorm.coe_sSup_eq' Seminorm.continuous' Seminorm.continuousAt_zero' Seminorm.uniformContinuous' Semiquot.blur_eq_blur' Semiquot.mem_blur' Semiquot.mem_pure' SeparationQuotient.uniformContinuous_lift' Set.biInter_and' Set.biInter_finsetSigma' Set.biInter_le_succ' Set.biInter_lt_succ' Set.biInter_sigma' Set.bijOn_of_subsingleton' Set.biUnion_and' Set.biUnion_finsetSigma' Set.biUnion_finsetSigma_univ' Set.biUnion_le_succ' Set.biUnion_lt_succ' Set.biUnion_sigma' SetCoe.exists' SetCoe.forall' Set.empty_card' Set.encard_exchange' Set.eq_of_mem_uIcc_of_mem_uIcc' Set.eq_of_mem_uIoc_of_mem_uIoc' Set.eq_of_nonempty_of_subsingleton' Set.EqOn.piecewise_ite' Set.eval_preimage' Set.exists_intermediate_set' Set.finite' Set.finite_diff_iUnion_Ioo' Set.Finite.eq_of_subset_of_encard_le' Set.Finite.preimage' Set.Finite.seq' Set.Finite.toFinset_insert' Set.fintypeBind' Set.fintypeBiUnion' Set.fintypeSeq' Set.Icc_mul_Icc_subset' Set.Icc_mul_Ico_subset' Set.Icc_subset_uIcc' Set.Icc_union_Icc' Set.Icc_union_Ici' Set.Ici_mul_Ici_subset' Set.Ici_mul_Ioi_subset' Set.Ico_mul_Icc_subset' Set.Ico_mul_Ioc_subset' Set.Ico_union_Ici' Set.Ico_union_Ico' Set.Iic_mul_Iic_subset' Set.Iic_mul_Iio_subset' Set.Iic_union_Icc' Set.Iic_union_Ioc' Set.iInter₂_mono' Set.iInter_iInter_eq' Set.iInter_mono' Set.iInter_mono'' Set.iInter_sigma' Set.Iio_mul_Iic_subset' Set.Iio_union_Ico' Set.Iio_union_Ioo' Set.image_affine_Icc' Set.image_mul_left' Set.image_mul_left_Icc' Set.image_mul_right' Set.image_mul_right_Icc' Set.Infinite.preimage' setIntegral_withDensity_eq_setIntegral_smul₀' Set.Ioc_mul_Ico_subset' Set.Ioc_subset_uIoc' Set.Ioc_union_Ioc' Set.Ioc_union_Ioi' Set.Ioi_mul_Ici_subset' Set.Ioo_union_Ioi' Set.Ioo_union_Ioo' Set.isScalarTower' Set.isScalarTower'' Set.iUnion₂_mono' Set.iUnion_iUnion_eq' Set.iUnion_mono' Set.iUnion_mono'' Set.iUnion_sigma' Set.LeftInvOn.image_image' Set.LeftInvOn.image_inter' SetLike.ext' Set.mapsTo_of_subsingleton' 
Set.mulIndicator_apply_le' Set.mulIndicator_compl' Set.mulIndicator_diff' Set.mulIndicator_div' Set.mulIndicator_empty' Set.mulIndicator_eq_one' Set.mulIndicator_inv' Set.mulIndicator_le' Set.mulIndicator_le_mulIndicator' Set.mulIndicator_le_self' Set.mulIndicator_mul' Set.mulIndicator_one' Set.ncard_eq_toFinset_card' Set.ncard_exchange' Set.nonempty_of_ssubset' Set.Nonempty.preimage' Setoid.comm' Setoid.eqv_class_mem' Setoid.ext' Setoid.ker_apply_mk_out' Setoid.refl' Setoid.symm' Setoid.trans' Set.ordConnected_iInter' Set.OrdConnected.inter' Set.ordConnected_pi' Set.PairwiseDisjoint.elim' Set.Pairwise.mono' Set.piecewise_mem_Icc' Set.pi_eq_empty_iff' Set.PiSetCoe.canLift' Set.preimage_eq_preimage' Set.preimage_id' Set.preimage_mul_left_one' Set.preimage_mul_right_one' Set.Quotient.range_mk'' Set.range_id' Set.range_ite_subset' Set.range_quotient_lift_on' Set.range_quotient_mk' Set.setOf_eq_eq_singleton' Set.singleton_pi' Set.Sized.subsingleton' Set.smulCommClass_set' Set.smulCommClass_set'' Set.smul_inter_ne_empty_iff' Set.smul_univ₀' Set.star_inv' Set.star_mem_centralizer' Set.surjOn_of_subsingleton' SetTheory.Game.birthday_neg' SetTheory.PGame.add_le_add_right' SetTheory.PGame.Equiv.not_fuzzy' SetTheory.PGame.Fuzzy.not_equiv' SetTheory.PGame.LF.not_equiv' SetTheory.PGame.moveLeft_neg' SetTheory.PGame.moveLeft_neg_symm' SetTheory.PGame.moveLeft_nim' SetTheory.PGame.moveRight_neg' SetTheory.PGame.moveRight_neg_symm' SetTheory.PGame.moveRight_nim' SetTheory.PGame.ofLists_moveLeft' SetTheory.PGame.ofLists_moveRight' SetTheory.PGame.relabel_moveLeft' SetTheory.PGame.relabel_moveRight' SetTheory.PGame.Subsequent.mk_right' SetTheory.PGame.zero_lf_inv' Set.uIcc_subset_uIcc_iff_le' Set.union_diff_cancel' Set.WellFoundedOn.mono' Sigma.exists' Sigma.forall' sigma_mk_preimage_image' SimpleGraph.Adj.ne' SimpleGraph.cliqueSet_mono' SimpleGraph.cycleGraph_adj' SimpleGraph.dart_edge_eq_mk'_iff' SimpleGraph.FarFromTriangleFree.cliqueFinset_nonempty' 
SimpleGraph.Subgraph.connected_iff' SimpleGraph.Subgraph.Connected.mono' SimpleGraph.Subgraph.degree_le' SimpleGraph.TripartiteFromTriangles.Graph.in₀₁_iff' SimpleGraph.TripartiteFromTriangles.Graph.in₀₂_iff' SimpleGraph.TripartiteFromTriangles.Graph.in₁₀_iff' SimpleGraph.TripartiteFromTriangles.Graph.in₁₂_iff' SimpleGraph.TripartiteFromTriangles.Graph.in₂₀_iff' SimpleGraph.TripartiteFromTriangles.Graph.in₂₁_iff' SimpleGraph.Walk.coe_support_append' SimpleGraph.Walk.IsPath.mk' simple_iff_isSimpleModule' SimplexCategory.eq_comp_δ_of_not_surjective' SimplexCategory.eq_σ_comp_of_not_injective' SimplexCategory.Hom.ext' SimplexCategory.δ_comp_δ' SimplexCategory.δ_comp_δ'' SimplexCategory.δ_comp_δ_self' SimplexCategory.δ_comp_σ_of_gt' SimplexCategory.δ_comp_σ_self' SimplexCategory.δ_comp_σ_succ' SimplicialObject.Splitting.hom_ext' SimplicialObject.Splitting.IndexSet.ext' sInf_eq_iInf' sInf_image' skewAdjoint.conjugate' SlashInvariantForm.slash_action_eqn' small_biInter' small_iInter' small_sInter' SmoothPartitionOfUnity.sum_finsupport' smoothWithinAt_finset_prod' smul_ball'' smul_closedBall' smul_closedBall'' SMulCommClass.nnrat' SMulCommClass.rat' smul_div' smul_eq_smul_iff_eq_and_eq_of_pos' smul_finprod' smul_inv' smul_left_injective' smul_le_smul' smul_lt_smul' smul_lt_smul_of_le_of_lt' smul_lt_smul_of_lt_of_le' smul_mul' smul_nonneg' smul_pos' smul_pow' smul_sphere' spec' SpectralMap.coe_comp_continuousMap' spinGroup.star_eq_inv' sq_le_sq' sq_lt_sq' sSup_eq_bot' sSup_eq_iSup' sSup_image' StarAlgHom.coe_mk' star_comm_self' StarConvex.sub' star_inv' Stream' Stream'.corec' Stream'.drop_tail' Stream'.get_succ_iterate' Stream'.Seq1.map_join' Stream'.tail_drop' Stream'.take_succ' StrictAnti.const_mul' StrictAnti.ite' StrictAnti.mul_const' StrictAntiOn.const_mul' StrictAntiOn.mul_const' StrictMono.const_mul' StrictMono.ite' StrictMono.mul_const' StrictMonoOn.const_mul' StrictMonoOn.mul_const' StrictWeakOrder.not_lt_of_equiv' String.LT' 
StructureGroupoid.LocalInvariantProp.congr' StructureGroupoid.LocalInvariantProp.congr_nhdsWithin' StructureGroupoid.LocalInvariantProp.liftPropWithinAt_inter' Subalgebra.algebra' Subalgebra.coe_valA' Subalgebra.module' Subbimodule.smul_mem' sub_div' Subgroup.center_eq_infi' Subgroup.comap_equiv_eq_map_symm' Subgroup.commutator_def' Subgroup.disjoint_def' Subgroup.eq_top_iff' Subgroup.finiteIndex_iInf' Subgroup.map_equiv_eq_comap_symm' Subgroup.map_le_map_iff' Subgroup.mem_normalizer_iff' Subgroup.mem_normalizer_iff'' Subgroup.mem_sup' Subgroup.Normal.conj_mem' Subgroup.quotient_finite_of_isOpen' Subgroup.smul_diff' Subgroup.smul_diff_smul' Subgroup.smul_opposite_image_mul_preimage' Subgroup.transferTransversal_apply' Subgroup.transferTransversal_apply'' Sublattice.coe_inf' SubmoduleClass.module' Submodule.coe_continuous_linearProjOfClosedCompl' Submodule.coe_prodEquivOfIsCompl' Submodule.coe_subtypeL' Submodule.comap_smul' Submodule.disjoint_def' Submodule.disjoint_span_singleton' Submodule.eq_top_iff' Submodule.hasSMul' Submodule.inhabited' Submodule.isScalarTower' Submodule.ker_liftQ_eq_bot' Submodule.le_sInf' Submodule.linearProjOfIsCompl_apply_right' Submodule.map_smul' Submodule.map_smul'' Submodule.map_toAddSubmonoid' Submodule.mem_annihilator' Submodule.mem_colon' Submodule.mem_ideal_smul_span_iff_exists_sum' Submodule.mem_localized' Submodule.mem_span_insert' Submodule.mem_sup' Submodule.module' Submodule.orderIsoMapComap_apply' Submodule.orderIsoMapComap_symm_apply' Submodule.Quotient.distribMulAction' Submodule.Quotient.distribSMul' Submodule.Quotient.eq' Submodule.Quotient.instSMul' Submodule.Quotient.mk'_eq_mk' Submodule.Quotient.module' Submodule.Quotient.mulAction' Submodule.Quotient.smulZeroClass' Submodule.sInf_le' Submodule.smul_mem_iff' Submodule.smul_mem_span_smul' Submodule.span_image' Submodule.unique' Submonoid.disjoint_def' Submonoid.eq_top_iff' Submonoid.LocalizationMap.eq' Submonoid.LocalizationMap.map_mk' 
Submonoid.LocalizationMap.mk'_eq_iff_eq' Submonoid.LocalizationMap.mk'_eq_of_eq' Submonoid.LocalizationMap.mk'_self' Submonoid.LocalizationMap.mk'_spec' Submonoid.LocalizationMap.mulEquivOfMulEquiv_mk' Submonoid.LocalizationMap.mul_mk'_one_eq_mk' Submonoid.LocalizationMap.sec_spec' Submonoid.LocalizationMap.symm_comp_ofMulEquivOfLocalizations_apply' Submonoid.mrange_inl' Submonoid.mrange_inr' SubMulAction.isScalarTower' SubMulAction.mem_one' SubMulAction.smul' SubMulAction.smul_mem_iff' Subring.closure_induction' Subring.coe_mk' Subring.eq_top_iff' Subring.mem_mk' Subsemigroup.eq_top_iff' Subsemiring.closure_induction' Subsemiring.coe_mk' Subsemiring.eq_top_iff' Subsemiring.mem_mk' subset_interior_mul' Subsingleton.antitone' Subsingleton.monotone' sub_sq' Subtype.preimage_coe_compl' SuccOrder.prelimitRecOn_succ' suffixLevenshtein_nil' sum_bernoulli' summable_geometric_two' Summable.matrix_blockDiag' summable_matrix_blockDiagonal' Summable.matrix_blockDiagonal' summable_mul_of_summable_norm' summable_of_isBigO' summable_of_isBigO_nat' summable_star_iff' summable_sum_mul_antidiagonal_of_summable_norm' summable_sum_mul_range_of_summable_norm' sup_eq_half_smul_add_add_abs_sub' sup_sdiff_cancel' Surreal.dyadicMap_apply_pow' Surreal.nsmul_pow_two_powHalf' Sym2.instDecidableRel' Sym2.mem_iff' Sym2.other_eq_other' Sym2.other_invol' Sym2.other_mem' Sym2.other_spec' Sym2.rel_iff' Sym.inhabitedSym' symmDiff_eq' symmDiff_eq_Xor' symmDiff_symmDiff_right' symmDiff_symmDiff_self' symmDiff_top' SymplecticGroup.coe_inv' SymplecticGroup.mem_iff' t0Space_iff_uniformity' Tactic.NormNum.int_gcd_helper' Tactic.NormNum.nat_gcd_helper_1' Tactic.NormNum.nat_gcd_helper_2' tendsto_ceil_left' tendsto_ceil_right' tendsto_const_mul_pow_nhds_iff' tendsto_floor_left' tendsto_floor_right' tendsto_fract_left' tendsto_fract_right' tendsto_gauge_nhds_zero' tendsto_indicator_const_apply_iff_eventually' tendsto_indicator_const_iff_forall_eventually' tendsto_indicator_const_iff_tendsto_pi_pure' 
tendsto_measure_Icc_nhdsWithin_right' tendsto_nhds_bot_mono' tendsto_nhds_top_mono' tendsto_nhds_unique' tendsto_norm' tendsto_norm_atTop_iff_cobounded' tendsto_norm_cobounded_atTop' tendsto_norm_cocompact_atTop' tendsto_norm_zero' TensorProduct.ext' TensorProduct.finsuppLeft_smul' TensorProduct.isPushout' TensorProduct.lift.tmul' TensorProduct.smul_tmul' Theorems100.«82».Cube.hw' Theorems100.num_series' three_ne_zero' toIcoDiv_add_left' toIcoDiv_add_right' toIcoDiv_add_zsmul' toIcoDiv_neg' toIcoDiv_sub' toIcoDiv_sub_eq_toIcoDiv_add' toIcoDiv_sub_zsmul' toIcoMod_add_left' toIcoMod_add_right' toIcoMod_add_zsmul' toIcoMod_mem_Ico' toIcoMod_neg' toIcoMod_sub' toIcoMod_sub_zsmul' toIcoMod_zsmul_add' toIocDiv_add_left' toIocDiv_add_right' toIocDiv_add_zsmul' toIocDiv_neg' toIocDiv_sub' toIocDiv_sub_eq_toIocDiv_add' toIocDiv_sub_zsmul' toIocMod_add_left' toIocMod_add_right' toIocMod_add_zsmul' toIocMod_neg' toIocMod_sub' toIocMod_sub_zsmul' toIocMod_zsmul_add' toIxxMod_total' TopCat.GlueData.preimage_image_eq_image' TopCat.isOpenEmbedding_iff_comp_isIso' TopCat.isOpenEmbedding_iff_isIso_comp' TopCat.Presheaf.germ_stalkSpecializes' TopCat.Presheaf.pushforward_eq' TopCat.Presheaf.pushforward_map_app' TopologicalGroup.of_nhds_one' TopologicalSpace.OpenNhds.map_id_obj' TopologicalSpace.Opens.coe_inclusion' TopologicalSpace.Opens.map_comp_obj' TopologicalSpace.Opens.map_functor_eq' TopologicalSpace.Opens.map_id_obj' TopologicalSpace.Opens.isOpenEmbedding' TopologicalSpace.Opens.set_range_forget_map_inclusion' TopologicalSpace.Opens.set_range_inclusion' TopologicalSpace.SecondCountableTopology.mk' Topology.WithScott.isOpen_iff_isUpperSet_and_scottHausdorff_open' top_sdiff' top_symmDiff' toSubalgebra_toIntermediateField' T_pow' tprod_comm' tprod_eq_prod' tprod_eq_zero_mul' tprod_le_of_prod_le' tprod_prod' tprod_sigma' Traversable.map_traverse' Traversable.naturality' Traversable.traverse_eq_map_id' Traversable.traverse_map' Trivialization.apply_symm_apply' 
Trivialization.coe_coordChangeL' Trivialization.coe_fst' Trivialization.coe_fst_eventuallyEq_proj' Trivialization.continuousLinearEquivAt_apply' Trivialization.ext' Trivialization.mk_proj_snd' Trivialization.proj_symm_apply' TrivSqZeroExt.algebra' TrivSqZeroExt.algebraMap_eq_inl' TrivSqZeroExt.algHom_ext' TrivSqZeroExt.snd_pow_of_smul_comm' TruncatedWittVector.commutes' TruncatedWittVector.commutes_symm' tsum_choose_mul_geometric_of_norm_lt_one' tsum_geometric_two' tsum_mul_tsum_eq_tsum_sum_antidiagonal_of_summable_norm' tsum_mul_tsum_eq_tsum_sum_range_of_summable_norm' tsum_mul_tsum_of_summable_norm' Tuple.proj_equiv₁' Turing.PartrecToTM2.trStmts₁_supports' Turing.Reaches₀.tail' Turing.Tape.exists_mk' Turing.Tape.map_mk' Turing.Tape.move_left_mk' Turing.Tape.move_right_mk' Turing.Tape.write_mk' Turing.TM1to1.trTape_mk' Turing.tr_eval' two_ne_zero' TwoSidedIdeal.mem_mk' TypeVec.appendFun_comp' TypeVec.drop_append1' TypeVec.dropFun_RelLast' TypeVec.subtypeVal_toSubtype' TypeVec.toSubtype'_of_subtype' ULift.distribMulAction' ULift.distribSMul' ULift.isIsometricSMul' ULift.isScalarTower' ULift.isScalarTower'' ULift.module' ULift.mulAction' ULift.mulActionWithZero' ULift.mulDistribMulAction' ULift.smulWithZero' ULift.smulZeroClass' Ultrafilter.le_of_inf_neBot' Ultrafilter.map_id' UniformCauchySeqOn.prod' uniformContinuous_comap' UniformContinuous.const_mul' uniformContinuous_div_const' UniformContinuous.div_const' UniformContinuous.mul_const' uniformContinuous_mul_left' uniformContinuous_mul_right' uniformContinuous_nnnorm' uniformContinuous_norm' isUniformEmbedding_iff' UniformGroup.mk' isUniformInducing_iff' IsUniformInducing.mk' uniformity_basis_edist' uniformity_basis_edist_le' uniformity_eq_comap_nhds_one' UniformSpace.Completion.ext' unique' uniqueDiffWithinAt_inter' UniqueDiffWithinAt.inter' UniqueFactorizationMonoid.exists_reduced_factors' UniqueMDiffWithinAt.inter' UniqueMDiffWithinAt.smooth_bundle_preimage' Unique.subsingleton_unique' Unique.subtypeEq' 
unitary.star_eq_inv' Unitization.algHom_ext'' Unitization.quasispectrum_eq_spectrum_inr' Units.coe_map' Units.conj_pow' Units.inv_mul' Units.mul_inv' UniversalEnvelopingAlgebra.lift_ι_apply' update_le_update_iff' upperClosure_interior_subset' UpperHalfPlane.cosh_dist' UpperHalfPlane.ext_iff' UpperHalfPlane.ModularGroup.det_coe' UpperHalfPlane.mul_smul' UV.compress_of_disjoint_of_le' Valuation.Integers.one_of_isUnit' Valuation.map_add' Valuation.map_sum_lt' ValuationSubring.isIntegral_of_mem_ringOfIntegers' Vector.continuous_insertNth' VitaliFamily.ae_tendsto_lintegral_div' volume_regionBetween_eq_integral' volume_regionBetween_eq_lintegral' WCovBy.of_le_of_le' WeakBilin.instModule' WeakSpace.instModule' WeierstrassCurve.Affine.CoordinateRing.mk_XYIdeal'_mul_mk_XYIdeal' WeierstrassCurve.Affine.equation_iff' WeierstrassCurve.Affine.nonsingular_iff' WeierstrassCurve.Affine.Point.add_of_X_ne' WeierstrassCurve.Affine.Point.add_of_Y_ne' WeierstrassCurve.Affine.Point.add_self_of_Y_ne' WeierstrassCurve.baseChange_preΨ' WeierstrassCurve.coeff_preΨ' WeierstrassCurve.Jacobian.add_of_Y_ne' WeierstrassCurve.Jacobian.addX_eq' WeierstrassCurve.Jacobian.addX_of_X_eq' WeierstrassCurve.Jacobian.addY_of_X_eq' WeierstrassCurve.Jacobian.dblXYZ_of_Y_eq' WeierstrassCurve.Jacobian.dblZ_ne_zero_of_Y_ne' WeierstrassCurve.Jacobian.equiv_iff_eq_of_Z_eq' WeierstrassCurve.Jacobian.isUnit_dblZ_of_Y_ne' WeierstrassCurve.Jacobian.negAddY_eq' WeierstrassCurve.Jacobian.negAddY_of_X_eq' WeierstrassCurve.Jacobian.neg_of_Z_eq_zero' WeierstrassCurve.Jacobian.Y_eq_iff' WeierstrassCurve.Jacobian.Y_eq_of_Y_ne' WeierstrassCurve.Jacobian.Y_ne_negY_of_Y_ne' WeierstrassCurve.leadingCoeff_preΨ' WeierstrassCurve.map_preΨ' WeierstrassCurve.natDegree_coeff_preΨ' WeierstrassCurve.natDegree_preΨ' WeierstrassCurve.Projective.add_of_Y_ne' WeierstrassCurve.Projective.addX_eq' WeierstrassCurve.Projective.addY_of_X_eq' WeierstrassCurve.Projective.addZ_eq' WeierstrassCurve.Projective.dblX_eq' 
WeierstrassCurve.Projective.dblY_of_Y_eq' WeierstrassCurve.Projective.dblZ_ne_zero_of_Y_ne' WeierstrassCurve.Projective.equiv_iff_eq_of_Z_eq' WeierstrassCurve.Projective.isUnit_dblZ_of_Y_ne' WeierstrassCurve.Projective.negAddY_eq' WeierstrassCurve.Projective.negAddY_of_X_eq' WeierstrassCurve.Projective.negDblY_eq' WeierstrassCurve.Projective.negDblY_of_Y_eq' WeierstrassCurve.Projective.Y_eq_iff' WeierstrassCurve.Projective.Y_eq_of_Y_ne' WeierstrassCurve.Projective.Y_ne_negY_of_Y_ne' WellFounded.monotone_chain_condition' WfDvdMonoid.max_power_factor' WithBot.bot_mul' WithBot.coe_sInf' WithBot.coe_sSup' WithBot.le_coe_unbot' WithBot.mul_bot' WithBot.unbot_one' WithTop.coe_sInf' WithTop.coe_sSup' WithTop.distrib' WithTop.mul_top' WithTop.top_mul' WithTop.untop_one' WithZero.map'_map' WittVector.aeval_verschiebung_poly' WittVector.exists_eq_pow_p_mul' WittVector.idIsPolyI' WittVector.nth_mul_coeff' WittVector.poly_eq_of_wittPolynomial_bind_eq' WittVector.RecursionBase.solution_spec' WittVector.RecursionMain.succNthVal_spec' WittVector.truncate_mk' WriterT.callCC' WriterT.goto_mkLabel' WriterT.mkLabel' WType.WType' Xor' xor_iff_not_iff' X_pow_sub_C_eq_prod' zero_le' zero_lt_one_add_norm_sq' zero_mem_ℓp' zero_ne_one' ZFSet.IsTransitive.sUnion' ZMod.cast_add' ZMod.cast_id' ZMod.cast_intCast' ZMod.cast_mul' ZMod.cast_natCast' ZMod.cast_one' ZMod.cast_pow' ZMod.cast_sub' ZMod.intCast_eq_intCast_iff' ZMod.invDFT_apply' ZMod.invDFT_def' ZMod.natCast_eq_natCast_iff' ZMod.natCast_self' ZMod.neg_val' ZMod.nontrivial' ZMod.val_mul' ZMod.val_neg' ZMod.val_one' ZMod.val_one'' ZMod.val_unit' ZNum.cast_zero' ZNum.of_to_int' zpow_add' zpow_eq_zpow_emod' zpow_mul' zsmul_eq_mul' Zsqrtd.norm_eq_one_iff'
.lake/packages/mathlib/scripts/create_deprecated_modules.lean
--import Mathlib.Init
import Mathlib.Tactic.Linter.DeprecatedModule
import Std.Time.Zoned
import Lean.Meta.Tactic.TryThis
-- a comment here to test `keepTrailing

/-!
# Create a deprecated module

This file defines the Lean script for creating a deprecated module.
-/

open Lean Elab Command

namespace DeprecatedModule

/--
This file interacts with `git ...` quite a bit.
`runCmd` takes as input the command-line function `git ...` and returns its stdout string
as its output.
(Technically, the command need not be `git`: it can be any command we need.
We only use this for `git`, though.)

This is convenient to get both the output of the function, but also for reproducing the
exact command-line text that produced the output for better reproducibility and
error reporting.

*Warning*. Splitting of the input string happens at *every* space. This means that if you pass
`"git commit -m 'message with spaces'"`, the command will be split into
`["git", "commit", "-m", "'message", "with", "spaces'"]`, which is not what you want.
-/
def runCmd (s : String) : IO String := do
  -- Split on spaces: the first word is the executable, the rest are its arguments.
  -- NOTE(review): `splitOn` seems to always return at least one element, so this fallback
  -- may be unreachable — confirm before relying on the error message.
  let cmd::args := s.splitOn
    | EStateM.throw "Please provide at least one word in your command!"
  IO.Process.run {cmd := cmd, args := args.toArray}

/--
`getHeader fname fileContent keepTrailing` takes as input two strings and a `Bool`ean.
It uses
* `fname`, as the path of a file (which need not exist);
* `fileContent`, as the content of `fname` (regardless of whether the file exists and what its
  content is);
* `keepTrailing`, a boolean to control whether to keep trailing comments.

It returns just the imports of `fileContent`, including trailing comments if
`keepTrailing = true` (the command always trims trailing whitespace after the last comment).
-/
def getHeader (fname fileContent : String) (keepTrailing : Bool) : IO String := do
  -- Parse only the header (module doc and imports) of `fileContent`; `fname` is just
  -- used as the name of the input context for error reporting.
  let (stx, _) ← Parser.parseHeader (Parser.mkInputContext fileContent fname)
  -- Drop the trailing comments/whitespace attached to the header unless asked to keep them.
  let stx := if keepTrailing then stx.raw else stx.raw.unsetTrailing
  let some substring := stx.getSubstring?
    | throw <| .userError "No substring: we have a problem!"
  return substring.toString

/--
`getHeaderFromFileName fname keepTrailing` is similar to `getHeader`, except that it assumes
that `fname` is the actual path of a file, and uses `fname`'s content as input to `getHeader`.
-/
def getHeaderFromFileName (fname : String) (keepTrailing : Bool) : IO String := do
  getHeader fname (← IO.FS.readFile fname) keepTrailing

/--
`mkDeprecationWithDate date customMessage` returns the formatted syntax
`deprecated_module "customMessage" (since := "date")`, where the date is of the form
`YYYY-MM-DD`.
-/
def mkDeprecationWithDate (date : String)
    (customMessage : Option String := some "Auto-generated deprecation") :
    CommandElabM Format := do
  -- Turn the optional message and the date into string-literal syntax nodes.
  let msgStx := customMessage.map Syntax.mkStrLit
  let dateStx := Syntax.mkStrLit date
  let stx ← `(command|deprecated_module $[$msgStx]? (since := $dateStx))
  -- Pretty-print the quoted command so it can be written to a file verbatim.
  liftCoreM <| PrettyPrinter.ppCategory `command stx

/--
`mkDeprecation customMessage` returns the formatted syntax
`deprecated_module "customMessage" (since := "YYYY-MM-DD")`, where the date is today's date.
-/
def mkDeprecation (customMessage : Option String := some "Auto-generated deprecation") :
    CommandElabM Format := do
  -- Get the current date in UTC: we don't want this to depend on the user computer's time zone.
  let date := s!"{(← Std.Time.DateTime.now (tz := .UTC)).toPlainDate}"
  mkDeprecationWithDate date customMessage

/--
The command `#create_deprecated_module filePath (<comment>)? (rename_to <fname>) (write)?`
generates a module deprecation.

Writing
```lean
#create_deprecated_module path/To/DeletedFile.lean "This file is no longer relevant"
  rename_to "Mathlib/Path/To/Rename.lean"
```
checks that `path/To/DeletedFile.lean` is not currently present, but was present in `Mathlib/`
at some point.
If the check is successful, then it reports on its findings, shows how the corresponding
deprecated module should look like, using `"This file is no longer relevant"` as the
(optional) <comment>.
If the message is not explicitly used, `#create_deprecated_module` defaults to
`"Auto-generated deprecation"`.
If you wish there to be no comment, use `#create_deprecated_module path/To/DeletedFile.lean ""`.

If `rename_to "Mathlib/Path/To/Rename.lean"` is present, then instead of copying over the
imports from a deleted file, it uses `import Mathlib.Path.To.Rename`.

Finally, if everything looks correct, adding a final `write` actually generates the file:
```lean
#create_deprecated_module path/To/DeletedFile.lean "This file is no longer relevant" write
```
-/
syntax "#create_deprecated_module " str (ppSpace str)? (&" rename_to " str)? (&" write")?
  ppLine : command

/--
`processPrettyOneLine log msg fname` takes as input three strings `log`, `msg` and `fname`.
It expects `log` to be a line in the output of `git log --pretty=oneline`: it should look like
`<hash> <PRdescr>`.

It returns the pair `(<hash>, <msg> in <PRdescr> <hash> <diff of file wrt previous commit>)`,
formatted as a collapsible message.

In practice, `msg` is either `last modified` or `deleted`.
-/
def processPrettyOneLine (log msg fname : String) : IO (String × MessageData) := do
  -- The commit hash is the leading whitespace-free prefix of the log line.
  let hash := log.takeWhile (!·.isWhitespace)
  -- Everything after the hash (trimmed) is the PR description.
  let PRdescr := (log.drop hash.length).trim
  let gitDiffCLI := s!"git diff {hash}^...{hash} -- {fname}"
  -- If the `git diff` invocation fails, fall back to an error string rather than aborting.
  let diff ← runCmd gitDiffCLI <|> pure s!"{hash}: error in computing '{gitDiffCLI}'"
  -- Collapsible message showing the exact CLI command, with the diff nested inside it.
  let diffCollapsed := .trace {cls := .str .anonymous s!"{hash}"} m!"{gitDiffCLI}" #[m!"{diff}"]
  return (hash, m!"{msg} in " ++
    .trace {cls := .str .anonymous ("_" ++ hash.take 7)} m!"{PRdescr}" #[diffCollapsed])

/--
`mkRenamesDict percent` takes as optional input a natural number.
It computes the `git` status of the files at the current `HEAD` commit, comparing them with
`master`.
It returns a `HashMap` with keys the old names and values the new names of all the files that
git considers renames with likelihood at least the input `percent`.

If no input is provided, the default percentage is `100`.
-/
def mkRenamesDict (percent : Nat := 100) : IO (Std.HashMap String String) := do
  let mut dict := ∅
  let gitDiff ← runCmd s!"git diff --name-status origin/master...HEAD"
  let lines := gitDiff.trim.splitOn "\n"
  for git in lines do
    -- If `git` corresponds to a rename, it contains `3` segments, separated by a
    -- tab character (`\t`): `R%%`, `oldName`, `newName`.
    -- Lines with a different number of segments (additions, deletions, modifications)
    -- are silently skipped.
    let [pct, oldName, newName] := git.split (· == '\t') | continue
    if pct.take 1 != "R" then
      IO.println s!"mkRenamesDict: '{pct}' should have been of the form Rxxx, denoting a `R`ename \
        and a similarity percentage.\nFull git line: '{git}'"
      continue
    let some pctNat := (pct.drop 1).toNat? | continue
    -- This looks like a rename with a similarity index at least as big as our threshold:
    -- we add the rename to our dictionary.
    if percent ≤ pctNat then
      dict := dict.insert oldName newName
    -- This looks like a rename, but the similarity index is smaller than our threshold:
    -- we report a message and do not add the rename to our dictionary.
    else
      IO.println s!"'{oldName}' was renamed to '{newName}' ({pct}), but the similarity {pctNat}% \
        is less than the expected threshold of {percent}%.\n\n We treat this file as a removal."
  return dict

/--
`mkModName fname` takes as input a file path and returns the guessed module name:
the dir-separators get converted to `.` and a trailing `.lean` gets removed, if it exists.

*Note*. The input path is relative to the root of the project.
E.g., within `mathlib`, every path effectively starts as `Mathlib/...`.
-/
def mkModName (fname : System.FilePath) : String :=
  let cpts := fname.components
  -- Strip a trailing `.lean` from the last path component, if present.
  let cpts := match cpts.getLast? with
    | none => cpts
    | some last =>
      cpts.dropLast ++ [if last.endsWith ".lean" then last.dropRight ".lean".length else last]
  ".".intercalate cpts

#guard mkModName ("Mathlib" / "Data" / "Nat" / "Basic.lean") == "Mathlib.Data.Nat.Basic"
#guard mkModName "" == ""
#guard mkModName ("" / "") == "."

/--
`deprecateFilePath fname rename comment` takes as input
* the path `fname` of a file that was deleted;
* an optional module name `rename`, in case the file was renamed, rather than removed;
* an optional comment to add in the `deprecated module` syntax.

It returns a pair consisting of
* an array of `MessageData` giving details about the last time that `fname` was modified and
  when it was deleted;
* if `rename = some preRenamedModule`, then a suggestion to create a file with
  `import newName` in the regenerated module `preRenamedModule`;
  otherwise the content of the deprecated file, matching the original `fname` up to the last
  import command.

In both cases, after the imports, there will be the `deprecated_module` command with the
optional `comment` input, defaulting to `Auto-generated deprecation` if `comment = none`.
-/
def deprecateFilePath (fname : String) (rename comment : Option String) :
    CommandElabM (Array MessageData × String) := do
  let mut msgs : Array MessageData := #[]
  -- Check that the input `fname` is a file that currently does not exist.
  if ← System.FilePath.pathExists fname then
    throwError m!"The file {fname} exists: I cannot deprecate it!"
  -- Retrieve the last two commits that modified `fname`:
  -- the last one is the deletion, the previous one is the last file modification.
  -- (`git log` lists most recent first, hence the `[deleted, lastModified]` ordering.)
  let log ← runCmd s!"git log --pretty=oneline -2 -- {fname}"
  let [deleted, lastModified] := log.trim.splitOn "\n" |
    throwError "Found {(log.trim.splitOn "\n").length} commits, but expected 2! \
      Please make sure the file {fname} existed at some point!"
let (_deleteHash, deletedMsg) ← processPrettyOneLine deleted "deleted" fname let (modifiedHash, modifiedMsg) ← processPrettyOneLine lastModified "last modified" fname msgs := msgs.append #[m!"The file {fname} was\n", modifiedMsg, deletedMsg] -- Get the commit date, in `YYYY-MM-DD` format, of the commit deleting the file. let log' ← runCmd s!"git log --format=%cs -2 -- {fname}" let deletionDate := (log'.trim.splitOn "\n")[0]! let deprecation ← mkDeprecationWithDate deletionDate comment msgs := msgs.push "" -- Retrieve the final version of the file, before it was deleted. let file ← runCmd s!"git show {modifiedHash}:{fname}" -- Generate a module deprecation for the file `fname`. let fileHeader := ← match rename with | some rename => do let modName := mkModName rename pure s!"import {modName}" | none => getHeader fname file false let deprecatedFile := s!"{fileHeader.trimRight}\n\n{deprecation.pretty.trimRight}\n" msgs := msgs.push <| .trace {cls := `Deprecation} m!"{fname}" #[m!"\n{deprecatedFile}"] return (msgs, deprecatedFile) elab_rules : command | `(#create_deprecated_module%$tk $fnameStx:str $[$comment:str]? $[rename_to $rename?:str]? $[write%$write?]?) => do let fname := fnameStx.getString if ← System.FilePath.pathExists fname then logWarningAt fnameStx m!"The file {fname} exists: I cannot deprecate it!" return let (msgs, deprecatedFile) ← deprecateFilePath fname (rename?.map (·.getString)) (comment.map (·.getString)) if write?.isSome then if let some dir := System.FilePath.parent fname then if !(← System.FilePath.pathExists dir) then logInfoAt fnameStx m!"Creating directory {dir}" IO.FS.createDirAll dir IO.FS.writeFile fname deprecatedFile if write?.isNone then -- We strip trailing comments from `fnameStx` and `comment` to avoid them showing up in the -- regenerated syntax. let fnameStx := ⟨fnameStx.raw.unsetTrailing⟩ let comment := comment.map (⟨·.raw.unsetTrailing⟩) let stx ← `(command|#create_deprecated_module $fnameStx:str $[$comment:str]? 
$[rename_to $rename?:str]? write) liftTermElabM do Meta.liftMetaM do Meta.Tactic.TryThis.addSuggestion (← getRef).unsetTrailing { preInfo? := "Confirm that you are happy with the information below before continuing!\n\n" suggestion := stx postInfo? := if comment.isNone then "\nYou can add a reason for the removal after the file name, as a string." else ""} logInfoAt tk <| .joinSep msgs.toList "\n" /-- `#find_deleted_files (nc)? (pct%)?` takes an optional natural number input `nc` and an optional percentage `pct%`. Using `#find_deleted_files 5 80%` It looks at the `lean` files in `Mathlib` that existed `4` commits ago (i.e. the commit that you see with `git log -5`) and that are no longer present. It then proposes `Try these:` suggestions calling the `#create_deprecated_module` to finalize the deprecation. The percentage `pct` is used to detect renames: if a file was renamed with similarity at least `pct`, then the `#create_deprecated_module` suggestion will use the new name in the `import` command, rather than copying over the imports from the deleted file. Unlike what usually happens with `Try these:`, the original `#find_deleted_files` does not get replaced by the suggestion, which means that you can click on multiple suggestions and proceed with the deprecations later on. If the number of commits is not explicitly given, `#find_deleted_files` defaults to `2`, namely, the commit just prior to the current one. -/ elab tk:"#find_deleted_files" nc:(ppSpace num)? pct:(ppSpace num)? bang:&"%"? : command => do if pct.isSome && bang.isNone then throwError m!"Please add a '%' after the percentage {pct.getD default}" let n := nc.getD (Syntax.mkNumLit "2") |>.getNat if n == 0 then logWarningAt (nc.getD default) "The number of commits to look back must be at least 1!" return let mut msgs : Array MessageData := #[] -- Get the hash and the commit message of the commit at `git log -n` -- and format the message (with its hash) as a collapsible message. 
-- (throwing an error if that doesn't exist). let getHashAndMessage (n : Nat) : CommandElabM (String × MessageData) := do let log ← runCmd s!"git log --pretty=oneline -{n}" let some last := log.trim.splitOn "\n" |>.getLast? | throwError "Found no commits!" let commitHash := last.takeWhile (!·.isWhitespace) let PRdescr := (last.drop commitHash.length).trim return (commitHash, .trace {cls := `Commit} m!"{PRdescr}" #[m!"{commitHash}"]) let getFilesAtHash (hash : String) : CommandElabM (Std.HashSet String) := do let files ← runCmd s!"git ls-tree -r --name-only {hash} Mathlib/" return .ofList <| files.splitOn "\n" let (currentHash, currentPRdescr) ← getHashAndMessage 1 let currentFiles ← getFilesAtHash currentHash msgs := msgs.push m!"{currentFiles.size} files at the current commit {currentPRdescr}" let (pastHash, pastPRdescr) ← getHashAndMessage n let pastFiles ← getFilesAtHash pastHash msgs := msgs.push m!"{pastFiles.size} files at the past commit {pastPRdescr}" let onlyPastFiles := pastFiles.filter fun fil ↦ fil.endsWith ".lean" && !currentFiles.contains fil let noFiles := onlyPastFiles.size msgs := msgs.push m!"{noFiles} Lean file{if noFiles == 1 then "" else "s"} in 'Mathlib' that no longer exist:" msgs := msgs.push "" ++ onlyPastFiles.toArray.map (m!" {·}") |>.push "" if onlyPastFiles.isEmpty then logWarningAt (nc.getD ⟨tk⟩) m!"All Lean files in Mathlib that existed {n} commits ago are still present. \ Increase {n} to search further back!" return let mut suggestions : Array Meta.Tactic.TryThis.Suggestion := #[] let ref := .ofRange {tk.getRange?.get! with stop := tk.getPos?.get!} let dict ← mkRenamesDict (pct.getD (Syntax.mkNumLit "100")).getNat for fname in onlyPastFiles do let fnameStx := Syntax.mkStrLit fname let stx ← if let some newName := dict[fname]? 
then let newNameStx := Syntax.mkStrLit newName `(command|#create_deprecated_module $fnameStx rename_to $newNameStx) else `(command|#create_deprecated_module $fnameStx) suggestions := suggestions.push { suggestion := (⟨stx.raw.updateTrailing "hello".toSubstring⟩ : TSyntax `command) } let suggestionsText := if suggestions.size == 1 then ("the suggestion", "") else (s!"any of the {suggestions.size} suggestions", ", so you can click several of them") liftTermElabM do Meta.liftMetaM do Meta.Tactic.TryThis.addSuggestions (origSpan? := some ref) (header := s!"Try these:\n\n\ Clicking on {suggestionsText.1} below will *not* remove the \ `#find_delete_files` command{suggestionsText.2}.\n") tk suggestions logInfoAt tk <| .joinSep msgs.toList "\n" /-! If you already know the name of the file that you want to deprecate, then uncomment the `#create_deprecated_module` line below to get started, writing the file path as a string. * omitting `"a comment here"` is equivalent to using `"Auto-generated deprecation"` while using the empty string `""` eliminates the comment entirely; * uncomment `write` only when you are satisfied that the deprecations look correct! The command will also by proposed this as a `Try this` suggestion by the `#create_deprecated_module` command. -/ --#create_deprecated_module "Mathlib/LinearAlgebra/RootSystem/Finite/g2.lean" "a comment here" --write /-! If, instead, you are looking for a file to be deprecated, uncomment the `#find_deleted_files 10` line below to start scanning. You can play around with `10`: it represents the number of past commits that the command considers to find files that existed then and do not exist now. The exact value is not important: we are just looking for a file name. Once you found what you were looking for, click on all the relevant `Try these:` suggestions and continue following the instructions on these commands. 
Unlike what usually happens with `Try these:`, the original `#find_deleted_files` does not get replaced by the suggestion, which means that you can click on multiple suggestions and proceed with the deprecations later on. -/ #find_deleted_files 0 /-- info: import Mathlib.Tactic.Linter.DeprecatedModule import Std.Time.Zoned import Lean.Meta.Tactic.TryThis -/ #guard_msgs in run_cmd let fname ← getFileName let head ← getHeader fname (← getFileMap).source false logInfo head /-- info: import Mathlib.Tactic.Linter.DeprecatedModule import Std.Time.Zoned import Lean.Meta.Tactic.TryThis -- a comment here to test `keepTrailing -/ #guard_msgs in run_cmd let fname ← getFileName let head ← getHeader fname (← getFileMap).source true logInfo head
.lake/packages/mathlib/.github/PULL_REQUEST_TEMPLATE.md
--- <!-- The text above the `---` will become the commit message when your PR is merged. Please leave a blank newline before the `---`, otherwise GitHub will format the text above it as a title. For details on the "pull request lifecycle" in mathlib, please see: https://leanprover-community.github.io/contribute/index.html In particular, note that most reviewers will only notice your PR if it passes the continuous integration checks. Please ask for help on https://leanprover.zulipchat.com if needed. When merging, all the commits will be squashed into a single commit listing all co-authors. Co-authors in the squash commit are gathered from two sources: First, all authors of commits to this PR branch are included. Thus, one way to add co-authors is to include at least one commit authored by each co-author among the commits in the pull request. If necessary, you may create empty commits to indicate co-authorship, using commands like so: git commit --author="Author Name <author@email.com>" --allow-empty -m "add Author Name as coauthor" Second, co-authors can also be listed in lines at the very bottom of the commit message (that is, directly before the `---`) using the following format: Co-authored-by: Author Name <author@email.com> If you are moving or deleting declarations, please include these lines at the bottom of the commit message (before the `---`, and also before any "Co-authored-by" lines) using the following format: Moves: - Vector.* -> List.Vector.* - ... Deletions: - Nat.bit1_add_bit1 - ... Any other comments you want to keep out of the PR commit should go below the `---`, and placed outside this HTML comment, or else they will be invisible to reviewers. If this PR depends on other PRs, please list them below this comment, using the following format: - [ ] depends on: #abc [optional extra text] - [ ] depends on: #xyz [optional extra text] --> [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/from-referrer/)
.lake/packages/mathlib/.github/CONTRIBUTING.md
Please see [our contribution guide](https://leanprover-community.github.io/contribute/index.html) on our website.
.lake/packages/plausible/Plausible.lean
module public import Plausible.Random public import Plausible.Gen public import Plausible.Sampleable public import Plausible.Testable public import Plausible.Functions public import Plausible.Attr public import Plausible.Tactic public import Plausible.Arbitrary public import Plausible.DeriveArbitrary
.lake/packages/plausible/README.md
# Plausible

A property testing framework for Lean 4 that integrates into the tactic framework.

## Usage
If you are using built-in types Plausible is usually able to handle them already:
```lean
import Plausible

example (xs ys : Array Nat) : xs.size = ys.size → xs = ys := by
  /--
  ===================
  Found a counter-example!
  xs := #[0]
  ys := #[1]
  guard: 1 = 1
  issue: #[0] = #[1] does not hold
  (0 shrinks)
  -------------------
  -/
  plausible

#eval Plausible.Testable.check <| ∀ (xs ys : Array Nat), xs.size = ys.size → xs = ys
```

If you are defining your own type, it needs instances of `Repr`, `Plausible.Shrinkable`
and `Plausible.SampleableExt` (or `Plausible.Arbitrary`):
```lean
import Plausible

open Plausible

structure MyType where
  x : Nat
  y : Nat
  h : x ≤ y
  deriving Repr

instance : Shrinkable MyType where
  shrink := fun ⟨x, y, _⟩ =>
    let proxy := Shrinkable.shrink (x, y - x)
    proxy.map (fun (fst, snd) => ⟨fst, fst + snd, by omega⟩)

instance : SampleableExt MyType :=
  SampleableExt.mkSelfContained do
    let x ← SampleableExt.interpSample Nat
    let xyDiff ← SampleableExt.interpSample Nat
    return ⟨x, x + xyDiff, by omega⟩

-- No counter example found
#eval Testable.check <| ∀ a b : MyType, a.y ≤ b.x → a.x ≤ b.y
```

For more documentation refer to the module docs.

**Deriving Instance for `Arbitrary`** (for algebraic data types)

Users can write `deriving Arbitrary` after an inductive type definition, i.e.
```lean
inductive Foo where
  ...
  deriving Arbitrary
```

Alternatively, users can also write `deriving instance Arbitrary for T1, ..., Tn` as a
top-level command to derive `Arbitrary` instances for types `T1, ..., Tn` simultaneously.
.lake/packages/plausible/Test.lean
import Test.Tactic import Test.Testable -- Tests for `deriving Arbitrary` import Test.DeriveArbitrary.DeriveTreeGenerator import Test.DeriveArbitrary.DeriveSTLCTermTypeGenerators import Test.DeriveArbitrary.DeriveNKIValueGenerator import Test.DeriveArbitrary.DeriveNKIBinopGenerator import Test.DeriveArbitrary.DeriveRegExpGenerator import Test.DeriveArbitrary.StructureTest import Test.DeriveArbitrary.BitVecStructureTest import Test.DeriveArbitrary.MissingNonRecursiveConstructorTest import Test.DeriveArbitrary.ParameterizedTypeTest import Test.DeriveArbitrary.MutuallyRecursiveTypeTest
.lake/packages/plausible/Plausible/Attr.lean
module

public meta import Lean.Util.Trace

public meta section

open Lean

/-!
Trace classes used throughout Plausible. Each class registered here can be enabled
with `set_option trace.<class name> true` (e.g. `set_option trace.plausible.shrink.steps true`)
to surface the corresponding diagnostic output.
-/

initialize registerTraceClass `plausible.instance
initialize registerTraceClass `plausible.decoration
initialize registerTraceClass `plausible.discarded
initialize registerTraceClass `plausible.success
initialize registerTraceClass `plausible.shrink.steps
initialize registerTraceClass `plausible.shrink.candidates
initialize registerTraceClass `plausible.deriving.arbitrary
.lake/packages/plausible/Plausible/Functions.lean
module

public meta import Plausible.Sampleable
public meta import Plausible.Testable

@[expose] public meta section

/-!
## `plausible`: generators for functions

This file defines `Sampleable` instances for `α → β` functions and
`Int → Int` injective functions.

Functions are generated by creating a list of pairs and one more value
using the list as a lookup table and resorting to the additional value
when a value is not found in the table.

Injective functions are generated by creating a list of numbers and
a permutation of that list. The permutation ensures that every input
is mapped to a unique output. When an input is not found in the list
the input itself is used as an output.

Injective functions `f : α → α` could be generated easily instead of
`Int → Int` by generating a `List α`, removing duplicates and creating
a permutation. One has to be careful when generating the domain to make
it vast enough that, when generating arguments to apply `f` to, the
arguments are likely to lie in the domain of `f`. This is the reason
that injective functions `f : Int → Int` are generated by fixing the
domain to the range `[-2*size .. 2*size]`, with `size` the size parameter
of the `gen` monad.

Much of the machinery provided in this file is applicable to generate
injective functions of type `α → α` and new instances should be easy
to define.

Other classes of functions such as monotone functions can be generated
using similar techniques. For monotone functions, generating two lists,
sorting them and matching them should suffice, with appropriate default
values. Some care must be taken for shrinking such functions to make
sure their defining property is invariant through shrinking. Injective
functions are an example of how complicated it can get.
-/ universe u v w variable {α : Type u} {β : Type v} {γ : Sort w} namespace Plausible /-- Data structure specifying a total function using a list of pairs and a default value returned when the input is not in the domain of the partial function. `withDefault f y` encodes `x => f x` when `x ∈ f` and `x => y` otherwise. We use `Σ` to encode mappings instead of `×` because we rely on the association list API defined in `Mathlib/Data/List/Sigma.lean`. -/ inductive TotalFunction (α : Type u) (β : Type v) : Type max u v | withDefault : List (Σ _ : α, β) → β → TotalFunction α β instance TotalFunction.inhabited [Inhabited β] : Inhabited (TotalFunction α β) := ⟨TotalFunction.withDefault ∅ default⟩ namespace TotalFunction -- Porting note: new /-- Compose a total function with a regular function on the left -/ def comp {γ : Type w} (f : β → γ) : TotalFunction α β → TotalFunction α γ | TotalFunction.withDefault m y => TotalFunction.withDefault (m.map fun ⟨a, b⟩ => ⟨a, f b⟩) (f y) /-- Apply a total function to an argument. -/ def apply [DecidableEq α] : TotalFunction α β → α → β | TotalFunction.withDefault m y, x => (m.find? fun ⟨a, _⟩ => a = x).map Sigma.snd |>.getD y /-- Implementation of `Repr (TotalFunction α β)`. Creates a string for a given `Finmap` and output, `x₀ => y₀, .. xₙ => yₙ` for each of the entries. The brackets are provided by the calling function. -/ def reprAux [Repr α] [Repr β] (m : List (Σ _ : α, β)) : String := String.join <| -- Porting note: No `List.qsort`, so convert back and forth to an `Array`. Array.toList <| Array.qsort (lt := fun x y => x < y) (m.map fun x => s!"{(repr <| Sigma.fst x)} => {repr <| Sigma.snd x}, ").toArray /-- Produce a string for a given `TotalFunction`. The output is of the form `[x₀ => f x₀, .. xₙ => f xₙ, _ => y]`. 
-/ protected def repr [Repr α] [Repr β] : TotalFunction α β → String | TotalFunction.withDefault m y => s!"[{(reprAux m)}_ => {repr y}]" instance (α : Type u) (β : Type v) [Repr α] [Repr β] : Repr (TotalFunction α β) where reprPrec f _ := TotalFunction.repr f /-- Create a `Finmap` from a list of pairs. -/ def List.toFinmap' (xs : List (α × β)) : List (Σ _ : α, β) := xs.map (fun ⟨a, b⟩ => ⟨a, b⟩) section universe ua ub variable [SampleableExt.{_,u} α] [SampleableExt.{_,ub} β] variable [DecidableEq α] /-- Shrink a total function by shrinking the lists that represent it. -/ def shrink {α β} [DecidableEq α] [Shrinkable α] [Shrinkable β] : TotalFunction α β → List (TotalFunction α β) | ⟨m, x⟩ => (Shrinkable.shrink (m, x)).map fun ⟨m', x'⟩ => ⟨dedup m', x'⟩ where dedup (m' : List ((_ : α) × β)) : List ((_ : α) × β) := let rec insertKey (xs : List ((_ : α) × β)) (pair : (_ : α) × β) : List ((_ : α) × β) := match xs with | [] => [pair] | x :: xs => if pair.fst = x.fst then pair :: xs else x :: insertKey xs pair m'.foldl (init := []) insertKey variable [Repr α] instance Pi.sampleableExt : SampleableExt (α → β) where proxy := TotalFunction α (SampleableExt.proxy β) interp f := SampleableExt.interp ∘ f.apply sample := ⟨do let xs : List (_ × _) ← (Arbitrary.arbitrary (α := List (SampleableExt.proxy α × SampleableExt.proxy β))) let ⟨x⟩ ← Gen.up <| (Arbitrary.arbitrary : Gen (SampleableExt.proxy β)) pure <| TotalFunction.withDefault (List.toFinmap' <| xs.map <| Prod.map SampleableExt.interp id) x⟩ -- note: no way of shrinking the domain without an inverse to `interp` shrink := { shrink := letI : Shrinkable α := {}; TotalFunction.shrink } end section SampleableExt open SampleableExt instance (priority := 2000) PiPred.sampleableExt [SampleableExt (α → Bool)] : SampleableExt.{u + 1} (α → Prop) where proxy := proxy (α → Bool) interp m x := interp m x sample := sample shrink := SampleableExt.shrink instance (priority := 2000) PiUncurry.sampleableExt [SampleableExt (α × β → γ)] : 
SampleableExt.{imax (u + 1) (v + 1) w} (α → β → γ) where proxy := proxy (α × β → γ) interp m x y := interp m (x, y) sample := sample shrink := SampleableExt.shrink end SampleableExt end TotalFunction end Plausible
.lake/packages/plausible/Plausible/Testable.lean
module public meta import Lean.Elab.Tactic.Config public meta import Plausible.Sampleable public meta section /-! # `Testable` Class Testable propositions have a procedure that can generate counter-examples together with a proof that they invalidate the proposition. This is a port of the Haskell QuickCheck library. ## Creating Customized Instances The type classes `Testable`, `SampleableExt` and `Shrinkable` are the means by which `Plausible` creates samples and tests them. For instance, the proposition `∀ i j : Nat, i ≤ j` has a `Testable` instance because `Nat` is sampleable and `i ≤ j` is decidable. Once `Plausible` finds the `Testable` instance, it can start using the instance to repeatedly creating samples and checking whether they satisfy the property. Once it has found a counter-example it will then use a `Shrinkable` instance to reduce the example. This allows the user to create new instances and apply `Plausible` to new situations. ### What do I do if I'm testing a property about my newly defined type? Let us consider a type made for a new formalization: ```lean structure MyType where x : Nat y : Nat h : x ≤ y deriving Repr ``` How do we test a property about `MyType`? For instance, let us consider `Testable.check <| ∀ a b : MyType, a.y ≤ b.x → a.x ≤ b.y`. Writing this property as is will give us an error because we do not have an instance of `Shrinkable MyType` and `SampleableExt MyType`. We can define one as follows: ```lean instance : Shrinkable MyType where shrink := fun ⟨x, y, _⟩ => let proxy := Shrinkable.shrink (x, y - x) proxy.map (fun (fst, snd) => ⟨fst, fst + snd, by omega⟩) instance : SampleableExt MyType := SampleableExt.mkSelfContained do let x ← SampleableExt.interpSample Nat let xyDiff ← SampleableExt.interpSample Nat return ⟨x, x + xyDiff, by omega⟩ ``` Again, we take advantage of the fact that other types have useful `Shrinkable` implementations, in this case `Prod`. 
## Main definitions * `Testable` class * `Testable.check`: a way to test a proposition using random examples ## References * https://hackage.haskell.org/package/QuickCheck -/ namespace Plausible /-- Result of trying to disprove `p` -/ inductive TestResult (p : Prop) where /-- Succeed when we find another example satisfying `p`. In `success h`, `h` is an optional proof of the proposition. Without the proof, all we know is that we found one example where `p` holds. With a proof, the one test was sufficient to prove that `p` holds and we do not need to keep finding examples. -/ | success : Unit ⊕' p → TestResult p /-- Give up when a well-formed example cannot be generated. `gaveUp n` tells us that `n` invalid examples were tried. -/ | gaveUp : Nat → TestResult p /-- A counter-example to `p`; the strings specify values for the relevant variables. `failure h vs n` also carries a proof that `p` does not hold. This way, we can guarantee that there will be no false positive. The last component, `n`, is the number of times that the counter-example was shrunk. -/ | failure : ¬ p → List String → Nat → TestResult p deriving Inhabited /-- Configuration for testing a property. -/ structure Configuration where /-- How many test instances to generate. -/ numInst : Nat := 100 /-- The maximum size of the values to generate. -/ maxSize : Nat := 100 numRetries : Nat := 10 /-- Enable tracing of values that didn't fulfill preconditions and were thus discarded. -/ traceDiscarded : Bool := false /-- Enable tracing of values that fulfilled the property and were thus discarded. -/ traceSuccesses : Bool := false /-- Enable basic tracing of shrinking. -/ traceShrink : Bool := false /-- Enable tracing of all attempted values during shrinking. -/ traceShrinkCandidates : Bool := false /-- Hard code the seed to use for the RNG -/ randomSeed : Option Nat := none /-- Disable output. 
-/ quiet : Bool := false deriving Inhabited open Lean in instance : ToExpr Configuration where toTypeExpr := mkConst `Configuration toExpr cfg := mkApp9 (mkConst ``Configuration.mk) (toExpr cfg.numInst) (toExpr cfg.maxSize) (toExpr cfg.numRetries) (toExpr cfg.traceDiscarded) (toExpr cfg.traceSuccesses) (toExpr cfg.traceShrink) (toExpr cfg.traceShrinkCandidates) (toExpr cfg.randomSeed) (toExpr cfg.quiet) /-- Allow elaboration of `Configuration` arguments to tactics. -/ declare_config_elab elabConfig Configuration /-- `PrintableProp p` allows one to print a proposition so that `Plausible` can indicate how values relate to each other. It's basically a poor man's delaborator. -/ class PrintableProp (p : Prop) where printProp : String export PrintableProp (printProp) variable {p q : Prop} instance (priority := low) : PrintableProp p where printProp := "⋯" /-- `Testable p` uses random examples to try to disprove `p`. -/ class Testable (p : Prop) where run (cfg : Configuration) (minimize : Bool) : Gen (TestResult p) @[expose] def NamedBinder (_n : String) (p : Prop) : Prop := p namespace TestResult def toString : TestResult p → String | success (PSum.inl _) => "success (no proof)" | success (PSum.inr _) => "success (proof)" | gaveUp n => s!"gave {n} times" | failure _ counters _ => s!"failed {counters}" instance : ToString (TestResult p) := ⟨toString⟩ /-- Applicative combinator proof carrying test results. -/ def combine {p q : Prop} : Unit ⊕' (p → q) → Unit ⊕' p → Unit ⊕' q | PSum.inr f, PSum.inr proof => PSum.inr <| f proof | _, _ => PSum.inl () /-- Combine the test result for properties `p` and `q` to create a test for their conjunction. 
-/ def and : TestResult p → TestResult q → TestResult (p ∧ q) | failure h xs n, _ => failure (fun h2 => h h2.left) xs n | _, failure h xs n => failure (fun h2 => h h2.right) xs n | success h1, success h2 => success <| combine (combine (PSum.inr And.intro) h1) h2 | gaveUp n, gaveUp m => gaveUp <| n + m | gaveUp n, _ => gaveUp n | _, gaveUp n => gaveUp n /-- Combine the test result for properties `p` and `q` to create a test for their disjunction. -/ def or : TestResult p → TestResult q → TestResult (p ∨ q) | failure h1 xs n, failure h2 ys m => let h3 := fun h => match h with | Or.inl h3 => h1 h3 | Or.inr h3 => h2 h3 failure h3 (xs ++ ys) (n + m) | success h, _ => success <| combine (PSum.inr Or.inl) h | _, success h => success <| combine (PSum.inr Or.inr) h | gaveUp n, gaveUp m => gaveUp <| n + m | gaveUp n, _ => gaveUp n | _, gaveUp n => gaveUp n /-- If `q → p`, then `¬ p → ¬ q` which means that testing `p` can allow us to find counter-examples to `q`. -/ def imp (h : q → p) (r : TestResult p) (p : Unit ⊕' (p → q) := PSum.inl ()) : TestResult q := match r with | failure h2 xs n => failure (mt h h2) xs n | success h2 => success <| combine p h2 | gaveUp n => gaveUp n /-- Test `q` by testing `p` and proving the equivalence between the two. -/ def iff (h : q ↔ p) (r : TestResult p) : TestResult q := imp h.mp r (PSum.inr h.mpr) /-- When we assign a value to a universally quantified variable, we record that value using this function so that our counter-examples can be informative. -/ def addInfo (x : String) (h : q → p) (r : TestResult p) (p : Unit ⊕' (p → q) := PSum.inl ()) : TestResult q := if let failure h2 xs n := r then failure (mt h h2) (x :: xs) n else imp h r p /-- Add some formatting to the information recorded by `addInfo`. 
-/ def addVarInfo {γ : Type _} [Repr γ] (var : String) (x : γ) (h : q → p) (r : TestResult p) (p : Unit ⊕' (p → q) := PSum.inl ()) : TestResult q := addInfo s!"{var} := {repr x}" h r p def isFailure : TestResult p → Bool | failure _ _ _ => true | _ => false end TestResult namespace Configuration /-- A configuration with all the trace options enabled, useful for debugging. -/ def verbose : Configuration where traceDiscarded := true traceSuccesses := true traceShrink := true traceShrinkCandidates := true end Configuration namespace Testable open TestResult def runProp (p : Prop) [Testable p] : Configuration → Bool → Gen (TestResult p) := Testable.run def runPropE (p : Prop) [Testable p] (cfg : Configuration) (min : Bool) : Gen (TestResult p) := do try runProp p cfg min catch | .genError _ => return gaveUp 1 /-- A `dbgTrace` with special formatting -/ def slimTrace {m : Type → Type _} [Pure m] (s : String) : m PUnit := dbgTrace s!"[Plausible: {s}]" (fun _ => pure ()) instance andTestable [Testable p] [Testable q] : Testable (p ∧ q) where run := fun cfg min => do let xp ← runProp p cfg min let xq ← runProp q cfg min return and xp xq instance orTestable [Testable p] [Testable q] : Testable (p ∨ q) where run := fun cfg min => do let xp ← runProp p cfg min -- As a little performance optimization we can just not run the second -- test if the first succeeds match xp with | success (PSum.inl h) => return success (PSum.inl h) | success (PSum.inr h) => return success (PSum.inr <| Or.inl h) | _ => let xq ← runProp q cfg min return or xp xq instance iffTestable [Testable ((p ∧ q) ∨ (¬ p ∧ ¬ q))] : Testable (p ↔ q) where run := fun cfg min => do let h ← runProp ((p ∧ q) ∨ (¬ p ∧ ¬ q)) cfg min have := by constructor · intro h simp [h, Classical.em] · intro h rcases h with ⟨hleft, hright⟩ | ⟨hleft, hright⟩ <;> simp [hleft, hright] return iff this h variable {var : String} instance decGuardTestable [PrintableProp p] [Decidable p] {β : p → Prop} [∀ h, Testable (β h)] : Testable 
(NamedBinder var <| ∀ h, β h) where run := fun cfg min => do if h : p then let res := runProp (β h) cfg min let s := printProp p (fun r => addInfo s!"guard: {s}" (· <| h) r (PSum.inr <| fun q _ => q)) <$> res else if cfg.traceDiscarded || cfg.traceSuccesses then let res := return gaveUp 1 let s := printProp p slimTrace s!"discard: Guard {s} does not hold"; res else return gaveUp 1 instance forallTypesTestable {f : Type → Prop} [Testable (f Int)] : Testable (NamedBinder var <| ∀ x, f x) where run := fun cfg min => do let r ← runProp (f Int) cfg min return addVarInfo var "Int" (· <| Int) r -- TODO: only in mathlib: @[pp_with_univ] instance (priority := 100) forallTypesULiftTestable.{u} {f : Type u → Prop} [Testable (f (ULift.{u} Int))] : Testable (NamedBinder var <| ∀ x, f x) where run cfg min := do let r ← runProp (f (ULift Int)) cfg min pure <| addVarInfo var "ULift Int" (· <| ULift Int) r /-- Format the counter-examples found in a test failure. -/ def formatFailure (s : String) (xs : List String) (n : Nat) : String := let counter := String.intercalate "\n" xs let parts := [ "\n===================", s, counter, s!"({n} shrinks)", "-------------------" ] String.intercalate "\n" parts /-- Increase the number of shrinking steps in a test result. -/ def addShrinks (n : Nat) : TestResult p → TestResult p | TestResult.failure p xs m => TestResult.failure p xs (m + n) | p => p universe u in instance {α : Type u} {m : Type u → Type _} [Pure m] : Inhabited (OptionT m α) := ⟨(pure none : m (Option α))⟩ variable {α : Sort _} /-- Shrink a counter-example `x` by using `Shrinkable.shrink x`, picking the first candidate that falsifies a property and recursively shrinking that one. The process is guaranteed to terminate because `shrink x` produces a proof that all the values it produces are smaller (according to `SizeOf`) than `x`. 
-/ partial def minimizeAux [SampleableExt α] {β : α → Prop} [∀ x, Testable (β x)] (cfg : Configuration) (var : String) (x : SampleableExt.proxy α) (n : Nat) : OptionT Gen (Σ x, TestResult (β (SampleableExt.interp x))) := do let candidates := SampleableExt.shrink.shrink x if cfg.traceShrinkCandidates then slimTrace s!"Candidates for {var} := {repr x}:\n {repr candidates}" for candidate in candidates do if cfg.traceShrinkCandidates then slimTrace s!"Trying {var} := {repr candidate}" let res ← OptionT.lift <| Testable.runProp (β (SampleableExt.interp candidate)) cfg true if res.isFailure then if cfg.traceShrink then slimTrace s!"{var} shrunk to {repr candidate} from {repr x}" let currentStep := OptionT.lift <| return Sigma.mk candidate (addShrinks (n + 1) res) let nextStep := minimizeAux cfg var candidate (n + 1) return ← (nextStep <|> currentStep) if cfg.traceShrink then slimTrace s!"No shrinking possible for {var} := {repr x}" failure /-- Once a property fails to hold on an example, look for smaller counter-examples to show the user. -/ def minimize [SampleableExt α] {β : α → Prop} [∀ x, Testable (β x)] (cfg : Configuration) (var : String) (x : SampleableExt.proxy α) (r : TestResult (β <| SampleableExt.interp x)) : Gen (Σ x, TestResult (β <| SampleableExt.interp x)) := do if cfg.traceShrink then slimTrace "Shrink" slimTrace s!"Attempting to shrink {var} := {repr x}" let res ← OptionT.run <| minimizeAux cfg var x 0 return res.getD ⟨x, r⟩ /-- Test a universal property by creating a sample of the right type and instantiating the bound variable with it. -/ instance varTestable [SampleableExt α] {β : α → Prop} [∀ x, Testable (β x)] : Testable (NamedBinder var <| ∀ x : α, β x) where run := fun cfg min => do let x ← Arbitrary.arbitrary if cfg.traceSuccesses || cfg.traceDiscarded then slimTrace s!"{var} := {repr x}" -- Use `runPropE` here to collect errors from the call to `Arbitrary.arbitrary`. 
let r ← Testable.runPropE (β <| SampleableExt.interp x) cfg false let ⟨finalX, finalR⟩ ← if isFailure r then if cfg.traceSuccesses then slimTrace s!"{var} := {repr x} is a failure" if min then minimize cfg var x r else pure ⟨x, r⟩ else pure ⟨x, r⟩ return addVarInfo var finalX (· <| SampleableExt.interp finalX) finalR /-- Test a universal property about propositions -/ instance propVarTestable {β : Prop → Prop} [∀ b : Bool, Testable (β b)] : Testable (NamedBinder var <| ∀ p : Prop, β p) where run := fun cfg min => imp (fun h (b : Bool) => h b) <$> Testable.runProp (NamedBinder var <| ∀ b : Bool, β b) cfg min instance (priority := high) unusedVarTestable {β : Prop} [Nonempty α] [Testable β] : Testable (NamedBinder var (α → β)) where run := fun cfg min => do if cfg.traceDiscarded || cfg.traceSuccesses then slimTrace s!"{var} is unused" let r ← Testable.runProp β cfg min let finalR := addInfo s!"{var} is irrelevant (unused)" id r return imp (· <| Classical.ofNonempty) finalR (PSum.inr <| fun x _ => x) universe u in instance (priority := 2000) subtypeVarTestable {α : Type u} {p : α → Prop} {β : α → Prop} [∀ x, PrintableProp (p x)] [∀ x, Testable (β x)] [SampleableExt (Subtype p)] {var'} : Testable (NamedBinder var <| (x : α) → NamedBinder var' <| p x → β x) where run cfg min := letI (x : Subtype p) : Testable (β x) := { run := fun cfg min => do let r ← Testable.runProp (β x.val) cfg min return addInfo s!"guard: {printProp (p x)} (by construction)" id r (PSum.inr id) } do let r ← @Testable.run (∀ x : Subtype p, β x.val) (@varTestable var _ _ _ _) cfg min have := by simp [Subtype.forall, NamedBinder] return iff this r instance (priority := low) decidableTestable {p : Prop} [PrintableProp p] [Decidable p] : Testable p where run := fun _ _ => if h : p then return success (PSum.inr h) else let s := printProp p return failure h [s!"issue: {s} does not hold"] 0 end Testable section PrintableProp variable {α : Type _} instance Eq.printableProp [Repr α] {x y : α} : PrintableProp 
(x = y) where printProp := s!"{repr x} = {repr y}" instance Ne.printableProp [Repr α] {x y : α} : PrintableProp (x ≠ y) where printProp := s!"{repr x} ≠ {repr y}" instance LE.printableProp [Repr α] [LE α] {x y : α} : PrintableProp (x ≤ y) where printProp := s!"{repr x} ≤ {repr y}" instance LT.printableProp [Repr α] [LT α] {x y : α} : PrintableProp (x < y) where printProp := s!"{repr x} < {repr y}" variable {x y : Prop} instance And.printableProp [PrintableProp x] [PrintableProp y] : PrintableProp (x ∧ y) where printProp := s!"{printProp x} ∧ {printProp y}" instance Or.printableProp [PrintableProp x] [PrintableProp y] : PrintableProp (x ∨ y) where printProp := s!"{printProp x} ∨ {printProp y}" instance Iff.printableProp [PrintableProp x] [PrintableProp y] : PrintableProp (x ↔ y) where printProp := s!"{printProp x} ↔ {printProp y}" instance Imp.printableProp [PrintableProp x] [PrintableProp y] : PrintableProp (x → y) where printProp := s!"{printProp x} → {printProp y}" instance Not.printableProp [PrintableProp x] : PrintableProp (¬x) where printProp := s!"¬{printProp x}" instance True.printableProp : PrintableProp True where printProp := "True" instance False.printableProp : PrintableProp False where printProp := "False" instance Bool.printableProp {b : Bool} : PrintableProp b where printProp := if b then "true" else "false" end PrintableProp section IO open TestResult /-- Execute `cmd` and repeat every time the result is `gaveUp` (at most `n` times). -/ def retry (cmd : Gen (TestResult p)) : Nat → Gen (TestResult p) | 0 => return TestResult.gaveUp 1 | n+1 => do let r ← cmd match r with | .success hp => return success hp | .failure h xs n => return failure h xs n | .gaveUp _ => retry cmd n /-- Count the number of times the test procedure gave up. 
-/ def giveUp (x : Nat) : TestResult p → TestResult p | success (PSum.inl ()) => gaveUp x | success (PSum.inr p) => success <| (PSum.inr p) | gaveUp n => gaveUp <| n + x | TestResult.failure h xs n => failure h xs n /-- Try `n` times to find a counter-example for `p`. -/ def Testable.runSuiteAux (p : Prop) [Testable p] (cfg : Configuration) : TestResult p → Nat → Gen (TestResult p) | r, 0 => return r | r, n+1 => do let size (_ : Nat) := (cfg.numInst - n - 1) * cfg.maxSize / cfg.numInst if cfg.traceSuccesses then slimTrace s!"New sample" slimTrace s!"Retrying up to {cfg.numRetries} times until guards hold" let x ← retry ((Testable.runProp p cfg true).resize size) cfg.numRetries match x with | success (PSum.inl ()) => runSuiteAux p cfg r n | gaveUp g => runSuiteAux p cfg (giveUp g r) n | _ => return x /-- Try to find a counter-example of `p`. -/ def Testable.runSuite (p : Prop) [Testable p] (cfg : Configuration := {}) : Gen (TestResult p) := Testable.runSuiteAux p cfg (success <| PSum.inl ()) cfg.numInst /-- Run a test suite for `p` in `IO` using the global RNG in `stdGenRef`. -/ def Testable.checkIO (p : Prop) [Testable p] (cfg : Configuration := {}) : IO (TestResult p) := match cfg.randomSeed with | none => Gen.run (Testable.runSuite p cfg) 0 | some seed => runRandWith seed (Testable.runSuite p cfg) end IO namespace Decorations open Lean /-- Traverse the syntax of a proposition to find universal quantifiers quantifiers and add `NamedBinder` annotations next to them. 
-/ partial def addDecorations (e : Expr) : MetaM Expr := Meta.transform e fun expr => do if not (← Meta.inferType expr).isProp then return .done expr else if let Expr.forallE name type body data := expr then let newType ← addDecorations type let newBody ← Meta.withLocalDecl name data type fun fvar => do return (← addDecorations (body.instantiate1 fvar)).abstract #[fvar] let rest := Expr.forallE name newType newBody data return .done <| (← Meta.mkAppM `Plausible.NamedBinder #[mkStrLit name.toString, rest]) else return .continue /-- `DecorationsOf p` is used as a hint to `mk_decorations` to specify that the goal should be satisfied with a proposition equivalent to `p` with added annotations. -/ abbrev DecorationsOf (_p : Prop) := Prop open Elab.Tactic open Meta /-- In a goal of the shape `⊢ DecorationsOf p`, `mk_decoration` examines the syntax of `p` and adds `NamedBinder` around universal quantifications to improve error messages. This tool can be used in the declaration of a function as follows: ```lean def foo (p : Prop) (p' : Decorations.DecorationsOf p := by mk_decorations) [Testable p'] : ... ``` `p` is the parameter given by the user, `p'` is a definitionally equivalent proposition where the quantifiers are annotated with `NamedBinder`. -/ scoped elab "mk_decorations" : tactic => do let goal ← getMainGoal let goalType ← goal.getType if let .app (.const ``Decorations.DecorationsOf _) body := goalType then closeMainGoal `mk_decorations (← addDecorations body) end Decorations open Decorations in /-- Run a test suite for `p` and throw an exception if `p` does not hold. 
-/ def Testable.check (p : Prop) (cfg : Configuration := {}) (p' : Decorations.DecorationsOf p := by mk_decorations) [Testable p'] : Lean.CoreM PUnit := do match ← Testable.checkIO p' cfg with | TestResult.success _ => if !cfg.quiet then Lean.logInfo "Unable to find a counter-example" | TestResult.gaveUp n => if !cfg.quiet then let msg := s!"Gave up after failing to generate values that fulfill the preconditions {n} times." Lean.logWarning msg | TestResult.failure _ xs n => let msg := "Found a counter-example!" if cfg.quiet then Lean.throwError msg else Lean.throwError <| formatFailure msg xs n -- #eval Testable.check (∀ (x y z a : Nat) (h1 : 3 < x) (h2 : 3 < y), x - y = y - x) -- Configuration.verbose -- #eval Testable.check (∀ x : Nat, ∀ y : Nat, x + y = y + x) Configuration.verbose -- #eval Testable.check (∀ (x : (Nat × Nat)), x.fst - x.snd - 10 = x.snd - x.fst - 10) -- Configuration.verbose -- #eval Testable.check (∀ (x : Nat) (h : 10 < x), 5 < x) Configuration.verbose macro tk:"#test " e:term : command => `(command| #eval%$tk Testable.check $e) -- #test ∀ (x : Nat) (h : 5 < x), 10 < x -- #test ∀ (x : Nat) (h : 10 < x), 5 < x end Plausible
.lake/packages/plausible/Plausible/Gen.lean
module public meta import Plausible.Random public meta section /-! # `Gen` Monad This monad is used to formulate randomized computations with a parameter to specify the desired size of the result. ## Main definitions * `Gen` monad ## References * https://hackage.haskell.org/package/QuickCheck -/ universe u v namespace Plausible open Random /-- Error thrown on generation failure, e.g. because you've run out of resources. -/ inductive GenError : Type where | genError : String → GenError deriving Inhabited, Repr, BEq def Gen.genericFailure : GenError := .genError "Generation failure." /-- Monad to generate random examples to test properties with. It has a `Nat` parameter so that the caller can decide on the size of the examples. It allows failure to generate via the `Except` monad -/ abbrev Gen (α : Type u) := RandT (ReaderT (ULift Nat) (Except GenError)) α instance instMonadLiftGen [MonadLiftT m (ReaderT (ULift Nat) (Except GenError))] : MonadLiftT (RandGT StdGen m) Gen where monadLift := fun m => liftM ∘ m.run instance instMonadErrorGen : MonadExcept GenError Gen := by infer_instance def Gen.genFailure (e : GenError) : IO.Error := let .genError mes := e IO.userError s!"generation failure: {mes}" namespace Gen @[inline] def up (x : Gen.{u} α) : Gen (ULift.{v} α) := RandT.up (λ m size ↦ match m.run ⟨size.down⟩ with | .error (.genError s) => .error (.genError s) | .ok a => .ok ⟨a⟩) x @[inline] def down (x : Gen (ULift.{v} α)) : Gen α := RandT.down (λ m size ↦ match m.run ⟨size.down⟩ with | .error e => .error e | .ok a => .ok a.down) x /-- Lift `Random.random` to the `Gen` monad. -/ def chooseAny (α : Type u) [Random Id α] : Gen α := rand (g := StdGen) α (m := Id) |> liftM /-- Lift `BoundedRandom.randomR` to the `Gen` monad. -/ def choose (α : Type u) [LE α] [BoundedRandom Id α] (lo hi : α) (h : lo ≤ hi) : Gen {a // lo ≤ a ∧ a ≤ hi} := randBound (g := StdGen) α (m := Id) lo hi h |> liftM /-- Generate a `Nat` example between `lo` and `hi` (exclusively). 
-/ def chooseNatLt (lo hi : Nat) (h : lo < hi) : Gen {a // lo ≤ a ∧ a < hi} := do let ⟨val, h⟩ ← choose Nat (lo + 1) hi (by omega) return ⟨val - 1, by omega⟩ /-- Get access to the size parameter of the `Gen` monad. -/ def getSize : Gen Nat := return (← read).down /-- Apply a function to the size parameter. -/ def resize {α : Type _} (f : Nat → Nat) (x : Gen α) : Gen α := withReader (ULift.up ∘ f ∘ ULift.down) x /-- Choose a `Nat` between `0` and `getSize`. -/ def chooseNat : Gen Nat := do choose Nat 0 (← getSize) (by omega) /-! The following section defines various combinators for generators, which are used in the body of derived generators (for derived `Arbitrary` instances). The code for these combinators closely mirrors those used in Rocq/Coq QuickChick (see link in the **References** section below). ## References * https://github.com/QuickChick/QuickChick/blob/master/src/Generators.v -/ /-- `pick default xs n` chooses a weight & a generator `(k, gen)` from the list `xs` such that `n < k`. If `xs` is empty, the `default` generator with weight 0 is returned. -/ private def pick (default : Gen α) (xs : List (Nat × Gen α)) (n : Nat) : Nat × Gen α := match xs with | [] => (0, default) | (k, x) :: xs => if n < k then (k, x) else pick default xs (n - k) /-- Picks one of the generators in `gs` at random, returning the `default` generator if `gs` is empty. (This is a more ergonomic version of Plausible's `Gen.oneOf` which doesn't require the caller to supply a proof that the list index is in bounds.) -/ def oneOfWithDefault (default : Gen α) (gs : List (Gen α)) : Gen α := match gs with | [] => default | _ => do let idx ← Gen.choose Nat 0 (gs.length - 1) (by omega) List.getD gs idx.val default /-- `frequency` picks a generator from the list `gs` according to the weights in `gs`. If `gs` is empty, the `default` generator is returned. 
-/ def frequency (default : Gen α) (gs : List (Nat × Gen α)) : Gen α := do let total := List.sum <| List.map Prod.fst gs let n ← Gen.choose Nat 0 (total - 1) (by omega) (pick default gs n).snd /-- `sized f` constructs a generator that depends on its `size` parameter -/ def sized (f : Nat → Gen α) : Gen α := Gen.getSize >>= f variable {α : Type u} /-- Create an `Array` of examples using `x`. The size is controlled by the size parameter of `Gen`. -/ def arrayOf (x : Gen α) : Gen (Array α) := do let ⟨sz⟩ ← up chooseNat let mut res := Array.mkEmpty sz for _ in [0:sz] do res := res.push (← x) return res /-- Create a `List` of examples using `x`. The size is controlled by the size parameter of `Gen`. -/ def listOf (x : Gen α) : Gen (List α) := do return (← arrayOf x).toList /-- Given a list of example generators, choose one to create an example. -/ def oneOf (xs : Array (Gen α)) (pos : 0 < xs.size := by decide) : Gen α := do let ⟨x, _, h2⟩ ← up <| chooseNatLt 0 xs.size pos xs[x] /-- Given a list of examples, choose one to create an example. -/ def elements (xs : List α) (pos : 0 < xs.length) : Gen α := do let ⟨x, _, h2⟩ ← up <| chooseNatLt 0 xs.length pos return xs[x] open List in /-- Generate a random permutation of a given list. 
-/ def permutationOf : (xs : List α) → Gen { ys // xs ~ ys } | [] => pure ⟨[], Perm.nil⟩ | x::xs => do let ⟨ys, h1⟩ ← permutationOf xs let ⟨n, _, h3⟩ ← up <| choose Nat 0 ys.length (by omega) return ⟨ys.insertIdx n x, Perm.trans (Perm.cons _ h1) (List.perm_insertIdx _ _ h3).symm⟩ /-- Given two generators produces a tuple consisting out of the result of both -/ def prodOf {α : Type u} {β : Type v} (x : Gen α) (y : Gen β) : Gen (α × β) := do let ⟨a⟩ ← up x let ⟨b⟩ ← up y return (a, b) end Gen private def errorOfGenError {α} (m : Except GenError α) : IO α := match m with | .ok a => pure a | .error (.genError msg) => throw <| .userError ("Generation failure:" ++ msg) -- Instance that just sets the size to zero (it will be reset later) instance instMonadLiftStateIOGen : MonadLift (ReaderT (ULift Nat) (Except GenError)) IO where monadLift m := private errorOfGenError <| ReaderT.run m ⟨0⟩ /-- Execute a `Gen` inside the `IO` monad using `size` as the example size -/ def Gen.run {α : Type} (x : Gen α) (size : Nat) : IO α := letI : MonadLift (ReaderT (ULift Nat) (Except GenError)) IO := ⟨fun m => errorOfGenError <| ReaderT.run m ⟨size⟩⟩ runRand x /-- Print (at most) 10 samples of a given type to stdout for debugging. Sadly specialized to `Type 0` -/ def Gen.printSamples {t : Type} [Repr t] (g : Gen t) : IO PUnit := do let xs := List.range 10 for x in xs do try let y ← Gen.run g x IO.println s!"{repr y}" catch | .userError msg => IO.println msg | e => throw e /-- Execute a `Gen` until it actually produces an output. May diverge for bad generators! 
-/ partial def Gen.runUntil {α : Type} (attempts : Option Nat := .none) (x : Gen α) (size : Nat) : IO α := Gen.run (repeatGen attempts x) size where repeatGen (attempts : Option Nat) (x : Gen α) : Gen α := match attempts with | .some 0 => throw <| GenError.genError "Gen.runUtil: Out of attempts" | _ => try x catch | GenError.genError _ => do let _ ← Rand.next repeatGen (decr attempts) x decr : Option Nat → Option Nat | .some n => .some (n-1) | .none => .none private def test : Gen Nat := do let x : Nat ← Gen.choose Nat 0 (← Gen.getSize) (Nat.zero_le _) if x % 10 == 0 then return x else throw <| .genError "uh oh" -- This fails 9/10 times -- #eval Gen.run test 9 -- This succeeds almost always. -- #eval Gen.runUntil (attempts := .some 1000) test 9 end Plausible
.lake/packages/plausible/Plausible/Tactic.lean
module public meta import Plausible.Testable public meta import Plausible.Attr public meta section /-! ## Finding counterexamples automatically using `plausible` A proposition can be tested by writing it out as: ```lean example (xs : List Nat) (w : ∃ x ∈ xs, x < 3) : ∀ y ∈ xs, y < 5 := by plausible -- =================== -- Found problems! -- xs := [0, 5] -- x := 0 -- y := 5 -- ------------------- example (x : Nat) (h : 2 ∣ x) : x < 100 := by plausible -- =================== -- Found problems! -- x := 258 -- ------------------- example (α : Type) (xs ys : List α) : xs ++ ys = ys ++ xs := by plausible -- =================== -- Found problems! -- α := Int -- xs := [-4] -- ys := [1] -- ------------------- example : ∀ x ∈ [1,2,3], x < 4 := by plausible -- Success ``` In the first example, `plausible` is called on the following goal: ```lean xs : List Nat, h : ∃ (x : Nat) (H : x ∈ xs), x < 3 ⊢ ∀ (y : Nat), y ∈ xs → y < 5 ``` The local constants are reverted and an instance is found for `Testable (∀ (xs : List Nat), (∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5))`. The `Testable` instance is supported by instances of `Sampleable (List Nat)`, `Decidable (x < 3)` and `Decidable (y < 5)`. `plausible` builds a `Testable` instance step by step with: ``` - Testable (∀ (xs : List Nat), (∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5)) -: Sampleable (List xs) - Testable ((∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5)) - Testable (∀ x ∈ xs, x < 3 → (∀ y ∈ xs, y < 5)) - Testable (x < 3 → (∀ y ∈ xs, y < 5)) -: Decidable (x < 3) - Testable (∀ y ∈ xs, y < 5) -: Decidable (y < 5) ``` `Sampleable (List Nat)` lets us create random data of type `List Nat` in a way that helps find small counter-examples. Next, the test of the proposition hinges on `x < 3` and `y < 5` to both be decidable. The implication between the two could be tested as a whole but it would be less informative. 
Indeed, if we generate lists that only contain numbers greater than `3`, the implication will always trivially hold but we should conclude that we haven't found meaningful examples. Instead, when `x < 3` does not hold, we reject the example (i.e. we do not count it toward the 100 required positive examples) and we start over. Therefore, when `plausible` prints `Success`, it means that a hundred suitable lists were found and successfully tested. If no counter-examples are found, `plausible` behaves like `admit`. `plausible` can also be invoked using `#eval`: ```lean #eval Plausible.Testable.check (∀ (α : Type) (xs ys : List α), xs ++ ys = ys ++ xs) -- =================== -- Found problems! -- α := Int -- xs := [-4] -- ys := [1] -- ------------------- ``` For more information on writing your own `Sampleable` and `Testable` instances, see `Testing.Plausible.Testable`. -/ open Lean Elab Meta Tactic open Parser.Tactic open Plausible Decorations /-- `plausible` considers a proof goal and tries to generate examples that would contradict the statement. Let's consider the following proof goal. ```lean xs : List Nat, h : ∃ (x : Nat) (H : x ∈ xs), x < 3 ⊢ ∀ (y : Nat), y ∈ xs → y < 5 ``` The local constants will be reverted and an instance will be found for `Testable (∀ (xs : List Nat), (∃ x ∈ xs, x < 3) → (∀ y ∈ xs, y < 5))`. The `Testable` instance is supported by an instance of `Sampleable (List Nat)`, `Decidable (x < 3)` and `Decidable (y < 5)`. Examples will be created in ascending order of size (more or less) The first counter-examples found will be printed and will result in an error: ``` =================== Found problems! xs := [1, 28] x := 1 y := 28 ------------------- ``` If `plausible` successfully tests 100 examples, it acts like admit. If it gives up or finds a counter-example, it reports an error. For more information on writing your own `Sampleable` and `Testable` instances, see `Testing.Plausible.Testable`. 
Optional arguments given with `plausible (config : { ... })` * `numInst` (default 100): number of examples to test properties with * `maxSize` (default 100): final size argument Options: * `set_option trace.plausible.decoration true`: print the proposition with quantifier annotations * `set_option trace.plausible.discarded true`: print the examples discarded because they do not satisfy assumptions * `set_option trace.plausible.shrink.steps true`: trace the shrinking of counter-example * `set_option trace.plausible.shrink.candidates true`: print the lists of candidates considered when shrinking each variable * `set_option trace.plausible.instance true`: print the instances of `testable` being used to test the proposition * `set_option trace.plausible.success true`: print the tested samples that satisfy a property -/ syntax (name := plausibleSyntax) "plausible" (config)? : tactic elab_rules : tactic | `(tactic| plausible $[$cfg]?) => withMainContext do let cfg ← elabConfig (mkOptionalNode cfg) let (_, g) ← (← getMainGoal).revert ((← getLocalHyps).map (Expr.fvarId!)) g.withContext do let tgt ← g.getType let tgt' ← addDecorations tgt let cfg := { cfg with traceDiscarded := cfg.traceDiscarded || (← isTracingEnabledFor `plausible.discarded), traceSuccesses := cfg.traceSuccesses || (← isTracingEnabledFor `plausible.success), traceShrink := cfg.traceShrink || (← isTracingEnabledFor `plausible.shrink.steps), traceShrinkCandidates := cfg.traceShrinkCandidates || (← isTracingEnabledFor `plausible.shrink.candidates) } let inst ← try synthInstance (← mkAppM ``Testable #[tgt']) catch _ => throwError "\ Failed to create a `testable` instance for `{tgt}`.\ \nWhat to do:\ \n1. make sure that the types you are using have `Plausible.SampleableExt` instances\ \n (you can use `#sample my_type` if you are unsure);\ \n2. make sure that the relations and predicates that your proposition use are decidable;\ \n3. 
if your hypothesis is big consider increasing `set_option synthInstance.maxSize` to a \n higher power of two \n4. make sure that instances of `Plausible.Testable` exist that, when combined,\ \n apply to your decorated proposition:\ \n```\ \n{tgt'}\ \n```\ \n\ \nUse `set_option trace.Meta.synthInstance true` to understand what instances are missing.\ \n\ \nTry this:\ \nset_option trace.Meta.synthInstance true\ \n#synth Plausible.Testable ({tgt'})" let e ← mkAppOptM ``Testable.check #[tgt, toExpr cfg, tgt', inst] trace[plausible.decoration] "[testable decoration]\n {tgt'}" -- Porting note: I have not ported support for `trace.plausible.instance`. -- See the commented out code below from mathlib3 if you would like to implement this. -- when_tracing `plausible.instance <| do -- { inst ← summarize_instance inst >>= pp, -- trace!"\n[testable instance]{format.indent inst 2}" }, let code ← unsafe evalExpr (CoreM PUnit) (mkApp (mkConst ``CoreM) (mkConst ``PUnit [1])) e _ ← code admitGoal g -- Porting note: below is the remaining code from mathlib3 which supports the -- `trace.plausible.instance` trace option, and which has not been ported. -- namespace tactic.interactive -- open tactic plausible -- open expr -- /-- Tree structure representing a `testable` instance. -/ -- meta inductive instance_tree -- | node : name → expr → list instance_tree → instance_tree -- /-- Gather information about a `testable` instance. Given -- an expression of type `testable ?p`, gather the -- name of the `testable` instances that it is built from -- and the proposition that they test. 
-/ -- meta def summarize_instance : expr → tactic instance_tree -- | (lam n bi d b) := do -- v ← mk_local' n bi d, -- summarize_instance <| b.instantiate_var v -- | e@(app f x) := do -- `(testable %%p) ← infer_type e, -- xs ← e.get_app_args.mmap_filter (try_core ∘ summarize_instance), -- pure <| instance_tree.node e.get_app_fn.const_name p xs -- | e := do -- failed -- /-- format an `instance_tree` -/ -- meta def instance_tree.to_format : instance_tree → tactic format -- | (instance_tree.node n p xs) := do -- xs ← format.join <$> (xs.mmap <| λ t, flip format.indent 2 <$> instance_tree.to_format t), -- ys ← pformat!"testable ({p})", -- pformat!"+ {n} :{format.indent ys 2}\n{xs}" -- meta instance instance_tree.has_to_tactic_format : has_to_tactic_format instance_tree := -- ⟨ instance_tree.to_format ⟩
.lake/packages/plausible/Plausible/DeriveArbitrary.lean
module import Lean.Elab import Lean.Elab.Deriving.Basic import Lean.Elab.Deriving.Util import Plausible.Arbitrary import Plausible.ArbitraryFueled open Lean Elab Meta Parser Term open Elab.Deriving open Elab.Command /-! # Deriving Handler for `Arbitrary` This file defines a handler which automatically derives `Arbitrary` instances for inductive types. (Note that the deriving handler technically derives `ArbitraryFueled` instancces, but every `ArbitraryFueled` instance automatically results in an `Arbitrary` instance, as detailed in `Arbitrary.lean`.) Note that the resulting `Arbitrary` and `ArbitraryFueled` instance should be considered to be opaque, following the convention for the deriving handler for Mathlib's `Encodable` typeclass. Example usage: ```lean -- Datatype for binary trees inductive Tree | Leaf : Tree | Node : Nat → Tree → Tree → Tree deriving Arbitrary ``` To sample from a derived generator, users can simply call `Arbitrary.runArbitrary`, specify the type for the desired generated values and provide some Nat to act as the generator's fuel parameter (10 in the example below): ```lean #eval Arbitrary.runArbitrary (α := Tree) 10 ``` To view the code for the derived generator, users can enable trace messages using the `plausible.deriving.arbitrary` trace class as follows: ```lean set_option trace.plausible.deriving.arbitrary true ``` ## Main definitions * Deriving handler for `ArbitraryFueled` typeclass -/ namespace Plausible open Arbitrary /-- Takes the name of a constructor for an algebraic data type and returns an array containing `(argument_name, argument_type)` pairs. If the algebraic data type is defined using anonymous constructor argument syntax, i.e. ``` inductive T where C1 : τ1 → … → τn … ``` Lean produces macro scopes when we try to access the names for the constructor args. In this case, we remove the macro scopes so that the name is user-accessible. 
(This will result in constructor argument names being non-unique in the array that is returned -- it is the caller's responsibility to produce fresh names.) -/ def getCtorArgsNamesAndTypes (_header : Header) (indVal : InductiveVal) (ctorName : Name) : MetaM (Array (Name × Expr)) := do let ctorInfo ← getConstInfoCtor ctorName forallTelescopeReducing ctorInfo.type fun args _ => do let mut argNamesAndTypes := #[] for i in *...args.size do let arg := args[i]! let argType ← arg.fvarId!.getType if i < indVal.numParams then continue else let argName ← Core.mkFreshUserName `a argNamesAndTypes := argNamesAndTypes.push (argName, argType) return argNamesAndTypes -- Note: the following functions closely follow the implementation of the deriving handler for `Repr` / `BEq` -- (see https://github.com/leanprover/lean4/blob/master/src/Lean/Elab/Deriving/Repr.lean). open TSyntax.Compat in /-- Variant of `Deriving.Util.mkHeader` where we don't add an explicit binder of the form `($targetName : $targetType)` to the field `binders` (i.e. `binders` contains only implicit binders) -/ def mkHeaderWithOnlyImplicitBinders (className : Name) (arity : Nat) (indVal : InductiveVal) : TermElabM Header := do let argNames ← mkInductArgNames indVal let binders ← mkImplicitBinders argNames let targetType ← mkInductiveApp indVal argNames let mut targetNames := #[] for _ in [:arity] do targetNames := targetNames.push (← mkFreshUserName `x) let binders := binders ++ (← mkInstImplicitBinders className indVal argNames) return { binders := binders argNames := argNames targetNames := targetNames targetType := targetType } open TSyntax.Compat in /-- Variant of `Deriving.Util.mkInstanceCmds` which is specialized to creating `ArbitraryFueled` instances that have `Arbitrary` inst-implicit binders. Note that we can't use `mkInstanceCmds` out of the box, since it expects the inst-implicit binders and the instance we're creating to both belong to the same typeclass. 
-/ def mkArbitraryFueledInstanceCmds (ctx : Deriving.Context) (typeNames : Array Name) (useAnonCtor := true) : TermElabM (Array Command) := do let mut instances := #[] for i in [:ctx.typeInfos.size] do let indVal := ctx.typeInfos[i]! if typeNames.contains indVal.name then let auxFunName := ctx.auxFunNames[i]! let argNames ← mkInductArgNames indVal let binders ← mkImplicitBinders argNames let binders := binders ++ (← mkInstImplicitBinders ``Arbitrary indVal argNames) -- this line is changed from let indType ← mkInductiveApp indVal argNames let type ← `($(mkCIdent ``ArbitraryFueled) $indType) let mut val := mkIdent auxFunName if useAnonCtor then val ← `(⟨$val⟩) let instCmd ← `(instance $binders:implicitBinder* : $type := $val) instances := instances.push instCmd return instances /-- Creates a `Header` for the `Arbitrary` typeclass -/ def mkArbitraryHeader (indVal : InductiveVal) : TermElabM Header := mkHeaderWithOnlyImplicitBinders ``Arbitrary 1 indVal /-- Creates the *body* of the generator that appears in the instance of the `ArbitraryFueled` typeclass -/ def mkBody (header : Header) (inductiveVal : InductiveVal) (generatorType : TSyntax `term) : TermElabM Term := do -- Fetch the name of the target type (the type for which we are deriving a generator) let targetTypeName := inductiveVal.name -- Produce `Ident`s for the `fuel` argument for the lambda -- at the end of the generator function, as well as the `aux_arb` inner helper function let freshFuel := Lean.mkIdent (← Core.mkFreshUserName `fuel) let freshFuel' := Lean.mkIdent (← Core.mkFreshUserName `fuel') let auxArb := mkIdent `aux_arb -- Maintain two arrays which will be populated with pairs -- where the first component is a sub-generator (non-recursive / recursive) -- and the 2nd component is the generator's associated weight let mut weightedNonRecursiveGenerators := #[] let mut weightedRecursiveGenerators := #[] -- We also need to keep track of non-recursive generators without their weights, -- since some of 
Plausible's `Gen` combinators operate on generator functions let mut nonRecursiveGeneratorsNoWeights := #[] for ctorName in inductiveVal.ctors do let ctorIdent := mkIdent ctorName let ctorArgNamesTypes ← getCtorArgsNamesAndTypes header inductiveVal ctorName let (ctorArgNames, ctorArgTypes) := Array.unzip ctorArgNamesTypes /- Produce fresh names for each of the constructor's arguments. Producing fresh names is necessary in order to handle constructors expressed using the following syntax: ``` inductive Foo | C : T1 → ... → Tn ``` in which all the arguments to the constructor `C` don't have explicit names. -/ let ctorArgIdents := Lean.mkIdent <$> ctorArgNames let ctorArgIdentsTypes := Array.zip ctorArgIdents ctorArgTypes if ctorArgNamesTypes.isEmpty then -- Constructor is nullary, we can just use an generator of the form `pure ...` with weight 1, -- following the QuickChick convention. -- (For clarity, this generator is parenthesized in the code produced.) let pureGen ← `(($(Lean.mkIdent `pure) $ctorIdent)) weightedNonRecursiveGenerators := weightedNonRecursiveGenerators.push (← `((1, $pureGen))) nonRecursiveGeneratorsNoWeights := nonRecursiveGeneratorsNoWeights.push pureGen else -- Add all the constructor's argument names + types to the local context, -- then produce the body of the sub-generator (& a flag indicating if the constructor is recursive) let (generatorBody, ctorIsRecursive) ← withLocalDeclsDND ctorArgNamesTypes (fun _ => do let mut doElems := #[] -- Flag to indicate whether the constructor is recursive (initialized to `false`) let mut ctorIsRecursive := false -- Examine each argument to see which of them require recursive calls to the generator for (freshIdent, argType) in ctorArgIdentsTypes do -- If the argument's type is the same as the target type, -- produce a recursive call to the generator using `aux_arb`, -- otherwise generate a value using `arbitrary` let bindExpr ← if argType.getAppFn.constName == targetTypeName then -- We've detected that the 
constructor has a recursive argument, so we update the flag ctorIsRecursive := true `(doElem| let $freshIdent ← $(mkIdent `aux_arb):term $(freshFuel'):term) else `(doElem| let $freshIdent ← $(mkIdent ``Arbitrary.arbitrary):term) doElems := doElems.push bindExpr -- Create an expression `return C x1 ... xn` at the end of the generator, where -- `C` is the constructor name and the `xi` are the generated values for the args let pureExpr ← `(doElem| return $ctorIdent $ctorArgIdents*) doElems := doElems.push pureExpr -- Put the body of the generator together in an explicitly-parenthesized `do`-block let generatorBody ← `((do $[$doElems:doElem]*)) pure (generatorBody, ctorIsRecursive)) if !ctorIsRecursive then -- Non-recursive generators have weight 1, following the QuickChick convention weightedNonRecursiveGenerators := weightedNonRecursiveGenerators.push (← `((1, $generatorBody))) nonRecursiveGeneratorsNoWeights := nonRecursiveGeneratorsNoWeights.push generatorBody else -- Recursive generaotrs have an associated weight of `fuel' + 1`, following the QuickChick convention weightedRecursiveGenerators := weightedRecursiveGenerators.push (← ``(($freshFuel' + 1, $generatorBody))) -- Use the first non-recursive generator (without its weight) as the default generator -- If the target type has no non-recursive constructors, we emit an error message -- saying that we cannot derive a generator for that type let defaultGenerator ← Option.getDM (nonRecursiveGeneratorsNoWeights[0]?) 
(throwError m!"derive Arbitrary failed, {targetTypeName} has no non-recursive constructors") -- Create the cases for the pattern-match on the fuel argument -- If `fuel = 0`, pick one of the non-recursive generators let mut caseExprs := #[] let zeroCase ← `(Term.matchAltExpr| | $(mkIdent ``Nat.zero) => $(mkIdent ``Gen.oneOfWithDefault) $defaultGenerator [$nonRecursiveGeneratorsNoWeights,*]) caseExprs := caseExprs.push zeroCase -- If `fuel = fuel' + 1`, pick a generator (it can be non-recursive or recursive) let mut allWeightedGenerators ← `([$weightedNonRecursiveGenerators,*, $weightedRecursiveGenerators,*]) let succCase ← `(Term.matchAltExpr| | $freshFuel' + 1 => $(mkIdent ``Gen.frequency) $defaultGenerator $allWeightedGenerators) caseExprs := caseExprs.push succCase -- Create function argument for the generator fuel let fuelParam ← `(Term.letIdBinder| ($freshFuel : $(mkIdent `Nat))) let matchExpr ← `(match $freshFuel:ident with $caseExprs:matchAlt*) -- Create an instance of the `ArbitraryFueled` typeclass `(let rec $auxArb:ident $fuelParam : $generatorType := $matchExpr fun $freshFuel => $auxArb $freshFuel) /-- Creates the function definition for the derived generator -/ def mkAuxFunction (ctx : Deriving.Context) (i : Nat) : TermElabM Command := do let auxFunName := ctx.auxFunNames[i]! let indVal := ctx.typeInfos[i]! let header ← mkArbitraryHeader indVal let mut binders := header.binders -- Determine the type of the generator -- (the `Plausible.Gen` type constructor applied to the name of the `inductive` type, plus any type parameters) let targetType ← mkInductiveApp ctx.typeInfos[i]! 
header.argNames let generatorType ← `($(mkIdent ``Plausible.Gen) $targetType) -- Create the body of the generator function let mut body ← mkBody header indVal generatorType -- For mutually-recursive types, we need to create -- local `let`-definitions containing the relevant `ArbitraryFueled` instances so that -- the derived generator typechecks if ctx.usePartial then let letDecls ← mkLocalInstanceLetDecls ctx ``ArbitraryFueled header.argNames body ← mkLet letDecls body -- If we are deriving a generator for a bunch of mutually-recursive types, -- the derived generator needs to be marked `partial` (following the implementation -- of the `deriving Repr` handler) if ctx.usePartial then `(partial def $(mkIdent auxFunName):ident $binders:bracketedBinder* : $(mkIdent ``Nat) → $generatorType := $body:term) else `(def $(mkIdent auxFunName):ident $binders:bracketedBinder* : $(mkIdent ``Nat) → $generatorType := $body:term) /-- Creates a `mutual ... end` block containing the definitions of the derived generators -/ def mkMutualBlock (ctx : Deriving.Context) : TermElabM Syntax := do let mut auxDefs := #[] for i in *...ctx.typeInfos.size do auxDefs := auxDefs.push (← mkAuxFunction ctx i) `(mutual $auxDefs:command* end) /-- Creates an instance of the `ArbitraryFueled` typeclass -/ private def mkArbitraryFueledInstanceCmd (declName : Name) : TermElabM (Array Syntax) := do let ctx ← mkContext ``Arbitrary "arbitrary" declName let cmds := #[← mkMutualBlock ctx] ++ (← mkArbitraryFueledInstanceCmds ctx #[declName]) trace[plausible.deriving.arbitrary] "\n{cmds}" return cmds /-- Deriving handler which produces an instance of the `ArbitraryFueled` typeclass for each type specified in `declNames` -/ def mkArbitraryInstanceHandler (declNames : Array Name) : CommandElabM Bool := do if (← declNames.allM isInductive) then for declName in declNames do let cmds ← liftTermElabM $ mkArbitraryFueledInstanceCmd declName cmds.forM elabCommand return true else throwError "Cannot derive instance of 
Arbitrary typeclass for non-inductive types" return false initialize registerDerivingHandler ``Arbitrary mkArbitraryInstanceHandler end Plausible
.lake/packages/plausible/Plausible/ArbitraryFueled.lean
module public import Plausible.Arbitrary public import Plausible.Gen public section namespace Plausible open Gen /-- A typeclass for *fueled* random generation, i.e. a variant of the `Arbitrary` typeclass where the fuel for the generator is made explicit. - This typeclass is equivalent to Rocq QuickChick's `arbitrarySized` typeclass (QuickChick uses the `Nat` parameter as both fuel and the generator size, here we use it just for fuel, as Plausible's `Gen` type constructor already internalizes the size parameter.) -/ class ArbitraryFueled (α : Type) where /-- Takes a `Nat` and produces a random generator dependent on the `Nat` parameter (which indicates the amount of fuel to be used before failing). -/ arbitraryFueled : Nat → Gen α /-- Every `ArbitraryFueled` instance gives rise to an `Arbitrary` instance -/ meta instance [ArbitraryFueled α] : Arbitrary α where arbitrary := Gen.sized ArbitraryFueled.arbitraryFueled /-- Raised when a fueled generator fails due to insufficient fuel. -/ meta def Gen.outOfFuel : GenError := .genError "out of fuel" end Plausible
.lake/packages/plausible/Plausible/Sampleable.lean
module public meta import Lean.Elab.Command public meta import Lean.Meta.Eval public meta import Plausible.Gen public meta import Plausible.Arbitrary public meta section /-! # `SampleableExt` Class This class permits the creation samples of a given type controlling the size of those values using the `Gen` monad. # `Shrinkable` Class This class helps minimize examples by creating smaller versions of given values. When testing a proposition like `∀ n : Nat, Prime n → n ≤ 100`, `Plausible` requires that `Nat` have an instance of `SampleableExt` and for `Prime n` to be decidable. `Plausible` will then use the instance of `SampleableExt` to generate small examples of Nat and progressively increase in size. For each example `n`, `Prime n` is tested. If it is false, the example will be rejected (not a test success nor a failure) and `Plausible` will move on to other examples. If `Prime n` is true, `n ≤ 100` will be tested. If it is false, `n` is a counter-example of `∀ n : Nat, Prime n → n ≤ 100` and the test fails. If `n ≤ 100` is true, the test passes and `Plausible` moves on to trying more examples. ## Main definitions * `SampleableExt` class * `Shrinkable` class ### `SampleableExt` `SampleableExt` can be used in two ways. The first (and most common) is to simply generate values of a type directly using the `Gen` monad, if this is what you want to do then the way to go is to declare an `Arbitrary` instance, and rely on the default `selfContained` instance. Furthermore it makes it possible to express generators for types that do not lend themselves to introspection, such as `Nat → Nat`. If we test a quantification over functions the counter-examples cannot be shrunken or printed meaningfully. For that purpose, `SampleableExt` provides a proxy representation `proxy` that can be printed and shrunken as well as interpreted (using `interp`) as an object of the right type. 
If you are using it in the first way, this proxy type will simply be the type itself and the `interp` function `id`. ### `Shrinkable` Given an example `x : α`, `Shrinkable α` gives us a way to shrink it and suggest simpler examples. ## Shrinking Shrinking happens when `Plausible` finds a counter-example to a property. It is likely that the example will be more complicated than necessary so `Plausible` proceeds to shrink it as much as possible. Although equally valid, a smaller counter-example is easier for a user to understand and use. The `Shrinkable` class, , has a `shrink` function so that we can use specialized knowledge while shrinking a value. It is not responsible for the whole shrinking process however. It only has to take one step in the shrinking process. `Plausible` will repeatedly call `shrink` until no more steps can be taken. Because `shrink` guarantees that the size of the candidates it produces is strictly smaller than the argument, we know that `Plausible` is guaranteed to terminate. ## Tags random testing ## References * https://hackage.haskell.org/package/QuickCheck -/ namespace Plausible open Random Gen universe u v variable {α β : Type _} /-- Given an example `x : α`, `Shrinkable α` gives us a way to shrink it and suggest simpler examples. -/ class Shrinkable (α : Type u) where shrink : (x : α) → List α := fun _ => [] /-- `SampleableExt` can be used in two ways. The first (and most common) is to simply generate values of a type directly using the `Gen` monad, if this is what you want to do then declaring an `Arbitrary` instance is the way to go. Furthermore it makes it possible to express generators for types that do not lend themselves to introspection, such as `Nat → Nat`. If we test a quantification over functions the counter-examples cannot be shrunken or printed meaningfully. 
For that purpose, `SampleableExt` provides a proxy representation `proxy` that can be printed and shrunken as well as interpreted (using `interp`) as an object of the right type. -/ class SampleableExt (α : Sort u) where proxy : Type v [proxyRepr : Repr proxy] [shrink : Shrinkable proxy] [sample : Arbitrary proxy] interp : proxy → α attribute [instance] SampleableExt.proxyRepr attribute [instance] SampleableExt.shrink namespace SampleableExt /-- Default instance whose purpose is to simply generate values of a type directly using the `Arbitrary` instance -/ @[default_instance] instance selfContained [Repr α] [Shrinkable α] [Arbitrary α] : SampleableExt α where proxy := α proxyRepr := inferInstance shrink := inferInstance sample := inferInstance interp := id /-- This is kept for backwards compatibility -/ @[deprecated "Define an `Arbitrary` instance instead and use the default `SampleableExt` instance provided" (since := "22-10-2025")] def mkSelfContained [Repr α] [Shrinkable α] (g : Gen α) : SampleableExt α := let : Arbitrary α := ⟨g⟩ inferInstance /-- First samples a proxy value and interprets it. Especially useful if the proxy and target type are the same. -/ def interpSample (α : Type u) [SampleableExt α] : Gen α := SampleableExt.interp <$> SampleableExt.sample.arbitrary end SampleableExt section Shrinkers instance [Shrinkable α] [Shrinkable β] : Shrinkable (Sum α β) where shrink s := match s with | .inl l => Shrinkable.shrink l |>.map .inl | .inr r => Shrinkable.shrink r |>.map .inr instance Unit.shrinkable : Shrinkable Unit where shrink _ := [] /-- `Nat.shrink' n` creates a list of smaller natural numbers by successively dividing `n` by 2 . For example, `Nat.shrink 5 = [2, 1, 0]`. 
-/ def Nat.shrink (n : Nat) : List Nat := if 0 < n then let m := n/2 m :: shrink m else [] instance Nat.shrinkable : Shrinkable Nat where shrink := Nat.shrink instance Fin.shrinkable {n : Nat} : Shrinkable (Fin n.succ) where shrink m := Nat.shrink m |>.map (Fin.ofNat _) instance BitVec.shrinkable {n : Nat} : Shrinkable (BitVec n) where shrink m := Nat.shrink m.toNat |>.map (BitVec.ofNat n) instance UInt8.shrinkable : Shrinkable UInt8 where shrink m := Nat.shrink m.toNat |>.map UInt8.ofNat instance UInt16.shrinkable : Shrinkable UInt16 where shrink m := Nat.shrink m.toNat |>.map UInt16.ofNat instance UInt32.shrinkable : Shrinkable UInt32 where shrink m := Nat.shrink m.toNat |>.map UInt32.ofNat instance UInt64.shrinkable : Shrinkable UInt64 where shrink m := Nat.shrink m.toNat |>.map UInt64.ofNat instance USize.shrinkable : Shrinkable USize where shrink m := Nat.shrink m.toNat |>.map USize.ofNat /-- `Int.shrinkable` operates like `Nat.shrinkable` but also includes the negative variants. -/ instance Int.shrinkable : Shrinkable Int where shrink n := let converter n := let int := Int.ofNat n [int, -int] Nat.shrink n.natAbs |>.flatMap converter instance Bool.shrinkable : Shrinkable Bool := {} instance Char.shrinkable : Shrinkable Char := {} instance Option.shrinkable [Shrinkable α] : Shrinkable (Option α) where shrink o := match o with | some x => Shrinkable.shrink x |>.map .some | none => [] instance Prod.shrinkable [shrA : Shrinkable α] [shrB : Shrinkable β] : Shrinkable (Prod α β) where shrink := fun (fst,snd) => let shrink1 := shrA.shrink fst |>.map fun x => (x, snd) let shrink2 := shrB.shrink snd |>.map fun x => (fst, x) shrink1 ++ shrink2 instance Sigma.shrinkable [shrA : Shrinkable α] [shrB : Shrinkable β] : Shrinkable ((_ : α) × β) where shrink := fun ⟨fst,snd⟩ => let shrink1 := shrA.shrink fst |>.map fun x => ⟨x, snd⟩ let shrink2 := shrB.shrink snd |>.map fun x => ⟨fst, x⟩ shrink1 ++ shrink2 open Shrinkable /-- Shrink a list of a shrinkable type, either by 
discarding an element or shrinking an element. -/ instance List.shrinkable [Shrinkable α] : Shrinkable (List α) where shrink := fun L => (L.mapIdx fun i _ => L.eraseIdx i) ++ (L.mapIdx fun i a => (shrink a).map fun a' => L.modify i fun _ => a').flatten instance ULift.shrinkable [Shrinkable α] : Shrinkable (ULift α) where shrink u := (shrink u.down).map ULift.up instance String.shrinkable : Shrinkable String where shrink s := (shrink s.toList).map String.mk instance Array.shrinkable [Shrinkable α] : Shrinkable (Array α) where shrink xs := (shrink xs.toList).map Array.mk instance Subtype.shrinkable {α : Type u} {β : α → Prop} [Shrinkable α] [∀ x, Decidable (β x)] : Shrinkable {x : α // β x} where shrink x := let val := x.val let candidates := shrink val let filter x := do if h : β x then some ⟨x, h⟩ else none candidates.filterMap filter end Shrinkers section Samplers open SampleableExt open Arbitrary instance arbitraryProxy [SampleableExt α] : Arbitrary (proxy α) := sample instance Sum.SampleableExt [SampleableExt α] [SampleableExt β] : SampleableExt (Sum α β) where proxy := Sum (proxy α) (proxy β) sample := inferInstance interp s := match s with | .inl l => .inl (interp l) | .inr r => .inr (interp r) instance [SampleableExt α] [SampleableExt β] : SampleableExt ((_ : α) × β) where proxy := (_ : proxy α) × proxy β sample := inferInstance interp s := ⟨interp s.fst, interp s.snd⟩ instance Option.sampleableExt [SampleableExt α] : SampleableExt (Option α) where proxy := Option (proxy α) sample := inferInstance interp o := o.map interp instance Prod.sampleableExt {α : Type u} {β : Type v} [SampleableExt α] [SampleableExt β] : SampleableExt (α × β) where proxy := Prod (proxy α) (proxy β) proxyRepr := inferInstance shrink := inferInstance sample := inferInstance interp := Prod.map interp interp instance Prop.sampleableExt : SampleableExt Prop where proxy := Bool proxyRepr := inferInstance sample := inferInstance shrink := inferInstance interp := Coe.coe instance 
List.sampleableExt [SampleableExt α] : SampleableExt (List α) where proxy := List (proxy α) sample := inferInstance interp := List.map interp instance ULift.sampleableExt [SampleableExt α] : SampleableExt (ULift α) where proxy := proxy α sample := sample interp a := ⟨interp a⟩ instance Array.sampleableExt [SampleableExt α] : SampleableExt (Array α) where proxy := Array (proxy α) sample := inferInstance interp := Array.map interp end Samplers /-- An annotation for values that should never get shrunk. -/ @[expose] def NoShrink (α : Type u) := α namespace NoShrink open SampleableExt def mk (x : α) : NoShrink α := x def get (x : NoShrink α) : α := x instance inhabited [inst : Inhabited α] : Inhabited (NoShrink α) := inst instance repr [inst : Repr α] : Repr (NoShrink α) := inst instance shrinkable : Shrinkable (NoShrink α) where shrink := fun _ => [] instance arbitrary [arb : Arbitrary α] : Arbitrary (NoShrink α) := arb instance sampleableExt [SampleableExt α] [Repr α] : SampleableExt (NoShrink α) where proxy := NoShrink (proxy α) interp := interp end NoShrink open Lean Meta Elab /-- `e` is a type to sample from, this can either be a type that implements `SampleableExt` or `Gen α` directly. For this return: - the universe level of the `Type u` that the relevant type to sample lives in. 
- the actual type `α` to sample from - a `Repr α` instance - a `Gen α` generator to run in order to sample -/ private def mkGenerator (e : Expr) : MetaM (Level × Expr × Expr × Expr) := do let exprTyp ← inferType e let .sort u ← whnf (← inferType exprTyp) | throwError m!"{exprTyp} is not a type" let .succ u := u | throwError m!"{exprTyp} is not a type with computational content" match_expr exprTyp with | Gen α => let reprInst ← synthInstance (mkApp (mkConst ``Repr [u]) α) return ⟨u, α, reprInst, e⟩ | _ => let v ← mkFreshLevelMVar let sampleableExtInst ← synthInstance (mkApp (mkConst ``SampleableExt [u, v]) e) let v ← instantiateLevelMVars v let reprInst := mkApp2 (mkConst ``SampleableExt.proxyRepr [u, v]) e sampleableExtInst let arb := mkApp2 (mkConst ``SampleableExt.sample [u, v]) e sampleableExtInst let gen := mkApp2 (mkConst ``Arbitrary.arbitrary [v]) e arb let typ := mkApp2 (mkConst ``SampleableExt.proxy [u, v]) e sampleableExtInst return ⟨v, typ, reprInst, gen⟩ /-- `#sample type`, where `type` has an instance of `SampleableExt`, prints ten random values of type `type` using an increasing size parameter. ```lean #sample Nat -- prints -- 0 -- 0 -- 2 -- 24 -- 64 -- 76 -- 5 -- 132 -- 8 -- 449 -- or some other sequence of numbers #sample List Int -- prints -- [] -- [1, 1] -- [-7, 9, -6] -- [36] -- [-500, 105, 260] -- [-290] -- [17, 156] -- [-2364, -7599, 661, -2411, -3576, 5517, -3823, -968] -- [-643] -- [11892, 16329, -15095, -15461] -- or whatever ``` -/ elab "#sample " e:term : command => Command.runTermElabM fun _ => do let e ← Elab.Term.elabTermAndSynthesize e none let ⟨_, α, repr, gen⟩ ← mkGenerator e let printSamples := mkApp3 (mkConst ``Gen.printSamples []) α repr gen let code ← unsafe evalExpr (IO PUnit) (mkApp (mkConst ``IO) (mkConst ``PUnit [1])) printSamples _ ← code end Plausible
.lake/packages/plausible/Plausible/Arbitrary.lean
module public meta import Plausible.Gen public meta section /-! # `Arbitrary` Typeclass The `Arbitrary` typeclass represents types for which there exists a random generator suitable for property-based testing, similar to Haskell QuickCheck's `Arbitrary` typeclass and Rocq/Coq QuickChick's `Gen` typeclass. (Note: the `SampleableExt` involvs types which have *both* a generator & a shrinker, and possibly a non trivial `proxy` type, whereas `Arbitrary` describes types which have a generator only.) ## Main definitions * `Arbitrary` typeclass ## References * https://hackage.haskell.org/package/QuickCheck * https://softwarefoundations.cis.upenn.edu/qc-current/QuickChickInterface.html -/ namespace Plausible open Gen universe u /-- The `Arbitrary` typeclass represents types for which there exists a random generator suitable for property-based testing. - This is the equivalent of Haskell QuickCheck's `Arbitrary` typeclass. - In QuickChick, this typeclass is called `Gen`, but `Gen` is already a reserved keyword in Plausible, so we call this typeclass `Arbitrary` following the Haskell QuickCheck convention). -/ class Arbitrary (α : Type u) where /-- A random generator for values of the given type. -/ arbitrary : Gen α namespace Arbitrary /-- Samples from the generator associated with the `Arbitrary` instance for a type, using `size` as the size parameter for the generator. To invoke this function, you will need to specify what type `α` is, for example by doing `runArbitrary (α := Nat) 10`. 
-/ def runArbitrary [Arbitrary α] (size : Nat) : IO α := Gen.run Arbitrary.arbitrary size end Arbitrary section Instances open Arbitrary instance Sum.Arbitrary [Arbitrary α] [Arbitrary β] : Arbitrary (Sum α β) where arbitrary := do match ← chooseAny Bool with | true => return .inl (← arbitrary) | false => return .inr (← arbitrary) instance Unit.Arbitrary : Arbitrary Unit := ⟨return ()⟩ instance Sigma.Arbitrary [Arbitrary α] [Arbitrary β] : Arbitrary ((_ : α) × β) where arbitrary := do let p ← prodOf arbitrary arbitrary return ⟨p.fst, p.snd⟩ instance Nat.Arbitrary : Arbitrary Nat where arbitrary := do choose Nat 0 (← getSize) (Nat.zero_le _) instance Fin.Arbitrary {n : Nat} : Arbitrary (Fin (n.succ)) where arbitrary := do let m ← choose Nat 0 (min (← getSize) n) (Nat.zero_le _) return (Fin.ofNat _ m) instance BitVec.Arbitrary {n : Nat} : Arbitrary (BitVec n) where arbitrary := do let m ← choose Nat 0 (min (← getSize) (2^n)) (Nat.zero_le _) return BitVec.ofNat _ m instance UInt8.Arbitrary : Arbitrary UInt8 where arbitrary := do let n ← choose Nat 0 (min (← getSize) UInt8.size) (Nat.zero_le _) return UInt8.ofNat n instance UInt16.Arbitrary : Arbitrary UInt16 where arbitrary := do let n ← choose Nat 0 (min (← getSize) UInt16.size) (Nat.zero_le _) return UInt16.ofNat n instance UInt32.Arbitrary : Arbitrary UInt32 where arbitrary := do let n ← choose Nat 0 (min (← getSize) UInt32.size) (Nat.zero_le _) return UInt32.ofNat n instance UInt64.Arbitrary : Arbitrary UInt64 where arbitrary := do let n ← choose Nat 0 (min (← getSize) UInt64.size) (Nat.zero_le _) return UInt64.ofNat n instance USize.Arbitrary : Arbitrary USize where arbitrary := do let n ← choose Nat 0 (min (← getSize) USize.size) (Nat.zero_le _) return USize.ofNat n instance Int.Arbitrary : Arbitrary Int where arbitrary := do choose Int (-(← getSize)) (← getSize) (by omega) instance Bool.Arbitrary : Arbitrary Bool where arbitrary := chooseAny Bool /-- This can be specialized into customized `Arbitrary Char` 
instances. The resulting instance has `1 / p` chances of making an unrestricted choice of characters and it otherwise chooses a character from `chars` with uniform probability. -/ def Char.arbitraryFromList (p : Nat) (chars : List Char) (pos : 0 < chars.length) : Arbitrary Char where arbitrary := do let x ← choose Nat 0 p (Nat.zero_le _) if x.val == 0 then let n ← arbitrary pure <| Char.ofNat n else elements chars pos /-- Pick a simple ASCII character 2/3s of the time, and otherwise pick any random `Char` encoded by the next `Nat` (or `\0` if there is no such character) -/ instance Char.arbitraryDefaultInstance : Arbitrary Char := Char.arbitraryFromList 3 " 0123abcABC:,;`\\/".toList (by decide) instance Option.Arbitrary [Arbitrary α] : Arbitrary (Option α) where arbitrary := do match ← chooseAny Bool with | true => return none | false => return some (← arbitrary) instance Prod.Arbitrary {α : Type u} {β : Type v} [Arbitrary α] [Arbitrary β] : Arbitrary (α × β) where arbitrary := prodOf arbitrary arbitrary instance List.Arbitrary [Arbitrary α] : Arbitrary (List α) where arbitrary := Gen.listOf arbitrary instance ULift.Arbitrary [Arbitrary α] : Arbitrary (ULift α) where arbitrary := do let x : α ← arbitrary; return ⟨x⟩ instance String.Arbitrary : Arbitrary String where arbitrary := return String.mk (← Gen.listOf arbitrary) instance Array.Arbitrary [Arbitrary α] : Arbitrary (Array α) := ⟨Gen.arrayOf arbitrary⟩ end Instances end Plausible
.lake/packages/plausible/Plausible/Random.lean
module meta import Init.Data.Random public meta section /-! # Rand Monad and Random Class This module provides tools for formulating computations guided by randomness and for defining objects that can be created randomly. ## Main definitions * `RandT` and `RandGT` monad transformers for computations guided by randomness; * `Rand` and `RandG` monads as special cases of the above * `Random` class for objects that can be generated randomly; * `random` to generate one object; * `BoundedRandom` class for objects that can be generated randomly inside a range; * `randomR` to generate one object inside a range; * `runRand` to run a randomized computation inside any monad that has access to `stdGenRef`. ## References * Similar library in Haskell: https://hackage.haskell.org/package/MonadRandom -/ set_option linter.missingDocs true namespace Plausible /-- A monad transformer to generate random objects using the generic generator type `g` -/ abbrev RandGT (g : Type) := StateT (ULift g) /-- A monad to generate random objects using the generator type `g`. -/ abbrev RandG (g : Type) := RandGT g Id /-- A monad transformer to generate random objects using the generator type `StdGen`. `RandT m α` should be thought of a random value in `m α`. -/ abbrev RandT := RandGT StdGen /-- A monad to generate random objects using the generator type `StdGen`. -/ abbrev Rand := RandG StdGen instance [MonadLift m n] : MonadLiftT (RandGT g m) (RandGT g n) where monadLift x := fun s => x s /-- `Random m α` gives us machinery to generate values of type `α` in the monad `m`. Note that `m` is a parameter as some types may only be sampleable with access to a certain monad. -/ class Random (m) (α : Type u) where /-- Generate a value of type `α` randomly using generator `g`. -/ random [RandomGen g] : RandGT g m α /-- `BoundedRandom m α` gives us machinery to generate values of type `α` between certain bounds in the monad `m`. 
-/ class BoundedRandom (m) (α : Type u) [LE α] where /-- Generate a value of type `α` between `lo` and `hi` randomly using generator `g`. -/ randomR {g : Type} (lo hi : α) (h : lo ≤ hi) [RandomGen g] : RandGT g m {a // lo ≤ a ∧ a ≤ hi} /-- Given a random generator for `α`, we can convert it to a random generator for `ULift α`. -/ @[inline] protected def RandT.up {α : Type u} {m : Type u → Type w} {m' : Type (max u v) → Type w'} {g : Type} [RandomGen g] [Monad m] [Monad m'] (m_up : ∀ {α}, m α → m' (ULift α)) (x : RandGT g m α) : RandGT g m' (ULift.{v} α) := do let ⟨val, gen⟩ ← m_up <| x.run ⟨(← get).down⟩ set <| ULift.up gen.down return ⟨val⟩ /-- Given a random generator for `ULift α`, we can convert it to a random generator for `α`. -/ @[inline] protected def RandT.down {α : Type u} {m : Type (max u v) → Type w} {m' : Type u → Type w'} {g : Type} [RandomGen g] [Monad m] [Monad m'] (m_down : ∀ {α}, m (ULift α) → m' α) (x : RandGT g m (ULift.{v} α) ) : RandGT g m' α := do let gen := (← get).down let ⟨val, gen⟩ ← m_down do let ⟨⟨val⟩, ⟨gen⟩⟩ ← x.run ⟨gen⟩ pure <| .up (val, gen) set <| ULift.up gen return val namespace Rand /-- Generate one more `Nat` -/ def next [RandomGen g] [Monad m] : RandGT g m Nat := do let rng := (← get).down let (res, new) := RandomGen.next rng set (ULift.up new) return res /-- Create a new random number generator distinct from the one stored in the state -/ def split {g : Type} [RandomGen g] [Monad m] : RandGT g m g := do let rng := (← get).down let (r1, r2) := RandomGen.split rng set (ULift.up r1) return r2 /-- Get the range of Nat that can be generated by the generator `g` -/ def range {g : Type} [RandomGen g] [Monad m] : RandGT g m (Nat × Nat) := do let rng := (← get).down return RandomGen.range rng /-- Given a random generator for `α`, we can convert it to a random generator for `ULift α`. 
-/ @[inline] protected def up {α : Type u} {g : Type} [RandomGen g] (x : RandG g α) : RandG g (ULift.{v} α) := do RandT.up (fun x => pure ⟨Id.run x⟩) x /-- Given a random generator for `ULift α`, we can convert it to a random generator for `α`. -/ @[inline] protected def down {α : Type u} {g : Type} [RandomGen g] (x : RandG g (ULift.{v} α)) : RandG g α := RandT.down (fun x => pure (Id.run x).down) x end Rand namespace Random open Rand variable [Monad m] /-- Generate a random value of type `α`. -/ def rand (α : Type u) [Random m α] [RandomGen g] : RandGT g m α := Random.random /-- Generate a random value of type `α` between `x` and `y` inclusive. -/ def randBound (α : Type u) [LE α] [BoundedRandom m α] (lo hi : α) (h : lo ≤ hi) [RandomGen g] : RandGT g m {a // lo ≤ a ∧ a ≤ hi} := BoundedRandom.randomR lo hi h /-- Generate a random `Fin`. -/ def randFin {n : Nat} [RandomGen g] : RandGT g m (Fin n.succ) := fun ⟨g⟩ => return randNat g 0 n |>.map (Fin.ofNat _) ULift.up instance {n : Nat} : Random m (Fin n.succ) where random := randFin /-- Generate a random `Bool`. -/ def randBool [RandomGen g] : RandGT g m Bool := return (← rand (Fin 2)) == 1 instance : Random m Bool where random := randBool instance : BoundedRandom m Nat where randomR lo hi h _ := do let z ← rand (Fin (hi - lo).succ) return ⟨lo + z.val, by omega, by omega⟩ instance : BoundedRandom m Int where randomR lo hi h _ := do let ⟨z, _, h2⟩ ← randBound Nat 0 (Int.natAbs <| hi - lo) (by omega) return ⟨z + lo, by omega, by omega⟩ instance {n : Nat} : BoundedRandom m (Fin n) where randomR lo hi h _ := do let ⟨r, h1, h2⟩ ← randBound Nat lo.val hi.val h return ⟨⟨r, by omega⟩, h1, h2⟩ instance {n : Nat} : BoundedRandom m (BitVec n) where randomR lo hi h _ := do let ⟨r, h1, h2⟩ ← randBound Nat lo.toNat hi.toNat h return ⟨⟨r, by omega⟩, h1, h2⟩ end Random open IO variable {m : Type _ → Type _} variable [Monad m] [MonadLiftT (ST RealWorld) m] /-- Computes a `RandT m α` using the global `stdGenRef` as RNG. 
Note that: - `stdGenRef` is not necessarily properly seeded on program startup as of now and will therefore be deterministic. - `stdGenRef` is not thread local, hence two threads accessing it at the same time will get the exact same generator. -/ def runRand (cmd : RandT m α) : m α := do let stdGen ← stdGenRef.get let (res, new) ← StateT.run cmd (ULift.up stdGen) let _ ← stdGenRef.set new.down return res /-- Run the random computaton `cmd` with `seed` for the RNG. -/ def runRandWith (seed : Nat) (cmd : RandT m α) : m α := do return (← cmd.run (ULift.up <| mkStdGen seed)).1 end Plausible
.lake/packages/plausible/Test/Testable.lean
import Plausible.Testable open Plausible structure MyType where x : Nat y : Nat h : x ≤ y deriving Repr instance : Shrinkable MyType where shrink := fun ⟨x, y, _⟩ => let proxy := Shrinkable.shrink (x, y - x) proxy.map (fun (fst, snd) => ⟨fst, fst + snd, by omega⟩) instance : Arbitrary MyType := ⟨do let x ← SampleableExt.interpSample Nat let xyDiff ← SampleableExt.interpSample Nat return ⟨x, x + xyDiff, by omega⟩⟩ -- TODO: this is a noisy test. -- We can't use `#guard_msgs` because the number of attempts to non-deterministic. #eval Testable.check <| ∀ a b : MyType, a.y ≤ b.x → a.x ≤ b.y
.lake/packages/plausible/Test/Tactic.lean
import Plausible /-! Demonstrate that Plausible can handle the basic types from core: - Sum - Sigma - Unit - Prod - Bool - Nat - Fin - UIntX - BitVec - Char - Option - List - String - Array -/ /-- error: Found a counter-example! -/ #guard_msgs in example (a b : Sum Nat Nat) : a = b := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a b : Σ n : Nat, Nat) : a.fst = b.snd := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a b : Unit) : a ≠ b := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (x y : Nat × Unit) : x = y := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a b : Bool) : a = b := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a b c : Nat) : a + (b - c) = (a + b) - c := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : Fin (n + 1)) : a + 1 > a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : BitVec n) : a + 1 > a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : UInt8) : a - 1 < a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : UInt16) : a - 1 < a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : UInt32) : a - 1 < a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : UInt64) : a - 1 < a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : USize) : a - 1 < a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! 
-/ #guard_msgs in example (a : Char) : a ≠ a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (a : Option Char) : a ≠ a := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (xs ys : List Nat) : xs.length = ys.length → xs = ys := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (xs ys : String) : xs.length = ys.length → xs = ys := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (xs ys : Array Nat) : xs.size = ys.size → xs = ys := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in example (xs : List Int) (f : Int → Int) : xs.map f = xs := by plausible (config := {quiet := true}) /-- info: Unable to find a counter-example --- warning: declaration uses 'sorry' -/ #guard_msgs in example (a : Sum Nat Nat) : a = a := by plausible /-- warning: Gave up after failing to generate values that fulfill the preconditions 100 times. --- warning: declaration uses 'sorry' -/ #guard_msgs in example (a b : Sum Nat Nat) : a ≠ a → a = b := by plausible (config := {numInst := 100}) -- https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/slim_check.20giving.20wrong.20counterexamples.3F/near/420008365 open Nat in /-- info: Unable to find a counter-example --- warning: declaration uses 'sorry' -/ #guard_msgs in theorem testBit_pred : testBit (pred x) i = (decide (0 < x) && (Bool.xor ((List.range i).all fun j => ! testBit x j) (testBit x i))) := by plausible /-- error: Found a counter-example! -/ #guard_msgs in theorem ulift_nat (f : ULift.{1} Nat) : f = ⟨0⟩ := by plausible (config := {quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in theorem type_u (α : Type u) (l : List α) : l = l ++ l := by plausible (config := {quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/DeriveSTLCTermTypeGenerators.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-- Base types in the Simply-Typed Lambda Calculus (STLC) (either Nat or functions) -/ inductive type where | Nat : type | Fun : type → type → type deriving BEq, DecidableEq, Repr /-- Terms in the STLC extended with naturals and addition -/ inductive term where | Const: Nat → term | Add: term → term → term | Var: Nat → term | App: term → term → term | Abs: type → term → term deriving BEq, Repr -- Invoke deriving instance handler for the `Arbitrary` typeclass on `type` and `term` set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryType.arbitrary : Nat → Plausible.Gen (@type✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@type✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure type.Nat) [(pure type.Nat)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure type.Nat) [(1, (pure type.Nat)), (fuel'✝ + 1, (do let a✝ ← aux_arb fuel'✝ let a✝¹ ← aux_arb fuel'✝ return type.Fun a✝ a✝¹))]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@type✝) := ⟨instArbitraryType.arbitrary⟩] --- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryTerm.arbitrary : Nat → Plausible.Gen (@term✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@term✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (do let a✝ ← Plausible.Arbitrary.arbitrary return term.Const a✝) [(do let a✝ ← Plausible.Arbitrary.arbitrary return term.Const a✝), (do let a✝¹ ← Plausible.Arbitrary.arbitrary return term.Var a✝¹)] | fuel'✝ + 1 => Plausible.Gen.frequency (do let a✝ ← Plausible.Arbitrary.arbitrary return term.Const a✝) [(1, (do let a✝ ← Plausible.Arbitrary.arbitrary return term.Const a✝)), (1, (do let a✝¹ ← Plausible.Arbitrary.arbitrary return term.Var a✝¹)), (fuel'✝ + 1, (do let a✝² ← aux_arb fuel'✝ let a✝³ ← aux_arb 
fuel'✝ return term.Add a✝² a✝³)), (fuel'✝ + 1, (do let a✝⁴ ← aux_arb fuel'✝ let a✝⁵ ← aux_arb fuel'✝ return term.App a✝⁴ a✝⁵)), (fuel'✝ + 1, (do let a✝⁶ ← Plausible.Arbitrary.arbitrary let a✝⁷ ← aux_arb fuel'✝ return term.Abs a✝⁶ a✝⁷))]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@term✝) := ⟨instArbitraryTerm.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for type, term -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` -- for both `type` & `term` /-- info: instArbitraryFueledType -/ #guard_msgs in #synth ArbitraryFueled type /-- info: instArbitraryFueledTerm -/ #guard_msgs in #synth ArbitraryFueled term /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary type /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary term /-! Test that we can use the derived generator to find counterexamples. We construct two faulty properties: 1. `∀ (term : term), isValue term = true` 2. `∀ (ty : type), isFunctionType ty = true` Both of these properties are false, since there exist terms in the STLC which are not values (e.g. function applications), and there are types which are not function types (e.g. `Nat`). We then test that the respective derived generators for `term`s and `type`s generate counterexamples which refute the aforementioned properties. -/ /-- Determines whether a `term` is a value. (Note that only constant `Nat`s and lambda abstractions are considered values in the STLC.) 
-/ def isValue (tm : term) : Bool := match tm with | .Const _ | .Abs _ _ => true | _ => false /-- Determines whether a `type` is a function type -/ def isFunctionType (ty : type) : Bool := match ty with | .Nat => false | .Fun _ _ => true /-- `Shrinkable` instance for `type` -/ instance : Shrinkable type where shrink (ty : type) := match ty with | .Nat => [] | .Fun t1 t2 => [.Nat, t1, t2] /-- `Shrinkable` instance for `term` -/ instance : Shrinkable term where shrink := shrinkTerm where shrinkTerm (tm : term) : List term := match tm with | .Const _ | .Var _ => [] | .App e1 e2 | .Add e1 e2 => shrinkTerm e1 ++ shrinkTerm e2 | .Abs _ e => shrinkTerm e /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ (term : term), isValue term) (cfg := {numInst := 10, maxSize := 5, quiet := true}) /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ (ty : type), isFunctionType ty) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/StructureTest.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-! To test whether the derived generator can handle `structure`s with named fields, we define a dummy `structure`: ```lean structure Foo where stringField : String boolField : Bool natField : Nat ``` To test whether the derived generator finds counterexamples, we create a faulty property: ```lean ∀ foo : Foo, foo.stringField.isEmpty || !foo.boolField || foo.natField == 0) ``` The derived generator should be able to generate inhabitants of `Foo` where `stringField` is non-empty, where `boolField` is false and `natField` is non-zero. -/ /-- Dummy `structure` with named fields -/ structure Foo where stringField : String boolField : Bool natField : Nat deriving Repr -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryFoo.arbitrary : Nat → Plausible.Gen (@Foo✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@Foo✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return Foo.mk a✝ a✝¹ a✝²) [(do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return Foo.mk a✝ a✝¹ a✝²)] | fuel'✝ + 1 => Plausible.Gen.frequency (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return Foo.mk a✝ a✝¹ a✝²) [(1, (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return Foo.mk a✝ a✝¹ a✝²)), ]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@Foo✝) := ⟨instArbitraryFoo.arbitrary⟩] -/ #guard_msgs in deriving 
instance Arbitrary for Foo /-- info: instArbitraryFueledFoo -/ #guard_msgs in #synth ArbitraryFueled Foo /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary Foo /-- `Shrinkable` instance for `Foo`, which shrinks each of its constituent fields -/ instance : Shrinkable Foo where shrink (foo : Foo) := let strings := Shrinkable.shrink foo.stringField let bools := Shrinkable.shrink foo.boolField let nats := Shrinkable.shrink foo.natField let zippedFields := List.zip (List.zip strings bools) nats (fun ((s, b), n) => Foo.mk s b n) <$> zippedFields /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ foo : Foo, foo.stringField.isEmpty || !foo.boolField || foo.natField == 0) (cfg := {numInst := 100, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/MissingNonRecursiveConstructorTest.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr open Plausible /-- A variant of a binary tree datatype where the non-recursive `Leaf` constructor is missing. We are unable to derive a generator for this type, since it is impossible to construct an inhabitant of this type. The test below checks that an appropriate error message is emitted by the deriving handler. -/ inductive TreeNoLeaf where | Node : Nat → TreeNoLeaf → TreeNoLeaf → TreeNoLeaf set_option trace.plausible.deriving.arbitrary true in /-- error: derive Arbitrary failed, TreeNoLeaf has no non-recursive constructors -/ #guard_msgs in deriving instance Arbitrary for TreeNoLeaf
.lake/packages/plausible/Test/DeriveArbitrary/MutuallyRecursiveTypeTest.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr import Plausible.Testable open Plausible /- # Testing the deriving `Arbitrary` handler on mutually recursive inductive types To test that our derived generators can handle mutually recursive types, we define two mutually recursive types (one `inductive` and one `structure`) to represent a binary tree. (Example adapted from Cornell CS 3110 lecture notes https://www.cs.cornell.edu/courses/cs3110/2008fa/lectures/lec04.html) ```lean mutual inductive NatTree | Empty : NatTree | Node : Node → NatTree deriving Nonempty structure Node where value : Nat left : NatTree right : NatTree deriving Nonempty end ``` Note that the user needs to add the `deriving Nonempty` annotation to each type in the mutually recursive definition -- this is needed in order to convince Lean that the type `Nat → Plausible.Gen NatTree` is empty during the derivation process. -/ mutual /-- A (possibly empty) binary tree -/ inductive NatTree | Empty : NatTree | Node : Node → NatTree deriving Nonempty, Repr /-- A child node in a tree, containing a value and two subtrees -/ structure Node where value : Nat left : NatTree right : NatTree deriving Nonempty end set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual partial def instArbitraryNatTree.arbitrary_1 : Nat → Plausible.Gen (@NatTree✝) := let localinst✝ : Plausible.ArbitraryFueled✝ (@NatTree✝) := ⟨instArbitraryNatTree.arbitrary_1⟩; let localinst✝¹ : Plausible.ArbitraryFueled✝ (@Node✝) := ⟨instArbitraryNatTree.arbitrary_2⟩; let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@NatTree✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure NatTree.Empty) [(pure NatTree.Empty), (do let a✝ ← Plausible.Arbitrary.arbitrary return NatTree.Node a✝)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure NatTree.Empty) [(1, (pure NatTree.Empty)), (1, (do let a✝ ← Plausible.Arbitrary.arbitrary return NatTree.Node a✝)), ]) 
fun fuel✝ => aux_arb fuel✝ partial def instArbitraryNatTree.arbitrary_2 : Nat → Plausible.Gen (@Node✝) := let localinst✝² : Plausible.ArbitraryFueled✝ (@NatTree✝) := ⟨instArbitraryNatTree.arbitrary_1⟩; let localinst✝³ : Plausible.ArbitraryFueled✝ (@Node✝) := ⟨instArbitraryNatTree.arbitrary_2⟩; let rec aux_arb (fuel✝¹ : Nat) : Plausible.Gen (@Node✝) := (match fuel✝¹ with | Nat.zero => Plausible.Gen.oneOfWithDefault (do let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary let a✝³ ← Plausible.Arbitrary.arbitrary return Node.mk a✝¹ a✝² a✝³) [(do let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary let a✝³ ← Plausible.Arbitrary.arbitrary return Node.mk a✝¹ a✝² a✝³)] | fuel'✝¹ + 1 => Plausible.Gen.frequency (do let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary let a✝³ ← Plausible.Arbitrary.arbitrary return Node.mk a✝¹ a✝² a✝³) [(1, (do let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary let a✝³ ← Plausible.Arbitrary.arbitrary return Node.mk a✝¹ a✝² a✝³)), ]) fun fuel✝¹ => aux_arb fuel✝¹ end, instance : Plausible.ArbitraryFueled✝ (@NatTree✝) := ⟨instArbitraryNatTree.arbitrary_1⟩] -/ #guard_msgs in deriving instance Arbitrary for NatTree -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` /-- info: instArbitraryFueledNatTree -/ #guard_msgs in #synth ArbitraryFueled NatTree /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary NatTree /-- `search tree x` recursively searches for a value `x` in `tree`, returning a `Bool` indicating `x`'s membership in `tree` (Note that `tree` may not obey the binary search tree invariant, so this algorithm is not the most efficient.) 
-/ def search (tree : NatTree) (x : Nat) : Bool := match tree with | .Empty => false | .Node { value, left, right } => value == x || search left x || search right x /-- A shrinker for `NatTree`, adapted from Penn CIS 5520 lecture notes https://www.seas.upenn.edu/~cis5520/current/lectures/stub/05-quickcheck/QuickCheck.html -/ def shrinkNatTree (tree : NatTree) : List NatTree := match tree with | .Empty => [] | .Node {value := x, left := l, right := r} => [.Empty, l, r] -- left and right trees are smaller ++ (fun l' => NatTree.Node $ Node.mk x l' r) <$> shrinkNatTree l -- shrink left subtree ++ (fun r' => NatTree.Node $ Node.mk x l r') <$> shrinkNatTree r -- shrink right tree ++ (fun x' => NatTree.Node $ Node.mk x' l r) <$> Shrinkable.shrink x -- shrink the value /-- `Shrinkable` instance for `NatTree` -/ instance : Shrinkable NatTree where shrink := shrinkNatTree /-! To test whether the derived generator can generate counterexamples, we create an erroneous property `∀ tree : NatTree, search tree 5`, and ask Plausible to falsify it. (This property is false, since there exist trees which don't contain the value 5, e.g. the `Empty` tree.) -/ /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ tree : NatTree, search tree 5) (cfg := {numInst := 10, maxSize := 2, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/DeriveRegExpGenerator.lean
import Plausible.Attr import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-- An inductive datatype representing regular expressions (where "characters" are `Nat`s). Adapted from the Inductive Propositions chapter of Software Foundations, volume 1: See https://softwarefoundations.cis.upenn.edu/lf-current/IndProp.html and search for "Case Study: Regular Expressions". The `RegExp`s below are non-polymorphic in the character type. -/ inductive RegExp : Type where | EmptySet : RegExp | EmptyStr : RegExp | Char : Nat → RegExp -- using Nat instead of Char | App : RegExp → RegExp → RegExp | Union : RegExp → RegExp → RegExp | Star : RegExp → RegExp deriving Repr, BEq set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryRegExp.arbitrary : Nat → Plausible.Gen (@RegExp✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@RegExp✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure RegExp.EmptySet) [(pure RegExp.EmptySet), (pure RegExp.EmptyStr), (do let a✝ ← Plausible.Arbitrary.arbitrary return RegExp.Char a✝)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure RegExp.EmptySet) [(1, (pure RegExp.EmptySet)), (1, (pure RegExp.EmptyStr)), (1, (do let a✝ ← Plausible.Arbitrary.arbitrary return RegExp.Char a✝)), (fuel'✝ + 1, (do let a✝¹ ← aux_arb fuel'✝ let a✝² ← aux_arb fuel'✝ return RegExp.App a✝¹ a✝²)), (fuel'✝ + 1, (do let a✝³ ← aux_arb fuel'✝ let a✝⁴ ← aux_arb fuel'✝ return RegExp.Union a✝³ a✝⁴)), (fuel'✝ + 1, (do let a✝⁵ ← aux_arb fuel'✝ return RegExp.Star a✝⁵))]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@RegExp✝) := ⟨instArbitraryRegExp.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for RegExp -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` /-- info: instArbitraryFueledRegExp -/ #guard_msgs in #synth ArbitraryFueled RegExp /-- 
info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary RegExp /-! Test that we can use the derived generator to find counterexamples. We construct a faulty property, which (erroneously) states that all regular expressions never accept any string. (Example taken from UPenn CIS 5520 https://www.seas.upenn.edu/~cis5520/current/hw/hw04/RegExp.html) ```lean ∀ r : RegExp, neverMatchesAnyString r == True ``` (This property is faulty, since there exist regular expressions, e.g. `EmptyString` which do match some string!) We then test that the derived generator for `Tree`s succesfully generates a counterexample (e.g. `EmptyString`) which refutes the property. -/ /-- Determines whether a regular expression *never* matches any string -/ def neverMatchesAnyString (r : RegExp) : Bool := match r with | .EmptySet => true | .EmptyStr | .Char _ | .Star _ => false -- Note that `Star` can always match the empty string | .App r1 r2 => neverMatchesAnyString r1 || neverMatchesAnyString r2 | .Union r1 r2 => neverMatchesAnyString r1 && neverMatchesAnyString r2 /-- A shrinker for regular expressions -/ def shrinkRegExp (r : RegExp) : List RegExp := match r with | .EmptySet | .EmptyStr => [] | .Char _ => [.EmptyStr] | .Star r' => .Star <$> shrinkRegExp r' | .App r1 r2 | .Union r1 r2 => shrinkRegExp r1 ++ shrinkRegExp r2 /-- `Shrinkable` instance for `RegExp` -/ instance : Shrinkable RegExp where shrink := shrinkRegExp /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ r : RegExp, neverMatchesAnyString r == True) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/BitVecStructureTest.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-- Dummy `inductive` where a constructor has a dependently-typed argument (`BitVec n`) whose index does not appear in the overall type (`DummyInductive`) -/ inductive DummyInductive where | FromBitVec : ∀ (n : Nat), BitVec n → String → DummyInductive deriving Repr set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryDummyInductive.arbitrary : Nat → Plausible.Gen (@DummyInductive✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@DummyInductive✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return DummyInductive.FromBitVec a✝ a✝¹ a✝²) [(do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return DummyInductive.FromBitVec a✝ a✝¹ a✝²)] | fuel'✝ + 1 => Plausible.Gen.frequency (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return DummyInductive.FromBitVec a✝ a✝¹ a✝²) [(1, (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← Plausible.Arbitrary.arbitrary let a✝² ← Plausible.Arbitrary.arbitrary return DummyInductive.FromBitVec a✝ a✝¹ a✝²)), ]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@DummyInductive✝) := ⟨instArbitraryDummyInductive.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for DummyInductive -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` /-- info: instArbitraryFueledDummyInductive -/ #guard_msgs in #synth ArbitraryFueled DummyInductive /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary DummyInductive /-- Shrinker for `DummyInductive` -/ def 
shrinkDummyInductive : DummyInductive → List DummyInductive | .FromBitVec n bitVec str => let shrunkenBitVecs := Shrinkable.shrink bitVec let shrunkenStrs := Shrinkable.shrink str (fun (bv, s) => .FromBitVec n bv s) <$> List.zip shrunkenBitVecs shrunkenStrs /-- `Shrinkable` instance for `DummyInductive` -/ instance : Shrinkable DummyInductive where shrink := shrinkDummyInductive /-- To test whether the derived generator can generate counterexamples, we state an (erroneous) property that states that all `BitVec` arguments to `DummyInductive.FromBitVec` represent the `Nat` 2, and see if the derived generator can refute this property. -/ def BitVecEqualsTwo : DummyInductive → Bool | .FromBitVec _ bitVec _ => bitVec.toNat == 2 /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ ind : DummyInductive, BitVecEqualsTwo ind) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/ParameterizedTypeTest.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr import Plausible.Testable open Plausible /-- A dummy `inductive` type isomorphic to the polymorphic `List` type, used as an example of a parameterized inductive type -/ inductive MyList (α : Type) where | MyNil : MyList α | MyCons : α → MyList α → MyList α deriving Repr, BEq set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryMyList.arbitrary {α✝} [Plausible.Arbitrary✝ α✝] : Nat → Plausible.Gen (@MyList✝ α✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@MyList✝ α✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure MyList.MyNil) [(pure MyList.MyNil)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure MyList.MyNil) [(1, (pure MyList.MyNil)), (fuel'✝ + 1, (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← aux_arb fuel'✝ return MyList.MyCons a✝ a✝¹))]) fun fuel✝ => aux_arb fuel✝ end, instance {α✝} [Plausible.Arbitrary✝ α✝] : Plausible.ArbitraryFueled✝ (@MyList✝ α✝) := ⟨instArbitraryMyList.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for MyList -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` -- when `α` is specialized to `Nat` /-- info: instArbitraryFueledMyListOfArbitrary -/ #guard_msgs in #synth ArbitraryFueled (MyList Nat) /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary (MyList Nat) -- Infrastructure for testing the derived generator /-- Converts a `MyList` to an ordinary `List` -/ def listOfMyList (l : MyList α) : List α := match l with | .MyNil => [] | .MyCons x xs => x :: listOfMyList xs /-- Converts an ordinary `List` to a `MyList` -/ def myListOfList (l : List α) : MyList α := match l with | [] => .MyNil | x :: xs => .MyCons x (myListOfList xs) /-- Trivial shrinker for `MyList α`. 
(Under the hood, this converts the `MyList` to an ordinary `List`, uses the default `Shrinkable` instance for `List α`, then converts it back to `MyList α`.) -/ def shrinkMyList [Shrinkable α] (myList : MyList α) : List (MyList α) := let l := listOfMyList myList myListOfList <$> Shrinkable.shrink l /-- `Shrinkable` instance for `MyList α` -/ instance [Shrinkable α] : Shrinkable (MyList α) where shrink := shrinkMyList /-! To test whether the derived generator can generate counterexamples, we create an erroneous property `∀ l : MyList Nat, reverse (reverse l) != l`, and ask Plausible to falsify it. (This property is false, since `reverse` is an involution on `List α`, and `MyList α` is isomorphic to `List α`.) -/ /-- Returns the elements of a `MyList α` in reverse order. Implementation adapted from the Haskell `List.reverse` function. https://hackage.haskell.org/package/base-4.17.1.0/docs/Prelude.html#v:reverse -/ def reverse (l : MyList α) : MyList α := rev l .MyNil where rev (l : MyList α) (acc : MyList α) := match l with | .MyNil => acc | .MyCons x xs => rev xs (.MyCons x acc) /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ l : MyList Nat, reverse (reverse l) != l) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/DeriveNKIValueGenerator.lean
import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Attr import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-- A datatype representing values in the NKI language, adapted from https://github.com/leanprover/KLR/blob/main/KLR/NKI/Basic.lean -/ inductive Value where | none | bool (value : Bool) | int (value : Int) | string (value : String) | ellipsis | tensor (shape : List Nat) (dtype : String) deriving Repr set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryValue.arbitrary : Nat → Plausible.Gen (@Value✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@Value✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure Value.none) [(pure Value.none), (do let a✝ ← Plausible.Arbitrary.arbitrary return Value.bool a✝), (do let a✝¹ ← Plausible.Arbitrary.arbitrary return Value.int a✝¹), (do let a✝² ← Plausible.Arbitrary.arbitrary return Value.string a✝²), (pure Value.ellipsis), (do let a✝³ ← Plausible.Arbitrary.arbitrary let a✝⁴ ← Plausible.Arbitrary.arbitrary return Value.tensor a✝³ a✝⁴)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure Value.none) [(1, (pure Value.none)), (1, (do let a✝ ← Plausible.Arbitrary.arbitrary return Value.bool a✝)), (1, (do let a✝¹ ← Plausible.Arbitrary.arbitrary return Value.int a✝¹)), (1, (do let a✝² ← Plausible.Arbitrary.arbitrary return Value.string a✝²)), (1, (pure Value.ellipsis)), (1, (do let a✝³ ← Plausible.Arbitrary.arbitrary let a✝⁴ ← Plausible.Arbitrary.arbitrary return Value.tensor a✝³ a✝⁴)), ]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@Value✝) := ⟨instArbitraryValue.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for Value -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` /-- info: instArbitraryFueledValue -/ #guard_msgs in #synth ArbitraryFueled Value /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth 
Arbitrary Value /-- `Shrinkable` instance for `Value`s which recursively shrinks each argument to a constructor -/ instance : Shrinkable Value where shrink (v : Value) := match v with | .none | .ellipsis => [] | .bool b => .bool <$> Shrinkable.shrink b | .int n => .int <$> Shrinkable.shrink n | .string s => .string <$> Shrinkable.shrink s | .tensor shape dtype => let shrunkenShapes := Shrinkable.shrink shape let shrunkenDtypes := Shrinkable.shrink dtype (Function.uncurry .tensor) <$> List.zip shrunkenShapes shrunkenDtypes -- To test whether the derived generator can generate counterexamples, -- we state an (erroneous) property that states that all `Value`s are `Bool`s -- and see if the generator can refute this property. /-- Determines whether a `Value` is a `Bool` -/ def isBool (v : Value) : Bool := match v with | .bool _ => true | _ => false /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ v : Value, isBool v) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/DeriveTreeGenerator.lean
import Plausible.Attr import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-- A binary tree is either a single `Leaf`, or a `Node` containing a `Nat` with left & right sub-trees -/ inductive Tree where | Leaf : Tree | Node : Nat → Tree → Tree → Tree deriving BEq, Repr -- Invoke deriving instance handler for the `Arbitrary` typeclass on `type` and `term` set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryTree.arbitrary : Nat → Plausible.Gen (@Tree✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@Tree✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure Tree.Leaf) [(pure Tree.Leaf)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure Tree.Leaf) [(1, (pure Tree.Leaf)), (fuel'✝ + 1, (do let a✝ ← Plausible.Arbitrary.arbitrary let a✝¹ ← aux_arb fuel'✝ let a✝² ← aux_arb fuel'✝ return Tree.Node a✝ a✝¹ a✝²))]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@Tree✝) := ⟨instArbitraryTree.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for Tree -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` /-- info: instArbitraryFueledTree -/ #guard_msgs in #synth ArbitraryFueled Tree /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary Tree /-! Test that we can use the derived generator to find counterexamples. We construct a faulty property, which (erroneously) states that mirroring a tree does not yield the original tree. (Example taken from "Generating Good Generators for Inductive Relations", POPL '18) ```lean ∀ t : Tree, mirror (mirror t) != t ``` where `mirror` is defined as follows: ```lean def mirror (t : Tree) : Tree := match t with | .Leaf => .Leaf | .Node x l r => .Node x r l ``` (This property is faulty, since `mirror` is an involution.) 
We then test that the derived generator for `Tree`s succesfully generates a counterexample (e.g. `Leaf`) which refutes the property. -/ /-- Mirrors a tree, i.e. interchanges the left & right children of all `Node`s -/ def mirror (t : Tree) : Tree := match t with | .Leaf => .Leaf | .Node x l r => .Node x r l /-- A shrinker for `Tree`, adapted from Penn CIS 5520 lecture notes https://www.seas.upenn.edu/~cis5520/current/lectures/stub/05-quickcheck/QuickCheck.html -/ def shrinkTree (t : Tree) : List Tree := match t with | .Leaf => [] -- empty trees can't be shrunk | .Node x l r => [.Leaf, l, r] -- left and right trees are smaller ++ (fun l' => .Node x l' r) <$> shrinkTree l -- shrink left subtree ++ (fun r' => .Node x l r') <$> shrinkTree r -- shrink right tree ++ (fun x' => .Node x' l r) <$> Shrinkable.shrink x -- shrink the value /-- `Shrinkable` instance for `Tree` -/ instance : Shrinkable Tree where shrink := shrinkTree -- Mirroring a tree twice should yield the original tree -- Test that we can succesfully generate a counterexample to the erroneous property /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ t : Tree, mirror (mirror t) != t) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/plausible/Test/DeriveArbitrary/DeriveNKIBinopGenerator.lean
import Plausible.Attr import Plausible.Arbitrary import Plausible.DeriveArbitrary import Plausible.Testable open Plausible Gen set_option guard_msgs.diff true /-- Binary operators for the NKI language, adapted from https://github.com/leanprover/KLR/blob/main/KLR/NKI/Basic.lean -/ inductive BinOp where -- logical | land | lor -- comparison | eq | ne | lt | le | gt | ge -- arithmetic | add | sub | mul | div | mod | pow | floor -- bitwise | lshift | rshift | or | xor | and deriving Repr set_option trace.plausible.deriving.arbitrary true in /-- trace: [plausible.deriving.arbitrary] ⏎ [mutual def instArbitraryBinOp.arbitrary : Nat → Plausible.Gen (@BinOp✝) := let rec aux_arb (fuel✝ : Nat) : Plausible.Gen (@BinOp✝) := (match fuel✝ with | Nat.zero => Plausible.Gen.oneOfWithDefault (pure BinOp.land) [(pure BinOp.land), (pure BinOp.lor), (pure BinOp.eq), (pure BinOp.ne), (pure BinOp.lt), (pure BinOp.le), (pure BinOp.gt), (pure BinOp.ge), (pure BinOp.add), (pure BinOp.sub), (pure BinOp.mul), (pure BinOp.div), (pure BinOp.mod), (pure BinOp.pow), (pure BinOp.floor), (pure BinOp.lshift), (pure BinOp.rshift), (pure BinOp.or), (pure BinOp.xor), (pure BinOp.and)] | fuel'✝ + 1 => Plausible.Gen.frequency (pure BinOp.land) [(1, (pure BinOp.land)), (1, (pure BinOp.lor)), (1, (pure BinOp.eq)), (1, (pure BinOp.ne)), (1, (pure BinOp.lt)), (1, (pure BinOp.le)), (1, (pure BinOp.gt)), (1, (pure BinOp.ge)), (1, (pure BinOp.add)), (1, (pure BinOp.sub)), (1, (pure BinOp.mul)), (1, (pure BinOp.div)), (1, (pure BinOp.mod)), (1, (pure BinOp.pow)), (1, (pure BinOp.floor)), (1, (pure BinOp.lshift)), (1, (pure BinOp.rshift)), (1, (pure BinOp.or)), (1, (pure BinOp.xor)), (1, (pure BinOp.and)), ]) fun fuel✝ => aux_arb fuel✝ end, instance : Plausible.ArbitraryFueled✝ (@BinOp✝) := ⟨instArbitraryBinOp.arbitrary⟩] -/ #guard_msgs in deriving instance Arbitrary for BinOp -- Test that we can successfully synthesize instances of `Arbitrary` & `ArbitraryFueled` /-- info: instArbitraryFueledBinOp -/ #guard_msgs 
in #synth ArbitraryFueled BinOp /-- info: instArbitraryOfArbitraryFueled -/ #guard_msgs in #synth Arbitrary BinOp /-- Trivial `Shrinkable` instance for `BinOp`s -/ instance : Shrinkable BinOp where shrink := fun _ => [] -- To test whether the derived generator can generate counterexamples, -- we state an (erroneous) property that states that all binary operators -- are logical operators, and see if the generator can refute this property. /-- Determines whether a `BinOp` is a logical operation -/ def isLogicalOp (op : BinOp) : Bool := match op with | .land | .lor => true | _ => false /-- error: Found a counter-example! -/ #guard_msgs in #eval Testable.check (∀ op : BinOp, isLogicalOp op) (cfg := {numInst := 10, maxSize := 5, quiet := true})
.lake/packages/proofwidgets/RELEASES.md
# ProofWidgets4 releases This file contains work-in-progress notes for the upcoming release, as well as previous releases. Please check the [releases](https://github.com/leanprover-community/ProofWidgets4/releases) page for the build artifacts. v0.0.49 (WIP) ------------- * Published [@leanprover-community/proofwidgets4](https://www.npmjs.com/package/@leanprover-community/proofwidgets4) on NPM. This allows importing ProofWidgets4 JS components from widgets defined in other Lean packages. NPM support is experimental: see [discussion](https://leanprover.zulipchat.com/#narrow/channel/270676-lean4/topic/ProofWidgets.2EHtml.20to.20JSX.2EElement/near/488669256). v0.0.47 ------- * Generalized `GraphDisplay.Vertex.radius` to `GraphDisplay.Vertex.boundingShape`. * Added demo of metavariable graph display in tactic proofs. v0.0.46 ------- * Renamed `DigraphDisplay` to `GraphDisplay`. Undirected graphs can be rendered by turning off arrowheads. * Added support for edge labels and more customization to `GraphDisplay`. v0.0.45 ------- * Updated Lean to v4.14.0-rc1. v0.0.44 ------- * Added `DigraphDisplay` component. It uses [d3-force](https://d3js.org/d3-force) for layout and can accommodate a variety of display styles. See `Demos/Digraph.lean` for more. * Added `MarkdownDisplay` component to display Markdown (including LaTeX). * Fixed cloud release issue (see [Zulip](https://leanprover.zulipchat.com/#narrow/channel/287929-mathlib4/topic/cache.20and.20proofwidgets)). v0.0.30 ------- * Moved the toolchain to `leanprover/lean4:v4.7.0-rc1`. * Performance improvements for widgets using `mk_rpc_widget%`. Redundant, duplicate calls were previously made to the underlying RPC method; this has been fixed. Furthermore, serverside execution of the RPC method gets cancelled by the infoview as long as its results are no longer needed (for example because the user moved the cursor elsewhere). 
To opt into this mechanism, use `@[server_rpc_method_cancellable]` instead of `@[server_rpc_method]`. RPC methods using that attribute can check whether they have been cancelled using [IO.checkCanceled](https://leanprover-community.github.io/mathlib4_docs/Init/System/IO.html#IO.checkCanceled), and immediately return with an error or a partial result. v0.0.29 ------- * Moved the toolchain to `leanprover/lean4:v4.6.0`. * Exposed `theme.background` to Penrose style programs. v0.0.26 - v0.0.28 ------- * Toolchain bumps and associated tweaks. v0.0.25 ------- * Build the demos in CI. v0.0.24 ------- * Moved the toolchain to `leanprover/lean4:v4.5.0-rc1`. This brings changes to the user widget API described [here](https://github.com/leanprover/lean4/blob/master/RELEASES.md#v450). * **Removed** `ProofWidgets.savePanelWidgetInfo`. For now you should use `Lean.Widget.savePanelWidgetInfo` instead. An example migration can be found [here](https://github.com/leanprover-community/ProofWidgets4/compare/v0.0.23..v0.0.24#diff-c48bcbf1b4d226947726f7a0fe8c945f082f4195b34681638ca61a776bbf778eL49-R52). * The `with_panel_widgets` tactic now optionally accepts props for each listed widget. * Several components now use `mk_rpc_widget%` instead of JavaScript string literals. * Fixes and improvements in the `PenroseDiagram` component and the `Euclidean` demo.
.lake/packages/proofwidgets/README.md
# ProofWidgets ProofWidgets is a library of user interface components for [Lean 4](https://leanprover.github.io/). It supports: - symbolic visualizations of mathematical objects and data structures - data visualization - interfaces for tactics and tactic modes - alternative and domain-specific goal state displays - user interfaces for entering expressions and editing proofs Authors: Wojciech Nawrocki, E.W.Ayers with contributions from Tomáš Skřivan ### How does ProofWidgets relate to user widgets? ProofWidgets relies on the [user widgets](https://leanprover.github.io/lean4/doc/examples/widgets.lean.html) mechanism built into Lean. User widgets provide the minimum of functionality needed to enable custom user interfaces. ProofWidgets builds on top of this with a higher-level component library, syntax sugar, and user-friendly abstractions. Stable parts of ProofWidgets may eventually be backported into Lean core, but ProofWidgets overall will remain a separate library for the foreseeable future. ## Usage ### Viewing the demos The easiest way to get started is to clone a **release tag** of ProofWidgets and run `lake build :release`, as follows: ```bash # You should replace v0.0.3 with the latest version published under Releases git clone https://github.com/leanprover-community/ProofWidgets4 --branch v0.0.3 cd ProofWidgets4/ lake build :release ``` After doing this you will hopefully be able to view the demos in `ProofWidgets/Demos/`. Top tip: use the pushpin icon (![pin](https://raw.githubusercontent.com/microsoft/vscode-codicons/31b33da05aab662f1973ba5667dad672c8e20fbc/src/icons/pin.svg)) to keep a widget in view. You can then live code your widgets. 
### Using ProofWidgets as a dependency Put this in your `lakefile.lean`, making sure to reference a **release tag** rather than the `main` branch: ```lean -- You should replace v0.0.3 with the latest version published under Releases require proofwidgets from git "https://github.com/leanprover-community/ProofWidgets4"@"v0.0.3" ``` [Developing ProofWidgets](#developing-proofwidgets) involves building TypeScript code with NPM. When depending on `ProofWidgets` but not writing any custom TypeScript yourself, you likely want to spare yourself and your users from having to install and run NPM. ProofWidgets is configured to use Lake's [cloud releases](https://github.com/leanprover/lake/#cloud-releases) feature which will automatically fetch pre-built JavaScript files *as long as* you require a release tag rather than the `main` branch. In this mode, you and your users should not need to have NPM installed. However, fetching cloud release may sometimes fail, in which case ProofWidgets may still revert to a full build. You can force ProofWidgets to fail with a custom error in this case by importing it like so: ```lean -- You should replace v0.0.3 with the latest version published under Releases require proofwidgets with NameMap.empty.insert `errorOnBuild "<my message>" from git "https://github.com/leanprover-community/ProofWidgets4"@"v0.0.3" ``` ⚠️ [EXPERIMENTAL] To use ProofWidgets4 JS components in widgets defined in other Lean packages, you can import [@leanprover-community/proofwidgets4](https://www.npmjs.com/package/@leanprover-community/proofwidgets4) from NPM. ## Features ![Red-black tree](doc/infoview-rbtree.png) ### JSX-like syntax ```lean import ProofWidgets.Component.HtmlDisplay open scoped ProofWidgets.Jsx -- click on the line below to see it in your infoview! #html <b>You can use HTML in Lean {.text s!"{1 + 3}"}!</b> ``` See the `Jsx.lean` and `ExprPresentation.lean` demos. 
### Support for libraries We have good support for building diagrams with [Penrose](https://penrose.cs.cmu.edu/), and expose some [Recharts](https://recharts.org/en-US/) components for plotting functions and other kinds of data. See the `Venn.lean` and `Plot.lean` demos. For more purpose-specific integrations of libraries see the `Rubiks.lean` and `RbTree.lean` demos. ### Custom `Expr` displays Just like delaborators and unexpanders allow you to customize how expressions are displayed as text, ProofWidgets allows "delaborating" into (potentially interactive) HTML. See the `ExprPresentation.lean` demo. ### Multi-stage interactions Proof widgets can be used to create proving loops involving user interactions and running tactics in the background. See the `LazyComputation.lean` demo, and the `Conv.lean` demo for an example of editing the proof script. ### Animated HTML As a hidden feature, you can also make animated widgets using the `AnimatedHtml` component. This works particularly well with libraries that ease between different plots, for example Recharts. You can see an example of how to do this in the `Plot.lean` demo. ## Developing ProofWidgets **Contributions are welcome!** Check out issues tagged with "good first issue". The package consists of widget user interface modules written in TypeScript (under `widget/`), and Lean modules (under `ProofWidgets/`). To build ProofWidgets from source, you must have NPM (the [Node.js](https://nodejs.org/en) package manager) installed. During a build, we first compile the TypeScript widget code using NPM, and afterwards build all Lean modules. Lean modules may use TypeScript compilation outputs. The Lakefile handles all of this, so executing `lake build` should suffice to build the entire package. In order to build only the TypeScript, run `lake build widgetJsAll`. Widgets can also be built in development mode using `lake build widgetJsAllDev`. This makes them easier to inspect in developer tools. 
💡 The NPM part of the build process may sometimes fail with missing packages. If this happens, run `npm clean-install` in the `widget/` directory and then try `lake build` again. We use the `include_str` term elaborator to splice the minified JavaScript produced during the first part of the build (by `tsc` and Rollup) into ProofWidgets Lean modules. The minifed JS is stored in `.lake/build/js/`. Modifying any TypeScript source will trigger a rebuild, and should correctly propagate the new minified code to where it used in Lean. ⚠️ Note however that due to Lake issue [#86](https://github.com/leanprover/lake/issues/86), *all* the widget sources are rebuilt whenever any single one changes, which might take a while. ## Cite We have written [a paper describing the design of ProofWidgets4](https://drops.dagstuhl.de/opus/volltexte/2023/18399/). If this work helps you in your own research, you can cite it as follows: ```bibtex @InProceedings{nawrocki_et_al:LIPIcs.ITP.2023.24, author = {Nawrocki, Wojciech and Ayers, Edward W. and Ebner, Gabriel}, title = {{An Extensible User Interface for Lean 4}}, booktitle = {14th International Conference on Interactive Theorem Proving (ITP 2023)}, pages = {24:1--24:20}, series = {Leibniz International Proceedings in Informatics (LIPIcs)}, ISBN = {978-3-95977-284-6}, ISSN = {1868-8969}, year = {2023}, volume = {268}, editor = {Naumowicz, Adam and Thiemann, Ren\'{e}}, publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik}, address = {Dagstuhl, Germany}, URL = {https://drops.dagstuhl.de/opus/volltexte/2023/18399}, URN = {urn:nbn:de:0030-drops-183991}, doi = {10.4230/LIPIcs.ITP.2023.24}, annote = {Keywords: user interfaces, human-computer interaction, Lean} } ```
.lake/packages/proofwidgets/ProofWidgets.lean
import ProofWidgets.Compat import ProofWidgets.Component.Basic import ProofWidgets.Component.FilterDetails import ProofWidgets.Component.GraphDisplay import ProofWidgets.Component.HtmlDisplay import ProofWidgets.Component.InteractiveSvg import ProofWidgets.Component.MakeEditLink import ProofWidgets.Component.OfRpcMethod import ProofWidgets.Component.Panel.Basic import ProofWidgets.Component.Panel.GoalTypePanel import ProofWidgets.Component.Panel.SelectionPanel import ProofWidgets.Component.PenroseDiagram import ProofWidgets.Component.Recharts import ProofWidgets.Data.Html import ProofWidgets.Data.Svg import ProofWidgets.Presentation.Expr
.lake/packages/proofwidgets/lakefile.lean
import Lake
open Lake DSL System

-- Package configuration. `preferReleaseBuild` lets downstream users fetch
-- prebuilt JS from the cloud release instead of requiring a local NPM toolchain.
package proofwidgets where
  preferReleaseBuild := true
  buildArchive? := "ProofWidgets4.tar.gz"
  releaseRepo := "https://github.com/leanprover-community/ProofWidgets4"
  leanOptions := #[⟨`experimental.module, true⟩]

/-- Directory (relative to the package root) holding the TypeScript widget sources. -/
def widgetDir : FilePath := "widget"

/-- Absolute path of the widget directory for a given package. -/
nonrec def Lake.Package.widgetDir (pkg : Package) : FilePath := pkg.dir / widgetDir

/-- Run `npm <args>` inside the package's widget directory. -/
def Lake.Package.runNpmCommand (pkg : Package) (args : Array String) : LogIO Unit :=
  -- Running `cmd := "npm.cmd"` directly fails on Windows sometimes
  -- (https://github.com/leanprover-community/ProofWidgets4/issues/97)
  -- so run in PowerShell instead (`cmd.exe` also doesn't work.)
  if Platform.isWindows then
    proc {
      cmd := "powershell"
      args := #["-Command", "npm.cmd"] ++ args
      cwd := some pkg.widgetDir
    } (quiet := true) -- use `quiet` here or `lake` will replay the output in downstream projects.
  else
    proc {
      cmd := "npm"
      args
      cwd := some pkg.widgetDir
    } (quiet := true)

input_file widgetPackageJson where
  path := widgetDir / "package.json"
  text := true

/-- Target to update `package-lock.json` whenever `package.json` has changed. -/
target widgetPackageLock pkg : FilePath := do
  let packageFile ← widgetPackageJson.fetch
  let packageLockFile := pkg.widgetDir / "package-lock.json"
  buildFileAfterDep (text := true) packageLockFile packageFile fun _srcFile => do
    pkg.runNpmCommand #["install"]

input_file widgetRollupConfig where
  path := widgetDir / "rollup.config.js"
  text := true

input_file widgetTsconfig where
  path := widgetDir / "tsconfig.json"
  text := true

/-- The TypeScript widget modules in `widget/src`. -/
input_dir widgetJsSrcs where
  path := widgetDir / "src"
  filter := .extension <| .mem #["ts", "tsx", "js", "jsx"]
  text := true

/-- Target to build all widget modules from `widgetJsSrcs`. -/
def widgetJsAllTarget (pkg : Package) (isDev : Bool) : FetchM (Job Unit) := do
  let srcs ← widgetJsSrcs.fetch
  let rollupConfig ← widgetRollupConfig.fetch
  let tsconfig ← widgetTsconfig.fetch
  let widgetPackageLock ← widgetPackageLock.fetch
  /- `widgetJsAll` is built via `needs`, and Lake's default build order is
  `needs -> cloud release -> main build`. We must instead ensure that the cloud release
  is fetched first so that this target does not build from scratch unnecessarily.
  `afterBuildCacheAsync` guarantees this. -/
  pkg.afterBuildCacheAsync do
    srcs.bindM (sync := true) fun _ =>
      rollupConfig.bindM (sync := true) fun _ =>
        tsconfig.bindM (sync := true) fun _ =>
          widgetPackageLock.mapM fun _ => do
            -- A single trace file stands in for the whole JS build's outputs.
            let traceFile := pkg.buildDir / "js" / "lake.trace"
            buildUnlessUpToDate traceFile (← getTrace) traceFile do
              if let some msg := get_config? errorOnBuild then
                error msg
              /- HACK: Ensure that NPM modules are installed before building TypeScript,
              *if* we are building Typescript. It would probably be better to have
              a proper target for `node_modules` that all the JS/TS modules depend on.
              BUT when we are being built as a dependency of another package using
              the cloud releases feature, we wouldn't want that target to trigger
              since in that case NPM is not necessarily installed. Hence, we put
              this block inside the build process for JS/TS files rather than as
              a top-level target. This only runs when some TypeScript needs building. -/
              pkg.runNpmCommand #["clean-install"]
              pkg.runNpmCommand #["run", if isDev then "build-dev" else "build"]

target widgetJsAll pkg : Unit :=
  widgetJsAllTarget pkg (isDev := false)

-- Development-mode build: unminified output, easier to inspect in devtools.
target widgetJsAllDev pkg : Unit :=
  widgetJsAllTarget pkg (isDev := true)

@[default_target]
lean_lib ProofWidgets where
  needs := #[widgetJsAll]

lean_lib ProofWidgets.Demos where
  needs := #[widgetJsAll]
  globs := #[.submodules `ProofWidgets.Demos]
.lake/packages/proofwidgets/test/delab.lean
import ProofWidgets.Data.Html

open scoped ProofWidgets.Jsx
open ProofWidgets.Html Lean

-- Delaborator round-trip tests: each `#check` below should pretty-print
-- back in JSX-like syntax exactly as recorded by the `#guard_msgs` docstring.

/-- info: <span id="greeting">Hello world</span> : ProofWidgets.Html -/
#guard_msgs in
#check <span id="greeting">Hello world</span>

/-- info: <span>Hello interpolated world</span> : ProofWidgets.Html -/
#guard_msgs in
#check <span>Hello {.text "interpolated"} world</span>

/-- info: <span>Hello {text "<>"} world</span> : ProofWidgets.Html -/
#guard_msgs in
#check <span>Hello {.text "<>"} world</span>

/-- info: <hr/> : ProofWidgets.Html -/
#guard_msgs in
#check <hr />

variable (attrs children)

/-- info: <div {...attrs}>{...children}</div> : ProofWidgets.Html -/
#guard_msgs in
#check element "div" attrs children

structure CustomProps where
  val : Nat
  str : String
  deriving Server.RpcEncodable

-- A stub component: only its name and prop type matter for delaboration.
def CustomComponent : ProofWidgets.Component CustomProps where
  javascript := ""

-- TODO: spacing between attributes
/-- info: <div><CustomComponent val={2}str={"3"}>Content</CustomComponent></div> : ProofWidgets.Html -/
#guard_msgs in
#check <div><CustomComponent val={2} str="3">Content</CustomComponent></div>

def ProdComponent : ProofWidgets.Component (Nat × Nat) where
  javascript := ""

/-- info: <div><ProdComponent {...(1, 2)}/></div> : ProofWidgets.Html -/
#guard_msgs in
#check <div><ProdComponent fst={1} snd={2} /></div>

/-- info: <div><ProdComponent {...(1, 2)}/></div> : ProofWidgets.Html -/
#guard_msgs in
#check <div><ProdComponent {...Prod.mk 1 2}/></div>

/-- info: <div><ProdComponent {...let __src := (1, 2); (__src.fst, 3)}/></div> : ProofWidgets.Html -/
#guard_msgs in
#check <div><ProdComponent {...Prod.mk 1 2} snd={3}/></div>

-- interactive test: check that the hovers in the infoview on subexpressions are correct
#check <span id="test">Hello {.text "<>"} world<CustomComponent val={1} str="3" /></span>
.lake/packages/proofwidgets/ProofWidgets/Compat.lean
module

public meta import Lean.Elab.InfoTree.Main

public meta section

namespace ProofWidgets
open Lean Server Elab

/-- A computation of an `α` that may allocate RPC references
in the `RpcObjectStore` while being encoded. -/
abbrev LazyEncodable α := StateM RpcObjectStore α

-- back from exile
/-- An `Expr` bundled with the elaboration context
(`ContextInfo`, local context and local instances) needed to work with it later. -/
structure ExprWithCtx where
  ci : Elab.ContextInfo
  lctx : LocalContext
  linsts : LocalInstances
  expr : Expr
  deriving TypeName

/-- Run `x` on the stored expression inside its saved elaboration context. -/
def ExprWithCtx.runMetaM (e : ExprWithCtx) (x : Expr → MetaM α) : IO α :=
  e.ci.runMetaM {} $ Meta.withLCtx e.lctx e.linsts (x e.expr)

/-- Capture the current elaboration context together with `e`. -/
def ExprWithCtx.save (e : Expr) : MetaM ExprWithCtx :=
  return {
    ci := { ← CommandContextInfo.save with }
    lctx := ← getLCtx
    linsts := ← Meta.getLocalInstances
    expr := e
  }

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Util.lean
/-
Copyright (c) 2024 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
module

public meta import Lean.PrettyPrinter.Delaborator.Basic

public meta section

namespace ProofWidgets.Util
open Lean

/-- Sends `#[a, b, c]` to `` `(term| $a ++ $b ++ $c)``. -/
def joinArrays {m} [Monad m] [MonadRef m] [MonadQuotation m] (arr : Array Term) : m Term := do
  if h : 0 < arr.size then
    -- Left-associated fold `((a ++ b) ++ c) …`, seeded with the first element.
    arr.foldlM (fun x xs => `($x ++ $xs)) arr[0] (start := 1)
  else
    -- No terms to join: produce the empty array literal.
    `(#[])

/-- Collapse adjacent `inl (_ : α)`s into a `β` using `f`.

For example, `#[.inl a₁, .inl a₂, .inr b, .inl a₃] ↦ #[← f #[a₁, a₂], b, ← f #[a₃]]`. -/
def foldInlsM {m} [Monad m] (arr : Array (α ⊕ β)) (f : Array α → m β) : m (Array β) := do
  let mut ret : Array β := #[]
  -- Run of consecutive `inl`s not yet collapsed by `f`.
  let mut pending_inls : Array α := #[]
  for c in arr do
    match c with
    | .inl ci => pending_inls := pending_inls.push ci
    | .inr cis =>
      -- Flush any accumulated `inl`s before emitting the `inr` element.
      if pending_inls.size ≠ 0 then
        ret := ret.push <| ← f pending_inls
        pending_inls := #[]
      ret := ret.push cis
  -- Flush a trailing run of `inl`s, if any.
  if pending_inls.size ≠ 0 then
    ret := ret.push <| ← f pending_inls
  return ret

end ProofWidgets.Util

namespace Lean.PrettyPrinter.Delaborator
open SubExpr

/-- Delaborate the elements of a list literal separately, calling `elem` on each. -/
partial def delabListLiteral {α} (elem : DelabM α) : DelabM (Array α) :=
  go #[]
where
  -- Walk the `List.cons`/`List.nil` spine, delaborating each head with `elem`.
  go (acc : Array α) : DelabM (Array α) := do
    match_expr ← getExpr with
    | List.nil _ => return acc
    | List.cons _ _ _ =>
      let hd ← withNaryArg 1 elem
      withNaryArg 2 $ go (acc.push hd)
    | _ => failure

/-- Delaborate the elements of an array literal separately, calling `elem` on each. -/
partial def delabArrayLiteral {α} (elem : DelabM α) : DelabM (Array α) := do
  match_expr ← getExpr with
  | List.toArray _ _ => withNaryArg 1 <| delabListLiteral elem
  | _ => failure

/-- A copy of `Delaborator.annotateTermInfo` for other syntactic categories. -/
def annotateTermLikeInfo (stx : TSyntax n) : DelabM (TSyntax n) := do
  let stx ← annotateCurPos ⟨stx⟩
  addTermInfo (← getPos) stx (← getExpr)
  pure ⟨stx⟩

/-- A copy of `Delaborator.withAnnotateTermInfo` for other syntactic categories. -/
def withAnnotateTermLikeInfo (d : DelabM (TSyntax n)) : DelabM (TSyntax n) := do
  let stx ← d
  annotateTermLikeInfo stx

end Lean.PrettyPrinter.Delaborator
.lake/packages/proofwidgets/ProofWidgets/Cancellable.lean
module

public meta import Lean.Data.Json.FromToJson
public meta import Lean.Server.Rpc.RequestHandling
public meta import Std.Data.HashMap
public meta import ProofWidgets.Compat

public meta section

/-! Experimental support for cancellable RPC requests.

Note: Cancellation should eventually become a feature of the core RPC protocol,
and the requests map should be stored in `RequestM`, or somewhere in the server anyway. -/

namespace ProofWidgets
open Lean Server Meta Std

/-- Identifies one running cancellable request. -/
abbrev RequestId := Nat

/-- A running request paired with an action that cancels it. -/
structure CancellableTask where
  -- The in-flight computation, with its result already mapped to lazily-encoded JSON.
  task : Task (Except RequestError (LazyEncodable Json))
  /- Note: we cannot just `IO.cancel task` because it is a result of `map`.
  See https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/Should.20cancelling.20a.20purely.20mapped.20task.20cancel.20the.20original.3F -/
  cancel : IO Unit

/-- Maps the ID of each currently executing request to its task. -/
initialize runningRequests :
    IO.Ref (RequestId × Std.HashMap RequestId CancellableTask) ← IO.mkRef (0, ∅)

/-- Transforms a request handler returning `β` into one that returns immediately
with a `RequestId`. The ID uniquely identifies the running request:
its results can be retrieved using `checkRequest`,
and it can be cancelled using `cancelRequest`. -/
def mkCancellable [RpcEncodable β] (handler : α → RequestM (RequestTask β)) :
    α → RequestM (RequestTask RequestId) := fun a => do
  RequestM.asTask do
    let t ← handler a
    -- Encode the eventual result so all requests can be stored uniformly as JSON.
    let t' := t.mapCheap (·.map rpcEncode)
    -- Allocate a fresh ID (the first component is the next-ID counter) and register the task.
    runningRequests.modifyGet fun (id, m) => (id, (id+1, m.insert id ⟨t'.task, t.cancel⟩))

/-- Cancel the request with ID `rid`. Does nothing if `rid` is invalid. -/
@[server_rpc_method]
def cancelRequest (rid : RequestId) : RequestM (RequestTask String) := do
  RequestM.asTask do
    -- Atomically look up and remove the task so it cannot be cancelled twice.
    let t? ← runningRequests.modifyGet fun (id, m) => (m[rid]?, (id, m.erase rid))
    if let some t := t? then
      t.cancel
    return "ok"

/-- The status of a running cancellable request. -/
inductive CheckRequestResponse
  | running
  | done (result : LazyEncodable Json)
  deriving RpcEncodable

/-- Check whether a request has finished computing, and return the response if so.
The request is removed from `runningRequests` the first time
it is checked and found to have finished.

Throws an error if the `rid` is invalid, or if the request itself threw an error. -/
/- NOTE: a notification-based version would be better than this polling-based one.
But we cannot include RPC references in notifications atm;
another possible addition to the RPC protocol? -/
@[server_rpc_method]
def checkRequest (rid : RequestId) : RequestM (RequestTask CheckRequestResponse) := do
  RequestM.asTask do
    let (_, m) ← runningRequests.get
    match m[rid]? with
    | none => throw $ RequestError.invalidParams
        s!"Request '{rid}' has already finished, or the ID is invalid."
    | some t =>
      if !(← IO.hasFinished t.task) then
        return .running
      -- First observation of a finished task: drop it from the map.
      runningRequests.modify fun (id, m) => (id, m.erase rid)
      match t.task.get with
      | .error e => throw e
      | .ok v => return .done v

/-- Suffix appended to a declaration's name to form its cancellable wrapper's name. -/
def cancellableSuffix : Name := `_cancellable

/-- Like `server_rpc_method`, but requests for this method can be cancelled.
The method should check for that using `IO.checkCanceled`.
Cancellable methods are invoked differently from JavaScript:
see `callCancellable` in `cancellable.ts`. -/
initialize registerBuiltinAttribute {
  name := `server_rpc_method_cancellable
  descr := "Like `server_rpc_method`, \
    but requests for this method can be cancelled. \
    The method should check for that using `IO.checkCanceled`. \
    Cancellable methods are invoked differently from JavaScript: \
    see `callCancellable` in `cancellable.ts`."
  applicationTime := AttributeApplicationTime.afterCompilation
  add := fun decl _ _ => Prod.fst <$> MetaM.run do
    -- Define `<decl>_cancellable := mkCancellable <decl>` and register it as an RPC procedure.
    let name := decl ++ cancellableSuffix
    let value ← mkAppM ``mkCancellable #[mkConst decl]
    addAndCompile $ .defnDecl {
      name
      levelParams := []
      type := ← inferType value
      value
      hints := .opaque
      safety := .safe
    }
    registerRpcProcedure name
}

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/GraphDisplay.lean
module

public meta import ProofWidgets.Component.Basic
public meta import ProofWidgets.Data.Html

public meta section

namespace ProofWidgets.GraphDisplay
open Lean Server Jsx

/-- A themed `<circle>` SVG element, with optional extra attributes. -/
def mkCircle (attrs : Array (String × Json) := #[]) : Html :=
  <circle
    r={5}
    fill="var(--vscode-editor-background)"
    stroke="var(--vscode-editor-foreground)"
    strokeWidth={.num 1.5}
    {...attrs}
  />

/-- A shape containing the vertex label. Used to position incident edge endpoints.
The shape is assumed to be centred on the vertex position. -/
-- TODO: use `getBoundingClientRect` to dynamically compute size
inductive BoundingShape where
  /-- A circle of fixed radius. -/
  | circle (radius : Float) : BoundingShape
  /-- A rectangle of fixed dimensions. -/
  | rect (width height : Float) : BoundingShape
  deriving Inhabited, FromJson, ToJson

structure Vertex where
  /-- Identifier for this vertex. Must be unique. -/
  id : String
  /-- The label is drawn at the vertex position.
  This must be an SVG element.
  Use `<foreignObject>` to draw non-SVG elements. -/
  label : Html := mkCircle
  /-- Bounding shape of the label; the default matches the radius-5 circle of `mkCircle`. -/
  boundingShape : BoundingShape := .circle 5
  /-- Details are shown below the graph display
  after the vertex label has been clicked.
  See also `Props.showDetails`. -/
  details? : Option Html := none
  deriving Inhabited, RpcEncodable

structure Edge where
  /-- Source vertex. Must match the `id` of one of the vertices. -/
  source : String
  /-- Target vertex. Must match the `id` of one of the vertices. -/
  target : String
  /-- Extra attributes to set on the SVG `<line>` element representing this edge.
  See also `Props.defaultEdgeAttrs`. -/
  attrs : Array (String × Json) := #[]
  /-- If present, the label is shown over the edge midpoint.
  This must be an SVG element.
  Use `<foreignObject>` to draw non-SVG elements. -/
  label? : Option Html := none
  /-- Details are shown below the graph display
  after the edge has been clicked.
  See also `Props.showDetails`. -/
  details? : Option Html := none
  deriving Inhabited, RpcEncodable

-- The `Force*Params` structures below mirror d3-force's force constructors;
-- every `none` field falls back to d3's default value.

structure ForceCenterParams where
  x? : Option Float := none
  y? : Option Float := none
  strength? : Option Float := none
  deriving Inhabited, FromJson, ToJson

structure ForceCollideParams where
  radius? : Option Float := none
  strength? : Option Float := none
  iterations? : Option Nat := none
  deriving Inhabited, FromJson, ToJson

structure ForceLinkParams where
  distance? : Option Float := none
  strength? : Option Float := none
  iterations? : Option Nat := none
  deriving Inhabited, FromJson, ToJson

structure ForceManyBodyParams where
  strength? : Option Float := none
  theta? : Option Float := none
  distanceMin? : Option Float := none
  distanceMax? : Option Float := none
  deriving Inhabited, FromJson, ToJson

structure ForceXParams where
  x? : Option Float := none
  strength? : Option Float := none
  deriving Inhabited, FromJson, ToJson

structure ForceYParams where
  y? : Option Float := none
  strength? : Option Float := none
  deriving Inhabited, FromJson, ToJson

structure ForceRadialParams where
  radius : Float
  x? : Option Float := none
  y? : Option Float := none
  strength? : Option Float := none
  deriving Inhabited, FromJson, ToJson

/-- Settings for the simulation of forces on vertices. See https://d3js.org/d3-force. -/
inductive ForceParams where
  | center : ForceCenterParams → ForceParams
  | collide : ForceCollideParams → ForceParams
  | link : ForceLinkParams → ForceParams
  | manyBody : ForceManyBodyParams → ForceParams
  | x : ForceXParams → ForceParams
  | y : ForceYParams → ForceParams
  | radial : ForceRadialParams → ForceParams
  deriving Inhabited, FromJson, ToJson

structure Props where
  vertices : Array Vertex
  /-- At most one edge may exist between any two vertices.
  Self-loops are allowed,
  but (TODO) are currently not rendered well. -/
  edges : Array Edge
  /-- Attributes to set by default on `<line>` elements representing edges. -/
  defaultEdgeAttrs : Array (String × Json) := #[
    ("fill", "var(--vscode-editor-foreground)"),
    ("stroke", "var(--vscode-editor-foreground)"),
    ("strokeWidth", 2),
    ("markerEnd", "url(#arrow)")
  ]
  /-- Which forces to apply to the vertices.
  Most force parameters are optional, using default values if not specified. -/
  forces : Array ForceParams := #[
    .link {},
    .manyBody {},
    .x {},
    .y {}
  ]
  /-- Whether to show a details box below the graph. -/
  showDetails : Bool := false
  deriving Inhabited, RpcEncodable

end GraphDisplay

/-- Display a graph with an interactive force simulation. -/
@[widget_module]
def GraphDisplay : Component GraphDisplay.Props where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "d3Graph.js"

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/FilterDetails.lean
module

public meta import ProofWidgets.Data.Html

public meta section

namespace ProofWidgets
open Lean

/-- Props for the `FilterDetails` component. -/
structure FilterDetailsProps where
  /-- Contents of the `<summary>`. -/
  summary : Html
  /-- What is shown in the filtered state. -/
  filtered : Html
  /-- What is shown in the non-filtered state. -/
  all : Html
  /-- Whether to start in the filtered state. -/
  initiallyFiltered : Bool := true
  deriving Server.RpcEncodable

/-- The `FilterDetails` component is like a `<details>` HTML element,
but also has a filter button that allows you to switch between filtered
and unfiltered states. -/
@[widget_module]
def FilterDetails : Component FilterDetailsProps where
  -- The implementation is a prebuilt JS artifact in the Lake build directory.
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "filterDetails.js"

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/Basic.lean
module

public meta import Lean.Widget.InteractiveCode
public meta import Lean.Widget.UserWidget
public meta import ProofWidgets.Compat

public meta section

namespace ProofWidgets
open Lean

/-- A component is a widget module with a `default` or named export which is a
[React component](https://react.dev/learn/your-first-component). Every component
definition must be annotated with `@[widget_module]`. This makes it possible for
the infoview to load the component.

## Execution environment

The JS environment in which components execute provides a fixed set of libraries
accessible via direct `import`, notably
[`@leanprover/infoview`](https://www.npmjs.com/package/@leanprover/infoview).
All [React contexts](https://react.dev/learn/passing-data-deeply-with-context)
exported from `@leanprover/infoview` are usable from components.

## Lean encoding of props

`Props` is the Lean representation of the type `JsProps` of
[React props](https://react.dev/learn/passing-props-to-a-component)
that the component expects. The export of the module specified in `«export»`
should then have type `(props: JsProps & { pos: DocumentPosition }): React.ReactNode`
where `DocumentPosition` is defined in `@leanprover/infoview`.

`Props` is expected to have a `Lean.Server.RpcEncodable` instance specifying how to encode props
as JSON. Note that by defining a `Component Props` with a specific JS implementation, you are
*asserting* that `Props` is a correct representation of `JsProps`. -/
structure Component (Props : Type) extends Widget.Module where
  /-- Which export of the module to use as the component function. -/
  «export» : String := "default"

-- A `Component` can be used anywhere a `Widget.Module` is expected.
instance : Widget.ToModule (Component Props) := ⟨Component.toModule⟩

/-- Props for the `InteractiveCode` component: code that has already been pretty-printed. -/
structure InteractiveCodeProps where
  fmt : Widget.CodeWithInfos
  deriving Server.RpcEncodable

/-- Present pretty-printed code as interactive text.

The most common use case is to instantiate this component from a `Lean.Expr`.
To do so, you must eagerly pretty-print the `Expr` using `Widget.ppExprTagged`.
See also `InteractiveExpr`. -/
@[widget_module]
def InteractiveCode : Component InteractiveCodeProps where
  javascript := "
    import { InteractiveCode } from '@leanprover/infoview'
    import * as React from 'react'
    export default function(props) { return React.createElement(InteractiveCode, props) }"

/-- Props for the `InteractiveExpr` component: a reference to an expression with its context. -/
structure InteractiveExprProps where
  expr : Server.WithRpcRef ExprWithCtx
  deriving Server.RpcEncodable

/-- RPC method backing `InteractiveExpr`: pretty-print the referenced expression on demand. -/
@[server_rpc_method]
def ppExprTagged : InteractiveExprProps → Server.RequestM (Server.RequestTask Widget.CodeWithInfos)
  | ⟨ref⟩ => Server.RequestM.asTask <| ref.val.runMetaM Widget.ppExprTagged

/-- Lazily pretty-print and present a `Lean.Expr` as interactive text.

This component is preferable over `InteractiveCode` when the `Expr` will not necessarily be
displayed in the UI (e.g. it may be hidden by default), in which case laziness saves some work.
On the other hand if the `Expr` will likely be shown and you are in a `MetaM` context,
it is preferable to use the eager `InteractiveCode` in order to avoid the extra client-server
roundtrip needed for the pretty-printing RPC call. -/
@[widget_module]
def InteractiveExpr : Component InteractiveExprProps where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "interactiveExpr.js"

/-- Props for the `InteractiveMessage` component: a reference to a structured message. -/
structure InteractiveMessageProps where
  msg : Server.WithRpcRef MessageData
  deriving Server.RpcEncodable

/-- Present a structured Lean message. -/
@[widget_module]
def InteractiveMessage : Component InteractiveMessageProps where
  javascript := "
    import { InteractiveMessageData } from '@leanprover/infoview'
    import * as React from 'react'
    export default function(props) { return React.createElement(InteractiveMessageData, props) }
  "

/-- Props for the `MarkdownDisplay` component: the Markdown text to render. -/
structure MarkdownDisplay.Props where
  contents : String
  deriving ToJson, FromJson

/-- Render a given string as Markdown.

LaTeX is supported with MathJax:
use `$...$` for inline math, and `$$...$$` for displayed math.

Example usage:
```lean
<MarkdownDisplay contents={"$a + b = c$"} />
```
-/
@[widget_module]
def MarkdownDisplay : Component MarkdownDisplay.Props where
  javascript := "
    import { Markdown } from '@leanprover/infoview'
    import * as React from 'react'
    export default (props) => React.createElement(Markdown, props)
  "

end ProofWidgets

/-- Construct a structured message from a ProofWidgets component.

For the meaning of `alt`, see `MessageData.ofWidget`. -/
def Lean.MessageData.ofComponent [Server.RpcEncodable Props]
    (c : ProofWidgets.Component Props) (p : Props) (alt : String) : CoreM MessageData := do
  -- Look the widget up by the hash of its JS source and attach the encoded props.
  let wi ← Widget.WidgetInstance.ofHash
    c.javascriptHash
    (Server.RpcEncodable.rpcEncode p)
  return .ofWidget wi alt
.lake/packages/proofwidgets/ProofWidgets/Component/OfRpcMethod.lean
module

public meta import Lean.Elab.ElabRules
public meta import ProofWidgets.Component.Basic
public meta import ProofWidgets.Data.Html
public meta import ProofWidgets.Cancellable

public meta section

namespace ProofWidgets
open Lean Server Meta Elab Term

/-- JS source template used by `mk_rpc_widget%`. The placeholder `$RPC_METHOD`
is textually substituted with the fully qualified name of the RPC method to call. -/
def ofRpcMethodTemplate := include_str ".." / ".." / ".lake" / "build" / "js" / "ofRpcMethod.js"

/-- The elaborator `mk_rpc_widget%` allows writing certain widgets in Lean instead of JavaScript.
Specifically, it translates an RPC method of type `MyProps → RequestM (RequestTask Html)`
into a widget component of type `Component MyProps`.

Even more specifically, we can write:
```lean
open Lean Server

structure MyProps where
  ...
  deriving RpcEncodable

@[server_rpc_method]
def MyComponent.rpc (ps : MyProps) : RequestM (RequestTask Html) :=
  ...

@[widget_module]
def MyComponent : Component MyProps :=
  mk_rpc_widget% MyComponent.rpc
```

This is convenient because we can program the logic that computes an output HTML tree
given input props in Lean directly.

⚠️ However, note that there are several limitations on what such component can do
compared to ones written natively in TypeScript or JavaScript:
- It must be pure, i.e. cannot directly store any React state.
  Child components may store state as usual.
- It cannot pass closures as props to the child components that it returns.
  For example, it is not currently possible to write click event handlers in Lean
  and pass them to a `<button onClick={..}>` child.
- Every time the input props change, the infoview has to send a message to the Lean server
  in order to invoke the RPC method.
  Thus there can be a noticeable visual delay between the input props changing
  and the display updating.
  Consequently, components whose props change at a high frequency
  (e.g. depending on the mouse position) should not be implemented using this method.

💡 Note that an inverse transformation is already possible.
Given `MyComponent : Component MyProps`, we can write:
```lean
open Lean Server

@[server_rpc_method]
def MyComponent.rpc (ps : MyProps) : RequestM (RequestTask Html) :=
  RequestM.asTask do
    return Html.ofComponent MyComponent ps #[]
```
-/
elab "mk_rpc_widget%" fn:term : term <= expectedType => do
  -- The expected type must be `Component ?α` for some props type `?α`.
  let α ← mkFreshExprMVar (some (.sort levelOne)) (userName := `α)
  let compT ← mkAppM ``Component #[α]
  if !(← isDefEq expectedType compT) then
    throwError "expected type{indentD expectedType}\nis not of the form{indentD compT}"
  -- Elaborate `fn` at type `?α → RequestM (RequestTask Html)`.
  let arr ← mkArrow α (← mkAppM ``RequestM #[← mkAppM ``RequestTask #[.const ``Html []]])
  let fn ← Term.elabTermEnsuringType fn arr
  let fn ← instantiateMVars fn
  -- Only a bare constant is accepted: we need its name to splice into the JS template.
  if let .const nm .. := fn then
    let cancellableNm := nm ++ cancellableSuffix
    if (← existsBuiltinRpcProcedure cancellableNm) ||
        userRpcProcedures.contains (← getEnv) cancellableNm then
      -- Use the cancellable variant if possible.
      -- NOTE(review): replacing `window.toString()` with `'true'`/`'false'` appears to
      -- toggle the cancellable code path inside the JS template — confirm against
      -- `ofRpcMethod.js`.
      let code : StrLit := quote $ ofRpcMethodTemplate
        |>.replace "$RPC_METHOD" (toString cancellableNm)
        |>.replace "window.toString()" "'true'"
      let valStx ← `({ javascript := $code })
      let ret ← elabTerm valStx expectedType
      return ret
    if !(← existsBuiltinRpcProcedure nm) && !userRpcProcedures.contains (← getEnv) nm then
      throwError s!"'{nm}' is not a known RPC method. Use `@[server_rpc_method]` to register it."
    -- https://github.com/leanprover/lean4/issues/1415
    let code : StrLit := quote $ ofRpcMethodTemplate
      |>.replace "$RPC_METHOD" (toString nm)
      |>.replace "window.toString()" "'false'"
    let valStx ← `({ javascript := $code })
    let ret ← elabTerm valStx expectedType
    return ret
  throwError "Expected the name of a constant, got a complex term{indentD fn}"

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/MakeEditLink.lean
module

public meta import Lean.Server.Utils
public meta import ProofWidgets.Component.Basic

public meta section

/-- Assuming that `s` is the content of a file starting at position `p`,
advance `p` to the end of `s`. -/
def Lean.Lsp.Position.advance (p : Position) (s : Substring) : Position :=
  -- Count the newlines in `s` and the length of its final line.
  -- LSP `character` counts UTF-16 code units, hence `utf16Size`.
  let (nLinesAfter, lastLineUtf16Sz) := s.foldl (init := (0, 0)) fun (n, l) c =>
    if c == '\n' then (n + 1, 0) else (n, l + c.utf16Size.toNat)
  { line := p.line + nLinesAfter
    -- Only offset by the starting column when we never left the starting line.
    character := (if nLinesAfter == 0 then p.character else 0) + lastLineUtf16Sz }

namespace ProofWidgets
open Lean

/-- Props for the `MakeEditLink` component. -/
structure MakeEditLinkProps where
  /-- The edit to perform on the file. -/
  edit : Lsp.TextDocumentEdit
  /-- Which textual range to select after the edit. The range is interpreted in the file
  that `edit` applies to. If present and `start == end`, the cursor is moved to `start`
  and nothing is selected. If not present, the selection is not changed. -/
  newSelection? : Option Lsp.Range := none
  /-- The `title` property, if any, to set on the displayed `<a>` link. -/
  title? : Option String := none
  deriving FromJson, ToJson

/-- Replace `range` with `newText`. If `newSelection?` is absent, place the cursor at the end
of the new text. If `newSelection?` is present, make the specified selection instead.

See also `MakeEditLinkProps.ofReplaceRange`. -/
def MakeEditLinkProps.ofReplaceRange' (doc : Server.DocumentMeta) (range : Lsp.Range)
    (newText : String) (newSelection? : Option Lsp.Range := none) : MakeEditLinkProps :=
  let edit := { textDocument := { uri := doc.uri, version? := doc.version }
                edits := #[{ range, newText }] }
  if newSelection?.isSome then
    { edit, newSelection? }
  else
    -- Default: collapse the selection to a cursor at the end of the inserted text.
    let endPos := range.start.advance newText.toSubstring
    { edit, newSelection? := some { start := endPos, «end» := endPos } }

/-- Replace `range` with `newText`. If `newSelection?` is absent, place the cursor at the end
of the new text. If `newSelection?` is present, select the range it specifies within `newText`.

See also `MakeEditLinkProps.ofReplaceRange'`. -/
def MakeEditLinkProps.ofReplaceRange (doc : Server.DocumentMeta) (range : Lsp.Range)
    (newText : String) (newSelection? : Option (String.Pos.Raw × String.Pos.Raw) := none) :
    MakeEditLinkProps :=
  ofReplaceRange' doc range newText (newSelection?.map fun (s, e) =>
    -- Translate string offsets within `newText` into LSP positions by advancing
    -- from the start of the replaced range.
    let ps := range.start.advance (newText.toSubstring.extract 0 s)
    let pe := ps.advance (newText.toSubstring.extract s e)
    { start := ps, «end» := pe })

/-- A link that, when clicked, makes the specified edit
and potentially moves the cursor or makes a selection. -/
@[widget_module]
def MakeEditLink : Component MakeEditLinkProps where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "makeEditLink.js"

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/PenroseDiagram.lean
module

public meta import ProofWidgets.Component.Basic
public meta import ProofWidgets.Data.Html
public meta import Std.Data.HashMap

public meta section

namespace ProofWidgets.Penrose
open Lean Server Std

/-- Props for the `Penrose.Diagram` component. -/
structure DiagramProps where
  /-- HTML trees to embed in the diagram, keyed by substance-program object name. -/
  embeds : Array (String × Html)
  /-- The Penrose domain (DSL) program. -/
  dsl : String
  /-- The Penrose style program. -/
  sty : String
  /-- The Penrose substance program. -/
  sub : String
  /-- Maximum number of optimization steps to take before showing the diagram.
  Optimization may converge earlier, before taking this many steps. -/
  maxOptSteps : Nat := 500
  deriving Inhabited, RpcEncodable

/-- Displays the given diagram using [Penrose](https://penrose.cs.cmu.edu/).
The website contains explanations of how to write domain (`dsl`), style (`sty`),
and substance (`sub`) programs.

The diagram may also contain embedded HTML trees which are specified in `embeds`.
Each embed is HTML together with the name of an object `x` in the substance program.
The object `x` can be of any type but *must* be assigned an `x.textBox : Rectangle` field
in the style program. This rectangle will be replaced with the HTML tree.
Its dimensions will be overridden in the style program to match those of the HTML node.

The following additional constants are prepended to the style program:
```penrose
theme {
  color foreground
  color tooltipBackground
  color tooltipForeground
  color tooltipBorder
}
```
and can be accessed as, for example, `theme.foreground` in the provided `sty`
in order to match the editor theme. -/
@[widget_module]
def Diagram : Component DiagramProps where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "penroseDisplay.js"

/-! # `DiagramBuilderM` -/

/-- Mutable state accumulated while building a diagram. -/
structure DiagramState where
  /-- The Penrose substance program.
  Note that `embeds` are added lazily at the end. -/
  sub : String := ""
  /-- Components to display as labels in the diagram,
  stored in the map as name ↦ (type, html). -/
  embeds : Std.HashMap String (String × Html) := ∅

/-- A monad to easily build Penrose diagrams in. -/
abbrev DiagramBuilderM := StateT DiagramState MetaM

namespace DiagramBuilderM

open scoped Jsx in
/-- Assemble the diagram using the provided domain and style programs.

`none` is returned iff nothing was added to the diagram. -/
def buildDiagram (dsl sty : String) (maxOptSteps : Nat := 500) :
    DiagramBuilderM (Option Html) := do
  let st ← get
  -- Empty diagram: nothing to render.
  if st.sub == "" && st.embeds.isEmpty then
    return none
  let mut sub := "AutoLabel All\n"
  let mut embedHtmls := #[]
  -- Declare one substance object per embed, collecting the HTML for the component.
  for (n, (tp, h)) in st.embeds.toArray do
    sub := sub ++ s!"{tp} {n}\n"
    embedHtmls := embedHtmls.push (n, h)
  -- Note: order matters here, embed variables are declared first.
  sub := sub ++ st.sub
  return <Diagram embeds={embedHtmls} dsl={dsl} sty={sty} sub={sub} maxOptSteps={maxOptSteps} />

/-- Add an object `nm` of Penrose type `tp`, labelled by `h`, to the substance program. -/
def addEmbed (nm : String) (tp : String) (h : Html) : DiagramBuilderM Unit := do
  modify fun st => { st with embeds := st.embeds.insert nm (tp, h) }

open scoped Jsx in
/-- Add an object of Penrose type `tp`, corresponding to (and labelled by) the expression `e`,
to the substance program. Return its Penrose name. -/
def addExpr (tp : String) (e : Expr) : DiagramBuilderM String := do
  -- The pretty-printed expression doubles as the substance-object name.
  let nm ← toString <$> Lean.Meta.ppExpr e
  let h := <InteractiveCode fmt={← Widget.ppExprTagged e} />
  addEmbed nm tp h
  return nm

/-- Add an instruction `i` to the substance program. -/
def addInstruction (i : String) : DiagramBuilderM Unit := do
  modify fun st => { st with sub := st.sub ++ s!"{i}\n" }

/-- Run a diagram-building computation starting from the empty diagram state. -/
def run (x : DiagramBuilderM α) : MetaM α :=
  x.run' {}

end DiagramBuilderM

end Penrose

/-- Abbreviation for backwards-compatibility. -/
abbrev PenroseDiagramProps := Penrose.DiagramProps

/-- Abbreviation for backwards-compatibility. -/
abbrev PenroseDiagram := Penrose.Diagram

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/Recharts.lean
module

public meta import ProofWidgets.Component.Basic

public meta section

namespace ProofWidgets.Recharts
open Lean

/-- Widget module bundling [Recharts](https://recharts.org) chart components.
The individual components below (`LineChart`, `XAxis`, …) are named exports
of this single JS module. -/
@[widget_module]
def Recharts : Widget.Module where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "recharts.js"

/-- Orientation of a `LineChart`. -/
inductive LineChartLayout where
  | horizontal
  | vertical
  deriving FromJson, ToJson

/-- How charts sharing a `syncId` are synchronized (see the Recharts `LineChart` docs). -/
inductive LineChartSyncMethod where
  | index | value
  deriving FromJson, ToJson

/-- Margins (in pixels) around the chart area. -/
structure LineChartMargin where
  top : Nat := 5
  right : Nat := 5
  bottom : Nat := 5
  left : Nat := 5
  deriving FromJson, ToJson

/-- Props for the `LineChart` component. -/
structure LineChartProps where
  layout : LineChartLayout := .horizontal
  syncId? : Option String := none
  syncMethod? : Option LineChartSyncMethod := some .index
  /-- Chart width in pixels. -/
  width : Nat
  /-- Chart height in pixels. -/
  height : Nat
  /-- The data points, one JSON object per point. -/
  data : Array Json
  margin : LineChartMargin := {}
  deriving FromJson, ToJson

/-- See https://recharts.org/en-US/api/LineChart. -/
def LineChart : Component LineChartProps where
  javascript := Recharts.javascript
  «export» := "LineChart"

inductive AxisType where
  /-- Treat values as numbers: spacing on axis by numeric difference. -/
  | number
  /-- Treat values as categorical: equal spacing between values. -/
  | category
  deriving FromJson, ToJson

/-- Props for the `XAxis`/`YAxis` components. -/
structure AxisProps where
  dataKey? : Option Json := none
  domain? : Option (Array Json) := none
  allowDataOverflow : Bool := false
  /-- How values along this axis should be interpreted.
  The Recharts default is `category`. -/
  type : AxisType := .number
  -- TODO: There are many more props
  deriving FromJson, ToJson

/-- See https://recharts.org/en-US/api/XAxis. -/
def XAxis : Component AxisProps where
  javascript := Recharts.javascript
  «export» := "XAxis"

/-- See https://recharts.org/en-US/api/YAxis. -/
def YAxis : Component AxisProps where
  javascript := Recharts.javascript
  «export» := "YAxis"

/-- Interpolation used to draw a `Line` (see the Recharts `Line` docs). -/
inductive LineType where
  | basis | basisClosed | basisOpen | linear | linearClosed | natural
  | monotoneX | monotoneY | monotone | step | stepBefore | stepAfter
  deriving FromJson, ToJson

/-- Props for the `Line` component. -/
structure LineProps where
  type : LineType := .linear
  /-- Which field of each data point this line plots. -/
  dataKey : Json
  /-- Stroke color (any CSS color string). -/
  stroke : String
  dot? : Option Bool := none
  -- TODO: There are many more props
  deriving FromJson, ToJson

/-- See https://recharts.org/en-US/api/Line. -/
def Line : Component LineProps where
  javascript := Recharts.javascript
  «export» := "Line"

end ProofWidgets.Recharts
.lake/packages/proofwidgets/ProofWidgets/Component/HtmlDisplay.lean
module

public meta import Lean.Server.Rpc.Basic
public meta import Lean.Elab.Command
public meta import ProofWidgets.Data.Html

public meta section

namespace ProofWidgets
open Lean Server

/-- Props for the `HtmlDisplay` components: the HTML tree to show. -/
structure HtmlDisplayProps where
  html : Html
  deriving RpcEncodable

/-- Display an `Html` tree in place. -/
@[widget_module]
def HtmlDisplay : Component HtmlDisplayProps where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "htmlDisplay.js"

/-- Display an `Html` tree as an infoview panel. -/
@[widget_module]
def HtmlDisplayPanel : Component HtmlDisplayProps where
  javascript := include_str ".." / ".." / ".lake" / "build" / "js" / "htmlDisplayPanel.js"

open Lean Server Elab Command

/-- Any term `t : α` with a `HtmlEval α` instance can be evaluated
in a `#html t` command.

This is analogous to how `Lean.MetaEval` supports `#eval`. -/
class HtmlEval (α : Type u) where
  eval : α → CommandElabM Html

-- Pure HTML values evaluate to themselves.
instance : HtmlEval Html where
  eval ht := pure ht

-- Any monad that lifts into `CommandElabM` can produce HTML.
instance [MonadLiftT m CommandElabM] : HtmlEval (m Html) where
  eval := monadLift

instance : HtmlEval (CoreM Html) where
  eval := liftCoreM

instance : HtmlEval (MetaM Html) where
  eval x := liftTermElabM x

instance : HtmlEval (TermElabM Html) where
  eval := liftTermElabM

namespace HtmlCommand

open Elab Command

/-- Evaluate an elaborated term as a `CommandElabM Html` computation.
Unsafe because it uses the interpreter (`Term.evalTerm`). -/
unsafe def evalCommandMHtmlUnsafe (stx : Term) : TermElabM (CommandElabM Html) := do
  let tp := mkApp (mkConst ``CommandElabM) (mkConst ``Html)
  Term.evalTerm _ tp stx

/-- Safe wrapper around `evalCommandMHtmlUnsafe`. -/
@[implemented_by evalCommandMHtmlUnsafe]
opaque evalCommandMHtml : Term → TermElabM (CommandElabM Html)

/-- Display a value of type `Html` in the infoview.

The input can be a pure value or a computation in any Lean metaprogramming monad
(e.g. `CommandElabM Html`). -/
syntax (name := htmlCmd) "#html " term : command

@[command_elab htmlCmd]
def elabHtmlCmd : CommandElab := fun
  | stx@`(#html $t:term) => do
    -- Wrap `t` in `HtmlEval.eval` so any type with an instance is accepted,
    -- then run the resulting computation.
    let htX ← liftTermElabM <| evalCommandMHtml <| ← ``(HtmlEval.eval $t)
    let ht ← htX
    -- Attach the rendered HTML as a panel widget at this command's position.
    liftCoreM <| Widget.savePanelWidgetInfo (hash HtmlDisplayPanel.javascript)
      (return json% { html: $(← rpcEncode ht) }) stx
  | stx => throwError "Unexpected syntax {stx}."

end HtmlCommand
end ProofWidgets

/-- Construct a structured message from ProofWidgets HTML.

For the meaning of `alt`, see `MessageData.ofWidget`. -/
def Lean.MessageData.ofHtml (h : ProofWidgets.Html) (alt : String) : CoreM MessageData :=
  MessageData.ofComponent ProofWidgets.HtmlDisplay ⟨h⟩ alt
.lake/packages/proofwidgets/ProofWidgets/Component/InteractiveSvg.lean
module

public meta import ProofWidgets.Data.Svg

public meta section

namespace ProofWidgets
open Lean Std

/-- Truncate a `Float` towards zero, yielding an `Int`.
Goes through `UInt64`, so values beyond the `UInt64` range are not handled. -/
private def _root_.Float.toInt (x : Float) : Int :=
  if x >= 0 then
    x.toUInt64.toNat
  else
    -((-x).toUInt64.toNat)

namespace Svg

/-- The kinds of user/timer events the client reports to the server. -/
inductive ActionKind where
  | timeout
  | mousedown
  | mouseup
  | mousemove -- [note] mouse moves only happen when mouse button is down.
  deriving ToJson, FromJson, DecidableEq

/-- A single client-side event, optionally carrying the id of the SVG element
it occurred on and arbitrary attached data. -/
structure Action where
  kind : ActionKind
  id : Option String
  data : Option Json
  deriving ToJson, FromJson

/-- Wrapper around the user-supplied `State`. In addition to it, `SvgState`
automatically tracks time, the current selection, the mouse position,
and custom per-element data. -/
structure SvgState (State : Type) where
  /-- The user-supplied state. -/
  state : State
  /-- Time in milliseconds. -/
  time : Float
  /-- Id of the currently selected SVG element, if any. -/
  selected : Option String
  /-- Last known mouse position in pixels, if any. -/
  mousePos : Option (Int × Int)
  /-- Custom data attached to SVG elements, keyed by element id. -/
  idToData : List (String × Json)
  deriving ToJson, FromJson, Server.RpcEncodable

/-- Parameters sent by the client on every update tick. -/
structure UpdateParams (State : Type) where
  elapsed : Float
  actions : Array Action
  state : SvgState State
  mousePos : Option (Float × Float) -- TODO: change to Option (Int × Int) or do we want to support subpixel precision?
  deriving ToJson, FromJson

/-- Result returned to the client after an update tick. -/
structure UpdateResult (State : Type) where
  html : Html
  state : SvgState State
  /-- Approximate number of milliseconds to wait before calling again. -/
  callbackTime : Option Float := some 33
  deriving Server.RpcEncodable

-- maybe add title, refresh rate, initial time?, custom selection rendering
/-- Specification of an interactive SVG: initial state, coordinate frame,
a state-update function, and a render function. -/
structure InteractiveSvg (State : Type) where
  init : State
  frame : Svg.Frame
  update (time_ms Δt_ms : Float)
         (action : Action)
         (mouseStart mouseEnd : Option (Svg.Point frame))
         (selectedId : Option String) (getSelectedData : (α : Type) → [FromJson α] → Option α)
         : State → State
  render (time_ms : Float) (mouseStart mouseEnd : Option (Svg.Point frame)) : State → Svg frame

open Server RequestM Jsx in
/-- RPC entry point driving an `InteractiveSvg`: replays the queued actions through
`isvg.update`, re-renders via `isvg.render`, and returns the new HTML and state. -/
def InteractiveSvg.serverRpcMethod {State : Type} (isvg : InteractiveSvg State)
    (params : UpdateParams State) : RequestM (RequestTask (UpdateResult State)) := do
  -- Ideally, each action should have time and mouse position attached
  -- right now we just assume that all actions are equally spaced within the frame
  let Δt := (params.elapsed - params.state.time) / params.actions.size.toFloat

  let idToData : Std.HashMap String Json := HashMap.ofList params.state.idToData

  let mut time := params.state.time
  let mut state := params.state.state
  let mut selected := params.state.selected

  -- Decode the JSON data attached to the currently selected element, if any.
  let getData := λ (α : Type) [FromJson α] => do
    let id ← selected; let data ← idToData[id]?
    match fromJson? (α:=α) data with
    | .error _ => none
    | .ok val => some val

  let mouseStart := params.state.mousePos.map λ (i,j) => (i, j)
  let mouseEnd := params.mousePos.map λ (x,y) => (x.toInt, y.toInt)

  for action in params.actions do
    -- todo: interpolate mouse movement!

    -- update state
    state := isvg.update time Δt action mouseStart mouseEnd selected getData state

    -- update selection
    if action.kind == ActionKind.mousedown then
      selected := action.id
    if action.kind == ActionKind.mouseup then
      selected := none

    -- update time
    time := time + Δt

  let mut svg := isvg.render time mouseStart mouseEnd state

  let svgState : SvgState State :=
    { state := state
      time := params.elapsed
      selected := selected
      mousePos := mouseEnd.map λ p => p.toPixels
      idToData := svg.idToDataList }

  -- highlight selection
  if let some id := selected then
    if let some idx := svg.idToIdx[id]? then
      svg := { elements := svg.elements.modify idx λ e => e.setStroke (1.,1.,0.) (.px 5) }

  return RequestTask.pure {
    html := <div>
      {svg.toHtml}
    </div>,
    state := svgState,
    callbackTime := some 33,
  }

end Svg
end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/Panel/Basic.lean
module

public meta import ProofWidgets.Compat
public meta import ProofWidgets.Component.Basic
public meta import Lean.Elab.Tactic
public meta import Lean.Widget.Commands

public meta section

namespace ProofWidgets
open Lean Elab Tactic

/-- In the infoview, an **info block** is a top-level collapsible block corresponding
to a given location in a Lean file (e.g. with the header `▼ Basic.lean:12:34`).
A **panel widget** is a component which can appear as a panel
inside an info block in the infoview. For example, a tactic state display.
The type `PanelWidgetProps` represents the props passed to a panel widget.
The TypeScript version is exported as `PanelWidgetProps` from `@leanprover/infoview`.

Note that to be a good citizen which doesn't mess up the infoview layout,
a panel widget should be a block element, and should provide some way to collapse it,
for example by using `<details>` as the top-level tag. -/
structure PanelWidgetProps : Type where
  /-- Cursor position in the file at which the widget is being displayed. -/
  pos : Lsp.Position
  /-- The current tactic-mode goals. -/
  goals : Array Widget.InteractiveGoal
  /-- The current term-mode goal, if any. -/
  termGoal? : Option Widget.InteractiveTermGoal
  /-- Locations currently selected in the goal state. -/
  selectedLocations : Array SubExpr.GoalsLocation
  deriving Server.RpcEncodable

/-- Display the selected panel widgets in the nested tactic script. For example,
assuming we have written a `GeometryDisplay` component,
```lean
by with_panel_widgets [GeometryDisplay]
  simp
  rfl
```
will show the geometry display alongside the usual tactic state throughout the proof. -/
syntax (name := withPanelWidgetsTacticStx)
  "with_panel_widgets" "[" Widget.widgetInstanceSpec,+ "]" tacticSeq : tactic

/-- Elaborator for `with_panel_widgets`: registers every listed widget instance
at this syntax position, then runs the nested tactic sequence. -/
@[tactic withPanelWidgetsTacticStx]
def withPanelWidgets : Tactic
  | stx@`(tactic| with_panel_widgets [ $specs,* ] $seq) => do
    specs.getElems.forM fun specStx => do
      let spec ← Widget.elabWidgetInstanceSpec specStx
      let wi ← Widget.evalWidgetInstance spec
      Widget.savePanelWidgetInfo wi.javascriptHash wi.props stx
    evalTacticSeq seq
  | _ => throwUnsupportedSyntax

end ProofWidgets
.lake/packages/proofwidgets/ProofWidgets/Component/Panel/SelectionPanel.lean
module

public meta import Lean.Meta.ExprLens
public meta import ProofWidgets.Component.Panel.Basic
public meta import ProofWidgets.Presentation.Expr

public meta section

-- Needed for RPC calls in SelectionPanel
open ProofWidgets in
/-- Save the expression corresponding to a goals location. -/
def Lean.SubExpr.GoalsLocation.saveExprWithCtx (loc : GoalsLocation) : MetaM ExprWithCtx :=
  let mvarId := loc.mvarId
  match loc.loc with
  -- A hypothesis itself: save its free variable.
  | .hyp fv => mvarId.withContext <| ExprWithCtx.save (mkFVar fv)
  -- A subterm of a hypothesis' type.
  | .hypType fv pos => mvarId.withContext do
    let tp ← Meta.inferType (mkFVar fv)
    Meta.viewSubexpr (visit := fun _ => ExprWithCtx.save) pos tp
  -- A subterm of a let-bound hypothesis' value.
  | .hypValue fv pos => mvarId.withContext do
    let some val ← fv.getValue?
      | throwError "fvar {mkFVar fv} is not a let-binding"
    Meta.viewSubexpr (visit := fun _ => ExprWithCtx.save) pos val
  -- A subterm of the goal.
  | .target pos => mvarId.withContext do
    let tp ← Meta.inferType (mkMVar mvarId)
    Meta.viewSubexpr (visit := fun _ => ExprWithCtx.save) pos tp

namespace ProofWidgets
open Lean Server

/-- Parameters for `goalsLocationsToExprs`: goal locations paired with their elaboration
contexts. -/
structure GoalsLocationsToExprsParams where
  locations : Array (WithRpcRef Elab.ContextInfo × SubExpr.GoalsLocation)
  deriving RpcEncodable

/-- Response of `goalsLocationsToExprs`: one saved expression per input location. -/
structure GoalsLocationsToExprsResponse where
  exprs : Array (WithRpcRef ExprWithCtx)
  deriving RpcEncodable

/-- Compute expressions corresponding to the given `GoalsLocation`s. -/
@[server_rpc_method]
def goalsLocationsToExprs (args : GoalsLocationsToExprsParams) :
    RequestM (RequestTask GoalsLocationsToExprsResponse) :=
  RequestM.asTask do
    let mut exprs := #[]
    for ⟨ref, loc⟩ in args.locations do
      let ci := ref.val
      exprs := exprs.push (← WithRpcRef.mk (← ci.runMetaM {} loc.saveExprWithCtx))
    return { exprs }

/-- Display a list of all expressions selected in the goal state,
with a choice of which `Expr` presenter should be used to display
each of those expressions. Expressions can be selected using shift-click. -/
@[widget_module]
def SelectionPanel : Component PanelWidgetProps where
  javascript := include_str ".." / ".." / ".."
    / ".lake" / "build" / "js" / "presentSelection.js"

end ProofWidgets