source stringlengths 17 118 | lean4 stringlengths 0 335k |
|---|---|
.lake/packages/mathlib/Mathlib/Tactic/CongrExclamation.lean | import Lean.Elab.Tactic.Config
import Lean.Elab.Tactic.RCases
import Lean.Meta.Tactic.Assumption
import Lean.Meta.Tactic.Rfl
import Mathlib.Lean.Meta.CongrTheorems
import Mathlib.Logic.Basic
/-!
# The `congr!` tactic
This is a more powerful version of the `congr` tactic that knows about more congruence lemmas and
can apply to more situations. It is similar to the `congr'` tactic from Mathlib 3.
The `congr!` tactic is used by the `convert` and `convert_to` tactics.
See the syntax docstring for more details.
-/
universe u v
open Lean Meta Elab Tactic
initialize registerTraceClass `congr!
initialize registerTraceClass `congr!.synthesize
/-- The configuration for the `congr!` tactic. -/
structure Congr!.Config where
/-- If `closePre := true`, then try to close goals before applying congruence lemmas
using tactics such as `rfl` and `assumption. These tactics are applied with the
transparency level specified by `preTransparency`, which is `.reducible` by default. -/
closePre : Bool := true
/-- If `closePost := true`, then try to close goals that remain after no more congruence
lemmas can be applied, using the same tactics as `closePre`. These tactics are applied
with current tactic transparency level. -/
closePost : Bool := true
/-- The transparency level to use when applying a congruence theorem.
By default this is `.reducible`, which prevents unfolding of most definitions. -/
transparency : TransparencyMode := TransparencyMode.reducible
/-- The transparency level to use when trying to close goals before applying congruence lemmas.
This includes trying to prove the goal by `rfl` and using the `assumption` tactic.
By default this is `.reducible`, which prevents unfolding of most definitions. -/
preTransparency : TransparencyMode := TransparencyMode.reducible
/-- For passes that synthesize a congruence lemma using one side of the equality,
we run the pass both for the left-hand side and the right-hand side. If `preferLHS` is `true`
then we start with the left-hand side.
This can be used to control which side's definitions are expanded when applying the
congruence lemma (if `preferLHS = true` then the RHS can be expanded). -/
preferLHS : Bool := true
/-- Allow both sides to be partial applications.
When false, given an equality `f a b = g x y z` this means we never consider
proving `f a = g x y`.
In this case, we might still consider `f = g x` if a pass generates a congruence lemma using the
left-hand side. Use `sameFun := true` to ensure both sides are applications
of the same function (making it be similar to the `congr` tactic). -/
partialApp : Bool := true
/-- Whether to require that both sides of an equality be applications of defeq functions.
That is, if true, `f a = g x` is only considered if `f` and `g` are defeq (making it be similar
to the `congr` tactic). -/
sameFun : Bool := false
/-- The maximum number of arguments to consider when doing congruence of function applications.
For example, with `f a b c = g w x y z`, setting `maxArgs := some 2` means it will only consider
either `f a b = g w x y` and `c = z` or `f a = g w x`, `b = y`, and `c = z`. Setting
`maxArgs := none` (the default) means no limit.
When the functions are dependent, `maxArgs` can prevent congruence from working at all.
In `Fintype.card α = Fintype.card β`, one needs to have `maxArgs` at `2` or higher since
there is a `Fintype` instance argument that depends on the first.
When there aren't such dependency issues, setting `maxArgs := some 1` causes `congr!` to
do congruence on a single argument at a time. This can be used in conjunction with the
iteration limit to control exactly how many arguments are to be processed by congruence. -/
maxArgs : Option Nat := none
/-- For type arguments that are implicit or have forward dependencies, whether or not `congr!`
should generate equalities even if the types do not look plausibly equal.
We have a heuristic in the main congruence generator that types
`α` and `β` are *plausibly equal* according to the following algorithm:
- If the types are both propositions, they are plausibly equal (`Iff`s are plausible).
- If the types are from different universes, they are not plausibly equal.
- Suppose in whnf we have `α = f a₁ ... aₘ` and `β = g b₁ ... bₘ`. If `f` is not definitionally
equal to `g` or `m ≠ n`, then `α` and `β` are not plausibly equal.
- If there is some `i` such that `aᵢ` and `bᵢ` are not plausibly equal, then `α` and `β` are
not plausibly equal.
- Otherwise, `α` and `β` are plausibly equal.
The purpose of this is to prevent considering equalities like `ℕ = ℤ` while allowing equalities
such as `Fin n = Fin m` or `Subtype p = Subtype q` (so long as these are subtypes of the
same type).
The way this is implemented is that when the congruence generator is comparing arguments when
looking at an equality of function applications, it marks a function parameter as "fixed" if the
provided arguments are types that are not plausibly equal. The effect of this is that congruence
succeeds only if those arguments are defeq at `transparency` transparency. -/
typeEqs : Bool := false
/-- As a last pass, perform eta expansion of both sides of an equality. For example,
this transforms a bare `HAdd.hAdd` into `fun x y => x + y`. -/
etaExpand : Bool := false
/-- Whether to use the congruence generator that is used by `simp` and `congr`. This generator
is more strict, and it does not respect all configuration settings. It does respect
`preferLHS`, `partialApp` and `maxArgs` and transparency settings. It acts as if `sameFun := true`
and it ignores `typeEqs`. -/
useCongrSimp : Bool := false
/-- Whether to use a special congruence lemma for `BEq` instances.
This synthesizes `LawfulBEq` instances to discharge equalities of `BEq` instances. -/
beqEq : Bool := true
/-- A configuration option that makes `congr!` do the sorts of aggressive unfoldings that `congr`
does while also similarly preventing `congr!` from considering partial applications or congruences
between different functions being applied. -/
def Congr!.Config.unfoldSameFun : Congr!.Config where
partialApp := false
sameFun := true
transparency := .default
preTransparency := .default
/-- Whether the given number of arguments is allowed to be considered. -/
def Congr!.Config.numArgsOk (config : Config) (numArgs : Nat) : Bool :=
numArgs ≤ config.maxArgs.getD numArgs
/-- According to the configuration, how many of the arguments in `numArgs` should be considered. -/
def Congr!.Config.maxArgsFor (config : Config) (numArgs : Nat) : Nat :=
min numArgs (config.maxArgs.getD numArgs)
/--
Asserts the given congruence theorem as fresh hypothesis, and then applies it.
Return the `fvarId` for the new hypothesis and the new subgoals.
We apply it with transparency settings specified by `Congr!.Config.transparency`.
-/
private def applyCongrThm?
(config : Congr!.Config) (mvarId : MVarId) (congrThmType congrThmProof : Expr) :
MetaM (List MVarId) := do
trace[congr!] "trying to apply congr lemma {congrThmType}"
try
let mvarId ← mvarId.assert (← mkFreshUserName `h_congr_thm) congrThmType congrThmProof
let (fvarId, mvarId) ← mvarId.intro1P
let mvarIds ← withTransparency config.transparency <|
mvarId.apply (mkFVar fvarId) { synthAssignedInstances := false }
mvarIds.mapM fun mvarId => mvarId.tryClear fvarId
catch e =>
withTraceNode `congr! (fun _ => pure m!"failed to apply congr lemma") do
trace[congr!] "{e.toMessageData}"
throw e
/-- Returns whether or not it's reasonable to consider an equality between types `ty1` and `ty2`.
The heuristic is the following:
- If `ty1` and `ty2` are in `Prop`, then yes.
- If in whnf both `ty1` and `ty2` have the same head and if (recursively) it's reasonable to
consider an equality between corresponding type arguments, then yes.
- Otherwise, no.
This helps keep congr from going too far and generating hypotheses like `ℝ = ℤ`.
To keep things from going out of control, there is a `maxDepth`. Additionally, if we do the check
with `maxDepth = 0` then the heuristic answers "no". -/
def Congr!.plausiblyEqualTypes (ty1 ty2 : Expr) (maxDepth : Nat := 5) : MetaM Bool :=
match maxDepth with
| 0 => return false
| maxDepth + 1 => do
-- Props are plausibly equal
if (← isProp ty1) && (← isProp ty2) then
return true
-- Types from different type universes are not plausibly equal.
-- This is redundant, but it saves carrying out the remaining checks.
unless ← withNewMCtxDepth <| isDefEq (← inferType ty1) (← inferType ty2) do
return false
-- Now put the types into whnf, check they have the same head, and then recurse on arguments
let ty1 ← whnfD ty1
let ty2 ← whnfD ty2
unless ← withNewMCtxDepth <| isDefEq ty1.getAppFn ty2.getAppFn do
return false
for arg1 in ty1.getAppArgs, arg2 in ty2.getAppArgs do
if (← isType arg1) && (← isType arg2) then
unless ← plausiblyEqualTypes arg1 arg2 maxDepth do
return false
return true
/--
This is like `Lean.MVarId.hcongr?` but (1) looks at both sides when generating the congruence lemma
and (2) inserts additional hypotheses from equalities from previous arguments.
It uses `Lean.Meta.mkRichHCongr` to generate the congruence lemmas.
If the goal is an `Eq`, it uses `eq_of_heq` first.
As a backup strategy, it uses the LHS/RHS method like in `Lean.MVarId.congrSimp?`
(where `Congr!.Config.preferLHS` determines which side to try first). This uses a particular side
of the target, generates the congruence lemma, then tries applying it. This can make progress
with higher transparency settings. To help the unifier, in this mode it assumes both sides have the
exact same function.
-/
partial
def Lean.MVarId.smartHCongr? (config : Congr!.Config) (mvarId : MVarId) :
MetaM (Option (List MVarId)) :=
mvarId.withContext do
mvarId.checkNotAssigned `congr!
commitWhenSome? do
let mvarId ← mvarId.eqOfHEq
let some (_, lhs, _, rhs) := (← withReducible mvarId.getType').heq? | return none
if let some mvars ← loop mvarId 0 lhs rhs [] [] then
return mvars
-- The "correct" behavior failed. However, it's often useful
-- to apply congruence lemmas while unfolding definitions, which is what the
-- basic `congr` tactic does due to limitations in how congruence lemmas are generated.
-- We simulate this behavior here by generating congruence lemmas for the LHS and RHS and
-- then applying them.
trace[congr!] "Default smartHCongr? failed, trying LHS/RHS method"
let (fst, snd) := if config.preferLHS then (lhs, rhs) else (rhs, lhs)
if let some mvars ← forSide mvarId fst then
return mvars
else if let some mvars ← forSide mvarId snd then
return mvars
else
return none
where
loop (mvarId : MVarId) (numArgs : Nat) (lhs rhs : Expr) (lhsArgs rhsArgs : List Expr) :
MetaM (Option (List MVarId)) :=
match lhs.cleanupAnnotations, rhs.cleanupAnnotations with
| .app f a, .app f' b => do
if not (config.numArgsOk (numArgs + 1)) then
return none
let lhsArgs' := a :: lhsArgs
let rhsArgs' := b :: rhsArgs
-- We try to generate a theorem for the maximal number of arguments
if let some mvars ← loop mvarId (numArgs + 1) f f' lhsArgs' rhsArgs' then
return mvars
-- That failing, we now try for the present number of arguments.
if not config.partialApp && f.isApp && f'.isApp then
-- It's a partial application on both sides though.
return none
-- The congruence generator only handles the case where both functions have
-- definitionally equal types.
unless ← withNewMCtxDepth <| isDefEq (← inferType f) (← inferType f') do
return none
let funDefEq ← withReducible <| withNewMCtxDepth <| isDefEq f f'
if config.sameFun && not funDefEq then
return none
let info ← getFunInfoNArgs f (numArgs + 1)
let mut fixed : Array Bool := #[]
for larg in lhsArgs', rarg in rhsArgs', pinfo in info.paramInfo do
if !config.typeEqs && (!pinfo.isExplicit || pinfo.hasFwdDeps) then
-- When `typeEqs = false` then for non-explicit arguments or
-- arguments with forward dependencies, we want type arguments
-- to be plausibly equal.
if ← isType larg then
-- ^ since `f` and `f'` have defeq types, this implies `isType rarg`.
unless ← Congr!.plausiblyEqualTypes larg rarg do
fixed := fixed.push true
continue
fixed := fixed.push (← withReducible <| withNewMCtxDepth <| isDefEq larg rarg)
let cthm ← mkRichHCongr (forceHEq := true) (← inferType f) info
(fixedFun := funDefEq) (fixedParams := fixed)
-- Now see if the congruence theorem actually applies in this situation by applying it!
let (congrThm', congrProof') :=
if funDefEq then
(cthm.type.bindingBody!.instantiate1 f, cthm.proof.beta #[f])
else
(cthm.type.bindingBody!.bindingBody!.instantiateRev #[f, f'],
cthm.proof.beta #[f, f'])
observing? <| applyCongrThm? config mvarId congrThm' congrProof'
| _, _ => return none
forSide (mvarId : MVarId) (side : Expr) : MetaM (Option (List MVarId)) := do
let side := side.cleanupAnnotations
if not side.isApp then return none
let numArgs := config.maxArgsFor side.getAppNumArgs
if not config.partialApp && numArgs < side.getAppNumArgs then
return none
let mut f := side
for _ in [:numArgs] do
f := f.appFn!'
let info ← getFunInfoNArgs f numArgs
let mut fixed : Array Bool := #[]
if !config.typeEqs then
-- We need some strategy for fixed parameters to keep `forSide` from applying
-- in cases where `Congr!.possiblyEqualTypes` suggested not to in the previous pass.
for pinfo in info.paramInfo, arg in side.getAppArgs do
if pinfo.isProp || !(← isType arg) then
fixed := fixed.push false
else if pinfo.isExplicit && !pinfo.hasFwdDeps then
-- It's fine generating equalities for explicit type arguments without forward
-- dependencies. Only allowing these is a little strict, because an argument
-- might be something like `Fin n`. We might consider being able to generate
-- congruence lemmas that only allow equalities where they can plausibly go,
-- but that would take looking at a whole application tree.
fixed := fixed.push false
else
fixed := fixed.push true
let cthm ← mkRichHCongr (forceHEq := true) (← inferType f) info
(fixedFun := true) (fixedParams := fixed)
let congrThm' := cthm.type.bindingBody!.instantiate1 f
let congrProof' := cthm.proof.beta #[f]
observing? <| applyCongrThm? config mvarId congrThm' congrProof'
/--
Like `Lean.MVarId.congr?` but instead of using only the congruence lemma associated to the LHS,
it tries the RHS too, in the order specified by `config.preferLHS`.
It uses `Lean.Meta.mkCongrSimp?` to generate a congruence lemma, like in the `congr` tactic.
Applies the congruence generated congruence lemmas according to `config`.
-/
def Lean.MVarId.congrSimp? (config : Congr!.Config) (mvarId : MVarId) :
MetaM (Option (List MVarId)) :=
mvarId.withContext do
mvarId.checkNotAssigned `congrSimp?
let some (_, lhs, rhs) := (← withReducible mvarId.getType').eq? | return none
let (fst, snd) := if config.preferLHS then (lhs, rhs) else (rhs, lhs)
if let some mvars ← forSide mvarId fst then
return mvars
else if let some mvars ← forSide mvarId snd then
return mvars
else
return none
where
forSide (mvarId : MVarId) (side : Expr) : MetaM (Option (List MVarId)) :=
commitWhenSome? do
let side := side.cleanupAnnotations
if not side.isApp then return none
let numArgs := config.maxArgsFor side.getAppNumArgs
if not config.partialApp && numArgs < side.getAppNumArgs then
return none
let mut f := side
for _ in [:numArgs] do
f := f.appFn!'
let some congrThm ← mkCongrSimpNArgs f numArgs
| return none
observing? <| applyCongrThm? config mvarId congrThm.type congrThm.proof
/-- Like `mkCongrSimp?` but takes in a specific arity. -/
mkCongrSimpNArgs (f : Expr) (nArgs : Nat) : MetaM (Option CongrTheorem) := do
let f := (← Lean.instantiateMVars f).cleanupAnnotations
let info ← getFunInfoNArgs f nArgs
mkCongrSimpCore? f info
(← getCongrSimpKinds f info) (subsingletonInstImplicitRhs := false)
/--
Try applying user-provided congruence lemmas. If any are applicable,
returns a list of new goals.
Tries a congruence lemma associated to the LHS and then, if that failed, the RHS.
-/
def Lean.MVarId.userCongr? (config : Congr!.Config) (mvarId : MVarId) :
MetaM (Option (List MVarId)) :=
mvarId.withContext do
mvarId.checkNotAssigned `userCongr?
let some (lhs, rhs) := (← withReducible mvarId.getType').eqOrIff? | return none
let (fst, snd) := if config.preferLHS then (lhs, rhs) else (rhs, lhs)
if let some mvars ← forSide fst then
return mvars
else if let some mvars ← forSide snd then
return mvars
else
return none
where
forSide (side : Expr) : MetaM (Option (List MVarId)) := do
let side := side.cleanupAnnotations
if not side.isApp then return none
let some name := side.getAppFn.constName? | return none
let congrTheorems := (← getSimpCongrTheorems).get name
-- Note: congruence theorems are provided in decreasing order of priority.
for congrTheorem in congrTheorems do
let res ← observing? do
let cinfo ← getConstInfo congrTheorem.theoremName
let us ← cinfo.levelParams.mapM fun _ => mkFreshLevelMVar
let proof := mkConst congrTheorem.theoremName us
let ptype ← instantiateTypeLevelParams cinfo.toConstantVal us
applyCongrThm? config mvarId ptype proof
if let some mvars := res then
return mvars
return none
/--
Try to apply `pi_congr`. This is similar to `Lean.MVar.congrImplies?`.
-/
def Lean.MVarId.congrPi? (mvarId : MVarId) : MetaM (Option (List MVarId)) :=
observing? do withReducible <| mvarId.apply (← mkConstWithFreshMVarLevels `pi_congr)
/--
Try to apply `funext`, but only if it is an equality of two functions where at least one is
a lambda expression.
One thing this check prevents is accidentally applying `funext` to a set equality, but also when
doing congruence we don't want to apply `funext` unnecessarily.
-/
def Lean.MVarId.obviousFunext? (mvarId : MVarId) : MetaM (Option (List MVarId)) :=
mvarId.withContext <| observing? do
let some (_, lhs, rhs) := (← withReducible mvarId.getType').eq? | failure
if not lhs.cleanupAnnotations.isLambda && not rhs.cleanupAnnotations.isLambda then failure
mvarId.apply (← mkConstWithFreshMVarLevels ``funext)
/--
Try to apply `Function.hfunext`, returning the new goals if it succeeds.
Like `Lean.MVarId.obviousFunext?`, we only do so if at least one side of the `HEq` is a lambda.
This prevents unfolding of things like `Set`.
Need to have `Mathlib/Logic/Function/Basic.lean` imported for this to succeed.
-/
def Lean.MVarId.obviousHfunext? (mvarId : MVarId) : MetaM (Option (List MVarId)) :=
mvarId.withContext <| observing? do
let some (_, lhs, _, rhs) := (← withReducible mvarId.getType').heq? | failure
if not lhs.cleanupAnnotations.isLambda && not rhs.cleanupAnnotations.isLambda then failure
mvarId.apply (← mkConstWithFreshMVarLevels `Function.hfunext)
/-- Like `implies_congr` but provides an additional assumption to the second hypothesis.
This is a non-dependent version of `pi_congr` that allows the domains to be different. -/
private theorem implies_congr' {α α' : Sort u} {β β' : Sort v} (h : α = α') (h' : α' → β = β') :
(α → β) = (α' → β') := by
cases h
change (∀ (x : α), (fun _ => β) x) = _
rw [funext h']
/-- A version of `Lean.MVarId.congrImplies?` that uses `implies_congr'`
instead of `implies_congr`. -/
def Lean.MVarId.congrImplies?' (mvarId : MVarId) : MetaM (Option (List MVarId)) :=
observing? do
let [mvarId₁, mvarId₂] ← mvarId.apply (← mkConstWithFreshMVarLevels ``implies_congr')
| throwError "unexpected number of goals"
return [mvarId₁, mvarId₂]
/--
Try to apply `Subsingleton.helim` if the goal is a `HEq`. Tries synthesizing a `Subsingleton`
instance for both the LHS and the RHS.
If successful, this reduces proving `@HEq α x β y` to proving `α = β`.
-/
def Lean.MVarId.subsingletonHelim? (mvarId : MVarId) : MetaM (Option (List MVarId)) :=
mvarId.withContext <| observing? do
mvarId.checkNotAssigned `subsingletonHelim
let some (α, lhs, β, rhs) := (← withReducible mvarId.getType').heq? | failure
withSubsingletonAsFast fun elim => do
let eqmvar ← mkFreshExprSyntheticOpaqueMVar (← mkEq α β) (← mvarId.getTag)
-- First try synthesizing using the left-hand side for the Subsingleton instance
if let some pf ← observing? (mkAppM ``FastSubsingleton.helim #[eqmvar, lhs, rhs]) then
mvarId.assign <| elim pf
return [eqmvar.mvarId!]
let eqsymm ← mkAppM ``Eq.symm #[eqmvar]
-- Second try synthesizing using the right-hand side for the Subsingleton instance
if let some pf ← observing? (mkAppM ``FastSubsingleton.helim #[eqsymm, rhs, lhs]) then
mvarId.assign <| elim (← mkAppM ``HEq.symm #[pf])
return [eqmvar.mvarId!]
failure
/--
Tries to apply `lawful_beq_subsingleton` to prove that two `BEq` instances are equal
by synthesizing `LawfulBEq` instances for both.
-/
def Lean.MVarId.beqInst? (mvarId : MVarId) : MetaM (Option (List MVarId)) :=
observing? do withReducible <| mvarId.applyConst ``lawful_beq_subsingleton
/--
A list of all the congruence strategies used by `Lean.MVarId.congrCore!`.
-/
def Lean.MVarId.congrPasses! :
List (String × (Congr!.Config → MVarId → MetaM (Option (List MVarId)))) :=
[("user congr", userCongr?),
("hcongr lemma", smartHCongr?),
("congr simp lemma", when (·.useCongrSimp) congrSimp?),
("Subsingleton.helim", fun _ => subsingletonHelim?),
("BEq instances", when (·.beqEq) fun _ => beqInst?),
("obvious funext", fun _ => obviousFunext?),
("obvious hfunext", fun _ => obviousHfunext?),
("congr_implies", fun _ => congrImplies?'),
("congr_pi", fun _ => congrPi?)]
where
/--
Conditionally runs a congruence strategy depending on the predicate `b` applied to the config.
-/
when (b : Congr!.Config → Bool) (f : Congr!.Config → MVarId → MetaM (Option (List MVarId)))
(config : Congr!.Config) (mvar : MVarId) : MetaM (Option (List MVarId)) := do
unless b config do return none
f config mvar
structure CongrState where
/-- Accumulated goals that `congr!` could not handle. -/
goals : Array MVarId
/-- Patterns to use when doing intro. -/
patterns : List (TSyntax `rcasesPat)
abbrev CongrMetaM := StateRefT CongrState MetaM
/-- Pop the next pattern from the current state. -/
def CongrMetaM.nextPattern : CongrMetaM (Option (TSyntax `rcasesPat)) := do
modifyGet fun s =>
if let p :: ps := s.patterns then
(p, {s with patterns := ps})
else
(none, s)
private theorem heq_imp_of_eq_imp {α : Sort*} {x y : α} {p : x ≍ y → Prop}
(h : (he : x = y) → p (heq_of_eq he)) (he : x ≍ y) : p he := by
cases he
exact h rfl
private theorem eq_imp_of_iff_imp {x y : Prop} {p : x = y → Prop}
(h : (he : x ↔ y) → p (propext he)) (he : x = y) : p he := by
cases he
exact h Iff.rfl
/--
Does `Lean.MVarId.intros` but then cleans up the introduced hypotheses, removing anything
that is trivial. If there are any patterns in the current `CongrMetaM` state then instead
of `Lean.MVarId.intros` it does `Lean.Elab..Tactic.RCases.rintro`.
Cleaning up includes:
- deleting hypotheses of the form `x ≍ x`, `x = x`, and `x ↔ x`.
- deleting Prop hypotheses that are already in the local context.
- converting `x ≍ y` to `x = y` if possible.
- converting `x = y` to `x ↔ y` if possible.
-/
partial
def Lean.MVarId.introsClean (mvarId : MVarId) : CongrMetaM (List MVarId) :=
loop mvarId
where
heqImpOfEqImp (mvarId : MVarId) : MetaM (Option MVarId) :=
observing? <| withReducible do
let [mvarId] ← mvarId.apply (← mkConstWithFreshMVarLevels ``heq_imp_of_eq_imp) | failure
return mvarId
eqImpOfIffImp (mvarId : MVarId) : MetaM (Option MVarId) :=
observing? <| withReducible do
let [mvarId] ← mvarId.apply (← mkConstWithFreshMVarLevels ``eq_imp_of_iff_imp) | failure
return mvarId
loop (mvarId : MVarId) : CongrMetaM (List MVarId) :=
mvarId.withContext do
let ty ← withReducible <| mvarId.getType'
if ty.isForall then
let mvarId := (← heqImpOfEqImp mvarId).getD mvarId
let mvarId := (← eqImpOfIffImp mvarId).getD mvarId
let ty ← withReducible <| mvarId.getType'
if ty.isArrow then
if ← (isTrivialType ty.bindingDomain!
<||> (← getLCtx).anyM (fun decl => do
return (← Lean.instantiateMVars decl.type) == ty.bindingDomain!)) then
-- Don't intro, clear it
let mvar ← mkFreshExprSyntheticOpaqueMVar ty.bindingBody! (← mvarId.getTag)
mvarId.assign <| .lam .anonymous ty.bindingDomain! mvar .default
return ← loop mvar.mvarId!
if let some patt ← CongrMetaM.nextPattern then
let gs ← Term.TermElabM.run' <| Lean.Elab.Tactic.RCases.rintro #[patt] none mvarId
List.flatten <$> gs.mapM loop
else
let (_, mvarId) ← mvarId.intro1
loop mvarId
else
return [mvarId]
isTrivialType (ty : Expr) : MetaM Bool := do
unless ← Meta.isProp ty do
return false
let ty ← Lean.instantiateMVars ty
if let some (lhs, rhs) := ty.eqOrIff? then
if lhs.cleanupAnnotations == rhs.cleanupAnnotations then
return true
if let some (α, lhs, β, rhs) := ty.heq? then
if α.cleanupAnnotations == β.cleanupAnnotations
&& lhs.cleanupAnnotations == rhs.cleanupAnnotations then
return true
return false
/-- Convert a goal into an `Eq` goal if possible (since we have a better shot at those).
Also, if `tryClose := true`, then try to close the goal using an assumption, `Subsingleton.Elim`,
or definitional equality. -/
def Lean.MVarId.preCongr! (mvarId : MVarId) (tryClose : Bool) : MetaM (Option MVarId) := do
-- Next, turn `HEq` and `Iff` into `Eq`
let mvarId ← mvarId.heqOfEq
if tryClose then
-- This is a good time to check whether we have a relevant hypothesis.
if ← mvarId.assumptionCore then return none
let mvarId ← mvarId.iffOfEq
if tryClose then
-- Now try definitional equality. No need to try `mvarId.hrefl` since we already did `heqOfEq`.
-- We allow synthetic opaque metavariables to be assigned to fill in `x = _` goals that might
-- appear (for example, due to using `convert` with placeholders).
try withAssignableSyntheticOpaque mvarId.refl; return none catch _ => pure ()
-- Now we go for (heterogeneous) equality via subsingleton considerations
if ← Lean.Meta.fastSubsingletonElim mvarId then return none
if ← mvarId.proofIrrelHeq then return none
return some mvarId
def Lean.MVarId.congrCore! (config : Congr!.Config) (mvarId : MVarId) :
MetaM (Option (List MVarId)) := do
mvarId.checkNotAssigned `congr!
let s ← saveState
/- We do `liftReflToEq` here rather than in `preCongr!` since we don't want to commit to it
if there are no relevant congr lemmas. -/
let mvarId ← mvarId.liftReflToEq
for (passName, pass) in congrPasses! do
try
if let some mvarIds ← pass config mvarId then
trace[congr!] "pass succeeded: {passName}"
return mvarIds
catch e =>
throwTacticEx `congr! mvarId
m!"internal error in congruence pass {passName}, {e.toMessageData}"
if ← mvarId.isAssigned then
throwTacticEx `congr! mvarId
s!"congruence pass {passName} assigned metavariable but failed"
restoreState s
trace[congr!] "no passes succeeded"
return none
/-- A pass to clean up after `Lean.MVarId.preCongr!` and `Lean.MVarId.congrCore!`. -/
def Lean.MVarId.postCongr! (config : Congr!.Config) (mvarId : MVarId) : MetaM (Option MVarId) := do
let some mvarId ← mvarId.preCongr! config.closePost | return none
-- Convert `p = q` to `p ↔ q`, which is likely the more useful form:
let mvarId ← mvarId.propext
if config.closePost then
-- `preCongr` sees `p = q`, but now we've put it back into `p ↔ q` form.
if ← mvarId.assumptionCore then return none
if config.etaExpand then
if let some (_, lhs, rhs) := (← withReducible mvarId.getType').eq? then
let lhs' ← Meta.etaExpand lhs
let rhs' ← Meta.etaExpand rhs
return ← mvarId.change (← mkEq lhs' rhs')
return mvarId
/-- A more insistent version of `Lean.MVarId.congrN`.
See the documentation on the `congr!` syntax.
The `depth?` argument controls the depth of the recursion. If `none`, then it uses a reasonably
large bound that is linear in the expression depth. -/
def Lean.MVarId.congrN! (mvarId : MVarId)
(depth? : Option Nat := none) (config : Congr!.Config := {})
(patterns : List (TSyntax `rcasesPat) := []) :
MetaM (List MVarId) := do
let ty ← withReducible <| mvarId.getType'
-- A reasonably large yet practically bounded default recursion depth.
let defaultDepth := min 1000000 (8 * (1 + ty.approxDepth.toNat))
let depth := depth?.getD defaultDepth
let (_, s) ← go depth depth mvarId |>.run {goals := #[], patterns := patterns}
return s.goals.toList
where
post (mvarId : MVarId) : CongrMetaM Unit := do
for mvarId in ← mvarId.introsClean do
if let some mvarId ← mvarId.postCongr! config then
modify (fun s => {s with goals := s.goals.push mvarId})
else
trace[congr!] "Dispatched goal by post-processing step."
go (depth : Nat) (n : Nat) (mvarId : MVarId) : CongrMetaM Unit := do
for mvarId in ← mvarId.introsClean do
if let some mvarId ← withTransparency config.preTransparency <|
mvarId.preCongr! config.closePre then
match n with
| 0 =>
trace[congr!] "At level {depth - n}, doing post-processing. {mvarId}"
post mvarId
| n + 1 =>
trace[congr!] "At level {depth - n}, trying congrCore!. {mvarId}"
if let some mvarIds ← mvarId.congrCore! config then
mvarIds.forM (go depth n)
else
post mvarId
namespace Congr!
declare_config_elab elabConfig Config
/--
Equates pieces of the left-hand side of a goal to corresponding pieces of the right-hand side by
recursively applying congruence lemmas. For example, with `⊢ f as = g bs` we could get
two goals `⊢ f = g` and `⊢ as = bs`.
Syntax:
```
congr!
congr! n
congr! with x y z
congr! n with x y z
```
Here, `n` is a natural number and `x`, `y`, `z` are `rintro` patterns (like `h`, `rfl`, `⟨x, y⟩`,
`_`, `-`, `(h | h)`, etc.).
The `congr!` tactic is similar to `congr` but is more insistent in trying to equate left-hand sides
to right-hand sides of goals. Here is a list of things it can try:
- If `R` in `⊢ R x y` is a reflexive relation, it will convert the goal to `⊢ x = y` if possible.
The list of reflexive relations is maintained using the `@[refl]` attribute.
As a special case, `⊢ p ↔ q` is converted to `⊢ p = q` during congruence processing and then
returned to `⊢ p ↔ q` form at the end.
- If there is a user congruence lemma associated to the goal (for instance, a `@[congr]`-tagged
lemma applying to `⊢ List.map f xs = List.map g ys`), then it will use that.
- It uses a congruence lemma generator at least as capable as the one used by `congr` and `simp`.
If there is a subexpression that can be rewritten by `simp`, then `congr!` should be able
to generate an equality for it.
- It can do congruences of pi types using lemmas like `implies_congr` and `pi_congr`.
- Before applying congruences, it will run the `intros` tactic automatically.
The introduced variables can be given names using a `with` clause.
This helps when congruence lemmas provide additional assumptions in hypotheses.
- When there is an equality between functions, so long as at least one is obviously a lambda, we
apply `funext` or `Function.hfunext`, which allows for congruence of lambda bodies.
- It can try to close goals using a few strategies, including checking
definitional equality, trying to apply `Subsingleton.elim` or `proof_irrel_heq`, and using the
`assumption` tactic.
The optional parameter is the depth of the recursive applications.
This is useful when `congr!` is too aggressive in breaking down the goal.
For example, given `⊢ f (g (x + y)) = f (g (y + x))`,
`congr!` produces the goals `⊢ x = y` and `⊢ y = x`,
while `congr! 2` produces the intended `⊢ x + y = y + x`.
The `congr!` tactic also takes a configuration option, for example
```lean
congr! (transparency := .default) 2
```
This overrides the default, which is to apply congruence lemmas at reducible transparency.
The `congr!` tactic is aggressive with equating two sides of everything. There is a predefined
configuration that uses a different strategy:
Try
```lean
congr! (config := .unfoldSameFun)
```
This only allows congruences between functions applications of definitionally equal functions,
and it applies congruence lemmas at default transparency (rather than just reducible).
This is somewhat like `congr`.
See `Congr!.Config` for all options.
-/
syntax (name := congr!) "congr!" Parser.Tactic.optConfig (ppSpace num)?
(" with" (ppSpace colGt rintroPat)*)? : tactic
elab_rules : tactic
| `(tactic| congr! $cfg:optConfig $[$n]? $[with $ps?*]?) => do
let config ← elabConfig cfg
let patterns := (Lean.Elab.Tactic.RCases.expandRIntroPats (ps?.getD #[])).toList
liftMetaTactic fun g ↦
let depth := n.map (·.getNat)
g.congrN! depth config patterns
end Congr! |
.lake/packages/mathlib/Mathlib/Tactic/GeneralizeProofs.lean | import Mathlib.Tactic.Linter.DeprecatedModule
import Batteries.Tactic.GeneralizeProofs
deprecated_module (since := "2025-11-09") |
.lake/packages/mathlib/Mathlib/Tactic/Set.lean | import Mathlib.Init
import Lean.Elab.Tactic.ElabTerm
/-!
# The `set` tactic
This file defines the `set` tactic and its variant `set!`.
`set a := t with h` is a variant of `let a := t`. It adds the hypothesis `h : a = t` to
the local context and replaces `t` with `a` everywhere it can.
`set a := t with ← h` will add `h : t = a` instead.
`set! a := t with h` does not do any replacing.
-/
namespace Mathlib.Tactic
open Lean Elab Elab.Tactic Meta
syntax setArgsRest := ppSpace ident (" : " term)? " := " term (" with " "← "? ident)?
-- This is called `setTactic` rather than `set`
-- as we sometimes refer to `MonadStateOf.set` from inside `Mathlib.Tactic`.
syntax (name := setTactic) "set" "!"? setArgsRest : tactic
macro "set!" rest:setArgsRest : tactic => `(tactic| set ! $rest:setArgsRest)
/--
`set a := t with h` is a variant of `let a := t`. It adds the hypothesis `h : a = t` to
the local context and replaces `t` with `a` everywhere it can.
`set a := t with ← h` will add `h : t = a` instead.
`set! a := t with h` does not do any replacing.
```lean
example (x : Nat) (h : x + x - x = 3) : x + x - x = 3 := by
set y := x with ← h2
sorry
/-
x : Nat
y : Nat := x
h : y + y - y = 3
h2 : x = y
⊢ y + y - y = 3
-/
```
-/
elab_rules : tactic
| `(tactic| set%$tk $[!%$rw]? $a:ident $[: $ty:term]? := $val:term $[with $[←%$rev]? $h:ident]?) =>
withMainContext do
let (ty, vale) ← match ty with
| some ty =>
let ty ← Term.elabType ty
pure (ty, ← elabTermEnsuringType val ty)
| none =>
let val ← elabTerm val none
pure (← inferType val, val)
let fvar ← liftMetaTacticAux fun goal ↦ do
let (fvar, goal) ← (← goal.define a.getId ty vale).intro1P
pure (fvar, [goal])
withMainContext <|
Term.addTermInfo' (isBinder := true) a (mkFVar fvar)
if rw.isNone then
evalTactic (← `(tactic| try rewrite [show $(← Term.exprToSyntax vale) = $a from rfl] at *))
match h, rev with
| some h, some none =>
evalTactic (← `(tactic| have%$tk
$h : $a = ($(← Term.exprToSyntax vale) : $(← Term.exprToSyntax ty)) := rfl))
| some h, some (some _) =>
evalTactic (← `(tactic| have%$tk
$h : ($(← Term.exprToSyntax vale) : $(← Term.exprToSyntax ty)) = $a := rfl))
| _, _ => pure ()
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Spread.lean | import Mathlib.Init
import Lean.Elab.Binders
/-!
# Macro for spread syntax (`__ := instSomething`) in structures.
-/
open Lean Parser.Term Macro
/-
This adds support for structure instance spread syntax.
```lean
instance : Foo α where
__ := instSomething -- include fields from `instSomething`
example : Foo α := {
__ := instSomething -- include fields from `instSomething`
}
```
-/
/--
Mathlib extension to preserve old behavior of structure instances.
We need to be able to `let` some implementation details that are still local instances.
Normally implementation detail fvars are not local instances,
but we need them to be implementation details so that `simp` will see them as "reducible" fvars.
-/
syntax (name := letImplDetailStx) "let_impl_detail " ident " := " term "; " term : term
open Lean Elab Term Meta
@[term_elab letImplDetailStx, inherit_doc letImplDetailStx]
def elabLetImplDetail : TermElab := fun stx expectedType? =>
match stx with
| `(let_impl_detail $id := $valStx; $body) => do
let val ← elabTerm valStx none
let type ← inferType val
trace[Elab.let.decl] "{id.getId} : {type} := {val}"
let result ←
withLetDecl id.getId (kind := .default) type val fun x => do
addLocalVarInfo id x
let lctx ← getLCtx
let lctx := lctx.modifyLocalDecl x.fvarId! fun decl => decl.setKind .implDetail
withLCtx lctx (← getLocalInstances) do
let body ← elabTermEnsuringType body expectedType?
let body ← instantiateMVars body
mkLetFVars #[x] body (usedLetOnly := false)
pure result
| _ => throwUnsupportedSyntax
macro_rules
| `({ $[$srcs,* with]? $[$fields],* $[: $ty?]? }) => show MacroM Term from do
let mut spreads := #[]
let mut newFields := #[]
for field in fields do
match field.1 with
| `(structInstField| $name:ident := $arg) =>
if name.getId.eraseMacroScopes == `__ then do
spreads := spreads.push arg
else
newFields := newFields.push field
| _ =>
throwUnsupported
if spreads.isEmpty then throwUnsupported
let spreadData ← withFreshMacroScope <| spreads.mapIdxM fun i spread => do
let n := Name.num `__spread i
return (mkIdent <| ← Macro.addMacroScope n, spread)
let srcs := (srcs.map (·.getElems)).getD {} ++ spreadData.map Prod.fst
let body ← `({ $srcs,* with $[$newFields],* $[: $ty?]? })
spreadData.foldrM (init := body) fun (id, val) body => `(let_impl_detail $id := $val; $body) |
.lake/packages/mathlib/Mathlib/Tactic/RewriteSearch.lean | import Mathlib.Init
import Lean.Elab.Tactic.Basic
/-!
# The `rw_search` tactic has been removed from Mathlib.
-/
namespace Mathlib.Tactic.RewriteSearch
open Lean Meta
open Elab Tactic Lean.Parser.Tactic
/--
`rw_search` has been removed from Mathlib.
-/
syntax "rw_search" (rewrites_forbidden)? : tactic
elab_rules : tactic |
`(tactic| rw_search $[[ $[-$forbidden],* ]]?) => withMainContext do
logError "The `rw_search` tactic has been removed from Mathlib, as it was unmaintained,\
broken on v4.23.0, and rarely used."
end RewriteSearch
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/CasesM.lean | import Mathlib.Init
import Lean.Elab.Tactic.Conv.Pattern
/-!
# `casesm`, `cases_type`, `constructorm` tactics
These tactics implement repeated `cases` / `constructor` on anything satisfying a predicate.
-/
namespace Lean.MVarId
/--
Core tactic for `casesm` and `cases_type`. Calls `cases` on all fvars in `g` for which
`matcher ldecl.type` returns true.
* `recursive`: if true, it calls itself repeatedly on the resulting subgoals
* `allowSplit`: if false, it will skip any hypotheses where `cases` returns more than one subgoal.
* `throwOnNoMatch`: if true, then throws an error if no match is found
-/
partial def casesMatching (matcher : Expr → MetaM Bool) (recursive := false) (allowSplit := true)
(throwOnNoMatch := true) (g : MVarId) : MetaM (List MVarId) := do
let result := (← go g).toList
if throwOnNoMatch && result == [g] then
throwError "no match"
else
return result
where
/-- Auxiliary for `casesMatching`. Accumulates generated subgoals in `acc`. -/
go (g : MVarId) (acc : Array MVarId := #[]) : MetaM (Array MVarId) :=
g.withContext do
for ldecl in ← getLCtx do
if ldecl.isImplementationDetail then continue
if ← matcher ldecl.type then
let mut acc := acc
let subgoals ← if allowSplit then
g.cases ldecl.fvarId
else
let s ← saveState
let subgoals ← g.cases ldecl.fvarId (givenNames := #[⟨true, [ldecl.userName]⟩])
if subgoals.size > 1 then
s.restore
continue
else
pure subgoals
for subgoal in subgoals do
-- If only one new hypothesis is generated, rename it to the original name.
let g ← match subgoal.fields with
| #[.fvar fvarId] => subgoal.mvarId.rename fvarId ldecl.userName
| _ => pure subgoal.mvarId
if recursive then
acc ← go g acc
else
acc := acc.push g
return acc
return (acc.push g)
def casesType (heads : Array Name) (recursive := false) (allowSplit := true) :
MVarId → MetaM (List MVarId) :=
let matcher ty := pure <|
if let .const n .. := ty.headBeta.getAppFn then heads.contains n else false
casesMatching matcher recursive allowSplit
end Lean.MVarId
namespace Mathlib.Tactic
open Lean Meta Elab Tactic MVarId
/-- Elaborate a list of terms with holes into a list of patterns. -/
def elabPatterns (pats : Array Term) : TermElabM (Array AbstractMVarsResult) :=
withTheReader Term.Context (fun ctx ↦ { ctx with ignoreTCFailures := true }) <|
Term.withoutErrToSorry <|
pats.mapM fun p ↦ Term.withoutModifyingElabMetaStateWithInfo do
withRef p <| abstractMVars (← Term.elabTerm p none)
/-- Returns true if any of the patterns match the expression. -/
def matchPatterns (pats : Array AbstractMVarsResult) (e : Expr) : MetaM Bool := do
let e ← instantiateMVars e
pats.anyM fun p ↦ return (← Conv.matchPattern? p e) matches some (_, #[])
/-- Common implementation of `casesm` and `casesm!`. -/
def elabCasesM (pats : Array Term) (recursive allowSplit : Bool) : TacticM Unit := do
let pats ← elabPatterns pats
liftMetaTactic (casesMatching (matchPatterns pats) recursive allowSplit)
/--
* `casesm p` applies the `cases` tactic to a hypothesis `h : type`
if `type` matches the pattern `p`.
* `casesm p_1, ..., p_n` applies the `cases` tactic to a hypothesis `h : type`
if `type` matches one of the given patterns.
* `casesm* p` is a more efficient and compact version of `· repeat casesm p`.
It is more efficient because the pattern is compiled once.
* `casesm! p` only applies `cases` if the number of resulting subgoals is <= 1.
Example: The following tactic destructs all conjunctions and disjunctions in the current context.
```
casesm* _ ∨ _, _ ∧ _
```
-/
elab (name := casesM) "casesm" recursive:"*"? ppSpace pats:term,+ : tactic => do
elabCasesM pats recursive.isSome true
@[inherit_doc casesM]
elab (name := casesm!) "casesm!" recursive:"*"? ppSpace pats:term,+ : tactic => do
elabCasesM pats recursive.isSome false
/-- Common implementation of `cases_type` and `cases_type!`. -/
def elabCasesType (heads : Array Ident)
(recursive := false) (allowSplit := true) : TacticM Unit := do
let heads ← heads.mapM (fun stx => realizeGlobalConstNoOverloadWithInfo stx)
liftMetaTactic (casesType heads recursive allowSplit)
/--
* `cases_type I` applies the `cases` tactic to a hypothesis `h : (I ...)`
* `cases_type I_1 ... I_n` applies the `cases` tactic to a hypothesis
`h : (I_1 ...)` or ... or `h : (I_n ...)`
* `cases_type* I` is shorthand for `· repeat cases_type I`
* `cases_type! I` only applies `cases` if the number of resulting subgoals is <= 1.
Example: The following tactic destructs all conjunctions and disjunctions in the current goal.
```
cases_type* Or And
```
-/
elab (name := casesType) "cases_type" recursive:"*"? heads:(ppSpace colGt ident)+ : tactic =>
elabCasesType heads recursive.isSome true
@[inherit_doc casesType]
elab (name := casesType!) "cases_type!" recursive:"*"? heads:(ppSpace colGt ident)+ : tactic =>
elabCasesType heads recursive.isSome false
/--
Core tactic for `constructorm`. Calls `constructor` on all subgoals for which
`matcher ldecl.type` returns true.
* `recursive`: if true, it calls itself repeatedly on the resulting subgoals
* `throwOnNoMatch`: if true, throws an error if no match is found
-/
partial def constructorMatching (g : MVarId) (matcher : Expr → MetaM Bool)
(recursive := false) (throwOnNoMatch := true) : MetaM (List MVarId) := do
let result ←
(if recursive then (do
let result ← go g
pure result.toList)
else
(g.withContext do
if ← matcher (← g.getType) then g.constructor else pure [g]))
if throwOnNoMatch && [g] == result then
throwError "no match"
else
return result
where
/-- Auxiliary for `constructorMatching`. Accumulates generated subgoals in `acc`. -/
go (g : MVarId) (acc : Array MVarId := #[]) : MetaM (Array MVarId) :=
g.withContext do
if ← matcher (← g.getType) then
let mut acc := acc
for g' in ← g.constructor do
acc ← go g' acc
return acc
return (acc.push g)
/--
* `constructorm p_1, ..., p_n` applies the `constructor` tactic to the main goal
if `type` matches one of the given patterns.
* `constructorm* p` is a more efficient and compact version of `· repeat constructorm p`.
It is more efficient because the pattern is compiled once.
Example: The following tactic proves any theorem like `True ∧ (True ∨ True)` consisting of
and/or/true:
```
constructorm* _ ∨ _, _ ∧ _, True
```
-/
elab (name := constructorM) "constructorm" recursive:"*"? ppSpace pats:term,+ : tactic => do
let pats ← elabPatterns pats.getElems
liftMetaTactic (constructorMatching · (matchPatterns pats) recursive.isSome)
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Trace.lean | import Mathlib.Init
import Lean.Elab.Tactic.ElabTerm
import Lean.Meta.Eval
/-!
# Defines the `trace` tactic.
-/
open Lean Meta Elab Tactic
/-- Evaluates a term to a string (when possible), and prints it as a trace message. -/
elab (name := Lean.Parser.Tactic.trace) tk:"trace " val:term : tactic => do
let e ← elabTerm (← `(toString $val)) (some (mkConst `String))
logInfoAt tk <|← unsafe evalExpr String (mkConst `String) e |
.lake/packages/mathlib/Mathlib/Tactic/GRewrite.lean | import Mathlib.Tactic.GRewrite.Elab
/-!
# The generalized rewriting tactic
The `grw`/`grewrite` tactic is a generalization of the `rewrite` tactic that works with relations
other than equality. The core implementation of `grewrite` is in the file `Tactic.GRewrite.Core`
-/ |
.lake/packages/mathlib/Mathlib/Tactic/Linter.lean | /-
This is the `Linter`s file: it imports files defining linters.
Most syntax linters, in particular the ones enabled by default, are imported in `Mathlib.Init`;
this file contains all linters not imported in that file.
This file is ignored by `shake`:
* it is in `ignoreAll`, meaning that all its imports are considered necessary;
* it is in `ignoreImport`, meaning that where it is imported, it is considered necessary.
-/
import Mathlib.Tactic.Linter.DeprecatedModule
import Mathlib.Tactic.Linter.HaveLetLinter
import Mathlib.Tactic.Linter.MinImports
import Mathlib.Tactic.Linter.PPRoundtrip
import Mathlib.Tactic.Linter.UpstreamableDecl |
.lake/packages/mathlib/Mathlib/Tactic/Convert.lean | import Mathlib.Tactic.CongrExclamation
/-!
# The `convert` tactic.
-/
open Lean Meta Elab Tactic
/--
Close the goal `g` using `Eq.mp v e`,
where `v` is a metavariable asserting that the type of `g` and `e` are equal.
Then call `MVarId.congrN!` (also using local hypotheses and reflexivity) on `v`,
and return the resulting goals.
With `symm = true`, reverses the equality in `v`, and uses `Eq.mpr v e` instead.
With `depth = some n`, calls `MVarId.congrN! n` instead, with `n` as the max recursion depth.
-/
def Lean.MVarId.convert (e : Expr) (symm : Bool)
(depth : Option Nat := none) (config : Congr!.Config := {})
(patterns : List (TSyntax `rcasesPat) := []) (g : MVarId) :
MetaM (List MVarId) := g.withContext do
let src ← inferType e
let tgt ← g.getType
let v ← mkFreshExprMVar (← mkAppM ``Eq (if symm then #[src, tgt] else #[tgt, src]))
g.assign (← mkAppM (if symm then ``Eq.mp else ``Eq.mpr) #[v, e])
let m := v.mvarId!
m.congrN! depth config patterns
/--
Replaces the type of the local declaration `fvarId` with `typeNew`,
using `Lean.MVarId.congrN!` to prove that the old type of `fvarId` is equal to `typeNew`.
Uses `Lean.MVarId.replaceLocalDecl` to replace the type.
Returns the new goal along with the side goals generated by `congrN!`.
With `symm = true`, reverses the equality,
changing the goal to prove `typeNew` is equal to `typeOld`.
With `depth = some n`, calls `MVarId.congrN! n` instead, with `n` as the max recursion depth.
-/
def Lean.MVarId.convertLocalDecl (g : MVarId) (fvarId : FVarId) (typeNew : Expr) (symm : Bool)
(depth : Option Nat := none) (config : Congr!.Config := {})
(patterns : List (TSyntax `rcasesPat) := []) :
MetaM (MVarId × List MVarId) := g.withContext do
let typeOld ← fvarId.getType
let v ← mkFreshExprMVar (← mkAppM ``Eq
(if symm then #[typeNew, typeOld] else #[typeOld, typeNew]))
let pf ← if symm then mkEqSymm v else pure v
let res ← g.replaceLocalDecl fvarId typeNew pf
let gs ← v.mvarId!.congrN! depth config patterns
return (res.mvarId, gs)
namespace Mathlib.Tactic
/--
The `exact e` and `refine e` tactics require a term `e` whose type is
definitionally equal to the goal. `convert e` is similar to `refine e`,
but the type of `e` is not required to exactly match the
goal. Instead, new goals are created for differences between the type
of `e` and the goal using the same strategies as the `congr!` tactic.
For example, in the proof state
```lean
n : ℕ,
e : Prime (2 * n + 1)
⊢ Prime (n + n + 1)
```
the tactic `convert e using 2` will change the goal to
```lean
⊢ n + n = 2 * n
```
In this example, the new goal can be solved using `ring`.
The `using 2` indicates it should iterate the congruence algorithm up to two times,
where `convert e` would use an unrestricted number of iterations and lead to two
impossible goals: `⊢ HAdd.hAdd = HMul.hMul` and `⊢ n = 2`.
A variant configuration is `convert (config := .unfoldSameFun) e`, which only equates function
applications for the same function (while doing so at the higher `default` transparency).
This gives the same goal of `⊢ n + n = 2 * n` without needing `using 2`.
The `convert` tactic applies congruence lemmas eagerly before reducing,
therefore it can fail in cases where `exact` succeeds:
```lean
def p (n : ℕ) := True
example (h : p 0) : p 1 := by exact h -- succeeds
example (h : p 0) : p 1 := by convert h -- fails, with leftover goal `1 = 0`
```
Limiting the depth of recursion can help with this. For example, `convert h using 1` will work
in this case.
The syntax `convert ← e` will reverse the direction of the new goals
(producing `⊢ 2 * n = n + n` in this example).
Internally, `convert e` works by creating a new goal asserting that
the goal equals the type of `e`, then simplifying it using
`congr!`. The syntax `convert e using n` can be used to control the
depth of matching (like `congr! n`). In the example, `convert e using 1`
would produce a new goal `⊢ n + n + 1 = 2 * n + 1`.
Refer to the `congr!` tactic to understand the congruence operations. One of its many
features is that if `x y : t` and an instance `Subsingleton t` is in scope,
then any goals of the form `x = y` are solved automatically.
Like `congr!`, `convert` takes an optional `with` clause of `rintro` patterns,
for example `convert e using n with x y z`.
The `convert` tactic also takes a configuration option, for example
```lean
convert (config := {transparency := .default}) h
```
These are passed to `congr!`. See `Congr!.Config` for options.
-/
syntax (name := convert) "convert" (Parser.Tactic.config)? " ←"? ppSpace term (" using " num)?
(" with" (ppSpace colGt rintroPat)*)? : tactic
/--
Elaborates `term` ensuring the expected type, allowing stuck metavariables.
Returns stuck metavariables as additional goals.
-/
def elabTermForConvert (term : Syntax) (expectedType? : Option Expr) :
TacticM (Expr × List MVarId) := do
withCollectingNewGoalsFrom (parentTag := ← getMainTag) (tagSuffix := `convert)
(allowNaturalHoles := true) do
-- Allow typeclass inference failures since these will be inferred by unification
-- or else become new goals
withTheReader Term.Context (fun ctx => { ctx with ignoreTCFailures := true }) do
let t ← elabTermEnsuringType (mayPostpone := true) term expectedType?
-- Process everything so that tactics get run, but again allow TC failures
Term.synthesizeSyntheticMVars (postpone := .no) (ignoreStuckTC := true)
return t
elab_rules : tactic
| `(tactic| convert $[$cfg:config]? $[←%$sym]? $term $[using $n]? $[with $ps?*]?) =>
withMainContext do
let config ← Congr!.elabConfig (mkOptionalNode cfg)
let patterns := (Lean.Elab.Tactic.RCases.expandRIntroPats (ps?.getD #[])).toList
let expectedType ← mkFreshExprMVar (mkSort (← getLevel (← getMainTarget)))
let (e, gs) ← elabTermForConvert term expectedType
liftMetaTactic fun g ↦
return (← g.convert e sym.isSome (n.map (·.getNat)) config patterns) ++ gs
-- FIXME restore when `add_tactic_doc` is ported.
-- add_tactic_doc
-- { name := "convert",
-- category := doc_category.tactic,
-- decl_names := [`tactic.interactive.convert],
-- tags := ["congruence"] }
/--
The `convert_to` tactic is for changing the type of the target or a local hypothesis,
but unlike the `change` tactic it will generate equality proof obligations using `congr!`
to resolve discrepancies.
* `convert_to ty` changes the target to `ty`
* `convert_to ty using n` uses `congr! n` instead of `congr! 1`
* `convert_to ty at h` changes the type of the local hypothesis `h` to `ty`.
Any remaining `congr!` goals come first.
Operating on the target, the tactic `convert_to ty using n`
is the same as `convert (?_ : ty) using n`.
The difference is that `convert_to` takes a type but `convert` takes a proof term.
Except for it also being able to operate on local hypotheses,
the syntax for `convert_to` is the same as for `convert`, and it has variations such as
`convert_to ← g` and `convert_to (config := {transparency := .default}) g`.
Note that `convert_to ty at h` may leave a copy of `h` if a later local hypotheses or the target
depends on it, just like in `rw` or `simp`.
-/
syntax (name := convertTo) "convert_to" (Parser.Tactic.config)? " ←"? ppSpace term (" using " num)?
(" with" (ppSpace colGt rintroPat)*)? (Parser.Tactic.location)? : tactic
elab_rules : tactic
| `(tactic| convert_to $[$cfg:config]? $[←%$sym]? $newType $[using $n]?
$[with $ps?*]? $[$loc?:location]?) => do
let n : ℕ := n |>.map (·.getNat) |>.getD 1
let config ← Congr!.elabConfig (mkOptionalNode cfg)
let patterns := (Lean.Elab.Tactic.RCases.expandRIntroPats (ps?.getD #[])).toList
withLocation (expandOptLocation (mkOptionalNode loc?))
(atLocal := fun fvarId ↦ do
let (e, gs) ← elabTermForConvert newType (← inferType (← fvarId.getType))
liftMetaTactic fun g ↦ do
let (g', gs') ← g.convertLocalDecl fvarId e sym.isSome n config patterns
return (gs' ++ (g' :: gs)))
(atTarget := do
let expectedType ← mkFreshExprMVar (mkSort (← getLevel (← getMainTarget)))
let (e, gs) ← elabTermForConvert (← `((id ?_ : $newType))) expectedType
liftMetaTactic fun g ↦
return (← g.convert e sym.isSome n config patterns) ++ gs)
(failed := fun _ ↦ throwError "convert_to failed")
/--
`ac_change g using n` is `convert_to g using n` followed by `ac_rfl`. It is useful for
rearranging/reassociating e.g. sums:
```lean
example (a b c d e f g N : ℕ) : (a + b) + (c + d) + (e + f) + g ≤ N := by
ac_change a + d + e + f + c + g + b ≤ _
-- ⊢ a + d + e + f + c + g + b ≤ N
```
-/
syntax (name := acChange) "ac_change " term (" using " num)? : tactic
macro_rules
| `(tactic| ac_change $t $[using $n]?) => `(tactic| convert_to $t:term $[using $n]? <;> try ac_rfl)
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Peel.lean | import Mathlib.Tactic.Basic
import Mathlib.Order.Filter.Basic
/-!
# The `peel` tactic
`peel h with h' idents*` tries to apply `forall_imp` (or `Exists.imp`, or `Filter.Eventually.mp`,
`Filter.Frequently.mp` and `Filter.Eventually.of_forall`) with the argument `h` and uses `idents*`
to introduce variables with the supplied names, giving the "peeled" argument the name `h'`.
One can provide a numeric argument as in `peel 4 h` which will peel 4 quantifiers off
the expressions automatically name any variables not specifically named in the `with` clause.
In addition, the user may supply a term `e` via `... using e` in order to close the goal
immediately. In particular, `peel h using e` is equivalent to `peel h; exact e`. The `using` syntax
may be paired with any of the other features of `peel`.
-/
namespace Mathlib.Tactic.Peel
open Lean Expr Meta Elab Tactic
/--
Peels matching quantifiers off of a given term and the goal and introduces the relevant variables.
- `peel e` peels all quantifiers (at reducible transparency),
using `this` for the name of the peeled hypothesis.
- `peel e with h` is `peel e` but names the peeled hypothesis `h`.
If `h` is `_` then uses `this` for the name of the peeled hypothesis.
- `peel n e` peels `n` quantifiers (at default transparency).
- `peel n e with x y z ... h` peels `n` quantifiers, names the peeled hypothesis `h`,
and uses `x`, `y`, `z`, and so on to name the introduced variables; these names may be `_`.
If `h` is `_` then uses `this` for the name of the peeled hypothesis.
The length of the list of variables does not need to equal `n`.
- `peel e with x₁ ... xₙ h` is `peel n e with x₁ ... xₙ h`.
There are also variants that apply to an iff in the goal:
- `peel n` peels `n` quantifiers in an iff.
- `peel with x₁ ... xₙ` peels `n` quantifiers in an iff and names them.
Given `p q : ℕ → Prop`, `h : ∀ x, p x`, and a goal `⊢ : ∀ x, q x`, the tactic `peel h with x h'`
will introduce `x : ℕ`, `h' : p x` into the context and the new goal will be `⊢ q x`. This works
with `∃`, as well as `∀ᶠ` and `∃ᶠ`, and it can even be applied to a sequence of quantifiers. Note
that this is a logically weaker setup, so using this tactic is not always feasible.
For a more complex example, given a hypothesis and a goal:
```
h : ∀ ε > (0 : ℝ), ∃ N : ℕ, ∀ n ≥ N, 1 / (n + 1 : ℝ) < ε
⊢ ∀ ε > (0 : ℝ), ∃ N : ℕ, ∀ n ≥ N, 1 / (n + 1 : ℝ) ≤ ε
```
(which differ only in `<`/`≤`), applying `peel h with ε hε N n hn h_peel` will yield a tactic state:
```
h : ∀ ε > (0 : ℝ), ∃ N : ℕ, ∀ n ≥ N, 1 / (n + 1 : ℝ) < ε
ε : ℝ
hε : 0 < ε
N n : ℕ
hn : N ≤ n
h_peel : 1 / (n + 1 : ℝ) < ε
⊢ 1 / (n + 1 : ℝ) ≤ ε
```
and the goal can be closed with `exact h_peel.le`.
Note that in this example, `h` and the goal are logically equivalent statements, but `peel`
*cannot* be immediately applied to show that the goal implies `h`.
In addition, `peel` supports goals of the form `(∀ x, p x) ↔ ∀ x, q x`, or likewise for any
other quantifier. In this case, there is no hypothesis or term to supply, but otherwise the syntax
is the same. So for such goals, the syntax is `peel 1` or `peel with x`, and after which the
resulting goal is `p x ↔ q x`. The `congr!` tactic can also be applied to goals of this form using
`congr! 1 with x`. While `congr!` applies congruence lemmas in general, `peel` can be relied upon
to only apply to outermost quantifiers.
Finally, the user may supply a term `e` via `... using e` in order to close the goal
immediately. In particular, `peel h using e` is equivalent to `peel h; exact e`. The `using` syntax
may be paired with any of the other features of `peel`.
This tactic works by repeatedly applying lemmas such as `forall_imp`, `Exists.imp`,
`Filter.Eventually.mp`, `Filter.Frequently.mp`, and `Filter.Eventually.of_forall`.
-/
syntax (name := peel)
"peel" (num)? (ppSpace colGt term)?
(" with" (ppSpace colGt (ident <|> hole))+)? (usingArg)? : tactic
private lemma and_imp_left_of_imp_imp {p q r : Prop} (h : r → p → q) : r ∧ p → r ∧ q := by tauto
private theorem eventually_imp {α : Type*} {p q : α → Prop} {f : Filter α}
(hq : ∀ (x : α), p x → q x) (hp : ∀ᶠ (x : α) in f, p x) : ∀ᶠ (x : α) in f, q x :=
Filter.Eventually.mp hp (Filter.Eventually.of_forall hq)
private theorem frequently_imp {α : Type*} {p q : α → Prop} {f : Filter α}
(hq : ∀ (x : α), p x → q x) (hp : ∃ᶠ (x : α) in f, p x) : ∃ᶠ (x : α) in f, q x :=
Filter.Frequently.mp hp (Filter.Eventually.of_forall hq)
private theorem eventually_congr {α : Type*} {p q : α → Prop} {f : Filter α}
(hq : ∀ (x : α), p x ↔ q x) : (∀ᶠ (x : α) in f, p x) ↔ ∀ᶠ (x : α) in f, q x := by
congr! 2; exact hq _
private theorem frequently_congr {α : Type*} {p q : α → Prop} {f : Filter α}
(hq : ∀ (x : α), p x ↔ q x) : (∃ᶠ (x : α) in f, p x) ↔ ∃ᶠ (x : α) in f, q x := by
congr! 2; exact hq _
/-- The list of constants that are regarded as being quantifiers. -/
def quantifiers : List Name :=
[``Exists, ``And, ``Filter.Eventually, ``Filter.Frequently]
/-- If `unfold` is false then do `whnfR`, otherwise unfold everything that's not a quantifier,
according to the `quantifiers` list. -/
def whnfQuantifier (p : Expr) (unfold : Bool) : MetaM Expr := do
if unfold then
whnfHeadPred p fun e =>
if let .const n .. := e.getAppFn then
return !(n ∈ quantifiers)
else
return false
else
whnfR p
/-- Throws an error saying `ty` and `target` could not be matched up. -/
def throwPeelError {α : Type} (ty target : Expr) : MetaM α :=
throwError "Tactic 'peel' could not match quantifiers in{indentD ty}\nand{indentD target}"
/-- If `f` is a lambda then use its binding name to generate a new hygienic name,
and otherwise choose a new hygienic name. -/
def mkFreshBinderName (f : Expr) : MetaM Name :=
mkFreshUserName (if let .lam n .. := f then n else `a)
/-- Applies a "peel theorem" with two main arguments, where the first is the new goal
and the second can be filled in using `e`. Then it intros two variables with the
provided names.
If, for example, `goal : ∃ y : α, q y` and `thm := Exists.imp`, the metavariable returned has
type `q x` where `x : α` has been introduced into the context. -/
def applyPeelThm (thm : Name) (goal : MVarId)
(e : Expr) (ty target : Expr) (n : Name) (n' : Name) :
MetaM (FVarId × List MVarId) := do
let new_goal :: ge :: _ ← goal.applyConst thm <|> throwPeelError ty target
| throwError "peel: internal error"
ge.assignIfDefEq e <|> throwPeelError ty target
let (fvars, new_goal) ← new_goal.introN 2 [n, n']
return (fvars[1]!, [new_goal])
/-- This is the core to the `peel` tactic.
It tries to match `e` and `goal` as quantified statements (using `∀` and the quantifiers in
the `quantifiers` list), then applies "peel theorems" using `applyPeelThm`.
We treat `∧` as a quantifier for sake of dealing with quantified statements
like `∃ δ > (0 : ℝ), q δ`, which is notation for `∃ δ, δ > (0 : ℝ) ∧ q δ`. -/
def peelCore (goal : MVarId) (e : Expr) (n? : Option Name) (n' : Name) (unfold : Bool) :
MetaM (FVarId × List MVarId) := goal.withContext do
let ty ← whnfQuantifier (← inferType e) unfold
let target ← whnfQuantifier (← goal.getType) unfold
if ty.isForall && target.isForall then
applyPeelThm ``forall_imp goal e ty target (← n?.getDM (mkFreshUserName target.bindingName!)) n'
else if ty.getAppFn.isConst
&& ty.getAppNumArgs == target.getAppNumArgs
&& ty.getAppFn == target.getAppFn then
match target.getAppFnArgs with
| (``Exists, #[_, p]) =>
applyPeelThm ``Exists.imp goal e ty target (← n?.getDM (mkFreshBinderName p)) n'
| (``And, #[_, _]) =>
applyPeelThm ``and_imp_left_of_imp_imp goal e ty target (← n?.getDM (mkFreshUserName `p)) n'
| (``Filter.Eventually, #[_, p, _]) =>
applyPeelThm ``eventually_imp goal e ty target (← n?.getDM (mkFreshBinderName p)) n'
| (``Filter.Frequently, #[_, p, _]) =>
applyPeelThm ``frequently_imp goal e ty target (← n?.getDM (mkFreshBinderName p)) n'
| _ => throwPeelError ty target
else
throwPeelError ty target
/-- Given a list `l` of names, this peels `num` quantifiers off of the expression `e` and
the main goal and introduces variables with the provided names until the list of names is exhausted.
Note: the name `n?` (with default `this`) is used for the name of the expression `e` with
quantifiers peeled. -/
def peelArgs (e : Expr) (num : Nat) (l : List Name) (n? : Option Name) (unfold : Bool := true) :
TacticM Unit := do
match num with
| 0 => return
| num + 1 =>
let fvarId ← liftMetaTacticAux (peelCore · e l.head? (n?.getD `this) unfold)
peelArgs (.fvar fvarId) num l.tail n?
unless num == 0 do
if let some mvarId ← observing? do (← getMainGoal).clear fvarId then
replaceMainGoal [mvarId]
/-- Similar to `peelArgs` but peels arbitrarily many quantifiers. Returns whether or not
any quantifiers were peeled. -/
partial def peelUnbounded (e : Expr) (n? : Option Name) (unfold : Bool := false) :
TacticM Bool := do
let fvarId? ← observing? <| liftMetaTacticAux (peelCore · e none (n?.getD `this) unfold)
if let some fvarId := fvarId? then
let peeled ← peelUnbounded (.fvar fvarId) n?
if peeled then
if let some mvarId ← observing? do (← getMainGoal).clear fvarId then
replaceMainGoal [mvarId]
return true
else
return false
/-- Peel off a single quantifier from an `↔`. -/
def peelIffAux : TacticM Unit := do
evalTactic (← `(tactic| focus
first | apply forall_congr'
| apply exists_congr
| apply eventually_congr
| apply frequently_congr
| apply and_congr_right
| fail "failed to apply a quantifier congruence lemma."))
/-- Peel off quantifiers from an `↔` and assign the names given in `l` to the introduced
variables. -/
def peelArgsIff (l : List Name) : TacticM Unit := withMainContext do
match l with
| [] => pure ()
| h :: hs =>
peelIffAux
let goal ← getMainGoal
let (_, new_goal) ← goal.intro h
replaceMainGoal [new_goal]
peelArgsIff hs
elab_rules : tactic
| `(tactic| peel $[$num?:num]? $e:term $[with $l?* $n?]?) => withMainContext do
/- we use `elabTermForApply` instead of `elabTerm` so that terms passed to `peel` can contain
quantifiers with implicit bound variables without causing errors or requiring `@`. -/
let e ← elabTermForApply e false
let n? := n?.bind fun n => if n.raw.isIdent then pure n.raw.getId else none
let l := (l?.getD #[]).map getNameOfIdent' |>.toList
-- If num is not present and if there are any provided variable names,
-- use the number of variable names.
let num? := num?.map (·.getNat) <|> if l.isEmpty then none else l.length
if let some num := num? then
peelArgs e num l n?
else
unless ← peelUnbounded e n? do
throwPeelError (← inferType e) (← getMainTarget)
| `(tactic| peel $n:num) => peelArgsIff <| .replicate n.getNat `_
| `(tactic| peel with $args*) => peelArgsIff (args.map getNameOfIdent').toList
macro_rules
| `(tactic| peel $[$n:num]? $[$e:term]? $[with $h*]? using $u:term) =>
`(tactic| peel $[$n:num]? $[$e:term]? $[with $h*]?; exact $u)
end Mathlib.Tactic.Peel |
.lake/packages/mathlib/Mathlib/Tactic/RenameBVar.lean | import Lean.Elab.Tactic.Location
import Mathlib.Util.Tactic
import Mathlib.Lean.Expr.Basic
/-!
# The `rename_bvar` tactic
This file defines the `rename_bvar` tactic, for renaming bound variables.
-/
namespace Mathlib.Tactic
open Lean Parser Elab Tactic
/-- Renames a bound variable in a hypothesis. -/
def renameBVarHyp (mvarId : MVarId) (fvarId : FVarId) (old new : Name) :
MetaM Unit :=
modifyLocalDecl mvarId fvarId fun ldecl ↦
ldecl.setType <| ldecl.type.renameBVar old new
/-- Renames a bound variable in the target. -/
def renameBVarTarget (mvarId : MVarId) (old new : Name) : MetaM Unit :=
modifyTarget mvarId fun e ↦ e.renameBVar old new
/--
* `rename_bvar old → new` renames all bound variables named `old` to `new` in the target.
* `rename_bvar old → new at h` does the same in hypothesis `h`.
```lean
example (P : ℕ → ℕ → Prop) (h : ∀ n, ∃ m, P n m) : ∀ l, ∃ m, P l m := by
rename_bvar n → q at h -- h is now ∀ (q : ℕ), ∃ (m : ℕ), P q m,
rename_bvar m → n -- target is now ∀ (l : ℕ), ∃ (n : ℕ), P k n,
exact h -- Lean does not care about those bound variable names
```
Note: name clashes are resolved automatically.
-/
elab "rename_bvar " old:ident " → " new:ident loc?:(location)? : tactic => do
let mvarId ← getMainGoal
instantiateMVarDeclMVars mvarId
match loc? with
| none => renameBVarTarget mvarId old.getId new.getId
| some loc =>
withLocation (expandLocation loc)
(fun fvarId ↦ renameBVarHyp mvarId fvarId old.getId new.getId)
(renameBVarTarget mvarId old.getId new.getId)
fun _ ↦ throwError "unexpected location syntax"
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Zify.lean | import Mathlib.Tactic.Basic
import Mathlib.Tactic.Attr.Register
import Mathlib.Data.Int.Cast.Basic
import Mathlib.Order.Basic
/-!
# `zify` tactic
The `zify` tactic is used to shift propositions from `Nat` to `Int`.
This is often useful since `Int` has well-behaved subtraction.
```
example (a b c x y z : Nat) (h : ¬ x*y*z < 0) : c < a + 3*b := by
zify
zify at h
/-
h : ¬↑x * ↑y * ↑z < 0
⊢ ↑c < ↑a + 3 * ↑b
-/
```
-/
namespace Mathlib.Tactic.Zify
open Lean
open Lean.Meta
open Lean.Parser.Tactic
open Lean.Elab.Tactic
/--
The `zify` tactic is used to shift propositions from `Nat` to `Int`.
This is often useful since `Int` has well-behaved subtraction.
```
example (a b c x y z : Nat) (h : ¬ x*y*z < 0) : c < a + 3*b := by
zify
zify at h
/-
h : ¬↑x * ↑y * ↑z < 0
⊢ ↑c < ↑a + 3 * ↑b
-/
```
`zify` can be given extra lemmas to use in simplification. This is especially useful in the
presence of nat subtraction: passing `≤` arguments will allow `push_cast` to do more work.
```
example (a b c : Nat) (h : a - b < c) (hab : b ≤ a) : false := by
zify [hab] at h
/- h : ↑a - ↑b < ↑c -/
```
`zify` makes use of the `@[zify_simps]` attribute to move propositions,
and the `push_cast` tactic to simplify the `Int`-valued expressions.
`zify` is in some sense dual to the `lift` tactic.
`lift (z : Int) to Nat` will change the type of an
integer `z` (in the supertype) to `Nat` (the subtype), given a proof that `z ≥ 0`;
propositions concerning `z` will still be over `Int`.
`zify` changes propositions about `Nat` (the subtype) to propositions about `Int` (the supertype),
without changing the type of any variable.
-/
syntax (name := zify) "zify" (simpArgs)? (location)? : tactic
macro_rules
| `(tactic| zify $[[$simpArgs,*]]? $[at $location]?) =>
let args := simpArgs.map (·.getElems) |>.getD #[]
`(tactic|
simp -decide only [zify_simps, push_cast, $args,*] $[at $location]?)
/-- The `Simp.Context` generated by `zify`. -/
def mkZifyContext (simpArgs : Option (Syntax.TSepArray `Lean.Parser.Tactic.simpStar ",")) :
TacticM MkSimpContextResult := do
let args := simpArgs.map (·.getElems) |>.getD #[]
mkSimpContext
(← `(tactic| simp -decide only [zify_simps, push_cast, $args,*])) false
/-- A variant of `applySimpResultToProp` that cannot close the goal, but does not need a meta
variable and returns a tuple of a proof and the corresponding simplified proposition. -/
def applySimpResultToProp' (proof : Expr) (prop : Expr) (r : Simp.Result) : MetaM (Expr × Expr) :=
do
match r.proof? with
| some eqProof => return (← mkExpectedTypeHint (← mkEqMP eqProof proof) r.expr, r.expr)
| none =>
if r.expr != prop then
return (← mkExpectedTypeHint proof r.expr, r.expr)
else
return (proof, r.expr)
/-- Translate a proof and the proposition into a zified form. -/
def zifyProof (simpArgs : Option (Syntax.TSepArray `Lean.Parser.Tactic.simpStar ","))
(proof : Expr) (prop : Expr) : TacticM (Expr × Expr) := do
let ctx_result ← mkZifyContext simpArgs
let (r, _) ← simp prop ctx_result.ctx
applySimpResultToProp' proof prop r
@[zify_simps] lemma natCast_eq (a b : Nat) : a = b ↔ (a : Int) = (b : Int) := Int.ofNat_inj.symm
@[zify_simps] lemma natCast_le (a b : Nat) : a ≤ b ↔ (a : Int) ≤ (b : Int) := Int.ofNat_le.symm
@[zify_simps] lemma natCast_lt (a b : Nat) : a < b ↔ (a : Int) < (b : Int) := Int.ofNat_lt.symm
@[zify_simps] lemma natCast_ne (a b : Nat) : a ≠ b ↔ (a : Int) ≠ (b : Int) :=
not_congr Int.ofNat_inj.symm
@[zify_simps] lemma natCast_dvd (a b : Nat) : a ∣ b ↔ (a : Int) ∣ (b : Int) := Int.ofNat_dvd.symm
-- TODO: is it worth adding lemmas for Prime and Coprime as well?
-- Doing so in this file would require adding imports.
-- `Nat.cast_sub` is already tagged as `norm_cast` but it does allow to use assumptions like
-- `m < n` or more generally `m + k ≤ n`. We add two lemmas to increase the probability that
-- `zify` will push through `ℕ` subtraction.
variable {R : Type*} [AddGroupWithOne R]
@[norm_cast] theorem Nat.cast_sub_of_add_le {m n k} (h : m + k ≤ n) :
((n - m : ℕ) : R) = n - m := Nat.cast_sub (m.le_add_right k |>.trans h)
@[norm_cast] theorem Nat.cast_sub_of_lt {m n} (h : m < n) :
((n - m : ℕ) : R) = n - m := Nat.cast_sub h.le
end Zify
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/TautoSet.lean | import Mathlib.Data.Set.SymmDiff
import Mathlib.Data.Set.Disjoint
/-!
# The `tauto_set` tactic
-/
assert_not_exists RelIso
namespace Mathlib.Tactic.TautoSet
open Lean Elab.Tactic
/--
`specialize_all x` runs `specialize h x` for all hypotheses `h` where this tactic succeeds.
-/
elab (name := specialize_all) "specialize_all" x:term : tactic => withMainContext do
for h in ← getLCtx do
evalTactic (← `(tactic|specialize $(mkIdent h.userName) $x)) <|> pure ()
/--
`tauto_set` attempts to prove tautologies involving hypotheses and goals of the form `X ⊆ Y`
or `X = Y`, where `X`, `Y` are expressions built using ∪, ∩, \, and ᶜ from finitely many
variables of type `Set α`. It also unfolds expressions of the form `Disjoint A B` and
`symmDiff A B`.
Examples:
```lean
example {α} (A B C D : Set α) (h1 : A ⊆ B) (h2 : C ⊆ D) : C \ B ⊆ D \ A := by
tauto_set
example {α} (A B C : Set α) (h1 : A ⊆ B ∪ C) : (A ∩ B) ∪ (A ∩ C) = A := by
tauto_set
```
-/
macro "tauto_set" : tactic => `(tactic|
· simp_all -failIfUnchanged only [
Set.ext_iff, Set.subset_def,
Set.mem_union, Set.mem_compl_iff, Set.mem_inter_iff,
Set.symmDiff_def, Set.diff_eq, Set.disjoint_iff
]
try intro x
try specialize_all x
<;> tauto
)
end Mathlib.Tactic.TautoSet |
.lake/packages/mathlib/Mathlib/Tactic/Says.lean | import Mathlib.Init
import Lean.Meta.Tactic.TryThis
import Batteries.Linter.UnreachableTactic
import Qq.Match
import Mathlib.Lean.Elab.InfoTree
import Mathlib.Tactic.Basic
/-!
# The `says` tactic combinator.
If you write `X says`, where `X` is a tactic that produces a "Try this: Y" message,
then you will get a message "Try this: X says Y".
Once you've clicked to replace `X says` with `X says Y`,
afterwards `X says Y` will only run `Y`.
The typical usage case is:
```
simp? [X] says simp only [X, Y, Z]
```
If you use `set_option says.verify true` (set automatically during CI) then `X says Y`
runs `X` and verifies that it still prints "Try this: Y".
-/
open Lean Elab Tactic
open Lean.Meta.Tactic.TryThis
namespace Mathlib.Tactic.Says
/-- If this option is `true`, verify for `X says Y` that `X says` outputs `Y`. -/
register_option says.verify : Bool :=
{ defValue := false
group := "says"
descr := "Verify the output" }
/-- This option is only used in CI to negate `says.verify`. -/
register_option says.no_verify_in_CI : Bool :=
{ defValue := false
group := "says"
descr := "Disable reverification, even if the `CI` environment variable is set." }
open Parser Tactic
/-- This is a slight modification of `Parser.runParserCategory`. -/
def parseAsTacticSeq (env : Environment) (input : String) (fileName := "<input>") :
Except String (TSyntax ``tacticSeq) :=
let p := andthenFn whitespace Tactic.tacticSeq.fn
let ictx := mkInputContext input fileName
let s := p.run ictx { env, options := {} } (getTokenTable env) (mkParserState input)
if s.hasError then
Except.error (s.toErrorMsg ictx)
else if s.pos.atEnd input then
Except.ok ⟨s.stxStack.back⟩
else
Except.error ((s.mkError "end of input").toErrorMsg ictx)
/--
Run `evalTactic`, capturing a "Try this:" message and converting it back to syntax.
-/
def evalTacticCapturingTryThis (tac : TSyntax `tactic) : TacticM (TSyntax ``tacticSeq) := do
let { trees, ..} ← withResetServerInfo <| evalTactic tac
let suggestions := collectTryThisSuggestions trees
let some s := suggestions[0]?
| throwError m!"Tactic `{tac}` did not produce a 'Try this:' suggestion."
let suggestion ← do
if let some msg := s.messageData? then
pure <| SuggestionText.string <| ← msg.toString
else
pure <| s.suggestion
match suggestion with
| .tsyntax (kind := ``tacticSeq) stx =>
return stx
| .tsyntax (kind := `tactic) stx =>
return ← `(tacticSeq| $stx:tactic)
| .tsyntax stx =>
throwError m!"Tactic `{tac}` produced a 'Try this:' suggestion with a non-tactic syntax: {stx}"
| .string s =>
match parseAsTacticSeq (← getEnv) s with
| .ok stx => return stx
| .error err => throwError m!"Failed to parse 'Try this:' suggestion: {s}\n{err}"
/--
If you write `X says`, where `X` is a tactic that produces a "Try this: Y" message,
then you will get a message "Try this: X says Y".
Once you've clicked to replace `X says` with `X says Y`,
afterwards `X says Y` will only run `Y`.
The typical usage case is:
```
simp? [X] says simp only [X, Y, Z]
```
If you use `set_option says.verify true` (set automatically during CI) then `X says Y`
runs `X` and verifies that it still prints "Try this: Y".
-/
syntax (name := says) tactic " says" (colGt tacticSeq)? : tactic
elab_rules : tactic
| `(tactic| $tac:tactic says%$tk $[$result:tacticSeq]?) => do
let verify := says.verify.get (← getOptions) ||
!says.no_verify_in_CI.get (← getOptions) && (← IO.getEnv "CI").isSome
match result, verify with
| some _, true
| none, _ =>
let stx ← evalTacticCapturingTryThis tac
match result with
| some r =>
let stx' := (← Lean.PrettyPrinter.ppTactic ⟨Syntax.stripPos stx⟩).pretty
let r' := (← Lean.PrettyPrinter.ppTactic ⟨Syntax.stripPos r⟩).pretty
if stx' != r' then
throwError m!"Tactic `{tac}` produced `{stx'}`,\nbut was expecting it to produce `{r'}`!"
++ m!"\n\nYou can reproduce this error locally using `set_option says.verify true`."
| none =>
addSuggestion tk (← `(tactic| $tac says $stx)) (origSpan? := (← `(tactic| $tac says)))
| some result, false =>
evalTactic result
initialize Batteries.Linter.UnreachableTactic.addIgnoreTacticKind `Mathlib.Tactic.Says.says
end Says
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Nontriviality.lean | import Mathlib.Tactic.Nontriviality.Core
/-! # The `nontriviality` tactic. -/ |
.lake/packages/mathlib/Mathlib/Tactic/GCongr.lean | import Mathlib.Tactic.GCongr.CoreAttrs
import Mathlib.Tactic.Hint
/-! # Setup for the `gcongr` tactic
The core implementation of the `gcongr` ("generalized congruence") tactic is in the file
`Tactic.GCongr.Core`. -/
/-!
We register `gcongr` with the `hint` tactic.
-/
register_hint 1000 gcongr |
.lake/packages/mathlib/Mathlib/Tactic/LinearCombination'.lean | import Mathlib.Tactic.Ring
/-!
# linear_combination' Tactic
In this file, the `linear_combination'` tactic is created. This tactic, which
works over `CommRing`s, attempts to simplify the target by creating a linear combination
of a list of equalities and subtracting it from the target. A `Syntax.Tactic`
object can also be passed into the tactic, allowing the user to specify a
normalization tactic.
## Implementation Notes
This tactic works by creating a weighted sum of the given equations with the
given coefficients. Then, it subtracts the right side of the weighted sum
from the left side so that the right side equals 0, and it does the same with
the target. Afterwards, it sets the goal to be the equality between the
left-hand side of the new goal and the left-hand side of the new weighted sum.
Lastly, calls a normalization tactic on this target.
This file contains the `linear_combination'` tactic (note the '): the original
Lean 4 implementation of the "linear combination" idea, written at the time of
the port from Lean 3. Notably, its scope includes certain *nonlinear*
operations. The `linear_combination` tactic (in a separate file) is a variant
implementation, but this version is provided for backward-compatibility.
## References
* <https://leanprover.zulipchat.com/#narrow/stream/239415-metaprogramming-.2F.20tactics/topic/Linear.20algebra.20tactic/near/213928196>
-/
namespace Mathlib.Tactic.LinearCombination'
open Lean
open Elab Meta Term
variable {α : Type*} {a a' a₁ a₂ b b' b₁ b₂ c : α}
theorem pf_add_c [Add α] (p : a = b) (c : α) : a + c = b + c := p ▸ rfl
theorem c_add_pf [Add α] (p : b = c) (a : α) : a + b = a + c := p ▸ rfl
theorem add_pf [Add α] (p₁ : (a₁ : α) = b₁) (p₂ : a₂ = b₂) : a₁ + a₂ = b₁ + b₂ := p₁ ▸ p₂ ▸ rfl
theorem pf_sub_c [Sub α] (p : a = b) (c : α) : a - c = b - c := p ▸ rfl
theorem c_sub_pf [Sub α] (p : b = c) (a : α) : a - b = a - c := p ▸ rfl
theorem sub_pf [Sub α] (p₁ : (a₁ : α) = b₁) (p₂ : a₂ = b₂) : a₁ - a₂ = b₁ - b₂ := p₁ ▸ p₂ ▸ rfl
theorem neg_pf [Neg α] (p : (a : α) = b) : -a = -b := p ▸ rfl
theorem pf_mul_c [Mul α] (p : a = b) (c : α) : a * c = b * c := p ▸ rfl
theorem c_mul_pf [Mul α] (p : b = c) (a : α) : a * b = a * c := p ▸ rfl
theorem mul_pf [Mul α] (p₁ : (a₁ : α) = b₁) (p₂ : a₂ = b₂) : a₁ * a₂ = b₁ * b₂ := p₁ ▸ p₂ ▸ rfl
theorem inv_pf [Inv α] (p : (a : α) = b) : a⁻¹ = b⁻¹ := p ▸ rfl
theorem pf_div_c [Div α] (p : a = b) (c : α) : a / c = b / c := p ▸ rfl
theorem c_div_pf [Div α] (p : b = c) (a : α) : a / b = a / c := p ▸ rfl
theorem div_pf [Div α] (p₁ : (a₁ : α) = b₁) (p₂ : a₂ = b₂) : a₁ / a₂ = b₁ / b₂ := p₁ ▸ p₂ ▸ rfl
/-- Result of `expandLinearCombo`, either an equality proof or a value. -/
inductive Expanded
/-- A proof of `a = b`. -/
| proof (pf : Syntax.Term)
/-- A value, equivalently a proof of `c = c`. -/
| const (c : Syntax.Term)
/--
Performs macro expansion of a linear combination expression,
using `+`/`-`/`*`/`/` on equations and values.
* `.proof p` means that `p` is a syntax corresponding to a proof of an equation.
For example, if `h : a = b` then `expandLinearCombo (2 * h)` returns `.proof (c_add_pf 2 h)`
which is a proof of `2 * a = 2 * b`.
* `.const c` means that the input expression is not an equation but a value.
-/
partial def expandLinearCombo (ty : Expr) (stx : Syntax.Term) : TermElabM Expanded := withRef stx do
match stx with
| `(($e)) => expandLinearCombo ty e
| `($e₁ + $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ + $c₂)
| .proof p₁, .const c₂ => .proof <$> ``(pf_add_c $p₁ $c₂)
| .const c₁, .proof p₂ => .proof <$> ``(c_add_pf $p₂ $c₁)
| .proof p₁, .proof p₂ => .proof <$> ``(add_pf $p₁ $p₂)
| `($e₁ - $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ - $c₂)
| .proof p₁, .const c₂ => .proof <$> ``(pf_sub_c $p₁ $c₂)
| .const c₁, .proof p₂ => .proof <$> ``(c_sub_pf $p₂ $c₁)
| .proof p₁, .proof p₂ => .proof <$> ``(sub_pf $p₁ $p₂)
| `(-$e) => do
match ← expandLinearCombo ty e with
| .const c => .const <$> `(-$c)
| .proof p => .proof <$> ``(neg_pf $p)
| `(← $e) => do
match ← expandLinearCombo ty e with
| .const c => return .const c
| .proof p => .proof <$> ``(Eq.symm $p)
| `($e₁ * $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ * $c₂)
| .proof p₁, .const c₂ => .proof <$> ``(pf_mul_c $p₁ $c₂)
| .const c₁, .proof p₂ => .proof <$> ``(c_mul_pf $p₂ $c₁)
| .proof p₁, .proof p₂ => .proof <$> ``(mul_pf $p₁ $p₂)
| `($e⁻¹) => do
match ← expandLinearCombo ty e with
| .const c => .const <$> `($c⁻¹)
| .proof p => .proof <$> ``(inv_pf $p)
| `($e₁ / $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ / $c₂)
| .proof p₁, .const c₂ => .proof <$> ``(pf_div_c $p₁ $c₂)
| .const c₁, .proof p₂ => .proof <$> ``(c_div_pf $p₂ $c₁)
| .proof p₁, .proof p₂ => .proof <$> ``(div_pf $p₁ $p₂)
| e =>
-- We have the expected type from the goal, so we can fully synthesize this leaf node.
withSynthesize do
-- It is OK to use `ty` as the expected type even if `e` is a proof.
-- The expected type is just a hint.
let c ← withSynthesizeLight <| Term.elabTerm e ty
if (← whnfR (← inferType c)).isEq then
.proof <$> c.toSyntax
else
.const <$> c.toSyntax
theorem eq_trans₃ (p : (a : α) = b) (p₁ : a = a') (p₂ : b = b') : a' = b' := p₁ ▸ p₂ ▸ p
theorem eq_of_add [AddGroup α] (p : (a : α) = b) (H : (a' - b') - (a - b) = 0) : a' = b' := by
rw [← sub_eq_zero] at p ⊢; rwa [sub_eq_zero, p] at H
theorem eq_of_add_pow [Ring α] [NoZeroDivisors α] (n : ℕ) (p : (a : α) = b)
(H : (a' - b') ^ n - (a - b) = 0) : a' = b' := by
rw [← sub_eq_zero] at p ⊢; apply eq_zero_of_pow_eq_zero (n := n); rwa [sub_eq_zero, p] at H
/-- Implementation of `linear_combination'` and `linear_combination2`. -/
def elabLinearCombination' (tk : Syntax)
(norm? : Option Syntax.Tactic) (exp? : Option Syntax.NumLit) (input : Option Syntax.Term)
(twoGoals := false) : Tactic.TacticM Unit := Tactic.withMainContext do
let some (ty, _) := (← (← Tactic.getMainGoal).getType').eq? |
throwError "'linear_combination'' only proves equalities"
let p ← match input with
| none => `(Eq.refl 0)
| some e =>
match ← expandLinearCombo ty e with
| .const c => `(Eq.refl $c)
| .proof p => pure p
let norm := norm?.getD (Unhygienic.run <| withRef tk `(tactic| ring1))
Term.withoutErrToSorry <| Tactic.evalTactic <| ← withFreshMacroScope <|
if twoGoals then
`(tactic| (
refine eq_trans₃ $p ?a ?b
case' a => $norm:tactic
case' b => $norm:tactic))
else
match exp? with
| some n =>
if n.getNat = 1 then `(tactic| (refine eq_of_add $p ?a; case' a => $norm:tactic))
else `(tactic| (refine eq_of_add_pow $n $p ?a; case' a => $norm:tactic))
| _ => `(tactic| (refine eq_of_add $p ?a; case' a => $norm:tactic))
/--
The `(norm := $tac)` syntax says to use `tac` as a normalization postprocessor for
`linear_combination'`. The default normalizer is `ring1`, but you can override it with `ring_nf`
to get subgoals from `linear_combination'` or with `skip` to disable normalization.
-/
syntax normStx := atomic(" (" &"norm" " := ") withoutPosition(tactic) ")"
/--
The `(exp := n)` syntax for `linear_combination'` says to take the goal to the `n`th power before
subtracting the given combination of hypotheses.
-/
syntax expStx := atomic(" (" &"exp" " := ") withoutPosition(num) ")"
/--
`linear_combination'` attempts to simplify the target by creating a linear combination
of a list of equalities and subtracting it from the target.
The tactic will create a linear
combination by adding the equalities together from left to right, so the order
of the input hypotheses does matter. If the `norm` field of the
tactic is set to `skip`, then the tactic will simply set the user up to
prove their target using the linear combination instead of normalizing the subtraction.
Note: There is also a similar tactic `linear_combination` (no prime); this version is
provided for backward compatibility. Compared to this tactic, `linear_combination`:
* drops the `←` syntax for reversing an equation, instead offering this operation using the `-`
syntax
* does not support multiplication of two hypotheses (`h1 * h2`), division by a hypothesis (`3 / h`),
or inversion of a hypothesis (`h⁻¹`)
* produces noisy output when the user adds or subtracts a constant to a hypothesis (`h + 3`)
Note: The left and right sides of all the equalities should have the same
type, and the coefficients should also have this type. There must be
instances of `Mul` and `AddGroup` for this type.
* The input `e` in `linear_combination' e` is a linear combination of proofs of equalities,
given as a sum/difference of coefficients multiplied by expressions.
The coefficients may be arbitrary expressions.
The expressions can be arbitrary proof terms proving equalities.
Most commonly they are hypothesis names `h1, h2, ...`.
* `linear_combination' (norm := tac) e` runs the "normalization tactic" `tac`
on the subgoal(s) after constructing the linear combination.
* The default normalization tactic is `ring1`, which closes the goal or fails.
* To get a subgoal in the case that it is not immediately provable, use
`ring_nf` as the normalization tactic.
* To avoid normalization entirely, use `skip` as the normalization tactic.
* `linear_combination' (exp := n) e` will take the goal to the `n`th power before subtracting the
combination `e`. In other words, if the goal is `t1 = t2`, `linear_combination' (exp := n) e`
will change the goal to `(t1 - t2)^n = 0` before proceeding as above.
This feature is not supported for `linear_combination2`.
* `linear_combination2 e` is the same as `linear_combination' e` but it produces two
subgoals instead of one: rather than proving that `(a - b) - (a' - b') = 0` where
`a' = b'` is the linear combination from `e` and `a = b` is the goal,
it instead attempts to prove `a = a'` and `b = b'`.
Because it does not use subtraction, this form is applicable also to semirings.
* Note that a goal which is provable by `linear_combination' e` may not be provable
by `linear_combination2 e`; in general you may need to add a coefficient to `e`
to make both sides match, as in `linear_combination2 e + c`.
* You can also reverse equalities using `← h`, so for example if `h₁ : a = b`
then `2 * (← h)` is a proof of `2 * b = 2 * a`.
Example Usage:
```
example (x y : ℤ) (h1 : x*y + 2*x = 1) (h2 : x = y) : x*y = -2*y + 1 := by
linear_combination' 1*h1 - 2*h2
example (x y : ℤ) (h1 : x*y + 2*x = 1) (h2 : x = y) : x*y = -2*y + 1 := by
linear_combination' h1 - 2*h2
example (x y : ℤ) (h1 : x*y + 2*x = 1) (h2 : x = y) : x*y = -2*y + 1 := by
linear_combination' (norm := ring_nf) -2*h2
/- Goal: x * y + x * 2 - 1 = 0 -/
example (x y z : ℝ) (ha : x + 2*y - z = 4) (hb : 2*x + y + z = -2)
(hc : x + 2*y + z = 2) :
-3*x - 3*y - 4*z = 2 := by
linear_combination' ha - hb - 2*hc
example (x y : ℚ) (h1 : x + y = 3) (h2 : 3*x = 7) :
x*x*y + y*x*y + 6*x = 3*x*y + 14 := by
linear_combination' x*y*h1 + 2*h2
example (x y : ℤ) (h1 : x = -3) (h2 : y = 10) : 2*x = -6 := by
linear_combination' (norm := skip) 2*h1
simp
axiom qc : ℚ
axiom hqc : qc = 2*qc
example (a b : ℚ) (h : ∀ p q : ℚ, p = q) : 3*a + qc = 3*b + 2*qc := by
linear_combination' 3 * h a b + hqc
```
-/
syntax (name := linearCombination') "linear_combination'"
(normStx)? (expStx)? (ppSpace colGt term)? : tactic
elab_rules : tactic
| `(tactic| linear_combination'%$tk $[(norm := $tac)]? $[(exp := $n)]? $(e)?) =>
elabLinearCombination' tk tac n e
@[inherit_doc linearCombination']
syntax "linear_combination2" (normStx)? (ppSpace colGt term)? : tactic
elab_rules : tactic
| `(tactic| linear_combination2%$tk $[(norm := $tac)]? $(e)?) =>
elabLinearCombination' tk tac none e true
end Mathlib.Tactic.LinearCombination' |
.lake/packages/mathlib/Mathlib/Tactic/SudoSetOption.lean | import Mathlib.Init
import Lean.Elab.ElabRules
/-!
# Defines the `sudo set_option` command.
Allows setting undeclared options.
-/
open Lean Elab
private def setOption {m : Type → Type} [Monad m] [MonadError m]
(name val : Syntax) (opts : Options) : m Options := do
let val ← match val with
| Syntax.ident _ _ `true _ => pure <| DataValue.ofBool true
| Syntax.ident _ _ `false _ => pure <| DataValue.ofBool false
| _ => match val.isNatLit? with
| some num => pure <| DataValue.ofNat num
| none => match val.isStrLit? with
| some str => pure <| DataValue.ofString str
| none => throwError "unsupported option value {val}"
pure <| opts.insert name.getId val
open Elab.Command in
/--
The command `sudo set_option name val` is similar to `set_option name val`,
but it also allows to set undeclared options.
-/
elab "sudo " "set_option " n:ident ppSpace val:term : command => do
let options ← setOption n val (← getOptions)
modify fun s ↦ { s with maxRecDepth := maxRecDepth.get options }
modifyScope fun scope ↦ { scope with opts := options }
open Elab.Term in
/--
The command `sudo set_option name val in term` is similar to `set_option name val in term`,
but it also allows to set undeclared options.
-/
elab "sudo " "set_option " n:ident ppSpace val:term " in " body:term : term <= expectedType => do
let options ← setOption n val (← getOptions)
withTheReader Core.Context (fun ctx ↦
{ ctx with maxRecDepth := maxRecDepth.get options, options := options }) do
elabTerm body expectedType
/-
sudo set_option trace.Elab.resuming true in #check 4
#check sudo set_option trace.Elab.resuming true in by exact 4
-/ |
.lake/packages/mathlib/Mathlib/Tactic/Positivity.lean | import Mathlib.Tactic.Positivity.Basic
import Mathlib.Tactic.Positivity.Finset
import Mathlib.Tactic.NormNum.Basic
import Mathlib.Data.Int.Order.Basic |
.lake/packages/mathlib/Mathlib/Tactic/NthRewrite.lean | import Mathlib.Init
/-!
# `nth_rewrite` tactic
The tactic `nth_rewrite` and `nth_rw` are variants of `rewrite` and `rw` that only performs the
`n`th possible rewrite.
-/
namespace Mathlib.Tactic
open Lean Elab Tactic Meta Parser.Tactic
/-- `nth_rewrite` is a variant of `rewrite` that only changes the `n₁, ..., nₖ`ᵗʰ _occurrence_ of
the expression to be rewritten. `nth_rewrite n₁ ... nₖ [eq₁, eq₂,..., eqₘ]` will rewrite the
`n₁, ..., nₖ`ᵗʰ _occurrence_ of each of the `m` equalities `eqᵢ`in that order. Occurrences are
counted beginning with `1` in order of precedence.
For example,
```lean
example (h : a = 1) : a + a + a + a + a = 5 := by
nth_rewrite 2 3 [h]
/-
a: ℕ
h: a = 1
⊢ a + 1 + 1 + a + a = 5
-/
```
Notice that the second and third occurrences of `a` from the left have been rewritten by
`nth_rewrite`.
To understand the importance of order of precedence, consider the example below
```lean
example (a b c : Nat) : (a + b) + c = (b + a) + c := by
nth_rewrite 2 [Nat.add_comm] -- ⊢ (b + a) + c = (b + a) + c
```
Here, although the occurrence parameter is `2`, `(a + b)` is rewritten to `(b + a)`. This happens
because in order of precedence, the first occurrence of `_ + _` is the one that adds `a + b` to `c`.
The occurrence in `a + b` counts as the second occurrence.
If a term `t` is introduced by rewriting with `eqᵢ`, then this instance of `t` will be counted as an
_occurrence_ of `t` for all subsequent rewrites of `t` with `eqⱼ` for `j > i`. This behaviour is
illustrated by the example below
```lean
example (h : a = a + b) : a + a + a + a + a = 0 := by
nth_rewrite 3 [h, h]
/-
a b: ℕ
h: a = a + b
⊢ a + a + (a + b + b) + a + a = 0
-/
```
Here, the first `nth_rewrite` with `h` introduces an additional occurrence of `a` in the goal.
That is, the goal state after the first rewrite looks like below
```lean
/-
a b: ℕ
h: a = a + b
⊢ a + a + (a + b) + a + a = 0
-/
```
This new instance of `a` also turns out to be the third _occurrence_ of `a`. Therefore,
the next `nth_rewrite` with `h` rewrites this `a`.
-/
macro "nth_rewrite" c:optConfig ppSpace nums:(num)+ s:rwRuleSeq loc:(location)? : tactic => do
`(tactic| rewrite $[$(getConfigItems c)]* (occs := .pos [$[$nums],*]) $s:rwRuleSeq $(loc)?)
/--
`nth_rw` is a variant of `rw` that only changes the `n₁, ..., nₖ`ᵗʰ _occurrence_ of the expression
to be rewritten. Like `rw`, and unlike `nth_rewrite`, it will try to close the goal by trying `rfl`
afterwards. `nth_rw n₁ ... nₖ [eq₁, eq₂,..., eqₘ]` will rewrite the `n₁, ..., nₖ`ᵗʰ _occurrence_ of
each of the `m` equalities `eqᵢ`in that order. Occurrences are counted beginning with `1` in
order of precedence. For example,
```lean
example (h : a = 1) : a + a + a + a + a = 5 := by
nth_rw 2 3 [h]
/-
a: ℕ
h: a = 1
⊢ a + 1 + 1 + a + a = 5
-/
```
Notice that the second and third occurrences of `a` from the left have been rewritten by
`nth_rw`.
To understand the importance of order of precedence, consider the example below
```lean
example (a b c : Nat) : (a + b) + c = (b + a) + c := by
nth_rewrite 2 [Nat.add_comm] -- ⊢ (b + a) + c = (b + a) + c
```
Here, although the occurrence parameter is `2`, `(a + b)` is rewritten to `(b + a)`. This happens
because in order of precedence, the first occurrence of `_ + _` is the one that adds `a + b` to `c`.
The occurrence in `a + b` counts as the second occurrence.
If a term `t` is introduced by rewriting with `eqᵢ`, then this instance of `t` will be counted as an
_occurrence_ of `t` for all subsequent rewrites of `t` with `eqⱼ` for `j > i`. This behaviour is
illustrated by the example below
```lean
example (h : a = a + b) : a + a + a + a + a = 0 := by
nth_rw 3 [h, h]
/-
a b: ℕ
h: a = a + b
⊢ a + a + (a + b + b) + a + a = 0
-/
```
Here, the first `nth_rw` with `h` introduces an additional occurrence of `a` in the goal. That is,
the goal state after the first rewrite looks like below
```lean
/-
a b: ℕ
h: a = a + b
⊢ a + a + (a + b) + a + a = 0
-/
```
This new instance of `a` also turns out to be the third _occurrence_ of `a`. Therefore,
the next `nth_rw` with `h` rewrites this `a`.
Further, `nth_rw` will close the remaining goal with `rfl` if possible.
-/
macro "nth_rw" c:optConfig ppSpace nums:(num)+ s:rwRuleSeq loc:(location)? : tactic => do
`(tactic| rw $[$(getConfigItems c)]* (occs := .pos [$[$nums],*]) $s:rwRuleSeq $(loc)?)
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/SuppressCompilation.lean | import Mathlib.Init
import Lean.Elab.Declaration
/-!
# Suppressing compilation to executable code in a file or in a section
Currently, the compiler may spend a lot of time trying to produce executable code for complicated
definitions. This is a waste of resources for definitions in area of mathematics that will never
lead to executable code. The command `suppress_compilation` is a hack to disable code generation
on all definitions (in a section or in a whole file). See the issue https://github.com/leanprover-community/mathlib4/issues/7103
To compile a definition even when `suppress_compilation` is active, use
`unsuppress_compilation in def foo : ...`. This is activated by default on notations to make
sure that they work properly.
Note that `suppress_compilation` does not work with `notation3`. You need to prefix such a notation
declaration with `unsuppress_compilation` if `suppress_compilation` is active.
-/
open Lean Parser Elab Command
/-- Replacing `def` and `instance` by `noncomputable def` and `noncomputable instance`, designed
to disable the compiler in a given file or a given section.
This is a hack to work around https://github.com/leanprover-community/mathlib4/issues/7103. -/
def elabSuppressCompilationDecl : CommandElab := fun
| `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)?
$(recKind?)? def $id $sig:optDeclSig $val:declVal) => do
elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
$(recKind?)? def $id $sig:optDeclSig $val:declVal)
| `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)?
$(recKind?)? def $id $sig:optDeclSig $val:declVal deriving $derivs,*) => do
elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
$(recKind?)? def $id $sig:optDeclSig $val:declVal deriving $derivs,*)
| `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)?
$(recKind?)? $(attrKind?)? instance $(prio?)? $(id?)? $sig:declSig $val:declVal) => do
elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
$(recKind?)? $(attrKind?)? instance $(prio?)? $(id?)? $sig:declSig $val:declVal)
| `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)?
$(recKind?)? example $sig:optDeclSig $val:declVal) => do
elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
$(recKind?)? example $sig:optDeclSig $val:declVal)
| `($[$doc?:docComment]? $(attrs?)? $(vis?)? $[noncomputable]? $(unsafe?)?
$(recKind?)? abbrev $id $sig:optDeclSig $val:declVal) => do
elabDeclaration <| ← `($[$doc?:docComment]? $(attrs?)? $(vis?)? noncomputable $(unsafe?)?
$(recKind?)? abbrev $id $sig:optDeclSig $val:declVal)
| _ => throwUnsupportedSyntax
/-- The command `unsuppress_compilation in def foo : ...` makes sure that the definition is
compiled to executable code, even if `suppress_compilation` is active. -/
syntax "unsuppress_compilation" (" in " command)? : command
/-- Make sure that notations are compiled, even if `suppress_compilation` is active, by prepending
them with `unsuppress_compilation`. -/
def expandSuppressCompilationNotation : Macro := fun
| `($[$doc?:docComment]? $(attrs?)? $(attrKind)? notation
$(prec?)? $(name?)? $(prio?)? $items* => $v) => do
`(unsuppress_compilation in
$[$doc?:docComment]? $(attrs?)? $(attrKind)? notation
$(prec?)? $(name?)? $(prio?)? $items* => $v)
| _ => Macro.throwUnsupported
/-- Replacing `def` and `instance` by `noncomputable def` and `noncomputable instance`, designed
to disable the compiler in a given file or a given section.
This is a hack to work around https://github.com/leanprover-community/mathlib4/issues/7103.
Note that it does not work with `notation3`. You need to prefix such a notation declaration with
`unsuppress_compilation` if `suppress_compilation` is active. -/
macro "suppress_compilation" : command => do
let declKind := mkIdent ``declaration
let notaKind := mkIdent ``«notation»
let declElab := mkCIdent ``elabSuppressCompilationDecl
let notaMacro := mkCIdent ``expandSuppressCompilationNotation
`(
attribute [local command_elab $declKind] $declElab
attribute [local macro $notaKind] $notaMacro
)
/-- The command `unsuppress_compilation in def foo : ...` makes sure that the definition is
compiled to executable code, even if `suppress_compilation` is active. -/
macro_rules
| `(unsuppress_compilation $[in $cmd?]?) => do
let declElab := mkCIdent ``elabSuppressCompilationDecl
let notaMacro := mkCIdent ``expandSuppressCompilationNotation
let attrCmds ← `(
attribute [-command_elab] $declElab
attribute [-macro] $notaMacro
)
if let some cmd := cmd? then
`($attrCmds:command $cmd:command suppress_compilation)
else
return attrCmds |
.lake/packages/mathlib/Mathlib/Tactic/Substs.lean | import Mathlib.Init
/-!
# The `substs` macro
The `substs` macro applies the `subst` tactic to a list of hypotheses, in left-to-right order.
-/
namespace Mathlib.Tactic.Substs
/--
Applies the `subst` tactic to all given hypotheses from left to right.
-/
syntax (name := substs) "substs" (colGt ppSpace ident)* : tactic
macro_rules
| `(tactic| substs $xs:ident*) => `(tactic| ($[subst $xs]*))
end Substs
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/AdaptationNote.lean | import Mathlib.Init
import Lean.Meta.Tactic.TryThis
/-!
# Adaptation notes
This file defines a `#adaptation_note` command.
Adaptation notes are comments that are used to indicate that a piece of code
has been changed to accommodate a change in Lean core.
They typically require further action/maintenance to be taken in the future.
-/
open Lean
initialize registerTraceClass `adaptationNote
/-- General function implementing adaptation notes. -/
def reportAdaptationNote (f : Syntax → Meta.Tactic.TryThis.Suggestion) : MetaM Unit := do
let stx ← getRef
if let some doc := stx[1].getOptional? then
trace[adaptationNote] (Lean.TSyntax.getDocString ⟨doc⟩)
else
logError "Adaptation notes must be followed by a /-- comment -/"
let trailing := if let .original (trailing := s) .. := stx[0].getTailInfo then s else default
let doc : Syntax :=
Syntax.node2 .none ``Parser.Command.docComment (mkAtom "/--") (mkAtom "comment -/")
-- Optional: copy the original whitespace after the `#adaptation_note` token
-- to after the docstring comment
let doc := doc.updateTrailing trailing
let stx' := (← getRef)
let stx' := stx'.setArg 0 stx'[0].unsetTrailing
let stx' := stx'.setArg 1 (mkNullNode #[doc])
Meta.Tactic.TryThis.addSuggestion (← getRef) (f stx') (origSpan? := ← getRef)
/-- Adaptation notes are comments that are used to indicate that a piece of code
has been changed to accommodate a change in Lean core.
They typically require further action/maintenance to be taken in the future. -/
elab (name := adaptationNoteCmd) "#adaptation_note " (docComment)? : command => do
Elab.Command.liftTermElabM <| reportAdaptationNote (fun s => (⟨s⟩ : TSyntax `tactic))
@[inherit_doc adaptationNoteCmd]
elab "#adaptation_note " (docComment)? : tactic =>
reportAdaptationNote (fun s => (⟨s⟩ : TSyntax `tactic))
@[inherit_doc adaptationNoteCmd]
syntax (name := adaptationNoteTermStx) "#adaptation_note " (docComment)? term : term
/-- Elaborator for adaptation notes. -/
@[term_elab adaptationNoteTermStx]
def adaptationNoteTermElab : Elab.Term.TermElab
| `(#adaptation_note $[$_]? $t) => fun expectedType? => do
reportAdaptationNote (fun s => (⟨s⟩ : Term))
Elab.Term.elabTerm t expectedType?
| _ => fun _ => Elab.throwUnsupportedSyntax
#adaptation_note /-- This is a test -/
example : True := by
#adaptation_note /-- This is a test -/
trivial
example : True :=
#adaptation_note /-- This is a test -/
trivial |
.lake/packages/mathlib/Mathlib/Tactic/Abel.lean | import Mathlib.Tactic.NormNum.Basic
import Mathlib.Tactic.TryThis
import Mathlib.Util.AtLocation
import Mathlib.Util.AtomM.Recurse
/-!
# The `abel` tactic
Evaluate expressions in the language of additive, commutative monoids and groups.
-/
-- TODO: assert_not_exists NonUnitalNonAssociativeSemiring
assert_not_exists IsOrderedMonoid TopologicalSpace PseudoMetricSpace
namespace Mathlib.Tactic.Abel
open Lean Elab Meta Tactic Qq
initialize registerTraceClass `abel
initialize registerTraceClass `abel.detail
/--
Tactic for evaluating equations in the language of
*additive*, commutative monoids and groups.
`abel` and its variants work as both tactics and conv tactics.
* `abel1` fails if the target is not an equality that is provable by the axioms of
commutative monoids/groups.
* `abel_nf` rewrites all group expressions into a normal form.
* In tactic mode, `abel_nf at h` can be used to rewrite in a hypothesis.
* `abel_nf (config := cfg)` allows for additional configuration:
* `red`: the reducibility setting (overridden by `!`)
* `zetaDelta`: if true, local let variables can be unfolded (overridden by `!`)
* `recursive`: if true, `abel_nf` will also recurse into atoms
* `abel!`, `abel1!`, `abel_nf!` will use a more aggressive reducibility setting to identify atoms.
For example:
```
example [AddCommMonoid α] (a b : α) : a + (b + a) = a + a + b := by abel
example [AddCommGroup α] (a : α) : (3 : ℤ) • a = a + (2 : ℤ) • a := by abel
```
## Future work
* In mathlib 3, `abel` accepted additional optional arguments:
```
syntax "abel" (&" raw" <|> &" term")? (location)? : tactic
```
It is undecided whether these features should be restored eventually.
-/
syntax (name := abel) "abel" "!"? : tactic
/-- The `Context` for a call to `abel`.
Stores a few options for this call, and caches some common subexpressions
such as typeclass instances and `0 : α`.
-/
structure Context where
/-- The type of the ambient additive commutative group or monoid. -/
α : Expr
/-- The universe level for `α`. -/
univ : Level
/-- The expression representing `0 : α`. -/
α0 : Expr
/-- Specify whether we are in an additive commutative group or an additive commutative monoid. -/
isGroup : Bool
/-- The `AddCommGroup α` or `AddCommMonoid α` expression. -/
inst : Expr
/-- Populate a `Context` object for evaluating `e`. The type of `e` must carry at least an
`AddCommMonoid` instance; when an `AddCommGroup` instance is also available, the context
records `isGroup := true` and caches the group instance instead. -/
def mkContext (e : Expr) : MetaM Context := do
let α ← inferType e
-- `α` must be an additive commutative monoid; this throws if synthesis fails.
let c ← synthInstance (← mkAppM ``AddCommMonoid #[α])
-- The group instance is optional: `synthInstance?` returns `none` for plain monoids.
let cg ← synthInstance? (← mkAppM ``AddCommGroup #[α])
let u ← mkFreshLevelMVar
-- Unify the fresh level metavariable with the sort of `α`, so `Context.univ` matches `α`.
_ ← isDefEq (.sort (.succ u)) (← inferType α)
let α0 ← Expr.ofNat α 0
match cg with
| some cg => return ⟨α, u, α0, true, cg⟩
| _ => return ⟨α, u, α0, false, c⟩
/-- The monad for `Abel` contains, in addition to the `AtomM` state,
some information about the current type we are working over, so that we can consistently
use group lemmas or monoid lemmas as appropriate. -/
abbrev M := ReaderT Context AtomM
/-- Apply the function `n : ∀ {α} [inst : AddWhatever α], _` to the
implicit parameters in the context, and the given list of arguments. -/
def Context.app (c : Context) (n : Name) (inst : Expr) : Array Expr → Expr :=
mkAppN (((@Expr.const n [c.univ]).app c.α).app inst)
/-- Apply the function `n : ∀ {α} [inst α], _` to the implicit parameters in the
context, and the given list of arguments.
Compared to `context.app`, this takes the name of the typeclass, rather than an
inferred typeclass instance.
-/
def Context.mkApp (c : Context) (n inst : Name) (l : Array Expr) : MetaM Expr := do
return c.app n (← synthInstance ((Expr.const inst [c.univ]).app c.α)) l
/-- Append the letter "g" to the final string component of a name, e.g. `term` becomes
`termg`. This selects between declarations stated for `AddCommMonoid` instances and
their counterparts stated for `AddCommGroup` instances. Names whose last component is
not a string are returned unchanged. -/
def addG (n : Name) : Name :=
  match n with
  | .str p s => .str p (s ++ "g")
  | _ => n
/-- Apply the function `n : ∀ {α} [AddComm{Monoid,Group} α]` to the given list of arguments.
Will use the `AddComm{Monoid,Group}` instance that has been cached in the context.
-/
def iapp (n : Name) (xs : Array Expr) : M Expr := do
let c ← read
return c.app (if c.isGroup then addG n else n) c.inst xs
/-- A type synonym used by `abel` to represent `n • x + a` in an additive commutative monoid. -/
def term {α} [AddCommMonoid α] (n : ℕ) (x a : α) : α := n • x + a
/-- A type synonym used by `abel` to represent `n • x + a` in an additive commutative group. -/
def termg {α} [AddCommGroup α] (n : ℤ) (x a : α) : α := n • x + a
/-- Evaluate a term with coefficient `n`, atom `x` and successor terms `a`. -/
def mkTerm (n x a : Expr) : M Expr := iapp ``term #[n, x, a]
/-- Interpret an integer as a coefficient to a term. -/
def intToExpr (n : ℤ) : M Expr := do
Expr.ofInt (mkConst (if (← read).isGroup then ``Int else ``Nat) []) n
/-- A normal form for `abel`.
Expressions are represented as a list of terms of the form `e = n • x`,
where `n : ℤ` and `x` is an arbitrary element of the additive commutative monoid or group.
We explicitly track the `Expr` forms of `e` and `n`, even though they could be reconstructed,
for efficiency. -/
inductive NormalExpr : Type
| zero (e : Expr) : NormalExpr
| nterm (e : Expr) (n : Expr × ℤ) (x : ℕ × Expr) (a : NormalExpr) : NormalExpr
deriving Inhabited
/-- Extract the expression from a normal form. -/
def NormalExpr.e : NormalExpr → Expr
| .zero e => e
| .nterm e .. => e
instance : Coe NormalExpr Expr where coe := NormalExpr.e
/-- Construct the normal form representing a single term. -/
def NormalExpr.term' (n : Expr × ℤ) (x : ℕ × Expr) (a : NormalExpr) : M NormalExpr :=
return .nterm (← mkTerm n.1 x.2 a) n x a
/-- Construct the normal form representing zero. -/
def NormalExpr.zero' : M NormalExpr := return NormalExpr.zero (← read).α0
open NormalExpr
theorem const_add_term {α} [AddCommMonoid α] (k n x a a') (h : k + a = a') :
k + @term α _ n x a = term n x a' := by
simp [h.symm, term, add_comm, add_assoc]
theorem const_add_termg {α} [AddCommGroup α] (k n x a a') (h : k + a = a') :
k + @termg α _ n x a = termg n x a' := by
simp [h.symm, termg, add_comm, add_assoc]
theorem term_add_const {α} [AddCommMonoid α] (n x a k a') (h : a + k = a') :
@term α _ n x a + k = term n x a' := by
simp [h.symm, term, add_assoc]
theorem term_add_constg {α} [AddCommGroup α] (n x a k a') (h : a + k = a') :
@termg α _ n x a + k = termg n x a' := by
simp [h.symm, termg, add_assoc]
theorem term_add_term {α} [AddCommMonoid α] (n₁ x a₁ n₂ a₂ n' a') (h₁ : n₁ + n₂ = n')
(h₂ : a₁ + a₂ = a') : @term α _ n₁ x a₁ + @term α _ n₂ x a₂ = term n' x a' := by
simp [h₁.symm, h₂.symm, term, add_nsmul, add_assoc, add_left_comm]
theorem term_add_termg {α} [AddCommGroup α] (n₁ x a₁ n₂ a₂ n' a')
(h₁ : n₁ + n₂ = n') (h₂ : a₁ + a₂ = a') :
@termg α _ n₁ x a₁ + @termg α _ n₂ x a₂ = termg n' x a' := by
simp only [termg, h₁.symm, add_zsmul, h₂.symm]
exact add_add_add_comm (n₁ • x) a₁ (n₂ • x) a₂
theorem zero_term {α} [AddCommMonoid α] (x a) : @term α _ 0 x a = a := by
simp [term, zero_nsmul]
theorem zero_termg {α} [AddCommGroup α] (x a) : @termg α _ 0 x a = a := by
simp [termg, zero_zsmul]
/--
Interpret the sum of two expressions in `abel`'s normal form.
-/
partial def evalAdd : NormalExpr → NormalExpr → M (NormalExpr × Expr)
| zero _, e₂ => do
let p ← mkAppM ``zero_add #[e₂]
return (e₂, p)
| e₁, zero _ => do
let p ← mkAppM ``add_zero #[e₁]
return (e₁, p)
| he₁@(nterm e₁ n₁ x₁ a₁), he₂@(nterm e₂ n₂ x₂ a₂) => do
if x₁.1 = x₂.1 then
let n' ← Mathlib.Meta.NormNum.eval (← mkAppM ``HAdd.hAdd #[n₁.1, n₂.1])
let (a', h₂) ← evalAdd a₁ a₂
let k := n₁.2 + n₂.2
let p₁ ← iapp ``term_add_term
#[n₁.1, x₁.2, a₁, n₂.1, a₂, n'.expr, a', ← n'.getProof, h₂]
if k = 0 then do
let p ← mkEqTrans p₁ (← iapp ``zero_term #[x₁.2, a'])
return (a', p)
else return (← term' (n'.expr, k) x₁ a', p₁)
else if x₁.1 < x₂.1 then do
let (a', h) ← evalAdd a₁ he₂
return (← term' n₁ x₁ a', ← iapp ``term_add_const #[n₁.1, x₁.2, a₁, e₂, a', h])
else do
let (a', h) ← evalAdd he₁ a₂
return (← term' n₂ x₂ a', ← iapp ``const_add_term #[e₁, n₂.1, x₂.2, a₂, a', h])
theorem term_neg {α} [AddCommGroup α] (n x a n' a')
(h₁ : -n = n') (h₂ : -a = a') : -@termg α _ n x a = termg n' x a' := by
simpa [h₂.symm, h₁.symm, termg] using add_comm _ _
/--
Interpret a negated expression in `abel`'s normal form.
-/
def evalNeg : NormalExpr → M (NormalExpr × Expr)
| (zero _) => do
let p ← (← read).mkApp ``neg_zero ``NegZeroClass #[]
return (← zero', p)
| (nterm _ n x a) => do
let n' ← Mathlib.Meta.NormNum.eval (← mkAppM ``Neg.neg #[n.1])
let (a', h₂) ← evalNeg a
return (← term' (n'.expr, -n.2) x a',
(← read).app ``term_neg (← read).inst #[n.1, x.2, a, n'.expr, a', ← n'.getProof, h₂])
/-- A synonym for `•`, used internally in `abel`. -/
def smul {α} [AddCommMonoid α] (n : ℕ) (x : α) : α := n • x
/-- A synonym for `•`, used internally in `abel`. -/
def smulg {α} [AddCommGroup α] (n : ℤ) (x : α) : α := n • x
theorem zero_smul {α} [AddCommMonoid α] (c) : smul c (0 : α) = 0 := by
simp [smul, nsmul_zero]
theorem zero_smulg {α} [AddCommGroup α] (c) : smulg c (0 : α) = 0 := by
simp [smulg, zsmul_zero]
theorem term_smul {α} [AddCommMonoid α] (c n x a n' a')
(h₁ : c * n = n') (h₂ : smul c a = a') :
smul c (@term α _ n x a) = term n' x a' := by
simp [h₂.symm, h₁.symm, term, smul, nsmul_add, mul_nsmul']
theorem term_smulg {α} [AddCommGroup α] (c n x a n' a')
(h₁ : c * n = n') (h₂ : smulg c a = a') :
smulg c (@termg α _ n x a) = termg n' x a' := by
simp [h₂.symm, h₁.symm, termg, smulg, zsmul_add, mul_zsmul]
/--
Auxiliary function for `evalSMul'`.
-/
def evalSMul (k : Expr × ℤ) : NormalExpr → M (NormalExpr × Expr)
| zero _ => return (← zero', ← iapp ``zero_smul #[k.1])
| nterm _ n x a => do
let n' ← Mathlib.Meta.NormNum.eval (← mkAppM ``HMul.hMul #[k.1, n.1])
let (a', h₂) ← evalSMul k a
return (← term' (n'.expr, k.2 * n.2) x a',
← iapp ``term_smul #[k.1, n.1, x.2, a, n'.expr, a', ← n'.getProof, h₂])
theorem term_atom {α} [AddCommMonoid α] (x : α) : x = term 1 x 0 := by simp [term]
theorem term_atomg {α} [AddCommGroup α] (x : α) : x = termg 1 x 0 := by simp [termg]
theorem term_atom_pf {α} [AddCommMonoid α] (x x' : α) (h : x = x') : x = term 1 x' 0 := by
simp [term, h]
theorem term_atom_pfg {α} [AddCommGroup α] (x x' : α) (h : x = x') : x = termg 1 x' 0 := by
simp [termg, h]
/-- Interpret an expression as an atom for `abel`'s normal form. -/
def evalAtom (e : Expr) : M (NormalExpr × Expr) := do
let { expr := e', proof?, .. } ← (← readThe AtomM.Context).evalAtom e
let (i, a) ← AtomM.addAtom e'
let p ← match proof? with
| none => iapp ``term_atom #[e]
| some p => iapp ``term_atom_pf #[e, a, p]
return (← term' (← intToExpr 1, 1) (i, a) (← zero'), p)
theorem unfold_sub {α} [SubtractionMonoid α] (a b c : α) (h : a + -b = c) : a - b = c := by
rw [sub_eq_add_neg, h]
theorem unfold_smul {α} [AddCommMonoid α] (n) (x y : α)
(h : smul n x = y) : n • x = y := h
theorem unfold_smulg {α} [AddCommGroup α] (n : ℕ) (x y : α)
(h : smulg (Int.ofNat n) x = y) : (n : ℤ) • x = y := h
theorem unfold_zsmul {α} [AddCommGroup α] (n : ℤ) (x y : α)
(h : smulg n x = y) : n • x = y := h
lemma subst_into_smul {α} [AddCommMonoid α]
(l r tl tr t) (prl : l = tl) (prr : r = tr)
(prt : @smul α _ tl tr = t) : smul l r = t := by simp [prl, prr, prt]
lemma subst_into_smulg {α} [AddCommGroup α]
(l r tl tr t) (prl : l = tl) (prr : r = tr)
(prt : @smulg α _ tl tr = t) : smulg l r = t := by simp [prl, prr, prt]
lemma subst_into_smul_upcast {α} [AddCommGroup α]
(l r tl zl tr t) (prl₁ : l = tl) (prl₂ : ↑tl = zl) (prr : r = tr)
(prt : @smulg α _ zl tr = t) : smul l r = t := by
simp [← prt, prl₁, ← prl₂, prr, smul, smulg, natCast_zsmul]
lemma subst_into_add {α} [AddCommMonoid α] (l r tl tr t)
(prl : (l : α) = tl) (prr : r = tr) (prt : tl + tr = t) : l + r = t := by
rw [prl, prr, prt]
lemma subst_into_addg {α} [AddCommGroup α] (l r tl tr t)
(prl : (l : α) = tl) (prr : r = tr) (prt : tl + tr = t) : l + r = t := by
rw [prl, prr, prt]
lemma subst_into_negg {α} [AddCommGroup α] (a ta t : α)
(pra : a = ta) (prt : -ta = t) : -a = t := by
simp [pra, prt]
/-- Normalize a term `orig` of the form `smul e₁ e₂` or `smulg e₁ e₂`.
Normalized terms use `smul` for monoids and `smulg` for groups,
so there are actually four cases to handle:
* Using `smul` in a monoid just simplifies the pieces using `subst_into_smul`
* Using `smulg` in a group just simplifies the pieces using `subst_into_smulg`
* Using `smul a b` in a group requires converting `a` from a nat to an int and
then simplifying `smulg ↑a b` using `subst_into_smul_upcast`
* Using `smulg` in a monoid is impossible (or at least out of scope),
because you need a group argument to write a `smulg` term -/
def evalSMul' (eval : Expr → M (NormalExpr × Expr))
    (is_smulg : Bool) (orig e₁ e₂ : Expr) : M (NormalExpr × Expr) := do
  trace[abel] "Calling NormNum on {e₁}"
  -- Try to numerically normalize the coefficient; on failure, keep `e₁` as-is
  -- (with no proof, so we fall back to a `rfl` proof below).
  let ⟨e₁', p₁, _⟩ ← try Meta.NormNum.eval e₁ catch _ => pure { expr := e₁ }
  let p₁ ← p₁.getDM (mkEqRefl e₁')
  match e₁'.int? with
  | some n => do
    let c ← read
    let (e₂', p₂) ← eval e₂
    if c.isGroup = is_smulg then do
      -- The scalar kind matches the ambient structure; simplify the pieces directly.
      let (e', p) ← evalSMul (e₁', n) e₂'
      return (e', ← iapp ``subst_into_smul #[e₁, e₂, e₁', e₂', e', p₁, p₂, p])
    else do
      -- `smulg` requires a group; using it in a mere monoid is out of scope.
      -- (Fixed message: "to us" → "to use".)
      if ¬ c.isGroup then throwError "Doesn't make sense to use `smulg` in a monoid. "
      -- We are multiplying by a natural number in an additive group:
      -- upcast the coefficient to `ℤ` and close with `subst_into_smul_upcast`.
      let zl ← Expr.ofInt q(ℤ) n
      let p₁' ← mkEqRefl zl
      let (e', p) ← evalSMul (zl, n) e₂'
      return (e', c.app ``subst_into_smul_upcast c.inst #[e₁, e₂, e₁', zl, e₂', e', p₁, p₁', p₂, p])
  | none => evalAtom orig
/-- Evaluate an expression into its `abel` normal form, by recursing into subexpressions. -/
partial def eval (e : Expr) : M (NormalExpr × Expr) := do
trace[abel.detail] "running eval on {e}"
trace[abel.detail] "getAppFnArgs: {e.getAppFnArgs}"
match e.getAppFnArgs with
| (``HAdd.hAdd, #[_, _, _, _, e₁, e₂]) => do
let (e₁', p₁) ← eval e₁
let (e₂', p₂) ← eval e₂
let (e', p') ← evalAdd e₁' e₂'
return (e', ← iapp ``subst_into_add #[e₁, e₂, e₁', e₂', e', p₁, p₂, p'])
| (``HSub.hSub, #[_, _, _, _, e₁, e₂]) => do
let e₂' ← mkAppM ``Neg.neg #[e₂]
let e ← mkAppM ``HAdd.hAdd #[e₁, e₂']
let (e', p) ← eval e
let p' ← (← read).mkApp ``unfold_sub ``SubtractionMonoid #[e₁, e₂, e', p]
return (e', p')
| (``Neg.neg, #[_, _, e]) => do
let (e₁, p₁) ← eval e
let (e₂, p₂) ← evalNeg e₁
return (e₂, ← iapp `Mathlib.Tactic.Abel.subst_into_neg #[e, e₁, e₂, p₁, p₂])
| (``AddMonoid.nsmul, #[_, _, e₁, e₂]) => do
let n ← if (← read).isGroup then mkAppM ``Int.ofNat #[e₁] else pure e₁
let (e', p) ← eval <| ← iapp ``smul #[n, e₂]
return (e', ← iapp ``unfold_smul #[e₁, e₂, e', p])
| (``SubNegMonoid.zsmul, #[_, _, e₁, e₂]) => do
if ¬ (← read).isGroup then failure
let (e', p) ← eval <| ← iapp ``smul #[e₁, e₂]
return (e', (← read).app ``unfold_zsmul (← read).inst #[e₁, e₂, e', p])
| (``SMul.smul, #[.const ``Int _, _, _, e₁, e₂]) =>
evalSMul' eval true e e₁ e₂
| (``SMul.smul, #[.const ``Nat _, _, _, e₁, e₂]) =>
evalSMul' eval false e e₁ e₂
| (``HSMul.hSMul, #[.const ``Int _, _, _, _, e₁, e₂]) =>
evalSMul' eval true e e₁ e₂
| (``HSMul.hSMul, #[.const ``Nat _, _, _, _, e₁, e₂]) =>
evalSMul' eval false e e₁ e₂
| (``smul, #[_, _, e₁, e₂]) => evalSMul' eval false e e₁ e₂
| (``smulg, #[_, _, e₁, e₂]) => evalSMul' eval true e e₁ e₂
| (``OfNat.ofNat, #[_, .lit (.natVal 0), _])
| (``Zero.zero, #[_, _]) =>
if ← isDefEq e (← read).α0 then
pure (← zero', ← mkEqRefl (← read).α0)
else
evalAtom e
| _ => evalAtom e
/-- Determine whether `e` will be handled as an atom by the `abel` tactic. The `match` in this
function should be preserved to be parallel in case-matching to that in the
`Mathlib.Tactic.Abel.eval` metaprogram. -/
def isAtom (e : Expr) : Bool :=
match e.getAppFnArgs with
| (``HAdd.hAdd, #[_, _, _, _, _, _])
| (``HSub.hSub, #[_, _, _, _, _, _])
| (``Neg.neg, #[_, _, _])
| (``AddMonoid.nsmul, #[_, _, _, _])
| (``SubNegMonoid.zsmul, #[_, _, _, _])
| (``SMul.smul, #[.const ``Int _, _, _, _, _])
| (``SMul.smul, #[.const ``Nat _, _, _, _, _])
| (``HSMul.hSMul, #[.const ``Int _, _, _, _, _, _])
| (``HSMul.hSMul, #[.const ``Nat _, _, _, _, _, _])
| (``smul, #[_, _, _, _])
| (``smulg, #[_, _, _, _]) => false
/- The `OfNat.ofNat` and `Zero.zero` cases are deliberately omitted here: these two cases are not
strictly atoms for `abel`, but they are atom-like in that their handling by
`Mathlib.Tactic.Abel.eval` contains no recursive call. -/
-- | (``OfNat.ofNat, #[_, .lit (.natVal 0), _])
-- | (``Zero.zero, #[_, _])
| _ => true
@[tactic_alt abel]
elab (name := abel1) "abel1" tk:"!"? : tactic => withMainContext do
let tm := if tk.isSome then .default else .reducible
let some (_, e₁, e₂) := (← whnfR <| ← getMainTarget).eq?
| throwError "abel1 requires an equality goal"
trace[abel] "running on an equality `{e₁} = {e₂}`."
let c ← mkContext e₁
closeMainGoal `abel1 <| ← AtomM.run tm <| ReaderT.run (r := c) do
let (e₁', p₁) ← eval e₁
trace[abel] "found `{p₁}`, a proof that `{e₁} = {e₁'.e}`"
let (e₂', p₂) ← eval e₂
trace[abel] "found `{p₂}`, a proof that `{e₂} = {e₂'.e}`"
unless ← isDefEq e₁' e₂' do
throwError "abel1 found that the two sides were not equal"
trace[abel] "verified that the simplified forms are identical"
mkEqTrans p₁ (← mkEqSymm p₂)
@[tactic_alt abel]
macro (name := abel1!) "abel1!" : tactic => `(tactic| abel1 !)
theorem term_eq {α : Type*} [AddCommMonoid α] (n : ℕ) (x a : α) : term n x a = n • x + a := rfl
/-- Unfolding lemma: by definition, `termg n x a = n • x + a` in an additive commutative group. -/
theorem termg_eq {α : Type*} [AddCommGroup α] (n : ℤ) (x a : α) : termg n x a = n • x + a := rfl
/-- True if this normal form represents a single atom: exactly one term, with
coefficient `1` and an empty tail. -/
def NormalExpr.isAtom : NormalExpr → Bool
| .nterm _ (_, 1) _ (.zero _) => true
| _ => false
/-- The normalization style for `abel_nf`. -/
inductive AbelMode where
/-- The default form -/
| term
/-- Raw form: the representation `abel` uses internally. -/
| raw
/-- Configuration for `abel_nf`. -/
structure AbelNF.Config extends AtomM.Recurse.Config where
/-- The normalization style. -/
mode := AbelMode.term
/-- Function elaborating `AbelNF.Config`. -/
declare_config_elab elabAbelNFConfig AbelNF.Config
/-- A cleanup routine, which simplifies expressions in `abel` normal form to a more human-friendly
format. -/
def cleanup (cfg : AbelNF.Config) (r : Simp.Result) : MetaM Simp.Result := do
match cfg.mode with
| .raw => pure r
| .term =>
let thms := [``term_eq, ``termg_eq, ``add_zero, ``one_nsmul, ``one_zsmul, ``zsmul_zero]
let ctx ← Simp.mkContext (config := { zetaDelta := cfg.zetaDelta })
(simpTheorems := #[← thms.foldlM (·.addConst ·) {}])
(congrTheorems := ← getSimpCongrTheorems)
pure <| ←
r.mkEqTrans (← Simp.main r.expr ctx (methods := ← Lean.Meta.Simp.mkDefaultMethods)).1
/--
Evaluate an expression into its `abel` normal form.
This is a variant of `Mathlib.Tactic.Abel.eval`, the main driver of the `abel` tactic.
It differs in
* outputting a `Simp.Result`, rather than a `NormalExpr × Expr`;
* throwing an error if the expression `e` is an atom for the `abel` tactic.
-/
def evalExpr (e : Expr) : AtomM Simp.Result := do
let e ← withReducible <| whnf e
guard !(isAtom e)
let (a, pa) ← eval e (← mkContext e)
return { expr := a, proof? := pa }
open Parser.Tactic
@[tactic_alt abel]
elab (name := abelNF) "abel_nf" tk:"!"? cfg:optConfig loc:(location)? : tactic => do
let mut cfg ← elabAbelNFConfig cfg
if tk.isSome then cfg := { cfg with red := .default, zetaDelta := true }
let loc := (loc.map expandLocation).getD (.targets #[] true)
let s ← IO.mkRef {}
let m := AtomM.recurse s cfg.toConfig evalExpr (cleanup cfg)
transformAtLocation (m ·) "abel_nf" loc (failIfUnchanged := true) false
@[tactic_alt abel]
macro "abel_nf!" cfg:optConfig loc:(location)? : tactic =>
`(tactic| abel_nf ! $cfg:optConfig $(loc)?)
@[inherit_doc abel]
syntax (name := abelNFConv) "abel_nf" "!"? optConfig : conv
/-- Elaborator for the `abel_nf` tactic. -/
@[tactic abelNFConv]
def elabAbelNFConv : Tactic := fun stx ↦ match stx with
| `(conv| abel_nf $[!%$tk]? $cfg:optConfig) => withMainContext do
let mut cfg ← elabAbelNFConfig cfg
if tk.isSome then cfg := { cfg with red := .default, zetaDelta := true }
let s ← IO.mkRef {}
Conv.applySimpResult
(← AtomM.recurse s cfg.toConfig evalExpr (cleanup cfg) (← instantiateMVars (← Conv.getLhs)))
| _ => Elab.throwUnsupportedSyntax
@[inherit_doc abel]
macro "abel_nf!" cfg:optConfig : conv => `(conv| abel_nf ! $cfg:optConfig)
macro_rules
| `(tactic| abel !) => `(tactic| first | abel1! | try_this abel_nf!)
| `(tactic| abel) => `(tactic| first | abel1 | try_this abel_nf)
@[tactic_alt abel]
macro "abel!" : tactic => `(tactic| abel !)
@[inherit_doc abel]
macro (name := abelConv) "abel" : conv =>
`(conv| first | discharge => abel1 | try_this abel_nf)
@[inherit_doc abelConv] macro "abel!" : conv =>
`(conv| first | discharge => abel1! | try_this abel_nf!)
end Mathlib.Tactic.Abel
/-!
We register `abel` with the `hint` tactic.
-/
register_hint 1000 abel |
.lake/packages/mathlib/Mathlib/Tactic/DeriveFintype.lean | import Mathlib.Tactic.ProxyType
import Mathlib.Data.Fintype.Basic
import Mathlib.Data.Fintype.Sigma
import Mathlib.Data.Fintype.Sum
/-!
# The `Fintype` derive handler
This file defines a derive handler to automatically generate `Fintype` instances
for structures and inductive types.
The following is a prototypical example of what this can handle:
```
inductive MyOption (α : Type*)
| none
| some (x : α)
deriving Fintype
```
This deriving handler does not attempt to process inductive types that are either
recursive or that have indices.
To get debugging information, do `set_option trace.Elab.Deriving.fintype true`
and `set_option Elab.ProxyType true`.
There is a term elaborator `derive_fintype%` implementing the derivation of `Fintype` instances.
This can be useful in cases when there are necessary additional assumptions (like `DecidableEq`).
This is implemented using `Fintype.ofEquiv` and `proxy_equiv%`, which is a term elaborator
that creates an equivalence from a "proxy type" composed of basic type constructors. If Lean
can synthesize a `Fintype` instance for the proxy type, then `derive_fintype%` succeeds.
## Implementation notes
There are two kinds of `Fintype` instances that we generate, depending on the inductive type.
If it is an enum (an inductive type with only 0-ary constructors), then we generate the
complete `List` of all constructors; see `Mathlib.Deriving.Fintype.mkFintypeEnum` for more
details. The proof has $O(n)$ complexity in the number of constructors.
Otherwise, the strategy we take is to generate a "proxy type", define an equivalence between
our type and the proxy type (see `proxy_equiv%`), and then use `Fintype.ofEquiv` to pull a
`Fintype` instance on the proxy type (if one exists) to our inductive type. For example, with
the `MyOption α` type above, we generate `Unit ⊕ α`. While the proxy type is not a finite type
in general, we add `Fintype` instances for every type parameter of our inductive type (and
`Decidable` instances for every `Prop` parameter). Hence, in our example we get
`Fintype (MyOption α)` assuming `Fintype α`.
There is a source of quadratic complexity in this `Fintype` instance from the fact that an
inductive type with `n` constructors has a proxy type of the form `C₁ ⊕ (C₂ ⊕ (⋯ ⊕ Cₙ))`,
so mapping to and from `Cᵢ` requires looking through `i` levels of `Sum` constructors.
Ignoring time spent looking through these constructors, the construction of `Finset.univ`
contributes just linear time with respect to the cardinality of the type since the instances
involved compute the underlying `List` for the `Finset` as `l₁ ++ (l₂ ++ (⋯ ++ lₙ))` with
right associativity.
Note that an alternative design could be that instead of using `Sum` we could create a
function `C : Fin n → Type*` with `C i = ULift Cᵢ` and then use `(i : Fin n) × C i` for
the proxy type, which would save us from the nested `Sum` constructors.
This implementation takes some inspiration from the one by Mario Carneiro for Mathlib 3.
A difference is that the Mathlib 3 version does not explicitly construct the total proxy type,
and instead it opts to construct the underlying `Finset` as a disjoint union of the `Finset.univ`
for each individual constructor's proxy type.
-/
namespace Mathlib.Deriving.Fintype
open Lean Elab Lean.Parser.Term
open Meta Command
/--
The term elaborator `derive_fintype% α` tries to synthesize a `Fintype α` instance
using all the assumptions in the local context; this can be useful, for example, if one
needs an extra `DecidableEq` instance. It works only if `α` is an inductive
type that `proxy_equiv% α` can handle. The elaborator makes use of the
expected type, so `(derive_fintype% _ : Fintype α)` works.
This uses `proxy_equiv% α`, so as a side effect it defines `proxyType` and `proxyTypeEquiv` in
the namespace associated to the inductive type `α`.
-/
macro "derive_fintype% " t:term : term => `(term| Fintype.ofEquiv _ (proxy_equiv% $t))
/-
Creates a `Fintype` instance by adding additional `Fintype` and `Decidable` instance arguments
for every type and prop parameter of the type, then use the `derive_fintype%` elaborator.
-/
def mkFintype (declName : Name) : CommandElabM Bool := do
let indVal ← getConstInfoInduct declName
let cmd ← liftTermElabM do
let header ← Deriving.mkHeader `Fintype 0 indVal
let binders' ← Deriving.mkInstImplicitBinders `Decidable indVal header.argNames
let instCmd ← `(command|
instance $header.binders:bracketedBinder* $(binders'.map TSyntax.mk):bracketedBinder* :
Fintype $header.targetType := derive_fintype% _)
return instCmd
trace[Elab.Deriving.fintype] "instance command:\n{cmd}"
elabCommand cmd
return true
/-- Derive a `Fintype` instance for enum types. These come with a `ctorIdx` function.
We generate a more optimized instance than the one produced by `mkFintype`.
The strategy is to (1) create a list `enumList` of all the constructors, (2) prove that this
is in `ctorIdx` order, (3) show that `ctorIdx` maps `enumList` to `List.range numCtors` to show
the list has no duplicates, and (4) give the `Fintype` instance, using 2 for completeness.
The proofs are all linear complexity, and the main computation is that
`enumList.map ctorIdx = List.range numCtors`, which is true by `refl`. -/
def mkFintypeEnum (declName : Name) : CommandElabM Unit := do
let indVal ← getConstInfoInduct declName
let levels := indVal.levelParams.map Level.param
let ctorIdxName := declName.mkStr "ctorIdx"
let enumListName := declName.mkStr "enumList"
let ctorThmName := declName.mkStr "enumList_getElem?_ctorIdx_eq"
let enumListNodupName := declName.mkStr "enumList_nodup"
liftTermElabM <| Term.withoutErrToSorry do
do -- Define `enumList` enumerating all constructors
trace[Elab.Deriving.fintype] "defining {enumListName}"
let type := mkConst declName levels
let listType ← mkAppM ``List #[type]
let listNil ← mkAppOptM ``List.nil #[some type]
let listCons name xs := mkAppM ``List.cons #[mkConst name levels, xs]
let enumList ← indVal.ctors.foldrM (listCons · ·) listNil
addAndCompile <| Declaration.defnDecl
{ name := enumListName
levelParams := indVal.levelParams
safety := DefinitionSafety.safe
hints := ReducibilityHints.abbrev
type := listType
value := enumList }
setProtected enumListName
addDocStringCore enumListName s!"A list enumerating every element of the type, \
which are all zero-argument constructors. (Generated by the `Fintype` deriving handler.)"
do -- Prove that this list is in `ctorIdx` order
trace[Elab.Deriving.fintype] "proving {ctorThmName}"
let goalStx ← `(term| ∀ (x : $(← Term.exprToSyntax <| mkConst declName levels)),
$(mkIdent enumListName)[$(mkIdent ctorIdxName) x]? = some x)
let goal ← Term.elabTerm goalStx (mkSort .zero)
let pf ← Term.elabTerm (← `(term| by intro x; cases x <;> rfl)) goal
Term.synthesizeSyntheticMVarsNoPostponing
addAndCompile <| Declaration.thmDecl
{ name := ctorThmName
levelParams := indVal.levelParams
type := ← instantiateMVars goal
value := ← instantiateMVars pf }
setProtected ctorThmName
do -- Use this theorem to prove `enumList` has no duplicates
trace[Elab.Deriving.fintype] "proving {enumListNodupName}"
let enum ← Term.exprToSyntax <| mkConst enumListName levels
let goal ← Term.elabTerm (← `(term| List.Nodup $enum)) (mkSort .zero)
let n : TSyntax `term := quote indVal.numCtors
let pf ← Term.elabTerm (← `(term| by
apply List.Nodup.of_map $(mkIdent ctorIdxName)
have h : List.map $(mkIdent ctorIdxName) $(mkIdent enumListName)
= List.range $n := rfl
exact h ▸ List.nodup_range)) goal
Term.synthesizeSyntheticMVarsNoPostponing
addAndCompile <| Declaration.thmDecl
{ name := enumListNodupName
levelParams := indVal.levelParams
type := ← instantiateMVars goal
value := ← instantiateMVars pf }
setProtected enumListNodupName
-- Make the Fintype instance
trace[Elab.Deriving.fintype] "defining fintype instance"
let cmd ← `(command|
instance : Fintype $(mkIdent declName) where
elems := Finset.mk $(mkIdent enumListName) $(mkIdent enumListNodupName)
complete := by
intro x
rw [Finset.mem_mk, Multiset.mem_coe, List.mem_iff_getElem?]
exact ⟨$(mkIdent ctorIdxName) x, $(mkIdent ctorThmName) x⟩)
trace[Elab.Deriving.fintype] "instance command:\n{cmd}"
elabCommand cmd
def mkFintypeInstanceHandler (declNames : Array Name) : CommandElabM Bool := do
if h : declNames.size ≠ 1 then
return false -- mutually inductive types are not supported
else
let declName := declNames[0]
if ← isEnumType declName then
mkFintypeEnum declName
return true
else
mkFintype declName
initialize
registerDerivingHandler ``Fintype mkFintypeInstanceHandler
registerTraceClass `Elab.Deriving.fintype
end Mathlib.Deriving.Fintype |
.lake/packages/mathlib/Mathlib/Tactic/UnsetOption.lean | import Mathlib.Init
import Lean.Parser.Term
import Lean.Parser.Do
import Lean.Elab.Command
/-!
# The `unset_option` command
This file defines an `unset_option` user command, which unsets user configurable
options.
For example, inputting `set_option blah 7` and then `unset_option blah`
returns the user to the default state before any `set_option` command is called.
This is helpful when the user does not know the default value of the option or it
is cleaner not to write it explicitly, or for some options where the default
behaviour is different from any user set value.
-/
namespace Lean.Elab
variable {m : Type → Type} [Monad m] [MonadOptions m] [MonadRef m] [MonadInfoTree m]
/-- unset the option specified by id -/
def elabUnsetOption (id : Syntax) : m Options := do
-- We include the first argument (the keyword) for position information in case `id` is `missing`.
addCompletionInfo <| CompletionInfo.option (← getRef)
unsetOption id.getId.eraseMacroScopes
where
/-- unset the given option name -/
unsetOption (optionName : Name) : m Options := return (← getOptions).erase optionName
namespace Command
/-- Unset a user option -/
elab (name := unsetOption) "unset_option " opt:ident : command => do
let options ← Elab.elabUnsetOption opt
modify fun s ↦ { s with maxRecDepth := maxRecDepth.get options }
modifyScope fun scope ↦ { scope with opts := options }
end Command
end Lean.Elab |
.lake/packages/mathlib/Mathlib/Tactic/Bound.lean | import Aesop
import Mathlib.Tactic.Bound.Attribute
import Mathlib.Tactic.Lemma
import Mathlib.Tactic.Linarith.Frontend
import Mathlib.Tactic.NormNum.Core
/-!
## The `bound` tactic
`bound` is an `aesop` wrapper that proves inequalities by straightforward recursion on structure,
assuming that intermediate terms are nonnegative or positive as needed. It also has some support
for guessing where it is unclear where to recurse, such as which side of a `min` or `max` to use
as the bound or whether to assume a power is less than or greater than one.
The functionality of `bound` overlaps with `positivity` and `gcongr`, but can jump back and forth
between `0 ≤ x` and `x ≤ y`-type inequalities. For example, `bound` proves
`0 ≤ c → b ≤ a → 0 ≤ a * c - b * c`
by turning the goal into `b * c ≤ a * c`, then using `mul_le_mul_of_nonneg_right`. `bound` also
uses specialized lemmas for goals of the form `1 ≤ x, 1 < x, x ≤ 1, x < 1`.
Additional hypotheses can be passed as `bound [h0, h1 n, ...]`. This is equivalent to declaring
them via `have` before calling `bound`.
See `MathlibTest/Bound/bound.lean` for tests.
### Calc usage
Since `bound` requires the inequality proof to exactly match the structure of the expression, it is
often useful to iterate between `bound` and `rw / simp` using `calc`. Here is an example:
```
-- Calc example: A weak lower bound for `z ↦ z^2 + c`
lemma le_sqr_add {c z : ℂ} (cz : abs c ≤ abs z) (z3 : 3 ≤ abs z) :
2 * abs z ≤ abs (z^2 + c) := by
calc abs (z^2 + c)
_ ≥ abs (z^2) - abs c := by bound
_ ≥ abs (z^2) - abs z := by bound
_ ≥ (abs z - 1) * abs z := by rw [mul_comm, mul_sub_one, ← pow_two, ← abs.map_pow]
_ ≥ 2 * abs z := by bound
```
### Aesop rules
`bound` uses threes types of aesop rules: `apply`, `forward`, and closing `tactic`s. To register a
lemma as an `apply` rule, tag it with `@[bound]`. It will be automatically converted into either a
`norm apply` or `safe apply` rule depending on the number and type of its hypotheses:
1. Nonnegativity/positivity/nonpositivity/negativity hypotheses get score 1 (those involving `0`).
2. Other inequalities get score 10.
3. Disjunctions `a ∨ b` get score 100, plus the score of `a` and `b`.
Score `0` lemmas turn into `norm apply` rules, and score `0 < s` lemmas turn into `safe apply s`
rules. The score is roughly lexicographic ordering on the counts of the three type (guessing,
general, involving-zero), and tries to minimize the complexity of hypotheses we have to prove.
See `Mathlib/Tactic/Bound/Attribute.lean` for the full algorithm.
To register a lemma as a `forward` rule, tag it with `@[bound_forward]`. The most important
builtin forward rule is `le_of_lt`, so that strict inequalities can be used to prove weak
inequalities. Another example is `HasFPowerSeriesOnBall.r_pos`, so that `bound` knows that any
power series present in the context have positive radius of convergence. Custom `@[bound_forward]`
rules that similarly expose inequalities inside structures are often useful.
### Guessing apply rules
There are several cases where there are two standard ways to recurse down an inequality, and it is
not obvious which is correct without more information. For example, `a ≤ min b c` is registered as
a `safe apply 4` rule, since we always need to prove `a ≤ b ∧ a ≤ c`. But if we see `min a b ≤ c`,
either `a ≤ c` or `b ≤ c` suffices, and we don't know which.
In these cases we declare a new lemma with an `∨` hypotheses that covers the two cases. Tagging
it as `@[bound]` will add a +100 penalty to the score, so that it will be used only if necessary.
Aesop will then try both ways by splitting on the resulting `∨` hypothesis.
Currently the two types of guessing rules are
1. `min` and `max` rules, for both `≤` and `<`
2. `pow` and `rpow` monotonicity rules which branch on `1 ≤ a` or `a ≤ 1`.
### Closing tactics
We close numerical goals with `norm_num` and `linarith`.
-/
open Lean Elab Meta Term Mathlib.Tactic Syntax
open Lean.Elab.Tactic (liftMetaTactic liftMetaTactic' TacticM getMainGoal)
namespace Mathlib.Tactic.Bound
/-!
### `.mpr` lemmas of iff statements for use as Aesop apply rules
Once Aesop can do general terms directly, we can remove these:
https://github.com/leanprover-community/aesop/issues/107
-/
lemma Nat.cast_pos_of_pos {R : Type} [Semiring R] [PartialOrder R] [IsOrderedRing R] [Nontrivial R]
{n : ℕ} : 0 < n → 0 < (n : R) :=
Nat.cast_pos.mpr
lemma Nat.one_le_cast_of_le {α : Type} [AddCommMonoidWithOne α] [PartialOrder α]
[AddLeftMono α] [ZeroLEOneClass α]
[CharZero α] {n : ℕ} : 1 ≤ n → 1 ≤ (n : α) :=
Nat.one_le_cast.mpr
/-!
### Apply rules for `bound`
Most `bound` lemmas are registered in-place where the lemma is declared. These are only the lemmas
that do not require additional imports within this file.
-/
-- Reflexivity
attribute [bound] le_refl
-- 0 ≤, 0 <
attribute [bound] sq_nonneg Nat.cast_nonneg abs_nonneg Nat.zero_lt_succ pow_pos pow_nonneg
sub_nonneg_of_le sub_pos_of_lt inv_nonneg_of_nonneg inv_pos_of_pos tsub_pos_of_lt mul_pos
mul_nonneg div_pos div_nonneg add_nonneg
-- 1 ≤, ≤ 1
attribute [bound] Nat.one_le_cast_of_le one_le_mul_of_one_le_of_one_le
-- ≤
attribute [bound] le_abs_self neg_abs_le neg_le_neg tsub_le_tsub_right mul_le_mul_of_nonneg_left
mul_le_mul_of_nonneg_right le_add_of_nonneg_right le_add_of_nonneg_left le_mul_of_one_le_right
mul_le_of_le_one_right sub_le_sub add_le_add mul_le_mul
-- <
attribute [bound] Nat.cast_pos_of_pos neg_lt_neg sub_lt_sub_left sub_lt_sub_right add_lt_add_left
add_lt_add_right mul_lt_mul_of_pos_left mul_lt_mul_of_pos_right
-- min and max
attribute [bound] min_le_right min_le_left le_max_left le_max_right le_min max_le lt_min max_lt
-- Memorize a few constants to avoid going to `norm_num`
attribute [bound] zero_le_one zero_lt_one zero_le_two zero_lt_two
/-!
### Forward rules for `bound`
-/
-- Bound applies `le_of_lt` to all hypotheses
attribute [bound_forward] le_of_lt
/-!
### Guessing rules: when we don't know how to recurse
-/
section Guessing
variable {α : Type} [LinearOrder α] {a b c : α}
-- `min` and `max` guessing lemmas
lemma le_max_of_le_left_or_le_right : a ≤ b ∨ a ≤ c → a ≤ max b c := le_max_iff.mpr
lemma lt_max_of_lt_left_or_lt_right : a < b ∨ a < c → a < max b c := lt_max_iff.mpr
lemma min_le_of_left_le_or_right_le : a ≤ c ∨ b ≤ c → min a b ≤ c := min_le_iff.mpr
lemma min_lt_of_left_lt_or_right_lt : a < c ∨ b < c → min a b < c := min_lt_iff.mpr
-- Register guessing rules
attribute [bound]
-- Which side of the `max` should we use as the lower bound?
le_max_of_le_left_or_le_right
lt_max_of_lt_left_or_lt_right
-- Which side of the `min` should we use as the upper bound?
min_le_of_left_le_or_right_le
min_lt_of_left_lt_or_right_lt
end Guessing
/-!
### Closing tactics
TODO: Kim Morrison noted that we could check for `ℕ` or `ℤ` and try `omega` as well.
-/
/-- Close numerical goals with `norm_num` -/
def boundNormNum : Aesop.RuleTac :=
Aesop.SingleRuleTac.toRuleTac fun i => do
let tac := do Mathlib.Meta.NormNum.elabNormNum .missing .missing .missing
let goals ← Lean.Elab.Tactic.run i.goal tac |>.run'
if !goals.isEmpty then failure
return (#[], none, some .hundred)
attribute [aesop unsafe 10% tactic (rule_sets := [Bound])] boundNormNum
/-- Close numerical and other goals with `linarith` -/
def boundLinarith : Aesop.RuleTac :=
Aesop.SingleRuleTac.toRuleTac fun i => do
Linarith.linarith false [] {} i.goal
return (#[], none, some .hundred)
attribute [aesop unsafe 5% tactic (rule_sets := [Bound])] boundLinarith
/-!
### `bound` tactic implementation
-/
/-- Aesop configuration for `bound` -/
def boundConfig : Aesop.Options := {
enableSimp := false
}
end Mathlib.Tactic.Bound
/-- `bound` tactic for proving inequalities via straightforward recursion on expression structure.
An example use case is
```
-- Calc example: A weak lower bound for `z ↦ z^2 + c`
lemma le_sqr_add (c z : ℝ) (cz : ‖c‖ ≤ ‖z‖) (z3 : 3 ≤ ‖z‖) :
2 * ‖z‖ ≤ ‖z^2 + c‖ := by
calc ‖z^2 + c‖
_ ≥ ‖z^2‖ - ‖c‖ := by bound
_ ≥ ‖z^2‖ - ‖z‖ := by bound
_ ≥ (‖z‖ - 1) * ‖z‖ := by
rw [mul_comm, mul_sub_one, ← pow_two, ← norm_pow]
_ ≥ 2 * ‖z‖ := by bound
```
`bound` is built on top of `aesop`, and uses
1. Apply lemmas registered via the `@[bound]` attribute
2. Forward lemmas registered via the `@[bound_forward]` attribute
3. Local hypotheses from the context
4. Optionally: additional hypotheses provided as `bound [h₀, h₁]` or similar. These are added to the
context as if by `have := hᵢ`.
The functionality of `bound` overlaps with `positivity` and `gcongr`, but can jump back and forth
between `0 ≤ x` and `x ≤ y`-type inequalities. For example, `bound` proves
`0 ≤ c → b ≤ a → 0 ≤ a * c - b * c`
by turning the goal into `b * c ≤ a * c`, then using `mul_le_mul_of_nonneg_right`. `bound` also
contains lemmas for goals of the form `1 ≤ x, 1 < x, x ≤ 1, x < 1`. Conversely, `gcongr` can prove
inequalities for more types of relations, supports all `positivity` functionality, and is likely
faster since it is more specialized (not built atop `aesop`). -/
syntax "bound " (" [" term,* "]")? : tactic
-- Plain `bound` elaboration, with no hypotheses
elab_rules : tactic
| `(tactic| bound) => do
let tac ← `(tactic| aesop (rule_sets := [Bound, -default]) (config := Bound.boundConfig))
liftMetaTactic fun g ↦ do return (← Lean.Elab.runTactic g tac.raw).1
-- Rewrite `bound [h₀, h₁]` into `have := h₀, have := h₁, bound`, and similar
macro_rules
| `(tactic| bound%$tk [$[$ts],*]) => do
let haves ← ts.mapM fun (t : Term) => withRef t `(tactic| have := $t)
`(tactic| ($haves;*; bound%$tk))
/-!
We register `bound` with the `hint` tactic.
-/
register_hint 70 bound |
.lake/packages/mathlib/Mathlib/Tactic/FinCases.lean | import Mathlib.Tactic.Core
import Mathlib.Lean.Expr.Basic
import Mathlib.Data.Fintype.Basic
/-!
# The `fin_cases` tactic.
Given a hypothesis of the form `h : x ∈ (A : List α)`, `x ∈ (A : Finset α)`,
or `x ∈ (A : Multiset α)`,
or a hypothesis of the form `h : A`, where `[Fintype A]` is available,
`fin_cases h` will repeatedly call `cases` to split the goal into
separate cases for each possible value.
-/
open Lean.Meta
namespace Lean.Elab.Tactic
/-- If `e` is of the form `x ∈ (A : List α)`, `x ∈ (A : Finset α)`, or `x ∈ (A : Multiset α)`,
return `some α`, otherwise `none`. -/
def getMemType {m : Type → Type} [Monad m] [MonadError m] (e : Expr) : m (Option Expr) := do
match e.getAppFnArgs with
| (``Membership.mem, #[_, type, _, _, _]) =>
match type.getAppFnArgs with
| (``List, #[α]) => return α
| (``Multiset, #[α]) => return α
| (``Finset, #[α]) => return α
| _ => throwError "Hypothesis must be of type `x ∈ (A : List α)`, `x ∈ (A : Finset α)`, \
or `x ∈ (A : Multiset α)`"
| _ => return none
/--
Recursively runs the `cases` tactic on a hypothesis `h`.
As long as two goals are produced, `cases` is called recursively on the second goal,
and we return a list of the first goals which appeared.
This is useful for hypotheses of the form `h : a ∈ [l₁, l₂, ...]`,
which will be transformed into a sequence of goals with hypotheses `h : a = l₁`, `h : a = l₂`,
and so on.
Cases are named according to the order in which they are generated as tracked by `counter`
and prefixed with `userNamePre`.
-/
partial def unfoldCases (g : MVarId) (h : FVarId)
(userNamePre : Name := .anonymous) (counter := 0) : MetaM (List MVarId) := do
let gs ← g.cases h
try
let #[g₁, g₂] := gs | throwError "unexpected number of cases"
g₁.mvarId.setUserName (.str userNamePre s!"{counter}")
let gs ← unfoldCases g₂.mvarId g₂.fields[2]!.fvarId! userNamePre (counter+1)
return g₁.mvarId :: gs
catch _ => return []
/-- Implementation of the `fin_cases` tactic. -/
partial def finCasesAt (g : MVarId) (hyp : FVarId) : MetaM (List MVarId) := g.withContext do
let type ← hyp.getType >>= instantiateMVars
match ← getMemType type with
| some _ => unfoldCases g hyp (userNamePre := ← g.getTag)
| none =>
-- Deal with `x : A`, where `[Fintype A]` is available:
let inst ← synthInstance (← mkAppM ``Fintype #[type])
let elems ← mkAppOptM ``Fintype.elems #[type, inst]
let t ← mkAppM ``Membership.mem #[elems, .fvar hyp]
let v ← mkAppOptM ``Fintype.complete #[type, inst, Expr.fvar hyp]
let (fvar, g) ← (← g.assert `this t v).intro1P
finCasesAt g fvar
/--
`fin_cases h` performs case analysis on a hypothesis of the form
`h : A`, where `[Fintype A]` is available, or
`h : a ∈ A`, where `A : Finset X`, `A : Multiset X` or `A : List X`.
As an example, in
```
example (f : ℕ → Prop) (p : Fin 3) (h0 : f 0) (h1 : f 1) (h2 : f 2) : f p.val := by
fin_cases p; simp
all_goals assumption
```
after `fin_cases p; simp`, there are three goals, `f 0`, `f 1`, and `f 2`.
-/
syntax (name := finCases) "fin_cases " ("*" <|> term,+) (" with " term,+)? : tactic
/-!
`fin_cases` used to also have two modifiers, `fin_cases ... with ...` and `fin_cases ... using ...`.
With neither actually used in mathlib, they haven't been re-implemented here.
In case someone finds a need for them, and wants to re-implement, the relevant sections of
the doc-string are preserved here:
---
`fin_cases h with l` takes a list of descriptions for the cases of `h`.
These should be definitionally equal to and in the same order as the
default enumeration of the cases.
For example,
```
example (x y : ℕ) (h : x ∈ [1, 2]) : x = y := by
fin_cases h with 1, 1+1
```
produces two cases: `1 = y` and `1 + 1 = y`.
When using `fin_cases a` on data `a` defined with `let`,
the tactic will not be able to clear the variable `a`,
and will instead produce hypotheses `this : a = ...`.
These hypotheses can be given a name using `fin_cases a using ha`.
For example,
```
example (f : ℕ → Fin 3) : True := by
let a := f 3
fin_cases a using ha
```
produces three goals with hypotheses
`ha : a = 0`, `ha : a = 1`, and `ha : a = 2`.
-/
/- TODO: In mathlib3 we ran `norm_num` when there is no `with` clause. Is this still useful? -/
@[tactic finCases] elab_rules : tactic
| `(tactic| fin_cases $[$hyps:ident],*) => withMainContext <| focus do
for h in hyps do
allGoals <| liftMetaTactic (finCasesAt · (← getFVarId h))
end Tactic
end Elab
end Lean |
.lake/packages/mathlib/Mathlib/Tactic/ClearExclamation.lean | import Mathlib.Init
import Lean.Elab.Tactic.ElabTerm
/-! # `clear!` tactic -/
namespace Mathlib.Tactic
open Lean Meta Elab.Tactic
/-- A variant of `clear` which clears not only the given hypotheses but also any other hypotheses
depending on them -/
elab (name := clear!) "clear!" hs:(ppSpace colGt ident)* : tactic => do
let fvarIds ← getFVarIds hs
liftMetaTactic1 fun goal ↦ do
goal.tryClearMany <| (← collectForwardDeps (fvarIds.map .fvar) true).map (·.fvarId!)
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/CancelDenoms.lean | import Mathlib.Tactic.CancelDenoms.Core
import Mathlib.Tactic.NormNum.Ineq |
.lake/packages/mathlib/Mathlib/Tactic/Coe.lean | import Mathlib.Init
import Lean.Elab.ElabRules
/-!
# Additional coercion notation
Defines notation for coercions.
1. `↑ t` is defined in core.
2. `(↑)` is equivalent to the eta-reduction of `(↑ ·)`
3. `⇑ t` is a coercion to a function type.
4. `(⇑)` is equivalent to the eta-reduction of `(⇑ ·)`
3. `↥ t` is a coercion to a type.
6. `(↥)` is equivalent to the eta-reduction of `(↥ ·)`
-/
open Lean Meta
namespace Lean.Elab.Term.CoeImpl
/-- Elaborator for the `(↑)`, `(⇑)`, and `(↥)` notations. -/
def elabPartiallyAppliedCoe (sym : String) (expectedType : Expr)
(mkCoe : (expectedType x : Expr) → TermElabM Expr) : TermElabM Expr := do
let expectedType ← instantiateMVars expectedType
let Expr.forallE _ a b .. := expectedType | do
tryPostpone
throwError "({sym}) must have a function type, not{indentExpr expectedType}"
if b.hasLooseBVars then
tryPostpone
throwError "({sym}) must have a non-dependent function type, not{indentExpr expectedType}"
if a.hasExprMVar then tryPostpone
let f ← withLocalDeclD `x a fun x ↦ do
mkLambdaFVars #[x] (← mkCoe b x)
return f.etaExpanded?.getD f
/-- Partially applied coercion. Equivalent to the η-reduction of `(↑ ·)` -/
elab "(" "↑" ")" : term <= expectedType =>
elabPartiallyAppliedCoe "↑" expectedType fun b x => do
if b.hasExprMVar then tryPostpone
if let .some e ← coerce? x b then
return e
else
throwError "cannot coerce{indentExpr x}\nto type{indentExpr b}"
/-- Partially applied function coercion. Equivalent to the η-reduction of `(⇑ ·)` -/
elab "(" "⇑" ")" : term <= expectedType =>
elabPartiallyAppliedCoe "⇑" expectedType fun b x => do
if let some ty ← coerceToFunction? x then
ensureHasType b ty
else
throwError "cannot coerce to function{indentExpr x}"
/-- Partially applied type coercion. Equivalent to the η-reduction of `(↥ ·)` -/
elab "(" "↥" ")" : term <= expectedType =>
elabPartiallyAppliedCoe "↥" expectedType fun b x => do
if let some ty ← coerceToSort? x then
ensureHasType b ty
else
throwError "cannot coerce to sort{indentExpr x}"
end Lean.Elab.Term.CoeImpl |
.lake/packages/mathlib/Mathlib/Tactic/Field.lean | import Mathlib.Tactic.FieldSimp
import Mathlib.Tactic.Ring.Basic
/-! # A tactic for proving algebraic goals in a field
This file contains the `field` tactic, a finishing tactic which roughly consists of running
`field_simp; ring1`.
-/
open Lean Meta Qq
namespace Mathlib.Tactic.FieldSimp
open Lean Elab Tactic Lean.Parser.Tactic
/--
The `field` tactic proves equality goals in (semi-)fields. For example:
```
example {x y : ℚ} (hx : x + y ≠ 0) : x / (x + y) + y / (x + y) = 1 := by
field
example {a b : ℝ} (ha : a ≠ 0) : a / (a * b) - 1 / b = 0 := by field
```
The scope of the tactic is equality goals which are *universal*, in the sense that they are true in
any field in which the appropriate denominators don't vanish. (That is, they are consequences purely
of the field axioms.)
Checking the nonvanishing of the necessary denominators is done using a variety of tricks -- in
particular this part of the reasoning is non-universal, i.e. can be specific to the field at hand
(order properties, explicit `≠ 0` hypotheses, `CharZero` if that is known, etc). The user can also
provide additional terms to help with the nonzeroness proofs. For example:
```
example {K : Type*} [Field K] (hK : ∀ x : K, x ^ 2 + 1 ≠ 0) (x : K) :
1 / (x ^ 2 + 1) + x ^ 2 / (x ^ 2 + 1) = 1 := by
field [hK]
```
The `field` tactic is built from the tactics `field_simp` (which clears the denominators) and `ring`
(which proves equality goals universally true in commutative (semi-)rings). If `field` fails to
prove your goal, you may still be able to prove your goal by running the `field_simp` and `ring_nf`
normalizations in some order. For example, this statement:
```
example {a b : ℚ} (H : b + a ≠ 0) : a / (a + b) + b / (b + a) = 1
```
is not proved by `field` but is proved by `ring_nf at *; field`. -/
elab (name := field) "field" d:(ppSpace discharger)? args:(ppSpace simpArgs)? : tactic =>
withMainContext do
let disch ← parseDischarger d args
let s0 ← saveState
-- run `field_simp` (only at the top level, not recursively)
liftMetaTactic1 (transformAtTarget ((AtomM.run .reducible ∘ reduceProp disch) ·) "field"
(failIfUnchanged := False) · default)
let s1 ← saveState
try
-- run `ring1`
liftMetaFinishingTactic fun g ↦ AtomM.run .reducible <| Ring.proveEq g
catch e =>
try
-- If `field` doesn't solve the goal, we first backtrack to the situation at the time of the
-- `field_simp` call, and suggest `field_simp` if `field_simp` does anything useful.
s0.restore
let tacticStx ← `(tactic| field_simp $(d)? $(args)?)
evalTactic tacticStx
Meta.Tactic.TryThis.addSuggestion (← getRef) tacticStx
catch _ =>
-- If `field_simp` also doesn't do anything useful (maybe there are no denominators in the
-- goal), then we backtrack to where the `ring1` call failed, and report that error message.
s1.restore
throw e
end Mathlib.Tactic.FieldSimp
/-! We register `field` with the `hint` tactic. -/
register_hint 850 field |
.lake/packages/mathlib/Mathlib/Tactic/ExtendDoc.lean | import Mathlib.Init
import Lean.Elab.ElabRules
import Lean.DocString
/-!
# `extend_doc` command
In a file where declaration `decl` is defined, writing
```lean
extend_doc decl
before "I will be added as a prefix to the docs of `decl`"
after "I will be added as a suffix to the docs of `decl`"
```
does what is probably clear: it extends the doc-string of `decl` by adding the string of
`before` at the beginning and the string of `after` at the end.
At least one of `before` and `after` must appear, but either one of them is optional.
-/
namespace Mathlib.Tactic.ExtendDocs
/-- `extend_docs <declName> before <prefix_string> after <suffix_string>` extends the
docs of `<declName>` by adding `<prefix_string>` before and `<suffix_string>` after. -/
syntax "extend_docs" ident (colGt &"before " str)? (colGt &"after " str)? : command
open Lean Elab Command in
elab_rules : command
| `(command| extend_docs $na:ident $[before $bef:str]? $[after $aft:str]?) => do
if bef.isNone && aft.isNone then throwError "expected at least one of 'before' or 'after'"
let declName ← liftCoreM <| Elab.realizeGlobalConstNoOverloadWithInfo na
let bef := if bef.isNone then "" else (bef.get!).getString ++ "\n\n"
let aft := if aft.isNone then "" else "\n\n" ++ (aft.get!).getString
let oldDoc := (← findDocString? (← getEnv) declName).getD ""
addDocStringCore declName <| bef ++ oldDoc ++ aft
end Mathlib.Tactic.ExtendDocs |
.lake/packages/mathlib/Mathlib/Tactic/Common.lean | -- First import Aesop, Qq, and Plausible
import Aesop
import Qq
import Plausible
-- Tools for analysing imports, like `#find_home`, `#minimize_imports`, ...
import ImportGraph.Imports
-- Import common Batteries tactics and commands
import Batteries.Tactic.Basic
import Batteries.Tactic.Case
import Batteries.Tactic.HelpCmd
import Batteries.Tactic.Alias
import Batteries.Tactic.GeneralizeProofs
-- Import syntax for leansearch
import LeanSearchClient
-- Import Mathlib-specific linters.
import Mathlib.Tactic.Linter.Lint
-- Now import all tactics defined in Mathlib that do not require theory files.
import Mathlib.Tactic.ApplyCongr
-- ApplyFun imports `Mathlib/Order/Monotone/Basic.lean`
-- import Mathlib.Tactic.ApplyFun
import Mathlib.Tactic.ApplyAt
import Mathlib.Tactic.ApplyWith
import Mathlib.Tactic.Basic
import Mathlib.Tactic.ByCases
import Mathlib.Tactic.ByContra
import Mathlib.Tactic.CasesM
import Mathlib.Tactic.Check
import Mathlib.Tactic.Choose
import Mathlib.Tactic.ClearExclamation
import Mathlib.Tactic.ClearExcept
import Mathlib.Tactic.Clear_
import Mathlib.Tactic.Coe
import Mathlib.Tactic.CongrExclamation
import Mathlib.Tactic.CongrM
import Mathlib.Tactic.Constructor
import Mathlib.Tactic.Contrapose
import Mathlib.Tactic.Conv
import Mathlib.Tactic.Convert
import Mathlib.Tactic.DefEqTransformations
import Mathlib.Tactic.DeprecateTo
import Mathlib.Tactic.ErwQuestion
import Mathlib.Tactic.Eqns
import Mathlib.Tactic.ExistsI
import Mathlib.Tactic.ExtractGoal
import Mathlib.Tactic.FailIfNoProgress
import Mathlib.Tactic.Find
import Mathlib.Tactic.FunProp
import Mathlib.Tactic.GCongr
import Mathlib.Tactic.GRewrite
import Mathlib.Tactic.GuardGoalNums
import Mathlib.Tactic.GuardHypNums
import Mathlib.Tactic.HigherOrder
import Mathlib.Tactic.Hint
import Mathlib.Tactic.InferParam
import Mathlib.Tactic.Inhabit
import Mathlib.Tactic.IrreducibleDef
import Mathlib.Tactic.Lift
import Mathlib.Tactic.Linter
import Mathlib.Tactic.MkIffOfInductiveProp
-- NormNum imports `Algebra.Order.Invertible`, `Data.Int.Basic`, `Data.Nat.Cast.Commute`
-- import Mathlib.Tactic.NormNum.Basic
import Mathlib.Tactic.NthRewrite
import Mathlib.Tactic.Observe
import Mathlib.Tactic.OfNat
-- `positivity` imports `Data.Nat.Factorial.Basic`, but hopefully this can be rearranged.
-- import Mathlib.Tactic.Positivity
import Mathlib.Tactic.Propose
import Mathlib.Tactic.Push
import Mathlib.Tactic.RSuffices
import Mathlib.Tactic.Recover
import Mathlib.Tactic.Relation.Rfl
import Mathlib.Tactic.Rename
import Mathlib.Tactic.RenameBVar
import Mathlib.Tactic.Says
import Mathlib.Tactic.ScopedNS
import Mathlib.Tactic.Set
import Mathlib.Tactic.SimpIntro
import Mathlib.Tactic.SimpRw
import Mathlib.Tactic.Simps.Basic
import Mathlib.Tactic.SplitIfs
import Mathlib.Tactic.Spread
import Mathlib.Tactic.Subsingleton
import Mathlib.Tactic.Substs
import Mathlib.Tactic.SuccessIfFailWithMsg
import Mathlib.Tactic.SudoSetOption
import Mathlib.Tactic.SwapVar
import Mathlib.Tactic.Tauto
import Mathlib.Tactic.TermCongr
-- TFAE imports `Mathlib/Data/List/TFAE.lean` and thence `Mathlib/Data/List/Basic.lean`.
-- import Mathlib.Tactic.TFAE
import Mathlib.Tactic.ToExpr
import Mathlib.Tactic.ToLevel
import Mathlib.Tactic.Trace
import Mathlib.Tactic.TypeCheck
import Mathlib.Tactic.UnsetOption
import Mathlib.Tactic.Use
import Mathlib.Tactic.Variable
import Mathlib.Tactic.Widget.Calc
import Mathlib.Tactic.Widget.CongrM
import Mathlib.Tactic.Widget.Conv
import Mathlib.Tactic.Widget.LibraryRewrite
import Mathlib.Tactic.WLOG
import Mathlib.Util.AssertExists
import Mathlib.Util.CountHeartbeats
import Mathlib.Util.PrintSorries
import Mathlib.Util.TransImports
import Mathlib.Util.WhatsNew
/-!
This file imports all tactics which do not have significant theory imports,
and hence can be imported very low in the theory import hierarchy,
thereby making tactics widely available without needing specific imports.
We include some commented out imports here, with an explanation of their theory requirements,
to save some time for anyone wondering why they are not here.
We also import theory-free linters, commands, and utilities which are useful to have low in the
import hierarchy.
-/
/-!
# Register tactics with `hint`. Tactics with larger priority run first.
-/
section Hint
register_hint 200 grind
register_hint 1000 trivial
register_hint 500 tauto
register_hint 1000 split
register_hint 1000 intro
register_hint 80 aesop
register_hint 800 simp_all?
register_hint 600 exact?
register_hint 1000 decide
register_hint 200 omega
register_hint 200 fun_prop
end Hint |
.lake/packages/mathlib/Mathlib/Tactic/ProdAssoc.lean | import Mathlib.Lean.Expr.Basic
import Mathlib.Logic.Equiv.Defs
/-!
# Associativity of products
This file constructs a term elaborator for "obvious" equivalences between iterated products.
For example,
```lean
(prod_assoc% : (α × β) × (γ × δ) ≃ α × (β × γ) × δ)
```
gives the "obvious" equivalence between `(α × β) × (γ × δ)` and `α × (β × γ) × δ`.
-/
namespace Lean.Expr
open Lean Meta
/-- A helper type to keep track of universe levels and types in iterated products. -/
inductive ProdTree where
| type (tp : Expr) (l : Level)
| prod (fst snd : ProdTree) (lfst lsnd : Level)
deriving Repr
/-- The iterated product corresponding to a `ProdTree`. -/
def ProdTree.getType : ProdTree → Expr
| type tp _ => tp
| prod fst snd u v => mkAppN (.const ``Prod [u,v]) #[fst.getType, snd.getType]
/-- The number of types appearing in an iterated product encoded as a `ProdTree`. -/
def ProdTree.size : ProdTree → Nat
| type _ _ => 1
| prod fst snd _ _ => fst.size + snd.size
/-- The components of an iterated product, presented as a `ProdTree`. -/
def ProdTree.components : ProdTree → List Expr
| type tp _ => [tp]
| prod fst snd _ _ => fst.components ++ snd.components
/-- Make a `ProdTree` out of an `Expr`. -/
partial def mkProdTree (e : Expr) : MetaM ProdTree :=
match e.consumeMData with
| .app (.app (.const ``Prod [u,v]) X) Y => do
return .prod (← X.mkProdTree) (← Y.mkProdTree) u v
| X => do
let some u := (← whnfD <| ← inferType X).type? | throwError "Not a type{indentExpr X}"
return .type X u
/-- Given `P : ProdTree` representing an iterated product and `e : Expr` which
should correspond to a term of the iterated product, this will return
a list, whose items correspond to the leaves of `P` (i.e. the types appearing in the product),
where each item is the appropriate composition of `Prod.fst` and `Prod.snd` applied to `e`
resulting in an element of the type corresponding to the leaf.
For example, if `P` corresponds to `(X × Y) × Z` and `t : (X × Y) × Z`, then this
should return `[t.fst.fst, t.fst.snd, t.snd]`.
-/
def ProdTree.unpack (t : Expr) : ProdTree → MetaM (List Expr)
| type _ _ => return [t]
| prod fst snd u v => do
let fst' ← fst.unpack <| mkAppN (.const ``Prod.fst [u,v]) #[fst.getType, snd.getType, t]
let snd' ← snd.unpack <| mkAppN (.const ``Prod.snd [u,v]) #[fst.getType, snd.getType, t]
return fst' ++ snd'
/-- This function should act as the "reverse" of `ProdTree.unpack`, constructing
a term of the iterated product out of a list of terms of the types appearing in the product. -/
def ProdTree.pack (ts : List Expr) : ProdTree → MetaM Expr
| type _ _ => do
match ts with
| [] => throwError "Can't pack the empty list."
| [a] => return a
| _ => throwError "Failed due to size mismatch."
| prod fst snd u v => do
let fstSize := fst.size
let sndSize := snd.size
unless ts.length == fstSize + sndSize do throwError "Failed due to size mismatch."
let tsfst := ts.toArray[:fstSize] |>.toArray.toList
let tssnd := ts.toArray[fstSize:] |>.toArray.toList
let mk : Expr := mkAppN (.const ``Prod.mk [u,v]) #[fst.getType, snd.getType]
return .app (.app mk (← fst.pack tsfst)) (← snd.pack tssnd)
/-- Converts a term `e` in an iterated product `P1` into a term of an iterated product `P2`.
Here `e` is an `Expr` representing the term, and the iterated products are represented
by terms of `ProdTree`. -/
def ProdTree.convertTo (P1 P2 : ProdTree) (e : Expr) : MetaM Expr :=
return ← P2.pack <| ← P1.unpack e
/-- Given two expressions corresponding to iterated products of the same types, associated in
possibly different ways, this constructs the "obvious" function from one to the other. -/
def mkProdFun (a b : Expr) : MetaM Expr := do
let pa ← a.mkProdTree
let pb ← b.mkProdTree
unless pa.components.length == pb.components.length do
throwError "The number of components in{indentD a}\nand{indentD b}\nmust match."
for (x,y) in pa.components.zip pb.components do
unless ← isDefEq x y do
throwError "Component{indentD x}\nis not definitionally equal to component{indentD y}."
withLocalDeclD `t a fun fvar => do
mkLambdaFVars #[fvar] (← pa.convertTo pb fvar)
/-- Construct the equivalence between iterated products of the same type, associated
in possibly different ways. -/
def mkProdEquiv (a b : Expr) : MetaM Expr := do
let some u := (← whnfD <| ← inferType a).type? | throwError "Not a type{indentExpr a}"
let some v := (← whnfD <| ← inferType b).type? | throwError "Not a type{indentExpr b}"
return mkAppN (.const ``Equiv.mk [.succ u,.succ v])
#[a, b, ← mkProdFun a b, ← mkProdFun b a,
.app (.const ``rfl [.succ u]) a,
.app (.const ``rfl [.succ v]) b]
/-- IMPLEMENTATION: Syntax used in the implementation of `prod_assoc%`.
This elaborator postpones if there are metavariables in the expected type,
and to propagate the fact that this elaborator produces an `Equiv`,
the `prod_assoc%` macro sets things up with a type ascription.
This enables using `prod_assoc%` with, for example `Equiv.trans` dot notation. -/
syntax (name := prodAssocStx) "prod_assoc_internal%" : term
open Elab Term in
/-- Elaborator for `prod_assoc%`. -/
@[term_elab prodAssocStx]
def elabProdAssoc : TermElab := fun stx expectedType? => do
match stx with
| `(prod_assoc_internal%) => do
let some expectedType ← tryPostponeIfHasMVars? expectedType?
| throwError "expected type must be known"
let .app (.app (.const ``Equiv _) a) b := expectedType
| throwError "Expected type{indentD expectedType}\nis not of the form `α ≃ β`."
mkProdEquiv a b
| _ => throwUnsupportedSyntax
/--
`prod_assoc%` elaborates to the "obvious" equivalence between iterated products of types,
regardless of how the products are parenthesized.
The `prod_assoc%` term uses the expected type when elaborating.
For example, `(prod_assoc% : (α × β) × (γ × δ) ≃ α × (β × γ) × δ)`.
The elaborator can handle holes in the expected type,
so long as they eventually get filled by unification.
```lean
example : (α × β) × (γ × δ) ≃ α × (β × γ) × δ :=
(prod_assoc% : _ ≃ α × β × γ × δ).trans prod_assoc%
```
-/
macro "prod_assoc%" : term => `((prod_assoc_internal% : _ ≃ _))
end Lean.Expr |
.lake/packages/mathlib/Mathlib/Tactic/SetLike.lean | import Mathlib.Tactic.Basic
import Aesop
/-!
# SetLike Rule Set
This module defines the `SetLike` and `SetLike!` Aesop rule sets.
Aesop rule sets only become visible once the file in which they're declared is imported,
so we must put this declaration into its own file.
-/
declare_aesop_rule_sets [SetLike] (default := true)
declare_aesop_rule_sets [SetLike!] (default := false)
library_note2 «SetLike Aesop ruleset» /--
The Aesop tactic (`aesop`) can automatically prove obvious facts about membership in structures
such as subgroups and subrings. Certain lemmas regarding membership in algebraic substructures
are given the `aesop` attribute according to the following principles:
- Rules are in the `SetLike` ruleset: (rule_sets := [SetLike]).
- Apply-style rules with trivial hypotheses are registered both as `simp` rules and as
`safe` Aesop rules. The latter is needed in case there are metavariables in the goal.
For instance, Aesop can use the rule `one_mem` to prove
`(M : Type*) [Monoid M] (s : Submonoid M) ⊢ ∃ m : M, m ∈ s`.
- Apply-style rules with nontrivial hypotheses are marked `unsafe`. This is because applying them
might not be provability-preserving in the context of more complex membership rules.
For instance, `mul_mem` is marked `unsafe`.
- Unsafe rules are given a probability no higher than 90%. This is the same probability
Aesop gives to safe rules when they generate metavariables. If the priority is too high, loops
generated in the presence of metavariables will time out Aesop.
- Rules that cause loops (even in the absence of metavariables) are given a low priority of 5%.
These rules are placed in the `SetLike!` ruleset instead of the `SetLike` ruleset so that
they are not invoked by default. An example is `SetLike.mem_of_subset`.
- Simplifying the left-hand side of a membership goal is prioritised over simplifying the
right-hand side. By default, rules simplifying the LHS (e.g., `mul_mem`) are given
probability 90% and rules simplifying the RHS are given probability 80%
(e.g., `Subgroup.mem_closure_of_mem`).
- These default probabilities are for rules with simple hypotheses that fail quickly when
not satisfied, such as `mul_mem`. Rules with more complicated hypotheses, or rules that are
less likely to progress the proof state towards a solution, are given a lower priority.
- To optimise performance and avoid timeouts, Aesop should not be invoking low-priority rules
unless it can make no other progress. If common usage patterns cause Aesop to invoke such rules,
additional lemmas are added at a higher priority to cover that pattern.
For example, `Subgroup.mem_closure_of_mem` covers a common use case of `SetLike.mem_of_subset`.
Some examples of membership-related goals which Aesop with this ruleset is designed to close
can be found in the file MathlibTest/set_like.lean.
-/ |
.lake/packages/mathlib/Mathlib/Tactic/ModCases.lean | import Mathlib.Data.Int.ModEq
import Mathlib.Tactic.HaveI
/-! # `mod_cases` tactic
The `mod_cases` tactic does case disjunction on `e % n`, where `e : ℤ` or `e : ℕ`,
to yield `n` new subgoals corresponding to the possible values of `e` modulo `n`.
-/
namespace Mathlib.Tactic.ModCases
open Lean Meta Elab Tactic Term Qq
namespace IntMod
open Int
/--
`OnModCases n a lb p` represents a partial proof by cases that
there exists `0 ≤ z < n` such that `a ≡ z (mod n)`.
It asserts that if `∃ z, lb ≤ z < n ∧ a ≡ z (mod n)` holds, then `p`
(where `p` is the current goal).
-/
def OnModCases (n : ℕ) (a : ℤ) (lb : ℕ) (p : Sort*) :=
∀ z, lb ≤ z ∧ z < n ∧ a ≡ ↑z [ZMOD ↑n] → p
/--
The first theorem we apply says that `∃ z, 0 ≤ z < n ∧ a ≡ z (mod n)`.
The actual mathematical content of the proof is here.
-/
@[inline] def onModCases_start (p : Sort*) (a : ℤ) (n : ℕ) (hn : Nat.ble 1 n = true)
(H : OnModCases n a (nat_lit 0) p) : p :=
H (a % ↑n).toNat <| by
have := natCast_pos.2 <| Nat.le_of_ble_eq_true hn
have nonneg := emod_nonneg a <| Int.ne_of_gt this
refine ⟨Nat.zero_le _, ?_, ?_⟩
· rw [Int.toNat_lt nonneg]; exact Int.emod_lt_of_pos _ this
· rw [Int.ModEq, Int.toNat_of_nonneg nonneg, emod_emod]
/--
The end point is that once we have reduced to `∃ z, n ≤ z < n ∧ a ≡ z (mod n)`
there are no more cases to consider.
-/
@[inline] def onModCases_stop (p : Sort*) (n : ℕ) (a : ℤ) : OnModCases n a n p :=
fun _ h => (Nat.not_lt.2 h.1 h.2.1).elim
/--
The successor case decomposes `∃ z, b ≤ z < n ∧ a ≡ z (mod n)` into
`a ≡ b (mod n) ∨ ∃ z, b+1 ≤ z < n ∧ a ≡ z (mod n)`,
and the `a ≡ b (mod n) → p` case becomes a subgoal.
-/
@[inline] def onModCases_succ {p : Sort*} {n : ℕ} {a : ℤ} (b : ℕ)
(h : a ≡ OfNat.ofNat b [ZMOD OfNat.ofNat n] → p) (H : OnModCases n a (Nat.add b 1) p) :
OnModCases n a b p :=
fun z ⟨h₁, h₂⟩ => if e : b = z then h (e ▸ h₂.2) else H _ ⟨Nat.lt_of_le_of_ne h₁ e, h₂⟩
/--
Proves an expression of the form `OnModCases n a b p` where `n` and `b` are raw nat literals
and `b ≤ n`. Returns the list of subgoals `?gi : a ≡ i [ZMOD n] → p`.
-/
partial def proveOnModCases {u : Level} (n : Q(ℕ)) (a : Q(ℤ)) (b : Q(ℕ)) (p : Q(Sort u)) :
MetaM (Q(OnModCases $n $a $b $p) × List MVarId) := do
if n.natLit! ≤ b.natLit! then
haveI' : $b =Q $n := ⟨⟩
pure (q(onModCases_stop $p $n $a), [])
else
let ty := q($a ≡ OfNat.ofNat $b [ZMOD OfNat.ofNat $n] → $p)
let g ← mkFreshExprMVarQ ty
have b1 : Q(ℕ) := mkRawNatLit (b.natLit! + 1)
haveI' : $b1 =Q ($b).succ := ⟨⟩
let (pr, acc) ← proveOnModCases n a b1 p
pure (q(onModCases_succ $b $g $pr), g.mvarId! :: acc)
/--
Int case of `mod_cases h : e % n`.
-/
def modCases (h : TSyntax `Lean.binderIdent) (e : Q(ℤ)) (n : ℕ) : TacticM Unit := do
let ⟨u, p, g⟩ ← inferTypeQ (.mvar (← getMainGoal))
have lit : Q(ℕ) := mkRawNatLit n
have p₁ : Nat.ble 1 $lit =Q true := ⟨⟩
let (p₂, gs) ← proveOnModCases lit e q(nat_lit 0) p
let gs ← gs.mapM fun g => do
let (fvar, g) ← match h with
| `(binderIdent| $n:ident) => g.intro n.getId
| _ => g.intro `H
g.withContext <| (Expr.fvar fvar).addLocalVarInfoForBinderIdent h
pure g
g.mvarId!.assign q(onModCases_start $p $e $lit $p₁ $p₂)
replaceMainGoal gs
end IntMod
namespace NatMod
/--
`OnModCases n a lb p` represents a partial proof by cases that
there exists `0 ≤ m < n` such that `a ≡ m (mod n)`.
It asserts that if `∃ m, lb ≤ m < n ∧ a ≡ m (mod n)` holds, then `p`
(where `p` is the current goal).
-/
def OnModCases (n : ℕ) (a : ℕ) (lb : ℕ) (p : Sort _) :=
∀ m, lb ≤ m ∧ m < n ∧ a ≡ m [MOD n] → p
/--
The first theorem we apply says that `∃ m, 0 ≤ m < n ∧ a ≡ m (mod n)`.
The actual mathematical content of the proof is here.
-/
@[inline] def onModCases_start (p : Sort _) (a : ℕ) (n : ℕ) (hn : Nat.ble 1 n = true)
(H : OnModCases n a (nat_lit 0) p) : p :=
H (a % n) <| by
refine ⟨Nat.zero_le _, ?_, ?_⟩
· exact Nat.mod_lt _ (Nat.le_of_ble_eq_true hn)
· rw [Nat.ModEq, Nat.mod_mod]
/--
The end point is that once we have reduced to `∃ m, n ≤ m < n ∧ a ≡ m (mod n)`
there are no more cases to consider.
-/
@[inline] def onModCases_stop (p : Sort _) (n : ℕ) (a : ℕ) : OnModCases n a n p :=
fun _ h => (Nat.not_lt.2 h.1 h.2.1).elim
/--
The successor case decomposes `∃ m, b ≤ m < n ∧ a ≡ m (mod n)` into
`a ≡ b (mod n) ∨ ∃ m, b+1 ≤ m < n ∧ a ≡ m (mod n)`,
and the `a ≡ b (mod n) → p` case becomes a subgoal.
-/
@[inline] def onModCases_succ {p : Sort _} {n : ℕ} {a : ℕ} (b : ℕ)
(h : a ≡ b [MOD n] → p) (H : OnModCases n a (Nat.add b 1) p) :
OnModCases n a b p :=
fun z ⟨h₁, h₂⟩ => if e : b = z then h (e ▸ h₂.2) else H _ ⟨Nat.lt_of_le_of_ne h₁ e, h₂⟩
/--
Proves an expression of the form `OnModCases n a b p` where `n` and `b` are raw nat literals
and `b ≤ n`. Returns the list of subgoals `?gi : a ≡ i [MOD n] → p`.
-/
partial def proveOnModCases {u : Level} (n : Q(ℕ)) (a : Q(ℕ)) (b : Q(ℕ)) (p : Q(Sort u)) :
MetaM (Q(OnModCases $n $a $b $p) × List MVarId) := do
if n.natLit! ≤ b.natLit! then
have : $b =Q $n := ⟨⟩
pure (q(onModCases_stop $p $n $a), [])
else
let ty := q($a ≡ $b [MOD $n] → $p)
let g ← mkFreshExprMVarQ ty
let ((pr : Q(OnModCases $n $a (Nat.add $b 1) $p)), acc) ←
proveOnModCases n a (mkRawNatLit (b.natLit! + 1)) p
pure (q(onModCases_succ $b $g $pr), g.mvarId! :: acc)
/--
Nat case of `mod_cases h : e % n`.
-/
def modCases (h : TSyntax `Lean.binderIdent) (e : Q(ℕ)) (n : ℕ) : TacticM Unit := do
let ⟨u, p, g⟩ ← inferTypeQ (.mvar (← getMainGoal))
have lit : Q(ℕ) := mkRawNatLit n
let p₁ : Q(Nat.ble 1 $lit = true) := (q(Eq.refl true) : Expr)
let (p₂, gs) ← proveOnModCases lit e q(nat_lit 0) p
let gs ← gs.mapM fun g => do
let (fvar, g) ← match h with
| `(binderIdent| $n:ident) => g.intro n.getId
| _ => g.intro `H
g.withContext <| (Expr.fvar fvar).addLocalVarInfoForBinderIdent h
pure g
g.mvarId!.assign q(onModCases_start $p $e $lit $p₁ $p₂)
replaceMainGoal gs
end NatMod
/--
* The tactic `mod_cases h : e % 3` will perform a case disjunction on `e`.
If `e : ℤ`, then it will yield subgoals containing the assumptions
`h : e ≡ 0 [ZMOD 3]`, `h : e ≡ 1 [ZMOD 3]`, `h : e ≡ 2 [ZMOD 3]`
respectively. If `e : ℕ` instead, then it works similarly, except with
`[MOD 3]` instead of `[ZMOD 3]`.
* In general, `mod_cases h : e % n` works
when `n` is a positive numeral and `e` is an expression of type `ℕ` or `ℤ`.
* If `h` is omitted as in `mod_cases e % n`, it will be default-named `H`.
-/
syntax "mod_cases " (atomic(binderIdent ":"))? term:71 " % " num : tactic
elab_rules : tactic
| `(tactic| mod_cases $[$h :]? $e % $n) => do
let n := n.getNat
if n == 0 then Elab.throwUnsupportedSyntax
let h := h.getD (← `(binderIdent| _))
withMainContext do
let e ← Tactic.elabTerm e none
let α : Q(Type) ← inferType e
match α with
| ~q(ℤ) => IntMod.modCases h e n
| ~q(ℕ) => NatMod.modCases h e n
| _ => throwError "mod_cases only works with Int and Nat"
end Mathlib.Tactic.ModCases |
.lake/packages/mathlib/Mathlib/Tactic/DeclarationNames.lean | import Lean.DeclarationRange
import Lean.ResolveName
-- Import this linter explicitly to ensure that
-- this file has a valid copyright header and module docstring.
import Mathlib.Tactic.Linter.Header
/-!
This file contains functions that are used by multiple linters.
-/
open Lean Parser Elab Command Meta
namespace Mathlib.Linter
/--
If `pos` is a `String.Pos`, then `getNamesFrom pos` returns the array of identifiers
for the names of the declarations whose syntax begins in position at least `pos`.
-/
def getNamesFrom {m} [Monad m] [MonadEnv m] [MonadFileMap m] (pos : String.Pos.Raw) :
m (Array Syntax) := do
-- declarations from parallelism branches should not be interesting here, so use `local`
let drs := declRangeExt.toPersistentEnvExtension.getState (asyncMode := .local) (← getEnv)
let fm ← getFileMap
let mut nms := #[]
for (nm, rgs) in drs do
if pos ≤ fm.ofPosition rgs.range.pos then
let ofPos1 := fm.ofPosition rgs.selectionRange.pos
let ofPos2 := fm.ofPosition rgs.selectionRange.endPos
nms := nms.push (mkIdentFrom (.ofRange ⟨ofPos1, ofPos2⟩) nm)
return nms
/--
If `stx` is a syntax node for an `export` statement, then `getAliasSyntax stx` returns the array of
identifiers with the "exported" names.
-/
def getAliasSyntax {m} [Monad m] [MonadResolveName m] (stx : Syntax) : m (Array Syntax) := do
let mut aliases := #[]
if let `(export $_ ($ids*)) := stx then
let currNamespace ← getCurrNamespace
for idStx in ids do
let id := idStx.getId
aliases := aliases.push
(mkIdentFrom (.ofRange (idStx.raw.getRange?.getD default)) (currNamespace ++ id))
return aliases
/-- Used for linters which use `0` instead of `false` for disabling. -/
def logLint0Disable {m} [Monad m] [MonadLog m] [AddMessageContext m] [MonadOptions m]
(linterOption : Lean.Option Nat) (stx : Syntax) (msg : MessageData) : m Unit :=
let disable := .note m!"This linter can be disabled with `set_option {linterOption.name} 0`"
logWarningAt stx (.tagged linterOption.name m!"{msg}{disable}") |
.lake/packages/mathlib/Mathlib/Tactic/FieldSimp.lean | import Mathlib.Data.Ineq
import Mathlib.Tactic.FieldSimp.Attr
import Mathlib.Tactic.FieldSimp.Discharger
import Mathlib.Tactic.FieldSimp.Lemmas
import Mathlib.Util.AtLocation
import Mathlib.Util.AtomM.Recurse
import Mathlib.Util.SynthesizeUsing
/-!
# `field_simp` tactic
Tactic to clear denominators in algebraic expressions.
-/
open Lean Meta Qq
namespace Mathlib.Tactic.FieldSimp
initialize registerTraceClass `Tactic.field_simp
variable {v : Level} {M : Q(Type v)}
/-! ### Lists of expressions representing exponents and atoms, and operations on such lists -/
/-- Basic meta-code "normal form" object of the `field_simp` tactic: a type synonym
for a list of ordered triples comprising an expression representing a term of a type `M` (where
typically `M` is a field), together with an integer "power" and a natural number "index".
The natural number represents the index of the `M` term in the `AtomM` monad: this is not enforced,
but is sometimes assumed in operations. Thus when items `((a₁, x₁), k)` and `((a₂, x₂), k)`
appear in two different `FieldSimp.qNF` objects (i.e. with the same `ℕ`-index `k`), it is expected
that the expressions `x₁` and `x₂` are the same. It is also expected that the items in a
`FieldSimp.qNF` list are in strictly decreasing order by natural-number index.
By forgetting the natural number indices, an expression representing a `Mathlib.Tactic.FieldSimp.NF`
object can be built from a `FieldSimp.qNF` object; this construction is provided as
`Mathlib.Tactic.FieldSimp.qNF.toNF`. -/
abbrev qNF (M : Q(Type v)) := List ((ℤ × Q($M)) × ℕ)
namespace qNF
/-- Given `l` of type `qNF M`, i.e. a list of `(ℤ × Q($M)) × ℕ`s (two `Expr`s and a natural
number), build an `Expr` representing an object of type `NF M` (i.e. `List (ℤ × M)`) in the
in the obvious way: by forgetting the natural numbers and gluing together the integers and `Expr`s.
-/
def toNF (l : qNF q($M)) : Q(NF $M) :=
let l' : List Q(ℤ × $M) := (l.map Prod.fst).map (fun (a, x) ↦ q(($a, $x)))
let qt : List Q(ℤ × $M) → Q(List (ℤ × $M)) := List.rec q([]) (fun e _ l ↦ q($e ::ᵣ $l))
qt l'
/-- Given `l` of type `qNF M`, i.e. a list of `(ℤ × Q($M)) × ℕ`s (two `Expr`s and a natural
number), apply an expression representing a function with domain `ℤ` to each of the `ℤ`
components. -/
def onExponent (l : qNF M) (f : ℤ → ℤ) : qNF M :=
l.map fun ((a, x), k) ↦ ((f a, x), k)
/-- Build a transparent expression for the product of powers represented by `l : qNF M`. -/
def evalPrettyMonomial (iM : Q(GroupWithZero $M)) (r : ℤ) (x : Q($M)) :
MetaM (Σ e : Q($M), Q(zpow' $x $r = $e)) := do
match r with
| 0 => /- If an exponent is zero then we must not have been able to prove that x is nonzero. -/
return ⟨q($x / $x), q(zpow'_zero_eq_div ..)⟩
| 1 => return ⟨x, q(zpow'_one $x)⟩
| .ofNat r => do
let pf ← mkDecideProofQ q($r ≠ 0)
return ⟨q($x ^ $r), q(zpow'_ofNat $x $pf)⟩
| r => do
let pf ← mkDecideProofQ q($r ≠ 0)
return ⟨q($x ^ $r), q(zpow'_of_ne_zero_right _ _ $pf)⟩
/-- Try to drop an expression `zpow' x r` from the beginning of a product. If `r ≠ 0` this of course
can't be done. If `r = 0`, then `zpow' x r` is equal to `x / x`, so it can be simplified to 1 (hence
dropped from the beginning of the product) if we can find a proof that `x ≠ 0`. -/
def tryClearZero
(disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type)) (iM : Q(CommGroupWithZero $M))
(r : ℤ) (x : Q($M)) (i : ℕ) (l : qNF M) :
MetaM <| Σ l' : qNF M, Q(NF.eval $(qNF.toNF (((r, x), i) :: l)) = NF.eval $(l'.toNF)) := do
if r != 0 then
return ⟨((r, x), i) :: l, q(rfl)⟩
try
let pf' : Q($x ≠ 0) ← disch q($x ≠ 0)
have pf_r : Q($r = 0) := ← mkDecideProofQ q($r = 0)
return ⟨l, (q(NF.eval_cons_of_pow_eq_zero $pf_r $pf' $(l.toNF)):)⟩
catch _=>
return ⟨((r, x), i) :: l, q(rfl)⟩
/-- Given `l : qNF M`, obtain `l' : qNF M` by removing all `l`'s exponent-zero entries where the
corresponding atom can be proved nonzero, and construct a proof that their associated expressions
are equal. -/
def removeZeros
(disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type)) (iM : Q(CommGroupWithZero $M))
(l : qNF M) :
MetaM <| Σ l' : qNF M, Q(NF.eval $(l.toNF) = NF.eval $(l'.toNF)) :=
match l with
| [] => return ⟨[], q(rfl)⟩
| ((r, x), i) :: t => do
let ⟨t', pf⟩ ← removeZeros disch iM t
let ⟨l', pf'⟩ ← tryClearZero disch iM r x i t'
let pf' : Q(NF.eval (($r, $x) ::ᵣ $(qNF.toNF t')) = NF.eval $(qNF.toNF l')) := pf'
let pf'' : Q(NF.eval (($r, $x) ::ᵣ $(qNF.toNF t)) = NF.eval $(qNF.toNF l')) :=
q(NF.eval_cons_eq_eval_of_eq_of_eq $r $x $pf $pf')
return ⟨l', pf''⟩
/-- Given a product of powers, split as a quotient: the positive powers divided by (the negations
of) the negative powers. -/
def split (iM : Q(CommGroupWithZero $M)) (l : qNF M) :
MetaM (Σ l_n l_d : qNF M, Q(NF.eval $(l.toNF)
= NF.eval $(l_n.toNF) / NF.eval $(l_d.toNF))) := do
match l with
| [] => return ⟨[], [], q(Eq.symm (div_one (1:$M)))⟩
| ((r, x), i) :: t =>
let ⟨t_n, t_d, pf⟩ ← split iM t
if r > 0 then
return ⟨((r, x), i) :: t_n, t_d, (q(NF.cons_eq_div_of_eq_div $r $x $pf):)⟩
else if r = 0 then
return ⟨((1, x), i) :: t_n, ((1, x), i) :: t_d, (q(NF.cons_zero_eq_div_of_eq_div $x $pf):)⟩
else
let r' : ℤ := -r
return ⟨t_n, ((r', x), i) :: t_d, (q(NF.cons_eq_div_of_eq_div' $r' $x $pf):)⟩
private def evalPrettyAux (iM : Q(CommGroupWithZero $M)) (l : qNF M) :
MetaM (Σ e : Q($M), Q(NF.eval $(l.toNF) = $e)) := do
match l with
| [] => return ⟨q(1), q(rfl)⟩
| [((r, x), _)] =>
let ⟨e, pf⟩ ← evalPrettyMonomial q(inferInstance) r x
return ⟨e, q(by rw [NF.eval_cons]; exact Eq.trans (one_mul _) $pf)⟩
| ((r, x), k) :: t =>
let ⟨e, pf_e⟩ ← evalPrettyMonomial q(inferInstance) r x
let ⟨t', pf⟩ ← evalPrettyAux iM t
have pf'' : Q(NF.eval $(qNF.toNF (((r, x), k) :: t)) = (NF.eval $(qNF.toNF t)) * zpow' $x $r) :=
(q(NF.eval_cons ($r, $x) $(qNF.toNF t)):)
return ⟨q($t' * $e), q(Eq.trans $pf'' (congr_arg₂ HMul.hMul $pf $pf_e))⟩
/-- Build a transparent expression for the product of powers represented by `l : qNF M`. -/
def evalPretty (iM : Q(CommGroupWithZero $M)) (l : qNF M) :
MetaM (Σ e : Q($M), Q(NF.eval $(l.toNF) = $e)) := do
let ⟨l_n, l_d, pf⟩ ← split iM l
let ⟨num, pf_n⟩ ← evalPrettyAux q(inferInstance) l_n
let ⟨den, pf_d⟩ ← evalPrettyAux q(inferInstance) l_d
match l_d with
| [] => return ⟨num, q(eq_div_of_eq_one_of_subst $pf $pf_n)⟩
| _ =>
let pf_n : Q(NF.eval $(l_n.toNF) = $num) := pf_n
let pf_d : Q(NF.eval $(l_d.toNF) = $den) := pf_d
let pf : Q(NF.eval $(l.toNF) = NF.eval $(l_n.toNF) / NF.eval $(l_d.toNF)) := pf
let pf_tot := q(eq_div_of_subst $pf $pf_n $pf_d)
return ⟨q($num / $den), pf_tot⟩
/-- Given two terms `l₁`, `l₂` of type `qNF M`, i.e. lists of `(ℤ × Q($M)) × ℕ`s (an integer, an
`Expr` and a natural number), construct another such term `l`, which will have the property that in
the field `$M`, the product of the "multiplicative linear combinations" represented by `l₁` and
`l₂` is the multiplicative linear combination represented by `l`.
The construction assumes, to be valid, that the lists `l₁` and `l₂` are in strictly decreasing order
by `ℕ`-component, and that if pairs `(a₁, x₁)` and `(a₂, x₂)` appear in `l₁`, `l₂` respectively with
the same `ℕ`-component `k`, then the expressions `x₁` and `x₂` are equal.
The construction is as follows: merge the two lists, except that if pairs `(a₁, x₁)` and `(a₂, x₂)`
appear in `l₁`, `l₂` respectively with the same `ℕ`-component `k`, then contribute a term
`(a₁ + a₂, x₁)` to the output list with `ℕ`-component `k`. -/
def mul : qNF q($M) → qNF q($M) → qNF q($M)
| [], l => l
| l, [] => l
| ((a₁, x₁), k₁) :: t₁, ((a₂, x₂), k₂) :: t₂ =>
if k₁ > k₂ then
((a₁, x₁), k₁) :: mul t₁ (((a₂, x₂), k₂) :: t₂)
else if k₁ = k₂ then
/- If we can prove that the atom is nonzero then we could remove it from this list,
but this will be done at a later stage. -/
((a₁ + a₂, x₁), k₁) :: mul t₁ t₂
else
((a₂, x₂), k₂) :: mul (((a₁, x₁), k₁) :: t₁) t₂
/-- Given two terms `l₁`, `l₂` of type `qNF M`, i.e. lists of `(ℤ × Q($M)) × ℕ`s (an integer, an
`Expr` and a natural number), recursively construct a proof that in the field `$M`, the product of
the "multiplicative linear combinations" represented by `l₁` and `l₂` is the multiplicative linear
combination represented by `FieldSimp.qNF.mul l₁ l₁`. -/
def mkMulProof (iM : Q(CommGroupWithZero $M)) (l₁ l₂ : qNF M) :
Q((NF.eval $(l₁.toNF)) * NF.eval $(l₂.toNF) = NF.eval $((qNF.mul l₁ l₂).toNF)) :=
match l₁, l₂ with
| [], l => (q(one_mul (NF.eval $(l.toNF))):)
| l, [] => (q(mul_one (NF.eval $(l.toNF))):)
| ((a₁, x₁), k₁) :: t₁, ((a₂, x₂), k₂) :: t₂ =>
if k₁ > k₂ then
let pf := mkMulProof iM t₁ (((a₂, x₂), k₂) :: t₂)
(q(NF.mul_eq_eval₁ ($a₁, $x₁) $pf):)
else if k₁ = k₂ then
let pf := mkMulProof iM t₁ t₂
(q(NF.mul_eq_eval₂ $a₁ $a₂ $x₁ $pf):)
else
let pf := mkMulProof iM (((a₁, x₁), k₁) :: t₁) t₂
(q(NF.mul_eq_eval₃ ($a₂, $x₂) $pf):)
/-- Given two terms `l₁`, `l₂` of type `qNF M`, i.e. lists of `(ℤ × Q($M)) × ℕ`s (an integer, an
`Expr` and a natural number), construct another such term `l`, which will have the property that in
the field `$M`, the quotient of the "multiplicative linear combinations" represented by `l₁` and
`l₂` is the multiplicative linear combination represented by `l`.
The construction assumes, to be valid, that the lists `l₁` and `l₂` are in strictly decreasing order
by `ℕ`-component, and that if pairs `(a₁, x₁)` and `(a₂, x₂)` appear in `l₁`, `l₂` respectively with
the same `ℕ`-component `k`, then the expressions `x₁` and `x₂` are equal.
The construction is as follows: merge the first list and the negation of the second list, except
that if pairs `(a₁, x₁)` and `(a₂, x₂)` appear in `l₁`, `l₂` respectively with the same
`ℕ`-component `k`, then contribute a term `(a₁ - a₂, x₁)` to the output list with `ℕ`-component `k`.
-/
def div : qNF M → qNF M → qNF M
| [], l => l.onExponent Neg.neg
| l, [] => l
| ((a₁, x₁), k₁) :: t₁, ((a₂, x₂), k₂) :: t₂ =>
if k₁ > k₂ then
((a₁, x₁), k₁) :: div t₁ (((a₂, x₂), k₂) :: t₂)
else if k₁ = k₂ then
((a₁ - a₂, x₁), k₁) :: div t₁ t₂
else
((-a₂, x₂), k₂) :: div (((a₁, x₁), k₁) :: t₁) t₂
/-- Given two terms `l₁`, `l₂` of type `qNF M`, i.e. lists of `(ℤ × Q($M)) × ℕ`s (an integer, an
`Expr` and a natural number), recursively construct a proof that in the field `$M`, the quotient
of the "multiplicative linear combinations" represented by `l₁` and `l₂` is the multiplicative
linear combination represented by `FieldSimp.qNF.div l₁ l₁`. -/
def mkDivProof (iM : Q(CommGroupWithZero $M)) (l₁ l₂ : qNF M) :
Q(NF.eval $(l₁.toNF) / NF.eval $(l₂.toNF) = NF.eval $((qNF.div l₁ l₂).toNF)) :=
match l₁, l₂ with
| [], l => (q(NF.one_div_eq_eval $(l.toNF)):)
| l, [] => (q(div_one (NF.eval $(l.toNF))):)
| ((a₁, x₁), k₁) :: t₁, ((a₂, x₂), k₂) :: t₂ =>
if k₁ > k₂ then
let pf := mkDivProof iM t₁ (((a₂, x₂), k₂) :: t₂)
(q(NF.div_eq_eval₁ ($a₁, $x₁) $pf):)
else if k₁ = k₂ then
let pf := mkDivProof iM t₁ t₂
(q(NF.div_eq_eval₂ $a₁ $a₂ $x₁ $pf):)
else
let pf := mkDivProof iM (((a₁, x₁), k₁) :: t₁) t₂
(q(NF.div_eq_eval₃ ($a₂, $x₂) $pf):)
end qNF
/-- Constraints on denominators which may need to be considered in `field_simp`: no condition,
nonzeroness, or strict positivity. -/
inductive DenomCondition (iM : Q(GroupWithZero $M))
| none
| nonzero
| positive (iM' : Q(PartialOrder $M)) (iM'' : Q(PosMulStrictMono $M))
(iM''' : Q(PosMulReflectLT $M)) (iM'''' : Q(ZeroLEOneClass $M))
namespace DenomCondition
/-- Given a field-simp-normal-form expression `L` (a product of powers of atoms), a proof (according
to the value of `DenomCondition`) of that expression's nonzeroness, strict positivity, etc. -/
def proof {iM : Q(GroupWithZero $M)} (L : qNF M) : DenomCondition iM → Type
| .none => Unit
| .nonzero => Q(NF.eval $(qNF.toNF L) ≠ 0)
| .positive _ _ _ _ => Q(0 < NF.eval $(qNF.toNF L))
/-- The empty field-simp-normal-form expression `[]` (representing `1` as an empty product of powers
of atoms) can be proved to be nonzero, strict positivity, etc., as needed, as specified by the
value of `DenomCondition`. -/
def proofZero {iM : Q(CommGroupWithZero $M)} :
∀ cond : DenomCondition (M := M) q(inferInstance), cond.proof []
| .none => Unit.unit
| .nonzero => q(one_ne_zero (α := $M))
| .positive _ _ _ _ => q(zero_lt_one (α := $M))
end DenomCondition
/-- Given a proof of the nonzeroness, strict positivity, etc. (as specified by the value of
`DenomCondition`) of a field-simp-normal-form expression `L` (a product of powers of atoms),
construct a corresponding proof for `((r, e), i) :: L`.
In this version we also expose the proof of nonzeroness of `e`. -/
def mkDenomConditionProofSucc {iM : Q(CommGroupWithZero $M)}
(disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
{cond : DenomCondition (M := M) q(inferInstance)}
{L : qNF M} (hL : cond.proof L) (e : Q($M)) (r : ℤ) (i : ℕ) :
MetaM (Q($e ≠ 0) × cond.proof (((r, e), i) :: L)) := do
match cond with
| .none => return (← disch q($e ≠ 0), Unit.unit)
| .nonzero =>
let pf ← disch q($e ≠ 0)
let pf₀ : Q(NF.eval $(qNF.toNF L) ≠ 0) := hL
return (pf, q(NF.cons_ne_zero $r $pf $pf₀))
| .positive _ _ _ _ =>
let pf ← disch q(0 < $e)
let pf₀ : Q(0 < NF.eval $(qNF.toNF L)) := hL
let pf' := q(NF.cons_pos $r (x := $e) $pf $pf₀)
return (q(LT.lt.ne' $pf), pf')
/-- Given a proof of the nonzeroness, strict positivity, etc. (as specified by the value of
`DenomCondition`) of a field-simp-normal-form expression `L` (a product of powers of atoms),
construct a corresponding proof for `((r, e), i) :: L`. -/
def mkDenomConditionProofSucc' {iM : Q(CommGroupWithZero $M)}
(disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
{cond : DenomCondition (M := M) q(inferInstance)}
{L : qNF M} (hL : cond.proof L) (e : Q($M)) (r : ℤ) (i : ℕ) :
MetaM (cond.proof (((r, e), i) :: L)) := do
match cond with
| .none => return Unit.unit
| .nonzero =>
let pf ← disch q($e ≠ 0)
let pf₀ : Q(NF.eval $(qNF.toNF L) ≠ 0) := hL
return q(NF.cons_ne_zero $r $pf $pf₀)
| .positive _ _ _ _ =>
let pf ← disch q(0 < $e)
let pf₀ : Q(0 < NF.eval $(qNF.toNF L)) := hL
return q(NF.cons_pos $r (x := $e) $pf $pf₀)
namespace qNF
/-- Extract a common factor `L` of two products-of-powers `l₁` and `l₂` in `M`, in the sense that
both `l₁` and `l₂` are quotients by `L` of products of *positive* powers.
The variable `cond` specifies whether we extract a *certified nonzero[/positive]* (and therefore
potentially smaller) common factor. If so, the metaprogram returns a "proof" that this common factor
is nonzero/positive, i.e. an expression `Q(NF.eval $(L.toNF) ≠ 0)` / `Q(0 < NF.eval $(L.toNF))`. -/
partial def gcd (iM : Q(CommGroupWithZero $M)) (l₁ l₂ : qNF M)
    (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
    (cond : DenomCondition (M := M) q(inferInstance)) :
    MetaM <| Σ (L l₁' l₂' : qNF M),
      Q((NF.eval $(L.toNF)) * NF.eval $(l₁'.toNF) = NF.eval $(l₁.toNF)) ×
      Q((NF.eval $(L.toNF)) * NF.eval $(l₂'.toNF) = NF.eval $(l₂.toNF)) ×
      cond.proof L :=
  /- Handle the case where atom `i` is present in the first list but not the second. -/
  let absent (l₁ l₂ : qNF M) (n : ℤ) (e : Q($M)) (i : ℕ) :
      MetaM <| Σ (L l₁' l₂' : qNF M),
        Q((NF.eval $(L.toNF)) * NF.eval $(l₁'.toNF) = NF.eval $(qNF.toNF (((n, e), i) :: l₁))) ×
        Q((NF.eval $(L.toNF)) * NF.eval $(l₂'.toNF) = NF.eval $(l₂.toNF)) ×
        cond.proof L := do
    -- first extract a common factor of the tails, then decide what to do with the head `e ^ n`
    let ⟨L, l₁', l₂', pf₁, pf₂, pf₀⟩ ← gcd iM l₁ l₂ disch cond
    if 0 < n then
      -- Don't pull anything out
      return ⟨L, ((n, e), i) :: l₁', l₂', (q(NF.eval_mul_eval_cons $n $e $pf₁):), q($pf₂), pf₀⟩
    else if n = 0 then
      -- Don't pull anything out, but eliminate the term if it is a cancellable zero
      let ⟨l₁'', pf''⟩ ← tryClearZero disch iM 0 e i l₁'
      let pf'' : Q(NF.eval ((0, $e) ::ᵣ $(l₁'.toNF)) = NF.eval $(l₁''.toNF)) := pf''
      return ⟨L, l₁'', l₂', (q(NF.eval_mul_eval_cons_zero $pf₁ $pf''):), q($pf₂), pf₀⟩
    -- here `n < 0`: pulling `e ^ n` out of `l₁` requires putting `e ^ (-n)` into `l₂'`,
    -- which is only sound under the side condition provided by `mkDenomConditionProofSucc`
    try
      let (pf, b) ← mkDenomConditionProofSucc disch pf₀ e n i
      -- if nonzeroness proof succeeds
      return ⟨((n, e), i) :: L, l₁', ((-n, e), i) :: l₂', (q(NF.eval_cons_mul_eval $n $e $pf₁):),
        (q(NF.eval_cons_mul_eval_cons_neg $n $pf $pf₂):), b⟩
    catch _ =>
      -- if we can't prove nonzeroness, don't pull out e.
      return ⟨L, ((n, e), i) :: l₁', l₂', (q(NF.eval_mul_eval_cons $n $e $pf₁):), q($pf₂), pf₀⟩
  /- Handle the case where atom `i` is present in both lists. -/
  let bothPresent (t₁ t₂ : qNF M) (n₁ n₂ : ℤ) (e : Q($M)) (i : ℕ) :
      MetaM <| Σ (L l₁' l₂' : qNF M),
        Q((NF.eval $(L.toNF)) * NF.eval $(l₁'.toNF) = NF.eval $(qNF.toNF (((n₁, e), i) :: t₁))) ×
        Q((NF.eval $(L.toNF)) * NF.eval $(l₂'.toNF) = NF.eval $(qNF.toNF (((n₂, e), i) :: t₂))) ×
        cond.proof L := do
    let ⟨L, l₁', l₂', pf₁, pf₂, pf₀⟩ ← gcd iM t₁ t₂ disch cond
    if n₁ < n₂ then
      -- pull out the smaller exponent `n₁`; the residue `e ^ (n₂ - n₁)` stays in `l₂'`
      let N : ℤ := n₂ - n₁
      return ⟨((n₁, e), i) :: L, l₁', ((n₂ - n₁, e), i) :: l₂',
        (q(NF.eval_cons_mul_eval $n₁ $e $pf₁):), (q(NF.mul_eq_eval₂ $n₁ $N $e $pf₂):),
        ← mkDenomConditionProofSucc' disch pf₀ e n₁ i⟩
    else if n₁ = n₂ then
      -- equal exponents: the full power `e ^ n₁` is common to both sides
      return ⟨((n₁, e), i) :: L, l₁', l₂', (q(NF.eval_cons_mul_eval $n₁ $e $pf₁):),
        (q(NF.eval_cons_mul_eval $n₂ $e $pf₂):), ← mkDenomConditionProofSucc' disch pf₀ e n₁ i⟩
    else
      -- symmetric to the first branch: pull out `n₂`, residue `e ^ (n₁ - n₂)` stays in `l₁'`
      let N : ℤ := n₁ - n₂
      return ⟨((n₂, e), i) :: L, ((n₁ - n₂, e), i) :: l₁', l₂',
        (q(NF.mul_eq_eval₂ $n₂ $N $e $pf₁):), (q(NF.eval_cons_mul_eval $n₂ $e $pf₂):),
        ← mkDenomConditionProofSucc' disch pf₀ e n₂ i⟩
  match l₁, l₂ with
  | [], [] => pure ⟨[], [], [],
      (q(one_mul (NF.eval $(qNF.toNF (M := M) []))):),
      (q(one_mul (NF.eval $(qNF.toNF (M := M) []))):), cond.proofZero⟩
  | ((n, e), i) :: t, [] => do
    let ⟨L, l₁', l₂', pf₁, pf₂, pf₀⟩ ← absent t [] n e i
    return ⟨L, l₁', l₂', q($pf₁), q($pf₂), pf₀⟩
  | [], ((n, e), i) :: t => do
    -- mirror image of the previous case: run `absent` with the roles of the lists swapped
    let ⟨L, l₂', l₁', pf₂, pf₁, pf₀⟩ ← absent t [] n e i
    return ⟨L, l₁', l₂', q($pf₁), q($pf₂), pf₀⟩
  | ((n₁, e₁), i₁) :: t₁, ((n₂, e₂), i₂) :: t₂ => do
    -- compare head atom indices (the lists are presumably kept sorted by atom index — the
    -- `absent`/`bothPresent` dispatch below relies on this; TODO confirm against qNF invariants)
    if i₁ > i₂ then
      let ⟨L, l₁', l₂', pf₁, pf₂, pf₀⟩ ← absent t₁ (((n₂, e₂), i₂) :: t₂) n₁ e₁ i₁
      return ⟨L, l₁', l₂', q($pf₁), q($pf₂), pf₀⟩
    else if i₁ == i₂ then
      try
        bothPresent t₁ t₂ n₁ n₂ e₁ i₁
      catch _ =>
        -- if `bothPresent` fails, don't pull out `e`
        -- the failure case of `bothPresent` should be:
        -- * `.none` case: never
        -- * `.nonzero` case: if `e` can't be proved nonzero
        -- * `.positive _` case: if `e` can't be proved positive
        let ⟨L, l₁', l₂', pf₁, pf₂, pf₀⟩ ← gcd iM t₁ t₂ disch cond
        return ⟨L, ((n₁, e₁), i₁) :: l₁', ((n₂, e₂), i₂) :: l₂',
          (q(NF.eval_mul_eval_cons $n₁ $e₁ $pf₁):), (q(NF.eval_mul_eval_cons $n₂ $e₂ $pf₂):), pf₀⟩
    else
      let ⟨L, l₂', l₁', pf₂, pf₁, pf₀⟩ ← absent t₂ (((n₁, e₁), i₁) :: t₁) n₂ e₂ i₂
      return ⟨L, l₁', l₂', q($pf₁), q($pf₂), pf₀⟩
end qNF
/-! ### Core of the `field_simp` tactic -/
/-- The main algorithm behind the `field_simp` tactic: partially-normalizing an
expression in a field `M` into the form x1 ^ c1 * x2 ^ c2 * ... x_k ^ c_k,
where x1, x2, ... are distinct atoms in `M`, and c1, c2, ... are integers.

Returns `y` (the expression `x` with its sign stripped off), a sign `g` together with a proof
`x = g • y`, and a normal-form list `l` with a proof `y = NF.eval l.toNF`. -/
partial def normalize (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
    (iM : Q(CommGroupWithZero $M)) (x : Q($M)) :
    AtomM (Σ y : Q($M), (Σ g : Sign M, Q($x = $(g.expr y))) ×
      Σ l : qNF M, Q($y = NF.eval $(l.toNF))) := do
  -- fallback: record `y` as a fresh atom (with the `AtomM` atom-normalizer run first
  -- when `normalize?` is true)
  let baseCase (y : Q($M)) (normalize? : Bool) :
      AtomM (Σ (l : qNF M), Q($y = NF.eval $(l.toNF))) := do
    if normalize? then
      let r ← (← read).evalAtom y
      have y' : Q($M) := r.expr
      have pf : Q($y = $y') := ← r.getProof
      let (k, ⟨x', _⟩) ← AtomM.addAtomQ y'
      pure ⟨[((1, x'), k)], q(Eq.trans $pf (NF.atom_eq_eval $x'))⟩
    else
      let (k, ⟨x', _⟩) ← AtomM.addAtomQ y
      pure ⟨[((1, x'), k)], q(NF.atom_eq_eval $x')⟩
  match x with
  /- normalize a multiplication: `x₁ * x₂` -/
  | ~q($x₁ * $x₂) =>
    let ⟨y₁, ⟨g₁, pf₁_sgn⟩, l₁, pf₁⟩ ← normalize disch iM x₁
    let ⟨y₂, ⟨g₂, pf₂_sgn⟩, l₂, pf₂⟩ ← normalize disch iM x₂
    -- build the new list and proof
    have pf := qNF.mkMulProof iM l₁ l₂
    let ⟨G, pf_y⟩ := ← Sign.mul iM y₁ y₂ g₁ g₂
    pure ⟨q($y₁ * $y₂), ⟨G, q(Eq.trans (congr_arg₂ HMul.hMul $pf₁_sgn $pf₂_sgn) $pf_y)⟩,
      qNF.mul l₁ l₂, q(NF.mul_eq_eval $pf₁ $pf₂ $pf)⟩
  /- normalize a division: `x₁ / x₂` -/
  | ~q($x₁ / $x₂) =>
    let ⟨y₁, ⟨g₁, pf₁_sgn⟩, l₁, pf₁⟩ ← normalize disch iM x₁
    let ⟨y₂, ⟨g₂, pf₂_sgn⟩, l₂, pf₂⟩ ← normalize disch iM x₂
    -- build the new list and proof
    let pf := qNF.mkDivProof iM l₁ l₂
    let ⟨G, pf_y⟩ := ← Sign.div iM y₁ y₂ g₁ g₂
    pure ⟨q($y₁ / $y₂), ⟨G, q(Eq.trans (congr_arg₂ HDiv.hDiv $pf₁_sgn $pf₂_sgn) $pf_y)⟩,
      qNF.div l₁ l₂, q(NF.div_eq_eval $pf₁ $pf₂ $pf)⟩
  /- normalize a inversion: `y⁻¹` -/
  | ~q($y⁻¹) =>
    let ⟨y', ⟨g, pf_sgn⟩, l, pf⟩ ← normalize disch iM y
    let pf_y ← Sign.inv iM y' g
    -- build the new list and proof, casing according to the sign of `x`
    pure ⟨q($y'⁻¹), ⟨g, q(Eq.trans (congr_arg Inv.inv $pf_sgn) $pf_y)⟩,
      l.onExponent Neg.neg, (q(NF.inv_eq_eval $pf):)⟩
  /- normalize an integer exponentiation: `y ^ (s : ℤ)` -/
  | ~q($y ^ ($s : ℤ)) =>
    -- a non-literal exponent: treat the whole power as an atom
    let some s := Expr.int? s | pure ⟨x, ⟨.plus, q(rfl)⟩, ← baseCase x true⟩
    if s = 0 then
      pure ⟨q(1), ⟨Sign.plus, (q(zpow_zero $y):)⟩, [], q(NF.one_eq_eval $M)⟩
    else
      let ⟨y', ⟨g, pf_sgn⟩, l, pf⟩ ← normalize disch iM y
      let pf_s ← mkDecideProofQ q($s ≠ 0)
      let ⟨G, pf_y⟩ ← Sign.zpow iM y' g s
      let pf_y' := q(Eq.trans (congr_arg (· ^ $s) $pf_sgn) $pf_y)
      -- exponentiation by `s` multiplies every exponent in the normal form by `s`
      pure ⟨q($y' ^ $s), ⟨G, pf_y'⟩, l.onExponent (HMul.hMul s), (q(NF.zpow_eq_eval $pf_s $pf):)⟩
  /- normalize a natural number exponentiation: `y ^ (s : ℕ)` -/
  | ~q($y ^ ($s : ℕ)) =>
    let some s := Expr.nat? s | pure ⟨x, ⟨.plus, q(rfl)⟩, ← baseCase x true⟩
    if s = 0 then
      pure ⟨q(1), ⟨Sign.plus, (q(pow_zero $y):)⟩, [], q(NF.one_eq_eval $M)⟩
    else
      let ⟨y', ⟨g, pf_sgn⟩, l, pf⟩ ← normalize disch iM y
      let pf_s ← mkDecideProofQ q($s ≠ 0)
      let ⟨G, pf_y⟩ ← Sign.pow iM y' g s
      let pf_y' := q(Eq.trans (congr_arg (· ^ $s) $pf_sgn) $pf_y)
      pure ⟨q($y' ^ $s), ⟨G, pf_y'⟩, l.onExponent (HSMul.hSMul s), (q(NF.pow_eq_eval $pf_s $pf):)⟩
  /- normalize a `(1:M)` -/
  | ~q(1) => pure ⟨q(1), ⟨Sign.plus, q(rfl)⟩, [], q(NF.one_eq_eval $M)⟩
  /- normalize an addition: `a + b` -/
  | ~q(HAdd.hAdd (self := @instHAdd _ $i) $a $b) =>
    try
      -- handling `+` requires a `Semifield`; on failure, fall back to treating `x` as an atom
      let _i ← synthInstanceQ q(Semifield $M)
      assumeInstancesCommute
      let ⟨_, ⟨g₁, pf_sgn₁⟩, l₁, pf₁⟩ ← normalize disch iM a
      let ⟨_, ⟨g₂, pf_sgn₂⟩, l₂, pf₂⟩ ← normalize disch iM b
      -- factor out a common factor `L` of the two summands (no nonzeroness certification needed)
      let ⟨L, l₁', l₂', pf₁', pf₂', _⟩ ← l₁.gcd iM l₂ disch .none
      let ⟨e₁, pf₁''⟩ ← qNF.evalPretty iM l₁'
      let ⟨e₂, pf₂''⟩ ← qNF.evalPretty iM l₂'
      have pf_a := ← Sign.mkEqMul iM pf_sgn₁ q(Eq.trans $pf₁ (Eq.symm $pf₁')) pf₁''
      have pf_b := ← Sign.mkEqMul iM pf_sgn₂ q(Eq.trans $pf₂ (Eq.symm $pf₂')) pf₂''
      -- the residual sum becomes a single new atom, multiplied back onto `L`
      let e : Q($M) := q($(g₁.expr e₁) + $(g₂.expr e₂))
      let ⟨sum, pf_atom⟩ ← baseCase e false
      let L' := qNF.mul L sum
      let pf_mul : Q((NF.eval $(L.toNF)) * NF.eval $(sum.toNF) = NF.eval $(L'.toNF)) :=
        qNF.mkMulProof iM L sum
      pure ⟨x, ⟨Sign.plus, q(rfl)⟩, L', q(subst_add $pf_a $pf_b $pf_atom $pf_mul)⟩
    catch _ => pure ⟨x, ⟨.plus, q(rfl)⟩, ← baseCase x true⟩
  /- normalize a subtraction: `a - b` -/
  | ~q(HSub.hSub (self := @instHSub _ $i) $a $b) =>
    try
      -- subtraction additionally requires negation, hence a full `Field`
      let _i ← synthInstanceQ q(Field $M)
      assumeInstancesCommute
      let ⟨_, ⟨g₁, pf_sgn₁⟩, l₁, pf₁⟩ ← normalize disch iM a
      let ⟨_, ⟨g₂, pf_sgn₂⟩, l₂, pf₂⟩ ← normalize disch iM b
      let ⟨L, l₁', l₂', pf₁', pf₂', _⟩ ← l₁.gcd iM l₂ disch .none
      let ⟨e₁, pf₁''⟩ ← qNF.evalPretty iM l₁'
      let ⟨e₂, pf₂''⟩ ← qNF.evalPretty iM l₂'
      have pf_a := ← Sign.mkEqMul iM pf_sgn₁ q(Eq.trans $pf₁ (Eq.symm $pf₁')) pf₁''
      have pf_b := ← Sign.mkEqMul iM pf_sgn₂ q(Eq.trans $pf₂ (Eq.symm $pf₂')) pf₂''
      let e : Q($M) := q($(g₁.expr e₁) - $(g₂.expr e₂))
      let ⟨sum, pf_atom⟩ ← baseCase e false
      let L' := qNF.mul L sum
      let pf_mul : Q((NF.eval $(L.toNF)) * NF.eval $(sum.toNF) = NF.eval $(L'.toNF)) :=
        qNF.mkMulProof iM L sum
      pure ⟨x, ⟨Sign.plus, q(rfl)⟩, L', q(subst_sub $pf_a $pf_b $pf_atom $pf_mul)⟩
    catch _ => pure ⟨x, ⟨.plus, q(rfl)⟩, ← baseCase x true⟩
  /- normalize a negation: `-a` -/
  | ~q(Neg.neg (self := $i) $a) =>
    try
      let iM' ← synthInstanceQ q(Field $M)
      assumeInstancesCommute
      let ⟨y, ⟨g, pf_sgn⟩, l, pf⟩ ← normalize disch iM a
      -- fold the negation into the sign `g`, leaving the normal form untouched
      let ⟨G, pf_y⟩ ← Sign.neg iM' y g
      pure ⟨y, ⟨G, q(Eq.trans (congr_arg Neg.neg $pf_sgn) $pf_y)⟩, l, pf⟩
    catch _ => pure ⟨x, ⟨.plus, q(rfl)⟩, ← baseCase x true⟩
  -- TODO special-case handling of zero? maybe not necessary
  /- anything else should be treated as an atom -/
  | _ => pure ⟨x, ⟨.plus, q(rfl)⟩, ← baseCase x true⟩
/-- Given `x` in a commutative group-with-zero, construct a new expression in the standard form
*** / *** (all denominators at the end) which is equal to `x`, returning the new expression
together with a proof of equality. -/
def reduceExprQ (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
    (iM : Q(CommGroupWithZero $M)) (x : Q($M)) : AtomM (Σ x' : Q($M), Q($x = $x')) := do
  -- split off the sign `g` and compute the normal-form list `l`
  let ⟨y, ⟨g, pf_sgn⟩, l, pf⟩ ← normalize disch iM x
  -- drop zero-exponent factors that can be discharged as cancellable
  let ⟨l', pf'⟩ ← qNF.removeZeros disch iM l
  -- render the normal form back into a human-readable expression
  let ⟨x', pf''⟩ ← qNF.evalPretty iM l'
  let pf_yx : Q($y = $x') := q(Eq.trans (Eq.trans $pf $pf') $pf'')
  -- reattach the sign
  return ⟨g.expr x', q(Eq.trans $pf_sgn $(g.congr pf_yx))⟩
/-- Given `e₁` and `e₂`, cancel nonzero factors to construct a new equality which is logically
equivalent to `e₁ = e₂`. Returns the two sides of the new equality and a propositional-equality
proof `(e₁ = e₂) = (f₁ = f₂)`. -/
def reduceEqQ (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
    (iM : Q(CommGroupWithZero $M)) (e₁ e₂ : Q($M)) :
    AtomM (Σ f₁ f₂ : Q($M), Q(($e₁ = $e₂) = ($f₁ = $f₂))) := do
  -- normalize both sides of the equality
  let ⟨_, ⟨g₁, pf_sgn₁⟩, l₁, pf_l₁⟩ ← normalize disch iM e₁
  let ⟨_, ⟨g₂, pf_sgn₂⟩, l₂, pf_l₂⟩ ← normalize disch iM e₂
  -- extract a certified-*nonzero* common factor `L`, which may therefore be cancelled
  let ⟨L, l₁', l₂', pf_lhs, pf_rhs, pf₀⟩ ← l₁.gcd iM l₂ disch .nonzero
  let pf₀ : Q(NF.eval $(qNF.toNF L) ≠ 0) := pf₀
  let ⟨f₁', pf_l₁'⟩ ← l₁'.evalPretty iM
  let ⟨f₂', pf_l₂'⟩ ← l₂'.evalPretty iM
  have pf_ef₁ := ← Sign.mkEqMul iM pf_sgn₁ q(Eq.trans $pf_l₁ (Eq.symm $pf_lhs)) pf_l₁'
  have pf_ef₂ := ← Sign.mkEqMul iM pf_sgn₂ q(Eq.trans $pf_l₂ (Eq.symm $pf_rhs)) pf_l₂'
  return ⟨g₁.expr f₁', g₂.expr f₂', q(eq_eq_cancel_eq $pf_ef₁ $pf_ef₂ $pf₀)⟩
/-- Given `e₁` and `e₂`, cancel positive factors to construct a new inequality which is logically
equivalent to `e₁ ≤ e₂`. -/
def reduceLeQ (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
    (iM : Q(CommGroupWithZero $M)) (iM' : Q(PartialOrder $M))
    (iM'' : Q(PosMulStrictMono $M)) (iM''' : Q(PosMulReflectLE $M)) (iM'''' : Q(ZeroLEOneClass $M))
    (e₁ e₂ : Q($M)) :
    AtomM (Σ f₁ f₂ : Q($M), Q(($e₁ ≤ $e₂) = ($f₁ ≤ $f₂))) := do
  let ⟨_, ⟨g₁, pf_sgn₁⟩, l₁, pf_l₁⟩ ← normalize disch iM e₁
  let ⟨_, ⟨g₂, pf_sgn₂⟩, l₂, pf_l₂⟩ ← normalize disch iM e₂
  -- extract a certified-*positive* common factor `L`.
  -- NOTE(review): unlike `reduceLtQ`, the third argument to `.positive` here is
  -- `q(inferInstance)` rather than the supplied `iM'''` — presumably `.positive` expects a
  -- different reflection instance than `PosMulReflectLE`; confirm against `DenomCondition`.
  let ⟨L, l₁', l₂', pf_lhs, pf_rhs, pf₀⟩
    ← l₁.gcd iM l₂ disch (.positive iM' iM'' q(inferInstance) iM'''')
  let pf₀ : Q(0 < NF.eval $(qNF.toNF L)) := pf₀
  let ⟨f₁', pf_l₁'⟩ ← l₁'.evalPretty iM
  let ⟨f₂', pf_l₂'⟩ ← l₂'.evalPretty iM
  have pf_ef₁ := ← Sign.mkEqMul iM pf_sgn₁ q(Eq.trans $pf_l₁ (Eq.symm $pf_lhs)) pf_l₁'
  have pf_ef₂ := ← Sign.mkEqMul iM pf_sgn₂ q(Eq.trans $pf_l₂ (Eq.symm $pf_rhs)) pf_l₂'
  return ⟨g₁.expr f₁', g₂.expr f₂', q(le_eq_cancel_le $pf_ef₁ $pf_ef₂ $pf₀)⟩
/-- Given `e₁` and `e₂`, cancel positive factors to construct a new inequality which is logically
equivalent to `e₁ < e₂`. -/
def reduceLtQ (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type))
    (iM : Q(CommGroupWithZero $M)) (iM' : Q(PartialOrder $M))
    (iM'' : Q(PosMulStrictMono $M)) (iM''' : Q(PosMulReflectLT $M)) (iM'''' : Q(ZeroLEOneClass $M))
    (e₁ e₂ : Q($M)) :
    AtomM (Σ f₁ f₂ : Q($M), Q(($e₁ < $e₂) = ($f₁ < $f₂))) := do
  -- normalize both sides of the inequality
  let ⟨_, ⟨g₁, pf_sgn₁⟩, l₁, pf_l₁⟩ ← normalize disch iM e₁
  let ⟨_, ⟨g₂, pf_sgn₂⟩, l₂, pf_l₂⟩ ← normalize disch iM e₂
  -- extract a certified-*positive* common factor `L`, which may therefore be cancelled in `<`
  let ⟨L, l₁', l₂', pf_lhs, pf_rhs, pf₀⟩
    ← l₁.gcd iM l₂ disch (.positive iM' iM'' iM''' iM'''')
  let pf₀ : Q(0 < NF.eval $(qNF.toNF L)) := pf₀
  let ⟨f₁', pf_l₁'⟩ ← l₁'.evalPretty iM
  let ⟨f₂', pf_l₂'⟩ ← l₂'.evalPretty iM
  have pf_ef₁ := ← Sign.mkEqMul iM pf_sgn₁ q(Eq.trans $pf_l₁ (Eq.symm $pf_lhs)) pf_l₁'
  have pf_ef₂ := ← Sign.mkEqMul iM pf_sgn₂ q(Eq.trans $pf_l₂ (Eq.symm $pf_rhs)) pf_l₂'
  return ⟨g₁.expr f₁', g₂.expr f₂', q(lt_eq_cancel_lt $pf_ef₁ $pf_ef₂ $pf₀)⟩
/-- Given `x` in a commutative group-with-zero, construct a new expression in the standard form
*** / *** (all denominators at the end) which is equal to `x`, packaged as a `Simp.Result`.
Fails (in `AtomM`) if `x` is not an application of one of the recognized arithmetic operations,
or if no `CommGroupWithZero` instance can be found on its type. -/
def reduceExpr (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type)) (x : Expr) :
    AtomM Simp.Result := do
  -- for `field_simp` to work with the recursive infrastructure in `AtomM.recurse`, we need to fail
  -- on things `field_simp` would treat as atoms
  guard x.isApp
  let ⟨f, _⟩ := x.getAppFnArgs
  guard <|
    f ∈ [``HMul.hMul, ``HDiv.hDiv, ``Inv.inv, ``HPow.hPow, ``HAdd.hAdd, ``HSub.hSub, ``Neg.neg]
  -- infer `u` and `K : Q(Type u)` such that `x : Q($K)`
  let ⟨u, K, _⟩ ← inferTypeQ' x
  -- find a `CommGroupWithZero` instance on `K`
  let iK : Q(CommGroupWithZero $K) ← synthInstanceQ q(CommGroupWithZero $K)
  -- run the core normalization function `reduceExprQ` on `x`
  trace[Tactic.field_simp] "putting {x} in \"field_simp\"-normal-form"
  let ⟨e, pf⟩ ← reduceExprQ disch iK x
  return { expr := e, proof? := some pf }
/-- Given an (in)equality `a = b` (respectively, `a ≤ b`, `a < b`), cancel nonzero (resp. positive)
factors to construct a new (in)equality which is logically equivalent to `a = b` (respectively,
`a ≤ b`, `a < b`), packaged as a `Simp.Result`. Fails (in `AtomM`) if `t` is not an (in)equality
or if the required instances cannot be synthesized. -/
def reduceProp (disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type)) (t : Expr) :
    AtomM Simp.Result := do
  -- destructure `t` as an (in)equality; `i` records which relation it is
  let ⟨i, _, a, b⟩ ← t.ineq?
  -- infer `u` and `K : Q(Type u)` such that `x : Q($K)`
  let ⟨u, K, a⟩ ← inferTypeQ' a
  -- find a `CommGroupWithZero` instance on `K`
  let iK : Q(CommGroupWithZero $K) ← synthInstanceQ q(CommGroupWithZero $K)
  trace[Tactic.field_simp] "clearing denominators in {a} ~ {b}"
  -- run the core (in)equality-transforming mechanism on `a =/≤/< b`.
  -- Note: use double-backquoted names (resolved and checked at compile time) rather than the
  -- raw name literals `` `Eq `` etc., matching the style used in `reduceExpr` above.
  match i with
  | .eq =>
    let ⟨a', b', pf⟩ ← reduceEqQ disch iK a b
    let t' ← mkAppM ``Eq #[a', b']
    return { expr := t', proof? := pf }
  | .le =>
    let iK' : Q(PartialOrder $K) ← synthInstanceQ q(PartialOrder $K)
    let iK'' : Q(PosMulStrictMono $K) ← synthInstanceQ q(PosMulStrictMono $K)
    let iK''' : Q(PosMulReflectLE $K) ← synthInstanceQ q(PosMulReflectLE $K)
    let iK'''' : Q(ZeroLEOneClass $K) ← synthInstanceQ q(ZeroLEOneClass $K)
    let ⟨a', b', pf⟩ ← reduceLeQ disch iK iK' iK'' iK''' iK'''' a b
    let t' ← mkAppM ``LE.le #[a', b']
    return { expr := t', proof? := pf }
  | _ =>
    let iK' : Q(PartialOrder $K) ← synthInstanceQ q(PartialOrder $K)
    let iK'' : Q(PosMulStrictMono $K) ← synthInstanceQ q(PosMulStrictMono $K)
    let iK''' : Q(PosMulReflectLT $K) ← synthInstanceQ q(PosMulReflectLT $K)
    let iK'''' : Q(ZeroLEOneClass $K) ← synthInstanceQ q(ZeroLEOneClass $K)
    let ⟨a', b', pf⟩ ← reduceLtQ disch iK iK' iK'' iK''' iK'''' a b
    let t' ← mkAppM ``LT.lt #[a', b']
    return { expr := t', proof? := pf }
/-! ### Frontend -/
open Elab Tactic Lean.Parser.Tactic
/-- If the user provided a discharger, elaborate it. If not, we will use the `field_simp` default
discharger, which (among other things) includes a simp-run for the specified argument list, so we
elaborate those arguments.

Warns if both a custom discharger and a simp-argument list are given, since the custom discharger
does not consult the argument list. -/
def parseDischarger (d : Option (TSyntax ``discharger)) (args : Option (TSyntax ``simpArgs)) :
    TacticM (∀ {u : Level} (type : Q(Sort u)), MetaM Q($type)) := do
  match d with
  | none =>
    -- default discharger: build a simp context from the user's simp arguments
    let ctx ← Simp.Context.ofArgs (args.getD ⟨.missing⟩) { contextual := true }
    return fun e ↦ Prod.fst <$> (FieldSimp.discharge e).run ctx >>= Option.getM
  | some d =>
    -- destructure the `Option` rather than using the panicking `args.get!`
    if let some argsStx := args then
      logWarningAt argsStx
        "Custom `field_simp` dischargers do not make use of the `field_simp` arguments list"
    match d with
    | `(discharger| (discharger := $tac)) =>
      -- run the user's tactic, then prune any goals it happened to close
      let tac := (evalTactic (← `(tactic| ($tac))) *> pruneSolvedGoals)
      return (synthesizeUsing' · tac)
    | _ => throwError "could not parse the provided discharger {d}"
/--
The goal of `field_simp` is to bring expressions in (semi-)fields over a common denominator, i.e. to
reduce them to expressions of the form `n / d` where neither `n` nor `d` contains any division
symbol. For example, `x / (1 - y) / (1 + y / (1 - y))` is reduced to `x / (1 - y + y)`:
```
example (x y z : ℚ) (hy : 1 - y ≠ 0) :
⌊x / (1 - y) / (1 + y / (1 - y))⌋ < 3 := by
field_simp
-- new goal: `⊢ ⌊x / (1 - y + y)⌋ < 3`
```
The `field_simp` tactic will also clear denominators in field *(in)equalities*, by
cross-multiplying. For example, `field_simp` will clear the `x` denominators in the following
equation:
```
example {K : Type*} [Field K] {x : K} (hx0 : x ≠ 0) :
(x + 1 / x) ^ 2 + (x + 1 / x) = 1 := by
field_simp
-- new goal: `⊢ (x ^ 2 + 1) * (x ^ 2 + 1 + x) = x ^ 2`
```
A very common pattern is `field_simp; ring` (clear denominators, then the resulting goal is
solvable by the axioms of a commutative ring). The finishing tactic `field` is a shorthand for this
pattern.
Cancelling and combining denominators will generally require checking "nonzeroness"/"positivity"
side conditions. The `field_simp` tactic attempts to discharge these, and will omit such steps if it
cannot discharge the corresponding side conditions. The discharger will try, among other things,
`positivity` and `norm_num`, and will also use any nonzeroness/positivity proofs included explicitly
(e.g. `field_simp [hx]`). If your expression is not completely reduced by `field_simp`, check the
denominators of the resulting expression and provide proofs that they are nonzero/positive to enable
further progress.
-/
elab (name := fieldSimp) "field_simp" d:(discharger)? args:(simpArgs)? loc:(location)? :
    tactic => withMainContext do
  let disch ← parseDischarger d args
  -- state shared across the recursive traversal performed by `AtomM.recurse`
  let s ← IO.mkRef {}
  let cleanup r := do r.mkEqTrans (← simpOnlyNames [] r.expr) -- convert e.g. `x = x` to `True`
  -- try the (in)equality reducer first; fall back to plain expression normalization
  let m := AtomM.recurse s {} (fun e ↦ reduceProp disch e <|> reduceExpr disch e) cleanup
  -- with no explicit location, rewrite the goal (`.targets #[] true`)
  let loc := (loc.map expandLocation).getD (.targets #[] true)
  transformAtLocation (m ·) "field_simp" (failIfUnchanged := true) (mayCloseGoalFromHyp := true) loc
/--
The goal of the `field_simp` conv tactic is to bring an expression in a (semi-)field over a common
denominator, i.e. to reduce it to an expression of the form `n / d` where neither `n` nor `d`
contains any division symbol. For example, `x / (1 - y) / (1 + y / (1 - y))` is reduced to
`x / (1 - y + y)`:
```
example (x y z : ℚ) (hy : 1 - y ≠ 0) :
⌊x / (1 - y) / (1 + y / (1 - y))⌋ < 3 := by
conv => enter [1, 1]; field_simp
-- new goal: `⊢ ⌊x / (1 - y + y)⌋ < 3`
```
As in this example, cancelling and combining denominators will generally require checking
"nonzeroness" side conditions. The `field_simp` tactic attempts to discharge these, and will omit
such steps if it cannot discharge the corresponding side conditions. The discharger will try, among
other things, `positivity` and `norm_num`, and will also use any nonzeroness proofs included
explicitly (e.g. `field_simp [hx]`). If your expression is not completely reduced by `field_simp`,
check the denominators of the resulting expression and provide proofs that they are nonzero to
enable further progress.
The `field_simp` conv tactic is a variant of the main (i.e., not conv) `field_simp` tactic. The
latter operates recursively on subexpressions, bringing *every* field-expression encountered to the
form `n / d`.
-/
elab "field_simp" d:(discharger)? args:(simpArgs)? : conv => do
  -- find the expression `x` to `conv` on
  let x ← Conv.getLhs
  -- elaborate the (optional) user-supplied discharger and simp arguments
  let disch : ∀ {u : Level} (type : Q(Sort u)), MetaM Q($type) ← parseDischarger d args
  -- bring into field_simp standard form (non-recursive: just the focused expression)
  let r ← AtomM.run .reducible <| reduceExpr disch x
  -- convert `x` to the output of the normalization
  Conv.applySimpResult r
/--
The goal of the simprocs grouped under the `field` attribute is to clear denominators in
(semi-)field (in)equalities, by bringing LHS and RHS each over a common denominator and then
cross-multiplying. For example, the `field` simproc will clear the `x` denominators in the following
equation:
```
example {K : Type*} [Field K] {x : K} (hx0 : x ≠ 0) :
(x + 1 / x) ^ 2 + (x + 1 / x) = 1 := by
simp only [field]
-- new goal: `⊢ (x ^ 2 + 1) * (x ^ 2 + 1 + x) = x ^ 2`
```
The `field` simproc-set's functionality is a variant of the more general `field_simp` tactic, which
not only clears denominators in field (in)equalities but also brings isolated field expressions into
the normal form `n / d` (where neither `n` nor `d` contains any division symbol). (For confluence
reasons, the `field` simprocs also have a slightly different normal form from `field_simp`'s.)
Cancelling and combining denominators will generally require checking "nonzeroness"/"positivity"
side conditions. The `field` simproc-set attempts to discharge these, and will omit such steps if it
cannot discharge the corresponding side conditions. The discharger will try, among other things,
`positivity` and `norm_num`, and will also use any nonzeroness/positivity proofs included explicitly
in the simp call (e.g. `simp [field, hx]`). If your (in)equality is not completely reduced by the
`field` simproc-set, check the denominators of the resulting (in)equality and provide proofs that
they are nonzero/positive to enable further progress.
-/
def proc : Simp.Simproc := fun (t : Expr) ↦ do
  let ctx ← Simp.getContext
  -- discharge side conditions using the `field_simp` discharger, run in the ambient simp context;
  -- `Option.getM` turns a failed discharge into a `MetaM` failure
  let disch e : MetaM Expr := Prod.fst <$> (FieldSimp.discharge e).run ctx >>= Option.getM
  try
    let r ← AtomM.run .reducible <| FieldSimp.reduceProp disch t
    -- the `field_simp`-normal form is in opposition to the `simp`-lemmas `one_div` and `mul_inv`,
    -- so we need to undo any such lemma applications, otherwise we can get infinite loops
    return .visit <| ← r.mkEqTrans (← simpOnlyNames [``one_div, ``mul_inv] r.expr)
  catch _ =>
    -- `t` is not a reducible (in)equality (or reduction failed): let simp move on
    return .continue
end Mathlib.Tactic.FieldSimp
open Mathlib.Tactic

-- Register the `field` simproc for each of the three relation shapes that
-- `FieldSimp.proc` handles: `=`, `≤` and `<`.
simproc_decl fieldEq (Eq _ _) := FieldSimp.proc
simproc_decl fieldLe (LE.le _ _) := FieldSimp.proc
simproc_decl fieldLt (LT.lt _ _) := FieldSimp.proc

attribute [field, inherit_doc FieldSimp.proc] fieldEq fieldLe fieldLt
/-!
We register `field_simp` with the `hint` tactic.
-/
-- priority 1000 positions `field_simp` among the registered hint tactics; larger priorities
-- run earlier (see `Mathlib.Tactic.Hint`)
register_hint 1000 field_simp
.lake/packages/mathlib/Mathlib/Tactic/Hint.lean | import Lean.Meta.Tactic.TryThis
import Batteries.Linter.UnreachableTactic
import Batteries.Control.Nondet.Basic
import Mathlib.Init
import Mathlib.Lean.Elab.InfoTree
import Mathlib.Tactic.Basic
/-!
# The `hint` tactic.
The `hint` tactic tries the kitchen sink:
it runs every tactic registered via the `register_hint <prio> tac` command
on the current goal, and reports which ones succeed.
## Future work
It would be nice to run the tactics in parallel.
-/
open Lean Elab Tactic
open Lean.Meta.Tactic.TryThis
namespace Mathlib.Tactic.Hint
/-- An environment extension for registering hint tactics with priorities.
The state is a simple list of `(priority, tactic syntax)` pairs; ordering by priority is done
at use-time in `hint`, not here. -/
initialize hintExtension :
    SimplePersistentEnvExtension (Nat × TSyntax `tactic) (List (Nat × TSyntax `tactic)) ←
  registerSimplePersistentEnvExtension {
    -- new entries are consed onto the front of the state list
    addEntryFn := (·.cons)
    -- imported entries are folded into the (initially empty) state the same way
    addImportedFn := mkStateFromImportedEntries (·.cons) {}
  }
/-- Record `stx` as a hint tactic with priority `prio` in the environment extension. -/
def addHint (prio : Nat) (stx : TSyntax `tactic) : CoreM Unit := do
  modifyEnv (hintExtension.addEntry · (prio, stx))
/-- Read the registered hint tactics (with their priorities) out of the current environment. -/
def getHints : CoreM (List (Nat × TSyntax `tactic)) := do
  let env ← getEnv
  return hintExtension.getState env
open Lean.Elab.Command in
/--
Register a tactic for use with the `hint` tactic, e.g. `register_hint 1000 simp_all`.
The numeric argument specifies the priority: tactics with larger priorities run before
those with smaller priorities. The priority must be provided explicitly.
-/
elab (name := registerHintStx)
    "register_hint" prio:num tac:tactic : command =>
  liftTermElabM do
    -- erase source-position info (presumably so the persisted syntax is
    -- position-independent across files — TODO confirm)
    let tac : TSyntax `tactic := ⟨tac.raw.copyHeadTailInfoFrom .missing⟩
    let some prio := prio.raw.isNatLit?
      | throwError "expected a numeric literal for priority"
    addHint prio tac
-- Prevent the unreachable-tactic linter from flagging tactics quoted inside `register_hint`.
initialize
  Batteries.Linter.UnreachableTactic.ignoreTacticKindsRef.modify fun s => s.insert ``registerHintStx
/--
Construct a suggestion for a tactic.
* Check the info trees for a "Try this:" suggestion.
* If found, use that as the suggestion.
* Otherwise use the provided syntax.
* Also, look for remaining goals and pretty print them after the suggestion.
-/
def suggestion (tac : TSyntax `tactic) (trees : PersistentArray InfoTree) : TacticM Suggestion := do
  -- TODO `addExactSuggestion` has an option to construct `postInfo?`
  -- Factor that out so we can use it here instead of copying and pasting?
  let goals ← getGoals
  -- pretty-print any remaining goals, displayed after the suggested tactic text
  let postInfo? ← if goals.isEmpty then pure none else
    let mut str := "\nRemaining subgoals:"
    for g in goals do
      let e ← PrettyPrinter.ppExpr (← instantiateMVars (← g.getType))
      str := str ++ Format.pretty ("\n⊢ " ++ e)
    pure (some str)
  /-
  #adaptation_note 2025-08-27
  Suggestion styling was deprecated in lean4#9966.
  We use emojis for now instead.
  -/
  -- let style? := if goals.isEmpty then some .success else none
  let preInfo? := if goals.isEmpty then some "🎉 " else none
  -- prefer a "Try this:" suggestion recorded in the info trees, falling back to `tac` itself
  let suggestions := collectTryThisSuggestions trees
  let suggestion := match suggestions[0]? with
    | some s => s.suggestion
    | none => SuggestionText.tsyntax tac
  return { preInfo?, suggestion, postInfo? }
/--
Run all tactics registered using `register_hint`.
Print a "Try these:" suggestion for each of the successful tactics.
If one tactic succeeds and closes the goal, we don't look at subsequent tactics.
-/
-- TODO We could run the tactics in parallel.
-- TODO With widget support, could we run the tactics in parallel
-- and do live updates of the widget as results come in?
def hint (stx : Syntax) : TacticM Unit := withMainContext do
  -- sort the registered tactics by priority, largest first, then drop the priorities
  let tacs := (← getHints).toArray.qsort (·.1 > ·.1) |>.toList.map (·.2)
  let tacs := Nondet.ofList tacs
  -- run each tactic, keeping those that succeed without emitting error messages;
  -- each kept result records the remaining goals and a rendered suggestion
  let results := tacs.filterMapM fun t : TSyntax `tactic => do
    if let some { msgs, trees, .. } ← observing? (withResetServerInfo (evalTactic t)) then
      if msgs.hasErrors then
        return none
      else
        return some (← getGoals, ← suggestion t trees)
    else
      return none
  -- evaluate lazily, stopping at the first tactic that closes the goal entirely
  let results ← (results.toMLList.takeUpToFirst fun r => r.1.1.isEmpty).asArray
  -- present results with fewer remaining goals first
  let results := results.qsort (·.1.1.length < ·.1.1.length)
  addSuggestions stx (results.map (·.1.2))
  match results.find? (·.1.1.isEmpty) with
  | some r =>
    -- We don't restore the entire state, as that would delete the suggestion messages.
    setMCtx r.2.term.meta.meta.mctx
  | none => admitGoal (← getMainGoal)
/--
The `hint` tactic tries every tactic registered using `register_hint <prio> tac`,
and reports any that succeed.
-/
syntax (name := hintStx) "hint" : tactic

@[inherit_doc hintStx]
elab_rules : tactic
  -- `%$tk` captures the `hint` token so suggestions can be anchored at its source position
  | `(tactic| hint%$tk) => hint tk
end Mathlib.Tactic.Hint |
.lake/packages/mathlib/Mathlib/Tactic/Qify.lean | import Mathlib.Algebra.Order.Ring.Cast
import Mathlib.Data.Int.Cast.Lemmas
import Mathlib.Data.NNRat.Defs
import Mathlib.Tactic.Basic
import Mathlib.Tactic.Zify
/-!
# `qify` tactic
The `qify` tactic is used to shift propositions from `ℕ` or `ℤ` to `ℚ`.
This is often useful since `ℚ` has well-behaved division.
```
example (a b c x y z : ℕ) (h : ¬ x*y*z < 0) : c < a + 3*b := by
qify
qify at h
/-
h : ¬↑x * ↑y * ↑z < 0
⊢ ↑c < ↑a + 3 * ↑b
-/
sorry
```
-/
namespace Mathlib.Tactic.Qify
open Lean
open Lean.Meta
open Lean.Parser.Tactic
open Lean.Elab.Tactic
/--
The `qify` tactic is used to shift propositions from `ℕ` or `ℤ` to `ℚ`.
This is often useful since `ℚ` has well-behaved division.
```
example (a b c x y z : ℕ) (h : ¬ x*y*z < 0) : c < a + 3*b := by
qify
qify at h
/-
h : ¬↑x * ↑y * ↑z < 0
⊢ ↑c < ↑a + 3 * ↑b
-/
sorry
```
`qify` can be given extra lemmas to use in simplification. This is especially useful in the
presence of nat subtraction: passing `≤` arguments will allow `push_cast` to do more work.
```
example (a b c : ℤ) (h : a / b = c) (hab : b ∣ a) (hb : b ≠ 0) : a = c * b := by
qify [hab] at h hb ⊢
exact (div_eq_iff hb).1 h
```
`qify` makes use of the `@[zify_simps]` and `@[qify_simps]` attributes to move propositions,
and the `push_cast` tactic to simplify the `ℚ`-valued expressions. -/
syntax (name := qify) "qify" (simpArgs)? (location)? : tactic

-- `qify` is implemented as a macro over `simp`: the `zify_simps`/`qify_simps` simp sets move
-- the propositions to `ℚ`, and `push_cast` normalizes the resulting cast expressions.
macro_rules
  | `(tactic| qify $[[$simpArgs,*]]? $[at $location]?) =>
    let args := simpArgs.map (·.getElems) |>.getD #[]
    `(tactic|
      simp -decide only [zify_simps, qify_simps, push_cast, $args,*]
      $[at $location]?)
/-- Move an equality of integers to an equality of their casts in `ℚ`. -/
@[qify_simps] lemma intCast_eq (a b : ℤ) : a = b ↔ (a : ℚ) = (b : ℚ) := Int.cast_inj.symm
-- Transfer `≤`, `<` and `≠` on `ℤ` to the corresponding relations on casts in `ℚ`;
-- all are tagged for the `qify` simp set.
@[qify_simps] lemma intCast_le (a b : ℤ) : a ≤ b ↔ (a : ℚ) ≤ (b : ℚ) := Int.cast_le.symm
@[qify_simps] lemma intCast_lt (a b : ℤ) : a < b ↔ (a : ℚ) < (b : ℚ) := Int.cast_lt.symm
@[qify_simps] lemma intCast_ne (a b : ℤ) : a ≠ b ↔ (a : ℚ) ≠ (b : ℚ) := by
  simp only [ne_eq, Int.cast_inj]
end Qify
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/TypeStar.lean | import Mathlib.Init
/-!
# Support for `Sort*` and `Type*`.
These elaborate as `Sort u` and `Type u` with a fresh implicit universe variable `u`.
-/
open Lean
/-- The syntax `variable (X Y ... Z : Sort*)` creates a new distinct implicit universe variable
for each variable in the sequence. -/
elab "Sort*" : term => do
  -- make a fresh universe metavariable and immediately promote it to a universe parameter
  Elab.Term.levelMVarToParam (.sort (← Lean.Meta.mkFreshLevelMVar))
/-- The syntax `variable (X Y ... Z : Type*)` creates a new distinct implicit universe variable
`> 0` for each variable in the sequence. -/
elab "Type*" : term => do
  -- `Type u = Sort (u+1)`, hence the `.succ` around the fresh level metavariable
  Elab.Term.levelMVarToParam (.sort (.succ (← Lean.Meta.mkFreshLevelMVar)))
.lake/packages/mathlib/Mathlib/Tactic/Lemma.lean | import Mathlib.Init
import Lean.Parser.Command
/-!
# Support for `lemma` as a synonym for `theorem`.
-/
open Lean
-- higher priority to override the one in Batteries
/-- `lemma` means the same as `theorem`. It is used to denote "less important" theorems -/
syntax (name := lemma) (priority := default + 1) declModifiers
  -- same grammar as `theorem`: modifiers, name, signature, then the proof value
  group("lemma " declId ppIndent(declSig) declVal) : command
/-- Implementation of the `lemma` command, by macro expansion to `theorem`. -/
@[macro «lemma»] def expandLemma : Macro := fun stx =>
  -- Not using a macro match, to be more resilient against changes to `lemma`.
  -- This implementation ensures that any future changes to `theorem` are reflected in `lemma`
  let stx := stx.modifyArg 1 fun stx =>
    -- swap the leading `lemma` atom for `theorem` and re-kind the node as a theorem
    let stx := stx.modifyArg 0 (mkAtomFrom · "theorem" (canonical := true))
    stx.setKind ``Parser.Command.theorem
  pure <| stx.setKind ``Parser.Command.declaration
.lake/packages/mathlib/Mathlib/Tactic/DeriveEncodable.lean | import Lean.Meta.Transform
import Lean.Meta.Inductive
import Lean.Elab.Deriving.Basic
import Lean.Elab.Deriving.Util
import Mathlib.Logic.Encodable.Basic
import Mathlib.Data.Nat.Pairing
/-!
# `Encodable` deriving handler
Adds a deriving handler for the `Encodable` class.
The resulting `Encodable` instance should be considered to be opaque.
The specific encoding used is an implementation detail.
-/
namespace Mathlib.Deriving.Encodable
open Lean Parser.Term Elab Deriving Meta
/-!
### Theory
The idea is that every encodable inductive type can be represented as a tree of natural numbers.
Inspiration for this is s-expressions used in Lisp/Scheme.
```lean
inductive S : Type where
| nat (n : ℕ)
| cons (a b : S)
```
We start by constructing a equivalence `S ≃ ℕ` using the `Nat.pair` function.
Here is an example of how this module constructs an encoding.
Suppose we are given the following type:
```lean
inductive T (α : Type) where
| a (x : α) (y : Bool) (z : T α)
| b
```
The deriving handler constructs the following declarations:
```lean
def encodableT_toS {α} [Encodable α] (x : T α) : S :=
match x with
| T.a a a_1 a_2 =>
S.cons (S.nat 0)
(S.cons (S.nat (Encodable.encode a))
(S.cons (S.nat (Encodable.encode a_1)) (S.cons (encodableT_toS a_2) (S.nat 0))))
| T.b => S.cons (S.nat 1) (S.nat 0)
private def encodableT_fromS {α} [Encodable α] : S → Option (T α) := fun
| S.cons (S.nat 0) (S.cons (S.nat a) (S.cons (S.nat a_1) (S.cons a_2 (S.nat 0)))) =>
match Encodable.decode a, Encodable.decode a_1, encodableT_fromS a_2 with
| some a, some a_1, some a_2 => some <| T.a a a_1 a_2
| _, _, _ => none
| S.cons (S.nat 1) (S.nat 0) => some <| T.b
| _ => none
private theorem encodableT {α} [Encodable α] (x : @T α) :
encodableT_fromS (encodableT_toS x) = some x := by
cases x <;> (unfold encodableT_toS encodableT_fromS; simp only [Encodable.encodek, encodableT])
instance {α} [Encodable α] : Encodable (@T α) :=
Encodable.ofLeftInjection encodableT_toS encodableT_fromS encodableT
```
The idea is that each constructor gets encoded as a linked list made of `S.cons` constructors
that is tagged with the constructor index.
-/
/-- A binary tree of natural numbers (akin to Lisp s-expressions); the intermediate
representation through which every derived `Encodable` instance is encoded. -/
private inductive S : Type where
  | nat (n : ℕ)
  | cons (a b : S)
/-- Encodes a tree as a natural number: a leaf `nat n` becomes `Nat.pair 0 n`, and
`cons a b` becomes `Nat.pair (S.encode a + 1) (S.encode b)`. The `+ 1` shift makes the
first component of the pair distinguish the two constructors. -/
private def S.encode : S → ℕ
  | nat n => Nat.pair 0 n
  | cons a b => Nat.pair (S.encode a + 1) (S.encode b)
/-- If `n` unpairs into a pair whose first component is nonzero, then the second
component is strictly smaller than `n`. Used to justify termination of `S.decode`. -/
private lemma nat_unpair_lt_2 {n : ℕ} (h : (Nat.unpair n).1 ≠ 0) : (Nat.unpair n).2 < n := by
  -- Write `n` as `Nat.pair a b` and unfold the pairing function.
  obtain ⟨⟨a, b⟩, rfl⟩ := Nat.pairEquiv.surjective n
  simp only [Nat.pairEquiv_apply, Function.uncurry_apply_pair, Nat.unpair_pair] at *
  unfold Nat.pair
  have := Nat.le_mul_self a
  have := Nat.le_mul_self b
  split <;> omega
/-- Decodes a natural number back into a tree, inverting `S.encode`: a zero first
component of `Nat.unpair n` marks a leaf, otherwise the (shifted) first component and
the second component are decoded recursively. -/
private def S.decode (n : ℕ) : S :=
  let p := Nat.unpair n
  if h : p.1 = 0 then
    S.nat p.2
  else
    -- These `have`s feed the termination checker: both recursive arguments are `< n`.
    have : p.1 ≤ n := Nat.unpair_left_le n
    have := Nat.unpair_lt (by cutsat : 1 ≤ n)
    have := nat_unpair_lt_2 h
    S.cons (S.decode (p.1 - 1)) (S.decode p.2)
/-- The equivalence `S ≃ ℕ` given by `S.encode`/`S.decode`. Only the left-inverse
direction is needed by the deriving handler; the right inverse is proved so that a full
`Equiv` (and hence `Encodable.ofEquiv`) can be used. -/
private def S_equiv : S ≃ ℕ where
  toFun := S.encode
  invFun := S.decode
  left_inv s := by
    induction s with
    | nat n =>
      unfold S.encode S.decode
      simp
    | cons a b iha ihb =>
      unfold S.encode S.decode
      simp [iha, ihb]
  right_inv n := by -- The fact it's a right inverse isn't needed for the deriving handler.
    induction n using Nat.strongRecOn with | _ n ih =>
    unfold S.decode
    dsimp only
    split
    next h =>
      unfold S.encode
      rw [← h, Nat.pair_unpair]
    next h =>
      unfold S.encode
      rw [ih, ih, Nat.sub_add_cancel, Nat.pair_unpair]
      · rwa [Nat.one_le_iff_ne_zero]
      · exact nat_unpair_lt_2 h
      · obtain _ | n' := n
        · exact False.elim (h rfl)
        · have := Nat.unpair_lt (by omega : 1 ≤ n' + 1)
          omega
/-- `S` is encodable, via the explicit equivalence `S_equiv : S ≃ ℕ`. -/
instance : Encodable S := Encodable.ofEquiv ℕ S_equiv
/-!
### Implementation
-/
/-!
Constructing the `toS` encoding functions.
-/
/-- Builds the `match` expression for a `toS` encoding function: one alternative per
constructor, encoding a constructor with index `k` as `S.cons (S.nat k) fields`, where
`fields` is an `S.cons`-linked list of encoded fields terminated by `S.nat 0`. -/
private def mkToSMatch (ctx : Deriving.Context) (header : Header) (indVal : InductiveVal)
    (toSNames : Array Name) : TermElabM Term := do
  let discrs ← mkDiscrs header indVal
  let alts ← mkAlts
  `(match $[$discrs],* with $alts:matchAlt*)
where
  /-- One match alternative per constructor of `indVal`. -/
  mkAlts : TermElabM (Array (TSyntax ``matchAlt)) := do
    let mut alts := #[]
    for ctorName in indVal.ctors do
      let ctorInfo ← getConstInfoCtor ctorName
      alts := alts.push <| ← forallTelescopeReducing ctorInfo.type fun xs _ => do
        let mut patterns := #[]
        let mut ctorArgs := #[]
        let mut rhsArgs : Array Term := #[]
        -- Indices are matched with `_`.
        for _ in [:indVal.numIndices] do
          patterns := patterns.push (← `(_))
        -- Parameters are matched with `_` inside the constructor application.
        for _ in [:ctorInfo.numParams] do
          ctorArgs := ctorArgs.push (← `(_))
        for i in [:ctorInfo.numFields] do
          let a := mkIdent (← mkFreshUserName `a)
          ctorArgs := ctorArgs.push a
          let x := xs[ctorInfo.numParams + i]!
          let xTy ← inferType x
          -- Field of a type in this mutual block? Then call the corresponding `toS`
          -- function; otherwise fall back to `Encodable.encode`.
          let recName? := ctx.typeInfos.findIdx? (xTy.isAppOf ·.name) |>.map (toSNames[·]!)
          rhsArgs := rhsArgs.push <| ←
            if let some recName := recName? then
              `($(mkIdent recName) $a)
            else
              ``(S.nat (Encodable.encode $a))
        patterns := patterns.push (← `(@$(mkIdent ctorName):ident $ctorArgs:term*))
        -- Fold the encoded fields into an `S.cons` list, then tag with the ctor index.
        let rhs' : Term ← rhsArgs.foldrM (init := ← ``(S.nat 0)) fun arg acc => ``(S.cons $arg $acc)
        let rhs : Term ← ``(S.cons (S.nat $(quote ctorInfo.cidx)) $rhs')
        `(matchAltExpr| | $[$patterns:term],* => $rhs)
    return alts
/-- Constructs, as a `mutual` block, one encoding function per type in the mutual
inductive block, mapping each inductive type to `S`. The function bodies are produced
by `mkToSMatch`. -/
private def mkToSFuns (ctx : Deriving.Context) (toSFunNames : Array Name) :
    TermElabM (TSyntax `command) := do
  let mut res : Array (TSyntax `command) := #[]
  for i in [:toSFunNames.size] do
    -- Renamed local from `toNatFnName` — a leftover of the `Countable` deriving handler
    -- this code was adapted from; these are `toS` functions, not `toNat` functions.
    let toSFunName := toSFunNames[i]!
    let indVal := ctx.typeInfos[i]!
    let header ← mkHeader ``Encodable 1 indVal
    let body ← mkToSMatch ctx header indVal toSFunNames
    res := res.push <| ← `(
      private def $(mkIdent toSFunName):ident $header.binders:bracketedBinder* :
        $(mkCIdent ``S) := $body:term
    )
  `(command| mutual $[$res:command]* end)
/-!
Constructing the `fromS` functions.
-/
/-- Builds the pattern-matching function for a `fromS` decoding function. Each
constructor alternative matches the `S.cons`-list shape produced by `mkToSMatch`; when
the constructor has fields, an inner `match` dispatches on the `Option`-valued decodes
of the fields. A final catch-all alternative returns `none`. -/
private def mkFromSMatch (ctx : Deriving.Context) (indVal : InductiveVal)
    (fromSNames : Array Name) : TermElabM Term := do
  let alts ← mkAlts
  `(fun $alts:matchAlt*)
where
  /-- One alternative per constructor, plus the trailing `| _ => none`. -/
  mkAlts : TermElabM (Array (TSyntax ``matchAlt)) := do
    let mut alts := #[]
    for ctorName in indVal.ctors do
      let ctorInfo ← getConstInfoCtor ctorName
      alts := alts.push <| ← forallTelescopeReducing ctorInfo.type fun xs _ => do
        -- Outer patterns binding each raw encoded field.
        let mut patternArgs : Array Term := #[]
        -- Discriminants of the inner match: one decode per field.
        let mut discrs : Array (TSyntax ``Lean.Parser.Term.matchDiscr) := #[]
        -- Arguments handed to the constructor on success.
        let mut ctorArgs : Array Term := #[]
        -- Success (`some _`) and failure (`_`) rows of the inner match.
        let mut patternArgs2 : Array Term := #[]
        let mut patternArgs3 : Array Term := #[]
        for _ in [:indVal.numParams] do
          ctorArgs := ctorArgs.push (← `(_))
        for i in [:ctorInfo.numFields] do
          let a := mkIdent (← mkFreshUserName `a)
          let x := xs[ctorInfo.numParams + i]!
          let xTy ← inferType x
          -- Recursive field? Decode with the sibling `fromS` function; otherwise the
          -- field was stored as `S.nat (encode _)`, so match `S.nat a` and decode `a`.
          let recName? := ctx.typeInfos.findIdx? (xTy.isAppOf ·.name) |>.map (fromSNames[·]!)
          if let some recName := recName? then
            patternArgs := patternArgs.push a
            discrs := discrs.push <| ← `(matchDiscr| $(mkIdent recName) $a)
          else
            patternArgs := patternArgs.push <| ← ``(S.nat $a)
            discrs := discrs.push <| ← `(matchDiscr| $(mkCIdent ``Encodable.decode) $a)
          ctorArgs := ctorArgs.push a
          patternArgs2 := patternArgs2.push <| ← ``(some $a)
          patternArgs3 := patternArgs3.push <| ← `(_)
        -- Reassemble the `S.cons`-list pattern with the constructor-index tag.
        let pattern ← patternArgs.foldrM (init := ← ``(S.nat 0)) fun arg acc => ``(S.cons $arg $acc)
        let pattern ← ``(S.cons (S.nat $(quote ctorInfo.cidx)) $pattern)
        -- Note: this is where we could try to handle indexed types.
        -- The idea would be to use DecidableEq to test the computed index against the expected
        -- index and then rewrite.
        let res ← ``(some <| @$(mkIdent ctorName):ident $ctorArgs:term*)
        if discrs.isEmpty then
          `(matchAltExpr| | $pattern:term => $res)
        else
          let rhs : Term ← `(
            match $[$discrs],* with
            | $[$patternArgs2],* => $res
            | $[$patternArgs3],* => none
          )
          `(matchAltExpr| | $pattern:term => $rhs)
    alts := alts.push <| ← `(matchAltExpr| | _ => none)
    return alts
/-- Constructs, as a `mutual` block, one decoding function per type in the mutual
inductive block, mapping `S` to `Option` of the inductive type. The bodies are produced
by `mkFromSMatch`. -/
private def mkFromSFuns (ctx : Deriving.Context) (fromSFunNames : Array Name) :
    TermElabM (TSyntax `command) := do
  let mut res : Array (TSyntax `command) := #[]
  for i in [:fromSFunNames.size] do
    -- Renamed local from `fromNatFnName` — a leftover of the `Countable` deriving
    -- handler this code was adapted from; these are `fromS` functions.
    let fromSFunName := fromSFunNames[i]!
    let indVal := ctx.typeInfos[i]!
    let header ← mkHeader ``Encodable 1 indVal
    let body ← mkFromSMatch ctx indVal fromSFunNames
    -- Last binder is for the target
    let binders := header.binders[0:header.binders.size - 1]
    res := res.push <| ← `(
      private def $(mkIdent fromSFunName):ident $binders:bracketedBinder* :
        $(mkCIdent ``S) → Option $header.targetType := $body:term
    )
  `(command| mutual $[$res:command]* end)
/-!
Constructing the proofs that the `fromS` functions are left inverses of the `toS` functions.
-/
/--
Constructs a proof that the functions created by `mkFromSFuns` are left inverses
of the ones created by `mkToSFuns`.

Each theorem is proved by `cases` on the value followed by unfolding both functions and
simplifying with `Encodable.encodek` and the sibling left-inverse theorems (needed for
recursive fields).
-/
private def mkInjThms (ctx : Deriving.Context) (toSFunNames fromSFunNames : Array Name) :
    TermElabM (TSyntax `command) := do
  let mut res : Array (TSyntax `command) := #[]
  for i in [:toSFunNames.size] do
    let toSFunName := toSFunNames[i]!
    let fromSFunName := fromSFunNames[i]!
    -- The left-inverse theorem gets the "main" auxiliary name for this type.
    let injThmName := ctx.auxFunNames[i]!
    let indVal := ctx.typeInfos[i]!
    let header ← mkHeader ``Encodable 1 indVal
    let enc := mkIdent toSFunName
    let dec := mkIdent fromSFunName
    let t := mkIdent header.targetNames[0]!
    -- Make all the mutual left-inverse theorems available to `simp`.
    let lemmas : TSyntaxArray ``Parser.Tactic.simpLemma ← ctx.auxFunNames.mapM fun i =>
      `(Parser.Tactic.simpLemma| $(mkIdent i):term)
    let tactic : Term ← `(by
      cases $t:ident
      <;> (unfold $(mkIdent toSFunName):ident $(mkIdent fromSFunName):ident;
        simp only [Encodable.encodek, $lemmas,*]; try rfl)
    )
    res := res.push <| ← `(
      private theorem $(mkIdent injThmName):ident $header.binders:bracketedBinder* :
        $dec ($enc $t) = some $t := $tactic
    )
  `(command| mutual $[$res:command]* end)
/-!
Assembling the `Encodable` instances.
-/
open TSyntax.Compat in
/-- Assuming all of the auxiliary definitions exist, create all the `instance` commands
for the `Encodable` instances for the (mutual) inductive type(s). -/
private def mkEncodableInstanceCmds (ctx : Deriving.Context) (typeNames : Array Name)
    (toSFunNames fromSFunNames : Array Name) : TermElabM (Array Command) := do
  let mut instances := #[]
  for i in [:ctx.typeInfos.size] do
    let indVal := ctx.typeInfos[i]!
    -- Only emit instances for the types the user actually asked to derive.
    if typeNames.contains indVal.name then
      let auxFunName := ctx.auxFunNames[i]!
      let argNames ← mkInductArgNames indVal
      let binders ← mkImplicitBinders argNames
      -- Require `Encodable` on the type parameters.
      let binders := binders ++ (← mkInstImplicitBinders ``Encodable indVal argNames)
      let indType ← mkInductiveApp indVal argNames
      let type ← `($(mkCIdent ``Encodable) $indType)
      let encode := mkIdent toSFunNames[i]!
      let decode := mkIdent fromSFunNames[i]!
      -- `auxFunName` is the left-inverse theorem generated by `mkInjThms`.
      let kencode := mkIdent auxFunName
      let instCmd ← `(
        instance $binders:implicitBinder* : $type :=
          $(mkCIdent ``Encodable.ofLeftInjection) $encode $decode $kencode
      )
      instances := instances.push instCmd
  return instances
/-- Generates all auxiliary commands for the mutual block containing `indVal`:
the `toS` encoders, the `fromS` decoders, the left-inverse theorems, and finally the
`Encodable` instances for the requested `declNames`. -/
private def mkEncodableCmds (indVal : InductiveVal) (declNames : Array Name) :
    TermElabM (Array Syntax) := do
  let ctx ← mkContext ``Encodable "encodable" indVal.name
  -- Derive the encoder/decoder names from the auxiliary (theorem) names.
  let toSFunNames : Array Name ← ctx.auxFunNames.mapM fun name => do
    let .str n' s := name.eraseMacroScopes | unreachable!
    mkFreshUserName <| .str n' (s ++ "_toS")
  let fromSFunNames : Array Name ← ctx.auxFunNames.mapM fun name => do
    let .str n' s := name.eraseMacroScopes | unreachable!
    mkFreshUserName <| .str n' (s ++ "_fromS")
  let cmds :=
    #[← mkToSFuns ctx toSFunNames,
      ← mkFromSFuns ctx fromSFunNames,
      ← mkInjThms ctx toSFunNames fromSFunNames]
    ++ (← mkEncodableInstanceCmds ctx declNames toSFunNames fromSFunNames)
  -- Fix: use the trace class actually registered below (`Mathlib.Deriving.Encodable`);
  -- the previous `Mathlib.Deriving.encodable` (lowercase) is never registered, so this
  -- trace output could not be enabled with `set_option trace...`.
  trace[Mathlib.Deriving.Encodable] "\n{cmds}"
  return cmds
open Command
/--
The deriving handler for the `Encodable` class.
Handles non-nested non-reflexive inductive types.
They can be mutual too — in that case, there is an optimization to re-use all the generated
functions and proofs.

Returns `false` (declining to handle the request) for nested, reflexive, or indexed
inductives.
-/
def mkEncodableInstance (declNames : Array Name) : CommandElabM Bool := do
  let mut seen : NameSet := {}
  let mut toVisit : Array InductiveVal := #[]
  for declName in declNames do
    -- Skip types already covered by a previously visited mutual block.
    if seen.contains declName then continue
    let indVal ← getConstInfoInduct declName
    if indVal.isNested || indVal.isReflexive || indVal.numIndices != 0 then
      return false -- not supported yet
    -- Mark the whole mutual block as handled.
    seen := seen.append (NameSet.ofList indVal.all)
    toVisit := toVisit.push indVal
  for indVal in toVisit do
    -- Generate commands once per mutual block, restricted to the requested names.
    let cmds ← liftTermElabM <| mkEncodableCmds indVal (declNames.filter indVal.all.contains)
    withEnableInfoTree false do
      elabCommand <| mkNullNode cmds
  return true
initialize
registerDerivingHandler ``Encodable mkEncodableInstance
registerTraceClass `Mathlib.Deriving.Encodable
end Mathlib.Deriving.Encodable |
.lake/packages/mathlib/Mathlib/Tactic/Subsingleton.lean | import Mathlib.Logic.Basic
/-!
# `subsingleton` tactic
The `subsingleton` tactic closes `Eq` or `HEq` goals using an argument
that the types involved are subsingletons.
To first approximation, it does `apply Subsingleton.elim` but it also will try `proof_irrel_heq`,
and it is careful not to accidentally specialize `Sort _` to `Prop`.
-/
open Lean Meta
/-- Returns the expression `Subsingleton ty`, at the universe level computed for `ty`. -/
def Lean.Meta.mkSubsingleton (ty : Expr) : MetaM Expr := do
  return mkApp (.const ``Subsingleton [← getLevel ty]) ty
/-- Synthesizes a `Subsingleton ty` instance with the additional local instances made
available.

`insts` pairs each user-supplied instance term (kept for error locations) with its
metavariable-abstracted elaboration, as produced by `elabSubsingletonInsts`. -/
def Lean.Meta.synthSubsingletonInst (ty : Expr)
    (insts : Array (Term × AbstractMVarsResult) := #[]) :
    MetaM Expr := do
  -- Synthesize a subsingleton instance. The new metacontext depth ensures that universe
  -- level metavariables are not specialized.
  withNewMCtxDepth do
    -- We need to process the local instances *under* `withNewMCtxDepth` since they might
    -- have universe parameters, which we need to let `synthInstance` assign to.
    let (insts', uss) ← Array.unzip <$> insts.mapM fun inst => do
      let us ← inst.2.paramNames.mapM fun _ => mkFreshLevelMVar
      pure <| (inst.2.expr.instantiateLevelParamsArray inst.2.paramNames us, us)
    -- Introduce the provided instances as local declarations and register them as
    -- local instances so `synthInstance` can use them.
    withLocalDeclsD (insts'.map fun e => (`inst, fun _ => inferType e)) fun fvars => do
      withNewLocalInstances fvars 0 do
        let res ← instantiateMVars <| ← synthInstance <| ← mkSubsingleton ty
        let res' := res.abstract fvars
        for i in [0 : fvars.size] do
          if res'.hasLooseBVar (fvars.size - i - 1) then
            -- The `i`th provided instance was used; all its level mvars must be solved.
            uss[i]!.forM fun u => do
              let u ← instantiateLevelMVars u
              if u.isMVar then
                -- This shouldn't happen, `synthInstance` should solve for all level metavariables
                throwErrorAt insts[i]!.1 "\
                  Instance provided to 'subsingleton' has unassigned universe level metavariable\
                  {indentD insts'[i]!}"
          else
            -- Unused local instance.
            -- Not logging a warning since this might be `... <;> subsingleton [...]`
            pure ()
        -- Substitute the (level-instantiated) instance expressions back for the fvars.
        instantiateMVars <| res'.instantiateRev insts'
/--
Closes the goal `g` whose target is an `Eq` or `HEq` by appealing to the fact that the types
are subsingletons.
Fails if it cannot find a way to do this.
Has support for showing `BEq` instances are equal if they have `LawfulBEq` instances.

`insts` are extra user-supplied instances, forwarded to `synthSubsingletonInst`.
All changes are rolled back on failure (`commitIfNoEx`).
-/
def Lean.MVarId.subsingleton (g : MVarId) (insts : Array (Term × AbstractMVarsResult) := #[]) :
    MetaM Unit := commitIfNoEx do
  -- Convert an `HEq` goal with matching types into an `Eq` goal where possible.
  let g ← g.heqOfEq
  g.withContext do
    let tgt ← whnfR (← g.getType)
    if let some (ty, x, y) := tgt.eq? then
      -- Proof irrelevance. This is not necessary since `rfl` suffices,
      -- but propositions are subsingletons so we may as well.
      if ← Meta.isProp ty then
        g.assign <| mkApp3 (.const ``proof_irrel []) ty x y
        return
      -- Try `Subsingleton.elim`
      let u ← getLevel ty
      try
        let inst ← synthSubsingletonInst ty insts
        g.assign <| mkApp4 (.const ``Subsingleton.elim [u]) ty inst x y
        return
      catch _ => pure ()
      -- Try `lawful_beq_subsingleton`
      let ty' ← whnfR ty
      if ty'.isAppOfArity ``BEq 1 then
        let α := ty'.appArg!
        try
          -- `LawfulBEq` lives one universe level below; bail if `u` has no predecessor.
          let some u' := u.dec | failure
          let xInst ← withNewMCtxDepth <| Meta.synthInstance <| mkApp2 (.const ``LawfulBEq [u']) α x
          let yInst ← withNewMCtxDepth <| Meta.synthInstance <| mkApp2 (.const ``LawfulBEq [u']) α y
          g.assign <| mkApp5 (.const ``lawful_beq_subsingleton [u']) α x y xInst yInst
          return
        catch _ => pure ()
      throwError "\
        tactic 'subsingleton' could not prove equality since it could not synthesize\
        {indentD (← mkSubsingleton ty)}"
    else if let some (xTy, x, yTy, y) := tgt.heq? then
      -- The HEq version of proof irrelevance.
      if ← (Meta.isProp xTy <&&> Meta.isProp yTy) then
        g.assign <| mkApp4 (.const ``proof_irrel_heq []) xTy yTy x y
        return
      throwError "tactic 'subsingleton' could not prove heterogeneous equality"
    throwError "tactic 'subsingleton' failed, goal is neither an equality nor a \
      heterogeneous equality"
namespace Mathlib.Tactic
/--
The `subsingleton` tactic tries to prove a goal of the form `x = y` or `x ≍ y`
using the fact that the types involved are *subsingletons*
(a type with exactly zero or one terms).
To a first approximation, it does `apply Subsingleton.elim`.
As a nicety, `subsingleton` first runs the `intros` tactic.
- If the goal is an equality, it either closes the goal or fails.
- `subsingleton [inst1, inst2, ...]` can be used to add additional `Subsingleton` instances
to the local context. This can be more flexible than
`have := inst1; have := inst2; ...; subsingleton` since the tactic does not require that
all placeholders be solved for.
Techniques the `subsingleton` tactic can apply:
- proof irrelevance
- heterogeneous proof irrelevance (via `proof_irrel_heq`)
- using `Subsingleton` (via `Subsingleton.elim`)
- proving `BEq` instances are equal if they are both lawful (via `lawful_beq_subsingleton`)
### Properties
The tactic is careful not to accidentally specialize `Sort _` to `Prop`,
avoiding the following surprising behavior of `apply Subsingleton.elim`:
```lean
example (α : Sort _) (x y : α) : x = y := by apply Subsingleton.elim
```
The reason this `example` goes through is that
it applies the `∀ (p : Prop), Subsingleton p` instance,
specializing the universe level metavariable in `Sort _` to `0`.
-/
syntax (name := subsingletonStx) "subsingleton" (ppSpace "[" term,* "]")? : tactic
open Elab Tactic
/--
Elaborates the terms like how `Lean.Elab.Tactic.addSimpTheorem` does,
abstracting their metavariables.

Returns each original term paired with its abstracted elaboration, for later error
reporting in `synthSubsingletonInst`.
-/
def elabSubsingletonInsts
    (instTerms? : Option (Array Term)) : TermElabM (Array (Term × AbstractMVarsResult)) := do
  if let some instTerms := instTerms? then
    go instTerms.toList #[]
  else
    return #[]
where
  /-- Main loop for `elabSubsingletonInsts`, accumulating results in `insts`. -/
  go (instTerms : List Term) (insts : Array (Term × AbstractMVarsResult)) :
      TermElabM (Array (Term × AbstractMVarsResult)) := do
    match instTerms with
    | [] => return insts
    | instTerm :: instTerms =>
      let inst ← withNewMCtxDepth <| Term.withoutModifyingElabMetaStateWithInfo do
        withRef instTerm <| Term.withoutErrToSorry do
          let e ← Term.elabTerm instTerm none
          Term.synthesizeSyntheticMVars (postpone := .no) (ignoreStuckTC := true)
          let e ← instantiateMVars e
          -- Every supplied term must be an instance of some class.
          unless (← isClass? (← inferType e)).isSome do
            throwError "Not an instance. Term has type{indentD <| ← inferType e}"
          if e.hasMVar then
            let r ← abstractMVars e
            -- Change all instance arguments corresponding to the mvars to be inst implicit.
            let e' ← forallBoundedTelescope (← inferType r.expr) r.numMVars fun args _ => do
              let newBIs ← args.filterMapM fun arg => do
                if (← isClass? (← inferType arg)).isSome then
                  return some (arg.fvarId!, .instImplicit)
                else
                  return none
              withNewBinderInfos newBIs do
                mkLambdaFVars args (r.expr.beta args)
            pure { r with expr := e' }
          else
            -- No metavariables: nothing to abstract.
            pure { paramNames := #[], mvars := #[], expr := e }
      go instTerms (insts.push (instTerm, inst))
-- Implementation of the `subsingleton` tactic: elaborate any extra instances, run
-- `intros`, then `MVarId.subsingleton`. If that fails and error recovery is enabled,
-- check whether plain `rfl` (or `hrefl`) would close the goal and suggest it before
-- re-throwing the original error.
elab_rules : tactic
  | `(tactic| subsingleton $[[$[$instTerms?],*]]?) => withMainContext do
    let recover := (← read).recover
    let insts ← elabSubsingletonInsts instTerms?
    Elab.Tactic.liftMetaTactic1 fun g => do
      let (fvars, g) ← g.intros
      -- note: `insts` are still valid after `intros`
      try
        g.subsingleton (insts := insts)
        return none
      catch e =>
        -- Try `refl` when all else fails, to give a hint to the user
        if recover then
          try
            g.refl <|> g.hrefl
            -- Include `intros` in the suggestion if `intros` bound anything above.
            let tac ← if !fvars.isEmpty then `(tactic| (intros; rfl)) else `(tactic| rfl)
            Meta.Tactic.TryThis.addSuggestion (← getRef) tac (origSpan? := ← getRef)
            return none
          catch _ => pure ()
        throw e
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Lift.lean | import Mathlib.Tactic.Basic
import Batteries.Lean.Expr
import Batteries.Lean.Meta.UnusedNames
/-!
# lift tactic
This file defines the `lift` tactic, allowing the user to lift elements from one type to another
under a specified condition.
## Tags
lift, tactic
-/
/-- A class specifying that you can lift elements from `α` to `β` assuming `cond` is true.
Used by the tactic `lift`. -/
class CanLift (α β : Sort*) (coe : outParam <| β → α) (cond : outParam <| α → Prop) : Prop where
/-- An element of `α` that satisfies `cond` belongs to the range of `coe`. -/
prf : ∀ x : α, cond x → ∃ y : β, coe y = x
instance : CanLift Int Nat (fun n : Nat ↦ n) (0 ≤ ·) :=
⟨fun n hn ↦ ⟨n.natAbs, Int.natAbs_of_nonneg hn⟩⟩
/-- Enable automatic handling of pi types in `CanLift`. -/
instance Pi.canLift (ι : Sort*) (α β : ι → Sort*) (coe : ∀ i, β i → α i) (P : ∀ i, α i → Prop)
[∀ i, CanLift (α i) (β i) (coe i) (P i)] :
CanLift (∀ i, α i) (∀ i, β i) (fun f i ↦ coe i (f i)) fun f ↦ ∀ i, P i (f i) where
prf f hf := ⟨fun i => Classical.choose (CanLift.prf (f i) (hf i)),
funext fun i => Classical.choose_spec (CanLift.prf (f i) (hf i))⟩
/-- Enable automatic handling of product types in `CanLift`. -/
instance Prod.instCanLift {α β γ δ : Type*} {coeβα condβα coeδγ condδγ} [CanLift α β coeβα condβα]
[CanLift γ δ coeδγ condδγ] :
CanLift (α × γ) (β × δ) (Prod.map coeβα coeδγ) (fun x ↦ condβα x.1 ∧ condδγ x.2) where
prf := by
rintro ⟨x, y⟩ ⟨hx, hy⟩
rcases CanLift.prf (β := β) x hx with ⟨x, rfl⟩
rcases CanLift.prf (β := δ) y hy with ⟨y, rfl⟩
exact ⟨(x, y), by simp⟩
/-- Any dependent function defined on the subtype `{i // p i}` extends to a function on
all of `ι`, provided each `α i` is nonempty; classical choice fills in the values off
the subtype. -/
theorem Subtype.exists_pi_extension {ι : Sort*} {α : ι → Sort*} [ne : ∀ i, Nonempty (α i)]
    {p : ι → Prop} (f : ∀ i : Subtype p, α i) :
    ∃ g : ∀ i : ι, α i, (fun i : Subtype p => g i) = f := by
  haveI : DecidablePred p := fun i ↦ Classical.propDecidable (p i)
  exact ⟨fun i => if hi : p i then f ⟨i, hi⟩ else Classical.choice (ne i),
    funext fun i ↦ dif_pos i.2⟩
instance PiSubtype.canLift (ι : Sort*) (α : ι → Sort*) [∀ i, Nonempty (α i)] (p : ι → Prop) :
CanLift (∀ i : Subtype p, α i) (∀ i, α i) (fun f i => f i) fun _ => True where
prf f _ := Subtype.exists_pi_extension f
-- TODO: test if we need this instance in Lean 4
instance PiSubtype.canLift' (ι : Sort*) (α : Sort*) [Nonempty α] (p : ι → Prop) :
CanLift (Subtype p → α) (ι → α) (fun f i => f i) fun _ => True :=
PiSubtype.canLift ι (fun _ => α) p
instance Subtype.canLift {α : Sort*} (p : α → Prop) :
CanLift α { x // p x } Subtype.val p where prf a ha :=
⟨⟨a, ha⟩, rfl⟩
namespace Mathlib.Tactic
open Lean Parser Elab Tactic Meta
/-- Lift an expression to another type.
* Usage: `'lift' expr 'to' expr ('using' expr)? ('with' id (id id?)?)?`.
* If `n : ℤ` and `hn : n ≥ 0` then the tactic `lift n to ℕ using hn` creates a new
constant of type `ℕ`, also named `n` and replaces all occurrences of the old variable `(n : ℤ)`
with `↑n` (where `n` in the new variable). It will clear `n` from the context and
try to clear `hn` from the context.
+ So for example the tactic `lift n to ℕ using hn` transforms the goal
`n : ℤ, hn : n ≥ 0, h : P n ⊢ n = 3` to `n : ℕ, h : P ↑n ⊢ ↑n = 3`
(here `P` is some term of type `ℤ → Prop`).
* The argument `using hn` is optional, the tactic `lift n to ℕ` does the same, but also creates a
new subgoal that `n ≥ 0` (where `n` is the old variable).
This subgoal will be placed at the top of the goal list.
+ So for example the tactic `lift n to ℕ` transforms the goal
`n : ℤ, h : P n ⊢ n = 3` to two goals
`n : ℤ, h : P n ⊢ n ≥ 0` and `n : ℕ, h : P ↑n ⊢ ↑n = 3`.
* You can also use `lift n to ℕ using e` where `e` is any expression of type `n ≥ 0`.
* Use `lift n to ℕ with k` to specify the name of the new variable.
* Use `lift n to ℕ with k hk` to also specify the name of the equality `↑k = n`. In this case, `n`
will remain in the context. You can use `rfl` for the name of `hk` to substitute `n` away
(i.e. the default behavior).
* You can also use `lift e to ℕ with k hk` where `e` is any expression of type `ℤ`.
In this case, the `hk` will always stay in the context, but it will be used to rewrite `e` in
all hypotheses and the target.
+ So for example the tactic `lift n + 3 to ℕ using hn with k hk` transforms the goal
`n : ℤ, hn : n + 3 ≥ 0, h : P (n + 3) ⊢ n + 3 = 2 * n` to the goal
`n : ℤ, k : ℕ, hk : ↑k = n + 3, h : P ↑k ⊢ ↑k = 2 * n`.
* The tactic `lift n to ℕ using h` will remove `h` from the context. If you want to keep it,
specify it again as the third argument to `with`, like this: `lift n to ℕ using h with n rfl h`.
* More generally, this can lift an expression from `α` to `β` assuming that there is an instance
of `CanLift α β`. In this case the proof obligation is specified by `CanLift.prf`.
* Given an instance `CanLift β γ`, it can also lift `α → β` to `α → γ`; more generally, given
`β : Π a : α, Type*`, `γ : Π a : α, Type*`, and `[Π a : α, CanLift (β a) (γ a)]`, it
automatically generates an instance `CanLift (Π a, β a) (Π a, γ a)`.
`lift` is in some sense dual to the `zify` tactic. `lift (z : ℤ) to ℕ` will change the type of an
integer `z` (in the supertype) to `ℕ` (the subtype), given a proof that `z ≥ 0`;
propositions concerning `z` will still be over `ℤ`. `zify` changes propositions about `ℕ` (the
subtype) to propositions about `ℤ` (the supertype), without changing the type of any variable.
-/
syntax (name := lift) "lift " term " to " term (" using " term)?
(" with " ident (ppSpace colGt ident)? (ppSpace colGt ident)?)? : tactic
/-- Generate instance for the `lift` tactic.

Given the source type `old_tp` and target type `new_tp`, synthesizes a
`CanLift old_tp new_tp coe p` instance (with `coe` and `p` solved as out-params) and
returns `(p, coe, inst)` with metavariables instantiated. -/
def Lift.getInst (old_tp new_tp : Expr) : MetaM (Expr × Expr × Expr) := do
  -- `coe : new_tp → old_tp` and `p : old_tp → Prop` are left as metavariables for
  -- instance synthesis to determine.
  let coe ← mkFreshExprMVar (some <| .forallE `a new_tp old_tp .default)
  let p ← mkFreshExprMVar (some <| .forallE `a old_tp (.sort .zero) .default)
  let inst_type ← mkAppM ``CanLift #[old_tp, new_tp, coe, p]
  let inst ← synthInstance inst_type -- TODO: catch error
  return (← instantiateMVars p, ← instantiateMVars coe, ← instantiateMVars inst)
/-- Main function for the `lift` tactic.

* `e`: the term to lift; `t`: the target type.
* `hUsing`: optional proof of the lifting side condition.
* `newVarName`/`newEqName`: optional names for the lifted variable and the equation.
* `keepUsing`: whether to keep the `using` hypothesis in the context.
-/
def Lift.main (e t : TSyntax `term) (hUsing : Option (TSyntax `term))
    (newVarName newEqName : Option (TSyntax `ident)) (keepUsing : Bool) : TacticM Unit :=
  withMainContext do
    -- Are we using a new variable for the lifted var?
    let isNewVar := !newVarName.isNone
    -- Name of the new hypothesis containing the equality of the lifted variable with the old one
    -- rfl if none is given
    let newEqName := (newEqName.map Syntax.getId).getD `rfl
    -- Was a new hypothesis given?
    let isNewEq := newEqName != `rfl
    let e ← elabTerm e none
    let goal ← getMainGoal
    if !(← inferType (← instantiateMVars (← goal.getType))).isProp then throwError
      "lift tactic failed. Tactic is only applicable when the target is a proposition."
    if newVarName == none ∧ !e.isFVar then throwError
      "lift tactic failed. When lifting an expression, a new variable name must be given"
    -- `p` is the side condition, `coe` the coercion back into the old type.
    let (p, coe, inst) ← Lift.getInst (← inferType e) (← Term.elabType t)
    -- Proof of `p e`: either the provided term or a fresh metavariable (becomes a goal).
    let prf ← match hUsing with
      | some h => elabTermEnsuringType h (p.betaRev #[e])
      | none => mkFreshExprMVar (some (p.betaRev #[e]))
    let newVarName ← match newVarName with
      | some v => pure v.getId
      | none => e.fvarId!.getUserName
    -- `CanLift.prf e prf : ∃ y, coe y = e`, to be destructured by `rcases` below.
    let prfEx ← mkAppOptM ``CanLift.prf #[none, none, coe, p, inst, e, prf]
    let prfEx ← instantiateMVars prfEx
    let prfSyn ← prfEx.toSyntax
    -- if we have a new variable, but no hypothesis name was provided, we temporarily use a dummy
    -- hypothesis name
    let newEqName ← if isNewVar && !isNewEq then withMainContext <| getUnusedUserName `tmpVar
      else pure newEqName
    let newEqIdent := mkIdent newEqName
    -- Run rcases on the proof of the lift condition
    replaceMainGoal (← Lean.Elab.Tactic.RCases.rcases #[(none, prfSyn)]
      (.tuple Syntax.missing <| [newVarName, newEqName].map (.one Syntax.missing)) goal)
    -- if we use a new variable, then substitute it everywhere
    if isNewVar then
      for decl in ← getLCtx do
        if decl.userName != newEqName then
          let declIdent := mkIdent decl.userName
          -- `simp only [← h]` rewrites occurrences of the old term in each hypothesis.
          evalTactic (← `(tactic| simp -failIfUnchanged only [← $newEqIdent] at $declIdent:ident))
      evalTactic (← `(tactic| simp -failIfUnchanged only [← $newEqIdent]))
    -- Clear the temporary hypothesis used for the new variable name if applicable
    if isNewVar && !isNewEq then
      evalTactic (← `(tactic| clear $newEqIdent))
    -- Clear the "using" hypothesis if it's a variable in the context
    if prf.isFVar && !keepUsing then
      let some hUsingStx := hUsing | throwError "lift tactic failed: unreachable code was reached"
      evalTactic (← `(tactic| try clear $hUsingStx))
    -- With no `using` clause, the side-condition metavariable becomes the first goal.
    if hUsing.isNone then withMainContext <| setGoals (prf.mvarId! :: (← getGoals))
-- Entry point for `lift`: `keepUsing` is true exactly when the `using` hypothesis is
-- repeated as the third `with` identifier (e.g. `lift n to ℕ using h with n rfl h`).
elab_rules : tactic
  | `(tactic| lift $e to $t $[using $h]? $[with $newVarName $[$newEqName]? $[$newPrfName]?]?) =>
    withMainContext <|
    let keepUsing := match h, newPrfName.join with
      | some h, some newPrfName => h.raw == newPrfName
      | _, _ => false
    Lift.main e t h newVarName newEqName.join keepUsing
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/DeriveCountable.lean | import Lean.Meta.Transform
import Lean.Meta.Inductive
import Lean.Elab.Deriving.Basic
import Lean.Elab.Deriving.Util
import Mathlib.Data.Countable.Defs
import Mathlib.Data.Nat.Pairing
/-!
# `Countable` deriving handler
Adds a deriving handler for the `Countable` class.
-/
namespace Mathlib.Deriving.Countable
open Lean Parser.Term Elab Deriving Meta
/-!
### Theory
We use the `Nat.pair` function to encode an inductive type in the natural numbers,
following a pattern similar to the tagged s-expressions used in Scheme/Lisp.
We develop a little theory to make constructing the injectivity functions very straightforward.
This is easiest to explain by example. Given a type
```lean
inductive T (α : Type)
| a (n : α)
| b (n m : α) (t : T α)
```
the deriving handler constructs the following three declarations:
```lean
noncomputable def T.toNat (α : Type) [Countable α] : T α → ℕ
| a n => Nat.pair 0 (Nat.pair (encode n) 0)
| b n m t => Nat.pair 1 (Nat.pair (encode n) (Nat.pair (encode m) (Nat.pair t.toNat 0)))
theorem T.toNat_injective (α : Type) [Countable α] : Function.Injective (T.toNat α) := fun
| a .., a .. => by
refine cons_eq_imp_init ?_
refine pair_encode_step ?_; rintro ⟨⟩
intro; rfl
| a .., b .. => by refine cons_eq_imp ?_; rintro ⟨⟩
| b .., a .. => by refine cons_eq_imp ?_; rintro ⟨⟩
| b .., b .. => by
refine cons_eq_imp_init ?_
refine pair_encode_step ?_; rintro ⟨⟩
refine pair_encode_step ?_; rintro ⟨⟩
refine cons_eq_imp ?_; intro h; cases T.toNat_injective α h
intro; rfl
instance (α : Type) [Countable α] : Countable (T α) := ⟨_, T.toNat_injective α⟩
```
-/
/-- A (noncomputable) injection into `ℕ`, extracted by choice from the `Countable`
instance. -/
private noncomputable def encode {α : Sort*} [Countable α] : α → ℕ :=
  (Countable.exists_injective_nat α).choose
/-- `encode` is injective, by the choice specification of
`Countable.exists_injective_nat`. -/
private noncomputable def encode_injective {α : Sort*} [Countable α] :
    Function.Injective (encode : α → ℕ) :=
  (Countable.exists_injective_nat α).choose_spec
/--
Initialize the injectivity argument. Pops the constructor tag.
-/
private theorem cons_eq_imp_init {p : Prop} {a b b' : ℕ}
(h : b = b' → p) : Nat.pair a b = Nat.pair a b' → p := by
simpa [Nat.pair_eq_pair, and_imp] using h
/--
Generic step of the injectivity argument, pops the head of the pairs.
Used in the recursive steps where we need to supply an additional injectivity argument.
-/
private theorem cons_eq_imp {p : Prop} {a b a' b' : ℕ}
(h : a = a' → b = b' → p) : Nat.pair a b = Nat.pair a' b' → p := by
rwa [Nat.pair_eq_pair, and_imp]
/--
Specialized step of the injectivity argument, pops the head of the pairs and decodes.
-/
private theorem pair_encode_step {p : Prop} {α : Sort*} [Countable α]
{a b : α} {m n : ℕ}
(h : a = b → m = n → p) : Nat.pair (encode a) m = Nat.pair (encode b) n → p :=
cons_eq_imp fun ha => h (encode_injective ha)
/-!
### Implementation
-/
/-!
Constructing the `toNat` functions.
-/
/-- Builds the `match` expression for a `toNat` function: one alternative per
constructor, encoding a constructor with index `k` and fields `f₁ … fₙ` as
`Nat.pair k (Nat.pair e₁ (… (Nat.pair eₙ 0)))`, where each `eᵢ` is a recursive `toNat`
call or `encode fᵢ`. -/
private def mkToNatMatch (ctx : Deriving.Context) (header : Header) (indVal : InductiveVal)
    (toFunNames : Array Name) : TermElabM Term := do
  let discrs ← mkDiscrs header indVal
  let alts ← mkAlts
  `(match $[$discrs],* with $alts:matchAlt*)
where
  /-- One match alternative per constructor of `indVal`. -/
  mkAlts : TermElabM (Array (TSyntax ``matchAlt)) := do
    let mut alts := #[]
    for ctorName in indVal.ctors do
      let ctorInfo ← getConstInfoCtor ctorName
      alts := alts.push <| ← forallTelescopeReducing ctorInfo.type fun xs _ => do
        let mut patterns := #[]
        let mut ctorArgs := #[]
        let mut rhsArgs : Array Term := #[]
        -- Indices are matched with `_`.
        for _ in [:indVal.numIndices] do
          patterns := patterns.push (← `(_))
        -- Parameters are matched with `_` inside the constructor application.
        for _ in [:ctorInfo.numParams] do
          ctorArgs := ctorArgs.push (← `(_))
        for i in [:ctorInfo.numFields] do
          let a := mkIdent (← mkFreshUserName `a)
          ctorArgs := ctorArgs.push a
          let x := xs[ctorInfo.numParams + i]!
          let xTy ← inferType x
          -- Field of a type in this mutual block? Then recurse; otherwise `encode`.
          let recName? := ctx.typeInfos.findIdx? (xTy.isAppOf ·.name) |>.map (toFunNames[·]!)
          rhsArgs := rhsArgs.push <| ←
            if let some recName := recName? then
              `($(mkIdent recName) $a)
            else
              ``(encode $a)
        patterns := patterns.push (← `(@$(mkIdent ctorName):ident $ctorArgs:term*))
        -- Fold fields into a `Nat.pair`-list ending in `0`, then tag with the index.
        let rhs' : Term ← rhsArgs.foldrM (init := ← `(0)) fun arg acc => ``(Nat.pair $arg $acc)
        let rhs : Term ← ``(Nat.pair $(quote ctorInfo.cidx) $rhs')
        `(matchAltExpr| | $[$patterns:term],* => $rhs)
    return alts
/-- Constructs a function from the inductive type to `Nat`.

Emits one (possibly mutually recursive) definition per type in the mutual block,
wrapped in a single `mutual ... end` command. -/
def mkToNatFuns (ctx : Deriving.Context) (toNatFnNames : Array Name) :
    TermElabM (TSyntax `command) := do
  let mut res : Array (TSyntax `command) := #[]
  for i in [:toNatFnNames.size] do
    let toNatFnName := toNatFnNames[i]!
    let indVal := ctx.typeInfos[i]!
    -- Arity 1: the definition takes a single value of the inductive type.
    let header ← mkHeader ``Countable 1 indVal
    let body ← mkToNatMatch ctx header indVal toNatFnNames
    -- `noncomputable` since `encode` is obtained by choice from `Countable`.
    res := res.push <| ← `(
      private noncomputable def $(mkIdent toNatFnName):ident $header.binders:bracketedBinder* :
        Nat := $body:term
    )
  `(command| mutual $[$res:command]* end)
/-!
Constructing the injectivity proofs for these `toNat` functions.
-/
/-- Builds the pattern-matching proof term for an injectivity theorem: one alternative
per ordered pair of constructors. For equal constructors, the tag is peeled off and each
field handled in turn; for distinct constructors the hypothesis is refuted via the
differing tags. -/
private def mkInjThmMatch (ctx : Deriving.Context) (header : Header) (indVal : InductiveVal) :
    TermElabM Term := do
  let discrs ← mkDiscrs header indVal
  let alts ← mkAlts
  `(match $[$discrs],* with $alts:matchAlt*)
where
  /-- One alternative per (ordered) pair of constructors of `indVal`. -/
  mkAlts : TermElabM (Array (TSyntax ``matchAlt)) := do
    let mut alts := #[]
    for ctorName₁ in indVal.ctors do
      let ctorInfo ← getConstInfoCtor ctorName₁
      for ctorName₂ in indVal.ctors do
        let mut patterns := #[]
        for _ in [:indVal.numIndices] do
          patterns := patterns.push (← `(_))
        patterns := patterns ++ #[(← `($(mkIdent ctorName₁) ..)), (← `($(mkIdent ctorName₂) ..))]
        if ctorName₁ == ctorName₂ then
          -- Same constructor: strip the tag with `cons_eq_imp_init`, then one tactic
          -- step per field, finishing with `intro; rfl`.
          alts := alts.push <| ← forallTelescopeReducing ctorInfo.type fun xs _ => do
            let mut tactics : Array (TSyntax `tactic) := #[]
            for i in [:ctorInfo.numFields] do
              -- NOTE(review): uses `indVal.numParams` here whereas the other handlers
              -- use `ctorInfo.numParams`; the two agree for constructors of `indVal`.
              let x := xs[indVal.numParams + i]!
              let xTy ← inferType x
              let recName? :=
                ctx.typeInfos.findIdx? (xTy.isAppOf ·.name) |>.map (ctx.auxFunNames[·]!)
              tactics := tactics.push <| ←
                if let some recName := recName? then
                  -- Recursive field: appeal to the (mutual) injectivity theorem.
                  `(tactic| (
                    refine $(mkCIdent ``cons_eq_imp) ?_;
                    intro h;
                    cases $(mkIdent recName) _ _ h
                  ))
                else
                  -- Non-recursive field: `encode` is injective.
                  `(tactic| (
                    refine $(mkCIdent ``pair_encode_step) ?_;
                    rintro ⟨⟩
                  ))
            tactics := tactics.push (← `(tactic| (intro; rfl)))
            `(matchAltExpr| | $[$patterns:term],* => cons_eq_imp_init (by $tactics:tactic*))
        else if (← compatibleCtors ctorName₁ ctorName₂) then
          -- Distinct constructors: their index tags differ, so the equality of
          -- encodings is absurd. (Pairs rejected by `compatibleCtors` — presumably
          -- incompatible for type reasons; verify — need no alternative at all.)
          let rhs ← ``(cons_eq_imp (by rintro ⟨⟩))
          alts := alts.push (← `(matchAltExpr| | $[$patterns:term],* => $rhs:term))
    return alts
/-- Constructs a proof that the functions created by `mkToNatFuns` are injective. -/
def mkInjThms (ctx : Deriving.Context) (toNatFnNames : Array Name) :
TermElabM (TSyntax `command) := do
let mut res : Array (TSyntax `command) := #[]
for i in [:toNatFnNames.size] do
let toNatFnName := toNatFnNames[i]!
let injThmName := ctx.auxFunNames[i]!
let indVal := ctx.typeInfos[i]!
let header ← mkHeader ``Countable 2 indVal
let body ← mkInjThmMatch ctx header indVal
let f := mkIdent toNatFnName
let t1 := mkIdent header.targetNames[0]!
let t2 := mkIdent header.targetNames[1]!
res := res.push <| ← `(
private theorem $(mkIdent injThmName):ident $header.binders:bracketedBinder* :
$f $t1 = $f $t2 → $t1 = $t2 := $body:term
)
`(command| mutual $[$res:command]* end)
/-!
Assembling the `Countable` instances.
-/
open TSyntax.Compat in
/-- Assuming all of the auxiliary definitions exist, create all the `instance` commands
for the `ToExpr` instances for the (mutual) inductive type(s). -/
private def mkCountableInstanceCmds (ctx : Deriving.Context) (typeNames : Array Name) :
TermElabM (Array Command) := do
let mut instances := #[]
for i in [:ctx.typeInfos.size] do
let indVal := ctx.typeInfos[i]!
if typeNames.contains indVal.name then
let auxFunName := ctx.auxFunNames[i]!
let argNames ← mkInductArgNames indVal
let binders ← mkImplicitBinders argNames
let binders := binders ++ (← mkInstImplicitBinders ``Countable indVal argNames)
let indType ← mkInductiveApp indVal argNames
let type ← `($(mkCIdent ``Countable) $indType)
let mut val := mkIdent auxFunName
let instCmd ← `(instance $binders:implicitBinder* : $type := ⟨_, $val⟩)
instances := instances.push instCmd
return instances
private def mkCountableCmds (indVal : InductiveVal) (declNames : Array Name) :
TermElabM (Array Syntax) := do
let ctx ← mkContext ``Countable "countable" indVal.name
let toNatFunNames : Array Name ← ctx.auxFunNames.mapM fun name => do
let .str n' s := name.eraseMacroScopes | unreachable!
mkFreshUserName <| .str n' (s ++ "ToNat")
let cmds := #[← mkToNatFuns ctx toNatFunNames, ← mkInjThms ctx toNatFunNames]
++ (← mkCountableInstanceCmds ctx declNames)
trace[Mathlib.Deriving.countable] "\n{cmds}"
return cmds
open Command
/--
The deriving handler for the `Countable` class.
Handles non-nested non-reflexive inductive types.
They can be mutual too — in that case, there is an optimization to re-use all the generated
functions and proofs.
-/
def mkCountableInstance (declNames : Array Name) : CommandElabM Bool := do
let mut seen : NameSet := {}
let mut toVisit : Array InductiveVal := #[]
for declName in declNames do
if seen.contains declName then continue
let indVal ← getConstInfoInduct declName
if indVal.isNested || indVal.isReflexive then
return false -- not supported yet
seen := seen.append (NameSet.ofList indVal.all)
toVisit := toVisit.push indVal
for indVal in toVisit do
let cmds ← liftTermElabM <| mkCountableCmds indVal (declNames.filter indVal.all.contains)
withEnableInfoTree false do
elabCommand <| mkNullNode cmds
return true
initialize
registerDerivingHandler ``Countable mkCountableInstance
registerTraceClass `Mathlib.Deriving.countable
end Mathlib.Deriving.Countable |
.lake/packages/mathlib/Mathlib/Tactic/TryThis.lean | import Mathlib.Init
import Lean.Meta.Tactic.TryThis
/-!
# 'Try this' tactic macro
This is a convenient shorthand intended for macro authors to be able to generate "Try this"
recommendations. (It is not the main implementation of 'Try this',
which is implemented in Lean core, see `Lean.Meta.Tactic.TryThis`.)
-/
namespace Mathlib.Tactic
open Lean
/-- Produces the text `Try this: <tac>` with the given tactic, and then executes it. -/
elab tk:"try_this" tac:tactic info:(str)? : tactic => do
Elab.Tactic.evalTactic tac
Meta.Tactic.TryThis.addSuggestion tk
{ suggestion := tac, postInfo? := TSyntax.getString <$> info }
(origSpan? := ← getRef)
/-- Produces the text `Try this: <tac>` with the given conv tactic, and then executes it. -/
elab tk:"try_this" tac:conv info:(str)? : conv => do
Elab.Tactic.evalTactic tac
Meta.Tactic.TryThis.addSuggestion tk
{ suggestion := tac, postInfo? := TSyntax.getString <$> info }
(origSpan? := ← getRef)
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/ReduceModChar.lean | import Mathlib.Data.ZMod.Basic
import Mathlib.RingTheory.Polynomial.Basic
import Mathlib.Tactic.NormNum.DivMod
import Mathlib.Tactic.NormNum.PowMod
import Mathlib.Tactic.ReduceModChar.Ext
import Mathlib.Util.AtLocation
/-!
# `reduce_mod_char` tactic
Define the `reduce_mod_char` tactic, which traverses expressions looking for numerals `n`,
such that the type of `n` is a ring of (positive) characteristic `p`, and reduces these
numerals modulo `p`, to lie between `0` and `p`.
## Implementation
The main entry point is `ReduceModChar.derive`, which uses `simp` to traverse expressions and
calls `matchAndNorm` on each subexpression.
The type of each subexpression is matched syntactically to determine if it is a ring with positive
characteristic in `typeToCharP`. Using syntactic matching should be faster than trying to infer
a `CharP` instance on each subexpression.
The actual reduction happens in `normIntNumeral`. This is written to be compatible with `norm_num`
so it can serve as a drop-in replacement for some `norm_num`-based routines (specifically, the
intended use is as an option for the `ring` tactic).
In addition to the main functionality, we call `normNeg` and `normNegCoeffMul` to replace negation
with multiplication by `p - 1`, and simp lemmas tagged `@[reduce_mod_char]` to clean up the
resulting expression: e.g. `1 * X + 0` becomes `X`.
-/
open Lean Meta Simp
open Lean.Elab
open Tactic
open Qq
namespace Tactic
namespace ReduceModChar
open Mathlib.Meta.NormNum
variable {u : Level}
lemma CharP.isInt_of_mod {e' r : ℤ} {α : Type*} [Ring α] {n n' : ℕ} (inst : CharP α n) {e : α}
(he : IsInt e e') (hn : IsNat n n') (h₂ : IsInt (e' % n') r) : IsInt e r :=
⟨by rw [he.out, CharP.intCast_eq_intCast_mod α n, show n = n' from hn.out, h₂.out, Int.cast_id]⟩
lemma CharP.isNat_pow {α} [Semiring α] : ∀ {f : α → ℕ → α} {a : α} {a' b b' c n n' : ℕ},
CharP α n → f = HPow.hPow → IsNat a a' → IsNat b b' → IsNat n n' →
Nat.mod (Nat.pow a' b') n' = c → IsNat (f a b) c
| _, _, a, _, b, _, _, n, _, rfl, ⟨h⟩, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨by
rw [h, Nat.cast_id, Nat.pow_eq, ← Nat.cast_pow, CharP.natCast_eq_natCast_mod α n]
rfl⟩
attribute [local instance] Mathlib.Meta.monadLiftOptionMetaM in
/-- Evaluates `e` to an integer using `norm_num` and reduces the result modulo `n`. -/
def normBareNumeral {α : Q(Type u)} (n n' : Q(ℕ)) (pn : Q(IsNat «$n» «$n'»))
(e : Q($α)) (_ : Q(Ring $α)) (instCharP : Q(CharP $α $n)) : MetaM (Result e) := do
let ⟨ze, ne, pe⟩ ← Result.toInt _ (← Mathlib.Meta.NormNum.derive e)
let rr ← evalIntMod.go _ _ ze q(IsInt.raw_refl $ne) _ <|
.isNat q(instAddMonoidWithOne) _ q(isNat_natCast _ _ (IsNat.raw_refl $n'))
let ⟨zr, nr, pr⟩ ← rr.toInt _
return .isInt _ nr zr q(CharP.isInt_of_mod $instCharP $pe $pn $pr)
mutual
/-- Given an expression of the form `a ^ b` in a ring of characteristic `n`, reduces `a`
modulo `n` recursively and then calculates `a ^ b` using fast modular exponentiation. -/
partial def normPow {α : Q(Type u)} (n n' : Q(ℕ)) (pn : Q(IsNat «$n» «$n'»)) (e : Q($α))
(_ : Q(Ring $α)) (instCharP : Q(CharP $α $n)) : MetaM (Result e) := do
let .app (.app (f : Q($α → ℕ → $α)) (a : Q($α))) (b : Q(ℕ)) ← whnfR e | failure
let .isNat sα na pa ← normIntNumeral' n n' pn a _ instCharP | failure
let ⟨nb, pb⟩ ← Mathlib.Meta.NormNum.deriveNat b q(instAddMonoidWithOneNat)
guard <|← withNewMCtxDepth <| isDefEq f q(HPow.hPow (α := $α))
haveI' : $e =Q $a ^ $b := ⟨⟩
haveI' : $f =Q HPow.hPow := ⟨⟩
have ⟨c, r⟩ := evalNatPowMod na nb n'
assumeInstancesCommute
return .isNat sα c q(CharP.isNat_pow (f := $f) $instCharP (.refl $f) $pa $pb $pn $r)
/-- If `e` is of the form `a ^ b`, reduce it using fast modular exponentiation, otherwise
reduce it using `norm_num`. -/
partial def normIntNumeral' {α : Q(Type u)} (n n' : Q(ℕ)) (pn : Q(IsNat «$n» «$n'»))
(e : Q($α)) (_ : Q(Ring $α)) (instCharP : Q(CharP $α $n)) : MetaM (Result e) :=
normPow n n' pn e _ instCharP <|> normBareNumeral n n' pn e _ instCharP
end
lemma CharP.intCast_eq_mod (R : Type _) [Ring R] (p : ℕ) [CharP R p] (k : ℤ) :
(k : R) = (k % p : ℤ) :=
CharP.intCast_eq_intCast_mod R p
/-- Given an integral expression `e : t` such that `t` is a ring of characteristic `n`,
reduce `e` modulo `n`. -/
partial def normIntNumeral {α : Q(Type u)} (n : Q(ℕ)) (e : Q($α)) (_ : Q(Ring $α))
(instCharP : Q(CharP $α $n)) : MetaM (Result e) := do
let ⟨n', pn⟩ ← deriveNat n q(instAddMonoidWithOneNat)
normIntNumeral' n n' pn e _ instCharP
lemma CharP.neg_eq_sub_one_mul {α : Type _} [Ring α] (n : ℕ) (inst : CharP α n) (b : α)
(a : ℕ) (a' : α) (p : IsNat (n - 1 : α) a) (pa : a = a') :
-b = a' * b := by
rw [← pa, ← p.out, ← neg_one_mul]
simp
/-- Given an expression `(-e) : t` such that `t` is a ring of characteristic `n`,
simplify this to `(n - 1) * e`.
This should be called only when `normIntNumeral` fails, because `normIntNumeral` would otherwise
be more useful by evaluating `-e` mod `n` to an actual numeral.
-/
@[nolint unusedHavesSuffices] -- the `=Q` is necessary for type checking
partial def normNeg {α : Q(Type u)} (n : Q(ℕ)) (e : Q($α)) (_instRing : Q(Ring $α))
(instCharP : Q(CharP $α $n)) :
MetaM Simp.Result := do
let .app f (b : Q($α)) ← whnfR e | failure
guard <|← withNewMCtxDepth <| isDefEq f q(Neg.neg (α := $α))
let r ← (derive (α := α) q($n - 1))
match r with
| .isNat sα a p => do
have : instAddMonoidWithOne =Q $sα := ⟨⟩
let ⟨a', pa'⟩ ← mkOfNat α sα a
let pf : Q(-$b = $a' * $b) := q(CharP.neg_eq_sub_one_mul $n $instCharP $b $a $a' $p $pa')
return { expr := q($a' * $b), proof? := pf }
| .isNegNat _ _ _ =>
throwError "normNeg: nothing useful to do in negative characteristic"
| _ => throwError "normNeg: evaluating `{n} - 1` should give an integer result"
lemma CharP.neg_mul_eq_sub_one_mul {α : Type _} [Ring α] (n : ℕ) (inst : CharP α n) (a b : α)
(na : ℕ) (na' : α) (p : IsNat ((n - 1) * a : α) na) (pa : na = na') :
-(a * b) = na' * b := by
rw [← pa, ← p.out, ← neg_one_mul]
simp
/-- Given an expression `-(a * b) : t` such that `t` is a ring of characteristic `n`,
and `a` is a numeral, simplify this to `((n - 1) * a) * b`. -/
@[nolint unusedHavesSuffices] -- the `=Q` is necessary for type checking
partial def normNegCoeffMul {α : Q(Type u)} (n : Q(ℕ)) (e : Q($α)) (_instRing : Q(Ring $α))
(instCharP : Q(CharP $α $n)) :
MetaM Simp.Result := do
let .app neg (.app (.app mul (a : Q($α))) (b : Q($α))) ← whnfR e | failure
guard <|← withNewMCtxDepth <| isDefEq neg q(Neg.neg (α := $α))
guard <|← withNewMCtxDepth <| isDefEq mul q(HMul.hMul (α := $α))
let r ← (derive (α := α) q(($n - 1) * $a))
match r with
| .isNat sα na np => do
have : AddGroupWithOne.toAddMonoidWithOne =Q $sα := ⟨⟩
let ⟨na', npa'⟩ ← mkOfNat α sα na
let pf : Q(-($a * $b) = $na' * $b) :=
q(CharP.neg_mul_eq_sub_one_mul $n $instCharP $a $b $na $na' $np $npa')
return { expr := q($na' * $b), proof? := pf }
| .isNegNat _ _ _ =>
throwError "normNegCoeffMul: nothing useful to do in negative characteristic"
| _ => throwError "normNegCoeffMul: evaluating `{n} - 1` should give an integer result"
/-- A `TypeToCharPResult α` indicates if `α` can be determined to be a ring of characteristic `p`.
-/
inductive TypeToCharPResult (α : Q(Type u))
| intLike (n : Q(ℕ)) (instRing : Q(Ring $α)) (instCharP : Q(CharP $α $n))
| failure
instance {α : Q(Type u)} : Inhabited (TypeToCharPResult α) := ⟨.failure⟩
/-- Determine the characteristic of a ring from the type.
This should be fast, so this pattern-matches on the type, rather than searching for a
`CharP` instance.
Use `typeToCharP (expensive := true)` to do more work in finding the characteristic,
in particular it will search for a `CharP` instance in the context. -/
partial def typeToCharP (expensive := false) (t : Q(Type u)) : MetaM (TypeToCharPResult t) :=
match Expr.getAppFnArgs t with
| (``ZMod, #[(n : Q(ℕ))]) =>
return .intLike n
(q((ZMod.commRing _).toRing) : Q(Ring (ZMod $n)))
(q(ZMod.charP _) : Q(CharP (ZMod $n) $n))
| (``Polynomial, #[(R : Q(Type u)), _]) => do match ← typeToCharP (expensive := expensive) R with
| (.intLike n _ _) =>
return .intLike n
(q(Polynomial.ring) : Q(Ring (Polynomial $R)))
(q(Polynomial.instCharP _) : Q(CharP (Polynomial $R) $n))
| .failure => return .failure
| _ => if ! expensive then return .failure else do
-- Fallback: run an expensive procedures to determine a characteristic,
-- by looking for a `CharP` instance.
withNewMCtxDepth do
/- If we want to support semirings, here we could implement the `natLike` fallback. -/
let .some instRing ← trySynthInstanceQ q(Ring $t) | return .failure
let n ← mkFreshExprMVarQ q(ℕ)
let some instCharP ← findLocalDeclWithTypeQ? q(CharP $t $n) | return .failure
return .intLike (← instantiateMVarsQ n) instRing instCharP
/-- Given an expression `e`, determine whether it is a numeric expression in characteristic `n`,
and if so, reduce `e` modulo `n`.
This is not a `norm_num` plugin because it does not match on the syntax of `e`,
rather it matches on the type of `e`.
Use `matchAndNorm (expensive := true)` to do more work in finding the characteristic of
the type of `e`.
-/
partial def matchAndNorm (expensive := false) (e : Expr) : MetaM Simp.Result := do
let α ← inferType e
let u_succ : Level ← getLevel α
let (.succ u) := u_succ | throwError "expected {α} to be a `Type _`, not `Sort {u_succ}`"
have α : Q(Type u) := α
match ← typeToCharP (expensive := expensive) α with
| (.intLike n instRing instCharP) =>
-- Handle the numeric expressions first, e.g. `-5` (which shouldn't become `-1 * 5`)
normIntNumeral n e instRing instCharP >>= Result.toSimpResult <|>
normNegCoeffMul n e instRing instCharP <|> -- `-(3 * X) → ((n - 1) * 3) * X`
normNeg n e instRing instCharP -- `-X → (n - 1) * X`
/- Here we could add a `natLike` result using only a `Semiring` instance.
This would activate only the less-powerful procedures
that cannot handle subtraction.
-/
| .failure =>
throwError "inferred type `{α}` does not have a known characteristic"
-- We use a few `simp` lemmas to preprocess the expression and clean up subterms like `0 * X`.
attribute [reduce_mod_char] sub_eq_add_neg
attribute [reduce_mod_char] zero_add add_zero zero_mul mul_zero one_mul mul_one
attribute [reduce_mod_char] eq_self_iff_true -- For closing non-numeric goals, e.g. `X = X`
/-- Reduce all numeric subexpressions of `e` modulo their characteristic.
Use `derive (expensive := true)` to do more work in finding the characteristic of
the type of `e`.
-/
partial def derive (expensive := false) (e : Expr) : MetaM Simp.Result := do
withTraceNode `Tactic.reduce_mod_char (fun _ => return m!"{e}") do
let e ← instantiateMVars e
let config : Simp.Config := {
zeta := false
beta := false
eta := false
proj := false
iota := false
}
let congrTheorems ← Meta.getSimpCongrTheorems
let ext? ← getSimpExtension? `reduce_mod_char
let ext ← match ext? with
| some ext => pure ext
| none => throwError "internal error: reduce_mod_char not registered as simp extension"
let ctx ← Simp.mkContext config (congrTheorems := congrTheorems)
(simpTheorems := #[← ext.getTheorems])
let discharge := Mathlib.Meta.NormNum.discharge ctx
let r : Simp.Result := {expr := e}
let pre := Simp.preDefault #[] >> fun e =>
try return (Simp.Step.done (← matchAndNorm (expensive := expensive) e))
catch _ => pure .continue
let post := Simp.postDefault #[]
let r ← r.mkEqTrans (← Simp.main r.expr ctx (methods := { pre, post, discharge? := discharge })).1
return r
open Parser.Tactic
/--
The tactic `reduce_mod_char` looks for numeric expressions in characteristic `p`
and reduces these to lie between `0` and `p`.
For example:
```
example : (5 : ZMod 4) = 1 := by reduce_mod_char
example : (X ^ 2 - 3 * X + 4 : (ZMod 4)[X]) = X ^ 2 + X := by reduce_mod_char
```
It also handles negation, turning it into multiplication by `p - 1`,
and similarly subtraction.
This tactic uses the type of the subexpression to figure out if it is indeed of positive
characteristic, for improved performance compared to trying to synthesise a `CharP` instance.
The variant `reduce_mod_char!` also tries to use `CharP R n` hypotheses in the context.
(Limitations of the typeclass system mean the tactic can't search for a `CharP R n` instance if
`n` is not yet known; use `have : CharP R n := inferInstance; reduce_mod_char!` as a workaround.)
-/
syntax (name := reduce_mod_char) "reduce_mod_char" (location)? : tactic
@[inherit_doc reduce_mod_char]
syntax (name := reduce_mod_char!) "reduce_mod_char!" (location)? : tactic
open Mathlib.Tactic in
elab_rules : tactic
| `(tactic| reduce_mod_char $[$loc]?) => unsafe do
let loc := expandOptLocation (Lean.mkOptionalNode loc)
transformAtNondepPropLocation (derive (expensive := false) ·) "reduce_mod_char" loc
(failIfUnchanged := false)
| `(tactic| reduce_mod_char! $[$loc]?) => unsafe do
let loc := expandOptLocation (Lean.mkOptionalNode loc)
transformAtNondepPropLocation (derive (expensive := true) ·) "reduce_mod_char"
loc (failIfUnchanged := false)
end ReduceModChar
end Tactic |
.lake/packages/mathlib/Mathlib/Tactic/CC.lean | import Mathlib.Tactic.CC.Addition
/-!
# Congruence closure
The congruence closure tactic `cc` tries to solve the goal by chaining
equalities from context and applying congruence (i.e. if `a = b`, then `f a = f b`).
It is a finishing tactic, i.e. it is meant to close
the current goal, not to make some inconclusive progress.
A mostly trivial example would be:
```lean
example (a b c : ℕ) (f : ℕ → ℕ) (h: a = b) (h' : b = c) : f a = f c := by
cc
```
As an example requiring some thinking to do by hand, consider:
```lean
example (f : ℕ → ℕ) (x : ℕ)
(H1 : f (f (f x)) = x) (H2 : f (f (f (f (f x)))) = x) :
f x = x := by
cc
```
The tactic works by building an equality matching graph. It's a graph where
the vertices are terms and they are linked by edges if they are known to
be equal. Once you've added all the equalities in your context, you take
the transitive closure of the graph and, for each connected component
(i.e. equivalence class) you can elect a term that will represent the
whole class and store proofs that the other elements are equal to it.
You then take the transitive closure of these equalities under the
congruence lemmas.
The `cc` implementation in Lean does a few more tricks: for example it
derives `a = b` from `Nat.succ a = Nat.succ b`, and `Nat.succ a != Nat.zero` for any `a`.
* The starting reference point is Nelson, Oppen, [Fast decision procedures based on congruence
closure](http://www.cs.colorado.edu/~bec/courses/csci5535-s09/reading/nelson-oppen-congruence.pdf),
Journal of the ACM (1980)
* The congruence lemmas for dependent type theory as used in Lean are described in
[Congruence closure in intensional type theory](https://leanprover.github.io/papers/congr.pdf)
(de Moura, Selsam IJCAR 2016).
-/
universe u
open Lean Meta Elab Tactic Std
namespace Mathlib.Tactic.CC
namespace CCState
open CCM
/-- Make an new `CCState` from the given `config`. -/
def mkCore (config : CCConfig) : CCState :=
let s : CCState := { config with }
s.mkEntryCore (.const ``True []) true false |>.mkEntryCore (.const ``False []) true false
/-- Create a congruence closure state object from the given `config` using the hypotheses in the
current goal. -/
def mkUsingHsCore (config : CCConfig) : MetaM CCState := do
let ctx ← getLCtx
let ctx ← instantiateLCtxMVars ctx
let (_, c) ← CCM.run (ctx.forM fun dcl => do
unless dcl.isImplementationDetail do
if ← isProp dcl.type then
add dcl.type dcl.toExpr) { mkCore config with }
return c.toCCState
/-- Returns the root expression for each equivalence class in the graph.
If the `Bool` argument is set to `true` then it only returns roots of non-singleton classes. -/
def rootsCore (ccs : CCState) (nonsingleton : Bool) : List Expr :=
ccs.getRoots #[] nonsingleton |>.toList
/-- Increment the Global Modification time. -/
def incGMT (ccs : CCState) : CCState :=
{ ccs with gmt := ccs.gmt + 1 }
/-- Add the given expression to the graph. -/
def internalize (ccs : CCState) (e : Expr) : MetaM CCState := do
let (_, c) ← CCM.run (CCM.internalize e) { ccs with }
return c.toCCState
/-- Add the given proof term as a new rule.
The proof term `H` must be an `Eq _ _`, `HEq _ _`, `Iff _ _`, or a negation of these. -/
def add (ccs : CCState) (H : Expr) : MetaM CCState := do
let type ← inferType H
unless ← isProp type do
throwError "CCState.add failed, given expression is not a proof term"
let (_, c) ← CCM.run (CCM.add type H) { ccs with }
return c.toCCState
/-- Check whether two expressions are in the same equivalence class. -/
def isEqv (ccs : CCState) (e₁ e₂ : Expr) : MetaM Bool := do
let (b, _) ← CCM.run (CCM.isEqv e₁ e₂) { ccs with }
return b
/-- Check whether two expressions are not in the same equivalence class. -/
def isNotEqv (ccs : CCState) (e₁ e₂ : Expr) : MetaM Bool := do
let (b, _) ← CCM.run (CCM.isNotEqv e₁ e₂) { ccs with }
return b
/-- Returns a proof term that the given terms are equivalent in the given `CCState` -/
def eqvProof (ccs : CCState) (e₁ e₂ : Expr) : MetaM Expr := do
let (some r, _) ← CCM.run (CCM.getEqProof e₁ e₂) { ccs with }
| throwError "CCState.eqvProof failed to build proof"
return r
/-- `proofFor cc e` constructs a proof for e if it is equivalent to true in `CCState` -/
def proofFor (ccs : CCState) (e : Expr) : MetaM Expr := do
let (some r, _) ← CCM.run (CCM.getEqProof e (.const ``True [])) { ccs with }
| throwError "CCState.proofFor failed to build proof"
mkAppM ``of_eq_true #[r]
/-- `refutationFor cc e` constructs a proof for `Not e` if it is equivalent to `False` in `CCState`
-/
def refutationFor (ccs : CCState) (e : Expr) : MetaM Expr := do
let (some r, _) ← CCM.run (CCM.getEqProof e (.const ``False [])) { ccs with }
| throwError "CCState.refutationFor failed to build proof"
mkAppM ``of_eq_false #[r]
/-- If the given state is inconsistent, return a proof for `False`. Otherwise fail. -/
def proofForFalse (ccs : CCState) : MetaM Expr := do
let (some pr, _) ← CCM.run CCM.getInconsistencyProof { ccs with }
| throwError "CCState.proofForFalse failed, state is not inconsistent"
return pr
/-- Create a congruence closure state object using the hypotheses in the current goal. -/
def mkUsingHs : MetaM CCState :=
CCState.mkUsingHsCore {}
/-- The root expressions for each equivalence class in the graph. -/
def roots (s : CCState) : List Expr :=
CCState.rootsCore s true
instance : ToMessageData CCState :=
⟨fun s => CCState.ppEqcs s true⟩
/-- Continue to append following expressions in the equivalence class of `e` to `r` until `f` is
found. -/
partial def eqcOfCore (s : CCState) (e : Expr) (f : Expr) (r : List Expr) : List Expr :=
let n := s.next e
if n == f then e :: r else eqcOfCore s n f (e :: r)
/-- The equivalence class of `e`. -/
def eqcOf (s : CCState) (e : Expr) : List Expr :=
s.eqcOfCore e e []
/-- The size of the equivalence class of `e`. -/
def eqcSize (s : CCState) (e : Expr) : Nat :=
s.eqcOf e |>.length
/-- Fold `f` over the equivalence class of `c`, accumulating the result in `a`.
Loops until the element `first` is encountered.
See `foldEqc` for folding `f` over all elements of the equivalence class. -/
partial def foldEqcCore {α} (s : CCState) (f : α → Expr → α) (first : Expr) (c : Expr) (a : α) :
α :=
let new_a := f a c
let next := s.next c
if next == first then new_a else foldEqcCore s f first next new_a
/-- Fold the function of `f` over the equivalence class of `e`. -/
def foldEqc {α} (s : CCState) (e : Expr) (a : α) (f : α → Expr → α) : α :=
foldEqcCore s f e e a
/-- Fold the monadic function of `f` over the equivalence class of `e`. -/
def foldEqcM {α} {m : Type → Type} [Monad m] (s : CCState) (e : Expr) (a : α)
(f : α → Expr → m α) : m α :=
foldEqc s e (return a) fun act e => do
let a ← act
f a e
end CCState
/-- Option to control whether to show a deprecation warning for the `cc` tactic. -/
register_option mathlib.tactic.cc.warning : Bool := {
defValue := true
descr := "Show a deprecation warning when using the `cc` tactic"
}
/--
Applies congruence closure to solve the given metavariable.
This procedure tries to solve the goal by chaining
equalities from context and applying congruence (i.e. if `a = b`, then `f a = f b`).
The tactic works by building an equality matching graph. It's a graph where
the vertices are terms and they are linked by edges if they are known to
be equal. Once you've added all the equalities in your context, you take
the transitive closure of the graph and, for each connected component
(i.e. equivalence class) you can elect a term that will represent the
whole class and store proofs that the other elements are equal to it.
You then take the transitive closure of these equalities under the
congruence lemmas.
The `cc` implementation in Lean does a few more tricks: for example it
derives `a = b` from `Nat.succ a = Nat.succ b`, and `Nat.succ a != Nat.zero` for any `a`.
* The starting reference point is Nelson, Oppen, [Fast decision procedures based on congruence
closure](http://www.cs.colorado.edu/~bec/courses/csci5535-s09/reading/nelson-oppen-congruence.pdf),
Journal of the ACM (1980)
* The congruence lemmas for dependent type theory as used in Lean are described in
[Congruence closure in intensional type theory](https://leanprover.github.io/papers/congr.pdf)
(de Moura, Selsam IJCAR 2016).
-/
def _root_.Lean.MVarId.cc (m : MVarId) (cfg : CCConfig := {}) : MetaM Unit := do
-- Check if warning should be shown
if ← getBoolOption `mathlib.tactic.cc.warning true then
logWarning "The tactic `cc` is deprecated since 2025-07-31, please use `grind` instead.\n\n\
Please report any regressions at https://github.com/leanprover/lean4/issues/.\n\
Note that `cc` supports some goals that `grind` doesn't,\n\
but these rely on higher-order unification and can result in unpredictable performance.\n\
If a downstream library is relying on this functionality,\n\
please report this in an issue and we'll help find a solution."
let (_, m) ← m.intros
m.withContext do
let s ← CCState.mkUsingHsCore cfg
let t ← m.getType >>= instantiateMVars
let s ← s.internalize t
if s.inconsistent then
let pr ← s.proofForFalse
mkAppOptM ``False.elim #[t, pr] >>= m.assign
else
let tr := Expr.const ``True []
let b ← s.isEqv t tr
if b then
let pr ← s.eqvProof t tr
mkAppM ``of_eq_true #[pr] >>= m.assign
else
let dbg ← getBoolOption `trace.Meta.Tactic.cc.failure false
if dbg then
throwError m!"cc tactic failed, equivalence classes: {s}"
else
throwError "cc tactic failed"
/--
Allow elaboration of `CCConfig` arguments to tactics.
-/
declare_config_elab elabCCConfig CCConfig
open Parser.Tactic in
/--
The congruence closure tactic `cc` tries to solve the goal by chaining
equalities from context and applying congruence (i.e. if `a = b`, then `f a = f b`).
It is a finishing tactic, i.e. it is meant to close
the current goal, not to make some inconclusive progress.
A mostly trivial example would be:
```lean
example (a b c : ℕ) (f : ℕ → ℕ) (h : a = b) (h' : b = c) : f a = f c := by
cc
```
As an example requiring some thinking to do by hand, consider:
```lean
example (f : ℕ → ℕ) (x : ℕ)
(H1 : f (f (f x)) = x) (H2 : f (f (f (f (f x)))) = x) :
f x = x := by
cc
``` -/
elab (name := _root_.Mathlib.Tactic.cc) "cc" cfg:optConfig : tactic => do
let cfg ← elabCCConfig cfg
withMainContext <| liftMetaFinishingTactic (·.cc cfg)
end Mathlib.Tactic.CC |
.lake/packages/mathlib/Mathlib/Tactic/Push.lean | import Lean.Elab.Tactic.Location
import Mathlib.Tactic.Push.Attr
import Mathlib.Logic.Basic
import Mathlib.Tactic.Conv
import Mathlib.Util.AtLocation
/-!
# The `push`, `push_neg` and `pull` tactics
The `push` tactic pushes a given constant inside expressions: it can be applied to goals as well
as local hypotheses and also works as a `conv` tactic. `push_neg` is a macro for `push Not`.
The `pull` tactic does the reverse: it pulls the given constant towards the head of the expression.
-/
namespace Mathlib.Tactic.Push
variable (p q : Prop) {α : Sort*} (s : α → Prop)
-- The more specific `Classical.not_imp` is attempted before the more general `not_forall_eq`.
-- This happens because `not_forall_eq` is handled manually in `pushNegBuiltin`.
attribute [push] not_not not_or Classical.not_imp not_false_eq_true not_true_eq_false
attribute [push ←] ne_eq
@[push] theorem not_iff : ¬ (p ↔ q) ↔ (p ∧ ¬ q) ∨ (¬ p ∧ q) :=
_root_.not_iff.trans <| iff_iff_and_or_not_and_not.trans <| by rw [not_not, or_comm]
@[push] theorem not_exists : (¬ Exists s) ↔ (∀ x, binderNameHint x s <| ¬ s x) :=
_root_.not_exists
-- TODO: lemmas involving `∃` should be tagged using `binderNameHint`,
-- and lemmas involving `∀` would need manual rewriting to keep the binder name.
attribute [push]
forall_const forall_and forall_or_left forall_or_right forall_eq forall_eq' forall_self_imp
exists_const exists_or exists_and_left exists_and_right exists_eq exists_eq'
and_or_left and_or_right and_true true_and and_false false_and
or_and_left or_and_right or_true true_or or_false false_or
-- these lemmas are only for the `pull` tactic
attribute [push low]
forall_and_left forall_and_right -- needs lower priority than `forall_and` in the `pull` tactic
attribute [push ←] Function.id_def
-- TODO: decide if we want this lemma, and if so, fix the proofs that break as a result
-- @[push high] theorem Nat.not_nonneg_iff_eq_zero (n : Nat) : ¬ 0 < n ↔ n = 0 :=
-- Nat.not_lt.trans Nat.le_zero
theorem not_and_eq : (¬ (p ∧ q)) = (p → ¬ q) := propext not_and
theorem not_and_or_eq : (¬ (p ∧ q)) = (¬ p ∨ ¬ q) := propext not_and_or
theorem not_forall_eq : (¬ ∀ x, s x) = (∃ x, ¬ s x) := propext not_forall
/-- Make `push_neg` use `not_and_or` rather than the default `not_and`. -/
register_option push_neg.use_distrib : Bool :=
{ defValue := false
group := ""
descr := "Make `push_neg` use `not_and_or` rather than the default `not_and`." }
open Lean Meta Elab.Tactic Parser.Tactic
/--
`pushNegBuiltin` is a simproc for pushing `¬` in a way that can't be done
using the `@[push]` attribute.
- `¬ (p ∧ q)` turns into `p → ¬ q` or `¬ p ∨ ¬ q`, depending on the option `push_neg.use_distrib`.
- `¬ ∀ a, p` turns into `∃ a, ¬ p`, where the binder name `a` is preserved.
-/
private def pushNegBuiltin : Simp.Simproc := fun e => do
let e := (← instantiateMVars e).cleanupAnnotations
match e with
| .app (.app (.const ``And _) p) q =>
if ← getBoolOption `push_neg.use_distrib then
return mkSimpStep (mkOr (mkNot p) (mkNot q)) (mkApp2 (.const ``not_and_or_eq []) p q)
else
return mkSimpStep (.forallE `_ p (mkNot q) .default) (mkApp2 (.const ``not_and_eq []) p q)
| .forallE name ty body binfo =>
let body' : Expr := .lam name ty (mkNot body) binfo
let body'' : Expr := .lam name ty body binfo
return mkSimpStep (← mkAppM ``Exists #[body']) (← mkAppM ``not_forall_eq #[body''])
| _ =>
return Simp.Step.continue
where
mkSimpStep (e : Expr) (pf : Expr) : Simp.Step :=
Simp.Step.continue (some { expr := e, proof? := some pf })
/-- The `simp` configuration used in `push`. -/
def pushSimpConfig : Simp.Config where
zeta := false
proj := false
/-- Try to rewrite using a `push` lemma. -/
/-- Try to rewrite using a `push` lemma.
Only fires when the whnf of the expression has head `head`; for a negation,
falls back to the builtin simproc `pushNegBuiltin` if no `@[push]` lemma applies. -/
def pushStep (head : Head) : Simp.Simproc := fun e => do
  let e_whnf ← whnf e
  -- Bail out unless the expression's head matches the head `push` was invoked with.
  let some e_head := Head.ofExpr? e_whnf | return Simp.Step.continue
  unless e_head == head do
    return Simp.Step.continue
  let thms := pushExt.getState (← getEnv)
  if let some r ← Simp.rewrite? e thms {} "push" false then
    -- We return `.visit r` instead of `.continue r`, because in the case of a triple negation,
    -- after rewriting `¬ ¬ ¬ p` into `¬ p`, we may want to rewrite `¬ p` again.
    return Simp.Step.visit r
  if let some e := e_whnf.not? then
    -- No `@[push]` lemma applied; handle `¬ (_ ∧ _)` and `¬ ∀ _, _` specially.
    pushNegBuiltin e
  else
    return Simp.Step.continue
/-- Common entry point to the implementation of `push`. -/
/-- Common entry point to the implementation of `push`.
Runs `simp` on `tgt` with `pushStep head` as a *pre*-transformation, no simp lemma set,
and an optional discharger for side conditions; returns the `Simp.Result`. -/
def pushCore (head : Head) (tgt : Expr) (disch? : Option Simp.Discharge) : MetaM Simp.Result := do
  let ctx : Simp.Context ← Simp.mkContext pushSimpConfig
    (simpTheorems := #[])
    (congrTheorems := ← getSimpCongrTheorems)
  let methods := match disch? with
    | none => { pre := pushStep head }
    | some disch => { pre := pushStep head, discharge? := disch, wellBehavedDischarge := false }
  -- `Simp.main` returns a pair; only the `Simp.Result` component is needed.
  (·.1) <$> Simp.main tgt ctx (methods := methods)
/-- Try to rewrite using a `pull` lemma. -/
/-- Try to rewrite using a `pull` lemma.
Candidate theorems are tried in decreasing priority order, and only theorems whose
recorded head matches `head` are used. -/
def pullStep (head : Head) : Simp.Simproc := fun e => do
  let thms := pullExt.getState (← getEnv)
  -- We can't use `Simp.rewrite?` here, because we need to only allow rewriting with theorems
  -- that pull the correct head.
  let candidates ← Simp.withSimpIndexConfig <| thms.getMatchWithExtra e
  if candidates.isEmpty then
    return Simp.Step.continue
  -- Sort candidates so higher-priority theorems are attempted first.
  let candidates := candidates.insertionSort fun e₁ e₂ => e₁.1.1.priority > e₂.1.1.priority
  for ((thm, thm_head), numExtraArgs) in candidates do
    if thm_head == head then
      -- First theorem that both matches the head and successfully rewrites wins.
      if let some result ← Simp.tryTheoremWithExtraArgs? e thm numExtraArgs then
        return Simp.Step.continue result
  return Simp.Step.continue
/-- Common entry point to the implementation of `pull`. -/
/-- Common entry point to the implementation of `pull`.
Like `pushCore`, but the rewriting step runs as a *post*-transformation. -/
def pullCore (head : Head) (tgt : Expr) (disch? : Option Simp.Discharge) : MetaM Simp.Result := do
  let ctx : Simp.Context ← Simp.mkContext pushSimpConfig
    (simpTheorems := #[])
    (congrTheorems := ← getSimpCongrTheorems)
  let methods := match disch? with
    | none => { post := pullStep head }
    | some disch => { post := pullStep head, discharge? := disch, wellBehavedDischarge := false }
  -- `Simp.main` returns a pair; only the `Simp.Result` component is needed.
  (·.1) <$> Simp.main tgt ctx (methods := methods)
section ElabHead
open Elab Term
/-- Return `true` if `stx` is an underscore, i.e. `_` or `fun $_ => _`/`fun $_ ↦ _`. -/
/-- Return `true` if `stx` is an underscore, i.e. `_` or `fun $_ => _`/`fun $_ ↦ _`. -/
partial def isUnderscore : Term → Bool
  -- `fun $_ ↦ _` parses to the same syntax tree as `fun $_ => _`, so one pattern covers both.
  | `(_) | `(fun $_ => _) => true
  | _ => false
/-- `resolvePushId?` is a version of `resolveId?` that also supports notations like `_ ∈ _`,
`∃ x, _` and `∑ x, _`. -/
/-- `resolvePushId?` is a version of `resolveId?` that also supports notations like `_ ∈ _`,
`∃ x, _` and `∑ x, _`.
Returns `none` (rather than throwing) when the head cannot be resolved. -/
def resolvePushId? (stx : Term) : TermElabM (Option Expr) := do
  match ← liftMacroM <| expandMacros stx with
  | `($f $args*) =>
    -- Note: we would like to insist that all arguments in the notation are given as underscores,
    -- but for example `∑ x, _` expands to `Finset.sum Finset.univ fun _ ↦ _`,
    -- in which `Finset.univ` is not an underscore. So instead
    -- we only insist that the last argument is an underscore.
    -- (`back?.all` is also `true` for a zero-argument application.)
    if args.back?.all isUnderscore then
      try resolveId? f catch _ => return none
    else
      return none
  -- Unwrap the elaboration helpers that binary/unary operator notations expand to,
  -- then resolve the underlying head function `f`.
  | `(binop% $f _ _)
  | `(binop_lazy% $f _ _)
  | `(leftact% $f _ _)
  | `(rightact% $f _ _)
  | `(binrel% $f _ _)
  | `(binrel_no_prop% $f _ _)
  | `(unop% $f _)
  | f => try resolveId? f catch _ => return none
/-- Elaborator for the argument passed to `push`. It accepts a constant, or a function -/
def elabHead (stx : Term) : TermElabM Head := withRef stx do
-- we elaborate `stx` to get an appropriate error message if the term isn't well formed,
-- and to add hover information
_ ← withTheReader Term.Context ({ · with ignoreTCFailures := true }) <|
Term.withoutModifyingElabMetaStateWithInfo <| Term.withoutErrToSorry <| Term.elabTerm stx none
match stx with
| `(fun $_ => _) => return .lambda
| `(∀ $_, _) => return .forall
| _ =>
match ← resolvePushId? stx with
| some (.const c _) => return .const c
| _ => throwError "Could not resolve `push` argument {stx}. \
Expected either a constant, e.g. `push Not`, \
or notation with underscores, e.g. `push ¬ _`"
end ElabHead
/-- Elaborate the `(disch := ...)` syntax for a `simp`-like tactic. -/
/-- Elaborate the `(disch := ...)` syntax for a `simp`-like tactic. -/
def elabDischarger (stx : TSyntax ``discharger) : TacticM Simp.Discharge :=
  -- `stx.raw[3]` is presumably the tactic inside `(disch := <tac>)` — confirm against the
  -- `discharger` parser. Keep only the `Simp.Discharge` component of the result pair.
  (·.2) <$> tacticToDischarge stx.raw[3]
/--
`push` pushes the given constant away from the head of the expression. For example
- `push _ ∈ _` rewrites `x ∈ {y} ∪ zᶜ` into `x = y ∨ ¬ x ∈ z`.
- `push (disch := positivity) Real.log` rewrites `log (a * b ^ 2)` into `log a + 2 * log b`.
- `push ¬ _` is the same as `push_neg` or `push Not`, and it rewrites
  `¬ ∀ ε > 0, ∃ δ > 0, δ < ε` into `∃ ε > 0, ∀ δ > 0, ε ≤ δ`.

In addition to constants, `push` can be used to push `fun` and `∀` binders:
- `push fun _ ↦ _` rewrites `fun x => f x ^ 2 + 5` into `f ^ 2 + 5`
- `push ∀ _, _` rewrites `∀ a, p a ∧ q a` into `(∀ a, p a) ∧ (∀ a, q a)`.

The `push` tactic can be extended using the `@[push]` attribute.
To instead move a constant closer to the head of the expression, use the `pull` tactic.
To push a constant at a hypothesis, use the `push ... at h` or `push ... at *` syntax.
-/
elab (name := push) "push" disch?:(discharger)? head:(ppSpace colGt term) loc:(location)? :
    tactic => do
  let disch? ← disch?.mapM elabDischarger
  let head ← elabHead head
  -- With no location given, act on the goal only (`.targets #[] true`).
  let loc := (loc.map expandLocation).getD (.targets #[] true)
  transformAtLocation (pushCore head · disch?) "push" loc (failIfUnchanged := true) false
/--
Push negations into the conclusion or a hypothesis.
For instance, a hypothesis `h : ¬ ∀ x, ∃ y, x ≤ y` will be transformed by `push_neg at h` into
`h : ∃ x, ∀ y, y < x`. Binder names are preserved.

`push_neg` is a special case of the more general `push` tactic, namely `push Not`.
The `push` tactic can be extended using the `@[push]` attribute. `push` has special-casing
built in for `push Not`, so that it can preserve binder names, and so that `¬ (p ∧ q)` can be
transformed to either `p → ¬ q` (the default) or `¬ p ∨ ¬ q`. To get `¬ p ∨ ¬ q`, use
`set_option push_neg.use_distrib true`.

Tactics that introduce a negation usually have a version that automatically calls `push_neg` on
that negation. These include `by_cases!`, `contrapose!` and `by_contra!`.

Another example: given a hypothesis
```lean
h : ¬ ∀ ε > 0, ∃ δ > 0, ∀ x, |x - x₀| ≤ δ → |f x - y₀| ≤ ε
```
writing `push_neg at h` will turn `h` into
```lean
h : ∃ ε > 0, ∀ δ > 0, ∃ x, |x - x₀| ≤ δ ∧ ε < |f x - y₀|
```
Note that binder names are preserved by this tactic, contrary to what would happen with `simp`
using the relevant lemmas. One can use this tactic at the goal using `push_neg`,
at every hypothesis and the goal using `push_neg at *` or at selected hypotheses and the goal
using say `push_neg at h h' ⊢`, as usual.
-/
-- Implemented by delegating to `push Not`, forwarding the optional location.
macro (name := push_neg) "push_neg" loc:(location)? : tactic => `(tactic| push Not $[$loc]?)
/--
`pull` is the inverse tactic to `push`.
It pulls the given constant towards the head of the expression. For example
- `pull _ ∈ _` rewrites `x ∈ y ∨ ¬ x ∈ z` into `x ∈ y ∪ zᶜ`.
- `pull (disch := positivity) Real.log` rewrites `log a + 2 * log b` into `log (a * b ^ 2)`.
- `pull fun _ ↦ _` rewrites `f ^ 2 + 5` into `fun x => f x ^ 2 + 5` where `f` is a function.

A lemma is considered a `pull` lemma if its reverse direction is a `push` lemma
that actually moves the given constant away from the head. For example
- `not_or : ¬ (p ∨ q) ↔ ¬ p ∧ ¬ q` is a `pull` lemma, but `not_not : ¬ ¬ p ↔ p` is not.
- `log_mul : log (x * y) = log x + log y` is a `pull` lemma, but `log_abs : log |x| = log x` is not.
- `Pi.mul_def : f * g = fun (i : ι) => f i * g i` and `Pi.one_def : 1 = fun (x : ι) => 1` are both
  `pull` lemmas for `fun`, because every `push fun _ ↦ _` lemma is also considered a `pull` lemma.

TODO: define a `@[pull]` attribute for tagging `pull` lemmas that are not `push` lemmas.
-/
elab (name := pull) "pull" disch?:(discharger)? head:(ppSpace colGt term) loc:(location)? :
    tactic => do
  let disch? ← disch?.mapM elabDischarger
  let head ← elabHead head
  -- With no location given, act on the goal only (`.targets #[] true`).
  let loc := (loc.map expandLocation).getD (.targets #[] true)
  transformAtLocation (pullCore head · disch?) "pull" loc (failIfUnchanged := true) false
/-- A simproc variant of `push fun _ ↦ _`, to be used as `simp [↓pushFun]`. -/
simproc_decl _root_.pushFun (fun _ ↦ ?_) := pushStep .lambda
/-- A simproc variant of `pull fun _ ↦ _`, to be used as `simp [pullFun]`. -/
-- NOTE(review): the discrimination pattern is a wildcard — `pullStep` itself filters
-- candidates by their recorded head, so no syntactic pattern is imposed here.
simproc_decl _root_.pullFun (_) := pullStep .lambda
section Conv
@[inherit_doc push]
elab "push" disch?:(discharger)? head:(ppSpace colGt term) : conv => withMainContext do
  let disch? ← disch?.mapM elabDischarger
  let head ← elabHead head
  -- Rewrite the current `conv` LHS and install the simp result into the conv goal.
  Conv.applySimpResult (← pushCore head (← instantiateMVars (← Conv.getLhs)) disch?)
@[inherit_doc push_neg]
-- The `conv` version of `push_neg`, delegating to the `conv` version of `push`.
macro "push_neg" : conv => `(conv| push Not)
/--
The syntax is `#push head e`, where `head` is a constant and `e` is an expression,
which will print the `push head` form of `e`.
`#push` understands local variables, so you can use them to introduce parameters.
-/
macro (name := pushCommand) tk:"#push " head:ident ppSpace e:term : command =>
  -- Reuse the `conv` frontend: `#conv push head => e` prints the rewritten form.
  `(command| #conv%$tk push $head:ident => $e)
/--
The syntax is `#push_neg e`, where `e` is an expression,
which will print the `push_neg` form of `e`.
`#push_neg` understands local variables, so you can use them to introduce parameters.
-/
-- Delegates to `#push Not e`.
macro (name := pushNegCommand) tk:"#push_neg " e:term : command => `(command| #push%$tk Not $e)
@[inherit_doc pull]
elab "pull" disch?:(discharger)? head:(ppSpace colGt term) : conv => withMainContext do
  let disch? ← disch?.mapM elabDischarger
  let head ← elabHead head
  -- Rewrite the current `conv` LHS and install the simp result into the conv goal.
  Conv.applySimpResult (← pullCore head (← instantiateMVars (← Conv.getLhs)) disch?)
/--
The syntax is `#pull head e`, where `head` is a constant and `e` is an expression,
which will print the `pull head` form of `e`.
`#pull` understands local variables, so you can use them to introduce parameters.
-/
macro (name := pullCommand) tk:"#pull " head:ident ppSpace e:term : command =>
  -- Reuse the `conv` frontend: `#conv pull head => e` prints the rewritten form.
  `(command| #conv%$tk pull $head:ident => $e)
end Conv
section DiscrTree
/--
`#push_discr_tree X` shows the discrimination tree of all lemmas used by `push X`.
This can be helpful when you are constructing a set of `push` lemmas for the constant `X`.
-/
-- Elaborated by `elabPushTree` below (via `@[command_elab pushTree]`).
syntax (name := pushTree) "#push_discr_tree " (colGt term) : command
@[command_elab pushTree, inherit_doc pushTree]
def elabPushTree : Elab.Command.CommandElab := fun stx => do
  Elab.Command.runTermElabM fun _ => do
    -- `stx[1]` is the `term` argument of `#push_discr_tree`.
    let head ← elabHead ⟨stx[1]⟩
    let thms := pushExt.getState (← getEnv)
    let mut logged := false
    for (key, trie) in thms.root do
      -- A root key matches if it names the given constant, or is the key that
      -- lambdas (`.other`) or `∀`/arrows (`.arrow`) are indexed under.
      let matchesHead (k : DiscrTree.Key) : Bool :=
        match k, head with
        | .const c _, .const c' => c == c'
        | .other , .lambda => true
        | .arrow , .forall => true
        | _ , _ => false
      if matchesHead key then
        logInfo m! "DiscrTree branch for {key}:{indentD (format trie)}"
        logged := true
    -- Report explicitly when the head has no registered `push` lemmas at all.
    unless logged do
      logInfo m! "There are no `push` theorems for `{head.toString}`"
end DiscrTree
end Mathlib.Tactic.Push |
.lake/packages/mathlib/Mathlib/Tactic/TermCongr.lean | import Mathlib.Lean.Expr.Basic
import Mathlib.Lean.Meta.CongrTheorems
import Mathlib.Logic.Basic
import Mathlib.Tactic.CongrExclamation
/-! # `congr(...)` congruence quotations
This module defines a term elaborator for generating congruence lemmas
from patterns written using quotation syntax.
One can write `congr($hf $hx)` with `hf : f = f'` and `hx : x = x'` to get `f x = f' x'`.
While in simple cases it might be possible to use `congr_arg` or `congr_fun`,
congruence quotations are more general,
since for example `f` could have implicit arguments, complicated dependent types,
and subsingleton instance arguments such as `Decidable` or `Fintype`.
The implementation strategy is the following:
1. The pattern is elaborated twice, once with each hole replaced by the LHS
and again with each hole replaced by the RHS. We do not force the hole to
have any particular type while elaborating, but if the hole has a type
with an obvious LHS or RHS, then we propagate this information outward.
We use `Mathlib.Tactic.TermCongr.cHole` with metadata for these replacements
to hold onto the hole itself.
2. Once the pattern has been elaborated twice,
we unify them against the respective LHS and RHS of the target type
if the target has a type with an obvious LHS and RHS.
This can fill in some metavariables and help typeclass inference make progress.
3. Then we simultaneously walk along the elaborated LHS and RHS expressions
to generate a congruence.
When we reach `cHole`s, we make sure they were elaborated in a compatible way.
Each `Expr` type has some logic to come up with a suitable congruence.
For applications we use a version of `Lean.Meta.mkHCongrWithArity` that tries
to fill in some of the equality proofs using subsingleton lemmas.
The point of elaborating the expression twice is that we let the elaborator handle
activities like synthesizing instances, etc., specialized to LHS or RHS, without trying
to derive one side from the other.
During development there was a version using `simp` transformations, but there was
no way to inform `simp` about the expected RHS, which could cause `simp` to fail because
it eagerly wants to solve for instance arguments. The current version is able to use the
expected LHS and RHS to fill in arguments before solving for instance arguments.
-/
universe u
namespace Mathlib.Tactic.TermCongr
open Lean Elab Meta
-- Trace class for debugging `congr(...)`; enable with `set_option trace.Elab.congr true`.
initialize registerTraceClass `Elab.congr
/--
`congr(expr)` generates a congruence from an expression containing
congruence holes of the form `$h` or `$(h)`.
In these congruence holes, `h : a = b` indicates that, in the generated congruence,
on the left-hand side `a` is substituted for `$h`
and on the right-hand side `b` is substituted for `$h`.

For example, if `h : a = b` then `congr(1 + $h) : 1 + a = 1 + b`.

This is able to make use of the expected type, for example `(congr(_ + $h) : 1 + _ = _)`
with `h : x = y` gives `1 + x = 1 + y`.
The expected type can be an `Iff`, `Eq`, or `HEq`.
If there is no expected type, then it generates an equality.

Note: the process of generating a congruence lemma involves elaborating the pattern
using terms with attached metadata and a reducible wrapper.
We try to avoid doing so, but these terms can leak into the local context through unification.
This can potentially break tactics that are sensitive to metadata or reducible functions.
Please report anything that goes wrong with `congr(...)` lemmas on Zulip.

For debugging, you can set `set_option trace.Elab.congr true`.
-/
-- The pattern is an ordinary `term`; the `$h`/`$(h)` antiquotations inside it
-- are turned into congruence holes by `processAntiquot`/`elaboratePattern`.
syntax (name := termCongr) "congr(" withoutForbidden(ppDedentIfGrouped(term)) ")" : term
/-! ### Congruence holes
This section sets up the way congruence holes are elaborated for `congr(...)` quotations.
The basic problem is that if we have `$h` with `h : x = y`, we need to elaborate it once
as `x` and once as `y`, and in both cases the term needs to remember that it's associated
to `h`.
-/
/-- Key for congruence hole metadata.
For a `Bool` recording whether this hole is for the LHS elaboration. -/
private def congrHoleForLhsKey : Name := decl_name%
/-- Key for congruence hole metadata.
For a `Nat` recording how old this congruence hole is, to prevent reprocessing them
if they leak into the local context. -/
private def congrHoleIndex : Name := decl_name%
/-- For holding onto the hole's value along with the value of either the LHS or RHS of the hole.
These occur wrapped in metadata so that they always appear as function application
with exactly four arguments.

Note that there is no relation between `val` and the proof.
We need to decouple these to support letting the proof's elaboration be deferred until
we know whether we want an iff, eq, or heq, while also allowing it to choose
to elaborate as an iff, eq, or heq.
Later, the congruence generator handles any discrepancies.
See `Mathlib/Tactic/TermCongr/CongrResult.lean`. -/
-- `reducible` so the wrapper is transparent to defeq checks; `_pf` is intentionally unused.
@[reducible, nolint unusedArguments]
def cHole {α : Sort u} (val : α) {p : Prop} (_pf : p) : α := val
/-- For error reporting purposes, make the hole pretty print as its value.
We can still see that it is a hole in the info view on mouseover. -/
@[app_unexpander cHole] def unexpandCHole : Lean.PrettyPrinter.Unexpander
  -- An application of `cHole` shows two explicit arguments (`val` and `pf`); print just `val`.
  | `($_ $val $_) => pure val
  | _ => throw ()
/-- Create the congruence hole. Used by `elabCHole`.
Saves the current mvarCounter as a proxy for age. We use this to avoid
reprocessing old congruence holes that happened to leak into the local context. -/
def mkCHole (forLhs : Bool) (val pf : Expr) : MetaM Expr := do
-- Create a metavariable to bump the mvarCounter.
discard <| mkFreshTypeMVar
let d : MData := KVMap.empty
|>.insert congrHoleForLhsKey forLhs
|>.insert congrHoleIndex (← getMCtx).mvarCounter
return Expr.mdata d <| ← mkAppM ``cHole #[val, pf]
/-- If the expression is a congruence hole, returns `(forLhs, sideVal, pf)`.
If `mvarCounterSaved?` is not none, then only returns the hole if it is at least as recent. -/
def cHole? (e : Expr) (mvarCounterSaved? : Option Nat := none) : Option (Bool × Expr × Expr) := do
match e with
| .mdata d e' =>
let forLhs : Bool ← d.get? congrHoleForLhsKey
let mvarCounter : Nat ← d.get? congrHoleIndex
if let some mvarCounterSaved := mvarCounterSaved? then
guard <| mvarCounterSaved ≤ mvarCounter
let #[_, val, _, pf] := e'.getAppArgs | failure
return (forLhs, val, pf)
| _ => none
/-- Returns any subexpression that is a recent congruence hole.
"Recent" means created at or after `mvarCounterSaved` (see `cHole?`). -/
def hasCHole (mvarCounterSaved : Nat) (e : Expr) : Option Expr :=
  e.find? fun e' => (cHole? e' mvarCounterSaved).isSome
/-- Eliminate all congruence holes from an expression by replacing them with their values.
Note: no age filter here — every recognized hole is replaced. -/
def removeCHoles (e : Expr) : Expr :=
  e.replace fun e' => if let some (_, val, _) := cHole? e' then val else none
/-- Elaborates a congruence hole and returns either the left-hand side or the right-hand side,
annotated with information necessary to generate a congruence lemma. -/
def elabCHole (h : Syntax) (forLhs : Bool) (expectedType? : Option Expr) : Term.TermElabM Expr := do
  let pf ← Term.elabTerm h none
  let pfTy ← inferType pf
  -- Ensure that `pfTy` is a proposition
  unless ← isDefEq (← inferType pfTy) (.sort .zero) do
    throwError "Hole has type{indentD pfTy}\nbut is expected to be a Prop"
  -- If the proof's type already exposes its sides (iff/eq/heq), use the requested one.
  if let some (_, lhs, _, rhs) := (← whnf pfTy).sides? then
    let val := if forLhs then lhs else rhs
    if let some expectedType := expectedType? then
      -- Propagate type hint:
      discard <| isDefEq expectedType (← inferType val)
    mkCHole forLhs val pf
  else
    -- Since `pf` doesn't yet have sides, we resort to the value and the proof being decoupled.
    -- These will be unified during congruence generation.
    mkCHole forLhs (← mkFreshExprMVar expectedType?) pf
/-- (Internal for `congr(...)`)
Elaborates to an expression satisfying `cHole?` that equals the LHS or RHS of `h`,
if the LHS or RHS is available after elaborating `h`. Uses the expected type as a hint. -/
syntax (name := cHoleExpand) "cHole% " (&"lhs" <|> &"rhs") term : term
@[term_elab cHoleExpand, inherit_doc cHoleExpand]
def elabCHoleExpand : Term.TermElab := fun stx expectedType? =>
  match stx with
  -- Dispatch on the `lhs`/`rhs` keyword to pick which side `elabCHole` produces.
  | `(cHole% lhs $h) => elabCHole h true expectedType?
  | `(cHole% rhs $h) => elabCHole h false expectedType?
  | _ => throwUnsupportedSyntax
/-- Replace all `term` antiquotations in a term using the given `expand` function. -/
def processAntiquot (t : Term) (expand : Term → Term.TermElabM Term) : Term.TermElabM Term := do
  let t' ← t.raw.replaceM fun s => do
    if s.isAntiquots then
      let ks := s.antiquotKinds
      -- Only `term` antiquotations (`$h`, `$(h)`) are supported inside the pattern.
      unless ks.any (fun (k, _) => k == `term) do
        throwErrorAt s "Expecting term"
      let h : Term := ⟨s.getCanonicalAntiquot.getAntiquotTerm⟩
      expand h
    else
      -- `none` leaves this syntax node unchanged and recurses into its children.
      pure none
  return ⟨t'⟩
/-- Given the pattern `t` in `congr(t)`, elaborate it for the given side
by replacing antiquotations with `cHole%` terms, and ensure the elaborated term
is of the expected type. -/
def elaboratePattern (t : Term) (expectedType? : Option Expr) (forLhs : Bool) :
    Term.TermElabM Expr :=
  -- `withoutErrToSorry` so elaboration failures propagate instead of producing `sorry`.
  Term.withoutErrToSorry do
    let t' ← processAntiquot t (fun h => if forLhs then `(cHole% lhs $h) else `(cHole% rhs $h))
    Term.elabTermEnsuringType t' expectedType?
/-! ### Congruence generation -/
/-- Ensures the expected type is an equality. Returns the equality.
The returned expression satisfies `Lean.Expr.eq?`.
The type and both sides are fresh metavariables, to be filled in by later unification. -/
def mkEqForExpectedType (expectedType? : Option Expr) : MetaM Expr := do
  let u ← mkFreshLevelMVar
  let ty ← mkFreshExprMVar (mkSort u)
  let eq := mkApp3 (mkConst ``Eq [u]) ty (← mkFreshExprMVar ty) (← mkFreshExprMVar ty)
  if let some expectedType := expectedType? then
    unless ← isDefEq expectedType eq do
      throwError m!"Type{indentD expectedType}\nis expected to be an equality."
  return eq
/-- Ensures the expected type is a HEq. Returns the HEq.
This expression satisfies `Lean.Expr.heq?`. -/
def mkHEqForExpectedType (expectedType? : Option Expr) : MetaM Expr := do
  let u ← mkFreshLevelMVar
  -- `HEq` takes both types in the same universe `u`, but the types themselves may differ.
  let tya ← mkFreshExprMVar (mkSort u)
  let tyb ← mkFreshExprMVar (mkSort u)
  let heq := mkApp4 (mkConst ``HEq [u]) tya (← mkFreshExprMVar tya) tyb (← mkFreshExprMVar tyb)
  if let some expectedType := expectedType? then
    unless ← isDefEq expectedType heq do
      throwError m!"Type{indentD expectedType}\nis expected to be a `HEq`."
  return heq
/-- Ensures the expected type is an iff. Returns the iff.
This expression satisfies `Lean.Expr.iff?`.
Both sides are fresh `Prop` metavariables, to be filled in by later unification. -/
def mkIffForExpectedType (expectedType? : Option Expr) : MetaM Expr := do
  let a ← mkFreshExprMVar (Expr.sort .zero)
  let b ← mkFreshExprMVar (Expr.sort .zero)
  -- Use a double-backquoted (compile-time checked) name literal, matching
  -- `mkEqForExpectedType`/`mkHEqForExpectedType`; a single backquote is unchecked.
  let iff := mkApp2 (Expr.const ``Iff []) a b
  if let some expectedType := expectedType? then
    unless ← isDefEq expectedType iff do
      throwError m!"Type{indentD expectedType}\nis expected to be an `Iff`."
  return iff
/-- Make sure that the expected type of `pf` is an iff by unification. -/
def ensureIff (pf : Expr) : MetaM Expr := do
  -- Unify the type of `pf` with `?a ↔ ?b`; the constructed iff itself is discarded.
  discard <| mkIffForExpectedType (← inferType pf)
  return pf
/-- A request for a type of congruence lemma from a `CongrResult`. -/
inductive CongrType
  | eq | heq
/--
A congruence lemma between two expressions. The proof is generated dynamically, depending on
whether the resulting lemma should be an `Eq` or a `HEq`.

If generating a proof is impossible, then the generator can throw an error.
This can be due to either an `Eq` proof being impossible
or due to the lhs/rhs not being defeq to the lhs/rhs of the generated proof,
which can happen for user-supplied congruence holes.

This complexity is to support two features:
1. The user is free to supply Iff, Eq, and HEq lemmas in congruence holes,
   and we're able to transform them into whatever is appropriate for a
   given congruence lemma.
2. If the congruence hole is a metavariable, then we can specialize that
   hole to an Iff, Eq, or HEq depending on what's necessary at that site. -/
structure CongrResult where
  /-- The left-hand side of the congruence result. -/
  lhs : Expr
  /-- The right-hand side of the congruence result. -/
  rhs : Expr
  /-- A generator for an `Eq lhs rhs` or `HEq lhs rhs` proof.
  If such a proof is impossible, the generator can throw an error.
  The inferred type of the generated proof needs only be defeq to `Eq lhs rhs` or `HEq lhs rhs`.
  This function can assign metavariables when constructing the proof.
  If `pf? = none`, then `lhs` and `rhs` are defeq, and the proof is by reflexivity. -/
  (pf? : Option (CongrType → MetaM Expr))
/-- `true` when this congruence is proved by reflexivity (no proof generator stored),
in which case the proof is trivial. -/
def CongrResult.isRfl (res : CongrResult) : Bool :=
  match res.pf? with
  | none => true
  | some _ => false
/-- Returns the proof that `lhs = rhs`. Fails if the `CongrResult` is inapplicable.
Throws an error if the `lhs` and `rhs` have non-defeq types.
If `pf? = none`, this returns the `rfl` proof. -/
def CongrResult.eq (res : CongrResult) : MetaM Expr := do
  -- An `Eq` proof is only well-typed when both sides live in defeq types.
  unless ← isDefEq (← inferType res.lhs) (← inferType res.rhs) do
    throwError "Expecting{indentD res.lhs}\nand{indentD res.rhs}\n\
      to have definitionally equal types."
  match res.pf? with
  | some pf => pf .eq
  | none => mkEqRefl res.lhs
/-- Returns the proof that `lhs ≍ rhs`. Fails if the `CongrResult` is inapplicable.
If `pf? = none`, this returns the `rfl` proof. -/
def CongrResult.heq (res : CongrResult) : MetaM Expr := do
  match res.pf? with
  | some pf => pf .heq
  | none => mkHEqRefl res.lhs
/-- Returns a proof of `lhs ↔ rhs`. Uses `CongrResult.eq`. -/
def CongrResult.iff (res : CongrResult) : MetaM Expr := do
  unless ← Meta.isProp res.lhs do
    throwError "Expecting{indentD res.lhs}\nto be a proposition."
  -- Convert the `Eq` proof into an `Iff` via `iff_of_eq`.
  return mkApp3 (.const ``iff_of_eq []) res.lhs res.rhs (← res.eq)
/-- Combine two congruence proofs using transitivity.
Does not check that `res1.rhs` is defeq to `res2.lhs`.
If both `res1` and `res2` are trivial then the result is trivial. -/
def CongrResult.trans (res1 res2 : CongrResult) : CongrResult where
  lhs := res1.lhs
  rhs := res2.rhs
  pf? :=
    -- If one side is `rfl`, the other proof already proves the combined statement
    -- (up to defeq), so no `Trans` step is needed.
    if res1.isRfl then
      res2.pf?
    else if res2.isRfl then
      res1.pf?
    else
      some fun
        | .eq => do mkEqTrans (← res1.eq) (← res2.eq)
        | .heq => do mkHEqTrans (← res1.heq) (← res2.heq)
/-- Make a `CongrResult` from a LHS, a RHS, and a proof of an Iff, Eq, or HEq.
The proof is allowed to have a metavariable for its type.
Validates the inputs and throws errors in the `pf?` function.
The `pf?` function is responsible for finally unifying the type of `pf` with `lhs` and `rhs`. -/
def CongrResult.mk' (lhs rhs : Expr) (pf : Expr) : CongrResult where
  lhs := lhs
  rhs := rhs
  pf? :=
    -- A syntactic `rfl`-like proof means the congruence is trivial.
    if (isRefl? pf).isSome then
      none
    else
      some fun
        | .eq => do ensureSidesDefeq (← toEqPf)
        | .heq => do ensureSidesDefeq (← toHEqPf)
where
  /-- Given a `pf` of an `Iff`, `Eq`, or `HEq`, return a proof of `Eq`.
  If `pf` is not obviously any of these, weakly try inserting `propext` to make an `Iff`
  and otherwise unify the type with `Eq`. -/
  toEqPf : MetaM Expr := do
    let ty ← whnf (← inferType pf)
    if let some .. := ty.iff? then
      mkPropExt pf
    else if let some .. := ty.eq? then
      return pf
    else if let some (lhsTy, _, rhsTy, _) := ty.heq? then
      -- A `HEq` proof can only be downgraded to `Eq` when both sides have defeq types.
      unless ← isDefEq lhsTy rhsTy do
        throwError "Cannot turn HEq proof into an equality proof. Has type{indentD ty}"
      mkAppM ``eq_of_heq #[pf]
    else if ← Meta.isProp lhs then
      mkPropExt (← ensureIff pf)
    else
      -- Last resort: force the (metavariable) type of `pf` to unify with an `Eq`.
      discard <| mkEqForExpectedType (← inferType pf)
      return pf
  /-- Given a `pf` of an `Iff`, `Eq`, or `HEq`, return a proof of `HEq`.
  If `pf` is not obviously any of these, weakly try making it be an `Eq` or an `Iff`,
  and otherwise make it be a `HEq`. -/
  toHEqPf : MetaM Expr := do
    let ty ← whnf (← inferType pf)
    if let some .. := ty.iff? then
      mkAppM ``heq_of_eq #[← mkPropExt pf]
    else if let some .. := ty.eq? then
      mkAppM ``heq_of_eq #[pf]
    else if let some .. := ty.heq? then
      return pf
    else if ← withNewMCtxDepth <| isDefEq (← inferType lhs) (← inferType rhs) then
      -- `withNewMCtxDepth` keeps this speculative defeq test from assigning
      -- metavariables from the outer context.
      mkAppM ``heq_of_eq #[← toEqPf]
    else
      discard <| mkHEqForExpectedType (← inferType pf)
      return pf
  /-- Get the sides of the type of `pf` and unify them with the respective `lhs` and `rhs`. -/
  ensureSidesDefeq (pf : Expr) : MetaM Expr := do
    let pfTy ← inferType pf
    -- By this point the type has been coerced to an eq or heq, so `sides?` must succeed.
    let some (_, lhs', _, rhs') := (← whnf pfTy).sides?
      | panic! "Unexpectedly did not generate an eq or heq"
    unless ← isDefEq lhs lhs' do
      throwError "Congruence hole has type{indentD pfTy}\n\
        but its left-hand side is not definitionally equal to the expected value{indentD lhs}"
    unless ← isDefEq rhs rhs' do
      throwError "Congruence hole has type{indentD pfTy}\n\
        but its right-hand side is not definitionally equal to the expected value{indentD rhs}"
    return pf
/-- Force the lhs and rhs to be defeq. For when `dsimp`-like congruence is necessary.
Clears the proof. -/
def CongrResult.defeq (res : CongrResult) : MetaM CongrResult := do
  if res.isRfl then
    -- Already a reflexivity congruence; nothing to check or clear.
    return res
  else
    unless ← isDefEq res.lhs res.rhs do
      throwError "Cannot generate congruence because we need{indentD res.lhs}\n\
        to be definitionally equal to{indentD res.rhs}"
    -- Propagate types into any proofs that we're dropping:
    discard <| res.eq
    return {res with pf? := none}
/-- Tries to make a congruence between `lhs` and `rhs` automatically.
1. If they are defeq, returns a trivial congruence.
2. Tries using `Subsingleton.elim`.
3. Tries `proof_irrel_heq` as another effort to avoid doing congruence on proofs.
4. Otherwise throws an error.

Note: `mkAppM` uses `withNewMCtxDepth`, which prevents typeclass inference
from accidentally specializing `Sort _` to `Prop`, which could otherwise happen
because there is a `Subsingleton Prop` instance. -/
def CongrResult.mkDefault (lhs rhs : Expr) : MetaM CongrResult := do
  if ← isDefEq lhs rhs then
    return {lhs, rhs, pf? := none}
  else if let some pf ← (observing? <| mkAppM ``Subsingleton.elim #[lhs, rhs]) then
    return CongrResult.mk' lhs rhs pf
  else if let some pf ← (observing? <| mkAppM ``proof_irrel_heq #[lhs, rhs]) then
    return CongrResult.mk' lhs rhs pf
  throwError "Could not generate congruence between{indentD lhs}\nand{indentD rhs}"
/-- Does `CongrResult.mkDefault` but makes sure there are no lingering congruence holes.
A remaining hole means the hole-matching pass failed to pair it, so we error out early. -/
def CongrResult.mkDefault' (mvarCounterSaved : Nat) (lhs rhs : Expr) : MetaM CongrResult := do
  if let some h := hasCHole mvarCounterSaved lhs then
    throwError "Left-hand side{indentD lhs}\nstill has a congruence hole{indentD h}"
  if let some h := hasCHole mvarCounterSaved rhs then
    throwError "Right-hand side{indentD rhs}\nstill has a congruence hole{indentD h}"
  CongrResult.mkDefault lhs rhs
/-- Throw an internal error, showing both sides of the failed congruence plus `msg`. -/
def throwCongrEx {α : Type} (lhs rhs : Expr) (msg : MessageData) : MetaM α := do
  -- `indentD` supplies its own line break, so there is no space before the interpolation
  -- (this also makes the "right-hand side" line consistent with the "left-hand side" line).
  throwError "congr(...) failed with left-hand side{indentD lhs}\n\
    and right-hand side{indentD rhs}\n{msg}"
/-- If `lhs` or `rhs` is a congruence hole, then process it.
Only process ones that are at least as new as `mvarCounterSaved`
since nothing prevents congruence holes from leaking into the local context. -/
def mkCongrOfCHole? (mvarCounterSaved : Nat) (lhs rhs : Expr) : MetaM (Option CongrResult) := do
  match cHole? lhs mvarCounterSaved, cHole? rhs mvarCounterSaved with
  | some (isLhs1, val1, pf1), some (isLhs2, val2, pf2) =>
    trace[Elab.congr] "mkCongrOfCHole, both holes"
    -- Sanity checks: the hole on the LHS must come from the LHS elaboration, and vice versa.
    unless isLhs1 == true do
      throwCongrEx lhs rhs "A RHS congruence hole leaked into the LHS"
    unless isLhs2 == false do
      throwCongrEx lhs rhs "A LHS congruence hole leaked into the RHS"
    -- Defeq checks to unify the lhs and rhs congruence holes.
    unless ← isDefEq (← inferType pf1) (← inferType pf2) do
      throwCongrEx lhs rhs "Elaborated types of congruence holes are not defeq."
    -- If the (now unified) proof type exposes its sides, check them against the hole values.
    if let some (_, lhsVal, _, rhsVal) := (← whnf <| ← inferType pf1).sides? then
      unless ← isDefEq val1 lhsVal do
        throwError "Left-hand side of congruence hole is{indentD lhsVal}\n\
          but is expected to be{indentD val1}"
      unless ← isDefEq val2 rhsVal do
        throwError "Right-hand side of congruence hole is{indentD rhsVal}\n\
          but is expected to be{indentD val2}"
    -- The proof types were unified above, so `pf1` serves as the proof for both sides.
    return some <| CongrResult.mk' val1 val2 pf1
  | some .., none =>
    throwCongrEx lhs rhs "Right-hand side lost its congruence hole annotation."
  | none, some .. =>
    throwCongrEx lhs rhs "Left-hand side lost its congruence hole annotation."
  | none, none => return none
/--
Given two applications of the same arity, gives `Expr.getAppFn` of both,
but if these functions are equal, gives the longest common prefix.
-/
private def getJointAppFns (e e' : Expr) : Expr × Expr :=
  if e == e' then
    -- Equal prefixes: stop here, keeping the shared (possibly partial) application.
    (e, e)
  else
    match e, e' with
    -- Peel one argument from each side and recurse toward the head.
    | .app f _, .app f' _ => getJointAppFns f f'
    | _, _ => (e, e')
/-- Monad for `mkCongrOfAux`, for caching `CongrResult`s.
The cache is keyed on the `(lhs, rhs)` pair of expressions being congruence-matched. -/
abbrev M := MonadCacheT (Expr × Expr) CongrResult MetaM
mutual
/-- Implementation of `mkCongrOf`, with caching. -/
partial def mkCongrOfAux (depth : Nat) (mvarCounterSaved : Nat) (lhs rhs : Expr) :
M CongrResult := do
trace[Elab.congr] "mkCongrOf: {depth}, {lhs}, {rhs}, {(← mkFreshExprMVar none).mvarId!}"
if depth > 1000 then
throwError "congr(...) internal error: out of gas"
-- Potentially metavariables get assigned as we process congruence holes,
-- so instantiate them to be safe. Placeholders and implicit arguments might
-- end up with congruence holes, so they indeed might need a nontrivial congruence.
let lhs ← instantiateMVars lhs
let rhs ← instantiateMVars rhs
checkCache (lhs, rhs) fun _ => do
if let some res ← mkCongrOfCHole? mvarCounterSaved lhs rhs then
trace[Elab.congr] "hole processing succeeded"
return res
if lhs == rhs then
-- There should not be any cHoles, but to be safe let's remove them.
return { lhs := removeCHoles lhs, rhs := removeCHoles rhs, pf? := none }
if (hasCHole mvarCounterSaved lhs).isNone && (hasCHole mvarCounterSaved rhs).isNone then
-- It's safe to fastforward if the lhs and rhs are defeq and have no congruence holes.
-- This is more conservative than necessary since congruence holes might only be inside
-- proofs, and it is OK to ignore these.
if ← isDefEq lhs rhs then
return { lhs, rhs, pf? := none }
if ← (isProof lhs <||> isProof rhs) then
-- We don't want to look inside proofs at all.
return ← CongrResult.mkDefault lhs rhs
match lhs, rhs with
| .app .., .app .. =>
mkCongrOfApp depth mvarCounterSaved lhs rhs
| .lam .., .lam .. =>
trace[Elab.congr] "lam"
let resDom ← mkCongrOfAux (depth + 1) mvarCounterSaved lhs.bindingDomain! rhs.bindingDomain!
-- We do not yet support congruences in the binding domain for lambdas.
discard <| resDom.defeq
withLocalDecl lhs.bindingName! lhs.bindingInfo! resDom.lhs fun x => do
let lhsb := lhs.bindingBody!.instantiate1 x
let rhsb := rhs.bindingBody!.instantiate1 x
let resBody ← mkCongrOfAux (depth + 1) mvarCounterSaved lhsb rhsb
let lhs ← mkLambdaFVars #[x] resBody.lhs
let rhs ← mkLambdaFVars #[x] resBody.rhs
if resBody.isRfl then
return {lhs, rhs, pf? := none}
else
let pf ← mkLambdaFVars #[x] (← resBody.eq)
return CongrResult.mk' lhs rhs (← mkAppM ``funext #[pf])
| .forallE .., .forallE .. =>
trace[Elab.congr] "forallE"
let resDom ← mkCongrOfAux (depth + 1) mvarCounterSaved lhs.bindingDomain! rhs.bindingDomain!
if lhs.isArrow && rhs.isArrow then
let resBody ← mkCongrOfAux (depth + 1) mvarCounterSaved lhs.bindingBody! rhs.bindingBody!
let lhs := Expr.forallE lhs.bindingName! resDom.lhs resBody.lhs lhs.bindingInfo!
let rhs := Expr.forallE rhs.bindingName! resDom.rhs resBody.rhs rhs.bindingInfo!
if resDom.isRfl && resBody.isRfl then
return {lhs, rhs, pf? := none}
else
return CongrResult.mk' lhs rhs (← mkImpCongr (← resDom.eq) (← resBody.eq))
else
-- We do not yet support congruences in the binding domain for dependent pi types.
discard <| resDom.defeq
withLocalDecl lhs.bindingName! lhs.bindingInfo! resDom.lhs fun x => do
let lhsb := lhs.bindingBody!.instantiate1 x
let rhsb := rhs.bindingBody!.instantiate1 x
let resBody ← mkCongrOfAux (depth + 1) mvarCounterSaved lhsb rhsb
let lhs ← mkForallFVars #[x] resBody.lhs
let rhs ← mkForallFVars #[x] resBody.rhs
if resBody.isRfl then
return {lhs, rhs, pf? := none}
else
let pf ← mkLambdaFVars #[x] (← resBody.eq)
return CongrResult.mk' lhs rhs (← mkAppM ``pi_congr #[pf])
| .letE .., .letE .. =>
trace[Elab.congr] "letE"
-- Just zeta reduce for now. Could look at `Lean.Meta.Simp.simp.simpLet`
let lhs := lhs.letBody!.instantiate1 lhs.letValue!
let rhs := rhs.letBody!.instantiate1 rhs.letValue!
mkCongrOfAux (depth + 1) mvarCounterSaved lhs rhs
| .mdata _ lhs', .mdata _ rhs' =>
trace[Elab.congr] "mdata"
let res ← mkCongrOfAux (depth + 1) mvarCounterSaved lhs' rhs'
return {res with lhs := lhs.updateMData! res.lhs, rhs := rhs.updateMData! res.rhs}
| .proj n1 i1 e1, .proj n2 i2 e2 =>
trace[Elab.congr] "proj"
-- Only handles defeq at the moment.
unless n1 == n2 && i1 == i2 do
throwCongrEx lhs rhs "Incompatible primitive projections"
let res ← mkCongrOfAux (depth + 1) mvarCounterSaved e1 e2
discard <| res.defeq
return {lhs := lhs.updateProj! res.lhs, rhs := rhs.updateProj! res.rhs, pf? := none}
| _, _ =>
trace[Elab.congr] "base case"
CongrResult.mkDefault' mvarCounterSaved lhs rhs
/--
Generate congruence for applications `lhs` and `rhs`.
Key detail: functions might be *overapplied* due to the values of their arguments.
For example, `id id 2` is overapplied.
To handle these, we need to segment the applications into their natural arities,
since `mkHCongrWithArity'` does not know how to generate congruence lemmas for the overapplied case.
-/
partial def mkCongrOfApp (depth : Nat) (mvarCounterSaved : Nat) (lhs rhs : Expr) :
M CongrResult := do
-- Even if a function is being rewritten (e.g. with `f x = g`), both sides should have the same
-- number of arguments since there will be a cHole around both `f x` and `g`.
let arity := lhs.getAppNumArgs
trace[Elab.congr] "app, arity {arity}"
unless arity == rhs.getAppNumArgs do
trace[Elab.congr] "app desync (arity)"
return ← CongrResult.mkDefault' mvarCounterSaved lhs rhs
-- Optimization: congruences often have a shared prefix (e.g. some type parameters an instances)
-- so if there's a shared prefix we use it.
let mut (f, f') := getJointAppFns lhs rhs
let arity := arity - f.getAppNumArgs
trace[Elab.congr] "app, updated arity {arity}"
if f != f' then
unless ← isDefEq (← inferType f) (← inferType f') do
trace[Elab.congr] "app desync (function types)"
return ← CongrResult.mkDefault' mvarCounterSaved lhs rhs
-- First try using `congr`/`congrFun` to build a proof as far as possible.
-- We update `f`, `f'`, and `finfo` as we go.
let lhsArgs := lhs.getBoundedAppArgs arity
let rhsArgs := rhs.getBoundedAppArgs arity
let rec
/--
Argument processing loop
- `i` is index into `lhsArgs`/`rhsArgs`.
- `finfo` is the funinfo of `f` applied to the first `finfoIdx` arguments
- `f` and `f'` are the current head functions, after the first `i` arguments have been applied.
-/
go (i : Nat) (finfo : FunInfo) (finfoIdx : Nat) (f f' : Expr) (pf : Expr) :
M CongrResult := do
if i ≥ arity then
return CongrResult.mk' f f' pf
else
let mut finfo := finfo
let mut finfoIdx := finfoIdx
unless i - finfoIdx < finfo.getArity do
finfo ← getFunInfoNArgs f (arity - finfoIdx)
finfoIdx := i
let info := finfo.paramInfo[i - finfoIdx]!
let a := lhsArgs[i]!
let a' := rhsArgs[i]!
let ra ← mkCongrOfAux (depth + 1) mvarCounterSaved a a'
if ra.isRfl then
trace[Elab.congr] "app, arg {i} by rfl"
go (i + 1) finfo finfoIdx (.app f ra.lhs) (.app f' ra.rhs) (← mkCongrFun pf ra.lhs)
else if !info.hasFwdDeps then
trace[Elab.congr] "app, arg {i} by eq"
go (i + 1) finfo finfoIdx (.app f ra.lhs) (.app f' ra.rhs) (← mkCongr pf (← ra.eq))
else
-- Otherwise, we can make progress with an hcongr lemma.
if (isRefl? pf).isNone then
trace[Elab.congr] "app, hcongr needs transitivity"
-- If there's a nontrivial proof, then since `mkHCongrWithArity'` fixes the function,
-- we need to use transitivity to make the functions be the same.
let lhsArgs' := (lhsArgs.extract i).map removeCHoles
let lhs := mkAppN f lhsArgs'
let lhs' := mkAppN f' lhsArgs'
let mut pf' := pf
for arg in lhsArgs' do
pf' ← mkCongrFun pf' arg
let res1 := CongrResult.mk' lhs lhs' pf'
let res2 ← go i finfo finfoIdx f' f' (← mkEqRefl f')
return res1.trans res2
else
-- Get an accurate measure of the arity of `f`, following `getFunInfoNArgs`.
-- No need to update `finfo` itself.
let fArity ←
if finfoIdx == i then pure finfo.getArity
else withAtLeastTransparency .default do
forallBoundedTelescope (← inferType f) (some (arity - i)) fun xs _ => pure xs.size
trace[Elab.congr] "app, args {i}-{i+arity-1} by hcongr, {arity} arguments"
let thm ← mkHCongrWithArity' f fArity
let mut args := #[]
let mut lhsArgs' := #[]
let mut rhsArgs' := #[]
for lhs' in lhsArgs[i:], rhs' in rhsArgs[i:], kind in thm.argKinds do
match kind with
| .eq =>
let ares ← mkCongrOfAux (depth + 1) mvarCounterSaved lhs' rhs'
args := args |>.push ares.lhs |>.push ares.rhs |>.push (← ares.eq)
lhsArgs' := lhsArgs'.push ares.lhs
rhsArgs' := rhsArgs'.push ares.rhs
| .heq =>
let ares ← mkCongrOfAux (depth + 1) mvarCounterSaved lhs' rhs'
args := args |>.push ares.lhs |>.push ares.rhs |>.push (← ares.heq)
lhsArgs' := lhsArgs'.push ares.lhs
rhsArgs' := rhsArgs'.push ares.rhs
| .subsingletonInst =>
-- Warning: we're not processing any congruence holes here.
-- Users shouldn't be intentionally placing them in such arguments anyway.
-- We can't throw an error because these arguments might incidentally have
-- congruence holes by unification.
let lhs' := removeCHoles lhs'
let rhs' := removeCHoles rhs'
args := args |>.push lhs' |>.push rhs'
lhsArgs' := lhsArgs'.push lhs'
rhsArgs' := rhsArgs'.push rhs'
| _ => panic! "unexpected hcongr argument kind"
let lhs' := mkAppN f lhsArgs'
let rhs' := mkAppN f' rhsArgs'
let res := CongrResult.mk' lhs' rhs' (mkAppN thm.proof args)
if i + fArity < arity then
-- There are more arguments after this. The only way this can work is if
-- `res` can prove an equality.
go (i + fArity) finfo finfoIdx lhs' rhs' (← res.eq)
else
-- Otherwise, we can return `res`, which might only be a HEq.
return res
let res ← mkCongrOfAux (depth + 1) mvarCounterSaved f f'
let pf ← res.eq
go 0 (← getFunInfoNArgs f arity) 0 res.lhs res.rhs pf
end
/--
Walks along both `lhs` and `rhs` simultaneously to create a congruence lemma between them.
Where they are desynchronized, we fall back to the base case (using `CongrResult.mkDefault'`)
since it's likely due to unification with the expected type,
from `_` placeholders or implicit arguments being filled in.
-/
partial def mkCongrOf (depth : Nat) (mvarCounterSaved : Nat) (lhs rhs : Expr) :
MetaM CongrResult :=
mkCongrOfAux depth mvarCounterSaved lhs rhs |>.run
/-! ### Elaborating congruence quotations -/
@[term_elab termCongr, inherit_doc termCongr]
def elabTermCongr : Term.TermElab := fun stx expectedType? => do
match stx with
| `(congr($t)) =>
-- Save the current mvarCounter so that we know which cHoles are for this congr quotation.
let mvarCounterSaved := (← getMCtx).mvarCounter
-- Case 1: There is an expected type and it's obviously an Iff/Eq/HEq.
if let some expectedType := expectedType? then
if let some (expLhsTy, expLhs, expRhsTy, expRhs) := (← whnf expectedType).sides? then
let lhs ← elaboratePattern t expLhsTy true
let rhs ← elaboratePattern t expRhsTy false
-- Note: these defeq checks can leak congruence holes.
unless ← isDefEq expLhs lhs do
throwError "Left-hand side of elaborated pattern{indentD lhs}\n\
is not definitionally equal to left-hand side of expected type{indentD expectedType}"
unless ← isDefEq expRhs rhs do
throwError "Right-hand side of elaborated pattern{indentD rhs}\n\
is not definitionally equal to right-hand side of expected type{indentD expectedType}"
Term.synthesizeSyntheticMVars (postpone := .yes)
let res ← mkCongrOf 0 mvarCounterSaved lhs rhs
let expectedType' ← whnf expectedType
let pf ← if expectedType'.iff?.isSome then res.iff
else if expectedType'.isEq then res.eq
else if expectedType'.isHEq then res.heq
else panic! "unreachable case, sides? guarantees Iff, Eq, and HEq"
return ← mkExpectedTypeHint pf expectedType
-- Case 2: No expected type or it's not obviously Iff/Eq/HEq. We generate an Eq.
let lhs ← elaboratePattern t none true
let rhs ← elaboratePattern t none false
Term.synthesizeSyntheticMVars (postpone := .yes)
let res ← mkCongrOf 0 mvarCounterSaved lhs rhs
let pf ← res.eq
let ty ← mkEq res.lhs res.rhs
mkExpectedTypeHint pf ty
| _ => throwUnsupportedSyntax
end TermCongr
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/ComputeDegree.lean | import Mathlib.Algebra.Polynomial.Degree.Lemmas
/-!
# `compute_degree` and `monicity`: tactics for explicit polynomials
This file defines two related tactics: `compute_degree` and `monicity`.
Using `compute_degree` when the goal is of one of the seven forms
* `natDegree f ≤ d` (or `<`),
* `degree f ≤ d` (or `<`),
* `natDegree f = d`,
* `degree f = d`,
* `coeff f d = r`, if `d` is the degree of `f`,
tries to solve the goal.
It may leave side-goals, in case it is not entirely successful.
Using `monicity` when the goal is of the form `Monic f` tries to solve the goal.
It may leave side-goals, in case it is not entirely successful.
Both tactics admit a `!` modifier (`compute_degree!` and `monicity!`) instructing
Lean to try harder to close the goal.
See the doc-strings for more details.
## Future work
* Currently, `compute_degree` does not deal correctly with some edge cases. For instance,
```lean
example [Semiring R] : natDegree (C 0 : R[X]) = 0 := by
compute_degree
-- ⊢ 0 ≠ 0
```
Still, it may not be worth to provide special support for `natDegree f = 0`.
* Make sure that numerals in coefficients are treated correctly.
* Make sure that `compute_degree` works with goals of the form `degree f ≤ ↑d`, with an
explicit coercion from `ℕ` on the RHS.
* Add support for proving goals of the from `natDegree f ≠ 0` and `degree f ≠ 0`.
* Make sure that `degree`, `natDegree` and `coeff` are equally supported.
## Implementation details
Assume that `f : R[X]` is a polynomial with coefficients in a semiring `R` and
`d` is either in `ℕ` or in `WithBot ℕ`.
If the goal has the form `natDegree f < d`, then we convert it to two separate goals:
* `natDegree f ≤ ?_`, on which we apply the following steps;
* `?_ < d`;
where `?_` is a metavariable that `compute_degree` computes in its process.
We proceed similarly for `degree f < d`.
If the goal has the form `natDegree f = d`, then we convert it to three separate goals:
* `natDegree f ≤ d`;
* `coeff f d = r`;
* `r ≠ 0`.
Similarly, an initial goal of the form `degree f = d` gives rise to goals of the form
* `degree f ≤ d`;
* `coeff f d = r`;
* `r ≠ 0`.
Next, we apply successively lemmas whose side-goals all have the shape
* `natDegree f ≤ d`;
* `degree f ≤ d`;
* `coeff f d = r`;
plus possibly "numerical" identities and choices of elements in `ℕ`, `WithBot ℕ`, and `R`.
Recursing into `f`, we break apart additions, multiplications, powers, subtractions,...
The leaves of the process are
* numerals, `C a`, `X` and `monomial a n`, to which we assign degree `0`, `1` and `a` respectively;
* `fvar`s `f`, to which we tautologically assign degree `natDegree f`.
-/
open Polynomial
namespace Mathlib.Tactic.ComputeDegree
section recursion_lemmas
/-!
### Simple lemmas about `natDegree`
The lemmas in this section all have the form `natDegree <some form of cast> ≤ 0`.
Their proofs are weakenings of the stronger lemmas `natDegree <same> = 0`.
These are the lemmas called by `compute_degree` on (almost) all the leaves of its recursion.
-/
variable {R : Type*}
section semiring
variable [Semiring R]
theorem natDegree_C_le (a : R) : natDegree (C a) ≤ 0 := (natDegree_C a).le
theorem natDegree_natCast_le (n : ℕ) : natDegree (n : R[X]) ≤ 0 := (natDegree_natCast _).le
theorem natDegree_zero_le : natDegree (0 : R[X]) ≤ 0 := natDegree_zero.le
theorem natDegree_one_le : natDegree (1 : R[X]) ≤ 0 := natDegree_one.le
theorem coeff_add_of_eq {n : ℕ} {a b : R} {f g : R[X]}
(h_add_left : f.coeff n = a) (h_add_right : g.coeff n = b) :
(f + g).coeff n = a + b := by subst ‹_› ‹_›; apply coeff_add
theorem coeff_mul_add_of_le_natDegree_of_eq_ite {d df dg : ℕ} {a b : R} {f g : R[X]}
(h_mul_left : natDegree f ≤ df) (h_mul_right : natDegree g ≤ dg)
(h_mul_left : f.coeff df = a) (h_mul_right : g.coeff dg = b) (ddf : df + dg ≤ d) :
(f * g).coeff d = if d = df + dg then a * b else 0 := by
split_ifs with h
· subst h_mul_left h_mul_right h
exact coeff_mul_add_eq_of_natDegree_le ‹_› ‹_›
· apply coeff_eq_zero_of_natDegree_lt
apply lt_of_le_of_lt ?_ (lt_of_le_of_ne ddf ?_)
· exact natDegree_mul_le_of_le ‹_› ‹_›
· exact ne_comm.mp h
theorem coeff_pow_of_natDegree_le_of_eq_ite' {m n o : ℕ} {a : R} {p : R[X]}
(h_pow : natDegree p ≤ n) (h_exp : m * n ≤ o) (h_pow_bas : coeff p n = a) :
coeff (p ^ m) o = if o = m * n then a ^ m else 0 := by
split_ifs with h
· subst h h_pow_bas
exact coeff_pow_of_natDegree_le ‹_›
· apply coeff_eq_zero_of_natDegree_lt
apply lt_of_le_of_lt ?_ (lt_of_le_of_ne ‹_› ?_)
· exact natDegree_pow_le_of_le m ‹_›
· exact Iff.mp ne_comm h
section SMul
variable {S : Type*} [SMulZeroClass S R] {n : ℕ} {a : S} {f : R[X]}
theorem natDegree_smul_le_of_le (hf : natDegree f ≤ n) :
natDegree (a • f) ≤ n :=
(natDegree_smul_le a f).trans hf
theorem degree_smul_le_of_le (hf : degree f ≤ n) :
degree (a • f) ≤ n :=
(degree_smul_le a f).trans hf
theorem coeff_smul : (a • f).coeff n = a • f.coeff n := rfl
end SMul
section congr_lemmas
/-- The following two lemmas should be viewed as a hand-made "congr"-lemmas.
They achieve the following goals.
* They introduce *two* fresh metavariables replacing the given one `deg`,
one for the `natDegree ≤` computation and one for the `coeff =` computation.
This helps `compute_degree`, since it does not "pre-estimate" the degree,
but it "picks it up along the way".
* They split checking the inequality `coeff p n ≠ 0` into the task of
finding a value `c` for the `coeff` and then
proving that this value is non-zero by `coeff_ne_zero`.
-/
theorem natDegree_eq_of_le_of_coeff_ne_zero' {deg m o : ℕ} {c : R} {p : R[X]}
(h_natDeg_le : natDegree p ≤ m) (coeff_eq : coeff p o = c)
(coeff_ne_zero : c ≠ 0) (deg_eq_deg : m = deg) (coeff_eq_deg : o = deg) :
natDegree p = deg := by
subst coeff_eq deg_eq_deg coeff_eq_deg
exact natDegree_eq_of_le_of_coeff_ne_zero ‹_› ‹_›
theorem degree_eq_of_le_of_coeff_ne_zero' {deg m o : WithBot ℕ} {c : R} {p : R[X]}
(h_deg_le : degree p ≤ m) (coeff_eq : coeff p (WithBot.unbotD 0 deg) = c)
(coeff_ne_zero : c ≠ 0) (deg_eq_deg : m = deg) (coeff_eq_deg : o = deg) :
degree p = deg := by
subst coeff_eq coeff_eq_deg deg_eq_deg
rcases eq_or_ne m ⊥ with rfl | hh
· exact bot_unique h_deg_le
· obtain ⟨m, rfl⟩ := WithBot.ne_bot_iff_exists.mp hh
exact degree_eq_of_le_of_coeff_ne_zero ‹_› ‹_›
variable {m n : ℕ} {f : R[X]} {r : R}
theorem coeff_congr_lhs (h : coeff f m = r) (natDeg_eq_coeff : m = n) : coeff f n = r :=
natDeg_eq_coeff ▸ h
theorem coeff_congr (h : coeff f m = r) (natDeg_eq_coeff : m = n) {s : R} (rs : r = s) :
coeff f n = s :=
natDeg_eq_coeff ▸ rs ▸ h
end congr_lemmas
end semiring
section ring
variable [Ring R]
theorem natDegree_intCast_le (n : ℤ) : natDegree (n : R[X]) ≤ 0 := (natDegree_intCast _).le
theorem coeff_sub_of_eq {n : ℕ} {a b : R} {f g : R[X]} (hf : f.coeff n = a) (hg : g.coeff n = b) :
(f - g).coeff n = a - b := by subst hf hg; apply coeff_sub
theorem coeff_intCast_ite {n : ℕ} {a : ℤ} : (Int.cast a : R[X]).coeff n = ite (n = 0) a 0 := by
simp only [← C_eq_intCast, coeff_C, Int.cast_ite, Int.cast_zero]
end ring
end recursion_lemmas
section Tactic
open Lean Elab Tactic Meta Expr
/-- `twoHeadsArgs e` takes an `Expr`ession `e` as input and recurses into `e` to make sure
that `e` looks like `lhs ≤ rhs`, `lhs < rhs` or `lhs = rhs` and that `lhs` is one of
`natDegree f, degree f, coeff f d`.
It returns
* the function being applied on the LHS (`natDegree`, `degree`, or `coeff`),
or else `.anonymous` if it's none of these;
* the name of the relation (`Eq`, `LE.le` or `LT.lt`), or else `.anonymous` if it's none of these;
* either
* `.inl zero`, `.inl one`, or `.inl many` if the polynomial in a numeral;
* or `.inr` of the head symbol of `f`;
* or `.inl .anonymous` if inapplicable;
* if it exists, whether the `rhs` is a metavariable;
* if the LHS is `coeff f d`, whether `d` is a metavariable.
This is all the data needed to figure out whether `compute_degree` can make progress on `e`
and, if so, which lemma it should apply.
Sample outputs:
* `natDegree (f + g) ≤ d => (natDegree, LE.le, HAdd.hAdd, d.isMVar, none)` (similarly for `=`);
* `degree (f * g) = d => (degree, Eq, HMul.hMul, d.isMVar, none)` (similarly for `≤`);
* `coeff (1 : ℕ[X]) c = x => (coeff, Eq, one, x.isMVar, c.isMVar)` (no `≤` option!).
-/
def twoHeadsArgs (e : Expr) : Name × Name × (Name ⊕ Name) × List Bool := Id.run do
let (eq_or_le, lhs, rhs) ← match e.getAppFnArgs with
| (na@``Eq, #[_, lhs, rhs]) => pure (na, lhs, rhs)
| (na@``LE.le, #[_, _, lhs, rhs]) => pure (na, lhs, rhs)
| (na@``LT.lt, #[_, _, lhs, rhs]) => pure (na, lhs, rhs)
| _ => return (.anonymous, .anonymous, .inl .anonymous, [])
let (ndeg_or_deg_or_coeff, pol, and?) ← match lhs.getAppFnArgs with
| (na@``Polynomial.natDegree, #[_, _, pol]) => (na, pol, [rhs.isMVar])
| (na@``Polynomial.degree, #[_, _, pol]) => (na, pol, [rhs.isMVar])
| (na@``Polynomial.coeff, #[_, _, pol, c]) => (na, pol, [rhs.isMVar, c.isMVar])
| _ => return (.anonymous, eq_or_le, .inl .anonymous, [])
let head := match pol.numeral? with
-- can I avoid the tri-splitting `n = 0`, `n = 1`, and generic `n`?
| some 0 => .inl `zero
| some 1 => .inl `one
| some _ => .inl `many
| none => match pol.getAppFnArgs with
| (``DFunLike.coe, #[_, _, _, _, polFun, _]) =>
let na := polFun.getAppFn.constName
if na ∈ [``Polynomial.monomial, ``Polynomial.C] then
.inr na
else
.inl .anonymous
| (na, _) => .inr na
(ndeg_or_deg_or_coeff, eq_or_le, head, and?)
/--
`getCongrLemma (lhs_name, rel_name, Mvars?)` returns the name of a lemma that preprocesses
one of the seven targets
* `natDegree f ≤ d`;
* `natDegree f < d`;
* `natDegree f = d`;
* `degree f ≤ d`;
* `degree f < d`;
* `degree f = d`.
* `coeff f d = r`.
The end goals are of the form
* `natDegree f ≤ ?_`, `degree f ≤ ?_`, `coeff f ?_ = ?_`, or `?_ < d` with fresh metavariables;
* `coeff f m ≠ s` with `m, s` not necessarily metavariables;
* several equalities/inequalities between expressions and assignments for metavariables.
`getCongrLemma` gets called at the very beginning of `compute_degree` and whenever an intermediate
goal does not have the right metavariables.
Note that the side-goals of the congruence lemma are neither of the form `natDegree f = d` nor
of the form `degree f = d`.
`getCongrLemma` admits an optional "debug" flag: `getCongrLemma data true` prints the name of
the congruence lemma that it returns.
-/
def getCongrLemma (twoH : Name × Name × List Bool) (debug : Bool := false) : Name :=
let nam := match twoH with
| (_, ``LE.le, [rhs]) => if rhs then ``id else ``le_trans
| (_, ``LT.lt, [rhs]) => if rhs then ``id else ``lt_of_le_of_lt
| (``natDegree, ``Eq, [rhs]) => if rhs then ``id else ``natDegree_eq_of_le_of_coeff_ne_zero'
| (``degree, ``Eq, [rhs]) => if rhs then ``id else ``degree_eq_of_le_of_coeff_ne_zero'
| (``coeff, ``Eq, [rhs, c]) =>
match rhs, c with
| false, false => ``coeff_congr
| false, true => ``Eq.trans
| true, false => ``coeff_congr_lhs
| true, true => ``id
| _ => ``id
if debug then
let last := nam.lastComponentAsString
let natr := if last == "trans" then nam.toString else last
dbg_trace f!"congr lemma: '{natr}'"
nam
else
nam
/--
`dispatchLemma twoH` takes its input `twoH` from the output of `twoHeadsArgs`.
Using the information contained in `twoH`, it decides which lemma is the most appropriate.
`dispatchLemma` is essentially the main dictionary for `compute_degree`.
-/
-- Internally, `dispatchLemma` produces 3 names: these are the lemmas that are appropriate
-- for goals of the form `natDegree f ≤ d`, `degree f ≤ d`, `coeff f d = a`, in this order.
def dispatchLemma
(twoH : Name × Name × (Name ⊕ Name) × List Bool) (debug : Bool := false) : Name :=
match twoH with
| (.anonymous, _, _) => ``id -- `twoH` gave default value, so we do nothing
| (_, .anonymous, _) => ``id -- `twoH` gave default value, so we do nothing
| (na1, na2, head, bools) =>
let msg := f!"\ndispatchLemma:\n {head}"
-- if there is some non-metavariable on the way, we "congr" it away
if false ∈ bools then getCongrLemma (na1, na2, bools) debug
else
-- otherwise, we select either the first, second or third element of the triple in `nas` below
let π (natDegLE : Name) (degLE : Name) (coeff : Name) : Name := Id.run do
let lem := match na1, na2 with
| ``natDegree, ``LE.le => natDegLE
| ``degree, ``LE.le => degLE
| ``coeff, ``Eq => coeff
| _, ``LE.le => ``le_rfl
| _, _ => ``rfl
if debug then
dbg_trace f!"{lem.lastComponentAsString}\n{msg}"
lem
match head with
| .inl `zero => π ``natDegree_zero_le ``degree_zero_le ``coeff_zero
| .inl `one => π ``natDegree_one_le ``degree_one_le ``coeff_one
| .inl `many => π ``natDegree_natCast_le ``degree_natCast_le ``coeff_natCast_ite
| .inl .anonymous => π ``le_rfl ``le_rfl ``rfl
| .inr ``HAdd.hAdd =>
π ``natDegree_add_le_of_le ``degree_add_le_of_le ``coeff_add_of_eq
| .inr ``HSub.hSub =>
π ``natDegree_sub_le_of_le ``degree_sub_le_of_le ``coeff_sub_of_eq
| .inr ``HMul.hMul =>
π ``natDegree_mul_le_of_le ``degree_mul_le_of_le ``coeff_mul_add_of_le_natDegree_of_eq_ite
| .inr ``HPow.hPow =>
π ``natDegree_pow_le_of_le ``degree_pow_le_of_le ``coeff_pow_of_natDegree_le_of_eq_ite'
| .inr ``Neg.neg =>
π ``natDegree_neg_le_of_le ``degree_neg_le_of_le ``coeff_neg
| .inr ``Polynomial.X =>
π ``natDegree_X_le ``degree_X_le ``coeff_X
| .inr ``Nat.cast =>
π ``natDegree_natCast_le ``degree_natCast_le ``coeff_natCast_ite
| .inr ``NatCast.natCast =>
π ``natDegree_natCast_le ``degree_natCast_le ``coeff_natCast_ite
| .inr ``Int.cast =>
π ``natDegree_intCast_le ``degree_intCast_le ``coeff_intCast_ite
| .inr ``IntCast.intCast =>
π ``natDegree_intCast_le ``degree_intCast_le ``coeff_intCast_ite
| .inr ``Polynomial.monomial =>
π ``natDegree_monomial_le ``degree_monomial_le ``coeff_monomial
| .inr ``Polynomial.C =>
π ``natDegree_C_le ``degree_C_le ``coeff_C
| .inr ``HSMul.hSMul =>
π ``natDegree_smul_le_of_le ``degree_smul_le_of_le ``coeff_smul
| _ => π ``le_rfl ``le_rfl ``rfl
/-- `try_rfl mvs` takes as input a list of `MVarId`s, scans them partitioning them into two
lists: the goals containing some metavariables and the goals not containing any metavariable.
If a goal containing a metavariable has the form `?_ = x`, `x = ?_`, where `?_` is a metavariable
and `x` is an expression that does not involve metavariables, then it closes this goal using `rfl`,
effectively assigning the metavariable to `x`.
If a goal does not contain metavariables, it tries `rfl` on it.
It returns the list of `MVarId`s, beginning with the ones that initially involved (`Expr`)
metavariables followed by the rest.
-/
def try_rfl (mvs : List MVarId) : MetaM (List MVarId) := do
let (yesMV, noMV) := ← mvs.partitionM fun mv =>
return hasExprMVar (← instantiateMVars (← mv.getDecl).type)
let tried_rfl := ← noMV.mapM fun g => g.applyConst ``rfl <|> return [g]
let assignable := ← yesMV.mapM fun g => do
let tgt := ← instantiateMVars (← g.getDecl).type
match tgt.eq? with
| some (_, lhs, rhs) =>
if (isMVar rhs && (! hasExprMVar lhs)) ||
(isMVar lhs && (! hasExprMVar rhs)) then
g.applyConst ``rfl
else pure [g]
| none =>
return [g]
return (assignable.flatten ++ tried_rfl.flatten)
/--
`splitApply mvs static` takes two lists of `MVarId`s. The first list, `mvs`,
corresponds to goals that are potentially within the scope of `compute_degree`:
namely, goals of the form
`natDegree f ≤ d`, `degree f ≤ d`, `natDegree f = d`, `degree f = d`, `coeff f d = r`.
`splitApply` determines which of these goals are actually within the scope, it applies the relevant
lemma and returns two lists: the left-over goals of all the applications, followed by the
concatenation of the previous `static` list, followed by the newly discovered goals outside of the
scope of `compute_degree`. -/
def splitApply (mvs static : List MVarId) : MetaM ((List MVarId) × (List MVarId)) := do
let (can_progress, curr_static) := ← mvs.partitionM fun mv => do
return dispatchLemma (twoHeadsArgs (← mv.getType'')) != ``id
let progress := ← can_progress.mapM fun mv => do
let lem := dispatchLemma <| twoHeadsArgs (← mv.getType'')
mv.applyConst <| lem
return (progress.flatten, static ++ curr_static)
/-- `miscomputedDegree? deg false_goals` takes as input
* an `Expr`ession `deg`, representing the degree of a polynomial
(i.e. an `Expr`ession of inferred type either `ℕ` or `WithBot ℕ`);
* a list of `MVarId`s `false_goals`.
Although inconsequential for this function, the list of goals `false_goals` reduces to `False`
if `norm_num`med.
`miscomputedDegree?` extracts error information from goals of the form
* `a ≠ b`, assuming it comes from `⊢ coeff_of_given_degree ≠ 0`
--- reducing to `False` means that the coefficient that was supposed to vanish, does not;
* `a ≤ b`, assuming it comes from `⊢ degree_of_subterm ≤ degree_of_polynomial`
--- reducing to `False` means that there is a term of degree that is apparently too large;
* `a = b`, assuming it comes from `⊢ computed_degree ≤ given_degree`
--- reducing to `False` means that there is a term of degree that is apparently too large.
The cases `a ≠ b` and `a = b` are not a perfect match with the top coefficient:
reducing to `False` is not exactly correlated with a coefficient being non-zero.
It does mean that `compute_degree` reduced the initial goal to an unprovable state
(unless there was already a contradiction in the initial hypotheses!), but it is indicative that
there may be some problem.
-/
def miscomputedDegree? (deg : Expr) : List Expr → List MessageData
| tgt::tgts =>
let rest := miscomputedDegree? deg tgts
if tgt.ne?.isSome then
m!"* the coefficient of degree {deg} may be zero" :: rest
else if let some ((Expr.const ``Nat []), lhs, _) := tgt.le? then
m!"* there is at least one term of naïve degree {lhs}" :: rest
else if let some (_, lhs, _) := tgt.eq? then
m!"* there may be a term of naïve degree {lhs}" :: rest
else rest
| [] => []
/--
`compute_degree` is a tactic to solve goals of the form
* `natDegree f = d`,
* `degree f = d`,
* `natDegree f ≤ d` (or `<`),
* `degree f ≤ d` (or `<`),
* `coeff f d = r`, if `d` is the degree of `f`.
The tactic may leave goals of the form `d' = d`, `d' ≤ d`, `d' < d`, or `r ≠ 0`, where `d'` in `ℕ`
or `WithBot ℕ` is the tactic's guess of the degree, and `r` is the coefficient's guess of the
leading coefficient of `f`.
`compute_degree` applies `norm_num` to the left-hand side of all side goals, trying to close them.
The variant `compute_degree!` first applies `compute_degree`.
Then it uses `norm_num` on all the remaining goals and tries `assumption`.
-/
syntax (name := computeDegree) "compute_degree" "!"? : tactic
initialize registerTraceClass `Tactic.compute_degree
@[inherit_doc computeDegree]
macro "compute_degree!" : tactic => `(tactic| compute_degree !)
elab_rules : tactic | `(tactic| compute_degree $[!%$bang]?) => focus <| withMainContext do
let goal ← getMainGoal
let gt ← goal.getType''
let deg? := match gt.eq? with
| some (_, _, rhs) => some rhs
| _ => none
let twoH := twoHeadsArgs gt
match twoH with
| (_, .anonymous, _) => throwError m!"'compute_degree' inapplicable. \
The goal{indentD gt}\nis expected to be '≤', '<' or '='."
| (.anonymous, _, _) => throwError m!"'compute_degree' inapplicable. \
The LHS must be an application of 'natDegree', 'degree', or 'coeff'."
| _ =>
let lem := dispatchLemma twoH
trace[Tactic.compute_degree]
f!"'compute_degree' first applies lemma '{lem.lastComponentAsString}'"
let mut (gls, static) := (← goal.applyConst lem, [])
while gls != [] do (gls, static) ← splitApply gls static
let rfled ← try_rfl static
setGoals rfled
-- simplify the left-hand sides, since this is where the degree computations leave
-- expressions such as `max (0 * 1) (max (1 + 0 + 3 * 4) (7 * 0))`
evalTactic
(← `(tactic| try any_goals conv_lhs =>
(simp +decide only [Nat.cast_withBot]; norm_num)))
if bang.isSome then
let mut false_goals : Array MVarId := #[]
let mut new_goals : Array MVarId := #[]
for g in ← getGoals do
let gs' ← run g do evalTactic (←
`(tactic| try (any_goals norm_num <;> norm_cast <;> try assumption)))
new_goals := new_goals ++ gs'.toArray
if ← gs'.anyM fun g' => g'.withContext do return (← g'.getType'').isConstOf ``False then
false_goals := false_goals.push g
setGoals new_goals.toList
if let some deg := deg? then
let errors := miscomputedDegree? deg (← false_goals.mapM (MVarId.getType'' ·)).toList
unless errors.isEmpty do
throwError Lean.MessageData.joinSep
(m!"The given degree is '{deg}'. However,\n" :: errors) "\n"
/-- `monicity` tries to solve a goal of the form `Monic f`.
It converts the goal into a goal of the form `natDegree f ≤ n` and one of the form `f.coeff n = 1`
and calls `compute_degree` on those two goals.
The variant `monicity!` starts like `monicity`, but calls `compute_degree!` on the two side-goals.
-/
macro (name := monicityMacro) "monicity" : tactic =>
`(tactic| (apply monic_of_natDegree_le_of_coeff_eq_one <;> compute_degree))
@[inherit_doc monicityMacro]
macro "monicity!" : tactic =>
`(tactic| (apply monic_of_natDegree_le_of_coeff_eq_one <;> compute_degree!))
end Tactic
end Mathlib.Tactic.ComputeDegree
/-!
We register `compute_degree` with the `hint` tactic.
-/
register_hint 1000 compute_degree |
.lake/packages/mathlib/Mathlib/Tactic/StacksAttribute.lean | import Lean.Elab.Command
import Mathlib.Init
/-!
# The `stacks` and `kerodon` attributes
This allows tagging of mathlib results with the corresponding
tags from the [Stacks Project](https://stacks.math.columbia.edu/tags) and
[Kerodon](https://kerodon.net/tag/).
While the Stacks Project is the main focus, because the tag format at Kerodon is
compatible, the attribute can be used to tag results with Kerodon tags as well.
-/
open Lean Elab
namespace Mathlib.StacksTag
/-- The online databases that host project tags -/
inductive Database where
| kerodon
| stacks
deriving BEq, Hashable
/-- `Tag` is the structure that carries the data of a project tag and a corresponding
Mathlib declaration. -/
structure Tag where
/-- The name of the declaration with the given tag. -/
declName : Name
/-- The online database where the tag is found. -/
database : Database
/-- The database tag. -/
tag : String
/-- The (optional) comment that comes with the given tag. -/
comment : String
deriving BEq, Hashable
/-- Defines the `tagExt` extension for adding a `HashSet` of `Tag`s
to the environment. -/
initialize tagExt : SimplePersistentEnvExtension Tag (Std.HashSet Tag) ←
registerSimplePersistentEnvExtension {
addImportedFn := fun as => as.foldl Std.HashSet.insertMany {}
addEntryFn := .insert
}
/--
`addTagEntry declName db tag comment` takes as input the `Name` `declName` of a declaration,
the `Database` `db`, and the `String`s `tag` and `comment` of the `stacks` attribute.
It extends the `Tag` environment extension with the data `declName, tag, comment`.
-/
def addTagEntry {m : Type → Type} [MonadEnv m]
(declName : Name) (db : Database) (tag comment : String) : m Unit :=
modifyEnv (tagExt.addEntry ·
{ declName := declName, database := db, tag := tag, comment := comment })
open Parser
/-- `stacksTag` is the node kind of Stacks Project Tags: a sequence of digits and
uppercase letters. -/
abbrev stacksTagKind : SyntaxNodeKind := `stacksTag
/-- The main parser for Stacks Project Tags: it accepts any sequence of 4 digits or
uppercase letters. -/
def stacksTagFn : ParserFn := fun c s =>
let i := s.pos
let s := takeWhileFn (fun c => c.isAlphanum) c s
if s.hasError then
s
else if s.pos == i then
ParserState.mkError s "stacks tag"
else
let tag := c.extract i s.pos
if !tag.all fun c => c.isDigit || c.isUpper then
ParserState.mkUnexpectedError s
"Stacks tags must consist only of digits and uppercase letters."
else if tag.length != 4 then
ParserState.mkUnexpectedError s "Stacks tags must be exactly 4 characters"
else
mkNodeToken stacksTagKind i true c s
@[inherit_doc stacksTagFn]
def stacksTagNoAntiquot : Parser := {
fn := stacksTagFn
info := mkAtomicInfo "stacksTag"
}
@[inherit_doc stacksTagFn]
def stacksTagParser : Parser :=
withAntiquot (mkAntiquot "stacksTag" stacksTagKind) stacksTagNoAntiquot
end Mathlib.StacksTag
open Mathlib.StacksTag
/-- Extract the underlying tag as a string from a `stacksTag` node. -/
def Lean.TSyntax.getStacksTag (stx : TSyntax stacksTagKind) : CoreM String := do
let some val := Syntax.isLit? stacksTagKind stx | throwError "Malformed Stacks tag"
return val
namespace Lean.PrettyPrinter
namespace Formatter
/-- The formatter for Stacks Project Tags syntax. -/
@[combinator_formatter stacksTagNoAntiquot] def stacksTagNoAntiquot.formatter :=
visitAtom stacksTagKind
end Formatter
namespace Parenthesizer
/-- The parenthesizer for Stacks Project Tags syntax. -/
@[combinator_parenthesizer stacksTagNoAntiquot] def stacksTagAntiquot.parenthesizer := visitToken
end Lean.PrettyPrinter.Parenthesizer
namespace Mathlib.StacksTag
/-- The syntax category for the database name. -/
declare_syntax_cat stacksTagDB
/-- The syntax for a "kerodon" database identifier in a `@[kerodon]` attribute. -/
syntax "kerodon" : stacksTagDB
/-- The syntax for a "stacks" database identifier in a `@[stacks]` attribute. -/
syntax "stacks" : stacksTagDB
/-- The `stacksTag` attribute.
Use it as `@[kerodon TAG "Optional comment"]` or `@[stacks TAG "Optional comment"]`
depending on the database you are referencing.
The `TAG` is mandatory and should be a sequence of 4 digits or uppercase letters.
See the [Tags page](https://stacks.math.columbia.edu/tags) in the Stacks project or
[Tags page](https://kerodon.net/tag/) in the Kerodon project for more details.
-/
syntax (name := stacksTag) stacksTagDB stacksTagParser (ppSpace str)? : attr
initialize Lean.registerBuiltinAttribute {
name := `stacksTag
descr := "Apply a Stacks or Kerodon project tag to a theorem."
add := fun decl stx _attrKind => do
let oldDoc := (← findDocString? (← getEnv) decl).getD ""
let (SorK, database, url, tag, comment) := ← match stx with
| `(attr| stacks $tag $[$comment]?) =>
return ("Stacks", Database.stacks, "https://stacks.math.columbia.edu/tag", tag, comment)
| `(attr| kerodon $tag $[$comment]?) =>
return ("Kerodon", Database.kerodon, "https://kerodon.net/tag", tag, comment)
| _ => throwUnsupportedSyntax
let tagStr ← tag.getStacksTag
let comment := (comment.map (·.getString)).getD ""
let commentInDoc := if comment = "" then "" else s!" ({comment})"
let newDoc := [oldDoc, s!"[{SorK} Tag {tagStr}]({url}/{tagStr}){commentInDoc}"]
addDocStringCore decl <| "\n\n".intercalate (newDoc.filter (· != ""))
addTagEntry decl database tagStr <| comment
-- docstrings are immutable once an asynchronous elaboration task has been started
applicationTime := .beforeElaboration
}
end Mathlib.StacksTag
/--
`getSortedStackProjectTags env` returns the array of `Tags`, sorted by alphabetical order of tag.
-/
private def Lean.Environment.getSortedStackProjectTags (env : Environment) : Array Tag :=
tagExt.getState env |>.toArray.qsort (·.tag < ·.tag)
/--
`getSortedStackProjectDeclNames env tag` returns the array of declaration names of results
with Stacks Project tag equal to `tag`.
-/
private def Lean.Environment.getSortedStackProjectDeclNames (env : Environment) (tag : String) :
Array Name :=
let tags := env.getSortedStackProjectTags
tags.filterMap fun d => if d.tag == tag then some d.declName else none
namespace Mathlib.StacksTag
private def databaseURL (db : Database) : String :=
match db with
| .kerodon => "https://kerodon.net/tag/"
| .stacks => "https://stacks.math.columbia.edu/tag/"
/--
`traceStacksTags db verbose` prints the tags of the database `db` to the user and
inlines the theorem statements if `verbose` is `true`.
-/
def traceStacksTags (db : Database) (verbose : Bool := false) :
Command.CommandElabM Unit := do
let env ← getEnv
let entries := env.getSortedStackProjectTags |>.filter (·.database == db)
if entries.isEmpty then logInfo "No tags found." else
let mut msgs := #[m!""]
for d in entries do
let (parL, parR) := if d.comment.isEmpty then ("", "") else (" (", ")")
let cmt := parL ++ d.comment ++ parR
msgs := msgs.push
m!"[Stacks Tag {d.tag}]({databaseURL db ++ d.tag}) \
corresponds to declaration '{.ofConstName d.declName}'.{cmt}"
if verbose then
let dType := ((env.find? d.declName).getD default).type
msgs := (msgs.push m!"{dType}").push ""
let msg := MessageData.joinSep msgs.toList "\n"
logInfo msg
/--
`#stacks_tags` retrieves all declarations that have the `stacks` attribute.
For each found declaration, it prints a line
```
'declaration_name' corresponds to tag 'declaration_tag'.
```
The variant `#stacks_tags!` also adds the theorem statement after each summary line.
-/
elab (name := stacksTags) "#stacks_tags" tk:("!")?: command =>
traceStacksTags .stacks (tk.isSome)
/-- The `#kerodon_tags` command retrieves all declarations that have the `kerodon` attribute.
For each found declaration, it prints a line
```
'declaration_name' corresponds to tag 'declaration_tag'.
```
The variant `#kerodon_tags!` also adds the theorem statement after each summary line.
-/
elab (name := kerodonTags) "#kerodon_tags" tk:("!")?: command =>
traceStacksTags .kerodon (tk.isSome)
end Mathlib.StacksTag |
.lake/packages/mathlib/Mathlib/Tactic/Contrapose.lean | import Mathlib.Tactic.Push
/-! # Contrapose
The `contrapose` tactic transforms the goal into its contrapositive when that goal is an
implication.
* `contrapose` turns a goal `P → Q` into `¬ Q → ¬ P`
* `contrapose!` turns a goal `P → Q` into `¬ Q → ¬ P` and pushes negations inside `P` and `Q`
using `push_neg`
* `contrapose h` first reverts the local assumption `h`, and then uses `contrapose` and `intro h`
* `contrapose! h` first reverts the local assumption `h`, and then uses `contrapose!` and `intro h`
* `contrapose h with new_h` uses the name `new_h` for the introduced hypothesis
-/
namespace Mathlib.Tactic.Contrapose
lemma mtr {p q : Prop} : (¬ q → ¬ p) → (p → q) := fun h hp ↦ by_contra (fun h' ↦ h h' hp)
/--
Transforms the goal into its contrapositive.
* `contrapose` turns a goal `P → Q` into `¬ Q → ¬ P`
* `contrapose h` first reverts the local assumption `h`, and then uses `contrapose` and `intro h`
* `contrapose h with new_h` uses the name `new_h` for the introduced hypothesis
-/
syntax (name := contrapose) "contrapose" (ppSpace colGt ident (" with " ident)?)? : tactic
macro_rules
| `(tactic| contrapose) => `(tactic| (refine mtr ?_))
| `(tactic| contrapose $e) => `(tactic| (revert $e:ident; contrapose; intro $e:ident))
| `(tactic| contrapose $e with $e') => `(tactic| (revert $e:ident; contrapose; intro $e':ident))
/--
Transforms the goal into its contrapositive and pushes negations inside `P` and `Q`
using `push_neg`.
Usage matches `contrapose`
-/
syntax (name := contrapose!) "contrapose!" (ppSpace colGt ident (" with " ident)?)? : tactic
macro_rules
| `(tactic| contrapose!) => `(tactic| (contrapose; try push_neg))
| `(tactic| contrapose! $e) => `(tactic| (revert $e:ident; contrapose!; intro $e:ident))
| `(tactic| contrapose! $e with $e') => `(tactic| (revert $e:ident; contrapose!; intro $e':ident))
end Mathlib.Tactic.Contrapose |
.lake/packages/mathlib/Mathlib/Tactic/Finiteness.lean | import Mathlib.Tactic.Positivity.Core
/-!
# Finiteness tactic
This file implements a basic `finiteness` tactic, designed to solve goals of the form `*** < ∞` and
(equivalently) `*** ≠ ∞` in the extended nonnegative reals (`ENNReal`, aka `ℝ≥0∞`).
It works recursively according to the syntax of the expression. It is implemented as an `aesop` rule
set.
## Syntax
Standard `aesop` syntax applies. Namely one can write
* `finiteness (add unfold [def1, def2])` to make `finiteness` unfold `def1`, `def2`
* `finiteness?` for the tactic to show what proof it found
* etc
* Note that `finiteness` disables `simp`, so `finiteness (add simp [lemma1, lemma2])` does not do
anything more than a bare `finiteness`.
We also provide `finiteness_nonterminal` as a version of `finiteness` that doesn't have to close the
goal.
Note to users: when tagging a lemma for finiteness, prefer tagging a lemma with `≠ ⊤`.
Aesop can deduce `< ∞` from `≠ ∞` safely (`Ne.lt_top` is a safe rule), but not conversely
(`ne_top_of_lt` is an unsafe rule): in simpler words, aesop tries to use `≠` as its intermediate
representation that things are finite, so we do so as well.
## TODO
Improve `finiteness` to also deal with other situations, such as balls in proper spaces with
a locally finite measure.
-/
open Aesop.BuiltinRules in
attribute [aesop (rule_sets := [finiteness]) safe -50] assumption intros
set_option linter.unusedTactic false in
add_aesop_rules safe tactic (rule_sets := [finiteness]) (by positivity)
/-- Tactic to solve goals of the form `*** < ∞` and (equivalently) `*** ≠ ∞` in the extended
nonnegative reals (`ℝ≥0∞`). -/
macro (name := finiteness) "finiteness" c:Aesop.tactic_clause* : tactic =>
`(tactic|
aesop $c*
(config := { introsTransparency? := some .reducible, terminal := true, enableSimp := false })
(rule_sets := [$(Lean.mkIdent `finiteness):ident, -default, -builtin]))
/-- Tactic to solve goals of the form `*** < ∞` and (equivalently) `*** ≠ ∞` in the extended
nonnegative reals (`ℝ≥0∞`). -/
macro (name := finiteness?) "finiteness?" c:Aesop.tactic_clause* : tactic =>
`(tactic|
aesop? $c*
(config := { introsTransparency? := some .reducible, terminal := true, enableSimp := false })
(rule_sets := [$(Lean.mkIdent `finiteness):ident, -default, -builtin]))
/-- Tactic to solve goals of the form `*** < ∞` and (equivalently) `*** ≠ ∞` in the extended
nonnegative reals (`ℝ≥0∞`). -/
macro (name := finiteness_nonterminal) "finiteness_nonterminal" c:Aesop.tactic_clause* : tactic =>
`(tactic|
aesop $c*
(config := { introsTransparency? := some .reducible, terminal := false, enableSimp := false,
warnOnNonterminal := false })
(rule_sets := [$(Lean.mkIdent `finiteness):ident, -default, -builtin]))
/-!
We register `finiteness` with the `hint` tactic.
-/
register_hint 1000 finiteness |
.lake/packages/mathlib/Mathlib/Tactic/MinImports.lean | import Lean.Elab.DefView
import Lean.Util.CollectAxioms
import ImportGraph.Imports
import ImportGraph.RequiredModules
-- Import this linter explicitly to ensure that
-- this file has a valid copyright header and module docstring.
import Mathlib.Tactic.Linter.Header
/-! # `#min_imports in` a command to find minimal imports
`#min_imports in stx` scans the syntax `stx` to find a collection of minimal imports that should be
sufficient for `stx` to make sense.
If `stx` is a command, then it also elaborates `stx` and, in case it is a declaration, then
it also finds the imports implied by the declaration.
Unlike the related `#find_home`, this command takes into account notation and tactic information.
## Limitations
Parsing of `attribute`s is hard and the command makes minimal effort to support them.
Here is an example where the command fails to notice a dependency:
```lean
import Mathlib.Data.Sym.Sym2.Init -- the actual minimal import
import Aesop.Frontend.Attribute -- the import that `#min_imports in` suggests
import Mathlib.Tactic.MinImports
-- import Aesop.Frontend.Attribute
#min_imports in
@[aesop (rule_sets := [Sym2]) [safe [constructors, cases], norm]]
inductive Rel (α : Type) : α × α → α × α → Prop
| refl (x y : α) : Rel _ (x, y) (x, y)
| swap (x y : α) : Rel _ (x, y) (y, x)
-- `import Mathlib.Data.Sym.Sym2.Init` is not detected by `#min_imports in`.
```
## Todo
*Examples*
When parsing an `example`, `#min_imports in` retrieves all the information that it can from the
`Syntax` of the `example`, but, since the `example` is not added to the environment, it fails
to retrieve any `Expr` information about the proof term.
It would be desirable to make `#min_imports in example ...` inspect the resulting proof and
report imports, but this feature is missing for the moment.
*Using `InfoTrees`*
It may be more efficient (not necessarily in terms of speed, but of simplicity of code),
to inspect the `InfoTrees` for each command and retrieve information from there.
I have not looked into this yet.
-/
open Lean Elab Command
namespace Mathlib.Command.MinImports
/-- `getSyntaxNodeKinds stx` takes a `Syntax` input `stx` and returns the `NameSet` of all the
`SyntaxNodeKinds` and all the identifiers contained in `stx`. -/
partial
def getSyntaxNodeKinds : Syntax → NameSet
| .node _ kind args =>
((args.map getSyntaxNodeKinds).foldl (NameSet.append · ·) {}).insert kind
| .ident _ _ nm _ => NameSet.empty.insert nm
| _ => {}
/-- Extracts the names of the declarations in `env` on which `decl` depends. -/
def getVisited (decl : Name) : CommandElabM NameSet := do
unless (← hasConst decl) do
return {}
-- without resetting the state, the "unused tactics" linter gets confused?
let st ← get
liftCoreM decl.transitivelyUsedConstants <* set st
/-- `getId stx` takes as input a `Syntax` `stx`.
If `stx` contains a `declId`, then it returns the `ident`-syntax for the `declId`.
If `stx` is a nameless instance, then it also tries to fetch the `ident` for the instance.
Otherwise it returns `.missing`. -/
def getId (stx : Syntax) : CommandElabM Syntax := do
-- If the command contains a `declId`, we use the implied `ident`.
match stx.find? (·.isOfKind ``Lean.Parser.Command.declId) with
| some declId => return declId[0]
| none =>
-- Otherwise, the command could be a nameless `instance`.
match stx.find? (·.isOfKind ``Lean.Parser.Command.instance) with
| none => return .missing
| some stx => do
-- if it is a nameless `instance`, we retrieve the autogenerated name
let dv ← mkDefViewOfInstance {} stx
return dv.declId[0]
/-- `getIds stx` extracts all identifiers, collecting them in a `NameSet`. -/
partial
def getIds : Syntax → NameSet
| .node _ _ args => (args.map getIds).foldl (·.append ·) {}
| .ident _ _ nm _ => NameSet.empty.insert nm
| _ => {}
/-- `getAttrNames stx` extracts `attribute`s from `stx`.
It does not collect `simp`, `ext`, `to_additive`.
It should collect almost all other attributes, e.g., `fun_prop`. -/
def getAttrNames (stx : Syntax) : NameSet :=
match stx.find? (·.isOfKind ``Lean.Parser.Term.attributes) with
| none => {}
| some stx => getIds stx
/-- `getAttrs env stx` returns all attribute declaration names contained in `stx` and registered
in the `Environment` `env`. -/
def getAttrs (env : Environment) (stx : Syntax) : NameSet :=
Id.run do
let mut new : NameSet := {}
for attr in (getAttrNames stx) do
match getAttributeImpl env attr with
| .ok attr => new := new.insert attr.ref
| .error .. => pure ()
return new
/-- `previousInstName nm` takes as input a name `nm`, assuming that it is the name of an
auto-generated "nameless" `instance`.
If `nm` ends in `..._n`, where `n` is a number, it returns the same name, but with `_n` replaced
by `_(n-1)`, unless `n ≤ 1`, in which case it simply removes the `_n` suffix.
-/
def previousInstName : Name → Name
| nm@(.str init tail) =>
let last := tail.takeRightWhile (· != '_')
let newTail := match last.toNat? with
| some (n + 2) => s!"_{n + 1}"
| _ => ""
let newTailPrefix := tail.dropRightWhile (· != '_')
if newTailPrefix.isEmpty then nm else
let newTail :=
(if newTailPrefix.back == '_' then newTailPrefix.dropRight 1 else newTailPrefix) ++ newTail
.str init newTail
| nm => nm
/--
`getDeclName cmd` takes a `Syntax` input `cmd` and returns the `Name` of the declaration defined
by `cmd`.
-/
def getDeclName (cmd : Syntax) : CommandElabM Name := do
let ns ← getCurrNamespace
let id1 ← getId cmd
let id2 := mkIdentFrom id1 (previousInstName id1.getId)
let some declStx := cmd.find? (·.isOfKind ``Parser.Command.declaration) | pure default
let some modifiersStx := declStx.find? (·.isOfKind ``Parser.Command.declModifiers) | pure default
let modifiers : TSyntax ``Parser.Command.declModifiers := ⟨modifiersStx⟩
-- the `get`/`set` state catches issues with elaboration of, for instance, `scoped` attributes
let s ← get
let modifiers ← elabModifiers modifiers
set s
liftCoreM do (
-- Try applying the algorithm in `Lean.mkDeclName` to attach a namespace to the name.
-- Unfortunately calling `Lean.mkDeclName` directly won't work: it will complain that there is
-- already a declaration with this name.
(do
let shortName := id1.getId
let view := extractMacroScopes shortName
let name := view.name
let isRootName := (`_root_).isPrefixOf name
let mut fullName := if isRootName then
{ view with name := name.replacePrefix `_root_ Name.anonymous }.review
else
ns ++ shortName
-- Apply name visibility rules: private names get mangled.
match modifiers.visibility with
| .private => return mkPrivateName (← getEnv) fullName
| _ => return fullName) <|>
-- try the visible name or the current "nameless" `instance` name
realizeGlobalConstNoOverload id1 <|>
-- otherwise, guess what the previous "nameless" `instance` name was
realizeGlobalConstNoOverload id2 <|>
-- failing everything, use the current namespace followed by the visible name
return ns ++ id1.getId)
/-- `getAllDependencies cmd id` takes a `Syntax` input `cmd` and returns the `NameSet` of all the
declaration names that are implied by
* the `SyntaxNodeKinds`,
* the attributes of `cmd` (if there are any),
* the identifiers contained in `cmd`,
* if `cmd` adds a declaration `d` to the environment, then also all the module names implied by `d`.
The argument `id` is expected to be an identifier.
It is used either for the internally generated name of a "nameless" `instance` or when parsing
an identifier representing the name of a declaration.
Note that the return value does not contain dependencies of the dependencies;
you can use `Lean.NameSet.transitivelyUsedConstants` to get those.
-/
def getAllDependencies (cmd id : Syntax) :
CommandElabM NameSet := do
let env ← getEnv
let nm ← getDeclName cmd
-- We collect the implied declaration names, the `SyntaxNodeKinds` and the attributes.
return (← getVisited nm)
|>.append (← getVisited id.getId)
|>.append (getSyntaxNodeKinds cmd)
|>.append (getAttrs env cmd)
/-- `getAllImports cmd id` takes a `Syntax` input `cmd` and returns the `NameSet` of all the
module names that are implied by
* the `SyntaxNodeKinds`,
* the attributes of `cmd` (if there are any),
* the identifiers contained in `cmd`,
* if `cmd` adds a declaration `d` to the environment, then also all the module names implied by `d`.
The argument `id` is expected to be an identifier.
It is used either for the internally generated name of a "nameless" `instance` or when parsing
an identifier representing the name of a declaration.
-/
def getAllImports (cmd id : Syntax) (dbg? : Bool := false) :
CommandElabM NameSet := do
let env ← getEnv
-- We collect the implied declaration names, the `SyntaxNodeKinds` and the attributes.
let ts ← getAllDependencies cmd id
if dbg? then dbg_trace "{ts.toArray.qsort Name.lt}"
let mut hm : Std.HashMap Nat Name := {}
for imp in env.header.moduleNames do
hm := hm.insert ((env.getModuleIdx? imp).getD default) imp
let mut fins : NameSet := {}
for t in ts do
let new := match env.getModuleIdxFor? t with
| some t => (hm.get? t).get!
| none => .anonymous -- instead of `getMainModule`, we omit the current module
if !fins.contains new then fins := fins.insert new
return fins.erase .anonymous
/-- `getIrredundantImports env importNames` takes an `Environment` and a `NameSet` as inputs.
Assuming that `importNames` are module names,
it returns the `NameSet` consisting of a minimal collection of module names whose transitive
closure is enough to parse (and elaborate) `cmd`. -/
def getIrredundantImports (env : Environment) (importNames : NameSet) : NameSet :=
importNames \ (env.findRedundantImports importNames.toArray)
/-- `minImpsCore stx id` is the internal function to elaborate the `#min_imports in` command.
It collects the irredundant imports to parse and elaborate `stx` and logs
```lean
import A
import B
...
import Z
```
The `id` input is expected to be the name of the declaration that is currently processed.
It is used to provide the internally generated name for "nameless" `instance`s.
-/
def minImpsCore (stx id : Syntax) : CommandElabM Unit := do
let tot := getIrredundantImports (← getEnv) (← getAllImports stx id)
let fileNames := tot.toArray.qsort Name.lt
logInfoAt (← getRef) m!"{"\n".intercalate (fileNames.map (s!"import {·}")).toList}"
/-- `#min_imports in cmd` scans the syntax `cmd` and the declaration obtained by elaborating `cmd`
to find a collection of minimal imports that should be sufficient for `cmd` to work. -/
syntax (name := minImpsStx) "#min_imports" " in " command : command
@[inherit_doc minImpsStx]
syntax "#min_imports" " in " term : command
elab_rules : command
| `(#min_imports in $cmd:command) => do
-- In case `cmd` is a "nameless" `instance`, we produce its name.
-- It is important that this is collected *before* adding the declaration to the environment,
-- since `getId` probes the name-generator using the current environment: if the declaration
-- were already present, `getId` would return a new name that does not clash with it!
let id ← getId cmd
Elab.Command.elabCommand cmd <|> pure ()
minImpsCore cmd id
| `(#min_imports in $cmd:term) => minImpsCore cmd cmd
end Mathlib.Command.MinImports |
.lake/packages/mathlib/Mathlib/Tactic/LinearCombination.lean | import Mathlib.Tactic.LinearCombination.Lemmas
import Mathlib.Tactic.Positivity.Core
import Mathlib.Tactic.Ring
import Mathlib.Tactic.Ring.Compare
/-!
# linear_combination Tactic
In this file, the `linear_combination` tactic is created. This tactic, which
works over `CommRing`s, attempts to simplify the target by creating a linear combination
of a list of equalities and subtracting it from the target. A `Syntax.Tactic`
object can also be passed into the tactic, allowing the user to specify a
normalization tactic.
Over ordered algebraic objects (such as `LinearOrderedCommRing`), taking linear combinations of
inequalities is also supported.
## Implementation Notes
This tactic works by creating a weighted sum of the given equations with the
given coefficients. Then, it subtracts the right side of the weighted sum
from the left side so that the right side equals 0, and it does the same with
the target. Afterwards, it sets the goal to be the equality between the
left-hand side of the new goal and the left-hand side of the new weighted sum.
Lastly, it calls a normalization tactic on this target.
## References
* <https://leanprover.zulipchat.com/#narrow/stream/239415-metaprogramming-.2F.20tactics/topic/Linear.20algebra.20tactic/near/213928196>
-/
namespace Mathlib.Tactic.LinearCombination
open Lean
open Elab Meta Term Ineq
/-- Result of `expandLinearCombo`, either an equality/inequality proof or a value. -/
inductive Expanded
/-- A proof of `a = b`, `a ≤ b`, or `a < b` (according to the value of `Ineq`). -/
| proof (rel : Ineq) (pf : Syntax.Term)
/-- A value, equivalently a proof of `c = c`. -/
| const (c : Syntax.Term)
/-- The handling in `linear_combination` of left- and right-multiplication and scalar-multiplication
and of division — all five proceed according to the same logic, specified here: given a proof `p` of
an (in)equality and a constant `c`,
* if `p` is a proof of an equation, multiply/divide through by `c`;
* if `p` is a proof of a non-strict inequality, run `positivity` to find a proof that `c` is
nonnegative, then multiply/divide through by `c`, invoking the nonnegativity of `c` where needed;
* if `p` is a proof of a strict inequality, run `positivity` to find a proof that `c` is positive
(if possible) or nonnegative (if not), then multiply/divide through by `c`, invoking the
positivity or nonnegativity of `c` where needed.
This generic logic takes as a parameter the object `lems`: the four lemmas corresponding to the four
cases. -/
def rescale (lems : Ineq.WithStrictness → Name) (ty : Option Expr) (p c : Term) :
Ineq → TermElabM Expanded
| eq => do
let i := mkIdent <| lems .eq
.proof eq <$> ``($i $p $c)
| le => do
let i := mkIdent <| lems .le
let e₂ ← withSynthesizeLight <| Term.elabTerm c ty
let hc₂ ← Meta.Positivity.proveNonneg e₂
.proof le <$> ``($i $p $(← hc₂.toSyntax))
| lt => do
let e₂ ← withSynthesizeLight <| Term.elabTerm c ty
let (strict, hc₂) ← Meta.Positivity.bestResult e₂
let i := mkIdent <| lems (.lt strict)
let p' : Term ← ``($i $p $(← hc₂.toSyntax))
if strict then pure (.proof lt p') else pure (.proof le p')
/--
Performs macro expansion of a linear combination expression,
using `+`/`-`/`*`/`/` on equations and values.
* `.proof eq p` means that `p` is a syntax corresponding to a proof of an equation.
For example, if `h : a = b` then `expandLinearCombo (2 * h)` returns `.proof (c_add_pf 2 h)`
which is a proof of `2 * a = 2 * b`.
Similarly, `.proof le p` means that `p` is a syntax corresponding to a proof of a non-strict
inequality, and `.proof lt p` means that `p` is a syntax corresponding to a proof of a strict
inequality.
* `.const c` means that the input expression is not an equation but a value.
-/
partial def expandLinearCombo (ty : Option Expr) (stx : Syntax.Term) :
TermElabM Expanded := withRef stx do
match stx with
| `(($e)) => expandLinearCombo ty e
| `($e₁ + $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ + $c₂)
| .proof rel₁ p₁, .proof rel₂ p₂ =>
let i := mkIdent <| Ineq.addRelRelData rel₁ rel₂
.proof (max rel₁ rel₂) <$> ``($i $p₁ $p₂)
| .proof rel p, .const c | .const c, .proof rel p =>
logWarningAt c "this constant has no effect on the linear combination; it can be dropped \
from the term"
pure (.proof rel p)
| `($e₁ - $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ - $c₂)
| .proof rel p, .const c =>
logWarningAt c "this constant has no effect on the linear combination; it can be dropped \
from the term"
pure (.proof rel p)
| .const c, .proof eq p =>
logWarningAt c "this constant has no effect on the linear combination; it can be dropped \
from the term"
.proof eq <$> ``(Eq.symm $p)
| .proof rel₁ p₁, .proof eq p₂ =>
let i := mkIdent <| Ineq.addRelRelData rel₁ eq
.proof rel₁ <$> ``($i $p₁ (Eq.symm $p₂))
| _, .proof _ _ =>
throwError "coefficients of inequalities in 'linear_combination' must be nonnegative"
| `(-$e) => do
match ← expandLinearCombo ty e with
| .const c => .const <$> `(-$c)
| .proof eq p => .proof eq <$> ``(Eq.symm $p)
| .proof _ _ =>
throwError "coefficients of inequalities in 'linear_combination' must be nonnegative"
| `($e₁ *%$tk $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ * $c₂)
| .proof rel₁ p₁, .const c₂ => rescale mulRelConstData ty p₁ c₂ rel₁
| .const c₁, .proof rel₂ p₂ => rescale mulConstRelData ty p₂ c₁ rel₂
| .proof _ _, .proof _ _ =>
throwErrorAt tk "'linear_combination' supports only linear operations"
| `($e₁ •%$tk $e₂) => do
match ← expandLinearCombo none e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ • $c₂)
| .proof rel₁ p₁, .const c₂ => rescale smulRelConstData ty p₁ c₂ rel₁
| .const c₁, .proof rel₂ p₂ => rescale smulConstRelData none p₂ c₁ rel₂
| .proof _ _, .proof _ _ =>
throwErrorAt tk "'linear_combination' supports only linear operations"
| `($e₁ /%$tk $e₂) => do
match ← expandLinearCombo ty e₁, ← expandLinearCombo ty e₂ with
| .const c₁, .const c₂ => .const <$> ``($c₁ / $c₂)
| .proof rel₁ p₁, .const c₂ => rescale divRelConstData ty p₁ c₂ rel₁
| _, .proof _ _ => throwErrorAt tk "'linear_combination' supports only linear operations"
| e =>
-- We have the expected type from the goal, so we can fully synthesize this leaf node.
withSynthesize do
-- It is OK to use `ty` as the expected type even if `e` is a proof.
-- The expected type is just a hint.
let c ← withSynthesizeLight <| Term.elabTerm e ty
match ← try? (← inferType c).ineq? with
| some (rel, _) => .proof rel <$> c.toSyntax
| none => .const <$> c.toSyntax
/-- Implementation of `linear_combination`. -/
def elabLinearCombination (tk : Syntax)
(norm? : Option Syntax.Tactic) (exp? : Option Syntax.NumLit) (input : Option Syntax.Term) :
Tactic.TacticM Unit := Tactic.withMainContext <| Tactic.focus do
let eType ← withReducible <| (← Tactic.getMainGoal).getType'
let (goalRel, ty, _) ← eType.ineq?
-- build the specified linear combination of the hypotheses
let (hypRel, p) ← match input with
| none => Prod.mk eq <$> `(Eq.refl 0)
| some e =>
match ← expandLinearCombo ty e with
| .const c =>
logWarningAt c "this constant has no effect on the linear combination; it can be dropped \
from the term"
Prod.mk eq <$> `(Eq.refl 0)
| .proof hypRel p => pure (hypRel, p)
-- look up the lemma for the central `refine` in `linear_combination`
let (reduceLem, newGoalRel) : Name × Ineq := ← do
match Ineq.relImpRelData hypRel goalRel with
| none => throwError "cannot prove an equality from inequality hypotheses"
| some n => pure n
-- build the term for the central `refine` in `linear_combination`
let p' ← do
match exp? with
| some n =>
if n.getNat = 1 then
`($(mkIdent reduceLem) $p ?a)
else
match hypRel with
| eq => `(eq_of_add_pow $n $p ?a)
| _ => throwError
"linear_combination tactic not implemented for exponentiation of inequality goals"
| _ => `($(mkIdent reduceLem) $p ?a)
-- run the central `refine` in `linear_combination`
Term.withoutErrToSorry <| Tactic.refineCore p' `refine false
-- if we are in a "true" ring, with well-behaved negation, we rearrange from the form
-- `[stuff] = [stuff]` (or `≤` or `<`) to the form `[stuff] = 0` (or `≤` or `<`), because this
-- gives more useful error messages on failure
let _ ← Tactic.tryTactic <| Tactic.liftMetaTactic fun g ↦ g.applyConst newGoalRel.rearrangeData
match norm? with
-- now run the normalization tactic provided
| some norm => Tactic.evalTactic norm
-- or the default normalization tactic if none is provided
| none => withRef tk <| Tactic.liftMetaFinishingTactic <|
match newGoalRel with
-- for an equality task the default normalization tactic is (the internals of) `ring1` (but we
-- use `.instances` transparency, which is arguably more robust in algebraic settings than the
-- choice `.reducible` made in `ring1`)
| eq => fun g ↦ AtomM.run .instances <| Ring.proveEq g
| le => Ring.proveLE
| lt => Ring.proveLT
/--
The `(norm := $tac)` syntax says to use `tac` as a normalization postprocessor for
`linear_combination`. The default normalizer is `ring1`, but you can override it with `ring_nf`
to get subgoals from `linear_combination` or with `skip` to disable normalization.
-/
syntax normStx := atomic(" (" &"norm" " := ") withoutPosition(tactic) ")"
/--
The `(exp := n)` syntax for `linear_combination` says to take the goal to the `n`th power before
subtracting the given combination of hypotheses.
-/
syntax expStx := atomic(" (" &"exp" " := ") withoutPosition(num) ")"
/--
The `linear_combination` tactic attempts to prove an (in)equality goal by exhibiting it as a
specified linear combination of (in)equality hypotheses, or other (in)equality proof terms, modulo
(A) moving terms between the LHS and RHS of the (in)equalities, and (B) a normalization tactic
which by default is ring-normalization.
Example usage:
```
example {a b : ℚ} (h1 : a = 1) (h2 : b = 3) : (a + b) / 2 = 2 := by
linear_combination (h1 + h2) / 2
example {a b : ℚ} (h1 : a ≤ 1) (h2 : b ≤ 3) : (a + b) / 2 ≤ 2 := by
linear_combination (h1 + h2) / 2
example {a b : ℚ} : 2 * a * b ≤ a ^ 2 + b ^ 2 := by
linear_combination sq_nonneg (a - b)
example {x y z w : ℤ} (h₁ : x * z = y ^ 2) (h₂ : y * w = z ^ 2) :
z * (x * w - y * z) = 0 := by
linear_combination w * h₁ + y * h₂
example {x : ℚ} (h : x ≥ 5) : x ^ 2 > 2 * x + 11 := by
linear_combination (x + 3) * h
example {R : Type*} [CommRing R] {a b : R} (h : a = b) : a ^ 2 = b ^ 2 := by
linear_combination (a + b) * h
example {A : Type*} [AddCommGroup A]
{x y z : A} (h1 : x + y = 10 • z) (h2 : x - y = 6 • z) :
2 • x = 2 • (8 • z) := by
linear_combination (norm := abel) h1 + h2
example (x y : ℤ) (h1 : x * y + 2 * x = 1) (h2 : x = y) :
x * y = -2 * y + 1 := by
linear_combination (norm := ring_nf) -2 * h2
-- leaves goal `⊢ x * y + x * 2 - 1 = 0`
```
The input `e` in `linear_combination e` is a linear combination of proofs of (in)equalities,
given as a sum/difference of coefficients multiplied by expressions.
The coefficients may be arbitrary expressions (with nonnegativity constraints in the case of
inequalities).
The expressions can be arbitrary proof terms proving (in)equalities;
most commonly they are hypothesis names `h1`, `h2`, ....
The left and right sides of all the (in)equalities should have the same type `α`, and the
coefficients should also have type `α`. For full functionality `α` should be a commutative ring --
strictly speaking, a commutative semiring with "cancellative" addition (in the semiring case,
negation and subtraction will be handled "formally" as if operating in the enveloping ring). If a
nonstandard normalization is used (for example `abel` or `skip`), the tactic will work over types
`α` with less algebraic structure: for equalities, the minimum is instances of
`[Add α] [IsRightCancelAdd α]` together with instances of whatever operations are used in the tactic
call.
The variant `linear_combination (norm := tac) e` specifies explicitly the "normalization tactic"
`tac` to be run on the subgoal(s) after constructing the linear combination.
* The default normalization tactic is `ring1` (for equalities) or `Mathlib.Tactic.Ring.prove{LE,LT}`
(for inequalities). These are finishing tactics: they close the goal or fail.
* When working in algebraic categories other than commutative rings -- for example fields, abelian
groups, modules -- it is sometimes useful to use normalization tactics adapted to those categories
(`field_simp`, `abel`, `module`).
* To skip normalization entirely, use `skip` as the normalization tactic.
* The `linear_combination` tactic creates a linear combination by adding the provided (in)equalities
together from left to right, so if `tac` is not invariant under commutation of additive
expressions, then the order of the input hypotheses can matter.
The variant `linear_combination (exp := n) e` will take the goal to the `n`th power before
subtracting the combination `e`. In other words, if the goal is `t1 = t2`,
`linear_combination (exp := n) e` will change the goal to `(t1 - t2)^n = 0` before proceeding as
above. This variant is implemented only for linear combinations of equalities (i.e., not for
inequalities).
-/
syntax (name := linearCombination) "linear_combination"
(normStx)? (expStx)? (ppSpace colGt term)? : tactic
elab_rules : tactic
| `(tactic| linear_combination%$tk $[(norm := $tac)]? $[(exp := $n)]? $(e)?) =>
elabLinearCombination tk tac n e
end Mathlib.Tactic.LinearCombination |
.lake/packages/mathlib/Mathlib/Tactic/Use.lean | import Mathlib.Tactic.WithoutCDot
import Lean.Meta.Tactic.Util
import Lean.Elab.Tactic.Basic
/-!
# The `use` tactic
The `use` and `use!` tactics are for instantiating one-constructor inductive types
just like the `exists` tactic, but they can be a little more flexible.
`use` is the more restrained version for mathlib4, and `use!` is the exuberant version
that more closely matches `use` from mathlib3.
Note: The `use!` tactic is almost exactly the mathlib3 `use` except that it does not try
applying `exists_prop`. See the failing test in `MathlibTest/Use.lean`.
-/
namespace Mathlib.Tactic
open Lean Meta Elab Tactic
initialize registerTraceClass `tactic.use
/--
When the goal `mvarId` is an inductive datatype with a single constructor,
this applies that constructor, then returns metavariables for the non-parameter explicit arguments
along with metavariables for the parameters and implicit arguments.
The first list of returned metavariables correspond to the arguments that `⟨x,y,...⟩` notation uses.
The second list corresponds to everything else: the parameters and implicit arguments.
The third list consists of those implicit arguments that are instance implicits, which one can
try to synthesize. The third list is a sublist of the second list.
Returns metavariables for all arguments whether or not the metavariables are assigned.
-/
def applyTheConstructor (mvarId : MVarId) :
MetaM (List MVarId × List MVarId × List MVarId) := do
mvarId.withContext do
mvarId.checkNotAssigned `constructor
let target ← mvarId.getType'
matchConstInduct target.getAppFn
(fun _ => throwTacticEx `constructor mvarId
m!"target is not an inductive datatype{indentExpr target}")
fun ival us => do
match ival.ctors with
| [ctor] =>
let cinfo ← getConstInfoCtor ctor
let ctorConst := Lean.mkConst ctor us
let (args, binderInfos, _) ← forallMetaTelescopeReducing (← inferType ctorConst)
let mut explicit := #[]
let mut implicit := #[]
let mut insts := #[]
for arg in args, binderInfo in binderInfos, i in [0:args.size] do
if cinfo.numParams ≤ i ∧ binderInfo.isExplicit then
explicit := explicit.push arg.mvarId!
else
implicit := implicit.push arg.mvarId!
if binderInfo.isInstImplicit then
insts := insts.push arg.mvarId!
let e := mkAppN ctorConst args
let eType ← inferType e
unless (← withAssignableSyntheticOpaque <| isDefEq eType target) do
throwError m!"type mismatch{indentExpr e}\n{← mkHasTypeButIsExpectedMsg eType target}"
mvarId.assign e
return (explicit.toList, implicit.toList, insts.toList)
| _ => throwTacticEx `constructor mvarId
m!"target inductive type does not have exactly one constructor{indentExpr target}"
/-- Use the `args` to refine the goals `gs` in order, but whenever there is a single
goal remaining then first try applying a single constructor if it's for a single-constructor
inductive type. In `eager` mode, instead we always first try to refine, and if that fails we
always try to apply such a constructor no matter if it's the last goal.
Returns the remaining explicit goals `gs`, any goals `acc` due to `refine`, and a sublist of these
of instance arguments that we should try synthesizing after the loop.
The new set of goals should be `gs ++ acc`. -/
partial
def useLoop (eager : Bool) (gs : List MVarId) (args : List Term) (acc insts : List MVarId) :
TermElabM (List MVarId × List MVarId × List MVarId) := do
trace[tactic.use] "gs = {gs}\nargs = {args}\nacc = {acc}"
match gs, args with
| gs, [] =>
return (gs, acc, insts)
| [], arg :: _ =>
throwErrorAt arg "too many arguments supplied to `use`"
| g :: gs', arg :: args' => g.withContext do
if ← g.isAssigned then
-- Goals might become assigned in inductive types with indices.
-- Let's check that what's supplied is defeq to what's already there.
let e ← Term.elabTermEnsuringType arg (← g.getType)
unless ← isDefEq e (.mvar g) do
throwErrorAt arg
"argument is not definitionally equal to inferred value{indentExpr (.mvar g)}"
return ← useLoop eager gs' args' acc insts
-- Type ascription is a workaround for `refine` ensuring the type after synthesizing mvars.
let refineArg ← `(tactic| refine ($arg : $(← Term.exprToSyntax (← g.getType))))
if eager then
-- In eager mode, first try refining with the argument before applying the constructor
if let some newGoals ← observing? (run g do withoutRecover <| evalTactic refineArg) then
return ← useLoop eager gs' args' (acc ++ newGoals) insts
if eager || gs'.isEmpty then
if let some (expl, impl, insts') ← observing? do
try applyTheConstructor g
catch e => trace[tactic.use] "Constructor. {e.toMessageData}"; throw e then
trace[tactic.use] "expl.length = {expl.length}, impl.length = {impl.length}"
return ← useLoop eager (expl ++ gs') args (acc ++ impl) (insts ++ insts')
-- In eager mode, the following will give an error, which hopefully is more informative than
-- the one provided by `applyTheConstructor`.
let newGoals ← run g do evalTactic refineArg
useLoop eager gs' args' (acc ++ newGoals) insts
/-- Run the `useLoop` on the main goal then discharge remaining explicit `Prop` arguments. -/
def runUse (eager : Bool) (discharger : TacticM Unit) (args : List Term) : TacticM Unit := do
let egoals ← focus do
let (egoals, acc, insts) ← useLoop eager (← getGoals) args [] []
-- Try synthesizing instance arguments
for inst in insts do
if !(← inst.isAssigned) then
discard <| inst.withContext <| observing? do inst.assign (← synthInstance (← inst.getType))
-- Set the goals.
setGoals (egoals ++ acc)
pruneSolvedGoals
pure egoals
-- Run the discharger on non-assigned proposition metavariables
-- (`trivial` uses `assumption`, which isn't great for non-propositions)
for g in egoals do
if !(← g.isAssigned) then
g.withContext do
if ← isProp (← g.getType) then
trace[tactic.use] "running discharger on {g}"
discard <| run g discharger
/-- Default discharger to try to use for the `use` and `use!` tactics.
This is similar to the `trivial` tactic but doesn't do things like `contradiction` or `decide`. -/
syntax "use_discharger" : tactic
macro_rules | `(tactic| use_discharger) => `(tactic| apply exists_prop.mpr <;> use_discharger)
macro_rules | `(tactic| use_discharger) => `(tactic| apply And.intro <;> use_discharger)
macro_rules | `(tactic| use_discharger) => `(tactic| rfl)
macro_rules | `(tactic| use_discharger) => `(tactic| assumption)
macro_rules | `(tactic| use_discharger) => `(tactic| apply True.intro)
/-- Returns a `TacticM Unit` that either runs the tactic sequence from `discharger?` if it's
non-`none`, or it does `try with_reducible use_discharger`. -/
def mkUseDischarger (discharger? : Option (TSyntax ``Parser.Tactic.discharger)) :
TacticM (TacticM Unit) := do
let discharger ←
if let some disch := discharger? then
match disch with
| `(Parser.Tactic.discharger| ($_ := $d)) => `(tactic| ($d))
| _ => throwUnsupportedSyntax
else
`(tactic| try with_reducible use_discharger)
return evalTactic discharger
/--
`use e₁, e₂, ⋯` is similar to `exists`, but unlike `exists` it is equivalent to applying the tactic
`refine ⟨e₁, e₂, ⋯, ?_, ⋯, ?_⟩` with any number of placeholders (rather than just one) and
then trying to close goals associated to the placeholders with a configurable discharger (rather
than just `try trivial`).
Examples:
```lean
example : ∃ x : Nat, x = x := by use 42
example : ∃ x : Nat, ∃ y : Nat, x = y := by use 42, 42
example : ∃ x : String × String, x.1 = x.2 := by use ("forty-two", "forty-two")
```
`use! e₁, e₂, ⋯` is similar but it applies constructors everywhere rather than just for
goals that correspond to the last argument of a constructor. This gives the effect that
nested constructors are being flattened out, with the supplied values being used along the
leaves and nodes of the tree of constructors.
With `use!` one can feed in each `42` one at a time:
```lean
example : ∃ p : Nat × Nat, p.1 = p.2 := by use! 42, 42
example : ∃ p : Nat × Nat, p.1 = p.2 := by use! (42, 42)
```
The second line makes use of the fact that `use!` tries refining with the argument before
applying a constructor. Also note that `use`/`use!` by default uses a tactic
called `use_discharger` to discharge goals, so `use! 42` will close the goal in this example since
`use_discharger` applies `rfl`, which as a consequence solves for the other `Nat` metavariable.
These tactics take an optional discharger to handle remaining explicit `Prop` constructor arguments.
By default it is `use (discharger := try with_reducible use_discharger) e₁, e₂, ⋯`.
To turn off the discharger and keep all goals, use `(discharger := skip)`.
To allow "heavy refls", use `(discharger := try use_discharger)`.
-/
elab (name := useSyntax)
"use" discharger?:(Parser.Tactic.discharger)? ppSpace args:term,+ : tactic => do
runUse false (← mkUseDischarger discharger?) args.getElems.toList
@[inherit_doc useSyntax]
elab "use!" discharger?:(Parser.Tactic.discharger)? ppSpace args:term,+ : tactic => do
runUse true (← mkUseDischarger discharger?) args.getElems.toList
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Variable.lean | import Mathlib.Init
import Lean.Meta.Tactic.TryThis
/-!
# The `variable?` command
This defines a command like `variable` that automatically adds all missing typeclass
arguments. For example, `variable? [Module R M]` is the same as
`variable [Semiring R] [AddCommMonoid M] [Module R M]`, though if any of these three instance
arguments can be inferred from previous variables then they will be omitted.
An inherent limitation with this command is that variables are recorded in the scope as
*syntax*. This means that `variable?` needs to pretty print the expressions we get
from typeclass synthesis errors, and these might fail to round trip.
-/
namespace Mathlib.Command.Variable
open Lean Elab Command Parser.Term Meta
initialize registerTraceClass `variable?
register_option variable?.maxSteps : Nat :=
{ defValue := 15
group := "variable?"
descr :=
"The maximum number of instance arguments `variable?` will try to insert before giving up" }
register_option variable?.checkRedundant : Bool :=
{ defValue := true
group := "variable?"
descr := "Warn if instance arguments can be inferred from preceding ones" }
/-- Get the type out of a bracketed binder. -/
def bracketedBinderType : Syntax → Option Term
| `(bracketedBinderF|($_* $[: $ty?]? $(_annot?)?)) => ty?
| `(bracketedBinderF|{$_* $[: $ty?]?}) => ty?
| `(bracketedBinderF|⦃$_* $[: $ty?]?⦄) => ty?
| `(bracketedBinderF|[$[$_ :]? $ty]) => some ty
| _ => none
/-- The `variable?` command has the same syntax as `variable`, but it will auto-insert
missing instance arguments wherever they are needed.
It does not add variables that can already be deduced from others in the current context.
By default the command checks that variables aren't implied by earlier ones, but it does *not*
check that earlier variables aren't implied by later ones.
Unlike `variable`, the `variable?` command does not support changing variable binder types.
The `variable?` command will give a suggestion to replace itself with a command of the form
`variable? ...binders... => ...binders...`. The binders after the `=>` are the completed
list of binders. When this `=>` clause is present, the command verifies that the expanded
binders match the post-`=>` binders. The purpose of this is to help keep code that uses
`variable?` resilient against changes to the typeclass hierarchy, at least in the sense
that this additional information can be used to debug issues that might arise.
One can also replace `variable? ...binders... =>` with `variable`.
The core algorithm is to try elaborating binders one at a time, and whenever there is a
typeclass instance inference failure, it synthesizes binder syntax for it and adds it to
the list of binders and tries again, recursively. There are no guarantees that this
process gives the "correct" list of binders.
Structures tagged with the `variable_alias` attribute can serve as aliases for a collection
of typeclasses. For example, given
```lean
@[variable_alias]
structure VectorSpace (k V : Type*) [Field k] [AddCommGroup V] [Module k V]
```
then `variable? [VectorSpace k V]` is
equivalent to `variable {k V : Type*} [Field k] [AddCommGroup V] [Module k V]`, assuming
that there are no pre-existing instances on `k` and `V`.
Note that this is not a simple replacement: it only adds instances not inferable
from others in the current scope.
A word of warning: the core algorithm depends on pretty printing, so if terms that appear
in binders do not round trip, this algorithm can fail. That said, it has some support
for quantified binders such as `[∀ i, F i]`. -/
syntax (name := «variable?»)
"variable?" (ppSpace bracketedBinder)* (" =>" (ppSpace bracketedBinder)*)? : command
/--
Attribute to record aliases for the `variable?` command. Aliases are structures that have no
fields, and additional typeclasses are recorded as *arguments* to the structure.
Example:
```
@[variable_alias]
structure VectorSpace (k V : Type*)
[Field k] [AddCommGroup V] [Module k V]
```
Then `variable? [VectorSpace k V]` ensures that these three typeclasses are present in
the current scope. Notice that it's looking at the arguments to the `VectorSpace` type
constructor. You should not have any fields in `variable_alias` structures.
Notice that `VectorSpace` is not a class; the `variable?` command allows non-classes with the
`variable_alias` attribute to use instance binders.
-/
initialize variableAliasAttr : TagAttribute ←
registerTagAttribute `variable_alias "Attribute to record aliases for the `variable?` command."
/-- Find a synthetic typeclass metavariable with no expr metavariables in its type. -/
def pendingActionableSynthMVar (binder : TSyntax ``bracketedBinder) :
TermElabM (Option MVarId) := do
let pendingMVars := (← get).pendingMVars
if pendingMVars.isEmpty then
return none
for mvarId in pendingMVars.reverse do
let some decl ← Term.getSyntheticMVarDecl? mvarId | continue
match decl.kind with
| .typeClass _ =>
let ty ← instantiateMVars (← mvarId.getType)
if !ty.hasExprMVar then
return mvarId
| _ => pure ()
throwErrorAt binder "Cannot satisfy requirements for {binder} due to metavariables."
/-- Try elaborating `ty`. Returns `none` if it doesn't need any additional typeclasses,
or it returns a new binder that needs to come first. Does not add info unless it throws
an exception. -/
partial def getSubproblem
(binder : TSyntax ``bracketedBinder) (ty : Term) :
TermElabM (Option (MessageData × TSyntax ``bracketedBinder)) := do
let res : Term.TermElabResult (Option (MessageData × TSyntax ``bracketedBinder)) ←
Term.observing do
withTheReader Term.Context (fun ctx => {ctx with ignoreTCFailures := true}) do
Term.withAutoBoundImplicit do
_ ← Term.elabType ty
Term.synthesizeSyntheticMVars (postpone := .yes) (ignoreStuckTC := true)
let fvarIds := (← getLCtx).getFVarIds
if let some mvarId ← pendingActionableSynthMVar binder then
trace[«variable?»] "Actionable mvar:{mvarId}"
-- TODO alter goal based on configuration, for example Semiring -> CommRing.
-- 1. Find the new fvars that this instance problem depends on:
let fvarIds' := (← mvarId.getDecl).lctx.getFVarIds.filter
(fun fvar => !(fvarIds.contains fvar))
-- 2. Abstract the instance problem with respect to these fvars
let goal ← mvarId.withContext do instantiateMVars <|
(← mkForallFVars (usedOnly := true) (fvarIds'.map .fvar) (← mvarId.getType))
-- Note: pretty printing is not guaranteed to round-trip, but it's what we can do.
let ty' ← PrettyPrinter.delab goal
let binder' ← withRef binder `(bracketedBinderF| [$ty'])
return some (← addMessageContext m!"{mvarId}", binder')
else
return none
match res with
| .ok v _ => return v
| .error .. => Term.applyResult res
/-- Tries elaborating binders, inserting new binders whenever typeclass inference fails.
`i` is the index of the next binder that needs to be checked.
The `toOmit` array keeps track of which binders should be removed at the end,
in particular the `variable_alias` binders and any redundant binders. -/
partial def completeBinders' (maxSteps : Nat) (gas : Nat)
(checkRedundant : Bool)
(binders : TSyntaxArray ``bracketedBinder)
(toOmit : Array Bool) (i : Nat) :
TermElabM (TSyntaxArray ``bracketedBinder × Array Bool) := do
if h : 0 < gas ∧ i < binders.size then
let binder := binders[i]
trace[«variable?»] "\
Have {(← getLCtx).getFVarIds.size} fvars and {(← getLocalInstances).size} local instances. \
Looking at{indentD binder}"
let sub? ← getSubproblem binder (bracketedBinderType binder).get!
if let some (goalMsg, binder') := sub? then
trace[«variable?»] m!"new subproblem:{indentD binder'}"
if binders.any (stop := i) (· == binder') then
let binders' := binders.extract 0 i
throwErrorAt binder "\
Binder{indentD binder}\nwas not able to satisfy one of its dependencies using \
the pre-existing binder{indentD binder'}\n\n\
This might be due to differences in implicit arguments, which are not represented \
in binders since they are generated by pretty printing unsatisfied dependencies.\n\n\
Current variable command:{indentD (← `(command| variable $binders'*))}\n\n\
Local context for the unsatisfied dependency:{goalMsg}"
let binders := binders.insertIdx i binder'
completeBinders' maxSteps (gas - 1) checkRedundant binders toOmit i
else
let lctx ← getLCtx
let linst ← getLocalInstances
withOptions (fun opts => Term.checkBinderAnnotations.set opts false) <| -- for variable_alias
Term.withAutoBoundImplicit <|
Term.elabBinders #[binder] fun bindersElab => do
let types : Array Expr ← bindersElab.mapM (inferType ·)
trace[«variable?»] m!"elaborated binder types array = {types}"
Term.synthesizeSyntheticMVarsNoPostponing -- checkpoint for withAutoBoundImplicit
Term.withoutAutoBoundImplicit do
let (binders, toOmit) := ← do
match binder with
| `(bracketedBinderF|[$[$ident? :]? $ty]) =>
-- Check if it's an alias
let type ← instantiateMVars (← inferType bindersElab.back!)
if ← isVariableAlias type then
if ident?.isSome then
throwErrorAt binder "`variable_alias` binders can't have an explicit name"
-- Switch to implicit so that `elabBinders` succeeds.
-- We keep it around so that it gets infotrees
let binder' ← withRef binder `(bracketedBinderF|{_ : $ty})
return (binders.set! i binder', toOmit.push true)
-- Check that this wasn't already an instance
let res ← try withLCtx lctx linst <| trySynthInstance type catch _ => pure .none
if let .some _ := res then
if checkRedundant then
let mvar ← mkFreshExprMVarAt lctx linst type
logWarningAt binder
m!"Instance argument can be inferred from earlier arguments.\n{mvar.mvarId!}"
return (binders, toOmit.push true)
else
return (binders, toOmit.push false)
| _ => return (binders, toOmit.push false)
completeBinders' maxSteps gas checkRedundant binders toOmit (i + 1)
else
if h : gas = 0 ∧ i < binders.size then
let binders' := binders.extract 0 i
logErrorAt binders[i] m!"Maximum recursion depth for variables! reached. This might be a \
bug, or you can try adjusting `set_option variable?.maxSteps {maxSteps}`\n\n\
Current variable command:{indentD (← `(command| variable $binders'*))}"
return (binders, toOmit)
where
isVariableAlias (type : Expr) : MetaM Bool := do
forallTelescope type fun _ type => do
if let .const name _ := type.getAppFn then
if variableAliasAttr.hasTag (← getEnv) name then
return true
return false
def completeBinders (maxSteps : Nat) (checkRedundant : Bool)
(binders : TSyntaxArray ``bracketedBinder) :
TermElabM (TSyntaxArray ``bracketedBinder × Array Bool) :=
completeBinders' maxSteps maxSteps checkRedundant binders #[] 0
/-- Strip off whitespace and comments. -/
def cleanBinders (binders : TSyntaxArray ``bracketedBinder) :
TSyntaxArray ``bracketedBinder := Id.run do
let mut binders' := #[]
for binder in binders do
binders' := binders'.push <| ⟨binder.raw.unsetTrailing⟩
return binders'
@[command_elab «variable?», inherit_doc «variable?»]
def elabVariables : CommandElab := fun stx =>
match stx with
| `(variable? $binders* $[=> $expectedBinders?*]?) => do
let checkRedundant := variable?.checkRedundant.get (← getOptions)
process stx checkRedundant binders expectedBinders?
| _ => throwUnsupportedSyntax
where
extendScope (binders : TSyntaxArray ``bracketedBinder) : CommandElabM Unit := do
for binder in binders do
let varUIds ← (← getBracketedBinderIds binder) |>.mapM
(withFreshMacroScope ∘ MonadQuotation.addMacroScope)
modifyScope fun scope =>
{ scope with varDecls := scope.varDecls.push binder, varUIds := scope.varUIds ++ varUIds }
process (stx : Syntax) (checkRedundant : Bool)
(binders : TSyntaxArray ``bracketedBinder)
(expectedBinders? : Option <| TSyntaxArray ``bracketedBinder) : CommandElabM Unit := do
let binders := cleanBinders binders
let maxSteps := variable?.maxSteps.get (← getOptions)
trace[«variable?»] "variable?.maxSteps = {maxSteps}"
for binder in binders do
if (bracketedBinderType binder).isNone then
throwErrorAt binder "variable? cannot update pre-existing variables"
let (binders', suggest) ← runTermElabM fun _ => do
let (binders, toOmit) ← completeBinders maxSteps checkRedundant binders
/- Elaborate the binders again, which also adds the infotrees.
This also makes sure the list works with auto-bound implicits at the front. -/
Term.withAutoBoundImplicit <| Term.elabBinders binders fun _ => pure ()
-- Filter out omitted binders
let binders' : TSyntaxArray ``bracketedBinder :=
(binders.zip toOmit).filterMap fun (b, toOmit) => if toOmit then none else some b
if let some expectedBinders := expectedBinders? then
trace[«variable?»] "checking expected binders"
/- We re-elaborate the binders to create an expression that represents the entire resulting
local context (auto-bound implicits mean we can't just the `binders` array). -/
let elabAndPackageBinders (binders : TSyntaxArray ``bracketedBinder) :
TermElabM AbstractMVarsResult :=
withoutModifyingStateWithInfoAndMessages <| Term.withAutoBoundImplicit <|
Term.elabBinders binders fun _ => do
let e ← mkForallFVars (← getLCtx).getFVars (.sort .zero)
let res ← abstractMVars e
-- Throw in the level names from the current state since `Type*` produces new
-- level names.
return {res with paramNames := (← get).levelNames.toArray ++ res.paramNames}
let ctx1 ← elabAndPackageBinders binders'
let ctx2 ← elabAndPackageBinders expectedBinders
trace[«variable?»] "new context: paramNames = {ctx1.paramNames}, {
""}numMVars = {ctx1.numMVars}\n{indentD ctx1.expr}"
trace[«variable?»] "expected context: paramNames = {ctx2.paramNames}, {
""}numMVars = {ctx2.numMVars}\n{indentD ctx2.expr}"
if ctx1.paramNames == ctx2.paramNames && ctx1.numMVars == ctx2.numMVars then
if ← isDefEq ctx1.expr ctx2.expr then
return (binders', false)
logWarning "Calculated binders do not match the expected binders given after `=>`."
return (binders', true)
else
return (binders', true)
extendScope binders'
let varComm ← `(command| variable? $binders* => $binders'*)
trace[«variable?»] "derived{indentD varComm}"
if suggest then
liftTermElabM <| Lean.Meta.Tactic.TryThis.addSuggestion stx (origSpan? := stx) varComm
/-- Hint for the unused variables linter. Copies the one for `variable`. -/
@[unused_variables_ignore_fn]
def ignorevariable? : Lean.Linter.IgnoreFunction := fun _ stack _ =>
stack.matches [`null, none, `null, ``Mathlib.Command.Variable.variable?]
|| stack.matches [`null, none, `null, `null, ``Mathlib.Command.Variable.variable?]
end Variable
end Command
end Mathlib |
.lake/packages/mathlib/Mathlib/Tactic/Find.lean | import Mathlib.Init
import Batteries.Util.Cache
import Lean.HeadIndex
import Lean.Elab.Command
/-!
# The `#find` command and tactic.
The `#find` command finds definitions & lemmas using pattern matching on the type. For instance:
```lean
#find _ + _ = _ + _
#find ?n + _ = _ + ?n
#find (_ : Nat) + _ = _ + _
#find Nat → Nat
```
Inside tactic proofs, there is a `#find` tactic with the same syntax,
or the `find` tactic which looks for lemmas which are `apply`able against the current goal.
-/
open Lean Std
open Lean.Meta
open Lean.Elab
open Batteries.Tactic
namespace Mathlib.Tactic.Find
private partial def matchHyps : List Expr → List Expr → List Expr → MetaM Bool
| p::ps, oldHyps, h::newHyps => do
let pt ← inferType p
let t ← inferType h
if (← isDefEq pt t) then
matchHyps ps [] (oldHyps ++ newHyps)
else
matchHyps (p::ps) (h::oldHyps) newHyps
| [], _, _ => pure true
| _::_, _, [] => pure false
-- from Lean.Server.Completion
private def isBlackListed (declName : Name) : MetaM Bool := do
let env ← getEnv
pure <| declName.isInternal
|| isAuxRecursor env declName
|| isNoConfusion env declName
<||> isRec declName
<||> isMatcher declName
initialize findDeclsPerHead : DeclCache (Std.HashMap HeadIndex (Array Name)) ←
DeclCache.mk "#find: init cache" failure {} fun _ c headMap ↦ do
if (← isBlackListed c.name) then
return headMap
-- TODO: this should perhaps use `forallTelescopeReducing` instead,
-- to avoid leaking metavariables.
let (_, _, ty) ← forallMetaTelescopeReducing c.type
let head := ty.toHeadIndex
pure <| headMap.insert head (headMap.getD head #[] |>.push c.name)
def findType (t : Expr) : TermElabM Unit := withReducible do
let t ← instantiateMVars t
let head := (← forallMetaTelescopeReducing t).2.2.toHeadIndex
let pat ← abstractMVars t
let env ← getEnv
let mut numFound := 0
for n in (← findDeclsPerHead.get).getD head #[] do
let c := env.find? n |>.get!
let cTy := c.instantiateTypeLevelParams (← mkFreshLevelMVars c.numLevelParams)
let found ← forallTelescopeReducing cTy fun cParams cTy' ↦ do
let pat := pat.expr.instantiateLevelParamsArray pat.paramNames
(← mkFreshLevelMVars pat.numMVars).toArray
let (_, _, pat) ← lambdaMetaTelescope pat
let (patParams, _, pat) ← forallMetaTelescopeReducing pat
isDefEq cTy' pat <&&> matchHyps patParams.toList [] cParams.toList
if found then
numFound := numFound + 1
if numFound > 20 then
logInfo m!"maximum number of search results reached"
break
logInfo m!"{n}: {cTy}"
open Lean.Elab.Command in
/-
The `#find` command finds definitions & lemmas using pattern matching on the type. For instance:
```lean
#find _ + _ = _ + _
#find ?n + _ = _ + ?n
#find (_ : Nat) + _ = _ + _
#find Nat → Nat
```
Inside tactic proofs, the `#find` tactic can be used instead.
There is also the `find` tactic which looks for
lemmas which are `apply`able against the current goal.
-/
elab "#find " t:term : command =>
liftTermElabM do
let t ← Term.elabTerm t none
Term.synthesizeSyntheticMVars (postpone := .no) (ignoreStuckTC := true)
findType t
/- (Note that you'll get an error trying to run these here:
``cannot evaluate `[init]` declaration 'findDeclsPerHead' in the same module``
but they will work fine in a new file!) -/
-- #find _ + _ = _ + _
-- #find _ + _ = _ + _
-- #find ?n + _ = _ + ?n
-- #find (_ : Nat) + _ = _ + _
-- #find Nat → Nat
-- #find ?n ≤ ?m → ?n + _ ≤ ?m + _
open Lean.Elab.Tactic
/-
Display theorems (and definitions) whose result type matches the current goal,
i.e. which should be `apply`able.
```lean
example : True := by find
```
`find` will not affect the goal by itself and should be removed from the finished proof.
For a command that takes the type to search for as an argument,
see `#find`, which is also available as a tactic.
-/
elab "find" : tactic => do
findType (← getMainTarget)
/-
Tactic version of the `#find` command.
See also the `find` tactic to search for theorems matching the current goal.
-/
elab "#find " t:term : tactic => do
  let t ← Term.elabTerm t none
  -- Resolve pending synthetic metavariables before searching.
  Term.synthesizeSyntheticMVars (postpone := .no) (ignoreStuckTC := true)
  findType t
end Mathlib.Tactic.Find |
.lake/packages/mathlib/Mathlib/Tactic/SimpIntro.lean | import Lean.Elab.Tactic.Simp
import Mathlib.Init
/-! # `simp_intro` tactic -/
namespace Mathlib.Tactic
open Lean Meta Elab Tactic
/--
Main loop of the `simp_intro` tactic.
* `g`: the original goal
* `ctx`: the simp context, which is extended with local variables as we enter the binders
* `discharge?`: the discharger
* `more`: if true, we will keep introducing binders as long as we can
* `ids`: the list of binder identifiers
-/
partial def simpIntroCore (g : MVarId) (ctx : Simp.Context) (simprocs : Simp.SimprocsArray := #[])
    (discharge? : Option Simp.Discharge) (more : Bool) (ids : List (TSyntax ``binderIdent)) :
    TermElabM (Option MVarId) := do
  -- Terminal action: simplify the goal itself; `none` means the goal was closed.
  let done := return (← simpTargetCore g ctx simprocs discharge?).1
  -- Pick the next binder name; with `..` and no names left we keep introducing `_` binders
  -- at reducible transparency, otherwise we stop.
  let (transp, var, ids') ← match ids with
    | [] => if more then pure (.reducible, mkHole (← getRef), []) else return ← done
    | v::ids => pure (.default, v.raw[0], ids)
  let t ← withTransparency transp g.getType'
  let n := if var.isIdent then var.getId else `_
  -- After introducing `fvar`: record hover info, extend the simp set with it when it is
  -- a proposition (so it can rewrite later binders and the goal), then recurse.
  let withFVar := fun (fvar, g) ↦ g.withContext do
    Term.addLocalVarInfo var (mkFVar fvar)
    let ctx : Simp.Context ←
      if (← Meta.isProp <| ← fvar.getType) then
        let simpTheorems ← ctx.simpTheorems.addTheorem (.fvar fvar) (.fvar fvar)
        pure <| ctx.setSimpTheorems simpTheorems
      else
        pure ctx
    simpIntroCore g ctx simprocs discharge? more ids'
  -- Dispatch on the goal's head: `let` and `∀`/`→` binders can be introduced.
  match t with
  | .letE .. => withFVar (← g.intro n)
  | .forallE (body := body) .. =>
    let (fvar, g) ← g.intro n
    -- A dependent binder cannot be simplified in isolation; a non-dependent hypothesis is
    -- simplified first (simplification may already close the goal).
    if body.hasLooseBVars then withFVar (fvar, g) else
      match (← simpLocalDecl g fvar ctx simprocs discharge?).1 with
      | none =>
        g.withContext <| Term.addLocalVarInfo var (mkFVar fvar)
        return none
      | some g' => withFVar g'
  | _ =>
    if more && ids.isEmpty then done else
      throwErrorAt var "simp_intro failed to introduce {var}\n{g}"
open Parser.Tactic
/--
The `simp_intro` tactic is a combination of `simp` and `intro`: it will simplify the types of
variables as it introduces them and uses the new variables to simplify later arguments
and the goal.
* `simp_intro x y z` introduces variables named `x y z`
* `simp_intro x y z ..` introduces variables named `x y z` and then keeps introducing `_` binders
* `simp_intro (config := cfg) (discharger := tac) x y .. only [h₁, h₂]`:
`simp_intro` takes the same options as `simp` (see `simp`)
```
example : x + 0 = y → x = z := by
simp_intro h
-- h: x = y ⊢ y = z
sorry
```
-/
elab "simp_intro" cfg:optConfig disch:(discharger)?
    ids:(ppSpace colGt binderIdent)* more:" .."? only:(&" only")? args:(simpArgs)? : tactic => do
  let args := args.map fun args ↦ ⟨args.raw[1].getArgs⟩
  -- Re-assemble an equivalent `simp` call so that `mkSimpContext` can parse the options.
  let stx ← `(tactic| simp $cfg:optConfig $(disch)? $[only%$only]? $[[$args,*]]?)
  let { ctx, simprocs, dischargeWrapper, .. } ←
    withMainContext <| mkSimpContext stx (eraseLocal := false)
  dischargeWrapper.with fun discharge? ↦ do
    let g ← getMainGoal
    g.checkNotAssigned `simp_intro
    g.withContext do
      let g? ← simpIntroCore g ctx (simprocs := simprocs) discharge? more.isSome ids.toList
      -- `none` means simplification closed the goal.
      replaceMainGoal <| if let some g := g? then [g] else []
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/MoveAdd.lean | import Mathlib.Algebra.Group.Basic
import Mathlib.Lean.Meta
import Mathlib.Order.Defs.LinearOrder
/-!
# `move_add` a tactic for moving summands in expressions
The tactic `move_add` rearranges summands in expressions.
The tactic takes as input a list of terms, each one optionally preceded by `←`.
A term preceded by `←` gets moved to the left, while a term without `←` gets moved to the right.
* Empty input: `move_add []`
In this case, the effect of `move_add []` is equivalent to `simp only [← add_assoc]`:
essentially the tactic removes all visible parentheses.
* Singleton input: `move_add [a]` and `move_add [← a]`
If `⊢ b + a + c` is (a summand in) the goal, then
* `move_add [← a]` changes the goal to `a + b + c` (effectively, `a` moved to the left).
* `move_add [a]` changes the goal to `b + c + a` (effectively, `a` moved to the right);
The tactic reorders *all* sub-expressions of the target at the same time.
For instance, if `⊢ 0 < if b + a < b + a + c then a + b else b + a` is the goal, then
* `move_add [a]` changes the goal to `0 < if b + a < b + c + a then b + a else b + a`
(`a` moved to the right in three sums);
* `move_add [← a]` changes the goal to `0 < if a + b < a + b + c then a + b else a + b`
(`a` again moved to the left in three sums).
* Longer inputs: `move_add [..., a, ..., ← b, ...]`
If the list contains more than one term, the tactic effectively tries to move each term preceded
by `←` to the left, each term not preceded by `←` to the right
*maintaining the relative order in the call*.
Thus, applying `move_add [a, b, c, ← d, ← e]` returns summands of the form
`d + e + [...] + a + b + c`, i.e. `d` and `e` have the same relative position in the input list
and in the final rearrangement (and similarly for `a, b, c`).
In particular, `move_add [a, b]` likely has the same effect as
`move_add [a]; move_add [b]`: first, we move `a` to the right, then we move `b` also to the
right, *after* `a`.
However, if the terms matched by `a` and `b` do not overlap, then `move_add [← a, ← b]`
has the same effect as `move_add [← b]; move_add [← a]`:
first, we move `b` to the left, then we move `a` also to the left, *before* `b`.
The behaviour in the situation where `a` and `b` overlap is unspecified: `move_add`
will descend into subexpressions, but the order in which they are visited depends
on which rearrangements have already happened.
Also note, though, that `move_add [a, b]` may differ from `move_add [a]; move_add [b]`,
for instance when `a` and `b` are `DefEq`.
* Unification of inputs and repetitions: `move_add [_, ← _, a * _]`
The matching of the user inputs with the atoms of the summands in the target expression
is performed via checking `DefEq` and selecting the first, still available match.
Thus, if a sum in the target is `2 * 3 + 4 * (5 + 6) + 4 * 7 + 10 * 10`, then
`move_add [4 * _]` moves the summand `4 * (5 + 6)` to the right.
The unification of later terms only uses the atoms in the target that have not yet been unified.
Thus, if again the target contains `2 * 3 + 4 * (5 + 6) + 4 * 7 + 10 * 10`, then
`move_add [_, ← _, 4 * _]`
matches
* the first input (`_`) with `2 * 3`;
* the second input (`_`) with `4 * (5 + 6)`;
* the third input (`4 * _`) with `4 * 7`.
The resulting permutation therefore places `2 * 3` and `4 * 7` to the left (in this order) and
`4 * (5 + 6)` to the right: `2 * 3 + 4 * 7 + 10 * 10 + 4 * (5 + 6)`.
For the technical description, look at `Mathlib.MoveAdd.weight` and `Mathlib.MoveAdd.reorderUsing`.
`move_add` is the specialization of a more general `move_oper` tactic that takes a binary,
associative, commutative operation and a list of "operand atoms" and rearranges the operation.
## Extension notes
To work with a general associative, commutative binary operation, `move_oper`
needs to have inbuilt the lemmas asserting the analogues of
`add_comm, add_assoc, add_left_comm` for the new operation.
Currently, `move_oper` supports `HAdd.hAdd`, `HMul.hMul`, `And`, `Or`, `Max.max`, `Min.min`.
These lemmas should be added to `Mathlib.MoveAdd.move_oper_simpCtx`.
See `MathlibTest/MoveAdd.lean` for sample usage of `move_oper`.
## Implementation notes
The main driver behind the tactic is `Mathlib.MoveAdd.reorderAndSimp`.
The tactic takes the target, replaces the maximal subexpressions whose head symbol is the given
operation and replaces them by their reordered versions.
Once that is done, it tries to replace the initial goal with the permuted one by using `simp`.
Currently, no attempt is made at guiding `simp` by doing a `congr`-like destruction of the goal.
This will be the content of a later PR.
-/
open Lean Expr
/-- `getExprInputs e` inspects the outermost constructor of `e` and returns the array of all the
arguments to that constructor that are themselves `Expr`essions;
leaf constructors yield the empty array. -/
def Lean.Expr.getExprInputs (e : Expr) : Array Expr :=
  match e with
  | .app fn arg => #[fn, arg]
  | .lam _ bt bb _ => #[bt, bb]
  | .forallE _ bt bb _ => #[bt, bb]
  | .letE _ t v b _ => #[t, v, b]
  | .mdata _ e => #[e]
  | .proj _ _ e => #[e]
  | _ => #[]
/-- `size e` returns the number of subexpressions of `e` (counting `e` itself).
Deprecated: use `Lean.Expr.sizeWithoutSharing` instead. -/
@[deprecated Lean.Expr.sizeWithoutSharing (since := "2025-09-04")]
partial def Lean.Expr.size (e : Expr) : ℕ := (e.getExprInputs.map size).foldl (· + ·) 1
namespace Mathlib.MoveAdd
section ExprProcessing
section reorder
variable {α : Type*} [BEq α]
/-!
## Reordering the variables
This section produces the permutations of the variables for `move_add`.
The user controls the final order by passing a list of terms to the tactic.
Each term can be preceded by `←` or not.
In the final ordering,
* terms preceded by `←` appear first,
* terms not preceded by `←` appear last,
* all remaining terms remain in their current relative order.
-/
/-- `uniquify L` pairs every entry of the input list `L : List α` with the number of times that
entry already occurred earlier in `L`, returning `L' : List (α × ℕ)`.
Consequently `L'.map Prod.fst` coincides with `L`, and `L'` contains no duplicates. -/
def uniquify : List α → List (α × ℕ)
  | [] => []
  | a :: as =>
    -- Every later copy of `a` gets its occurrence count bumped by one.
    let tail := (uniquify as).map fun (x, k) => if x == a then (x, k + 1) else (x, k)
    (a, 0) :: tail
/-- Return a sorting key so that all `(a, true)`s are in the list's order
and sorted before all `(a, false)`s, which are also in the list's order.

Although `weight` does not require this, we use `weight` when `L.map Prod.fst` has no
duplicates; under that assumption `weight L : α → ℤ` satisfies:
* if `(a, true) ∈ L`, then `weight L a` is strictly negative;
* if `(a, false) ∈ L`, then `weight L a` is strictly positive;
* if neither `(a, true)` nor `(a, false)` is in `L`, then `weight L a = 0`;
and `weight L` is strictly increasing, with respect to order of appearance in `L`,
on both `{a | (a, true) ∈ L}` and `{a | (a, false) ∈ L}`. -/
def weight (L : List (α × Bool)) (a : α) : ℤ :=
  match L.find? (Prod.fst · == a) with
  -- `true` entries: negative keys, ordered by position in `L`.
  | some (_, true) => (L.idxOf (a, true) : ℤ) - L.length
  -- `false` entries: positive keys, ordered by position in `L`.
  | some (_, false) => (L.idxOf (a, false) : ℤ) + 1
  | none => 0
/-- `reorderUsing toReorder instructions` produces a reordering of `toReorder : List α`,
following the requirements imposed by `instructions : List (α × Bool)`.
These are the requirements:
* elements of `toReorder` that appear with `true` in `instructions` appear at the
*beginning* of the reordered list, in the order in which they appear in `instructions`;
* similarly, elements of `toReorder` that appear with `false` in `instructions` appear at the
*end* of the reordered list, in the order in which they appear in `instructions`;
* finally, elements of `toReorder` that do not appear in `instructions` appear "in the middle"
with the order that they had in `toReorder`.
For example,
* `reorderUsing [0, 1, 2] [(0, false)] = [1, 2, 0]`,
* `reorderUsing [0, 1, 2] [(1, true)] = [1, 0, 2]`,
* `reorderUsing [0, 1, 2] [(1, true), (0, false)] = [1, 2, 0]`.
-/
def reorderUsing (toReorder : List α) (instructions : List (α × Bool)) : List α :=
  -- Disambiguate duplicate entries on both sides so matching is one-to-one.
  let uInstructions :=
    let (as, as?) := instructions.unzip
    (uniquify as).zip as?
  let uToReorder := (uniquify toReorder).toArray
  -- Elements absent from the instructions compare by their original position;
  -- any pair involving an instructed element compares via `weight`.
  let reorder := uToReorder.qsort fun x y =>
    match uInstructions.find? (Prod.fst · == x), uInstructions.find? (Prod.fst · == y) with
    | none, none =>
      (uToReorder.idxOf? x).get! ≤ (uToReorder.idxOf? y).get!
    | _, _ => weight uInstructions x ≤ weight uInstructions y
  -- Strip the disambiguating counters again.
  (reorder.map Prod.fst).toList
end reorder
/-- `prepareOp sum` takes an `Expr`ession as input. It assumes that `sum` is a well-formed
term representing a repeated application of a binary operation whose operands are the
last two arguments passed to the operation.
It returns the operation with all of its arguments already applied except for the last two.

This is similar to `Lean.Meta.mkAdd, Lean.Meta.mkMul`, except that the resulting operation is
primed to work with operands of the same type as the ones already appearing in `sum`,
which is useful to rearrange the operands. -/
def prepareOp (sum : Expr) : Expr :=
  let args := sum.getAppArgs
  -- Reapply the head function to every argument except the trailing two operands.
  mkAppN sum.getAppFn (args.extract 0 (args.size - 2))
/-- `sumList prepOp left_assoc? exs` assumes that `prepOp` is an `Expr`ession representing a
binary operation already fully applied up until its last two arguments, as produced by
`prepareOp`, and rebuilds a `prepOp`-sum out of the operands `exs`.

For `exs = [e₁, e₂, ..., eₙ]` the result is
* the right-nested sum `prepOp e₁ (prepOp e₂ (... prepOp (prepOp eₙ₋₁ eₙ))`,
  if `left_assoc?` is `true`, and
* the left-nested sum `prepOp (prepOp( ... prepOp (prepOp e₁ e₂) e₃) ... eₙ)`,
  if `left_assoc?` is `false`. -/
partial def sumList (prepOp : Expr) (left_assoc? : Bool) (exs : List Expr) : Expr :=
  match exs with
  | [] => default
  | [e] => e
  | e :: rest =>
    if left_assoc? then
      mkApp2 prepOp e (sumList prepOp true rest)
    else
      rest.foldl (mkApp2 prepOp) e
end ExprProcessing
open Meta
variable (op : Name)
variable (R : Expr) in
/-- If `sum` is an expression consisting of repeated applications of `op`, then `getAddends`
returns the Array of those recursively determined arguments whose type is DefEq to `R`. -/
partial def getAddends (sum : Expr) : MetaM (Array Expr) := do
  if sum.isAppOf op then
    -- Keep only the arguments living in `R`; if `inferType` fails, the argument is kept
    -- (the fallback compares `R` with itself).
    let inR ← sum.getAppArgs.filterM fun r => do isDefEq R (← inferType r <|> pure R)
    let new ← inR.mapM (getAddends ·)
    return new.foldl Array.append #[]
  else return #[sum]
/-- Recursively compute the Array of `getAddends` Arrays by recursing into the expression `sum`
looking for instance of the operation `op`.
Possibly returns duplicates!
-/
partial def getOps (sum : Expr) : MetaM (Array ((Array Expr) × Expr)) := do
  let summands ← getAddends op (← inferType sum <|> return sum) sum
  -- A single summand means `sum` itself is not an `op`-sum: recurse into its
  -- immediate subexpressions instead of recording it.
  let (first, rest) := if summands.size == 1 then (#[], sum.getExprInputs) else
    (#[(summands, sum)], summands)
  let rest ← rest.mapM getOps
  return rest.foldl Array.append first
/-- `rankSums op tgt instructions` takes as input
* the name `op` of a binary operation,
* an `Expr`ession `tgt`,
* a list `instructions` of pair `(expression, Boolean)`.
It extracts the maximal subexpressions of `tgt` whose head symbol is `op`
(i.e. the maximal subexpressions that consist only of applications of the binary operation `op`),
it rearranges the operands of such subexpressions following the order implied by `instructions`
(as in `reorderUsing`),
it returns the list of pairs of expressions `(old_sum, new_sum)`, for which `old_sum ≠ new_sum`
sorted by decreasing value of `Lean.Expr.size`.
In particular, a subexpression of an `old_sum` can only appear *after* its over-expression.
-/
def rankSums (tgt : Expr) (instructions : List (Expr × Bool)) : MetaM (List (Expr × Expr)) := do
  let sums ← getOps op (← instantiateMVars tgt)
  -- For every maximal `op`-sum, compute its reordered version; sums that are
  -- already in the required order are dropped (`Option` do-notation).
  let candidates := sums.map fun (addends, sum) => do
    let reord := reorderUsing addends.toList instructions
    -- `∧`/`∨` associate to the right, so rebuild `And`/`Or` sums right-nested.
    let left_assoc? := sum.getAppFn.isConstOf `And || sum.getAppFn.isConstOf `Or
    let resummed := sumList (prepareOp sum) left_assoc? reord
    if (resummed != sum) then some (sum, resummed) else none
  -- Sort by decreasing size, so subexpressions come after their over-expressions.
  return (candidates.toList.reduceOption.toArray.qsort
    (fun x y : Expr × Expr ↦ (y.1.sizeWithoutSharing ≤ x.1.sizeWithoutSharing))).toList
/-- `permuteExpr op tgt instructions` takes the same input as `rankSums` and returns the
expression obtained from `tgt` by replacing all `old_sum`s by the corresponding `new_sum`.
If there were no required changes, then `permuteExpr` reports this in its second factor. -/
def permuteExpr (tgt : Expr) (instructions : List (Expr × Bool)) : MetaM Expr := do
  let permInstructions ← rankSums op tgt instructions
  if permInstructions == [] then throwError "The goal is already in the required form"
  let mut permTgt := tgt
  -- We cannot do `Expr.replace` all at once here, we need to follow
  -- the order of the instructions: larger sums are replaced first
  -- (`rankSums` sorts by decreasing size).
  for (old, new) in permInstructions do
    permTgt := permTgt.replace (if · == old then new else none)
  return permTgt
/-- `pairUp L R` takes two lists `L R : List Expr` as inputs.
It scans the elements of `L`, looking for a corresponding `DefEq` `Expr`ession in `R`.
If it finds one such element `d`, then it sets the element `d : R` aside, removing it from `R`, and
it continues with the matching on the remainder of `L` and on `R.erase d`.
At the end, it returns the sublist of `R` of the elements that were matched to some element of `L`,
in the order in which they appeared in `L`,
as well as the sublist of `L` of elements that were not matched, also in the order in which they
appeared in `L`.
Example:
```lean
#eval do
let L := [mkNatLit 0, (← mkFreshExprMVar (some (mkConst ``Nat))), mkNatLit 0] -- i.e. [0, _, 0]
let R := [mkNatLit 0, mkNatLit 0, mkNatLit 1] -- i.e. [0, 0, 1]
dbg_trace f!"{(← pairUp L R)}"
/- output:
`([0, 0], [0])`
the output LHS list `[0, 0]` consists of the first `0` and the `MVarId`.
the output RHS list `[0]` corresponds to the last `0` in `L`.
-/
```
-/
def pairUp : List (Expr × Bool × Syntax) → List Expr →
    MetaM ((List (Expr × Bool)) × List (Expr × Bool × Syntax))
  | (m::ms), l => do
    -- Find the first element of `l` that is `DefEq` to the user-provided term `m`;
    -- a matched element is consumed (erased) and cannot match a later term.
    match ← l.findM? (isDefEq · m.1) with
    | none => let (found, unfound) ← pairUp ms l; return (found, m::unfound)
    | some d => let (found, unfound) ← pairUp ms (l.erase d)
                return ((d, m.2.1)::found, unfound)
  | _, _ => return ([], [])
/-- `moveOperSimpCtx` is the `Simp.Context` for the reordering internal to `move_oper`.
To support a new binary operation, extend the list in this definition, so that it contains
enough lemmas to allow `simp` to close a generic permutation goal for the new binary operation.
-/
def moveOperSimpCtx : MetaM Simp.Context := do
  -- `simpOnlyBuiltins` plus comm/assoc/left_comm for each supported operation.
  let simpNames := Elab.Tactic.simpOnlyBuiltins ++ [
    ``add_comm, ``add_assoc, ``add_left_comm, -- for `HAdd.hAdd`
    ``mul_comm, ``mul_assoc, ``mul_left_comm, -- for `HMul.hMul`
    ``and_comm, ``and_assoc, ``and_left_comm, -- for `and`
    ``or_comm, ``or_assoc, ``or_left_comm, -- for `or`
    ``max_comm, ``max_assoc, ``max_left_comm, -- for `max`
    ``min_comm, ``min_assoc, ``min_left_comm -- for `min`
  ]
  let simpThms ← simpNames.foldlM (·.addConst ·) ({} : SimpTheorems)
  Simp.mkContext {} (simpTheorems := #[simpThms])
/-- `reorderAndSimp mv op instr` takes as input an `MVarId` `mv`, the name `op` of a binary
operation and a list of "instructions" `instr` that it passes to `permuteExpr`.
* It creates a version `permuted_mv` of `mv` with subexpressions representing `op`-sums reordered
following `instructions`.
* It produces 2 temporary goals by applying `Eq.mpr` and unifying the resulting meta-variable with
`permuted_mv`: `[⊢ mv = permuted_mv, ⊢ permuted_mv]`.
* It tries to solve the goal `mv = permuted_mv` by a simple-minded `simp` call, using the
`op`-analogues of `add_comm, add_assoc, add_left_comm`.
-/
def reorderAndSimp (mv : MVarId) (instr : List (Expr × Bool)) :
    MetaM (List MVarId) := mv.withContext do
  let permExpr ← permuteExpr op (← mv.getType'') instr
  -- generate the implication `permutedMv → mv = permutedMv → mv`
  let eqmpr ← mkAppM ``Eq.mpr #[← mkFreshExprMVar (← mkEq (← mv.getType) permExpr)]
  let twoGoals ← mv.apply eqmpr
  guard (twoGoals.length == 2) <|>
    throwError m!"There should only be 2 goals, instead of {twoGoals.length}"
  -- `permGoal` is the single goal `mv_permuted`, possibly more operations will be permuted later on
  let permGoal ← twoGoals.filterM fun v => return !(← v.isAssigned)
  -- Close the equality goal `mv = permuted_mv` by `simp` with the comm/assoc lemmas.
  match ← (simpGoal (permGoal[1]!) (← moveOperSimpCtx)) with
  | (some x, _) => throwError m!"'move_oper' could not solve {indentD x.2}"
  | (none, _) => return permGoal
/-- `unifyMovements` takes as input
* an array of `Expr × Bool × Syntax`, as in the output of `parseArrows`,
* the `Name` `op` of a binary operation,
* an `Expr`ession `tgt`.
It unifies each `Expr`ession appearing as a first factor of the array with the atoms
for the operation `op` in the expression `tgt`, returning
* the lists of pairs of a matched subexpression with the corresponding `Bool`ean;
* a pair of a list of error messages and the corresponding list of Syntax terms where the error
should be thrown;
* an array of debugging messages.
-/
def unifyMovements (data : Array (Expr × Bool × Syntax)) (tgt : Expr) :
    MetaM (List (Expr × Bool) × (List MessageData × List Syntax) × Array MessageData) := do
  let ops ← getOps op tgt
  -- The available atoms are the operands of all `op`-sums in `tgt`, bound variables excluded.
  let atoms := (ops.map Prod.fst).flatten.toList.filter (!isBVar ·)
  -- `instr` are the unified user-provided terms, `neverMatched` are non-unified ones
  let (instr, neverMatched) ← pairUp data.toList atoms
  let dbgMsg := #[m!"Matching of input variables:\n\
    * pre-match: {data.map (Prod.snd ∘ Prod.snd)}\n\
    * post-match: {instr}",
    m!"\nMaximum number of iterations: {ops.size}"]
  -- if there are `neverMatched` terms, return the parsed terms and the syntax
  let errMsg := neverMatched.map fun (t, a, stx) => (if a then m!"← {t}" else m!"{t}", stx)
  return (instr, errMsg.unzip, dbgMsg)
section parsing
open Elab Parser Tactic
/-- `parseArrows` parses an input of the form `[a, ← b, _ * (1 : ℤ)]`, consisting of a list of
terms, each optionally preceded by the arrow `←`.
It returns an array of triples consisting of
* the `Expr`ession corresponding to the parsed term,
* the `Bool`ean `true` if the arrow is present in front of the term,
* the underlying `Syntax` of the given term.
E.g. convert `[a, ← b, _ * (1 : ℤ)]` to
``[(a, false, `(a)), (b, true, `(b)), (_ * 1, false, `(_ * 1))]``.
-/
def parseArrows : TSyntax `Lean.Parser.Tactic.rwRuleSeq → TermElabM (Array (Expr × Bool × Syntax))
  | `(rwRuleSeq| [$rs,*]) => do
    rs.getElems.mapM fun rstx => do
      let r : Syntax := rstx
      -- In a `rwRule`, `r[0]` is the optional `←` and `r[1]` is the term itself.
      return (← Term.elabTerm r[1]! none, ! r[0]!.isNone, rstx)
  | _ => failure
initialize registerTraceClass `Tactic.move_oper
/-- The tactic `move_add` rearranges summands of expressions.
Calling `move_add [a, ← b, ...]` matches `a, b,...` with summands in the main goal.
It then moves `a` to the far right and `b` to the far left of each addition in which they appear.
The side to which the summands are moved is determined by the presence or absence of the arrow `←`.
The inputs `a, b,...` can be any terms, also with underscores.
The tactic uses the first "new" summand that unifies with each one of the given inputs.
There is a multiplicative variant, called `move_mul`.
There is also a general tactic for a "binary associative commutative operation": `move_oper`.
In this case the syntax requires providing first a term whose head symbol is the operation.
E.g. `move_oper HAdd.hAdd [...]` is the same as `move_add`, while `move_oper Max.max [...]`
rearranges `max`s.
-/
elab (name := moveOperTac) "move_oper" id:ident rws:rwRuleSeq : tactic => withMainContext do
  -- parse the operation
  let op := id.getId
  -- parse the list of terms
  let (instr, (unmatched, stxs), dbgMsg) ← unifyMovements op (← parseArrows rws)
    (← instantiateMVars (← getMainTarget))
  -- Report every user term that did not unify with an atom, then fail.
  unless unmatched.length = 0 do
    let _ ← stxs.mapM (logErrorAt · "") -- underline all non-matching terms
    trace[Tactic.move_oper] dbgMsg.foldl (fun x y => (x.compose y).compose "\n\n---\n") ""
    throwErrorAt stxs[0]! m!"Errors:\nThe terms in '{unmatched}' were not matched to any atom"
  -- move around the operands
  replaceMainGoal (← reorderAndSimp op (← getMainGoal) instr)
@[inherit_doc moveOperTac]
elab "move_add" rws:rwRuleSeq : tactic => do
  -- `move_add` is `move_oper` specialized to `HAdd.hAdd`.
  let hadd := mkIdent ``HAdd.hAdd
  evalTactic (← `(tactic| move_oper $hadd $rws))
@[inherit_doc moveOperTac]
elab "move_mul" rws:rwRuleSeq : tactic => do
  -- `move_mul` is `move_oper` specialized to `HMul.hMul`.
  let hmul := mkIdent ``HMul.hMul
  evalTactic (← `(tactic| move_oper $hmul $rws))
end parsing
end MoveAdd
end Mathlib |
.lake/packages/mathlib/Mathlib/Tactic/Linarith.lean | import Mathlib.Tactic.Linarith.Frontend
import Mathlib.Tactic.NormNum
import Mathlib.Tactic.Hint
/-!
We register `linarith` with the `hint` tactic.
-/
register_hint 100 linarith |
.lake/packages/mathlib/Mathlib/Tactic/Choose.lean | import Mathlib.Util.Tactic
import Mathlib.Logic.Function.Basic
/-!
# `choose` tactic
Performs Skolemization, that is, given `h : ∀ a:α, ∃ b:β, p a b |- G` produces
`f : α → β, hf: ∀ a, p a (f a) |- G`.
TODO: switch to `rcases` syntax: `choose ⟨i, j, h₁ -⟩ := expr`.
-/
open Lean Meta Elab Tactic
namespace Mathlib.Tactic.Choose
/-- Given `α : Sort u`, `nonemp : Nonempty α`, `p : α → Prop`, a context of free variables
`ctx`, and a pair of an element `val : α` and `spec : p val`,
`mk_sometimes u α nonemp p ctx (val, spec)` produces another pair `val', spec'`
such that `val'` does not have any free variables from elements of `ctx` whose types are
propositions. This is done by applying `Function.sometimes` to abstract over all the propositional
arguments. -/
def mk_sometimes (u : Level) (α nonemp p : Expr) :
    List Expr → Expr × Expr → MetaM (Expr × Expr)
  | [], (val, spec) => pure (val, spec)
  | (e :: ctx), (val, spec) => do
    -- First abstract over the rest of the context, then handle `e` itself.
    let (val, spec) ← mk_sometimes u α nonemp p ctx (val, spec)
    let t ← inferType e
    let b ← isProp t
    if b then do
      -- `e` is a proof: replace `fun e => val` by `Function.sometimes`, which is
      -- well-defined because `α` is nonempty.
      let val' ← mkLambdaFVars #[e] val
      pure
        (mkApp4 (Expr.const ``Function.sometimes [Level.zero, u]) t α nonemp val',
          mkApp7 (Expr.const ``Function.sometimes_spec [u]) t α nonemp p val' e spec)
    else pure (val, spec)
/-- Results of searching for nonempty instances,
to eliminate dependencies on propositions (`choose!`).
`success` means we found at least one instance;
`failure ts` means we didn't find instances for any `t ∈ ts`.
(`failure []` means we didn't look for instances at all.)
Rationale:
`choose!` means we are expected to succeed at least once
in eliminating dependencies on propositions.
-/
inductive ElimStatus
  /-- At least one `Nonempty` instance was found. -/
  | success
  /-- No instance was found for any of the types in `ts` (none were tried if `ts = []`). -/
  | failure (ts : List Expr)
/-- Combine two statuses: the result is `success` as soon as either side is `success`,
and otherwise it concatenates the two failure lists. -/
def ElimStatus.merge (s₁ s₂ : ElimStatus) : ElimStatus :=
  match s₁, s₂ with
  | failure ts₁, failure ts₂ => failure (ts₁ ++ ts₂)
  | _, _ => success
/-- `mkFreshNameFrom orig base` generates a fresh user name from `base` when `orig`
is the placeholder `` `_ ``, and returns `orig` unchanged otherwise. -/
def mkFreshNameFrom (orig base : Name) : CoreM Name :=
  if orig ≠ `_ then pure orig else mkFreshUserName base
/-- Changes `(h : ∀ xs, ∃ a:α, p a) ⊢ g` to `(d : ∀ xs, a) ⊢ (s : ∀ xs, p (d xs)) → g` and
`(h : ∀ xs, p xs ∧ q xs) ⊢ g` to `(d : ∀ xs, p xs) ⊢ (s : ∀ xs, q xs) → g`.
`choose1` returns a tuple of
- the error result (see `ElimStatus`)
- the data new free variable that was "chosen"
- the new goal (which contains the spec of the data as domain of an arrow type)
If `nondep` is true and `α` is inhabited, then it will remove the dependency of `d` on
all propositional assumptions in `xs`. For example if `ys` are propositions then
`(h : ∀ xs ys, ∃ a:α, p a) ⊢ g` becomes `(d : ∀ xs, a) (s : ∀ xs ys, p (d xs)) ⊢ g`. -/
def choose1 (g : MVarId) (nondep : Bool) (h : Option Expr) (data : Name) :
    MetaM (ElimStatus × Expr × MVarId) := do
  -- If no hypothesis was supplied, introduce one from the goal.
  let (g, h) ← match h with
    | some e => pure (g, e)
    | none => do
      let (e, g) ← g.intro1P
      pure (g, .fvar e)
  g.withContext do
    let h ← instantiateMVars h
    let t ← inferType h
    -- Enter the `∀ xs` telescope and inspect the head of the body.
    forallTelescopeReducing t fun ctx t ↦ do
      (← withTransparency .all (whnf t)).withApp fun
        | .const ``Exists [u], #[α, p] => do
          let data ← mkFreshNameFrom data ((← p.getBinderName).getD `h)
          -- For `choose!` (`nondep = true`), try to synthesize `Nonempty α` under the
          -- non-propositional part of the context, to later drop proof arguments.
          let ((neFail : ElimStatus), (nonemp : Option Expr)) ← if nondep then
            let ne := (Expr.const ``Nonempty [u]).app α
            let m ← mkFreshExprMVar ne
            let mut g' := m.mvarId!
            for e in ctx do
              if (← isProof e) then continue
              let ty ← whnf (← inferType e)
              let nety := (Expr.const ``Nonempty [u]).app ty
              let neval := mkApp2 (Expr.const ``Nonempty.intro [u]) ty e
              g' ← g'.assert .anonymous nety neval
            (_, g') ← g'.intros
            g'.withContext do
              match ← synthInstance? (← g'.getType) with
              | some e => do
                g'.assign e
                let m ← instantiateMVars m
                pure (.success, some m)
              | none => pure (.failure [ne], none)
          else pure (.failure [], none)
          -- With a `Nonempty` witness, the chosen data only depends on non-proof variables.
          let ctx' ← if nonemp.isSome then ctx.filterM (not <$> isProof ·) else pure ctx
          let dataTy ← mkForallFVars ctx' α
          let mut dataVal := mkApp3 (.const ``Classical.choose [u]) α p (mkAppN h ctx)
          let mut specVal := mkApp3 (.const ``Classical.choose_spec [u]) α p (mkAppN h ctx)
          if let some nonemp := nonemp then
            (dataVal, specVal) ← mk_sometimes u α nonemp p ctx.toList (dataVal, specVal)
          dataVal ← mkLambdaFVars ctx' dataVal
          specVal ← mkLambdaFVars ctx specVal
          -- Build the new goal `specTy → oldGoal` under the new local `data` and assign
          -- the old goal through it.
          let (fvar, g) ← withLocalDeclD .anonymous dataTy fun d ↦ do
            let specTy ← mkForallFVars ctx (p.app (mkAppN d ctx')).headBeta
            g.withContext <| withLocalDeclD data dataTy fun d' ↦ do
              let mvarTy ← mkArrow (specTy.replaceFVar d d') (← g.getType)
              let newMVar ← mkFreshExprSyntheticOpaqueMVar mvarTy (← g.getTag)
              g.assign <| mkApp2 (← mkLambdaFVars #[d'] newMVar) dataVal specVal
              pure (d', newMVar.mvarId!)
          -- The original hypothesis, if local, is no longer needed.
          let g ← match h with
            | .fvar v => g.clear v
            | _ => pure g
          return (neFail, fvar, g)
        | .const ``And _, #[p, q] => do
          let data ← mkFreshNameFrom data `h
          let e1 ← mkLambdaFVars ctx <| mkApp3 (.const ``And.left []) p q (mkAppN h ctx)
          let e2 ← mkLambdaFVars ctx <| mkApp3 (.const ``And.right []) p q (mkAppN h ctx)
          let t1 ← inferType e1
          let t2 ← inferType e2
          let (fvar, g) ← (← (← g.assert .anonymous t2 e2).assert data t1 e1).intro1P
          let g ← match h with
            | .fvar v => g.clear v
            | _ => pure g
          return (.success, .fvar fvar, g)
        -- TODO: support Σ, ×, or even any inductive type with 1 constructor ?
        | _, _ => throwError "expected a term of the shape `∀ xs, ∃ a, p xs a` or `∀ xs, p xs ∧ q xs`"
/-- A wrapper around `choose1` that parses identifiers and adds variable info to new variables. -/
def choose1WithInfo (g : MVarId) (nondep : Bool) (h : Option Expr) (data : TSyntax ``binderIdent) :
    MetaM (ElimStatus × MVarId) := do
  -- A `_` binder gets the placeholder name `_`.
  let n := if let `(binderIdent| $n:ident) := data then n.getId else `_
  let (status, fvar, g) ← choose1 g nondep h n
  g.withContext <| fvar.addLocalVarInfoForBinderIdent data
  pure (status, g)
/-- A loop around `choose1`. The main entry point for the `choose` tactic. -/
def elabChoose (nondep : Bool) (h : Option Expr) :
    List (TSyntax ``binderIdent) → ElimStatus → MVarId → MetaM MVarId
  | [], _, _ => throwError "expect list of variables"
  | [n], status, g =>
    match nondep, status with
    | true, .failure tys => do -- We expected some elimination, but it didn't happen.
      let mut msg := m!"choose!: failed to synthesize any nonempty instances"
      for ty in tys do
        msg := msg ++ m!"{(← mkFreshExprMVar ty).mvarId!}"
      throwError msg
    | _, _ => do
      -- The last identifier names the final remaining hypothesis, so it is plainly introduced.
      let (fvar, g) ← match n with
        | `(binderIdent| $n:ident) => g.intro n.getId
        | _ => g.intro1
      g.withContext <| (Expr.fvar fvar).addLocalVarInfoForBinderIdent n
      return g
  | n::ns, status, g => do
    let (status', g) ← choose1WithInfo g nondep h n
    -- Only the first step consumes the `using` hypothesis; later steps introduce from the goal.
    elabChoose nondep none ns (status.merge status') g
/--
* `choose a b h h' using hyp` takes a hypothesis `hyp` of the form
`∀ (x : X) (y : Y), ∃ (a : A) (b : B), P x y a b ∧ Q x y a b`
for some `P Q : X → Y → A → B → Prop` and outputs
into context a function `a : X → Y → A`, `b : X → Y → B` and two assumptions:
`h : ∀ (x : X) (y : Y), P x y (a x y) (b x y)` and
`h' : ∀ (x : X) (y : Y), Q x y (a x y) (b x y)`. It also works with dependent versions.
* `choose! a b h h' using hyp` does the same, except that it will remove dependency of
the functions on propositional arguments if possible. For example if `Y` is a proposition
and `A` and `B` are nonempty in the above example then we will instead get
`a : X → A`, `b : X → B`, and the assumptions
`h : ∀ (x : X) (y : Y), P x y (a x) (b x)` and
`h' : ∀ (x : X) (y : Y), Q x y (a x) (b x)`.
The `using hyp` part can be omitted,
which will effectively cause `choose` to start with an `intro hyp`.
Examples:
```
example (h : ∀ n m : ℕ, ∃ i j, m = n + i ∨ m + j = n) : True := by
choose i j h using h
guard_hyp i : ℕ → ℕ → ℕ
guard_hyp j : ℕ → ℕ → ℕ
guard_hyp h : ∀ (n m : ℕ), m = n + i n m ∨ m + j n m = n
trivial
```
```
example (h : ∀ i : ℕ, i < 7 → ∃ j, i < j ∧ j < i+i) : True := by
choose! f h h' using h
guard_hyp f : ℕ → ℕ
guard_hyp h : ∀ (i : ℕ), i < 7 → i < f i
guard_hyp h' : ∀ (i : ℕ), i < 7 → f i < i + i
trivial
```
-/
syntax (name := choose) "choose" "!"? (ppSpace colGt binderIdent)+ (" using " term)? : tactic
elab_rules : tactic
  | `(tactic| choose $[!%$b]? $[$ids]* $[using $h]?) => withMainContext do
    -- Elaborate the optional `using` term in the context of the main goal.
    let h ← h.mapM (Elab.Tactic.elabTerm · none)
    let g ← elabChoose b.isSome h ids.toList (.failure []) (← getMainGoal)
    replaceMainGoal [g]
@[inherit_doc choose]
syntax "choose!" (ppSpace colGt binderIdent)+ (" using " term)? : tactic

-- `choose!` is sugar: rewrite to `choose !` and reuse the elaborator above.
macro_rules
  | `(tactic| choose! $[$ids]* $[using $h]?) => `(tactic| choose ! $[$ids]* $[using $h]?)
end Mathlib.Tactic.Choose |
.lake/packages/mathlib/Mathlib/Tactic/LinearCombination/Lemmas.lean | import Mathlib.Algebra.Field.Defs
import Mathlib.Algebra.Order.Module.Defs
import Mathlib.Data.Ineq
/-!
# Lemmas for the linear_combination tactic
These should not be used directly in user code.
-/
open Lean

namespace Mathlib.Tactic.LinearCombination

-- Variables of the (in)equalities being combined ...
variable {α : Type*} {a a' a₁ a₂ b b' b₁ b₂ c : α}
-- ... and the scalar type used for `•`.
variable {K : Type*} {t s : K}

/-! ### Addition -/

/-- Add two equalities side by side. -/
theorem add_eq_eq [Add α] (p₁ : (a₁ : α) = b₁) (p₂ : a₂ = b₂) : a₁ + a₂ = b₁ + b₂ := p₁ ▸ p₂ ▸ rfl

/-- Add a `≤` and an `=` side by side. -/
theorem add_le_eq [AddCommMonoid α] [PartialOrder α] [IsOrderedAddMonoid α]
    (p₁ : (a₁ : α) ≤ b₁) (p₂ : a₂ = b₂) : a₁ + a₂ ≤ b₁ + b₂ :=
  p₂ ▸ add_le_add_right p₁ b₂

/-- Add an `=` and a `≤` side by side. -/
theorem add_eq_le [AddCommMonoid α] [PartialOrder α] [IsOrderedAddMonoid α]
    (p₁ : (a₁ : α) = b₁) (p₂ : a₂ ≤ b₂) : a₁ + a₂ ≤ b₁ + b₂ :=
  p₁ ▸ add_le_add_left p₂ b₁

/-- Add a `<` and an `=` side by side. -/
theorem add_lt_eq [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p₁ : (a₁ : α) < b₁) (p₂ : a₂ = b₂) : a₁ + a₂ < b₁ + b₂ :=
  p₂ ▸ add_lt_add_right p₁ b₂

/-- Add an `=` and a `<` side by side. -/
theorem add_eq_lt [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α] {a₁ b₁ a₂ b₂ : α}
    (p₁ : a₁ = b₁) (p₂ : a₂ < b₂) : a₁ + a₂ < b₁ + b₂ :=
  p₁ ▸ add_lt_add_left p₂ b₁
/-! ### Multiplication -/

/-- Multiply an equality on the right by a constant. -/
theorem mul_eq_const [Mul α] (p : a = b) (c : α) : a * c = b * c := p ▸ rfl

/-- Multiply a `≤` on the right by a nonnegative constant. -/
theorem mul_le_const [Semiring α] [PartialOrder α] [IsOrderedRing α]
    (p : b ≤ c) {a : α} (ha : 0 ≤ a) :
    b * a ≤ c * a :=
  mul_le_mul_of_nonneg_right p ha

/-- Multiply a `<` on the right by a positive constant. -/
theorem mul_lt_const [Semiring α] [PartialOrder α] [IsStrictOrderedRing α]
    (p : b < c) {a : α} (ha : 0 < a) :
    b * a < c * a :=
  mul_lt_mul_of_pos_right p ha

/-- Weak version of `mul_lt_const`: with only `0 ≤ a` the conclusion weakens to `≤`. -/
theorem mul_lt_const_weak [Semiring α] [PartialOrder α] [IsOrderedRing α]
    (p : b < c) {a : α} (ha : 0 ≤ a) :
    b * a ≤ c * a :=
  mul_le_mul_of_nonneg_right p.le ha

/-- Multiply an equality on the left by a constant. -/
theorem mul_const_eq [Mul α] (p : b = c) (a : α) : a * b = a * c := p ▸ rfl

/-- Multiply a `≤` on the left by a nonnegative constant. -/
theorem mul_const_le [Semiring α] [PartialOrder α] [IsOrderedRing α]
    (p : b ≤ c) {a : α} (ha : 0 ≤ a) :
    a * b ≤ a * c :=
  mul_le_mul_of_nonneg_left p ha

/-- Multiply a `<` on the left by a positive constant. -/
theorem mul_const_lt [Semiring α] [PartialOrder α] [IsStrictOrderedRing α]
    (p : b < c) {a : α} (ha : 0 < a) :
    a * b < a * c :=
  mul_lt_mul_of_pos_left p ha

/-- Weak version of `mul_const_lt`: with only `0 ≤ a` the conclusion weakens to `≤`. -/
theorem mul_const_lt_weak [Semiring α] [PartialOrder α] [IsOrderedRing α]
    (p : b < c) {a : α} (ha : 0 ≤ a) :
    a * b ≤ a * c :=
  mul_le_mul_of_nonneg_left p.le ha
/-! ### Scalar multiplication -/

/-- Scalar-multiply an equality of scalars on the right by a constant vector. -/
theorem smul_eq_const [SMul K α] (p : t = s) (c : α) : t • c = s • c := p ▸ rfl

/-- Scalar-multiply a `≤` of scalars on the right by a nonnegative vector. -/
theorem smul_le_const [Ring K] [PartialOrder K] [IsOrderedRing K]
    [AddCommGroup α] [PartialOrder α] [IsOrderedAddMonoid α] [Module K α]
    [IsOrderedModule K α] (p : t ≤ s) {a : α} (ha : 0 ≤ a) :
    t • a ≤ s • a :=
  smul_le_smul_of_nonneg_right p ha

/-- Scalar-multiply a `<` of scalars on the right by a positive vector. -/
theorem smul_lt_const [Ring K] [PartialOrder K] [IsOrderedRing K]
    [AddCommGroup α] [PartialOrder α] [IsOrderedAddMonoid α] [Module K α]
    [IsStrictOrderedModule K α] (p : t < s) {a : α} (ha : 0 < a) :
    t • a < s • a :=
  smul_lt_smul_of_pos_right p ha

/-- Weak version of `smul_lt_const`: with only `0 ≤ a` the conclusion weakens to `≤`. -/
theorem smul_lt_const_weak [Ring K] [PartialOrder K] [IsOrderedRing K]
    [AddCommGroup α] [PartialOrder α] [IsOrderedAddMonoid α] [Module K α]
    [IsStrictOrderedModule K α] (p : t < s) {a : α} (ha : 0 ≤ a) :
    t • a ≤ s • a :=
  smul_le_smul_of_nonneg_right p.le ha

/-- Scalar-multiply an equality of vectors on the left by a constant scalar. -/
theorem smul_const_eq [SMul K α] (p : b = c) (s : K) : s • b = s • c := p ▸ rfl

/-- Scalar-multiply a `≤` of vectors on the left by a nonnegative scalar. -/
theorem smul_const_le [Semiring K] [PartialOrder K]
    [AddCommMonoid α] [PartialOrder α] [Module K α]
    [PosSMulMono K α] (p : b ≤ c) {s : K} (hs : 0 ≤ s) :
    s • b ≤ s • c :=
  smul_le_smul_of_nonneg_left p hs

/-- Scalar-multiply a `<` of vectors on the left by a positive scalar. -/
theorem smul_const_lt [Semiring K] [PartialOrder K]
    [AddCommMonoid α] [PartialOrder α] [Module K α]
    [PosSMulStrictMono K α] (p : b < c) {s : K} (hs : 0 < s) :
    s • b < s • c :=
  smul_lt_smul_of_pos_left p hs

/-- Weak version of `smul_const_lt`: with only `0 ≤ s` the conclusion weakens to `≤`. -/
theorem smul_const_lt_weak [Semiring K] [PartialOrder K]
    [AddCommMonoid α] [PartialOrder α] [Module K α]
    [PosSMulMono K α] (p : b < c) {s : K} (hs : 0 ≤ s) :
    s • b ≤ s • c :=
  smul_le_smul_of_nonneg_left p.le hs
/-! ### Division -/

/-- Divide an equality by a constant. -/
theorem div_eq_const [Div α] (p : a = b) (c : α) : a / c = b / c := p ▸ rfl

/-- Divide a `≤` by a nonnegative constant. -/
theorem div_le_const [Semifield α] [LinearOrder α] [IsStrictOrderedRing α]
    (p : b ≤ c) {a : α} (ha : 0 ≤ a) : b / a ≤ c / a :=
  div_le_div_of_nonneg_right p ha

/-- Divide a `<` by a positive constant. -/
theorem div_lt_const [Semifield α] [LinearOrder α] [IsStrictOrderedRing α]
    (p : b < c) {a : α} (ha : 0 < a) : b / a < c / a :=
  div_lt_div_of_pos_right p ha

/-- Weak version of `div_lt_const`: with only `0 ≤ a` the conclusion weakens to `≤`. -/
theorem div_lt_const_weak [Semifield α] [LinearOrder α] [IsStrictOrderedRing α]
    (p : b < c) {a : α} (ha : 0 ≤ a) :
    b / a ≤ c / a :=
  div_le_div_of_nonneg_right p.le ha
/-! ### Lemmas constructing the reduction of a goal to a specified built-up hypothesis -/

/-- Reduce the goal `a' = b'` to the hypothesis `p : a = b` via `H : a' + b = b' + a`. -/
theorem eq_of_eq [Add α] [IsRightCancelAdd α] (p : (a : α) = b) (H : a' + b = b' + a) :
    a' = b' := by
  rw [p] at H
  exact add_right_cancel H

/-- Reduce the goal `a' ≤ b'` to the hypothesis `p : a ≤ b` via `H : a' + b ≤ b' + a`. -/
theorem le_of_le [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p : (a : α) ≤ b) (H : a' + b ≤ b' + a) :
    a' ≤ b' := by
  grw [← add_le_add_iff_right b, H, p]

/-- Reduce the goal `a' ≤ b'` to the hypothesis `p : a = b` via `H : a' + b ≤ b' + a`. -/
theorem le_of_eq [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p : (a : α) = b) (H : a' + b ≤ b' + a) :
    a' ≤ b' := by
  rwa [p, add_le_add_iff_right] at H

/-- Reduce the goal `a' ≤ b'` to the hypothesis `p : a < b` via `H : a' + b ≤ b' + a`. -/
theorem le_of_lt [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p : (a : α) < b) (H : a' + b ≤ b' + a) :
    a' ≤ b' :=
  le_of_le p.le H

/-- Reduce the goal `a' < b'` to the hypothesis `p : a ≤ b` via `H : a' + b < b' + a`. -/
theorem lt_of_le [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p : (a : α) ≤ b) (H : a' + b < b' + a) :
    a' < b' := by
  grw [p] at H; simpa using H

/-- Reduce the goal `a' < b'` to the hypothesis `p : a = b` via `H : a' + b < b' + a`. -/
theorem lt_of_eq [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p : (a : α) = b) (H : a' + b < b' + a) :
    a' < b' := by
  rwa [p, add_lt_add_iff_right] at H

/-- Reduce the goal `a' < b'` to the hypothesis `p : a < b` via the non-strict
side condition `H : a' + b ≤ b' + a`. -/
theorem lt_of_lt [AddCommMonoid α] [PartialOrder α] [IsOrderedCancelAddMonoid α]
    (p : (a : α) < b) (H : a' + b ≤ b' + a) :
    a' < b' := by
  grw [← add_lt_add_iff_right b, H]
  gcongr

-- `eq_rearrange : a - b = 0 → a = b`, the forward direction of `sub_eq_zero`.
alias ⟨eq_rearrange, _⟩ := sub_eq_zero

/-- Move everything in a `≤` goal to the left-hand side. -/
theorem le_rearrange {α : Type*} [AddCommGroup α] [PartialOrder α] [IsOrderedAddMonoid α]
    {a b : α} (h : a - b ≤ 0) : a ≤ b :=
  sub_nonpos.mp h

/-- Move everything in a `<` goal to the left-hand side. -/
theorem lt_rearrange {α : Type*} [AddCommGroup α] [PartialOrder α] [IsOrderedAddMonoid α]
    {a b : α} (h : a - b < 0) : a < b :=
  sub_neg.mp h

/-- Reduce the goal `a' = b'` to `p : a = b` given that `(a' - b') ^ n` differs from
`a - b` by zero: a vanishing power forces the base to vanish (no zero divisors). -/
theorem eq_of_add_pow [Ring α] [NoZeroDivisors α] (n : ℕ) (p : (a : α) = b)
    (H : (a' - b') ^ n - (a - b) = 0) : a' = b' := by
  rw [← sub_eq_zero] at p ⊢; apply eq_zero_of_pow_eq_zero (n := n); rwa [sub_eq_zero, p] at H

end Tactic.LinearCombination

/-! ### Lookup functions for lemmas by operation and relation(s) -/

open Tactic.LinearCombination

-- Note: still inside the `Mathlib` namespace here, so this opens `Mathlib.Ineq`.
namespace Ineq
/-- Given two (in)equalities, look up the lemma to add them side by side.
The resulting relation is the weaker of the two (with `<` counting as strongest). -/
def addRelRelData (r₁ r₂ : Ineq) : Name :=
  match r₁, r₂ with
  | eq, eq => ``add_eq_eq
  | le, eq => ``add_le_eq
  | lt, eq => ``add_lt_eq
  | eq, le => ``add_eq_le
  | le, le => ``add_le_add
  | lt, le => ``add_lt_add_of_lt_of_le
  | eq, lt => ``add_eq_lt
  | le, lt => ``add_lt_add_of_le_of_lt
  | lt, lt => ``add_lt_add
/-- Finite inductive type extending `Mathlib.Ineq`: a type of inequality (`eq`, `le` or `lt`),
together with, in the case of `lt`, a Boolean, typically representing the strictness (< or ≤) of
some other inequality. -/
protected inductive WithStrictness : Type
  /-- An equality. -/
  | eq : Ineq.WithStrictness
  /-- A non-strict inequality. -/
  | le : Ineq.WithStrictness
  /-- A strict inequality, tagged with the strictness of some other inequality. -/
  | lt (strict : Bool) : Ineq.WithStrictness

/-- Given an (in)equality, look up the lemma to left-multiply it by a constant. If relevant, also
take into account the degree of positivity which can be proved of the constant: strict or
non-strict. -/
def mulRelConstData : Ineq.WithStrictness → Name
  | .eq => ``mul_eq_const
  | .le => ``mul_le_const
  | .lt true => ``mul_lt_const
  | .lt false => ``mul_lt_const_weak

/-- Given an (in)equality, look up the lemma to right-multiply it by a constant. If relevant, also
take into account the degree of positivity which can be proved of the constant: strict or
non-strict. -/
def mulConstRelData : Ineq.WithStrictness → Name
  | .eq => ``mul_const_eq
  | .le => ``mul_const_le
  | .lt true => ``mul_const_lt
  | .lt false => ``mul_const_lt_weak

/-- Given an (in)equality, look up the lemma to left-scalar-multiply it by a constant (scalar).
If relevant, also take into account the degree of positivity which can be proved of the constant:
strict or non-strict. -/
def smulRelConstData : Ineq.WithStrictness → Name
  | .eq => ``smul_eq_const
  | .le => ``smul_le_const
  | .lt true => ``smul_lt_const
  | .lt false => ``smul_lt_const_weak

/-- Given an (in)equality, look up the lemma to right-scalar-multiply it by a constant (vector).
If relevant, also take into account the degree of positivity which can be proved of the constant:
strict or non-strict. -/
def smulConstRelData : Ineq.WithStrictness → Name
  | .eq => ``smul_const_eq
  | .le => ``smul_const_le
  | .lt true => ``smul_const_lt
  | .lt false => ``smul_const_lt_weak

/-- Given an (in)equality, look up the lemma to divide it by a constant. If relevant, also take
into account the degree of positivity which can be proved of the constant: strict or non-strict. -/
def divRelConstData : Ineq.WithStrictness → Name
  | .eq => ``div_eq_const
  | .le => ``div_le_const
  | .lt true => ``div_lt_const
  | .lt false => ``div_lt_const_weak
/-- Given two (in)equalities `P` and `Q`, look up the lemma to deduce `Q` from `P`, and the relation
appearing in the side condition produced by this lemma. Returns `none` when the deduction is
impossible (an inequality can never prove an equality). -/
def relImpRelData : Ineq → Ineq → Option (Name × Ineq)
  | eq, eq => some (``eq_of_eq, eq)
  | eq, le => some (``Tactic.LinearCombination.le_of_eq, le)
  | eq, lt => some (``lt_of_eq, lt)
  | le, eq => none
  | le, le => some (``le_of_le, le)
  | le, lt => some (``lt_of_le, lt)
  | lt, eq => none
  | lt, le => some (``Tactic.LinearCombination.le_of_lt, le)
  -- Note: `lt_of_lt` takes a non-strict (`≤`) side condition, hence `le` here, not `lt`.
  | lt, lt => some (``lt_of_lt, le)

/-- Given an (in)equality, look up the lemma to move everything to the LHS. -/
def rearrangeData : Ineq → Name
  | eq => ``eq_rearrange
  | le => ``le_rearrange
  | lt => ``lt_rearrange
end Mathlib.Ineq |
.lake/packages/mathlib/Mathlib/Tactic/CancelDenoms/Core.lean | import Mathlib.Algebra.Field.Basic
import Mathlib.Algebra.Order.Ring.Defs
import Mathlib.Data.Tree.Basic
import Mathlib.Logic.Basic
import Mathlib.Tactic.NormNum.Core
import Mathlib.Util.SynthesizeUsing
import Mathlib.Util.Qq
/-!
# A tactic for canceling numeric denominators
This file defines tactics that cancel numeric denominators from field Expressions.
As an example, we want to transform a comparison `5*(a/3 + b/4) < c/3` into the equivalent
`5*(4*a + 3*b) < 4*c`.
## Implementation notes
The tooling here was originally written for `linarith`, not intended as an interactive tactic.
The interactive version has been split off because it is sometimes convenient to use on its own.
There are likely some rough edges to it.
Improving this tactic would be a good project for someone interested in learning tactic programming.
-/
open Lean Parser Tactic Mathlib Meta NormNum Qq

initialize registerTraceClass `CancelDenoms

namespace CancelDenoms

/-! ### Lemmas used in the procedure -/

/-- Distribute a cancellation factor `k = n1 * n2` over a product `e1 * e2`. -/
theorem mul_subst {α} [CommRing α] {n1 n2 k e1 e2 t1 t2 : α}
    (h1 : n1 * e1 = t1) (h2 : n2 * e2 = t2) (h3 : n1 * n2 = k) : k * (e1 * e2) = t1 * t2 := by
  rw [← h3, mul_comm n1, mul_assoc n2, ← mul_assoc n1, h1,
    ← mul_assoc n2, mul_comm n2, mul_assoc, h2]

/-- Distribute a cancellation factor over a division, where `h2 : n2 / e2 = 1`
witnesses that `n2` cancels the denominator exactly. -/
theorem div_subst {α} [Field α] {n1 n2 k e1 e2 t1 : α}
    (h1 : n1 * e1 = t1) (h2 : n2 / e2 = 1) (h3 : n1 * n2 = k) : k * (e1 / e2) = t1 := by
  rw [← h3, mul_assoc, mul_div_left_comm, h2, ← mul_assoc, h1, mul_comm, one_mul]

/-- Recover `e = e' / n` from `n * e = e'` when `n ≠ 0`. -/
theorem cancel_factors_eq_div {α} [Field α] {n e e' : α}
    (h : n * e = e') (h2 : n ≠ 0) : e = e' / n :=
  eq_div_of_mul_eq h2 <| by rwa [mul_comm] at h

/-- Distribute a cancellation factor over a sum. -/
theorem add_subst {α} [Ring α] {n e1 e2 t1 t2 : α} (h1 : n * e1 = t1) (h2 : n * e2 = t2) :
    n * (e1 + e2) = t1 + t2 := by simp [left_distrib, *]

/-- Distribute a cancellation factor over a difference. -/
theorem sub_subst {α} [Ring α] {n e1 e2 t1 t2 : α} (h1 : n * e1 = t1) (h2 : n * e2 = t2) :
    n * (e1 - e2) = t1 - t2 := by simp [left_distrib, *, sub_eq_add_neg]

/-- Distribute a cancellation factor over a negation. -/
theorem neg_subst {α} [Ring α] {n e t : α} (h1 : n * e = t) : n * -e = -t := by simp [*]

/-- Distribute a cancellation factor over a power, where `h2 : l * n ^ e2 = k` splits the
factor into the part consumed by the base and the leftover `l`. -/
theorem pow_subst {α} [CommRing α] {n e1 t1 k l : α} {e2 : ℕ}
    (h1 : n * e1 = t1) (h2 : l * n ^ e2 = k) : k * (e1 ^ e2) = l * t1 ^ e2 := by
  rw [← h2, ← h1, mul_pow, mul_assoc]

/-- Cancel a numeral inverse: from `n * e = k` and `e ≠ 0`, conclude `k * e⁻¹ = n`. -/
theorem inv_subst {α} [Field α] {n k e : α} (h2 : e ≠ 0) (h3 : n * e = k) :
    k * (e ⁻¹) = n := by rw [← div_eq_mul_inv, ← h3, mul_div_cancel_right₀ _ h2]

/-- Rewrite `a < b` as the denominator-free comparison of `a' = ad * a` and `b' = bd * b`,
scaled down by `1 / gcd`. -/
theorem cancel_factors_lt {α} [Field α] [LinearOrder α] [IsStrictOrderedRing α]
    {a b ad bd a' b' gcd : α}
    (ha : ad * a = a') (hb : bd * b = b') (had : 0 < ad) (hbd : 0 < bd) (hgcd : 0 < gcd) :
    (a < b) = (1 / gcd * (bd * a') < 1 / gcd * (ad * b')) := by
  rw [mul_lt_mul_iff_right₀, ← ha, ← hb, ← mul_assoc, ← mul_assoc, mul_comm bd,
    mul_lt_mul_iff_right₀]
  · exact mul_pos had hbd
  · exact one_div_pos.2 hgcd

/-- `≤` analogue of `cancel_factors_lt`. -/
theorem cancel_factors_le {α} [Field α] [LinearOrder α] [IsStrictOrderedRing α]
    {a b ad bd a' b' gcd : α}
    (ha : ad * a = a') (hb : bd * b = b') (had : 0 < ad) (hbd : 0 < bd) (hgcd : 0 < gcd) :
    (a ≤ b) = (1 / gcd * (bd * a') ≤ 1 / gcd * (ad * b')) := by
  rw [mul_le_mul_iff_right₀, ← ha, ← hb, ← mul_assoc, ← mul_assoc, mul_comm bd,
    mul_le_mul_iff_right₀]
  · exact mul_pos had hbd
  · exact one_div_pos.2 hgcd

/-- `=` analogue of `cancel_factors_lt`; only nonzeroness (not positivity) is needed. -/
theorem cancel_factors_eq {α} [Field α] {a b ad bd a' b' gcd : α} (ha : ad * a = a')
    (hb : bd * b = b') (had : ad ≠ 0) (hbd : bd ≠ 0) (hgcd : gcd ≠ 0) :
    (a = b) = (1 / gcd * (bd * a') = 1 / gcd * (ad * b')) := by
  grind

/-- `≠` analogue of `cancel_factors_eq`. -/
theorem cancel_factors_ne {α} [Field α] {a b ad bd a' b' gcd : α} (ha : ad * a = a')
    (hb : bd * b = b') (had : ad ≠ 0) (hbd : bd ≠ 0) (hgcd : gcd ≠ 0) :
    (a ≠ b) = (1 / gcd * (bd * a') ≠ 1 / gcd * (ad * b')) := by
  classical
  rw [eq_iff_iff, not_iff_not, cancel_factors_eq ha hb had hbd hgcd]
/-! ### Computing cancellation factors -/

/--
`findCancelFactor e` produces a natural number `n`, such that multiplying `e` by `n` will
be able to cancel all the numeric denominators in `e`. The returned `Tree` describes how to
distribute the value `n` over products inside `e`.
-/
partial def findCancelFactor (e : Expr) : ℕ × Tree ℕ :=
  match e.getAppFnArgs with
  | (``HAdd.hAdd, #[_, _, _, _, e1, e2]) | (``HSub.hSub, #[_, _, _, _, e1, e2]) =>
    -- Sums/differences: the lcm of the children's factors cancels both sides.
    let (v1, t1) := findCancelFactor e1
    let (v2, t2) := findCancelFactor e2
    let lcm := v1.lcm v2
    (lcm, .node lcm t1 t2)
  | (``HMul.hMul, #[_, _, _, _, e1, e2]) =>
    -- Products: the factors multiply; each child later consumes its own share.
    let (v1, t1) := findCancelFactor e1
    let (v2, t2) := findCancelFactor e2
    let pd := v1 * v2
    (pd, .node pd t1 t2)
  | (``HDiv.hDiv, #[_, _, _, _, e1, e2]) =>
    -- If e2 is a rational, then it's a natural number due to the simp lemmas in `deriveThms`.
    match e2.nat? with
    | some q =>
      let (v1, t1) := findCancelFactor e1
      let n := v1 * q
      (n, .node n t1 <| .node q .nil .nil)
    | none => (1, .node 1 .nil .nil)
  | (``Neg.neg, #[_, _, e]) => findCancelFactor e
  | (``HPow.hPow, #[_, ℕ, _, _, e1, e2]) =>
    -- Powers with a numeral exponent `k` need the base's factor raised to `k`.
    match e2.nat? with
    | some k =>
      let (v1, t1) := findCancelFactor e1
      let n := v1 ^ k
      (n, .node n t1 <| .node k .nil .nil)
    | none => (1, .node 1 .nil .nil)
  | (``Inv.inv, #[_, _, e]) =>
    -- A numeral inverse `q⁻¹` is cancelled by multiplying by `q` itself.
    match e.nat? with
    | some q => (q, .node q .nil <| .node q .nil .nil)
    | none => (1, .node 1 .nil .nil)
  | _ => (1, .node 1 .nil .nil)
/-- Attempt to prove the proposition `type` by `norm_num`, throwing a descriptive
error on failure. -/
def synthesizeUsingNormNum (type : Q(Prop)) : MetaM Q($type) := do
  try
    synthesizeUsingTactic' type (← `(tactic| norm_num))
  catch e =>
    throwError "Could not prove {type} using norm_num. {e.toMessageData}"

/-- `CancelResult mα e v'` provides a value for `v * e` where the denominators have been cancelled.
-/
structure CancelResult {u : Level} {α : Q(Type u)} (mα : Q(Mul $α)) (e : Q($α)) (v : Q($α)) where
  /-- An expression with denominators cancelled. -/
  cancelled : Q($α)
  /-- The proof that `cancelled` is valid. -/
  pf : Q($v * $e = $cancelled)
/--
`mkProdPrf α sα v v' tr e` produces a proof of `v'*e = e'`, where numeric denominators have been
canceled in `e'`, distributing `v` proportionally according to the tree `tr` computed
by `findCancelFactor`.

The `v'` argument is a numeral expression corresponding to `v`, which we need in order to state
the return type accurately.
-/
partial def mkProdPrf {u : Level} (α : Q(Type u)) (sα : Q(Field $α)) (v : ℕ) (v' : Q($α))
    (t : Tree ℕ) (e : Q($α)) : MetaM (CancelResult q(inferInstance) e v') := do
  let amwo : Q(AddMonoidWithOne $α) := q(inferInstance)
  trace[CancelDenoms] "mkProdPrf {e} {v}"
  match t, e with
  | .node _ lhs rhs, ~q($e1 + $e2) => do
    -- Sums: distribute the same factor to both summands.
    let ⟨v1, hv1⟩ ← mkProdPrf α sα v v' lhs e1
    let ⟨v2, hv2⟩ ← mkProdPrf α sα v v' rhs e2
    return ⟨q($v1 + $v2), q(CancelDenoms.add_subst $hv1 $hv2)⟩
  | .node _ lhs rhs, ~q($e1 - $e2) => do
    -- Differences: same treatment as sums.
    let ⟨v1, hv1⟩ ← mkProdPrf α sα v v' lhs e1
    let ⟨v2, hv2⟩ ← mkProdPrf α sα v v' rhs e2
    return ⟨q($v1 - $v2), q(CancelDenoms.sub_subst $hv1 $hv2)⟩
  | .node _ lhs@(.node ln _ _) rhs, ~q($e1 * $e2) => do
    trace[CancelDenoms] "recursing into mul"
    -- Products: the left factor takes `ln`, the right takes the remainder `v / ln`.
    have ln' := (← mkOfNat α amwo <| mkRawNatLit ln).1
    have vln' := (← mkOfNat α amwo <| mkRawNatLit (v/ln)).1
    let ⟨v1, hv1⟩ ← mkProdPrf α sα ln ln' lhs e1
    let ⟨v2, hv2⟩ ← mkProdPrf α sα (v / ln) vln' rhs e2
    let npf ← synthesizeUsingNormNum q($ln' * $vln' = $v')
    return ⟨q($v1 * $v2), q(CancelDenoms.mul_subst $hv1 $hv2 $npf)⟩
  | .node _ lhs (.node rn _ _), ~q($e1 / $e2) => do
    -- Invariant: e2 is equal to the natural number rn
    have rn' := (← mkOfNat α amwo <| mkRawNatLit rn).1
    have vrn' := (← mkOfNat α amwo <| mkRawNatLit <| v / rn).1
    let ⟨v1, hv1⟩ ← mkProdPrf α sα (v / rn) vrn' lhs e1
    let npf ← synthesizeUsingNormNum q($rn' / $e2 = 1)
    let npf2 ← synthesizeUsingNormNum q($vrn' * $rn' = $v')
    return ⟨q($v1), q(CancelDenoms.div_subst $hv1 $npf $npf2)⟩
  | t, ~q(-$e) => do
    -- Negation: push the factor under the minus sign.
    let ⟨v, hv⟩ ← mkProdPrf α sα v v' t e
    return ⟨q(-$v), q(CancelDenoms.neg_subst $hv)⟩
  | .node _ lhs@(.node k1 _ _) (.node k2 .nil .nil), ~q($e1 ^ $e2) => do
    -- Powers: the base consumes `k1` per copy; the leftover is `l = v / k1 ^ k2`.
    have k1' := (← mkOfNat α amwo <| mkRawNatLit k1).1
    let ⟨v1, hv1⟩ ← mkProdPrf α sα k1 k1' lhs e1
    have l : ℕ := v / (k1 ^ k2)
    have l' := (← mkOfNat α amwo <| mkRawNatLit l).1
    let npf ← synthesizeUsingNormNum q($l' * $k1' ^ $e2 = $v')
    return ⟨q($l' * $v1 ^ $e2), q(CancelDenoms.pow_subst $hv1 $npf)⟩
  | .node _ .nil (.node rn _ _), ~q($ei ⁻¹) => do
    -- Numeral inverses: cancel `rn⁻¹` against `rn` itself.
    have rn' := (← mkOfNat α amwo <| mkRawNatLit rn).1
    have vrn' := (← mkOfNat α amwo <| mkRawNatLit <| v / rn).1
    have _ : $rn' =Q $ei := ⟨⟩
    let npf ← synthesizeUsingNormNum q($rn' ≠ 0)
    let npf2 ← synthesizeUsingNormNum q($vrn' * $rn' = $v')
    return ⟨q($vrn'), q(CancelDenoms.inv_subst $npf $npf2)⟩
  | _, _ => do
    -- Fallback: no structure to exploit, leave `v' * e` unchanged (proof by `rfl`).
    return ⟨q($v' * $e), q(rfl)⟩
/-- Theorems to get expression into a form that `findCancelFactor` and `mkProdPrf`
can more easily handle. These are important for dividing by rationals and negative integers. -/
def deriveThms : List Name :=
  [``div_div_eq_mul_div, ``div_neg]

/-- Helper lemma to chain together a `simp` proof and the result of `mkProdPrf`. -/
theorem derive_trans {α} [Mul α] {a b c d : α} (h : a = b) (h' : c * b = d) : c * a = d := h ▸ h'

/-- Helper lemma to chain together two `simp` proofs and the result of `mkProdPrf`. -/
theorem derive_trans₂ {α} [Mul α] {a b c d e : α} (h : a = b) (h' : b = c) (h'' : d * c = e) :
    d * a = e := h ▸ h' ▸ h''

/--
Given `e`, a term with rational division, produces a natural number `n` and a proof of `n*e = e'`,
where `e'` has no division. Assumes "well-behaved" division.
-/
def derive (e : Expr) : MetaM (ℕ × Expr) := do
  trace[CancelDenoms] "e = {e}"
  -- Normalize with `deriveThms` and `norm_num` so denominators appear as numerals.
  let eSimp ← simpOnlyNames (config := Simp.neutralConfig) deriveThms e
  trace[CancelDenoms] "e simplified = {eSimp.expr}"
  let eSimpNormNum ← Mathlib.Meta.NormNum.deriveSimp (← Simp.mkContext) false eSimp.expr
  trace[CancelDenoms] "e norm_num'd = {eSimpNormNum.expr}"
  let (n, t) := findCancelFactor eSimpNormNum.expr
  let ⟨u, tp, e⟩ ← inferTypeQ' eSimpNormNum.expr
  let stp : Q(Field $tp) ← synthInstanceQ q(Field $tp)
  try
    have n' := (← mkOfNat tp q(inferInstance) <| mkRawNatLit <| n).1
    let r ← mkProdPrf tp stp n n' t e
    trace[CancelDenoms] "pf : {← inferType r.pf}"
    -- Chain the simp-normalization proofs (if any) with the product proof.
    let pf' ←
      match eSimp.proof?, eSimpNormNum.proof? with
      | some pfSimp, some pfSimp' => mkAppM ``derive_trans₂ #[pfSimp, pfSimp', r.pf]
      | some pfSimp, none | none, some pfSimp => mkAppM ``derive_trans #[pfSimp, r.pf]
      | none, none => pure r.pf
    return (n, pf')
  catch E => do
    throwError "CancelDenoms.derive failed to normalize {e}.\n{E.toMessageData}"
/--
`findCompLemma e` arranges `e` in the form `lhs R rhs`, where `R ∈ {<, ≤, =, ≠}`, and returns
`lhs`, `rhs`, the `cancel_factors` lemma corresponding to `R`, and a Boolean indicating whether
`R` involves the order (i.e. `<` and `≤`) or not (i.e. `=` and `≠`).

In the case of `LT`, `LE`, `GE`, and `GT` an order on the type is needed, in the last case
it is not, the final component of the return value tracks this.
-/
def findCompLemma (e : Expr) : MetaM (Option (Expr × Expr × Name × Bool)) := do
  match (← whnfR e).getAppFnArgs with
  | (``LT.lt, #[_, _, a, b]) => return (a, b, ``cancel_factors_lt, true)
  | (``LE.le, #[_, _, a, b]) => return (a, b, ``cancel_factors_le, true)
  | (``Eq, #[_, a, b]) => return (a, b, ``cancel_factors_eq, false)
  -- `a ≠ b` reduces to `¬ a = b` under `whnf`
  | (``Not, #[p]) => match (← whnfR p).getAppFnArgs with
    | (``Eq, #[_, a, b]) => return (a, b, ``cancel_factors_ne, false)
    | _ => return none
  -- `≥` and `>` are handled by swapping the two sides.
  | (``GE.ge, #[_, _, a, b]) => return (b, a, ``cancel_factors_le, true)
  | (``GT.gt, #[_, _, a, b]) => return (b, a, ``cancel_factors_lt, true)
  | _ => return none
/--
`cancelDenominatorsInType h` assumes that `h` is of the form `lhs R rhs`,
where `R ∈ {<, ≤, =, ≠, ≥, >}`.
It produces an Expression `h'` of the form `lhs' R rhs'` and a proof that `h = h'`.
Numeric denominators have been canceled in `lhs'` and `rhs'`.
-/
def cancelDenominatorsInType (h : Expr) : MetaM (Expr × Expr) := do
  let some (lhs, rhs, lem, ord) ← findCompLemma h | throwError m!"cannot kill factors"
  -- Cancel denominators on each side independently.
  let (al, lhs_p) ← derive lhs
  let ⟨u, α, _⟩ ← inferTypeQ' lhs
  let amwo ← synthInstanceQ q(AddMonoidWithOne $α)
  let (ar, rhs_p) ← derive rhs
  let gcd := al.gcd ar
  have al := (← mkOfNat α amwo <| mkRawNatLit al).1
  have ar := (← mkOfNat α amwo <| mkRawNatLit ar).1
  have gcd := (← mkOfNat α amwo <| mkRawNatLit gcd).1
  -- Ordered relations need positivity side conditions; `=`/`≠` only need nonzeroness.
  let (al_cond, ar_cond, gcd_cond) ← if ord then do
      let _ ← synthInstanceQ q(Field $α)
      let _ ← synthInstanceQ q(LinearOrder $α)
      let _ ← synthInstanceQ q(IsStrictOrderedRing $α)
      let al_pos : Q(Prop) := q(0 < $al)
      let ar_pos : Q(Prop) := q(0 < $ar)
      let gcd_pos : Q(Prop) := q(0 < $gcd)
      pure (al_pos, ar_pos, gcd_pos)
    else do
      let _ ← synthInstanceQ q(Field $α)
      let al_ne : Q(Prop) := q($al ≠ 0)
      let ar_ne : Q(Prop) := q($ar ≠ 0)
      let gcd_ne : Q(Prop) := q($gcd ≠ 0)
      pure (al_ne, ar_ne, gcd_ne)
  -- Discharge the numeric side conditions by `norm_num`.
  let al_cond ← synthesizeUsingNormNum al_cond
  let ar_cond ← synthesizeUsingNormNum ar_cond
  let gcd_cond ← synthesizeUsingNormNum gcd_cond
  let pf ← mkAppM lem #[lhs_p, rhs_p, al_cond, ar_cond, gcd_cond]
  let pf_tp ← inferType pf
  -- The new statement is the RHS of the proved propositional equality.
  return ((← findCompLemma pf_tp).elim default (Prod.fst ∘ Prod.snd), pf)
end CancelDenoms

/--
`cancel_denoms` attempts to remove numerals from the denominators of fractions.
It works on propositions that are field-valued inequalities.

```lean
variable [LinearOrderedField α] (a b c : α)

example (h : a / 5 + b / 4 < c) : 4*a + 5*b < 20*c := by
  cancel_denoms at h
  exact h

example (h : a > 0) : a / 5 > 0 := by
  cancel_denoms
  exact h
```
-/
syntax (name := cancelDenoms) "cancel_denoms" (location)? : tactic

open Elab Tactic

/-- Cancel denominators in the type of the hypothesis `fvar`, replacing it in place. -/
def cancelDenominatorsAt (fvar : FVarId) : TacticM Unit := do
  let t ← instantiateMVars (← fvar.getDecl).type
  let (new, eqPrf) ← CancelDenoms.cancelDenominatorsInType t
  liftMetaTactic' fun g => do
    let res ← g.replaceLocalDecl fvar new eqPrf
    return res.mvarId

/-- Cancel denominators in the main goal, replacing it by the equivalent statement. -/
def cancelDenominatorsTarget : TacticM Unit := do
  let (new, eqPrf) ← CancelDenoms.cancelDenominatorsInType (← getMainTarget)
  liftMetaTactic' fun g => g.replaceTargetEq new eqPrf

/-- Run denominator cancellation at the requested location(s), failing if nothing changes. -/
def cancelDenominators (loc : Location) : TacticM Unit := do
  withLocation loc cancelDenominatorsAt cancelDenominatorsTarget
    (fun _ ↦ throwError "Failed to cancel any denominators")

elab "cancel_denoms" loc?:(location)? : tactic => do
  cancelDenominators (expandOptLocation (Lean.mkOptionalNode loc?))
  -- Tidy the multiplied-out result with `norm_num` at the same location(s).
  Lean.Elab.Tactic.evalTactic (← `(tactic| try norm_num [← mul_assoc] $[$loc?]?))
.lake/packages/mathlib/Mathlib/Tactic/Order/Preprocessing.lean | import Mathlib.Tactic.Order.CollectFacts
/-!
# Facts preprocessing for the `order` tactic
In this file we implement the preprocessing procedure for the `order` tactic.
See `Mathlib/Tactic/Order.lean` for details of preprocessing.
-/
namespace Mathlib.Tactic.Order

universe u

open Lean Expr Meta

section Lemmas

/-- Contrapositive of `le_of_lt`: if `x ≤ y` fails then so does `x < y`. -/
lemma not_lt_of_not_le {α : Type u} [Preorder α] {x y : α} (h : ¬(x ≤ y)) : ¬(x < y) :=
  (h ·.le)

/-- In a preorder, `¬(x < y)` together with `x ≤ y` yields `y ≤ x`. -/
lemma le_of_not_lt_le {α : Type u} [Preorder α] {x y : α} (h1 : ¬(x < y)) (h2 : x ≤ y) :
    y ≤ x :=
  not_lt_iff_le_imp_ge.mp h1 h2

end Lemmas
/-- Supported order types: linear, partial, and preorder. -/
inductive OrderType
  /-- A `LinearOrder` instance is available. -/
  | lin
  /-- A `PartialOrder` instance is available (but no linear order). -/
  | part
  /-- Only a `Preorder` instance is available. -/
  | pre
deriving BEq

instance : ToString OrderType where
  toString
    | .lin => "linear order"
    | .part => "partial order"
    | .pre => "preorder"

/-- Find the "best" instance of an order on a given type. A linear order is preferred over a partial
order, and a partial order is preferred over a preorder. -/
def findBestOrderInstance (type : Expr) : MetaM <| Option OrderType := do
  -- Try instances from strongest to weakest; the first synthesis success wins.
  if (← synthInstance? (← mkAppM ``LinearOrder #[type])).isSome then
    return some .lin
  if (← synthInstance? (← mkAppM ``PartialOrder #[type])).isSome then
    return some .part
  if (← synthInstance? (← mkAppM ``Preorder #[type])).isSome then
    return some .pre
  return none
/-- Replaces facts of the form `x = ⊤` with `y ≤ x` for all `y`, and similarly for `x = ⊥`. -/
def replaceBotTop (facts : Array AtomicFact) (idxToAtom : Std.HashMap Nat Expr) :
    MetaM <| Array AtomicFact := do
  let mut res : Array AtomicFact := #[]
  let nAtoms := idxToAtom.size
  for fact in facts do
    match fact with
    | .isBot idx =>
      -- `⊥ ≤ y` for every other atom `y`, witnessed by `bot_le`.
      for i in [:nAtoms] do
        if i != idx then
          res := res.push <| .le idx i (← mkAppOptM ``bot_le #[none, none, none, idxToAtom.get! i])
    | .isTop idx =>
      -- `y ≤ ⊤` for every other atom `y`, witnessed by `le_top`.
      for i in [:nAtoms] do
        if i != idx then
          res := res.push <| .le i idx (← mkAppOptM ``le_top #[none, none, none, idxToAtom.get! i])
    | _ =>
      res := res.push fact
  return res
/-- Preprocesses facts for preorders. Replaces `x < y` with two equivalent facts: `x ≤ y` and
`¬ (y ≤ x)`. Replaces `x = y` with `x ≤ y`, `y ≤ x` and removes `x ≠ y`. -/
def preprocessFactsPreorder (facts : Array AtomicFact) : MetaM <| Array AtomicFact := do
  let mut res : Array AtomicFact := #[]
  for fact in facts do
    match fact with
    | .lt lhs rhs proof =>
      res := res.push <| .le lhs rhs (← mkAppM ``le_of_lt #[proof])
      res := res.push <| .nle rhs lhs (← mkAppM ``not_le_of_gt #[proof])
    | .eq lhs rhs proof =>
      -- An equality is split into the two inequalities it implies.
      res := res.push <| .le lhs rhs (← mkAppM ``le_of_eq #[proof])
      res := res.push <| .le rhs lhs (← mkAppM ``ge_of_eq #[proof])
    | .ne _ _ _ =>
      -- `≠` facts are dropped for preorders.
      continue
    | _ =>
      res := res.push fact
  return res
/-- Preprocesses facts for partial orders. Replaces `x < y`, `¬ (x ≤ y)`, and `x = y` with
equivalent facts involving only `≤`, `≠`, and `≮`. For each fact `x = y ⊔ z` adds `y ≤ x`
and `z ≤ x` facts, and similarly for `⊓`. -/
def preprocessFactsPartial (facts : Array AtomicFact) (idxToAtom : Std.HashMap Nat Expr) :
    MetaM <| Array AtomicFact := do
  let mut res : Array AtomicFact := #[]
  for fact in facts do
    match fact with
    | .lt lhs rhs proof =>
      -- `x < y` splits into `x ≠ y` and `x ≤ y` in a partial order.
      res := res.push <| .ne lhs rhs (← mkAppM ``ne_of_lt #[proof])
      res := res.push <| .le lhs rhs (← mkAppM ``le_of_lt #[proof])
    | .nle lhs rhs proof =>
      -- `¬(x ≤ y)` yields both `x ≠ y` and `¬(x < y)`.
      res := res.push <| .ne lhs rhs (← mkAppM ``ne_of_not_le #[proof])
      res := res.push <| .nlt lhs rhs (← mkAppM ``not_lt_of_not_le #[proof])
    | .eq lhs rhs proof =>
      res := res.push <| .le lhs rhs (← mkAppM ``le_of_eq #[proof])
      res := res.push <| .le rhs lhs (← mkAppM ``ge_of_eq #[proof])
    | .isSup lhs rhs sup =>
      -- A supremum dominates both of its arguments; keep the original fact too.
      res := res.push <| .le lhs sup
        (← mkAppOptM ``le_sup_left #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push <| .le rhs sup
        (← mkAppOptM ``le_sup_right #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push fact
    | .isInf lhs rhs inf =>
      -- An infimum is below both of its arguments; keep the original fact too.
      res := res.push <| .le inf lhs
        (← mkAppOptM ``inf_le_left #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push <| .le inf rhs
        (← mkAppOptM ``inf_le_right #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push fact
    | _ =>
      res := res.push fact
  return res
/-- Preprocesses facts for linear orders. Replaces `x < y`, `¬ (x ≤ y)`, `¬ (x < y)`, and `x = y`
with equivalent facts involving only `≤` and `≠`. For each fact `x = y ⊔ z` adds `y ≤ x`
and `z ≤ x` facts, and similarly for `⊓`. -/
def preprocessFactsLinear (facts : Array AtomicFact) (idxToAtom : Std.HashMap Nat Expr) :
    MetaM <| Array AtomicFact := do
  let mut res : Array AtomicFact := #[]
  for fact in facts do
    match fact with
    | .lt lhs rhs proof =>
      res := res.push <| .ne lhs rhs (← mkAppM ``ne_of_lt #[proof])
      res := res.push <| .le lhs rhs (← mkAppM ``le_of_lt #[proof])
    | .nle lhs rhs proof =>
      -- Linearity turns `¬(x ≤ y)` into the positive facts `x ≠ y` and `y ≤ x`.
      res := res.push <| .ne lhs rhs (← mkAppM ``ne_of_not_le #[proof])
      res := res.push <| .le rhs lhs (← mkAppM ``le_of_not_ge #[proof])
    | .nlt lhs rhs proof =>
      -- Linearity turns `¬(x < y)` into `y ≤ x`.
      res := res.push <| .le rhs lhs (← mkAppM ``le_of_not_gt #[proof])
    | .eq lhs rhs proof =>
      res := res.push <| .le lhs rhs (← mkAppM ``le_of_eq #[proof])
      res := res.push <| .le rhs lhs (← mkAppM ``ge_of_eq #[proof])
    | .isSup lhs rhs sup =>
      -- A supremum dominates both of its arguments; keep the original fact too.
      res := res.push <| .le lhs sup
        (← mkAppOptM ``le_sup_left #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push <| .le rhs sup
        (← mkAppOptM ``le_sup_right #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push fact
    | .isInf lhs rhs inf =>
      -- An infimum is below both of its arguments; keep the original fact too.
      res := res.push <| .le inf lhs
        (← mkAppOptM ``inf_le_left #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push <| .le inf rhs
        (← mkAppOptM ``inf_le_right #[none, none, idxToAtom.get! lhs, idxToAtom.get! rhs])
      res := res.push fact
    | _ =>
      res := res.push fact
  return res
/-- Preprocesses facts for order of `orderType` using either `preprocessFactsPreorder` or
`preprocessFactsPartial` or `preprocessFactsLinear`. -/
def preprocessFacts (facts : Array AtomicFact) (idxToAtom : Std.HashMap Nat Expr)
    (orderType : OrderType) : MetaM <| Array AtomicFact :=
  -- Dispatch on the strength of the available order structure.
  if orderType == .lin then
    preprocessFactsLinear facts idxToAtom
  else if orderType == .part then
    preprocessFactsPartial facts idxToAtom
  else
    preprocessFactsPreorder facts
end Mathlib.Tactic.Order |
.lake/packages/mathlib/Mathlib/Tactic/Order/CollectFacts.lean | import Mathlib.Order.BoundedOrder.Basic
import Mathlib.Order.Lattice
import Qq
/-!
# Facts collection for the `order` Tactic
This file implements the collection of facts for the `order` tactic.
-/
namespace Mathlib.Tactic.Order

open Lean Qq Elab Meta Tactic

/-- A structure for storing facts about variables. Atoms are referred to by index. -/
inductive AtomicFact
  /-- Atom `lhs` equals atom `rhs`, with a proof. -/
  | eq (lhs : Nat) (rhs : Nat) (proof : Expr)
  /-- Atom `lhs` is distinct from atom `rhs`, with a proof. -/
  | ne (lhs : Nat) (rhs : Nat) (proof : Expr)
  /-- Atom `lhs` is `≤` atom `rhs`, with a proof. -/
  | le (lhs : Nat) (rhs : Nat) (proof : Expr)
  /-- Negated `≤` between the atoms, with a proof. -/
  | nle (lhs : Nat) (rhs : Nat) (proof : Expr)
  /-- Atom `lhs` is `<` atom `rhs`, with a proof. -/
  | lt (lhs : Nat) (rhs : Nat) (proof : Expr)
  /-- Negated `<` between the atoms, with a proof. -/
  | nlt (lhs : Nat) (rhs : Nat) (proof : Expr)
  /-- The atom at `idx` is `⊤`. -/
  | isTop (idx : Nat)
  /-- The atom at `idx` is `⊥`. -/
  | isBot (idx : Nat)
  /-- The atom at `res` is the infimum of the atoms `lhs` and `rhs`. -/
  | isInf (lhs : Nat) (rhs : Nat) (res : Nat)
  /-- The atom at `res` is the supremum of the atoms `lhs` and `rhs`. -/
  | isSup (lhs : Nat) (rhs : Nat) (res : Nat)
deriving Inhabited, BEq
-- For debugging purposes.
instance : ToString AtomicFact where
  toString fa := match fa with
    | .eq lhs rhs _ => s!"#{lhs} = #{rhs}"
    | .ne lhs rhs _ => s!"#{lhs} ≠ #{rhs}"
    | .le lhs rhs _ => s!"#{lhs} ≤ #{rhs}"
    | .nle lhs rhs _ => s!"¬ #{lhs} ≤ #{rhs}"
    | .lt lhs rhs _ => s!"#{lhs} < #{rhs}"
    | .nlt lhs rhs _ => s!"¬ #{lhs} < #{rhs}"
    | .isTop idx => s!"#{idx} := ⊤"
    | .isBot idx => s!"#{idx} := ⊥"
    | .isInf lhs rhs res => s!"#{res} := #{lhs} ⊓ #{rhs}"
    | .isSup lhs rhs res => s!"#{res} := #{lhs} ⊔ #{rhs}"

/-- State for `CollectFactsM`. It contains a map where the key `t` maps to a
pair `(atomToIdx, facts)`. `atomToIdx` is a `DiscrTree` containing atomic expressions with their
indices, and `facts` stores `AtomicFact`s about them. -/
abbrev CollectFactsState := Std.HashMap Expr <| DiscrTree (Nat × Expr) × Array AtomicFact

/-- Monad for the fact collection procedure. -/
abbrev CollectFactsM := StateT CollectFactsState MetaM

/-- Adds `fact` to the state. -/
def addFact (type : Expr) (fact : AtomicFact) : CollectFactsM Unit :=
  -- Leave the atom index untouched; only append to this type's fact list.
  modify fun res => res.modify type fun (atomToIdx, facts) =>
    (atomToIdx, facts.push fact)
/-- Updates the state with the atom `x`. If `x` is `⊤` or `⊥`, adds the corresponding fact. If `x`
is `y ⊔ z`, adds a fact about it, then recursively calls `addAtom` on `y` and `z`.
Similarly for `⊓`. Returns the index assigned to `x`, reusing an existing index when `x` is
defeq to an already-registered atom. -/
partial def addAtom {u : Level} (type : Q(Type u)) (x : Q($type)) : CollectFactsM Nat := do
  -- Ensure an (empty) entry for `type` exists before reading it with `get!`.
  modify fun res => res.insertIfNew type (.empty, #[])
  let (atomToIdx, facts) := (← get).get! type
  -- Look for an already-registered atom that is defeq to `x`.
  match ← (← atomToIdx.getUnify x).findM? fun (_, e) => isDefEq x e with
  | some (idx, _) => return idx
  | none =>
    -- Fresh atom: assign the next index and register it.
    let idx := atomToIdx.size
    let atomToIdx ← atomToIdx.insert x (idx, x)
    modify fun res => res.insert type (atomToIdx, facts)
    -- Record structural facts for `⊤`/`⊥` and the lattice operations,
    -- recursing into the operands of `⊔`/`⊓`.
    match x with
    | ~q((@OrderTop.toTop _ $instLE $instTop).top) =>
      addFact type (.isTop idx)
    | ~q((@OrderBot.toBot _ $instLE $instBot).bot) =>
      addFact type (.isBot idx)
    | ~q((@SemilatticeSup.toMax _ $inst).max $a $b) =>
      let aIdx ← addAtom type a
      let bIdx ← addAtom type b
      addFact type (.isSup aIdx bIdx idx)
    | ~q((@SemilatticeInf.toMin _ $inst).min $a $b) =>
      let aIdx ← addAtom type a
      let bIdx ← addAtom type b
      addFact type (.isInf aIdx bIdx idx)
    | _ => pure ()
    return idx
-- The linter claims `u` is unused, but it is used on the next line.
set_option linter.unusedVariables false in
/-- Implementation for `collectFacts` in `CollectFactsM` monad. -/
partial def collectFactsImp (only? : Bool) (hyps : Array Expr) (negGoal : Expr) :
    CollectFactsM Unit := do
  let ctx ← getLCtx
  -- Terms passed explicitly to the tactic are always processed.
  for expr in hyps do
    processExpr expr
  processExpr negGoal
  -- Unless `only?` is set, also harvest every hypothesis from the local context.
  if !only? then
    for ldecl in ctx do
      if ldecl.isImplementationDetail then
        continue
      let e := ldecl.toExpr
      -- Skip the negated goal: it was already processed above.
      if e == negGoal then
        continue
      processExpr e
where
  /-- Extracts facts and atoms from the expression. Only propositions are considered;
  equalities and disequalities are kept only when the underlying type has a `Preorder`
  instance. Conjunctions are split, and existentials are entered via `Exists.choose_spec`. -/
  processExpr (expr : Expr) : CollectFactsM Unit := do
    let type ← inferType expr
    if !(← isProp type) then
      return
    let ⟨u, type, expr⟩ ← inferTypeQ expr
    let _ : u =QL 0 := ⟨⟩
    match type with
    | ~q(@Eq ($α : Type _) $x $y) =>
      -- `=` is only useful to `order` when `α` carries an order.
      if (← synthInstance? (q(Preorder $α))).isSome then
        let xIdx ← addAtom α x
        let yIdx ← addAtom α y
        addFact α <| .eq xIdx yIdx expr
    | ~q(@LE.le $α $inst $x $y) =>
      let xIdx ← addAtom α x
      let yIdx ← addAtom α y
      addFact α <| .le xIdx yIdx expr
    | ~q(@LT.lt $α $inst $x $y) =>
      let xIdx ← addAtom α x
      let yIdx ← addAtom α y
      addFact α <| .lt xIdx yIdx expr
    | ~q(@Ne ($α : Type _) $x $y) =>
      if (← synthInstance? (q(Preorder $α))).isSome then
        let xIdx ← addAtom α x
        let yIdx ← addAtom α y
        addFact α <| .ne xIdx yIdx expr
    | ~q(Not $p) =>
      -- Negated order facts: `¬ ≤` and `¬ <`.
      match p with
      | ~q(@LE.le $α $inst $x $y) =>
        let xIdx ← addAtom α x
        let yIdx ← addAtom α y
        addFact α <| .nle xIdx yIdx expr
      | ~q(@LT.lt $α $inst $x $y) =>
        let xIdx ← addAtom α x
        let yIdx ← addAtom α y
        addFact α <| .nlt xIdx yIdx expr
      | _ => return
    | ~q($p ∧ $q) =>
      -- Split conjunctions into their two components.
      processExpr q(And.left $expr)
      processExpr q(And.right $expr)
    | ~q(Exists $P) =>
      processExpr q(Exists.choose_spec $expr)
    | _ => return
/-- Collects facts from the local context. `negGoal` is the negated goal, `hyps` is the expressions
passed to the tactic using square brackets. If `only?` is true, we collect facts only from `hyps`
and `negGoal`, otherwise we also use the local context.
For each occurring type `α`, the returned map contains a pair `(idxToAtom, facts)`,
where the map `idxToAtom` converts indices to found atomic expressions of type `α`,
and `facts` contains all collected `AtomicFact`s about them. -/
def collectFacts (only? : Bool) (hyps : Array Expr) (negGoal : Expr) :
    MetaM <| Std.HashMap Expr <| Std.HashMap Nat Expr × Array AtomicFact := do
  -- Run the collector starting from an empty state and keep only the final state.
  let res := (← (collectFactsImp only? hyps negGoal).run ∅).snd
  -- Invert each per-type `DiscrTree` of `(index, atom)` pairs into an index-keyed map.
  return res.map fun _ (atomToIdx, facts) =>
    let idxToAtom : Std.HashMap Nat Expr := atomToIdx.fold (init := ∅) fun acc _ value =>
      acc.insert value.fst value.snd
    (idxToAtom, facts)
end Mathlib.Tactic.Order |
.lake/packages/mathlib/Mathlib/Tactic/Order/ToInt.lean | import Batteries.Data.List.Pairwise
import Mathlib.Tactic.Order.CollectFacts
import Batteries.Tactic.GeneralizeProofs
import Mathlib.Util.Qq
/-!
# Translating linear orders to ℤ
In this file we implement the translation of a problem in any linearly ordered type to a problem in
`ℤ`. This allows us to use the `omega` tactic to solve it.
While the core algorithm of the `order` tactic is complete for the theory of linear orders in the
signature (`<`, `≤`),
it becomes incomplete in the signature with lattice operations `⊓` and `⊔`. With these operations,
the problem becomes NP-hard, and the idea is to reuse a smart and efficient procedure, such as
`omega`.
## TODO
Migrate to `grind` when it is ready.
-/
namespace Mathlib.Tactic.Order.ToInt
variable {α : Type*} [LinearOrder α] {n : ℕ} (val : Fin n → α)
/-- The main theorem asserting the existence of a translation.
We use `Classical.choose` to turn this into a value for use in the `order` tactic,
see `toInt`.
-/
theorem exists_translation : ∃ tr : Fin n → ℤ, ∀ i j, val i ≤ val j ↔ tr i ≤ tr j := by
  -- Sort the values; the position of `val i` in the sorted list is a valid translation.
  let li := List.ofFn val
  let sli := li.mergeSort
  have (i : Fin n) : ∃ j : Fin sli.length, sli[j] = val i := by
    apply List.get_of_mem
    rw [List.Perm.mem_iff (List.mergeSort_perm _ _)]
    simp [li]
  use fun i ↦ (this i).choose
  intro i j
  simp only [Fin.getElem_fin, Int.ofNat_le]
  by_cases h_eq : val i = val j
  · simp [h_eq]
  generalize_proofs _ hi hj
  -- Rewrite both sides in terms of positions in the sorted list.
  rw [← hi.choose_spec, ← hj.choose_spec] at h_eq
  conv_lhs => rw [← hi.choose_spec, ← hj.choose_spec]
  -- Sortedness gives monotonicity of the position function on distinct values.
  have := List.sorted_mergeSort (l := li) (le := fun a b ↦ decide (a ≤ b))
    (by simpa using Preorder.le_trans) (by simpa using LinearOrder.le_total)
  rw [List.pairwise_iff_get] at this
  refine ⟨fun h ↦ ?_, fun h ↦ ?_⟩
  · contrapose! h
    exact lt_of_le_of_ne (by simpa using (this hj.choose hi.choose (by simpa)))
      (fun h ↦ h_eq (h.symm))
  · simpa using this hi.choose hj.choose (by apply lt_of_le_of_ne h; contrapose! h_eq; simp [h_eq])
/-- Auxiliary definition used by the `order` tactic to transfer facts in a linear order to `ℤ`. -/
noncomputable def toInt (k : Fin n) : ℤ :=
  (exists_translation val).choose k

variable (i j k : Fin n)

/-- `toInt` preserves and reflects `≤`. -/
theorem toInt_le_toInt : toInt val i ≤ toInt val j ↔ val i ≤ val j := by
  simp [toInt, (exists_translation val).choose_spec]

/-- `toInt` preserves and reflects `<`. -/
theorem toInt_lt_toInt : toInt val i < toInt val j ↔ val i < val j := by
  simpa using (toInt_le_toInt val j i).not

/-- `toInt` preserves and reflects `=`. -/
theorem toInt_eq_toInt : toInt val i = toInt val j ↔ val i = val j := by
  simp [toInt_le_toInt, le_antisymm_iff]

/-- `toInt` preserves and reflects `≠`. -/
theorem toInt_ne_toInt : toInt val i ≠ toInt val j ↔ val i ≠ val j := by
  simpa using (toInt_eq_toInt val i j).not

/-- `toInt` preserves and reflects negated `≤`. -/
theorem toInt_nle_toInt : ¬toInt val i ≤ toInt val j ↔ ¬val i ≤ val j := by
  simpa using toInt_lt_toInt val j i

/-- `toInt` preserves and reflects negated `<`. -/
theorem toInt_nlt_toInt : ¬toInt val i < toInt val j ↔ ¬val i < val j := by
  simpa using toInt_le_toInt val j i

/-- `toInt` transfers `⊔`-facts: a `max` equation holds in `ℤ` iff it holds in `α`. -/
theorem toInt_sup_toInt_eq_toInt :
    toInt val i ⊔ toInt val j = toInt val k ↔ val i ⊔ val j = val k := by
  simp [le_antisymm_iff, sup_le_iff, le_sup_iff, toInt_le_toInt]

/-- `toInt` transfers `⊓`-facts: a `min` equation holds in `ℤ` iff it holds in `α`. -/
theorem toInt_inf_toInt_eq_toInt :
    toInt val i ⊓ toInt val j = toInt val k ↔ val i ⊓ val j = val k := by
  simp [le_antisymm_iff, inf_le_iff, le_inf_iff, toInt_le_toInt]
open Lean Meta Qq
/-- Given an array `atoms : Array α`, create an expression representing a function
`f : Fin atoms.size → α` such that `f n` is defeq to `atoms[n]` for `n : Fin atoms.size`. -/
def mkFinFun {u : Level} {α : Q(Type $u)} (atoms : Array Q($α)) : MetaM Expr := do
  if h : atoms.isEmpty then
    -- No atoms: the only function out of `Fin 0` is `Fin.elim0`.
    return q(Fin.elim0 : Fin 0 → $α)
  else
    -- Pack the atoms into an `RArray` indexed by `x.val`.
    let rarray := RArray.ofArray atoms (by simpa [Array.size_pos_iff] using h)
    let rarrayExpr : Q(RArray $α) ← rarray.toExpr α (fun x ↦ x)
    haveI m : Q(ℕ) := mkNatLit atoms.size
    return q(fun (x : Fin $m) ↦ ($rarrayExpr).get x.val)
/-- Translates a set of values in a linear ordered type to `ℤ`,
preserving all the facts except for `.isTop` and `.isBot`. These facts are filtered at the
preprocessing step.
Returns the new index-to-atom map (whose values are now expressions of type `ℤ`) together with
the translated facts. For each `.isSup`/`.isInf` fact a fresh atom is introduced for the
integer `⊔`/`⊓` expression, together with an `.eq` fact linking it to the original result
atom. -/
def translateToInt {u : Lean.Level} (type : Q(Type u)) (inst : Q(LinearOrder $type))
    (idxToAtom : Std.HashMap ℕ Q($type))
    (facts : Array AtomicFact) :
    MetaM <| Std.HashMap ℕ Q(ℤ) × Array AtomicFact := do
  haveI nE : Q(ℕ) := mkNatLitQ idxToAtom.size
  -- A function `Fin n → type` enumerating all atoms; `toInt` is applied to it below.
  haveI finFun : Q(Fin $nE → $type) :=
    ← mkFinFun (Array.ofFn fun (n : Fin idxToAtom.size) => idxToAtom[n]!)
  -- Turns a raw index into a `Fin $nE` literal. "Unsafe": it asserts `k < n` by `decide`
  -- without checking, so callers must only pass valid atom indices.
  let toFinUnsafe : ℕ → Q(Fin $nE) := fun k =>
    haveI kE := mkNatLitQ k
    haveI heq : decide ($kE < $nE) =Q true := ⟨⟩
    q(⟨$kE, of_decide_eq_true $heq⟩)
  -- Fold state: `curr` is the next fresh atom index (starting at `idxToAtom.size`),
  -- `map` the ℤ-valued atom map, `facts` the translated facts accumulated so far.
  return Prod.snd <| facts.foldl (fun (curr, map, facts) fact =>
    match fact with
    | .eq lhs rhs prf =>
      (curr, map, facts.push (
        haveI lhsFin := toFinUnsafe lhs
        haveI rhsFin := toFinUnsafe rhs
        haveI prfQ : Q($finFun $lhsFin = $finFun $rhsFin) := prf
        .eq lhs rhs q((toInt_eq_toInt $finFun $lhsFin $rhsFin).mpr $prfQ)
      ))
    | .ne lhs rhs prf =>
      (curr, map, facts.push (
        haveI lhsFin := toFinUnsafe lhs
        haveI rhsFin := toFinUnsafe rhs
        haveI prfQ : Q($finFun $lhsFin ≠ $finFun $rhsFin) := prf
        .ne lhs rhs q((toInt_ne_toInt $finFun $lhsFin $rhsFin).mpr $prfQ)
      ))
    | .le lhs rhs prf =>
      (curr, map, facts.push (
        haveI lhsFin := toFinUnsafe lhs
        haveI rhsFin := toFinUnsafe rhs
        haveI prfQ : Q($finFun $lhsFin ≤ $finFun $rhsFin) := prf
        .le lhs rhs q((toInt_le_toInt $finFun $lhsFin $rhsFin).mpr $prfQ)
      ))
    | .lt lhs rhs prf =>
      (curr, map, facts.push (
        haveI lhsFin := toFinUnsafe lhs
        haveI rhsFin := toFinUnsafe rhs
        haveI prfQ : Q($finFun $lhsFin < $finFun $rhsFin) := prf
        .lt lhs rhs q((toInt_lt_toInt $finFun $lhsFin $rhsFin).mpr $prfQ)
      ))
    | .nle lhs rhs prf =>
      (curr, map, facts.push (
        haveI lhsFin := toFinUnsafe lhs
        haveI rhsFin := toFinUnsafe rhs
        haveI prfQ : Q(¬$finFun $lhsFin ≤ $finFun $rhsFin) := prf
        .nle lhs rhs q((toInt_nle_toInt $finFun $lhsFin $rhsFin).mpr $prfQ)
      ))
    | .nlt lhs rhs prf =>
      (curr, map, facts.push (
        haveI lhsFin := toFinUnsafe lhs
        haveI rhsFin := toFinUnsafe rhs
        haveI prfQ : Q(¬$finFun $lhsFin < $finFun $rhsFin) := prf
        .nlt lhs rhs q((toInt_nlt_toInt $finFun $lhsFin $rhsFin).mpr $prfQ)
      ))
    -- `⊤`/`⊥` facts have no `ℤ` counterpart; they are expected to be filtered earlier.
    | .isBot _
    | .isTop _ => (curr, map, facts)
    | .isSup lhs rhs val =>
      haveI lhsFin := toFinUnsafe lhs
      haveI rhsFin := toFinUnsafe rhs
      haveI valFin := toFinUnsafe val
      haveI heq : max («$finFun» «$lhsFin») («$finFun» «$rhsFin») =Q «$finFun» «$valFin» := ⟨⟩
      -- Fresh atom `curr` for the integer `⊔`, linked to `val` by an `.eq` fact.
      (curr + 1, map.insert curr q(toInt $finFun $lhsFin ⊔ toInt $finFun $rhsFin),
        (facts.push (.isSup lhs rhs curr)).push (.eq curr val
          q((toInt_sup_toInt_eq_toInt $finFun $lhsFin $rhsFin $valFin).mpr $heq)
        )
      )
    | .isInf lhs rhs val =>
      haveI lhsFin := toFinUnsafe lhs
      haveI rhsFin := toFinUnsafe rhs
      haveI valFin := toFinUnsafe val
      haveI heq : min («$finFun» «$lhsFin») («$finFun» «$rhsFin») =Q «$finFun» «$valFin» := ⟨⟩
      -- Fresh atom `curr` for the integer `⊓`, linked to `val` by an `.eq` fact.
      (curr + 1, map.insert curr q(toInt $finFun $lhsFin ⊓ toInt $finFun $rhsFin),
        (facts.push (.isInf lhs rhs curr)).push (.eq curr val
          q((toInt_inf_toInt_eq_toInt $finFun $lhsFin $rhsFin $valFin).mpr $heq)
        )
      ))
    -- Initial state: every existing atom `k` is mapped to `toInt finFun k`.
    (idxToAtom.size, idxToAtom.map fun k _ =>
      haveI kFin := toFinUnsafe k
      q(toInt $finFun $kFin), Array.emptyWithCapacity idxToAtom.size)
end Mathlib.Tactic.Order.ToInt
export Mathlib.Tactic.Order.ToInt (translateToInt) |
.lake/packages/mathlib/Mathlib/Tactic/Order/Graph/Basic.lean | import Mathlib.Tactic.Order.CollectFacts
/-!
# Graphs for the `order` tactic
This module defines the `Graph` structure and basic operations on it. The `order` tactic uses
`≤`-graphs, where the vertices represent atoms, and an edge `(x, y)` exists if `x ≤ y`.
-/
namespace Mathlib.Tactic.Order
open Lean Expr Meta
/-- An edge in a graph. In the `order` tactic, the `proof` field stores the proof of
`atomToIdx[src] ≤ atomToIdx[dst]`. -/
structure Edge where
  /-- Source of the edge. -/
  src : Nat
  /-- Destination of the edge. -/
  dst : Nat
  /-- Proof of `atomToIdx[src] ≤ atomToIdx[dst]`. -/
  proof : Expr

-- For debugging purposes: renders only the endpoints, not the proof.
instance : ToString Edge where
  toString e := s!"{e.src} ⟶ {e.dst}"
/-- If `g` is a `Graph`, then for a vertex with index `v`, `g[v]` is an array containing
the edges starting from this vertex (an adjacency-list representation). -/
abbrev Graph := Array (Array Edge)

namespace Graph

/-- Adds an `edge` to the graph by appending it to the adjacency list of `edge.src`.
NOTE(review): `Array.modify` is a no-op when `edge.src` is out of bounds. -/
def addEdge (g : Graph) (edge : Edge) : Graph :=
  g.modify edge.src fun edges => edges.push edge
/-- Constructs a directed `Graph` with `nVertexes` vertices from the `≤` facts in `facts`.
All other kinds of facts are ignored. -/
def constructLeGraph (nVertexes : Nat) (facts : Array AtomicFact) : MetaM Graph := do
  -- Start from an edgeless graph and fold the `≤` facts into it.
  let empty : Graph := Array.replicate nVertexes #[]
  return facts.foldl (init := empty) fun g fact =>
    match fact with
    | .le lhs rhs proof => g.addEdge ⟨lhs, rhs, proof⟩
    | _ => g
/-- State for the DFS algorithm. -/
structure DFSState where
  /-- `visited[v] = true` if and only if the algorithm has already entered vertex `v`. -/
  visited : Array Bool

/-- DFS algorithm for constructing a proof that `x ≤ y` by finding a path from `x` to `y` in the
`≤`-graph. Returns `none` when `t` is unreachable from `v`; otherwise the path is turned into
a proof by chaining `le_trans` over the edges, with `le_refl` at the endpoint. -/
partial def buildTransitiveLeProofDFS (g : Graph) (v t : Nat) (tExpr : Expr) :
    StateT DFSState MetaM (Option Expr) := do
  modify fun s => {s with visited := s.visited.set! v true}
  if v == t then
    return ← mkAppM ``le_refl #[tExpr]
  for edge in g[v]! do
    let u := edge.dst
    if !(← get).visited[u]! then
      match ← buildTransitiveLeProofDFS g u t tExpr with
      | some pf => return some <| ← mkAppM ``le_trans #[edge.proof, pf]
      | none => continue
  return none

/-- Given a `≤`-graph `g`, finds a proof of `s ≤ t` using transitivity,
or `none` if no path from `s` to `t` exists. -/
def buildTransitiveLeProof (g : Graph) (idxToAtom : Std.HashMap Nat Expr) (s t : Nat) :
    MetaM (Option Expr) := do
  let state : DFSState := ⟨.replicate g.size false⟩
  (buildTransitiveLeProofDFS g s t (idxToAtom.get! t)).run' state
end Graph
end Mathlib.Tactic.Order |
.lake/packages/mathlib/Mathlib/Tactic/Order/Graph/Tarjan.lean | import Mathlib.Tactic.Order.Graph.Basic
/-!
# Tarjan's Algorithm
This file implements Tarjan's algorithm for finding the strongly connected components (SCCs) of
a graph.
-/
namespace Mathlib.Tactic.Order.Graph
/-- State for Tarjan's algorithm. -/
structure TarjanState extends DFSState where
/-- `id[v]` is the index of the vertex `v` in the DFS traversal. -/
id : Array Nat
/-- `lowlink[v]` is the smallest index of any node on the stack that is reachable from `v`
through `v`'s DFS subtree. -/
lowlink : Array Nat
/-- The stack of visited vertices used in Tarjan's algorithm. -/
stack : Array Nat
/-- `onStack[v] = true` iff `v` is in `stack`. The structure is used to check it efficiently. -/
onStack : Array Bool
/-- A time counter that increments each time the algorithm visits an unvisited vertex. -/
time : Nat
/-- Tarjan's algorithm.
See [Wikipedia](https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm). -/
partial def tarjanDFS (g : Graph) (v : Nat) : StateM TarjanState Unit := do
  -- Enter `v`: stamp it with the current time and push it onto the stack.
  modify fun s => {
    visited := s.visited.set! v true,
    id := s.id.set! v s.time,
    lowlink := s.lowlink.set! v s.time,
    stack := s.stack.push v,
    onStack := s.onStack.set! v true,
    time := s.time + 1
  }
  for edge in g[v]! do
    let u := edge.dst
    if !(← get).visited[u]! then
      -- Tree edge: recurse, then propagate the child's lowlink.
      tarjanDFS g u
      modify fun s => {s with
        lowlink := s.lowlink.set! v (min s.lowlink[v]! s.lowlink[u]!),
      }
    else if (← get).onStack[u]! then
      -- Back/cross edge to a vertex still on the stack: update lowlink with its DFS id.
      modify fun s => {s with
        lowlink := s.lowlink.set! v (min s.lowlink[v]! s.id[u]!),
      }
  -- `v` is the root of an SCC: pop the whole component off the stack, and
  -- overwrite each member's lowlink with `lowlink[v]`, which thus serves as the SCC id.
  if (← get).id[v]! = (← get).lowlink[v]! then
    let mut w := 0
    while true do
      w := (← get).stack.back!
      modify fun s => {s with
        stack := s.stack.pop
        onStack := s.onStack.set! w false
        lowlink := s.lowlink.set! w s.lowlink[v]!
      }
      if w = v then
        break
/-- Implementation of `findSCCs` in the `StateM TarjanState` monad:
run `tarjanDFS` from every not-yet-visited vertex. -/
def findSCCsImp (g : Graph) : StateM TarjanState Unit := do
  for v in [:g.size] do
    if !(← get).visited[v]! then
      tarjanDFS g v

/-- Finds the strongly connected components of the graph `g`. Returns an array where the value at
index `v` represents the SCC number containing vertex `v`. The numbering of SCCs is arbitrary. -/
def findSCCs (g : Graph) : Array Nat :=
  let s : TarjanState := {
    visited := .replicate g.size false
    id := .replicate g.size 0
    lowlink := .replicate g.size 0
    stack := #[]
    onStack := .replicate g.size false
    time := 0
  }
  -- After the run, `lowlink[v]` holds the SCC identifier of `v`
  -- (`tarjanDFS` overwrites it when the component is popped).
  (findSCCsImp g).run s |>.snd.lowlink
end Mathlib.Tactic.Order.Graph |
.lake/packages/mathlib/Mathlib/Tactic/Continuity/Init.lean | import Mathlib.Init
import Aesop
/-!
# Continuity Rule Set
This module defines the `Continuous` Aesop rule set which is used by the
`continuity` tactic. Aesop rule sets only become visible once the file in which
they're declared is imported, so we must put this declaration into its own file.
-/
declare_aesop_rule_sets [Continuous] |
.lake/packages/mathlib/Mathlib/Tactic/Relation/Rfl.lean | import Mathlib.Init
import Lean.Meta.Tactic.Rfl
/-!
# `Lean.MVarId.liftReflToEq`
Convert a goal of the form `x ~ y` into the form `x = y`, where `~` is a reflexive
relation, that is, a relation which has a reflexive lemma tagged with the attribute `[refl]`.
If this can't be done, returns the original `MVarId`.
-/
namespace Mathlib.Tactic
open Lean Meta Elab Tactic Rfl
/--
This tactic applies to a goal whose target has the form `x ~ x`, where `~` is a reflexive
relation, that is, a relation which has a reflexive lemma tagged with the attribute [refl].
-/
def rflTac : TacticM Unit :=
withMainContext do liftMetaFinishingTactic (·.applyRfl)
/-- If `e` is the form `@R .. x y`, where `R` is a reflexive
relation, return `some (R, x, y)`.
As a special case, if `e` is `@HEq α a β b`, return ``some (`HEq, a, b)``. -/
def _root_.Lean.Expr.relSidesIfRefl? (e : Expr) : MetaM (Option (Name × Expr × Expr)) := do
  -- Fast paths for the built-in relations.
  if let some (_, lhs, rhs) := e.eq? then
    return (``Eq, lhs, rhs)
  if let some (lhs, rhs) := e.iff? then
    return (``Iff, lhs, rhs)
  if let some (_, lhs, _, rhs) := e.heq? then
    return (``HEq, lhs, rhs)
  -- Otherwise, accept a binary application whose head matches a `@[refl]` lemma.
  if let .app (.app rel lhs) rhs := e then
    unless (← (reflExt.getState (← getEnv)).getMatch rel).isEmpty do
      match rel.getAppFn.constName? with
      | some n => return some (n, lhs, rhs)
      | none => return none
  return none
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Relation/Symm.lean | import Mathlib.Init
import Lean.Meta.Tactic.Symm
/-!
# `relSidesIfSymm?`
-/
open Lean Meta Symm
namespace Mathlib.Tactic
open Lean.Elab.Tactic
/-- If `e` is the form `@R .. x y`, where `R` is a symmetric
relation, return `some (R, x, y)`.
As a special case, if `e` is `@HEq α a β b`, return ``some (`HEq, a, b)``. -/
def _root_.Lean.Expr.relSidesIfSymm? (e : Expr) : MetaM (Option (Name × Expr × Expr)) := do
  -- Fast paths for the built-in relations.
  if let some (_, lhs, rhs) := e.eq? then
    return (``Eq, lhs, rhs)
  if let some (lhs, rhs) := e.iff? then
    return (``Iff, lhs, rhs)
  if let some (_, lhs, _, rhs) := e.heq? then
    return (``HEq, lhs, rhs)
  -- Otherwise, accept a binary application whose head matches a `@[symm]` lemma.
  if let .app (.app rel lhs) rhs := e then
    unless (← (symmExt.getState (← getEnv)).getMatch rel).isEmpty do
      match rel.getAppFn.constName? with
      | some n => return some (n, lhs, rhs)
      | none => return none
  return none
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Simproc/ExistsAndEq.lean | import Mathlib.Init
import Qq
/-!
# Simproc for `∃ a', ... ∧ a' = a ∧ ...`
This module implements the `existsAndEq` simproc, which triggers on goals of the form `∃ a, P`.
It checks whether `P` allows only one possible value for `a`, and if so, substitutes it, eliminating
the leading quantifier.
The procedure traverses the body, branching at each `∧` and entering existential quantifiers,
searching for a subexpression of the form `a = a'` or `a' = a` for `a'` that is independent of `a`.
If such an expression is found, all occurrences of `a` are replaced with `a'`. If `a'` depends on
variables bound by existential quantifiers, those quantifiers are moved outside.
For example, `∃ a, p a ∧ ∃ b, a = f b ∧ q b` will be rewritten as `∃ b, p (f b) ∧ q b`.
-/
open Lean Meta Qq
namespace ExistsAndEq
/-- Type for storing the chosen branch at `And` nodes. -/
inductive GoTo
| left | right
deriving BEq, Inhabited
/-- Type for storing the path in the body expression leading to `a = a'`. We store only the chosen
directions at each `And` node because there is no branching at `Exists` nodes, and `Exists` nodes
will be removed from the body. -/
abbrev Path := List GoTo
/-- Qq-fied version of `Expr`. Here, we use it to store free variables introduced when unpacking
existential quantifiers. -/
abbrev VarQ := (u : Level) × (α : Q(Sort u)) × Q($α)
instance : Inhabited VarQ where
default := ⟨default, default, default⟩
/-- Qq-fied version of `Expr` proving some `P : Prop`. -/
abbrev HypQ := (P : Q(Prop)) × Q($P)
instance : Inhabited HypQ where
default := ⟨default, default⟩
/-- Used to indicate the current case should be unreachable, unless an invariant is violated.
`context` should be used to indicate which case is asserted to be unreachable.
For example, `"findEq: path for a conjunction should be nonempty"`. -/
private def assertUnreachable {α : Type} (context : String) : MetaM α := do
  let e := s!"existsAndEq: internal error, unreachable case has occurred:\n{context}."
  logError e
  -- the following error will be caught by `simp`, so we additionally log it above
  throwError e
/-- Constructs `∃ f₁ f₂ ... fₙ, body`, where `[f₁, ..., fₙ] = fvars`. -/
def mkNestedExists (fvars : List VarQ) (body : Q(Prop)) : MetaM Q(Prop) := do
  match fvars with
  | [] => pure body
  | ⟨_, β, b⟩ :: tl =>
    -- Build the inner existentials first, then abstract over the current variable.
    let res ← mkNestedExists tl body
    -- Reuse the free variable's user-facing name for the binder.
    let name := (← getLCtx).findFVar? b |>.get!.userName
    let p : Q($β → Prop) ← Impl.mkLambdaQ name b res
    pure q(Exists $p)
/-- Finds a `Path` for `findEq`. It leads to a subexpression `a = a'` or `a' = a`, where
`a'` doesn't contain the free variable `a`.
This is a fast version that quickly returns `none` when the simproc
is not applicable. It compares syntactically (no defeq checks). -/
partial def findEqPath {u : Level} {α : Q(Sort u)} (a : Q($α)) (P : Q(Prop)) :
    MetaM <| Option Path := do
  match_expr P with
  | Eq _ x y =>
    -- Accept an equation only if the other side does not mention `a`.
    if a == x && !(y.containsFVar a.fvarId!) then
      return some []
    if a == y && !(x.containsFVar a.fvarId!) then
      return some []
    return none
  | And L R =>
    -- Prefer the left branch; record the direction taken at this node.
    if let some path ← findEqPath a L then
      return some (.left :: path)
    if let some path ← findEqPath a R then
      return some (.right :: path)
    return none
  | Exists tb pb =>
    -- Only enter the existential when its domain does not depend on `a`.
    if (tb.containsFVar a.fvarId!) then
      return none
    let .lam _ _ body _ := pb | return none
    findEqPath a body
  | _ => return none
/-- Given `P : Prop` and `a : α`, traverses the expression `P` to find a subexpression of
the form `a = a'` or `a' = a` for some `a'`. It branches at each `And` and walks into
existential quantifiers.
Returns a tuple `(fvars, lctx, P', a')`, where:
* `fvars` is a list of all variables bound by existential quantifiers along the path.
* `lctx` is the local context containing all these free variables.
* `P'` is `P` with all existential quantifiers along the path removed, and corresponding bound
  variables replaced with `fvars`.
* `a'` is the expression found that must be equal to `a`.
  It may contain free variables from `fvars`.
The `path` argument is expected to come from `findEqPath`; when it disagrees with the shape
of `P`, an internal error is raised via `assertUnreachable`. -/
partial def findEq {u : Level} {α : Q(Sort u)} (a : Q($α)) (P : Q(Prop)) (path : Path) :
    MetaM (List VarQ × LocalContext × Q(Prop) × Q($α)) := do
  go a P path
where
  /-- Recursive part of `findEq`. -/
  go {u : Level} {α : Q(Sort u)} (a : Q($α)) (P : Q(Prop)) (path : Path) :
      MetaM (List VarQ × LocalContext × Q(Prop) × Q($α)) := do
    match P with
    | ~q(@Eq.{u} $γ $x $y) =>
      -- Reached the equation: return the side that is not `a`.
      if a == x && !(y.containsFVar a.fvarId!) then
        return ([], ← getLCtx, P, y)
      if a == y && !(x.containsFVar a.fvarId!) then
        return ([], ← getLCtx, P, x)
      assertUnreachable
        "findEq: some side of equality must be `a`, and the other must not depend on `a`"
    | ~q($L ∧ $R) =>
      -- Follow the recorded direction; the untouched side is kept as-is.
      match (generalizing := false) path with
      -- Fixed typo in the diagnostic message: "conjuction" → "conjunction".
      | [] => assertUnreachable "findEq: P is conjunction but path is empty"
      | .left :: tl =>
        let (fvars, lctx, P', a') ← go a q($L) tl
        return (fvars, lctx, q($P' ∧ $R), a')
      | .right :: tl =>
        let (fvars, lctx, P', a') ← go a q($R) tl
        return (fvars, lctx, q($L ∧ $P'), a')
    | ~q(@Exists $β $pb) =>
      -- Open the binder, collect the fresh free variable, and drop the quantifier from `P'`.
      lambdaBoundedTelescope pb 1 fun bs (body : Q(Prop)) => do
        let #[(b : Q($β))] := bs | unreachable!
        let (fvars, lctx, P', a') ← go a q($body) path
        return (⟨_, _, b⟩ :: fvars, lctx, P', a')
    | _ => assertUnreachable s!"findEq: unexpected P = {← ppExpr P}"
/-- When `P = ∃ f₁ ... fₙ, body`, where `exs = [f₁, ..., fₙ]`, this function takes
`act : body → goal` and proves `P → goal` using `Exists.elim`.
Example:
```
exs = []: act h
exs = [b]:
P := ∃ b, body
Exists.elim h (fun b hb ↦ act hb)
exs = [b, c]:
P := ∃ b c, body
Exists.elim h (fun b hb ↦
Exists.elim hb (fun c hc ↦ act hc)
)
...
``` -/
def withNestedExistsElim {P body goal : Q(Prop)} (exs : List VarQ) (h : Q($P))
(act : Q($body) → MetaM Q($goal)) : MetaM Q($goal) := do
match exs with
| [] =>
let _ : $P =Q $body := ⟨⟩
act q($h)
| ⟨u, β, b⟩ :: tl =>
let ~q(@Exists.{u} $γ $p) := P
| assertUnreachable <| "withNestedExistsElim: exs is not empty but P is not `Exists`.\n" ++
s!"P = {← ppExpr P}"
let _ : $β =Q $γ := ⟨⟩
withLocalDeclQ .anonymous .default q($p $b) fun hb => do
let pf1 ← withNestedExistsElim tl hb act
let pf2 : Q(∀ b, $p b → $goal) ← mkLambdaFVars #[b, hb] pf1
return q(Exists.elim $h $pf2)
/-- Generates a proof of `P' → ∃ a, p a`. We assume that `fvars = [f₁, ..., fₙ]` are free variables
and `P' = ∃ f₁ ... fₙ, newBody`, and `path` leads to `a = a'` in `∃ a, p a`.
The proof follows the following structure:
```
example {α β : Type} (f : β → α) {p : α → Prop} :
(∃ b, p (f b) ∧ f b = f b) → (∃ a, p a ∧ ∃ b, a = f b) := by
-- withLocalDeclQ
intro h
-- withNestedExistsElim : we unpack all quantifiers in `P` to get `h : newBody`.
refine h.elim (fun b h ↦ ?_)
-- use `a'` in the leading existential quantifier
refine Exists.intro (f b) ?_
-- then we traverse `newBody` and goal simultaneously
refine And.intro ?_ ?_
-- at branches outside the path `h` must coincide with the goal
· replace h := h.left
exact h
-- inside path we substitute variables from `fvars` into existential quantifiers.
· replace h := h.right
refine Exists.intro b ?_
-- at the end the goal must be `x' = x'`.
rfl
``` -/
partial def mkAfterToBefore {u : Level} {α : Q(Sort u)} {p : Q($α → Prop)}
{P' : Q(Prop)} (a' : Q($α)) (newBody : Q(Prop)) (fvars : List VarQ) (path : Path) :
MetaM <| Q($P' → (∃ a, $p a)) := do
withLocalDeclQ .anonymous .default P' fun (h : Q($P')) => do
let pf : Q(∃ a, $p a) ← withNestedExistsElim fvars h fun (h : Q($newBody)) => do
let pf1 : Q($p $a') ← go h fvars path
return q(Exists.intro $a' $pf1)
mkLambdaFVars #[h] pf
where
/-- Traverses `P` and `goal` simultaneously, proving `goal`. -/
go {goal P : Q(Prop)} (h : Q($P)) (exs : List VarQ) (path : Path) :
MetaM Q($goal) := do
match goal with
| ~q(@Exists $β $pb) =>
match (generalizing := false) exs with
| [] => assertUnreachable "mkAfterToBefore: goal is `Exists` but `exs` is empty"
| ⟨v, γ, c⟩ :: exsTail =>
let _ : u_1 =QL v := ⟨⟩
let _ : $γ =Q $β := ⟨⟩
let pf1 : Q($pb $c) := ← go h exsTail path
return q(Exists.intro $c $pf1)
| ~q(And $L $R) =>
let ~q($L' ∧ $R') := P
| assertUnreachable "mkAfterToBefore: goal is `And` but `P` is not `And`"
match (generalizing := false) path with
| [] => assertUnreachable "mkAfterToBefore: goal is `And` but `exs` is empty"
| .left :: tl =>
let _ : $R =Q $R' := ⟨⟩
let pfRight : Q($R) := q(And.right $h)
let pfLeft : Q($L) ← go q(And.left $h) exs tl
return q(And.intro $pfLeft $pfRight)
| .right :: tl =>
let _ : $L =Q $L' := ⟨⟩
let pfLeft : Q($L) := q(And.left $h)
let pfRight : Q($R) ← go q(And.right $h) exs tl
return q(And.intro $pfLeft $pfRight)
| _ =>
let ~q($x = $y) := goal
| assertUnreachable "mkAfterToBefore: unexpected goal: {← ppExpr goal}"
if !path.isEmpty then
assertUnreachable "mkAfterToBefore: `goal` is equality but `path` is not empty"
let _ : $x =Q $y := ⟨⟩
return q(rfl)
/-- Recursive implementation for `withExistsElimAlongPath`. -/
partial def withExistsElimAlongPathImp {u : Level} {α : Q(Sort u)}
{P goal : Q(Prop)} (h : Q($P)) {a a' : Q($α)} (exs : List VarQ) (path : Path)
(hs : List HypQ)
(act : Q($a = $a') → List HypQ → MetaM Q($goal)) :
MetaM Q($goal) := do
match P with
| ~q(@Exists $β $pb) =>
match (generalizing := false) exs with
| [] => assertUnreachable "withExistsElimAlongPathImp: `P` is `Exists` but `exs` is empty"
| ⟨v, γ, b⟩ :: exsTail =>
let _ : u_1 =QL v := ⟨⟩
let _ : $γ =Q $β := ⟨⟩
withLocalDeclQ .anonymous .default q($pb $b) fun hb => do
let newHs := hs ++ [⟨_, hb⟩]
let pf1 ← withExistsElimAlongPathImp (P := q($pb $b)) hb exsTail path newHs act
let pf2 : Q(∀ b, $pb b → $goal) ← mkLambdaFVars #[b, hb] pf1
return q(Exists.elim $h $pf2)
| ~q(And $L' $R') =>
match (generalizing := false) path with
| [] => assertUnreachable "withExistsElimAlongPathImp: `P` is `And` but `path` is empty"
| .left :: tl =>
withExistsElimAlongPathImp q(And.left $h) exs tl hs act
| .right :: tl =>
withExistsElimAlongPathImp q(And.right $h) exs tl hs act
| ~q(@Eq.{u} $γ $x $y) =>
let _ : $γ =Q $α := ⟨⟩
if !path.isEmpty then
assertUnreachable "withExistsElimAlongPathImp: `P` is equality but `path` is not empty"
if a == x then
let _ : $a =Q $x := ⟨⟩
let _ : $a' =Q $y := ⟨⟩
act q($h) hs
else if a == y then
let _ : $a =Q $y := ⟨⟩
let _ : $a' =Q $x := ⟨⟩
act q(Eq.symm $h) hs
else
assertUnreachable "withExistsElimAlongPathImp: `P` is equality but neither of sides is `a`"
| _ => assertUnreachable s!"withExistsElimAlongPathImp: unexpected P = {← ppExpr P}"
/-- Given `act : (a = a') → hb₁ → hb₂ → ... → hbₙ → goal` where `hb₁, ..., hbₙ` are hypotheses
obtained when unpacking existential quantifiers with variables from `exs`, it proves `goal` using
`Exists.elim`. We use this to prove implication in the forward direction. -/
def withExistsElimAlongPath {u : Level} {α : Q(Sort u)}
    {P goal : Q(Prop)} (h : Q($P)) {a a' : Q($α)} (exs : List VarQ) (path : Path)
    (act : Q($a = $a') → List HypQ → MetaM Q($goal)) :
    MetaM Q($goal) :=
  -- Start the recursion with an empty list of accumulated hypotheses.
  withExistsElimAlongPathImp h exs path [] act
/-- When `P = ∃ f₁ ... fₙ, body`, where `exs = [f₁, ..., fₙ]`, this function takes
`act : body` and proves `P` using `Exists.intro`.
Example:
```
exs = []: act
exs = [b]:
  P := ∃ b, body
  Exists.intro b act
exs = [b, c]:
  P := ∃ b c, body
  Exists.intro b (Exists.intro c act)
...
``` -/
def withNestedExistsIntro {P body : Q(Prop)} (exs : List VarQ)
    (act : MetaM Q($body)) : MetaM Q($P) := do
  match exs with
  | [] =>
    -- No quantifiers left: `P` and `body` must agree definitionally.
    let _ : $P =Q $body := ⟨⟩
    act
  | ⟨u, β, b⟩ :: tl =>
    let ~q(@Exists.{u} $γ $p) := P
      | assertUnreachable "withNestedExistsIntro: `exs` is not empty but `P` is not `Exists`"
    let _ : $β =Q $γ := ⟨⟩
    -- Prove the inner statement first, then supply the witness `b`.
    let pf ← withNestedExistsIntro tl act
    return q(Exists.intro $b $pf)
/-- Generates a proof of `∃ a, p a → P'`. We assume that `fvars = [f₁, ..., fₙ]` are free variables
and `P' = ∃ f₁ ... fₙ, newBody`, and `path` leads to `a = a'` in `∃ a, p a`.
The proof follows the following structure:
```
example {α β : Type} (f : β → α) {p : α → Prop} :
(∃ a, p a ∧ ∃ b, a = f b) → (∃ b, p (f b) ∧ f b = f b) := by
-- withLocalDeclQ
intro h
refine h.elim (fun a ha ↦ ?_)
-- withExistsElimAlongPath: following the path we unpack all existential quantifiers.
-- at the end `hs = [hb]`.
have h' := ha
replace h' := h'.right
refine Exists.elim h' (fun b hb ↦ ?_)
replace h' := hb
have h_eq := h'
clear h'
-- go: we traverse `P` and `goal` simultaneously
have h' := ha
refine Exists.intro b ?_
refine And.intro ?_ ?_
-- outside the path the goal must coincide with `h_eq ▸ h'`
· replace h' := h'.left
exact Eq.mp (congrArg (fun t ↦ p t) h_eq) h'
-- inside the path:
· replace h' := h'.right
-- when `h'` starts with existential quantifier we replace it with next hypothesis from `hs`.
replace h' := hb
-- at the end the goal must be `x' = x'`.
rfl
``` -/
partial def mkBeforeToAfter {u : Level} {α : Q(Sort u)} {p : Q($α → Prop)}
{P' : Q(Prop)} (a' : Q($α)) (newBody : Q(Prop)) (fvars : List VarQ) (path : Path) :
MetaM <| Q((∃ a, $p a) → $P') := do
withLocalDeclQ .anonymous .default q(∃ a, $p a) fun h => do
withLocalDeclQ .anonymous .default q($α) fun a => do
withLocalDeclQ .anonymous .default q($p $a) fun ha => do
let pf1 ← withExistsElimAlongPath ha fvars path fun (h_eq : Q($a = $a')) hs => do
let pf1 : Q($P') ← withNestedExistsIntro fvars (body := newBody) do
let pf ← go ha fvars hs path h_eq
pure pf
pure pf1
let pf2 : Q(∀ a : $α, $p a → $P') ← mkLambdaFVars #[a, ha] pf1
let pf3 : Q($P') := q(Exists.elim $h $pf2)
mkLambdaFVars #[h] pf3
where
/-- Traverses `P` and `goal` simultaneously, proving `goal`. -/
go {goal P : Q(Prop)} (h : Q($P)) (exs : List VarQ) (hs : List HypQ) (path : Path)
{u : Level} {α : Q(Sort u)} {a a' : Q($α)} (h_eq : Q($a = $a')) :
MetaM Q($goal) := do
match P with
| ~q(@Exists $β $pb) =>
match (generalizing := false) exs with
| [] => assertUnreachable "mkBeforeToAfter: `P` is `Exists` but `exs` is empty"
| ⟨v, γ, b⟩ :: exsTail =>
let _ : u_1 =QL v := ⟨⟩
let _ : $γ =Q $β := ⟨⟩
match (generalizing := false) hs with
| [] => assertUnreachable "mkBeforeToAfter: `P` is `Exists` but `hs` is empty"
| ⟨H, hb⟩ :: hsTail =>
let _ : $H =Q $pb $b := ⟨⟩
let pf : Q($goal) := ← go hb exsTail hsTail path h_eq
return pf
| ~q(And $L $R) =>
let ~q($L' ∧ $R') := goal
| assertUnreachable "mkBeforeToAfter: `P` is `And` but `goal` is not `And`"
match (generalizing := false) path with
| [] => assertUnreachable "mkBeforeToAfter: `P` is `And` but `path` is empty"
| .left :: tl =>
let pa : Q($α → Prop) ← mkLambdaFVars #[a] R
let _ : $R =Q $pa $a := ⟨⟩
let _ : $R' =Q $pa $a' := ⟨⟩
let pfRight : Q($R) := q(And.right $h)
let pfRight' : Q($R') := q(Eq.mp (congrArg $pa $h_eq) $pfRight)
let pfLeft' : Q($L') ← go q(And.left $h) exs hs tl h_eq
return q(And.intro $pfLeft' $pfRight')
| .right :: tl =>
let pa : Q($α → Prop) ← mkLambdaFVars #[a] L
let _ : $L =Q $pa $a := ⟨⟩
let _ : $L' =Q $pa $a' := ⟨⟩
let pfLeft : Q($L) := q(And.left $h)
let pfLeft' : Q($L') := q(Eq.mp (congrArg $pa $h_eq) $pfLeft)
let pfRight' : Q($R') ← go q(And.right $h) exs hs tl h_eq
return q(And.intro $pfLeft' $pfRight')
| _ =>
let ~q($x = $y) := goal
| assertUnreachable s!"mkBeforeToAfter: unexpected goal = {← ppExpr goal}"
if !path.isEmpty then
assertUnreachable "mkBeforeToAfter: goal is equality but path is not empty"
let _ : $x =Q $y := ⟨⟩
return q(rfl)
/-- Triggers at goals of the form `∃ a, body` and checks if `body` allows a single value `a'`
for `a`. If so, replaces `a` with `a'` and removes quantifier.
It looks through nested quantifiers and conjuctions searching for a `a = a'`
or `a' = a` subexpression. -/
simproc ↓ existsAndEq (Exists _) := fun e => do
let_expr f@Exists α p := e | return .continue
lambdaBoundedTelescope p 1 fun xs (body : Q(Prop)) => withNewMCtxDepth do
let some u := f.constLevels![0]? | unreachable!
have α : Q(Sort $u) := α; have p : Q($α → Prop) := p
let some (a : Q($α)) := xs[0]? | return .continue
let some path ← findEqPath a body | return .continue
let (fvars, lctx, newBody, a') ← findEq a body path
withLCtx' lctx do
let newBody := newBody.replaceFVar a a'
let P' : Q(Prop) ← mkNestedExists fvars newBody
let pfBeforeAfter : Q((∃ a, $p a) → $P') ← mkBeforeToAfter a' newBody fvars path
let pfAfterBefore : Q($P' → (∃ a, $p a)) ← mkAfterToBefore a' newBody fvars path
let pf := q(propext (Iff.intro $pfBeforeAfter $pfAfterBefore))
return .visit <| Simp.ResultQ.mk _ <| some q($pf)
end ExistsAndEq
export ExistsAndEq (existsAndEq) |
.lake/packages/mathlib/Mathlib/Tactic/Simproc/Factors.lean | import Mathlib.Data.Nat.Factors
import Mathlib.Tactic.NormNum.Prime
/-!
# `simproc` for `Nat.primeFactorsList`
Note that since `norm_num` can only produce numerals,
we can't register this as a `norm_num` extension.
-/
open Nat
namespace Mathlib.Meta.Simproc
open Mathlib.Meta.NormNum
/-- A proof of the partial computation of `primeFactorsList`.
Asserts that `l` is a sorted list of primes multiplying to `n` and lower bounded by a prime `p`. -/
def FactorsHelper (n p : ℕ) (l : List ℕ) : Prop :=
p.Prime → (p :: l).IsChain (· ≤ ·) ∧ (∀ a ∈ l, Nat.Prime a) ∧ l.prod = n
/-! The argument explicitness in this section is chosen to make only the numerals in the factors
list appear in the proof term. -/
theorem FactorsHelper.nil {a : ℕ} : FactorsHelper 1 a [] := fun _ =>
⟨.singleton _, List.forall_mem_nil _, List.prod_nil⟩
theorem FactorsHelper.cons_of_le
{n m : ℕ} (a : ℕ) {b : ℕ} {l : List ℕ} (h₁ : IsNat (b * m) n) (h₂ : a ≤ b)
(h₃ : minFac b = b) (H : FactorsHelper m b l) : FactorsHelper n a (b :: l) := fun pa =>
have pb : b.Prime := Nat.prime_def_minFac.2 ⟨le_trans pa.two_le h₂, h₃⟩
let ⟨f₁, f₂, f₃⟩ := H pb
⟨List.IsChain.cons_cons h₂ f₁,
fun _ h => (List.eq_or_mem_of_mem_cons h).elim (fun e => e.symm ▸ pb) (f₂ _),
by rw [List.prod_cons, f₃, h₁.out, cast_id]⟩
theorem FactorsHelper.cons
{n m : ℕ} {a : ℕ} (b : ℕ) {l : List ℕ} (h₁ : IsNat (b * m) n) (h₂ : Nat.blt a b)
(h₃ : IsNat (minFac b) b) (H : FactorsHelper m b l) : FactorsHelper n a (b :: l) :=
H.cons_of_le _ h₁ (Nat.blt_eq.mp h₂).le h₃.out
theorem FactorsHelper.singleton (n : ℕ) {a : ℕ} (h₁ : Nat.blt a n) (h₂ : IsNat (minFac n) n) :
FactorsHelper n a [n] :=
FactorsHelper.nil.cons _ ⟨mul_one _⟩ h₁ h₂
theorem FactorsHelper.cons_self {n m : ℕ} (a : ℕ) {l : List ℕ}
(h : IsNat (a * m) n) (H : FactorsHelper m a l) :
FactorsHelper n a (a :: l) := fun pa =>
H.cons_of_le _ h le_rfl (Nat.prime_def_minFac.1 pa).2 pa
theorem FactorsHelper.singleton_self (a : ℕ) : FactorsHelper a a [a] :=
FactorsHelper.nil.cons_self _ ⟨mul_one _⟩
theorem FactorsHelper.primeFactorsList_eq {n : ℕ} {l : List ℕ} (H : FactorsHelper n 2 l) :
Nat.primeFactorsList n = l :=
let ⟨h₁, h₂, h₃⟩ := H Nat.prime_two
have := List.isChain_iff_pairwise.1 (@List.IsChain.tail _ _ (_ :: _) h₁)
(List.eq_of_perm_of_sorted
(Nat.primeFactorsList_unique h₃ h₂) this (Nat.primeFactorsList_sorted _)).symm
open Lean Elab Tactic Qq
/-- Given `n` and `a` (in expressions `en` and `ea`) corresponding to literal numerals
(in `enl` and `eal`), returns `(l, ⊢ factorsHelper n a l)`. -/
private partial def evalPrimeFactorsListAux
{en enl : Q(ℕ)} {ea eal : Q(ℕ)} (ehn : Q(IsNat $en $enl)) (eha : Q(IsNat $ea $eal)) :
MetaM ((l : Q(List ℕ)) × Q(FactorsHelper $en $ea $l)) := do
/-
In this function we will use the convention that all `e` prefixed variables (proofs or otherwise)
contain `Expr`s. The variables starting with `h` are proofs about the _meta_ code;
these will not actually be used in the construction of the proof, and are simply used to help the
reader reason about why the proof construction is correct.
-/
let n := enl.natLit!
let ⟨hn0⟩ ← if h : 0 < n then pure <| PLift.up h else
throwError m!"{enl} must be positive"
let a := eal.natLit!
let b := n.minFac
let ⟨hab⟩ ← if h : a ≤ b then pure <| PLift.up h else
throwError m!"{q($eal < $(enl).minFac)} does not hold"
if h_bn : b < n then
-- the factor is less than `n`, so we are not done; remove it to get `m`
let m := n / b
have em : Q(ℕ) := mkRawNatLit m
have ehm : Q(IsNat (OfNat.ofNat $em) $em) := q(⟨rfl⟩)
if h_ba_eq : b = a then
-- if the factor is our minimum `a`, then recurse without changing the minimum
have eh : Q($eal * $em = $en) :=
have : a * m = n := by simp [m, b, ← h_ba_eq, Nat.mul_div_cancel' (minFac_dvd _)]
(q(Eq.refl $en) : Expr)
let ehp₁ := q(isNat_mul rfl $eha $ehm $eh)
let ⟨el, ehp₂⟩ ← evalPrimeFactorsListAux ehm eha
pure ⟨q($ea :: $el), q(($ehp₂).cons_self _ $ehp₁)⟩
else
-- Otherwise when we recurse, we should use `b` as the new minimum factor. Note that
-- we must use `evalMinFac.core` to get a proof that `b` is what we computed it as.
have eb : Q(ℕ) := mkRawNatLit b
have ehb : Q(IsNat (OfNat.ofNat $eb) $eb) := q(⟨rfl⟩)
have ehbm : Q($eb * $em = $en) :=
have : b * m = n := Nat.mul_div_cancel' (minFac_dvd _)
(q(Eq.refl $en) : Expr)
have ehp₁ := q(isNat_mul rfl $ehb $ehm $ehbm)
have ehp₂ : Q(Nat.blt $ea $eb = true) :=
have : a < b := lt_of_le_of_ne' hab h_ba_eq
(q(Eq.refl (true)) : Expr)
let .isNat _ lit ehp₃ ← evalMinFac.core q($eb) q(inferInstance) q($eb) ehb b | failure
assertInstancesCommute
have : $lit =Q $eb := ⟨⟩
let ⟨l, p₄⟩ ← evalPrimeFactorsListAux ehm ehb
pure ⟨q($eb :: $l), q(($p₄).cons _ $ehp₁ $ehp₂ $ehp₃ )⟩
else
-- the factor is our number itself, so we are done
have hbn_eq : b = n := (minFac_le hn0).eq_or_lt.resolve_right h_bn
if hba : b = a then
have eh : Q($en = $ea) :=
have : n = a := hbn_eq.symm.trans hba
(q(Eq.refl $en) : Expr)
pure ⟨q([$ea]), q($eh ▸ FactorsHelper.singleton_self $ea)⟩
else do
let eh_a_lt_n : Q(Nat.blt $ea $en = true) :=
have : a < n := by cutsat
(q(Eq.refl true) : Expr)
let .isNat _ lit ehn_minFac ← evalMinFac.core q($en) q(inferInstance) q($enl) ehn n | failure
have : $lit =Q $en := ⟨⟩
assertInstancesCommute
pure ⟨q([$en]), q(FactorsHelper.singleton $en $eh_a_lt_n $ehn_minFac)⟩
/-- Given a natural number `n`, returns `(l, ⊢ Nat.primeFactorsList n = l)`. -/
def evalPrimeFactorsList
{en enl : Q(ℕ)} (hn : Q(IsNat $en $enl)) :
MetaM ((l : Q(List ℕ)) × Q(Nat.primeFactorsList $en = $l)) := do
match enl.natLit! with
| 0 =>
have _ : $enl =Q nat_lit 0 := ⟨⟩
have hen : Q($en = 0) := q($(hn).out)
return ⟨_, q($hen ▸ Nat.primeFactorsList_zero)⟩
| 1 =>
let _ : $enl =Q nat_lit 1 := ⟨⟩
have hen : Q($en = 1) := q($(hn).out)
return ⟨_, q($hen ▸ Nat.primeFactorsList_one)⟩
| _ => do
have h2 : Q(IsNat 2 (nat_lit 2)) := q(⟨Eq.refl (nat_lit 2)⟩)
let ⟨l, p⟩ ← evalPrimeFactorsListAux hn h2
return ⟨l, q(($p).primeFactorsList_eq)⟩
end Mathlib.Meta.Simproc
open Qq Mathlib.Meta.Simproc Mathlib.Meta.NormNum
/-- A simproc for terms of the form `Nat.primeFactorsList (OfNat.ofNat n)`. -/
simproc Nat.primeFactorsList_ofNat (Nat.primeFactorsList _) := .ofQ fun u α e => do
match u, α, e with
| 1, ~q(List ℕ), ~q(Nat.primeFactorsList (OfNat.ofNat $n)) =>
let hn : Q(IsNat (OfNat.ofNat $n) $n) := q(⟨rfl⟩)
let ⟨l, p⟩ ← evalPrimeFactorsList hn
return .done <| .mk q($l) <| some q($p)
| _ =>
return .continue |
.lake/packages/mathlib/Mathlib/Tactic/Simproc/Divisors.lean | import Mathlib.NumberTheory.Divisors
import Mathlib.Util.Qq
/-! # Divisor Simprocs
This file implements (d)simprocs to compute various objects related to divisors:
- `Nat.divisors_ofNat`: computes `Nat.divisors n` for explicit values of `n`
- `Nat.properDivisors_ofNat`: computes `Nat.properDivisors n` for explicit values of `n`
-/
open Lean Meta Simp Qq
/-- The dsimproc `Nat.divisors_ofNat` computes the finset `Nat.divisors n` when `n` is a
numeral. For instance, this simplifies `Nat.divisors 6` to `{1, 2, 3, 6}`. -/
dsimproc_decl Nat.divisors_ofNat (Nat.divisors _) := fun e => do
unless e.isAppOfArity `Nat.divisors 1 do return .continue
let some n ← fromExpr? e.appArg! | return .continue
return .done <| mkSetLiteralQ q(Finset ℕ) <| ((unsafe n.divisors.val.unquot).map mkNatLit)
/-- The dsimproc `Nat.properDivisors_ofNat` computes the finset `Nat.properDivisors n` when
`n` is a numeral. For instance, this simplifies `Nat.properDivisors 12` to `{1, 2, 3, 4, 6}`. -/
dsimproc_decl Nat.properDivisors_ofNat (Nat.properDivisors _) := fun e => do
unless e.isAppOfArity `Nat.properDivisors 1 do return .continue
let some n ← fromExpr? e.appArg! | return .continue
return unsafe .done <| mkSetLiteralQ q(Finset ℕ) <|
((unsafe n.properDivisors.val.unquot).map mkNatLit) |
.lake/packages/mathlib/Mathlib/Tactic/Simproc/FinsetInterval.lean | import Mathlib.Algebra.Order.Interval.Finset.SuccPred
import Mathlib.Data.Nat.SuccPred
import Mathlib.Data.Int.Interval
import Mathlib.Data.Int.SuccPred
import Mathlib.Order.Interval.Finset.Nat
import Mathlib.Util.Qq
/-!
# Simproc for intervals of natural numbers
-/
open Qq Lean Finset
namespace Mathlib.Tactic.Simp
namespace Nat
variable {m n : ℕ} {s : Finset ℕ}
private lemma Icc_eq_empty_of_lt (hnm : n.blt m) : Icc m n = ∅ := by simpa using hnm
private lemma Icc_eq_insert_of_Icc_succ_eq (hmn : m.ble n) (hs : Icc (m + 1) n = s) :
Icc m n = insert m s := by rw [← hs, insert_Icc_add_one_left_eq_Icc (by simpa using hmn)]
private lemma Ico_succ_eq_of_Icc_eq (hs : Icc m n = s) : Ico m (n + 1) = s := by
rw [← hs, Ico_add_one_right_eq_Icc]
private lemma Ico_zero (m : ℕ) : Ico m 0 = ∅ := by simp
private lemma Ioc_eq_of_Icc_succ_eq (hs : Icc (m + 1) n = s) : Ioc m n = s := by
rw [← hs, Icc_add_one_left_eq_Ioc]
private lemma Ioo_eq_of_Icc_succ_pred_eq (hs : Icc (m + 1) (n - 1) = s) : Ioo m n = s := by
rw [← hs, ← Icc_add_one_sub_one_eq_Ioo]
private lemma Iic_eq_of_Icc_zero_eq (hs : Icc 0 n = s) : Iic n = s := hs
private lemma Iio_succ_eq_of_Icc_zero_eq (hs : Icc 0 n = s) : Iio (n + 1) = s := by
erw [Iio_eq_Ico, Ico_add_one_right_eq_Icc, hs]
private lemma Iio_zero : Iio 0 = ∅ := by simp
end Nat
namespace Int
variable {m n : ℤ} {s : Finset ℤ}
private lemma Icc_eq_empty_of_lt (hnm : n < m) : Icc m n = ∅ := by simpa using hnm
private lemma Icc_eq_insert_of_Icc_succ_eq (hmn : m ≤ n) (hs : Icc (m + 1) n = s) :
Icc m n = insert m s := by rw [← hs, insert_Icc_add_one_left_eq_Icc (by simpa using hmn)]
private lemma Ico_eq_of_Icc_pred_eq (hs : Icc m (n - 1) = s) : Ico m n = s := by
rw [← hs, Icc_sub_one_right_eq_Ico]
private lemma Ioc_eq_of_Icc_succ_eq (hs : Icc (m + 1) n = s) : Ioc m n = s := by
rw [← hs, Icc_add_one_left_eq_Ioc]
private lemma Ioo_eq_of_Icc_succ_pred_eq (hs : Icc (m + 1) (n - 1) = s) : Ioo m n = s := by
rw [← hs, ← Icc_add_one_sub_one_eq_Ioo]
private lemma Iio_zero : Iio 0 = ∅ := by simp
end Int
/-- Given natural numbers `m` and `n` and corresponding natural literals `em` and `en`,
returns `(s, ⊢ Finset.Icc m n = s)`.
This cannot be easily merged with `evalFinsetIccInt` since they require different
handling of numerals for `ℕ` and `ℤ`. -/
def evalFinsetIccNat (m n : ℕ) (em en : Q(ℕ)) :
MetaM ((s : Q(Finset ℕ)) × Q(.Icc $em $en = $s)) := do
-- If `m = n`, then `Icc m n = {m}`. We handle this case separately because `insert m ∅` is
-- not syntactically `{m}`.
if m = n then
have : $em =Q $en := ⟨⟩
return ⟨q({$em}), q(Icc_self _)⟩
-- If `m < n`, then `Icc m n = insert m (Icc (m + 1) n)`.
else if m < n then
let hmn : Q(Nat.ble $em $en = true) := (q(Eq.refl true) :)
have em' : Q(ℕ) := mkNatLitQ (m + 1)
have : $em' =Q $em + 1 := ⟨⟩
let ⟨s, hs⟩ ← evalFinsetIccNat (m + 1) n em' en
return ⟨q(insert $em $s), q(Nat.Icc_eq_insert_of_Icc_succ_eq $hmn $hs)⟩
-- Else `n < m` and `Icc m n = ∅`.
else
let hnm : Q(Nat.blt $en $em = true) := (q(Eq.refl true) :)
return ⟨q(∅), q(Nat.Icc_eq_empty_of_lt $hnm)⟩
/-- Given integers `m` and `n` and corresponding integer literals `em` and `en`,
returns `(s, ⊢ Finset.Icc m n = s)`.
This cannot be easily merged with `evalFinsetIccNat` since they require different
handling of numerals for `ℕ` and `ℤ`. -/
partial def evalFinsetIccInt (m n : ℤ) (em en : Q(ℤ)) :
MetaM ((s : Q(Finset ℤ)) × Q(.Icc $em $en = $s)) := do
-- If `m = n`, then `Icc m n = {m}`. We handle this case separately because `insert m ∅` is
-- not syntactically `{m}`.
if m = n then
have : $em =Q $en := ⟨⟩
return ⟨q({$em}), q(Icc_self _)⟩
-- If `m < n`, then `Icc m n = insert m (Icc m n)`.
else if m < n then
let hmn ← mkDecideProofQ q($em ≤ $en)
have em' : Q(ℤ) := mkIntLitQ (m + 1)
have : $em' =Q $em + 1 := ⟨⟩
let ⟨s, hs⟩ ← evalFinsetIccInt (m + 1) n em' en
return ⟨q(insert $em $s), q(Int.Icc_eq_insert_of_Icc_succ_eq $hmn $hs)⟩
-- Else `n < m` and `Icc m n = ∅`.
else
let hnm ← mkDecideProofQ q($en < $em)
return ⟨q(∅), q(Icc_eq_empty_of_lt $hnm)⟩
end Mathlib.Tactic.Simp
open Mathlib.Tactic.Simp
/-!
Note that these simprocs are not made simp to avoid simp blowing up on goals containing things of
the form `Iic (2 ^ 1024)`.
-/
namespace Finset
/-- Simproc to compute `Finset.Icc a b` where `a` and `b` are numerals.
**Warnings**:
* With the standard depth recursion limit, this simproc can compute intervals of size 250 at most.
* Make sure to exclude `Finset.insert_eq_of_mem` from your simp call when using this simproc. This
avoids a quadratic time performance hit. -/
simproc_decl Icc_ofNat_ofNat (Icc _ _) := .ofQ fun u α e ↦ do
match u, α, e with
| 1, ~q(Finset ℕ), ~q(Icc $em $en) =>
let some m := em.nat? | return .continue
let some n := en.nat? | return .continue
let ⟨es, p⟩ ← evalFinsetIccNat m n em en
return .done <| .mk es <| .some p
| 1, ~q(Finset ℤ), ~q(Icc $em $en) =>
let some m := em.int? | return .continue
let some n := en.int? | return .continue
let ⟨es, p⟩ ← evalFinsetIccInt m n em en
return .done <| .mk es <| .some p
| _, _, _ => return .continue
/-- Simproc to compute `Finset.Ico a b` where `a` and `b` are numerals.
**Warnings**:
* With the standard depth recursion limit, this simproc can compute intervals of size 250 at most.
* Make sure to exclude `Finset.insert_eq_of_mem` from your simp call when using this simproc. This
avoids a quadratic time performance hit. -/
simproc_decl Ico_ofNat_ofNat (Ico _ _) := .ofQ fun u α e ↦ do
match u, α, e with
| 1, ~q(Finset ℕ), ~q(Ico $em $en) =>
let some m := em.nat? | return .continue
let some n := en.nat? | return .continue
match n with
| 0 =>
have : $en =Q 0 := ⟨⟩
return .done <| .mk q(∅) <| .some q(Nat.Ico_zero $em)
| n + 1 =>
have en' := mkNatLitQ n
have : $en =Q $en' + 1 := ⟨⟩
let ⟨es, p⟩ ← evalFinsetIccNat m n em en'
return .done { expr := es, proof? := q(Nat.Ico_succ_eq_of_Icc_eq $p) }
| 1, ~q(Finset ℤ), ~q(Ico $em $en) =>
let some m := em.int? | return .continue
let some n := en.int? | return .continue
have en' := mkIntLitQ (n - 1)
have : $en' =Q $en - 1 := ⟨⟩
let ⟨es, p⟩ ← evalFinsetIccInt m (n - 1) em en'
return .done { expr := es, proof? := q(Int.Ico_eq_of_Icc_pred_eq $p) }
| _, _, _ => return .continue
/-- Simproc to compute `Finset.Ioc a b` where `a` and `b` are numerals.
**Warnings**:
* With the standard depth recursion limit, this simproc can compute intervals of size 250 at most.
* Make sure to exclude `Finset.insert_eq_of_mem` from your simp call when using this simproc. This
avoids a quadratic time performance hit. -/
simproc_decl Ioc_ofNat_ofNat (Ioc _ _) := .ofQ fun u α e ↦ do
match u, α, e with
| 1, ~q(Finset ℕ), ~q(Ioc $em $en) =>
let some m := em.nat? | return .continue
let some n := en.nat? | return .continue
have em' := mkNatLitQ (m + 1)
have : $em' =Q $em + 1 := ⟨⟩
let ⟨es, p⟩ ← evalFinsetIccNat (m + 1) n em' en
return .done <| .mk es <| .some q(Nat.Ioc_eq_of_Icc_succ_eq $p)
| 1, ~q(Finset ℤ), ~q(Ioc $em $en) =>
let some m := em.int? | return .continue
let some n := en.int? | return .continue
have em' := mkIntLitQ (m + 1)
have : $em' =Q $em + 1 := ⟨⟩
let ⟨es, p⟩ ← evalFinsetIccInt (m + 1) n em' en
return .done { expr := es, proof? := q(Int.Ioc_eq_of_Icc_succ_eq $p) }
| _, _, _ => return .continue
/-- Simproc to compute `Finset.Ioo a b` where `a` and `b` are numerals.
**Warnings**:
* With the standard depth recursion limit, this simproc can compute intervals of size 250 at most.
* Make sure to exclude `Finset.insert_eq_of_mem` from your simp call when using this simproc. This
avoids a quadratic time performance hit. -/
simproc_decl Ioo_ofNat_ofNat (Ioo _ _) := .ofQ fun u α e ↦ do
match u, α, e with
| 1, ~q(Finset ℕ), ~q(Ioo $em $en) =>
let some m := em.nat? | return .continue
let some n := en.nat? | return .continue
let ⟨es, p⟩ ← evalFinsetIccNat (m + 1) (n - 1) q($em + 1) q($en - 1)
return .done <| .mk es <| .some q(Nat.Ioo_eq_of_Icc_succ_pred_eq $p)
| 1, ~q(Finset ℤ), ~q(Ioo $em $en) =>
let some m := em.int? | return .continue
let some n := en.int? | return .continue
have em' := mkIntLitQ (m + 1)
have : $em' =Q $em + 1 := ⟨⟩
have en' := mkIntLitQ (n - 1)
have : $en' =Q $en - 1 := ⟨⟩
let ⟨es, p⟩ ← evalFinsetIccInt (m + 1) (n - 1) em' en'
return .done { expr := es, proof? := q(Int.Ioo_eq_of_Icc_succ_pred_eq $p) }
| _, _, _ => return .continue
/-- Simproc to compute `Finset.Iic b` where `b` is a numeral.
**Warnings**:
* With the standard depth recursion limit, this simproc can compute intervals of size 250 at most.
* Make sure to exclude `Finset.insert_eq_of_mem` from your simp call when using this simproc. This
avoids a quadratic time performance hit. -/
simproc_decl Iic_ofNat (Iic _) := .ofQ fun u α e ↦ do
match u, α, e with
| 1, ~q(Finset ℕ), ~q(Iic $en) =>
let some n := en.nat? | return .continue
let ⟨es, p⟩ ← evalFinsetIccNat 0 n q(0) en
return .done <| .mk es <| .some q(Nat.Iic_eq_of_Icc_zero_eq $p)
| _, _, _ => return .continue
/-- Simproc to compute `Finset.Iio b` where `b` is a numeral.
**Warnings**:
* With the standard depth recursion limit, this simproc can compute intervals of size 250 at most.
* Make sure to exclude `Finset.insert_eq_of_mem` from your simp call when using this simproc. This
avoids a quadratic time performance hit. -/
simproc_decl Iio_ofNat (Iio _) := .ofQ fun u α e ↦ do
match u, α, e with
| 1, ~q(Finset ℕ), ~q(Iio $en) =>
let some n := en.nat? | return .continue
match n with
| 0 =>
have : $en =Q 0 := ⟨⟩
return .done <| .mk q(∅) <| .some q(Nat.Iio_zero)
| n + 1 =>
have en' := mkNatLitQ n
have : $en =Q $en' + 1 := ⟨⟩
let ⟨es, p⟩ ← evalFinsetIccNat 0 n q(0) q($en')
return .done <| .mk es <| some q(Nat.Iio_succ_eq_of_Icc_zero_eq $p)
| _, _, _ => return .continue
attribute [nolint unusedHavesSuffices]
Iio_ofNat Ico_ofNat_ofNat Ioc_ofNat_ofNat Ioo_ofNat_ofNat
/-! ### `ℕ` -/
example : Icc 1 0 = ∅ := by simp only [Icc_ofNat_ofNat]
example : Icc 1 1 = {1} := by simp only [Icc_ofNat_ofNat]
example : Icc 1 2 = {1, 2} := by simp only [Icc_ofNat_ofNat]
example : Ico 1 1 = ∅ := by simp only [Ico_ofNat_ofNat]
example : Ico 1 2 = {1} := by simp only [Ico_ofNat_ofNat]
example : Ico 1 3 = {1, 2} := by simp only [Ico_ofNat_ofNat]
example : Ioc 1 1 = ∅ := by simp only [Ioc_ofNat_ofNat]
example : Ioc 1 2 = {2} := by simp only [Ioc_ofNat_ofNat]
example : Ioc 1 3 = {2, 3} := by simp only [Ioc_ofNat_ofNat]
example : Ioo 1 2 = ∅ := by simp only [Ioo_ofNat_ofNat]
example : Ioo 1 3 = {2} := by simp only [Ioo_ofNat_ofNat]
example : Ioo 1 4 = {2, 3} := by simp only [Ioo_ofNat_ofNat]
example : Iic 0 = {0} := by simp only [Iic_ofNat]
example : Iic 1 = {0, 1} := by simp only [Iic_ofNat]
example : Iic 2 = {0, 1, 2} := by simp only [Iic_ofNat]
example : Iio 0 = ∅ := by simp only [Iio_ofNat]
example : Iio 1 = {0} := by simp only [Iio_ofNat]
example : Iio 2 = {0, 1} := by simp only [Iio_ofNat]
/-! ### `ℤ` -/
example : Icc (1 : ℤ) 0 = ∅ := by simp only [Icc_ofNat_ofNat]
example : Icc (1 : ℤ) 1 = {1} := by simp only [Icc_ofNat_ofNat]
example : Icc (1 : ℤ) 2 = {1, 2} := by simp only [Icc_ofNat_ofNat]
example : Ico (1 : ℤ) 1 = ∅ := by simp only [Ico_ofNat_ofNat]
example : Ico (1 : ℤ) 2 = {1} := by simp only [Ico_ofNat_ofNat]
example : Ico (1 : ℤ) 3 = {1, 2} := by simp only [Ico_ofNat_ofNat]
example : Ioc (1 : ℤ) 1 = ∅ := by simp only [Ioc_ofNat_ofNat]
example : Ioc (1 : ℤ) 2 = {2} := by simp only [Ioc_ofNat_ofNat]
example : Ioc (1 : ℤ) 3 = {2, 3} := by simp only [Ioc_ofNat_ofNat]
example : Ioo (1 : ℤ) 2 = ∅ := by simp only [Ioo_ofNat_ofNat]
example : Ioo (1 : ℤ) 3 = {2} := by simp only [Ioo_ofNat_ofNat]
example : Ioo (1 : ℤ) 4 = {2, 3} := by simp only [Ioo_ofNat_ofNat]
end Finset |
.lake/packages/mathlib/Mathlib/Tactic/Widget/Calc.lean | import Lean.Elab.Tactic.Calc
import Lean.Meta.Tactic.TryThis
import Mathlib.Data.String.Defs
import Mathlib.Tactic.Widget.SelectPanelUtils
import Batteries.CodeAction.Attr
/-! # Calc widget
This file redefines the `calc` tactic so that it displays a widget panel allowing to create
new calc steps with holes specified by selected sub-expressions in the goal.
-/
section code_action
open Batteries.CodeAction
open Lean Server RequestM
/-- Code action to create a `calc` tactic from the current goal. -/
@[tactic_code_action calcTactic]
def createCalc : TacticCodeAction := fun _params _snap ctx _stack node => do
let .node (.ofTacticInfo info) _ := node | return #[]
if info.goalsBefore.isEmpty then return #[]
let eager := {
title := s!"Generate a calc block."
kind? := "quickfix"
}
let doc ← readDoc
return #[{
eager
lazy? := some do
let tacPos := doc.meta.text.utf8PosToLspPos info.stx.getPos?.get!
let endPos := doc.meta.text.utf8PosToLspPos info.stx.getTailPos?.get!
let goal := info.goalsBefore[0]!
let goalFmt ← ctx.runMetaM {} <| goal.withContext do Meta.ppExpr (← goal.getType)
return { eager with
edit? := some <|.ofTextEdit doc.versionedIdentifier
{ range := ⟨tacPos, endPos⟩, newText := s!"calc {goalFmt} := by sorry" }
}
}]
end code_action
open ProofWidgets
open Lean Meta
open Lean Server in
/-- Parameters for the calc widget. -/
structure CalcParams extends SelectInsertParams where
/-- Is this the first calc step? -/
isFirst : Bool
/-- indentation level of the calc block. -/
indent : Nat
deriving SelectInsertParamsClass, RpcEncodable
/-- Return the link text and inserted text above and below of the calc widget. -/
def suggestSteps (pos : Array Lean.SubExpr.GoalsLocation) (goalType : Expr) (params : CalcParams) :
MetaM (String × String × Option (String.Pos.Raw × String.Pos.Raw)) := do
let subexprPos := getGoalLocations pos
let some (rel, lhs, rhs) ← Lean.Elab.Term.getCalcRelation? goalType |
throwError "invalid 'calc' step, relation expected{indentExpr goalType}"
let relApp := mkApp2 rel
(← mkFreshExprMVar none)
(← mkFreshExprMVar none)
let some relStr := ((← Meta.ppExpr relApp) |> toString |>.splitOn)[1]?
| throwError "could not find relation symbol in {relApp}"
let isSelectedLeft := subexprPos.any (fun L ↦ #[0, 1].isPrefixOf L.toArray)
let isSelectedRight := subexprPos.any (fun L ↦ #[1].isPrefixOf L.toArray)
let mut goalType := goalType
for pos in subexprPos do
goalType ← insertMetaVar goalType pos
let some (_, newLhs, newRhs) ← Lean.Elab.Term.getCalcRelation? goalType |
throwError "invalid 'calc' step, relation expected{indentExpr goalType}"
let lhsStr := (toString <| ← Meta.ppExpr lhs).renameMetaVar
let newLhsStr := (toString <| ← Meta.ppExpr newLhs).renameMetaVar
let rhsStr := (toString <| ← Meta.ppExpr rhs).renameMetaVar
let newRhsStr := (toString <| ← Meta.ppExpr newRhs).renameMetaVar
let spc := String.replicate params.indent ' '
let insertedCode := match isSelectedLeft, isSelectedRight with
| true, true =>
if params.isFirst then
s!"{lhsStr} {relStr} {newLhsStr} := by sorry\n{spc}_ {relStr} {newRhsStr} := by sorry\n\
{spc}_ {relStr} {rhsStr} := by sorry"
else
s!"_ {relStr} {newLhsStr} := by sorry\n{spc}\
_ {relStr} {newRhsStr} := by sorry\n{spc}\
_ {relStr} {rhsStr} := by sorry"
| false, true =>
if params.isFirst then
s!"{lhsStr} {relStr} {newRhsStr} := by sorry\n{spc}_ {relStr} {rhsStr} := by sorry"
else
s!"_ {relStr} {newRhsStr} := by sorry\n{spc}_ {relStr} {rhsStr} := by sorry"
| true, false =>
if params.isFirst then
s!"{lhsStr} {relStr} {newLhsStr} := by sorry\n{spc}_ {relStr} {rhsStr} := by sorry"
else
s!"_ {relStr} {newLhsStr} := by sorry\n{spc}_ {relStr} {rhsStr} := by sorry"
| false, false => "This should not happen"
let stepInfo := match isSelectedLeft, isSelectedRight with
| true, true => "Create two new steps"
| true, false | false, true => "Create a new step"
| false, false => "This should not happen"
let pos : String.Pos.Raw := insertedCode.find (fun c => c == '?')
return (stepInfo, insertedCode, some (pos, ⟨pos.byteIdx + 2⟩) )
/-- Rpc function for the calc widget. -/
@[server_rpc_method]
def CalcPanel.rpc := mkSelectionPanelRPC suggestSteps
"Please select subterms using Shift-click."
"Calc 🔍"
/-- The calc widget. -/
@[widget_module]
def CalcPanel : Component CalcParams :=
mk_rpc_widget% CalcPanel.rpc
namespace Lean.Elab.Tactic
open Lean Meta Tactic TryThis in
/-- Create a `calc` proof. -/
elab stx:"calc?" : tactic => withMainContext do
let goalType ← whnfR (← getMainTarget)
unless (← Lean.Elab.Term.getCalcRelation? goalType).isSome do
throwError "Cannot start a calculation here: the goal{indentExpr goalType}\nis not a relation."
let s ← `(tactic| calc $(← Lean.PrettyPrinter.delab (← getMainTarget)) := by sorry)
addSuggestions stx #[.suggestion s] (header := "Create calc tactic:")
evalTactic (← `(tactic|sorry))
/-- Elaborator for the `calc` tactic mode variant with widgets. -/
elab_rules : tactic
| `(tactic|calc%$calcstx $steps) => do
let mut isFirst := true
for step in ← Lean.Elab.Term.mkCalcStepViews steps do
let some replaceRange := (← getFileMap).lspRangeOfStx? step.ref | continue
let json := json% {"replaceRange": $(replaceRange),
"isFirst": $(isFirst),
"indent": $(replaceRange.start.character)}
Widget.savePanelWidgetInfo CalcPanel.javascriptHash (pure json) step.proof
isFirst := false
evalCalc (← `(tactic|calc%$calcstx $steps))
end Lean.Elab.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Widget/CongrM.lean | import Mathlib.Tactic.Widget.SelectPanelUtils
import Mathlib.Tactic.CongrM
/-! # CongrM widget
This file defines a `congrm?` tactic that displays a widget panel allowing to generate
a `congrm` call with holes specified by selecting subexpressions in the goal.
-/
open Lean Meta Server ProofWidgets
/-! ### CongrM widget -/
/-- Return the link text and inserted text above and below of the congrm widget. -/
@[nolint unusedArguments]
def makeCongrMString (pos : Array Lean.SubExpr.GoalsLocation) (goalType : Expr)
(_ : SelectInsertParams) :
MetaM (String × String × Option (String.Pos.Raw × String.Pos.Raw)) := do
let subexprPos := getGoalLocations pos
unless goalType.isAppOf ``Eq || goalType.isAppOf ``Iff do
throwError "The goal must be an equality or iff."
let mut goalTypeWithMetaVars := goalType
for pos in subexprPos do
goalTypeWithMetaVars ← insertMetaVar goalTypeWithMetaVars pos
let side := if subexprPos[0]!.toArray[0]! = 0 then 1 else 2
let sideExpr := goalTypeWithMetaVars.getAppArgs[side]!
let res := "congrm " ++ (toString (← Meta.ppExpr sideExpr)).renameMetaVar
return (res, res, none)
/-- Rpc function for the congrm widget. -/
@[server_rpc_method]
def CongrMSelectionPanel.rpc := mkSelectionPanelRPC makeCongrMString
"Use shift-click to select sub-expressions in the goal that should become holes in congrm."
"CongrM 🔍"
/-- The congrm widget. -/
@[widget_module]
def CongrMSelectionPanel : Component SelectInsertParams :=
mk_rpc_widget% CongrMSelectionPanel.rpc
open scoped Json in
/-- Display a widget panel allowing to generate a `congrm` call with holes specified by selecting
subexpressions in the goal. -/
elab stx:"congrm?" : tactic => do
let some replaceRange := (← getFileMap).lspRangeOfStx? stx | return
Widget.savePanelWidgetInfo CongrMSelectionPanel.javascriptHash
(pure <| json% { replaceRange: $(replaceRange) }) stx |
.lake/packages/mathlib/Mathlib/Tactic/Widget/Conv.lean | import Mathlib.Tactic.Widget.SelectPanelUtils
import Mathlib.Data.String.Defs
import Batteries.Tactic.Lint
/-! # Conv widget
This is a slightly improved version of one of the examples in the ProofWidget library.
It defines a `conv?` tactic that displays a widget panel allowing to generate
a `conv` call zooming to the subexpression selected in the goal.
-/
open Lean Meta Server ProofWidgets
private structure SolveReturn where
expr : Expr
val? : Option String
listRest : List Nat
private def solveLevel (expr : Expr) (path : List Nat) : MetaM SolveReturn := match expr with
| Expr.app _ _ => do
let mut descExp := expr
let mut count := 0
let mut explicitList := []
-- we go through the application until we reach the end, counting how many explicit arguments
-- it has and noting whether they are explicit or implicit
while descExp.isApp do
if (← Lean.Meta.inferType descExp.appFn!).bindingInfo!.isExplicit then
explicitList := true::explicitList
count := count + 1
else
explicitList := false::explicitList
descExp := descExp.appFn!
-- we get the correct `enter` command by subtracting the number of `true`s in our list
let mut mutablePath := path
let mut length := count
explicitList := List.reverse explicitList
while !mutablePath.isEmpty && mutablePath.head! == 0 do
if explicitList.head! == true then
count := count - 1
explicitList := explicitList.tail!
mutablePath := mutablePath.tail!
let mut nextExp := expr
while length > count do
nextExp := nextExp.appFn!
length := length - 1
nextExp := nextExp.appArg!
let pathRest := if mutablePath.isEmpty then [] else mutablePath.tail!
return { expr := nextExp, val? := toString count, listRest := pathRest }
| Expr.lam n _ b _ => do
let name := match n with
| Name.str _ s => s
| _ => panic! "no name found"
return { expr := b, val? := name, listRest := path.tail! }
| Expr.forallE n _ b _ => do
let name := match n with
| Name.str _ s => s
| _ => panic! "no name found"
return { expr := b, val? := name, listRest := path.tail! }
| Expr.mdata _ b => do
match b with
| Expr.mdata _ _ => return { expr := b, val? := none, listRest := path }
| _ => return { expr := b.appFn!.appArg!, val? := none, listRest := path.tail!.tail! }
| _ => do
return {
expr := ← (Lean.Core.viewSubexpr path.head! expr)
val? := toString (path.head! + 1)
listRest := path.tail!
}
open Lean Syntax in
/-- Return the link text and inserted text above and below of the conv widget.

Takes the first selected location, walks its `SubExpr.Pos` through the goal (or
hypothesis) type with `solveLevel`, and assembles a `conv ... => enter [...]` call. -/
@[nolint unusedArguments]
def insertEnter (locations : Array Lean.SubExpr.GoalsLocation) (goalType : Expr)
    (params : SelectInsertParams) :
    MetaM (String × String × Option (String.Pos.Raw × String.Pos.Raw)) := do
  -- only the first selected location is used
  let some pos := locations[0]? | throwError "You must select something."
  -- `fvar` is `none` when the selection is in the goal, `some` for a hypothesis
  let (fvar, subexprPos) ← match pos with
    | ⟨_, .target subexprPos⟩ => pure (none, subexprPos)
    | ⟨_, .hypType fvar subexprPos⟩ => pure (some fvar, subexprPos)
    | ⟨_, .hypValue fvar subexprPos⟩ => pure (some fvar, subexprPos)
    | _ => throwError "You must select something in the goal or in a local value."
  let mut list := (SubExpr.Pos.toArray subexprPos).toList
  let mut expr := goalType
  let mut retList := []
  -- generate list of commands for `enter`
  while !list.isEmpty do
    let res ← solveLevel expr list
    expr := res.expr
    -- `solveLevel` may return no step (e.g. for `mdata` nodes); skip those
    retList := match res.val? with
      | none => retList
      | some val => val::retList
    list := res.listRest
  -- build `enter [...]` string
  retList := List.reverse retList
  -- prepare `enter` indentation
  let spc := String.replicate (SelectInsertParamsClass.replaceRange params).start.character ' '
  -- `at h ` prefix when the selection is in a hypothesis
  let loc ← match fvar with
    | some fvarId => pure s!"at {← fvarId.getUserName} "
    | none => pure ""
  let mut enterval := s!"conv {loc}=>\n{spc}  enter {retList}"
  -- a `0` step means `solveLevel` could not produce a valid conv position
  if enterval.contains '0' then enterval := "Error: Not a valid conv target"
  if retList.isEmpty then enterval := ""
  return ("Generate conv", enterval, none)
/-- Rpc function for the conv widget. -/
@[server_rpc_method]
def ConvSelectionPanel.rpc :=
  -- `onlyGoal := false`: selections in hypotheses are allowed;
  -- `onlyOne := true`: exactly one sub-expression may be selected.
  mkSelectionPanelRPC insertEnter
    "Use shift-click to select one sub-expression in the goal that you want to zoom on."
    "Conv 🔍" (onlyGoal := false) (onlyOne := true)
/-- The conv widget: wraps `ConvSelectionPanel.rpc` as a panel component. -/
@[widget_module]
def ConvSelectionPanel : Component SelectInsertParams :=
  mk_rpc_widget% ConvSelectionPanel.rpc
open scoped Json in
/-- Display a widget panel allowing to generate a `conv` call zooming to the subexpression selected
in the goal. -/
elab stx:"conv?" : tactic => do
  -- the widget needs the source range of `conv?` so a click can replace it
  let some replaceRange := (← getFileMap).lspRangeOfStx? stx | return
  Widget.savePanelWidgetInfo ConvSelectionPanel.javascriptHash
    -- fix: removed a stray trailing `|` token after `stx` that made the command ill-formed
    (pure <| json% { replaceRange: $(replaceRange) }) stx
.lake/packages/mathlib/Mathlib/Tactic/Widget/InteractiveUnfold.lean | import Mathlib.Tactic.NthRewrite
import Mathlib.Tactic.Widget.SelectPanelUtils
import Mathlib.Lean.GoalsLocation
import Mathlib.Lean.Meta.KAbstractPositions
/-!
# Interactive unfolding
This file defines the interactive tactic `unfold?`.
It allows you to shift-click on an expression in the goal, and then it suggests rewrites to replace
the expression with an unfolded version.
It can be used on its own, but it can also be used as part of the library rewrite tactic `rw??`,
where these unfoldings are a subset of the suggestions.
For example, if the goal contains `1+1`, then it will suggest rewriting this into one of
- `Nat.add 1 1`
- `2`
Clicking on a suggestion pastes a rewrite into the editor, which will be of the form
- `rw [show 1+1 = Nat.add 1 1 from rfl]`
- `rw [show 1+1 = 2 from rfl]`
It also takes into account the position of the selected expression if it appears in multiple places,
and whether the rewrite is in the goal or a local hypothesis.
The rewrite string is created using `mkRewrite`.
## Reduction rules
The basic idea is to repeatedly apply `unfoldDefinition?` followed by `whnfCore`, which gives
the list of all suggested unfoldings. Each suggested unfolding is in `whnfCore` normal form.
Additionally, eta-reduction is tried, and basic natural number reduction is tried.
## Filtering
`HAdd.hAdd` in `1+1` actually first unfolds into `Add.add`, but this is not very useful,
because this is just unfolding a notational type class. Therefore, unfoldings of default instances
are not presented in the list of suggested rewrites.
This is implemented with `unfoldProjDefaultInst?`.
Additionally, we don't want to unfold into expressions involving `match` terms or other
constants marked as `Name.isInternalDetail`. So all such results are filtered out.
This is implemented with `isUserFriendly`.
-/
open Lean Meta Server Widget ProofWidgets Jsx
namespace Mathlib.Tactic.InteractiveUnfold
/-- Unfold a class projection if the instance is tagged with `@[default_instance]`.
This is used in the `unfold?` tactic in order to not show these unfolds to the user.
Similar to `Lean.Meta.unfoldProjInst?`. -/
def unfoldProjDefaultInst? (e : Expr) : MetaM (Option Expr) := do
  -- the head must be a projection function generated for a class
  let .const declName _ := e.getAppFn | return none
  let some { fromClass := true, ctorName, .. } ← getProjectionFnInfo? declName | return none
  -- get the list of default instances of the class
  let some (ConstantInfo.ctorInfo ci) := (← getEnv).find? ctorName | return none
  let defaults ← getDefaultInstances ci.induct
  if defaults.isEmpty then return none
  -- unfold the projection function itself (at default transparency)
  let some e ← withDefault <| unfoldDefinition? e | return none
  let .proj _ i c := e.getAppFn | return none
  -- check that the structure `c` comes from one of the default instances
  let .const inst _ := c.getAppFn | return none
  unless defaults.any (·.1 == inst) do return none
  -- project the field out of the instance structure and re-apply the arguments
  let some r ← withReducibleAndInstances <| project? c i | return none
  return mkAppN r e.getAppArgs |>.headBeta
/-- Return the consecutive unfoldings of `e`.
Each returned expression is in `whnfCore` normal form; `e` itself is included
only if `whnfCore` changed it. -/
partial def unfolds (e : Expr) : MetaM (Array Expr) := do
  let e' ← whnfCore e
  go e' (if e == e' then #[] else #[e'])
where
  /-- Append the unfoldings of `e` to `acc`. Assume `e` is in `whnfCore` form.
  Tries, in order: strict eta expansion, `Nat` reduction, native reduction,
  default-instance projection unfolding, and ordinary definition unfolding. -/
  go (e : Expr) (acc : Array Expr) : MetaM (Array Expr) :=
    -- any runtime error (e.g. max recursion depth) just stops the unfolding chain
    tryCatchRuntimeEx
      (withIncRecDepth do
        if let some e := e.etaExpandedStrict? then
          let e ← whnfCore e
          return ← go e (acc.push e)
        if let some e ← reduceNat? e then
          -- numeral reductions are terminal: no further unfolding attempted
          return acc.push e
        if let some e ← reduceNative? e then
          return acc.push e
        if let some e ← unfoldProjDefaultInst? e then
          -- when unfolding a default instance, don't add it to the array of unfolds.
          let e ← whnfCore e
          return ← go e acc
        if let some e ← unfoldDefinition? e then
          -- Note: whnfCore can give a recursion depth error
          let e ← whnfCore e
          return ← go e (acc.push e)
        return acc)
      fun _ =>
        return acc
/-- Determine whether `e` contains no internal names
(constants for which `Name.isInternalDetail` holds). -/
def isUserFriendly (e : Expr) : Bool :=
  -- fold over all constants, remembering whether any of them is an internal detail
  let hasInternal := e.foldConsts (init := false) fun name found =>
    found || name.isInternalDetail
  !hasInternal
/-- Return the consecutive unfoldings of `e` that are user friendly,
i.e. those not mentioning internal names. -/
def filteredUnfolds (e : Expr) : MetaM (Array Expr) := do
  let allUnfolds ← unfolds e
  return allUnfolds.filter isUserFriendly
end InteractiveUnfold
/-- Return syntax for the rewrite tactic `rw [e]`.

* `occ`: when `some n`, use `nth_rw n` to target the `n`-th occurrence.
* `symm`: when `true`, rewrite right-to-left (`← e`).
* `loc`: when `some h`, rewrite at hypothesis `h` instead of the goal. -/
def mkRewrite (occ : Option Nat) (symm : Bool) (e : Term) (loc : Option Name) :
    CoreM (TSyntax `tactic) := do
  -- optional `at h` clause
  let loc ← loc.mapM fun h => `(Lean.Parser.Tactic.location| at $(mkIdent h):term)
  -- `← e` marks a right-to-left rewrite
  let rule ← if symm then `(Parser.Tactic.rwRule| ← $e) else `(Parser.Tactic.rwRule| $e:term)
  match occ with
  | some n => `(tactic| nth_rw $(Syntax.mkNatLit n):num [$rule] $(loc)?)
  | none => `(tactic| rw [$rule] $(loc)?)
/-- Given tactic syntax `tac` that we want to paste into the editor, return it as a string.
This function respects the 100 character limit for long lines, indenting
continuation lines at the column where the tactic is inserted. -/
def tacticPasteString (tac : TSyntax `tactic) (range : Lsp.Range) : CoreM String := do
  let fmt ← PrettyPrinter.ppTactic tac
  let col := range.start.character
  return fmt.pretty 100 col col
namespace InteractiveUnfold
/-- Return the tactic string that does the unfolding, of the form
`rw [show e = eNew from rfl]` (possibly with `nth_rw`/`at` via `mkRewrite`). -/
def tacticSyntax (e eNew : Expr) (occ : Option Nat) (loc : Option Name) :
    MetaM (TSyntax `tactic) := do
  -- delaborate both sides so they can be spliced into the `show _ = _ from rfl` term
  let e ← PrettyPrinter.delab e
  let eNew ← PrettyPrinter.delab eNew
  let fromRfl ← `(show $e = $eNew from $(mkIdent `rfl))
  mkRewrite occ false fromRfl loc
/-- Render the unfolds of `e` as given by `filteredUnfolds`, with buttons at each suggestion
for pasting the rewrite tactic. Return `none` when there are no unfolds. -/
def renderUnfolds (e : Expr) (occ : Option Nat) (loc : Option Name) (range : Lsp.Range)
    (doc : FileWorker.EditableDocument) : MetaM (Option Html) := do
  let results ← filteredUnfolds e
  if results.isEmpty then
    return none
  -- one list item per unfolding; clicking pastes the `rw [show .. from rfl]` tactic
  let core ← results.mapM fun unfold => do
    let tactic ← tacticSyntax e unfold occ loc
    let tactic ← tacticPasteString tactic range
    return <li> {
      .element "p" #[] <|
        #[<span className="font-code" style={json% { "white-space" : "pre-wrap" }}> {
          Html.ofComponent MakeEditLink
            (.ofReplaceRange doc.meta range tactic)
            -- the visible label is the pretty-printed unfolded expression
            #[.text <| Format.pretty <| (← Meta.ppExpr unfold)] }
          </span>]
      } </li>
  -- collapsible section, open by default
  return <details «open»={true}>
    <summary className="mv2 pointer">
      {.text "Definitional rewrites:"}
    </summary>
    {.element "ul" #[("style", json% { "padding-left" : "30px"})] core}
  </details>
/-- Rpc handler for the `unfold?` widget: validates the selection, then renders
the list of suggested unfoldings for the selected sub-expression. -/
@[server_rpc_method_cancellable]
private def rpc (props : SelectInsertParams) : RequestM (RequestTask Html) :=
  RequestM.asTask do
    let doc ← RequestM.readDoc
    -- use the most recent selection
    let some loc := props.selectedLocations.back? |
      return .text "unfold?: Please shift-click an expression."
    if loc.loc matches .hypValue .. then
      return .text "unfold? doesn't work on the value of a let-bound free variable."
    let some goal := props.goals[0]? |
      return .text "There is no goal to solve!"
    if loc.mvarId != goal.mvarId then
      return .text "The selected expression should be in the main goal."
    goal.ctx.val.runMetaM {} do
      let md ← goal.mvarId.getDecl
      -- sanitize so inaccessible names print consistently in the rendered output
      let lctx := md.lctx |>.sanitizeNames.run' {options := (← getOptions)}
      Meta.withLCtx lctx md.localInstances do
        let rootExpr ← loc.rootExpr
        -- `occ` is the occurrence index of the selected sub-expression, if not unique
        let some (subExpr, occ) ← withReducible <| viewKAbstractSubExpr rootExpr loc.pos |
          return .text "expressions with bound variables are not supported"
        unless ← kabstractIsTypeCorrect rootExpr subExpr loc.pos do
          return .text <| "The selected expression cannot be rewritten, because the motive is " ++
            "not type correct. This usually occurs when trying to rewrite a term that appears " ++
            "as a dependent argument."
        let location ← loc.fvarId?.mapM FVarId.getUserName
        let html ← renderUnfolds subExpr occ location props.replaceRange doc
        return html.getD
          <span>
            No unfolds found for {<InteractiveCode fmt={← ppExprTagged subExpr}/>}
          </span>
/-- The component called by the `unfold?` tactic; wraps `InteractiveUnfold.rpc`. -/
@[widget_module]
def UnfoldComponent : Component SelectInsertParams :=
  mk_rpc_widget% InteractiveUnfold.rpc
/-- Replace the selected expression with a definitional unfolding.
- After each unfolding, we apply `whnfCore` to simplify the expression.
- Explicit natural number expressions are evaluated.
- Unfolds of class projections of instances marked with `@[default_instance]` are not shown.
  This is relevant for notational type classes like `+`: we don't want to suggest `Add.add a b`
  as an unfolding of `a + b`. Similarly for `OfNat n : Nat` which unfolds into `n : Nat`.

To use `unfold?`, shift-click an expression in the tactic state.
This gives a list of rewrite suggestions for the selected expression.
Click on a suggestion to replace `unfold?` by a tactic that performs this rewrite.
-/
elab stx:"unfold?" : tactic => do
  -- the widget needs the range of `unfold?` so a clicked suggestion can replace it
  let some range := (← getFileMap).lspRangeOfStx? stx | return
  Widget.savePanelWidgetInfo (hash UnfoldComponent.javascript)
    (pure <| json% { replaceRange : $range }) stx
/-- `#unfold? e` gives all unfolds of `e`.
In tactic mode, use `unfold?` instead. -/
syntax (name := unfoldCommand) "#unfold? " term : command
open Elab
/-- Elaborate a `#unfold?` command: elaborate the term argument, compute its
user-friendly unfoldings with `filteredUnfolds`, and log them. -/
@[command_elab unfoldCommand]
def elabUnfoldCommand : Command.CommandElab := fun stx =>
  withoutModifyingEnv <| Command.runTermElabM fun _ => Term.withDeclName `_unfold do
    let e ← Term.elabTerm stx[1] none
    Term.synthesizeSyntheticMVarsNoPostponing
    -- fix: these two bindings were fused onto one line, which is invalid `do` syntax
    let e ← Term.levelMVarToParam (← instantiateMVars e)
    let e ← instantiateMVars e
    let unfolds ← filteredUnfolds e
    if unfolds.isEmpty then
      logInfo m!"No unfolds found for {e}"
    else
      -- one bullet per unfolding
      let unfolds := unfolds.toList.map (m!"· {·}")
      logInfo (m!"Unfolds for {e}:\n"
        ++ .joinSep unfolds "\n")
end InteractiveUnfold
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Widget/SelectPanelUtils.lean | import Lean.Meta.ExprLens
import ProofWidgets.Component.MakeEditLink
import ProofWidgets.Component.OfRpcMethod -- needed in all files using this one.
import Mathlib.Tactic.Widget.SelectInsertParamsClass
/-! # Selection panel utilities
The main declaration is `mkSelectionPanelRPC` which helps creating rpc methods for widgets
generating tactic calls based on selected sub-expressions in the main goal.
There are also some minor helper functions.
-/
open Lean Meta Server
open Lean.SubExpr in
/-- Given a `Array GoalsLocation` return the array of `SubExpr.Pos` for all locations
in the targets of the relevant goals (selections in hypotheses are dropped). -/
def getGoalLocations (locations : Array GoalsLocation) : Array SubExpr.Pos :=
  locations.filterMap fun location =>
    match location.loc with
    | .target pos => some pos
    | _ => none
/-- Replace the sub-expression at the given position by a fresh meta-variable. -/
def insertMetaVar (e : Expr) (pos : SubExpr.Pos) : MetaM Expr :=
  -- the existing subterm is discarded; a fresh synthetic mvar takes its place
  replaceSubexpr (fun _ ↦ do mkFreshExprMVar none .synthetic) pos e
/-- Replace all meta-variable names by "?_": every `?m.<digits>` occurrence
becomes `?_`. -/
def String.renameMetaVar (s : String) : String :=
  match s.splitOn "?m." with
  | [] => ""
  | [whole] => whole
  | first :: rest =>
    -- after each `?m.` marker, strip the numeric id and substitute `?_`
    let stripped := rest.map fun part => part.dropWhile Char.isDigit
    first ++ "?_" ++ "?_".intercalate stripped
open ProofWidgets
/-- Structure providing parameters for a select-and-insert widget. -/
structure SelectInsertParams where
  /-- Cursor position in the file at which the widget is being displayed. -/
  pos : Lsp.Position
  /-- The current tactic-mode goals. -/
  goals : Array Widget.InteractiveGoal
  /-- Locations currently selected in the goal state. -/
  selectedLocations : Array SubExpr.GoalsLocation
  /-- The range in the source document where the command will be inserted. -/
  replaceRange : Lsp.Range
  deriving SelectInsertParamsClass, RpcEncodable
open scoped Jsx in open SelectInsertParamsClass Lean.SubExpr in
/-- Helper function to create a widget allowing to select parts of the main goal
and then display a link that will insert some tactic call.
The main argument is `mkCmdStr` which is a function creating the link text and the tactic call text.
The `helpMsg` argument is displayed when nothing is selected and `title` is used as a panel title.
The `onlyGoal` argument says whether the selection has to be in the goal. Otherwise it
can be in the local context.
The `onlyOne` argument says whether one should select only one sub-expression.
In every case, all selected subexpressions should be in the main goal or its local context.
The last argument `params` should not be provided so that the output
has type `Params → RequestM (RequestTask Html)` and can be fed to the `mk_rpc_widget%`
elaborator.
Note that the `pos` and `goalType` arguments to `mkCmdStr` could be extracted from the `Params`
argument but that extraction would happen in every example, hence it is factored out here.
We also make sure `mkCmdStr` is executed in the right context.
-/
def mkSelectionPanelRPC {Params : Type} [SelectInsertParamsClass Params]
    (mkCmdStr : (pos : Array GoalsLocation) → (goalType : Expr) → Params →
      MetaM (String × String × Option (String.Pos.Raw × String.Pos.Raw)))
    (helpMsg : String) (title : String) (onlyGoal := true) (onlyOne := false) :
    (params : Params) → RequestM (RequestTask Html) :=
  fun params ↦ RequestM.asTask do
    let doc ← RequestM.readDoc
    if h : 0 < (goals params).size then
      let mainGoal := (goals params)[0]
      let mainGoalName := mainGoal.mvarId.name
      -- error-message fragments depending on the configuration flags
      let all := if onlyOne then "The selected sub-expression" else "All selected sub-expressions"
      let be_where := if onlyGoal then "in the main goal." else "in the main goal or its context."
      let errorMsg := s!"{all} should be {be_where}"
      let inner : Html ← (do
        -- validate the selection before running `mkCmdStr`
        if onlyOne && (selectedLocations params).size > 1 then
          return <span>{.text "You should select only one sub-expression"}</span>
        for selectedLocation in selectedLocations params do
          if selectedLocation.mvarId.name != mainGoalName then
            return <span>{.text errorMsg}</span>
          else if onlyGoal then
            if !(selectedLocation.loc matches (.target _)) then
              return <span>{.text errorMsg}</span>
        if (selectedLocations params).isEmpty then
          return <span>{.text helpMsg}</span>
        -- run `mkCmdStr` in the metavariable context of the main goal
        mainGoal.ctx.val.runMetaM {} do
          let md ← mainGoal.mvarId.getDecl
          let lctx := md.lctx |>.sanitizeNames.run' {options := (← getOptions)}
          Meta.withLCtx lctx md.localInstances do
            let (linkText, newCode, range?) ← mkCmdStr (selectedLocations params)
              md.type.consumeMData params
            -- clicking the link edits the document, replacing the widget's range
            return .ofComponent
              MakeEditLink
              (.ofReplaceRange doc.meta (replaceRange params) newCode range?)
              #[ .text linkText ])
      return <details «open»={true}>
        <summary className="mv2 pointer">{.text title}</summary>
        <div className="ml1">{inner}</div>
      </details>
    else
      return <span>{.text "There is no goal to solve!"}</span> -- This shouldn't happen.
.lake/packages/mathlib/Mathlib/Tactic/Widget/CommDiag.lean | import ProofWidgets.Component.PenroseDiagram
import ProofWidgets.Presentation.Expr
import Mathlib.CategoryTheory.Category.Basic
/-! This module defines tactic/meta infrastructure for displaying commutative diagrams in the
infoview. -/
open Lean in
/-- If the expression is a function application of `fName` with 7 arguments, return those arguments.
Otherwise return `none`. -/
@[inline] def _root_.Lean.Expr.app7? (e : Expr) (fName : Name) :
    Option (Expr × Expr × Expr × Expr × Expr × Expr × Expr) :=
  if e.isAppOfArity fName 7 then
    -- arity is exactly 7, so the argument array has exactly 7 entries
    let args := e.getAppArgs
    some (args[0]!, args[1]!, args[2]!, args[3]!, args[4]!, args[5]!, args[6]!)
  else
    none
namespace Mathlib.Tactic.Widget
open Lean Meta
open ProofWidgets
open CategoryTheory
/-! ## Metaprogramming utilities for breaking down category theory expressions -/
/-- Given a Hom type `α ⟶ β`, return `(α, β)`. Otherwise `none`. -/
def homType? (e : Expr) : Option (Expr × Expr) :=
  match e.app4? ``Quiver.Hom with
  | some (_, _, A, B) => some (A, B)
  | none => none
/-- Given composed homs `g ≫ h`, return `(g, h)`. Otherwise `none`. -/
def homComp? (f : Expr) : Option (Expr × Expr) :=
  -- `CategoryStruct.comp` takes 7 arguments; the last two are the homs
  match f.app7? ``CategoryStruct.comp with
  | some (_, _, _, _, _, g, h) => some (g, h)
  | none => none
/-- Expressions to display as labels in a diagram, keyed by Penrose variable name. -/
abbrev ExprEmbeds := Array (String × Expr)
/-! ## Widget for general commutative diagrams -/
open scoped Jsx in
/-- Construct a commutative diagram from a Penrose `sub`stance program and expressions `embeds` to
display as labels in the diagram. -/
def mkCommDiag (sub : String) (embeds : ExprEmbeds) : MetaM Html := do
  -- each label becomes an interactive (hoverable) rendering of the expression
  let embeds ← embeds.mapM fun (s, h) =>
    return (s, <InteractiveCode fmt={← Widget.ppExprTagged h} />)
  -- the dsl/sty files are baked into the binary at compile time via `include_str`
  return (
    <PenroseDiagram
      embeds={embeds}
      dsl={include_str ".."/".."/".."/"widget"/"src"/"penrose"/"commutative.dsl"}
      sty={include_str ".."/".."/".."/"widget"/"src"/"penrose"/"commutative.sty"}
      sub={sub} />)
/-! ## Commutative triangles -/
/--
Triangle with `homs = [f,g,h]` and `objs = [A,B,C]`
```
A f B
h  g
  C
```
-/
def subTriangle := include_str ".."/".."/".."/"widget"/"src"/"penrose"/"triangle.sub"
/-- Given a commutative triangle `f ≫ g = h` or `e ≡ h = f ≫ g`, return a triangle diagram.
Otherwise `none`. -/
def commTriangleM? (e : Expr) : MetaM (Option Html) := do
  let e ← instantiateMVars e
  let some (_, lhs, rhs) := e.eq? | return none
  -- case `f ≫ g = h`: the composite is on the left
  if let some (f, g) := homComp? lhs then
    let some (A, C) := homType? (← inferType rhs) | return none
    let some (_, B) := homType? (← inferType f) | return none
    return some <| ← mkCommDiag subTriangle
      #[("A", A), ("B", B), ("C", C),
        ("f", f), ("g", g), ("h", rhs)]
  -- case `h = f ≫ g`: the composite is on the right
  let some (f, g) := homComp? rhs | return none
  let some (A, C) := homType? (← inferType lhs) | return none
  let some (_, B) := homType? (← inferType f) | return none
  return some <| ← mkCommDiag subTriangle
    #[("A", A), ("B", B), ("C", C),
      ("f", f), ("g", g), ("h", lhs)]
/-- Presenter for a commutative triangle. Renders goals/terms matching
`commTriangleM?` as a diagram; fails otherwise. -/
@[expr_presenter]
def commutativeTrianglePresenter : ExprPresenter where
  userName := "Commutative triangle"
  layoutKind := .block
  present type := do
    if let some d ← commTriangleM? type then
      return d
    throwError "Couldn't find a commutative triangle."
/-! ## Commutative squares -/
/--
Square with `homs = [f,g,h,i]` and `objs = [A,B,C,D]`
```
A f B
i   g
D h C
```
-/
def subSquare := include_str ".."/".."/".."/"widget"/"src"/"penrose"/"square.sub"
/-- Given a commutative square `f ≫ g = i ≫ h`, return a square diagram. Otherwise `none`. -/
def commSquareM? (e : Expr) : MetaM (Option Html) := do
  let e ← instantiateMVars e
  let some (_, lhs, rhs) := e.eq? | return none
  -- both sides must be composites
  let some (f, g) := homComp? lhs | return none
  let some (i, h) := homComp? rhs | return none
  -- the corners are recovered from the hom types of `f` and `h`
  let some (A, B) := homType? (← inferType f) | return none
  let some (D, C) := homType? (← inferType h) | return none
  some <$> mkCommDiag subSquare
    #[("A", A), ("B", B), ("C", C), ("D", D),
      ("f", f), ("g", g), ("h", h), ("i", i)]
/-- Presenter for a commutative square. Renders goals/terms matching
`commSquareM?` as a diagram; fails otherwise. -/
@[expr_presenter]
def commutativeSquarePresenter : ExprPresenter where
  userName := "Commutative square"
  layoutKind := .block
  present type := do
    if let some d ← commSquareM? type then
      return d
    throwError "Couldn't find a commutative square."
end Widget
end Mathlib.Tactic |
.lake/packages/mathlib/Mathlib/Tactic/Widget/LibraryRewrite.lean | import Mathlib.Lean.Meta.RefinedDiscrTree
import Mathlib.Tactic.Widget.InteractiveUnfold
import ProofWidgets.Component.FilterDetails
/-!
# Point & click library rewriting
This file defines `rw??`, an interactive tactic that suggests rewrites for any expression selected
by the user.
`rw??` uses a (lazy) `RefinedDiscrTree` to lookup a list of candidate rewrite lemmas.
It excludes lemmas that are automatically generated.
Each lemma is then checked one by one to see whether it is applicable.
For each lemma that works, the corresponding rewrite tactic is constructed
and converted into a `String` that fits inside mathlib's 100 column limit,
so that it can be pasted into the editor when selected by the user.
The `RefinedDiscrTree` lookup groups the results by match pattern and gives a score to each pattern.
This is used to display the results in sections. The sections are ordered by this score.
Within each section, the lemmas are sorted by
- rewrites with fewer extra goals come first
- left-to-right rewrites come first
- shorter lemma names come first
- shorter replacement expressions come first (when taken as a string)
- alphabetically ordered by lemma name
The lemmas are optionally filtered to avoid duplicate rewrites, or trivial rewrites. This
is controlled by the filter button on the top right of the results.
When a rewrite lemma introduces new goals, these are shown after a `⊢`.
## TODO
Ways to improve `rw??`:
- Improve the logic around `nth_rw` and occurrences,
and about when to pass explicit arguments to the rewrite lemma.
For example, we could only pass explicit arguments if that avoids using `nth_rw`.
Performance may be a limiting factor for this.
Currently, the occurrence is computed by `viewKAbstractSubExpr`.
- Modify the interface to allow creating a whole `rw [.., ..]` chain, without having to go into
the editor in between. For this to work, we will need a more general syntax,
something like `rw [..]??`, which would be pasted into the editor.
- We could look for rewrites of partial applications of the selected expression.
For example, when clicking on `(f + g) x`, there should still be an `add_comm` suggestion.
Ways to extend `rw??`:
- Support generalized rewriting (`grw`)
- Integrate rewrite search with the `calc?` widget so that a `calc` block can be created using
just point & click.
-/
/-! ### Caching -/
namespace Mathlib.Tactic.LibraryRewrite
open Lean Meta RefinedDiscrTree
/-- The structure for rewrite lemmas stored in the `RefinedDiscrTree`. -/
structure RewriteLemma where
  /-- The name of the lemma -/
  name : Name
  /-- `symm` is `true` when rewriting from right to left -/
  symm : Bool
  deriving BEq, Inhabited
-- Format as e.g. `foo_comm` or `← foo_comm` (the arrow marks a right-to-left rewrite).
instance : ToFormat RewriteLemma where
  format lem := f! "{if lem.symm then "← " else ""}{lem.name}"
/-- Return `true` if `s` and `t` are equal up to changing the `MVarId`s.
Used to detect that the `symm` version of a rewrite lemma would give the
same matches as the original, so it can be skipped. -/
def isMVarSwap (t s : Expr) : Bool :=
  go t s {} |>.isSome
where
  /-- The main loop of `isMVarSwap`. Returning `none` corresponds to a failure.
  `swaps` accumulates the bijection between mvars of `t` and mvars of `s`. -/
  go (t s : Expr) (swaps : List (MVarId × MVarId)) : Option (List (MVarId × MVarId)) := do
    let isTricky e := e.hasExprMVar || e.hasLevelParam
    if isTricky t then
      guard (isTricky s)
      match t, s with
      -- Note we don't bother keeping track of universe level metavariables.
      | .const n₁ _       , .const n₂ _        => guard (n₁ == n₂); some swaps
      | .sort _           , .sort _            => some swaps
      | .forallE _ d₁ b₁ _, .forallE _ d₂ b₂ _ => go d₁ d₂ swaps >>= go b₁ b₂
      | .lam _ d₁ b₁ _    , .lam _ d₂ b₂ _     => go d₁ d₂ swaps >>= go b₁ b₂
      | .mdata d₁ e₁      , .mdata d₂ e₂       => guard (d₁ == d₂); go e₁ e₂ swaps
      | .letE _ t₁ v₁ b₁ _, .letE _ t₂ v₂ b₂ _ => go t₁ t₂ swaps >>= go v₁ v₂ >>= go b₁ b₂
      | .app f₁ a₁        , .app f₂ a₂         => go f₁ f₂ swaps >>= go a₁ a₂
      | .proj n₁ i₁ e₁    , .proj n₂ i₂ e₂     => guard (n₁ == n₂ && i₁ == i₂); go e₁ e₂ swaps
      | .fvar fvarId₁     , .fvar fvarId₂      => guard (fvarId₁ == fvarId₂); some swaps
      | .lit v₁           , .lit v₂            => guard (v₁ == v₂); some swaps
      | .bvar i₁          , .bvar i₂           => guard (i₁ == i₂); some swaps
      | .mvar mvarId₁     , .mvar mvarId₂      =>
        match swaps.find? (·.1 == mvarId₁) with
        | none =>
          -- `mvarId₂` must not already be the image of a different mvar
          guard (swaps.all (·.2 != mvarId₂))
          let swaps := (mvarId₁, mvarId₂) :: swaps
          if mvarId₁ == mvarId₂ then
            some swaps
          else
            -- record both directions of the pairing
            some <| (mvarId₂, mvarId₁) :: swaps
        | some (_, mvarId) => guard (mvarId == mvarId₂); some swaps
      | _                 , _                  => none
    else
      -- no mvars/level params on either side: plain structural equality suffices
      guard (t == s); some swaps
/-- Extract the left and right-hand sides of an equality or iff statement. -/
@[inline] def eqOrIff? (e : Expr) : Option (Expr × Expr) :=
  -- try `Eq` first (dropping the type argument), then fall back to `Iff`
  (e.eq?.map fun (_, lhs, rhs) => (lhs, rhs)) <|> e.iff?
/-- Try adding the lemma to the `RefinedDiscrTree`.
Returns up to two entries: one for the left-to-right rewrite and one for the
right-to-left rewrite, each with its lazy discrimination-tree keys. -/
def addRewriteEntry (name : Name) (cinfo : ConstantInfo) :
    MetaM (List (RewriteLemma × List (Key × LazyEntry))) := do
  -- we start with a fast-failing check to see if the lemma has the right shape
  let .const head _ := cinfo.type.getForallBody.getAppFn | return []
  unless head == ``Eq || head == ``Iff do return []
  setMCtx {} -- recall that the metavariable context is not guaranteed to be empty at the start
  let (_, _, eqn) ← forallMetaTelescope cinfo.type
  let some (lhs, rhs) := eqOrIff? eqn | return []
  -- a "bad match" is a pattern whose head is a metavariable, which would match anything
  let badMatch e :=
    e.getAppFn.isMVar ||
    -- this extra check excludes general equality lemmas that apply at any equality
    -- these are almost never useful, and there are very many of them.
    e.eq?.any fun (α, l, r) =>
      α.getAppFn.isMVar && l.getAppFn.isMVar && r.getAppFn.isMVar && l != r
  if badMatch lhs then
    if badMatch rhs then
      return []
    else
      -- only the right-to-left direction has a usable pattern
      return [({ name, symm := true }, ← initializeLazyEntryWithEta rhs)]
  else
    let result := ({ name, symm := false }, ← initializeLazyEntryWithEta lhs)
    -- skip the `symm` entry when the rhs pattern is bad, or is just the lhs
    -- with metavariables renamed (it would produce duplicate matches)
    if badMatch rhs || isMVarSwap lhs rhs then
      return [result]
    else
      return [result, ({ name, symm := true }, ← initializeLazyEntryWithEta rhs)]
/-- Try adding the local hypothesis to the `RefinedDiscrTree`.
Unlike `addRewriteEntry`, both directions are always added and no
bad-match filtering is performed. -/
def addLocalRewriteEntry (decl : LocalDecl) :
    MetaM (List ((FVarId × Bool) × List (Key × LazyEntry))) :=
  withReducible do
    let (_, _, eqn) ← forallMetaTelescope decl.type
    let some (lhs, rhs) := eqOrIff? eqn | return []
    let result := ((decl.fvarId, false), ← initializeLazyEntryWithEta lhs)
    return [result, ((decl.fvarId, true), ← initializeLazyEntryWithEta rhs)]
/-- Mutable reference holding the (initially absent) `RefinedDiscrTree` of
imported rewrite lemmas; `none` until it is first computed. -/
private abbrev ExtState := IO.Ref (Option (RefinedDiscrTree RewriteLemma))

private initialize ExtState.default : ExtState ←
  IO.mkRef none

private instance : Inhabited ExtState where
  default := ExtState.default

-- environment extension carrying the cached tree across the session
private initialize importedRewriteLemmasExt : EnvExtension ExtState ←
  registerEnvExtension (IO.mkRef none)
/-! ### Computing the Rewrites -/
/-- Get all potential rewrite lemmas from the imported environment.
By setting the `librarySearch.excludedModules` option, all lemmas from certain modules
can be excluded. Results are grouped by match pattern (see the module docstring). -/
def getImportCandidates (e : Expr) : MetaM (Array (Array RewriteLemma)) := do
  let matchResult ← findImportMatches importedRewriteLemmasExt addRewriteEntry
    /-
    5000 constants seems to be approximately the right number of tasks
    Too many means the tasks are too long.
    Too few means less cache can be reused and more time is spent on combining different results.
    With 5000 constants per task, we set the `HashMap` capacity to 256,
    which is the largest capacity it gets to reach.
    -/
    (constantsPerTask := 5000) (capacityPerTask := 256) e
  return matchResult.flatten
/-- Get all potential rewrite lemmas from the current file. Exclude lemmas from modules
in the `librarySearch.excludedModules` option. -/
def getModuleCandidates (e : Expr) : MetaM (Array (Array RewriteLemma)) := do
  -- builds a fresh tree for the current module (no cross-session cache here)
  let moduleTreeRef ← createModuleTreeRef addRewriteEntry
  let matchResult ← findModuleMatches moduleTreeRef e
  return matchResult.flatten
/-- A rewrite lemma that has been applied to an expression. -/
structure Rewrite where
  /-- `symm` is `true` when rewriting from right to left -/
  symm : Bool
  /-- The proof of the rewrite -/
  proof : Expr
  /-- The replacement expression obtained from the rewrite -/
  replacement : Expr
  /-- The size of the replacement when printed (used for sorting results) -/
  stringLength : Nat
  /-- The extra goals created by the rewrite -/
  extraGoals : Array (MVarId × BinderInfo)
  /-- Whether the rewrite introduces a new metavariable in the replacement expression. -/
  makesNewMVars : Bool
/-- If `thm` can be used to rewrite `e`, return the rewrite. -/
def checkRewrite (thm e : Expr) (symm : Bool) : MetaM (Option Rewrite) := do
  withTraceNodeBefore `rw?? (return m!
    "rewriting {e} by {if symm then "← " else ""}{thm}") do
    -- open the lemma's binders as metavariables and split it into lhs/rhs
    let (mvars, binderInfos, eqn) ← forallMetaTelescope (← inferType thm)
    let some (lhs, rhs) := eqOrIff? eqn | return none
    let (lhs, rhs) := if symm then (rhs, lhs) else (lhs, rhs)
    let unifies ← withTraceNodeBefore `rw?? (return m! "unifying {e} =?= {lhs}")
      (withReducible (isDefEq lhs e))
    unless unifies do return none
    -- just like in `kabstract`, we compare the `HeadIndex` and number of arguments
    let lhs ← instantiateMVars lhs
    if lhs.toHeadIndex != e.toHeadIndex || lhs.headNumArgs != e.headNumArgs then
      return none
    -- try to discharge instance arguments of the lemma
    synthAppInstances `rw?? default mvars binderInfos false false
    -- any still-unassigned metavariable becomes an extra goal for the user
    let mut extraGoals := #[]
    for mvar in mvars, bi in binderInfos do
      unless ← mvar.mvarId!.isAssigned do
        extraGoals := extraGoals.push (mvar.mvarId!, bi)
    let replacement ← instantiateMVars rhs
    let stringLength := (← ppExpr replacement).pretty.length
    let makesNewMVars := (replacement.findMVar? fun mvarId => mvars.any (·.mvarId! == mvarId)).isSome
    let proof ← instantiateMVars (mkAppN thm mvars)
    return some { symm, proof, replacement, stringLength, extraGoals, makesNewMVars }
-- register the `rw??` trace class used by `checkRewrite`
initialize
  registerTraceClass `rw??
/-- Try to rewrite `e` with each of the rewrite lemmas, and sort the resulting rewrites. -/
def checkAndSortRewriteLemmas (e : Expr) (rewrites : Array RewriteLemma) :
    MetaM (Array (Rewrite × Name)) := do
  -- lemmas whose application raises a runtime error are silently dropped
  let rewrites ← rewrites.filterMapM fun rw =>
    tryCatchRuntimeEx do
      let thm ← mkConstWithFreshMVarLevels rw.name
      Option.map (·, rw.name) <$> checkRewrite thm e rw.symm
    fun _ =>
      return none
  -- sort order: fewer extra goals, then l-to-r before r-to-l, then shorter
  -- lemma name, then shorter replacement string, then alphabetical
  let lt (a b : (Rewrite × Name)) := Ordering.isLT <|
    (compare a.1.extraGoals.size b.1.extraGoals.size).then <|
    (compare a.1.symm b.1.symm).then <|
    (compare a.2.toString.length b.2.toString.length).then <|
    (compare a.1.stringLength b.1.stringLength).then <|
    (Name.cmp a.2 b.2)
  return rewrites.qsort lt
/-- Return all applicable library rewrites of `e`.
Note that the result may contain duplicate rewrites. These can be removed with `filterRewrites`. -/
def getImportRewrites (e : Expr) : MetaM (Array (Array (Rewrite × Name))) := do
  (← getImportCandidates e).mapM (checkAndSortRewriteLemmas e)
/-- Same as `getImportRewrites`, but for lemmas from the current file. -/
def getModuleRewrites (e : Expr) : MetaM (Array (Array (Rewrite × Name))) := do
  (← getModuleCandidates e).mapM (checkAndSortRewriteLemmas e)
/-! ### Rewriting by hypotheses -/
/-- Construct the `RefinedDiscrTree` of all local hypotheses.
`except` excludes one hypothesis (the one being rewritten, to avoid
rewriting a hypothesis with itself). -/
def getHypotheses (except : Option FVarId) : MetaM (RefinedDiscrTree (FVarId × Bool)) := do
  let mut tree : PreDiscrTree (FVarId × Bool) := {}
  for decl in ← getLCtx do
    -- skip implementation-detail declarations and the excluded hypothesis
    if !decl.isImplementationDetail && except.all (· != decl.fvarId) then
      for (val, entries) in ← addLocalRewriteEntry decl do
        for (key, entry) in entries do
          tree := tree.push key (entry, val)
  return tree.toRefinedDiscrTree
/-- Return all applicable hypothesis rewrites of `e`. Similar to `getImportRewrites`. -/
def getHypothesisRewrites (e : Expr) (except : Option FVarId) :
    MetaM (Array (Array (Rewrite × FVarId))) := do
  let (candidates, _) ← (← getHypotheses except).getMatch e (unify := false) (matchRootStar := true)
  let candidates := (← MonadExcept.ofExcept candidates).flatten
  -- hypotheses whose application raises a runtime error are silently dropped
  candidates.mapM <| Array.filterMapM fun (fvarId, symm) =>
    tryCatchRuntimeEx do
      Option.map (·, fvarId) <$> checkRewrite (.fvar fvarId) e symm
    fun _ =>
      return none
/-! ### Filtering out duplicate lemmas -/
/-- Get the `BinderInfo`s for the arguments of `mkAppN fn args`.

Walks the (possibly dependent) type of `fn`, recording the binder info of each `∀`-binder.
Substitution of already-consumed arguments is deferred and performed in batches, only when
the current type is not syntactically a `∀`. Throws an error if the type cannot accept
all of `args`. -/
def getBinderInfos (fn : Expr) (args : Array Expr) : MetaM (Array BinderInfo) := do
  let mut fnType ← inferType fn
  let mut result := Array.mkEmpty args.size
  -- `j` is the index of the first argument not yet substituted into `fnType`.
  let mut j := 0
  for i in [:args.size] do
    unless fnType.isForall do
      -- Substitute arguments `j ..< i` in one go, then unfold to expose the next `∀`.
      fnType ← whnfD (fnType.instantiateRevRange j i args)
      j := i
    let .forallE _ _ b bi := fnType | throwError m! "expected function type {indentExpr fnType}"
    fnType := b
    result := result.push bi
  return result
/-- Determine whether the explicit parts of two expressions are equal,
and the implicit parts are definitionally equal.

Used to decide when two rewrite results "look the same" to the user: implicit and
instance arguments are invisible in the rendered output, so they are only compared
up to definitional equality. -/
partial def isExplicitEq (t s : Expr) : MetaM Bool := do
  if t == s then
    return true
  -- Heads and arities must agree syntactically before we compare arguments pairwise.
  unless t.getAppNumArgs == s.getAppNumArgs && t.getAppFn == s.getAppFn do
    return false
  let tArgs := t.getAppArgs
  let sArgs := s.getAppArgs
  let bis ← getBinderInfos t.getAppFn tArgs
  -- Explicit arguments recurse through this same check; all others use `isDefEq`.
  t.getAppNumArgs.allM fun i _ =>
    if bis[i]!.isExplicit then
      isExplicitEq tArgs[i]! sArgs[i]!
    else
      isDefEq tArgs[i]! sArgs[i]!
/-- Filter out duplicate rewrites, reflexive rewrites
or rewrites that have metavariables in the replacement expression.

Runs inside `withNewMCtxDepth` so the definitional-equality checks used for comparison
cannot assign metavariables belonging to the outer context. -/
@[specialize]
def filterRewrites {α} (e : Expr) (rewrites : Array α) (replacement : α → Expr)
    (makesNewMVars : α → Bool) : MetaM (Array α) :=
  withNewMCtxDepth do
  let mut filtered := #[]
  for rw in rewrites do
    -- exclude rewrites that introduce new metavariables into the expression
    if makesNewMVars rw then continue
    -- exclude a reflexive rewrite
    if ← isExplicitEq (replacement rw) e then
      trace[rw??] "discarded reflexive rewrite {replacement rw}"
      continue
    -- exclude two identical looking rewrites (quadratic scan over the kept rewrites)
    if ← filtered.anyM (isExplicitEq (replacement rw) <| replacement ·) then
      trace[rw??] "discarded duplicate rewrite {replacement rw}"
      continue
    filtered := filtered.push rw
  return filtered
/-! ### User interface -/
/-- Return the rewrite tactic that performs the rewrite. -/
def tacticSyntax (rw : Rewrite) (occ : Option Nat) (loc : Option Name) :
MetaM (TSyntax `tactic) := withoutModifyingMCtx do
-- we want the new metavariables to be printed as `?_` in the tactic syntax
for (mvarId, _) in rw.extraGoals do mvarId.setTag .anonymous
let proof ← withOptions (pp.mvars.anonymous.set · false) (PrettyPrinter.delab rw.proof)
mkRewrite occ rw.symm proof loc
open Widget ProofWidgets Jsx Server
/-- The structure with all data necessary for rendering a rewrite suggestion -/
structure RewriteInterface where
  /-- `symm` is `true` when rewriting from right to left -/
  symm : Bool
  /-- The rewrite tactic string that performs the rewrite -/
  tactic : String
  /-- The replacement expression obtained from the rewrite -/
  replacement : Expr
  /-- The pretty-printed form of the replacement expression -/
  replacementString : String
  /-- The extra goals created by the rewrite -/
  extraGoals : Array CodeWithInfos
  /-- The lemma name with hover information -/
  prettyLemma : CodeWithInfos
  /-- The type of the lemma -/
  lemmaType : Expr
  /-- Whether the rewrite introduces new metavariables with the replacement. -/
  makesNewMVars : Bool
/-- Construct the `RewriteInterface` from a `Rewrite`. -/
def Rewrite.toInterface (rw : Rewrite) (name : Name ⊕ FVarId) (occ : Option Nat)
(loc : Option Name) (range : Lsp.Range) : MetaM RewriteInterface := do
let tactic ← tacticSyntax rw occ loc
let tactic ← tacticPasteString tactic range
let replacementString := Format.pretty (← ppExpr rw.replacement)
let mut extraGoals := #[]
for (mvarId, bi) in rw.extraGoals do
if bi.isExplicit then
let extraGoal ← ppExprTagged (← instantiateMVars (← mvarId.getType))
extraGoals := extraGoals.push extraGoal
match name with
| .inl name =>
let prettyLemma := match ← ppExprTagged (← mkConstWithLevelParams name) with
| .tag tag _ => .tag tag (.text s!"{name}")
| code => code
let lemmaType := (← getConstInfo name).type
return { rw with tactic, replacementString, extraGoals, prettyLemma, lemmaType }
| .inr fvarId =>
let prettyLemma ← ppExprTagged (.fvar fvarId)
let lemmaType ← fvarId.getType
return { rw with tactic, replacementString, extraGoals, prettyLemma, lemmaType }
/-- The kind of rewrite -/
inductive Kind where
/-- A rewrite with a local hypothesis -/
| hypothesis
/-- A rewrite with a lemma from the current file -/
| fromFile
/-- A rewrite with a lemma from an imported file -/
| fromCache
/-- Return the Interfaces for rewriting `e`, both filtered and unfiltered. -/
def getRewriteInterfaces (e : Expr) (occ : Option Nat) (loc : Option Name) (except : Option FVarId)
(range : Lsp.Range) :
MetaM (Array (Array RewriteInterface × Kind) × Array (Array RewriteInterface × Kind)) := do
let mut filtr := #[]
let mut all := #[]
for rewrites in ← getHypothesisRewrites e except do
let rewrites ← rewrites.mapM fun (rw, fvarId) => rw.toInterface (.inr fvarId) occ loc range
all := all.push (rewrites, .hypothesis)
filtr := filtr.push (← filterRewrites e rewrites (·.replacement) (·.makesNewMVars), .hypothesis)
for rewrites in ← getModuleRewrites e do
let rewrites ← rewrites.mapM fun (rw, name) => rw.toInterface (.inl name) occ loc range
all := all.push (rewrites, .fromFile)
filtr := filtr.push (← filterRewrites e rewrites (·.replacement) (·.makesNewMVars), .fromFile)
for rewrites in ← getImportRewrites e do
let rewrites ← rewrites.mapM fun (rw, name) => rw.toInterface (.inl name) occ loc range
all := all.push (rewrites, .fromCache)
filtr := filtr.push (← filterRewrites e rewrites (·.replacement) (·.makesNewMVars), .fromCache)
return (filtr, all)
/-- Render the matching side of the rewrite lemma.
This is shown at the header of each section of rewrite results.

`type` is the statement of the lemma; after stripping its `∀`-binders it must be an
equality or an iff, and `k` is applied to its left-hand side (or the right-hand side
when `symm` is `true`). Throws if the stripped statement is not an equation. -/
def pattern {α} (type : Expr) (symm : Bool) (k : Expr → MetaM α) : MetaM α := do
  forallTelescope type fun _ e => do
    let some (lhs, rhs) := eqOrIff? e | throwError "Expected equation, not {indentExpr e}"
    k (if symm then rhs else lhs)
/-- Render the given rewrite results. -/
def renderRewrites (e : Expr) (results : Array (Array RewriteInterface × Kind)) (init : Option Html)
(range : Lsp.Range) (doc : FileWorker.EditableDocument) (showNames : Bool) :
MetaM Html := do
let htmls ← results.filterMapM (renderSection showNames)
let htmls := match init with
| some html => #[html] ++ htmls
| none => htmls
if htmls.isEmpty then
return <p> No rewrites found for <InteractiveCode fmt={← ppExprTagged e}/> </p>
else
return .element "div" #[("style", json% {"marginLeft" : "4px"})] htmls
where
/-- Render one section of rewrite results. -/
renderSection (showNames : Bool) (sec : Array RewriteInterface × Kind) : MetaM (Option Html) := do
let some head := sec.1[0]? | return none
let suffix := match sec.2 with
| .hypothesis => " (local hypotheses)"
| .fromFile => " (lemmas from current file)"
| .fromCache => ""
return <details «open»={true}>
<summary className="mv2 pointer">
Pattern
{← pattern head.lemmaType head.symm (return <InteractiveCode fmt={← ppExprTagged ·}/>)}
{.text suffix}
</summary>
{renderSectionCore showNames sec.1}
</details>
/-- Render the list of rewrite results in one section. -/
renderSectionCore (showNames : Bool) (sec : Array RewriteInterface) : Html :=
.element "ul" #[("style", json% { "padding-left" : "30px"})] <|
sec.map fun rw =>
<li> { .element "p" #[] <|
let button :=
<span className="font-code"> {
Html.ofComponent MakeEditLink
(.ofReplaceRange doc.meta range rw.tactic)
#[.text rw.replacementString] }
</span>
let extraGoals := rw.extraGoals.flatMap fun extraGoal =>
#[<br/>, <strong className="goal-vdash">⊢ </strong>, <InteractiveCode fmt={extraGoal}/>]
#[button] ++ extraGoals ++
if showNames then #[<br/>, <InteractiveCode fmt={rw.prettyLemma}/>] else #[] }
</li>
@[server_rpc_method_cancellable]
private def rpc (props : SelectInsertParams) : RequestM (RequestTask Html) :=
RequestM.asTask do
let doc ← RequestM.readDoc
let some loc := props.selectedLocations.back? |
return .text "rw??: Please shift-click an expression."
if loc.loc matches .hypValue .. then
return .text "rw??: cannot rewrite in the value of a let variable."
let some goal := props.goals[0]? | return .text "rw??: there is no goal to solve!"
if loc.mvarId != goal.mvarId then
return .text "rw??: the selected expression should be in the main goal."
goal.ctx.val.runMetaM {} do
let md ← goal.mvarId.getDecl
let lctx := md.lctx |>.sanitizeNames.run' {options := (← getOptions)}
Meta.withLCtx lctx md.localInstances do
let rootExpr ← loc.rootExpr
let some (subExpr, occ) ← withReducible <| viewKAbstractSubExpr rootExpr loc.pos |
return .text "rw??: expressions with bound variables are not yet supported"
unless ← kabstractIsTypeCorrect rootExpr subExpr loc.pos do
return .text <| "rw??: the selected expression cannot be rewritten, \
because the motive is not type correct. \
This usually occurs when trying to rewrite a term that appears as a dependent argument."
let location ← loc.fvarId?.mapM FVarId.getUserName
let unfoldsHtml ← InteractiveUnfold.renderUnfolds subExpr occ location props.replaceRange doc
let (filtered, all) ← getRewriteInterfaces subExpr occ location loc.fvarId? props.replaceRange
let filtered ← renderRewrites subExpr filtered unfoldsHtml props.replaceRange doc false
let all ← renderRewrites subExpr all unfoldsHtml props.replaceRange doc true
return <FilterDetails
summary={.text "Rewrite suggestions:"}
all={all}
filtered={filtered}
initiallyFiltered={true} />
/-- The component called by the `rw??` tactic -/
@[widget_module]
def LibraryRewriteComponent : Component SelectInsertParams :=
mk_rpc_widget% LibraryRewrite.rpc
/--
`rw??` is an interactive tactic that suggests rewrites for any expression selected by the user.
To use it, shift-click an expression in the goal or a hypothesis that you want to rewrite.
Clicking on one of the rewrite suggestions will paste the relevant rewrite tactic into the editor.
The rewrite suggestions are grouped and sorted by the pattern that the rewrite lemmas match with.
Rewrites that don't change the goal and rewrites that create the same goal as another rewrite
are filtered out, as well as rewrites that have new metavariables in the replacement expression.
To see all suggestions, click on the filter button (▼) in the top right.
-/
elab stx:"rw??" : tactic => do
let some range := (← getFileMap).lspRangeOfStx? stx | return
Widget.savePanelWidgetInfo (hash LibraryRewriteComponent.javascript)
(pure <| json% { replaceRange : $range }) stx
/-- Represent a `Rewrite` as `MessageData`. -/
def Rewrite.toMessageData (rw : Rewrite) (name : Name) : MetaM MessageData := do
let extraGoals ← rw.extraGoals.filterMapM fun (mvarId, bi) => do
if bi.isExplicit then
return some m! "⊢ {← mvarId.getType}"
return none
let list := [m! "{rw.replacement}"]
++ extraGoals.toList
++ [m! "{name}"]
return .group <| .nest 2 <| "· " ++ .joinSep list "\n"
/-- Represent a section of rewrites as `MessageData`. -/
def SectionToMessageData (sec : Array (Rewrite × Name) × Bool) : MetaM (Option MessageData) := do
let rewrites ← sec.1.toList.mapM fun (rw, name) => rw.toMessageData name
let rewrites : MessageData := .group (.joinSep rewrites "\n")
let some (rw, name) := sec.1[0]? | return none
let head ← pattern (← getConstInfo name).type rw.symm (addMessageContext m! "{·}")
return some <| "Pattern " ++ head ++ "\n" ++ rewrites
/-- `#rw?? e` gives all possible rewrites of `e`. It is a testing command for the `rw??` tactic -/
syntax (name := rw??Command) "#rw??" (&"all")? term : command
open Elab
/-- Elaborate a `#rw??` command. -/
@[command_elab rw??Command]
def elabrw??Command : Command.CommandElab := fun stx =>
withoutModifyingEnv <| Command.runTermElabM fun _ => do
let e ← Term.elabTerm stx[2] none
Term.synthesizeSyntheticMVarsNoPostponing
let e ← Term.levelMVarToParam (← instantiateMVars e)
let filter := stx[1].isNone
let mut rewrites := #[]
for rws in ← getModuleRewrites e do
let rws ← if filter then
filterRewrites e rws (·.1.replacement) (·.1.makesNewMVars)
else pure rws
rewrites := rewrites.push (rws, true)
for rws in ← getImportRewrites e do
let rws ← if filter then
filterRewrites e rws (·.1.replacement) (·.1.makesNewMVars)
else pure rws
rewrites := rewrites.push (rws, false)
let sections ← liftMetaM <| rewrites.filterMapM SectionToMessageData
if sections.isEmpty then
logInfo m! "No rewrites found for {e}"
else
logInfo (.joinSep sections.toList "\n\n")
end Mathlib.Tactic.LibraryRewrite |
.lake/packages/mathlib/Mathlib/Tactic/Widget/StringDiagram.lean | import ProofWidgets.Component.PenroseDiagram
import ProofWidgets.Component.Panel.Basic
import ProofWidgets.Presentation.Expr
import ProofWidgets.Component.HtmlDisplay
import Mathlib.Tactic.CategoryTheory.Bicategory.Normalize
import Mathlib.Tactic.CategoryTheory.Monoidal.Normalize
/-!
# String Diagram Widget
This file provides meta infrastructure for displaying string diagrams for morphisms in monoidal
categories in the infoview. To enable the string diagram widget, you need to import this file and
insert `with_panel_widgets [Mathlib.Tactic.Widget.StringDiagram]` at the beginning of the
proof. Alternatively, you can also write
```lean
open Mathlib.Tactic.Widget
show_panel_widgets [local StringDiagram]
```
to enable the string diagram widget in the current section.
We also have the `#string_diagram` command. For example,
```lean
#string_diagram MonoidalCategory.whisker_exchange
```
displays the string diagram for the exchange law of the left and right whiskerings.
String diagrams are graphical representations of morphisms in monoidal categories, which are
useful for rewriting computations. More precisely, objects in a monoidal category are represented
by strings, and morphisms between two objects are represented by nodes connecting two strings
associated with the objects. The tensor product `X ⊗ Y` corresponds to putting strings associated
with `X` and `Y` horizontally (from left to right), and the composition of morphisms `f : X ⟶ Y`
and `g : Y ⟶ Z` corresponds to connecting two nodes associated with `f` and `g` vertically (from
top to bottom) by strings associated with `Y`.
Currently, the string diagram widget provided in this file deals with equalities of morphisms
in monoidal categories. It displays string diagrams corresponding to the morphisms for the
left-hand and right-hand sides of the equality.
Some examples can be found in `MathlibTest/StringDiagram.lean`.
When drawing string diagrams, it is common to ignore associators and unitors. We follow this
convention. To do this, we need to extract non-structural morphisms that are not associators
and unitors from lean expressions. This operation is performed using the `Tactic.Monoidal.eval`
function.
A monoidal category can be viewed as a bicategory with a single object. The program in this
file can also be used to display the string diagram for general bicategories. With this in mind we
will sometimes refer to objects and morphisms in monoidal categories as 1-morphisms and 2-morphisms
respectively, borrowing the terminology of bicategories. Note that the relation between monoidal
categories and bicategories is formalized in `Mathlib/CategoryTheory/Bicategory/SingleObj.lean`,
although the string diagram widget does not use it directly.
-/
namespace Mathlib.Tactic
open Lean Meta Elab
open CategoryTheory
open BicategoryLike
namespace Widget.StringDiagram
initialize registerTraceClass `string_diagram
/-! ## Objects in string diagrams -/
/-- Nodes for 2-morphisms in a string diagram. -/
structure AtomNode : Type where
/-- The vertical position of the node in the string diagram. -/
vPos : ℕ
/-- The horizontal position of the node in the string diagram, counting strings in domains. -/
hPosSrc : ℕ
/-- The horizontal position of the node in the string diagram, counting strings in codomains. -/
hPosTar : ℕ
/-- The underlying expression of the node. -/
atom : Atom
/-- Nodes for identity 2-morphisms in a string diagram. -/
structure IdNode : Type where
/-- The vertical position of the node in the string diagram. -/
vPos : ℕ
/-- The horizontal position of the node in the string diagram, counting strings in domains. -/
hPosSrc : ℕ
/-- The horizontal position of the node in the string diagram, counting strings in codomains. -/
hPosTar : ℕ
/-- The underlying expression of the node. -/
id : Atom₁
/-- Nodes in a string diagram. -/
inductive Node : Type
| atom : AtomNode → Node
| id : IdNode → Node
/-- The underlying expression of a node. -/
def Node.e (n : Node) : Expr :=
  match n with
  | .atom a => a.atom.e
  | .id i => i.id.e
/-- The domain of the 2-morphism associated with a node as a list
(the first component is the node itself). -/
def Node.srcList : Node → List (Node × Atom₁)
| Node.atom n => n.atom.src.toList.map (fun f ↦ (.atom n, f))
| Node.id n => [(.id n, n.id)]
/-- The codomain of the 2-morphism associated with a node as a list
(the first component is the node itself). -/
def Node.tarList : Node → List (Node × Atom₁)
| Node.atom n => n.atom.tgt.toList.map (fun f ↦ (.atom n, f))
| Node.id n => [(.id n, n.id)]
/-- The vertical position of a node in a string diagram. -/
def Node.vPos : Node → ℕ
| Node.atom n => n.vPos
| Node.id n => n.vPos
/-- The horizontal position of a node in a string diagram, counting strings in domains. -/
def Node.hPosSrc : Node → ℕ
| Node.atom n => n.hPosSrc
| Node.id n => n.hPosSrc
/-- The horizontal position of a node in a string diagram, counting strings in codomains. -/
def Node.hPosTar : Node → ℕ
| Node.atom n => n.hPosTar
| Node.id n => n.hPosTar
/-- Strings in a string diagram. -/
structure Strand : Type where
/-- The horizontal position of the strand in the string diagram. -/
hPos : ℕ
/-- The start point of the strand in the string diagram. -/
startPoint : Node
/-- The end point of the strand in the string diagram. -/
endPoint : Node
/-- The underlying expression of the strand. -/
atom₁ : Atom₁
/-- The vertical position of a strand in a string diagram. -/
def Strand.vPos (s : Strand) : ℕ :=
s.startPoint.vPos
end Widget.StringDiagram
namespace BicategoryLike
open Widget.StringDiagram
/-- The list of nodes associated with a 2-morphism. The position is counted from the
specified natural numbers. -/
def WhiskerRight.nodes (v h₁ h₂ : ℕ) : WhiskerRight → List Node
| WhiskerRight.of η => [.atom ⟨v, h₁, h₂, η⟩]
| WhiskerRight.whisker _ η f =>
let ηs := η.nodes v h₁ h₂
let k₁ := (ηs.map (fun n ↦ n.srcList)).flatten.length
let k₂ := (ηs.map (fun n ↦ n.tarList)).flatten.length
let s : Node := .id ⟨v, h₁ + k₁, h₂ + k₂, f⟩
ηs ++ [s]
/-- The list of nodes associated with a 2-morphism. The position is counted from the
specified natural numbers. -/
def HorizontalComp.nodes (v h₁ h₂ : ℕ) : HorizontalComp → List Node
| HorizontalComp.of η => η.nodes v h₁ h₂
| HorizontalComp.cons _ η ηs =>
let s₁ := η.nodes v h₁ h₂
let k₁ := (s₁.map (fun n ↦ n.srcList)).flatten.length
let k₂ := (s₁.map (fun n ↦ n.tarList)).flatten.length
let s₂ := ηs.nodes v (h₁ + k₁) (h₂ + k₂)
s₁ ++ s₂
/-- The list of nodes associated with a 2-morphism. The position is counted from the
specified natural numbers. -/
def WhiskerLeft.nodes (v h₁ h₂ : ℕ) : WhiskerLeft → List Node
| WhiskerLeft.of η => η.nodes v h₁ h₂
| WhiskerLeft.whisker _ f η =>
let s : Node := .id ⟨v, h₁, h₂, f⟩
let ss := η.nodes v (h₁ + 1) (h₂ + 1)
s :: ss
variable {ρ : Type} [MonadMor₁ (CoherenceM ρ)]
/-- The list of nodes at the top of a string diagram. -/
def topNodes (η : WhiskerLeft) : CoherenceM ρ (List Node) := do
return (← η.srcM).toList.mapIdx fun i f => .id ⟨0, i, i, f⟩
/-- The list of nodes at the top of a string diagram. The position is counted from the
specified natural number. -/
def NormalExpr.nodesAux (v : ℕ) : NormalExpr → CoherenceM ρ (List (List Node))
| NormalExpr.nil _ α => return [(← α.srcM).toList.mapIdx fun i f => .id ⟨v, i, i, f⟩]
| NormalExpr.cons _ _ η ηs => do
let s₁ := η.nodes v 0 0
let s₂ ← ηs.nodesAux (v + 1)
return s₁ :: s₂
/-- The list of nodes associated with a 2-morphism. -/
def NormalExpr.nodes (e : NormalExpr) : CoherenceM ρ (List (List Node)) :=
match e with
| NormalExpr.nil _ _ => return []
| NormalExpr.cons _ _ η _ => return (← topNodes η) :: (← e.nodesAux 1)
/-- `pairs [a, b, c, d]` is `[(a, b), (b, c), (c, d)]`:
all adjacent pairs, obtained by zipping the list with its own tail. -/
def pairs {α : Type} (l : List α) : List (α × α) :=
  l.zip l.tail
/-- The list of strands associated with a 2-morphism. -/
def NormalExpr.strands (e : NormalExpr) : CoherenceM ρ (List (List Strand)) := do
let l ← e.nodes
(pairs l).mapM fun (x, y) ↦ do
let xs := (x.map (fun n ↦ n.tarList)).flatten
let ys := (y.map (fun n ↦ n.srcList)).flatten
-- sanity check
if xs.length ≠ ys.length then
throwError "The number of the start and end points of a string does not match."
(xs.zip ys).mapIdxM fun k ((n₁, f₁), (n₂, _)) => do
return ⟨n₁.hPosTar + k, n₁, n₂, f₁⟩
end BicategoryLike
namespace Widget.StringDiagram
/-- A type for Penrose variables. -/
structure PenroseVar : Type where
/-- The identifier of the variable. -/
ident : String
/-- The indices of the variable. -/
indices : List ℕ
/-- The underlying expression of the variable. -/
e : Expr
instance : ToString PenroseVar :=
⟨fun v => v.ident ++ v.indices.foldl (fun s x => s ++ s!"_{x}") ""⟩
/-- The penrose variable associated with a node. -/
def Node.toPenroseVar (n : Node) : PenroseVar :=
⟨"E", [n.vPos, n.hPosSrc, n.hPosTar], n.e⟩
/-- The penrose variable associated with a strand. -/
def Strand.toPenroseVar (s : Strand) : PenroseVar :=
⟨"f", [s.vPos, s.hPos], s.atom₁.e⟩
/-! ## Widget for general string diagrams -/
open ProofWidgets Penrose DiagramBuilderM Lean.Server
open scoped Jsx in
/-- Add the variable `v` with the type `tp` to the substance program. -/
def addPenroseVar (tp : String) (v : PenroseVar) :
DiagramBuilderM Unit := do
let h := <InteractiveCode fmt={← Widget.ppExprTagged v.e} />
addEmbed (toString v) tp h
/-- Add constructor `tp v := nm (vs)` to the substance program. -/
def addConstructor (tp : String) (v : PenroseVar) (nm : String) (vs : List PenroseVar) :
DiagramBuilderM Unit := do
let vs' := ", ".intercalate (vs.map (fun v => toString v))
addInstruction s!"{tp} {v} := {nm} ({vs'})"
open scoped Jsx in
/-- Construct a string diagram from a Penrose `sub`stance program and expressions `embeds` to
display as labels in the diagram. -/
def mkStringDiagram (nodes : List (List Node)) (strands : List (List Strand)) :
DiagramBuilderM PUnit := do
/- Add 2-morphisms. -/
for x in nodes.flatten do
match x with
| .atom _ => do addPenroseVar "Atom" x.toPenroseVar
| .id _ => do addPenroseVar "Id" x.toPenroseVar
/- Add constraints. -/
for l in nodes do
for (x₁, x₂) in pairs l do
addInstruction s!"Left({x₁.toPenroseVar}, {x₂.toPenroseVar})"
/- Add constraints. -/
for (l₁, l₂) in pairs nodes do
if let some x₁ := l₁.head? then
if let some x₂ := l₂.head? then
addInstruction s!"Above({x₁.toPenroseVar}, {x₂.toPenroseVar})"
/- Add 1-morphisms as strings. -/
for l in strands do
for s in l do
addConstructor "Mor1" s.toPenroseVar
"MakeString" [s.startPoint.toPenroseVar, s.endPoint.toPenroseVar]
/-- Penrose dsl file for string diagrams. -/
def dsl :=
include_str ".."/".."/".."/"widget"/"src"/"penrose"/"monoidal.dsl"
/-- Penrose sty file for string diagrams. -/
def sty :=
include_str ".."/".."/".."/"widget"/"src"/"penrose"/"monoidal.sty"
/-- The kind of the context. -/
inductive Kind where
| monoidal : Kind
| bicategory : Kind
| none : Kind
/-- The name of the context. -/
def Kind.name (k : Kind) : Name :=
  match k with
  | .monoidal => `monoidal
  | .bicategory => `bicategory
  | .none => default
/-- Given an expression, return the kind of the context. -/
def mkKind (e : Expr) : MetaM Kind := do
let e ← instantiateMVars e
let e ← (match (← whnfR e).eq? with
| some (_, lhs, _) => return lhs
| none => return e)
let ctx? ← BicategoryLike.mkContext? (ρ := Bicategory.Context) e
match ctx? with
| some _ => return .bicategory
| none =>
let ctx? ← BicategoryLike.mkContext? (ρ := Monoidal.Context) e
match ctx? with
| some _ => return .monoidal
| none => return .none
open scoped Jsx in
/-- Given a 2-morphism, return a string diagram. Otherwise `none`. -/
def stringM? (e : Expr) : MetaM (Option Html) := do
let e ← instantiateMVars e
let k ← mkKind e
let x : Option (List (List Node) × List (List Strand)) ← (match k with
| .monoidal => do
let some ctx ← BicategoryLike.mkContext? (ρ := Monoidal.Context) e | return none
CoherenceM.run (ctx := ctx) do
let e' := (← BicategoryLike.eval k.name (← MkMor₂.ofExpr e)).expr
return some (← e'.nodes, ← e'.strands)
| .bicategory => do
let some ctx ← BicategoryLike.mkContext? (ρ := Bicategory.Context) e | return none
CoherenceM.run (ctx := ctx) do
let e' := (← BicategoryLike.eval k.name (← MkMor₂.ofExpr e)).expr
return some (← e'.nodes, ← e'.strands)
| .none => return none)
match x with
| none => return none
| some (nodes, strands) => do
DiagramBuilderM.run do
mkStringDiagram nodes strands
trace[string_diagram] "Penrose substance: \n{(← get).sub}"
match ← DiagramBuilderM.buildDiagram dsl sty with
| some html => return html
| none => return <span>No non-structural morphisms found.</span>
open scoped Jsx in
/-- Help function for displaying two string diagrams in an equality. -/
def mkEqHtml (lhs rhs : Html) : Html :=
<div className="flex">
<div className="w-50">
<details «open»={true}>
<summary className="mv2 pointer">String diagram for LHS</summary> {lhs}
</details>
</div>
<div className="w-50">
<details «open»={true}>
<summary className="mv2 pointer">String diagram for RHS</summary> {rhs}
</details>
</div>
</div>
/-- Given an equality between 2-morphisms, return a string diagram of the LHS and RHS.
Otherwise `none`. -/
def stringEqM? (e : Expr) : MetaM (Option Html) := do
  let e ← whnfR <| ← instantiateMVars e
  match e.eq? with
  | none => return none
  | some (_, lhs, rhs) =>
    match ← stringM? lhs with
    | none => return none
    | some lhsHtml =>
      match ← stringM? rhs with
      | none => return none
      | some rhsHtml => return some <| mkEqHtml lhsHtml rhsHtml
/-- Given an 2-morphism or equality between 2-morphisms, return a string diagram.
Otherwise `none`. -/
def stringMorOrEqM? (e : Expr) : MetaM (Option Html) := do
forallTelescopeReducing (← whnfR <| ← inferType e) fun xs a => do
if let some html ← stringM? (mkAppN e xs) then
return some html
else if let some html ← stringEqM? a then
return some html
else
return none
/-- The `Expr` presenter for displaying string diagrams. -/
@[expr_presenter]
def stringPresenter : ExprPresenter where
userName := "String diagram"
layoutKind := .block
present type := do
if let some html ← stringMorOrEqM? type then
return html
throwError "Couldn't find a 2-morphism to display a string diagram."
open scoped Jsx in
/-- The RPC method for displaying string diagrams. -/
@[server_rpc_method]
def rpc (props : PanelWidgetProps) : RequestM (RequestTask Html) :=
RequestM.asTask do
let html : Option Html ← (do
if props.goals.isEmpty then
return none
let some g := props.goals[0]? | unreachable!
g.ctx.val.runMetaM {} do
g.mvarId.withContext do
let type ← g.mvarId.getType
stringEqM? type)
match html with
| none => return <span>No String Diagram.</span>
| some inner => return inner
end StringDiagram
open ProofWidgets
/-- Display the string diagrams if the goal is an equality of morphisms in a monoidal category. -/
@[widget_module]
def StringDiagram : Component PanelWidgetProps :=
mk_rpc_widget% StringDiagram.rpc
open Command
/--
Display the string diagram for a given term.
Example usage:
```
/- String diagram for the equality theorem. -/
#string_diagram MonoidalCategory.whisker_exchange
/- String diagram for the morphism. -/
variable {C : Type u} [Category.{v} C] [MonoidalCategory C] {X Y : C} (f : 𝟙_ C ⟶ X ⊗ Y) in
#string_diagram f
```
-/
syntax (name := stringDiagram) "#string_diagram " term : command
@[command_elab stringDiagram, inherit_doc stringDiagram]
def elabStringDiagramCmd : CommandElab := fun
| stx@`(#string_diagram $t:term) => do
let html ← runTermElabM fun _ => do
let e ← try mkConstWithFreshMVarLevels (← realizeGlobalConstNoOverloadWithInfo t)
catch _ => Term.levelMVarToParam (← instantiateMVars (← Term.elabTerm t none))
match ← StringDiagram.stringMorOrEqM? e with
| some html => return html
| none => throwError "could not find a morphism or equality: {e}"
liftCoreM <| Widget.savePanelWidgetInfo
(hash HtmlDisplay.javascript)
(return json% { html: $(← Server.RpcEncodable.rpcEncode html) })
stx
| stx => throwError "Unexpected syntax {stx}."
end Mathlib.Tactic.Widget |
.lake/packages/mathlib/Mathlib/Tactic/Widget/GCongr.lean | import Mathlib.Tactic.Widget.SelectPanelUtils
import Mathlib.Tactic.GCongr
/-! # GCongr widget
This file defines a `gcongr?` tactic that displays a widget panel allowing to generate
a `gcongr` call with holes specified by selecting subexpressions in the goal.
-/
open Lean Meta Server ProofWidgets
/-- Return the text of the suggestion link shown by the `gcongr?` widget together with the
tactic call to insert: a `gcongr` invocation whose pattern has holes at the subexpressions
the user selected. -/
@[nolint unusedArguments]
def makeGCongrString (pos : Array Lean.SubExpr.GoalsLocation) (goalType : Expr)
    (_ : SelectInsertParams) :
    MetaM (String × String × Option (String.Pos.Raw × String.Pos.Raw)) := do
  let subexprPos := getGoalLocations pos
  unless goalType.isAppOf ``LE.le || goalType.isAppOf ``LT.lt || goalType.isAppOf `Int.ModEq do
    panic! "The goal must be a ≤ or < or ≡."
  let mut goalTypeWithMetaVars := goalType
  -- Replace each selected subexpression with a fresh metavariable: these become
  -- the holes of the `gcongr` pattern.
  for pos in subexprPos do
    goalTypeWithMetaVars ← insertMetaVar goalTypeWithMetaVars pos
  -- Index of the relevant side among the relation's arguments:
  -- `Int.ModEq n a b` has its sides at argument positions 1/2, while `≤`/`<`
  -- (type, instance, lhs, rhs) has them at 2/3. The first coordinate of the first
  -- selection's position being `0` selects the LHS, otherwise the RHS.
  let side := if goalType.isAppOf `Int.ModEq then
    if subexprPos[0]!.toArray[0]! = 0 then 1 else 2
  else
    if subexprPos[0]!.toArray[0]! = 0 then 2 else 3
  let sideExpr := goalTypeWithMetaVars.getAppArgs[side]!
  -- The same string serves as both the link label and the inserted tactic text.
  let res := "gcongr " ++ (toString (← Meta.ppExpr sideExpr)).renameMetaVar
  return (res, res, none)
/-- Rpc function for the gcongr widget. -/
@[server_rpc_method]
def GCongrSelectionPanel.rpc := mkSelectionPanelRPC makeGCongrString
"Use shift-click to select sub-expressions in the goal that should become holes in gcongr."
"GCongr 🔍"
/-- The gcongr widget. -/
@[widget_module]
def GCongrSelectionPanel : Component SelectInsertParams :=
mk_rpc_widget% GCongrSelectionPanel.rpc
open scoped Json in
/-- Display a widget panel allowing to generate a `gcongr` call with holes specified by selecting
subexpressions in the goal. -/
elab stx:"gcongr?" : tactic => do
let some replaceRange := (← getFileMap).lspRangeOfStx? stx | return
Widget.savePanelWidgetInfo GCongrSelectionPanel.javascriptHash
(pure <| json% { replaceRange: $(replaceRange) }) stx |
.lake/packages/mathlib/Mathlib/Tactic/Widget/SelectInsertParamsClass.lean | import Mathlib.Init
import Lean.Widget.InteractiveGoal
import Lean.Elab.Deriving.Basic
/-! # SelectInsertParamsClass
Defines the basic class of parameters for a select and insert widget.
This needs to be in a separate file in order to initialize the deriving handler.
-/
open Lean Meta Server
/-- Structures providing parameters for a Select and insert widget. -/
class SelectInsertParamsClass (α : Type) where
/-- Cursor position in the file at which the widget is being displayed. -/
pos : α → Lsp.Position
/-- The current tactic-mode goals. -/
goals : α → Array Widget.InteractiveGoal
/-- Locations currently selected in the goal state. -/
selectedLocations : α → Array SubExpr.GoalsLocation
/-- The range in the source document where the command will be inserted. -/
replaceRange : α → Lsp.Range
namespace Lean.Elab
open Command Parser
/-- Produce the command `instance : SelectInsertParamsClass <declName> := ...`,
implementing each class method by projecting the corresponding structure field of
`declName` (which must therefore provide `pos`, `goals`, `selectedLocations`, and
`replaceRange`). -/
private def mkSelectInsertParamsInstance (declName : Name) : TermElabM Syntax.Command :=
  `(command|instance : SelectInsertParamsClass (@$(mkCIdent declName)) :=
    ⟨fun prop => prop.pos, fun prop => prop.goals,
     fun prop => prop.selectedLocations, fun prop => prop.replaceRange⟩)
/-- Handler deriving a `SelectInsertParamsClass` instance: elaborates one generated
instance command per declaration. Reports failure (returns `false`) when any of the
declarations is not an inductive type. -/
def mkSelectInsertParamsInstanceHandler (declNames : Array Name) : CommandElabM Bool := do
  unless ← declNames.allM isInductive do
    return false
  for declName in declNames do
    elabCommand (← liftTermElabM <| mkSelectInsertParamsInstance declName)
  return true
initialize registerDerivingHandler ``SelectInsertParamsClass mkSelectInsertParamsInstanceHandler
end Lean.Elab |
.lake/packages/mathlib/Mathlib/Tactic/Translate/ToAdditive.lean | import Mathlib.Tactic.Translate.Core
/-!
# The `@[to_additive]` attribute.
The `@[to_additive]` attribute is used to translate multiplicative declarations to their
additive equivalent. See the docstrings of `to_additive` for more information.
-/
namespace Mathlib.Tactic.ToAdditive
open Lean Elab Translate
@[inherit_doc TranslateData.ignoreArgsAttr]
syntax (name := to_additive_ignore_args) "to_additive_ignore_args" (ppSpace num)* : attr
@[inherit_doc relevantArgOption]
syntax (name := to_additive_relevant_arg) "to_additive_relevant_arg " num : attr
@[inherit_doc TranslateData.dontTranslateAttr]
syntax (name := to_additive_dont_translate) "to_additive_dont_translate" : attr
/-- The attribute `to_additive` can be used to automatically transport theorems
and definitions (but not inductive types and structures) from a multiplicative
theory to an additive theory.
To use this attribute, just write:
```
@[to_additive]
theorem mul_comm' {α} [CommSemigroup α] (x y : α) : x * y = y * x := mul_comm x y
```
This code will generate a theorem named `add_comm'`. It is also
possible to manually specify the name of the new declaration:
```
@[to_additive add_foo]
theorem foo := sorry
```
An existing documentation string will _not_ be automatically used, so if the theorem or definition
has a doc string, a doc string for the additive version should be passed explicitly to
`to_additive`.
```
/-- Multiplication is commutative -/
@[to_additive /-- Addition is commutative -/]
theorem mul_comm' {α} [CommSemigroup α] (x y : α) : x * y = y * x := CommSemigroup.mul_comm
```
The transport tries to do the right thing in most cases using several
heuristics described below. However, in some cases it fails, and
requires manual intervention.
Use the `to_additive existing` syntax to use an existing additive declaration, instead of
automatically generating it.
Use the `(reorder := ...)` syntax to reorder the arguments in the generated additive declaration.
This is specified using cycle notation. For example `(reorder := 1 2, 5 6)` swaps the first two
arguments with each other and the fifth and the sixth argument and `(reorder := 3 4 5)` will move
the fifth argument before the third argument. This is mostly useful to translate declarations using
`Pow` to those using `SMul`.
Use the `(attr := ...)` syntax to apply attributes to both the multiplicative and the additive
version:
```
@[to_additive (attr := simp)] lemma mul_one' {G : Type*} [Group G] (x : G) : x * 1 = x := mul_one x
```
For `simps` this also ensures that some generated lemmas are added to the additive dictionary.
`@[to_additive (attr := to_additive)]` is a special case, where the `to_additive`
attribute is added to the generated lemma only, to additivize it again.
This is useful for lemmas about `Pow` to generate both lemmas about `SMul` and `VAdd`. Example:
```
@[to_additive (attr := to_additive VAdd_lemma, simp) SMul_lemma]
lemma Pow_lemma ... :=
```
In the above example, the `simp` is added to all 3 lemmas. All other options to `to_additive`
(like the generated name or `(reorder := ...)`) are not passed down,
and can be given manually to each individual `to_additive` call.
## Implementation notes
The transport process generally works by taking all the names of
identifiers appearing in the name, type, and body of a declaration and
creating a new declaration by mapping those names to additive versions
using a simple string-based dictionary and also using all declarations
that have previously been labeled with `to_additive`. The dictionary is `ToAdditive.nameDict`
and can be found in the `Tactic.ToAdditive.GuessName` file. If you introduce a new name which
should be translated by `to_additive` you should add the translation to this dictionary.
In the `mul_comm'` example above, `to_additive` maps:
* `mul_comm'` to `add_comm'`,
* `CommSemigroup` to `AddCommSemigroup`,
* `x * y` to `x + y` and `y * x` to `y + x`, and
* `CommSemigroup.mul_comm'` to `AddCommSemigroup.add_comm'`.
### Heuristics
`to_additive` uses heuristics to determine whether a particular identifier has to be
mapped to its additive version. The basic heuristic is
* Only map an identifier to its additive version if its first argument doesn't
contain any unapplied identifiers.
Examples:
* `@Mul.mul Nat n m` (i.e. `(n * m : Nat)`) will not change to `+`, since its
first argument is `Nat`, an identifier not applied to any arguments.
* `@Mul.mul (α × β) x y` will change to `+`. Its first argument contains only the identifier
`Prod`, but this is applied to arguments, `α` and `β`.
* `@Mul.mul (α × Int) x y` will not change to `+`, since its first argument contains `Int`.
The reasoning behind the heuristic is that the first argument is the type which is "additivized",
and this usually doesn't make sense if this is on a fixed type.
There are some exceptions to this heuristic:
* Identifiers that have the `@[to_additive]` attribute are ignored.
For example, multiplication in `↥Semigroup` is replaced by addition in `↥AddSemigroup`.
You can turn this behavior off by *also* adding the `@[to_additive_dont_translate]` attribute.
* If an identifier `d` has attribute `@[to_additive (relevant_arg := n)]` then the argument
in position `n` is checked for a fixed type, instead of checking the first argument.
`@[to_additive]` will automatically add the attribute `(relevant_arg := n)` to a
declaration when the first argument has no multiplicative type-class, but argument `n` does.
* If an identifier has attribute `@[to_additive_ignore_args n1 n2 ...]` then all the arguments in
positions `n1`, `n2`, ... will not be checked for unapplied identifiers (start counting from 1).
For example, `ContMDiffMap` has attribute `@[to_additive_ignore_args 21]`, which means
that its 21st argument `(n : WithTop ℕ)` can contain `ℕ`
(usually in the form `Top.top ℕ ...`) and still be additivized.
So `@Mul.mul (C^∞⟮I, N; I', G⟯) _ f g` will be additivized.
### Troubleshooting
If `@[to_additive]` fails because the additive declaration raises a type mismatch, there are
various things you can try.
The first thing to do is to figure out what `@[to_additive]` did wrong by looking at the type
mismatch error.
* Option 1: The most common case is that it didn't additivize a declaration that should be
additivized. This happened because the heuristic applied, and the first argument contains a
fixed type, like `ℕ` or `ℝ`. However, the heuristic misfires on some other declarations.
Solutions:
* First figure out what the fixed type is in the first argument of the declaration that didn't
get additivized. Note that this fixed type can occur in implicit arguments. If manually finding
it is hard, you can run `set_option trace.to_additive_detail true` and search the output for the
fragment "contains the fixed type" to find what the fixed type is.
* If the fixed type has an additive counterpart (like `↥Semigroup`), give it the `@[to_additive]`
attribute.
* If the fixed type has nothing to do with algebraic operations (like `TopCat`), add the attribute
`@[to_additive self]` to the fixed type `Foo`.
* If the fixed type occurs inside the `k`-th argument of a declaration `d`, and the
`k`-th argument is not connected to the multiplicative structure on `d`, consider adding
attribute `[to_additive_ignore_args k]` to `d`.
Example: `ContMDiffMap` ignores the argument `(n : WithTop ℕ)`
* If none of the arguments have a multiplicative structure, then the heuristic should not apply at
all. This can be achieved by setting `relevant_arg` out of bounds, e.g. `(relevant_arg := 100)`.
* Option 2: It additivized a declaration `d` that should remain multiplicative. Solution:
* Make sure the first argument of `d` is a type with a multiplicative structure. If not, can you
reorder the (implicit) arguments of `d` so that the first argument becomes a type with a
multiplicative structure (and not some indexing type)?
The reason is that `@[to_additive]` doesn't additivize declarations if their first argument
contains fixed types like `ℕ` or `ℝ`. See section Heuristics.
If the first argument is not the argument with a multiplicative type-class, `@[to_additive]`
should have automatically added the attribute `(relevant_arg := ...)` to the declaration.
You can test this by running the following (where `d` is the full name of the declaration):
```
open Lean in run_cmd logInfo m!"{ToAdditive.relevantArgAttr.find? (← getEnv) `d}"
```
The expected output is `n` where the `n`-th (0-indexed) argument of `d` is a type (family)
with a multiplicative structure on it. `none` means `0`.
If you get a different output (or a failure), you could add the attribute
`@[to_additive (relevant_arg := n)]` manually, where `n` is an (1-indexed) argument with a
multiplicative structure.
* Option 3: Arguments / universe levels are incorrectly ordered in the additive version.
This likely only happens when the multiplicative declaration involves `pow`/`^`. Solutions:
* Ensure that the order of arguments of all relevant declarations are the same for the
multiplicative and additive version. This might mean that arguments have an "unnatural" order
(e.g. `Monoid.npow n x` corresponds to `x ^ n`, but it is convenient that `Monoid.npow` has this
argument order, since it matches `AddMonoid.nsmul n x`).
* If this is not possible, add `(reorder := ...)` argument to `to_additive`.
If neither of these solutions work, and `to_additive` is unable to automatically generate the
additive version of a declaration, manually write and prove the additive version.
Often the proof of a lemma/theorem can just be the multiplicative version of the lemma applied to
`Multiplicative G`.
Afterwards, apply the attribute manually:
```
attribute [to_additive foo_add_bar] foo_bar
```
This will allow future uses of `to_additive` to recognize that
`foo_bar` should be replaced with `foo_add_bar`.
### Handling of hidden definitions
Before transporting the “main” declaration `src`, `to_additive` first
scans its type and value for names starting with `src`, and transports
them. This includes auxiliary definitions like `src._match_1`
In addition to transporting the “main” declaration, `to_additive` transports
its equational lemmas and tags them as equational lemmas for the new declaration.
### Structure fields and constructors
If `src` is a structure, then the additive version has to be already written manually.
In this case `to_additive` adds all structure fields to its mapping.
### Name generation
* If `@[to_additive]` is called without a `name` argument, then the
new name is autogenerated. First, it takes the longest prefix of
the source name that is already known to `to_additive`, and replaces
this prefix with its additive counterpart. Second, it takes the last
part of the name (i.e., after the last dot), and replaces common
name parts (“mul”, “one”, “inv”, “prod”) with their additive versions.
* You can add a namespace translation using the following command:
```
insert_to_additive_translation QuotientGroup QuotientAddGroup
```
Later uses of `@[to_additive]` on declarations in the `QuotientGroup`
namespace will be created in the `QuotientAddGroup` namespace.
This is not necessary if there is already a declaration with name `QuotientGroup`.
* If `@[to_additive]` is called with a `name` argument `new_name`
/without a dot/, then `to_additive` updates the prefix as described
above, then replaces the last part of the name with `new_name`.
* If `@[to_additive]` is called with a `name` argument
`NewNamespace.new_name` /with a dot/, then `to_additive` uses this
new name as is.
As a safety check, in the first case `to_additive` double checks
that the new name differs from the original one. -/
syntax (name := to_additive) "to_additive" "?"? attrArgs : attr
@[inherit_doc to_additive]
macro "to_additive?" rest:attrArgs : attr => `(attr| to_additive ? $rest)
@[inherit_doc to_additive_ignore_args]
initialize ignoreArgsAttr : NameMapExtension (List Nat) ←
registerNameMapAttribute {
name := `to_additive_ignore_args
descr :=
"Auxiliary attribute for `to_additive` stating that certain arguments are not additivized."
add := fun _ stx ↦ do
let ids ← match stx with
| `(attr| to_additive_ignore_args $[$ids:num]*) => pure <| ids.map (·.1.isNatLit?.get!)
| _ => throwUnsupportedSyntax
return ids.toList }
/-- An extension that stores all the declarations that need their arguments reordered when
applying `@[to_additive]`. It is applied using the `to_additive (reorder := ...)` syntax. -/
initialize reorderAttr : NameMapExtension (List (List Nat)) ←
registerNameMapExtension _
/-- Linter to check that the `relevant_arg` attribute is not given manually -/
register_option linter.toAdditiveRelevantArg : Bool := {
defValue := true
descr := "Linter to check that the `relevant_arg` attribute is not given manually." }
@[inherit_doc to_additive_relevant_arg]
initialize relevantArgAttr : NameMapExtension Nat ←
registerNameMapAttribute {
name := `to_additive_relevant_arg
descr := "Auxiliary attribute for `to_additive` stating \
which arguments are the types with a multiplicative structure."
add := fun
| _, stx@`(attr| to_additive_relevant_arg $id) => do
Linter.logLintIf linter.toAdditiveRelevantArg stx
m!"This attribute is deprecated. Use `@[to_additive (relevant_arg := ...)]` instead."
pure <| id.getNat.pred
| _, _ => throwUnsupportedSyntax }
@[inherit_doc to_additive_dont_translate]
initialize dontTranslateAttr : NameMapExtension Unit ←
registerNameMapAttribute {
name := `to_additive_dont_translate
descr := "Auxiliary attribute for `to_additive` stating \
that the operations on this type should not be translated."
add := fun
| _, `(attr| to_additive_dont_translate) => return
| _, _ => throwUnsupportedSyntax }
/-- Maps multiplicative names to their additive counterparts. -/
initialize translations : NameMapExtension Name ← registerNameMapExtension _
@[inherit_doc GuessName.GuessNameData.nameDict]
def nameDict : Std.HashMap String (List String) := .ofList [
("one", ["Zero"]),
("mul", ["Add"]),
("smul", ["VAdd"]),
("inv", ["Neg"]),
("div", ["Sub"]),
("prod", ["Sum"]),
("hmul", ["HAdd"]),
("hsmul", ["HVAdd"]),
("hdiv", ["HSub"]),
("hpow", ["HSMul"]),
("finprod", ["Finsum"]),
("tprod", ["TSum"]),
("pow", ["NSMul"]),
("npow", ["NSMul"]),
("zpow", ["ZSMul"]),
("mabs", ["Abs"]),
("monoid", ["Add", "Monoid"]),
("submonoid", ["Add", "Submonoid"]),
("group", ["Add", "Group"]),
("subgroup", ["Add", "Subgroup"]),
("semigroup", ["Add", "Semigroup"]),
("magma", ["Add", "Magma"]),
("haar", ["Add", "Haar"]),
("prehaar", ["Add", "Prehaar"]),
("unit", ["Add", "Unit"]),
("units", ["Add", "Units"]),
("cyclic", ["Add", "Cyclic"]),
("semigrp", ["Add", "Semigrp"]),
("grp", ["Add", "Grp"]),
("commute", ["Add", "Commute"]),
("semiconj", ["Add", "Semiconj"]),
("rootable", ["Divisible"]),
("zpowers", ["ZMultiples"]),
("powers", ["Multiples"]),
("multipliable", ["Summable"]),
("gpfree", ["APFree"]),
("quantale", ["Add", "Quantale"]),
("square", ["Even"]),
("mconv", ["Conv"]),
("irreducible", ["Add", "Irreducible"]),
("mlconvolution", ["LConvolution"])]
@[inherit_doc GuessName.GuessNameData.abbreviationDict]
def abbreviationDict : Std.HashMap String String := .ofList [
("isCancelAdd", "IsCancelAdd"),
("isLeftCancelAdd", "IsLeftCancelAdd"),
("isRightCancelAdd", "IsRightCancelAdd"),
("cancelAdd", "AddCancel"),
("leftCancelAdd", "AddLeftCancel"),
("rightCancelAdd", "AddRightCancel"),
("cancelCommAdd", "AddCancelComm"),
("commAdd", "AddComm"),
("zero_le", "Nonneg"),
("zeroLE", "Nonneg"),
("zero_lt", "Pos"),
("zeroLT", "Pos"),
("lezero", "Nonpos"),
("le_zero", "Nonpos"),
("ltzero", "Neg"),
("lt_zero", "Neg"),
("addSingle", "Single"),
("add_single", "Single"),
("addSupport", "Support"),
("add_support", "Support"),
("addTSupport", "TSupport"),
("add_tsupport", "TSupport"),
("addIndicator", "Indicator"),
("add_indicator", "Indicator"),
("isEven", "Even"),
-- "Regular" is well-used in mathlib with various meanings (e.g. in
-- measure theory) and a direct translation
-- "regular" --> "addRegular" in `nameDict` above seems error-prone.
("isRegular", "IsAddRegular"),
("isLeftRegular", "IsAddLeftRegular"),
("isRightRegular", "IsAddRightRegular"),
("hasFundamentalDomain", "HasAddFundamentalDomain"),
("quotientMeasure", "AddQuotientMeasure"),
("negFun", "InvFun"),
("uniqueProds", "UniqueSums"),
("orderOf", "AddOrderOf"),
("zeroLePart", "PosPart"),
("leZeroPart", "NegPart"),
("isScalarTower", "VAddAssocClass"),
("isOfFinOrder", "IsOfFinAddOrder"),
("isCentralScalar", "IsCentralVAdd"),
("function_addSemiconj", "Function_semiconj"),
("function_addCommute", "Function_commute"),
("divisionAddMonoid", "SubtractionMonoid"),
("subNegZeroAddMonoid", "SubNegZeroMonoid"),
("modularCharacter", "AddModularCharacter")]
/-- The bundle of environment extensions for `to_additive` -/
def data : TranslateData where
ignoreArgsAttr := ignoreArgsAttr
reorderAttr := reorderAttr
relevantArgAttr := relevantArgAttr
dontTranslateAttr := dontTranslateAttr
translations := translations
attrName := `to_additive
changeNumeral := true
isDual := false
guessNameData := { nameDict, abbreviationDict }
initialize registerBuiltinAttribute {
name := `to_additive
descr := "Transport multiplicative to additive"
add := fun src stx kind ↦ discard do
addTranslationAttr data src (← elabTranslationAttr stx) kind
-- we (presumably) need to run after compilation to properly add the `simp` attribute
applicationTime := .afterCompilation
}
/-- `insert_to_additive_translation mulName addName` inserts the translation `mulName ↦ addName`
into the `to_additive` dictionary. This is useful for translating namespaces that don't (yet)
have a corresponding translated declaration. -/
elab "insert_to_additive_translation" src:ident tgt:ident : command => do
Command.liftCoreM <| insertTranslation data src.getId tgt.getId
end Mathlib.Tactic.ToAdditive |
.lake/packages/mathlib/Mathlib/Tactic/Translate/Core.lean | import Batteries.Tactic.Trans
import Lean.Compiler.NoncomputableAttr
import Lean.Elab.Tactic.Ext
import Lean.Meta.Tactic.Rfl
import Lean.Meta.Tactic.Symm
import Mathlib.Data.Array.Defs
import Mathlib.Data.Nat.Notation
import Mathlib.Lean.Expr.ReplaceRec
import Mathlib.Lean.Meta.Simp
import Mathlib.Lean.Name
import Mathlib.Tactic.Eqns -- just to copy the attribute
import Mathlib.Tactic.Simps.Basic
import Mathlib.Tactic.Translate.GuessName
/-!
# The translation attribute.
Implementation of the translation attribute. This is used for `@[to_additive]` and `@[to_dual]`.
See the docstring of `to_additive` for more information
-/
open Lean Meta Elab Command Std
namespace Mathlib.Tactic.Translate
open Translate -- currently needed to enable projection notation
/-- `(attr := ...)` applies the given attributes to the original and the translated declaration.
In the case of `to_additive`, we may want to apply it multiple times,
(such as in `a ^ n` -> `n • a` -> `n +ᵥ a`). In this case, you should use the syntax
`to_additive (attr := some_other_attr, to_additive)`, which will apply `some_other_attr` to all
three generated declarations.
-/
syntax attrOption := &"attr" " := " Parser.Term.attrInstance,*
/--
`(reorder := ...)` reorders the arguments/hypotheses in the generated declaration.
It uses cycle notation. For example `(reorder := 1 2, 5 6)` swaps the first two
arguments with each other and the fifth and the sixth argument and `(reorder := 3 4 5)` will move
the fifth argument before the third argument. This is used in `to_dual` to swap the arguments in
`≤`, `<` and `⟶`. It is also used in `to_additive` to translate from `^` to `•`.
-/
syntax reorderOption := &"reorder" " := " (num+),+
/--
the `(relevant_arg := ...)` option tells which argument to look at to determine whether to
translate this constant. This is inferred automatically using the function `findRelevantArg`,
but it can also be overwritten using this syntax.
If there are multiple possible arguments, we typically tag the first one.
If this argument contains a fixed type, this declaration will not be translated.
See the Heuristics section of the `to_additive` doc-string for more details.
If a declaration is not tagged, it is presumed that the first argument is relevant.
To indicate that there is no relevant argument, set it to a number that is out of bounds,
i.e. larger than the number of arguments, e.g. `(relevant_arg := 100)`.
Implementation note: we only allow exactly 1 relevant argument, even though some declarations
(like `Prod.instGroup`) have multiple relevant arguments.
The reason is that whether we translate a declaration is an all-or-nothing decision, and
we will not be able to translate declarations that (e.g.) talk about multiplication on `ℕ × α`
anyway.
-/
syntax relevantArgOption := &"relevant_arg" " := " num
/--
`(dont_translate := ...)` takes a list of type variables (separated by spaces) that should not be
considered for translation. For example in
```
lemma foo {α β : Type} [Group α] [Group β] (a : α) (b : β) : a * a⁻¹ = 1 ↔ b * b⁻¹ = 1
```
we can choose to only translate `α` by writing `to_additive (dont_translate := β)`.
-/
syntax dontTranslateOption := &"dont_translate" " := " ident+
syntax bracketedOption := "(" attrOption <|> reorderOption <|>
relevantArgOption <|> dontTranslateOption ")"
/-- A hint for where to find the translated declaration (`existing` or `self`) -/
syntax existingNameHint := (ppSpace (&"existing" <|> &"self"))?
syntax attrArgs :=
existingNameHint (ppSpace bracketedOption)* (ppSpace ident)? (ppSpace (str <|> docComment))?
-- We omit a doc-string on these syntaxes to instead show the `to_additive` or `to_dual` doc-string
attribute [nolint docBlame] attrArgs bracketedOption
/-- An attribute that stores all the declarations that deal with numeric literals on variable types.
Numeral literals occur in expressions without type information, so in order to decide whether `1`
needs to be changed to `0`, the context around the numeral is relevant.
Most numerals will be in an `OfNat.ofNat` application, though tactics can add numeral literals
inside arbitrary functions. By default we assume that we do not change numerals, unless it is
in a function application with the `translate_change_numeral` attribute.
`@[translate_change_numeral n₁ ...]` should be added to all functions that take one or more
numerals as argument that should be changed if `shouldTranslate` succeeds on the first argument,
i.e. when the numeral is only translated if the first argument is a variable
(or consists of variables).
The arguments `n₁ ...` are the positions of the numeral arguments (starting counting from 1). -/
syntax (name := translate_change_numeral) "translate_change_numeral" (ppSpace num)* : attr
initialize registerTraceClass `translate
initialize registerTraceClass `translate_detail
/-- Linter, mostly used by translate attributes, that checks that the source declaration doesn't
have certain attributes -/
register_option linter.existingAttributeWarning : Bool := {
defValue := true
descr := "Linter, mostly used by translate attributes, that checks that the source declaration \
doesn't have certain attributes" }
/-- Linter used by translate attributes that checks if the given declaration name is
equal to the automatically generated name -/
register_option linter.translateGenerateName : Bool := {
defValue := true
descr := "Linter used by translate attributes that checks if the given declaration name is \
equal to the automatically generated name" }
/-- Linter to check whether the user correctly specified that the translated declaration already
exists -/
register_option linter.translateExisting : Bool := {
defValue := true
descr := "Linter used by translate attributes that checks whether the user correctly specified \
that the translated declaration already exists" }
@[inherit_doc translate_change_numeral]
initialize changeNumeralAttr : NameMapExtension (List Nat) ←
registerNameMapAttribute {
name := `translate_change_numeral
descr :=
"Auxiliary attribute for `to_additive` that stores functions that have numerals as argument."
add := fun
| _, `(attr| translate_change_numeral $[$arg]*) =>
pure <| arg.map (·.1.isNatLit?.get!.pred) |>.toList
| _, _ => throwUnsupportedSyntax }
/-- `TranslateData` is a structure that holds all data required for a translation attribute. -/
structure TranslateData : Type where
/-- An attribute that tells that certain arguments of this definition are not
involved when translating.
This helps the translation heuristic by also transforming definitions if `ℕ` or another
fixed type occurs as one of these arguments. -/
ignoreArgsAttr : NameMapExtension (List Nat)
/-- `reorderAttr` stores the declarations that need their arguments reordered when translating.
This is specified using the `(reorder := ...)` syntax. -/
reorderAttr : NameMapExtension (List <| List Nat)
relevantArgAttr : NameMapExtension Nat
/-- The global `dont_translate` attribute specifies that operations on the given type
should not be translated. This can be either for types that are translated,
such as `MonoidAlgebra` -> `AddMonoidAlgebra`, or for fixed types, such as `Fin n`/`ZMod n`.
Note: The name generation is not aware that the operations on this type should not be translated,
so you generally have to specify a name manually, if some part should not be translated.
-/
dontTranslateAttr : NameMapExtension Unit
/-- `translations` stores all of the constants that have been tagged with this attribute,
and maps them to their translation. -/
translations : NameMapExtension Name
/-- The name of the attribute, for example `to_additive` or `to_dual`. -/
attrName : Name
/-- If `changeNumeral := true`, then try to translate the number `1` to `0`. -/
changeNumeral : Bool
/-- When `isDual := true`, every translation `A → B` will also give a translation `B → A`. -/
isDual : Bool
guessNameData : GuessName.GuessNameData
attribute [inherit_doc relevantArgOption] TranslateData.relevantArgAttr
attribute [inherit_doc GuessName.GuessNameData] TranslateData.guessNameData
/-- Get the translation for the given name. -/
def findTranslation? (env : Environment) (t : TranslateData) : Name → Option Name :=
(t.translations.getState env).find?
/-- Get the translation for the given name,
falling back to translating a prefix of the name if the full name can't be translated.
This allows translating automatically generated declarations such as `IsRegular.casesOn`. -/
def findPrefixTranslation (env : Environment) (nm : Name) (t : TranslateData) : Name :=
nm.mapPrefix (findTranslation? env t)
/-- Add a name translation to the translations map. -/
def insertTranslation (t : TranslateData) (src tgt : Name) (failIfExists := true) : CoreM Unit := do
if let some tgt' := findTranslation? (← getEnv) t src then
if failIfExists then
throwError "The translation {src} ↦ {tgt'} already exists"
else
trace[translate] "The translation {src} ↦ {tgt'} already exists"
return
modifyEnv (t.translations.addEntry · (src, tgt))
trace[translate] "Added translation {src} ↦ {tgt}"
-- For an attribute like `to_dual`, we also insert the reverse direction of the translation
if t.isDual && src != tgt then
if let some src' := findTranslation? (← getEnv) t tgt then
if failIfExists then
throwError "The translation {tgt} ↦ {src'} already exists"
else
trace[translate] "The translation {tgt} ↦ {src'} already exists"
return
modifyEnv (t.translations.addEntry · (tgt, src))
trace[translate] "Also added translation {tgt} ↦ {src}"
/-- `ArgInfo` stores information about how a constant should be translated. -/
structure ArgInfo where
/-- The arguments that should be reordered when translating, using cycle notation. -/
reorder : List (List Nat) := []
/-- The argument used to determine whether this constant should be translated. -/
relevantArg : Nat := 0
/-- Add a name translation to the translations map and add the `argInfo` information to `src`. -/
def insertTranslationAndInfo (t : TranslateData) (src tgt : Name) (argInfo : ArgInfo)
(failIfExists := true) : CoreM Unit := do
insertTranslation t src tgt failIfExists
if argInfo.reorder != [] then
trace[translate] "@[{t.attrName}] will reorder the arguments of {tgt} by {argInfo.reorder}."
t.reorderAttr.add src argInfo.reorder
if argInfo.relevantArg != 0 then
trace[translate_detail] "Setting relevant_arg for {src} to be {argInfo.relevantArg}."
t.relevantArgAttr.add src argInfo.relevantArg
/-- `Config` is the type of the arguments that can be provided to `to_additive`. -/
structure Config : Type where
/-- View the trace of the translation procedure.
Equivalent to `set_option trace.translate true`. -/
trace : Bool := false
/-- The given name of the target. -/
tgt : Name := Name.anonymous
/-- An optional doc string. -/
doc : Option String := none
/-- If `allowAutoName` is `false` (default) then
we check whether the given name can be auto-generated. -/
allowAutoName : Bool := false
/-- The arguments that should be reordered when translating, using cycle notation. -/
reorder : List (List Nat) := []
/-- The argument used to determine whether this constant should be translated. -/
relevantArg? : Option Nat := none
/-- The attributes which we want to give to the original and translated declaration.
For `simps` this will also add generated lemmas to the translation dictionary. -/
attrs : Array Syntax := #[]
/-- A list of type variables that should not be translated. -/
dontTranslate : List Ident := []
/-- The `Syntax` element corresponding to the translation attribute,
which we need for adding definition ranges, and for logging messages. -/
ref : Syntax
/-- An optional flag stating that the translated declaration already exists.
If this flag is wrong about whether the translated declaration exists, we raise a linter error.
Note: the linter will never raise an error for inductive types and structures. -/
existing : Bool := false
/-- An optional flag stating that the target of the translation is the target itself.
This can be used to reorder arguments, such as in
`attribute [to_dual self (reorder := 3 4)] LE.le`.
It can also be used to give a hint to `shouldTranslate`, such as in
`attribute [to_additive self] Unit`.
If `self := true`, we should also have `existing := true`. -/
self : Bool := false
deriving Repr
-- See https://github.com/leanprover/lean4/issues/10295
attribute [nolint unusedArguments] instReprConfig.repr
/-- Eta expands `e` at most `n` times. -/
def etaExpandN (n : Nat) (e : Expr) : MetaM Expr := do
forallBoundedTelescope (← inferType e) (some n) fun xs _ ↦ mkLambdaFVars xs (mkAppN e xs)
/-- `e.expand` eta-expands all expressions that have as head a constant `n` in `reorder`.
They are expanded until they are applied to one more argument than the maximum in `reorder.find n`.
It also expands all kernel projections that have as head a constant `n` in `reorder`. -/
def expand (t : TranslateData) (e : Expr) : MetaM Expr := do
let env ← getEnv
let reorderFn : Name → List (List ℕ) := fun nm ↦ (t.reorderAttr.find? env nm |>.getD [])
let e₂ ← Lean.Meta.transform (input := e) (skipConstInApp := true)
(post := fun e => return .done e) fun e ↦
e.withApp fun f args ↦ do
match f with
| .proj n i s =>
let some info := getStructureInfo? (← getEnv) n | return .continue -- e.g. if `n` is `Exists`
let some projName := info.getProjFn? i | unreachable!
-- if `projName` has a translation, replace `f` with the application `projName s`
-- and then visit `projName s args` again.
if findTranslation? env t projName |>.isNone then
return .continue
return .visit <| (← whnfD (← inferType s)).withApp fun sf sargs ↦
mkAppN (mkApp (mkAppN (.const projName sf.constLevels!) sargs) s) args
| .const c _ =>
let reorder := reorderFn c
if reorder.isEmpty then
-- no need to expand if nothing needs reordering
return .continue
let needed_n := reorder.flatten.foldr Nat.max 0 + 1
if needed_n ≤ args.size then
return .continue
else
-- in this case, we need to reorder arguments that are not yet
-- applied, so first η-expand the function.
let e' ← etaExpandN (needed_n - args.size) e
trace[translate_detail] "expanded {e} to {e'}"
return .continue e'
| _ => return .continue
if e != e₂ then
trace[translate_detail] "expand:\nBefore: {e}\nAfter: {e₂}"
return e₂
/-- Implementation function for `shouldTranslate`.
Failure means that in that subexpression there is no constant that blocks `e` from being translated.
We cache previous applications of the function, using an expression cache using ptr equality
to avoid visiting the same subexpression many times. Note that we only need to cache the
expressions without taking the value of `inApp` into account, since `inApp` only matters when
the expression is a constant. However, for this reason we have to make sure that we never
cache constant expressions, so that's why the `if`s in the implementation are in this order.
Note that this function is still called many times by `applyReplacementFun`
and we're not remembering the cache between these calls. -/
private unsafe def shouldTranslateUnsafe (env : Environment) (t : TranslateData) (e : Expr)
    (dontTranslate : Array FVarId) : Option (Name ⊕ FVarId) :=
  let rec visit (e : Expr) (inApp := false) : OptionT (StateM (PtrSet Expr)) (Name ⊕ FVarId) := do
    -- Constants are handled *before* the cache check (see the docstring above): the verdict
    -- for a constant depends on `inApp`, so it must never be cached.
    if e.isConst then
      -- The constant does not block translation when it is not tagged `dont_translate` and
      -- it either occurs in head position or has an entry in the translation dictionary.
      if (t.dontTranslateAttr.find? env e.constName).isNone &&
        (inApp || (findTranslation? env t e.constName).isSome) then
        failure
      else
        return .inl e.constName
    -- Pointer-equality cache: a subexpression seen before cannot contribute a new blocker.
    if (← get).contains e then
      failure
    modify fun s => s.insert e
    match e with
    | x@(.app e a) =>
      visit e true <|> do
        -- make sure that we don't treat `(fun x => α) (n + 1)` as a type that depends on `Nat`
        guard !x.isConstantApplication
        if let some n := e.getAppFn.constName? then
          if let some l := t.ignoreArgsAttr.find? env n then
            -- `a` is the (1-based) argument number `e.getAppNumArgs + 1` of `n`;
            -- skip it entirely if that position is in the ignore list.
            if e.getAppNumArgs + 1 ∈ l then
              failure
        visit a
    | .lam _ _ t _ => visit t
    | .forallE _ _ t _ => visit t
    | .letE _ _ e body _ => visit e <|> visit body
    | .mdata _ b => visit b
    | .proj _ _ b => visit b
    | .fvar fvarId => if dontTranslate.contains fvarId then return .inr fvarId else failure
    | _ => failure
  Id.run <| (visit e).run' mkPtrSet
/-- `shouldTranslate env t e dontTranslate` tests whether the expression `e` contains something
that blocks translation: a constant `nm` that is tagged `dont_translate`, or that occurs
unapplied without an entry in the translation dictionary, or a free variable listed in
`dontTranslate`. It returns `some (.inl nm)` (resp. `some (.inr fvarId)`) with the blocking
constant (resp. variable), and `none` if nothing blocks translation.
This is used for deciding which subexpressions to translate: we only translate
constants if `shouldTranslate` applied to their relevant argument returns `none`.
This means we will replace expression applied to e.g. `α` or `α × β`, but not when applied to
e.g. `ℕ` or `ℝ × α`.
We ignore all arguments specified by the `ignore` `NameMap`.
(The `unsafe` in the implementation is only for the pointer-equality cache.) -/
def shouldTranslate (env : Environment) (t : TranslateData) (e : Expr)
    (dontTranslate : Array FVarId := #[]) : Option (Name ⊕ FVarId) :=
  unsafe shouldTranslateUnsafe env t e dontTranslate
/-- Swap the first two elements of a list.
Lists with fewer than two elements are returned unchanged. -/
def List.swapFirstTwo {α : Type*} : List α → List α
  | x :: y :: rest => y :: x :: rest
  | shorter => shorter
/-- Change the numeral `nat_lit 1` to the numeral `nat_lit 0`.
Leave all other expressions unchanged. -/
def changeNumeral (e : Expr) : Expr :=
  match e with
  | .lit (.natVal 1) => mkRawNatLit 0
  | _ => e
/--
`applyReplacementFun e` replaces the expression `e` with its translation.
It translates each identifier (inductive type, defined function etc) in an expression, unless
* The identifier occurs in an application with first argument `arg`; and
* `test arg` is false.
However, if `f` is in the dictionary `relevant`, then the argument `relevant.find f`
is tested, instead of the first argument.
It will also reorder arguments of certain functions, using `reorderFn`:
e.g. `g x₁ x₂ x₃ ... xₙ` becomes `g x₂ x₁ x₃ ... xₙ` if `reorderFn g = some [1]`.
-/
def applyReplacementFun (t : TranslateData) (e : Expr) (dontTranslate : Array FVarId := #[]) :
    MetaM Expr := do
  let e' := aux (← getEnv) (← getBoolOption `trace.translate_detail) (← expand t e)
  -- Make sure any new reserved names in the expr are realized; this needs to be done outside of
  -- `aux` as it is monadic.
  e'.forEach fun
    | .const n .. => do
      if !(← hasConst (skipRealize := false) n) && isReservedName (← getEnv) n then
        executeReservedNameAction n
    | _ => pure ()
  return e'
where /-- Implementation of `applyReplacementFun`. -/
  aux (env : Environment) (trace : Bool) : Expr → Expr :=
    let reorderFn : Name → List (List ℕ) := fun nm ↦ (t.reorderAttr.find? env nm |>.getD [])
    let relevantArg : Name → ℕ := fun nm ↦ (t.relevantArgAttr.find? env nm).getD 0
    Lean.Expr.replaceRec fun r e ↦ Id.run do
      if trace then
        dbg_trace s!"replacing at {e}"
      match e with
      | .const n₀ ls₀ => do
        -- translate the name; position `0` in the reorder data means the first two
        -- universe levels are swapped as well.
        let n₁ := findPrefixTranslation env n₀ t
        let ls₁ : List Level := if 0 ∈ (reorderFn n₀).flatten then ls₀.swapFirstTwo else ls₀
        if trace then
          if n₀ != n₁ then
            dbg_trace s!"changing {n₀} to {n₁}"
          if 0 ∈ (reorderFn n₀).flatten then
            dbg_trace s!"reordering the universe variables from {ls₀} to {ls₁}"
        return some <| .const n₁ ls₁
      | .app g x => do
        let mut gf := g.getAppFn
        if gf.isBVar && x.isLit then
          if trace then
            dbg_trace s!"applyReplacementFun: Variables applied to numerals are not changed {g.app x}"
          return some <| g.app x
        let mut gAllArgs := e.getAppArgs
        let some nm := gf.constName? | return mkAppN (← r gf) (← gAllArgs.mapM r)
        -- e = `(nm y₁ .. yₙ x)
        /- Test if the head should not be replaced. -/
        let relevantArgId := relevantArg nm
        if h : relevantArgId < gAllArgs.size then
          -- the relevant argument is applied: translate the head only when the relevant
          -- argument contains nothing that blocks translation.
          if let some fxd := shouldTranslate env t gAllArgs[relevantArgId] dontTranslate then
            if trace then
              match fxd with
              | .inl fxd => dbg_trace s!"The application of {nm} contains the fixed type \
                  {fxd}, so it is not changed."
              | .inr _ => dbg_trace s!"The application of {nm} contains a fixed \
                  variable so it is not changed."
          else
            gf ← r gf
            /- Test if arguments should be reordered. -/
            let reorder := reorderFn nm
            if !reorder.isEmpty then
              gAllArgs := gAllArgs.permute! reorder
              if trace then
                dbg_trace s!"reordering the arguments of {nm} using the cyclic permutations {reorder}"
        else
          gf ← r gf
        /- Do not replace numerals in specific types. -/
        if let some changedArgNrs := changeNumeralAttr.find? env nm then
          let firstArg := gAllArgs[0]!
          if shouldTranslate env t firstArg dontTranslate |>.isNone then
            if trace then
              dbg_trace s!"applyReplacementFun: We change the numerals in this expression. \
                However, we will still recurse into all the non-numeral arguments."
            -- In this case, we still update all arguments of `g` that are not numerals,
            -- since all other arguments can contain subexpressions like
            -- `(fun x ↦ ℕ) (1 : G)`, and we have to update the `(1 : G)` to `(0 : G)`
            gAllArgs := gAllArgs.mapIdx fun argNr arg ↦
              if changedArgNrs.contains argNr then
                changeNumeral arg
              else
                arg
        return mkAppN gf (← gAllArgs.mapM r)
      | .proj n₀ idx e => do
        -- projections store the structure name: translate it and recurse into the argument.
        let n₁ := findPrefixTranslation env n₀ t
        if trace then
          dbg_trace s!"applyReplacementFun: in projection {e}.{idx} of type {n₀}, \
            replace type with {n₁}"
        return some <| .proj n₁ idx <| ← r e
      | _ => return none
/-- Rename binder names in a pi type: every string component of a binder name is run
through the `GuessName` heuristic; non-string binder names are left untouched. -/
def renameBinderNames (t : TranslateData) (src : Expr) : Expr :=
  src.mapForallBinderNames fun binderName =>
    match binderName with
    | .str p s => .str p (GuessName.guessName t.guessNameData s)
    | other => other
/-- Reorder pi-binders. See doc of `reorderAttr` for the interpretation of the argument -/
def reorderForall (reorder : List (List Nat)) (src : Expr) : MetaM Expr := do
  match reorder.flatten.max? with
  | none => return src
  | some maxReorder =>
    -- Open exactly enough binders so that every position mentioned in the permutation
    -- corresponds to a telescope variable.
    forallBoundedTelescope src (some (maxReorder + 1)) fun xs e => do
      unless xs.size = maxReorder + 1 do
        throwError "the permutation\n{reorder}\nprovided by the `(reorder := ...)` option is \
          out of bounds, the type{indentExpr src}\nhas only {xs.size} arguments"
      mkForallFVars (xs.permute! reorder) e
/-- Reorder lambda-binders. See doc of `reorderAttr` for the interpretation of the argument -/
def reorderLambda (reorder : List (List Nat)) (src : Expr) : MetaM Expr := do
  if let some maxReorder := reorder.flatten.max? then
    let maxReorder := maxReorder + 1
    lambdaBoundedTelescope src maxReorder fun xs e => do
      if xs.size = maxReorder then
        mkLambdaFVars (xs.permute! reorder) e
      else
        -- we don't have to consider the case where the given permutation is out of bounds,
        -- since `reorderForall` applied to the type would already have failed in that case.
        -- η-expand: open the missing binders from the *type* of the remaining body and
        -- re-apply them, so the permutation has enough lambda-binders to act on.
        forallBoundedTelescope (← inferType e) (maxReorder - xs.size) fun ys _ => do
          mkLambdaFVars ((xs ++ ys).permute! reorder) (mkAppN e ys)
  else
    -- nothing to reorder
    return src
/-- Run `applyReplacementFun` on an expression `∀ x₁ .. xₙ, e`,
making sure not to translate type-classes on `xᵢ` if `i` is in `dontTranslate`. -/
def applyReplacementForall (t : TranslateData) (dontTranslate : List Nat) (e : Expr) :
    MetaM Expr := do
  if let some maxDont := dontTranslate.max? then
    -- Open enough binders so every `dont_translate` position becomes a free variable.
    forallBoundedTelescope e (some (maxDont + 1)) fun xs e => do
      let xs := xs.map (·.fvarId!)
      -- Convert the binder positions into the corresponding `FVarId`s.
      let dontTranslate := dontTranslate.filterMap (xs[·]?) |>.toArray
      let mut e ← applyReplacementFun t e dontTranslate
      -- Rebuild the pi-type from the inside out, translating each binder type too.
      -- The reverse order matters: each `abstract` must see the remaining outer fvars.
      for x in xs.reverse do
        let decl ← x.getDecl
        let xType ← applyReplacementFun t decl.type dontTranslate
        e := .forallE decl.userName xType (e.abstract #[.fvar x]) decl.binderInfo
      return e
  else
    -- no protected positions: translate the whole expression directly.
    applyReplacementFun t e #[]
/-- Run `applyReplacementFun` on an expression `fun x₁ .. xₙ ↦ e`,
making sure not to translate type-classes on `xᵢ` if `i` is in `dontTranslate`.
This is the lambda analogue of `applyReplacementForall`. -/
def applyReplacementLambda (t : TranslateData) (dontTranslate : List Nat) (e : Expr) :
    MetaM Expr := do
  if let some maxDont := dontTranslate.max? then
    -- Open enough binders so every `dont_translate` position becomes a free variable.
    lambdaBoundedTelescope e (maxDont + 1) fun xs e => do
      let xs := xs.map (·.fvarId!)
      -- Convert the binder positions into the corresponding `FVarId`s.
      let dontTranslate := dontTranslate.filterMap (xs[·]?) |>.toArray
      let mut e ← applyReplacementFun t e dontTranslate
      -- Rebuild the lambda from the inside out, translating each binder type too.
      for x in xs.reverse do
        let decl ← x.getDecl
        let xType ← applyReplacementFun t decl.type dontTranslate
        e := .lam decl.userName xType (e.abstract #[.fvar x]) decl.binderInfo
      return e
  else
    -- no protected positions: translate the whole expression directly.
    applyReplacementFun t e #[]
/-- Unfold auxlemmas in the type and value. -/
def declUnfoldAuxLemmas (decl : ConstantInfo) : MetaM ConstantInfo := do
  -- Always unfold in the type first; then handle the value, which lives in
  -- `value?` for most kinds of declarations but not for opaque constants.
  let decl := decl.updateType <| ← unfoldAuxLemmas decl.type
  match decl.value? with
  | some v =>
    trace[translate] "value before unfold:{indentExpr v}"
    let decl := decl.updateValue <| ← unfoldAuxLemmas v
    trace[translate] "value after unfold:{indentExpr decl.value!}"
    return decl
  | none =>
    if let .opaqueInfo info := decl then -- not covered by `value?`
      return .opaqueInfo { info with value := ← unfoldAuxLemmas info.value }
    return decl
/--
Given a list of variable local identifiers that shouldn't be translated,
determine the (0-based) positions of the corresponding binders of `type`.
TODO: Currently, this function doesn't deduce any `dont_translate` types from `type`.
In the future we would like that the presence of `MonoidAlgebra k G` will automatically
flag `k` as a type to not be translated.
-/
def getDontTranslates (given : List Ident) (type : Expr) : MetaM (List Nat) :=
  forallTelescope type fun xs _ =>
    given.mapM fun id =>
      -- Attribute errors (e.g. an unknown identifier) to the user-written name.
      withRef id.raw do
        let fvar ← getFVarFromUserName id.getId
        return (xs.idxOf? fvar).get!
/-- Run applyReplacementFun on the given `srcDecl` to make a new declaration with name `tgt`.
`reorder` is the user-supplied argument permutation and `dont` the list of variables
that must not be translated. -/
def updateDecl (t : TranslateData) (tgt : Name) (srcDecl : ConstantInfo)
    (reorder : List (List Nat)) (dont : List Ident) : MetaM ConstantInfo := do
  let mut decl := srcDecl.updateName tgt
  -- Position `0` in the permutation means the first two universe params are swapped.
  if 0 ∈ reorder.flatten then
    decl := decl.updateLevelParams decl.levelParams.swapFirstTwo
  -- Resolve the user-given identifiers to binder positions of the *source* type.
  let dont ← getDontTranslates dont srcDecl.type
  -- Pipeline: rename binders, translate, then reorder (in that order).
  decl := decl.updateType <| ← reorderForall reorder <| ← applyReplacementForall t dont <|
    renameBinderNames t decl.type
  if let some v := decl.value? then
    decl := decl.updateValue <| ← reorderLambda reorder <| ← applyReplacementLambda t dont v
  else if let .opaqueInfo info := decl then -- not covered by `value?`
    decl := .opaqueInfo { info with
      value := ← reorderLambda reorder <| ← applyReplacementLambda t dont info.value }
  return decl
/-- Abstracts the nested proofs in the value of `decl` if it is a def;
all other kinds of declarations are returned unchanged. -/
def declAbstractNestedProofs (decl : ConstantInfo) : MetaM ConstantInfo := do
  if !(decl matches .defnInfo _) then
    return decl
  -- Name the generated `proof_i` auxiliaries after `decl` itself.
  let value ← withDeclNameForAuxNaming decl.name do
    Meta.abstractNestedProofs decl.value!
  return decl.updateValue value
/-- Find the target name of `pre` and all created auxiliary declarations.
`src` is the declaration whose target name we need, `pre` is the declaration that carries
the attribute, and `tgt_pre` is the translation of `pre`. -/
def findTargetName (env : Environment) (t : TranslateData) (src pre tgt_pre : Name) : CoreM Name :=
  /- This covers auxiliary declarations like `match_i` and `proof_i`. -/
  if let some post := pre.isPrefixOf? src then
    return tgt_pre ++ post
  /- This covers equation lemmas (for other declarations). -/
  else if let some post := privateToUserName? src then
    match findTranslation? env t post.getPrefix with
    -- this is an equation lemma for a declaration without a translation. We will skip this.
    | none => return src
    -- this is an equation lemma for a declaration with a translation. We will translate this.
    -- Note: if this errors we could do this instead by calling `getEqnsFor?`
    | some addName => return src.updatePrefix <| mkPrivateName env addName
  else if src.hasMacroScopes then
    -- a hygienic (macro-scoped) name: mint a fresh user-facing name for the copy.
    mkFreshUserName src.eraseMacroScopes
  else
    throwError "internal @[{t.attrName}] error."
/-- Returns a `NameSet` of all auxiliary constants in `e` that might have been generated
when adding `pre` to the environment: constants whose name has prefix `pre`, private names,
and macro-scoped names.
Examples include `pre.match_5` and
`_private.Mathlib.MyFile.someOtherNamespace.someOtherDeclaration._eq_2`.
The last two examples may or may not have been generated by this declaration.
The last example may or may not be the equation lemma of a declaration with a translation attribute.
We will only translate it if it has a translation attribute.
Note that this function would return `proof_nnn` aux lemmas if
we hadn't unfolded them in `declUnfoldAuxLemmas`.
-/
def findAuxDecls (e : Expr) (pre : Name) : NameSet :=
  e.foldConsts ∅ fun n acc =>
    if n.getPrefix == pre || isPrivateName n || n.hasMacroScopes then acc.insert n else acc
/-- Transform the declaration `src` and all declarations `pre._proof_i` occurring in `src`
using the transforms dictionary.
`replace_all`, `trace`, `ignore` and `reorder` are configuration options.
`pre` is the declaration that got the translation attribute and `tgt_pre` is the target of this
declaration.
The recursion first visits all auxiliary declarations that `src` depends on, so that their
translations exist before `src` itself is translated. -/
partial def transformDeclAux (t : TranslateData) (cfg : Config) (pre tgt_pre : Name) :
    Name → CoreM Unit := fun src ↦ do
  let env ← getEnv
  trace[translate_detail] "visiting {src}"
  -- if we have already translated this declaration, we do nothing.
  if (findTranslation? env t src).isSome && src != pre then
      return
  -- if this declaration is not `pre` and not an internal declaration, we return an error,
  -- since we should have already translated this declaration.
  if src != pre && !src.isInternalDetail then
    throwError "The declaration {pre} depends on the declaration {src} which is in the namespace \
      {pre}, but does not have the `@[{t.attrName}]` attribute. This is not supported.\n\
      Workaround: move {src} to a different namespace."
  -- we find, or guess, the translated name of `src`
  let tgt ← findTargetName env t src pre tgt_pre
  -- we skip if we already transformed this declaration before.
  if env.contains tgt then
    if tgt == src then
      -- Note: this can happen for equation lemmas of declarations without a translation.
      trace[translate_detail] "Auxiliary declaration {src} will be translated to itself."
    else
      trace[translate_detail] "Already visited {tgt} as translation of {src}."
    return
  let srcDecl ← getConstInfo src
  -- we first unfold all auxlemmas, since they are not always able to be translated on their own
  let srcDecl ← MetaM.run' do declUnfoldAuxLemmas srcDecl
  -- we then transform all auxiliary declarations generated when elaborating `pre`
  for n in findAuxDecls srcDecl.type pre do
    transformDeclAux t cfg pre tgt_pre n
  if let some value := srcDecl.value? then
    for n in findAuxDecls value pre do
      transformDeclAux t cfg pre tgt_pre n
  if let .opaqueInfo {value, ..} := srcDecl then
    for n in findAuxDecls value pre do
      transformDeclAux t cfg pre tgt_pre n
  -- if the auxiliary declaration doesn't have prefix `pre`, then we have to add this declaration
  -- to the translation dictionary, since otherwise we cannot translate the name.
  if !pre.isPrefixOf src then
    insertTranslation t src tgt
  -- now transform the source declaration
  -- (only the attributed declaration itself uses the user's `reorder`/`dont_translate` options)
  let trgDecl : ConstantInfo ← MetaM.run' <|
    if src == pre then
      updateDecl t tgt srcDecl cfg.reorder cfg.dontTranslate
    else
      updateDecl t tgt srcDecl [] []
  let value ← match trgDecl with
    | .thmInfo { value, .. } | .defnInfo { value, .. } | .opaqueInfo { value, .. } => pure value
    | _ => throwError "Expected {tgt} to have a value."
  trace[translate] "generating\n{tgt} : {trgDecl.type} :=\n {value}"
  try
    -- make sure that the type is correct,
    -- and emit a more helpful error message if it fails
    MetaM.run' <| check value
  catch
    | Exception.error _ msg => throwError "@[{t.attrName}] failed. \
        The translated value is not type correct. For help, see the docstring \
        of `to_additive`, section `Troubleshooting`. \
        Failed to add declaration\n{tgt}:\n{msg}"
    | _ => panic! "unreachable"
  -- "Refold" all the aux lemmas that we unfolded.
  let trgDecl ← MetaM.run' <| declAbstractNestedProofs trgDecl
  /- If `src` is explicitly marked as `noncomputable`, then add the new decl as a declaration but
  do not compile it, and mark is as noncomputable. Otherwise, only log errors in compiling if `src`
  has executable code.
  Note that `noncomputable section` does not explicitly mark noncomputable definitions as
  `noncomputable`, but simply abstains from logging compilation errors.
  This is not a perfect solution, as ideally we *should* complain when `src` should
  produce executable code but fails to do so (e.g. outside of `noncomputable section`). However,
  the `messages` and `infoState` are reset before this runs, so we cannot check for compilation
  errors on `src`. The scope set by `noncomputable` section lives in the `CommandElabM` state
  (which is inaccessible here), so we cannot test for `noncomputable section` directly. See [Zulip](https://leanprover.zulipchat.com/#narrow/channel/287929-mathlib4/topic/to_additive.20and.20noncomputable/with/310541981). -/
  if isNoncomputable env src then
    addDecl trgDecl.toDeclaration!
    setEnv <| addNoncomputable (← getEnv) tgt
  else
    addAndCompile trgDecl.toDeclaration! (logCompileErrors := (IR.findEnvDecl env src).isSome)
  -- copy reducibility and inlining status of abbreviations
  if let .defnDecl { hints := .abbrev, .. } := trgDecl.toDeclaration! then
    if (← getReducibilityStatus src) == .reducible then
      setReducibilityStatus tgt .reducible
    if Compiler.getInlineAttribute? (← getEnv) src == some .inline then
      MetaM.run' <| Meta.setInlineAttribute tgt
  -- now add declaration ranges so jump-to-definition works
  -- note: we currently also do this for auxiliary declarations, while they are not normally
  -- generated for those. We could change that.
  addDeclarationRangesFromSyntax tgt (← getRef) cfg.ref
  if isProtected (← getEnv) src then
    setEnv <| addProtected (← getEnv) tgt
  if defeqAttr.hasTag (← getEnv) src then
    defeqAttr.setTag tgt
  if let some matcherInfo ← getMatcherInfo? src then
    /-
    Use `Match.addMatcherInfo tgt matcherInfo`
    once https://github.com/leanprover/lean4/pull/5068 is in
    -/
    modifyEnv fun env => Match.Extension.addMatcherInfo env tgt matcherInfo
  -- necessary so that e.g. match equations can be generated for `tgt`
  enableRealizationsForConst tgt
/-- Copy the instance attribute in a `to_additive`, preserving the priority and
attribute kind of the source instance.
[todo] it seems not to work when the `to_additive` is added as an attribute later. -/
def copyInstanceAttribute (src tgt : Name) : CoreM Unit := do
  -- Do nothing unless `src` actually is an instance.
  let some prio ← getInstancePriority? src | return
  let kind := (← getInstanceAttrKind? src).getD .global
  trace[translate_detail] "Making {tgt} an instance with priority {prio}."
  addInstance tgt kind prio |>.run'
/-- Warn the user when the declaration has an attribute, as detected by the
predicate `f` on the environment and the source name. -/
def warnAttrCore (stx : Syntax) (f : Environment → Name → Bool)
    (thisAttr attrName src tgt : Name) : CoreM Unit := do
  -- Only warn when `src` actually carries the attribute in question.
  unless f (← getEnv) src do
    return
  let mainMsg := m!"The source declaration {src} was given attribute {attrName} before \
    calling @[{thisAttr}]. \
    The preferred method is to use `@[{thisAttr} (attr := {attrName})]` to apply the \
    attribute to both {src} and the target declaration {tgt}."
  let extraMsg :=
    if thisAttr == `to_additive then
      m!"\nSpecial case: If this declaration was generated by @[to_additive] \
        itself, you can use @[to_additive (attr := to_additive, {attrName})] on the original \
        declaration."
    else ""
  Linter.logLintIf linter.existingAttributeWarning stx (mainMsg ++ extraMsg)
/-- Warn the user when the declaration has a simple scoped attribute.
Specializes `warnAttrCore` to a `SimpleScopedEnvExtension` and a predicate on its state. -/
def warnAttr {α β : Type} [Inhabited β] (stx : Syntax) (attr : SimpleScopedEnvExtension α β)
    (f : β → Name → Bool) (thisAttr attrName src tgt : Name) : CoreM Unit :=
  warnAttrCore stx (fun env n => f (attr.getState env) n) thisAttr attrName src tgt
/-- Warn the user when the declaration has a parametric attribute.
Specializes `warnAttrCore` to a `ParametricAttribute` by checking for a stored parameter. -/
def warnParametricAttr {β : Type} [Inhabited β] (stx : Syntax) (attr : ParametricAttribute β)
    (thisAttr attrName src tgt : Name) : CoreM Unit :=
  warnAttrCore stx (fun env n => (attr.getParam? env n).isSome) thisAttr attrName src tgt
/-- `translateLemmas names argInfo desc t` runs `t` on all elements of `names`
and adds translations between the generated lemmas (the output of `t`).
`names` must be non-empty. -/
def translateLemmas {m : Type → Type} [Monad m] [MonadError m] [MonadLiftT CoreM m]
    (t : TranslateData) (names : Array Name) (argInfo : ArgInfo) (desc : String)
    (runAttr : Name → m (Array Name)) : m Unit := do
  let auxLemmas ← names.mapM runAttr
  -- every declaration must produce the same number of lemmas so they can be paired up
  -- (indexing with `[0]!` relies on `names` being non-empty, see docstring).
  let nLemmas := auxLemmas[0]!.size
  for (nm, lemmas) in names.zip auxLemmas do
    unless lemmas.size == nLemmas do
      throwError "{names[0]!} and {nm} do not generate the same number of {desc}."
  -- Zipping `auxLemmas` with its own tail pairs each batch with the batch of the *next*
  -- declaration, so translations are chained along consecutive elements of `names`.
  for (srcLemmas, tgtLemmas) in auxLemmas.zip <| auxLemmas.eraseIdx! 0 do
    for (srcLemma, tgtLemma) in srcLemmas.zip tgtLemmas do
      insertTranslationAndInfo t srcLemma tgtLemma argInfo
/--
Find the argument of `nm` that appears in the first translatable (type-class) argument.
Returns 1 if there are no types with a translatable class as arguments.
E.g. `Prod.instGroup` returns 1, and `Pi.instOne` returns 2.
Note: we only consider the relevant argument (`(relevant_arg := ...)`) of each type-class.
E.g. `[Pow A N]` is a translatable type-class on `A`, not on `N`.
NOTE(review): the examples above read as 1-based positions, while the code returns a
0-based index (`arg.getD 0` below, and `relevantArg?` via `findIdx?`) — confirm the
intended convention against the callers. -/
def findRelevantArg (t : TranslateData) (nm : Name) : MetaM Nat := do
  forallTelescopeReducing (← getConstInfo nm).type fun xs ty ↦ do
    let env ← getEnv
    -- check if `tgt` has a translatable type argument, and if so,
    -- find the index of a type from `xs` appearing in there
    let relevantArg? (tgt : Expr) : Option Nat := do
      let c ← tgt.getAppFn.constName?
      guard (findTranslation? env t c).isSome
      let relevantArg := (t.relevantArgAttr.find? env c).getD 0
      let arg ← tgt.getArg? relevantArg
      xs.findIdx? (arg.containsFVar ·.fvarId!)
    -- run the above check on all hypotheses and on the conclusion,
    -- taking the first binder whose type matches.
    let arg ← OptionT.run <| xs.firstM fun x ↦ OptionT.mk do
      forallTelescope (← inferType x) fun _ys tgt ↦ return relevantArg? tgt
    let arg := arg <|> relevantArg? ty
    trace[translate_detail] "findRelevantArg: {arg}"
    return arg.getD 0
/-- Return the provided target name or autogenerate one if one was not provided. -/
def targetName (t : TranslateData) (cfg : Config) (src : Name) : CoreM Name := do
  -- `self` means the declaration is its own translation; any explicit name is ignored.
  if cfg.self then
    if cfg.tgt != .anonymous then
      logWarning m!"`{t.attrName} self` ignores the provided name {cfg.tgt}"
    return src
  let .str pre s := src | throwError "{t.attrName}: can't transport {src}"
  trace[translate_detail] "The name {s} splits as {open GuessName in s.splitCase}"
  -- guess a translation for the final component; the prefix is translated via the dictionary.
  let tgt_auto := GuessName.guessName t.guessNameData s
  let depth := cfg.tgt.getNumParts
  let pre := findPrefixTranslation (← getEnv) pre t
  -- a user-provided name with `depth` components replaces the last `depth` components of `pre.s`.
  let (pre1, pre2) := pre.splitAt (depth - 1)
  let res := if cfg.tgt == .anonymous then pre.str tgt_auto else pre1 ++ cfg.tgt
  if res == src then
    throwError "{t.attrName}: the generated translated name equals the original name '{src}'.\n\
      If this is intentional, use the `@[{t.attrName} self]` syntax.\n\
      Otherwise, check that your declaration name is correct \
      (if your declaration is an instance, try naming it)\n\
      or provide a translated name using the `@[{t.attrName} my_add_name]` syntax."
  -- lint when the user supplied exactly the name we would have generated anyway.
  if cfg.tgt == pre2.str tgt_auto && !cfg.allowAutoName then
    Linter.logLintIf linter.translateGenerateName cfg.ref m!"\
      `{t.attrName}` correctly autogenerated target name for {src}.\n\
      You may remove the explicit argument {cfg.tgt}."
  if cfg.tgt != .anonymous then
    trace[translate_detail] "The automatically generated name would be {pre.str tgt_auto}"
  return res
/-- if `f src = #[a_1, ..., a_n]` and `f tgt = #[b_1, ... b_n]` then `proceedFieldsAux src tgt f`
will insert translations from `a_i` to `b_i`. -/
def proceedFieldsAux (t : TranslateData) (src tgt : Name) (argInfo : ArgInfo)
    (f : Name → Array Name) : CoreM Unit := do
  let srcFields := f src
  let tgtFields := f tgt
  -- The two sides must pair up exactly, otherwise the mapping would be ambiguous.
  unless srcFields.size == tgtFields.size do
    throwError "Failed to map fields of {src}, {tgt} with {srcFields} ↦ {tgtFields}.\n \
      Lengths do not match."
  for srcField in srcFields, tgtField in tgtFields do
    insertTranslationAndInfo t srcField tgtField argInfo
/-- Add the structure fields of `src` to the translations dictionary
so that they will be translated correctly. -/
def proceedFields (t : TranslateData) (src tgt : Name) (argInfo : ArgInfo) : CoreM Unit := do
  let env ← getEnv
  -- `aux f` pairs up `f src` with `f tgt` componentwise and records each pair.
  let aux := proceedFieldsAux t src tgt argInfo
  -- add translations for the structure fields
  aux fun declName ↦
    if isStructure env declName then
      let info := getStructureInfo env declName
      Array.ofFn (n := info.fieldNames.size) (info.getProjFn? · |>.get!)
    else
      #[]
  -- add translations for the automatically generated instances with `extend`.
  -- (subobject parents are already covered by the field projections above)
  aux fun declName ↦
    if isStructure env declName then
      getStructureInfo env declName |>.parentInfo
        |>.filterMap fun c ↦ if !c.subobject then c.projFn else none
    else
      #[]
  -- add translations for the constructors of an inductive type
  aux fun declName ↦ match env.find? declName with
    | some (ConstantInfo.inductInfo { ctors, .. }) => ctors.toArray
    | _ => #[]
/-- Elaboration of the configuration options for a translation attribute. It is assumed that
- `stx[0]` is the attribute (e.g. `to_additive`)
- `stx[1]` is the optional tracing `?`
- `stx[2]` is the remaining `attrArgs` -/
def elabTranslationAttr (stx : Syntax) : CoreM Config :=
  match stx[2] with
  | `(attrArgs| $existing? $[$opts:bracketedOption]* $[$tgt]? $[$doc]?) => do
    let mut attrs := #[]
    let mut reorder := []
    let mut relevantArg? := none
    let mut dontTranslate := []
    -- collect all bracketed options; each may occur multiple times (except `relevant_arg`)
    for opt in opts do
      match opt with
      | `(bracketedOption| (attr := $[$stxs],*)) =>
        attrs := attrs ++ stxs
      | `(bracketedOption| (reorder := $[$[$reorders:num]*],*)) =>
        for cycle in reorders do
          if h : cycle.size = 1 then
            throwErrorAt cycle[0] "\
              invalid cycle `{cycle[0]}`, a cycle must have at least 2 elements.\n\
              `(reorder := ...)` uses cycle notation to specify a permutation.\n\
              For example `(reorder := 1 2, 5 6)` swaps the first two arguments with each other \
              and the fifth and the sixth argument and `(reorder := 3 4 5)` will move \
              the fifth argument before the third argument."
          -- positions are written 1-based by the user; store them 0-based.
          let cycle ← cycle.toList.mapM fun n => match n.getNat with
            | 0 => throwErrorAt n "invalid position `{n}`, positions are counted starting from 1."
            | n+1 => pure n
          reorder := cycle :: reorder
      | `(bracketedOption| (relevant_arg := $n)) =>
        if let some arg := relevantArg? then
          throwErrorAt opt "cannot specify `relevant_arg` multiple times"
        else
          -- also stored 0-based (`pred`).
          relevantArg? := n.getNat.pred
      | `(bracketedOption| (dont_translate := $[$types:ident]*)) =>
        dontTranslate := dontTranslate ++ types.toList
      | _ => throwUnsupportedSyntax
    -- `existing` / `self` name hints
    let (existing, self) := match existing? with
      | `(existingNameHint| existing) => (true, false)
      | `(existingNameHint| self) => (true, true)
      | _ => (false, false)
    if self && !attrs.isEmpty then
      throwError "invalid `(attr := ...)` after `self`, \
        as there is only one declaration for the attributes.\n\
        Instead, you can write the attributes in the usual way."
    trace[translate_detail] "attributes: {attrs}; reorder arguments: {reorder}"
    -- the docstring may be given either as a (deprecated) string literal or a doc comment
    let doc ← doc.mapM fun
      | `(str|$doc:str) => open Linter in do
        -- Deprecate `str` docstring syntax (since := "2025-08-12")
        if getLinterValue linter.deprecated (← getLinterOptions) then
          let hintSuggestion := {
            diffGranularity := .none
            toTryThisSuggestion := { suggestion := "/-- " ++ doc.getString.trim ++ " -/" }
          }
          let sugg ← Hint.mkSuggestionsMessage #[hintSuggestion] doc
            (codeActionPrefix? := "Update to: ") (forceList := false)
          logWarningAt doc <| .tagged ``Linter.deprecatedAttr
            m!"String syntax for `to_additive` docstrings is deprecated: Use \
              docstring syntax instead (e.g. `@[to_additive /-- example -/]`)\n\
              \n\
              Update deprecated syntax to:{sugg}"
        return doc.getString
      | `(docComment|$doc:docComment) => do
        -- TODO: rely on `addDocString`s call to `validateDocComment` after removing `str` support
        /-
        #adaptation_note
        Without understanding the consequences, I am commenting out the next line,
        as `validateDocComment` is now in `TermElabM` which is not trivial to reach from here.
        Perhaps the existing comments here suggest it is no longer needed, anyway?
        -/
        -- validateDocComment doc
        /- Note: the following replicates the behavior of `addDocString`. However, this means that
        trailing whitespace might appear in docstrings added via `docComment` syntax when compared
        to those added via `str` syntax. See this [Zulip thread](https://leanprover.zulipchat.com/#narrow/channel/270676-lean4/topic/Why.20do.20docstrings.20include.20trailing.20whitespace.3F/with/533553356). -/
        return (← getDocStringText doc).removeLeadingSpaces
      | _ => throwUnsupportedSyntax
    return {
      trace := !stx[1].isNone
      tgt := match tgt with | some tgt => tgt.getId | none => Name.anonymous
      doc, attrs, reorder, relevantArg?, dontTranslate, existing, self
      ref := match tgt with | some tgt => tgt.raw | none => stx[0] }
  | _ => throwUnsupportedSyntax
mutual
/-- Apply attributes to the original and translated declarations.
Returns the declarations created by a nested translation attribute (if any). -/
partial def applyAttributes (t : TranslateData) (stx : Syntax) (rawAttrs : Array Syntax)
    (src tgt : Name) (argInfo : ArgInfo) : TermElabM (Array Name) := do
  -- we only copy the `instance` attribute, since it is nice to directly tag `instance` declarations
  copyInstanceAttribute src tgt
  -- Warn users if the original declaration has an attribute
  if src != tgt && linter.existingAttributeWarning.get (← getOptions) then
    let appliedAttrs ← getAllSimpAttrs src
    if appliedAttrs.size > 0 then
      let appliedAttrs := ", ".intercalate (appliedAttrs.toList.map toString)
      -- Note: we're not bothering to print the correct attribute arguments.
      Linter.logLintIf linter.existingAttributeWarning stx m!"\
        The source declaration {src} was given the simp-attribute(s) {appliedAttrs} before \
        calling @[{t.attrName}].\nThe preferred method is to use something like \
        `@[{t.attrName} (attr := {appliedAttrs})]`\nto apply the attribute to both \
        {src} and the target declaration {tgt}."
    warnAttr stx Lean.Meta.Ext.extExtension
      (fun b n => (b.tree.values.any fun t => t.declName = n)) t.attrName `ext src tgt
    warnAttr stx Lean.Meta.Rfl.reflExt (·.values.contains ·) t.attrName `refl src tgt
    warnAttr stx Lean.Meta.Symm.symmExt (·.values.contains ·) t.attrName `symm src tgt
    warnAttr stx Batteries.Tactic.transExt (·.values.contains ·) t.attrName `trans src tgt
    warnAttr stx Lean.Meta.coeExt (·.contains ·) t.attrName `coe src tgt
    warnParametricAttr stx Lean.Linter.deprecatedAttr t.attrName `deprecated src tgt
    -- the next line also warns for `@[to_additive, simps]`, because of the application times
    warnParametricAttr stx simpsAttr t.attrName `simps src tgt
    warnAttrCore stx Term.elabAsElim.hasTag t.attrName `elab_as_elim src tgt
  -- add attributes
  -- the following is similar to `Term.ApplyAttributesCore`, but we hijack the implementation of
  -- `simps` and `to_additive`.
  let attrs ← elabAttrs rawAttrs
  let (additiveAttrs, attrs) := attrs.partition (·.name == t.attrName)
  -- a nested translation attribute is handled by recursing into `addTranslationAttr`.
  let nestedDecls ←
    match h : additiveAttrs.size with
    | 0 => pure #[]
    | 1 =>
      let cfg ← elabTranslationAttr additiveAttrs[0].stx
      addTranslationAttr t tgt cfg additiveAttrs[0].kind
    | _ => throwError "cannot apply {t.attrName} multiple times."
  let allDecls := #[src, tgt] ++ nestedDecls
  if attrs.size > 0 then
    trace[translate_detail] "Applying attributes {attrs.map (·.stx)} to {allDecls}"
  for attr in attrs do
    withRef attr.stx do withLogging do
    -- `simps` is hijacked so that the generated lemmas are themselves translated.
    if attr.name == `simps then
      translateLemmas t allDecls argInfo "simps lemmas" (simpsTacFromSyntax · attr.stx)
      return
    let env ← getEnv
    match getAttributeImpl env attr.name with
    | Except.error errMsg => throwError errMsg
    | Except.ok attrImpl =>
      let runAttr := do
        for decl in allDecls do
          attrImpl.add decl attr.stx attr.kind
      -- not truly an elaborator, but a sensible target for go-to-definition
      let elaborator := attrImpl.ref
      if (← getInfoState).enabled && (← getEnv).contains elaborator then
        withInfoContext (mkInfo := return .ofCommandInfo { elaborator, stx := attr.stx }) do
          try runAttr
          finally if attr.stx[0].isIdent || attr.stx[0].isAtom then
            -- Add an additional node over the leading identifier if there is one
            -- to make it look more function-like.
            -- Do this last because we want user-created infos to take precedence
            pushInfoLeaf <| .ofCommandInfo { elaborator, stx := attr.stx[0] }
      else
        runAttr
  return nestedDecls
/--
Copies equation lemmas and attributes from `src` to `tgt`
-/
partial def copyMetaData (t : TranslateData) (cfg : Config) (src tgt : Name) (argInfo : ArgInfo) :
CoreM (Array Name) := do
if let some eqns := eqnsAttribute.find? (← getEnv) src then
unless (eqnsAttribute.find? (← getEnv) tgt).isSome do
for eqn in eqns do
_ ← addTranslationAttr t eqn cfg
eqnsAttribute.add tgt (eqns.map (findTranslation? (← getEnv) t · |>.get!))
else
/- We need to generate all equation lemmas for `src` and `tgt`, even for non-recursive
definitions. If we don't do that, the equation lemma for `src` might be generated later
when doing a `rw`, but it won't be generated for `tgt`. -/
translateLemmas t #[src, tgt] argInfo "equation lemmas" fun nm ↦
(·.getD #[]) <$> MetaM.run' (getEqnsFor? nm)
MetaM.run' <| Elab.Term.TermElabM.run' <|
(applyAttributes t cfg.ref cfg.attrs src tgt) argInfo
/--
Make a new copy of a declaration, replacing fragments of the names of identifiers in the type and
the body using the `translations` dictionary.
-/
partial def transformDecl (t : TranslateData) (cfg : Config) (src tgt : Name)
(argInfo : ArgInfo := {}) : CoreM (Array Name) := do
transformDeclAux t cfg src tgt src
copyMetaData t cfg src tgt argInfo
/-- Verify that the type of given `srcDecl` translates to that of `tgtDecl`. -/
partial def checkExistingType (t : TranslateData) (src tgt : Name) (reorder : List (List Nat))
(dont : List Ident) : MetaM Unit := do
let mut srcDecl ← getConstInfo src
let tgtDecl ← getConstInfo tgt
if 0 ∈ reorder.flatten then
srcDecl := srcDecl.updateLevelParams srcDecl.levelParams.swapFirstTwo
unless srcDecl.levelParams.length == tgtDecl.levelParams.length do
throwError "`{t.attrName}` validation failed:\n expected {srcDecl.levelParams.length} \
universe levels, but '{tgt}' has {tgtDecl.levelParams.length} universe levels"
-- instantiate both types with the same universes. `instantiateLevelParams` applies some
-- normalization, so we have to apply it to both types.
let type := srcDecl.type.instantiateLevelParams
srcDecl.levelParams (tgtDecl.levelParams.map mkLevelParam)
let tgtType := tgtDecl.type.instantiateLevelParams
tgtDecl.levelParams (tgtDecl.levelParams.map mkLevelParam)
let dont ← getDontTranslates dont type
let type ← reorderForall reorder <| ← applyReplacementForall t dont <| ← unfoldAuxLemmas type
-- `instantiateLevelParams` normalizes universes, so we have to normalize both expressions
unless ← withReducible <| isDefEq type tgtType do
throwError "`{t.attrName}` validation failed: expected{indentExpr type}\nbut '{tgt}' has \
type{indentExpr tgtType}"
/-- `addTranslationAttr src cfg` adds a translation attribute to `src` with configuration `cfg`.
See the attribute implementation for more details.
It returns an array with names of translated declarations (usually 1, but more if there are nested
`to_additive` calls). -/
partial def addTranslationAttr (t : TranslateData) (src : Name) (cfg : Config)
(kind := AttributeKind.global) : AttrM (Array Name) := do
if (kind != AttributeKind.global) then
throwError "`{t.attrName}` can only be used as a global attribute"
withOptions (· |>.updateBool `trace.translate (cfg.trace || ·)) <| do
-- If `src` was already tagged, we allow the `(reorder := ...)` or `(relevant_arg := ...)` syntax
-- for updating this information on constants that are already tagged.
-- In particular, this is necessary for structure projections like `HPow.hPow`.
if let some tgt := findTranslation? (← getEnv) t src then
-- If `tgt` is not in the environment, the translation to `tgt` was added only for
-- translating the namespace, and `src` wasn't actually tagged.
if (← getEnv).contains tgt then
let mut updated := false
if cfg.reorder != [] then
modifyEnv (t.reorderAttr.addEntry · (src, cfg.reorder))
updated := true
if let some relevantArg := cfg.relevantArg? then
modifyEnv (t.relevantArgAttr.addEntry · (src, relevantArg))
updated := true
if updated then
MetaM.run' <| checkExistingType t src tgt cfg.reorder cfg.dontTranslate
return #[tgt]
throwError
"Cannot apply attribute @[{t.attrName}] to '{src}': it is already translated to '{tgt}'. \n\
If you need to set the `reorder` or `relevant_arg` option, this is still possible with the \n\
`@[{t.attrName} (reorder := ...)]` or `@[{t.attrName} (relevant_arg := ...)]` syntax."
let tgt ← targetName t cfg src
let alreadyExists := (← getEnv).contains tgt
if cfg.existing != alreadyExists && !(← isInductive src) && !cfg.self then
Linter.logLintIf linter.translateExisting cfg.ref <|
if alreadyExists then
m!"The translated declaration already exists. Please specify this explicitly using \
`@[{t.attrName} existing]`."
else
"The translated declaration doesn't exist. Please remove the option `existing`."
if alreadyExists then
MetaM.run' <| checkExistingType t src tgt cfg.reorder cfg.dontTranslate
let relevantArg ← cfg.relevantArg?.getDM <| MetaM.run' <| findRelevantArg t src
let argInfo := { reorder := cfg.reorder, relevantArg }
insertTranslationAndInfo t src tgt argInfo alreadyExists
let nestedNames ←
if alreadyExists then
-- since `tgt` already exists, we just need to copy metadata and
-- add translations `src.x ↦ tgt.x'` for any subfields.
trace[translate_detail] "declaration {tgt} already exists."
proceedFields t src tgt argInfo
copyMetaData t cfg src tgt argInfo
else
-- tgt doesn't exist, so let's make it
transformDecl t cfg src tgt argInfo
-- add pop-up information when mousing over the given translated name
-- (the information will be over the attribute if no translated name is given)
pushInfoLeaf <| .ofTermInfo {
elaborator := .anonymous, lctx := {}, expectedType? := none, isBinder := !alreadyExists,
stx := cfg.ref, expr := ← mkConstWithLevelParams tgt }
if let some doc := cfg.doc then
addDocStringCore tgt doc
return nestedNames.push tgt
end
end Mathlib.Tactic.Translate |
.lake/packages/mathlib/Mathlib/Tactic/Translate/GuessName.lean | import Std.Data.TreeMap.Basic
import Mathlib.Data.String.Defs
/-!
# Name generation APIs for `to_additive`-like attributes
-/
open Std
namespace Mathlib.Tactic.GuessName
open GuessName -- currently needed to enable projection notation
/-- The data that is required to guess the name of a translation. -/
structure GuessNameData where
/--
Dictionary used by `guessName` to autogenerate names.
This only transforms single name components, unlike `abbreviationDict`.
Note: `guessName` capitalizes the output according to the capitalization of the input.
In order for this to work, the input should always start with a lower case letter, and the output
should always start with an upper case letter.
-/
nameDict : Std.HashMap String (List String)
/--
We need to fix a few abbreviations after applying `nameDict`, i.e. replacing `ZeroLE` by `Nonneg`.
This dictionary contains these fixes.
The input should contain entries that are in `lowerCamelCase` (e.g. `ltzero`; the initial sequence
of capital letters should be lower-cased) and the output should be in `UpperCamelCase`
(e.g. `LTZero`).
When applying the dictionary, we lower-case the output if the input was also given in lower-case.
-/
abbreviationDict : Std.HashMap String String
/-- A set of strings of names that end in a capital letter.
* If the string contains a lowercase letter, the string should be split between the first occurrence
of a lower-case letter followed by an upper-case letter.
* If multiple strings have the same prefix, they should be grouped by prefix
* In this case, the second list should be prefix-free
(no element can be a prefix of a later element)
Todo: automate the translation from `String` to an element in this `TreeMap`
(but this would require having something similar to the `rb_lmap` from Lean 3). -/
def endCapitalNames : TreeMap String (List String) compare :=
-- todo: we want something like
-- endCapitalNamesOfList ["LE", "LT", "WF", "CoeTC", "CoeT", "CoeHTCT"]
.ofList [("LE", [""]), ("LT", [""]), ("WF", [""]), ("Coe", ["TC", "T", "HTCT"])]
open String in
/-- This function takes a String and splits it into separate parts based on the following
[naming conventions](https://github.com/leanprover-community/mathlib4/wiki#naming-convention).
E.g. `#eval "InvHMulLEConjugate₂SMul_ne_top".splitCase` yields
`["Inv", "HMul", "LE", "Conjugate₂", "SMul", "_", "ne", "_", "top"]`. -/
partial def String.splitCase (s : String) (i₀ : Pos.Raw := 0) (r : List String := []) :
List String := Id.run do
-- We test if we need to split between `i₀` and `i₁`.
let i₁ := i₀.next s
if i₁.atEnd s then
-- If `i₀` is the last position, return the list.
let r := s::r
return r.reverse
/- We split the string in three cases
* We split on both sides of `_` to keep them there when rejoining the string;
* We split after a name in `endCapitalNames`;
* We split after a lower-case letter that is followed by an upper-case letter
(unless it is part of a name in `endCapitalNames`). -/
if i₀.get s == '_' || i₁.get s == '_' then
return splitCase (String.Pos.Raw.extract s i₁ s.endPos) 0 <| (String.Pos.Raw.extract s 0 i₁)::r
if (i₁.get s).isUpper then
if let some strs := endCapitalNames[String.Pos.Raw.extract s 0 i₁]? then
if let some (pref, newS) := strs.findSome?
fun x : String ↦ (String.Pos.Raw.extract s i₁ s.endPos).dropPrefix? x
|>.map (x, ·.toString) then
return splitCase newS 0 <| (String.Pos.Raw.extract s 0 i₁ ++ pref)::r
if !(i₀.get s).isUpper then
return splitCase (String.Pos.Raw.extract s i₁ s.endPos) 0 <|
(String.Pos.Raw.extract s 0 i₁)::r
return splitCase s i₁ r
/-- Replaces characters in `s` by lower-casing the first characters until a non-upper-case character
is found. -/
partial def String.decapitalizeSeq (s : String) (i : String.Pos.Raw := 0) : String :=
if i.atEnd s || !(i.get s).isUpper then
s
else
decapitalizeSeq (i.set s (i.get s).toLower) <| i.next s
/-- If `r` starts with an upper-case letter, return `s`, otherwise return `s` with the
initial sequence of upper-case letters lower-cased. -/
def decapitalizeLike (r : String) (s : String) :=
if String.Pos.Raw.get r 0 |>.isUpper then s else s.decapitalizeSeq
/-- Decapitalize the first element of a list if `s` starts with a lower-case letter.
Note that we need to decapitalize multiple characters in some cases,
in examples like `HMul` or `HAdd`. -/
def decapitalizeFirstLike (s : String) : List String → List String
| x :: r => decapitalizeLike s x :: r
| [] => []
/--
Apply the `nameDict` and decapitalize the output like the input.
E.g.
```
#eval applyNameDict ["Inv", "HMul", "LE", "Conjugate₂", "SMul", "_", "ne", "_", "top"]
```
yields `["Neg", "HAdd", "LE", "Conjugate₂", "VAdd", "_", "ne", "_", "top"]`.
-/
def applyNameDict (g : GuessNameData) : List String → List String
| x :: s =>
let z := match g.nameDict.get? x.toLower with
| some y => decapitalizeFirstLike x y
| none => [x]
z ++ applyNameDict g s
| [] => []
/-- Helper for `fixAbbreviation`.
Note: this function has a quadratic number of recursive calls, but is not a performance
bottleneck. -/
def fixAbbreviationAux (g : GuessNameData) : List String → List String → String
| [], [] => ""
| [], x::s => x ++ fixAbbreviationAux g s []
| pre::l, s' =>
let s := s' ++ [pre]
let t := String.join s
/- If a name starts with upper-case, and contains an underscore, it cannot match anything in
the abbreviation dictionary. This is necessary to correctly translate something like
`fixAbbreviation ["eventually", "LE", "_", "one"]` to `"eventuallyLE_one"`, since otherwise the
substring `LE_zero` gets replaced by `Nonpos`. -/
if pre == "_" && (String.Pos.Raw.get t 0).isUpper then
s[0]! ++ fixAbbreviationAux g (s.drop 1 ++ l) []
else match g.abbreviationDict.get? t.decapitalizeSeq with
| some post => decapitalizeLike t post ++ fixAbbreviationAux g l []
| none => fixAbbreviationAux g l s
termination_by l s => (l.length + s.length, l.length)
decreasing_by all_goals grind
/-- Replace substrings according to `abbreviationDict`, matching the case of the first letter.
Example:
```
#eval applyNameDict ["Mul", "Support"]
```
gives the preliminary translation `["Add", "Support"]`. Subsequently
```
#eval fixAbbreviation ["Add", "Support"]
```
"fixes" this translation and returns `Support`.
-/
def fixAbbreviation (g : GuessNameData) (l : List String) : String :=
fixAbbreviationAux g l []
/--
Autogenerate additive name.
This runs in several steps:
1) Split according to capitalisation rule and at `_`.
2) Apply word-by-word translation rules.
3) Fix up abbreviations that are not word-by-word translations, like "addComm" or "Nonneg".
-/
def guessName (g : GuessNameData) : String → String :=
String.mapTokens '\'' <|
fun s =>
fixAbbreviation g <|
applyNameDict g <|
s.splitCase
end Mathlib.Tactic.GuessName |
.lake/packages/mathlib/Mathlib/Tactic/Translate/ToDual.lean | import Mathlib.Tactic.Translate.Core
/-!
# The `@[to_dual]` attribute.
The `@[to_dual]` attribute is used to translate declarations to their dual equivalent.
See the docstrings of `to_dual` and `to_additive` for more information.
Known limitations:
- Reordering arguments of arguments is not yet supported.
This usually comes up in constructors of structures. e.g. `Pow.mk` or `OrderTop.mk`
- When combining `to_additive` and `to_dual`, we need to make sure that all translations are added.
For example `attribute [to_dual (attr := to_additive) le_mul] mul_le` should generate
`le_mul`, `le_add` and `add_le`, and in particular should realize that `le_add` and `add_le`
are dual to each other. Currently, this requires writing
`attribute [to_dual existing le_add] add_le`.
-/
namespace Mathlib.Tactic.ToDual
open Lean Meta Elab Command Std Translate
@[inherit_doc TranslateData.ignoreArgsAttr]
syntax (name := to_dual_ignore_args) "to_dual_ignore_args" (ppSpace num)* : attr
@[inherit_doc relevantArgOption]
syntax (name := to_dual_relevant_arg) "to_dual_relevant_arg " num : attr
@[inherit_doc TranslateData.dontTranslateAttr]
syntax (name := to_dual_dont_translate) "to_dual_dont_translate" : attr
/-- The attribute `to_dual` can be used to automatically transport theorems
and definitions (but not inductive types and structures) to their dual version.
It uses the same implementation as `to_additive`.
To use this attribute, just write:
```
@[to_dual]
theorem max_comm' {α} [LinearOrder α] (x y : α) : max x y = max y x := max_comm x y
```
This code will generate a theorem named `min_comm'`. It is also
possible to manually specify the name of the new declaration:
```
@[to_dual le_max_left]
lemma min_le_left (a b : α) : min a b ≤ a := sorry
```
An existing documentation string will _not_ be automatically used, so if the theorem or definition
has a doc string, a doc string for the dual version should be passed explicitly to `to_dual`.
```
/-- The maximum is commutative. -/
@[to_dual /-- The minimum is commutative. -/]
theorem max_comm' {α} [LinearOrder α] (x y : α) : max x y = max y x := max_comm x y
```
Use the `(reorder := ...)` syntax to reorder the arguments compared to the dual declaration.
This is specified using cycle notation. For example `(reorder := 1 2, 5 6)` swaps the first two
arguments with each other and the fifth and the sixth argument and `(reorder := 3 4 5)` will move
the fifth argument before the third argument. For example, this is used to tag `LE.le`
with `(reorder := 3 4)`, so that `a ≤ b` gets transformed into `b ≤ a`.
Use the `to_dual self` syntax to use the lemma as its own dual. This is often
combined with the `(reorder := ...)` syntax, because a lemma is usually dual to itself only
up to some reordering of its arguments.
Use the `to_dual existing` syntax to use an existing dual declaration,
instead of automatically generating it.
Use the `(attr := ...)` syntax to apply attributes to both the original and the dual version:
```
@[to_dual (attr := simp)] lemma min_self (a : α) : min a a = a := sorry
```
-/
syntax (name := to_dual) "to_dual" "?"? attrArgs : attr
@[inherit_doc to_dual]
macro "to_dual?" rest:attrArgs : attr => `(attr| to_dual ? $rest)
@[inherit_doc to_dual_ignore_args]
initialize ignoreArgsAttr : NameMapExtension (List Nat) ←
registerNameMapAttribute {
name := `to_dual_ignore_args
descr :=
"Auxiliary attribute for `to_dual` stating that certain arguments are not dualized."
add := fun _ stx ↦ do
let ids ← match stx with
| `(attr| to_dual_ignore_args $[$ids:num]*) => pure <| ids.map (·.1.isNatLit?.get!)
| _ => throwUnsupportedSyntax
return ids.toList }
/-- An extension that stores all the declarations that need their arguments reordered when
applying `@[to_dual]`. It is applied using the `to_dual (reorder := ...)` syntax. -/
initialize reorderAttr : NameMapExtension (List (List Nat)) ←
registerNameMapExtension _
@[inherit_doc to_dual_relevant_arg]
initialize relevantArgAttr : NameMapExtension Nat ←
registerNameMapAttribute {
name := `to_dual_relevant_arg
descr := "Auxiliary attribute for `to_dual` stating \
which arguments are the types with a dual structure."
add := fun
| _, `(attr| to_dual_relevant_arg $id) => pure <| id.1.isNatLit?.get!.pred
| _, _ => throwUnsupportedSyntax }
@[inherit_doc to_dual_dont_translate]
initialize dontTranslateAttr : NameMapExtension Unit ←
registerNameMapAttribute {
name := `to_dual_dont_translate
descr := "Auxiliary attribute for `to_dual` stating \
that the operations on this type should not be translated."
add := fun
| _, `(attr| to_dual_dont_translate) => return
| _, _ => throwUnsupportedSyntax }
/-- Maps names to their dual counterparts. -/
initialize translations : NameMapExtension Name ← registerNameMapExtension _
@[inherit_doc GuessName.GuessNameData.nameDict]
def nameDict : Std.HashMap String (List String) := .ofList [
("top", ["Bot"]),
("bot", ["Top"]),
("inf", ["Sup"]),
("sup", ["Inf"]),
("min", ["Max"]),
("max", ["Min"]),
("untop", ["Unbot"]),
("unbot", ["Untop"]),
("epi", ["Mono"]),
("mono", ["Epi"]),
("terminal", ["Initial"]),
("initial", ["Terminal"]),
("precompose", ["Postcompose"]),
("postcompose", ["Precompose"]),
("cone", ["Cocone"]),
("cocone", ["Cone"]),
("cones", ["Cocones"]),
("cocones", ["Cones"]),
("fan", ["Cofan"]),
("cofan", ["Fan"]),
("limit", ["Colimit"]),
("colimit", ["Limit"]),
("limits", ["Colimits"]),
("colimits", ["Limits"]),
("product", ["Coproduct"]),
("coproduct", ["Product"]),
("products", ["Coproducts"]),
("coproducts", ["Products"]),
("pushout", ["Pullback"]),
("pullback", ["Pushout"]),
("pushouts", ["Pullbacks"]),
("pullbacks", ["Pushouts"]),
("span", ["Cospan"]),
("cospan", ["Span"]),
("kernel", ["Cokernel"]),
("cokernel", ["Kernel"]),
("kernels", ["Cokernel"]),
("cokernels", ["Kernel"]),
("unit", ["Counit"]),
("counit", ["Unit"]),
("monad", ["Comonad"]),
("comonad", ["Monad"]),
("monadic", ["Comonadic"]),
("comonadic", ["Monadic"])]
@[inherit_doc GuessName.GuessNameData.abbreviationDict]
def abbreviationDict : Std.HashMap String String := .ofList []
/-- The bundle of environment extensions for `to_dual` -/
def data : TranslateData where
ignoreArgsAttr := ignoreArgsAttr
reorderAttr := reorderAttr
relevantArgAttr := relevantArgAttr
dontTranslateAttr := dontTranslateAttr
translations := translations
attrName := `to_dual
changeNumeral := false
isDual := true
guessNameData := { nameDict, abbreviationDict }
initialize registerBuiltinAttribute {
name := `to_dual
descr := "Transport to dual"
add := fun src stx kind ↦ discard do
addTranslationAttr data src (← elabTranslationAttr stx) kind
applicationTime := .afterCompilation
}
end Mathlib.Tactic.ToDual |
.lake/packages/mathlib/Mathlib/Tactic/Simps/Basic.lean | import Lean.Elab.Tactic.Simp
import Lean.Elab.App
import Mathlib.Tactic.Simps.NotationClass
import Mathlib.Lean.Expr.Basic
import Mathlib.Tactic.Basic
/-!
# Simps attribute
This file defines the `@[simps]` attribute, to automatically generate `simp` lemmas
reducing a definition when projections are applied to it.
## Implementation Notes
There are three attributes being defined here
* `@[simps]` is the attribute for objects of a structure or instances of a class. It will
automatically generate simplification lemmas for each projection of the object/instance that
contains data. See the doc strings for `Lean.Parser.Attr.simps` and `Simps.Config`
for more details and configuration options.
* `structureExt` (just an environment extension, not actually an attribute)
is automatically added to structures that have been used in `@[simps]`
at least once. This attribute contains the data of the projections used for this structure
by all following invocations of `@[simps]`.
* `@[notation_class]` should be added to all classes that define notation, like `Mul` and
`Zero`. This specifies that the projections that `@[simps]` used are the projections from
these notation classes instead of the projections of the superclasses.
Example: if `Mul` is tagged with `@[notation_class]` then the projection used for `Semigroup`
will be `fun α hα ↦ @Mul.mul α (@Semigroup.toMul α hα)` instead of `@Semigroup.mul`.
[this is not correctly implemented in Lean 4 yet]
### Possible Future Improvements
* If multiple declarations are generated from a `simps` without explicit projection names, then
only the first one is shown when mousing over `simps`.
## Changes w.r.t. Lean 3
There are some small changes in the attribute. None of them should have great effects
* The attribute will now raise an error if it tries to generate a lemma when there already exists
a lemma with that name (in Lean 3 it would generate a different unique name)
* `transparency.none` has been replaced by `TransparencyMode.reducible`
* The `attr` configuration option has been split into `isSimp` and `attrs` (for extra attributes)
* Because Lean 4 uses bundled structures, this means that `simps` applied to anything that
implements a notation class will almost certainly require a user-provided custom simps projection.
## Tags
structures, projections, simp, simplifier, generates declarations
-/
open Lean Elab Parser Command
open Meta hiding Config
open Elab.Term hiding mkConst
/-- An internal representation of a name to be used for a generated lemma. -/
private structure NameStruct where
/-- The namespace that the final name will reside in. -/
parent : Name
/-- A list of pieces to be joined by `toName`. -/
components : List String
/-- Join the components with `_`, or append `_def` if there is only one component. -/
private def NameStruct.toName (n : NameStruct) : Name :=
Name.mkStr n.parent <|
match n.components with
| [] => ""
| [x] => s!"{x}_def"
| e => "_".intercalate e
instance : Coe NameStruct Name where coe := NameStruct.toName
/-- `update nm s isPrefix` adds `s` to the last component of `nm`,
either as prefix or as suffix (specified by `isPrefix`).
Used by `simps_add_projections`. -/
private def NameStruct.update (nm : NameStruct) (s : String) (isPrefix : Bool := false) :
NameStruct :=
{ nm with components := if isPrefix then s :: nm.components else nm.components ++ [s] }
-- move
namespace Lean.Meta
open Tactic Simp
/-- Make `MkSimpContextResult` giving data instead of Syntax. Doesn't support arguments.
Intended to be very similar to `Lean.Elab.Tactic.mkSimpContext`
Todo: support arguments. -/
def mkSimpContextResult (cfg : Meta.Simp.Config := {}) (simpOnly := false) (kind := SimpKind.simp)
(dischargeWrapper := DischargeWrapper.default) (hasStar := false) :
MetaM MkSimpContextResult := do
match dischargeWrapper with
| .default => pure ()
| _ =>
if kind == SimpKind.simpAll then
throwError "'simp_all' tactic does not support 'discharger' option"
if kind == SimpKind.dsimp then
throwError "'dsimp' tactic does not support 'discharger' option"
let simpTheorems ← if simpOnly then
simpOnlyBuiltins.foldlM (·.addConst ·) ({} : SimpTheorems)
else
getSimpTheorems
let simprocs := #[← if simpOnly then pure {} else Simp.getSimprocs]
let congrTheorems ← getSimpCongrTheorems
let ctx : Simp.Context ← Simp.mkContext cfg
(simpTheorems := #[simpTheorems])
(congrTheorems := congrTheorems)
if !hasStar then
return { ctx, simprocs, dischargeWrapper }
else
let mut simpTheorems := ctx.simpTheorems
let hs ← getPropHyps
for h in hs do
unless simpTheorems.isErased (.fvar h) do
simpTheorems ← simpTheorems.addTheorem (.fvar h) (← h.getDecl).toExpr
let ctx := ctx.setSimpTheorems simpTheorems
return { ctx, simprocs, dischargeWrapper }
/-- Make `Simp.Context` giving data instead of Syntax. Doesn't support arguments.
Intended to be very similar to `Lean.Elab.Tactic.mkSimpContext`
Todo: support arguments. -/
def mkSimpContext (cfg : Meta.Simp.Config := {}) (simpOnly := false) (kind := SimpKind.simp)
(dischargeWrapper := DischargeWrapper.default) (hasStar := false) :
MetaM Simp.Context := do
let data ← mkSimpContextResult cfg simpOnly kind dischargeWrapper hasStar
return data.ctx
end Lean.Meta
namespace Lean.Parser
namespace Attr
/-! Declare notation classes. -/
attribute [notation_class add] HAdd
attribute [notation_class mul] HMul
attribute [notation_class sub] HSub
attribute [notation_class div] HDiv
attribute [notation_class mod] HMod
attribute [notation_class append] HAppend
attribute [notation_class pow Simps.copyFirst] HPow
attribute [notation_class andThen] HAndThen
attribute [notation_class] Neg Dvd LE LT HasEquiv HasSubset HasSSubset Union Inter SDiff Insert
Singleton Sep Membership
attribute [notation_class one Simps.findOneArgs] OfNat
attribute [notation_class zero Simps.findZeroArgs] OfNat
/-- An `(attr := ...)` option for `simps`. -/
syntax simpsOptAttrOption := atomic(" (" &"attr" " := " Parser.Term.attrInstance,* ")")?
/-- Arguments to `@[simps]` attribute.
Currently, a potential `(attr := ...)` argument has to come before other configuration options. -/
syntax simpsArgsRest := simpsOptAttrOption Tactic.optConfig (ppSpace ident)*
/-- The `@[simps]` attribute automatically derives lemmas specifying the projections of this
declaration.
Example:
```lean
@[simps] def foo : ℕ × ℤ := (1, 2)
```
derives two `simp` lemmas:
```lean
@[simp] lemma foo_fst : foo.fst = 1
@[simp] lemma foo_snd : foo.snd = 2
```
* It does not derive `simp` lemmas for the prop-valued projections.
* It will automatically reduce newly created beta-redexes, but will not unfold any definitions.
* If the structure has a coercion to either sorts or functions, and this is defined to be one
of the projections, then this coercion will be used instead of the projection.
* If the structure is a class that has an instance to a notation class, like `Neg` or `Mul`,
then this notation is used instead of the corresponding projection.
* You can specify custom projections, by giving a declaration with name
`{StructureName}.Simps.{projectionName}`. See Note [custom simps projection].
Example:
```lean
def Equiv.Simps.invFun (e : α ≃ β) : β → α := e.symm
@[simps] def Equiv.trans (e₁ : α ≃ β) (e₂ : β ≃ γ) : α ≃ γ :=
⟨e₂ ∘ e₁, e₁.symm ∘ e₂.symm⟩
```
generates
```
@[simp] lemma Equiv.trans_toFun : ∀ {α β γ} (e₁ e₂) (a : α), ⇑(e₁.trans e₂) a = (⇑e₂ ∘ ⇑e₁) a
@[simp] lemma Equiv.trans_invFun : ∀ {α β γ} (e₁ e₂) (a : γ),
⇑((e₁.trans e₂).symm) a = (⇑(e₁.symm) ∘ ⇑(e₂.symm)) a
```
* You can specify custom projection names, by specifying the new projection names using
`initialize_simps_projections`.
Example: `initialize_simps_projections Equiv (toFun → apply, invFun → symm_apply)`.
See `initialize_simps_projections` for more information.
* If one of the fields itself is a structure, this command will recursively create
`simp` lemmas for all fields in that structure.
* Exception: by default it will not recursively create `simp` lemmas for fields in the structures
`Prod`, `PProd`, and `Opposite`. You can give explicit projection names or change the value of
`Simps.Config.notRecursive` to override this behavior.
Example:
```lean
structure MyProd (α β : Type*) := (fst : α) (snd : β)
@[simps] def foo : Prod ℕ ℕ × MyProd ℕ ℕ := ⟨⟨1, 2⟩, 3, 4⟩
```
generates
```lean
@[simp] lemma foo_fst : foo.fst = (1, 2)
@[simp] lemma foo_snd_fst : foo.snd.fst = 3
@[simp] lemma foo_snd_snd : foo.snd.snd = 4
```
* You can use `@[simps proj1 proj2 ...]` to only generate the projection lemmas for the specified
projections.
* Recursive projection names can be specified using `proj1_proj2_proj3`.
This will create a lemma of the form `foo.proj1.proj2.proj3 = ...`.
Example:
```lean
structure MyProd (α β : Type*) := (fst : α) (snd : β)
@[simps fst fst_fst snd] def foo : Prod ℕ ℕ × MyProd ℕ ℕ := ⟨⟨1, 2⟩, 3, 4⟩
```
generates
```lean
@[simp] lemma foo_fst : foo.fst = (1, 2)
@[simp] lemma foo_fst_fst : foo.fst.fst = 1
@[simp] lemma foo_snd : foo.snd = {fst := 3, snd := 4}
```
* If one of the values is an eta-expanded structure, we will eta-reduce this structure.
Example:
```lean
structure EquivPlusData (α β) extends α ≃ β where
data : Bool
@[simps] def EquivPlusData.rfl {α} : EquivPlusData α α := { Equiv.refl α with data := true }
```
generates the following:
```lean
@[simp] lemma bar_toEquiv : ∀ {α : Sort*}, bar.toEquiv = Equiv.refl α
@[simp] lemma bar_data : ∀ {α : Sort*}, bar.data = true
```
This is true, even though Lean inserts an eta-expanded version of `Equiv.refl α` in the
definition of `bar`.
* You can add additional attributes to all lemmas generated by `simps` using e.g.
`@[simps (attr := grind =)]`.
* For configuration options, see the doc string of `Simps.Config`.
* The precise syntax is `simps (attr := a) config ident*`, where `a` is a list of attributes,
`config` declares configuration options and `ident*` is a list of desired projection names.
* Configuration options can be given using `(config := e)` where `e : Simps.Config`,
or by specifying options directly, like `-fullyApplied` or `(notRecursive := [])`.
* `@[simps]` reduces let-expressions where necessary.
* When option `trace.simps.verbose` is true, `simps` will print the projections it finds and the
lemmas it generates. The same can be achieved by using `@[simps?]`.
* Use `@[to_additive (attr := simps)]` to apply both `to_additive` and `simps` to a definition
This will also generate the additive versions of all `simp` lemmas.
-/
/- If one of the fields is a partially applied constructor, we will eta-expand it
(this likely never happens, so is not included in the official doc). -/
syntax (name := simps) "simps" "!"? "?"? simpsArgsRest : attr
@[inherit_doc simps] macro "simps?" rest:simpsArgsRest : attr => `(attr| simps ? $rest)
@[inherit_doc simps] macro "simps!" rest:simpsArgsRest : attr => `(attr| simps ! $rest)
@[inherit_doc simps] macro "simps!?" rest:simpsArgsRest : attr => `(attr| simps ! ? $rest)
@[inherit_doc simps] macro "simps?!" rest:simpsArgsRest : attr => `(attr| simps ! ? $rest)
end Attr
/-- Linter to check that `simps!` is used when needed -/
register_option linter.simpsNoConstructor : Bool := {
defValue := true
descr := "Linter to check that `simps!` is used" }
/-- Linter to check that no unused custom declarations are declared for simps. -/
register_option linter.simpsUnusedCustomDeclarations : Bool := {
defValue := true
descr := "Linter to check that no unused custom declarations are declared for simps" }
namespace Command
/-- Syntax for renaming a projection in `initialize_simps_projections`. -/
syntax simpsRule.rename := ident " → " ident
/-- Syntax for making a projection non-default in `initialize_simps_projections`. -/
syntax simpsRule.erase := "-" ident
/-- Syntax for making a projection default in `initialize_simps_projections`. -/
syntax simpsRule.add := "+" ident
/-- Syntax for making a projection prefix. -/
syntax simpsRule.prefix := &"as_prefix " ident
/-- Syntax for a single rule in `initialize_simps_projections`. -/
-- NOTE(review): `simpsRule.prefix` is presumably listed first so the `as_prefix` keyword is
-- tried before the bare-identifier alternatives — confirm before reordering.
syntax simpsRule := simpsRule.prefix <|> simpsRule.rename <|> simpsRule.erase <|> simpsRule.add
/-- Syntax for `initialize_simps_projections`: a structure name, optionally followed by a
parenthesized comma-separated list of rules. -/
syntax simpsProj := ppSpace ident (" (" simpsRule,+ ")")?
/--
This command allows customisation of the lemmas generated by `simps`.
By default, tagging a definition of an element `myObj` of a structure `MyStruct` with `@[simps]`
generates one `@[simp]` lemma `myObj_myProj` for each projection `myProj` of `MyStruct`. There are a
few exceptions to this general rule:
* For algebraic structures, we will automatically use the notation (like `Mul`)
  for the projections if such an instance is available.
* By default, the projections to parent structures are not default projections,
  but all the data-carrying fields are (including those in parent structures).

This default behavior is customisable as such:
* You can disable a projection by default by running
  `initialize_simps_projections MulEquiv (-invFun)`
  This will ensure that no simp lemmas are generated for this projection,
  unless this projection is explicitly specified by the user (as in
  `@[simps invFun] def myEquiv : MulEquiv _ _ := _`).
* Conversely, you can enable a projection by default by running
  `initialize_simps_projections MulEquiv (+toEquiv)`.
* You can specify custom names by writing e.g.
  `initialize_simps_projections MulEquiv (toFun → apply, invFun → symm_apply)`.
* If you want the projection name added as a prefix in the generated lemma name, you can use
  `as_prefix fieldName`:
  `initialize_simps_projections MulEquiv (toFun → coe, as_prefix coe)`
  Note that this does not influence the parsing of projection names: if you have a declaration
  `foo` and you want to apply the projections `snd`, `coe` (which is a prefix) and `fst`, in that
  order you can run `@[simps snd_coe_fst] def foo ...` and this will generate a lemma with the
  name `coe_foo_snd_fst`.

Here are a few extra pieces of information:
* Run `initialize_simps_projections?` (or `set_option trace.simps.verbose true`)
  to see the generated projections.
* Running `initialize_simps_projections MyStruct` without arguments is not necessary, it has the
  same effect if you just add `@[simps]` to a declaration.
* It is recommended to call `@[simps]` or `initialize_simps_projections` in the same file as the
  structure declaration. Otherwise, the projections could be generated multiple times in different
  files.

Some common uses:
* If you define a new homomorphism-like structure (like `MulHom`) you can just run
  `initialize_simps_projections` after defining the `DFunLike` instance (or instance that implies
  a `DFunLike` instance).
  ```
  instance {mM : Mul M} {mN : Mul N} : FunLike (MulHom M N) M N := ...
  initialize_simps_projections MulHom (toFun → apply)
  ```
  This will generate `foo_apply` lemmas for each declaration `foo`.
* If you prefer `coe_foo` lemmas that state equalities between functions, use
  `initialize_simps_projections MulHom (toFun → coe, as_prefix coe)`
  In this case you have to use `@[simps -fullyApplied]` whenever you call `@[simps]`.
* You can also initialize to use both, in which case you have to choose which one to use by default,
  by using either of the following
  ```
  initialize_simps_projections MulHom (toFun → apply, toFun → coe, as_prefix coe, -coe)
  initialize_simps_projections MulHom (toFun → apply, toFun → coe, as_prefix coe, -apply)
  ```
  In the first case, you can get both lemmas using `@[simps, simps -fullyApplied coe]` and in
  the second case you can get both lemmas using `@[simps -fullyApplied, simps apply]`.
* If you declare a new homomorphism-like structure (like `RelEmbedding`),
  then `initialize_simps_projections` will automatically find any `DFunLike` coercions
  that will be used as the default projection for the `toFun` field.
  ```
  initialize_simps_projections RelEmbedding (toFun → apply)
  ```
* If you have an isomorphism-like structure (like `Equiv`) you often want to define a custom
  projection for the inverse:
  ```
  def Equiv.Simps.symm_apply (e : α ≃ β) : β → α := e.symm
  initialize_simps_projections Equiv (toFun → apply, invFun → symm_apply)
  ```
-/
syntax (name := initialize_simps_projections)
  "initialize_simps_projections" "?"? simpsProj : command
-- The `?` spelling simply forwards the tracing flag to the underlying command.
@[inherit_doc «initialize_simps_projections»]
macro "initialize_simps_projections?" rest:simpsProj : command =>
  `(initialize_simps_projections ? $rest)
end Command
end Lean.Parser
-- Trace classes used throughout this file: `simps.verbose` reports the projections found and
-- the lemmas generated; `simps.debug` prints internal debugging information.
initialize registerTraceClass `simps.verbose
initialize registerTraceClass `simps.debug
namespace Simps
/-- Projection data for a single projection of a structure -/
structure ProjectionData where
  /-- The name used in the generated `simp` lemmas -/
  name : Name
  /-- An Expression used by simps for the projection. It must be definitionally equal to an original
  projection (or a composition of multiple projections).
  These Expressions can contain the universe parameters specified in the first argument of
  `structureExt`. -/
  expr : Expr
  /-- A list of natural numbers, which is the projection number(s) that have to be applied to the
  Expression. For example the list `[0, 1]` corresponds to applying the first projection of the
  structure, and then the second projection of the resulting structure (this assumes that the
  target of the first projection is a structure with at least two projections).
  The composition of these projections is required to be definitionally equal to the provided
  Expression. -/
  projNrs : List Nat
  /-- A Boolean specifying whether `simp` lemmas are generated for this projection by default. -/
  isDefault : Bool
  /-- A Boolean specifying whether this projection is written as prefix. -/
  isPrefix : Bool
  deriving Inhabited
/-- Render a `ProjectionData` as an anonymous-constructor listing `⟨name, expr, …⟩`
for trace output. -/
instance : ToMessageData ProjectionData where
  toMessageData
    | ⟨name, expr, projNrs, isDefault, isPrefix⟩ =>
      let fields := [toMessageData name, toMessageData expr, toMessageData projNrs,
        toMessageData isDefault, toMessageData isPrefix]
      .group <| .nest 1 <| "⟨" ++ .joinSep fields ("," ++ Format.line) ++ "⟩"
/--
The `Simps.structureExt` environment extension specifies the preferred projections of the given
structure, used by the `@[simps]` attribute.
- You can generate this with the command `initialize_simps_projections`.
- If not generated, the `@[simps]` attribute will generate this automatically.
- To change the default value, see Note [custom simps projection].
- The first argument is the list of names of the universe variables used in the structure
- The second argument is an array that consists of the projection data for each projection.
-/
initialize structureExt : NameMapExtension (List Name × Array ProjectionData) ←
  registerNameMapExtension (List Name × Array ProjectionData)
/-- Projection data used internally in `getRawProjections`. -/
structure ParsedProjectionData where
  /-- name for this projection used in the structure definition -/
  strName : Name
  /-- syntax that might have provided `strName` -/
  strStx : Syntax := .missing
  /-- name for this projection used in the generated `simp` lemmas -/
  newName : Name
  /-- syntax that provided `newName` -/
  newStx : Syntax := .missing
  /-- whether `simp` lemmas are generated for this projection by default
  (i.e. without the user naming this projection explicitly) -/
  isDefault : Bool := true
  /-- is the projection name a prefix? -/
  isPrefix : Bool := false
  /-- projection expression -/
  expr? : Option Expr := none
  /-- the list of projection numbers this expression corresponds to -/
  projNrs : Array Nat := #[]
  /-- is this a projection that is changed by the user? -/
  isCustom : Bool := false
/-- Turn `ParsedProjectionData` into `ProjectionData`, dropping the parsing-only fields.
The lemma name is the (possibly renamed) `newName`; a missing expression falls back to the
`Inhabited` default. -/
def ParsedProjectionData.toProjectionData (p : ParsedProjectionData) : ProjectionData :=
  { name := p.newName
    expr := p.expr?.getD default
    projNrs := p.projNrs.toList
    isDefault := p.isDefault
    isPrefix := p.isPrefix }
/-- Render a `ParsedProjectionData` as an anonymous-constructor listing of all nine fields
(in declaration order) for trace output. -/
instance : ToMessageData ParsedProjectionData where
  toMessageData p :=
    let fields := [toMessageData p.strName, toMessageData p.strStx, toMessageData p.newName,
      toMessageData p.newStx, toMessageData p.isDefault, toMessageData p.isPrefix,
      toMessageData p.expr?, toMessageData p.projNrs, toMessageData p.isCustom]
    .group <| .nest 1 <| "⟨" ++ .joinSep fields ("," ++ Format.line) ++ "⟩"
/-- The type of rules that specify how metadata for projections changes.
See `initialize_simps_projections`. -/
inductive ProjectionRule where
  /-- A renaming rule `before→after`.
  Each name comes with the syntax used to write the rule,
  which is used to declare hover information. -/
  | rename (oldName : Name) (oldStx : Syntax) (newName : Name) (newStx : Syntax) :
      ProjectionRule
  /-- An adding rule `+fieldName` -/
  | add : Name → Syntax → ProjectionRule
  /-- A hiding rule `-fieldName` -/
  | erase : Name → Syntax → ProjectionRule
  /-- A prefix rule `prefix fieldName` -/
  | prefix : Name → Syntax → ProjectionRule
/-- Render a `ProjectionRule` for trace messages: the constructor tag followed by its
arguments in `⟨…⟩`. -/
instance : ToMessageData ProjectionRule where
  toMessageData r :=
    -- shared layout: `head⟨arg, arg, …⟩`, grouped and nested like the other instances here
    let wrap (head : MessageData) (items : List MessageData) : MessageData :=
      .group <| .nest 1 <| head ++ .joinSep items ("," ++ Format.line) ++ "⟩"
    match r with
    | .rename a b c d =>
      wrap "rename ⟨" [toMessageData a, toMessageData b, toMessageData c, toMessageData d]
    | .add a b => wrap "+⟨" [toMessageData a, toMessageData b]
    | .erase a b => wrap "-⟨" [toMessageData a, toMessageData b]
    | .prefix a b => wrap "prefix ⟨" [toMessageData a, toMessageData b]
/-- Returns the projection information of a structure, as a human-readable message:
one line per default projection, plus (if any) a closing line listing the projections
for which no lemmas are generated. -/
def projectionsInfo (l : List ProjectionData) (pref : String) (str : Name) : MessageData :=
  let (defaults, nondefaults) := l.partition (·.isDefault)
  let defaultLines : List MessageData := defaults.map fun p ↦
    let prefixStr := if p.isPrefix then "(prefix) " else ""
    m!"Projection {prefixStr}{p.name}: {p.expr}"
  let omitted : MessageData :=
    String.join <| (nondefaults.map fun p : ProjectionData ↦ toString p.name).intersperse ", "
  let lines :=
    if nondefaults.isEmpty then defaultLines
    else defaultLines ++
      [("No lemmas are generated for the projections: " : MessageData) ++ omitted ++ "."]
  let body := MessageData.joinSep lines ("\n" : MessageData)
  m!"{pref} {str}:\n{body}"
/-- Find the indices of the projections that need to be applied to elaborate `$e.$projName`.
Example: If `e : α ≃+ β` and ``projName = `invFun`` then this returns `[0, 1]`, because the first
projection of `MulEquiv` is `toEquiv` and the second projection of `Equiv` is `invFun`. -/
def findProjectionIndices (strName projName : Name) : MetaM (List Nat) := do
  let env ← getEnv
  -- the (parent) structure that actually declares the field
  let baseStr ← match findField? env strName projName with
    | some str => pure str
    | none => throwError "{strName} has no field {projName} in parent structure"
  let fullProjName ← match getProjFnForField? env baseStr projName with
    | some nm => pure nm
    | none => throwError "no such field {projName}"
  -- chain of parent-structure projections leading from `strName` down to `baseStr`
  let pathToField ← match getPathToBaseStructure? env baseStr strName with
    | some path => pure path
    | none => throwError "no such field {projName}"
  let allProjs := pathToField ++ [fullProjName]
  return allProjs.map fun nm ↦ (env.getProjectionFnInfo? nm).get!.i
/--
A variant of `Substring.dropPrefix?` that does not consider `toFoo` to be a prefix to `toFoo_1`.
This is checked by inspecting whether the first character of the remaining part is a digit.
We use this variant because the latter is often a different field with an auto-generated name.
-/
private def dropPrefixIfNotNumber? (s : String) (pre : String) : Option Substring :=
  match s.dropPrefix? pre with
  | none => none
  | some rest =>
    -- reject the match when the remainder starts with a digit (auto-generated `_1`-style names)
    if rest.toString.data.head?.elim false Char.isDigit then none else some rest
/-- A variant of `String.isPrefixOf` that does not consider `toFoo` to be a prefix to `toFoo_1`. -/
private def isPrefixOfAndNotNumber (s p : String) : Bool :=
  match dropPrefixIfNotNumber? p s with
  | some _ => true
  | none => false
/-- A variant of `String.splitOn` that does not split `toFoo_1` into `toFoo` and `1`.
Works by splitting normally, then walking the segments in reverse and re-gluing any
digit-initial segment (with its delimiter) onto the segment that preceded it. -/
private def splitOnNotNumber (s delim : String) : List String :=
  (go (s.splitOn delim).reverse "").reverse where
  -- `segs` is the reversed segment list; `acc` holds delimiter-joined digit-initial
  -- segments waiting to be attached to the next non-digit segment.
  go (segs : List String) (acc : String) : List String := match segs with
    | [] => []
    | seg :: rest =>
      if seg.data.head?.elim false Char.isDigit then
        -- digit-initial: defer this segment, re-attaching the delimiter
        go rest (acc ++ delim ++ seg)
      else
        (seg ++ acc) :: go rest ""
/-- Auxiliary function of `getCompositeOfProjections`.
`proj` is the remaining (underscore-terminated) projection string to resolve, `e` is the
expression projected so far, `pos` accumulates the projection indices, and `args` the
telescope variables that will be re-abstracted at the end. -/
partial def getCompositeOfProjectionsAux (proj : String) (e : Expr) (pos : Array Nat)
    (args : Array Expr) : MetaM (Expr × Array Nat) := do
  let env ← getEnv
  let .const structName _ := (← whnf (← inferType e)).getAppFn |
    throwError "{e} doesn't have a structure as type"
  let projs := getStructureFieldsFlattened env structName
  -- for each (flattened) field, try to strip `<field>_` from the front of `proj`
  let projInfo := projs.toList.map fun p ↦ do
    ((← dropPrefixIfNotNumber? proj (p.lastComponentAsString ++ "_")).toString, p)
  -- `getLast?` picks the longest/most deeply flattened matching field
  let some (projRest, projName) := projInfo.reduceOption.getLast? |
    throwError "Failed to find constructor {proj.dropRight 1} in structure {structName}."
  let newE ← mkProjection e projName
  let newPos := pos ++ (← findProjectionIndices structName projName)
  -- we do this here instead of in a recursive call in order to not get an unnecessary eta-redex
  if projRest.isEmpty then
    let newE ← mkLambdaFVars args newE
    return (newE, newPos)
  let type ← inferType newE
  forallTelescopeReducing type fun typeArgs _tgt ↦ do
    getCompositeOfProjectionsAux projRest (mkAppN newE typeArgs) newPos (args ++ typeArgs)
/-- Suppose we are given a structure `str` and a projection `proj`, that could be multiple nested
projections (separated by `_`), where each projection could be a projection of a parent structure.
This function returns an expression that is the composition of these projections and a
list of natural numbers, that are the projection numbers of the applied projections.
Note that this function is similar to elaborating dot notation, but it can do a little more.
Example: if we do
```
structure gradedFun (A : ℕ → Type*) where
  toFun : ∀ i j, A i →+ A j →+ A (i + j)
initialize_simps_projections gradedFun (toFun_toFun_toFun → myMul)
```
we will be able to generate the "projection"
  `fun {A} (f : gradedFun A) (x : A i) (y : A j) ↦ ↑(↑(f.toFun i j) x) y`,
which projection notation cannot do. -/
def getCompositeOfProjections (structName : Name) (proj : String) : MetaM (Expr × Array Nat) := do
  let strExpr ← mkConstWithLevelParams structName
  let type ← inferType strExpr
  -- introduce the structure's parameters and one element `x` of the structure, then resolve
  -- the underscore-terminated projection string against it
  forallTelescopeReducing type fun typeArgs _ ↦
    withLocalDeclD `x (mkAppN strExpr typeArgs) fun e ↦
      getCompositeOfProjectionsAux (proj ++ "_") e #[] <| typeArgs.push e
/-- Get the default `ParsedProjectionData` for structure `str`.
It first returns the direct fields of the structure in the right order, and then
all (non-subobject fields) of all parent structures. The subobject fields are precisely the
non-default fields. -/
def mkParsedProjectionData (structName : Name) : CoreM (Array ParsedProjectionData) := do
  let env ← getEnv
  let directFields := getStructureFields env structName
  if directFields.isEmpty then
    throwError "Declaration {structName} is not a structure."
  -- direct fields, in declaration order; subobject (parent-embedding) fields are non-default
  let directData := directFields.map fun fieldName ↦
    { strName := fieldName, newName := fieldName,
      isDefault := (isSubobjectField? env structName fieldName).isNone : ParsedProjectionData }
  -- flattened parent fields that are not already direct fields
  let inherited := (getStructureFieldsFlattened env structName false).filter
    (!directFields.contains ·)
  let inheritedData : Array ParsedProjectionData := inherited.map fun nm ↦
    { strName := nm, newName := nm }
  return directData ++ inheritedData
/-- Execute the projection renamings (and turning off projections) as specified by `rules`.
Each rule either updates the matching existing entry (matched on `newName`) or, when no entry
matches, pushes a fresh entry — the latter covers user-declared composite projections. -/
def applyProjectionRules (projs : Array ParsedProjectionData) (rules : Array ProjectionRule) :
    CoreM (Array ParsedProjectionData) := do
  let projs : Array ParsedProjectionData := rules.foldl (init := projs) fun projs rule ↦
    match rule with
    | .rename strName strStx newName newStx =>
      if (projs.map (·.newName)).contains strName then
        projs.map fun proj ↦
          if proj.newName == strName then
            { proj with
              newName, newStx,
              strStx := if proj.strStx.isMissing then strStx else proj.strStx }
          else proj
      else
        projs.push {strName, strStx, newName, newStx}
    | .erase nm stx =>
      if (projs.map (·.newName)).contains nm then
        projs.map fun proj ↦
          if proj.newName = nm then
            { proj with
              isDefault := false,
              strStx := if proj.strStx.isMissing then stx else proj.strStx }
          else proj
      else
        projs.push {strName := nm, newName := nm, strStx := stx, newStx := stx, isDefault := false}
    | .add nm stx =>
      if (projs.map (·.newName)).contains nm then
        projs.map fun proj ↦
          if proj.newName = nm then
            { proj with
              isDefault := true,
              strStx := if proj.strStx.isMissing then stx else proj.strStx }
          else proj
      else
        projs.push {strName := nm, newName := nm, strStx := stx, newStx := stx}
    | .prefix nm stx =>
      if (projs.map (·.newName)).contains nm then
        projs.map fun proj ↦
          if proj.newName = nm then
            { proj with
              isPrefix := true,
              strStx := if proj.strStx.isMissing then stx else proj.strStx }
          else proj
      else
        projs.push {strName := nm, newName := nm, strStx := stx, newStx := stx, isPrefix := true}
  trace[simps.debug] "Projection info after applying the rules: {projs}."
  -- duplicate generated names would produce clashing `simp` lemma names
  unless (projs.map (·.newName)).toList.Nodup do throwError "\
    Invalid projection names. Two projections have the same name.\n\
    This is likely because a custom composition of projections was given the same name as an \
    existing projection. Solution: rename the existing projection (before naming the \
    custom projection)."
  pure projs
/-- Auxiliary function for `getRawProjections`.
Generates the default projection, and looks for a custom projection declared by the user
(a definition named `{str}.Simps.{newName}`), and replaces the default projection with the
custom one, if it can find it and it is definitionally equal to the default. -/
def findProjection (str : Name) (proj : ParsedProjectionData)
    (rawUnivs : List Level) : CoreM ParsedProjectionData := do
  let env ← getEnv
  let (rawExpr, nrs) ← MetaM.run' <|
    getCompositeOfProjections str proj.strName.lastComponentAsString
  -- hover information for the name that appeared in the `initialize_simps_projections` syntax
  if !proj.strStx.isMissing then
    _ ← MetaM.run' <| TermElabM.run' <| addTermInfo proj.strStx rawExpr
  trace[simps.debug] "Projection {proj.newName} has default projection {rawExpr} and
    uses projection indices {nrs}"
  let customName := str ++ `Simps ++ proj.newName
  match env.find? customName with
  | some d@(.defnInfo _) =>
    -- a user-declared custom projection exists; check it against the default
    let customProj := d.instantiateValueLevelParams! rawUnivs
    trace[simps.verbose] "found custom projection for {proj.newName}:{indentExpr customProj}"
    match (← MetaM.run' <| isDefEq customProj rawExpr) with
    | true =>
      _ ← MetaM.run' <| TermElabM.run' <| addTermInfo proj.newStx <|
        ← mkConstWithLevelParams customName
      pure { proj with expr? := some customProj, projNrs := nrs, isCustom := true }
    | false =>
      -- if the type of the Expression is different, we show a different error message, because
      -- (in Lean 3) just stating that the expressions are different is quite unhelpful
      let customProjType ← MetaM.run' (inferType customProj)
      let rawExprType ← MetaM.run' (inferType rawExpr)
      if (← MetaM.run' (isDefEq customProjType rawExprType)) then
        throwError "Invalid custom projection:{indentExpr customProj}\n\
          Expression is not definitionally equal to {indentExpr rawExpr}"
      else
        throwError "Invalid custom projection:{indentExpr customProj}\n\
          Expression has different type than {str ++ proj.strName}. Given type:\
          {indentExpr customProjType}\nExpected type:{indentExpr rawExprType}\n\
          Note: make sure order of implicit arguments is exactly the same."
  | _ =>
    -- no custom projection: use the default one
    _ ← MetaM.run' <| TermElabM.run' <| addTermInfo proj.newStx rawExpr
    pure {proj with expr? := some rawExpr, projNrs := nrs}
/-- Checks if there are declarations in the current file in the namespace `{str}.Simps` that are
not used, and emits the `simpsUnusedCustomDeclarations` lint warning if so. -/
def checkForUnusedCustomProjs (stx : Syntax) (str : Name) (projs : Array ParsedProjectionData) :
    CoreM Unit := do
  let usedCount := projs.toList.countP (·.isCustom)
  let env ← getEnv
  -- `map₂` contains only the declarations added in the current file
  let customDecls := env.constants.map₂.foldl (init := #[]) fun acc declName _ =>
    if (str ++ `Simps).isPrefixOf declName && !declName.isInternalDetail
        && !isReservedName env declName then
      acc.push declName
    else
      acc
  -- more custom declarations than custom projections in use ⇒ something is unused
  if usedCount < customDecls.size then
    Linter.logLintIf linter.simpsUnusedCustomDeclarations stx m!"\
      Not all of the custom declarations {customDecls} are used. Double check the \
      spelling, and use `?` to get more information."
/-- If a structure has a field that corresponds to a coercion to functions or sets, or corresponds
to notation, find the custom projection that uses this coercion or notation.
Returns the custom projection and the name of the projection used.
We catch most errors this function causes, so that we don't fail if an unrelated projection has
an applicable name. (e.g. `Iso.inv`)
Implementation note: getting rid of TermElabM is tricky, since `Expr.mkAppOptM` doesn't allow to
keep metavariables around, which are necessary for `OutParam`. -/
def findAutomaticProjectionsAux (str : Name) (proj : ParsedProjectionData) (args : Array Expr) :
    TermElabM <| Option (Expr × Name) := do
  if let some ⟨className, isNotation, findArgs⟩ :=
      notationClassAttr.find? (← getEnv) proj.strName then
    -- `findArgs` is stored as the name of a declaration; evaluate it to get the actual function
    let findArgs ← unsafe evalConst findArgType findArgs
    let classArgs ← try findArgs str className args
      catch ex =>
        trace[simps.debug] "Projection {proj.strName} is likely unrelated to the projection of \
          {className}:\n{ex.toMessageData}"
        return none
    -- unknown class arguments become metavariables (necessary for `OutParam`s, see note above)
    let classArgs ← classArgs.mapM fun e => match e with
      | none => mkFreshExprMVar none
      | some e => pure e
    let classArgs := classArgs.map Arg.expr
    -- the class's first field is the projection we want to use (e.g. the coercion)
    let projName := (getStructureFields (← getEnv) className)[0]!
    let projName := className ++ projName
    let eStr := mkAppN (← mkConstWithLevelParams str) args
    let eInstType ←
      try withoutErrToSorry (elabAppArgs (← Term.mkConst className) #[] classArgs none true false)
      catch ex =>
        trace[simps.debug] "Projection doesn't have the right type for the automatic projection:\n\
          {ex.toMessageData}"
        return none
    return ← withLocalDeclD `self eStr fun instStr ↦ do
      trace[simps.debug] "found projection {proj.strName}. Trying to synthesize {eInstType}."
      let eInst ← try synthInstance eInstType
        catch ex =>
          trace[simps.debug] "Didn't find instance:\n{ex.toMessageData}"
          return none
      let projExpr ← elabAppArgs (← Term.mkConst projName) #[] (classArgs.push <| .expr eInst)
        none true false
      -- for notation classes the instance argument is part of the generated projection
      let projExpr ← mkLambdaFVars (if isNotation then args.push instStr else args) projExpr
      let projExpr ← instantiateMVars projExpr
      return (projExpr, projName)
  return none
/-- Auxiliary function for `getRawProjections`.
Find custom projections, automatically found by simps.
These come from `DFunLike` and `SetLike` instances.
Each automatically-found projection must be definitionally equal to the existing one;
user-supplied custom projections are kept (with a warning that they could be generated). -/
def findAutomaticProjections (str : Name) (projs : Array ParsedProjectionData) :
    CoreM (Array ParsedProjectionData) := do
  let strDecl ← getConstInfo str
  trace[simps.debug] "debug: {projs}"
  MetaM.run' <| TermElabM.run' (s := {levelNames := strDecl.levelParams}) <|
    forallTelescope strDecl.type fun args _ ↦ do
      let projs ← projs.mapM fun proj => do
        if let some (projExpr, projName) := ← findAutomaticProjectionsAux str proj args then
          unless ← isDefEq projExpr proj.expr?.get! do
            throwError "The projection {proj.newName} is not definitionally equal to an application \
              of {projName}:{indentExpr proj.expr?.get!}\nvs{indentExpr projExpr}"
          if proj.isCustom then
            -- keep the user's projection, but note that it was unnecessary
            trace[simps.verbose] "Warning: Projection {proj.newName} is given manually by the user, \
              but it can be generated automatically."
            return proj
          trace[simps.verbose] "Using {indentExpr projExpr}\nfor projection {proj.newName}."
          return { proj with expr? := some projExpr }
        return proj
      return projs
/--
Get the projections used by `simps` associated to a given structure `str`.
The returned information is also stored in the environment extension `Simps.structureExt`, which
is given to `str`. If `str` already has this attribute, the information is read from this
extension instead. See the documentation for this extension for the data this tactic returns.
The returned universe levels are the universe levels of the structure. For the projections there
are three cases
* If the declaration `{StructureName}.Simps.{projectionName}` has been declared, then the value
  of this declaration is used (after checking that it is definitionally equal to the actual
  projection. If you rename the projection name, the declaration should have the *new* projection
  name.
* You can also declare a custom projection that is a composite of multiple projections.
* Otherwise, for every class with the `notation_class` attribute, and the structure has an
  instance of that notation class, then the projection of that notation class is used for the
  projection that is definitionally equal to it (if there is such a projection).
  This means in practice that coercions to function types and sorts will be used instead of
  a projection, if this coercion is definitionally equal to a projection. Furthermore, for
  notation classes like `Mul` and `Zero` those projections are used instead of the
  corresponding projection.
  Projections for coercions and notation classes are not automatically generated if they are
  composites of multiple projections (for example when you use `extend` without the
  `oldStructureCmd` (does this exist?)).
* Otherwise, the projection of the structure is chosen.
  For example: ``getRawProjections env `Prod`` gives the default projections.
```
  ([u, v], [(`fst, `(Prod.fst.{u v}), [0], true, false),
            (`snd, `(@Prod.snd.{u v}), [1], true, false)])
```
Optionally, this command accepts three optional arguments:
* If `traceIfExists` the command will always generate a trace message when the structure already
  has an entry in `structureExt`.
* The `rules` argument specifies whether projections should be added, renamed, used as prefix, and
  not used by default.
* if `trc` is true, this tactic will trace information just as if
  `set_option trace.simps.verbose true` was set.
-/
def getRawProjections (stx : Syntax) (str : Name) (traceIfExists : Bool := false)
    (rules : Array ProjectionRule := #[]) (trc := false) :
    CoreM (List Name × Array ProjectionData) := do
  withOptions (· |>.updateBool `trace.simps.verbose (trc || ·)) <| do
    let env ← getEnv
    -- cached: an earlier `initialize_simps_projections`/`@[simps]` already computed the data
    if let some data := (structureExt.getState env).find? str then
      -- We always print the projections when they already exist and are called by
      -- `initialize_simps_projections`.
      withOptions (· |>.updateBool `trace.simps.verbose (traceIfExists || ·)) <| do
        trace[simps.verbose]
          projectionsInfo data.2.toList "The projections for this structure have already been \
            initialized by a previous invocation of `initialize_simps_projections` or `@[simps]`.\n\
            Generated projections for" str
      return data
    trace[simps.verbose] "generating projection information for structure {str}."
    trace[simps.debug] "Applying the rules {rules}."
    let strDecl ← getConstInfo str
    let rawLevels := strDecl.levelParams
    let rawUnivs := rawLevels.map Level.param
    -- pipeline: defaults → user rules → custom declarations → automatic (coercion) projections
    let projs ← mkParsedProjectionData str
    let projs ← applyProjectionRules projs rules
    let projs ← projs.mapM fun proj ↦ findProjection str proj rawUnivs
    checkForUnusedCustomProjs stx str projs
    let projs ← findAutomaticProjections str projs
    let projs := projs.map (·.toProjectionData)
    -- make all proofs non-default.
    let projs ← projs.mapM fun proj ↦ do
      match (← MetaM.run' <| isProof proj.expr) with
      | true => pure { proj with isDefault := false }
      | false => pure proj
    trace[simps.verbose] projectionsInfo projs.toList "generated projections for" str
    -- cache the result for subsequent invocations
    structureExt.add str (rawLevels, projs)
    trace[simps.debug] "Generated raw projection data:{indentD <| toMessageData (rawLevels, projs)}"
    pure (rawLevels, projs)
-- Fix: `library_note2` is not a command, and `library_note` takes a string literal,
-- not a guillemet-quoted name.
library_note "custom simps projection" /--
You can specify custom projections for the `@[simps]` attribute.
To do this for the projection `MyStructure.originalProjection`, add a declaration
`MyStructure.Simps.myProjection` that is definitionally equal to
`MyStructure.originalProjection` but has the projection in the desired (simp-normal) form.
Then you can call
```
initialize_simps_projections (originalProjection → myProjection, ...)
```
to register this projection. See `elabInitializeSimpsProjections` for more information.
You can also specify custom projections that are definitionally equal to a composite of multiple
projections. This is often desirable when extending structures (without `oldStructureCmd`).
`CoeFun` and notation class (like `Mul`) instances will be automatically used, if they
are definitionally equal to a projection of the structure (but not when they are equal to the
composite of multiple projections).
-/
/-- Parse a rule for `initialize_simps_projections`. It is `<name>→<name>`, `-<name>`, `+<name>`
or `as_prefix <name>`. The raw syntax of each identifier is kept alongside its name so that
hover information can be attached later. -/
def elabSimpsRule : Syntax → CommandElabM ProjectionRule
  | `(simpsRule| $id1 → $id2) => return .rename id1.getId id1.raw id2.getId id2.raw
  | `(simpsRule| - $id) => return .erase id.getId id.raw
  | `(simpsRule| + $id) => return .add id.getId id.raw
  | `(simpsRule| as_prefix $id) => return .prefix id.getId id.raw
  | _ => Elab.throwUnsupportedSyntax
/-- Function elaborating `initialize_simps_projections`: parses the rules, resolves the
structure name, and computes (and caches) the projection data via `getRawProjections`. -/
@[command_elab «initialize_simps_projections»] def elabInitializeSimpsProjections : CommandElab
  | stx@`(initialize_simps_projections $[?%$trc]? $id $[($stxs,*)]?) => do
    let stxs := stxs.getD <| .mk #[]
    let rules ← stxs.getElems.raw.mapM elabSimpsRule
    let nm ← resolveGlobalConstNoOverload id
    -- hover/go-to-definition information for the structure name
    _ ← liftTermElabM <| addTermInfo id.raw <| ← mkConstWithLevelParams nm
    -- `traceIfExists := true`: this command always reports already-initialized projections
    _ ← liftCoreM <| getRawProjections stx nm true rules trc.isSome
  | _ => throwUnsupportedSyntax
/-- Configuration options for `@[simps]` -/
structure Config where
  /-- Make generated lemmas simp lemmas -/
  isSimp := true
  /-- Other attributes to apply to generated lemmas. -/
  attrs : Array Syntax := #[]
  /-- simplify the right-hand side of generated simp-lemmas using `dsimp, simp`. -/
  simpRhs := false
  /-- TransparencyMode used to reduce the type in order to detect whether it is a structure. -/
  typeMd := TransparencyMode.instances
  /-- TransparencyMode used to reduce the right-hand side in order to detect whether it is a
  constructor. Note: was `none` in Lean 3 -/
  rhsMd := TransparencyMode.reducible
  /-- Generated lemmas that are fully applied, i.e. generates equalities between applied functions.
  Set this to `false` to generate equalities between functions. -/
  fullyApplied := true
  /-- List of types in which we are not recursing to generate simplification lemmas.
  E.g. if we write `@[simps] def e : α × β ≃ β × α := ...` we will generate `e_apply` and not
  `e_apply_fst`. -/
  notRecursive := [`Prod, `PProd, `Opposite, `PreOpposite]
  /-- Output debug messages. Not used much, use `set_option simps.debug true` instead. -/
  debug := false
  /-- The stem to use for the projection names. If `none`, the default, use the suffix of the
  current declaration name, or the empty string if the declaration is an instance and the instance
  is named according to the `inst` convention. -/
  nameStem : Option String := none
  deriving Inhabited
/-- Function elaborating `Config` (generated from the `Config` structure's fields). -/
declare_command_config_elab elabSimpsConfig Config
/-- `instantiateLambdasOrApps es e` instantiates lambdas in `e` by expressions from `es`.
If the length of `es` is larger than the number of lambdas in `e`,
then the term is applied to the remaining terms.
Also reduces head let-expressions in `e`, including those after instantiating all lambdas.
This is very similar to `expr.substs`, but this also reduces head let-expressions. -/
partial def _root_.Lean.Expr.instantiateLambdasOrApps (es : Array Expr) (e : Expr) : Expr :=
  -- `betaRev` expects the arguments reversed; the final `true` is `useZeta`, which makes it
  -- also reduce head `let`-expressions, as promised in the docstring.
  e.betaRev es.reverse true
/-- Get the projections of a structure used by `@[simps]` applied to the appropriate arguments.
Returns a list of tuples
```
(corresponding right-hand-side, given projection name, projection Expression,
  future projection numbers, used by default, is prefix)
```
(where all fields except the first are packed in a `ProjectionData` structure)
one for each projection. The given projection name is the name for the projection used by the user
used to generate (and parse) projection names. For example, in the structure
Example 1: ``getProjectionExprs env `(α × β) `(⟨x, y⟩)`` will give the output
```
[(`(x), `fst, `(@Prod.fst.{u v} α β), [], true, false),
 (`(y), `snd, `(@Prod.snd.{u v} α β), [], true, false)]
```
Example 2: ``getProjectionExprs env `(α ≃ α) `(⟨id, id, fun _ ↦ rfl, fun _ ↦ rfl⟩)``
will give the output
```
[(`(id), `apply, (Equiv.toFun), [], true, false),
 (`(id), `symm_apply, (fun e ↦ e.symm.toFun), [], true, false),
 ...,
 ...]
```
-/
def getProjectionExprs (stx : Syntax) (tgt : Expr) (rhs : Expr) (cfg : Config) :
    MetaM <| Array <| Expr × ProjectionData := do
  -- the parameters of the structure
  let params := tgt.getAppArgs
  if cfg.debug && !(← (params.zip rhs.getAppArgs).allM fun ⟨a, b⟩ ↦ isDefEq a b) then
    throwError "unreachable code: parameters are not definitionally equal"
  let str := tgt.getAppFn.constName?.getD default
  -- the fields of the object
  let rhsArgs := rhs.getAppArgs.toList.drop params.size
  let (rawUnivs, projDeclata) ← getRawProjections stx str
  projDeclata.mapM fun proj ↦ do
    let expr := proj.expr.instantiateLevelParams rawUnivs tgt.getAppFn.constLevels!
    -- after instantiating universes, we have to check again whether the expression is a proof.
    let proj := if ← isProof expr
      then { proj with isDefault := false }
      else proj
    -- pair each projection with the constructor field it projects to; the head projection
    -- number selects the field, the tail is left for deeper recursion
    return (rhsArgs.getD (fallback := default) proj.projNrs.head!,
      { proj with
        expr := expr.instantiateLambdasOrApps params
        projNrs := proj.projNrs.tail })
variable (ref : Syntax) (univs : List Name)
/-- Add a lemma named `declName` stating that `lhs = rhs`. `type` is the type of both `lhs` and
`rhs`, `args` is the list of local constants occurring, and `univs` is the list of universe
variables. If `cfg.simpRhs` is set, `rhs` is simplified (with `dsimp` then `simp`) before the
lemma is stated. -/
def addProjection (declName : Name) (type lhs rhs : Expr) (args : Array Expr)
(cfg : Config) : MetaM Unit := do
trace[simps.debug] "Planning to add the equality{indentD m!"{lhs} = ({rhs} : {type})"}"
let env ← getEnv
-- simplify `rhs` if `cfg.simpRhs` is true
let lvl ← getLevel type
-- start from the trivial proof `Eq.refl`; it is replaced below if `simp` rewrites `rhs`
let mut (rhs, prf) := (rhs, mkAppN (mkConst `Eq.refl [lvl]) #[type, lhs])
if cfg.simpRhs then
let ctx ← mkSimpContext
-- `dsimp` first: definitional rewrites need no proof term
let (rhs2, _) ← dsimp rhs ctx
if rhs != rhs2 then
trace[simps.debug] "`dsimp` simplified rhs to{indentExpr rhs2}"
else
trace[simps.debug] "`dsimp` failed to simplify rhs"
let (result, _) ← simp rhs2 ctx
if rhs2 != result.expr then
trace[simps.debug] "`simp` simplified rhs to{indentExpr result.expr}"
else
trace[simps.debug] "`simp` failed to simplify rhs"
rhs := result.expr
-- `simp` may close the goal by `rfl`, in which case no proof is returned; keep `Eq.refl` then
prf := result.proof?.getD prf
let eqAp := mkApp3 (mkConst `Eq [lvl]) type lhs rhs
let declType ← mkForallFVars args eqAp
let declValue ← mkLambdaFVars args prf
if (env.find? declName).isSome then -- diverging behavior from Lean 3
throwError "simps tried to add lemma{indentD m!"{.ofConstName declName} : {declType}"}\n\
to the environment, but it already exists."
trace[simps.verbose] "adding projection {declName}:{indentExpr declType}"
prependError "Failed to add projection lemma {declName}:" do
addDecl <| .thmDecl {
name := declName
levelParams := univs
type := declType
value := declValue }
inferDefEqAttr declName
-- add term info and apply attributes
addDeclarationRangesFromSyntax declName (← getRef) ref
TermElabM.run' do
_ ← addTermInfo (isBinder := true) ref <| ← mkConstWithLevelParams declName
if cfg.isSimp then
addSimpTheorem simpExtension declName true false .global <| eval_prio default
let attrs ← elabAttrs cfg.attrs
Elab.Term.applyAttributes declName attrs
/--
Perform head-structure-eta-reduction on expression `e`. That is, if `e` is of the form
`⟨f.1, f.2, ..., f.n⟩` with `f` definitionally equal to `e`, then
`headStructureEtaReduce e = headStructureEtaReduce f` and `headStructureEtaReduce e = e` otherwise.
Fields that are proofs are exempted from the syntactic `f.i` check (proof irrelevance).
-/
partial def headStructureEtaReduce (e : Expr) : MetaM Expr := do
let env ← getEnv
let (ctor, args) := e.getAppFnArgs
-- if the head is not a constructor of a known structure, there is nothing to reduce
let some (.ctorInfo { induct := struct, numParams, ..}) := env.find? ctor | pure e
let some { fieldNames, .. } := getStructureInfo? env struct | pure e
let (params, fields) := args.toList.splitAt numParams -- fix if `Array.take` / `Array.drop` exist
trace[simps.debug]
"rhs is constructor application with params{indentD params}\nand fields {indentD fields}"
let field0 :: fieldsTail := fields | return e
let fieldName0 :: fieldNamesTail := fieldNames.toList | return e
-- the first field must literally be `struct.field0 params f` for some candidate `f` (`reduct`)
let (fn0, fieldArgs0) := field0.getAppFnArgs
unless fn0 == struct ++ fieldName0 do
trace[simps.debug] "{fn0} ≠ {struct ++ fieldName0}"
return e
let (params', reduct :: _) := fieldArgs0.toList.splitAt numParams | unreachable!
unless params' == params do
trace[simps.debug] "{params'} ≠ {params}"
return e
trace[simps.debug] "Potential structure-eta-reduct:{indentExpr e}\nto{indentExpr reduct}"
let allArgs := params.toArray.push reduct
-- all remaining fields must be the matching projections of `reduct`, or proofs
let isEta ← (fieldsTail.zip fieldNamesTail).allM fun (field, fieldName) ↦
if field.getAppFnArgs == (struct ++ fieldName, allArgs) then pure true else isProof field
unless isEta do return e
trace[simps.debug] "Structure-eta-reduce:{indentExpr e}\nto{indentExpr reduct}"
-- keep reducing: `reduct` may itself be an eta-expanded constructor application
headStructureEtaReduce reduct
/-- Derive lemmas specifying the projections of the declaration.
Returns the names of the generated lemmas.
* `nm`: name of the lemma to generate.
* `type`, `lhs`, `rhs`: we generate (instances of) the equation `lhs = rhs : type`.
* `args`: the local constants to quantify the generated lemmas over.
* `mustBeStr`: if `true`, throw an error unless the target type is a structure.
* If `todo` is non-empty, it will generate exactly the names in `todo`.
* `toApply` is non-empty after a custom projection that is a composition of multiple projections
  was just used. In that case we need to apply these projections before we continue changing `lhs`.
-/
partial def addProjections (nm : NameStruct) (type lhs rhs : Expr)
(args : Array Expr) (mustBeStr : Bool) (cfg : Config)
(todo : List (String × Syntax)) (toApply : List Nat) : MetaM (Array Name) := do
-- we don't want to unfold non-reducible definitions (like `Set`) to apply more arguments
trace[simps.debug] "Type of the Expression before normalizing: {type}"
withTransparency cfg.typeMd <| forallTelescopeReducing type fun typeArgs tgt ↦ withDefault do
trace[simps.debug] "Type after removing pi's: {tgt}"
let tgt ← whnfD tgt
trace[simps.debug] "Type after reduction: {tgt}"
-- fully apply `lhs`/`rhs` to the freshly introduced binders
let newArgs := args ++ typeArgs
let lhsAp := lhs.instantiateLambdasOrApps typeArgs
let rhsAp := rhs.instantiateLambdasOrApps typeArgs
let str := tgt.getAppFn.constName
trace[simps.debug] "todo: {todo}, toApply: {toApply}"
-- We want to generate the current projection if it is in `todo`
let todoNext := todo.filter (·.1 ≠ "")
let env ← getEnv
-- an empty string in `todo` means the user asked for the lemma at exactly this level
let stx? := todo.find? (·.1 == "") |>.map (·.2)
/- The syntax object associated to the projection we're making now (if any).
Note that we use `ref[0]` so that with `simps (config := ...)` we associate it to the word `simps`
instead of the application of the attribute to arguments. -/
let stxProj := stx?.getD ref[0]
let strInfo? := getStructureInfo? env str
/- Don't recursively continue if `str` is not a structure or if the structure is in
`notRecursive`. -/
if strInfo?.isNone ||
(todo.isEmpty && str ∈ cfg.notRecursive && !mustBeStr && toApply.isEmpty) then
if mustBeStr then
throwError "Invalid `simps` attribute. Target {str} is not a structure"
if !todoNext.isEmpty && str ∉ cfg.notRecursive then
let firstTodo := todoNext.head!.1
throwError "Invalid simp lemma {nm.update firstTodo false |>.toName}.\nProjection \
{(splitOnNotNumber firstTodo "_")[1]!} doesn't exist, \
because target {str} is not a structure."
-- generate a single lemma for the object itself instead of recursing into projections
if cfg.fullyApplied then
addProjection stxProj univs nm.toName tgt lhsAp rhsAp newArgs cfg
else
addProjection stxProj univs nm.toName type lhs rhs args cfg
return #[nm.toName]
-- if the type is a structure
let some (.inductInfo { isRec := false, ctors := [ctor], .. }) := env.find? str | unreachable!
trace[simps.debug] "{str} is a structure with constructor {ctor}."
let rhsEta ← headStructureEtaReduce rhsAp
-- did the user ask to add this projection?
let addThisProjection := stx?.isSome && toApply.isEmpty
if addThisProjection then
-- we pass the precise argument of simps as syntax argument to `addProjection`
if cfg.fullyApplied then
addProjection stxProj univs nm.toName tgt lhsAp rhsEta newArgs cfg
else
addProjection stxProj univs nm.toName type lhs rhs args cfg
-- unfold the rhs at `rhsMd` transparency, hoping to expose a constructor application
let rhsWhnf ← withTransparency cfg.rhsMd <| whnf rhsEta
trace[simps.debug] "The right-hand-side {indentExpr rhsAp}\n reduces to {indentExpr rhsWhnf}"
if !rhsWhnf.getAppFn.isConstOf ctor then
-- if I'm about to run into an error, try to set the transparency for `rhsMd` higher.
if cfg.rhsMd == .reducible && (mustBeStr || !todoNext.isEmpty || !toApply.isEmpty) then
trace[simps.debug] "Using relaxed reducibility."
Linter.logLintIf linter.simpsNoConstructor ref m!"\
The definition {nm.toName} is not a constructor application. \
Please use `@[simps!]` instead.\n\
\n\
Explanation: `@[simps]` uses the definition to find what the simp lemmas should \
be. If the definition is a constructor, then this is easy, since the values of the \
projections are just the arguments to the constructor. If the definition is not a \
constructor, then `@[simps]` will unfold the right-hand side until it has found a \
constructor application, and uses those values.\n\n\
This might not always result in the simp-lemmas you want, so you are advised to use \
`@[simps?]` to double-check whether `@[simps]` generated satisfactory lemmas.\n\
Note 1: `@[simps!]` also calls the `simp` tactic, and this can be expensive in certain \
cases.\n\
Note 2: `@[simps!]` is equivalent to `@[simps (config := \{rhsMd := .default, \
simpRhs := true})]`. You can also try `@[simps (config := \{rhsMd := .default})]` \
to still unfold the definitions, but avoid calling `simp` on the resulting statement.\n\
Note 3: You need `simps!` if not all fields are given explicitly in this definition, \
even if the definition is a constructor application. For example, if you give a \
`MulEquiv` by giving the corresponding `Equiv` and the proof that it respects \
multiplication, then you need to mark it as `@[simps!]`, since the attribute needs to \
unfold the corresponding `Equiv` to get to the `toFun` field."
-- retry the whole computation with relaxed reducibility (and `simp` on the rhs)
let nms ← addProjections nm type lhs rhs args mustBeStr
{ cfg with rhsMd := .default, simpRhs := true } todo toApply
return if addThisProjection then nms.push nm.toName else nms
-- no constructor application and no way to relax further: report or fall back
if !toApply.isEmpty then
throwError "Invalid simp lemma {nm.toName}.\nThe given definition is not a constructor \
application:{indentExpr rhsWhnf}"
if mustBeStr then
throwError "Invalid `simps` attribute. The body is not a constructor application:\
{indentExpr rhsWhnf}"
if !todoNext.isEmpty then
throwError "Invalid simp lemma {nm.update todoNext.head!.1 false |>.toName}.\n\
The given definition is not a constructor application:{indentExpr rhsWhnf}"
if !addThisProjection then
if cfg.fullyApplied then
addProjection stxProj univs nm.toName tgt lhsAp rhsEta newArgs cfg
else
addProjection stxProj univs nm.toName type lhs rhs args cfg
return #[nm.toName]
-- if the value is a constructor application
trace[simps.debug] "Generating raw projection information..."
let projInfo ← getProjectionExprs ref tgt rhsWhnf cfg
trace[simps.debug] "Raw projection information:{indentD m!"{projInfo}"}"
-- If we are in the middle of a composite projection.
if let idx :: rest := toApply then
let some ⟨newRhs, _⟩ := projInfo[idx]?
| throwError "unreachable: index of composite projection is out of bounds."
let newType ← inferType newRhs
trace[simps.debug] "Applying a custom composite projection. Todo: {toApply}. Current lhs:\
{indentExpr lhsAp}"
return ← addProjections nm newType lhsAp newRhs newArgs false cfg todo rest
trace[simps.debug] "Not in the middle of applying a custom composite projection"
/- We stop if no further projection is specified or if we just reduced an eta-expansion and we
automatically choose projections -/
if todo.length == 1 && todo.head!.1 == "" then return #[nm.toName]
let projs : Array Name := projInfo.map fun x ↦ x.2.name
let todo := todoNext
trace[simps.debug] "Next todo: {todoNext}"
-- check whether all elements in `todo` have a projection as prefix
if let some (x, _) := todo.find? fun (x, _) ↦ projs.all
fun proj ↦ !isPrefixOfAndNotNumber (proj.lastComponentAsString ++ "_") x then
let simpLemma := nm.update x |>.toName
let neededProj := (splitOnNotNumber x "_")[0]!
throwError "Invalid simp lemma {simpLemma}. \
Structure {str} does not have projection {neededProj}.\n\
The known projections are:\
{indentD <| toMessageData projs}\n\
You can also see this information by running\
\n  `initialize_simps_projections? {str}`.\n\
Note: these projection names might be customly defined for `simps`, \
and could differ from the projection names of the structure."
-- recurse into every projection, stripping the matched name prefix from the `todo` entries
let nms ← projInfo.flatMapM fun ⟨newRhs, proj, projExpr, projNrs, isDefault, isPrefix⟩ ↦ do
let newType ← inferType newRhs
let newTodo := todo.filterMap
fun (x, stx) ↦ (dropPrefixIfNotNumber? x (proj.lastComponentAsString ++ "_")).map
(·.toString, stx)
-- we only continue with this field if it is default or mentioned in todo
if !(isDefault && todo.isEmpty) && newTodo.isEmpty then return #[]
let newLhs := projExpr.instantiateLambdasOrApps #[lhsAp]
let newName := nm.update proj.lastComponentAsString isPrefix
trace[simps.debug] "Recursively add projections for:{indentExpr newLhs}"
addProjections newName newType newLhs newRhs newArgs false cfg newTodo projNrs
return if addThisProjection then nms.push nm.toName else nms
end Simps
open Simps
/-- `simpsTac` derives `simp` lemmas for all (nested) non-Prop projections of the declaration
`nm`, attaching them to the syntax `ref`.
If `todo` is non-empty, it will generate exactly the names in `todo`.
If `trc` is true, trace as if `trace.simps.verbose` is true. -/
def simpsTac (ref : Syntax) (nm : Name) (cfg : Config := {})
(todo : List (String × Syntax) := []) (trc := false) : AttrM (Array Name) :=
withOptions (· |>.updateBool `trace.simps.verbose (trc || ·)) <| do
let env ← getEnv
let some d := env.find? nm | throwError "Declaration {nm} doesn't exist."
let lhs : Expr := mkConst d.name <| d.levelParams.map Level.param
-- lemma names are built from `_`-separated projection segments, hence the appended `_`
let todo := todo.eraseDups |>.map fun (proj, stx) ↦ (proj ++ "_", stx)
-- NOTE(review): `mut` looks unnecessary here — `cfg` is never reassigned; confirm
let mut cfg := cfg
let nm : NameStruct :=
{ parent := nm.getPrefix
components :=
-- an explicit `nameStem` overrides the declaration-derived stem; for instances named
-- with the `inst` convention the stem is dropped entirely
if let some n := cfg.nameStem then
if n == "" then [] else [n]
else
let s := nm.lastComponentAsString
if (← isInstance nm) ∧ s.startsWith "inst" then [] else [s]}
MetaM.run' <| addProjections ref d.levelParams
nm d.type lhs (d.value?.getD default) #[] (mustBeStr := true) cfg todo []
/-- Elaborate the `simps` attribute syntax and run `simpsTac`.
`!` relaxes the reducibility used to unfold the right-hand side (and turns on `simpRhs`);
`?` enables verbose tracing; `(attr := ...)` collects extra attributes for the generated lemmas. -/
def simpsTacFromSyntax (nm : Name) (stx : Syntax) : AttrM (Array Name) :=
match stx with
| `(attr| simps $[!%$bang]? $[?%$trc]? $attrs:simpsOptAttrOption $c:optConfig $[$ids]*) => do
let extraAttrs := match attrs with
| `(Attr.simpsOptAttrOption| (attr := $[$stxs],*)) => stxs
| _ => #[]
let cfg ← liftCommandElabM <| elabSimpsConfig c
-- `simps!` is sugar for `rhsMd := .default, simpRhs := true`
let cfg := if bang.isNone then cfg else { cfg with rhsMd := .default, simpRhs := true }
let cfg := { cfg with attrs := cfg.attrs ++ extraAttrs }
let ids := ids.map fun x => (x.getId.eraseMacroScopes.lastComponentAsString, x.raw)
simpsTac stx nm cfg ids.toList trc.isSome
| _ => throwUnsupportedSyntax
/-- The `simps` attribute. -/
initialize simpsAttr : ParametricAttribute (Array Name) ←
registerParametricAttribute {
name := `simps
/- So as to be run _after_ the `instance` attribute, as this handler uses
`Lean.Meta.isInstance`, which requires the `instance` handler to have
already run. -/
applicationTime := .afterCompilation
descr := "Automatically derive lemmas specifying the projections of this declaration.",
getParam := simpsTacFromSyntax } |
.lake/packages/mathlib/Mathlib/Tactic/Simps/NotationClass.lean | import Mathlib.Init
import Lean.Elab.Exception
import Batteries.Lean.NameMapAttribute
import Batteries.Tactic.Lint
/-!
# `@[notation_class]` attribute for `@[simps]`
This declares the `@[notation_class]` attribute, which is used to give smarter default projections
for `@[simps]`.
We put this in a separate file so that we can already tag some declarations with this attribute
in the file where we declare `@[simps]`. For further documentation, see `Tactic.Simps.Basic`.
-/
/-- The `@[notation_class]` attribute specifies that this is a notation class,
and this notation should be used instead of projections by `@[simps]`.
* This is only important if the projection is written differently using notation, e.g.
`+` uses `HAdd.hAdd`, not `Add.add` and `0` uses `OfNat.ofNat` not `Zero.zero`.
We also add it to non-heterogeneous notation classes, like `Neg`, but it doesn't do much for any
class that extends `Neg`.
* `@[notation_class* <projName> Simps.findCoercionArgs]` is used to configure the
`SetLike` and `DFunLike` coercions.
* The first name argument is the projection name we use as the key to search for this class
(default: name of first projection of the class).
* The second argument is the name of a declaration that has type
`findArgType` which is defined to be `Name → Name → Array Expr → MetaM (Array (Option Expr))`.
This declaration specifies how to generate the arguments of the notation class from the
arguments of classes that use the projection. -/
syntax (name := notation_class) "notation_class" "*"? (ppSpace ident)? (ppSpace ident)? : attr
open Lean Meta Elab Term
namespace Simps
/-- The type of methods to find arguments for automatic projections for `simps`.
We partly define this as a separate definition so that the unused arguments linter doesn't complain.
-/
def findArgType : Type := Name → Name → Array Expr → MetaM (Array (Option Expr))
/-- Find arguments for a notation class.
If exactly as many arguments were supplied as the class expects, pass them through unchanged;
if exactly one argument was supplied, duplicate it to fill every parameter of the class.
Otherwise the instantiation cannot be guessed and an error is thrown. -/
def defaultfindArgs : findArgType := fun _ className args ↦ do
let some classExpr := (← getEnv).find? className | throwError "no such class {className}"
-- number of leading `∀`-binders of the class type, i.e. how many arguments it expects
let arity := classExpr.type.getNumHeadForalls
if arity == args.size then
return args.map some
else if h : args.size = 1 then
-- the hypothesis `h` justifies the panic-free index `args[0]`
return .replicate arity args[0]
else
throwError "initialize_simps_projections cannot automatically find arguments for class \
{className}"
/-- Find arguments by duplicating the first argument. Used for `pow`. -/
def copyFirst : findArgType := fun _ _ args ↦ return (args.push <| args[0]?.getD default).map some
/-- Find arguments by duplicating the second argument. Used for `smul`. -/
def copySecond : findArgType := fun _ _ args ↦ return (args.push <| args[1]?.getD default).map some
/-- Find arguments by prepending `ℕ` and duplicating the first argument. Used for `nsmul`. -/
def nsmulArgs : findArgType := fun _ _ args ↦
return #[Expr.const `Nat [], args[0]?.getD default] ++ args |>.map some
/-- Find arguments by prepending `ℤ` and duplicating the first argument. Used for `zsmul`. -/
def zsmulArgs : findArgType := fun _ _ args ↦
return #[Expr.const `Int [], args[0]?.getD default] ++ args |>.map some
/-- Find arguments for the `Zero` class. -/
def findZeroArgs : findArgType := fun _ _ args ↦
return #[some <| args[0]?.getD default, some <| mkRawNatLit 0]
/-- Find arguments for the `One` class. -/
def findOneArgs : findArgType := fun _ _ args ↦
return #[some <| args[0]?.getD default, some <| mkRawNatLit 1]
/-- Find arguments of a coercion class (`DFunLike` or `SetLike`) -/
def findCoercionArgs : findArgType := fun str className args ↦ do
let some classExpr := (← getEnv).find? className | throwError "no such class {className}"
let arity := classExpr.type.getNumHeadForalls
let eStr := mkAppN (← mkConstWithLevelParams str) args
let classArgs := .replicate (arity - 1) none
return #[some eStr] ++ classArgs
/-- Data needed to generate automatic projections. This data is associated to a name of a projection
in a structure that must be used to trigger the search. -/
structure AutomaticProjectionData where
/-- `className` is the name of the class we are looking for. -/
className : Name
/-- `isNotation` is a Boolean that specifies whether this is notation
(false for the coercions `DFunLike` and `SetLike`). If this is set to true, we add the current
class as hypothesis during type-class synthesis. -/
isNotation := true
/-- The method to find the arguments of the class. -/
findArgs : Name := `Simps.defaultfindArgs
deriving Inhabited
/-- `@[notation_class]` attribute. Note: this is *not* a `NameMapAttribute` because we key on the
argument of the attribute, not the declaration name. -/
initialize notationClassAttr : NameMapExtension AutomaticProjectionData ← do
let ext ← registerNameMapExtension AutomaticProjectionData
registerBuiltinAttribute {
name := `notation_class
descr := "An attribute specifying that this is a notation class. Used by @[simps]."
add := fun src stx _kind => do
unless isStructure (← getEnv) src do
throwError "@[notation_class] attribute can only be added to classes."
match stx with
| `(attr|notation_class $[*%$coercion]? $[$projName?]? $[$findArgs?]?) => do
let projName ← match projName? with
| none => pure (getStructureFields (← getEnv) src)[0]!
| some projName => pure projName.getId
let findArgs := if findArgs?.isSome then findArgs?.get!.getId else `Simps.defaultfindArgs
match (← getEnv).find? findArgs with
| none => throwError "no such declaration {findArgs}"
| some declInfo =>
unless ← MetaM.run' <| isDefEq declInfo.type (mkConst ``findArgType) do
throwError "declaration {findArgs} has wrong type"
ext.add projName ⟨src, coercion.isNone, findArgs⟩
| _ => throwUnsupportedSyntax }
return ext
end Simps |
.lake/packages/mathlib/Mathlib/Tactic/Linter/UnusedTactic.lean | import Lean.Parser.Syntax
import Batteries.Tactic.Unreachable
-- Import this linter explicitly to ensure that
-- this file has a valid copyright header and module docstring.
import Mathlib.Tactic.Linter.Header
import Mathlib.Tactic.Linter.UnusedTacticExtension
/-!
# The unused tactic linter
The unused linter makes sure that every tactic call actually changes *something*.
The inner workings of the linter are as follows.
The linter inspects the goals before and after each tactic execution.
If they are not identical, the linter is happy.
If they are identical, then the linter checks if the tactic is whitelisted.
Possible reason for whitelisting are
* tactics that emit messages, such as `have?`, `extract_goal`, or `says`;
* tactics that are in place to assert something, such as `guard`;
* tactics that allow to work on a specific goal, such as `on_goal`;
* "flow control" tactics, such as `success_if_fail` and related.
The only tactic that has a bespoke criterion is `swap_var`: the reason is that the only change that
`swap_var` has is to relabel the usernames of local declarations.
Thus, to check that `swap_var` was used, so we inspect the names of all the local declarations
before and after and see if there is some change.
## Notable exclusions
* `conv` is completely ignored by the linter.
* The linter does not enter a "sequence tactic": upon finding `tac <;> [tac1, tac2, ...]`
the linter assumes that the tactic is doing something and does not recurse into each
`tac1, tac2, ...`.
This is just for lack of an implementation: it may not be hard to do this.
* The tactic does not check the discharger for `linear_combination`,
but checks `linear_combination` itself.
The main reason is that `skip` is a common discharger tactic and the linter would
then always fail whenever the user explicitly chose to pass `skip` as a discharger tactic.
## TODO
* The linter seems to be silenced by `set_option ... in`: maybe it should enter `in`s?
## Implementation notes
Yet another linter copied from the `unreachableTactic` linter!
-/
open Lean Elab Std Linter
namespace Mathlib.Linter
/-- The unused tactic linter makes sure that every tactic call actually changes *something*. -/
register_option linter.unusedTactic : Bool := {
defValue := true
descr := "enable the unused tactic linter"
}
namespace UnusedTactic
/-- The monad for collecting the ranges of the syntaxes that do not modify any goal. -/
abbrev M := StateRefT (Std.HashMap String.Range Syntax) IO
-- Tactics that are expected to not change the state but should also not be flagged by the
-- unused tactic linter.
#allow_unused_tactic!
Lean.Parser.Term.byTactic
Lean.Parser.Tactic.tacticSeq
Lean.Parser.Tactic.tacticSeq1Indented
Lean.Parser.Tactic.tacticTry_
-- the following `SyntaxNodeKind`s play a role in silencing `test`s
Lean.Parser.Tactic.guardHyp
Lean.Parser.Tactic.guardTarget
Lean.Parser.Tactic.failIfSuccess
/--
A list of blacklisted syntax kinds, which are expected to have subterms that contain
unevaluated tactics.
-/
initialize ignoreTacticKindsRef : IO.Ref NameHashSet ←
IO.mkRef <| .ofArray #[
`Mathlib.Tactic.Says.says,
``Parser.Term.binderTactic,
``Lean.Parser.Term.dynamicQuot,
``Lean.Parser.Tactic.quotSeq,
``Lean.Parser.Tactic.tacticStop_,
``Lean.Parser.Command.notation,
``Lean.Parser.Command.mixfix,
``Lean.Parser.Tactic.discharger,
``Lean.Parser.Tactic.Conv.conv,
`Batteries.Tactic.seq_focus,
`Mathlib.Tactic.Hint.registerHintStx,
`Mathlib.Tactic.LinearCombination.linearCombination,
`Mathlib.Tactic.LinearCombination'.linearCombination',
`Aesop.Frontend.Parser.addRules,
`Aesop.Frontend.Parser.aesopTactic,
`Aesop.Frontend.Parser.aesopTactic?,
-- the following `SyntaxNodeKind`s play a role in silencing `test`s
``Lean.Parser.Tactic.failIfSuccess,
`Mathlib.Tactic.successIfFailWithMsg,
`Mathlib.Tactic.failIfNoProgress
]
/-- Decide whether syntax kind `k` is one whose subterms may intentionally contain unevaluated
tactics: anything in a `Conv` namespace, the `slice…` tactics, quotation kinds (ending in
`"quot"`), and every kind registered in `ignoreTacticKinds`. -/
def isIgnoreTacticKind (ignoreTacticKinds : NameHashSet) (k : SyntaxNodeKind) : Bool :=
  if k.components.contains `Conv then true
  else if "slice".isPrefixOf k.toString then true
  else
    match k with
    | .str _ "quot" => true
    | _ => ignoreTacticKinds.contains k
/--
Registers an additional syntax kind whose children the `unusedTactic` linter will ignore.
Intended to be invoked from an `initialize` block.
-/
def addIgnoreTacticKind (kind : SyntaxNodeKind) : IO Unit :=
  ignoreTacticKindsRef.modify fun kinds => kinds.insert kind
variable (ignoreTacticKinds : NameHashSet) (isTacKind : SyntaxNodeKind → Bool) in
/-- Accumulates the set of tactic syntaxes that should be evaluated at least once.
Walks the syntax tree, not descending into kinds accepted by `isIgnoreTacticKind`, and records
(keyed by source range) every node whose kind satisfies `isTacKind`. -/
@[specialize] partial def getTactics (stx : Syntax) : M Unit := do
if let .node _ k args := stx then
if !isIgnoreTacticKind ignoreTacticKinds k then
args.forM getTactics
if isTacKind k then
-- `getRange? true` requests the canonical range, so it can be matched against info-tree ranges
if let some r := stx.getRange? true then
modify fun m => m.insert r stx
/-- `getNames mctx` collects the user-facing names of all local declarations occurring in any
local context stored in the `MetavarContext` `mctx`. -/
def getNames (mctx : MetavarContext) : List Name :=
  mctx.decls.toList.flatMap fun (_, mdecl) =>
    mdecl.lctx.decls.toList.reduceOption.map LocalDecl.userName
mutual
/-- Search for tactic executions in the info tree and remove the syntax of the tactics that
changed something. -/
partial def eraseUsedTacticsList (exceptions : Std.HashSet SyntaxNodeKind)
(trees : PersistentArray InfoTree) : M Unit :=
trees.forM (eraseUsedTactics exceptions)
/-- Search for tactic executions in the info tree and remove the syntax of the tactics that
changed something. Whatever remains in the state map after the traversal is reported as unused. -/
partial def eraseUsedTactics (exceptions : Std.HashSet SyntaxNodeKind) : InfoTree → M Unit
| .node i c => do
if let .ofTacticInfo i := i then
let stx := i.stx
let kind := stx.getKind
if let some r := stx.getRange? true then
if exceptions.contains kind
-- if the tactic is allowed to not change the goals
then modify (·.erase r)
else
-- if the goals have changed
if i.goalsAfter != i.goalsBefore
then modify (·.erase r)
-- bespoke check for `swap_var`: the only change that it does is
-- in the usernames of local declarations, so we check the names before and after
else
if (kind == `Mathlib.Tactic.«tacticSwap_var__,,») &&
(getNames i.mctxBefore != getNames i.mctxAfter)
then modify (·.erase r)
-- recurse into the children regardless, to clear nested tactic syntaxes
eraseUsedTacticsList exceptions c
| .context _ t => eraseUsedTactics exceptions t
| .hole _ => pure ()
end
/-- The main entry point to the unused tactic linter.
Collects every tactic syntax in the command, erases those that visibly changed the goals
(or are whitelisted), and reports whatever is left over. -/
def unusedTacticLinter : Linter where run := withSetOptionIn fun stx => do
unless getLinterValue linter.unusedTactic (← getLinterOptions) && (← getInfoState).enabled do
return
-- do not pile linter warnings on top of elaboration errors
if (← get).messages.hasErrors then
return
if stx.isOfKind ``Mathlib.Linter.UnusedTactic.«command#show_kind_» then return
let env ← getEnv
let cats := (Parser.parserExtension.getState env).categories
-- These lookups may fail when the linter is run in a fresh, empty environment
let some tactics := Parser.ParserCategory.kinds <$> cats.find? `tactic
| return
let some convs := Parser.ParserCategory.kinds <$> cats.find? `conv
| return
let trees ← getInfoTrees
let exceptions := (← allowedRef.get).union <| allowedUnusedTacticExt.getState env
-- first record all tactic syntaxes, then erase the ones whose execution changed something
let go : M Unit := do
getTactics (← ignoreTacticKindsRef.get) (fun k => tactics.contains k || convs.contains k) stx
eraseUsedTacticsList exceptions trees
let (_, map) ← go.run {}
let unused := map.toArray
-- sort by start position ascending and stop position descending, so that an enclosing range
-- precedes the ranges nested inside it and can suppress them via `last` below
let key (r : String.Range) := (r.start.byteIdx, (-r.stop.byteIdx : Int))
let mut last : String.Range := ⟨0, 0⟩
for (r, stx) in let _ := @lexOrd; let _ := @ltOfOrd.{0}; unused.qsort (key ·.1 < key ·.1) do
if stx.getKind ∈ [``Batteries.Tactic.unreachable, ``Batteries.Tactic.unreachableConv] then
continue
-- skip ranges contained in the previously reported range
if last.start ≤ r.start && r.stop ≤ last.stop then continue
Linter.logLint linter.unusedTactic stx m!"'{stx}' tactic does nothing"
last := r
initialize addLinter unusedTacticLinter
.lake/packages/mathlib/Mathlib/Tactic/Linter/Lint.lean | import Batteries.Tactic.Lint
import Mathlib.Tactic.DeclarationNames
/-!
# Linters for Mathlib
In this file we define additional linters for mathlib,
which concern the *behaviour* of the linted code, and not issues of code style or formatting.
Perhaps these should be moved to Batteries in the future.
-/
namespace Batteries.Tactic.Lint
open Lean Meta
/--
Linter that checks whether a structure should be in Prop: it flags structures living in `Type`
all of whose fields are propositional.
-/
@[env_linter] def structureInType : Linter where
noErrorsFound := "no structures that should be in Prop found."
errorsFound := "FOUND STRUCTURES THAT SHOULD BE IN PROP."
test declName := do
unless isStructure (← getEnv) declName do return none
-- remark: using `Lean.Meta.isProp` doesn't suffice here, because it doesn't (always?)
-- recognize predicates as propositional.
let isProp ← forallTelescopeReducing (← inferType (← mkConstWithLevelParams declName))
fun _ ty ↦ return ty == .sort .zero
-- already a `Prop`-valued structure: nothing to report
if isProp then return none
let projs := (getStructureInfo? (← getEnv) declName).get!.fieldNames
if projs.isEmpty then return none -- don't flag empty structures
let allProofs ← projs.allM (do isProof <| ← mkConstWithLevelParams <| declName ++ ·)
unless allProofs do return none
return m!"all fields are propositional but the structure isn't."
/-- Linter that checks that all `deprecated` tags come with `since` dates. -/
@[env_linter] def deprecatedNoSince : Linter where
noErrorsFound := "no `deprecated` tags without `since` dates."
errorsFound := "FOUND `deprecated` tags without `since` dates."
test declName := do
-- only declarations actually carrying the `deprecated` attribute are inspected
let some info := Lean.Linter.deprecatedAttr.getParam? (← getEnv) declName | return none
match info.since? with
| some _ => return none -- TODO: enforce `YYYY-MM-DD` format
| none => return m!"`deprecated` attribute without `since` date"
end Batteries.Tactic.Lint
namespace Mathlib.Linter
/-!
### `dupNamespace` linter
The `dupNamespace` linter produces a warning when a declaration contains the same namespace
at least twice consecutively.
For instance, `Nat.Nat.foo` and `One.two.two` trigger a warning, while `Nat.One.Nat` does not.
-/
/--
The `dupNamespace` linter is set on by default. Lean emits a warning on any declaration that
contains the same namespace at least twice consecutively.
For instance, `Nat.Nat.foo` and `One.two.two` trigger a warning, while `Nat.One.Nat` does not.
*Note.*
This linter will not detect duplication in namespaces of autogenerated declarations
(other than the one whose `declId` is present in the source declaration).
-/
register_option linter.dupNamespace : Bool := {
defValue := true
descr := "enable the duplicated namespace linter"
}
namespace DupNamespaceLinter
open Lean Parser Elab Command Meta Linter
@[inherit_doc linter.dupNamespace]
def dupNamespace : Linter where run := withSetOptionIn fun stx ↦ do
if getLinterValue linter.dupNamespace (← getLinterOptions) then
-- `export` commands introduce aliases, which are linted alongside the declared names
let mut aliases := #[]
if let some exp := stx.find? (·.isOfKind `Lean.Parser.Command.export) then
aliases ← getAliasSyntax exp
for id in (← getNamesFrom (stx.getPos?.getD default)) ++ aliases do
let declName := id.getId
-- auto-generated (macro-scoped) and private names are exempt
if declName.hasMacroScopes || isPrivateName declName then continue
let nm := declName.components
-- pair each component with its successor; a matching pair is a consecutive duplicate
let some (dup, _) := nm.zip (nm.tailD []) |>.find? fun (x, y) ↦ x == y
| continue
Linter.logLint linter.dupNamespace id
m!"The namespace '{dup}' is duplicated in the declaration '{declName}'"
initialize addLinter dupNamespace
end Mathlib.Linter.DupNamespaceLinter |
.lake/packages/mathlib/Mathlib/Tactic/Linter/PPRoundtrip.lean | import Lean.Elab.Command
import Mathlib.Init
/-!
# The "ppRoundtrip" linter
The "ppRoundtrip" linter emits a warning when the syntax of a command differs substantially
from the pretty-printed version of itself.
-/
open Lean Elab Command Linter
namespace Mathlib.Linter
/--
The "ppRoundtrip" linter emits a warning when the syntax of a command differs substantially
from the pretty-printed version of itself.
The linter makes an effort to start the highlighting at the first difference.
However, it may not always be successful.
It also prints both the source code and the "expected code" in a 5-character radius from
the first difference.
-/
register_option linter.ppRoundtrip : Bool := {
  -- Off by default: this linter is opt-in.
  defValue := false
  descr := "enable the ppRoundtrip linter"
}
/-- `polishPP s` takes as input a `String` `s`, assuming that it is the output of
pretty-printing a lean command.
The main intent is to convert `s` to a reasonable candidate for a desirable source code format.
The function first replaces consecutive whitespace sequences into a single space (` `), in an
attempt to side-step line-break differences.
After that, it applies some pre-emptive changes:
* doc-module beginnings tend to have some whitespace following them, so we add a space back in;
* name quotations such as ``` ``Nat``` get pretty-printed as ``` `` Nat```, so we remove a space
  after double back-ticks, but take care of adding one more for triple (or more) back-ticks;
* `notation3` is not followed by a pretty-printer space, so we add it here (https://github.com/leanprover-community/mathlib4/pull/15515).
-/
def polishPP (s : String) : String :=
  let s := s.splitToList (·.isWhitespace)
  (" ".intercalate (s.filter (!·.isEmpty)))
    |>.replace "/-!" "/-! "
    -- Add an extra space after triple back-ticks so that the following `"`` " → "``"`
    -- replacement removes only the extra one, preserving an existing space.
    -- (The previous replacement `"``` " → "``` "` was a no-op, so that space was lost.)
    |>.replace "``` " "```  "
    |>.replace "`` " "``" -- weird pp ```#eval ``«Nat»``` pretty-prints as ```#eval `` «Nat»```
    |>.replace "notation3(" "notation3 ("
    |>.replace "notation3\"" "notation3 \""
/-- `polishSource s` is similar to `polishPP s`, but expects the input to be actual source code.
For this reason, `polishSource s` performs more conservative changes:
it only replace all whitespace starting from a linebreak (`\n`) with a single whitespace.
It returns the polished string together with an array that, for each line, records the amount
of leading whitespace that was stripped followed by the length of the remaining text. -/
def polishSource (s : String) : String × Array Nat :=
  let split := s.splitToList (· == '\n')
  -- For every line push two entries: (stripped leading whitespace, remaining length).
  let preWS := split.foldl (init := #[]) fun p q =>
    let txt := q.trimLeft.length
    (p.push (q.length - txt)).push txt
  -- Drop the first entry so the array lines up with `posToShiftedPos`'s expectations.
  let preWS := preWS.eraseIdxIfInBounds 0
  let s := (split.map .trimLeft).filter (· != "")
  -- Note: a second `filter (!·.isEmpty)` here would be redundant, since
  -- `filter (· != "")` above already removed every empty line.
  (" ".intercalate s, preWS)
/-- `posToShiftedPos lths diff` takes as input an array `lths` of natural numbers,
and one further natural number `diff`.
It adds up the elements of `lths` occupying the odd positions, as long as the sum of the
elements in the even positions does not exceed `diff`.
It returns the sum of the accumulated odds and `diff`.
This is useful to figure out the difference between the output of `polishSource s` and `s` itself.
It plays a role similar to the `fileMap`. -/
def posToShiftedPos (lths : Array Nat) (diff : Nat) : Nat := Id.run do
  -- `shifted` starts at `diff` and absorbs the odd-position entries;
  -- `consumed` tracks how much of the even-position entries has been passed.
  let mut shifted := diff
  let mut consumed := 0
  for i in [:lths.size / 2] do
    let visible := lths[2 * i]!
    if consumed + visible < diff then
      consumed := consumed + visible
      shifted := shifted + lths[2 * i + 1]!
    else
      break
  return shifted
/-- `zoomString str centre offset` returns the substring of `str` consisting of the `offset`
characters around the `centre`th character.
NOTE(review): the positions are raw `String.Pos` values (byte positions); `centre - offset`
truncates at `0` (`Nat` subtraction) and `centre + offset` is not clamped to the end of
`str` — presumably acceptable because the result is only used for display; confirm. -/
def zoomString (str : String) (centre offset : Nat) : Substring :=
  { str := str, startPos := ⟨centre - offset⟩, stopPos := ⟨centre + offset⟩ }
/-- `capSourceInfo s p` "shortens" all end-position information in the `SourceInfo` `s` to be
at most `p`, trimming down also the relevant substrings. -/
def capSourceInfo (s : SourceInfo) (p : Nat) : SourceInfo :=
  match s with
  | .original leading pos trailing endPos =>
    -- Cap both the trailing substring and the end position at `p`.
    -- NOTE(review): the trailing substring's `stopPos` is capped using `endPos`,
    -- not its own `stopPos` — confirm this is intended.
    .original leading pos {trailing with stopPos := ⟨min endPos.1 p⟩} ⟨min endPos.1 p⟩
  | .synthetic pos endPos canonical =>
    .synthetic pos ⟨min endPos.1 p⟩ canonical
  -- `SourceInfo.none` carries no positions: nothing to cap.
  | .none => s
/-- `capSyntax stx p` applies `capSourceInfo · p` to all `SourceInfo`s in all
`node`s, `atom`s and `ident`s contained in `stx`.
This is used to trim away all "fluff" that follows a command: comments and whitespace after
a command get removed with `capSyntax stx stx.getTailPos?.get!`.
-/
partial
def capSyntax (stx : Syntax) (p : Nat) : Syntax :=
  match stx with
  | .node si k args => .node (capSourceInfo si p) k (args.map (capSyntax · p))
  -- For atoms, also truncate the literal value itself to `p` characters.
  | .atom si val => .atom (capSourceInfo si p) (val.take p)
  | .ident si r v pr => .ident (capSourceInfo si p) { r with stopPos := ⟨min r.stopPos.1 p⟩ } v pr
  -- `missing` syntax is returned unchanged.
  | s => s
namespace PPRoundtrip
@[inherit_doc Mathlib.Linter.linter.ppRoundtrip]
def ppRoundtrip : Linter where run := withSetOptionIn fun stx ↦ do
  unless getLinterValue linter.ppRoundtrip (← getLinterOptions) do
    return
  -- Do not pile warnings on top of commands that already failed to elaborate.
  if (← MonadState.get).messages.hasErrors then
    return
  -- Trim trailing comments/whitespace from the command syntax before comparing.
  let stx := capSyntax stx (stx.getTailPos?.getD default).1
  let origSubstring := stx.getSubstring?.getD default
  -- `real` is the normalized source; `lths` records the whitespace stripped per line.
  let (real, lths) := polishSource origSubstring.toString
  -- Pretty-print the command; if pretty-printing fails, warn and fall back to the
  -- source itself (so that `st == real` below and no spurious diff is reported).
  let fmt ← (liftCoreM do PrettyPrinter.ppCategory `command stx <|> (do
    Linter.logLint linter.ppRoundtrip stx
      m!"The ppRoundtrip linter had some parsing issues: \
      feel free to silence it with `set_option linter.ppRoundtrip false in` \
      and report this error!"
    return real))
  let st := polishPP fmt.pretty
  if st != real then
    let diff := real.firstDiffPos st
    -- Translate the diff position in the polished string back to the original source.
    let pos := posToShiftedPos lths diff.1 + origSubstring.startPos.1
    let f := origSubstring.str.drop (pos)
    let extraLth := (f.takeWhile (· != diff.get st)).length
    -- Show a 5-character context window around the first difference on both sides.
    let srcCtxt := zoomString real diff.1 5
    let ppCtxt := zoomString st diff.1 5
    Linter.logLint linter.ppRoundtrip (.ofRange ⟨⟨pos⟩, ⟨pos + extraLth + 1⟩⟩)
      m!"source context\n'{srcCtxt}'\n'{ppCtxt}'\npretty-printed context"

initialize addLinter ppRoundtrip
end Mathlib.Linter.PPRoundtrip |
.lake/packages/mathlib/Mathlib/Tactic/Linter/DirectoryDependency.lean | import Lean.Elab.Command
import Lean.Elab.ParseImportsFast
-- This file is imported by the Header linter, hence has no mathlib imports.
/-! # The `directoryDependency` linter
The `directoryDependency` linter detects imports between directories that are supposed to be
independent. By specifying that one directory does not import from another, we can improve the
modularity of Mathlib.
-/
-- XXX: is there a better long-time place for this
/-- Parse all imports in a text file at `path` and return just their names:
this is just a thin wrapper around `Lean.parseImports'`.
Omit `Init` (which is part of the prelude).
Note: this reads the file from disk, so it can throw an `IO.Error` if `path` is unreadable. -/
def findImports (path : System.FilePath) : IO (Array Lean.Name) := do
  return (← Lean.parseImports' (← IO.FS.readFile path) path.toString).imports
    |>.map (fun imp ↦ imp.module) |>.erase `Init
/-- Find the longest prefix of `n` such that `f` returns `some` (or return `none` otherwise). -/
def Lean.Name.findPrefix {α} (f : Name → Option α) (n : Name) : Option α :=
  -- Try `n` itself first; on failure, recurse into its parent prefix.
  match f n with
  | some a => some a
  | none =>
    match n with
    | anonymous => none
    | str n' _ => n'.findPrefix f
    | num n' _ => n'.findPrefix f
/-- Make a `NameSet` containing all prefixes of `n` (including `n` itself). -/
def Lean.Name.prefixes (n : Name) : NameSet :=
  -- Recursively collect the prefixes of the parent, then add `n` itself.
  let parentPrefixes : NameSet :=
    match n with
    | anonymous => ∅
    | str n' _ => n'.prefixes
    | num n' _ => n'.prefixes
  parentPrefixes.insert n
/-- Return the immediate prefix of `n` (if any); `anonymous` has none. -/
def Lean.Name.prefix? (n : Name) : Option Name :=
  match n with
  | str p _ | num p _ => some p
  | anonymous => none
/-- Collect all prefixes of names in `ns` into a single `NameSet`. -/
def Lean.Name.collectPrefixes (ns : Array Name) : NameSet :=
  -- Fold the per-name prefix sets into one accumulator set.
  ns.foldl (init := (∅ : NameSet)) fun acc n => acc.append n.prefixes
/-- Find a name in `ns` that starts with prefix `p`. -/
def Lean.Name.prefixToName (p : Name) (ns : Array Name) : Option Name :=
  ns.find? fun candidate => p.isPrefixOf candidate
/-- Find the dependency chain, starting at a module that imports `imported`, and ends with the
current module.
The path only contains the intermediate steps: it excludes `imported` and the current module.
NOTE(review): this walks modules in environment loading order and greedily follows the
*first* module found importing the current target, so it reports one chain, not all chains. -/
def Lean.Environment.importPath (env : Environment) (imported : Name) : Array Name := Id.run do
  let mut result := #[]
  let modData := env.header.moduleData
  let modNames := env.header.moduleNames
  if let some idx := env.getModuleIdx? imported then
    -- Only modules after `imported` in loading order can import it.
    let mut target := imported
    for i in [idx.toNat + 1 : modData.size] do
      if modData[i]!.imports.any (·.module == target) then
        -- Follow the chain: the importer becomes the new target to look for.
        target := modNames[i]!
        result := result.push modNames[i]!
  return result
namespace Mathlib.Linter
open Lean Elab Command Linter
/--
The `directoryDependency` linter detects imports between directories that are supposed to be
independent. If this is the case, it emits a warning.
-/
register_option linter.directoryDependency : Bool := {
  -- The linter is on by default.
  defValue := true
  descr := "enable the directoryDependency linter"
}
namespace DirectoryDependency
/-- `NamePrefixRel` is a datatype for storing relations between name prefixes.
That is, `R : NamePrefixRel` is supposed to answer given names `(n₁, n₂)` whether there are any
prefixes `n₁'` of `n₁` and `n₂'` of `n₂` such that `n₁' R n₂'`.
The current implementation is a `NameMap` of `NameSet`s, testing each prefix of `n₁` and `n₂` in
turn. This can probably be optimized.
-/
def NamePrefixRel := NameMap NameSet

namespace NamePrefixRel

-- The empty relation is inherited from the underlying `NameMap`.
instance : EmptyCollection NamePrefixRel := inferInstanceAs (EmptyCollection (NameMap _))
/-- Make all names with prefix `n₁` related to names with prefix `n₂`. -/
def insert (r : NamePrefixRel) (n₁ n₂ : Name) : NamePrefixRel :=
  -- Start from the existing target set for `n₁` (or the empty set) and add `n₂` to it.
  let targets := (r.find? n₁).getD ∅
  NameMap.insert r n₁ (targets.insert n₂)
/-- Convert an array of prefix pairs to a `NamePrefixRel`. -/
def ofArray (xs : Array (Name × Name)) : NamePrefixRel :=
  -- Insert each (source, target) pair into an initially empty relation.
  xs.foldl (fun rel pair => rel.insert pair.1 pair.2) ∅
/-- Get a prefix of `n₁` that is related to a prefix of `n₂`, returning both prefixes. -/
def find (r : NamePrefixRel) (n₁ n₂ : Name) : Option (Name × Name) :=
  n₁.findPrefix fun p₁ =>
    -- Look up the target set for this prefix of `n₁`, then scan the prefixes of `n₂`.
    (r.find? p₁).bind fun targets =>
      n₂.findPrefix fun p₂ =>
        if targets.contains p₂ then some (p₁, p₂) else none
/-- Get a prefix of `n₁` that is related to any prefix of the names in `ns`; return the prefixes.
This should be more efficient than iterating over all names in `ns` and calling `find`,
since it doesn't need to worry about overlapping prefixes.
-/
def findAny (r : NamePrefixRel) (n₁ : Name) (ns : Array Name) : Option (Name × Name) :=
  -- Collect every prefix occurring in `ns` once, then scan the prefixes of `n₁`.
  let candidates := Lean.Name.collectPrefixes ns
  n₁.findPrefix fun p₁ => do
    let targets ← r.find? p₁
    for p₂ in candidates do
      if targets.contains p₂ then
        return (p₁, p₂)
    none
/-- Does `r` contain any entries with key `n`?
Note: this checks `n` itself as a key; it does not consult prefixes of `n`. -/
def containsKey (r : NamePrefixRel) (n : Name) : Bool := NameMap.contains r n
/-- Is a prefix of `n₁` related to a prefix of `n₂`? -/
def contains (r : NamePrefixRel) (n₁ n₂ : Name) : Bool := (r.find n₁ n₂).isSome
/-- Look up all names `m` which are values of some prefix of `n` under this relation. -/
def getAllLeft (r : NamePrefixRel) (n : Name) : NameSet := Id.run do
  -- Keep only those prefixes of `n` that actually occur as keys of `r`.
  let matchingPrefixes := n.prefixes.filter (fun prf ↦ r.containsKey prf)
  let mut allRules := NameSet.empty
  for prefix_ in matchingPrefixes do
    -- `find?` cannot fail here: `prefix_` was just filtered to be a key of `r`.
    let some rules := r.find? prefix_ | unreachable!
    allRules := allRules.append rules
  allRules
end NamePrefixRel
-- TODO: add/extend tests for this linter, to ensure the allow-list works
-- TODO: move the following three lists to a JSON file, for easier evolution over time!
-- Future: enforce that allowed and forbidden keys are disjoint
-- Future: move further directories to use this allow-list instead of the blocklist
/-- `allowedImportDirs` relates module prefixes, specifying that modules with the first prefix
are only allowed to import modules in the second directory.
For directories which are low in the import hierarchy, this opt-out approach is both more ergonomic
(fewer updates needed) and needs less configuration.
We always allow imports of `Init`, `Lean`, `Std`, `Qq` and
`Mathlib.Init` (as well as their transitive dependencies.)
Each entry is a pair `(importing-module prefix, allowed-import prefix)`.
-/
def allowedImportDirs : NamePrefixRel := .ofArray #[
  -- This is used to test the linter.
  (`MathlibTest.DirectoryDependencyLinter, `Mathlib.Lean),
  -- Mathlib.Tactic has large transitive imports: just allow all of mathlib,
  -- as we don't care about the details a lot.
  (`MathlibTest.Header, `Mathlib),
  (`MathlibTest.Header, `Aesop),
  (`MathlibTest.Header, `ImportGraph),
  (`MathlibTest.Header, `LeanSearchClient),
  (`MathlibTest.Header, `Plausible),
  (`MathlibTest.Header, `ProofWidgets),
  (`MathlibTest.Header, `Qq),
  -- (`MathlibTest.Header, `Mathlib.Tactic),
  -- (`MathlibTest.Header, `Mathlib.Deprecated),
  (`MathlibTest.Header, `Batteries),
  (`MathlibTest.Header, `Lake),
  (`Mathlib.Util, `Batteries),
  (`Mathlib.Util, `Mathlib.Lean),
  (`Mathlib.Util, `Mathlib.Tactic),
  -- TODO: reduce this dependency by upstreaming `Data.String.Defs to batteries
  (`Mathlib.Util.FormatTable, `Mathlib.Data.String.Defs),
  (`Mathlib.Lean, `Batteries.Tactic.Lint),
  (`Mathlib.Lean, `Batteries.CodeAction),
  -- TODO: should this be minimised further?
  (`Mathlib.Lean.Meta.CongrTheorems, `Batteries),
  -- These modules are transitively imported by `Batteries.CodeAction.
  (`Mathlib.Lean, `Batteries.Classes.SatisfiesM),
  (`Mathlib.Lean, `Batteries.Data.Array.Match),
  (`Mathlib.Lean, `Batteries.Data.Fin),
  (`Mathlib.Lean, `Batteries.Data.List),
  (`Mathlib.Lean, `Batteries.Lean),
  (`Mathlib.Lean, `Batteries.Control.ForInStep),
  (`Mathlib.Lean, `Batteries.Tactic.Alias),
  (`Mathlib.Lean, `Batteries.Util.ProofWanted),
  (`Mathlib.Lean.Expr, `Mathlib.Util),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Util),
  -- Fine-grained exceptions: TODO decide if these are fine, or should be scoped more broadly.
  (`Mathlib.Lean.CoreM, `Mathlib.Tactic.ToExpr),
  (`Mathlib.Lean.CoreM, `Mathlib.Util.WhatsNew),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Tactic.Lemma),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Tactic.TypeStar),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Tactic.ToAdditive),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Tactic), -- split this up further?
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Data), -- split this up further?
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Algebra.Notation),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Data.Notation),
  (`Mathlib.Lean.Meta.RefinedDiscrTree, `Mathlib.Data.Array),
  (`Mathlib.Lean.Meta.CongrTheorems, `Mathlib.Data),
  (`Mathlib.Lean.Meta.CongrTheorems, `Mathlib.Logic),
  (`Mathlib.Lean.Meta.CongrTheorems, `Mathlib.Order.Defs),
  (`Mathlib.Lean.Meta.CongrTheorems, `Mathlib.Tactic),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Batteries.Util.ExtendedBinder),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Batteries.Logic),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Batteries.Tactic.Trans),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Batteries.Tactic.Init),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Mathlib.Data),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Mathlib.Order),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Mathlib.Logic),
  (`Mathlib.Lean.Expr.ExtraRecognizers, `Mathlib.Tactic),
  (`Mathlib.Tactic.Linter, `Batteries),
  -- The Mathlib.Tactic.Linter *module* imports all linters, hence requires all the imports.
  -- For more fine-grained exceptions of the next two imports, one needs to rename that file.
  (`Mathlib.Tactic.Linter, `ImportGraph),
  (`Mathlib.Tactic.Linter, `Mathlib.Tactic.MinImports),
  (`Mathlib.Tactic.Linter.TextBased, `Mathlib.Data.Nat.Notation),
  (`Mathlib.Logic, `Batteries),
  -- TODO: should the next import direction be flipped?
  (`Mathlib.Logic, `Mathlib.Control),
  (`Mathlib.Logic, `Mathlib.Lean),
  (`Mathlib.Logic, `Mathlib.Util),
  (`Mathlib.Logic, `Mathlib.Tactic),
  (`Mathlib.Logic.Fin.Rotate, `Mathlib.Algebra.Group.Fin.Basic),
  (`Mathlib.Logic, `Mathlib.Algebra.Notation),
  (`Mathlib.Logic, `Mathlib.Algebra.NeZero),
  (`Mathlib.Logic, `Mathlib.Data),
  -- TODO: this next dependency should be made more fine-grained.
  (`Mathlib.Logic, `Mathlib.Order),
  -- Particular modules with larger imports.
  (`Mathlib.Logic.Hydra, `Mathlib.GroupTheory),
  (`Mathlib.Logic.Hydra, `Mathlib.Algebra),
  (`Mathlib.Logic.Encodable.Pi, `Mathlib.Algebra),
  (`Mathlib.Logic.Equiv.Fin.Rotate, `Mathlib.Algebra.Group),
  (`Mathlib.Logic.Equiv.Fin.Rotate, `Mathlib.Algebra.Regular),
  (`Mathlib.Logic.Equiv.Array, `Mathlib.Algebra),
  (`Mathlib.Logic.Equiv.Finset, `Mathlib.Algebra),
  (`Mathlib.Logic.Godel.GodelBetaFunction, `Mathlib.Algebra),
  (`Mathlib.Logic.Small.List, `Mathlib.Algebra),
  (`Mathlib.Testing, `Batteries),
  -- TODO: this next import should be eliminated.
  (`Mathlib.Testing, `Mathlib.GroupTheory),
  (`Mathlib.Testing, `Mathlib.Control),
  (`Mathlib.Testing, `Mathlib.Algebra),
  (`Mathlib.Testing, `Mathlib.Data),
  (`Mathlib.Testing, `Mathlib.Logic),
  (`Mathlib.Testing, `Mathlib.Order),
  (`Mathlib.Testing, `Mathlib.Lean),
  (`Mathlib.Testing, `Mathlib.Tactic),
  (`Mathlib.Testing, `Mathlib.Util),
  ]
/-- `forbiddenImportDirs` relates module prefixes, specifying that modules with the first prefix
should not import modules with the second prefix (except if specifically allowed in
`overrideAllowedImportDirs`).
For example, ``(`Mathlib.Algebra.Notation, `Mathlib.Algebra)`` is in `forbiddenImportDirs` and
``(`Mathlib.Algebra.Notation, `Mathlib.Algebra.Notation)`` is in `overrideAllowedImportDirs`
because modules in `Mathlib/Algebra/Notation.lean` cannot import modules in `Mathlib.Algebra` that are
outside `Mathlib/Algebra/Notation.lean`.
Each entry is a pair `(importing-module prefix, forbidden-import prefix)`.
-/
def forbiddenImportDirs : NamePrefixRel := .ofArray #[
  (`Mathlib.Algebra.Notation, `Mathlib.Algebra),
  (`Mathlib, `Mathlib.Deprecated),
  -- This is used to test the linter.
  (`MathlibTest.Header, `Mathlib.Deprecated),
  -- TODO:
  -- (`Mathlib.Data, `Mathlib.Dynamics),
  -- (`Mathlib.Topology, `Mathlib.Algebra),
  -- The following are a list of existing non-dependent top-level directory pairs.
  (`Mathlib.Algebra, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Algebra, `Mathlib.Analysis),
  (`Mathlib.Algebra, `Mathlib.Computability),
  (`Mathlib.Algebra, `Mathlib.Condensed),
  (`Mathlib.Algebra, `Mathlib.Geometry),
  (`Mathlib.Algebra, `Mathlib.InformationTheory),
  (`Mathlib.Algebra, `Mathlib.ModelTheory),
  (`Mathlib.Algebra, `Mathlib.RepresentationTheory),
  (`Mathlib.Algebra, `Mathlib.Testing),
  (`Mathlib.AlgebraicGeometry, `Mathlib.AlgebraicTopology),
  (`Mathlib.AlgebraicGeometry, `Mathlib.Analysis),
  (`Mathlib.AlgebraicGeometry, `Mathlib.Computability),
  (`Mathlib.AlgebraicGeometry, `Mathlib.Condensed),
  (`Mathlib.AlgebraicGeometry, `Mathlib.InformationTheory),
  (`Mathlib.AlgebraicGeometry, `Mathlib.MeasureTheory),
  (`Mathlib.AlgebraicGeometry, `Mathlib.ModelTheory),
  (`Mathlib.AlgebraicGeometry, `Mathlib.Probability),
  (`Mathlib.AlgebraicGeometry, `Mathlib.RepresentationTheory),
  (`Mathlib.AlgebraicGeometry, `Mathlib.Testing),
  (`Mathlib.AlgebraicTopology, `Mathlib.AlgebraicGeometry),
  (`Mathlib.AlgebraicTopology, `Mathlib.Computability),
  (`Mathlib.AlgebraicTopology, `Mathlib.Condensed),
  (`Mathlib.AlgebraicTopology, `Mathlib.FieldTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.Geometry),
  (`Mathlib.AlgebraicTopology, `Mathlib.InformationTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.MeasureTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.ModelTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.NumberTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.Probability),
  (`Mathlib.AlgebraicTopology, `Mathlib.RepresentationTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.SetTheory),
  (`Mathlib.AlgebraicTopology, `Mathlib.Testing),
  (`Mathlib.Analysis, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Analysis, `Mathlib.AlgebraicTopology),
  (`Mathlib.Analysis, `Mathlib.Computability),
  (`Mathlib.Analysis, `Mathlib.Condensed),
  (`Mathlib.Analysis, `Mathlib.InformationTheory),
  (`Mathlib.Analysis, `Mathlib.ModelTheory),
  (`Mathlib.Analysis, `Mathlib.RepresentationTheory),
  (`Mathlib.Analysis, `Mathlib.Testing),
  (`Mathlib.CategoryTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.CategoryTheory, `Mathlib.Analysis),
  (`Mathlib.CategoryTheory, `Mathlib.Computability),
  (`Mathlib.CategoryTheory, `Mathlib.Condensed),
  (`Mathlib.CategoryTheory, `Mathlib.Geometry),
  (`Mathlib.CategoryTheory, `Mathlib.InformationTheory),
  (`Mathlib.CategoryTheory, `Mathlib.MeasureTheory),
  (`Mathlib.CategoryTheory, `Mathlib.ModelTheory),
  (`Mathlib.CategoryTheory, `Mathlib.Probability),
  (`Mathlib.CategoryTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.CategoryTheory, `Mathlib.Testing),
  (`Mathlib.Combinatorics, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Combinatorics, `Mathlib.AlgebraicTopology),
  (`Mathlib.Combinatorics, `Mathlib.Computability),
  (`Mathlib.Combinatorics, `Mathlib.Condensed),
  (`Mathlib.Combinatorics, `Mathlib.Geometry.Euclidean),
  (`Mathlib.Combinatorics, `Mathlib.Geometry.Group),
  (`Mathlib.Combinatorics, `Mathlib.Geometry.Manifold),
  (`Mathlib.Combinatorics, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.Combinatorics, `Mathlib.InformationTheory),
  (`Mathlib.Combinatorics, `Mathlib.MeasureTheory),
  (`Mathlib.Combinatorics, `Mathlib.ModelTheory),
  (`Mathlib.Combinatorics, `Mathlib.Probability),
  (`Mathlib.Combinatorics, `Mathlib.RepresentationTheory),
  (`Mathlib.Combinatorics, `Mathlib.Testing),
  (`Mathlib.Computability, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Computability, `Mathlib.AlgebraicTopology),
  (`Mathlib.Computability, `Mathlib.CategoryTheory),
  (`Mathlib.Computability, `Mathlib.Condensed),
  (`Mathlib.Computability, `Mathlib.FieldTheory),
  (`Mathlib.Computability, `Mathlib.Geometry),
  (`Mathlib.Computability, `Mathlib.InformationTheory),
  (`Mathlib.Computability, `Mathlib.MeasureTheory),
  (`Mathlib.Computability, `Mathlib.ModelTheory),
  (`Mathlib.Computability, `Mathlib.Probability),
  (`Mathlib.Computability, `Mathlib.RepresentationTheory),
  (`Mathlib.Computability, `Mathlib.Testing),
  (`Mathlib.Condensed, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Condensed, `Mathlib.AlgebraicTopology),
  (`Mathlib.Condensed, `Mathlib.Computability),
  (`Mathlib.Condensed, `Mathlib.FieldTheory),
  (`Mathlib.Condensed, `Mathlib.Geometry),
  (`Mathlib.Condensed, `Mathlib.InformationTheory),
  (`Mathlib.Condensed, `Mathlib.MeasureTheory),
  (`Mathlib.Condensed, `Mathlib.ModelTheory),
  (`Mathlib.Condensed, `Mathlib.Probability),
  (`Mathlib.Condensed, `Mathlib.RepresentationTheory),
  (`Mathlib.Condensed, `Mathlib.Testing),
  (`Mathlib.Control, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Control, `Mathlib.AlgebraicTopology),
  (`Mathlib.Control, `Mathlib.Analysis),
  (`Mathlib.Control, `Mathlib.Computability),
  (`Mathlib.Control, `Mathlib.Condensed),
  (`Mathlib.Control, `Mathlib.FieldTheory),
  (`Mathlib.Control, `Mathlib.Geometry),
  (`Mathlib.Control, `Mathlib.GroupTheory),
  (`Mathlib.Control, `Mathlib.InformationTheory),
  (`Mathlib.Control, `Mathlib.LinearAlgebra),
  (`Mathlib.Control, `Mathlib.MeasureTheory),
  (`Mathlib.Control, `Mathlib.ModelTheory),
  (`Mathlib.Control, `Mathlib.NumberTheory),
  (`Mathlib.Control, `Mathlib.Probability),
  (`Mathlib.Control, `Mathlib.RepresentationTheory),
  (`Mathlib.Control, `Mathlib.RingTheory),
  (`Mathlib.Control, `Mathlib.SetTheory),
  (`Mathlib.Control, `Mathlib.Testing),
  (`Mathlib.Control, `Mathlib.Topology),
  (`Mathlib.Data, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Data, `Mathlib.AlgebraicTopology),
  (`Mathlib.Data, `Mathlib.Analysis),
  (`Mathlib.Data, `Mathlib.Computability),
  (`Mathlib.Data, `Mathlib.Condensed),
  (`Mathlib.Data, `Mathlib.FieldTheory),
  (`Mathlib.Data, `Mathlib.Geometry.Euclidean),
  (`Mathlib.Data, `Mathlib.Geometry.Group),
  (`Mathlib.Data, `Mathlib.Geometry.Manifold),
  (`Mathlib.Data, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.Data, `Mathlib.InformationTheory),
  (`Mathlib.Data, `Mathlib.ModelTheory),
  (`Mathlib.Data, `Mathlib.RepresentationTheory),
  (`Mathlib.Data, `Mathlib.Testing),
  (`Mathlib.Dynamics, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Dynamics, `Mathlib.AlgebraicTopology),
  (`Mathlib.Dynamics, `Mathlib.CategoryTheory),
  (`Mathlib.Dynamics, `Mathlib.Computability),
  (`Mathlib.Dynamics, `Mathlib.Condensed),
  (`Mathlib.Dynamics, `Mathlib.Geometry.Euclidean),
  (`Mathlib.Dynamics, `Mathlib.Geometry.Group),
  (`Mathlib.Dynamics, `Mathlib.Geometry.Manifold),
  (`Mathlib.Dynamics, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.Dynamics, `Mathlib.InformationTheory),
  (`Mathlib.Dynamics, `Mathlib.ModelTheory),
  (`Mathlib.Dynamics, `Mathlib.RepresentationTheory),
  (`Mathlib.Dynamics, `Mathlib.Testing),
  (`Mathlib.FieldTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.FieldTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.FieldTheory, `Mathlib.Condensed),
  (`Mathlib.FieldTheory, `Mathlib.Geometry),
  (`Mathlib.FieldTheory, `Mathlib.InformationTheory),
  (`Mathlib.FieldTheory, `Mathlib.MeasureTheory),
  (`Mathlib.FieldTheory, `Mathlib.Probability),
  (`Mathlib.FieldTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.FieldTheory, `Mathlib.Testing),
  (`Mathlib.Geometry, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Geometry, `Mathlib.Computability),
  (`Mathlib.Geometry, `Mathlib.Condensed),
  (`Mathlib.Geometry, `Mathlib.InformationTheory),
  (`Mathlib.Geometry, `Mathlib.ModelTheory),
  (`Mathlib.Geometry, `Mathlib.RepresentationTheory),
  (`Mathlib.Geometry, `Mathlib.Testing),
  (`Mathlib.GroupTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.GroupTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.GroupTheory, `Mathlib.Analysis),
  (`Mathlib.GroupTheory, `Mathlib.Computability),
  (`Mathlib.GroupTheory, `Mathlib.Condensed),
  (`Mathlib.GroupTheory, `Mathlib.Geometry),
  (`Mathlib.GroupTheory, `Mathlib.InformationTheory),
  (`Mathlib.GroupTheory, `Mathlib.MeasureTheory),
  (`Mathlib.GroupTheory, `Mathlib.ModelTheory),
  (`Mathlib.GroupTheory, `Mathlib.Probability),
  (`Mathlib.GroupTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.GroupTheory, `Mathlib.Testing),
  (`Mathlib.GroupTheory, `Mathlib.Topology),
  (`Mathlib.InformationTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.InformationTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.InformationTheory, `Mathlib.CategoryTheory),
  (`Mathlib.InformationTheory, `Mathlib.Computability),
  (`Mathlib.InformationTheory, `Mathlib.Condensed),
  (`Mathlib.InformationTheory, `Mathlib.Geometry.Euclidean),
  (`Mathlib.InformationTheory, `Mathlib.Geometry.Group),
  (`Mathlib.InformationTheory, `Mathlib.Geometry.Manifold),
  (`Mathlib.InformationTheory, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.InformationTheory, `Mathlib.ModelTheory),
  (`Mathlib.InformationTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.InformationTheory, `Mathlib.Testing),
  (`Mathlib.LinearAlgebra, `Mathlib.AlgebraicGeometry),
  (`Mathlib.LinearAlgebra, `Mathlib.AlgebraicTopology),
  (`Mathlib.LinearAlgebra, `Mathlib.Computability),
  (`Mathlib.LinearAlgebra, `Mathlib.Condensed),
  (`Mathlib.LinearAlgebra, `Mathlib.Geometry.Euclidean),
  (`Mathlib.LinearAlgebra, `Mathlib.Geometry.Group),
  (`Mathlib.LinearAlgebra, `Mathlib.Geometry.Manifold),
  (`Mathlib.LinearAlgebra, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.LinearAlgebra, `Mathlib.InformationTheory),
  (`Mathlib.LinearAlgebra, `Mathlib.MeasureTheory),
  (`Mathlib.LinearAlgebra, `Mathlib.ModelTheory),
  (`Mathlib.LinearAlgebra, `Mathlib.Probability),
  (`Mathlib.LinearAlgebra, `Mathlib.Testing),
  (`Mathlib.LinearAlgebra, `Mathlib.Topology),
  (`Mathlib.MeasureTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.MeasureTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.MeasureTheory, `Mathlib.Computability),
  (`Mathlib.MeasureTheory, `Mathlib.Condensed),
  (`Mathlib.MeasureTheory, `Mathlib.Geometry.Euclidean),
  (`Mathlib.MeasureTheory, `Mathlib.Geometry.Group),
  (`Mathlib.MeasureTheory, `Mathlib.Geometry.Manifold),
  (`Mathlib.MeasureTheory, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.MeasureTheory, `Mathlib.InformationTheory),
  (`Mathlib.MeasureTheory, `Mathlib.ModelTheory),
  (`Mathlib.MeasureTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.MeasureTheory, `Mathlib.Testing),
  (`Mathlib.ModelTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.ModelTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.ModelTheory, `Mathlib.Analysis),
  (`Mathlib.ModelTheory, `Mathlib.Condensed),
  (`Mathlib.ModelTheory, `Mathlib.Geometry),
  (`Mathlib.ModelTheory, `Mathlib.InformationTheory),
  (`Mathlib.ModelTheory, `Mathlib.MeasureTheory),
  (`Mathlib.ModelTheory, `Mathlib.Probability),
  (`Mathlib.ModelTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.ModelTheory, `Mathlib.Testing),
  (`Mathlib.ModelTheory, `Mathlib.Topology),
  (`Mathlib.NumberTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.NumberTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.NumberTheory, `Mathlib.Computability),
  (`Mathlib.NumberTheory, `Mathlib.Condensed),
  (`Mathlib.NumberTheory, `Mathlib.InformationTheory),
  (`Mathlib.NumberTheory, `Mathlib.ModelTheory),
  (`Mathlib.NumberTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.NumberTheory, `Mathlib.Testing),
  (`Mathlib.Order, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Order, `Mathlib.AlgebraicTopology),
  (`Mathlib.Order, `Mathlib.Computability),
  (`Mathlib.Order, `Mathlib.Condensed),
  (`Mathlib.Order, `Mathlib.FieldTheory),
  (`Mathlib.Order, `Mathlib.Geometry),
  (`Mathlib.Order, `Mathlib.InformationTheory),
  (`Mathlib.Order, `Mathlib.MeasureTheory),
  (`Mathlib.Order, `Mathlib.ModelTheory),
  (`Mathlib.Order, `Mathlib.NumberTheory),
  (`Mathlib.Order, `Mathlib.Probability),
  (`Mathlib.Order, `Mathlib.RepresentationTheory),
  (`Mathlib.Order, `Mathlib.Testing),
  (`Mathlib.Probability, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Probability, `Mathlib.AlgebraicTopology),
  (`Mathlib.Probability, `Mathlib.CategoryTheory),
  (`Mathlib.Probability, `Mathlib.Computability),
  (`Mathlib.Probability, `Mathlib.Condensed),
  (`Mathlib.Probability, `Mathlib.Geometry.Euclidean),
  (`Mathlib.Probability, `Mathlib.Geometry.Group),
  (`Mathlib.Probability, `Mathlib.Geometry.Manifold),
  (`Mathlib.Probability, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.Probability, `Mathlib.InformationTheory),
  (`Mathlib.Probability, `Mathlib.ModelTheory),
  (`Mathlib.Probability, `Mathlib.RepresentationTheory),
  (`Mathlib.Probability, `Mathlib.Testing),
  (`Mathlib.RepresentationTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.RepresentationTheory, `Mathlib.Analysis),
  (`Mathlib.RepresentationTheory, `Mathlib.Computability),
  (`Mathlib.RepresentationTheory, `Mathlib.Condensed),
  (`Mathlib.RepresentationTheory, `Mathlib.Geometry),
  (`Mathlib.RepresentationTheory, `Mathlib.InformationTheory),
  (`Mathlib.RepresentationTheory, `Mathlib.MeasureTheory),
  (`Mathlib.RepresentationTheory, `Mathlib.ModelTheory),
  (`Mathlib.RepresentationTheory, `Mathlib.Probability),
  (`Mathlib.RepresentationTheory, `Mathlib.Testing),
  (`Mathlib.RepresentationTheory, `Mathlib.Topology),
  (`Mathlib.RingTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.RingTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.RingTheory, `Mathlib.Computability),
  (`Mathlib.RingTheory, `Mathlib.Condensed),
  (`Mathlib.RingTheory, `Mathlib.Geometry.Euclidean),
  (`Mathlib.RingTheory, `Mathlib.Geometry.Group),
  (`Mathlib.RingTheory, `Mathlib.Geometry.Manifold),
  (`Mathlib.RingTheory, `Mathlib.Geometry.RingedSpace),
  (`Mathlib.RingTheory, `Mathlib.InformationTheory),
  (`Mathlib.RingTheory, `Mathlib.ModelTheory),
  (`Mathlib.RingTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.RingTheory, `Mathlib.Testing),
  (`Mathlib.SetTheory, `Mathlib.AlgebraicGeometry),
  (`Mathlib.SetTheory, `Mathlib.AlgebraicTopology),
  (`Mathlib.SetTheory, `Mathlib.Analysis),
  (`Mathlib.SetTheory, `Mathlib.CategoryTheory),
  (`Mathlib.SetTheory, `Mathlib.Combinatorics),
  (`Mathlib.SetTheory, `Mathlib.Computability),
  (`Mathlib.SetTheory, `Mathlib.Condensed),
  (`Mathlib.SetTheory, `Mathlib.FieldTheory),
  (`Mathlib.SetTheory, `Mathlib.Geometry),
  (`Mathlib.SetTheory, `Mathlib.InformationTheory),
  (`Mathlib.SetTheory, `Mathlib.MeasureTheory),
  (`Mathlib.SetTheory, `Mathlib.ModelTheory),
  (`Mathlib.SetTheory, `Mathlib.Probability),
  (`Mathlib.SetTheory, `Mathlib.RepresentationTheory),
  (`Mathlib.SetTheory, `Mathlib.Testing),
  (`Mathlib.Topology, `Mathlib.AlgebraicGeometry),
  (`Mathlib.Topology, `Mathlib.Computability),
  (`Mathlib.Topology, `Mathlib.Condensed),
  (`Mathlib.Topology, `Mathlib.Geometry),
  (`Mathlib.Topology, `Mathlib.InformationTheory),
  (`Mathlib.Topology, `Mathlib.ModelTheory),
  (`Mathlib.Topology, `Mathlib.Probability),
  (`Mathlib.Topology, `Mathlib.RepresentationTheory),
  (`Mathlib.Topology, `Mathlib.Testing),
  ]
/-- `overrideAllowedImportDirs` relates module prefixes, specifying that modules with the first
prefix are allowed to import modules with the second prefix, even if disallowed in
`forbiddenImportDirs`.
For example, ``(`Mathlib.Algebra.Notation, `Mathlib.Algebra)`` is in `forbiddenImportDirs` and
``(`Mathlib.Algebra.Notation, `Mathlib.Algebra.Notation)`` is in `overrideAllowedImportDirs`
because modules in `Mathlib/Algebra/Notation.lean` cannot import modules in `Mathlib.Algebra` that are
outside `Mathlib/Algebra/Notation.lean`.
Each entry is a pair `(importing-module prefix, allowed-import prefix)`.
-/
def overrideAllowedImportDirs : NamePrefixRel := .ofArray #[
  (`Mathlib.Algebra.Lie, `Mathlib.RepresentationTheory),
  (`Mathlib.Algebra.Module.ZLattice, `Mathlib.Analysis),
  (`Mathlib.Algebra.Notation, `Mathlib.Algebra.Notation),
  (`Mathlib.Deprecated, `Mathlib.Deprecated),
  (`Mathlib.LinearAlgebra.Complex, `Mathlib.Topology), -- Complex numbers are analysis/topology.
  (`Mathlib.LinearAlgebra.Matrix, `Mathlib.Topology), -- For e.g. spectra.
  (`Mathlib.LinearAlgebra.QuadraticForm, `Mathlib.Topology), -- For real/complex quadratic forms.
  (`Mathlib.LinearAlgebra.SesquilinearForm, `Mathlib.Topology), -- for links with positive semidefinite matrices
  (`Mathlib.Topology.Algebra, `Mathlib.Algebra),
  (`Mathlib.Topology.Compactification, `Mathlib.Geometry.Manifold)
  ]
end DirectoryDependency
open DirectoryDependency
/-- Check if one of the imports `imports` to `mainModule` is forbidden by `forbiddenImportDirs`;
if so, return an error describing how the import transitively arises.
Returns `none` when no forbidden import is found, or when the offending pair is explicitly
re-allowed by `overrideAllowedImportDirs`. -/
private def checkBlocklist (env : Environment) (mainModule : Name) (imports : Array Name) : Option MessageData := Id.run do
match forbiddenImportDirs.findAny mainModule imports with
| some (n₁, n₂) => do
-- `findAny` reported that some import matches the forbidden prefix `n₂`;
-- recover the concrete module name carrying that prefix.
if let some imported := n₂.prefixToName imports then
-- An entry in `overrideAllowedImportDirs` takes precedence over the blocklist.
if !overrideAllowedImportDirs.contains mainModule imported then
let mut msg := m!"Modules starting with {n₁} are not allowed to import modules starting with {n₂}. \
This module depends on {imported}\n"
-- Spell out the transitive import chain so the user can locate the offending edge.
for dep in env.importPath imported do
msg := msg ++ m!"which is imported by {dep},\n"
return some (msg ++ m!"which is imported by this module. \
(Exceptions can be added to `overrideAllowedImportDirs`.)")
else none
else
-- `findAny` claimed a match, so failing to resolve the prefix to a module is a linter bug.
return some m!"Internal error in `directoryDependency` linter: this module claims to depend \
on a module starting with {n₂} but a module with that prefix was not found in the import graph."
| none => none
@[inherit_doc Mathlib.Linter.linter.directoryDependency]
def directoryDependencyCheck (mainModule : Name) : CommandElabM (Array MessageData) := do
  -- Skip all work when the linter is disabled.
  unless Linter.getLinterValue linter.directoryDependency (← getLinterOptions) do
    return #[]
  let env ← getEnv
  let imports := env.allImportedModuleNames
  -- If this module is in the allow-list, we only allow imports from directories specified there.
  -- Collect all prefixes which have a matching entry.
  let matchingPrefixes := mainModule.prefixes.filter (fun prf ↦ allowedImportDirs.containsKey prf)
  if matchingPrefixes.isEmpty then
    -- Otherwise, we fall back to the blocklist `forbiddenImportDirs`.
    if let some msg := checkBlocklist env mainModule imports then return #[msg] else return #[]
  else
    -- We always allow imports in the same directory (for each matching prefix),
    -- from `Init`, `Lean` and `Std`, as well as imports in `Aesop`, `Qq`, `Plausible`,
    -- `ImportGraph`, `ProofWidgets` or `LeanSearchClient` (as these are imported in Tactic.Common).
    -- We also allow transitive imports of Mathlib.Init, as well as Mathlib.Init itself.
    let initImports := (← findImports ("Mathlib" / "Init.lean")).append
      #[`Mathlib.Init, `Mathlib.Tactic.DeclarationNames]
    let exclude := [
      `Init, `Std, `Lean,
      `Aesop, `Qq, `Plausible, `ImportGraph, `ProofWidgets, `LeanSearchClient
    ]
    let importsToCheck := imports.filter (fun imp ↦ !exclude.any (·.isPrefixOf imp))
      |>.filter (fun imp ↦ !matchingPrefixes.any (·.isPrefixOf imp))
      |>.filter (!initImports.contains ·)
    -- Find all prefixes which are allowed for one of these directories.
    let allRules := allowedImportDirs.getAllLeft mainModule
    -- Error about those imports which are not covered by allowedImportDirs.
    let mut messages := #[]
    for imported in importsToCheck do
      if !allowedImportDirs.contains mainModule imported then
        let importPath := env.importPath imported
        let mut msg := m!"Module {mainModule} depends on {imported},\n\
          but is only allowed to import modules starting with one of \
          {allRules.toArray.qsort (·.toString < ·.toString)}.\n\
          Note: module {imported}"
        let mut superseded := false
        match importPath.toList with
        | [] => msg := msg ++ " is directly imported by this module"
        | a :: rest =>
          -- Only add messages about imports that aren't themselves transitive imports of
          -- forbidden imports.
          -- This should prevent redundant messages.
          if !allowedImportDirs.contains mainModule a then
            superseded := true
          else
            msg := msg ++ s!" is imported by {a},\n"
            for dep in rest do
              if !allowedImportDirs.contains mainModule dep then
                superseded := true
                break
              msg := msg ++ m!"which is imported by {dep},\n"
            msg := msg ++ m!"which is imported by this module."
        -- Leading space fixes the message running together as "...module.(Exceptions ...",
        -- matching the formatting used by `checkBlocklist`.
        msg := msg ++ " (Exceptions can be added to `allowedImportDirs`.)"
        if !superseded then
          messages := messages.push msg
    return messages
end Mathlib.Linter
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.