source
stringlengths
17
118
lean4
stringlengths
0
335k
reference-manual/Manual/Meta/LakeToml.lean
import Lean.Elab.Command import Lean.Elab.InfoTree import Verso import Verso.Doc.ArgParse import Verso.Doc.Elab.Monad import VersoManual import Verso.Code import SubVerso.Highlighting import SubVerso.Examples import Manual.Meta.Basic import Manual.Meta.ExpectString import Manual.Meta.LakeToml.Toml import Manual.Meta.LakeToml.Test import Lake.Toml.Decode import Lake.Load.Toml open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets open Lean Elab open SubVerso.Highlighting Highlighted open scoped Lean.Doc.Syntax open Lean.Elab.Tactic.GuardMsgs set_option guard_msgs.diff true namespace Manual namespace Toml inductive FieldType where | string | stringOf (what : String) | version | path | array (contents : FieldType) | oneOf (choices : List String) | option (t : FieldType) | bool | target | other (name : Name) (what : String) (whatPlural : Option String) deriving Repr, ToJson, FromJson open Lean Syntax in instance : Quote FieldType where quote := q where q : FieldType → Term | .string => mkCApp ``FieldType.string #[] | .stringOf x => mkCApp ``FieldType.stringOf #[quote x] | .version => mkCApp ``FieldType.version #[] | .path => mkCApp ``FieldType.path #[] | .array x => mkCApp ``FieldType.array #[q x] | .oneOf cs => mkCApp ``FieldType.oneOf #[quote cs] | .option x => mkCApp ``FieldType.option #[q x] | .bool => mkCApp ``FieldType.bool #[] | .target => mkCApp ``FieldType.target #[] | .other x y z => mkCApp ``FieldType.other #[quote x, quote y, quote z] open Output Html in def FieldType.toHtml (plural : Bool := false) : FieldType → Html | .string => if plural then "strings" else "String" | .stringOf x => s!"{x} (as string{if plural then "s" else ""}" | .version => if plural then "version strings" else "Version string" | .path => if plural then "paths" else "Path" | .array t => (if plural then "arrays of " else "Array of ") ++ t.toHtml true | .other _ y none => if plural then y ++ "s" else y | .other _ y (some z) => if plural then z else y | .bool => if 
plural then "Booleans" else "Boolean" | .target => if plural then "targets" else "target" | .option t => t.toHtml ++ " (optional)" | .oneOf xs => let opts := xs |>.map ({{<code>{{show Html from .text true s!"\"{·}\""}}</code>}}) |>.intersperse {{", "}} {{"one of " {{opts}} }} structure Field (α) where name : Name type : FieldType docs? : Option α deriving Repr, ToJson, FromJson instance : Functor Field where map f | ⟨n, t, d?⟩ => ⟨n, t, d?.map f⟩ def Field.mapM [Monad m] (f : α → m β) : Field α → m (Field β) | ⟨n, t, d?⟩ => d?.mapM f <&> (⟨n, t, ·⟩) def Field.takeDocs (f : Field α) : Field Empty × Option α := ({f with docs? := none}, f.docs?) open Lean Syntax in instance [Quote α] : Quote (Field α) where quote | ⟨n, t, d?⟩ => mkCApp ``Field.mk #[quote n, quote t, quote d?] structure Table (α) where name : String typeName : Name fields : Array (Field α) deriving Repr open Lean Syntax in instance [Quote α] : Quote (Table α) where quote | ⟨n, tn, fs⟩ => mkCApp ``Table.mk #[quote n, quote tn, quote fs] end Toml def Block.tomlFieldCategory (title : String) (fields : List Name) : Block where name := `Manual.Block.tomlFieldCategory data := .arr #[.str title, toJson fields] def Block.tomlField (sort : Option Nat) (inTable : Name) (field : Toml.Field Empty) : Block where name := `Manual.Block.tomlField data := ToJson.toJson (sort, inTable, field) def Inline.tomlField (inTable : Name) (field : Name) : Inline where name := `Manual.Inline.tomlField data := ToJson.toJson (inTable, field) def Block.tomlTable (arrayKey : Option String) (name : String) (typeName : Name) : Block where name := `Manual.Block.tomlTable data := ToJson.toJson (arrayKey, name, typeName) structure TomlFieldOpts where inTable : Name field : Name typeDesc : String typeDescPlural : String type : Name sort : Option Nat instance [Inhabited α] [Applicative f] : Inhabited (f α) where default := pure default @[specialize] private partial def many [Applicative f] [Alternative f] (p : f α) : f (List α) := ((· :: ·) 
<$> p <*> many p) <|> pure [] def TomlFieldOpts.parse [Monad m] [MonadError m] [MonadLiftT CoreM m] : ArgParse m TomlFieldOpts := TomlFieldOpts.mk <$> .positional `inTable .name <*> .positional `field .name <*> .positional `typeDesc .string <*> .positional `typeDescPlural .string <*> .positional `type .resolvedName <*> .named `sort .nat true instance : Quote Empty where quote := nofun @[directive_expander tomlField] def tomlField : DirectiveExpander | args, contents => do let {inTable, field, typeDesc, typeDescPlural, type, sort} ← TomlFieldOpts.parse.run args let field : Toml.Field Empty := {name := field, type := .other type typeDesc typeDescPlural, docs? := none} let contents ← contents.mapM elabBlock return #[← ``(Block.other (Block.tomlField $(quote sort) $(quote inTable) $(quote field)) #[$contents,*])] open Verso.Search in def tomlTableDomainMapper := { displayName := "Lake TOML Table", className := "lake-toml-table-domain", dataToSearchables := "(domainData) => Object.entries(domainData.contents).map(([key, value]) => { let arrayKey = value[0].data.arrayKey; let arr = arrayKey ? `[[${arrayKey}]] — ` : ''; return { searchKey: arr + value[0].data.description, address: `${value[0].address}#${value[0].id}`, domainId: 'Manual.lakeTomlTable', ref: value, }}) " : DomainMapper }.setFont { family := .code } open Verso.Search in def tomlFieldDomainMapper := { displayName := "Lake TOML Field", className := "lake-toml-field-domain", dataToSearchables := "(domainData) => Object.entries(domainData.contents).map(([key, value]) => { let tableArrayKey = value[0].data.tableArrayKey; let arr = tableArrayKey ? 
`[[${tableArrayKey}]]` : 'package configuration'; return { searchKey: `${value[0].data.field} in ${arr}`, address: `${value[0].address}#${value[0].id}`, domainId: 'Manual.lakeTomlField', ref: value, }})" : DomainMapper }.setFont { family := .code } @[block_extension Block.tomlField] def Block.tomlField.descr : BlockDescr where init s := s.addQuickJumpMapper tomlFieldDomain tomlFieldDomainMapper traverse id info _ := do let .ok (_, inTable, field) := FromJson.fromJson? (α := Option Nat × Name × Toml.Field Empty) info | do logError "Failed to deserialize field doc data"; pure none let tableArrayKey : Option Json := (← get).getDomainObject? tomlTableDomain inTable.toString |>.bind fun t => t.data.getObjVal? "arrayKey" |>.toOption modify fun s => let name := s!"{inTable} {field.name}" s |>.saveDomainObject tomlFieldDomain name id |>.saveDomainObjectData tomlFieldDomain name (json%{ "table": $inTable.toString, "tableArrayKey": $(tableArrayKey.getD .null), "field": $field.name.toString }) discard <| externalTag id (← read).path s!"{inTable}-{field.name}" pure none toTeX := none extraCss := [".namedocs .label a { color: inherit; }"] toHtml := some <| fun _goI goB id info contents => open Verso.Doc.Html in open Verso.Output Html in do let .ok (_, _inTable, field) := FromJson.fromJson? (α := Option Nat × Name × Toml.Field Empty) info | do Verso.Doc.Html.HtmlT.logError "Failed to deserialize field doc data"; pure .empty let sig : Html := {{ {{field.name.toString}} }} let xref ← HtmlT.state let idAttr := xref.htmlId id return {{ <dt {{idAttr}}> <code class="field-name">{{sig}}</code> </dt> <dd> <p><strong>"Contains:"</strong>" " {{field.type.toHtml}}</p> {{← contents.mapM goB}} </dd> }} localContentItem _ info _ := open Verso.Output Html in do let (_, _inTable, field) ← FromJson.fromJson? 
(α := Option Nat × Name × Toml.Field Empty) info let name := field.name.toString pure #[ (name, {{<code class="field-name">{{name}}</code>}}) ] private partial def flattenBlocks (blocks : Array (Block genre)) : Array (Block genre) := blocks.flatMap fun | .concat bs => flattenBlocks bs | other => #[other] structure TomlFieldCategoryOpts where title : String fields : List Name def TomlFieldCategoryOpts.parse [Monad m] [MonadError m] : ArgParse m TomlFieldCategoryOpts := TomlFieldCategoryOpts.mk <$> .positional `title .string <*> many (.positional `field .name) @[directive_expander tomlFieldCategory] def tomlFieldCategory : DirectiveExpander | args, contents => do let {title, fields} ← TomlFieldCategoryOpts.parse.run args let contents ← contents.mapM elabBlock return #[← ``(Block.other (Block.tomlFieldCategory $(quote title) $(quote fields)) #[$contents,*])] @[block_extension Block.tomlFieldCategory] def Block.tomlFieldCategory.descr : BlockDescr where traverse _id _info _ := pure none toTeX := none extraCss := [r#" .field-category > :first-child { } .field-category > :not(:first-child) { margin-left: 1rem; } "# ] toHtml := some <| fun _goI goB _id info contents => open Verso.Doc.Html in open Verso.Output Html in do let .arr #[.str title, _fields] := info | do Verso.Doc.Html.HtmlT.logError "Failed to deserialize field category doc data"; pure .empty let (nonField, field) := flattenBlocks contents |>.partition fun | .other {name := `Manual.Block.tomlField, ..} _ => false | _ => true return {{ <div class="field-category"> <p><strong>{{title}}":"</strong></p> {{← nonField.mapM goB}} <dl> {{← field.mapM goB}} </dl> </div> }} @[block_extension Block.tomlTable] def Block.tomlTable.descr : BlockDescr where init s := s.addQuickJumpMapper tomlTableDomain tomlTableDomainMapper traverse id info _ := do let .ok (arrayKey, humanName, typeName) := FromJson.fromJson? 
(α := Option String × String × Name) info | do logError "Failed to deserialize FFI doc data"; pure none let arrayKeyJson := arrayKey.map Json.str |>.getD Json.null modify fun s => s |>.saveDomainObject tomlTableDomain typeName.toString id |>.saveDomainObjectData tomlTableDomain typeName.toString (json%{"description": $humanName, "type": $typeName.toString, "arrayKey": $arrayKeyJson}) discard <| externalTag id (← read).path typeName.toString pure none toTeX := none extraCss := [ r#" dl.toml-table-field-spec { } "# ] toHtml := some <| fun _goI goB id info contents => open Verso.Doc.Html in open Verso.Output Html in do let .ok (arrayKey, humanName, typeName) := FromJson.fromJson? (α := Option String × String × Name) info | do Verso.Doc.Html.HtmlT.logError "Failed to deserialize Lake TOML table doc data"; pure .empty let tableArrayName : Option Toml.Highlighted := arrayKey.map fun k => .tableHeader <| .tableDelim (.text "[[") ++ .tableName (some typeName.toString) (.key (some k) (.text k)) ++ .tableDelim (.text "]]") -- Don't include links here because they'd just be self-links anyway let tableArrayName : Option Html := tableArrayName.map (Toml.Highlighted.toHtml (fun _ => none) (fun _ _ => none)) let sig : Html := {{ {{humanName}} {{tableArrayName.map ({{" — " <code class="toml">{{·}}</code> }}) |>.getD .empty }} }} let xref ← HtmlT.state let idAttr := xref.htmlId id let (categories, contents) := flattenBlocks contents |>.partition (· matches Block.other {name := `Manual.Block.tomlFieldCategory, ..} _) let categories := categories.map fun | blk@(Block.other {name := `Manual.Block.tomlFieldCategory, data := .arr #[.str title, fields], ..} _) => if let .ok fields := FromJson.fromJson? fields (α := List Name) then (fields, some title, blk) else ([], none, blk) | blk => ([], none, blk) let category? 
(f : Name) : Option String := Id.run do for (fs, title, _) in categories do if f ∈ fs then return title return none -- First partition the inner blocks into unsorted fields, sorted fields, and other blocks let mut fields := #[] let mut sorted := #[] let mut notFields := #[] for f in flattenBlocks contents do if let Block.other {name:=`Manual.Block.tomlField, data, .. : Genre.Manual.Block} .. := f then if let .ok (sort?, _, field) := FromJson.fromJson? (α := Option Nat × Name × Toml.Field Empty) data then if let some sort := sort? then sorted := sorted.push (sort, f, field.name) else fields := fields.push (f, field.name) else notFields := notFields.push f -- Next, find all the categories and the names that they expect let mut categorized : Std.HashMap String (Array (Block Genre.Manual)) := {} let mut uncategorized := #[] for (f, fieldName) in fields do if let some title := category? fieldName then categorized := categorized.insert title <| (categorized.getD title #[]).push f else uncategorized := uncategorized.push f -- Finally, distribute fields into categories, respecting the requested sort orders for (n, f, fieldName) in sorted.qsort (lt := (·.1 < ·.1)) do if let some title := category? 
fieldName then let inCat := categorized.getD title #[] if h : n < inCat.size then categorized := categorized.insert title <| inCat.insertIdx n f else categorized := categorized.insert title <| inCat.push f else if h : n < uncategorized.size then uncategorized := uncategorized.insertIdx n f else uncategorized := uncategorized.push f -- Add the contents of each category to its corresponding block let categories := categories.map fun | (_, some title, .other which contents) => let inCategory := categorized.getD title #[] .other which (contents ++ inCategory) | (_, _, blk) => blk let uncatHtml ← uncategorized.mapM goB let catHtml ← categories.mapM goB let fieldHeader := {{ <p> <strong> {{if categories.isEmpty then "Fields:" else "Other Fields:"}} </strong> </p> }} let fieldHtml := {{ {{if categories.isEmpty then .empty else catHtml}} {{if uncategorized.isEmpty then .empty else {{ <div class="field-category"> {{fieldHeader}} <dl class="toml-table-field-spec"> {{uncatHtml}} </dl> </div> }} }} }} return {{ <div class="namedocs" {{idAttr}}> <span class="label">"TOML table"</span> <pre class="signature">{{sig}}</pre> <div class="text"> {{← notFields.mapM goB}} {{fieldHtml}} </div> </div> }} localContentItem _ info _ := open Verso.Output Html in do let (arrayKey, humanName, typeName) ← FromJson.fromJson? (α := Option String × String × Name) info if let some arrayKey := arrayKey then pure #[(s!"[[{arrayKey}]]", {{<code>s!"[[{arrayKey}]]"</code>}})] else pure #[(humanName, {{ {{humanName}} }})] namespace Toml section open Lean Meta variable {m : Type → Type} variable [Monad m] variable [MonadEnv m] [MonadMCtx m] [MonadWithOptions m] [MonadFileMap m] [MonadError m] variable [MonadQuotation m] variable [MonadControlT MetaM m] [MonadLiftT MetaM m] [MonadLiftT IO m] def buildTypes := ["debug", "relWithDebInfo", "minSizeRel", "release"] -- Fail if more types added theorem builtTypes_exhaustive (isLower : s.decapitalize = s) : s ∈ buildTypes ↔ (Lake.BuildType.ofString? 
s).isSome := by simp only [buildTypes] constructor . intro h simp only [Lake.BuildType.ofString?] split <;> try (simp; done) simp_all . intro h simp [Lake.BuildType.ofString?] at h split at h <;> simp_all def asTable (humanName : String) (n : Name) (skip : List Name := []) : DocElabM Term := do let env ← getEnv if let some (.inductInfo ii) := env.find? n then let allFields := getStructureFieldsFlattened env n (includeSubobjectFields := false) |>.filter (!skip.contains ·) let directFields := getStructureFields env n -- Sort the direct fields first, because that makes the ordering "more intuitive" in the docs let allFields := allFields.filter (directFields.contains ·) ++ allFields.filter (!directFields.contains ·) let ancestry ← getStructureResolutionOrder n let tomlFields : Array (Field (Array Term)) ← forallTelescopeReducing ii.type fun params _ => withLocalDeclD `self (mkAppN (mkConst n (ii.levelParams.map mkLevelParam)) params) fun s => allFields.mapM fun fieldName => do let proj ← mkProjection s fieldName let type ← inferType proj >>= instantiateMVars for struct in ancestry do if let some projFn := getProjFnInfoForField? env struct fieldName then let docs? ← findDocString? env projFn.1 let docs? 
← docs?.mapM fun mdDocs => do let some ast := MD4Lean.parse mdDocs | throwError "Failed to parse docstring as Markdown" -- Here most code elements are not Lean code; don't elaborate them ast.blocks.mapM Markdown.blockFromMarkdown let type' : Option FieldType := if type.isConstOf ``String then some .string else if type.isConstOf ``Name then some .string else if type.isConstOf ``Bool then some .bool else if type.isConstOf ``System.FilePath then some .path else if type.isConstOf ``Lake.WorkspaceConfig then some (.other ``Lake.WorkspaceConfig "Workspace configuration" none) else if type.isConstOf ``Lake.BuildType then some (.oneOf buildTypes) else if type.isConstOf ``Lake.StdVer then some .version else if type.isConstOf ``Lake.StrPat then some (.other ``Lake.StrPat "String pattern" none) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``Lean.LeanOption then some (.array (.other ``Lean.LeanOption "Lean option" none)) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``String then some (.array .string) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``Name then some (.array .string) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``System.FilePath then some (.array .path) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``Lake.PartialBuildKey then some (.array .target) else if type.isAppOfArity ``Option 1 && (type.getArg! 0).isConstOf ``Bool then some (.option .bool) else if type.isAppOfArity ``Option 1 && (type.getArg! 0).isConstOf ``String then some (.option .string) else if type.isAppOfArity ``Option 1 && (type.getArg! 0).isConstOf ``System.FilePath then some (.option .path) else if type.isAppOfArity ``Lake.TargetArray 1 && (type.getArg! 0).isConstOf ``Lake.Dynlib then some (.array (.other ``Lake.Dynlib "dynamic library" "dynamic libraries")) else if type.isAppOfArity ``Lake.TargetArray 1 && (type.getArg! 
0).isConstOf ``System.FilePath then some (.array .path) else none if let some type := type' then return { name := fieldName, type, docs?} else throwError "Can't convert Lean type '{type}' to a field type for '{fieldName}'" throwError "No projection function found for {n}.{fieldName}" ``(Table.mk $(quote humanName) $(quote n) $(quote tomlFields)) else throwError "Not an inductive type: {n}" def Field.toBlock (inTable : Name) (f : Field (Array (Block Genre.Manual))) : Block Genre.Manual := let (f, docs?) := f.takeDocs .other (Block.tomlField none inTable f) (docs?.getD #[]) def Table.toBlock (arrayKey : Option String) (docs : Array (Block Genre.Manual)) (t : Table (Array (Block Genre.Manual))) : Array (Block Genre.Manual) := let (fieldBlocks, notFields) := docs.partition (fun b => b matches Block.other {name:=`Manual.Block.tomlField, .. : Genre.Manual.Block} ..) #[.other (Block.tomlTable arrayKey t.name t.typeName) <| notFields ++ (fieldBlocks ++ t.fields.map (Field.toBlock t.typeName))] end end Toml structure TomlTableOpts where /-- `none` to describe the root of the configuration, or a key that points at a table array to describe a nested entry. -/ arrayKey : Option String description : String name : Name skip : List Name def TomlTableOpts.parse [Monad m] [MonadError m] [MonadLiftT CoreM m] : ArgParse m TomlTableOpts := TomlTableOpts.mk <$> .positional `key arrayKey <*> .positional `description .string <*> .positional `name .resolvedName <*> many (.named `skip .name false) where arrayKey := { description := "'root' for the root table, or a string that contains a key for nested tables", signature := .Ident ∪ .String get | .name n => if n.getId == `root then pure none else throwErrorAt n "Expected 'root' or a string" | .str s => pure (some s.getString) | .num n => throwErrorAt n "Expected 'root' or a string" } open Markdown in /-- Interpret a structure type as a TOML table, and generate docs. 
-/ @[directive_expander tomlTableDocs] def tomlTableDocs : DirectiveExpander | args, contents => do let {arrayKey, description, name, skip} ← TomlTableOpts.parse.run args let docsStx ← match ← Lean.findDocString? (← getEnv) name with | none => throwError m!"No docs found for '{name}'"; pure #[] | some docs => let some ast := MD4Lean.parse docs | throwError "Failed to parse docstring as Markdown" -- Don't render these as ordinary Lean docstrings, because code samples in them -- are usually things like shell commands rather than Lean code. -- TODO: detect and add xref to `lake` subcommands or other fields here. ast.blocks.mapM (blockFromMarkdown (handleHeaders := strongEmphHeaders)) let tableStx ← Toml.asTable description name skip let userContents ← contents.mapM elabBlock return #[← `(Block.concat (Toml.Table.toBlock $(quote arrayKey) #[$(docsStx),* , $userContents,*] $tableStx))] namespace Toml instance [Test α] : Test (Lake.OrdNameMap α) where toString xs := Id.run do let mut out : Std.Format := Std.Format.nil for (k, v) in xs.toTreeMap do out := out ++ .group (.nest 2 <| Test.toString k ++ " ↦" ++ .line ++ Test.toString v) ++ "," ++ .line return .group (.nest 2 <| "{" ++ out) ++ "}" instance [{n : Name} → Test (f n)] : Test (Lake.DNameMap f) where toString xs := Id.run do let mut out : Std.Format := Std.Format.nil for ⟨k, v⟩ in xs do out := out ++ .group (.nest 2 <| Test.toString k ++ " ↦" ++ .line ++ Test.toString v) ++ "," ++ .line return .group (.nest 2 <| "{" ++ out) ++ "}" mutual partial def testPatDescr [Test α] [Test β] : (Lake.PatternDescr α β) → Std.Format | .not x => .group <| .nest 2 <| ".not" ++ .line ++ testPat x | .coe x => .group <| .nest 2 <| ".coe" ++ .line ++ Test.toString x | .any xs => .group <| .nest 2 <| ".any" ++ .line ++ "#[" ++ arrElems (xs.map testPat) ++ "]" | .all xs => .group <| .nest 2 <| ".all" ++ .line ++ "#[" ++ arrElems (xs.map testPat) ++ "]" where arrElems (xs : Array Std.Format) : Std.Format := .group <| .nest 2 <| 
(Std.Format.text "," ++ .line).joinSep xs.toList partial def testPat [Test α] [Test β] : (Lake.Pattern α β) → Std.Format | {filter, name, descr?} => let fields : List Std.Format := [ "filter :=" ++ .line ++ Test.toString filter, "name :=" ++ .line ++ Test.toString name, "descr? :=" ++ .line ++ Test.toString (descr?.map testPatDescr), ] .group <| (.nest 2 <| "{" ++ .line ++ (Std.Format.text "," ++ .line).joinSep fields) ++ "}" end instance [Test α] [Test β] : Test (Lake.PatternDescr α β) := ⟨testPatDescr⟩ instance [Test α] [Test β] : Test (Lake.Pattern α β) := ⟨testPat⟩ deriving instance Repr for Lake.StrPatDescr instance : Test (Lake.Script) where toString s := s!"#<script {s.name}>" instance : Test (Lake.ExternLibConfig n n') where toString _ := s!"#<extern lib>" instance : Test (Lake.OpaqueTargetConfig n n') where toString _ := s!"#<opaque target>" instance : Test (Lake.OpaquePostUpdateHook α) where toString _ := s!"#<post-update-hook>" instance : Test Lake.Toml.DecodeError where toString | {ref, msg} => s!"{msg} at {ref}" deriving instance Test for Lake.Dependency deriving instance Test for Lake.PackageConfig deriving instance Test for Lake.LeanLibConfig deriving instance Test for Lake.LeanExeConfig instance : Test (Lake.ConfigType kind pkg name) where toString := match kind with | `lean_lib => fun (x : Lake.LeanLibConfig name) => Test.toString x | `lean_exe => fun (x : Lake.LeanExeConfig name) => Test.toString x | `extern_lib => fun (x : Lake.ExternLibConfig pkg name) => Test.toString x | .anonymous => fun (x : Lake.OpaqueTargetConfig pkg name) => Test.toString x | _ => fun _ => "Impossible!" instance : Test Lake.CacheRef where toString _ := "#<cacheref>" private def contains (fmt : Format) (c : Char) : Bool := match fmt with | .text s => s.contains c | .tag _ x | .group x .. | .nest _ x => contains x c | .append x y => contains x c || contains y c | .align .. 
| .line | .nil => false instance [Test α] : Test (Option α) where toString | none => "none" | some x => let s := Test.toString x let s := if contains s '(' || contains s ' ' then "(" ++ s ++ ")" else s s!"some " ++ s deriving instance Test for Lake.ConfigDecl deriving instance Test for Lake.PConfigDecl deriving instance Test for Lake.NConfigDecl deriving instance Test for Lake.Package open Lake Toml in def report [Monad m] [Lean.MonadLog m] [MonadFileMap m] [Test α] (val : α) (errs : Array DecodeError) : m String := do let mut result := "" unless errs.isEmpty do result := result ++ "Errors:\n" for e in errs do result := result ++ (← posStr e.ref) ++ e.msg ++ "\n" result := result ++ "-------------\n" result := result ++ (Test.toString val).pretty ++ "\n" pure result where posStr (stx : Syntax) : m String := do let text ← getFileMap let fn ← getFileName <&> (System.FilePath.fileName · |>.getD "") let head := (stx.getHeadInfo? >>= SourceInfo.getPos?) <&> text.utf8PosToLspPos let tail := (stx.getTailInfo? >>= SourceInfo.getPos?) 
<&> text.utf8PosToLspPos if let some ⟨l, c⟩ := head then if let some ⟨l', c'⟩ := tail then if l = l' then return s!"{fn}:{l}:{c}-{c'}: " else return s!"{fn}:{l}-{l'}:{c}-{c'}: " return s!"{fn}:{l}:{c}: " return "" end Toml section variable [Monad m] [MonadLiftT BaseIO m] [MonadFileMap m] [Lean.MonadLog m] open Lean.Parser in open Lake Toml in def checkToml (α : Type) [Inhabited α] [DecodeToml α] [Toml.Test α] (str : String) (what : Name) : m (Except String String) := do let ictx := mkInputContext str "<example TOML>" match (← Lake.Toml.loadToml ictx |>.toBaseIO) with | .error err => return .error <| toString (← err.unreported.toArray.mapM (·.toString)) | .ok tbl => let .ok (out : α) errs := (tbl.tryDecode what).run #[] .ok <$> report out errs structure Named (α : Name → Type u) where name : Name val : α name instance [(n : Name) → Toml.Test (α n)] : Toml.Test (Named α) where toString | ⟨n, v⟩ => "{ " ++ .group (.nest 2 <| "name := " ++ n.toString ++ "," ++ .line ++ "val := " ++ Toml.Test.toString v ++ "}") instance [(n : Name) → Lake.DecodeToml (α n)] : Lake.DecodeToml (Named α) where decode v := do let table ← v.decodeTable -- let name ← Lake.stringToLegalOrSimpleName <$> table.decode `name let val ← Lake.DecodeToml.decode v return ⟨name, val⟩ open Lean.Parser in open Lake Toml in def checkTomlArrayWithName (α : Name → Type) [(n : Name) → Inhabited (α n)] [(n : Name) → DecodeToml (α n)] [(n : Name) → Toml.Test (α n)] (str : String) (what : Name) : m (Except String String) := do let ictx := mkInputContext str "<example TOML>" match (← Lake.Toml.loadToml ictx |>.toBaseIO) with | .error err => return .error <| toString (← err.unreported.toArray.mapM (·.toString)) | .ok tbl => let .ok (name : Name) errs := (tbl.tryDecode `name).run #[] let .ok out errs := (tbl.tryDecode what).run errs .ok <$> report (out : α name) errs -- TODO this became private upstream, so it's been copied to fix the build. -- Negotiate a public API. 
open Lake Toml in private def decodeTargetDecls (pkg : Name) (t : Table) : DecodeM (Array (PConfigDecl pkg) × DNameMap (NConfigDecl pkg)) := do let r := (#[], {}) let r ← go r LeanLib.keyword LeanLib.configKind LeanLibConfig.decodeToml let r ← go r LeanExe.keyword LeanExe.configKind LeanExeConfig.decodeToml let r ← go r InputFile.keyword InputFile.configKind InputFileConfig.decodeToml let r ← go r InputDir.keyword InputDir.configKind InputDirConfig.decodeToml return r where go r kw kind (decode : {n : Name} → Table → DecodeM (ConfigType kind pkg n)) := do let some tableArrayVal := t.find? kw | return r let some vals ← tryDecode? tableArrayVal.decodeValueArray | return r vals.foldlM (init := r) fun r val => do let some t ← tryDecode? val.decodeTable | return r let some name ← tryDecode? <| stringToLegalOrSimpleName <$> t.decode `name | return r let (decls, map) := r if let some orig := map.get? name then modify fun es => es.push <| .mk val.ref s!"\ {pkg}: target '{name}' was already defined as a '{orig.kind}', \ but then redefined as a '{kind}'" return (decls, map) else let config ← @decode name t let decl : NConfigDecl pkg name := -- Safety: By definition, config kind = facet kind for declarative configurations. 
unsafe {pkg, name, kind, config, wf_data := lcProof} return (decls.push decl.toPConfigDecl, map.insert name decl) open Lean.Parser in open Lake Toml in def checkTomlPackage [Lean.MonadError m] (str : String) : m (Except String String) := do let ictx := mkInputContext str "<example TOML>" match (← Lake.Toml.loadToml ictx |>.toBaseIO) with | .error err => return .error <| toString (← err.unreported.toArray.mapM (·.toString)) | .ok tbl => let .ok env ← EIO.toBaseIO <| Lake.Env.compute {home:=""} {sysroot:=""} none none | throwError "Failed to make env" let cfg : LoadConfig := {lakeEnv := env, wsDir := "."} let .ok (pkg : Lake.Package) errs := Id.run <| (EStateM.run · #[]) <| do let name ← stringToLegalOrSimpleName <$> tbl.tryDecode `name let config : PackageConfig name name ← PackageConfig.decodeToml tbl let (targetDecls, targetDeclMap) ← decodeTargetDecls name tbl let defaultTargets ← tbl.tryDecodeD `defaultTargets #[] let defaultTargets := defaultTargets.map stringToLegalOrSimpleName let depConfigs ← tbl.tryDecodeD `require #[] pure { dir := cfg.pkgDir relDir := cfg.relPkgDir relConfigFile := cfg.relConfigFile scope := cfg.scope remoteUrl := cfg.remoteUrl configFile := cfg.configFile config, depConfigs, targetDecls, targetDeclMap defaultTargets baseName := name wsIdx := 0 origName := name } .ok <$> report pkg errs end structure LakeTomlOpts where /-- The type to check it against -/ type : Name /-- The field of the table to use -/ field : Name /-- Whether to keep the result -/ «show» : Bool def LakeTomlOpts.parse [Monad m] [MonadInfoTree m] [MonadLiftT CoreM m] [MonadEnv m] [MonadError m] : ArgParse m LakeTomlOpts := LakeTomlOpts.mk <$> .positional `type .resolvedName <*> .positional `field .name <*> (.named `show .bool true <&> (·.getD true)) @[directive_expander lakeToml] def lakeToml : DirectiveExpander | args, contents => do let opts ← LakeTomlOpts.parse.run args let (expected, contents) := contents.partition fun | `(block| ``` expected | $_ ```) => true | _ => 
false let toml := contents.filterMap fun | `(block| ``` toml $_* | $tomlStr ```) => some tomlStr | _ => none if h : expected.size ≠ 1 then throwError "Expected exactly 1 'expected' code block, got {expected.size}" else let `(block| ```expected | $expectedStr ```) := expected[0] | throwErrorAt expected[0] "Expected an 'expected' code block with no arguments" if h : toml.size ≠ 1 then throwError "Expected exactly 1 toml code block, got {toml.size}" else let tomlStr := toml[0] let tomlInput := tomlStr.getString ++ "\n" let v ← match opts.field, opts.type with | `_root_, ``Lake.PackageConfig => match (← checkTomlPackage ((← parserInputString tomlStr) ++ "\n")) with | .error e => throwErrorAt tomlStr e | .ok v => pure v | `_root_, other => throwError "'_root_' can only be used with 'Lake.PackageConfig'" | f, ``Lake.Dependency => match (← checkToml (Array Lake.Dependency) ((← parserInputString tomlStr) ++ "\n") f) with | .error e => throwErrorAt tomlStr e | .ok v => pure v | `lean_lib, ``Lake.LeanLibConfig => -- TODO get the name first! match (← checkToml (Array (Named Lake.LeanLibConfig)) ((← parserInputString tomlStr) ++ "\n") `lean_lib) with | .error e => throwErrorAt tomlStr e | .ok v => pure v | `lean_exe, ``Lake.LeanExeConfig => match (← checkToml (Array (Named Lake.LeanExeConfig)) ((← parserInputString tomlStr) ++ "\n") `lean_exe) with | .error e => throwErrorAt tomlStr e | .ok v => pure v | _, _ => throwError s!"Unsupported type {opts.type}" discard <| expectString "elaborated configuration output" expectedStr v (useLine := (·.any (! 
Char.isWhitespace ·))) contents.mapM (elabBlock ⟨·⟩) @[role_expander tomlField] def tomlFieldInline : RoleExpander | args, inlines => do let table ← (ArgParse.positional `table .resolvedName).run args let #[arg] := inlines | throwError "Expected exactly one argument" let `(inline|code( $name:str )) := arg | throwErrorAt arg "Expected code literal with the field name" let name := name.getString pure #[← `(show Verso.Doc.Inline Verso.Genre.Manual from .other (Manual.Inline.tomlField $(quote table) $(quote name.toName)) #[Inline.code $(quote name)])] @[inline_extension Manual.Inline.tomlField] def tomlFieldInline.descr : InlineDescr where traverse _ _ _ := do pure none toTeX := none extraCss := [ r#" .toml-field a { color: inherit; text-decoration: currentcolor underline dotted; } .toml-field a:hover { text-decoration: currentcolor underline solid; } "#] toHtml := open Verso.Output.Html in some <| fun goB _id data content => do let .ok (tableName, fieldName) := fromJson? (α := Name × Name) data | HtmlT.logError s!"Failed to deserialize metadata for Lake option ref: {data}"; content.mapM goB if let some obj := (← read).traverseState.getDomainObject? tomlFieldDomain s!"{tableName} {fieldName}" then for id in obj.ids do if let some dest := (← read).traverseState.externalTags[id]? then return {{<code class="toml-field"><a href={{dest.link}}>{{fieldName.toString}}</a></code>}} else HtmlT.logError s!"No link destination for TOML field {tableName}:{fieldName}" pure {{<code class="toml-field">{{fieldName.toString}}</code>}}
reference-manual/Manual/Meta/LakeCmd.lean
import Lean.Elab.Command
import Lean.Elab.InfoTree
import Verso
import Verso.Doc.ArgParse
import Verso.Doc.Elab.Monad
import VersoManual
import Verso.Code
import SubVerso.Highlighting
import SubVerso.Examples
import Manual.Meta.Basic

open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets
open Lean Elab
open SubVerso.Highlighting Highlighted
open Lean.Doc.Syntax
open Lean.Elab.Tactic.GuardMsgs

namespace Manual

namespace CommandSpec

mutual
  /-- One item in a Lake command-line specification (metavariable, literal, `...`, `|`, or `[...]`). -/
  inductive Item where
    | metavar (name : String)
    | literalSyntax (string : String)
    | ellipses
    | optional (contents : List DecoratedItem)
    | or
  deriving ToJson, FromJson, Repr

  /-- An `Item` together with the source whitespace that surrounded it. -/
  structure DecoratedItem where
    leading : String
    item : Item
    trailing : String
  deriving ToJson, FromJson, Repr
end

mutual
  /-- Renders a spec item as highlighted code for display in the manual. -/
  partial def Item.toHighlighted : Item → Highlighted
    | .metavar x => .token ⟨.var ⟨x.toName⟩ x, x⟩ -- Hack: abusing FVarId here
    | .literalSyntax s => .token ⟨.keyword none none none, s⟩
    | .ellipses => .token ⟨.unknown, "..."⟩
    | .or => .token ⟨.keyword none none none, "|"⟩
    | .optional xs =>
      .token ⟨.keyword none none none, "["⟩ ++
      .seq (xs.toArray.map DecoratedItem.toHighlighted) ++
      .token ⟨.keyword none none none, "]"⟩

  /-- Renders a decorated item, reproducing its original surrounding whitespace as plain text. -/
  partial def DecoratedItem.toHighlighted : DecoratedItem → Highlighted
    | ⟨l, x, r⟩ => .text l ++ x.toHighlighted ++ .text r
end

open Syntax (mkCApp)

-- Local list quotation helper; `quote` on the elements is supplied by the caller's instance.
private def quoteList [Quote α `term] : List α → Term
  | [] => mkCIdent ``List.nil
  | (x::xs) => Syntax.mkCApp ``List.cons #[quote x, quoteList xs]

mutual
  /-- Quotes an `Item` back into a term that reconstructs it (used to splice specs into elaborated docs). -/
  partial def Item.quote : Item → Term
    | .metavar x => mkCApp ``Item.metavar #[Quote.quote x]
    | .literalSyntax s => mkCApp ``Item.literalSyntax #[Quote.quote s]
    | .ellipses => mkCApp ``Item.ellipses #[]
    | .or => mkCApp ``Item.or #[]
    | .optional xs =>
      -- A local instance is needed because the mutual partner's instance isn't available yet here.
      have : Quote DecoratedItem := ⟨DecoratedItem.quote⟩
      mkCApp ``Item.optional #[quoteList xs]

  /-- Quotes a `DecoratedItem` back into a term that reconstructs it. -/
  partial def DecoratedItem.quote : DecoratedItem → Term
    | ⟨l, i, t⟩ => mkCApp ``DecoratedItem.mk #[quote l, i.quote, quote t]
end

instance : Quote Item := ⟨Item.quote⟩
instance : Quote DecoratedItem := ⟨DecoratedItem.quote⟩

end CommandSpec

/-- A full Lake command spec: a sequence of decorated spec items. -/
abbrev CommandSpec : Type := List CommandSpec.DecoratedItem

/-- Renders an entire command spec as highlighted code. -/
def CommandSpec.toHighlighted (spec : CommandSpec) : Highlighted :=
  .seq (spec.map (·.toHighlighted)).toArray

-- Concrete syntax for command specs, parsed from string literals in the manual source.
declare_syntax_cat lake_cmd_spec_item
syntax ident : lake_cmd_spec_item
syntax str : lake_cmd_spec_item
syntax "..." : lake_cmd_spec_item
syntax "|" : lake_cmd_spec_item
syntax "[" lake_cmd_spec_item+ "]" : lake_cmd_spec_item

declare_syntax_cat lake_cmd_spec
syntax lake_cmd_spec_item* : lake_cmd_spec

mutual
  /-- Converts parsed spec-item syntax into the `Item` datatype. -/
  partial def CommandSpec.Item.ofSyntax (stx : TSyntax `lake_cmd_spec_item) : Except String CommandSpec.Item :=
    match stx with
    | `(lake_cmd_spec_item|$i:ident) => pure <| .metavar <| i.getId.toString (escape := false)
    | `(lake_cmd_spec_item|$s:str) => pure <| .literalSyntax s.getString
    | `(lake_cmd_spec_item|...) => pure <| .ellipses
    | `(lake_cmd_spec_item||) => pure <| .or
    | `(lake_cmd_spec_item|[ $items* ]) => .optional <$> items.toList.mapM DecoratedItem.ofSyntax
    | _ => .error s!"Not a command spec item: {stx}"

  /-- Converts spec-item syntax, recording the surrounding whitespace from the parser's source info. -/
  partial def CommandSpec.DecoratedItem.ofSyntax (stx : TSyntax `lake_cmd_spec_item) : Except String CommandSpec.DecoratedItem := do
    return ⟨lead stx.raw.getHeadInfo, ← Item.ofSyntax stx, trail stx.raw.getTailInfo⟩
  where
    -- Leading whitespace from original source info; empty for synthetic syntax.
    lead : SourceInfo → String
      | .original l .. => l.toString
      | _ => ""
    -- Trailing whitespace from original source info; empty for synthetic syntax.
    trail : SourceInfo → String
      | .original _ _ t _ => t.toString
      | _ => ""
end

/-- Converts a whole parsed `lake_cmd_spec` into a `CommandSpec`. -/
def CommandSpec.ofSyntax (stx : Syntax) : Except String CommandSpec :=
  match stx with
  | `(lake_cmd_spec|$items:lake_cmd_spec_item*) => do
    items.toList.mapM DecoratedItem.ofSyntax
  | _ => .error s!"Not a command spec: {stx}"

/-- Arguments to the `{lake}` directive: command name(s), an optional spec string, and aliases. -/
structure LakeCommandOptions where
  name : List Name
  spec : StrLit
  -- This only allows one level of subcommand, but it's sufficient for Lake as it is today
  aliases : List Name

/-- Argument parser for `LakeCommandOptions`; the spec defaults to the empty string when omitted. -/
partial def LakeCommandOptions.parse [Monad m] [MonadError m] : ArgParse m LakeCommandOptions :=
  LakeCommandOptions.mk <$>
    many1 (.positional `name .name) <*>
    (.positional `spec strLit <|> (pure (Syntax.mkStrLit ""))) <*>
    .many (.named `alias .name false)
where
  -- One-or-more combinator (ArgParse has no built-in `many1`).
  many1 {α} (p : ArgParse m α) : ArgParse m (List α) := (· :: ·) <$> p <*> .many p
  strLit : ValDesc m StrLit := {
    description := "string literal containing a Lake command spec",
    signature := .String
    get
      | .str s => pure s
      | other => throwError "Expected string, got {repr other}"
  }

/-- Block value carrying a Lake command's name, aliases, and spec as JSON. -/
def Block.lakeCommand (name : String) (aliases : List String) (spec : CommandSpec) : Block where
  name := `Manual.Block.lakeCommand
  data := Json.arr #[Json.str name, toJson aliases, toJson spec]

/-- Inline for a command-line metavariable; the second data slot is filled in by `addLakeMetaInline`. -/
def Inline.lakeMeta : Inline where
  name := `Manual.lakeMeta
  data := .arr #[.null, .null]

/-- Inline for a highlighted argument spec; the second data slot is filled in by `addLakeMetaInline`. -/
def Inline.lakeArgs (hl : Highlighted) : Inline where
  name := `Manual.lakeArgs
  data := .arr #[toJson hl, .null]

/-- Inline that cross-references a documented Lake command by name. -/
def Inline.lake : Inline where
  name := `Manual.lake
  data := .null

-- Recursively stamps the enclosing command's name into the second data slot of
-- `lakeMeta`/`lakeArgs` inlines, so they can render with the right highlight context.
private partial def addLakeMetaInline (name : String) : Doc.Inline Verso.Genre.Manual → Doc.Inline Verso.Genre.Manual
  | .other i xs =>
    if i.name == Inline.lakeMeta.name || i.name == `Manual.lakeArgs then
      if let Json.arr #[mn, _] := i.data then
        .other {i with data := .arr #[mn, .str name]} <| xs.map (addLakeMetaInline name)
      else
        .other i <| xs.map (addLakeMetaInline name)
    else
      .other i <| xs.map (addLakeMetaInline name)
  | .concat xs => .concat <| xs.map (addLakeMetaInline name)
  | .emph xs => .emph <| xs.map (addLakeMetaInline name)
  | .bold xs => .bold <| xs.map (addLakeMetaInline name)
  | .image alt url => .image alt url
  | .math t txt => .math t txt
  | .footnote x xs => .footnote x (xs.map (addLakeMetaInline name))
  | .link xs url => .link (xs.map (addLakeMetaInline name)) url
  | .code s => .code s
  | .linebreak str => .linebreak str
  | .text str => .text str

-- Block-level traversal companion to `addLakeMetaInline`.
private partial def addLakeMetaBlock (name : String) : Doc.Block Verso.Genre.Manual → Doc.Block Verso.Genre.Manual
  | .para xs => .para (xs.map (addLakeMetaInline name))
  | .other b xs => .other b (xs.map (addLakeMetaBlock name))
  | .concat xs => .concat (xs.map (addLakeMetaBlock name))
  | .blockquote xs => .blockquote (xs.map (addLakeMetaBlock name))
  | .code s => .code s
  | .dl items => .dl (items.map fun ⟨xs, ys⟩ => ⟨xs.map (addLakeMetaInline name), ys.map (addLakeMetaBlock name)⟩)
  | .ul items => .ul (items.map fun ⟨ys⟩ => ⟨ys.map (addLakeMetaBlock name)⟩)
  | .ol n items => .ol n (items.map fun ⟨ys⟩ => ⟨ys.map (addLakeMetaBlock name)⟩)

/--
Directive that documents a Lake command: parses the spec string, elaborates the
body with the command name stamped into nested `lakeMeta`/`lakeArgs` inlines,
and wraps everything in a `Block.lakeCommand`.
-/
@[directive_expander lake]
def lake : DirectiveExpander
  | args, contents => do
    let {name, spec, aliases} ← LakeCommandOptions.parse.run args
    let spec ←
      if spec.getString.trimAscii.isEmpty then pure []
      else
        match Parser.runParserCategory (← getEnv) `lake_cmd_spec spec.getString (← getFileName) with
        | .error e => throwErrorAt spec e
        | .ok stx =>
          match CommandSpec.ofSyntax stx with
          | .error e => throwErrorAt spec e
          | .ok spec => pure spec
    -- Subcommand names are joined with "~~" for the meta-stamping pass.
    let contents ← contents.mapM fun b => do
      ``(addLakeMetaBlock $(quote <| String.intercalate "~~" (name.map (·.toString (escape := false)))) $(← elabBlock b))
    pure #[← ``(Verso.Doc.Block.other (Block.lakeCommand $(quote <| String.intercalate " " <| name.map (·.toString (escape := false))) $(quote <| aliases.map (·.toString (escape := false))) $(quote spec)) #[$contents,*])]

/-- Cross-reference domain for Lake commands. -/
def lakeCommandDomain : Name := `Manual.lakeCommand

open Verso.Search in
/-- Quick-jump search configuration for the Lake command domain. -/
def lakeCommandDomainMapper : DomainMapper where
  displayName := "Lake Command"
  className := "lake-command-domain"
  dataToSearchables :=
    "(domainData) => Object.entries(domainData.contents).map(([key, value]) => ({ searchKey: `lake ${key}`, address: `${value[0].address}#${value[0].id}`, domainId: 'Manual.lakeCommand', ref: value, }))"

open Verso.Genre.Manual.Markdown in
open Lean Elab Term Parser Tactic in
/--
Block extension that renders a documented Lake command: registers it in the
command domain and index during traversal, and emits a `namedocs` signature
box (with aliases, if any) for HTML output.
-/
@[block_extension Block.lakeCommand]
def lakeCommand.descr : BlockDescr where
  init st := st
    |>.setDomainTitle lakeCommandDomain "Lake commands"
    |>.setDomainDescription lakeCommandDomain "Detailed descriptions of Lake commands"
    |>.addQuickJumpMapper lakeCommandDomain (lakeCommandDomainMapper.setFont { family := .code })
  traverse id info _ := do
    let Json.arr #[Json.str name, aliases, _] := info
      | do logError s!"Failed to deserialize data while traversing a Lake command, expected 3-element array starting with string but got {info}"; pure none
    let aliases : List String ←
      match fromJson? (α := List String) aliases with
      | .ok v => pure v
      | .error e =>
        logError s!"Failed to deserialize aliases while traversing a Lake command: {e}"; pure []
    let path ← (·.path) <$> read
    let _ ← Verso.Genre.Manual.externalTag id path name
    Index.addEntry id {term := Inline.concat #[.code name, .text " (Lake command)"]}
    -- Aliases resolve to the same target as the primary name.
    modify fun st => st.saveDomainObject lakeCommandDomain name id
    for a in aliases do
      modify fun st => st.saveDomainObject lakeCommandDomain a id
    pure none
  toHtml :=
    some <| fun _goI goB id info contents =>
      open Verso.Doc.Html in
      open Verso.Output Html in do
        let Json.arr #[ Json.str name, aliases, spec] := info
          | do Verso.Doc.Html.HtmlT.logError s!"Failed to deserialize data while making HTML for Lake command, got {info}"; pure .empty
        -- NOTE(review): this error message interpolates `spec`, not `aliases` — likely a
        -- copy-paste slip; confirm and switch to `{aliases}` in a follow-up.
        let .ok (aliases : List String) := FromJson.fromJson? aliases
          | do Verso.Doc.Html.HtmlT.logError s!"Failed to deserialize aliases while making HTML for Lake command, got {spec}"; pure .empty
        let .ok (spec : CommandSpec) := FromJson.fromJson? spec
          | do Verso.Doc.Html.HtmlT.logError s!"Failed to deserialize spec while making HTML for Lake command, got {spec}"; pure .empty
        let lakeTok : Highlighted := .token ⟨.keyword none none none, "lake"⟩
        let nameTok : Highlighted := .token ⟨.keyword none none none, name⟩
        let spec : Highlighted := spec.toHighlighted
        let xref ← HtmlT.state
        let idAttr := xref.htmlId id
        let aliasHtml : Html :=
          match aliases with
          | [] => .empty
          | _::more => {{
              <p>
                <strong>{{if more.isEmpty then "Alias:" else "Aliases:"}}</strong>
                " "
                {{aliases.map (fun (a : String) => {{<code>s!"lake {a}"</code>}}) |>.intersperse {{", "}}}}
              </p>
            }}
        return {{
          <div class="namedocs" {{idAttr}}>
            {{permalink id xref false}}
            <span class="label">"Lake command"</span>
            <pre class="signature hl lean block" data-lean-context={{name}}>
              {{← (Highlighted.seq #[lakeTok, .text " ", nameTok, .text " ", spec]).toHtml (g := Verso.Genre.Manual)}}
            </pre>
            <div class="text">
              {{aliasHtml}}
              {{← contents.mapM goB}}
            </div>
          </div>
        }}
  toTeX := none
  extraCss := [highlightingStyle, docstringStyle]
  extraJs := [highlightingJs]
  localContentItem _ info _ := open Verso.Output.Html in do
    if let Json.arr #[ Json.str name, _, _] := info then
      let str := s!"lake {name}"
      pure #[(str, {{<code>{{str}}</code>}})]
    else throw s!"Expected a three-element array with a string first, got {info}"

/-- Role marking a code literal as a command-line metavariable, e.g. `{lakeMeta}`\``pkg`\`. -/
@[role_expander lakeMeta]
def lakeMeta : RoleExpander
  | args, inlines => do
    let () ← ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $mName:str )) := arg
      | throwErrorAt arg "Expected code literal with the metavariable"
    let mName := mName.getString
    pure #[← `(show Verso.Doc.Inline Verso.Genre.Manual from .other {Manual.Inline.lakeMeta with data := Json.arr #[$(quote mName), .null]} #[Inline.code $(quote mName)])]

/-- Renders a `lakeMeta` inline as a highlighted variable token, using the stamped command name as context. -/
@[inline_extension lakeMeta]
def lakeMeta.descr : InlineDescr := withHighlighting {
  traverse _ _ _ := do
    pure none
  toTeX :=
    some <| fun go _ _ content => do
      pure <| .seq <| ← content.mapM fun b => do
        pure <| .seq #[← go b, .raw "\n"]
  toHtml :=
    open Verso.Output.Html in
    some <| fun _ _ data _ => do
      -- The second slot holds the enclosing command name, if `addLakeMetaInline` ran.
      let (mName, ctx) :=
        match data with
        | .arr #[.str mName, .str cmdName] => (mName, some cmdName)
        | .arr #[.str mName, _] => (mName, none)
        | _ => ("", none)
      let hl : Highlighted := .token ⟨.var ⟨mName.toName⟩ mName, mName⟩
      hl.inlineHtml ctx (g := Verso.Genre.Manual)
}

/-- Role that cross-references a documented Lake command, e.g. `{lake}`\``build`\`. -/
@[role_expander lake]
def lakeInline : RoleExpander
  | args, inlines => do
    let () ← ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $cmdName:str )) := arg
      | throwErrorAt arg "Expected code literal with the Lake command name"
    let name := cmdName.getString
    pure #[← `(show Verso.Doc.Inline Verso.Genre.Manual from .other {Manual.Inline.lake with data := $(quote name)} #[Inline.code $(quote name)])]

/-- Renders a `lake` command reference as a link into the Lake command domain, when the target exists. -/
@[inline_extension lake]
def lake.descr : InlineDescr where
  traverse _ _ _ := do
    pure none
  toTeX :=
    some <| fun go _ _ content => do
      pure <| .seq <| ← content.mapM fun b => do
        pure <| .seq #[← go b, .raw "\n"]
  extraCss := [
    r#"
a.lake-command { color: inherit; text-decoration: currentcolor underline dotted; }
a.lake-command:hover { text-decoration: currentcolor underline solid; }
"#
  ]
  toHtml :=
    open Verso.Output.Html in
    some <| fun goI _ data is => do
      let (name) :=
        match data with
        | .str x => some x
        | _ => none
      if let some n := name then
        if let some dest := (← read).traverseState.getDomainObject? lakeCommandDomain n then
          for id in dest.ids do
            if let some dest := (← read).traverseState.externalTags[id]? then
              return {{<a href={{dest.link}} class="lake-command"><code>s!"lake {n}"</code></a>}}
      -- Fallback: no link target found; log and render the plain contents.
      HtmlT.logError s!"No name for lake command in {data.compress}"
      is.mapM goI

/-- Role that parses and highlights a Lake argument spec string, e.g. `{lakeArgs}`\``[--verbose]`\`. -/
@[role_expander lakeArgs]
def lakeArgs : RoleExpander
  | args, inlines => do
    let () ← ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $spec:str )) := arg
      | throwErrorAt arg "Expected code literal with the Lake command name"
    match Parser.runParserCategory (← getEnv) `lake_cmd_spec spec.getString (← getFileName) with
    | .error e => throwErrorAt spec e
    | .ok stx =>
      match CommandSpec.ofSyntax stx with
      | .error e => throwErrorAt spec e
      | .ok spec =>
        let hl := spec.toHighlighted
        pure #[← ``(Verso.Doc.Inline.other (Inline.lakeArgs $(quote hl)) #[])]

/-- Renders a `lakeArgs` inline from its pre-highlighted payload. -/
@[inline_extension lakeArgs]
def lakeArgs.descr : InlineDescr := withHighlighting {
  traverse _ _ _ := do
    pure none
  toTeX := none
  toHtml :=
    open Verso.Output.Html in
    some <| fun _ _ data _ => do
      if let .arr #[hl, name] := data then
        match fromJson? (α := Highlighted) hl with
        | .error e => HtmlT.logError s!"Couldn't deserialize Lake args: {e}"; return .empty
        | .ok hl =>
          let name := if let Json.str n := name then some n else none
          hl.inlineHtml name (g := Verso.Genre.Manual)
      else
        HtmlT.logError s!"Expected two-element JSON array, got {data}"; return .empty
}
reference-manual/Manual/Meta/CheckMessages.lean
import Lean.Elab.GuardMsgs
import SubVerso.Examples.Messages

open Lean Elab Command
open SubVerso.Examples.Messages (messagesMatch)

/-- A version of `#guard_msgs` that compares messages modulo metavariable and line number normalization. -/
syntax (name := checkMsgsCmd) (docComment)? "#check_msgs" (ppSpace guardMsgsSpec)? " in" ppLine command : command

/--
Gives a string representation of a message without source position information.
Ensures the message ends with a '\n'.
-/
private def messageToStringWithoutPos (msg : Message) : BaseIO String := do
  let mut str ← msg.data.toString
  -- Prepend the caption, when present.
  unless msg.caption == "" do
    str := msg.caption ++ ":\n" ++ str
  -- Single-line messages get a space between the severity label and the text.
  if !("\n".isPrefixOf str) then str := " " ++ str
  -- Severity labels match `#guard_msgs` output so expected blocks are interchangeable.
  match msg.severity with
  | MessageSeverity.information => str := "info:" ++ str
  | MessageSeverity.warning => str := "warning:" ++ str
  | MessageSeverity.error => str := "error:" ++ str
  if str.isEmpty || str.back != '\n' then str := str ++ "\n"
  return str

open Tactic.GuardMsgs in
/--
Elaborator for `#check_msgs`: runs the nested command, collects its synchronous
and asynchronous messages, and compares them against the expected output in the
doc comment using SubVerso's `messagesMatch` (which normalizes metavariable
numbers and line numbers). On mismatch, logs an error (with a diff when
`guard_msgs.diff` is set) and records a `GuardMsgFailure` info leaf so the
`#guard_msgs` code action can offer to update the expected output.
-/
@[command_elab checkMsgsCmd]
def elabCheckMsgs : CommandElab
  | `(command| $[$dc?:docComment]? #check_msgs%$tk $(spec?)? in $cmd) => do
    let expected : String := (← dc?.mapM (getDocStringText ·)).getD "" |>.trimAscii |>.copy
      |> removeTrailingWhitespaceMarker
    let {whitespace, ordering, filterFn, ..} ← parseGuardMsgsSpec spec?
    let initMsgs ← modifyGet fun st => (st.messages, { st with messages := {} })
    -- do not forward snapshot as we don't want messages assigned to it to leak outside
    withReader ({ · with snap? := none }) do
      -- The `#guard_msgs` command is special-cased in `elabCommandTopLevel` to ensure linters only run once.
      elabCommandTopLevel cmd
    -- collect sync and async messages
    let msgs := (← get).messages ++
      (← get).snapshotTasks.foldl (· ++ ·.get.getAll.foldl (· ++ ·.diagnostics.msgLog) {}) {}
    -- clear async messages as we don't want them to leak outside
    modify ({ · with snapshotTasks := #[] })
    let mut toCheck : MessageLog := .empty
    let mut toPassthrough : MessageLog := .empty
    for msg in msgs.toList do
      if msg.isSilent then continue
      match filterFn msg with
      | .check => toCheck := toCheck.add msg
      | .drop => pure ()
      | .pass => toPassthrough := toPassthrough.add msg
    let strings ← toCheck.toList.mapM (messageToStringWithoutPos ·)
    let strings := ordering.apply strings
    let res := "---\n".intercalate strings |>.trimAscii |>.copy
    if messagesMatch (whitespace.apply expected) (whitespace.apply res) then
      -- Passed. Only put toPassthrough messages back on the message log
      modify fun st => { st with messages := initMsgs ++ toPassthrough }
    else
      -- Failed. Put all the messages back on the message log and add an error
      modify fun st => { st with messages := initMsgs ++ msgs }
      let feedback :=
        if guard_msgs.diff.get (← getOptions) then
          let diff := Diff.diff (expected.splitToList (· == '\n')).toArray (res.splitToList (· == '\n')).toArray
          Diff.linesToString diff
        else
          res
      -- Fix: the message previously said `#guard_msgs`, but this command is `#check_msgs`.
      logErrorAt tk m!"❌️ Docstring on `#check_msgs` does not match generated message:\n\n{feedback}"
      -- Record the actual output so the code action can rewrite the doc comment.
      pushInfoLeaf (.ofCustomInfo { stx := ← getRef, value := Dynamic.mk (GuardMsgFailure.mk res) })
  | _ => throwUnsupportedSyntax

-- Reuse the `#guard_msgs` "update expected output" code action for `#check_msgs`.
attribute [command_code_action checkMsgsCmd] Tactic.GuardMsgs.guardMsgsCodeAction
reference-manual/Manual/Meta/Attribute.lean
import VersoManual
import Verso.Code.Highlighted
import Manual.Meta.Basic
import Manual.Meta.PPrint

open Verso Doc Elab
open Verso.Genre Manual
open Verso.ArgParse
open Verso.Code (highlightingJs)
open Verso.Code.Highlighted.WebAssets
open scoped Lean.Doc.Syntax
open Lean Elab Parser
open Lean.Widget (TaggedText)
open SubVerso.Highlighting
open Verso.Code

namespace Manual

/-- Inline for a single documented attribute, carrying its highlighted token as JSON data. -/
def Inline.attr : Inline where
  name := `Manual.attr

/--
Role that documents a single attribute, e.g. `{attr}`\``simp`\`: parses the code
literal with the `attr` syntax category, resolves it to a registered attribute,
and emits a highlighted keyword token carrying the attribute's description.
-/
@[role_expander attr]
def attr : RoleExpander
  | args, inlines => do
    let () ← ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $a:str )) := arg
      | throwErrorAt arg "Expected code literal with the attribute"
    let altStr ← parserInputString a
    match Parser.runParserCategory (← getEnv) `attr altStr (← getFileName) with
    | .error e => throwErrorAt a e
    | .ok stx =>
      -- Map the parsed syntax kind back to the attribute's registered name:
      -- either the generic `simple` kind (name is the first child) or a
      -- kind-specific parser under `Lean.Parser.Attr` / `Lean.Attr`.
      let attrName ←
        match stx.getKind with
        | `Lean.Parser.Attr.simple => pure stx[0].getId
        | .str (.str (.str (.str .anonymous "Lean") "Parser") "Attr") k => pure k.toName
        | .str (.str (.str .anonymous "Lean") "Attr") k => pure k.toName
        | other =>
          let allAttrs := attributeExtension.getState (← getEnv) |>.map |>.toArray |>.map (·.fst) |>.qsort (·.toString < ·.toString)
          throwErrorAt a "Failed to process attribute kind: {stx.getKind} {isAttribute (← getEnv) stx.getKind} {allAttrs |> repr}"
      match getAttributeImpl (← getEnv) attrName with
      | .error e => throwErrorAt a e
      | .ok {descr, name, ref, ..} => do
        let attrTok := a.getString
        let hl : Highlighted := attrToken ref descr attrTok
        try
          -- Attempt to add info to the document source for go-to-def and the like, but this doesn't
          -- work for all attributes (e.g. `csimp`)
          discard <| realizeGlobalConstNoOverloadWithInfo (mkIdentFrom a ref)
        catch _ => pure ()
        pure #[← `(Verso.Doc.Inline.other {Inline.attr with data := ToJson.toJson $(quote hl)} #[Verso.Doc.Inline.code $(quote attrTok)])]
where
  -- TODO: This will eventually generate the right cross-reference, but VersoManual needs to have a
  -- domain for syntax categories/kinds upstreamed to it first (and then the appropriate link target
  -- code added)
  attrToken (ref : Name) (descr : String) (tok : String) : Highlighted :=
    .token ⟨.keyword ref none (some descr), tok⟩

/-- Inline for a whole `@[...]` attribute application, carrying its highlighted form as JSON data. -/
def Inline.attrs : Inline where
  name := `Manual.attrs

/-- Renders an `attr` inline by deserializing and emitting its stored highlight. -/
@[inline_extension attr]
def attr.descr : InlineDescr where
  traverse _ _ _ := do
    pure none
  toTeX := none
  extraCss := [highlightingStyle, docstringStyle]
  extraJs := [highlightingJs]
  toHtml :=
    open Verso.Output.Html Verso.Doc.Html in
    some <| fun _ _ data _ => do
      match FromJson.fromJson? data with
      | .error err =>
        HtmlT.logError <| "Couldn't deserialize Lean attribute code while rendering HTML: " ++ err
        pure .empty
      | .ok (hl : Highlighted) =>
        hl.inlineHtml (g := Manual) "examples"

/-- Shows a collection of applied attributes -/
@[role_expander attrs]
def attrs : RoleExpander
  | args, inlines => do
    let () ← ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $a:str )) := arg
      | throwErrorAt arg "Expected code literal with the attribute application syntax"
    let altStr ← parserInputString a
    -- Parse the full `@[...]` form with the term-level attributes parser.
    let p := andthenFn whitespace Lean.Parser.Term.attributes.fn
    let `(Term.attributes|@[%$s$insts,*]%$e) ← withRef a <| p.parseString altStr
      | throwErrorAt a "Didn't match syntax"
    let mut hl : Highlighted := .empty
    let text ← getFileMap
    for stx in (insts : Array Syntax) do
      let `(Lean.Parser.Term.attrInstance | $[$scopedOrLocal]? $stx:attr) := stx
        | throwErrorAt stx "Didn't parse attribute instance"
      let stx := stx.raw
      -- Same kind-to-name resolution as in `attr` above.
      let attrName ←
        match stx.getKind with
        | `Lean.Parser.Attr.simple => pure stx[0].getId
        | .str (.str (.str (.str .anonymous "Lean") "Parser") "Attr") k => pure k.toName
        | .str (.str (.str .anonymous "Lean") "Attr") k => pure k.toName
        | other =>
          let allAttrs := attributeExtension.getState (← getEnv) |>.map |>.toArray |>.map (·.fst) |>.qsort (·.toString < ·.toString)
          throwErrorAt a "Failed to process attribute kind: {stx.getKind} {isAttribute (← getEnv) stx.getKind} {allAttrs |> repr}"
      match getAttributeImpl (← getEnv) attrName with
      | .error e => throwErrorAt a e
      | .ok {descr, name, ref, ..} =>
        -- Highlight for an optional `scoped`/`local` modifier, extracted from the source text.
        let mod : Highlighted :=
          if let some tok := scopedOrLocal then
            let ⟨s, e⟩ := tok.raw.getRange?.get!
            let str := s.extract text.source e
            .token ⟨.keyword none none none, str⟩ ++ .text " "
          else .empty
        let ⟨s, e⟩ := stx.getRange?.get!
        let attrTok := s.extract text.source e
        -- NOTE(review): the modifier and the ", " separator are only appended when `hl` is
        -- nonempty, so the first instance's `scoped`/`local` modifier appears to be dropped
        -- and the modifier lands before the comma — confirm intended ordering.
        unless hl.isEmpty do
          hl := hl ++ mod ++ .token ⟨.keyword ``Term.attributes none none, ", "⟩
        hl := hl ++ attrToken ref descr attrTok
        try
          -- Attempt to add info to the document source for go-to-def and the like, but this doesn't
          -- work for all attributes (e.g. `csimp`)
          discard <| realizeGlobalConstNoOverloadWithInfo (mkIdentFrom a ref)
        catch _ => pure ()
    -- Wrap the accumulated instances in the surrounding `@[` ... `]` brackets.
    hl := .token ⟨.keyword ``Term.attributes none none, "@["⟩ ++ hl ++ .token ⟨.keyword ``Term.attributes none none, "]"⟩
    pure #[← `(Verso.Doc.Inline.other {Inline.attrs with data := ToJson.toJson $(quote hl)} #[Verso.Doc.Inline.code $(quote a.getString)])]
where
  -- TODO: This will eventually generate the right cross-reference, but VersoManual needs to have a
  -- domain for syntax categories/kinds upstreamed to it first (and then the appropriate link target
  -- code added)
  attrToken (ref : Name) (descr : String) (tok : String) : Highlighted :=
    .token ⟨.keyword ref none (some descr), tok⟩

/-- Renders an `attrs` inline by deserializing and emitting its stored highlight. -/
@[inline_extension attrs]
def attrs.descr : InlineDescr where
  traverse _ _ _ := do
    pure none
  toTeX := none
  extraCss := [highlightingStyle, docstringStyle]
  extraJs := [highlightingJs]
  toHtml :=
    open Verso.Output.Html Verso.Doc.Html in
    some <| fun _ _ data _ => do
      match FromJson.fromJson? data with
      | .error err =>
        HtmlT.logError <| "Couldn't deserialize Lean attribute code while rendering HTML: " ++ err
        pure .empty
      | .ok (hl : Highlighted) =>
        hl.inlineHtml (g := Manual) "examples"
reference-manual/Manual/Meta/SpliceContents.lean
import Verso
import Verso.Doc.ArgParse
import Verso.Doc.Elab.Monad
import VersoManual
import Verso.Code

namespace Manual

open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets
open Lean Elab
open Lean.Doc.Syntax

/-- Argument to `{spliceContents ...}`: the name of the Verso document module to splice. -/
structure SpliceContentsConfig where
  moduleName : Ident

/-- Argument parser for `SpliceContentsConfig` (a single positional module name). -/
def SpliceContentsConfig.parse [Monad m] [MonadInfoTree m] [MonadLiftT CoreM m] [MonadEnv m] [MonadError m] : ArgParse m SpliceContentsConfig :=
  SpliceContentsConfig.mk <$> .positional `moduleName .ident

/--
Part command `command{spliceContents Mod}`: resolves the named document module
and splices that part's content blocks into the current part.
-/
@[part_command Lean.Doc.Syntax.command]
def spliceContents : PartCommand
  | `(block|command{spliceContents $args*}) => do
    let {moduleName} ← SpliceContentsConfig.parse.run (← parseArgs args)
    -- `docName` maps the module name to the constant holding its document; resolve
    -- with info so go-to-definition works from the splice site.
    let moduleIdent ← mkIdentFrom moduleName <$> realizeGlobalConstNoOverloadWithInfo (mkIdentFrom moduleName (docName moduleName.getId))
    let modulePart ← `(($moduleIdent).toPart)
    -- Only the part's contents are spliced, not its title or metadata.
    let contentsAsBlock ← ``(Block.concat (Part.content $modulePart))
    PartElabM.addBlock contentsAsBlock
  | _ => throwUnsupportedSyntax
reference-manual/Manual/Meta/ConfigFile.lean
import Lean.Elab.Command
import Lean.Elab.InfoTree
import Verso
import Verso.Doc.ArgParse
import Verso.Doc.Elab.Monad
import VersoManual
import Verso.Code
import SubVerso.Highlighting
import SubVerso.Examples

open Verso.Genre.Manual

namespace Manual

/-- Cross-reference domain for configuration files documented in the manual. -/
def configFileDomain := `Manual.configFile

open Verso.Search in
/-- Quick-jump search configuration for the configuration-file domain. -/
def configFileDomainMapper : DomainMapper where
  displayName := "Configuration File"
  className := "config-file-domain"
  dataToSearchables := "(domainData) => Object.entries(domainData.contents).map(([key, value]) => ({ searchKey: key, address: `${value[0].address}#${value[0].id}`, domainId: 'Manual.configFile', ref: value, }))"

/--
Inline that marks the defining occurrence of a configuration file: registers
the filename in the configuration-file domain during traversal and renders it
as a `<code>` element carrying the permalink id.
-/
inline_extension Inline.configFile (filename : String) where
  init st := st
    |>.setDomainTitle configFileDomain "Configuration Files"
    |>.setDomainDescription configFileDomain "Descriptions of files used to configure Lean and its associated tooling"
    |>.addQuickJumpMapper configFileDomain (configFileDomainMapper.setFont { family := .code })
  data := .str filename
  traverse id data _ := do
    let .str filename := data
      | logError s!"Failed to deserialize {data} as a string for the filename"
        pure none
    let path ← (·.path) <$> read
    let _ ← Verso.Genre.Manual.externalTag id path filename
    modify fun st => st.saveDomainObject configFileDomain filename id
    pure none
  toHtml :=
    open Verso.Output.Html in
    open Verso.Doc.Html in
    some fun _ id data _ => do
      let .str filename := data
        | HtmlT.logError s!"Failed to deserialize {data} as a string for the filename"
          pure .empty
      let xref ← HtmlT.state
      let idAttr := xref.htmlId id
      -- Fix: the filename splice was corrupted; render the deserialized filename.
      return {{<code {{idAttr}}>{{filename}}</code>}}
  toTeX := none

open Verso.Doc.Elab
open Lean.Doc.Syntax
open Lean

/--
Role that documents a configuration file by name, e.g.
`{configFile}`\``lakefile.toml`\`; expands to an `Inline.configFile`.
-/
@[role]
def configFile : RoleExpanderOf Unit
  | (), inlines => do
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $cmdName:str )) := arg
      | throwErrorAt arg "Expected code literal with the config file's name"
    let filename := cmdName.getString
    `(show Verso.Doc.Inline Verso.Genre.Manual from .other (Manual.Inline.configFile $(quote filename)) #[.code $(quote filename)])
reference-manual/Manual/Meta/ListBullet.lean
import VersoManual
import Lean.Elab.InfoTree.Types
import Manual.Meta.Basic

open Verso Doc Elab
open Verso.Genre Manual
open Verso.ArgParse
open Lean Elab

namespace Manual

/-- A block wrapping a list whose items are rendered with a custom bullet character. -/
def Block.listBullet (bullet : String) : Block where
  name := `Manual.listBullet
  data := Json.str bullet

/--
The `:::listBullet "•"` directive: elaborates its contents and wraps them in a
`Block.listBullet` carrying the requested bullet string.
-/
@[directive_expander listBullet]
def listBullet : DirectiveExpander
  | args, contents => do
    let marker ← ArgParse.run (.positional `bullet .string) args
    let inner ← contents.mapM elabBlock
    let wrapped ← ``(Block.other (Block.listBullet $(quote marker)) #[$inner,*])
    pure #[wrapped]

/--
Renders a `listBullet` block as a `<div>` that sets the `--bullet` CSS custom
property; the accompanying CSS applies it to list-item markers.
-/
@[block_extension listBullet]
def listBullet.descr : BlockDescr where
  traverse _id _data _contents := pure none
  toTeX := none
  toHtml :=
    open Verso.Doc.Html in
    open Verso.Output.Html in
    some <| fun _goI goB _id data blocks => do
      -- Recover the bullet string from the block's JSON payload.
      let marker ←
        if let .str b := data then
          pure b
        else do
          HtmlT.logError "Invalid data for listBullet block"
          pure ""
      let inner ← blocks.mapM goB
      pure {{
        <div class="listBullet" style=s!"--bullet: '{marker} ';">
          {{inner}}
        </div>
      }}
  extraCss := [r##".listBullet li::marker { content: var(--bullet); font-size: 1.2em; }"##]
reference-manual/Manual/Meta/ElanOpt.lean
import Lean.Elab.Command
import Lean.Elab.InfoTree
import Verso
import Verso.Doc.ArgParse
import Verso.Doc.Elab.Monad
import VersoManual
import Verso.Code
import Manual.Meta.Basic

-- TODO: this is copied from LakeOpt for reasons of expediency. Factor out the common parts to a library!

open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets
open Lean.Doc.Syntax
open Lean Elab

namespace Manual

/-- Whether an Elan command-line item is a boolean flag or an option that takes a value. -/
inductive ElanOptKind where
  | flag
  | option
deriving ToJson, FromJson, DecidableEq, Ord, Repr

/-- Tag-namespace prefix used when constructing external tags for each kind. -/
def ElanOptKind.ns : ElanOptKind → String
  | .flag => "elan-flag"
  | .option => "elan-option"

open ElanOptKind in
instance : Quote ElanOptKind where
  quote
    | .flag => Syntax.mkCApp ``ElanOptKind.flag #[]
    | .option => Syntax.mkCApp ``ElanOptKind.option #[]

/-- Inline for the defining occurrence of an Elan option/flag, with optional value metavariable. -/
def Inline.elanOptDef (name : String) (kind : ElanOptKind) (argMeta : Option String) : Inline where
  name := `Manual.elanOptDef
  data := .arr #[.str name, toJson kind, toJson argMeta]

/-- Inline referencing an Elan option/flag; `original` preserves the full literal as written. -/
def Inline.elanOpt (name : String) (original : String) : Inline where
  name := `Manual.elanOpt
  data := .arr #[.str name, .str original]

/-- Cross-reference domain for Elan command-line options. -/
def elanOptDomain := `Manual.elanOpt

/-- Argument to the `elanOptDef` role: whether it defines a flag or an option. -/
structure ElanOptDefOpts where
  kind : ElanOptKind

/-- Argument parser for `ElanOptDefOpts`; accepts the identifiers `flag` or `option`. -/
def ElanOptDefOpts.parse [Monad m] [MonadError m] : ArgParse m ElanOptDefOpts :=
  ElanOptDefOpts.mk <$> .positional `kind optKind
where
  optKind : ValDesc m ElanOptKind := {
    description := "'flag' or 'option'",
    signature := .Ident
    get
      | .name x =>
        match x.getId with
        | `flag => pure .flag
        | `option => pure .option
        | _ => throwErrorAt x "Expected 'flag' or 'option'"
      | .num x | .str x => throwErrorAt x "Expected 'flag' or 'option'"
  }

/-- CSS for rendered Elan option references (dotted underline, solid on hover). -/
def elanOptCss : String := r#"
.elan-opt a { color: inherit; text-decoration: currentcolor underline dotted; }
.elan-opt a:hover { text-decoration: currentcolor underline solid; }
"#

/--
Role for the defining occurrence of an Elan option/flag, e.g.
`{elanOptDef option}`\``--toolchain T`\`: splits the literal into the option
name and its value metavariable and records both in the inline.
-/
@[role_expander elanOptDef]
def elanOptDef : RoleExpander
  | args, inlines => do
    let {kind} ← ElanOptDefOpts.parse.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $name:str )) := arg
      | throwErrorAt arg "Expected code literal with the option or flag"
    let origName := name.getString
    -- The option name is the leading dashes plus alphanumerics; the rest is the value metavariable.
    let name := origName.takeWhile fun c => c == '-' || c.isAlphanum
    let name := name.copy
    let valMeta := origName.drop name.length |>.dropWhile fun (c : Char) => !c.isAlphanum
    pure #[← `(show Verso.Doc.Inline Verso.Genre.Manual from .other (Manual.Inline.elanOptDef $(quote name) $(quote kind) $(quote (if valMeta.isEmpty then none else some valMeta.copy : Option String))) #[Inline.code $(quote name)])]

open Verso.Search in
/-- Quick-jump search configuration for the Elan option domain. -/
def elanOptDomainMapper : DomainMapper :=
  DomainMapper.withDefaultJs elanOptDomain "Elan Command-Line Option" "elan-option-domain"
    |>.setFont { family := .code }

/--
Renders the defining occurrence of an Elan option: registers it in the domain
during traversal and emits a permalinked `<code>` element, appending the value
metavariable when present.
-/
@[inline_extension elanOptDef]
def elanOptDef.descr : InlineDescr where
  init s := s.addQuickJumpMapper elanOptDomain elanOptDomainMapper
  traverse id data _ := do
    let .arr #[.str name, jsonKind, _] := data
      | logError s!"Failed to deserialize metadata for Elan option def: {data}"; return none
    let .ok kind := fromJson? (α := ElanOptKind) jsonKind
      | logError s!"Failed to deserialize metadata for Elan option def '{name}' kind: {jsonKind}"; return none
    modify fun s => s
      |>.saveDomainObject elanOptDomain name id
      |>.saveDomainObjectData elanOptDomain name jsonKind
    -- Tags are namespaced by kind so a flag and an option can share a name.
    discard <| externalTag id (← read).path (kind.ns ++ name)
    pure none
  toTeX := none
  toHtml :=
    open Verso.Output.Html in
    some <| fun goB id data content => do
      let .arr #[.str name, _jsonKind, metadata] := data
        | HtmlT.logError s!"Failed to deserialize metadata for Elan option def: {data}"
          content.mapM goB
      let idAttr := (← read).traverseState.htmlId id
      let .ok metadata := FromJson.fromJson? (α := Option String) metadata
        | HtmlT.logError s!"Failed to deserialize argument metadata for Elan option def: {metadata}"
          content.mapM goB
      if let some mv := metadata then
        pure {{<code {{idAttr}} class="elan-opt">{{name}}" "{{mv}}</code>}}
      else
        pure {{<code {{idAttr}} class="elan-opt">{{name}}</code>}}
  localContentItem _ info _ := open Verso.Output.Html in do
    if let .arr #[.str name, _jsonKind, _meta] := info then
      pure #[(name, {{<code>{{name}}</code>}})]
    else
      throw s!"Expected three-element array with string first, got {info}"

/--
Role referencing an Elan option/flag, e.g. `{elanOpt}`\``--verbose`\`; keeps the
full literal for display but links via the bare option name.
-/
@[role_expander elanOpt]
def elanOpt : RoleExpander
  | args, inlines => do
    let () ← ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $name:str )) := arg
      | throwErrorAt arg "Expected code literal with the option or flag"
    let optName := name.getString.takeWhile fun c => c == '-' || c.isAlphanum
    let optName := optName.copy
    pure #[← `(show Verso.Doc.Inline Verso.Genre.Manual from .other (Manual.Inline.elanOpt $(quote optName) $(quote name.getString)) #[Inline.code $(quote name.getString)])]

/--
Renders an Elan option reference: links the option name to its definition when
it is registered in the domain, otherwise renders the plain literal.
-/
@[inline_extension elanOpt]
def elanOpt.descr : InlineDescr where
  traverse _ _ _ := do
    pure none
  toTeX := none
  extraCss := [elanOptCss]
  toHtml :=
    open Verso.Output.Html in
    some <| fun goB _id data content => do
      let .arr #[.str name, .str original] := data
        | HtmlT.logError s!"Failed to deserialize metadata for Elan option ref: {data}"
          content.mapM goB
      if let some obj := (← read).traverseState.getDomainObject? elanOptDomain name then
        for id in obj.ids do
          if let some dest := (← read).traverseState.externalTags[id]? then
            -- Only the option name is linked; any trailing value text is rendered after the link.
            return {{<code class="elan-opt"><a href={{dest.link}} class="elan-command">{{name}}</a>{{original.drop name.length |>.copy}}</code>}}
      pure {{<code class="elan-opt">{{original}}</code>}}
reference-manual/Manual/Meta/Markdown.lean
import VersoManual
import Manual.Meta.Figure
import Lean.Elab.InfoTree

open Verso Doc Elab
open Verso.Genre Manual
open Verso.ArgParse
open Lean Elab

namespace Manual

-- Wrapper block whose only effect is to render its children inside a
-- `<div class="no-vale">` (see `toHtml` below); presumably this class tells the
-- Vale prose linter to skip the contents — TODO(review) confirm.
def Block.noVale : Block where
  name := `Manual.Block.noVale

@[block_extension Block.noVale]
def Block.noVale.descr : BlockDescr where
  traverse _ _ _ := pure none
  toTeX := none
  toHtml :=
    open Verso.Output.Html in
    some <| fun _ goB _ _ content => do
      -- Render all children unchanged, wrapped in the marker div.
      pure {{<div class="no-vale">{{← content.mapM goB}}</div>}}

/-- Closes the last-opened section, throwing an error on failure. -/
def closeEnclosingSection : PartElabM Unit := do
  -- We use `default` as the source position because the Markdown doesn't have one
  if let some ctxt' := (← getThe PartElabM.State).partContext.close default then
    modifyThe PartElabM.State fun st => {st with partContext := ctxt'}
  else
    throwError m!"Failed to close the last-opened explanation part"

/-- Closes as many sections as were created by markdown processing. -/
def closeEnclosingSections (headerMapping : Markdown.HeaderMapping) : PartElabM Unit := do
  -- One close per entry in the header mapping: each mapped header opened a section.
  for _ in headerMapping do
    closeEnclosingSection

-- Part command handling ```markdown ...``` code blocks: parses the body as
-- Markdown (via MD4Lean) and splices the resulting parts into the document.
@[part_command Lean.Doc.Syntax.codeblock]
def markdown : PartCommand
  | `(Lean.Doc.Syntax.codeblock| ``` $markdown:ident $args*| $txt ``` ) => do
    let x ← Lean.Elab.realizeGlobalConstNoOverloadWithInfo markdown
    -- Only fire when the code block's name resolves to *this* declaration;
    -- `by exact decl_name%` is this definition's own name.
    if x != by exact decl_name% then Elab.throwUnsupportedSyntax
    -- This command accepts no arguments; report each stray one with a removal hint.
    for arg in args do
      let h ← MessageData.hint m!"Remove it" #[""] (ref? := arg)
      logErrorAt arg m!"No arguments expected{h}"
    let some ast := MD4Lean.parse txt.getString
      | throwError "Failed to parse body of markdown code block"
    -- Track headers opened by Markdown so we can close them all afterwards.
    let mut currentHeaderLevels : Markdown.HeaderMapping := default
    for block in ast.blocks do
      currentHeaderLevels ← Markdown.addPartFromMarkdown block currentHeaderLevels
    closeEnclosingSections currentHeaderLevels
  | _ => Elab.throwUnsupportedSyntax
reference-manual/Manual/Meta/Example.lean
import VersoManual
import Manual.Meta.Figure
import Manual.Meta.LzCompress
import Lean.Elab.InfoTree.Types

open Verso Doc Elab
open Verso.Genre Manual
open Verso.ArgParse
open Lean.Doc.Syntax
open Lean Elab

namespace Manual

-- Block representing a worked example. `liveText` carries the concatenated
-- Lean source of the example's code blocks, used to build a live.lean-lang.org link.
def Block.example (descriptionString : String) (name : Option String) (opened : Bool) (liveText : Option String := none) : Block where
  -- FIXME: This should be a double-backtickable name
  name := `Manual.example
  data := ToJson.toJson (descriptionString, name, opened, (none : Option Tag), liveText)
  properties := .empty |>.insert `Verso.Genre.Manual.exampleDefContext descriptionString

/-- The type of the Json stored with Block.example -/
abbrev ExampleBlockJson := String × Option String × Bool × Option Tag × Option String

structure ExampleConfig where
  -- The example's description, as inline syntax plus the file map needed to elaborate it.
  description : FileMap × TSyntaxArray `inline
  /-- Name for refs -/
  tag : Option String := none
  -- When true, environment changes made by the example's Lean blocks are kept.
  keep : Bool := false
  -- When true, the rendered `<details>` element starts expanded.
  opened : Bool := false

section

variable [Monad m] [MonadInfoTree m] [MonadLiftT CoreM m] [MonadEnv m] [MonadError m] [MonadFileMap m]

def ExampleConfig.parse : ArgParse m ExampleConfig :=
  ExampleConfig.mk <$>
    .positional `description .inlinesString <*>
    .named `tag .string true <*>
    (.named `keep .bool true <&> (·.getD false)) <*>
    (.named `open .bool true <&> (·.getD false))

instance : FromArgs ExampleConfig m where
  fromArgs := ExampleConfig.parse

end

-- Runs `act` over `xs`, but processes the elements satisfying `prioritize` first;
-- results are returned in the original order (restored via the index sort below).
def prioritizedElab [Monad m] (prioritize : α → m Bool) (act : α → m β) (xs : Array α) : m (Array β) := do
  let mut out := #[]
  let mut later := #[]
  for h:i in [0:xs.size] do
    let x := xs[i]
    if ← prioritize x then
      out := out.push (i, (← act x))
    else
      later := later.push (i, x)
  for (i, x) in later do
    out := out.push (i, (← act x))
  -- Restore the input order, then drop the bookkeeping indices.
  out := out.qsort (fun (i, _) (j, _) => i < j)
  return out.map (·.2)

-- True when the block is a ```lean code block (name resolves to the InlineLean expander).
def isLeanBlock : TSyntax `block → CoreM Bool
  | `(block|```$nameStx:ident $_args*|$_contents:str```) => do
    let name ← realizeGlobalConstNoOverload nameStx
    return name == ``Verso.Genre.Manual.InlineLean.lean
  | _ => pure false

structure LeanBlockContent where
  -- The block's source text, when it should contribute to the live-link code.
  content : Option String
  -- Whether the block should be elaborated before the non-Lean blocks.
  shouldElab : Bool

-- Extracts the text of `lean` and `imports` code blocks. `keep := false` or
-- `error` blocks are elaborated but excluded from the live-link text.
def getLeanBlockContents? : TSyntax `block → DocElabM (LeanBlockContent)
  | `(block|```$nameStx:ident $args*|$contents:str```) => do
    let name ← realizeGlobalConstNoOverload nameStx
    if name == ``Verso.Genre.Manual.imports then
      return { content := some contents.getString, shouldElab := false }
    if name != ``Verso.Genre.Manual.InlineLean.lean then
      return { content := none, shouldElab := false }
    let args ← Verso.Doc.Elab.parseArgs args
    let args ← parseThe InlineLean.LeanBlockConfig args
    if !args.keep || args.error then
      return { content := none, shouldElab := true }
    pure <| { content := some contents.getString, shouldElab := true }
  | _ => pure { content := none, shouldElab := false }

/-- Elaborates all Lean blocks first, enabling local forward references -/
@[directive_expander leanFirst]
def leanFirst : DirectiveExpander
  | args, contents => do
    let () ← ArgParse.done.run args
    -- Elaborate Lean blocks first, so inlines in prior blocks can refer to them
    prioritizedElab (isLeanBlock ·) elabBlock contents

/-- Turn a list of lean blocks into one string with the appropriate amount of whitespace -/
def renderExampleContent (exampleBlocks : List String) : String :=
  "\n\n".intercalate <| exampleBlocks.map (·.trimAscii.copy)

/-- info: "a\n\nb\n\nc" -/
#guard_msgs in
#eval renderExampleContent ["\n \na\n", "\n b", "c "]

/-- A domain for named examples -/
def examples : Domain := {}

@[directive]
def «example» : DirectiveExpanderOf ExampleConfig
  | cfg, contents => do
    let description ← DocElabM.withFileMap cfg.description.1 <| cfg.description.2.mapM elabInline
    let descriptionString := inlinesToString (← getEnv) cfg.description.2
    PointOfInterest.save (← getRef) (inlinesToString (← getEnv) cfg.description.2)
      (selectionRange := mkNullNode cfg.description.2)
      (kind := Lsp.SymbolKind.interface)
      (detail? := some "Example")
    -- State action: collect Lean-block text while deciding elaboration priority.
    let accumulate (b : TSyntax `block) : StateT (List String) DocElabM Bool := do
      let {content, shouldElab} ← getLeanBlockContents? b
      if let some x := content then
        modify (· ++ [x])
      pure shouldElab
    -- Elaborate Lean blocks first, so inlines in prior blocks can refer to them
    -- Also accumulate text of lean blocks.
    let exampleCode := prioritizedElab accumulate (elabBlock ·) contents |>.run []
    let (blocks, acc) ← if cfg.keep then exampleCode else withoutModifyingEnv <| exampleCode
    let liveLinkContent := if acc = [] then none else some (renderExampleContent acc)
    -- Examples are represented using the first block to hold the description. Storing it in the JSON
    -- entails repeated (de)serialization.
    ``(Block.other (Block.example $(quote descriptionString) $(quote cfg.tag) (opened := $(quote cfg.opened)) $(quote liveLinkContent)) #[Block.para #[$description,*], $blocks,*])

@[block_extension «example»]
def example.descr : BlockDescr where
  traverse id data contents := do
    match FromJson.fromJson? data (α := ExampleBlockJson) with
    | .error e =>
      logError s!"Error deserializing example tag: {e}"; pure none
    | .ok (descrString, none, _, _, _) => do
      -- Unnamed example: register it in the domain but emit no external tag.
      modify (·.saveDomainObject ``examples descrString id)
      pure none
    | .ok (descrString, some x, opened, none, liveText) =>
      modify (·.saveDomainObject ``examples descrString id)
      let path ← (·.path) <$> read
      let tag ← Verso.Genre.Manual.externalTag id path x
      -- Rewrite the block with its assigned tag so later passes see it.
      -- NOTE(review): the replacement `data` is a 3-tuple, not `ExampleBlockJson`'s
      -- 5-tuple — verify that `toHtml`'s deserialization of it is as intended.
      pure <| some <| Block.other {Block.example descrString none false liveText with id := some id, data := toJson (some x, opened, some tag)} contents
    -- Is this line reachable?
    | .ok (descrString, some _, _, some _, liveText) =>
      modify (·.saveDomainObject ``examples descrString id)
      pure none
  toTeX :=
    some <| fun _ go _ _ content => do
      pure <| .seq <| ← content.mapM fun b => do
        pure <| .seq #[← go b, .raw "\n"]
  toHtml :=
    open Verso.Doc.Html in
    open Verso.Output.Html in
    some <| fun goI goB id data blocks => do
      if h : blocks.size < 1 then
        HtmlT.logError "Malformed example"
        pure .empty
      else
        -- By construction (see `«example»` above), the first child is the description paragraph.
        let .para description := blocks[0]
          | HtmlT.logError "Malformed example - description not paragraph"; pure .empty
        let (descrString, opened, liveText) ←
          match FromJson.fromJson? data (α := ExampleBlockJson) with
          | .error e => HtmlT.logError s!"Error deserializing example data {data}: {e}"; pure ("", false, none)
          | .ok (descrString, _, opened, _, liveText) => pure (descrString, opened, liveText)
        let xref ← HtmlT.state
        let ctxt ← HtmlT.context
        let mut attrs := xref.htmlId id
        if opened then attrs := attrs.push ("open", "")
        withReader (fun ρ => { ρ with definitionIds := xref.definitionIds ctxt, codeOptions.definitionsAsTargets := true}) do
          let liveLink :=
            match liveText with
            | .none => Output.Html.empty
            | .some content =>
              -- The example text is lz-string-compressed into the URL fragment.
              let href := s!"javascript:openLiveLink(\"{lzCompress content}\")"
              -- This link is `display: none` hidden by default, and enabled by maybeShowLiveLinks,
              -- assuming we detect that we're a sufficiently recent version of the manual
              -- to be compatible with the versions served by https://live.lean-lang.org
              {{ <div class="live-link"><a href={{href}}>"Live ↪"</a></div> }}
          pure {{
            <details class="example" {{attrs}}>
              <summary class="description">{{← description.mapM goI}}</summary>
              <div class="example-content">
                {{← blocks.extract 1 blocks.size |>.mapM goB}}
                {{liveLink}}
              </div>
            </details>
          }}
  extraJs := [
r#"function openDetailsForHashTarget() {
  // Get the current hash from the URL
  const hash = window.location.hash;

  // Exit early if no hash is present
  if (!hash) return;

  // Remove the # to get the actual ID
  const targetId = hash.substring(1);

  // Find the target element
  const targetElement = document.getElementById(targetId);

  // Exit if target element doesn't exist
  if (!targetElement) return;

  // Find the closest details element that contains the target
  const detailsElement = targetElement.closest('details');

  // If the target is inside a details element, open it
  if (detailsElement) {
    detailsElement.open = true;
  }
}

function liveLinkUrlOfCodez(codez) {
  if (window.metadata !== undefined && window.metadata.stable) {
    return "https://live.lean-lang.org/#project=mathlib-stable&codez=" + codez;
  }
  if (window.metadata !== undefined && window.metadata.latest) {
    return "https://live.lean-lang.org/#codez=" + codez;
  }
  return undefined;
}

function maybeShowLiveLinks() {
  if (liveLinkUrlOfCodez('') !== undefined) {
    const style = document.createElement('style');
    style.textContent = `.live-link { display: initial !important; }`;
    document.head.appendChild(style);
  }
}

function openLiveLink(codez) {
  const url = liveLinkUrlOfCodez(codez);
  if (url !== undefined) {
    window.open(url, '_blank')
  } else {
    // This case shouldn't be possible, because maybeShowLiveLinks returns undefined,
    // then maybeShowLiveLinks wouldn't have ever shown the links in the first place.
    // Just in case, throw up a dialog.
    alert("Don't know which version of live to use. Please report this at https://github.com/leanprover/reference-manual/issues");
  }
}

function pageInit() {
  openDetailsForHashTarget();
  maybeShowLiveLinks();
}

// Run the function when the page loads
document.addEventListener('DOMContentLoaded', pageInit);

// Also run when the hash changes (for single-page applications)
window.addEventListener('hashchange', pageInit);

// Run immediately in case the script loads after DOMContentLoaded
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', pageInit);
} else {
  pageInit();
}
"#
  ]
  extraCss := [
r#".example {
  border: 1px solid #98B2C0;
  border-radius: 0.5rem;
  margin-bottom: var(--verso--box-vertical-margin);
  margin-top: var(--verso--box-vertical-margin);
}
/* 1400 px is the cutoff for when the margin notes move out of the margin and into floated elements. */
@media screen and (700px < width <= 1400px) {
  .example {
    clear: both; /* Don't overlap margin notes with examples */
  }
}
.example .description::before {
  content: "Example: ";
}
.example .description {
  font-style: italic;
  font-family: var(--verso-structure-font-family);
  padding: var(--verso--box-padding);
}
.example[open] .description {
  margin-bottom: 0;
}
.example-content {
  padding: 0 var(--verso--box-padding) var(--verso--box-padding);
  position: relative;
}
.example-content > :first-child {
  margin-top: 0;
}
.example-content p:last-child {
  margin-bottom:0;
}
.example .hl.lean.block {
  overflow-x: auto;
}
.live-link {
  font-family: var(--verso-structure-font-family);
  position: absolute;
  bottom: 0px;
  right: 0px;
  padding: 0.5rem;
  border-top: 1px solid #98B2C0;
  border-left: 1px solid #98B2C0;
  border-top-left-radius: 0.5rem;
  display: none; /* default to not showing */
}
.live-link a {
  text-decoration: none;
  color: var(--lean-blue);
}
"#
  ]

def Block.keepEnv : Block where
  name := `Manual.example

-- TODO rename to `withoutModifyingEnv` or something more clear
@[directive_expander keepEnv]
def keepEnv : DirectiveExpander
  | args, contents => do
    let () ← ArgParse.done.run args
    PointOfInterest.save (← getRef) "keepEnv" (kind := .package)
    -- Elaborate the contents, then roll back any environment changes they made.
    withoutModifyingEnv <| withSaveInfoContext <| contents.mapM elabBlock

@[block_extension keepEnv]
def keepEnv.descr : BlockDescr where
  traverse _ _ _ := pure none
  toTeX := none
  toHtml :=
    open Verso.Doc.Html in
    open Verso.Output.Html in
    some <| fun _ goB _ _ blocks => do
      -- No wrapper markup: render the children as-is.
      blocks.mapM goB
reference-manual/Manual/Meta/CustomStyle.lean
import VersoManual
import Lean.Data.Json

open Verso Doc Elab Output Html Code
open Verso.Genre Manual
open Verso.ArgParse
open Lean

namespace Manual

-- Block that injects a literal CSS string into the rendered page as a
-- `<style>` element. The CSS travels through traversal as JSON data.
block_extension Block.customCSS (css : String) where
  data := toJson css
  traverse _ _ _ := pure none
  toTeX := none
  toHtml :=
    open Verso.Output.Html in
    some <| fun _ _ _ data _ => do
      match FromJson.fromJson? data with
      | .error err =>
        HtmlT.logError <| "Couldn't deserialize CSS while rendering HTML: " ++ err
        pure .empty
      | .ok (css : String) =>
        pure {{ <style>{{css}}</style> }}

-- ```customCSS code blocks: the block body is emitted verbatim as page CSS.
@[code_block]
def customCSS : CodeBlockExpanderOf Unit
  | (), str => `(Block.other (Block.customCSS $(quote str.getString)) #[])
reference-manual/Manual/Meta/Env.lean
import Lean.Elab.Command
import Lean.Elab.InfoTree
import Verso
import Verso.Doc.ArgParse
import Verso.Doc.Elab.Monad
import VersoManual
import Verso.Code
import SubVerso.Highlighting
import SubVerso.Examples
import Manual.Meta.Basic

open Lean.Doc.Syntax
open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets
open SubVerso.Highlighting Highlighted
open Lean Elab
open Lean.Elab.Tactic.GuardMsgs

namespace Manual

-- Inline for environment variables. Data layout: `[name, isDef]`, where
-- `isDef` marks the definition site (the link target).
def Inline.envVar : Inline where
  name := `Manual.envVar
  data := .arr #[.null, .bool false]

-- {envVar}`VAR` role: the single argument must be a code literal naming the variable.
@[role_expander envVar]
def envVar : RoleExpander
  | args, inlines => do
    let isDef ← parseOpts.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $varName:str )) := arg
      | throwErrorAt arg "Expected code literal with the environment variable"
    let v := varName.getString
    pure #[← `(.other {Manual.Inline.envVar with data := Json.arr #[.str $(quote v), .bool $(quote isDef)] } #[Inline.code $(quote v)])]
where
  parseOpts : ArgParse DocElabM Bool :=
    .flag `def false "If true, this is the definition site (i.e. the link target) for the variable"

def envVarDomain := `Manual.envVar

open Verso.Search in
def envVarDomainMapper : DomainMapper :=
  DomainMapper.withDefaultJs envVarDomain "Environment Variable" "env-var-domain" |>.setFont { family := .code }

@[inline_extension envVar]
def envVar.descr : InlineDescr where
  init s := s
    |>.setDomainTitle envVarDomain "Environment Variables"
    |>.addQuickJumpMapper envVarDomain envVarDomainMapper
  traverse id data _ := do
    let .arr #[.str var, .bool isDef] := data
      | logError s!"Couldn't deserialize environment variable info from {data}"; return none
    if isDef then
      -- Definition sites get an external tag, an index entry, and a domain object
      -- so that reference sites (below) can link to them.
      let path ← (·.path) <$> read
      let _ ← Verso.Genre.Manual.externalTag id path var
      Index.addEntry id {term := Inline.concat #[.code var, .text " (environment variable)"]}
      modify fun s => s.saveDomainObject envVarDomain var id
    return none
  toTeX := none
  extraCss := [
r#"
.env-var a {
  color: inherit;
  text-decoration: currentcolor underline dotted;
}
.env-var a:hover {
  text-decoration: currentcolor underline solid;
}
"#
  ]
  toHtml :=
    open Verso.Output.Html in
    some <| fun _ _ data _ => do
      let (var, isDef) ←
        match data with
        | .arr #[.str var, .bool isDef] => pure (var, isDef)
        | _ => HtmlT.logError s!"Couldn't deserialize environment var info from {data}"; return .empty
      -- Reference sites link to the definition site registered during traversal.
      if let some dest := (← read).traverseState.getDomainObject? envVarDomain var then
        for id in dest.ids do
          if let some dest := (← read).traverseState.externalTags[id]? then
            if isDef then
              -- TODO find an inline permalink widget that doesn't mess up text flow
              return {{ <code class="env-var" id={{dest.htmlId.toString}}>s!"{var}"</code> }}
            else
              let url := dest.link
              return {{<code class="env-var"><a href={{url}}>s!"{var}"</a></code>}}
      -- Fallback: no definition site found; render without a link.
      return {{<code class="env-var">s!"{var}"</code>}}
  localContentItem _ info _ := open Verso.Output.Html in do
    if let .arr #[.str var, .bool isDef] := info then
      if isDef then
        pure #[(var, {{<code>{{var}}</code>}}), (s!"{var} (Environment Variable)", {{<code>{{var}}</code>" (Environment Variable)"}})]
      else
        pure #[]
    else
      throw s!"Expected a two-element array with a string and a Boolean, got {info}"
reference-manual/Manual/Meta/LzCompress.lean
import Std.Data.HashMap

-- Standard base64 alphabet (with '=' appended), indexed by `getCharFromInt`.
def keyStrBase64 := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="

-- Maps a 6-bit value to its base64 character. Safe to index by byte position
-- because every character of `keyStrBase64` is single-byte ASCII.
def getCharFromInt (n : Nat) : Char :=
  String.Pos.Raw.get keyStrBase64 ⟨n⟩

open Std

/-!
This code was adapted from https://github.com/pieroxy/lz-string which was distributed
under the MIT License as given below. In its current state it is quite imperative and
not particularly idiomatic lean. The reason for using this code at all is to match the
compressed strings that lean4web uses.

MIT License

Copyright (c) 2013 Pieroxy pieroxy@pieroxy.net

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-/

-- LZW-style compressor ported from pieroxy/lz-string. Bits are accumulated in
-- `data_val`/`data_position` and flushed to `data` every `bitsPerChar` bits via
-- `getCharFromInt`. Kept deliberately close to the JS original so the output
-- matches lean4web's compressed strings (see the #guard_msgs test below).
def compress (uncompressed : String) (bitsPerChar : Nat) (getCharFromInt : Nat → Char) : String := Id.run do
  if uncompressed.isEmpty then return ""
  let mut dictionary : HashMap String Nat := {}
  let mut dictionaryToCreate : HashMap String Bool := {}
  let mut wc : String := ""
  let mut w : String := ""
  let mut enlargeIn : Nat := 2
  let mut dictSize : Nat := 3
  let mut numBits : Nat := 2
  let mut data : Array Char := #[]
  let mut data_val : Nat := 0
  let mut data_position : Nat := 0
  -- Iterate over single-character strings, as the JS original does.
  for c in uncompressed.toList.map toString do
    if !dictionary.contains c then
      dictionary := dictionary.insert c dictSize
      dictSize := dictSize + 1
      dictionaryToCreate := dictionaryToCreate.insert c true
    wc := w ++ c
    if dictionary.contains wc then
      w := wc
    else
      if dictionaryToCreate.contains w then
        -- First output of a fresh character: emit a literal (8- or 16-bit) code.
        let code := String.Pos.Raw.get! w 0 |>.toNat
        if code < 256 then
          -- Marker: numBits zero bits, then 8 bits of the character code, LSB first.
          for _ in [:numBits] do
            data_val := data_val <<< 1
            if data_position == bitsPerChar - 1 then
              data := data.push (getCharFromInt data_val)
              data_position := 0
              data_val := 0
            else
              data_position := data_position + 1
          let mut value := code
          for _ in [0:8] do
            data_val := (data_val <<< 1) ||| (value &&& 1)
            if data_position == bitsPerChar - 1 then
              data := data.push (getCharFromInt data_val)
              data_position := 0
              data_val := 0
            else
              data_position := data_position + 1
            value := value >>> 1
        else
          -- Marker: a 1 bit then zeros, then 16 bits of the character code.
          let mut value := 1
          for _ in [:numBits] do
            data_val := (data_val <<< 1) ||| value
            if data_position == bitsPerChar - 1 then
              data := data.push (getCharFromInt data_val)
              data_position := 0
              data_val := 0
            else
              data_position := data_position + 1
            value := 0
          let mut value' := String.Pos.Raw.get! w 0 |> Char.toNat
          for _ in [0:16] do
            data_val := (data_val <<< 1) ||| (value' &&& 1)
            if data_position == bitsPerChar - 1 then
              data := data.push (getCharFromInt data_val)
              data_position := 0
              data_val := 0
            else
              data_position := data_position + 1
            value' := value' >>> 1
        enlargeIn := enlargeIn - 1
        if enlargeIn == 0 then
          enlargeIn := Nat.pow 2 numBits
          numBits := numBits + 1
        dictionaryToCreate := dictionaryToCreate.erase w
      else
        -- Emit the dictionary code for w, LSB first.
        let mut value := dictionary.get! w
        for _ in [:numBits] do
          data_val := (data_val <<< 1) ||| (value &&& 1)
          if data_position == bitsPerChar - 1 then
            data := data.push (getCharFromInt data_val)
            data_position := 0
            data_val := 0
          else
            data_position := data_position + 1
          value := value >>> 1
        enlargeIn := enlargeIn - 1
        if enlargeIn == 0 then
          enlargeIn := Nat.pow 2 numBits
          numBits := numBits + 1
      dictionary := dictionary.insert wc dictSize
      dictSize := dictSize + 1
      w := c
  -- Output the code for _w if not empty
  if !w.isEmpty then
    if dictionaryToCreate.contains w then
      let code := String.Pos.Raw.get! w 0 |>.toNat
      if code < 256 then
        for _ in [:numBits] do
          data_val := data_val <<< 1
          if data_position == bitsPerChar - 1 then
            data := data.push (getCharFromInt data_val)
            data_position := 0
            data_val := 0
          else
            data_position := data_position + 1
        let mut value := code
        for _ in [0:8] do
          data_val := (data_val <<< 1) ||| (value &&& 1)
          if data_position == bitsPerChar - 1 then
            data := data.push (getCharFromInt data_val)
            data_position := 0
            data_val := 0
          else
            data_position := data_position + 1
          value := value >>> 1
      else
        let mut value := 1
        for _ in [:numBits] do
          data_val := (data_val <<< 1) ||| value
          if data_position == bitsPerChar - 1 then
            data := data.push (getCharFromInt data_val)
            data_position := 0
            data_val := 0
          else
            data_position := data_position + 1
          value := 0
        let mut value' := code
        for _ in [0:16] do
          data_val := (data_val <<< 1) ||| (value' &&& 1)
          if data_position == bitsPerChar - 1 then
            data := data.push (getCharFromInt data_val)
            data_position := 0
            data_val := 0
          else
            data_position := data_position + 1
          value' := value' >>> 1
      enlargeIn := enlargeIn - 1
      if enlargeIn == 0 then
        enlargeIn := Nat.pow 2 numBits
        numBits := numBits + 1
      dictionaryToCreate := dictionaryToCreate.erase w
    else
      let mut value := dictionary.get! w
      for _ in [:numBits] do
        data_val := (data_val <<< 1) ||| (value &&& 1)
        if data_position == bitsPerChar - 1 then
          data := data.push (getCharFromInt data_val)
          data_position := 0
          data_val := 0
        else
          data_position := data_position + 1
        value := value >>> 1
      enlargeIn := enlargeIn - 1
      if enlargeIn == 0 then
        enlargeIn := Nat.pow 2 numBits
        numBits := numBits + 1
  -- End of stream marker
  let mut value := 2
  for _ in [:numBits] do
    data_val := (data_val <<< 1) ||| (value &&& 1)
    if data_position == bitsPerChar - 1 then
      data := data.push (getCharFromInt data_val)
      data_position := 0
      data_val := 0
    else
      data_position := data_position + 1
    value := value >>> 1
  -- Flush last char
  let mut loop := true
  while loop do
    data_val := data_val <<< 1
    if data_position == bitsPerChar - 1 then
      data := data.push (getCharFromInt data_val)
      loop := false
    else
      data_position := data_position + 1
  return data.foldl (init := "") (·.push ·)

-- 6 bits per output character: lz-string's "compressToBase64"-style encoding,
-- matching what lean4web's live.lean-lang.org URLs expect.
def lzCompress (uncompressed : String) :=
  compress uncompressed 6 getCharFromInt

/-- info: "JYWwDg9gTgLgBAWQIYwBYBtgCMB0AZCAc2AGMcAhJAZ1LgFo64traAzJEmKuYAOznRFSAKAZw0AU2gSQ3PnDwSkvAOTcQKVDJSlumLFCRQAnsNGNF8AApxlAEzgBFJhPFQArhLrt0VV1RgUGQleLmEANyNgJCx0VwAKG2cALjgrKAgwAEozMQAVLThWCHRBAHc+Qh5uJCYWEjgoCSp3dHh5QWISYQkADyRwOLhUgBq4RLhAciInLLhAFMI4MZtACiJFp2GAXiZTOHpGYC44MAyIVmrbdCakO2MefkVlNTgNSRfdAWxDE2Fdvo54XgQGAAfXswOguUYAAkJE1zsogVooHUaA0mi02ncBEJun9Bq5RuMVjN5msbNMxiktlgdrYwGB0MYAPx7OBlVwkZVwPxGEioIrQcSFY4QU5YyQfAxGWlidlwTn8JC+CCNCQMjiuAAGSHpjKZmrZB35B24EHcMDA5uEQA" -/
#guard_msgs in
#eval lzCompress r#"import Mathlib.Logic.Basic -- basic facts in logic
-- theorems in Lean's mathematics library

-- Let P and Q be true-false statements
variable (P Q : Prop)

-- The following is a basic result in logic
example : ¬ (P ∧ Q) ↔ ¬ P ∨ ¬ Q := by
  -- its proof is already in Lean's mathematics library
  exact not_and_or

-- Here is another basic result in logic
example : ¬ (P ∨ Q) ↔ ¬ P ∧ ¬ Q := by
  apply? -- we can search for the proof in the library
  -- we can also replace `apply?` with its output
"#
reference-manual/Manual/Meta/ElanCheck.lean
import Lean.Elab.Command
import Lean.Elab.InfoTree
import Verso
import Verso.Doc.ArgParse
import Verso.Doc.Elab.Monad
import VersoManual
import Verso.Code
import SubVerso.Highlighting
import SubVerso.Examples
import Manual.Meta.Basic
import Manual.Meta.ExpectString

open Lean Elab
open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets
open SubVerso.Highlighting Highlighted
open Lean.Elab.Tactic.GuardMsgs

namespace Manual

-- Parses zero or more positional `subcommand` arguments, each given either as
-- a string literal or a bare identifier (numbers are rejected).
private partial def parseOpts [Monad m] [MonadInfoTree m] [MonadLiftT CoreM m] [MonadEnv m] [MonadError m] : ArgParse m (List String) :=
  (.many (.positional `subcommand stringOrIdent))
where
  stringOrIdent : ValDesc m String := {
    get
      | .name x => pure <| x.getId.toString (escape := false)
      | .str x => pure x.getString
      | .num n => throwErrorAt n "Expected string or identifier"
    signature := .String ∪ .Ident
    description := "subcommand"
  }

/-- Check that the output of `elan --help` has not changed unexpectedly -/
@[code_block_expander elanHelp]
def elanHelp : CodeBlockExpander
  | args, str => do
    let sub ← parseOpts.run args
    -- Run `elan <subcommands...> --help` and compare its stdout against the
    -- code block's contents. NOTE: requires `elan` on PATH at build time.
    let args := sub.toArray ++ #["--help"]
    let out ← IO.Process.output {cmd := "elan", args}
    if out.exitCode != 0 then
      throwError m!"When running 'elan --help', the exit code was {out.exitCode}\n" ++
        m!"Stderr:\n{out.stderr}\n\nStdout:\n{out.stdout}\n\n"
    let elanOutput := out.stdout
    -- Trailing whitespace per line is normalized before comparison (`preEq`).
    discard <| expectString "'elan --help' output" str elanOutput (useLine := useLine) (preEq := (·.trimAsciiEnd.copy))
    return #[]
where
  -- Ignore the version spec or empty lines to reduce false positives
  useLine (l : String) : Bool := !l.isEmpty && !"elan ".isPrefixOf l
reference-manual/Manual/Meta/ErrorExplanation/Domain.lean
import VersoManual

open Lean
open Verso.Genre.Manual

namespace Manual

-- Cross-reference domain that holds one object per documented error explanation,
-- keyed by the error's full name (see ErrorExplanation/Header.lean).
def errorExplanationDomain := `Manual.errorExplanation

open Verso.Search in
-- Quick-jump search integration for the error-explanation domain; entries are
-- rendered in the code font since they are error identifiers.
def errorExplanationDomainMapper :=
  DomainMapper.withDefaultJs errorExplanationDomain "Error Explanation" "error-explanation-domain" |>.setFont { family := .code }
reference-manual/Manual/Meta/ErrorExplanation/Header.lean
import VersoManual
import Manual.Meta.ErrorExplanation.Domain

open Lean
open Verso.Genre.Manual

set_option doc.verso true

variable [Monad m] [MonadError m]

/-- An error explanation's compiler-provided metadata together with the error's full name. -/
structure ErrorExplanationExtendedMetadata extends ErrorExplanation.Metadata where
  name : Name
deriving ToJson, FromJson

deriving instance Quote for ErrorExplanation.Metadata
deriving instance Quote for ErrorExplanationExtendedMetadata

-- Header block for an error-explanation page: registers the error name in the
-- explanation domain during traversal and renders the metadata box in HTML.
block_extension Block.errorExplanationHeader (metadata : ErrorExplanationExtendedMetadata) where
  data := toJson metadata
  init st := st
    |>.setDomainTitle Manual.errorExplanationDomain "Error Explanations"
    |>.setDomainDescription Manual.errorExplanationDomain "Explanations of error messages and warnings produced during compilation"
    |>.addQuickJumpMapper Manual.errorExplanationDomain Manual.errorExplanationDomainMapper
  traverse id info _ := do
    let .ok errorMetadata := FromJson.fromJson? (α := ErrorExplanationExtendedMetadata) info
      | logError s!"Invalid JSON for error explanation:\n{info}"; pure none
    -- Register the error name so `/find/?domain=Manual.errorExplanation&name=...` resolves.
    modify (·.saveDomainObject Manual.errorExplanationDomain errorMetadata.name.toString id)
    discard <| Verso.Genre.Manual.externalTag id (← read).path errorMetadata.name.toString
    pure none
  toTeX := none
  extraCss := ["
.error-explanation-metadata {
  margin-bottom: 2rem; /* Double the paragraph margin */
}
.error-explanation-metadatum:not(:last-child):after {
  content: '|';
  margin: 0 10px;
}
.error-explanation-removed-warning {
  border: 1px solid var(--verso-warning-color);
  border-radius: 0.5rem;
  padding-left: var(--verso--box-padding);
  padding-right: var(--verso--box-padding);
}
"]
  toHtml := some fun _goI _goB _id info _contents =>
    open Verso.Doc.Html in
    open Verso.Output Html in do
    let .ok metadata := FromJson.fromJson? (α := ErrorExplanationExtendedMetadata) info
      -- Bug fix: this message previously lacked the `s!` interpolation prefix, so
      -- the literal text `{metadata}` was logged instead of the offending JSON.
      | HtmlT.logError s!"Failed to parse info for error explanation metadata block:\n{info}"
        pure .empty
    -- Warn prominently when the diagnostic was removed in a later compiler version.
    let deprecatedWarning :=
      if metadata.removedVersion?.isSome then
        {{
          <div class="error-explanation-removed-warning">
            <p><strong>"Note: "</strong> "This diagnostic is no longer produced."</p>
          </div>
        }}
      else
        .empty
    let sevText := if metadata.severity matches .warning then "Warning" else "Error"
    let entries := #[("Severity", sevText), ("Since", metadata.sinceVersion)] ++
      (metadata.removedVersion?.map fun v => #[("Removed", v)]).getD #[]
    let entries := entries.map fun (label, data) =>
      {{
        <span class="error-explanation-metadatum">
          <strong>{{Html.text true label}}": "</strong>
          {{Html.text true data}}
        </span>
      }}
    return {{
      <div class="error-explanation-metadata">
        {{deprecatedWarning}}
        <p>"Error code: "<code>{{metadata.name.toString}}</code></p>
        <p><em>{{metadata.summary}}</em></p>
        <p>{{entries}}</p>
      </div>
    }}

structure ErrorHeaderConfig where
  name : Name

instance : Verso.ArgParse.FromArgs ErrorHeaderConfig m where
  fromArgs := ErrorHeaderConfig.mk <$> Verso.ArgParse.positional `title Verso.ArgParse.ValDesc.name

/--
Error explanations must start with a header inserted by the
{lit}`{errorExplanationHeader}` block command. This elaboration of this command
contains a {name}`Block.errorExplanationHeader` block that handles both
formatting the header and inserting the external reference to the error
identifier.

Concretely, traversing the {name}`Block.errorExplanationHeader` block generated
from the command {lit}`{errorExplanationHeader lean.unknownIdentifier}`
registers the error name {lean}`` `lean.unknownIdentifier`` with the domain
{name}`Manual.errorExplanationDomain` so that the URI
```
https://lean-lang.org/doc/reference/latest/find/?domain=Manual.errorExplanation&name=lean.unknownIdentifier
```
will redirect to a URI like
```
https://lean-lang.org/doc/reference/latest/Error-Explanations/lean___unknownIdentifier/#lean___unknownIdentifier
```
-/
@[block_command]
def errorExplanationHeader : Verso.Doc.Elab.BlockCommandOf ErrorHeaderConfig
  | cfg, _contents => do
    match ← getErrorExplanation? cfg.name with
    | .none =>
      -- Unknown error name: warn, but still emit a header with placeholder
      -- metadata so the page builds.
      Lean.logWarning m!"The named error `{cfg.name}` is not known by the Lean compiler"
      let metadata := ErrorExplanationExtendedMetadata.mk
        (ErrorExplanation.Metadata.mk "Summary unavailable" "Metadata unavailable" MessageSeverity.error Option.none) cfg.name
      ``(Doc.Block.other (Block.errorExplanationHeader $(quote metadata)) #[])
    | .some explan =>
      let metadata := ErrorExplanationExtendedMetadata.mk explan.metadata cfg.name
      ``(Doc.Block.other (Block.errorExplanationHeader $(quote metadata)) #[])
reference-manual/Manual/Meta/ErrorExplanation/Example.lean
import VersoManual
import Manual.Meta.Example

open Lean

set_option doc.verso true

/-
A tabbed container for MWEs in an error explanation example. Must satisfy the invariant that
`titles.size` is equal to the number of children of this block.

This is intended to be formatted with a bottom border separating it from subsequent content, and is
only intended to be used in the context of elaborated `errorExample` directives.
-/
block_extension Manual.Block.tabbedErrorReproduction (titles : Array String) where
  data := toJson titles
  -- Register an external tag so the rendered tabs/panels get stable HTML IDs.
  traverse id _data _blocks := do
    discard <| Verso.Genre.Manual.externalTag id (← read).path "error-example"
    pure none
  toTeX := none
  extraCss := [r#"
.error-example-container:not(:last-child) {
  border-bottom: 1px solid gray;
  padding-bottom: var(--verso--box-padding);
}
.error-example-tab-list [role="tab"] {
  position: relative;
  z-index: 1;
  background: white;
  border: 0;
  padding: 0.2em;
  cursor: pointer;
}
.error-example-tab-list [role="tab"]:not(:last-child) {
  margin-right: 1rem;
}
.error-example-tab-list [role="tab"][aria-selected="true"] {
  border-bottom: 1px solid gray;
}
/* this rule and the following ensure that all tabs are the same height */
.error-example-tab-view {
  display: flex;
}
.error-example-tabpanel {
  margin-right: -100%;
  width: 100%;
  display: block;
}
.error-example-tabpanel.error-example-tabpanel-hidden {
  visibility: hidden;
}
.error-example-tabpanel .hl.lean .token {
  /* unset transition to avoid lag when switching panels */
  transition: visibility 0s;
}
"#]
  extraJs := [r#"
window.addEventListener('DOMContentLoaded', () => {
  const tabLists = document.querySelectorAll('.error-example-tab-list')
  tabLists.forEach(tabList => {
    const tabs = tabList.querySelectorAll(':scope > [role="tab"]')
    const setActiveTab = (e) => {
      for (const tab of tabs) {
        const controllee = document.getElementById(tab.getAttribute('aria-controls'))
        if (tab === e.target) {
          tab.setAttribute('aria-selected', true)
          controllee.classList.remove('error-example-tabpanel-hidden')
        } else {
          tab.setAttribute('aria-selected', false)
          controllee.classList.add('error-example-tabpanel-hidden')
        }
      }
    }
    tabs.forEach(tab => { tab.addEventListener('click', setActiveTab) })
    let focusedIdx = 0
    tabList.addEventListener('keydown', e => {
      if (e.key === 'ArrowRight' || e.key === 'ArrowLeft') {
        tabs[focusedIdx].setAttribute('tabindex', -1)
        focusedIdx = e.key === 'ArrowRight'
          ? (focusedIdx + 1) % tabs.length
          : (focusedIdx - 1 + tabs.length) % tabs.length
        tabs[focusedIdx].setAttribute('tabindex', 0)
        tabs[focusedIdx].focus()
      }
    })
  })
})
"#]
  toHtml := some fun _goI goB id info contents =>
    open Verso.Doc.Html in
    open Verso.Output Html in do
      let .ok titles := FromJson.fromJson? (α := Array String) info
        | HtmlT.logError "Invalid titles JSON for example block"
          pure .empty
      -- Enforce the invariant documented above: one title per child block.
      unless titles.size == contents.size do
        HtmlT.logError s!"Mismatched number of titles and contents for example block: \
          Found {contents.size} tab panels but {titles.size} titles."
        return .empty
      let some { htmlId, .. } := (← HtmlT.state).externalTags[id]?
        | HtmlT.logError "Could not find tag for error example"
          pure .empty
      -- One tab button per title; the first tab starts selected and focusable.
      let buttons ← titles.mapIdxM fun i (title : String) => do
        let (tabIndex, selected) := if i == 0 then ("0", "true") else ("-1", "false")
        let idxStr := toString i
        return {{
          <button type="button" role="tab" aria-selected={{selected}} tabindex={{tabIndex}}
              id={{s!"{htmlId.toString}-button-{idxStr}"}}
              aria-controls={{s!"{htmlId.toString}-panel-{idxStr}"}}>
            {{title}}
          </button>
        }}
      -- One panel per child block; panels other than the first start hidden.
      let panels ← contents.mapIdxM fun i b => do
        let className := "error-example-tabpanel" ++
          if i == 0 then "" else " error-example-tabpanel-hidden"
        let idxStr := toString i
        -- Turn off `inlineProofStates` rendering in the context of the panel to avoid painful
        -- z-index issues
        let panelContents ← withReader
          (fun ctx =>
            { ctx with codeOptions := { ctx.codeOptions with inlineProofStates := false } })
          (goB b)
        return {{
          <div role="tabpanel" class={{className}} id={{s!"{htmlId.toString}-panel-{idxStr}"}}
              aria-labelledby={{s!"{htmlId.toString}-button-{idxStr}"}}>
            {{ panelContents }}
          </div>
        }}
      pure {{
        <div class="error-example-container">
          <div class="error-example-tab-list" role="tablist" aria-label="Code Samples">
            {{buttons}}
          </div>
          <div class="error-example-tab-view">
            {{panels}}
          </div>
        </div>
      }}

variable [Monad m] [MonadError m]

set_option pp.rawOnError true

structure ErrorExampleConfig where
  title : String

instance : Verso.ArgParse.FromArgs ErrorExampleConfig m where
  fromArgs := ErrorExampleConfig.mk <$> Verso.ArgParse.positional `title Verso.ArgParse.ValDesc.string

/--
Structured examples that show a minimum working example (MWE) of an error, as well as one or more
corrections, in a fashion suitable for error explanations.

The structure is relatively rigid:

* One code block labeled "broken" containing Lean code that generates an error
* One code block labeled "output" that contains the plain text of (one of the) generated errors
* One or more code blocks labeled "fixed" that contain Lean code demonstrating how the error can be
  corrected. If there is more than one block, the block must have an additional positional argument,
  a string describing the example. (A block labeled {lit}`fixed "descr"` will be displayed as
  "Fixed (descr).")

**Example**

`````
:::errorExample "Only a Dot Before the Numeral"
```broken
example := .3
```
```output
invalid occurrence of `·` notation, it must be surrounded by parentheses (e.g. `(· + 1)`)
```
```fixed "make it a `Nat`"
example := 3
```
```fixed "make it a `Float`"
example := 0.3
```

Some explanatory text here.
:::
`````
-/
@[directive]
def errorExample : Verso.Doc.Elab.DirectiveExpanderOf ErrorExampleConfig
  | { title }, contents => do
    let brokenStx :: restStx := contents.toList
      | throwError m!"The error example had no contents"
    let `(Lean.Doc.Syntax.codeblock|``` broken| $brokenTxt ```) := brokenStx
      | throwErrorAt brokenStx m!"First element in errorExample must be a `broken` codeblock containing the broken minimal working example"
    let errorStx :: restStx := restStx
      | throwError m!"The error example did not contain a second element"
    let `(Lean.Doc.Syntax.codeblock|``` output| $errorTxt ```) := errorStx
      | throwErrorAt errorStx m!"Second element in errorExample must be an `output` codeblock containing the generated error message"
    let brokenBlock ← brokenTxt |> Verso.Genre.Manual.InlineLean.lean
      { «show» := true, keep := false, name := `broken.lean, error := true, fresh := true }
    let errorBlock ← errorTxt |> Verso.Genre.Manual.InlineLean.leanOutput
      { «show» := true, summarize := false, name := mkIdentFrom errorStx `broken.lean,
        severity := .none, whitespace := .exact, normalizeMetas := true, allowDiff := 0 }
    let (fixedExamples, narrativeStx) ← partitionFixed restStx
    let fixedBlocks : List (String × Term) ← match fixedExamples with
      | [] =>
        -- Report at the first leftover block when one exists; otherwise point at the `output`
        -- block. (Previously this was `restStx[0]!`, which panicked when `restStx` was empty.)
        throwErrorAt (restStx.head?.getD errorStx)
          "Error examples must include one or more `fixed` codeblocks containing a fix for broken code"
      | [(_, .none, fixedTxt)] =>
        let contents ← Verso.Genre.Manual.InlineLean.lean
          { «show» := true, keep := false, name := none, error := false, fresh := true } fixedTxt
        pure [("Fixed", contents)]
      | [(_, .some title, _)] =>
        throwErrorAt title m!"Error explanations with a single title don't need to name the title"
      | _ =>
        fixedExamples.mapM (fun (syn, exampleName, fixedTxt) => do
          let .some q := exampleName
            | throwErrorAt syn "Error explanations with more than one title need to name each title"
          let contents ← Verso.Genre.Manual.InlineLean.lean
            { «show» := true, keep := false, name := none, error := false, fresh := true } fixedTxt
          pure (s!"Fixed ({q.getString})", contents))
    let narrativeBlocks ← narrativeStx.mapM Verso.Doc.Elab.elabBlock
    -- The first tab always shows the original broken code together with its output.
    let tabbedContentHeaders := "Original" :: fixedBlocks.map (·.1)
    let tabbedContentBlocks :=
      (← ``(Doc.Block.concat #[$brokenBlock, $errorBlock])) :: fixedBlocks.map (·.2)
    ``(Doc.Block.other (Manual.Block.example $(quote (title)) none (opened := true))
        #[Doc.Block.para #[Doc.Inline.text $(quote (title))],
          Doc.Block.other (Manual.Block.tabbedErrorReproduction $(quote tabbedContentHeaders.toArray))
            #[$tabbedContentBlocks.toArray,*],
          $narrativeBlocks.toArray,*])
where
  /-- Split a leading run of `fixed` codeblocks (with their optional title strings) from the
  trailing narrative blocks. -/
  partitionFixed (blocks : List (TSyntax `block)) :
      Verso.Doc.Elab.DocElabM (List (Syntax × Option StrLit × TSyntax `str) × List (TSyntax `block)) := do
    match blocks with
    | [] => pure ([], [])
    | block :: rest =>
      let `(Lean.Doc.Syntax.codeblock|``` fixed $args*| $fixedTxt ```) := block
        | return ([], blocks)
      let parsedArgs ← Verso.Doc.Elab.parseArgs args
      let arg? : Option _ ← match parsedArgs.toList with
        | [] => pure none
        | [.anon (.str title)] => pure <| some title
        | [_] => throwErrorAt args[0]! m!"String arg expected"
        | _ => throwErrorAt args[1]! m!"At most one string arg expected"
      let (fixedExamples, descrBlocks) ← partitionFixed rest
      return ((block, arg?, fixedTxt) :: fixedExamples, descrBlocks)
reference-manual/Manual/Meta/LakeToml/Test.lean
import Lean.Elab.Command
import Lean.Elab.Deriving

namespace Manual.Toml

open Std (Format)
open Lean Elab

/-- Types that can be used in tests embedded in the manual for TOML decoding -/
class Test (α : Sort u) where
  toString : α → Format

-- Propositions carry no data worth printing, so render them as an ellipsis.
instance {p : Prop} : Test p where
  toString _ := .text "…"

instance [ToString α] : Test α where
  toString := .text ∘ toString

instance [Repr α] : Test α where
  toString x := repr x

instance [Test α] : Test (Array α) where
  toString arr :=
    "#[" ++ .group (.nest 2 <| Format.joinSep (arr.map Test.toString).toList ("," ++ .line)) ++ "]"

instance [Test α] : Test (NameMap α) where
  toString xs :=
    "{" ++
    .group (.nest 2 <|
      Format.joinSep (xs.toList.map (fun x => s!"'{x.1}' ↦ {Test.toString x.2}")) ("," ++ .line)) ++
    "}"

-- Functions have no useful printed form.
instance {α : Type u} {β : Type v} : Test (α → β) where
  toString _ := "#<fun>"

-- HACK: elide these fields that are platform-specific
def ignoreFields := [`buildArchive]

open Lean Elab Command in
/--
Deriving handler for `Test`. Given exactly one structure name, generates a `Test` instance that
prints the structure field by field, replacing the platform-specific fields listed in
`ignoreFields` with `"ELIDED"`. Returns `false` (declining to handle the request) when given
anything other than a single structure.
-/
def deriveTest (declNames : Array Name) : CommandElabM Bool := do
  if h : declNames.size ≠ 1 then
    return false
  else
    let declName := declNames[0]
    if !isStructure (← getEnv) declName then
      throwError "Can't derive 'Test' for non-structure '{declName}'"
    -- Invent names `x0`, `x1`, … for the structure's type parameters so that they can be
    -- mentioned in the generated instance header.
    let params ← liftTermElabM do
      let uniParams ← (← getEnv).find? declName |>.mapM (Meta.mkFreshLevelMVarsFor ·)
      let ty ← Meta.inferType (.const declName <| uniParams.getD [])
      Meta.forallTelescopeReducing ty fun params _ =>
        pure <| params.mapIdx fun i _ => s!"x{i}".toName
    let fs := getStructureFields (← getEnv) declName
    let header : Term := Syntax.mkApp (mkIdent declName) (params.map (mkIdent ·))
    -- One `Format` term per field, rendered as `fieldName := <value>` (or "ELIDED").
    let fields : TSyntaxArray `term ← fs.mapM fun f => do
      let rhs ← if f ∈ ignoreFields then pure (quote "ELIDED") else `(Test.toString $(mkIdent f))
      `(Format.group <| Format.nest 2 <|
          Format.text $(quote <| toString f ++ " :=") ++ Format.line ++ $rhs)
    let cmd ← `(instance : Test $header where
                  toString
                    | ⟨$(fs.map mkIdent),*⟩ =>
                      "{" ++ .group (.nest 2 <| (Format.text "," ++ .line).joinSep [$fields,*]) ++ "}")
    elabCommand cmd
    return true

initialize registerDerivingHandler ``Test deriveTest
reference-manual/Manual/Meta/LakeToml/Toml.lean
import VersoManual
import Manual.Meta.Basic

import Lake.Toml.Decode
import Lake.Load.Toml

open Verso ArgParse Doc Elab Genre.Manual Html Code Highlighted.WebAssets Multi
open SubVerso.Highlighting Highlighted
open Lean Elab
open scoped Lean.Doc.Syntax
open Lean.Elab.Tactic.GuardMsgs

namespace Manual

def tomlFieldDomain := `Manual.lakeTomlField
def tomlTableDomain := `Manual.lakeTomlTable

namespace Toml

namespace Highlighted

inductive Token where
  | string : String → Token
  | bool : Bool → Token
  | num : Nat → Token -- TODO other kinds of number?
deriving DecidableEq, Repr, Ord, ToJson, FromJson

open Lean.Syntax in
open Token in
instance : Quote Token where
  quote
    | .string s => mkCApp ``string #[quote s]
    | .bool b => mkCApp ``Token.bool #[quote b]
    | .num n => mkCApp ``num #[quote n]

end Highlighted

inductive Highlighted where
  | token : Highlighted.Token → String → Highlighted
  | key (fullPath : Option String) : Highlighted → Highlighted
  | text : String → Highlighted
  | /-- Whitespace from leading/trailing source info -/
    ws : String → Highlighted
  | link (url : String) : Highlighted → Highlighted
  | concat : Array Highlighted → Highlighted
  | tableHeader : Highlighted → Highlighted
  | tableName (name : Option String) : Highlighted → Highlighted
  | tableDelim : Highlighted → Highlighted
deriving Inhabited, Repr, BEq, ToJson, FromJson

open Lean.Syntax in
open Highlighted in
partial instance : Quote Highlighted where
  quote := q
where
  q
    | .token t s => mkCApp ``Highlighted.token #[quote t, quote s]
    | .key p s => mkCApp ``key #[quote p, q s]
    | .text s => mkCApp ``Highlighted.text #[quote s]
    -- Fix: `.ws` was previously quoted as `Highlighted.text`, which lost the text/whitespace
    -- distinction after quotation (and with it the comment highlighting done in `toHtml`).
    | .ws s => mkCApp ``Highlighted.ws #[quote s]
    | .link url s => mkCApp ``Highlighted.link #[quote url, q s]
    | .concat xs =>
      let _ : Quote Highlighted := ⟨q⟩
      mkCApp ``Highlighted.concat #[quote xs]
    | .tableHeader hl => mkCApp ``Highlighted.tableHeader #[q hl]
    | .tableName n hl => mkCApp ``Highlighted.tableName #[quote n, q hl]
    | .tableDelim hl => mkCApp ``Highlighted.tableDelim #[q hl]

instance : Append Highlighted where
  append
    | .concat xs, .concat ys => .concat (xs ++ ys)
    | .concat xs, y => .concat (xs.push y)
    | x, .concat ys => .concat (#[x] ++ ys)
    | x, y => .concat (#[x] ++ [y])

instance : Coe (Array Highlighted) Highlighted where
  coe := .concat

/-- Wrap a highlight in its source info's leading/trailing whitespace. -/
def srcInfoHl : SourceInfo → Highlighted → Highlighted
  | .original leading _ trailing _, hl => mkWs leading.toString ++ #[hl] ++ mkWs trailing.toString
  | _, hl => hl
where
  mkWs : String → Array Highlighted
    | "" => #[]
    | w => #[.ws w]

private def takeDropWhile (xs : Array α) (p : α → Bool) : Array α × Array α := Id.run do
  for h : i in [0:xs.size] do
    if !p xs[i] then
      return (xs.extract 0 i, xs.extract i xs.size)
  (xs, #[])

private def takeDropWhileRev (xs : Array α) (p : α → Bool) : Array α × Array α := Id.run do
  for h : i in [0:xs.size] do
    have : i < xs.size := by get_elem_tactic
    let j := xs.size - (i + 1)
    if !p xs[j] then
      return (xs.extract 0 (j + 1), xs.extract (j + 1) xs.size)
  (#[], xs)

/-- info: (#[], #[1, 2, 3, 4, 5]) -/
#guard_msgs in
#eval takeDropWhile #[1,2,3,4,5] (· > 3)

/-- info: (#[1, 2], #[3, 4, 5]) -/
#guard_msgs in
#eval takeDropWhile #[1,2,3,4,5] (· < 3)

/-- info: (#[1, 2, 3], #[4, 5]) -/
#guard_msgs in
#eval takeDropWhileRev #[1,2,3,4,5] (· > 3)

/-- info: (#[1, 2, 3, 4, 5], #[]) -/
#guard_msgs in
#eval takeDropWhileRev #[1,2,3,4,5] (· < 3)

/--
Normalizes semantic info such that it doesn't have leading or trailing whitespace.
-/
partial def Highlighted.normalize : Highlighted → Highlighted
  | .concat xs => .concat (normArray (xs.map Highlighted.normalize))
  | .ws x => .ws x
  | .text x => .text x
  | .token t x => .token t x
  | .tableDelim x =>
    let (pre, y, post) := splitWs (normArray #[x.normalize])
    pre ++ .tableDelim y ++ post
  | .tableHeader x =>
    let (pre, y, post) := splitWs (normArray #[x.normalize])
    pre ++ .tableHeader y ++ post
  | .tableName n x =>
    let (pre, y, post) := splitWs (normArray #[x.normalize])
    pre ++ .tableName n y ++ post
  | .key p x =>
    let (pre, y, post) := splitWs (normArray #[x.normalize])
    pre ++ .key p y ++ post
  | .link d x =>
    let (pre, y, post) := splitWs (normArray #[x.normalize])
    pre ++ .link d y ++ post
where
  -- Split an array into leading whitespace, middle, and trailing whitespace.
  splitWs (xs : Array Highlighted) : (Array Highlighted × Array Highlighted × Array Highlighted) :=
    let (pre, rest) := takeDropWhile xs (· matches (.ws ..))
    let (mid, post) := takeDropWhileRev rest (· matches (.ws ..))
    (pre, mid, post)
  -- Flatten nested `concat`s and drop empty whitespace nodes.
  normArray (xs : Array Highlighted) :=
    xs.flatMap fun
      | .concat ys => normArray ys
      | .ws "" => #[]
      | other => #[other]

-- Inefficient string matching, which is fine because URLs are assumed short here
private def hasSubstring (haystack : String) (needle : String) : Bool := Id.run do
  if needle.isEmpty then return true
  if needle.length > haystack.length then return false
  let mut iter := haystack.startPos
  let fst := String.Pos.Raw.get needle 0
  while h : iter ≠ haystack.endPos do
    if iter.get h == fst then
      let mut iter' := iter
      let mut iter'' := needle.startPos
      while h : iter' ≠ haystack.endPos ∧ iter'' ≠ needle.endPos do
        if iter'.get h.1 == iter''.get h.2 then
          iter' := iter'.next h.1
          iter'' := iter''.next h.2
        else break
      if iter'' ≠ needle.endPos then
        iter := iter.next h
        continue
      else return true
    else
      iter := iter.next h
      continue
  return false

/-- info: true -/
#guard_msgs in
#eval hasSubstring "" ""

/-- info: false -/
#guard_msgs in
#eval hasSubstring "" "a"

/-- info: true -/
#guard_msgs in
#eval hasSubstring "bab" "a"

/-- info: true -/
#guard_msgs in
#eval hasSubstring "bab" "ab"

/-- info: false -/
#guard_msgs in
#eval hasSubstring "bab" "abb"

/-- info: true -/
#guard_msgs in
#eval hasSubstring "abcdef" "ef"

/-- info: true -/
#guard_msgs in
#eval hasSubstring "https://repohost.example.com/example2.git" "example.com"

/-- info: false -/
#guard_msgs in
#eval hasSubstring "https://github.com/example2.git" "example.com"

/--
Highlight parsed TOML syntax. The state is the current table name (if any), used to compute
fully-qualified key paths for cross-referencing.
-/
partial def highlightToml : Syntax → StateM (Option String) Highlighted := fun stx =>
  match stx with
  | .node info `null elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.toml elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.header elts => srcInfoHl info <$> elts.mapM highlightToml
  | stx@(.node info ``Lake.Toml.keyval #[k, eq, v]) => do
    let keypath := (← get).map (· ++ ".") |>.getD ""
    let fullKey :=
      if let `(Lake.Toml.keyval|$k = $_) := stx then
        getKey k |>.map (keypath ++ ·)
      else none
    let hlK ← (.key fullKey) <$> highlightToml k
    return srcInfoHl info #[hlK, (← highlightToml eq), (← highlightToml v)]
  | .node info ``Lake.Toml.keyval elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.key elts => (srcInfoHl info ∘ .key none) <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.simpleKey elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.unquotedKey elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.string elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.basicString #[s@(.atom _ str)] =>
    -- URLs become links, except for RFC 2606 example domains used in the documentation.
    if let some str' := Lean.Syntax.decodeStrLit str then
      if (str'.startsWith "https://" || str'.startsWith "http://".toSlice) &&
          !hasSubstring str' "example.com" then
        (srcInfoHl info ∘ .link str') <$> highlightToml s
      else
        srcInfoHl info <$> highlightToml s
    else
      srcInfoHl info <$> highlightToml s
  | .node info ``Lake.Toml.boolean elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.true #[.atom _ b] => pure <| srcInfoHl info <| .token (.bool true) b
  | .node info ``Lake.Toml.false #[.atom _ b] => pure <| srcInfoHl info <| .token (.bool false) b
  | .node info ``Lake.Toml.arrayTable #[open1, open2, contents, close1, close2] => do
    -- Entering a `[[table]]` header sets the current table name for subsequent keys.
    let n := getKey ⟨contents⟩
    set n
    return srcInfoHl info <| .tableHeader <|
      .tableDelim ((← highlightToml open1) ++ (← highlightToml open2)) ++
      (.tableName n (← highlightToml contents)) ++
      .tableDelim ((← highlightToml close1) ++ (← highlightToml close2))
  | .node info ``Lake.Toml.arrayTable elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.decInt #[.atom _ n] => pure <| srcInfoHl info <| .token (.num n.toNat!) n
  | .node info ``Lake.Toml.array elts => srcInfoHl info <$> elts.mapM highlightToml
  | .node info ``Lake.Toml.inlineTable elts => do
    -- Keys inside an inline table are not in the enclosing table's namespace.
    let x ← get
    set (none : Option String)
    let out ← srcInfoHl info <$> elts.mapM highlightToml
    set x
    return out
  | .atom info str => pure <| srcInfoHl info (.text str)
  | other =>
    panic! s!"Failed to highlight TOML (probably highlightToml in Manual.Meta.LakeToml needs another pattern case): {toString other}"
where
  getKey : TSyntax `Lake.Toml.key → Option String
    | `(Lake.Toml.key| $d:unquotedKey) => d.raw.isLit? ``Lake.Toml.unquotedKey
    | `(Lake.Toml.key| $d:literalString) => d.raw.isLit? ``Lake.Toml.literalString
    | `(Lake.Toml.key| $d:basicString) => d.raw.isLit? ``Lake.Toml.basicString
    | _ => none

/--
A mapping from paths into the nested tables of the config file to the datatypes at which the field
documentation can be found.
-/
def configPaths : Std.HashMap (List String) Name :=
  Std.HashMap.ofList [
    (["require"], ``Lake.Dependency),
    (["lean_lib"], ``Lake.LeanLibConfig),
    (["lean_exe"], ``Lake.LeanExeConfig),
  ]

open Verso Output Html in
partial def Highlighted.toHtml
    (tableLink : Name → Option String)
    (keyLink : Name → Name → Option String) : Highlighted → Html
  | .token t s =>
    match t with
    | .bool _ => {{<span class="bool">{{s}}</span>}}
    | .string _ => {{<span class="string">{{s}}</span>}}
    | .num _ => {{<span class="num">{{s}}</span>}}
  | .tableHeader hl => {{<span class="table-header">{{hl.toHtml tableLink keyLink}}</span>}}
  | .tableName n hl =>
    let tableName := n.map (·.splitOn ".") >>= (configPaths[·]?)
    if let some dest := tableName >>= tableLink then
      {{<a href={{dest}}>{{hl.toHtml tableLink keyLink}}</a>}}
    else
      hl.toHtml tableLink keyLink
  | .tableDelim hl => {{<span class="table-delimiter">{{hl.toHtml tableLink keyLink}}</span>}}
  | .concat hls => .seq (hls.map (toHtml tableLink keyLink))
  | .link url hl => {{<a href={{url}}>{{hl.toHtml tableLink keyLink}}</a>}}
  | .text s => s
  | .ws s =>
    -- Whitespace may contain a TOML comment, which gets its own styling.
    let comment := s.find (· == '#')
    let commentStr := s.extract comment s.endPos
    let commentHtml :=
      if commentStr.isEmpty then .empty
      else {{<span class="comment">{{commentStr}}</span>}}
    {{ {{s.extract s.startPos comment}} {{commentHtml}} }}
  | .key none k =>
    {{<span class="key">{{k.toHtml tableLink keyLink}}</span>}}
  | .key (some p) k =>
    let path := p.splitOn "."
    let dest :=
      if let (table, [k]) := path.splitAt (path.length - 1) then
        if let some t := configPaths[table]? then
          keyLink t k.toName
        else none
      else none
    {{<span class="key" data-toml-key={{p}}>
        {{if let some url := dest then
            {{<a href={{url}}>{{k.toHtml tableLink keyLink}}</a>}}
          else
            k.toHtml tableLink keyLink}}
      </span>}}

end Toml

def Block.toml (highlighted : Toml.Highlighted) : Block where
  name := `Manual.Block.toml
  data := toJson highlighted

def Inline.toml (highlighted : Toml.Highlighted) : Inline where
  name := `Manual.Inline.toml
  data := toJson highlighted

open Verso.Output Html in
def htmlLink (state : TraverseState) (id : InternalId) (html : Html) : Html :=
  if let some dest := state.externalTags[id]? then
    {{<a href={{dest.link}}>{{html}}</a>}}
  else html

open Verso.Output Html in
def htmlDest (state : TraverseState) (id : InternalId) : Option String :=
  if let some dest := state.externalTags[id]? then
    some <| dest.link
  else none

-- TODO upstream
/-- Return an arbitrary ID assigned to the object (or `none` if there are none). -/
defmethod Object.getId (obj : Object) : Option InternalId := do
  for i in obj.ids do
    return i
  failure

def Toml.fieldLink (xref : Genre.Manual.TraverseState) (inTable fieldName : Name) : Option String := do
  let obj ← xref.getDomainObject? tomlFieldDomain s!"{inTable} {fieldName}"
  let dest ← xref.externalTags[← obj.getId]?
  return dest.link

def Toml.tableLink (xref : Genre.Manual.TraverseState) (table : Name) : Option String := do
  let obj ← xref.getDomainObject? tomlTableDomain table.toString
  let dest ← xref.externalTags[← obj.getId]?
  return dest.link

open Lean.Parser in
/-- Parse and highlight the TOML source in a string literal. -/
def tomlContent (str : StrLit) : DocElabM Toml.Highlighted := do
  let scope : Command.Scope := {header := ""}
  let inputCtx := Parser.mkInputContext (← parserInputString str) (← getFileName)
  let pmctx : Parser.ParserModuleContext :=
    { env := ← getEnv, options := scope.opts,
      currNamespace := scope.currNamespace, openDecls := scope.openDecls }
  let pos := str.raw.getPos? |>.getD 0
  let p := andthenFn whitespace Lake.Toml.toml.fn
  let s := p.run inputCtx pmctx (getTokenTable pmctx.env)
    { cache := initCacheForInput inputCtx.inputString, pos }
  match s.errorMsg with
  | some err =>
    throwErrorAt str "Couldn't parse TOML: {err}"
  | none =>
    let #[stx] := s.stxStack.toSubarray.toArray
      | throwErrorAt str s!"Internal error parsing TOML - expected one result, got {s.stxStack.toSubarray.toArray}"
    return Toml.highlightToml stx |>.run' none |>.normalize

def tomlCSS : String := r#"
.toml { font-family: var(--verso-code-font-family); }
pre.toml { margin: 0.5rem .75rem; padding: 0.1rem 0; }
.toml .bool, .toml .table-header { font-weight: 600; }
.toml .table-header .key { color: #3030c0; }
.toml .bool { color: #107090; }
.toml .string { color: #0a5020; }
.toml a, .toml a:link { color: inherit; text-decoration: none; border-bottom: 1px dotted #a2a2a2; }
.toml a:hover { border-bottom-style: solid; }
"#

open Lean.Parser in
@[code_block_expander toml]
def toml : CodeBlockExpander
  | args, str => do
    ArgParse.done.run args
    let hl ← tomlContent str
    pure #[← ``(Block.other (Block.toml $(quote hl)) #[Block.code $(quote str.getString)])]

open Lean.Parser in
@[role_expander toml]
def tomlInline : RoleExpander
  | args, inlines => do
    ArgParse.done.run args
    let #[arg] := inlines
      | throwError "Expected exactly one argument"
    let `(inline|code( $str:str )) := arg
      | throwErrorAt arg "Expected code literal with TOML code"
    let hl ← tomlContent str
    pure #[← ``(Inline.other (Inline.toml $(quote hl)) #[Inline.code $(quote str.getString)])]

@[block_extension Block.toml]
def Block.toml.descr : BlockDescr where
  traverse _ _ _ := pure none
  toTeX := none
  extraCss := [tomlCSS]
  toHtml := some <| fun _goI _ _ info _ =>
    open Verso.Doc.Html in
    open Verso.Output Html in do
      let .ok hl := FromJson.fromJson? (α := Toml.Highlighted) info
        | do
          Verso.Doc.Html.HtmlT.logError "Failed to deserialize highlighted TOML data"
          pure .empty
      let xref := (← read).traverseState
      return {{
        <pre class="toml">
          {{hl.toHtml (Toml.tableLink xref) (Toml.fieldLink xref)}}
        </pre>
      }}

@[inline_extension Inline.toml]
def Inline.toml.descr : InlineDescr where
  traverse _ _ _ := pure none
  toTeX := none
  extraCss := [tomlCSS]
  toHtml := some <| fun _ _ info _ =>
    open Verso.Doc.Html in
    open Verso.Output Html in do
      let .ok hl := FromJson.fromJson? (α := Toml.Highlighted) info
        | do
          Verso.Doc.Html.HtmlT.logError "Failed to deserialize highlighted TOML data"
          pure .empty
      let xref := (← read).traverseState
      return {{
        <code class="toml">
          {{hl.toHtml (Toml.tableLink xref) (Toml.fieldLink xref)}}
        </code>
      }}
reference-manual/Manual/Language/Namespaces.lean
import VersoManual import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean #doc (Manual) "Namespaces" => %%% tag := "namespaces" %%% Names that contain periods (that aren't inside {tech}[guillemets]) are hierarchical names; the periods separate the _components_ of a name. All but the final component of a name are the namespace, while the final component is the name itself. Namespaces serve to group related definitions, theorems, types, and other declarations. When a namespace corresponds to a type's name, {tech}[generalized field notation] can be used to access its contents. In addition to organizing names, namespaces also group {ref "language-extension"}[syntax extensions], {ref "attributes"}[attributes], and {ref "type-classes"}[instances]. Namespaces are orthogonal to {tech}[modules]: a module is a unit of code that is elaborated, compiled, and loaded together, but there is no necessary connection between a module's name and the names that it provides. A module may contain names in any namespace, and the nesting structure of hierarchical modules is unrelated to that of hierarchical namespaces. There is a root namespace, ordinarily denoted by simply omitting a namespace. It can be explicitly indicated by beginning a name with `_root_`. This can be necessary in contexts where a name would otherwise be interpreted relative to an ambient namespace (e.g. from a {tech}[section scope]) or local scope. :::example "Explicit Root Namespace" Names in the current namespace take precedence over names in the root namespace. 
In this example, {name Forest.color}`color` in the definition of {name}`Forest.statement` refers to {name}`Forest.color`: ```lean def color := "yellow" namespace Forest def color := "green" def statement := s!"Lemons are {color}" end Forest ``` ```lean (name := green) #eval Forest.statement ``` ```leanOutput green "Lemons are green" ``` Within the `Forest` namespace, references to {name _root_.color}`color` in the root namespace must be qualified with `_root_`: ```lean namespace Forest def nextStatement := s!"Ripe lemons are {_root_.color}, not {color}" end Forest ``` ```lean (name := ygreen) #eval Forest.nextStatement ``` ```leanOutput ygreen "Ripe lemons are yellow, not green" ``` ::: # Namespaces and Section Scopes Every {tech}[section scope] has a {tech}[current namespace], which is determined by the {keywordOf Lean.Parser.Command.namespace}`namespace` command.{margin}[The {keywordOf Lean.Parser.Command.namespace}`namespace` command is described in the {ref "scope-commands"}[section on commands that introduce section scopes].] Names that are declared within the section scope are added to the current namespace. If the declared name has more than one component, then its namespace is nested within the current namespace; the body of the declaration's current namespace is the nested namespace. Section scopes also include a set of {deftech}_opened namespaces_, which are namespaces whose contents are in scope without additional qualification. {tech (key := "resolve")}[Resolving] an identifier to a particular name takes the current namespace and opened namespaces into account. However, {deftech}[protected] declarations (that is, those with the {keyword}`protected` {ref "declaration-modifiers"}[modifier]) are not brought into scope when their namespace is opened. The rules for resolving identifiers into names that take the current namespace and opened namespaces into account are described in the {ref "identifiers-and-resolution"}[section on identifiers as terms]. 
:::example "Current Namespace" Defining an inductive type results in the type's constructors being placed in its namespace, in this case as {name}`HotDrink.coffee`, {name}`HotDrink.tea`, and {name}`HotDrink.cocoa`. ```lean inductive HotDrink where | coffee | tea | cocoa ``` Outside the namespace, these names must be qualified unless the namespace is opened: ```lean (name := okTea) #check HotDrink.tea ``` ```leanOutput okTea HotDrink.tea : HotDrink ``` ```lean (name := notOkTea) +error #check tea ``` ```leanOutput notOkTea Unknown identifier `tea` ``` ```lean (name := okTea2) section open HotDrink #check tea end ``` ```leanOutput okTea2 HotDrink.tea : HotDrink ``` If a function is defined directly inside the `HotDrink` namespace, then the body of the function is elaborated with the current namespace set to `HotDrink`. The constructors are in scope: ```lean def HotDrink.ofString? : String → Option HotDrink | "coffee" => some coffee | "tea" => some tea | "cocoa" => some cocoa | _ => none ``` Defining another inductive type creates a new namespace: ```lean inductive ColdDrink where | water | juice ``` From within the `HotDrink` namespace, {name}`HotDrink.toString` can be defined without an explicit prefix. Defining a function in the `ColdDrink` namespace requires an explicit `_root_` qualifier to avoid defining `HotDrink.ColdDrink.toString`: ```lean namespace HotDrink def toString : HotDrink → String | coffee => "coffee" | tea => "tea" | cocoa => "cocoa" def _root_.ColdDrink.toString : ColdDrink → String | .water => "water" | .juice => "juice" end HotDrink ``` ::: The {keywordOf Lean.Parser.Command.open}`open` command opens a namespace, making its contents available in the current section scope. There are many variations on opening namespaces, providing flexibility in managing the local scope. 
:::syntax command (title := "Opening Namespaces") The {keywordOf Lean.Parser.Command.open}`open` command is used to open a namespace: ```grammar open $_:openDecl ``` ::: :::syntax Lean.Parser.Command.openDecl (title := "Opening Entire Namespaces") (label := "open declaration") A sequence of one or more identifiers results in each namespace in the sequence being opened: ```grammar $_:ident $_:ident* ``` Each namespace in the sequence is considered relative to all currently-open namespaces, yielding a set of namespaces. Every namespace in this set is opened before the next namespace in the sequence is processed. ::: :::example "Opening Nested Namespaces" Namespaces to be opened are considered relative to the currently-open namespaces. If the same component occurs in different namespace paths, a single {keywordOf Lean.Parser.Command.open}`open` command can be used to open all of them by iteratively bringing each into scope. This example defines names in a variety of namespaces: ```lean namespace A -- _root_.A def a1 := 0 namespace B -- _root_.A.B def a2 := 0 namespace C -- _root_.A.B.C def a3 := 0 end C end B end A namespace B -- _root_.B def a4 := 0 namespace C -- _root_.B.C def a5 := 0 end C end B namespace C -- _root_.C def a6 := 0 end C ``` The names are: * {name}`A.a1` * {name}`A.B.a2` * {name}`A.B.C.a3` * {name}`B.a4` * {name}`B.C.a5` * {name}`C.a6` All six names can be brought into scope with a single iterated {keywordOf Lean.Parser.Command.open}`open` command: ```lean section open A B C example := [a1, a2, a3, a4, a5, a6] end ``` If the initial namespace in the command is `A.B` instead, then neither `_root_.A`, `_root_.B`, nor `_root_.B.C` is opened: ```lean +error (name := dotted) section open A.B C example := [a1, a2, a3, a4, a5, a6] end ``` ```leanOutput dotted Unknown identifier `a1` ``` ```leanOutput dotted Unknown identifier `a4` ``` ```leanOutput dotted Unknown identifier `a5` ``` Opening `A.B` makes `A.B.C` visible as `C` along with `_root_.C`, so the 
subsequent `C` opens both. ::: :::syntax Lean.Parser.Command.openDecl (title := "Hiding Names") (label := "open declaration") A {keyword}`hiding` declaration specifies a set of names that should _not_ be brought into scope. In contrast to opening an entire namespace, the provided identifier must uniquely designate a namespace to be opened. ```grammar $_:ident hiding $x:ident $x:ident* ``` ::: ```lean -show -keep namespace A namespace B def x := 5 end B end A namespace B end B open A -- test claim in preceding box /-- error: ambiguous namespace `B`, possible interpretations: `[B, A.B]` -/ #check_msgs in open B hiding x ``` :::syntax Lean.Parser.Command.openDecl (title := "Renaming") (label := "open declaration") A {keyword}`renaming` declaration allows some names from the opened namespace to be renamed; they are accessible under the new name in the current section scope. The provided identifier must uniquely designate a namespace to be opened. ```grammar $_:ident renaming $[$x:ident → $x:ident],* ``` An ASCII arrow (`->`) may be used instead of the Unicode arrow (`→`). ::: ```lean -show -keep namespace A namespace B def x := 5 end B end A namespace B end B open A -- test claim in preceding box /-- error: ambiguous namespace `B`, possible interpretations: `[B, A.B]` -/ #check_msgs in open B renaming x → y /-- error: ambiguous namespace `B`, possible interpretations: `[B, A.B]` -/ #check_msgs in open B renaming x -> y ``` :::syntax Lean.Parser.Command.openDecl (title := "Restricted Opening") (label := "open declaration") Parentheses indicate that _only_ the names listed in the parentheses should be brought into scope. ```grammar $_:ident ($x:ident $x*) ``` The indicated namespace is added to each currently-opened namespace, and each name is considered in each resulting namespace. All of the listed names must be unambiguous; that is, they must exist in exactly one of the considered namespaces. 
::: ```lean -show -keep namespace A namespace B def y := "" end B end A namespace B end B open A -- test claim in preceding box -- TODO the reality is a bit more subtle - the name should be accessible by only one path. This should be clarified. /-- error: ambiguous identifier `y`, possible interpretations: [B.y, B.y] -/ #check_msgs in open B (y) ``` :::syntax Lean.Parser.Command.openDecl (title := "Scoped Declarations Only") (label := "open declaration") The {keyword}`scoped` keyword indicates that all scoped attributes, instances, and syntax from the provided namespaces should be opened, while not making any of the names available. ```grammar scoped $x:ident $x* ``` ::: ::::example "Opening Scoped Declarations" In this example, a scoped {tech}[notation] and a definition are created in the namespace `NS`: ```lean namespace NS scoped notation "{!{" e "}!}" => (e, e) def three := 3 end NS ``` Outside of the namespace, the notation is not available: ```syntaxError closed def x := {!{ "pear" }!} ``` ```leanOutput closed <example>:1:21-1:22: unexpected token '!'; expected '}' ``` An {keyword}`open scoped` command makes the notation available: :::keepEnv ```lean open scoped NS def x := {!{ "pear" }!} ``` However, the name {name}`NS.three` is not in scope: ```lean +error (name := nothree) def y := three ``` ```leanOutput nothree Unknown identifier `three` ``` ::: :::: # Exporting Names {deftech}_Exporting_ a name makes it available in the current namespace. Unlike a definition, this alias is completely transparent: uses are resolved directly to the original name. Exporting a name to the root namespace makes it available without qualification; the Lean standard library does this for names such as the constructors of {name}`Option` and key type class methods such as {name}`get`. :::syntax command (title := "Exporting Names") The {keyword}`export` command adds names from other namespaces to the current namespace, as if they had been declared in it. 
When the current namespace is opened, these exported names are also brought into scope. ```grammar export $_ ($_*) ``` Internally, exported names are registered as aliases of their targets. From the perspective of the kernel, only the original name exists; the elaborator resolves aliases as part of {tech (key := "resolve")}[resolving] identifiers to names. ::: :::example "Exported Names" The declaration of the {tech}[inductive type] {name}`Veg.Leafy` establishes the constructors {name}`Veg.Leafy.spinach` and {name}`Veg.Leafy.cabbage`: ```lean namespace Veg inductive Leafy where | spinach | cabbage export Leafy (spinach) end Veg export Veg.Leafy (cabbage) ``` The first {keyword}`export` command makes {name}`Veg.Leafy.spinach` accessible as {name}`Veg.spinach` because the {tech}[current namespace] is `Veg`. The second makes {name}`Veg.Leafy.cabbage` accessible as {name}`cabbage`, because the current namespace is the root namespace. :::
reference-manual/Manual/Language/Functions.lean
import VersoManual import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean #doc (Manual) "Functions" => %%% tag := "functions" %%% Function types are a built-in feature of Lean. {deftech}[Functions] map the values of one type (the {deftech}_domain_) into those of another type (the {deftech}_codomain_), and {deftech}_function types_ specify the domain and codomain of functions. There are two kinds of function type: : {deftech}[Dependent] Dependent function types explicitly name the parameter, and the function's codomain may refer explicitly to this name. Because types can be computed from values, a dependent function may return values from any number of different types, depending on its argument.{margin}[Dependent functions are sometimes referred to as {deftech}_dependent products_, because they correspond to an indexed product of sets.] : {deftech}[Non-Dependent] Non-dependent function types do not include a name for the parameter, and the codomain does not vary based on the specific argument provided. ::::keepEnv :::example "Dependent Function Types" The function {lean}`two` returns values in different types, depending on which argument it is called with: ```lean def two : (b : Bool) → if b then Unit × Unit else String := fun b => match b with | true => ((), ()) | false => "two" ``` The body of the function cannot be written with `if...then...else...` because it does not refine types the same way that {keywordOf Lean.Parser.Term.match}`match` does. ::: :::: In Lean's core language, all function types are dependent: non-dependent function types are dependent function types in which the parameter name does not occur in the {tech}[codomain]. Additionally, two dependent function types that have different parameter names may be definitionally equal if renaming the parameter makes them equal. However, the Lean elaborator does not introduce a local binding for non-dependent functions' parameters. 
:::example "Definitional Equality of Dependent and Non-Dependent Functions" The types {lean}`(x : Nat) → String` and {lean}`Nat → String` are definitionally equal: ```lean example : ((x : Nat) → String) = (Nat → String) := rfl ``` Similarly, the types {lean}`(n : Nat) → n + 1 = 1 + n` and {lean}`(k : Nat) → k + 1 = 1 + k` are definitionally equal: ```lean example : ((n : Nat) → n + 1 = 1 + n) = ((k : Nat) → k + 1 = 1 + k) := rfl ``` ::: :::::keepEnv ::::example "Non-Dependent Functions Don't Bind Variables" :::keepEnv A dependent function is required in the following statement that all elements of an array are non-zero: ```lean def AllNonZero (xs : Array Nat) : Prop := (i : Nat) → (lt : i < xs.size) → xs[i] ≠ 0 ``` ::: :::keepEnv This is because the elaborator for array access requires a proof that the index is in bounds. The non-dependent version of the statement does not introduce this assumption: ```lean +error (name := nondepOops) def AllNonZero (xs : Array Nat) : Prop := (i : Nat) → (i < xs.size) → xs[i] ≠ 0 ``` ```leanOutput nondepOops failed to prove index is valid, possible solutions: - Use `have`-expressions to prove the index is valid - Use `a[i]!` notation instead, runtime check is performed, and 'Panic' error message is produced if index is not valid - Use `a[i]?` notation instead, result is an `Option` type - Use `a[i]'h` notation instead, where `h` is a proof that index is valid xs : Array Nat i : Nat ⊢ i < xs.size ``` ::: :::: ::::: While the core type theory does not feature {tech}[implicit] parameters, function types do include an indication of whether the parameter is implicit. This information is used by the Lean elaborator, but it does not affect type checking or definitional equality in the core theory and can be ignored when thinking only about the core type theory. 
:::example "Definitional Equality of Implicit and Explicit Function Types" The types {lean}`{α : Type} → (x : α) → α` and {lean}`(α : Type) → (x : α) → α` are definitionally equal, even though the first parameter is implicit in one and explicit in the other. ```lean example : ({α : Type} → (x : α) → α) = ((α : Type) → (x : α) → α) := rfl ``` ::: # Function Abstractions In Lean's type theory, functions are created using {deftech}_function abstractions_ that bind a variable. {margin}[In various communities, function abstractions are also known as _lambdas_, due to Alonzo Church's notation for them, or _anonymous functions_ because they don't need to be defined with a name in the global environment.] When the function is applied, the result is found by {tech (key := "β")}[β-reduction]: substituting the argument for the bound variable. In compiled code, this happens strictly: the argument must already be a value. When type checking, there are no such restrictions; the equational theory of definitional equality allows β-reduction with any term. In Lean's {ref "function-terms"}[term language], function abstractions may take multiple parameters or use pattern matching. These features are translated to simpler operations in the core language, where all functions abstractions take exactly one parameter. Not all functions originate from abstractions: {tech}[type constructors], {tech}[constructors], and {tech}[recursors] may have function types, but they cannot be defined using function abstractions alone. # Currying %%% tag := "currying" %%% In Lean's core type theory, every function maps each element of the {tech}[domain] to a single element of the {tech}[codomain]. In other words, every function expects exactly one parameter. Multiple-parameter functions are implemented by defining higher-order functions that, when supplied with the first parameter, return a new function that expects the remaining parameters. 
This encoding is called {deftech}_currying_, popularized by and named after Haskell B. Curry. Lean's syntax for defining functions, specifying their types, and applying them creates the illusion of multiple-parameter functions, but the result of elaboration contains only single-parameter functions. # Extensionality %%% tag := "function-extensionality" %%% Definitional equality of functions in Lean is {deftech}_intensional_. This means that definitional equality is defined _syntactically_, modulo renaming of bound variables and {tech}[reduction]. To a first approximation, this means that two functions are definitionally equal if they implement the same algorithm, rather than the usual mathematical notion of equality that states that two functions are equal if they map equal elements of the {tech}[domain] to equal elements of the {tech}[codomain]. Definitional equality is used by the type checker, so it's important that it be predictable. The syntactic character of intensional equality means that the algorithm to check it can be feasibly specified. Checking extensional equality involves proving essentially arbitrary theorems about equality of functions, and there is no clear specification for an algorithm to check it. This makes extensional equality a poor choice for a type checker. Function extensionality is instead made available as a reasoning principle that can be invoked when proving the {tech}[proposition] that two functions are equal. ::::keepEnv ```lean -show axiom α : Type axiom β : α → Type axiom f : (x : α) → β x -- test claims in next para example : (fun x => f x) = f := by rfl ``` In addition to reduction and renaming of bound variables, definitional equality does support one limited form of extensionality, called {tech}_η-equivalence_, in which functions are equal to abstractions whose bodies apply them to the argument. Given {lean}`f` with type {lean}`(x : α) → β x`, {lean}`f` is definitionally equal to {lean}`fun x => f x`. 
:::: When reasoning about functions, the theorem {lean}`funext`{margin}[Unlike some intensional type theories, {lean}`funext` is a theorem in Lean. It can be proved {ref "quotient-funext"}[using quotient types].] or the corresponding tactics {tactic}`funext` or {tactic}`ext` can be used to prove that two functions are equal if they map equal inputs to equal outputs. {docstring funext} # Totality and Termination %%% tag := "totality" %%% Functions can be defined recursively using {keywordOf Lean.Parser.Command.declaration}`def`. From the perspective of Lean's logic, all functions are {deftech}_total_, meaning that they map each element of the {tech}[domain] to an element of the {tech}[codomain] in finite time.{margin}[Some programming language communities use the term _total_ in a different sense, where functions are considered total if they do not crash due to unhandled cases but non-termination is ignored.] The values of total functions are defined for all type-correct arguments, and they cannot fail to terminate or crash due to a missing case in a pattern match. While the logical model of Lean considers all functions to be total, Lean is also a practical programming language that provides certain “escape hatches”. Functions that have not been proven to terminate can still be used in Lean's logic as long as their {tech}[codomain] is proven nonempty. These functions are treated as uninterpreted functions by Lean's logic, and their computational behavior is ignored. In compiled code, these functions are treated just like any others. Other functions may be marked unsafe; these functions are not available to Lean's logic at all. The section on {ref "partial-unsafe"}[partial and unsafe function definitions] contains more detail on programming with recursive functions. Similarly, operations that should fail at runtime in compiled code, such as out-of-bounds access to an array, can only be used when the resulting type is known to be inhabited. 
These operations result in an arbitrarily chosen inhabitant of the type in Lean's logic (specifically, the one specified in the type's {name}`Inhabited` instance). :::example "Panic" The function {name}`thirdChar` extracts the third element of an array, or panics if the array has two or fewer elements: ```lean def thirdChar (xs : Array Char) : Char := xs[2]! ``` The (nonexistent) third elements of {lean}`#['!']` and {lean}`#['-', 'x']` are equal, because they result in the same arbitrarily-chosen character: ```lean example : thirdChar #['!'] = thirdChar #['-', 'x'] := rfl ``` Indeed, both are equal to {lean}`'A'`, which happens to be the default fallback for {lean}`Char`: ```lean example : thirdChar #['!'] = 'A' := rfl example : thirdChar #['-', 'x'] = 'A' := rfl ``` ::: # API Reference %%% tag := "function-api" %%% The `Function` namespace contains general-purpose helpers for working with functions. {docstring Function.comp} {docstring Function.const} {docstring Function.curry} {docstring Function.uncurry} ## Properties %%% tag := "function-api-properties" %%% {docstring Function.Injective} {docstring Function.Surjective} {docstring Function.LeftInverse} {docstring Function.HasLeftInverse} {docstring Function.RightInverse} {docstring Function.HasRightInverse}
reference-manual/Manual/Language/InductiveTypes.lean
import VersoManual import Manual.Meta import Manual.Meta.LexedText import Manual.Language.InductiveTypes.LogicalModel import Manual.Language.InductiveTypes.Structures import Manual.Language.InductiveTypes.Nested open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Lean.Parser.Command («inductive» «structure» declValEqns computedField) set_option maxRecDepth 800 #doc (Manual) "Inductive Types" => %%% tag := "inductive-types" %%% {deftech}_Inductive types_ are the primary means of introducing new types to Lean. While {tech}[universes], {tech}[functions], and {tech}[quotient types] are built-in primitives that could not be added by users, every other type in Lean is either an inductive type or defined in terms of universes, functions, and inductive types. Inductive types are specified by their {deftech}_type constructors_ {index}[type constructor] and their {deftech}_constructors_; {index}[constructor] their other properties are derived from these. Each inductive type has a single type constructor, which may take both {tech}[universe parameters] and ordinary parameters. Inductive types may have any number of constructors; these constructors introduce new values whose types are headed by the inductive type's type constructor. Based on the type constructor and the constructors for an inductive type, Lean derives a {deftech}_recursor_{index}[recursor]{see "recursor"}[eliminator]. Logically, recursors represent induction principles or elimination rules; computationally, they represent primitive recursive computations. The termination of recursive functions is justified by translating them into uses of the recursors, so Lean's kernel only needs to perform type checking of recursor applications, rather than including a separate termination analysis. Lean additionally produces a number of helper constructions based on the recursor,{margin}[The term _recursor_ is always used, even for non-recursive types.] which are used elsewhere in the system. 
_Structures_ are a special case of inductive types that have exactly one constructor. When a structure is declared, Lean generates helpers that enable additional language features to be used with the new structure. This section describes the specific details of the syntax used to specify both inductive types and structures, the new constants and definitions in the environment that result from inductive type declarations, and the run-time representation of inductive types' values in compiled code. # Inductive Type Declarations %%% tag := "inductive-declarations" %%% :::syntax command (alias := «inductive») (title := "Inductive Type Declarations") ```grammar $_:declModifiers inductive $d:declId $_:optDeclSig where $[| $_ $c:ident $_]* $[deriving $[$x:ident],*]? ``` Declares a new inductive type. The meaning of the {syntaxKind}`declModifiers` is as described in the section {ref "declaration-modifiers"}[on declaration modifiers]. ::: After declaring an inductive type, its type constructor, constructors, and recursor are present in the environment. New inductive types extend Lean's core logic—they are not encoded or represented by some other already-present data. Inductive type declarations must satisfy {ref "well-formed-inductives"}[a number of well-formedness requirements] to ensure that the logic remains consistent. The first line of the declaration, from {keywordOf Lean.Parser.Command.declaration (parser:=«inductive»)}`inductive` to {keywordOf Lean.Parser.Command.declaration (parser:=«inductive»)}`where`, specifies the new {tech}[type constructor]'s name and type. If a type signature for the type constructor is provided, then its result type must be a {tech}[universe], but the parameters do not need to be types. If no signature is provided, then Lean will attempt to infer a universe that's just big enough to contain the resulting type. In some situations, this process may fail to find a minimal universe or fail to find one at all, necessitating an annotation. 
The constructor specifications follow {keywordOf Lean.Parser.Command.declaration (parser:=«inductive»)}`where`. Constructors are not mandatory, as constructorless inductive types such as {lean}`False` and {lean}`Empty` are perfectly sensible. Each constructor specification begins with a vertical bar (`'|'`, Unicode `'VERTICAL BAR' (U+007c)`), declaration modifiers, and a name. The name is a {tech}[raw identifier]. A declaration signature follows the name. The signature may specify any parameters, modulo the well-formedness requirements for inductive type declarations, but the return type in the signature must be a saturated application of the type constructor of the inductive type being specified. If no signature is provided, then the constructor's type is inferred by inserting sufficient implicit parameters to construct a well-formed return type. The new inductive type's name is defined in the {tech}[current namespace]. Each constructor's name is in the inductive type's namespace.{index (subterm := "of inductive type")}[namespace] ## Parameters and Indices %%% tag := "inductive-datatypes-parameters-and-indices" %%% Type constructors may take two kinds of arguments: {deftech}_parameters_ {index (subterm := "of inductive type")}[parameter] and {deftech (key := "index")}_indices_.{index (subterm := "of inductive type")}[index] Parameters must be used consistently in the entire definition; all occurrences of the type constructor in each constructor in the declaration must take precisely the same argument. Indices may vary among the occurrences of the type constructor. All parameters must precede all indices in the type constructor's signature. Parameters that occur prior to the colon (`':'`) in the type constructor's signature are considered parameters to the entire inductive type declaration. They are always parameters that must be uniform throughout the type's definition. 
Generally speaking, parameters that occur after the colon are indices that may vary throughout the definition of the type. However, if the option {option}`inductive.autoPromoteIndices` is {lean}`true`, then syntactic indices that could have been parameters are made into parameters. An index could have been a parameter if all of its type dependencies are themselves parameters and it is used uniformly as an uninstantiated variable in all occurrences of the inductive type's type constructor in all constructors. {optionDocs inductive.autoPromoteIndices} Indices can be seen as defining a _family_ of types. Each choice of indices selects a type from the family, which has its own set of available constructors. Type constructors with indices are said to specify {deftech}_indexed families_ {index (subterm := "of types")}[indexed family] of types. ## Example Inductive Types %%% tag := "example-inductive-types" %%% :::example "A constructorless type" {lean}`Vacant` is an empty inductive type, equivalent to Lean's {lean}`Empty` type: ```lean inductive Vacant : Type where ``` Empty inductive types are not useless; they can be used to indicate unreachable code. ::: :::example "A constructorless proposition" {lean}`No` is a false {tech}[proposition], equivalent to Lean's {lean}`False`: ```lean inductive No : Prop where ``` ```lean -show -keep theorem no_is_false : No = False := by apply propext constructor <;> intro h <;> cases h ``` ::: :::example "A unit type" (keep := true) {lean}`Solo` is equivalent to Lean's {lean}`Unit` type: ```lean inductive Solo where | solo ``` It is an example of an inductive type in which the signatures have been omitted for both the type constructor and the constructor. Lean assigns {lean}`Solo` to {lean}`Type`: ```lean (name := OneTy) #check Solo ``` ```leanOutput OneTy Solo : Type ``` The constructor is named {lean}`Solo.solo`, because constructor names are the type constructor's namespace. 
Because {lean}`Solo` expects no arguments, the signature inferred for {lean}`Solo.solo` is: ```lean (name := oneTy) #check Solo.solo ``` ```leanOutput oneTy Solo.solo : Solo ``` ::: :::example "A true proposition" {lean}`Yes` is equivalent to Lean's {lean}`True` proposition: ```lean inductive Yes : Prop where | intro ``` Unlike {lean}`One`, the new inductive type {lean}`Yes` is specified to be in the {lean}`Prop` universe. ```lean (name := YesTy) #check Yes ``` ```leanOutput YesTy Yes : Prop ``` The signature inferred for {lean}`Yes.intro` is: ```lean (name := yesTy) #check Yes.intro ``` ```leanOutput yesTy Yes.intro : Yes ``` ```lean -show -keep theorem yes_is_true : Yes = True := by apply propext constructor <;> intros <;> constructor ``` ::: ::::example "A type with parameter and index" (keep := true) :::keepEnv ```lean -show universe u axiom α : Type u axiom b : Bool ``` An {lean}`EvenOddList α b` is a list where {lean}`α` is the type of the data stored in the list and {lean}`b` is {lean}`true` when there are an even number of entries: ::: ```lean inductive EvenOddList (α : Type u) : Bool → Type u where | nil : EvenOddList α true | cons : α → EvenOddList α isEven → EvenOddList α (not isEven) ``` This example is well typed because there are two entries in the list: ```lean example : EvenOddList String true := .cons "a" (.cons "b" .nil) ``` This example is not well typed because there are three entries in the list: ```lean +error (name := evenOddOops) example : EvenOddList String true := .cons "a" (.cons "b" (.cons "c" .nil)) ``` ```leanOutput evenOddOops Type mismatch EvenOddList.cons "a" (EvenOddList.cons "b" (EvenOddList.cons "c" EvenOddList.nil)) has type EvenOddList String !!!true but is expected to have type EvenOddList String true ``` :::keepEnv ```lean -show universe u axiom α : Type u axiom b : Bool ``` In this declaration, {lean}`α` is a {tech}[parameter], because it is used consistently in all occurrences of {name}`EvenOddList`. 
{lean}`b` is an {tech}[index], because different {lean}`Bool` values are used for it at different occurrences. ::: ```lean -show -keep def EvenOddList.length : EvenOddList α b → Nat | .nil => 0 | .cons _ xs => xs.length + 1 theorem EvenOddList.length_matches_evenness (xs : EvenOddList α b) : b = (xs.length % 2 = 0) := by induction xs . simp [length] next b' _ xs ih => simp [length] cases b' <;> simp only [Bool.true_eq_false, false_iff, true_iff] <;> simp at ih <;> omega ``` :::: :::::keepEnv ::::example "Parameters before and after the colon" In this example, both parameters are specified before the colon in {name}`Either`'s signature. ```lean inductive Either (α : Type u) (β : Type v) : Type (max u v) where | left : α → Either α β | right : β → Either α β ``` In this version, there are two types named `α` that might not be identical: ```lean (name := Either') +error inductive Either' (α : Type u) (β : Type v) : Type (max u v) where | left : {α : Type u} → {β : Type v} → α → Either' α β | right : β → Either' α β ``` ```leanOutput Either' Mismatched inductive type parameter in Either' α β The provided argument α is not definitionally equal to the expected parameter α✝ Note: The value of parameter `α✝` must be fixed throughout the inductive declaration. Consider making this parameter an index if it must vary. ``` Placing the parameters after the colon results in parameters that can be instantiated by the constructors: ```lean (name := Either'') inductive Either'' : Type u → Type v → Type (max u v + 1) where | left : {α : Type u} → {β : Type v} → α → Either'' α β | right : β → Either'' α β ``` A larger universe is required for this type because {ref "inductive-type-universe-levels"}[constructor parameters must be in universes that are smaller than the inductive type's universe]. {name}`Either''.right`'s type parameter is discovered via Lean's ordinary rules for {tech}[automatic implicit parameters]. 
:::: ::::: ## Anonymous Constructor Syntax %%% tag := "anonymous-constructor-syntax" %%% If an inductive type has just one constructor, then this constructor is eligible for {deftech}_anonymous constructor syntax_. Instead of writing the constructor's name applied to its arguments, the explicit arguments can be enclosed in angle brackets (`'⟨'` and `'⟩'`, Unicode `MATHEMATICAL LEFT ANGLE BRACKET (U+27E8)` and `MATHEMATICAL RIGHT ANGLE BRACKET (U+27E9)`) and separated with commas. This works in both pattern and expression contexts. Providing arguments by name or converting all implicit parameters to explicit parameters with `@` requires using the ordinary constructor syntax. :::syntax term (title := "Anonymous Constructors") Constructors can be invoked anonymously by enclosing their explicit arguments in angle brackets, separated by commas. ```grammar ⟨ $_,* ⟩ ``` ::: ::::example "Anonymous constructors" :::keepEnv ```lean -show axiom α : Type ``` The type {lean}`AtLeastOne α` is similar to `List α`, except there's always at least one element present: ::: ```lean inductive AtLeastOne (α : Type u) : Type u where | mk : α → Option (AtLeastOne α) → AtLeastOne α ``` Anonymous constructor syntax can be used to construct them: ```lean def oneTwoThree : AtLeastOne Nat := ⟨1, some ⟨2, some ⟨3, none⟩⟩⟩ ``` and to match against them: ```lean def AtLeastOne.head : AtLeastOne α → α | ⟨x, _⟩ => x ``` Equivalently, traditional constructor syntax could have been used: ```lean def oneTwoThree' : AtLeastOne Nat := .mk 1 (some (.mk 2 (some (.mk 3 none)))) def AtLeastOne.head' : AtLeastOne α → α | .mk x _ => x ``` :::: ## Deriving Instances %%% tag := "inductive-declarations-deriving-instances" %%% The optional {keywordOf Lean.Parser.Command.declaration (parser:=«inductive»)}`deriving` clause of an inductive type declaration can be used to derive instances of type classes. Please refer to {ref "deriving-instances"}[the section on instance deriving] for more information. 
{include 0 Manual.Language.InductiveTypes.Structures} {include 0 Manual.Language.InductiveTypes.LogicalModel} # Run-Time Representation %%% tag := "run-time-inductives" %%% An inductive type's run-time representation depends on how many constructors it has, how many arguments each constructor takes, and whether these arguments are {tech}[relevant]. ## Exceptions %%% tag := "inductive-types-runtime-special-support" %%% Not every inductive type is represented as indicated here—some inductive types have special support from the Lean compiler: :::keepEnv ```lean -show axiom α : Prop ``` * The representation of the fixed-width integer types {lean}`UInt8`, …, {lean}`UInt64`, {lean}`Int8`, …, {lean}`Int64`, and {lean}`USize` depends on whether the code is compiled for a 32- or 64-bit architecture. Their representation is described {ref "fixed-int-runtime"}[in a dedicated section]. * {lean}`Char` is represented by `uint32_t`. Because {lean}`Char` values never require more than 21 bits, they are always unboxed. * {lean}`Float` is represented by a pointer to a Lean object that contains a “double”. * An {deftech}_enum inductive_ type of at least 2 and at most $`2^{32}` constructors, each of which has no parameters, is represented by the first type of {c}`uint8_t`, {c}`uint16_t`, {c}`uint32_t` that is sufficient to assign a unique value to each constructor. For example, the type {lean}`Bool` is represented by {c}`uint8_t`, with values {c}`0` for {lean}`false` and {c}`1` for {lean}`true`. {TODO}[Find out whether this should say “no relevant parameters”] * {lean}`Decidable α` is represented the same way as `Bool` {TODO}[Aren't Decidable and Bool just special cases of the rules for trivial constructors and irrelevance?] * {lean}`Nat` and {lean}`Int` are represented by {c}`lean_object *`. Their representations are described in more detail in {ref "nat-runtime"}[the section on natural numbers] and {ref "int-runtime"}[the section on integers]. 
::: ## Relevance %%% tag := "inductive-types-runtime-relevance" %%% Types and proofs have no run-time representation. That is, if an inductive type is a `Prop`, then its values are erased prior to compilation. Similarly, all theorem statements and types are erased. Types with run-time representations are called {deftech}_relevant_, while types without run-time representations are called {deftech}_irrelevant_. :::example "Types are irrelevant" Even though {name}`List.cons` has the following signature, which indicates three parameters: ```signature List.cons.{u} {α : Type u} : α → List α → List α ``` its run-time representation has only two, because the type argument is run-time irrelevant. ::: :::example "Proofs are irrelevant" Even though {name}`Fin.mk` has the following signature, which indicates three parameters: ```signature Fin.mk {n : Nat} (val : Nat) : val < n → Fin n ``` its run-time representation has only two, because the proof is erased. ::: In most cases, irrelevant values simply disappear from compiled code. However, in cases where some representation is required (such as when they are arguments to polymorphic constructors), they are represented by a trivial value. ## Trivial Wrappers %%% tag := "inductive-types-trivial-wrappers" %%% If an inductive type has exactly one constructor, and that constructor has exactly one run-time relevant parameter, then the inductive type is represented identically to its parameter. :::example "Zero-Overhead Subtypes" The structure {name}`Subtype` bundles an element of some type with a proof that it satisfies a predicate. Its constructor takes four arguments, but three of them are irrelevant: ```signature Subtype.mk.{u} {α : Sort u} {p : α → Prop} (val : α) (property : p val) : Subtype p ``` Thus, subtypes impose no runtime overhead in compiled code, and are represented identically to the type of the {name Subtype.val}`val` field. 
::: :::example "Signed Integers" The signed integer types {lean}`Int8`, ..., {lean}`Int64`, {lean}`ISize` are structures with a single field that wraps the corresponding unsigned integer type. They are represented by the unsigned C types {c}`uint8_t`, ..., {c}`uint64_t`, {c}`size_t`, respectively, because they have a trivial structure. ::: ## Other Inductive Types %%% tag := "inductive-types-standard-representation" %%% If an inductive type doesn't fall into one of the categories above, then its representation is determined by its constructors. Constructors without relevant parameters are represented by their index into the list of constructors, as unboxed unsigned machine integers (scalars). Constructors with relevant parameters are represented as an object with a header, the constructor's index, an array of pointers to other objects, and then arrays of scalar fields sorted by their types. The header tracks the object's reference count and other necessary bookkeeping. Recursive functions are compiled as they are in most programming languages, rather than by using the inductive type's recursor. Elaborating recursive functions to recursors serves to provide reliable termination evidence, not executable code. ### FFI %%% tag := "inductive-types-ffi" %%% From the perspective of C, these other inductive types are represented by {c}`lean_object *`. Each constructor is stored as a {c}`lean_ctor_object`, and {c}`lean_is_ctor` will return true. A {c}`lean_ctor_object` stores the constructor index in its header, and the fields are stored in the {c}`m_objs` portion of the object. Lean assumes that {c}`sizeof(size_t) == sizeof(void*)`—while this is not guaranteed by C, the Lean run-time system contains an assertion that fails if this is not the case. The memory order of the fields is derived from the types and order of the fields in the declaration. 
They are ordered as follows: * Non-scalar fields stored as {c}`lean_object *` * Fields of type {lean}`USize` * Other scalar fields, in decreasing order by size Within each group the fields are ordered in declaration order. *Warning*: Trivial wrapper types count as their underlying wrapped type for this purpose. * To access fields of the first kind, use {c}`lean_ctor_get(val, i)` to get the `i`th non-scalar field. * To access {lean}`USize` fields, use {c}`lean_ctor_get_usize(val, n+i)` to get the {c}`i`th `USize` field and {c}`n` is the total number of fields of the first kind. * To access other scalar fields, use {c}`lean_ctor_get_uintN(val, off)` or {c}`lean_ctor_get_usize(val, off)` as appropriate. Here `off` is the byte offset of the field in the structure, starting at {c}`n*sizeof(void*)` where `n` is the number of fields of the first two kinds. ::::keepEnv For example, a structure such as ```lean structure S where ptr_1 : Array Nat usize_1 : USize sc64_1 : UInt64 -- Wrappers of scalars count as scalars: sc64_2 : { x : UInt64 // x > 0 } sc64_3 : Float -- `Float` is 64 bit sc8_1 : Bool sc16_1 : UInt16 sc8_2 : UInt8 sc64_4 : UInt64 usize_2 : USize -- Trivial wrapper around `UInt32` sc32_1 : Char sc32_2 : UInt32 sc16_2 : UInt16 ``` would get re-sorted into the following memory order: * {name}`S.ptr_1`: {c}`lean_ctor_get(val, 0)` * {name}`S.usize_1`: {c}`lean_ctor_get_usize(val, 1)` * {name}`S.usize_2`: {c}`lean_ctor_get_usize(val, 2)` * {name}`S.sc64_1`: {c}`lean_ctor_get_uint64(val, sizeof(void*)*3)` * {name}`S.sc64_2`: {c}`lean_ctor_get_uint64(val, sizeof(void*)*3 + 8)` * {name}`S.sc64_3`: {c}`lean_ctor_get_float(val, sizeof(void*)*3 + 16)` * {name}`S.sc64_4`: {c}`lean_ctor_get_uint64(val, sizeof(void*)*3 + 24)` * {name}`S.sc32_1`: {c}`lean_ctor_get_uint32(val, sizeof(void*)*3 + 32)` * {name}`S.sc32_2`: {c}`lean_ctor_get_uint32(val, sizeof(void*)*3 + 36)` * {name}`S.sc16_1`: {c}`lean_ctor_get_uint16(val, sizeof(void*)*3 + 40)` * {name}`S.sc16_2`: 
{c}`lean_ctor_get_uint16(val, sizeof(void*)*3 + 42)` * {name}`S.sc8_1`: {c}`lean_ctor_get_uint8(val, sizeof(void*)*3 + 44)` * {name}`S.sc8_2`: {c}`lean_ctor_get_uint8(val, sizeof(void*)*3 + 45)` :::: ::: TODO Figure out how to test/validate/CI these statements ::: # Mutual Inductive Types %%% tag := "mutual-inductive-types" %%% Inductive types may be mutually recursive. Mutually recursive definitions of inductive types are specified by defining the types in a `mutual ... end` block. :::example "Mutually Defined Inductive Types" The type {name}`EvenOddList` in a prior example used a Boolean index to select whether the list in question should have an even or odd number of elements. This distinction can also be expressed by the choice of one of two mutually inductive types {name}`EvenList` and {name}`OddList`: ```lean mutual inductive EvenList (α : Type u) : Type u where | nil : EvenList α | cons : α → OddList α → EvenList α inductive OddList (α : Type u) : Type u where | cons : α → EvenList α → OddList α end example : EvenList String := .cons "x" (.cons "y" .nil) example : OddList String := .cons "x" (.cons "y" (.cons "z" .nil)) ``` ```lean +error (name := evenOddMut) example : OddList String := .cons "x" (.cons "y" .nil) ``` ```leanOutput evenOddMut Unknown constant `OddList.nil` Note: Inferred this name from the expected resulting type of `.nil`: OddList String ``` ::: ## Requirements %%% tag := "mutual-inductive-types-requirements" %%% The inductive types declared in a `mutual` block are considered as a group; they must collectively satisfy generalized versions of the well-formedness criteria for non-mutually-recursive inductive types. This is true even if they could be defined without the `mutual` block, because they are not in fact mutually recursive. ### Mutual Dependencies %%% tag := "mutual-inductive-types-dependencies" %%% Each type constructor's signature must be able to be elaborated without reference to the other inductive types in the `mutual` group. 
In other words, the inductive types in the `mutual` group may not take each other as arguments. The constructors of each inductive type may mention the other type constructors in the group in their parameter types, with restrictions that are a generalization of those for recursive occurrences in non-mutual inductive types. :::example "Mutual inductive type constructors may not mention each other" These inductive types are not accepted by Lean: ```lean +error (name := mutualNoMention) mutual inductive FreshList (α : Type) (r : α → α → Prop) : Type where | nil : FreshList α r | cons (x : α) (xs : FreshList α r) (fresh : Fresh r x xs) inductive Fresh (r : α → FreshList α → Prop) : α → FreshList α r → Prop where | nil : Fresh r x .nil | cons : r x y → (f : Fresh r x ys) → Fresh r x (.cons y ys f) end ``` The type constructors may not refer to the other type constructors in the `mutual` group, so `FreshList` is not in scope in the type constructor of `Fresh`: ```leanOutput mutualNoMention Unknown identifier `FreshList` ``` ::: ### Parameters Must Match %%% tag := "mutual-inductive-types-same-parameters" %%% All inductive types in the `mutual` group must have the same {tech}[parameters]. Their indices may differ. 
::::keepEnv ::: example "Differing numbers of parameters" Even though `Both` and `OneOf` are not mutually recursive, they are declared in the same `mutual` block and must therefore have identical parameters: ```lean (name := bothOptional) +error mutual inductive Both (α : Type u) (β : Type v) where | mk : α → β → Both α β inductive Optional (α : Type u) where | none | some : α → Optional α end ``` ```leanOutput bothOptional Invalid mutually inductive types: `Optional` has 1 parameter(s), but the preceding type `Both` has 2 Note: All inductive types declared in the same `mutual` block must have the same parameters ``` ::: :::: ::::keepEnv ::: example "Differing parameter types" Even though `Many` and `OneOf` are not mutually recursive, they are declared in the same `mutual` block and must therefore have identical parameters. They both have exactly one parameter, but `Many`'s parameter is not necessarily in the same universe as `Optional`'s: ```lean (name := manyOptional) +error mutual inductive Many (α : Type) : Type u where | nil : Many α | cons : α → Many α → Many α inductive Optional (α : Type u) where | none | some : α → Optional α end ``` ```leanOutput manyOptional Invalid mutually inductive types: Parameter `α` has type Type u of sort `Type (u + 1)` but is expected to have type Type of sort `Type 1` ``` ::: :::: ### Universe Levels %%% tag := "mutual-inductive-types-same-universe" %%% The universe levels of each inductive type in a mutual group must obey the same requirements as non-mutually-recursive inductive types. Additionally, all the inductive types in a mutual group must be in the same universe, which implies that their constructors are similarly limited with respect to their parameters' universes. 
::::example "Universe mismatch" :::keepEnv These mutually-inductive types are a somewhat complicated way to represent run-length encoding of a list: ```lean mutual inductive RLE : List α → Type where | nil : RLE [] | run (x : α) (n : Nat) : n ≠ 0 → PrefixRunOf n x xs ys → RLE ys → RLE xs inductive PrefixRunOf : Nat → α → List α → List α → Type where | zero (noMore : ¬∃zs, xs = x :: zs := by simp) : PrefixRunOf 0 x xs xs | succ : PrefixRunOf n x xs ys → PrefixRunOf (n + 1) x (x :: xs) ys end example : RLE [1, 1, 2, 2, 3, 1, 1, 1] := .run 1 2 (by decide) (.succ (.succ .zero)) <| .run 2 2 (by decide) (.succ (.succ .zero)) <| .run 3 1 (by decide) (.succ .zero) <| .run 1 3 (by decide) (.succ (.succ (.succ (.zero)))) <| .nil ``` Specifying {name}`PrefixRunOf` as a {lean}`Prop` would be sensible, but it cannot be done because the types would be in different universes: ::: :::keepEnv ```lean +error (name := rleBad) mutual inductive RLE : List α → Type where | nil : RLE [] | run (x : α) (n : Nat) : n ≠ 0 → PrefixRunOf n x xs ys → RLE ys → RLE xs inductive PrefixRunOf : Nat → α → List α → List α → Prop where | zero (noMore : ¬∃zs, xs = x :: zs := by simp) : PrefixRunOf 0 x xs xs | succ : PrefixRunOf n x xs ys → PrefixRunOf (n + 1) x (x :: xs) ys end ``` ```leanOutput rleBad Invalid mutually inductive types: The resulting type of this declaration Prop differs from a preceding one Type Note: All inductive types declared in the same `mutual` block must belong to the same type universe ``` ::: :::keepEnv This particular property can be expressed by separately defining the well-formedness condition and using a subtype: ```lean def RunLengths α := List (α × Nat) def NoRepeats : RunLengths α → Prop | [] => True | [_] => True | (x, _) :: ((y, n) :: xs) => x ≠ y ∧ NoRepeats ((y, n) :: xs) def RunsMatch : RunLengths α → List α → Prop | [], [] => True | (x, n) :: xs, ys => ys.take n = List.replicate n x ∧ RunsMatch xs (ys.drop n) | _, _ => False def NonZero : RunLengths α → Prop | [] 
=> True | (_, n) :: xs => n ≠ 0 ∧ NonZero xs structure RLE (xs : List α) where rle : RunLengths α noRepeats : NoRepeats rle runsMatch : RunsMatch rle xs nonZero : NonZero rle example : RLE [1, 1, 2, 2, 3, 1, 1, 1] where rle := [(1, 2), (2, 2), (3, 1), (1, 3)] noRepeats := by simp [NoRepeats] runsMatch := by simp [RunsMatch] nonZero := by simp [NonZero] ``` ::: :::: ### Positivity %%% tag := "mutual-inductive-types-positivity" %%% Each inductive type that is defined in the `mutual` group may occur only strictly positively in the types of the parameters of the constructors of all the types in the group. In other words, in the type of each parameter to each constructor in all the types of the group, none of the type constructors in the group occur to the left of any arrows, and none of them occur in argument positions unless they are an argument to an inductive type's type constructor. ::: example "Mutual strict positivity" In the following mutual group, `Tm` occurs in a negative position in the argument to `Binding.scope`: ```lean +error (name := mutualHoas) mutual inductive Tm where | app : Tm → Tm → Tm | lam : Binding → Tm inductive Binding where | scope : (Tm → Tm) → Binding end ``` Because `Tm` is part of the same mutual group, it must occur only strictly positively in the arguments to the constructors of `Binding`. It occurs, however, negatively: ```leanOutput mutualHoas (kernel) arg #1 of 'Binding.scope' has a non positive occurrence of the datatypes being declared ``` ::: ::: example "Nested positions" The definitions of {name}`LocatedStx` and {name}`Stx` satisfy the positivity condition because the recursive occurrences are not to the left of any arrows and, when they are arguments, they are arguments to inductive type constructors. 
```lean mutual inductive LocatedStx where | mk (line col : Nat) (val : Stx) inductive Stx where | atom (str : String) | node (kind : String) (args : List LocatedStx) end ``` ::: ## Recursors %%% tag := "mutual-inductive-types-recursors" %%% Mutual inductive types are provided with primitive recursors, just like non-mutually-defined inductive types. These recursors take into account that they must process the other types in the group, and thus will have a motive for each inductive type. Because all inductive types in the `mutual` group are required to have identical parameters, the recursors still take the parameters first, abstracting them over the motives and the rest of the recursor. Additionally, because the recursor must process the group's other types, it will require cases for each constructor of each of the types in the group. The actual dependency structure between the types is not taken into account; even if an additional motive or constructor case is not really required due to there being fewer mutual dependencies than there could be, the generated recursor still requires them. 
::::keepEnv ::: example "Even and odd" ```lean mutual inductive Even : Nat → Prop where | zero : Even 0 | succ : Odd n → Even (n + 1) inductive Odd : Nat → Prop where | succ : Even n → Odd (n + 1) end ``` ```signature Even.rec {motive_1 : (a : Nat) → Even a → Prop} {motive_2 : (a : Nat) → Odd a → Prop} (zero : motive_1 0 Even.zero) (succ : {n : Nat} → (a : Odd n) → motive_2 n a → motive_1 (n + 1) (Even.succ a)) : (∀ {n : Nat} (a : Even n), motive_1 n a → motive_2 (n + 1) (Odd.succ a)) → ∀ {a : Nat} (t : Even a), motive_1 a t ``` ```signature Odd.rec {motive_1 : (a : Nat) → Even a → Prop} {motive_2 : (a : Nat) → Odd a → Prop} (zero : motive_1 0 Even.zero) (succ : ∀ {n : Nat} (a : Odd n), motive_2 n a → motive_1 (n + 1) (Even.succ a)) : (∀ {n : Nat} (a : Even n), motive_1 n a → motive_2 (n + 1) (Odd.succ a)) → ∀ {a : Nat} (t : Odd a), motive_2 a t ``` ::: :::: ::::keepEnv :::example "Spuriously mutual types" The types {name}`Two` and {name}`Three` are defined in a mutual block, even though they do not refer to each other: ```lean mutual inductive Two (α : Type) where | mk : α → α → Two α inductive Three (α : Type) where | mk : α → α → α → Three α end ``` {name}`Two`'s recursor, {name}`Two.rec`, nonetheless requires a motive and a case for {name}`Three`: ```signature Two.rec.{u} {α : Type} {motive_1 : Two α → Sort u} {motive_2 : Three α → Sort u} (mk : (a a_1 : α) → motive_1 (Two.mk a a_1)) : ((a a_1 a_2 : α) → motive_2 (Three.mk a a_1 a_2)) → (t : Two α) → motive_1 t ``` ::: :::: ## Run-Time Representation %%% tag := "mutual-inductive-types-run-time" %%% Mutual inductive types are represented identically to {ref "run-time-inductives"}[non-mutual inductive types] in compiled code and in the runtime. The restrictions on mutual inductive types exist to ensure Lean's consistency as a logic, and do not impact compiled code. {include 2 Manual.Language.InductiveTypes.Nested}
reference-manual/Manual/Language/InductiveTypes/Nested.lean
import VersoManual import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean set_option guard_msgs.diff true #doc (Manual) "Nested Inductive Types" => %%% tag := "nested-inductive-types" %%% {deftech}_Nested inductive types_ are inductive types in which recursive occurrences of the type being defined are parameters to other inductive type constructors. These recursive occurrences are “nested” underneath the other type constructors. Nested inductive types that satisfy certain requirements can be translated into mutual inductive types; this translation demonstrates that they are sound. Internally, the {tech}[kernel] performs this translation; if it succeeds, then the _original_ nested inductive type is accepted. This avoids performance and usability issues that would arise from details of the translation surfacing. :::paragraph Nested recursive occurrences must satisfy the following requirements: * They must be nested _directly_ under an inductive type's type constructor. Terms that reduce to such nested occurrences are not accepted. * Local variables such as the constructor's parameters may not occur in the arguments to the nested occurrence. * The nested occurrences must occur strictly positively. They must occur strictly positively in the position in which they are nested, and the type constructor in which they are nested must itself occur in a strictly positive position. * Constructor parameters whose types include nested occurrences may not be used in ways that rely on the specific choice of outer type constructor. The translated version will not be usable in those contexts. * Nested occurrences may not be used as parameters to the outer type constructor that occur in the types of the outer type's indices. 
::: :::example "Nested Inductive Types" Instead of using two constructors, the natural numbers can be defined using {name}`Option`: ```lean inductive ONat : Type where | mk (pred : Option ONat) ``` Arbitrarily-branching trees, also known as _rose trees_, are nested inductive types: ```lean inductive RTree (α : Type u) : Type u where | empty | node (val : α) (children : List (RTree α)) ``` ::: :::::example "Invalid Nested Inductive Types" This declaration of arbitrarily-branching rose trees declares an alias for {name}`List`, rather than using {name}`List` directly: ```lean +error (name := viaAlias) abbrev Children := List inductive RTree (α : Type u) : Type u where | empty | node (val : α) (children : Children (RTree α)) ``` ```leanOutput viaAlias (kernel) arg #3 of 'RTree.node' contains a non valid occurrence of the datatypes being declared ``` ::::paragraph :::leanSection ```lean -show variable {n : Nat} ``` This declaration of arbitrarily-branching rose trees tracks the depth of the tree using an index. The constructor `DRTree.node` has an {tech}[automatic implicit parameter] {lean}`n` that represents the depths of all sub-trees. 
However, local variables such as constructor parameters are not permitted as arguments to nested occurrences: ::: ```lean +error (name := localVar) inductive DRTree (α : Type u) : Nat → Type u where | empty : DRTree α 0 | node (val : α) (children : List (DRTree α n)) : DRTree α (n + 1) ``` :::: This declaration includes a non-strictly-positive occurrence of the inductive type, nested under an {name}`Option`: ```lean +error (name := nonPos) inductive WithCheck where | done | check (f : Option WithCheck → Bool) ``` ```leanOutput nonPos (kernel) arg #1 of 'WithCheck.check' has a non positive occurrence of the datatypes being declared ``` :::paragraph This rose tree has a branching factor that's limited by its parameter: ```lean +error (name := brtree) inductive BRTree (branches : Nat) (α : Type u) : Type u where | mk : (children : List (BRTree branches α)) → children.length < branches → BRTree branches α ``` Only nested inductive types that can be translated to mutual inductive types are allowed. However, translating this type would require a translation of {name}`List.length` to the translated types, but function definitions may not occur in mutual blocks with inductive types. The resulting error message shows that the function was not translated, but was applied to a term of the translated type: ```leanOutput brtree (kernel) application type mismatch List.length children argument has type @_nested.List_1 branches α but function has type List (@BRTree branches α) → Nat ``` It is acceptable to use the parameter with the nested occurrence with fully polymorphic functions, such as {name}`id`: ```lean (name := nondep) inductive RTree'' (α : Type u) : Type u where | mk : (children : List (BRTree branches α)) → id children = children → BRTree branches α ``` In this case, the function applies equally well to the translated version as it does to the original. 
::: :::paragraph A _palindrome_ is a list that is the same when reversed: ```lean inductive Palindrome (α : Type) : List α → Prop where | nil : Palindrome α [] | single : Palindrome α [x] | cons (x : α) (p : Palindrome α xs) : Palindrome α (x :: xs ++ [x]) ``` In this predicate, the list is an index whose type depends on the parameter, which is explicit for clarity. This means it cannot be used in nested positions, because nested occurrences may not appear in the types of an inductive type's indices. ::: ::::: The translation from nested inductive types to mutual inductive types proceeds as follows: : Nested occurrences become inductive types Nested occurrences of the inductive type are translated into new inductive types in the same mutual group, which replace the original nested occurrences. These new inductive types have the same constructors as the outer inductive type, except the original parameters are instantiated by the translated version of the type. The original inductive type becomes an alias for the version in which the nested occurrences have been rewritten. This process is repeated if the resulting type is also a nested inductive type (e.g. a type nested under {name}`Array` becomes a type nested under {name}`List`, because {name}`Array`'s constructor takes a {name}`List`). : Conversions to and from the nested types Conversions between the outer inductive type applied to the new alias and the generated auxiliary types are generated. These conversions are then proved to be mutual inverses. : Constructor reconstruction Each constructor of the original type is defined as a function that returns the constructor of the translated type, after applying the appropriate conversions. : Recursor reconstruction The recursor for the nested inductive type is constructed from the recursor for the translated type. In the translation, the motives for the nested occurrences are composed with the conversion functions and the {tech}[minor premises] use them as needed. 
The proofs that the conversion functions are mutually inverse are needed because the encoded constructors convert in one direction, but end up applied to the result of the conversion in the other direction. ::::example "Translating Nested Inductive Types" This nested inductive type represents the natural numbers: ```lean -keep inductive ONat where | mk (pred : Option ONat) : ONat #check ONat.rec ``` The first step in the internal translation is to replace the nested occurrences with auxiliary inductive types that “inline” the resulting type. In this case, the nested occurrence is under {name}`Option`; thus, the auxiliary type has the constructors of {name}`Option`, with {name}`ONat'` substituted for the type parameter: ```lean mutual inductive ONat' where | mk (pred : OptONat) : ONat' inductive OptONat where | none | some : ONat' → OptONat end ``` {lean}`ONat'` is the encoding of {lean}`ONat`: ```lean def ONat := ONat' ``` The next step is to define conversion functions that translate the original nested type to and from the auxiliary type: ```lean def OptONat.ofOption : Option ONat → OptONat | Option.none => OptONat.none | Option.some o => OptONat.some o def OptONat.toOption : OptONat → Option ONat | OptONat.none => Option.none | OptONat.some o => Option.some o ``` These conversion functions are mutually inverse: ```lean def OptONat.to_of_eq_id o : OptONat.toOption (ofOption o) = o := by cases o <;> rfl def OptONat.of_to_eq_id o : OptONat.ofOption (OptONat.toOption o) = o := by cases o <;> rfl ``` The original constructor is translated to an application of the translation's corresponding constructor, with the appropriate conversion applied for the nested occurrence: ```lean def ONat.mk (pred : Option ONat) : ONat := ONat'.mk (.ofOption pred) ``` Finally, the original type's recursor can be translated. The translated recursor uses the translated type's recursor. 
The original nested occurrences are translated using the conversions, and the proofs that the conversions are mutually inverse are used to rewrite types as needed. ```lean noncomputable def ONat.rec {motive1 : ONat → Sort u} {motive2 : Option ONat → Sort u} (h1 : (pred : Option ONat) → motive2 pred → motive1 (ONat.mk pred)) (h2 : motive2 none) (h3 : (o : ONat) → motive1 o → motive2 (some o)) : (t : ONat) → motive1 t := @ONat'.rec motive1 (motive2 ∘ OptONat.toOption) (fun pred ih => OptONat.of_to_eq_id pred ▸ h1 pred.toOption ih) h2 h3 ``` ::::
reference-manual/Manual/Language/InductiveTypes/LogicalModel.lean
import VersoManual import Manual.Meta import Manual.Papers open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Lean.Parser.Command («inductive» «structure» declValEqns computedField) set_option maxRecDepth 800 #doc (Manual) "Logical Model" => %%% tag := "inductive-types-logical-model" %%% # Recursors %%% tag := "recursors" %%% Every inductive type is equipped with a {tech}[recursor]. The recursor is completely determined by the signatures of the type constructor and the constructors. Recursors have function types, but they are primitive and are not definable using `fun`. ## Recursor Types %%% tag := "recursor-types" %%% :::paragraph The recursor takes the following parameters: : The inductive type's {tech}[parameters] Because parameters are consistent, they can be abstracted over the entire recursor. : The {deftech}_motive_ The motive determines the type of an application of the recursor. The motive is a function whose arguments are the type's indices and an instance of the type with these indices instantiated. The specific universe for the type that the motive determines depends on the inductive type's universe and the specific constructors—see the section on {ref "subsingleton-elimination"}[{tech}[subsingleton] elimination] for details. : A {deftech}_minor premise_ for each constructor For each constructor, the recursor expects a function that satisfies the motive for an arbitrary application of the constructor. Each minor premise abstracts over all of the constructor's parameters. If the constructor's parameter's type is the inductive type itself, then the minor premise additionally takes a parameter whose type is the motive applied to that parameter's value—this will receive the result of recursively processing the recursive parameter. : The {deftech}_major premise_, or target Finally, the recursor takes an instance of the type as an argument, along with any index values. 
The result type of the recursor is the motive applied to these indices and the major premise. ::: :::example "The recursor for {lean}`Bool`" {lean}`Bool`'s recursor {name}`Bool.rec` has the following parameters: * The motive computes a type in any universe, given a {lean}`Bool`. * There are minor premises for both constructors, in which the motive is satisfied for both {lean}`false` and {lean}`true`. * The major premise is some {lean}`Bool`. The return type is the motive applied to the major premise. ```signature Bool.rec.{u} {motive : Bool → Sort u} (false : motive false) (true : motive true) (t : Bool) : motive t ``` ::: ::::example "The recursor for {lean}`List`" {lean}`List`'s recursor {name}`List.rec` has the following parameters: :::keepEnv ```lean -show axiom α.{u} : Type u ``` * The parameter {lean}`α` comes first, because the motive, minor premises, and major premise need to refer to it. * The motive computes a type in any universe, given a {lean}`List α`. There is no connection between the universe levels `u` and `v`. * There are minor premises for both constructors: - The motive is satisfied for {name}`List.nil` - The motive should be satisfiable for any application of {name}`List.cons`, given that it is satisfiable for the tail. The extra parameter `motive tail` is because `tail`'s type is a recursive occurrence of {name}`List`. * The major premise is some {lean}`List α`. ::: Once again, the return type is the motive applied to the major premise. 
```signature List.rec.{u, v} {α : Type v} {motive : List α → Sort u} (nil : motive []) (cons : (head : α) → (tail : List α) → motive tail → motive (head :: tail)) (t : List α) : motive t ``` :::: :::::keepEnv ::::example "Recursor with parameters and indices" Given the definition of {name}`EvenOddList`: ```lean inductive EvenOddList (α : Type u) : Bool → Type u where | nil : EvenOddList α true | cons : α → EvenOddList α isEven → EvenOddList α (not isEven) ``` The recursor {name}`EvenOddList.rec` is very similar to that for `List`. The difference comes from the presence of the index: * The motive now abstracts over any arbitrary choice of index. * The minor premise for {name EvenOddList.nil}`nil` applies the motive to {name EvenOddList.nil}`nil`'s index value `true`. * The minor premise {name EvenOddList.cons}`cons` abstracts over the index value used in its recursive occurrence, and instantiates the motive with its negation. * The major premise additionally abstracts over an arbitrary choice of index. ```signature EvenOddList.rec.{u, v} {α : Type v} {motive : (isEven : Bool) → EvenOddList α isEven → Sort u} (nil : motive true EvenOddList.nil) (cons : {isEven : Bool} → (head : α) → (tail : EvenOddList α isEven) → motive isEven tail → motive (!isEven) (EvenOddList.cons head tail)) : {isEven : Bool} → (t : EvenOddList α isEven) → motive isEven t ``` :::: ::::: When using a predicate (that is, a function that returns a {lean}`Prop`) for the motive, recursors express induction. The minor premises for non-recursive constructors are the base cases, and the additional arguments supplied to minor premises for constructors with recursive arguments are the induction hypotheses. ### Subsingleton Elimination %%% tag := "subsingleton-elimination" %%% Proofs in Lean are computationally irrelevant. In other words, having been provided with *some* proof of a proposition, it should be impossible for a program to check *which* proof it has received. 
This is reflected in the types of recursors for inductively defined propositions or predicates. For these types, if there's more than one potential proof of the theorem then the motive may only return another {lean}`Prop`. If the type is structured such that there's only at most one proof anyway, then the motive may return a type in any universe. A proposition that has at most one inhabitant is called a {deftech}_subsingleton_. Rather than obligating users to _prove_ that there's only one possible proof, a conservative syntactic approximation is used to check whether a proposition is a subsingleton. Propositions that fulfill both of the following requirements are considered to be subsingletons: * There is at most one constructor. * Each of the constructor's parameter types is either a {lean}`Prop`, a parameter, or an index. :::example "{lean}`True` is a subsingleton" {lean}`True` is a subsingleton because it has one constructor, and this constructor has no parameters. Its recursor has the following signature: ```signature True.rec.{u} {motive : True → Sort u} (intro : motive True.intro) (t : True) : motive t ``` ::: :::example "{lean}`False` is a subsingleton" {lean}`False` is a subsingleton because it has no constructors. Its recursor has the following signature: ```signature False.rec.{u} (motive : False → Sort u) (t : False) : motive t ``` Note that the motive is an explicit parameter. This is because it is not mentioned in any further parameters' types, so it could not be solved by unification. ::: :::example "{name}`And` is a subsingleton" {lean}`And` is a subsingleton because it has one constructor, and both of the constructor's parameters' types are propositions. 
Its recursor has the following signature: ```signature And.rec.{u} {a b : Prop} {motive : a ∧ b → Sort u} (intro : (left : a) → (right : b) → motive (And.intro left right)) (t : a ∧ b) : motive t ``` ::: :::example "{name}`Or` is not a subsingleton" {lean}`Or` is not a subsingleton because it has more than one constructor. Its recursor has the following signature: ```signature Or.rec {a b : Prop} {motive : a ∨ b → Prop} (inl : ∀ (h : a), motive (.inl h)) (inr : ∀ (h : b), motive (.inr h)) (t : a ∨ b) : motive t ``` The motive's type indicates that {name}`Or.rec` can only be used to produce proofs. A proof of a disjunction can be used to prove something else, but there's no way for a program to inspect _which_ of the two disjuncts was true and used for the proof. ::: :::example "{name}`Eq` is a subsingleton" {lean}`Eq` is a subsingleton because it has just one constructor, {name}`Eq.refl`. This constructor instantiates {lean}`Eq`'s index with a parameter value, so all arguments are parameters: ```signature Eq.refl.{u} {α : Sort u} (x : α) : Eq x x ``` Its recursor has the following signature: ```signature Eq.rec.{u, v} {α : Sort v} {x : α} {motive : (y : α) → x = y → Sort u} (refl : motive x (.refl x)) {y : α} (t : x = y) : motive y t ``` This means that proofs of equality can be used to rewrite the types of non-propositions. ::: ## Reduction %%% tag := "iota-reduction" %%% In addition to adding new constants to the logic, inductive type declarations also add new reduction rules. These rules govern the interaction between recursors and constructors; specifically recursors that have constructors as their major premise. This form of reduction is called {deftech}_ι-reduction_ (iota reduction){index}[ι-reduction]{index (subterm:="ι (iota)")}[reduction]. When the recursor's major premise is a constructor with no recursive parameters, the recursor application reduces to an application of the constructor's minor premise to the constructor's arguments. 
If there are recursive parameters, then these arguments to the minor premise are found by applying the recursor to the recursive occurrence. # Well-Formedness Requirements %%% tag := "well-formed-inductives" %%% Inductive type declarations are subject to a number of well-formedness requirements. These requirements ensure that Lean remains consistent as a logic when it is extended with the inductive type's new rules. They are conservative: there exist potential inductive types that do not undermine consistency, but that these requirements nonetheless reject. ## Universe Levels %%% tag := "inductive-type-universe-levels" %%% Type constructors of inductive types must either inhabit a {tech}[universe] or a function type whose return type is a universe. Each constructor must inhabit a function type that returns a saturated application of the inductive type. If the inductive type's universe is {lean}`Prop`, then there are no further restrictions on universes, because {lean}`Prop` is {tech}[impredicative]. If the universe is not {lean}`Prop`, then the following must hold for each parameter to the constructor: * If the constructor's parameter is a parameter (in the sense of parameters vs indices) of the inductive type, then this parameter's type may be no larger than the type constructor's universe. * All other constructor parameters must be smaller than the type constructor's universe. 
:::::keepEnv ::::example "Universes, constructors, and parameters" {lean}`Either` is in the greater of its arguments' universes, because both are parameters to the inductive type: ```lean inductive Either (α : Type u) (β : Type v) : Type (max u v) where | inl : α → Either α β | inr : β → Either α β ``` {lean}`CanRepr` is in a larger universe than the constructor parameter `α`, because `α` is not one of the inductive type's parameters: ```lean inductive CanRepr : Type (u + 1) where | mk : (α : Type u) → [Repr α] → CanRepr ``` Constructorless inductive types may be in universes smaller than their parameters: ```lean inductive Spurious (α : Type 5) : Type 0 where ``` It would, however, be impossible to add a constructor to {name}`Spurious` without changing its levels. :::: ::::: ## Strict Positivity %%% tag := "strict-positivity" %%% All occurrences of the type being defined in the types of the parameters of the constructors must be in {deftech}_strictly positive_ positions. A position is strictly positive if it is not in a function's argument type (no matter how many function types are nested around it) and it is not an argument of any expression other than type constructors of inductive types. This restriction rules out unsound inductive type definitions, at the cost of also ruling out some unproblematic ones. :::::example "Non-strictly-positive inductive types" ::::keepEnv :::keepEnv The type `Bad` would make Lean inconsistent if it were not rejected: ```lean (name := Bad) +error inductive Bad where | bad : (Bad → Bad) → Bad ``` ```leanOutput Bad (kernel) arg #1 of 'Bad.bad' has a non positive occurrence of the datatypes being declared ``` ::: :::keepEnv ```lean -show axiom Bad : Type axiom Bad.bad : (Bad → Bad) → Bad ``` This is because it would be possible to write a circular argument that proves {lean}`False` under the assumption {lean}`Bad`. 
{lean}`Bad.bad` is rejected because the constructor's parameter has type {lean}`Bad → Bad`, which is a function type in which {lean}`Bad` occurs as an argument type. ::: :::keepEnv This declaration of a fixed point operator is rejected, because `Fix` occurs as an argument to `f`: ```lean (name := Fix) +error inductive Fix (f : Type u → Type u) where | fix : f (Fix f) → Fix f ``` ```leanOutput Fix (kernel) arg #2 of 'Fix.fix' contains a non valid occurrence of the datatypes being declared ``` ::: `Fix.fix` is rejected because `f` is not a type constructor of an inductive type, but `Fix` itself occurs as an argument to it. In this case, `Fix` is also sufficient to construct a type equivalent to `Bad`: ```lean -show axiom Fix : (Type → Type) → Type ``` ```lean def Bad : Type := Fix fun t => t → t ``` :::: ::::: ## Prop vs Type %%% tag := "prop-vs-type" %%% Lean rejects universe-polymorphic types that could not, in practice, be used polymorphically. This could arise if certain instantiations of the universe parameters would cause the type itself to be a {lean}`Prop`. If this type is not a {tech}[subsingleton], then its recursor can only target propositions (that is, the {tech}[motive] must return a {lean}`Prop`). These types only really make sense as {lean}`Prop`s themselves, so the universe polymorphism is probably a mistake. Because they are largely useless, Lean's inductive type elaborator has not been designed to support these types. When such universe-polymorphic inductive types are indeed subsingletons, it can make sense to define them. Lean's standard library defines {name}`PUnit` and {name}`PEmpty`. To define a subsingleton that can inhabit {lean}`Prop` or a {lean}`Type`, set the option {option}`bootstrap.inductiveCheckResultingUniverse` to {lean}`false`. 
{optionDocs bootstrap.inductiveCheckResultingUniverse} ::::keepEnv :::example "Overly-universe-polymorphic {lean}`Bool`" Defining a version of {lean}`Bool` that can be in any universe is not allowed: ```lean +error (name := PBool) inductive PBool : Sort u where | true | false ``` ```leanOutput PBool Invalid universe polymorphic resulting type: The resulting universe is not `Prop`, but it may be `Prop` for some parameter values: Sort u Hint: A possible solution is to use levels of the form `max 1 _` or `_ + 1` to ensure the universe is of the form `Type _` ``` ::: :::: # Constructions for Termination Checking %%% tag := "recursor-elaboration-helpers" %%% In addition to the type constructor, constructors, and recursors that Lean's core type theory prescribes for inductive types, Lean constructs a number of useful helpers. First, the equation compiler (which translates recursive functions with pattern matching in to applications of recursors) makes use of these additional constructs: * `recOn` is a version of the recursor in which the major premise is prior to the minor premise for each constructor. * `casesOn` is a version of the recursor in which the major premise is prior to the minor premise for each constructor, and recursive arguments do not yield induction hypotheses. It expresses case analysis rather than primitive recursion. * `below` computes a type that, for some motive, expresses that _all_ inhabitants of the inductive type that are subtrees of the major premise satisfy the motive. It transforms a motive for induction or primitive recursion into a motive for strong recursion or strong induction. * `brecOn` is a version of the recursor in which `below` is used to provide access to all subtrees, rather than just immediate recursive parameters. It represents strong induction. * `noConfusion` is a general statement from which injectivity and disjointness of constructors can be derived. 
* `noConfusionType` is the motive used for `noConfusion` that determines what the consequences of two constructors being equal would be. For separate constructors, this is {lean}`False`; if both constructors are the same, then the consequence is the equality of their respective parameters. These constructions follow the description in {citet constructionsOnConstructors}[]. For {tech}[well-founded recursion], it is frequently useful to have a generic notion of size available. This is captured in the {name}`SizeOf` class. {docstring SizeOf}
reference-manual/Manual/Language/InductiveTypes/Structures.lean
import VersoManual import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Lean.Parser.Command («inductive» «structure» declValEqns computedField) set_option guard_msgs.diff true #doc (Manual) "Structure Declarations" => %%% tag := "structures" %%% :::syntax command (title := "Structure Declarations") ```grammar $_:declModifiers structure $d:declId $_:bracketedBinder* $[: $_]? $[extends $[$[$_ : ]?$_],*]? where $[$_:declModifiers $_ ::]? $_ $[deriving $[$_],*]? ``` Declares a new structure type. ::: {deftech}[Structures] are inductive types that have only a single constructor and no indices. In exchange for these restrictions, Lean generates code for structures that offers a number of conveniences: projection functions are generated for each field, an additional constructor syntax based on field names rather than positional arguments is available, a similar syntax may be used to replace the values of certain named fields, and structures may extend other structures. Just like other inductive types, structures may be recursive; they are subject to the same restrictions regarding strict positivity. Structures do not add any expressive power to Lean; all of their features are implemented in terms of code generation. ```lean -show -- Test claim about recursive above /-- error: (kernel) arg #1 of 'RecStruct.mk' has a non positive occurrence of the datatypes being declared -/ #check_msgs in structure RecStruct where next : RecStruct → RecStruct ``` # Structure Parameters %%% tag := "structure-params" %%% Just like ordinary inductive type declarations, the header of the structure declaration contains a signature that may specify both parameters and a resulting universe. Structures may not define {tech}[indexed families]. # Fields %%% tag := "structure-fields" %%% Each field of a structure declaration corresponds to a parameter of the constructor. 
Auto-implicit arguments are inserted in each field separately, even if their names coincide, and the fields become constructor parameters that quantify over types. :::: example "Auto-implicit parameters in structure fields" The structure {lean}`MyStructure` contains a field whose type is an auto-implicit parameter: ```lean structure MyStructure where field1 : α field2 : α ``` The type constructor {name}`MyStructure` takes two universe parameters: ```signature MyStructure.{u, v} : Type (max u v) ``` The resulting type is in `Type` rather than `Sort` because the constructor fields quantify over types in `Sort`. In particular, both fields in its constructor {name}`MyStructure.mk` take an implicit type parameter: ```signature MyStructure.mk.{u, v} (field1 : {α : Sort u} → α) (field2 : {α : Sort v} → α) : MyStructure.{u,v} ``` :::: For each field, a {deftech}[projection function] is generated that extracts the field's value from the underlying type's constructor. This function is in the structure's name's namespace. Structure field projections are handled specially by the elaborator (as described in the {ref "structure-inheritance"}[section on structure inheritance]), which performs extra steps beyond looking up a namespace. When field types depend on prior fields, the types of the dependent projection functions are written in terms of earlier projections, rather than explicit pattern matching. 
:::: example "Dependent projection types" The structure {lean}`ArraySized` contains a field whose type depends on both a structure parameter and an earlier field: ```lean structure ArraySized (α : Type u) (length : Nat) where array : Array α size_eq_length : array.size = length ``` The signature of the projection function {name ArraySized.size_eq_length}`size_eq_length` takes the structure type's parameter as an implicit parameter and refers to the earlier field using the corresponding projection: ```signature ArraySized.size_eq_length.{u} {α : Type u} {length : Nat} (self : ArraySized α length) : self.array.size = length ``` :::: Structure fields may have default values, specified with `:=`. These values are used if no explicit value is provided. ::: example "Default values" An adjacency list representation of a graph can be represented as an array of lists of {lean}`Nat`. The size of the array indicates the number of vertices, and the outgoing edges from each vertex are stored in the array at the vertex's index. Because the default value {lean}`#[]` is provided for the field {name Graph.adjacency}`adjacency`, the empty graph {lean}`Graph.empty` can be constructed without providing any field values. ```lean structure Graph where adjacency : Array (List Nat) := #[] def Graph.empty : Graph := {} ``` ::: Structure fields may additionally be accessed via their index, using dot notation. Fields are numbered beginning with `1`. # Structure Constructors %%% tag := "structure-constructors" %%% Structure constructors may be explicitly named by providing the constructor name and `::` prior to the fields. If no name is explicitly provided, then the constructor is named `mk` in the structure type's namespace. {ref "declaration-modifiers"}[Declaration modifiers] may additionally be provided along with an explicit constructor name. 
::: example "Non-default constructor name" The structure {lean}`Palindrome` contains a string and a proof that the string is the same when reversed: ```lean structure Palindrome where ofString :: text : String is_palindrome : text.data.reverse = text.data ``` Its constructor is named {name}`Palindrome.ofString`, rather than `Palindrome.mk`. ::: ::: example "Modifiers on structure constructor" ```imports -show import Std ``` The structure {lean}`NatStringBimap` maintains a finite bijection between natural numbers and strings. It consists of a pair of maps, such that the keys each occur as values exactly once in the other map. Because the constructor is private, code outside the defining module can't construct new instances and must use the provided API, which maintains the invariants of the type. Additionally, providing the default constructor name explicitly is an opportunity to attach a {tech}[documentation comment] to the constructor. ```lean structure NatStringBimap where /-- Build a finite bijection between some natural numbers and strings -/ private mk :: natToString : Std.HashMap Nat String stringToNat : Std.HashMap String Nat def NatStringBimap.empty : NatStringBimap := ⟨{}, {}⟩ def NatStringBimap.insert (nat : Nat) (string : String) (map : NatStringBimap) : Option NatStringBimap := if map.natToString.contains nat || map.stringToNat.contains string then none else some <| NatStringBimap.mk (map.natToString.insert nat string) (map.stringToNat.insert string nat) ``` ::: Because structures are represented by single-constructor inductive types, their constructors can be invoked or matched against using {tech}[anonymous constructor syntax]. Additionally, structures may be constructed or matched against using {deftech}_structure instance_ notation, which includes the names of the fields together with values for them. ::::syntax term (title := "Structure Instances") ```grammar { $_,* $[: $ty:term]? 
} ``` Constructs a value of a constructor type given values for named fields. Field specifiers may take two forms: ```grammar (of := Lean.Parser.Term.structInstField) $x := $[private]? $y ``` ```grammar (of := Lean.Parser.Term.structInstField) $f:ident ``` A {syntaxKind}`structInstLVal` is a field name (an identifier), a field index (a natural number), or a term in square brackets, followed by a sequence of zero or more subfields. Subfields are either a field name or index preceded by a dot, or a term in square brackets. This syntax is elaborated to applications of structure constructors. The values provided for fields are by name, and they may be provided in any order. The values provided for subfields are used to initialize fields of constructors of structures that are themselves found in fields. Terms in square brackets are not allowed when constructing a structure; they are used in structure updates. Field specifiers that do not contain `:=` are field abbreviations. In this context, the identifier `f` is an abbreviation for `f := f`; that is, the value of `f` in the current scope is used to initialize the field `f`. Every field that does not have a default value must be provided. If a tactic is specified as the default argument, then it is run at elaboration time to construct the argument's value. In a pattern context, field names are mapped to patterns that match the corresponding projection, and field abbreviations bind a pattern variable that is the field's name. Default arguments are still present in patterns; if a pattern does not specify a value for a field with a default value, then the pattern only matches the default. When a field definition contains the {keywordOf Lean.Parser.Term.stuctInstField}`private` modifier, the value is placed in the current module's {tech}[private scope], even if the structure value is itself in the public scope. The value is wrapped in a public but non-exposed helper definition. 
This is particularly useful with instances of type classes, because the implementation of {tech}[methods] in public {tech}[instances] of type classes are {tech}[exposed] by default. This modifier allows them to be made private. The optional type annotation allows the structure type to be specified in contexts where it is not otherwise determined. :::: ::::example "Patterns and default values" The structure {name}`AugmentedIntList` contains a list together with some extra information, which is empty if omitted: ```lean structure AugmentedIntList where list : List Int augmentation : String := "" ``` When testing whether the list is empty, the function {name AugmentedIntList.isEmpty}`isEmpty` is also testing whether the {name AugmentedIntList.augmentation}`augmentation` field is empty, because the omitted field's default value is also used in pattern contexts: ```lean (name := isEmptyDefaults) def AugmentedIntList.isEmpty : AugmentedIntList → Bool | {list := []} => true | _ => false #eval {list := [], augmentation := "extra" : AugmentedIntList}.isEmpty ``` ```leanOutput isEmptyDefaults false ``` :::: ::::example "Private Field Values" :::leanModules Even when a definition of a structure is {tech}[exposed], individual fields may be hidden using the field-level {keywordOf Lean.Parser.Term.stuctInstField}`private` modifier. In this module, the exposed public definition of {name}`x` may use the private definition {name}`secret` because the {name}`imaginary` field's value is not exposed: ```leanModule (moduleName := Main) module public structure Complex where real : Float imaginary : Float private def secret := 2.3 @[expose] public def x : Complex := { real := 5.0 imaginary := private 2 * secret } ``` ::: :::: ::::example "Private Methods" :::leanModules +error In this module, the existence of the {name}`State` structure is public, but its constructor and field are private. 
The function {name}`State.toString` is also private, and is intended to be accessed via the {name}`ToString` instance. However, because the implementations of {tech}[methods] are exposed for public instances, this is not allowed: ```leanModule (moduleName := Main) (name := tooExposed) module public structure State where private mk :: private count : Nat private def State.toString (s : State) : String := s!"⟨{s.count}⟩" public instance : ToString State where toString s := s.toString ``` ```leanOutput tooExposed Invalid field `toString`: The environment does not contain `State.toString`, so it is not possible to project the field `toString` from an expression s of type `State` Note: A private declaration `State.toString` (from the current module) exists but would need to be public to access here. ``` ::: :::leanModules Marking the implementation of {name}`toString` as {keyword}`private` removes it from the module's {tech}[public scope], giving it access to private functions: ```leanModule (moduleName := Main) (name := tooExposed) module public structure State where private mk :: private count : Nat private def State.toString (s : State) : String := s!"⟨{s.count}⟩" public instance : ToString State where toString s := private s.toString ``` ::: :::: :::syntax term (title := "Structure Updates") ```grammar {$e:term with $_,* $[: $ty:term]?} ``` Updates a value of a constructor type. The term that precedes the {keywordOf Lean.Parser.Term.structInst}`with` clause is expected to have a structure type; it is the value that is being updated. A new instance of the structure is created in which every field not specified is copied from the value that is being updated, and the specified fields are replaced with their new values. When updating a structure, array values may also be replaced by including the index to be updated in square brackets. This updating does not require that the index expression be in bounds for the array, and out-of-bounds updates are discarded. 
::: ::::example "Updating arrays" :::keepEnv Updating structures may use array indices as well as projection names. Updates at indices that are out of bounds are ignored: ```lean (name := arrayUpdate) structure AugmentedIntArray where array : Array Int augmentation : String := "" deriving Repr def one : AugmentedIntArray := {array := #[1]} def two : AugmentedIntArray := {one with array := #[1, 2]} def two' : AugmentedIntArray := {two with array[0] := 2} def two'' : AugmentedIntArray := {two with array[99] := 3} #eval (one, two, two', two'') ``` ```leanOutput arrayUpdate ({ array := #[1], augmentation := "" }, { array := #[1, 2], augmentation := "" }, { array := #[2, 2], augmentation := "" }, { array := #[1, 2], augmentation := "" }) ``` ::: :::: Values of structure types may also be declared using {keywordOf Lean.Parser.Command.declaration (parser:=declValEqns)}`where`, followed by definitions for each field. This may only be used as part of a definition, not in an expression context. ::::example "`where` for structures" :::keepEnv The product type in Lean is a structure named {name}`Prod`. Products can be defined using their projections: ```lean def location : Float × Float where fst := 22.807 snd := -13.923 ``` ::: :::: # Structure Inheritance %%% tag := "structure-inheritance" %%% Structures may be declared as extending other structures using the optional {keywordOf Lean.Parser.Command.declaration (parser:=«structure»)}`extends` clause. The resulting structure type has all of the fields of all of the parent structure types. If the parent structure types have overlapping field names, then all overlapping field names must have the same type. The resulting structure has a {deftech}_field resolution order_ that affects the values of fields. When possible, this resolution order is the [C3 linearization](https://en.wikipedia.org/wiki/C3_linearization) of the structure's parents. 
Essentially, the field resolution order should be a total ordering of the entire set of parents such that every {keywordOf Lean.Parser.Command.declaration (parser:=«structure»)}`extends` list is in order. When there is no C3 linearization, a heuristic is used to find an order nonetheless. Every structure type is first in its own field resolution order. The field resolution order is used to compute the default values of optional fields. When the value of a field is not specified, the first default value defined in the resolution order is used. References to fields in the default value use the field resolution order as well; this means that child structures that override default fields of parent constructors may also change the computed default values of parent fields. Because the child structure is the first element of its own resolution order, default values in the child structure take precedence over default values from the parent structures. ```lean -show -keep -- If the overlapping fields have different default values, then the default value from the first -- parent structure in the resolution order that includes the field is used. 
structure Q where x : Nat := 0 deriving Repr structure Q' where x : Nat := 3 deriving Repr structure Q'' extends Q, Q' deriving Repr structure Q''' extends Q', Q deriving Repr /-- info: structure Q'' : Type number of parameters: 0 parents: Q''.toQ : Q Q''.toQ' : Q' fields: Q.x : Nat := 0 constructor: Q''.mk (toQ : Q) : Q'' field notation resolution order: Q'', Q, Q' -/ #check_msgs in #print Q'' /-- info: 0 -/ #check_msgs in #eval ({} : Q'').x /-- info: structure Q''' : Type number of parameters: 0 parents: Q'''.toQ' : Q' Q'''.toQ : Q fields: Q'.x : Nat := 3 constructor: Q'''.mk (toQ' : Q') : Q''' field notation resolution order: Q''', Q', Q -/ #check_msgs in #print Q''' /-- info: 3 -/ #check_msgs in #eval ({} : Q''').x -- Defaults use local values structure A where n : Nat := 0 deriving Repr structure B extends A where k : Nat := n deriving Repr structure C extends A where n := 5 deriving Repr structure C' extends A where n := 3 deriving Repr structure D extends B, C, C' deriving Repr structure D' extends B, C', C deriving Repr #eval ({} : D).k #eval ({} : D').k ``` When the new structure extends existing structures, the new structure's constructor takes the existing structure's information as additional arguments. Typically, this is in the form of a constructor parameter for each parent structure type. This parent value contains all of the parent's fields. If the parents' fields overlap, however, then the subset of non-overlapping fields from one or more of the parents is included instead of an entire value of the parent structure to prevent duplicating field information. There is no subtyping relation between a parent structure type and its children. Even if structure `B` extends structure `A`, a function expecting an `A` will not accept a `B`. However, conversion functions are generated that convert a structure into each of its parents. These conversion functions are called {deftech}_parent projections_. 
Parent projections are in the child structure's namespace, and their name is the parent structure's name preceded by `to`. ::: example "Structure type inheritance with overlapping fields" In this example, a {lean}`Textbook` is a {lean}`Book` that is also an {lean}`AcademicWork`: ```lean structure Book where title : String author : String structure AcademicWork where author : String discipline : String structure Textbook extends Book, AcademicWork #check Textbook.toBook ``` Because the field `author` occurs in both {lean}`Book` and {lean}`AcademicWork`, the constructor {name}`Textbook.mk` does not take both parents as arguments. Its signature is: ```signature Textbook.mk (toBook : Book) (discipline : String) : Textbook ``` The conversion functions are: ```signature Textbook.toBook (self : Textbook) : Book ``` ```signature Textbook.toAcademicWork (self : Textbook) : AcademicWork ``` The latter combines the `author` field of the included {lean}`Book` with the unbundled `discipline` field, and is equivalent to: ```lean def toAcademicWork (self : Textbook) : AcademicWork := let .mk book discipline := self let .mk _title author := book .mk author discipline ``` ```lean -show -- check claim of equivalence example : toAcademicWork = Textbook.toAcademicWork := by funext b cases b dsimp [toAcademicWork] ``` ::: The resulting structure's projections can be used as if its fields are simply the union of the parents' fields. The Lean elaborator automatically generates an appropriate projection when fields are used. Likewise, the field-based initialization and structure update notations hide the details of the encoding of inheritance. The encoding is, however, visible when using the constructor's name, when using {tech}[anonymous constructor syntax], or when referring to fields by their index rather than their name.
:::: example "Field Indices and Structure Inheritance" ```lean structure Pair (α : Type u) where fst : α snd : α deriving Repr structure Triple (α : Type u) extends Pair α where thd : α deriving Repr def coords : Triple Nat := {fst := 17, snd := 2, thd := 95} ``` Evaluating the first field index of {name}`coords` yields the underlying {name}`Pair`, rather than the contents of the field `fst`: ```lean (name := coords1) #eval coords.1 ``` ```leanOutput coords1 { fst := 17, snd := 2 } ``` The elaborator translates {lean}`coords.fst` into {lean}`coords.toPair.fst`. ```lean -show -keep example (t : Triple α) : t.fst = t.toPair.fst := rfl ``` :::: :::: example "No structure subtyping" :::keepEnv Given these definitions of even numbers, even prime numbers, and a concrete even prime: ```lean structure EvenNumber where val : Nat isEven : 2 ∣ val := by decide structure EvenPrime extends EvenNumber where notOne : val ≠ 1 := by decide isPrime : ∀ n, n ≤ val → n ∣ val → n = 1 ∨ n = val def two : EvenPrime where val := 2 isPrime := by intros repeat' (cases ‹Nat.le _ _›) all_goals omega def printEven (num : EvenNumber) : IO Unit := IO.print num.val ``` it is a type error to apply {name}`printEven` directly to {name}`two`: ```lean +error (name := printTwo) #check printEven two ``` ```leanOutput printTwo Application type mismatch: The argument two has type EvenPrime but is expected to have type EvenNumber in the application printEven two ``` because values of type {name}`EvenPrime` are not also values of type {name}`EvenNumber`. 
::: :::: ```lean -show -keep structure A where x : Nat y : Int structure A' where x : Int structure B where foo : Nat structure C extends A where z : String /-- info: C.mk (toA : A) (z : String) : C -/ #check_msgs in #check C.mk def someC : C where x := 1 y := 2 z := "" /-- error: Type mismatch someC has type C but is expected to have type A -/ #check_msgs in #check (someC : A) structure D extends A, B where z : String /-- info: D.mk (toA : A) (toB : B) (z : String) : D -/ #check_msgs in #check D.mk structure E extends A, B where x := 44 z : String /-- info: E.mk (toA : A) (toB : B) (z : String) : E -/ #check_msgs in #check E.mk /-- error: Field type mismatch: Field `x` from parent `A'` has type Int but is expected to have type Nat -/ #check_msgs in structure F extends A, A' where ``` The {keywordOf Lean.Parser.Command.print}`#print` command displays the most important information about structure types, including the {tech}[parent projections], all the fields with their default values, the constructor, and the {tech}[field resolution order]. When working with deep hierarchies that contain inheritance diamonds, this information can be very useful. ::: example "{keyword}`#print` and Structure Types" This collection of structure types models a variety of bicycles, both electric and non-electric and both ordinary-sized and large family bicycles. The final structure type, {lean}`ElectricFamilyBike`, contains a diamond in its inheritance graph, because both {lean}`FamilyBike` and {lean}`ElectricBike` extend {lean}`Bicycle`. 
```lean structure Vehicle where wheels : Nat structure Bicycle extends Vehicle where wheels := 2 structure ElectricVehicle extends Vehicle where batteries : Nat := 1 structure FamilyBike extends Bicycle where wheels := 3 structure ElectricBike extends Bicycle, ElectricVehicle structure ElectricFamilyBike extends FamilyBike, ElectricBike where batteries := 2 ``` The {keywordOf Lean.Parser.Command.print}`#print` command displays the important information about each structure type: ```lean (name := el) #print ElectricBike ``` ```leanOutput el structure ElectricBike : Type number of parameters: 0 parents: ElectricBike.toBicycle : Bicycle ElectricBike.toElectricVehicle : ElectricVehicle fields: Vehicle.wheels : Nat := 2 ElectricVehicle.batteries : Nat := 1 constructor: ElectricBike.mk (toBicycle : Bicycle) (batteries : Nat) : ElectricBike field notation resolution order: ElectricBike, Bicycle, ElectricVehicle, Vehicle ``` An {lean}`ElectricFamilyBike` has three wheels by default because {lean}`FamilyBike` precedes {lean}`Bicycle` in its resolution order: ```lean (name := elFam) #print ElectricFamilyBike ``` ```leanOutput elFam structure ElectricFamilyBike : Type number of parameters: 0 parents: ElectricFamilyBike.toFamilyBike : FamilyBike ElectricFamilyBike.toElectricBike : ElectricBike fields: Vehicle.wheels : Nat := 3 ElectricVehicle.batteries : Nat := 2 constructor: ElectricFamilyBike.mk (toFamilyBike : FamilyBike) (batteries : Nat) : ElectricFamilyBike field notation resolution order: ElectricFamilyBike, FamilyBike, ElectricBike, Bicycle, ElectricVehicle, Vehicle ``` :::
reference-manual/Manual/ErrorExplanations/InferDefTypeFailed.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `inferDefTypeFailed`" => %%% shortTitle := "inferDefTypeFailed" %%% {errorExplanationHeader lean.inferDefTypeFailed} This error occurs when the type of a definition is not fully specified and Lean is unable to infer its type from the available information. If the definition has parameters, this error refers only to the resulting type after the colon (the error {ref "lean.inferBinderTypeFailed" (domain := Manual.errorExplanation)}[`lean.inferBinderTypeFailed`] indicates that a parameter type could not be inferred). To resolve this error, provide additional type information in the definition. This can be done straightforwardly by providing an explicit resulting type after the colon in the definition header. Alternatively, if an explicit resulting type is not provided, adding further type information to the definition's body—such as by specifying implicit type arguments or giving explicit types to `let` binders—may allow Lean to infer the type of the definition. Look for type inference or implicit argument synthesis errors that arise alongside this one to identify ambiguities that may be contributing to this error. Note that when an explicit resulting type is provided—even if that type contains holes—Lean will not use information from the definition body to help infer the type of the definition or its parameters. Thus, adding an explicit resulting type may also necessitate adding type annotations to parameters whose types were previously inferable. Additionally, it is always necessary to provide an explicit type in a `theorem` declaration: the `theorem` syntax requires a type annotation, and the elaborator will never attempt to use the theorem body to infer the proposition being proved. 
# Examples :::errorExample "Implicit Argument Cannot be Inferred" ```broken def emptyNats := [] ``` ```output Failed to infer type of definition `emptyNats` ``` ```fixed "type annotation" def emptyNats : List Nat := [] ``` ```fixed "implicit argument" def emptyNats := List.nil (α := Nat) ``` Here, Lean is unable to infer the value of the parameter `α` of the `List` type constructor, which in turn prevents it from inferring the type of the definition. Two fixes are possible: specifying the expected type of the definition allows Lean to infer the appropriate implicit argument to the `List.nil` constructor; alternatively, making this implicit argument explicit in the function body provides sufficient information for Lean to infer the definition's type. ::: :::errorExample "Definition Type Uninferrable Due to Unknown Parameter Type" ```broken def identity x := x ``` ```output Failed to infer type of definition `identity` ``` ```fixed def identity (x : α) := x ``` In this example, the type of `identity` is determined by the type of `x`, which cannot be inferred. Both the indicated error and {ref "lean.inferBinderTypeFailed" (domain := Manual.errorExplanation)}[`lean.inferBinderTypeFailed`] therefore appear (see that explanation for additional discussion of this example). Resolving the latter—by explicitly specifying the type of `x`—provides Lean with sufficient information to infer the definition type. :::
reference-manual/Manual/ErrorExplanations/DependsOnNoncomputable.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `dependsOnNoncomputable`" => %%% shortTitle := "dependsOnNoncomputable" %%% {errorExplanationHeader lean.dependsOnNoncomputable} This error indicates that the specified definition depends on one or more definitions that do not contain executable code and is therefore required to be marked as {keyword}`noncomputable`. Such definitions can be type-checked but do not contain code that can be executed by Lean. If you intended for the definition named in the error message to be noncomputable, marking it as {keyword}`noncomputable` will resolve this error. If you did not, inspect the noncomputable definitions on which it depends: they may be noncomputable because they failed to compile, are {keyword}`axiom`s, or were themselves marked as {keyword}`noncomputable`. Making all of your definition's noncomputable dependencies computable will also resolve this error. See the manual section on {ref "declaration-modifiers"}[Modifiers] for more information about noncomputable definitions. # Examples :::errorExample "Necessarily Noncomputable Function Not Appropriately Marked" ```broken axiom transform : Nat → Nat def transformIfZero : Nat → Nat | 0 => transform 0 | n => n ``` ```output `transform` not supported by code generator; consider marking definition as `noncomputable` ``` ```fixed axiom transform : Nat → Nat noncomputable def transformIfZero : Nat → Nat | 0 => transform 0 | n => n ``` In this example, `transformIfZero` depends on the axiom `transform`. Because `transform` is an axiom, it does not contain any executable code; although the value `transform 0` has type `Nat`, there is no way to compute its value. Thus, `transformIfZero` must be marked `noncomputable` because its execution would depend on this axiom. 
::: :::errorExample "Noncomputable Dependency Can Be Made Computable" ```broken noncomputable def getOrDefault [Nonempty α] : Option α → α | some x => x | none => Classical.ofNonempty def endsOrDefault (ns : List Nat) : Nat × Nat := let head := getOrDefault ns.head? let tail := getOrDefault ns.getLast? (head, tail) ``` ```output failed to compile definition, consider marking it as 'noncomputable' because it depends on 'getOrDefault', which is 'noncomputable' ``` ```fixed def getOrDefault [Inhabited α] : Option α → α | some x => x | none => default def endsOrDefault (ns : List Nat) : Nat × Nat := let head := getOrDefault ns.head? let tail := getOrDefault ns.getLast? (head, tail) ``` The original definition of `getOrDefault` is noncomputable due to its use of `Classical.choice`. Unlike in the preceding example, however, it is possible to implement a similar but computable version of `getOrDefault` (using the `Inhabited` type class), allowing `endsOrDefault` to be computable. (The differences between `Inhabited` and `Nonempty` are described in the documentation of inhabited types in the manual section on {ref "basic-classes"}[Basic Classes].) ::: :::errorExample "Noncomputable Instance in Namespace" ```broken open Classical in /-- Returns `y` if it is in the image of `f`, or an element of the image of `f` otherwise. -/ def fromImage (f : Nat → Nat) (y : Nat) := if ∃ x, f x = y then y else f 0 ``` ```output failed to compile definition, consider marking it as 'noncomputable' because it depends on 'propDecidable', which is 'noncomputable' ``` ```fixed open Classical in /-- Returns `y` if it is in the image of `f`, or an element of the image of `f` otherwise. -/ noncomputable def fromImage (f : Nat → Nat) (y : Nat) := if ∃ x, f x = y then y else f 0 ``` The `Classical` namespace contains `Decidable` instances that are not computable. These are a common source of noncomputable dependencies that do not explicitly appear in the source code of a definition. 
In the above example, for instance, a `Decidable` instance for the proposition `∃ x, f x = y` is synthesized using a `Classical` decidability instance; therefore, `fromImage` must be marked `noncomputable`. :::
reference-manual/Manual/ErrorExplanations/RedundantMatchAlt.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `redundantMatchAlt`" => %%% shortTitle := "redundantMatchAlt" %%% {errorExplanationHeader lean.redundantMatchAlt} This error occurs when an alternative in a pattern match can never be reached: any values that would match the provided patterns would also match some preceding alternative. Refer to the {ref "pattern-matching"}[Pattern Matching] manual section for additional details about pattern matching. This error may appear in any pattern matching expression, including {keywordOf Lean.Parser.Term.match}`match` expressions, equational function definitions, `if let` bindings, and monadic {keywordOf Lean.Parser.Term.let}`let` bindings with fallback clauses. In pattern-matches with multiple arms, this error may occur if a less-specific pattern precedes a more-specific one that it subsumes. Bear in mind that expressions are matched against patterns from top to bottom, so specific patterns should precede generic ones. In {keywordOf termIfLet}`if let` bindings and monadic {keywordOf Lean.Parser.Term.let}`let` bindings with fallback clauses, in which only one pattern is specified, this error indicates that the specified pattern will always be matched. In this case, the binding in question can be replaced with a standard pattern-matching {keywordOf Lean.Parser.Term.let}`let`. One common cause of this error is that a pattern that was intended to match a constructor was instead interpreted as a variable binding. This occurs, for instance, if a constructor name (e.g., `cons`) is written without its prefix ({name}`List`) outside of that type's namespace. The constructor-name-as-variable linter, enabled by default, will display a warning on any variable patterns that resemble constructor names. This error nearly always indicates an issue with the code where it appears. 
If needed, however, `set_option match.ignoreUnusedAlts true` will disable the check for this error and allow pattern matches with redundant alternatives to be compiled by discarding the unused arms. # Examples :::errorExample "Incorrect Ordering of Pattern Matches" ```broken def seconds : List (List α) → List α | [] => [] | _ :: xss => seconds xss | (_ :: x :: _) :: xss => x :: seconds xss ``` ```output Redundant alternative: Any expression matching (head✝ :: x :: tail✝) :: xss will match one of the preceding alternatives ``` ```fixed def seconds : List (List α) → List α | [] => [] | (_ :: x :: _) :: xss => x :: seconds xss | _ :: xss => seconds xss ``` Since any expression matching `(_ :: x :: _) :: xss` will also match `_ :: xss`, the last alternative in the broken implementation is never reached. We resolve this by moving the more specific alternative before the more general one. ::: :::errorExample "Unnecessary Fallback Clause" ```broken example (p : Nat × Nat) : IO Nat := do let (m, n) := p | return 0 return m + n ``` ```output Redundant alternative: Any expression matching x✝ will match one of the preceding alternatives ``` ```fixed example (p : Nat × Nat) : IO Nat := do let (m, n) := p return m + n ``` Here, the fallback clause serves as a catch-all for all values of `p` that do not match `(m, n)`. However, no such values exist, so the fallback clause is unnecessary and can be removed. A similar error arises when using `if let pat := e` when `e` will always match `pat`. 
::: :::errorExample "Pattern Treated as Variable, Not Constructor" ```broken example (xs : List Nat) : Bool := match xs with | nil => false | _ => true ``` ```output Redundant alternative: Any expression matching x✝ will match one of the preceding alternatives ``` ```fixed example (xs : List Nat) : Bool := match xs with | .nil => false | _ => true ``` In the original example, `nil` is treated as a variable, not as a constructor name, since this definition is not within the {name}`List` namespace. Thus, all values of `xs` will match the first pattern, rendering the second unused. Notice that the constructor-name-as-variable linter displays a warning at `nil`, indicating its similarity to a valid constructor name. Using dot-prefix notation, as shown in the fixed example, or specifying the full constructor name {name}`List.nil` achieves the intended behavior. :::
reference-manual/Manual/ErrorExplanations/InductionWithNoAlts.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `inductionWithNoAlts`" => %%% shortTitle := "inductionWithNoAlts" %%% {errorExplanationHeader lean.inductionWithNoAlts} Tactic-based proofs using induction in Lean need to use a pattern-matching-like notation to describe individual cases of the proof. However, the `induction'` tactic in Mathlib and the specialized `induction` tactic for natural numbers used in the Natural Number Game follow a different pattern. # Examples :::errorExample "Adding Explicit Cases to an Induction Proof" ```broken theorem zero_mul (m : Nat) : 0 * m = 0 := by induction m with n n_ih rw [Nat.mul_zero] rw [Nat.mul_succ] rw [Nat.add_zero] rw [n_ih] ``` ```output Invalid syntax for induction tactic: The `with` keyword must be followed by a tactic or by an alternative (e.g. `| zero =>`), but here it is followed by the identifier `n`. ``` ```fixed theorem zero_mul (m : Nat) : 0 * m = 0 := by induction m with | zero => rw [Nat.mul_zero] | succ n n_ih => rw [Nat.mul_succ] rw [Nat.add_zero] rw [n_ih] ``` The broken example has the structure of a correct proof in the Natural Number Game, and this proof will work if you `import Mathlib` and replace `induction` with `induction'`. Induction tactics in basic Lean expect the {keyword}`with` keyword to be followed by a series of cases, and the names for the inductive case are provided in the {name Nat.succ}`succ` case rather than being provided up-front. :::
reference-manual/Manual/ErrorExplanations/InvalidField.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `invalidField`" => %%% shortTitle := "invalidField" %%% {errorExplanationHeader lean.invalidField} This error indicates that an expression containing a dot followed by an identifier was encountered, and that it wasn't possible to understand the identifier as a field. Lean's field notation is very powerful, but this can also make it confusing: the expression `color.value` can be a single {ref "identifiers-and-resolution"}[identifier], it can be a reference to the {ref "structure-fields"}[field of a structure], or it can be a call to a function on the value `color` with {ref "generalized-field-notation"}[generalized field notation]. # Examples :::errorExample "Incorrect Field Name" ```broken #eval (4 + 2).suc ``` ```output Invalid field `suc`: The environment does not contain `Nat.suc`, so it is not possible to project the field `suc` from an expression 4 + 2 of type `Nat` ``` ```fixed #eval (4 + 1).succ ``` The simplest reason for an invalid field error is that the function being sought, like `Nat.suc`, does not exist. ::: :::errorExample "Projecting from the Wrong Expression" ```broken #eval '>'.leftpad 10 ['a', 'b', 'c'] ``` ```output Invalid field `leftpad`: The environment does not contain `Char.leftpad`, so it is not possible to project the field `leftpad` from an expression '>' of type `Char` ``` ```fixed #eval ['a', 'b', 'c'].leftpad 10 '>' ``` The type of the expression before the dot entirely determines the function being called by the field projection. There is no `Char.leftpad`, and the only way to invoke `List.leftpad` with generalized field notation is to have the list come before the dot. ::: :::errorExample "Type is Not Specific" ```broken def double_plus_one {α} [Add α] (x : α) := (x + x).succ ``` ```output Invalid field notation: Field projection operates on types of the form `C ...` where C is a constant.
The expression x + x has type `α` which does not have the necessary form. ``` ```fixed def double_plus_one (x : Nat) := (x + x).succ ``` The `Add` type class is sufficient for performing the addition `x + x`, but the `.succ` field notation cannot operate without knowing more about the actual type from which `succ` is being projected. ::: :::errorExample "Insufficient Type Information" ```broken example := fun (n) => n.succ.succ ``` ```output Invalid field notation: Type of n is not known; cannot resolve field `succ` Hint: Consider replacing the field projection with a call to one of the following: • `Fin.succ` • `Nat.succ` • `Lean.Level.succ` • `Std.PRange.succ` • `Lean.Level.PP.Result.succ` • `Std.Time.Internal.Bounded.LE.succ` ``` ```fixed example := fun (n : Nat) => n.succ.succ ``` Generalized field notation can only be used when it is possible to determine the type that is being projected. Type annotations may need to be added to make generalized field notation work. :::
reference-manual/Manual/ErrorExplanations/SynthInstanceFailed.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `synthInstanceFailed`" => %%% shortTitle := "synthInstanceFailed" %%% {errorExplanationHeader lean.synthInstanceFailed} ```lean -show variable {t : Type} (x y : Int) ``` {ref "type-classes"}[Type classes] are the mechanism that Lean and many other programming languages use to handle overloaded operations. The code that handles a particular overloaded operation is an {tech}_instance_ of a type class; deciding which instance to use for a given overloaded operation is called _synthesizing_ an instance. As an example, when Lean encounters an expression {lean}`x + y` where {lean}`x` and {lean}`y` both have type {name}`Int`, it is necessary to look up how it should add two integers and also look up what the resulting type will be. This is described as synthesizing an instance of the type class {lean}`HAdd Int Int t` for some type `t`. Many failures to synthesize an instance of a type class are the result of using the wrong binary operation. Both success and failure are not always straightforward, because some instances are defined in terms of other instances, and Lean must recursively search to find appropriate instances. It's possible to {ref "instance-search"}[inspect Lean's instance synthesis], and this can be helpful for diagnosing tricky failures of type class instance synthesis. # Examples :::errorExample "Using the Wrong Binary Operation" ```broken #eval "A" + "3" ``` ```output failed to synthesize instance of type class HAdd String String ?m.4 Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` ```fixed #eval "A" ++ "3" ``` The binary operation `+` is associated with the {name}`HAdd` type class, and there's no way to add two strings. The binary operation `++`, associated with the {name}`HAppend` type class, is the correct way to append strings. 
::: :::errorExample "Arguments Have the Wrong Type" ```broken def x : Int := 3 #eval x ++ "meters" ``` ```output failed to synthesize instance of type class HAppend Int String ?m.4 Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` ```fixed def x : Int := 3 #eval ToString.toString x ++ "meters" ``` Lean does not allow integers and strings to be added directly. The function {name}`ToString.toString` uses type class overloading to convert values to strings; by successfully searching for an instance of {lean}`ToString Int`, the second example will succeed. ::: :::errorExample "Missing Type Class Instance" ```broken inductive MyColor where | chartreuse | sienna | thistle def forceColor (oc : Option MyColor) := oc.get! ``` ```output failed to synthesize instance of type class Inhabited MyColor Hint: Adding the command `deriving instance Inhabited for MyColor` may allow Lean to derive the missing instance. ``` ```fixed "derive instance when defining type" inductive MyColor where | chartreuse | sienna | thistle deriving Inhabited def forceColor (oc : Option MyColor) := oc.get! ``` ```fixed "derive instance separately" inductive MyColor where | chartreuse | sienna | thistle deriving instance Inhabited for MyColor def forceColor (oc : Option MyColor) := oc.get! ``` ```fixed "define instance" inductive MyColor where | chartreuse | sienna | thistle instance : Inhabited MyColor where default := .sienna def forceColor (oc : Option MyColor) := oc.get! ``` Type class synthesis can fail because an instance of the type class simply needs to be provided. This commonly happens for type classes like {name}`Repr`, {name}`BEq`, {name}`ToJson` and {name}`Inhabited`. Lean can often {ref "deriving-instances"}[automatically generate instances of the type class with the `deriving` keyword] either when the type is defined or with the stand-alone {keywordOf Lean.Parser.Command.deriving}`deriving` command. :::
reference-manual/Manual/ErrorExplanations/CtorResultingTypeMismatch.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean Doc open Verso.Genre Manual InlineLean #doc (Manual) "About: `ctorResultingTypeMismatch`" => %%% shortTitle := "ctorResultingTypeMismatch" %%% {errorExplanationHeader lean.ctorResultingTypeMismatch} In an inductive declaration, the resulting type of each constructor must match the type being declared; if it does not, this error is raised. That is, every constructor of an inductive type must return a value of that type. See the {ref "inductive-types"}[Inductive Types] manual section for additional details. Note that it is possible to omit the resulting type for a constructor if the inductive type being defined has no indices. # Examples :::errorExample "Typo in Resulting Type" ```broken inductive Tree (α : Type) where | leaf : Tree α | node : α → Tree α → Treee α ``` ```output Unexpected resulting type for constructor `Tree.node`: Expected an application of Tree but found ?m.2 ``` ```fixed inductive Tree (α : Type) where | leaf : Tree α | node : α → Tree α → Tree α ``` ::: :::errorExample "Missing Resulting Type After Constructor Parameter" ```broken inductive Credential where | pin : Nat | password : String ``` ```output Unexpected resulting type for constructor `Credential.pin`: Expected Credential but found Nat ``` ```fixed "resulting type" inductive Credential where | pin : Nat → Credential | password : String → Credential ``` ```fixed "named parameter" inductive Credential where | pin (num : Nat) | password (str : String) ``` If the type of a constructor is annotated, the full type—including the resulting type—must be provided. Alternatively, constructor parameters can be written using named binders; this allows the omission of the constructor's resulting type because it contains no indices. :::
reference-manual/Manual/ErrorExplanations/InductiveParamMissing.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `inductiveParamMissing`" => %%% shortTitle := "inductiveParamMissing" %%% {errorExplanationHeader lean.inductiveParamMissing} This error occurs when an inductive type constructor is partially applied in the type of one of its constructors such that one or more parameters of the type are omitted. The elaborator requires that all parameters of an inductive type be specified everywhere that type is referenced in its definition, including in the types of its constructors. If it is necessary to allow the type constructor to be partially applied, without specifying a given type parameter, that parameter must be converted to an index. See the manual section on {ref "inductive-types"}[Inductive Types] for further explanation of the difference between indices and parameters. # Examples :::errorExample "Omitting Parameter in Argument to Higher-Order Predicate" ```broken inductive List.All {α : Type u} (P : α → Prop) : List α → Prop | nil : All P [] | cons {x xs} : P x → All P xs → All P (x :: xs) structure RoseTree (α : Type u) where val : α children : List (RoseTree α) inductive RoseTree.All (P : α → Prop) (t : RoseTree α) : Prop | intro : P t.val → List.All (All P) t.children → All P t ``` ```output Missing parameter(s) in occurrence of inductive type: In the expression List.All (All P) t.children found All P but expected all parameters to be specified: All P t Note: All occurrences of an inductive type in the types of its constructors must specify its fixed parameters. Only indices can be omitted in a partial application of the type constructor. 
``` ```fixed inductive List.All {α : Type u} (P : α → Prop) : List α → Prop | nil : All P [] | cons {x xs} : P x → All P xs → All P (x :: xs) structure RoseTree (α : Type u) where val : α children : List (RoseTree α) inductive RoseTree.All (P : α → Prop) : RoseTree α → Prop | intro : P t.val → List.All (All P) t.children → All P t ``` Because the `RoseTree.All` type constructor must be partially applied in the argument to `List.All`, the unspecified argument (`t`) must not be a parameter of the `RoseTree.All` predicate. Making it an index to the right of the colon in the header of `RoseTree.All` allows this partial application to succeed. :::
reference-manual/Manual/ErrorExplanations/ProjNonPropFromProp.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `projNonPropFromProp`" => %%% shortTitle := "projNonPropFromProp" %%% {errorExplanationHeader lean.projNonPropFromProp} This error occurs when attempting to project a piece of data from a proof of a proposition using an index projection. For example, if `h` is a proof of an existential proposition, attempting to extract the witness `h.1` is an example of this error. Such projections are disallowed because they may violate Lean's prohibition of large elimination from {lean}`Prop` (refer to the {ref "propositions"}[Propositions] manual section for further details). Instead of an index projection, consider using a pattern-matching {keywordOf Lean.Parser.Term.let}`let`, {keywordOf Lean.Parser.Term.match}`match` expression, or a destructuring tactic like {tactic}`cases` to eliminate from one propositional type to another. Note that such elimination is only valid if the resulting value is also in {lean}`Prop`; if it is not, the error {ref "lean.propRecLargeElim" (domain := Manual.errorExplanation)}[`lean.propRecLargeElim`] will be raised. # Examples :::errorExample "Attempting to Use Index Projection on Existential Proof" ```broken example (a : Nat) (h : ∃ x : Nat, x > a + 1) : ∃ x : Nat, x > 0 := ⟨h.1, Nat.lt_of_succ_lt h.2⟩ ``` ```output Invalid projection: Cannot project a value of non-propositional type Nat from the expression h which has propositional type ∃ x, x > a + 1 ``` ```fixed "let" example (a : Nat) (h : ∃ x : Nat, x > a + 1) : ∃ x : Nat, x > a := let ⟨w, hw⟩ := h ⟨w, Nat.lt_of_succ_lt hw⟩ ``` ```fixed "cases" example (a : Nat) (h : ∃ x : Nat, x > a + 1) : ∃ x : Nat, x > a := by cases h with | intro w hw => exists w omega ``` The witness associated with a proof of an existential proposition cannot be extracted using an index projection. 
Instead, it is necessary to use a pattern match: either a term like a {keywordOf Lean.Parser.Term.let}`let` binding or a tactic like {tactic}`cases`. :::
reference-manual/Manual/ErrorExplanations/UnknownIdentifier.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `unknownIdentifier`" => %%% shortTitle := "unknownIdentifier" %%% {errorExplanationHeader lean.unknownIdentifier} This error means that Lean was unable to find a variable or constant matching the given name. More precisely, this means that the name could not be *resolved*, as described in the manual section on {ref "identifiers-and-resolution"}[Identifiers]: no interpretation of the input as the name of a local or section variable (if applicable), a previously declared global constant, or a projection of either of the preceding was valid. (“If applicable” refers to the fact that in some cases—e.g., the {keywordOf Lean.Parser.Command.print}`#print` command's argument—names are resolved only to global constants.) Note that this error message will display only one possible resolution of the identifier, but the presence of this error indicates failures for *all* possible names to which it might refer. For example, if the identifier `x` is entered while the namespaces `A` and `B` are open, the error message “Unknown identifier \`x\`” indicates that none of `x`, `A.x`, or `B.x` could be found (or that `A.x` or `B.x`, if either exists, is a protected declaration). Common causes of this error include forgetting to import the module in which a constant is defined, omitting a constant's namespace when that namespace is not open, or attempting to refer to a local variable that is not in scope. To help resolve some of these common issues, this error message is accompanied by a code action that suggests constant names similar to the one provided. These include constants in the environment as well as those that can be imported from other modules. Note that these suggestions are available only through supported code editors' built-in code action mechanisms and not as a hint in the error message itself. 
# Examples :::errorExample "Variable Not in Scope" ```broken example (s : IO.FS.Stream) := do IO.withStdout s do let text := "Hello" IO.println text IO.println s!"Wrote '{text}' to stream" ``` ```output Unknown identifier `text` ``` ```fixed example (s : IO.FS.Stream) := do let text := "Hello" IO.withStdout s do IO.println text IO.println s!"Wrote '{text}' to stream" ``` An unknown identifier error occurs on the last line of this example because the variable `text` is not in scope. The {keywordOf Lean.Parser.Term.let}`let`-binding on the third line is scoped to the inner {keywordOf Lean.Parser.Term.do}`do` block and cannot be accessed in the outer {keywordOf Lean.Parser.Term.do}`do` block. Moving this binding to the outer {keywordOf Lean.Parser.Term.do}`do` block—from which it remains in scope in the inner block as well—resolves the issue. ::: :::errorExample "Missing Namespace" ```broken inductive Color where | rgb (r g b : Nat) | grayscale (k : Nat) def red : Color := rgb 255 0 0 ``` ```output Unknown identifier `rgb` ``` ```fixed "qualified name" inductive Color where | rgb (r g b : Nat) | grayscale (k : Nat) def red : Color := Color.rgb 255 0 0 ``` ```fixed "open namespace" inductive Color where | rgb (r g b : Nat) | grayscale (k : Nat) open Color in def red : Color := rgb 255 0 0 ``` In this example, the identifier `rgb` on the last line does not resolve to the `Color` constructor of that name. This is because the constructor's name is actually `Color.rgb`: all constructors of an inductive type have names in that type's namespace. Because the `Color` namespace is not open, the identifier `rgb` cannot be used without its namespace prefix. One way to resolve this error is to provide the fully qualified constructor name `Color.rgb`; the dotted-identifier notation `.rgb` can also be used, since the expected type of `.rgb 255 0 0` is `Color`. Alternatively, one can open the `Color` namespace and continue to omit the `Color` prefix from the identifier. 
::: :::errorExample "Protected Constant Name Without Namespace Prefix" ```broken protected def A.x := () open A example := x ``` ```output Unknown identifier `x` ``` ```fixed "qualified name" protected def A.x := () open A example := A.x ``` ```fixed "restricted open" protected def A.x := () open A (x) example := x ``` In this example, because the constant `A.x` is {keyword}`protected`, it cannot be referred to by the suffix `x` even with the namespace `A` open. Therefore, the identifier `x` fails to resolve. Instead, to refer to a {keyword}`protected` constant, it is necessary to include at least its innermost namespace—in this case, `A`. Alternatively, the *restricted opening* syntax—demonstrated in the second corrected example—allows a {keyword}`protected` constant to be referred to by its unqualified name, without opening the remainder of the namespace in which it occurs (see the manual section on {ref "namespaces-sections"}[Namespaces and Sections] for details). ::: :::errorExample "Unresolvable Name Inferred by Dotted-Identifier Notation" ```broken def disjoinToNat (b₁ b₂ : Bool) : Nat := .toNat (b₁ || b₂) ``` ```output Unknown constant `Nat.toNat` Note: Inferred this name from the expected resulting type of `.toNat`: Nat ``` ```fixed "generalized field notation" def disjoinToNat (b₁ b₂ : Bool) : Nat := (b₁ || b₂).toNat ``` ```fixed "qualified name" def disjoinToNat (b₁ b₂ : Bool) : Nat := Bool.toNat (b₁ || b₂) ``` In this example, the dotted-identifier notation `.toNat` causes Lean to infer an unresolvable name (`Nat.toNat`). The namespace used by dotted-identifier notation is always inferred from the expected type of the expression in which it occurs, which—due to the type annotation on `disjoinToNat`—is `Nat` in this example. To use the namespace of an argument's type—as the author of this code seemingly intended—use *generalized field notation* as shown in the first corrected example. 
Alternatively, the correct namespace can be explicitly specified by writing the fully qualified function name. ::: :::errorExample "Auto-bound variables" ```broken set_option relaxedAutoImplicit false in def thisBreaks (x : α₁) (y : size₁) := () set_option autoImplicit false in def thisAlsoBreaks (x : α₂) (y : size₂) := () ``` ```output Unknown identifier `size₁` Note: It is not possible to treat `size₁` as an implicitly bound variable here because it has multiple characters while the `relaxedAutoImplicit` option is set to `false`. ``` ```fixed "modifying options" set_option relaxedAutoImplicit true in def thisWorks (x : α₁) (y : size₁) := () set_option autoImplicit true in def thisAlsoWorks (x : α₂) (y : size₂) := () ``` ```fixed "add implicit bindings for the unknown identifiers" set_option relaxedAutoImplicit false in def thisWorks {size₁} (x : α₁) (y : size₁) := () set_option autoImplicit false in def thisAlsoWorks {α₂ size₂} (x : α₂) (y : size₂) := () ``` Lean's default behavior, when it encounters an identifier it can't identify in the type of a definition, is to add {ref "automatic-implicit-parameters"}[automatic implicit parameters] for those unknown identifiers. However, many files or projects disable this feature by setting the {option}`autoImplicit` or {option}`relaxedAutoImplicit` options to {name}`false`. Without re-enabling the {option}`autoImplicit` or {option}`relaxedAutoImplicit` options, the easiest way to fix this error is to add the unknown identifiers as {ref "implicit-functions"}[ordinary implicit parameters] as shown in the example above. :::
reference-manual/Manual/ErrorExplanations/README.md
# Writing Error Explanations Error explanations give more context and discussion than is reasonable to include in an actual error message. The error message in the Lean InfoView contains a link to the error explanation for further reading. An error explanation is just a manual page obeying a few conventions: - Defined in the module `Manual.ErrorExplanations.{ErrorSuffix}` - Titled ``About: `{errorSuffix}`"`` - Has the `shortTitle` property `"{errorSuffix}"` - Starts with a `{errorExplanationHeader lean.{errorSuffix}}` block command - A description - Contains a single section header, `Examples`, and that section contains only a series of `errorExample` directives - Included in alphabetical order in the `Manual.ErrorExplanations` module For the `lean.ctorResultingTypeMismatch` named error, `{errorSuffix}` is `ctorResultingTypeMismatch` and `{ErrorSuffix}` is `CtorResultingTypeMismatch`. The reference manual should only need to contain docs for `lean.*` error names. ## Error Explanations in the Compiler New error explanations are declared with the `register_error_explanation` command and take a `Lean.ErrorExplanation.Metadata` structure describing the error. All errors have two-component names: the first identifies the package or "domain" to which the error belongs (in core, this will always be `lean`); the second identifies the error itself. Error names are written in camel case and should be descriptive but not excessively verbose. Abbreviations in error names are acceptable, but they should be reasonably clear (e.g., abbreviations that are commonly used elsewhere in Lean, such as `Ctor` for "constructor") and should not be ambiguous with other vocabulary likely to appear in error names (e.g., use `Indep` rather than `Ind` for "independent," since the latter could be confused with "inductive"). You can write an error explanation for a named error that hasn't been registered yet; this will only generate a warning when building the reference manual. 
You shouldn't merge a new registered error name until the reference manual contains an error explanation. Failing to include an error explanation for an error defined in the toolchain's Lean version will cause an error when generating the manual. ## Descriptions The description should begin by explaining the meaning of the error and why it occurs. It should also briefly explain, if appropriate, any relevant context, such as related errors or relevant entries in the reference manual. The latter is especially useful for directing users to important concepts for understanding an error: while it is appropriate to provide brief conceptual exposition in an error explanation, avoid extensively duplicating content that can be found elsewhere in the manual. General resolution or debugging strategies not tied to specific examples can also be discussed in the description portion of an explanation. ## Examples The second half of an explanation (set off by the level-1 header "Examples") contains annotated code examples. Each contains a `broken` code block containing a self-contained minimal working (or error-producing) example (MWE), followed by an `output` code block containing the error. Subsequent code blocks should be labeled `fixed` and should illustrate how to rewrite the code correctly. (If there is more than one, they require a title given as a positional string argument.) Finally, after these MWEs, include explanatory text describing the example and the cause and resolution of the error it demonstrates. Note that each MWE in an example will be rendered as a tab, and the full example (including its explanatory text) will appear in a collapsible "Example" block like those used elsewhere in the manual. Examples should center on code: prose not specific to the example should generally appear in the initial half of the explanation. 
However, an example should provide sufficient commentary for users to understand how it illustrates relevant ideas from the preceding description and what was done to resolve the exemplified error. Choose examples carefully: they should be relatively minimal, so as to draw out the error itself, but not so contrived as to impede comprehension. Each should contain a distinct, representative instance of the error to avoid the need for excessively many. ## Example Error Explanation For a new named error `lean.foo`, the `Manual.ErrorExplanations` module will need to import `Manual.ErrorExplanations.Foo` and include the line `{include 0 Manual.ErrorExplanations.Foo}`. ````` /- Manual/ErrorExplanations/Foo.lean -/ import VersoManual import Manual.Meta.ErrorExplanation open Lean Doc open Verso.Genre Manual InlineLean #doc (Manual) "About: `foo`" => %%% shortTitle := "foo" %%% {errorExplanationHeader lean.foo} ...mandatory short description... # Examples :::errorExample "Description Should Use Headline Case" ```broken example := x ``` ```output Unknown identifier `x` ``` ```fixed def x := 19 example := x ``` ...optional short discussion of example... ::: `````
reference-manual/Manual/ErrorExplanations/InvalidDottedIdent.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `invalidDottedIdent`" => %%% shortTitle := "invalidDottedIdent" %%% {errorExplanationHeader lean.invalidDottedIdent} This error indicates that dotted identifier notation was used in an invalid or unsupported context. Dotted identifier notation allows an identifier's namespace to be omitted, provided that it can be inferred by Lean based on type information. Details about this notation can be found in the manual section on {ref "identifiers-and-resolution"}[identifiers]. This notation can only be used in a term whose type Lean is able to infer. If there is insufficient type information for Lean to do so, this error will be raised. The inferred type must not be a type universe (e.g., {lean}`Prop` or {lean}`Type`), as dotted-identifier notation is not supported on these types. # Examples :::errorExample "Insufficient Type Information" ```broken def reverseDuplicate (xs : List α) := .reverse (xs ++ xs) ``` ```output Invalid dotted identifier notation: The expected type of `.reverse` could not be determined Hint: Using one of these would be unambiguous: [apply] `Array.reverse` [apply] `BitVec.reverse` [apply] `List.reverse` [apply] `Vector.reverse` [apply] `List.IsInfix.reverse` [apply] `List.IsPrefix.reverse` [apply] `List.IsSuffix.reverse` [apply] `List.Sublist.reverse` [apply] `Lean.Grind.AC.Seq.reverse` [apply] `Std.DTreeMap.Internal.Impl.reverse` [apply] `Std.Tactic.BVDecide.BVUnOp.reverse` [apply] `Std.DTreeMap.Internal.Impl.Ordered.reverse` ``` ```fixed def reverseDuplicate (xs : List α) : List α := .reverse (xs ++ xs) ``` ```lean -show variable (α : Type) (xs : List α) ``` Because the return type of `reverseDuplicate` is not specified, the expected type of `.reverse` cannot be determined. Lean will not use the type of the argument {lean}`xs ++ xs` to infer the omitted namespace. 
Adding the return type {lean}`List α` allows Lean to infer the type of `.reverse` and thus the appropriate namespace ({name}`List`) in which to resolve this identifier. Note that this means that changing the return type of `reverseDuplicate` changes how `.reverse` resolves: if the return type is `T`, then Lean will (attempt to) resolve `.reverse` to a function `T.reverse` whose return type is `T`—even if `T.reverse` does not take an argument of type `List α`. ::: :::errorExample "Dotted Identifier Where Type Universe Expected" ```broken example (n : Nat) := match n > 42 with | .true => n - 1 | .false => n + 1 ``` ```output Invalid dotted identifier notation: Not supported on type universe Prop ``` ```fixed example (n : Nat) := match decide (n > 42) with | .true => n - 1 | .false => n + 1 ``` ```lean -show variable (n : Nat) ``` The proposition {lean}`n > 42` has type {lean}`Prop`, which, being a type universe, does not support dotted-identifier notation. As this example demonstrates, attempting to use this notation in such a context is almost always an error. The intent in this example was for `.true` and `.false` to be Booleans, not propositions; however, {keywordOf Lean.Parser.Term.match}`match` expressions do not automatically perform this coercion for decidable propositions. Explicitly adding {name}`decide` makes the discriminant a {name}`Bool` and allows the dotted-identifier resolution to succeed. :::
reference-manual/Manual/ErrorExplanations/InferBinderTypeFailed.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `inferBinderTypeFailed`" => %%% shortTitle := "inferBinderTypeFailed" %%% {errorExplanationHeader lean.inferBinderTypeFailed} This error occurs when the type of a binder in a declaration header or local binding is not fully specified and cannot be inferred by Lean. Generally, this can be resolved by providing more information to help Lean determine the type of the binder, either by explicitly annotating its type or by providing additional type information at sites where it is used. When the binder in question occurs in the header of a declaration, this error is often accompanied by {ref "lean.inferDefTypeFailed" (domain := Manual.errorExplanation)}[`lean.inferDefTypeFailed`]. Note that if a declaration is annotated with an explicit resulting type—even one that contains holes—Lean will not use information from the definition body to infer parameter types. It may therefore be necessary to explicitly specify the types of parameters whose types would otherwise be inferable without the resulting-type annotation; see the “uninferred binder due to resulting type annotation” example below for a demonstration. In {keyword}`theorem` declarations, the body is never used to infer the types of binders, so any binders whose types cannot be inferred from the rest of the theorem type must include a type annotation. This error may also arise when identifiers that were intended to be declaration names are inadvertently written in binder position instead. In these cases, the erroneous identifiers are treated as binders with unspecified type, leading to a type inference failure. This frequently occurs when attempting to simultaneously define multiple constants of the same type using syntax that does not support this. 
Such situations include: * Attempting to name an example by writing an identifier after the {keyword}`example` keyword; * Attempting to define multiple constants with the same type and (if applicable) value by listing them sequentially after {keyword}`def`, {keyword}`opaque`, or another declaration keyword; * Attempting to define multiple fields of a structure of the same type by sequentially listing their names on the same line of a structure declaration; and * Omitting vertical bars between inductive constructor names. The first three cases are demonstrated in examples below. # Examples :::errorExample "Binder Type Requires New Type Variable" ```broken def identity x := x ``` ```output Failed to infer type of binder `x` ``` ```fixed def identity (x : α) := x ``` In the code above, the type of `x` is unconstrained; as this example demonstrates, Lean does not automatically generate fresh type variables for such binders. Instead, the type `α` of `x` must be specified explicitly. Note that if automatic implicit parameter insertion is enabled (as it is by default), a binder for `α` itself need not be provided; Lean will insert an implicit binder for this parameter automatically. ::: :::errorExample "Uninferred Binder Type Due to Resulting Type Annotation" ```broken def plusTwo x : Nat := x + 2 ``` ```output Failed to infer type of binder `x` Note: Because this declaration's type has been explicitly provided, all parameter types and holes (e.g., `_`) in its header are resolved before its body is processed; information from the declaration body cannot be used to infer what these values should be ``` ```fixed def plusTwo (x : Nat) : Nat := x + 2 ``` Even though `x` is inferred to have type `Nat` in the body of `plusTwo`, this information is not available when elaborating the type of the definition because its resulting type (`Nat`) has been explicitly specified. 
Considering only the information in the header, the type of `x` cannot be determined, resulting in the shown error. It is therefore necessary to include the type of `x` in its binder. ::: :::errorExample "Attempting to Name an Example Declaration" ```broken example trivial_proof : True := trivial ``` ```output Failed to infer type of binder `trivial_proof` Note: Examples do not have names. The identifier `trivial_proof` is being interpreted as a parameter `(trivial_proof : _)`. ``` ```fixed example : True := trivial ``` This code is invalid because it attempts to give a name to an `example` declaration. Examples cannot be named, and an identifier written where a name would appear in other declaration forms is instead elaborated as a binder, whose type cannot be inferred. If a declaration must be named, it should be defined using a declaration form that supports naming, such as `def` or `theorem`. ::: :::errorExample "Attempting to Define Multiple Opaque Constants at Once" ```broken opaque m n : Nat ``` ```output Failed to infer type of binder `n` Note: Multiple constants cannot be declared in a single declaration. The identifier `n` is being interpreted as a parameter `(n : _)`. ``` ```fixed opaque m : Nat opaque n : Nat ``` This example incorrectly attempts to define multiple constants with a single `opaque` declaration. Such a declaration can define only one constant: it is not possible to list multiple identifiers after `opaque` or `def` to define them all to have the same type (or value). Such a declaration is instead elaborated as defining a single constant (e.g., `m` above) with parameters given by the subsequent identifiers (`n`), whose types are unspecified and cannot be inferred. To define multiple global constants, it is necessary to declare each separately. 
::: :::errorExample "Attempting to Define Multiple Structure Fields on the Same Line" ```broken structure Person where givenName familyName : String age : Nat ``` ```output Failed to infer type of binder `familyName` ``` ```fixed "Fixed (separate lines)" structure Person where givenName : String familyName : String age : Nat ``` ```fixed "Fixed (parenthesized)" structure Person where (givenName familyName : String) age : Nat ``` This example incorrectly attempts to define multiple structure fields (`givenName` and `familyName`) of the same type by listing them consecutively on the same line. Lean instead interprets this as defining a single field, `givenName`, parametrized by a binder `familyName` with no specified type. The intended behavior can be achieved by either listing each field on a separate line, or enclosing the line specifying multiple field names in parentheses (see the manual section on {ref "inductive-types"}[Inductive Types] for further details about structure declarations). :::
reference-manual/Manual/ErrorExplanations/PropRecLargeElim.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `propRecLargeElim`" => %%% shortTitle := "propRecLargeElim" %%% {errorExplanationHeader lean.propRecLargeElim} This error occurs when attempting to eliminate a proof of a proposition into a higher type universe. Because Lean's type theory does not allow large elimination from {lean}`Prop`, it is invalid to pattern-match on such values—e.g., by using {keywordOf Lean.Parser.Term.let}`let` or {keywordOf Lean.Parser.Term.match}`match`—to produce a piece of data in a non-propositional universe (i.e., `Type u`). More precisely, the motive of a propositional recursor must be a proposition. (See the manual section on {ref "subsingleton-elimination"}[Subsingleton Elimination] for exceptions to this rule.) Note that this error will arise in any expression that eliminates from a proof into a non-propositional universe, even if that expression occurs within another expression of propositional type (e.g., in a {keywordOf Lean.Parser.Term.let}`let` binding in a proof). The “Defining an intermediate data value within a proof” example below demonstrates such an occurrence. Errors of this kind can usually be resolved by moving the recursor application “outward,” so that its motive is the proposition being proved rather than the type of data-valued term. 
# Examples :::errorExample "Defining an Intermediate Data Value Within a Proof" ```broken example {α : Type} [inst : Nonempty α] (p : α → Prop) : ∃ x, p x ∨ ¬ p x := let val := match inst with | .intro x => x ⟨val, Classical.em (p val)⟩ ``` ```output Tactic `cases` failed with a nested error: Tactic `induction` failed: recursor `Nonempty.casesOn` can only eliminate into `Prop` α : Type motive : Nonempty α → Sort ?u.48 h_1 : (x : α) → motive ⋯ inst✝ : Nonempty α ⊢ motive inst✝ after processing _ the dependent pattern matcher can solve the following kinds of equations - <var> = <term> and <term> = <var> - <term> = <term> where the terms are definitionally equal - <constructor> = <constructor>, examples: List.cons x xs = List.cons y ys, and List.cons x xs = List.nil ``` ```fixed example {α : Type} [inst : Nonempty α] (p : α → Prop) : ∃ x, p x ∨ ¬ p x := match inst with | .intro x => ⟨x, Classical.em (p x)⟩ ``` Even though the {keywordOf Lean.Parser.Command.example}`example` being defined has a propositional type, the body of `val` does not; it has type `α : Type`. Thus, pattern-matching on the proof of `Nonempty α` (a proposition) to produce `val` requires eliminating that proof into a non-propositional type and is disallowed. Instead, the {keywordOf Lean.Parser.Term.match}`match` expression must be moved to the top level of the `example`, where the result is a {lean}`Prop`-valued proof of the existential claim stated in the example's header. This restructuring could also be done using a pattern-matching {keywordOf Lean.Parser.Term.let}`let` binding. 
::: :::errorExample "Extracting the Witness from an Existential Proof" ```broken def getWitness {α : Type u} {p : α → Prop} (h : ∃ x, p x) : α := match h with | .intro x _ => x ``` ```output Tactic `cases` failed with a nested error: Tactic `induction` failed: recursor `Exists.casesOn` can only eliminate into `Prop` α : Type u p : α → Prop motive : (∃ x, p x) → Sort ?u.52 h_1 : (x : α) → (h : p x) → motive ⋯ h✝ : ∃ x, p x ⊢ motive h✝ after processing _ the dependent pattern matcher can solve the following kinds of equations - <var> = <term> and <term> = <var> - <term> = <term> where the terms are definitionally equal - <constructor> = <constructor>, examples: List.cons x xs = List.cons y ys, and List.cons x xs = List.nil ``` ```fixed "in Prop" -- This is `Exists.elim` theorem useWitness {α : Type u} {p : α → Prop} {q : Prop} (h : ∃ x, p x) (hq : (x : α) → p x → q) : q := match h with | .intro x hx => hq x hx ``` ```fixed "in Type" def getWitness {α : Type u} {p : α → Prop} (h : (x : α) ×' p x) : α := match h with | .mk x _ => x ``` In this example, simply relocating the pattern-match is insufficient; the attempted definition `getWitness` is fundamentally unsound. (Consider the case where `p` is {lean}`fun (n : Nat) => n > 0`: if `h` and `h'` are proofs of {lean}`∃ x, x > 0`, with `h` using witness `1` and `h'` witness `2`, then since `h = h'` by proof irrelevance, it follows that `getWitness h = getWitness h'`—i.e., `1 = 2`.) Instead, `getWitness` must be rewritten: either the resulting type of the function must be a proposition (the first fixed example above), or `h` must not be a proposition (the second). In the first corrected example, the resulting type of `useWitness` is now a proposition `q`. This allows us to pattern-match on `h`—since we are eliminating into a propositional type—and pass the unpacked values to `hq`. 
From a programmatic perspective, one can view `useWitness` as rewriting `getWitness` in continuation-passing style, restricting subsequent computations to use its result only to construct values in {lean}`Prop`, as required by the prohibition on propositional large elimination. Note that `useWitness` is the existential elimination principle {name}`Exists.elim`. The second corrected example changes the type of `h` from an existential proposition to a {lean}`Type`-valued dependent pair (corresponding to the {name}`PSigma` type constructor). Since this type is not propositional, eliminating into `α : Type u` is no longer invalid, and the previously attempted pattern match now type-checks. :::
reference-manual/Manual/ErrorExplanations/InductiveParamMismatch.lean
import VersoManual import Manual.Meta.ErrorExplanation open Lean open Verso.Genre Manual InlineLean #doc (Manual) "About: `inductiveParamMismatch`" => %%% shortTitle := "inductiveParamMismatch" %%% {errorExplanationHeader lean.inductiveParamMismatch} This error occurs when a parameter of an inductive type is not uniform in an inductive declaration. The parameters of an inductive type (i.e., those that appear before the colon following the {keyword}`inductive` keyword) must be identical in all occurrences of the type being defined in its constructors' types. If a parameter of an inductive type must vary between constructors, make the parameter an index by moving it to the right of the colon. See the manual section on {ref "inductive-types"}[Inductive Types] for additional details. Note that auto-implicit inlay hints always appear left of the colon in an inductive declaration (i.e., as parameters), even when they are actually indices. This means that double-clicking on an inlay hint to insert such parameters may result in this error. If it does, change the inserted parameters to indices. # Examples :::errorExample "Vector Length Index as a Parameter" ```broken inductive Vec (α : Type) (n : Nat) : Type where | nil : Vec α 0 | cons : α → Vec α n → Vec α (n + 1) ``` ```output Mismatched inductive type parameter in Vec α 0 The provided argument 0 is not definitionally equal to the expected parameter n Note: The value of parameter `n` must be fixed throughout the inductive declaration. Consider making this parameter an index if it must vary. ``` ```fixed inductive Vec (α : Type) : Nat → Type where | nil : Vec α 0 | cons : α → Vec α n → Vec α (n + 1) ``` The length argument `n` of the `Vec` type constructor is declared as a parameter, but other values for this argument appear in the `nil` and `cons` constructors (namely, `0` and `n + 1`). An error therefore appears at the first occurrence of such an argument. 
To correct this, `n` cannot be a parameter of the inductive declaration and must instead be an index, as in the corrected example. On the other hand, `α` remains unchanged throughout all occurrences of `Vec` in the declaration and so is a valid parameter. :::
reference-manual/Manual/NotationsMacros/SyntaxDef.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Defining New Syntax" => %%% tag := "syntax-ext" %%% Lean's uniform representation of syntax is very general and flexible. This means that extensions to Lean's parser do not require extensions to the representation of parsed syntax. # Syntax Model %%% tag := "syntax-data" %%% Lean's parser produces a concrete syntax tree, of type {name}`Lean.Syntax`. {name}`Lean.Syntax` is an inductive type that represents all of Lean's syntax, including commands, terms, tactics, and any custom extensions. All of these are represented by a few basic building blocks: : {deftech}[Atoms] Atoms are the fundamental terminals of the grammar, including literals (such as those for characters and numbers), parentheses, operators, and keywords. : {deftech}[Identifiers] :::keepEnv ```lean -show variable {α : Type u} variable {x : α} ``` Identifiers represent names, such as {lean}`x`, {lean}`Nat`, or {lean}`Nat.add`. Identifier syntax includes a list of pre-resolved names that the identifier might refer to. ::: : {deftech}[Nodes] Nodes represent the parsing of nonterminals. Nodes contain a {deftech}_syntax kind_, which identifies the syntax rule that the node results from, along with an array of child {name Lean.Syntax}`Syntax` values. : Missing Syntax When the parser encounters an error, it returns a partial result, so Lean can provide some feedback about partially-written programs or programs that contain mistakes. Partial results contain one or more instances of missing syntax. Atoms and identifiers are collectively referred to as {deftech}_tokens_. {docstring Lean.Syntax} {docstring Lean.Syntax.Preresolved} # Syntax Node Kinds Syntax node kinds typically identify the parser that produced the node. 
This is one place where the names given to operators or notations (or their automatically-generated internal names) occur. Only nodes contain a field that identifies their kind; by convention, identifiers have the kind {name Lean.identKind}`identKind`, and atoms have their internal string as their kind. Lean's parser wraps each keyword atom `KW` in a singleton node whose kind is `` `token.KW ``. The kind of a syntax value can be extracted using {name Lean.Syntax.getKind}`Syntax.getKind`. {docstring Lean.SyntaxNodeKind} {docstring Lean.Syntax.isOfKind} {docstring Lean.Syntax.getKind} {docstring Lean.Syntax.setKind} # Token and Literal Kinds A number of named kinds are associated with the basic tokens produced by the parser. Typically, single-token syntax productions consist of a {name Lean.Syntax.node}`node` that contains a single {name Lean.Syntax.atom}`atom`; the kind saved in the node allows the value to be recognized. Atoms for literals are not interpreted by the parser: string atoms include their leading and trailing double-quote characters along with any escape sequences contained within, and hexadecimal numerals are saved as a string that begins with {lean}`"0x"`. {ref "typed-syntax-helpers"}[Helpers] such as {name}`Lean.TSyntax.getString` are provided to perform this decoding on demand. 
```lean -show -keep -- Verify claims about atoms and nodes open Lean in partial def noInfo : Syntax → Syntax | .node _ k children => .node .none k (children.map noInfo) | .ident _ s x pre => .ident .none s x pre | .atom _ s => .atom .none s | .missing => .missing /-- info: Lean.Syntax.node (Lean.SourceInfo.none) `num #[Lean.Syntax.atom (Lean.SourceInfo.none) "0xabc123"] -/ #check_msgs in #eval noInfo <$> `(term|0xabc123) /-- info: Lean.Syntax.node (Lean.SourceInfo.none) `str #[Lean.Syntax.atom (Lean.SourceInfo.none) "\"ab\\tc\""] -/ #check_msgs in #eval noInfo <$> `(term|"ab\tc") ``` {docstring Lean.identKind} {docstring Lean.strLitKind} {docstring Lean.interpolatedStrKind} {docstring Lean.interpolatedStrLitKind} {docstring Lean.charLitKind} {docstring Lean.numLitKind} {docstring Lean.scientificLitKind} {docstring Lean.nameLitKind} {docstring Lean.fieldIdxKind} # Internal Kinds {docstring Lean.groupKind} {docstring Lean.nullKind} {docstring Lean.choiceKind} {docstring Lean.hygieneInfoKind} # Source Positions %%% tag := "source-info" %%% Atoms, identifiers, and nodes optionally contain {deftech}[source information] that tracks their correspondence with the original file. The parser saves source information for all tokens, but not for nodes; position information for parsed nodes is reconstructed from their first and last tokens. Not all {name Lean.Syntax}`Syntax` data results from the parser: it may be the result of {tech}[macro expansion], in which case it typically contains a mix of generated and parsed syntax, or it may be the result of {tech (key := "delaborator")}[delaborating] an internal term to display it to a user. In these use cases, nodes may themselves contain source information. Source information comes in two varieties: : {deftech}[Original] Original source information comes from the parser. 
In addition to the original source location, it also contains leading and trailing whitespace that was skipped by the parser, which allows the original string to be reconstructed. This whitespace is saved as offsets into the string representation of the original source code (that is, as {name}`Substring`) to avoid having to allocate copies of substrings. : {deftech}[Synthetic] Synthetic source information comes from metaprograms (including macros) or from Lean's internals. Because there is no original string to be reconstructed, it does not save leading and trailing whitespace. Synthetic source positions are used to provide accurate feedback even when terms have been automatically transformed, as well as to track the correspondence between elaborated expressions and their presentation in Lean's output. A synthetic position may be marked {deftech}_canonical_, in which case some operations that would ordinarily ignore synthetic positions will treat it as if it were not. {docstring Lean.SourceInfo} # Inspecting Syntax ```lean -show section Inspecting open Lean ``` There are three primary ways to inspect {lean}`Syntax` values: : The {lean}`Repr` Instance The {lean}`Repr Syntax` instance produces a very detailed representation of syntax in terms of the constructors of the {lean}`Syntax` type. : The {lean}`ToString` Instance The {lean}`ToString Syntax` instance produces a compact view, representing certain syntax kinds with particular conventions that can make it easier to read at a glance. This instance suppresses source position information. : The Pretty Printer Lean's pretty printer attempts to render the syntax as it would look in a source file, but fails if the nesting structure of the syntax doesn't match the expected shape. 
::::keepEnv :::example "Representing Syntax as Constructors" ```imports -show import Lean.Elab ``` ```lean -show open Lean ``` The {name}`Repr` instance's representation of syntax can be inspected by quoting it in the context of {keywordOf Lean.Parser.Command.eval}`#eval`, which can run actions in the command elaboration monad {name Lean.Elab.Command.CommandElabM}`CommandElabM`. To reduce the size of the example output, the helper {lean}`removeSourceInfo` is used to remove source information prior to display. ```lean partial def removeSourceInfo : Syntax → Syntax | .atom _ str => .atom .none str | .ident _ str x pre => .ident .none str x pre | .node _ k children => .node .none k (children.map removeSourceInfo) | .missing => .missing ``` ```lean (name := reprStx1) #eval do let stx ← `(2 + $(⟨.missing⟩)) logInfo (repr (removeSourceInfo stx.raw)) ``` ```leanOutput reprStx1 Lean.Syntax.node (Lean.SourceInfo.none) `«term_+_» #[Lean.Syntax.node (Lean.SourceInfo.none) `num #[Lean.Syntax.atom (Lean.SourceInfo.none) "2"], Lean.Syntax.atom (Lean.SourceInfo.none) "+", Lean.Syntax.missing] ``` In the second example, {tech}[macro scopes] inserted by quotation are visible on the call to {name}`List.length`. 
```lean (name := reprStx2) #eval do let stx ← `(List.length ["Rose", "Daffodil", "Lily"]) logInfo (repr (removeSourceInfo stx.raw)) ``` The contents of the {tech}[pre-resolved identifier] {name}`List.length` are visible here: ```leanOutput reprStx2 Lean.Syntax.node (Lean.SourceInfo.none) `Lean.Parser.Term.app #[Lean.Syntax.ident (Lean.SourceInfo.none) "List.length".toRawSubstring (Lean.Name.mkNum `List.length._@.Manual.NotationsMacros.SyntaxDef._hyg 2) [Lean.Syntax.Preresolved.decl `List.length []], Lean.Syntax.node (Lean.SourceInfo.none) `null #[Lean.Syntax.node (Lean.SourceInfo.none) `«term[_]» #[Lean.Syntax.atom (Lean.SourceInfo.none) "[", Lean.Syntax.node (Lean.SourceInfo.none) `null #[Lean.Syntax.node (Lean.SourceInfo.none) `str #[Lean.Syntax.atom (Lean.SourceInfo.none) "\"Rose\""], Lean.Syntax.atom (Lean.SourceInfo.none) ",", Lean.Syntax.node (Lean.SourceInfo.none) `str #[Lean.Syntax.atom (Lean.SourceInfo.none) "\"Daffodil\""], Lean.Syntax.atom (Lean.SourceInfo.none) ",", Lean.Syntax.node (Lean.SourceInfo.none) `str #[Lean.Syntax.atom (Lean.SourceInfo.none) "\"Lily\""]], Lean.Syntax.atom (Lean.SourceInfo.none) "]"]]] ``` ::: :::: The {name}`ToString` instance represents the constructors of {name}`Syntax` as follows: * The {name Syntax.ident}`ident` constructor is represented as the underlying name. Source information and pre-resolved names are not shown. * The {name Syntax.atom}`atom` constructor is represented as a string. * The {name Syntax.missing}`missing` constructor is represented by `<missing>`. * The representation of the {name Syntax.node}`node` constructor depends on the kind. If the kind is {lean}`` `null ``, then the node is represented by its child nodes, in order, in square brackets. Otherwise, the node is represented by its kind followed by its child nodes, both surrounded by parentheses. 
:::example "Syntax as Strings" ```imports -show import Lean.Elab ``` ```lean -show open Lean ``` The string representation of syntax can be inspected by quoting it in the context of {keywordOf Lean.Parser.Command.eval}`#eval`, which can run actions in the command elaboration monad {name Lean.Elab.Command.CommandElabM}`CommandElabM`. ```lean (name := toStringStx1) #eval do let stx ← `(2 + $(⟨.missing⟩)) logInfo (toString stx) ``` ```leanOutput toStringStx1 («term_+_» (num "2") "+" <missing>) ``` In the second example, {tech}[macro scopes] inserted by quotation are visible on the call to {name}`List.length`. ```lean (name := toStringStx2) #eval do let stx ← `(List.length ["Rose", "Daffodil", "Lily"]) logInfo (toString stx) ``` ```leanOutput toStringStx2 (Term.app `List.length._@.Manual.NotationsMacros.SyntaxDef._hyg.2 [(«term[_]» "[" [(str "\"Rose\"") "," (str "\"Daffodil\"") "," (str "\"Lily\"")] "]")]) ``` ::: Pretty printing syntax is typically most useful when including it in a message to a user. Normally, Lean automatically invokes the pretty printer when necessary. However, {name}`ppTerm` can be explicitly invoked if needed. ::::keepEnv :::example "Pretty-Printed Syntax" ```imports -show import Lean.Elab ``` ```lean -show open Lean Elab Command ``` The string representation of syntax can be inspected by quoting it in the context of {keywordOf Lean.Parser.Command.eval}`#eval`, which can run actions in the command elaboration monad {name Lean.Elab.Command.CommandElabM}`CommandElabM`. Because new syntax declarations also equip the pretty printer with instructions for displaying them, the pretty printer requires a configuration object. 
This context can be constructed with a helper: ```lean def getPPContext : CommandElabM PPContext := do return { env := (← getEnv), opts := (← getOptions), currNamespace := (← getCurrNamespace), openDecls := (← getOpenDecls) } ``` ```lean (name := ppStx1) #eval show CommandElabM Unit from do let stx ← `(2 + 5) let fmt ← ppTerm (← getPPContext) stx logInfo fmt ``` ```leanOutput ppStx1 2 + 5 ``` In the second example, the {tech}[macro scopes] inserted on {name}`List.length` by quotation cause it to be displayed with a dagger (`✝`). ```lean (name := ppStx2) #eval do let stx ← `(List.length ["Rose", "Daffodil", "Lily"]) let fmt ← ppTerm (← getPPContext) stx logInfo fmt ``` ```leanOutput ppStx2 List.length✝ ["Rose", "Daffodil", "Lily"] ``` Pretty printing wraps lines and inserts indentation automatically. A {tech}[coercion] typically converts the pretty printer's output to the type expected by {name}`logInfo`, using a default layout width. The width can be controlled by explicitly calling {name Std.Format.pretty}`pretty` with a named argument. ```lean (name := ppStx3) #eval do let flowers := #["Rose", "Daffodil", "Lily"] let manyFlowers := flowers ++ flowers ++ flowers let stx ← `(List.length [$(manyFlowers.map (quote (k := `term))),*]) let fmt ← ppTerm (← getPPContext) stx logInfo (fmt.pretty (width := 40)) ``` ```leanOutput ppStx3 List.length✝ ["Rose", "Daffodil", "Lily", "Rose", "Daffodil", "Lily", "Rose", "Daffodil", "Lily"] ``` ::: :::: ```lean -show end Inspecting ``` # Typed Syntax %%% tag := "typed-syntax" %%% Syntax may additionally be annotated with a type that specifies which {tech}[syntax category] it belongs to. {TODO}[Describe the problem here—complicated invisible internal invariants leading to weird error msgs] The {name Lean.TSyntax}`TSyntax` structure contains a type-level list of syntax categories along with a syntax tree. The list of syntax categories typically contains precisely one element, in which case the list structure itself is not shown. 
{docstring Lean.TSyntax} {docstring Lean.SyntaxNodeKinds} {tech}[Quasiquotations] prevent the substitution of typed syntax that does not come from the correct syntactic category. For many of Lean's built-in syntactic categories, there is a set of {tech}[coercions] that appropriately wrap one kind of syntax for another category, such as a coercion from the syntax of string literals to the syntax of terms. Additionally, many helper functions that are only valid on some syntactic categories are defined for the appropriate typed syntax only. ```lean -show /-- info: instCoeHTCTOfCoeHTC -/ #check_msgs in open Lean in #synth CoeHTCT (TSyntax `str) (TSyntax `term) ``` The constructor of {name Lean.TSyntax}`TSyntax` is public, and nothing prevents users from constructing values that break internal invariants. The use of {name Lean.TSyntax}`TSyntax` should be seen as a way to reduce common mistakes, rather than rule them out entirely. :::leanSection ```lean -show open Lean Syntax variable {ks : SyntaxNodeKinds} {sep : String} ``` In addition to {name Lean.TSyntax}`TSyntax`, there are types that represent arrays of syntax, with or without separators. These correspond to {TODO}[xref] repeated elements in syntax declarations or antiquotations. {lean}`TSyntaxArray ks` is an {tech}[abbreviation] for {lean}`Array (TSyntax ks)`, while {lean}`TSepArray ks sep` is a structure; this means that {tech}[generalized field notation] can be used to apply array functions to {name}`TSyntaxArray` but not {name}`TSepArray`. There is a {tech}[coercion] between {lean}`TSepArray ks` and {lean}`TSyntaxArray ks`, as well as explicit conversion functions. This conversion inserts or removes separator elements from the underlying array, and takes time linear in the number of elements. 
::: {docstring Lean.TSyntaxArray} {docstring Lean.TSyntaxArray.raw} {docstring Lean.Syntax.TSepArray} {docstring Lean.Syntax.TSepArray.getElems +allowMissing} {docstring Lean.Syntax.TSepArray.elemsAndSeps} {docstring Lean.Syntax.TSepArray.ofElems} {docstring Lean.Syntax.TSepArray.push +allowMissing} # Aliases A number of aliases are provided for commonly-used typed syntax varieties. These aliases allow code to be written at a higher level of abstraction. {docstring Lean.Term} {docstring Lean.Command} {docstring Lean.Syntax.Level} {docstring Lean.Syntax.Tactic} {docstring Lean.Prec} {docstring Lean.Prio} {docstring Lean.Ident} {docstring Lean.StrLit} {docstring Lean.CharLit} {docstring Lean.NameLit} {docstring Lean.NumLit} {docstring Lean.ScientificLit} {docstring Lean.HygieneInfo} # Helpers for Constructing Syntax %%% tag := "syntax-construction-helpers" %%% {docstring Lean.mkIdent +allowMissing} {docstring Lean.mkIdentFrom} {docstring Lean.mkIdentFromRef +allowMissing} {docstring Lean.mkCIdent +allowMissing} {docstring Lean.mkCIdentFrom} {docstring Lean.mkCIdentFromRef +allowMissing} {docstring Lean.Syntax.mkApp} {docstring Lean.Syntax.mkCApp +allowMissing} {docstring Lean.Syntax.mkLit +allowMissing} {docstring Lean.Syntax.mkCharLit +allowMissing} {docstring Lean.Syntax.mkStrLit +allowMissing} {docstring Lean.Syntax.mkNumLit +allowMissing} {docstring Lean.Syntax.mkNatLit +allowMissing} {docstring Lean.Syntax.mkScientificLit +allowMissing} {docstring Lean.Syntax.mkNameLit +allowMissing} {docstring Lean.mkOptionalNode +allowMissing} {docstring Lean.mkGroupNode +allowMissing} {docstring Lean.mkHole +allowMissing} ## Quoting Data %%% tag := "quote-class" %%% :::leanSection ```lean -show open Lean ``` The {name Lean.Quote}`Quote` class allows values to be converted into typed syntax that represents them. For example, {lean (type:="Term")}`quote 5` represents {lean (type := "Term")}``⟨.node .none `num #[.atom .none "5"]⟩``. 
The class is parameterized over syntax kinds; this allows the same value to be represented appropriately at different kinds. Instance resolution for {name}`Quote` takes typed syntax {tech}[coercions] into account. The syntax kind's default value is {lean}`` `term ``. ```lean -show /-- info: { raw := Lean.Syntax.node (Lean.SourceInfo.none) `num #[Lean.Syntax.atom (Lean.SourceInfo.none) "5"] } -/ #guard_msgs in #eval (quote 5 : Term) ``` ::: :::paragraph There is no guarantee that the result of {name Lean.Quote.quote}`Quote.quote` will successfully elaborate. Generally speaking, the resulting syntax contains quoted versions of all explicit arguments and omits implicit arguments. {docstring Lean.Quote +allowMissing} When defining instances of {name Lean.Quote}`Quote`, use {name Lean.mkCIdent}`mkCIdent` and {name Lean.Syntax.mkCApp}`mkCApp` to avoid variable capture in the generated syntax. ::: :::example "Defining `Quote` Instances" ```lean -show open Lean Syntax ``` To quote a tree of type {name}`Tree`, {name}`mkCIdent` and {name}`mkCApp` are used to ensure that local bindings with similar names cannot interfere. Using double backticks ensures that the constructor names don't contain typos and are correctly resolved. ```lean inductive Tree (α : Type u) : Type u where | leaf | branch (left : Tree α) (val : α) (right : Tree α) instance [Quote α] : Quote (Tree α) where quote := quoteTree where quoteTree | .leaf => mkCIdent ``Tree.leaf | .branch l v r => mkCApp ``Tree.branch #[quoteTree l, quote v, quoteTree r] ``` ::: # Decoding Typed Syntax %%% tag := "typed-syntax-helpers" %%% For literals, Lean's parser produces a singleton node that contains an {name Lean.Syntax.atom}`atom`. The inner atom contains a string with source information, while the node's kind specifies how the atom is to be interpreted. This may involve decoding string escape sequences or interpreting base-16 numeric literals. The helpers in this section perform the correct interpretation. 
{docstring Lean.TSyntax.getId} {docstring Lean.TSyntax.getName} {docstring Lean.TSyntax.getNat} {docstring Lean.TSyntax.getScientific} {docstring Lean.TSyntax.getString} {docstring Lean.TSyntax.getChar} {docstring Lean.TSyntax.getHygieneInfo} # Syntax Categories %%% tag := "syntax-categories" %%% Lean's parser contains a table of {deftech}_syntax categories_, which correspond to nonterminals in a context-free grammar. Some of the most important categories are terms, commands, universe levels, priorities, precedences, and the categories that represent tokens such as literals. Typically, each {tech}[syntax kind] corresponds to a category. New categories can be declared using {keywordOf Lean.Parser.Command.syntaxCat}`declare_syntax_cat`. :::syntax command (title := "Declaring Syntactic Categories") Declares a new syntactic category. ```grammar $[$_:docComment]? declare_syntax_cat $_ $[(behavior := $_)]? ``` ::: The leading identifier behavior is an advanced feature that usually does not need to be modified. It controls the behavior of the parser when it encounters an identifier, and can sometimes cause the identifier to be treated as a non-reserved keyword instead. This is used to avoid turning the name of every {ref "tactics"}[tactic] into a reserved keyword. {docstring Lean.Parser.LeadingIdentBehavior} # Syntax Rules %%% tag := "syntax-rules" %%% Each {tech}[syntax category] is associated with a set of {deftech}_syntax rules_, which correspond to productions in a context-free grammar. Syntax rules can be defined using the {keywordOf Lean.Parser.Command.syntax}`syntax` command. :::syntax command (title := "Syntax Rules") ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind syntax$[:$p]? $[(name := $x)]? $[(priority := $p)]? $_* : $c ``` ::: As with operator and notation declarations, the contents of the documentation comments are shown to users while they interact with the new syntax. 
Attributes may be added to invoke compile-time metaprograms on the resulting definition. Syntax rules interact with {tech}[section scopes] in the same manner as attributes, operators, and notations. By default, syntax rules are available to the parser in any module that transitively imports the one in which they are established, but they may be declared `scoped` or `local` to restrict their availability either to contexts in which the current namespace has been opened or to the current {tech}[section scope], respectively. When multiple syntax rules for a category can match the current input, the {tech}[local longest-match rule] is used to select one of them. Like notations and operators, if there is a tie for the longest match then the declared priorities are used to determine which parse result applies. If this still does not resolve the ambiguity, then all the results that tied are saved. The elaborator is expected to attempt all of them, succeeding when exactly one can be elaborated. The syntax rule's precedence, written immediately after the {keywordOf Lean.Parser.Command.syntax}`syntax` keyword, restricts the parser to use this new syntax only when the precedence context is at least the provided value. {TODO}[Default precedence] Just as with operators and notations, syntax rules may be manually provided with a name; if they are not, an otherwise-unused name is generated. Whether provided or generated, this name is used as the syntax kind in the resulting {name Lean.Syntax.node}`node`. The body of a syntax declaration is even more flexible than that of a notation. String literals specify atoms to match. Subterms may be drawn from any syntax category, rather than just terms, and they may be optional or repeated, with or without interleaved comma separators. Identifiers in syntax rules indicate syntax categories, rather than naming subterms as they do in notations. Finally, the syntax rule specifies which syntax category it extends. 
It is an error to declare a syntax rule in a nonexistent category. ```lean -show -- verify preceding para /-- error: unknown category `nuhUh` -/ #check_msgs in syntax "blah" : nuhUh ``` :::syntax stx -open (title := "Syntax Specifiers") The syntactic category `stx` is the grammar of specifiers that may occur in the body of a {keywordOf Lean.Parser.Command.syntax}`syntax` command. String literals are parsed as {tech}[atoms] (including both keywords such as `if`, `#eval`, or `where`): ```grammar $s:str ``` Leading and trailing spaces in the strings do not affect parsing, but they cause Lean to insert spaces in the corresponding position when displaying the syntax in {tech}[proof states] and error messages. Ordinarily, valid identifiers occurring as atoms in syntax rules become reserved keywords. Preceding a string literal with an ampersand (`&`) suppresses this behavior: ```grammar &$s:str ``` Identifiers specify the syntactic category expected in a given position, and may optionally provide a precedence:{TODO}[Default prec here?] ```grammar $x:ident$[:$p]? ``` The `*` modifier is the Kleene star, matching zero or more repetitions of the preceding syntax. It can also be written using `many`. ```grammar $s:stx * ``` The `+` modifier matches one or more repetitions of the preceding syntax. It can also be written using `many1`. ```grammar $s:stx + ``` The `?` modifier makes a subterm optional, and matches zero or one, but not more, repetitions of the preceding syntax. It can also be written as `optional`. ```grammar $s:stx ? ``` ```grammar optional($s:stx) ``` The `,*` modifier matches zero or more repetitions of the preceding syntax with interleaved commas. It can also be written using `sepBy`. ```grammar $_:stx ,* ``` The `,+` modifier matches one or more repetitions of the preceding syntax with interleaved commas. It can also be written using `sepBy1`. 
```grammar $_:stx ,+ ``` The `,*,?` modifier matches zero or more repetitions of the preceding syntax with interleaved commas, allowing an optional trailing comma after the final repetition. It can also be written using `sepBy` with the `allowTrailingSep` modifier. ```grammar $_:stx ,*,? ``` The `,+,?` modifier matches one or more repetitions of the preceding syntax with interleaved commas, allowing an optional trailing comma after the final repetition. It can also be written using `sepBy1` with the `allowTrailingSep` modifier. ```grammar $_:stx ,+,? ``` The `<|>` operator, which can be written `orelse`, matches either syntax. However, if the first branch consumes any tokens, then it is committed to, and failures will not be backtracked: ```grammar $_:stx <|> $_:stx ``` ```grammar orelse($_:stx, $_:stx) ``` The `!` operator matches the complement of its argument. If its argument fails, then it succeeds, resetting the parsing state. ```grammar ! $_:stx ``` Syntax specifiers may be grouped using parentheses. ```grammar ($_:stx) ``` Repetitions may be defined using `many` and `many1`. The latter requires at least one instance of the repeated syntax. ```grammar many($_:stx) ``` ```grammar many1($_:stx) ``` Repetitions with separators may be defined using `sepBy` and `sepBy1`, which respectively match zero or more occurrences and one or more occurrences, separated by some other syntax. They come in three varieties: * The two-parameter version uses the atom provided in the string literal to parse the separators, and does not allow trailing separators. * The three-parameter version uses the third parameter to parse the separators, using the atom for pretty-printing. * The four-parameter version optionally allows the separator to occur an extra time at the end of the sequence. The fourth argument must always literally be the keyword `allowTrailingSep`. 
```grammar sepBy($_:stx, $_:str) ``` ```grammar sepBy($_:stx, $_:str, $_:stx) ``` ```grammar sepBy($_:stx, $_:str, $_:stx, allowTrailingSep) ``` ```grammar sepBy1($_:stx, $_:str) ``` ```grammar sepBy1($_:stx, $_:str, $_:stx) ``` ```grammar sepBy1($_:stx, $_:str, $_:stx, allowTrailingSep) ``` ::: ::::keepEnv :::example "Parsing Matched Parentheses and Brackets" A language that consists of matched parentheses and brackets can be defined using syntax rules. The first step is to declare a new {tech}[syntax category]: ```lean declare_syntax_cat balanced ``` Next, rules can be added for parentheses and square brackets. To rule out empty strings, the base cases consist of empty pairs. ```lean syntax "(" ")" : balanced syntax "[" "]" : balanced syntax "(" balanced ")" : balanced syntax "[" balanced "]" : balanced syntax balanced balanced : balanced ``` In order to invoke Lean's parser on these rules, there must also be an embedding from the new syntax category into one that may already be parsed: ```lean syntax (name := termBalanced) "balanced " balanced : term ``` These terms cannot be elaborated, but reaching an elaboration error indicates that parsing succeeded: ```lean /-- error: elaboration function for `termBalanced` has not been implemented balanced () -/ #guard_msgs in example := balanced () /-- error: elaboration function for `termBalanced` has not been implemented balanced [] -/ #guard_msgs in example := balanced [] /-- error: elaboration function for `termBalanced` has not been implemented balanced [[]()([])] -/ #guard_msgs in example := balanced [[] () ([])] ``` Similarly, parsing fails when they are mismatched: ```syntaxError mismatch example := balanced [() (]] ``` ```leanOutput mismatch <example>:1:25-1:26: unexpected token ']'; expected ')' or balanced ``` ::: :::: ::::keepEnv :::example "Parsing Comma-Separated Repetitions" A variant of list literals that requires double square brackets and allows a trailing comma can be added with the following syntax: 
```lean syntax "[[" term,*,? "]]" : term ``` Adding a {tech}[macro] that describes how to translate it into an ordinary list literal allows it to be used in tests. ```lean macro_rules | `(term|[[$e:term,*]]) => `([$e,*]) ``` ```lean (name := evFunnyList) #eval [["Dandelion", "Thistle",]] ``` ```leanOutput evFunnyList ["Dandelion", "Thistle"] ``` ::: :::: # Indentation %%% tag := "syntax-indentation" %%% Internally, the parser maintains a saved source position. Syntax rules may include instructions that interact with these saved positions, causing parsing to fail when a condition is not met. Indentation-sensitive constructs, such as {keywordOf Lean.Parser.Term.do}`do`, save a source position, parse their constituent parts while taking this saved position into account, and then restore the original position. In particular, indentation sensitivity is specified by combining {name Lean.Parser.withPosition}`withPosition` or {name Lean.Parser.withPositionAfterLinebreak}`withPositionAfterLinebreak`, which save the source position at the start of parsing some other syntax, with {name Lean.Parser.checkColGt}`colGt`, {name Lean.Parser.checkColGe}`colGe`, and {name Lean.Parser.checkColEq}`colEq`, which compare the current column with the column from the most recently-saved position. {name Lean.Parser.checkLineEq}`lineEq` can also be used to ensure that two positions are on the same line in the source file. :::parserAlias withPosition ::: :::parserAlias withoutPosition ::: :::parserAlias withPositionAfterLinebreak ::: :::parserAlias colGt ::: :::parserAlias colGe ::: :::parserAlias colEq ::: :::parserAlias lineEq ::: ::::keepEnv :::example "Aligned Columns" This syntax for saving notes takes a bulleted list of items, each of which must be aligned at the same column. 
```lean syntax "note " ppLine withPosition((colEq "◦ " str ppLine)+) : term ``` There is no elaborator or macro associated with this syntax, but the following example is accepted by the parser: ```lean +error (name := noteEx1) #check note ◦ "One" ◦ "Two" ``` ```leanOutput noteEx1 elaboration function for `«termNote__◦__»` has not been implemented note ◦ "One" ◦ "Two" ``` The syntax does not require that the list is indented with respect to the opening token, which would require an extra `withPosition` and a `colGt`. ```lean +error (name := noteEx15) #check note ◦ "One" ◦ "Two" ``` ```leanOutput noteEx15 elaboration function for `«termNote__◦__»` has not been implemented note ◦ "One" ◦ "Two" ``` The following examples are not syntactically valid because the columns of the bullet points do not match. ```syntaxError noteEx2 #check note ◦ "One" ◦ "Two" ``` ```leanOutput noteEx2 <example>:4:3-4:4: expected end of input ``` ```syntaxError noteEx2 #check note ◦ "One" ◦ "Two" ``` ```leanOutput noteEx2 <example>:4:5-4:6: expected end of input ``` ::: ::::
reference-manual/Manual/NotationsMacros/Elab.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual hiding seeAlso open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Elaborators" => %%% tag := "elaborators" %%% :::seeAlso * Elaborators process {ref "syntax-ext"}[new syntax extensions]. * {ref "quote-patterns"}[Quotation patterns] are the most typical way to destructure syntax. ::: While macros allow Lean to be extended by translating new syntax into existing syntax, {deftech}_elaborators_ allow the new syntax to be processed directly. Elaborators have access to everything that Lean itself uses to implement each feature of the language. Defining a new elaborator allows a language extension to be just as powerful as any built-in feature of Lean. :::paragraph Elaborators come in two varieties: * {deftech}_Command elaborators_ are used to add new commands to Lean. Commands are implemented as side effects: they may add new constants to the global environment, extend compile-time tables such as the one that tracks {tech}[instances], they can provide feedback in the form of information, warnings, or errors, and they have full access to the {name}`IO` monad. Command elaborators are associated with the {tech (key := "kind")}[syntax kinds] that they can handle. * {deftech}_Term elaborators_ are used to implement new terms by translating the syntax into Lean's core type theory. They can do everything that command elaborators can do, and they additionally have access to the local context in which the term is being elaborated. Term elaborators can look up bound variables, bind new variables, unify two terms, and much more. A term elaborator must return a value of type {name}`Lean.Expr`, which is the AST of the core type theory. ::: This section provides an overview and a few examples of elaborators. 
Because Lean's own elaborator uses the same tools, the source code of the elaborator is a good source of further examples. Just like macros, multiple elaborators may be associated with a syntax kind; they are tried in order, and elaborators may delegate to the next elaborator in the table by throwing the {name Lean.Macro.Exception.unsupportedSyntax}`unsupportedSyntax` exception. :::syntax command (title := "Elaboration Rules") The {keywordOf Lean.Parser.Command.elab_rules}`elab_rules` command takes a sequence of elaboration rules, specified as syntax pattern matches, and adds each as an elaborator. The rules are attempted in order, before previously-defined elaborators, and later elaborators may add further options. ```grammar $[$d:docComment]? $[@[$attrs,*]]? $_:attrKind elab_rules $[(kind := $k)]? $[: $_]? $[<= $_]? $[| `(free{(p:ident"|")?/-- Suitable syntax for {p} -/}) => $e]* ``` ::: Commands, terms, and tactics each maintain a table that maps syntax kinds to elaborators. The syntax category for which the elaborator should be used is specified after the colon, and must be `term`, `command`, or `tactic`. The {keywordOf Lean.Parser.Command.elab_rules}`<=` binds the provided identifier to the current expected type in the context in which a term is being elaborated; it may only be used for term elaborators, and if present, then `term` is implied as the syntax category. :::syntax attr (title := "Elaborator Attribute") Elaborators can be directly associated with syntax kinds by applying the appropriate attributes. Each takes the name of a syntax kind and associates the definition with the kind. ```grammar term_elab $_ ``` ```grammar command_elab $_ ``` ```grammar tactic $_ ``` ::: # Command Elaborators :::::leanSection ```lean -show open Lean Elab Command ``` A command elaborator has type {name}`CommandElab`, which is an abbreviation for {lean}`Syntax → CommandElabM Unit`. 
Command elaborators may be implicitly defined using {keywordOf Lean.Parser.Command.elab_rules}`elab_rules`, or explicitly by defining a function and applying the {attr}`command_elab` attribute. :::example "Querying the Environment" ```imports -show import Lean.Elab ``` ```lean -show open Lean ``` A command elaborator can be used to query the environment to discover how many constants have a given name. This example uses {name}`getEnv` from the {name}`MonadEnv` class to get the current environment. {name}`Environment.constants` yields a mapping from names to information about them (e.g. their type and whether they are a definition, {tech}[inductive type] declaration, etc). {name}`logInfoAt` allows informational output to be associated with syntax from the original program, and a {tech}[token antiquotation] is used to implement the Lean convention that output from interactive commands is associated with their keyword. ```lean syntax "#count_constants " ident : command elab_rules : command | `(#count_constants%$tok $x) => do let pattern := x.getId let env ← getEnv let mut count := 0 for (y, _) in env.constants do if pattern.isSuffixOf y then count := count + 1 logInfoAt tok m!"Found {count} instances of '{pattern}'" ``` ```lean (name := run) def interestingName := 55 def NS.interestingName := "Another one" #count_constants interestingName ``` ```leanOutput run Found 2 instances of 'interestingName' ``` ::: ::::: # Term Elaborators :::::leanSection ```lean -show open Lean Elab Term ``` A term elaborator has type {name}`TermElab`, which is an abbreviation for {lean}`Syntax → Option Expr → TermElabM Expr`. The optional {lean}`Expr` parameter is the type expected for the term being elaborated, which is `none` if no type is yet known. Like command elaborators, term elaborators may be implicitly defined using {keywordOf Lean.Parser.Command.elab_rules}`elab_rules`, or explicitly by defining a function and applying the {attr}`term_elab` attribute. 
:::example "Avoiding a Type" ```imports -show import Lean.Elab ``` ```lean -show open Lean Elab Term ``` This example demonstrates an elaborator for syntax that is the opposite of a type ascription. The provided term may have any type _other_ than the one indicated, and metavariables are solved pessimistically. In this example, {name}`elabType` invokes the term elaborator and then ensures that the resulting term is a type. {name}`Meta.inferType` infers a type for a term, and {name}`Meta.isDefEq` attempts to make two terms {tech (key := "definitional equality")}[definitionally equal] by unification, returning {lean}`true` if it succeeds. ```lean syntax (name := notType) "(" term " !: " term ")" : term @[term_elab notType] def elabNotType : TermElab := fun stx _ => do let `(($tm:term !: $ty:term)) := stx | throwUnsupportedSyntax let unexpected ← elabType ty let e ← elabTerm tm none let eTy ← Meta.inferType e if (← Meta.isDefEq eTy unexpected) then throwErrorAt tm m!"Got unwanted type {eTy}" else pure e ``` If the type position does not contain a type, then `elabType` throws an error: ```lean (name := notType) +error #eval ([1, 2, 3] !: "not a type") ``` ```leanOutput notType type expected, got ("not a type" : String) ``` If the term's type is definitely not equal to the provided type, then elaboration succeeds: ```lean (name := ok) #eval ([1, 2, 3] !: String) ``` ```leanOutput ok [1, 2, 3] ``` If the types match, an error is thrown: ```lean (name := nope) +error #eval (5 !: Nat) ``` ```leanOutput nope Got unwanted type Nat ``` The type equality check may fill in missing information, so {lean (type := "String")}`sorry` (which may have any type) is also rejected: ```lean (name := unif) +error #eval (sorry !: String) ``` ```leanOutput unif Got unwanted type String ``` ::: :::example "Using Any Local Variable" ```imports -show import Lean.Elab ``` ```lean -show open Lean ``` Term elaborators have access to the expected type and to the local context. 
This can be used to create a term analogue of the {tactic}`assumption` tactic. The first step is to access the local context using {name}`getLocalHyps`. It returns the context with the outermost bindings on the left, so it is traversed in reverse order. For each local assumption, a type is inferred with {name}`Meta.inferType`. If it can be equal to the expected type, then the assumption is returned; if no assumption is suitable, then an error is produced. ```lean syntax "anything!" : term elab_rules <= expected | `(anything!) => do let hyps ← getLocalHyps for h in hyps.reverse do let t ← Meta.inferType h if (← Meta.isDefEq t expected) then return h throwError m!"No assumption in {hyps} has type {expected}" ``` The new syntax finds the function's bound variable: ```lean (name := app) #eval (fun (n : Nat) => 2 + anything!) 5 ``` ```leanOutput app 7 ``` It chooses the most recent suitable variable, as desired: ```lean (name := lets) #eval let x := "x" let y := "y" "It was " ++ anything! ``` ```leanOutput lets "It was y" ``` When no assumption is suitable, it returns an error that describes the attempt: ```lean (name := noFun) +error #eval let x := Nat.zero let y := "hello" fun (f : Nat → Nat) => (anything! : Int → Int) ``` ```leanOutput noFun No assumption in [x, y, f] has type Int → Int ``` Because it uses unification, the natural number literal is chosen here, because numeric literals may have any type with an {name}`OfNat` instance. Unfortunately, there is no {name}`OfNat` instance for functions, so instance synthesis later fails. ```lean (name := poly) +error #eval let x := 5 let y := "hello" (anything! 
: Int → Int) ``` ```leanOutput poly failed to synthesize instance of type class OfNat (Int → Int) 5 numerals are polymorphic in Lean, but the numeral `5` cannot be used in a context where the expected type is Int → Int due to the absence of the instance above Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` ::: ::::: # Custom Tactics Custom tactics are described in the {ref "custom-tactics"}[section on tactics].
reference-manual/Manual/NotationsMacros/Operators.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Custom Operators" => %%% tag := "operators" %%% Lean supports custom infix, prefix, and postfix operators. New operators can be added by any Lean library, and the new operators have equal status to those that are part of the language. Each new operator is assigned an interpretation as a function, after which uses of the operator are translated into uses of the function. The operator's translation into a function call is referred to as its {deftech}_expansion_. If this function is a {tech}[type class] {tech}[method], then the resulting operator can be overloaded by defining instances of the class. All operators have a {deftech}_precedence_. Operator precedence determines the order of operations for unparenthesized expressions: because multiplication has a higher precedence than addition, {lean}`2 + 3 * 4` is equivalent to {lean}`2 + (3 * 4)`, and {lean}`2 * 3 + 4` is equivalent to {lean}`(2 * 3) + 4`. Infix operators additionally have an {deftech}_associativity_ that determines the meaning of a chain of operators that have the same precedence: : {deftech}[Left-associative] These operators nest to the left. Addition is left- associative, so {lean}`2 + 3 + 4 + 5` is equivalent to {lean}`((2 + 3) + 4) + 5`. : {deftech}[Right-associative] These operators nest to the right. The product type is right-associative, so {lean}`Nat × String × Unit × Option Int` is equivalent to {lean}`Nat × (String × (Unit × Option Int))`. : {deftech}[Non-associative] Chaining these operators is a syntax error. Explicit parenthesization is required. 
Equality is non-associative, so the following is an error: ```syntaxError eqs (category := term) 1 + 2 = 3 = 2 + 1 ``` The parser error is: ```leanOutput eqs <example>:1:10-1:11: expected end of input ``` ::::keepEnv :::example "Precedence for Prefix and Infix Operators" ```lean -show axiom A : Prop axiom B : Prop example : (¬A ∧ B = (¬A) ∧ B) = (¬A ∧ ((B = ¬A) ∧ B)) := rfl example : (¬A ∧ B) = ((¬A) ∧ B) := rfl ``` The proposition {lean}`¬A ∧ B` is equivalent to {lean}`(¬A) ∧ B`, because `¬` has a higher precedence than `∧`. Because `∧` has higher precedence than `=` and is right-associative, {lean}`¬A ∧ B = (¬A) ∧ B` is equivalent to {lean}`¬A ∧ ((B = ¬A) ∧ B)`. ::: :::: Lean provides commands for defining new operators: :::syntax command (title := "Operator Declarations") Non-associative infix operators are defined using {keywordOf Lean.Parser.Command.mixfix}`infix`: ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind infix:$_ $[(name := $x)]? $[(priority := $_:prio)]? $s:str => $t:term ``` Left-associative infix operators are defined using {keywordOf Lean.Parser.Command.mixfix}`infixl`: ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind infixl:$_ $[(name := $x)]? $[(priority := $_:prio)]? $s:str => $t:term ``` Right-associative infix operators are defined using {keywordOf Lean.Parser.Command.mixfix}`infixr`: ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind infixr:$_ $[(name := $x)]? $[(priority := $_:prio)]? $s:str => $t:term ``` Prefix operators are defined using {keywordOf Lean.Parser.Command.mixfix}`prefix`: ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind prefix:$_ $[(name := $x)]? $[(priority := $_:prio)]? $s:str => $t:term ``` Postfix operators are defined using {keywordOf Lean.Parser.Command.mixfix}`postfix`: ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind postfix:$_ $[(name := $x)]? $[(priority := $_:prio)]? 
$s:str => $t:term ``` ::: Each of these commands may be preceded by {tech}[documentation comments] and {tech}[attributes]. The documentation comment is shown when the user hovers their mouse over the operator, and attributes may invoke arbitrary metaprograms, just as for any other declaration. The attribute {attr}`inherit_doc` causes the documentation of the function that implements the operator to be reused for the operator itself. Operators interact with {tech}[section scopes] in the same manner as attributes. By default, operators are available in any module that transitively imports the one in which they are established, but they may be declared `scoped` or `local` to restrict their availability either to contexts in which the current namespace has been opened or to the current {tech}[section scope], respectively. Custom operators require a {ref "precedence"}[precedence] specifier, following a colon. There is no default precedence to fall back to for custom operators. Operators may be explicitly named. This name denotes the extension to Lean's syntax, and is primarily used for metaprogramming. If no name is explicitly provided, then Lean generates one based on the operator. The specifics of the assignment of this name should not be relied upon, both because the internal name assignment algorithm may change and because the introduction of similar operators in upstream dependencies may lead to a clash, in which case Lean will modify the assigned name until it is unique. ::::keepEnv :::example "Assigned Operator Names" Given this infix operator: ```lean infix:90 " ⤴ " => Option.getD ``` the internal name {name}`«term_⤴_»` is assigned to the resulting parser extension. ::: :::: ::::keepEnv :::example "Provided Operator Names" Given this infix operator: ```lean infix:90 (name := getDOp) " ⤴ " => Option.getD ``` the resulting parser extension is named {name}`getDOp`. 
::: :::: ::::keepEnv :::example "Inheriting Documentation" Given this infix operator: ```lean @[inherit_doc] infix:90 " ⤴ " => Option.getD ``` the resulting parser extension has the same documentation as {name}`Option.getD`. ::: :::: When multiple operators are defined that share the same syntax, Lean's parser attempts all of them. If more than one succeed, the one that used the most input is selected—this is called the {deftech}_local longest-match rule_. In some cases, parsing multiple operators may succeed, all of them covering the same range of the input. In these cases, the operator's {tech}[priority] is used to select the appropriate result. Finally, if multiple operators with the same priority tie for the longest match, the parser saves all of the results, and the elaborator attempts each in turn, failing if elaboration does not succeed on exactly one of them. :::::keepEnv ::::example "Ambiguous Operators and Priorities" :::keepEnv Defining an alternative implementation of `+` as {lean}`Or` requires only an infix operator declaration. ```lean infix:65 " + " => Or ``` With this declaration, Lean attempts to elaborate addition both using the built-in syntax for {name}`HAdd.hAdd` and the new syntax for {lean}`Or`: ```lean (name := trueOrFalse1) #check True + False ``` ```leanOutput trueOrFalse1 True + False : Prop ``` ```lean (name := twoPlusTwo1) #check 2 + 2 ``` ```leanOutput twoPlusTwo1 2 + 2 : Nat ``` However, because the new operator is not associative, the {tech}[local longest-match rule] means that only {name}`HAdd.hAdd` applies to an unparenthesized three-argument version: ```lean +error (name := trueOrFalseOrTrue1) #check True + False + True ``` ```leanOutput trueOrFalseOrTrue1 failed to synthesize instance of type class HAdd Prop Prop ?m.3 Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. 
``` ::: :::keepEnv If the infix operator is declared with high priority, then Lean does not try the built-in {name}`HAdd.hAdd` operator in ambiguous cases: ```lean infix:65 (priority := high) " + " => Or ``` ```lean (name := trueOrFalse2) #check True + False ``` ```leanOutput trueOrFalse2 True + False : Prop ``` ```lean (name := twoPlusTwo2) +error #check 2 + 2 ``` ```leanOutput twoPlusTwo2 failed to synthesize instance of type class OfNat Prop 2 numerals are polymorphic in Lean, but the numeral `2` cannot be used in a context where the expected type is Prop due to the absence of the instance above Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` The new operator is not associative, so the {tech}[local longest-match rule] means that only {name}`HAdd.hAdd` applies to the three-argument version: ```lean +error (name := trueOrFalseOrTrue2) #check True + False + True ``` ```leanOutput trueOrFalseOrTrue2 failed to synthesize instance of type class HAdd Prop Prop ?m.3 Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` ::: :::: ::::: The actual operator is provided as a string literal. The new operator must satisfy the following requirements: * It must contain at least one character. * The first character may not be a single or double quote (`'` or `"`), unless the operator is `''`. * It may not begin with a backtick (`` ` ``) followed by a character that would be a valid prefix of a quoted name. * It may not begin with a digit. * It may not include internal whitespace. The operator string literal may begin or end with a space. These are not part of the operator's syntax, and their presence does not require spaces around uses of the operator. However, the presence of spaces cause Lean to insert spaces when showing the operator to the user. 
Omitting them causes the operator's arguments to be displayed immediately next to the operator itself. :::keepEnv ```lean -show -- Test claim about internal whitespace in preceding paragraph /-- error: invalid atom -/ #check_msgs in infix:99 " <<<< >>>> " => Nat.add --- Test further claims about allowed atoms /-- error: invalid atom -/ #check_msgs in infix:9 (name := bogus) "" => Nat.mul /-- error: invalid atom -/ #check_msgs in infix:9 (name := alsobogus) " ` " => Nat.mul -- this one's OK #check_msgs in infix:9 (name := nonbogus) " `` " => Nat.mul /-- error: invalid atom -/ #check_msgs in infix:9 (name := bogus) "`a" => Nat.mul ``` ::: Finally, the operator's meaning is provided, separated from the operator by {keywordOf Lean.Parser.Command.mixfix}`=>`. This may be any Lean term. Uses of the operator are desugared into function applications, with the provided term in the function position. Prefix and postfix operators apply the term to their single argument as an explicit argument. Infix operators apply the term to the left and right arguments, in that order. Other than its ability to accept arguments at each call site, there are no specific requirements imposed on the term. Operators may construct functions, so the term may expect more parameters than the operator. Implicit and {tech}[instance-implicit] parameters are resolved at each application site, which allows the operator to be defined by a {tech}[type class] {tech}[method]. ```lean -show -keep -- Double-check claims about operators above prefix:max "blah" => Nat.add #check (blah 5) ``` If the term consists either of a name from the global environment or of an application of such a name to one or more arguments, then Lean automatically generates an {tech}[unexpander] for the operator. This means that the operator will be displayed in {tech}[proof states], error messages, and other output from Lean when the function term otherwise would have been displayed. 
Lean does not track whether the operator was used in the original term; it is inserted at every opportunity. :::::keepEnv ::::example "Custom Operators in Lean's Output" The function {lean}`perhapsFactorial` computes a factorial for a number if it's not too large. ```lean def fact : Nat → Nat | 0 => 1 | n+1 => (n + 1) * fact n def perhapsFactorial (n : Nat) : Option Nat := if n < 8 then some (fact n) else none ``` The postfix interrobang operator can be used to represent it. ```lean postfix:90 "‽" => perhapsFactorial ``` When attempting to prove that {lean}`∀ n, n ≥ 8 → (perhapsFactorial n).isNone`, the initial proof state uses the new operator, even though the theorem as written does not: ```proofState ∀ n, n ≥ 8 → (perhapsFactorial n).isNone := by skip /-- ⊢ ∀ (n : Nat), n ≥ 8 → n‽.isNone = true -/ ``` :::: ::::: :::example "Infix Operators, Defined Functions, and Unexpanders" When an operator does not expand to the application of a defined function, no unexpander is generated. Here, the postfix interrobang expands to an anonymous function that takes a factorial if its argument is not too large. ```lean def fact : Nat → Nat | 0 => 1 | n+1 => (n + 1) * fact n set_option quotPrecheck false in postfix:90 "‽" => fun (n : Nat) => if n < 8 then some (fact n) else none ``` Because there is no named function in the expansion, no unexpander can be generated: ```lean (name := noUnexp) #check 7‽ ``` ```leanOutput noUnexp (fun n => if n < 8 then some (fact n) else none) 7 : Option Nat ``` Using a named function results in an unexpander, which is used for terms that consist of applications of {name}`perhapsFactorial`: ```lean def perhapsFactorial (n : Nat) : Option Nat := if n < 8 then some (fact n) else none postfix:90 "‽'" => perhapsFactorial ``` ```lean (name := withUnexp) #check 7‽' ``` ```leanOutput withUnexp 7‽' : Option Nat ``` :::
reference-manual/Manual/NotationsMacros/Precedence.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Precedence" => %%% tag := "precedence" %%% Infix operators, notations, and other syntactic extensions to Lean make use of explicit {tech}[precedence] annotations. While precedences in Lean can technically be any natural number, by convention they range from {evalPrec}`min` to {evalPrec}`max`, respectively denoted `min` and `max`.{TODO}[Fix the keywordOf operator and use it here] Function application has the highest precedence. :::syntax prec -open (title := "Parser Precedences") Most operator precedences consist of explicit numbers. The named precedence levels denote the outer edges of the range, close to the minimum or maximum, and are typically used by more involved syntax extensions. ```grammar $n:num ``` Precedences may also be denoted as sums or differences of precedences; these are typically used to assign precedences that are relative to one of the named precedences. ```grammar $p + $p ``` ```grammar $p - $p ``` ```grammar ($p) ``` The maximum precedence is used to parse terms that occur in a function position. Operators should typically not use this level, because it can interfere with users' expectation that function application binds more tightly than any other operator, but it is useful in more involved syntax extensions to indicate how other constructs interact with function application. ```grammar max ``` Argument precedence is one less than the maximum precedence. This level is useful for defining syntax that should be treated as an argument to a function, such as {keywordOf Lean.Parser.Term.fun}`fun` or {keywordOf Lean.Parser.Term.do}`do`. 
```grammar arg ``` Lead precedence is less than argument precedence, and should be used for custom syntax that should not occur as a function argument, such as {keywordOf Lean.Parser.Term.let}`let`. ```grammar lead ``` The minimum precedence can be used to ensure that an operator binds less tightly than all other operators. ```grammar min ``` :::
reference-manual/Manual/NotationsMacros/Notations.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Notations" => %%% tag := "notations" %%% The term {deftech}_notation_ is used in two ways in Lean: it can refer to the general concept of concise ways of writing down ideas, and it is the name of a language feature that allows notations to be conveniently implemented with little code. Like custom operators, Lean notations allow the grammar of terms to be extended with new forms. However, notations are more general: the new syntax may freely intermix required keywords or operators with subterms, and they provide more precise control over precedence levels. Notations may also rearrange their parameters in the resulting subterms, while infix operators provide them to the function term in a fixed order. Because notations may define operators that use a mix of prefix, infix, and postfix components, they can be called {deftech}_mixfix_ operators. :::syntax command (title := "Notation Declarations") Notations are defined using the {keywordOf Lean.Parser.Command.notation}`notation` command. ```grammar $[$_:docComment]? $[$_:attributes]? $_:attrKind notation$[:$_:prec]? $[(name := $_:ident)]? $[(priority := $_:prio)]? $[$_:notationItem]* => $_:term ``` ::: :::syntax Lean.Parser.Command.notationItem -open (title := "Notation Items") The body of a notation definition consists of a sequence of {deftech}_notation items_, which may be either string literals or identifiers with optional precedences. ```grammar $s:str ``` ```grammar $x:ident$[:$_:prec]? ``` ::: As with operator declarations, the contents of the documentation comments are shown to users while they interact with the new syntax. 
Adding the {attr}`inherit_doc` attribute causes the documentation comment of the function at the head of the term into which the notation expands to be copied to the new syntax. Other attributes may be added to invoke other compile-time metaprograms on the resulting definition. Notations interact with {tech}[section scopes] in the same manner as attributes and operators. By default, notations are available in any module that transitively imports the one in which they are established, but they may be declared `scoped` or `local` to restrict their availability either to contexts in which the current namespace has been opened or to the current {tech}[section scope], respectively. Like operators, the {tech}[local longest-match rule] is used while parsing notations. If more than one notation ties for the longest match, the declared priorities are used to determine which parse result applies. If this still does not resolve the ambiguity, then all are saved, and the elaborator is expected to attempt all of them, succeeding when exactly one can be elaborated. Rather than a single operator with its fixity and token, the body of a notation declaration consists of a sequence of {deftech}_notation items_, which may be either new {tech}[atoms] (including both keywords such as `if`, `#eval`, or `where` and symbols such as `=>`, `+`, `↗`, `⟦`, or `⋉`) or positions for terms. Just as they do in operators, string literals identify the placement of atoms. Leading and trailing spaces in the strings do not affect parsing, but they cause Lean to insert spaces in the corresponding position when displaying the syntax in {tech}[proof states] and error messages. Identifiers indicate positions where terms are expected, and name the corresponding term so it can be inserted in the notation's expansion. While custom operators have a single notion of precedence, there are many involved in a notation. The notation itself has a precedence, as does each term to be parsed. 
The notation's precedence determines which contexts it may be parsed in: the parser only attempts to parse productions whose precedence is at least as high as the current context. For example, because multiplication has higher precedence than addition, the parser will attempt to parse an infix multiplication term while parsing the arguments to addition, but not vice versa. The precedence of each term to be parsed determines which other productions may occur in them. If no precedence is supplied for the notation itself, the default value depends on the form of the notation. If the notation both begins and ends with an atom (represented by string literals), then the default precedence is `max`.{TODO}[keywordOf] This applies both to notations that consist only of a single atom and to notations with multiple items, in which the first and last items are both atoms. Otherwise, the default precedence of the whole notation is `lead`. If no precedence is provided for notation items that are terms, then they default to precedence `min`. ```lean -keep -show -- Test for default precedences for notations /-- Parser max -/ notation "takesMax " e:max => e /-- Parser lead -/ notation "takesLead " e:lead => e /-- Parser min -/ notation "takesMin " e:min => e /-- Take the first one -/ notation e1 " <# " e2 => e1 /-- Take the first one in brackets! 
-/ notation "<<<<<" e1 " <<# " e2 ">>>>>" => e1 elab "#parse_test " "[" e:term "]" : command => do Lean.logInfoAt e (toString e) pure () -- Here, takesMax vs takesLead distinguishes the notations /-- info: («term_<#_» (termTakesMax_ "takesMax" (num "1")) "<#" (num "2")) -/ #check_msgs in #parse_test [ takesMax 1 <# 2 ] /-- info: (termTakesLead_ "takesLead" («term_<#_» (num "1") "<#" (num "2"))) -/ #check_msgs in #parse_test [ takesLead 1 <# 2 ] -- Here, takesMax vs takesLead does not distinguish the notations because both have precedence `max` /-- info: (termTakesMax_ "takesMax" («term<<<<<_<<#_>>>>>» "<<<<<" (num "1") "<<#" (num "2") ">>>>>")) -/ #check_msgs in #parse_test [ takesMax <<<<< 1 <<# 2 >>>>> ] /-- info: (termTakesLead_ "takesLead" («term<<<<<_<<#_>>>>>» "<<<<<" (num "1") "<<#" (num "2") ">>>>>")) -/ #check_msgs in #parse_test [ takesLead <<<<< 1 <<# 2 >>>>> ] ``` After the required double arrow ({keywordOf Lean.Parser.Command.notation}`=>`), the notation is provided with an expansion. While operators are always expanded by applying their function to the operator's arguments in order, notations may place their term items in any position in the expansion. The terms are referred to by name. Term items may occur any number of times in the expansion. Because notation expansion is a purely syntactic process that occurs prior to elaboration or code generation, duplicating terms in the expansion may lead to duplicated computation when the resulting term is evaluated, or even duplicated side effects when working in a monad. 
::::keepEnv :::example "Ignored Terms in Notation Expansion" This notation ignores its first parameter: ```lean notation (name := ignore) "ignore " _ign:arg e:arg => e ``` The term in the ignored position is discarded, and Lean never attempts to elaborate it, so terms that would otherwise result in errors can be used here: ```lean (name := ignore) #eval ignore (2 + "whatever") 5 ``` ```leanOutput ignore 5 ``` However, the ignored term must still be syntactically valid: ```syntaxError ignore' (category := command) #eval ignore (2 +) 5 ``` ```leanOutput ignore' <example>:1:17-1:18: unexpected token ')'; expected term ``` ::: :::: ::::keepEnv :::example "Duplicated Terms in Notation Expansion" The {keywordOf dup}`dup!` notation duplicates its sub-term. ```lean notation (name := dup) "dup!" t:arg => (t, t) ``` Because the term is duplicated, it can be elaborated separately with different types: ```lean def e : Nat × Int := dup! (2 + 2) ``` Printing the resulting definition demonstrates that the work of addition will be performed twice: ```lean (name := dup) #print e ``` ```leanOutput dup def e : Nat × Int := (2 + 2, 2 + 2) ``` ::: :::: When the expansion consists of the application of a function defined in the global environment and each term in the notation occurs exactly once, an {tech}[unexpander] is generated. The new notation will be displayed in {tech}[proof states], error messages, and other output from Lean when matching function application terms otherwise would have been displayed. As with custom operators, Lean does not track whether the notation was used in the original term; it is used at every opportunity in Lean's output. :::example "Notations, Defined Functions, and Unexpanders" When a notation does not expand to the application of a defined function, no unexpander is generated. 
Here, the notation expands to an anonymous function: ```lean notation "[" start " ⇒ " stop "]" => fun x => x > start && x < stop ``` Because there is no named function in the expansion, no unexpander can be generated: ```lean (name := noUnexp) #check [5 ⇒ 8] ``` ```leanOutput noUnexp fun x => decide (x > 5) && decide (x < 8) : Nat → Bool ``` Using a named function results in an unexpander, which is used for terms that consist of applications of {name}`between`: ```lean def between (start stop : Nat) : Nat → Prop := fun x => x > start && x < stop notation "[" start " ⇒' " stop "]" => between start stop ``` ```lean (name := withUnexp) #check [5 ⇒' 8] ``` ```leanOutput withUnexp [5 ⇒' 8] : Nat → Prop ``` ::: # Operators and Notations %%% tag := "operators-and-notations" %%% Internally, operator declarations are translated into notation declarations. Term notation items are inserted where the operator would expect arguments, and in the corresponding positions in the expansion. For prefix and postfix operators, the notation's precedence as well as the precedences of its term items is the operator's declared precedence. For non-associative infix operators, the notation's precedence is the declared precedence, but both arguments are parsed at a precedence level that is one higher, which prevents successive uses of the notation without parentheses. Associative infix operators use the operator's precedence for the notation and for one argument, while a precedence that is one level higher is used for the other argument; this prevents successive applications in one direction only. Left-associative operators use the higher precedence for their right argument, while right-associative operators use the higher precedence for their left argument.
reference-manual/Manual/NotationsMacros/Delab.lean
Just as macro expansion automatically annotates the resulting syntax with synthetic source information that corresponds to the original syntax's position, the unexpansion mechanism preserves the resulting syntax's association with the underlying {name}`Expr`.
Occurrences of the constant that are not applications are treated as applications with zero arguments.
{ref "operators"}[Custom operators] and {ref "notations"}[notations] automatically create unexpanders for the syntax that they introduce. :::syntax attr (title := "Unexpander Registration") ```grammar app_unexpander $_:ident ``` Registers an unexpander of type {name}`Unexpander` for applications of a constant. ::: :::::example "Custom Unit Type" ::::keepEnv A type equivalent to {lean}`Unit`, but with its own notation, can be defined as a zero-field structure and a macro: ```lean structure Solo where mk :: syntax "‹" "›" : term macro_rules | `(term|‹›) => ``(Solo.mk) ``` While the new notation can be used to write theorem statements, it does not appear in proof states. For example, when proving that all values of type {lean}`Solo` are equal to {lean}`‹›`, the initial proof state is: ```proofState ∀v, v = ‹› := by intro v /-- v : Solo ⊢ v = { } -/ ``` This proof state shows the constructor using {tech}[structure instance] syntax. An unexpander can be used to override this choice. Because {name}`Solo.mk` cannot be applied to any arguments, the unexpander is free to ignore the syntax, which will always be {lean (type := "UnexpandM Syntax")}`` `(Solo.mk) ``. ```lean @[app_unexpander Solo.mk] def unexpandSolo : Lean.PrettyPrinter.Unexpander | _ => `(‹›) ``` With this unexpander, the initial state of the proof now renders with the correct syntax: ```proofState ∀v, v = ‹› := by intro v /-- v : Solo ⊢ v = ‹› -/ ``` :::: ::::: :::::example "Unexpansion and Arguments" A {name}`ListCursor` represents a position in a {lean}`List`. {name}`ListCursor.before` contains the reversed list of elements prior to the position, and {name}`ListCursor.after` contains the elements after the position. 
```lean structure ListCursor (α) where before : List α after : List α deriving Repr ``` List cursors can be moved to the left or to the right: ```lean def ListCursor.left : ListCursor α → Option (ListCursor α) | ⟨[], _⟩ => none | ⟨l :: ls, rs⟩ => some ⟨ls, l :: rs⟩ def ListCursor.right : ListCursor α → Option (ListCursor α) | ⟨_, []⟩ => none | ⟨ls, r :: rs⟩ => some ⟨r :: ls, rs⟩ ``` They can also be moved all the way to the left or all the way to the right: ```lean def ListCursor.rewind : ListCursor α → ListCursor α | xs@⟨[], _⟩ => xs | ⟨l :: ls, rs⟩ => rewind ⟨ls, l :: rs⟩ termination_by xs => xs.before def ListCursor.fastForward : ListCursor α → ListCursor α | xs@⟨_, []⟩ => xs | ⟨ls, r :: rs⟩ => fastForward ⟨r :: ls, rs⟩ termination_by xs => xs.after ``` ```lean -show def ListCursor.ofList (xs : List α) : ListCursor α where before := [] after := xs def ListCursor.toList : (xs : ListCursor α) → List α | ⟨[], rs⟩ => rs | ⟨l::ls, rs⟩ => toList ⟨ls, l :: rs⟩ termination_by xs => xs.before ``` However, the need to reverse the list of previous elements can make list cursors difficult to understand. A cursor can be given a notation in which a flag (`🚩`) marks the cursor's location in a list: ```lean syntax "[" term,* " 🚩 " term,* "]": term macro_rules | `([$ls,* 🚩 $rs,*]) => ``(ListCursor.mk [$[$((ls : Array Lean.Term).reverse)],*] [$rs,*]) ``` In the macro, the sequences of elements have type {lean}``Syntax.TSepArray `term ","``. The type annotation as {lean}`Array Lean.Term` causes a coercion to fire so that {name}`Array.reverse` can be applied, and a similar coercion reinserts the separating commas. These coercions are described in the section on {ref "typed-syntax"}[typed syntax]. While the syntax works, it is not used in Lean's output: ```lean (name := flagNo) #check [1, 2, 3 🚩 4, 5] ``` ```leanOutput flagNo { before := [3, 2, 1], after := [4, 5] } : ListCursor Nat ``` An unexpander solves this problem. 
A delaborator is a function of type {lean}`Lean.PrettyPrinter.Delaborator.Delab`, which is an abbreviation for {lean}`Lean.PrettyPrinter.Delaborator.DelabM Term`.
The most important functions for working with subexpressions in delaborators are in the namespace `Lean.PrettyPrinter.Delaborator.SubExpr`:
reference-manual/Manual/RecursiveDefs/PartialFixpoint.lean
import VersoManual import Manual.Meta import Manual.Meta.Monotonicity import Manual.RecursiveDefs.PartialFixpoint.Theory open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode open Lean.Order set_option maxRecDepth 600 #doc (Manual) "Partial Fixpoint Recursion" => %%% tag := "partial-fixpoint" %%% All definitions are fundamentally equations: the new constant being defined is equal to the right-hand side of the definition. For functions defined by {ref "structural-recursion"}[structural recursion], this equation holds {tech (key := "definitional equality")}[definitionally], and there is a unique value returned by application of the function. For functions defined by {ref "well-founded-recursion"}[well-founded recursion], the equation may hold only {tech (key := "proposition")}[propositionally], but all type-correct applications of the function to arguments are equal to the respective values prescribed by the definition. In both cases, the fact that the function terminates for all inputs means that the value computed by applying the function is always uniquely determined. In some cases where a function does not terminate for all arguments, the equation may not _uniquely_ determine the function's return value for each input, but there are nonetheless functions for which the defining equation holds. In these cases, a definition as a {deftech}_partial fixpoint_ may still be possible. Any function that satisfies the defining equation can be used to demonstrate that the equation does not create a logical contradiction, and the equation can then be proven as a theorem about this function. 
Logically speaking, partial fixpoints are total functions that don't reduce {tech (key := "definitional equality")}[definitionally] when applied, but for which equational rewrite rules are provided. They are _partial_ in the sense that the defining equation does not necessarily specify a value for all possible arguments. While partial fixpoints do allow functions to be defined that cannot be expressed using structural or well-founded recursion, the technique is also useful in other cases. Even in cases where the defining equation fully describes the function's behavior and a termination proof using {ref "well-founded-recursion"}[well-founded recursion] would be possible, it may simply be more convenient to define the function as a partial fixpoint to avoid having to write a termination proof.
* Tail-recursive functions whose return type is an inhabited type
An expression is in {deftech}_tail position_ in the function body if it is: * the function body itself, * the branches of a {keywordOf Lean.Parser.Term.match}`match` expression that is in tail position, * the branches of an {keywordOf termIfThenElse}`if` expression that is in tail position, and * the body of a {keywordOf Lean.Parser.Term.let}`let` expression that is in tail position. In particular, the {tech (key := "match discriminant")}[discriminant] of a {keywordOf Lean.Parser.Term.match}`match` expression, the condition of an {keywordOf termIfThenElse}`if` expression and the arguments of functions are not tail positions. ::: ```lean -show -- Test that nonempty is enough inductive A : Type where | mkA | mkA' instance : Nonempty A := ⟨.mkA⟩ def getA (n : Nat) : A := getA (n + 1) partial_fixpoint example (n : Nat) : getA n = getA (n + 3) := by conv => lhs; rw [getA, getA, getA] ``` :::example "Loops are Tail Recursive Functions" Because the function body itself is a {tech}[tail position], the infinitely looping function {lean}`loop` is tail recursive. It can be defined as a partial fixpoint. ```lean def loop (x : Nat) : Nat := loop (x + 1) partial_fixpoint ``` ::: :::example "Tail Recursion with Branching" {lean}`Array.find` could also be constructed using well-founded recursion with a termination proof, but may be more convenient to define using {keywordOf Lean.Parser.Command.declaration}`partial_fixpoint`, where no termination proof is needed. ```lean def Array.find (xs : Array α) (p : α → Bool) (i : Nat := 0) : Option α := if h : i < xs.size then if p xs[i] then some xs[i] else Array.find xs p (i + 1) else none partial_fixpoint ``` If the result of the recursive call is not just returned, but passed to another function, it is not in tail position and this definition fails. 
In this case, recursive calls are not restricted to tail positions, but may also occur inside higher-order monadic functions such as {name}`bind` and {name}`List.mapM`.
:::example "Monadic functions" The following function implements the Ackermann function in the {name}`Option` monad, and is accepted without an (explicit or implicit) termination proof: ```lean -keep def ack : (n m : Nat) → Option Nat | 0, y => some (y+1) | x+1, 0 => ack x 1 | x+1, y+1 => do ack x (← ack (x+1) y) partial_fixpoint ``` Recursive calls may also occur within higher-order functions such as {name}`List.mapM`, if they are set up appropriately, and {tech}[{keywordOf Lean.Parser.Term.do}`do`-notation]: ```lean -keep structure Tree where cs : List Tree def Tree.rev (t : Tree) : Option Tree := do Tree.mk (← t.cs.reverse.mapM (Tree.rev ·)) partial_fixpoint def Tree.rev' (t : Tree) : Option Tree := do let mut cs := [] for c in t.cs do cs := (← c.rev') :: cs return Tree.mk cs partial_fixpoint ``` Pattern matching on the result of the recursive call will prevent the definition by partial fixpoint from going through: ```lean -keep +error (name := monoMatch) def List.findIndex (xs : List α) (p : α → Bool) : Option Nat := match xs with | [] => none | x::ys => if p x then some 0 else match List.findIndex ys p with | none => none | some r => some (r + 1) partial_fixpoint ``` ```leanOutput monoMatch Could not prove 'List.findIndex' to be monotone in its recursive calls: Cannot eliminate recursive call `List.findIndex ys p` enclosed in match ys✝.findIndex p with | none => none | some r => some (r + 1) ``` In this particular case, using {name}`Functor.map` instead of explicit pattern matching helps: ```lean def List.findIndex (xs : List α) (p : α → Bool) : Option Nat := match xs with | [] => none | x::ys => if p x then some 0 else (· + 1) <$> List.findIndex ys p partial_fixpoint ``` ::: # Partial Correctness Theorems %%% tag := "partial-correctness-theorem" %%% For every function defined as a partial fixpoint, Lean proves that the defining equation is satisfied. This enables proofs by rewriting. 
In the {name}`Option` monad, the partial fixpoint equals {name}`Option.none` on all function inputs for which the defining equation specifies non-termination.
The rules for mutual partial fixpoints are applied to a group of actually mutually recursive, lifted definitions that results from the {ref "mutual-syntax"}[elaboration steps] for mutual groups.
{include 1 Manual.RecursiveDefs.PartialFixpoint.Theory}
reference-manual/Manual/RecursiveDefs/Structural.lean
import VersoManual import Manual.RecursiveDefs.Structural.RecursorExample import Manual.RecursiveDefs.Structural.CourseOfValuesExample import Manual.Meta open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode set_option guard_msgs.diff true #doc (Manual) "Structural Recursion" => %%% tag := "structural-recursion" %%% Structurally recursive functions are those in which each recursive call is on a structurally smaller term than the argument. The same parameter must decrease in all recursive calls; this parameter is called the {deftech}_decreasing parameter_. Structural recursion is stronger than the primitive recursion that recursors provide, because the recursive call can use more deeply nested sub-terms of the argument, rather than only an immediate sub-term. The constructions used to implement structural recursion are, however, implemented using the recursor; these helper constructions are described in the {ref "recursor-elaboration-helpers"}[section on inductive types]. The rules that govern structural recursion are fundamentally _syntactic_ in nature. There are many recursive definitions that exhibit structurally recursive computational behavior, but which are not accepted by these rules; this is a fundamental consequence of the analysis being fully automatic. {tech}[Well-founded recursion] provides a semantic approach to demonstrating termination that can be used in situations where a recursive function is not structurally recursive, but it can also be used when a function that computes according to structural recursion doesn't satisfy the syntactic requirements. ```lean -show section variable (n n' : Nat) ``` :::example "Structural Recursion vs Subtraction" The function {lean}`countdown` is structurally recursive. 
The parameter {lean}`n` was matched against the pattern {lean}`n' + 1`, which means that {lean}`n'` is a direct subterm of {lean}`n` in the second branch of the pattern match: ```lean def countdown (n : Nat) : List Nat := match n with | 0 => [] | n' + 1 => n' :: countdown n' ``` Replacing pattern matching with an equivalent Boolean test and subtraction results in an error: ```lean +error (name := countdown') -keep def countdown' (n : Nat) : List Nat := if n == 0 then [] else let n' := n - 1 n' :: countdown' n' ``` ```leanOutput countdown' fail to show termination for countdown' with errors failed to infer structural recursion: Cannot use parameter n: failed to eliminate recursive application countdown' n' failed to prove termination, possible solutions: - Use `have`-expressions to prove the remaining goals - Use `termination_by` to specify a different well-founded relation - Use `decreasing_by` to specify your own tactic for discharging this kind of goal n : Nat h✝ : ¬(n == 0) = true n' : Nat := n - 1 ⊢ n - 1 < n ``` This is because there was no pattern matching on the parameter {lean}`n`. While this function indeed terminates, the argument that it does so is based on properties of if, the equality test, and subtraction, rather than being a generic feature of {lean}`Nat` being an {tech}[inductive type]. These arguments are expressed using {tech}[well-founded recursion], and a slight change to the function definition allows Lean's automatic support for well-founded recursion to construct an alternative termination proof. This version branches on the decidability of {tech}[propositional equality] for {lean}`Nat` rather than the result of a Boolean equality test: ```lean def countdown' (n : Nat) : List Nat := if n = 0 then [] else let n' := n - 1 n' :: countdown' n' ``` Here, Lean's automation automatically constructs a termination proof from facts about propositional equality and subtraction. 
It uses well-founded recursion rather than structural recursion behind the scenes. ::: ```lean -show end ``` Structural recursion may be used explicitly or automatically. With explicit structural recursion, the function definition declares which parameter is the {tech}[decreasing parameter]. If no termination strategy is explicitly declared, Lean performs a search for a decreasing parameter as well as a decreasing measure for use with {tech}[well-founded recursion]. Explicitly annotating structural recursion has the following benefits: * It can speed up elaboration, because no search occurs. * It documents the termination argument for readers. * In situations where structural recursion is explicitly desired, it prevents the accidental use of well-founded recursion. # Explicit Structural Recursion To explicitly use structural recursion, a function or theorem definition can be annotated with a {keywordOf Lean.Parser.Command.declaration}`termination_by structural` clause that specifies the {tech}[decreasing parameter]. The decreasing parameter may be a reference to a parameter named in the signature. When the signature specifies a function type, the decreasing parameter may additionally be a parameter not named in the signature; in this case, names for the remaining parameters may be introduced by writing them before an arrow ({keywordOf Lean.Parser.Command.declaration}`=>`). :::example "Specifying Decreasing Parameters" When the decreasing parameter is a named parameter to the function, it can be specified by referring to its name. ```lean -keep def half (n : Nat) : Nat := match n with | 0 | 1 => 0 | n + 2 => half n + 1 termination_by structural n ``` When the decreasing parameter is not named in the signature, a name can be introduced locally in the {keywordOf Lean.Parser.Command.declaration}`termination_by` clause. 
```lean -keep def half : Nat → Nat | 0 | 1 => 0 | n + 2 => half n + 1 termination_by structural n => n ``` ::: :::syntax Lean.Parser.Termination.terminationBy (title := "Explicit Structural Recursion") The `termination_by structural` clause introduces a decreasing parameter. ```grammar termination_by structural $[$_:ident* =>]? $term ``` The identifiers before the optional `=>` can bring function parameters into scope that are not already bound in the declaration header, and the mandatory term must indicate one of the function's parameters, whether introduced in the header or locally in the clause. ::: The decreasing parameter must satisfy the following conditions: * Its type must be an {tech}[inductive type]. * If its type is an {tech}[indexed family], then all indices must be parameters of the function. * If the inductive or indexed family of the decreasing parameter has data type parameters, then these data type parameters may themselves only depend on function parameters that are part of the {tech}[fixed prefix]. A {deftech}_fixed parameter_ is a function parameter that is passed unmodified in all recursive calls and is not an index of the recursive parameter's type. The {deftech}_fixed prefix_ is the longest prefix of the function's parameters in which all are fixed. :::example "Ineligible decreasing parameters" The decreasing parameter's type must be an inductive type. In {lean}`notInductive`, a function is specified as the decreasing parameter: ```lean +error (name := badnoindct) def notInductive (x : Nat → Nat) : Nat := notInductive (fun n => x (n+1)) termination_by structural x ``` ```leanOutput badnoindct cannot use specified measure for structural recursion: its type is not an inductive ``` If the decreasing parameter is an indexed family, all the indices must be variables. 
In {lean}`constantIndex`, the indexed family {lean}`Fin'` is instead applied to a constant value: ```lean +error (name := badidx) inductive Fin' : Nat → Type where | zero : Fin' (n+1) | succ : Fin' n → Fin' (n+1) def constantIndex (x : Fin' 100) : Nat := constantIndex .zero termination_by structural x ``` ```leanOutput badidx cannot use specified measure for structural recursion: its type Fin' is an inductive family and indices are not variables Fin' 100 ``` The parameters of the decreasing parameter's type must not depend on function parameters that come after varying parameters or indices. In {lean}`afterVarying`, the {tech}[fixed prefix] is empty, because the first parameter `n` varies, so `p` is not part of the fixed prefix: ```lean +error (name := badparam) inductive WithParam' (p : Nat) : Nat → Type where | zero : WithParam' p (n+1) | succ : WithParam' p n → WithParam' p (n+1) def afterVarying (n : Nat) (p : Nat) (x : WithParam' p n) : Nat := afterVarying (n+1) p .zero termination_by structural x ``` ```leanOutput badparam failed to infer structural recursion: Cannot use parameter x: failed to eliminate recursive application afterVarying (n + 1) p WithParam'.zero ``` ::: Furthermore, every recursive call of the functions must be on a {deftech}_strict sub-term_ of the decreasing parameter. * The decreasing parameter itself is a sub-term, but not a strict sub-term. * If a sub-term is the {tech (key := "match discriminant")}[discriminant] of a {keywordOf Lean.Parser.Term.match}`match` expression or other pattern-matching syntax, the pattern that matches the discriminant is a sub-term in the {tech}[right-hand side] of each {tech}[match alternative]. In particular, the rules of {ref "match-generalization"}[match generalization] are used to connect the discriminant to the occurrences of the pattern term in the right-hand side; thus, it respects {tech}[definitional equality]. The pattern is a _strict_ sub-term if and only if the discriminant is a strict sub-term. 
* If a sub-term is a constructor applied to arguments, then its recursive arguments are strict sub-terms. ```lean -show section variable (n : Nat) ``` ::::example "Nested Patterns and Sub-Terms" In the following example, the decreasing parameter {lean}`n` is matched against the nested pattern {lean (type := "Nat")}`.succ (.succ n)`. Therefore {lean (type := "Nat")}`.succ (.succ n)` is a (non-strict) sub-term of {lean (type := "Nat")}`n`, and consequently both {lean (type := "Nat")}`n` and {lean (type := "Nat")}`.succ n` are strict sub-terms, and the definition is accepted. ```lean def fib : Nat → Nat | 0 | 1 => 1 | .succ (.succ n) => fib n + fib (.succ n) termination_by structural n => n ``` For clarity, this example uses {lean (type := "Nat")}`.succ n` and {lean (type := "Nat")}`.succ (.succ n)` instead of the equivalent {lean}`Nat`-specific {lean}`n+1` and {lean}`n+2`. :::TODO Link to where this special syntax is documented. ::: :::: ```lean -show end ``` ```lean -show section variable {α : Type u} (n n' : Nat) (xs : List α) ``` :::example "Matching on Complex Expressions Can Prevent Elaboration" In the following example, the decreasing parameter {lean}`n` is not directly the {tech (key := "match discriminant")}[discriminant] of the {keywordOf Lean.Parser.Term.match}`match` expression. Therefore, {lean}`n'` is not considered a sub-term of {lean}`n`. ```lean +error -keep (name := badtarget) def half (n : Nat) : Nat := match Option.some n with | .some (n' + 2) => half n' + 1 | _ => 0 termination_by structural n ``` ```leanOutput badtarget failed to infer structural recursion: Cannot use parameter n: failed to eliminate recursive application half n' ``` Using {tech}[well-founded recursion], and explicitly connecting the discriminant to the pattern of the match, this definition can be accepted. 
```lean def half (n : Nat) : Nat := match h : Option.some n with | .some (n' + 2) => half n' + 1 | _ => 0 termination_by n decreasing_by simp_all; omega ``` Similarly, the following example fails: although {lean}`xs.tail` would reduce to a strict sub-term of {lean}`xs`, this is not visible to Lean according to the rules above. In particular, {lean}`xs.tail` is not {tech (key := "definitional equality")}[definitionally equal] to a strict sub-term of {lean}`xs`. ```lean +error -keep def listLen : List α → Nat | [] => 0 | xs => listLen xs.tail + 1 termination_by structural xs => xs ``` ::: ```lean -show end ``` :::example "Simultaneous Matching vs Matching Pairs for Structural Recursion" An important consequence of the strategies that are used to prove termination is that *simultaneous matching of two {tech (key := "match discriminant")}[discriminants] is not equivalent to matching a pair*. Simultaneous matching maintains the connection between the discriminants and the patterns, allowing the pattern matching to refine the types of the assumptions in the local context as well as the expected type of the {keywordOf Lean.Parser.Term.match}`match`. Essentially, the elaboration rules for {keywordOf Lean.Parser.Term.match}`match` treat the discriminants specially, and changing discriminants in a way that preserves the run-time meaning of a program does not necessarily preserve the compile-time meaning. 
This function that finds the minimum of two natural numbers is defined by structural recursion on its first parameter: ```lean -keep def min' (n k : Nat) : Nat := match n, k with | 0, _ => 0 | _, 0 => 0 | n' + 1, k' + 1 => min' n' k' + 1 termination_by structural n ``` Replacing the simultaneous pattern match on both parameters with a match on a pair causes termination analysis to fail: ```lean +error (name := noMin) def min' (n k : Nat) : Nat := match (n, k) with | (0, _) => 0 | (_, 0) => 0 | (n' + 1, k' + 1) => min' n' k' + 1 termination_by structural n ``` ```leanOutput noMin failed to infer structural recursion: Cannot use parameter n: failed to eliminate recursive application min' n' k' ``` This is because the analysis only considers direct pattern matching on parameters when matching recursive calls to strictly-smaller argument values. Wrapping the discriminants in a pair breaks the connection. ::: :::example "Structural Recursion Under Pairs" This function that finds the minimum of the two components of a pair can't be elaborated via structural recursion. ```lean +error (name := minpair) -keep def min' (nk : Nat × Nat) : Nat := match nk with | (0, _) => 0 | (_, 0) => 0 | (n' + 1, k' + 1) => min' (n', k') + 1 termination_by structural nk ``` ```leanOutput minpair failed to infer structural recursion: Cannot use parameter nk: the type Nat × Nat does not have a `.brecOn` recursor ``` This is because the parameter's type, {name}`Prod`, is not recursive. Thus, its constructor has no recursive parameters that can be exposed by pattern matching. 
This definition is accepted using {tech}[well-founded recursion], however: ```lean def min' (nk : Nat × Nat) : Nat := match nk with | (0, _) => 0 | (_, 0) => 0 | (n' + 1, k' + 1) => min' (n', k') + 1 termination_by nk ``` ::: ```lean -show section variable (n n' : Nat) ``` :::example "Structural Recursion and Definitional Equality" Even though the recursive occurrence of {lean}`countdown` is applied to a term that is not a strict sub-term of the decreasing parameter, the following definition is accepted: ```lean def countdown (n : Nat) : List Nat := match n with | 0 => [] | n' + 1 => n' :: countdown (n' + 0) termination_by structural n ``` This is because {lean}`n' + 0` is {tech (key := "definitional equality")}[definitionally equal] to {lean}`n'`, which is a strict sub-term of {lean}`n`. {tech (key := "strict sub-term")}[Sub-terms] that result from pattern matching are connected to the {tech (key := "match discriminant")}[discriminant] using the rules for {ref "match-generalization"}[match generalization], which respect definitional equality. In {lean}`countdown'`, the recursive occurrence is applied to {lean}`0 + n'`, which is not definitionally equal to `n'` because addition on natural numbers is structurally recursive in its second parameter: ```lean +error (name := countdownNonDefEq) def countdown' (n : Nat) : List Nat := match n with | 0 => [] | n' + 1 => n' :: countdown' (0 + n') termination_by structural n ``` ```leanOutput countdownNonDefEq failed to infer structural recursion: Cannot use parameter n: failed to eliminate recursive application countdown' (0 + n') ``` ::: ```lean -show end ``` # Mutual Structural Recursion %%% tag := "mutual-structural-recursion" %%% Lean supports the definition of {tech}[mutually recursive] functions using structural recursion. 
Mutual recursion may be introduced using a {tech}[mutual block], but it also results from {keywordOf Lean.Parser.Term.letrec}`let rec` expressions and {keywordOf Lean.Parser.Command.declaration}`where` blocks. The rules for mutual structural recursion are applied to a group of actually mutually recursive, lifted definitions, that results from the {ref "mutual-syntax"}[elaboration steps] for mutual groups. If every function in the mutual group has a {keyword}`termination_by structural` annotation indicating that function’s decreasing argument, then structural recursion is used to translate the definitions. The requirements on the decreasing argument above are extended: * All the types of all the decreasing arguments must be from the same inductive type, or more generally from the same {ref "mutual-inductive-types"}[mutual group of inductive types]. * The parameters of the decreasing parameter's types must be the same for all functions, and may depend only on the _common_ fixed prefix of function arguments. The functions do not have to be in a one-to-one correspondence to the mutual inductive types. Multiple functions can have a decreasing argument of the same type, and not all types that are mutually recursive with the decreasing argument need have a corresponding function. :::example "Mutual Structural Recursion Over Non-Mutual Types" The following example demonstrates mutual recursion over a non-mutual inductive data type: ```lean mutual def even : Nat → Prop | 0 => True | n+1 => odd n termination_by structural n => n def odd : Nat → Prop | 0 => False | n+1 => even n termination_by structural n => n end ``` ::: :::example "Mutual Structural Recursion Over Mutual Types" The following example demonstrates recursion over mutually inductive types. The functions {lean}`Exp.size` and {lean}`App.size` are mutually recursive. 
```lean mutual inductive Exp where | var : String → Exp | app : App → Exp inductive App where | fn : String → App | app : App → Exp → App end mutual def Exp.size : Exp → Nat | .var _ => 1 | .app a => a.size termination_by structural e => e def App.size : App → Nat | .fn _ => 1 | .app a e => a.size + e.size + 1 termination_by structural a => a end ``` The definition of {lean}`App.numArgs` is structurally recursive over type {lean}`App`. It demonstrates that not all inductive types in the mutual group need to be handled. ```lean def App.numArgs : App → Nat | .fn _ => 0 | .app a _ => a.numArgs + 1 termination_by structural a => a ``` ::: ::::draft :::planned 235 Describe mutual structural recursion over {ref "nested-inductive-types"}[nested inductive types]. ::: :::: # Inferring Structural Recursion %%% tag := "inferring-structural-recursion" %%% If no {keyword}`termination_by` clauses are present in a recursive or mutually recursive function definition, then Lean attempts to infer a suitable structurally decreasing argument, effectively by trying all suitable parameters in sequence. If this search fails, Lean then attempts to infer {tech}[well-founded recursion]. For mutually recursive functions, all combinations of parameters are tried, up to a limit to avoid combinatorial explosion. If only some of the mutually recursive functions have {keyword}`termination_by structural` clauses, then only those parameters are considered, while for the other functions all parameters are considered for structural recursion. A {keyword}`termination_by?` clause causes the inferred termination annotation to be shown. It can be automatically added to the source file using the offered suggestion or code action. :::example "Inferred Termination Annotations" Lean automatically infers that the function {lean}`half` is structurally recursive. 
The {keyword}`termination_by?` clause causes the inferred termination annotation to be displayed, and it can be automatically added to the source file with a single click. ```lean (name := inferStruct) def half : Nat → Nat | 0 | 1 => 0 | n + 2 => half n + 1 termination_by? ``` ```leanOutput inferStruct Try this: [apply] termination_by structural x => x ``` ::: # Elaboration Using Course-of-Values Recursion %%% tag := "elab-as-course-of-values" %%% In this section, the construction used to elaborate structurally recursive functions is explained in more detail. This elaboration uses the {ref "recursor-elaboration-helpers"}[`below` and `brecOn` constructions] that are automatically generated from inductive types' recursors. {spliceContents Manual.RecursiveDefs.Structural.RecursorExample} The structural recursion analysis attempts to translate the recursive {tech}[pre-definition] into a use of the appropriate structural recursion constructions. At this step, pattern matching has already been translated into the use of matcher functions; these are treated specially by the termination checker. Next, for each group of parameters, a translation using `brecOn` is attempted. {spliceContents Manual.RecursiveDefs.Structural.CourseOfValuesExample} The `below` construction is a mapping from each value of a type to the results of some function call on _all_ smaller values; it can be understood as a memoization table that already contains the results for all smaller values. The notion of “smaller value” that is expressed in the `below` construction corresponds directly to the definition of {tech}[strict sub-terms]. Recursors expect an argument for each of the inductive type's constructors; these arguments are called with the constructor's arguments (and the result of recursion on recursive parameters) during {tech}[ι-reduction]. The course-of-values recursion operator `brecOn`, on the other hand, expects just a single case that covers all constructors at once. 
This case is provided with a value and a `below` table that contains the results of recursion on all values smaller than the given value; it should use the contents of the table to satisfy the motive for the provided value. If the function is structurally recursive over a given parameter (or parameter group), then the results of all recursive calls will be present in this table already. When the body of the recursive function is transformed into an invocation of `brecOn` on one of the function's parameters, the parameter and its course-of-values table are in scope. The analysis traverses the body of the function, looking for recursive calls. If the parameter is matched against, then its occurrences in the local context are {ref "match-generalization"}[generalized] and then instantiated with the pattern; this is also true for the type of the course-of-values table. Typically, this pattern matching results in the type of the course-of-values table becoming more specific, which gives access to the recursive results for smaller values. This generalization process implements the rule that patterns are {tech (key := "strict sub-term")}[sub-terms] of match discriminants. When a recursive occurrence of the function is detected, the course-of-values table is consulted to see whether it contains a result for the argument being checked. If so, the recursive call can be replaced with a projection from the table. If not, then the parameter in question doesn't support structural recursion. ```lean -show section ``` :::example "Elaboration Walkthrough" The first step in walking through the elaboration of {name}`half` is to manually desugar it to a simpler form. This doesn't match the way Lean works, but its output is much easier to read when there are fewer {name}`OfNat` instances present. 
This readable definition: ```lean -keep def half : Nat → Nat | 0 | 1 => 0 | n + 2 => half n + 1 ``` can be rewritten to this somewhat lower-level version: ```lean -keep def half : Nat → Nat | .zero | .succ .zero => .zero | .succ (.succ n) => half n |>.succ ``` The elaborator begins by elaborating a pre-definition in which recursion is still present but the definition is otherwise in Lean's core type theory. Turning on the compiler's tracing of pre-definitions, as well as making the pretty printer more explicit, makes the resulting pre-definition visible: ```lean -keep -show -- Test of next block - visually check correspondence when updating! set_option trace.Elab.definition.body true in set_option pp.all true in /-- trace: [Elab.definition.body] half : Nat → Nat := fun (x : Nat) => half.match_1.{1} (fun (x : Nat) => Nat) x (fun (_ : Unit) => Nat.zero) (fun (_ : Unit) => Nat.zero) fun (n : Nat) => Nat.succ (_root_.half n) -/ #guard_msgs in def half : Nat → Nat | .zero | .succ .zero => .zero | .succ (.succ n) => half n |>.succ ``` ```lean (name := tracedHalf) set_option trace.Elab.definition.body true in set_option pp.all true in def half : Nat → Nat | .zero | .succ .zero => .zero | .succ (.succ n) => half n |>.succ ``` The returned trace message is:{TODO}[Trace not showing up in serialized info—figure out why so this test can work better, or better yet, add proper trace rendering to Verso] ``` [Elab.definition.body] half : Nat → Nat := fun (x : Nat) => half.match_1.{1} (fun (x : Nat) => Nat) x (fun (_ : Unit) => Nat.zero) (fun (_ : Unit) => Nat.zero) fun (n : Nat) => Nat.succ (half n) ``` The auxiliary match function's definition is: ```lean (name := halfmatch) #print half.match_1 ``` ```leanOutput halfmatch (whitespace := lax) def half.match_1.{u_1} : (motive : Nat → Sort u_1) → (x : Nat) → (Unit → motive Nat.zero) → (Unit → motive 1) → ((n : Nat) → motive n.succ.succ) → motive x := fun motive x h_1 h_2 h_3 => Nat.casesOn x (h_1 ()) fun n => Nat.casesOn n (h_2 ()) 
fun n => h_3 n ``` Formatted more readably, this definition is: ```lean def half.match_1'.{u} : (motive : Nat → Sort u) → (x : Nat) → (Unit → motive Nat.zero) → (Unit → motive 1) → ((n : Nat) → motive n.succ.succ) → motive x := fun motive x h_1 h_2 h_3 => Nat.casesOn x (h_1 ()) fun n => Nat.casesOn n (h_2 ()) fun n => h_3 n ``` In other words, the specific configuration of patterns used in {name}`half` are captured in {name}`half.match_1`. This definition is a more readable version of {name}`half`'s pre-definition: ```lean def half' : Nat → Nat := fun (x : Nat) => half.match_1 (motive := fun _ => Nat) x (fun _ => 0) -- Case for 0 (fun _ => 0) -- Case for 1 (fun n => Nat.succ (half' n)) -- Case for n + 2 ``` To elaborate it as a structurally recursive function, the first step is to establish the `bRecOn` invocation. The definition must be marked {keywordOf Lean.Parser.Command.declaration}`noncomputable` because Lean does not support code generation for recursors such as {name}`Nat.brecOn`. ```lean +error -keep noncomputable def half'' : Nat → Nat := fun (x : Nat) => x.brecOn fun n table => _ /- To translate: half.match_1 (motive := fun _ => Nat) x (fun _ => 0) -- Case for 0 (fun _ => 0) -- Case for 1 (fun n => Nat.succ (half' n)) -- Case for n + 2 -/ ``` The next step is to replace occurrences of `x` in the original function body with the `n` provided by {name Nat.brecOn}`brecOn`. Because `table`'s type depends on `x`, it must also be generalized when splitting cases with {name}`half.match_1`, leading to a motive with an extra parameter. 
```lean +error -keep (name := threeCases) noncomputable def half'' : Nat → Nat := fun (x : Nat) => x.brecOn fun n table => (half.match_1 (motive := fun k => k.below (motive := fun _ => Nat) → Nat) n _ _ _) table /- To translate: (fun _ => 0) -- Case for 0 (fun _ => 0) -- Case for 1 (fun n => Nat.succ (half' n)) -- Case for n + 2 -/ ``` The three cases' placeholders expect the following types: ```leanOutput threeCases don't know how to synthesize placeholder for argument `h_1` context: x n : Nat table : Nat.below n ⊢ Unit → (fun k => Nat.below k → Nat) Nat.zero ``` ```leanOutput threeCases don't know how to synthesize placeholder for argument `h_2` context: x n : Nat table : Nat.below n ⊢ Unit → (fun k => Nat.below k → Nat) 1 ``` ```leanOutput threeCases don't know how to synthesize placeholder for argument `h_3` context: x n : Nat table : Nat.below n ⊢ (n : Nat) → (fun k => Nat.below k → Nat) n.succ.succ ``` The first two cases in the pre-definition are constant functions, with no recursion to check: ```lean +error -keep (name := oneMore) noncomputable def half'' : Nat → Nat := fun (x : Nat) => x.brecOn fun n table => (half.match_1 (motive := fun k => k.below (motive := fun _ => Nat) → Nat) n (fun () _ => .zero) (fun () _ => .zero) _) table /- To translate: (fun n => Nat.succ (half' n)) -- Case for n + 2 -/ ``` The final case contains a recursive call. It should be translated into a lookup into the course-of-values table. 
A more readable representation of the last hole's type is: ```leanTerm (n : Nat) → Nat.below (motive := fun _ => Nat) n.succ.succ → Nat ``` which is equivalent to ```leanTerm (n : Nat) → Nat ×' (Nat ×' Nat.below (motive := fun _ => Nat) n) → Nat ``` ```lean -show example : ((n : Nat) → Nat.below (motive := fun _ => Nat) n.succ.succ → Nat) = ((n : Nat) → Nat ×' (Nat ×' Nat.below (motive := fun _ => Nat) n) → Nat) := rfl ``` ```lean -show variable {n : Nat} ``` The first {lean}`Nat` in the course-of-values table is the result of recursion on {lean}`n + 1`, and the second is the result of recursion on {lean}`n`. The recursive call can thus be replaced by a lookup, and the elaboration is successful: ```lean -keep (name := oneMore) noncomputable def half'' : Nat → Nat := fun (x : Nat) => x.brecOn fun n table => (half.match_1 (motive := fun k => k.below (motive := fun _ => Nat) → Nat) n (fun () _ => .zero) (fun () _ => .zero) (fun _ table => Nat.succ table.2.1)) table ``` The actual elaborator keeps track of the relationship between the parameter being checked for structural recursion and the positions in the course-of-values tables by inserting sentinel types with fresh names into the motive. ::: ```lean -show end ``` ::::draft ::: planned 56 A description of the elaboration of mutually recursive functions ::: ::::
reference-manual/Manual/RecursiveDefs/WF.lean
import VersoManual import Manual.Meta import Manual.Papers import Manual.RecursiveDefs.WF.GuessLexExample import Manual.RecursiveDefs.WF.PreprocessExample open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Well-Founded Recursion" => %%% tag := "well-founded-recursion" %%% Functions defined by {deftech}_well-founded recursion_ are those in which each recursive call has arguments that are _smaller_ (in a {ref "wf-rel"}[suitable sense]) than the functions' parameters. In contrast to {ref "structural-recursion"}[structural recursion], in which recursive definitions must satisfy particular _syntactic_ requirements, definitions that use well-founded recursion employ _semantic_ arguments. This allows a larger class of recursive definitions to be accepted. Furthermore, when Lean's automation fails to construct a termination proof, it is possible to specify one manually. All definitions are treated identically by the Lean compiler. In Lean's logic, definitions that use well-founded recursion typically do not reduce {tech (key := "definitional equality")}[definitionally]. The reductions do hold as propositional equalities, however, and Lean automatically proves them. This does not typically make it more difficult to prove properties of definitions that use well-founded recursion, because the propositional reductions can be used to reason about the behavior of the function. It does mean, however, that using these functions in types typically does not work well. Even when the reduction behavior happens to hold definitionally, it is often much slower than structurally recursive definitions in the kernel, which must unfold the termination proof along with the definition. When possible, recursive functions that are intended for use in types or in other situations where definitional equality is important should be defined with structural recursion. 
To explicitly use well-founded recursion, a function or theorem definition can be annotated with a {keywordOf Lean.Parser.Command.declaration}`termination_by` clause that specifies the {deftech}_measure_ by which the function terminates. The measure should be a term that decreases at each recursive call; it may be one of the function's parameters or a tuple of the parameters, but it may also be any other term. The measure's type must be equipped with a {tech}[well-founded relation], which determines what it means for the measure to decrease. :::syntax Lean.Parser.Termination.terminationBy (title := "Explicit Well-Founded Recursion") The {keywordOf Lean.Parser.Command.declaration}`termination_by` clause introduces the termination argument. ```grammar termination_by $[$_:ident* =>]? $term ``` The identifiers before the optional `=>` can bring function parameters into scope that are not already bound in the declaration header, and the mandatory term must indicate one of the function's parameters, whether introduced in the header or locally in the clause. ::: :::example "Division by Iterated Subtraction" Division can be specified as the number of times the divisor can be subtracted from the dividend. This operation cannot be elaborated using structural recursion because subtraction is not pattern matching. The value of `n` does decrease with each recursive call, so well-founded recursion can be used to justify the definition of division by iterated subtraction. ```lean def div (n k : Nat) : Nat := if k = 0 then 0 else if k > n then 0 else 1 + div (n - k) k termination_by n ``` ::: # Well-Founded Relations %%% tag := "wf-rel" %%% A relation `≺` is a {deftech}_well-founded relation_ if there exists no infinitely descending chain $$` x_0 ≻ x_1 ≻ \cdots` In Lean, types that are equipped with a canonical well-founded relation are instances of the {name}`WellFoundedRelation` type class. 
{docstring WellFoundedRelation} ```lean -show section variable {α : Type u} {β : Type v} (a₁ a₂ : α) (b₁ b₂ : β) [WellFoundedRelation α] [WellFoundedRelation β] variable {γ : Type u} (x₁ x₂ : γ) [SizeOf γ] local notation x " ≺ " y => WellFoundedRelation.rel x y ``` The most important instances are: * {name}[`Nat`], ordered by {lean (type := "Nat → Nat → Prop")}`(· < ·)`. * {name}[`Prod`], ordered lexicographically: {lean}`(a₁, b₁) ≺ (a₂, b₂)` if and only if {lean}`a₁ ≺ a₂` or {lean}`a₁ = a₂` and {lean}`b₁ ≺ b₂`. * Every type that is an instance of the {name}`SizeOf` type class, which provides a method {name}`SizeOf.sizeOf`, has a well-founded relation. For these types, {lean}`x₁ ≺ x₂` if and only if {lean}`sizeOf x₁ < sizeOf x₂`. For {tech}[inductive types], a {lean}`SizeOf` instance is automatically derived by Lean. ```lean -show end ``` Note that there exists a low-priority instance {name}`instSizeOfDefault` that provides a {lean}`SizeOf` instance for any type, and always returns {lean}`0`. This instance cannot be used to prove that a function terminates using well-founded recursion because {lean}`0 < 0` is false. ```lean -show -- Check claims about instSizeOfDefault example {α} (x : α) : sizeOf x = 0 := by rfl /-- info: instSizeOfDefault.{u} (α : Sort u) : SizeOf α -/ #check_msgs in #check instSizeOfDefault ``` :::example "Default Size Instance" Function types in general do not have a well-founded relation that's useful for termination proofs. {ref "instance-synth"}[Instance synthesis] thus selects {name}`instSizeOfDefault` and the corresponding well-founded relation. If the measure is a function, the default {name}`SizeOf` instance is selected and the proof cannot succeed. 
```lean -keep def fooInst (b : Bool → Bool) : Unit := fooInst (b ∘ b) termination_by b decreasing_by guard_target = @sizeOf (Bool → Bool) (instSizeOfDefault _) (b ∘ b) < sizeOf b simp only [sizeOf, default.sizeOf] guard_target = 0 < 0 simp guard_target = False sorry ``` ::: # Termination proofs Once a {tech}[measure] is specified and its {tech}[well-founded relation] is determined, Lean determines the termination proof obligation for every recursive call. ```lean -show section variable {α : Type u} {β : α → Type v} {β' : Type v} (more : β') (g : (x : α) → (y : β x) → β' → γ) [WellFoundedRelation γ] (a₁ p₁ : α) (a₂ : β a₁) (p₂ : β p₁) local notation (name := decRelStx) x " ≺ " y => WellFoundedRelation.rel x y local notation "…" => more ``` The proof obligation for each recursive call is of the form {lean}`g a₁ a₂ … ≺ g p₁ p₂ …`, where: * {lean}`g` is the measure as a function of the parameters, * {name WellFoundedRelation.rel}`≺` is the inferred well-founded relation, * {lean}`a₁` {lean}`a₂` {lean}`…` are the arguments of the recursive call and * {lean}`p₁` {lean}`p₂` {lean}`…` are the parameters of the function definition. The context of the proof obligation is the local context of the recursive call. In particular, local assumptions (such as those introduced by `if h : _`, `match h : _ with ` or `have`) are available. If a function parameter is the {tech (key := "match discriminant")}[discriminant] of a pattern match (e.g. by a {keywordOf Lean.Parser.Term.match}`match` expression), then this parameter is refined to the matched pattern in the proof obligation. ```lean -show end ``` The overall termination proof obligation consists of one goal for each recursive call. By default, the tactic {tactic}`decreasing_trivial` is used to prove each proof obligation. 
A custom tactic script can be provided using the optional {keywordOf Lean.Parser.Command.declaration}`decreasing_by` clause, which comes after the {keywordOf Lean.Parser.Command.declaration}`termination_by` clause. This tactic script is run once, with one goal for each proof obligation, rather than separately on each proof obligation. ```lean -show section variable {n : Nat} ``` ::::example "Termination Proof Obligations" The following recursive definition of the Fibonacci numbers has two recursive calls, which results in two goals in the termination proof. ```lean +error -keep (name := fibGoals) def fib (n : Nat) := if h : n ≤ 1 then 1 else fib (n - 1) + fib (n - 2) termination_by n decreasing_by skip ``` ```leanOutput fibGoals (whitespace := lax) -show unsolved goals n : Nat h : ¬n ≤ 1 ⊢ n - 1 < n n : Nat h : ¬n ≤ 1 ⊢ n - 2 < n ``` ```proofState ∀ (n : Nat), (h : ¬ n ≤ 1) → n - 1 < n ∧ n - 2 < n := by intro n h apply And.intro ?_ ?_ /-- n : Nat h : ¬n ≤ 1 ⊢ n - 1 < n n : Nat h : ¬n ≤ 1 ⊢ n - 2 < n -/ ``` Here, the {tech}[measure] is simply the parameter itself, and the well-founded order is the less-than relation on natural numbers. The first proof goal requires the user to prove that the argument of the first recursive call, namely {lean}`n - 1`, is strictly smaller than the function's parameter, {lean}`n`. Both termination proofs can be easily discharged using the {tactic}`omega` tactic. ```lean -keep def fib (n : Nat) := if h : n ≤ 1 then 1 else fib (n - 1) + fib (n - 2) termination_by n decreasing_by · omega · omega ``` :::: ```lean -show end ``` :::example "Refined Parameters" If a parameter of the function is the {tech (key := "match discriminant")}[discriminant] of a pattern match, then the proof obligations mention the refined parameter. 
```lean +error -keep (name := fibGoals2) def fib : Nat → Nat | 0 | 1 => 1 | .succ (.succ n) => fib (n + 1) + fib n termination_by n => n decreasing_by skip ``` ```leanOutput fibGoals2 (whitespace := lax) -show unsolved goals n : Nat ⊢ n + 1 < n.succ.succ n : Nat ⊢ n < n.succ.succ ``` ```proofState ∀ (n : Nat), n + 1 < n.succ.succ ∧ n < n.succ.succ := by intro n apply And.intro ?_ ?_ /-- n : Nat ⊢ n + 1 < n.succ.succ n : Nat ⊢ n < n.succ.succ -/ ``` ::: :::paragraph Additionally, the context is enriched with additional assumptions that can make it easier to prove termination. Some examples include: * In the branches of an {ref "if-then-else"}[if-then-else] expression, a hypothesis that asserts the current branch's condition is added, much as if the dependent if-then-else syntax had been used. * In the function argument to certain higher-order functions, the context of the function's body is enriched with assumptions about the argument. This list is not exhaustive, and the mechanism is extensible. It is described in detail in {ref "well-founded-preprocessing"}[the section on preprocessing]. ::: ```lean -show section variable {x : Nat} {xs : List Nat} {n : Nat} ``` :::example "Enriched Proof Obligation Contexts" Here, the {keywordOf termIfThenElse}`if` does not add a local assumption about the condition (that is, whether {lean}`n ≤ 1`) to the local contexts in the branches. 
```lean +error -keep (name := fibGoals3) def fib (n : Nat) := if n ≤ 1 then 1 else fib (n - 1) + fib (n - 2) termination_by n decreasing_by skip ``` ```leanOutput fibGoals3 (whitespace := lax) -show unsolved goals n : Nat h✝ : ¬n ≤ 1 ⊢ n - 1 < n n : Nat h✝ : ¬n ≤ 1 ⊢ n - 2 < n ``` Nevertheless, the assumptions are available in the context of the termination proof: ```proofState ∀ (n : Nat), («h✝» : ¬ n ≤ 1) → n - 1 < n ∧ n - 2 < n := by intro n «h✝» apply And.intro ?_ ?_ /-- n : Nat h✝ : ¬n ≤ 1 ⊢ n - 1 < n n : Nat h✝ : ¬n ≤ 1 ⊢ n - 2 < n -/ ``` Termination proof obligations in body of a {keywordOf Lean.Parser.Term.doFor}`for`​`…`​{keywordOf Lean.Parser.Term.doFor}`in` loop are also enriched, in this case with a {name}`Std.Legacy.Range` membership hypothesis: ```lean +error -keep (name := nestGoal3) def f (xs : Array Nat) : Nat := Id.run do let mut s := xs.sum for i in [:xs.size] do s := s + f (xs.take i) pure s termination_by xs decreasing_by skip ``` ```leanOutput nestGoal3 (whitespace := lax) -show unsolved goals xs : Array Nat s : Nat := xs.sum i : Nat h✝ : i ∈ [:xs.size] ⊢ sizeOf (xs.take i) < sizeOf xs ``` ```proofState ∀ (xs : Array Nat) (i : Nat) («h✝» : i ∈ [:xs.size]), sizeOf (xs.take i) < sizeOf xs := by set_option tactic.hygienic false in intros ``` Similarly, in the following (contrived) example, the termination proof contains an additional assumption showing that {lean}`x ∈ xs`. 
```lean +error -keep (name := nestGoal1) def f (n : Nat) (xs : List Nat) : Nat := List.sum (xs.map (fun x => f x [])) termination_by xs decreasing_by skip ``` ```leanOutput nestGoal1 (whitespace := lax) -show unsolved goals n : Nat xs : List Nat x : Nat h✝ : x ∈ xs ⊢ sizeOf [] < sizeOf xs ``` ```proofState ∀ (n : Nat) (xs : List Nat) (x : Nat) («h✝» : x ∈ xs), sizeOf ([] : List Nat) < sizeOf xs := by set_option tactic.hygienic false in intros /-- n : Nat xs : List Nat x : Nat h✝ : x ∈ xs ⊢ sizeOf [] < sizeOf xs -/ ``` This feature requires special setup for the higher-order function under which the recursive call is nested, as described in {ref "well-founded-preprocessing"}[the section on preprocessing]. In the following definition, identical to the one above except using a custom, equivalent function instead of {name}`List.map`, the proof obligation context is not enriched: ```lean +error -keep (name := nestGoal4) def List.myMap := @List.map def f (n : Nat) (xs : List Nat) : Nat := List.sum (xs.myMap (fun x => f x [])) termination_by xs decreasing_by skip ``` ```leanOutput nestGoal4 (whitespace := lax) -show unsolved goals n : Nat xs : List Nat x : Nat ⊢ sizeOf [] < sizeOf xs ``` ```proofState ∀ (n : Nat) (xs : List Nat) (x : Nat), sizeOf ([] : List Nat) < sizeOf xs := by set_option tactic.hygienic false in intros ``` ::: ```lean -show end ``` ```lean -show section ``` ::::TODO :::example "Nested recursive calls and subtypes" I (Joachim) wanted to include a good example where recursive calls are nested inside each other, and one likely needs to introduce a subtype in the result to make it go through. But can't think of something nice and natural right now. ::: :::: # Default Termination Proof Tactic If no {keywordOf Lean.Parser.Command.declaration}`decreasing_by` clause is given, then the {tactic}`decreasing_tactic` is used implicitly, and applied to each proof obligation separately. 
:::tactic "decreasing_tactic" +replace The tactic {tactic}`decreasing_tactic` mainly deals with lexicographic ordering of tuples, applying {name}`Prod.Lex.right` if the left components of the product are {tech (key := "definitional equality")}[definitionally equal], and {name}`Prod.Lex.left` otherwise. After preprocessing tuples this way, it calls the {tactic}`decreasing_trivial` tactic. ::: :::tactic "decreasing_trivial" The tactic {tactic}`decreasing_trivial` is an extensible tactic that applies a few common heuristics to solve a termination goal. In particular, it tries the following tactics and theorems: * {tactic}`simp_arith` * {tactic}`assumption` * theorems {name}`Nat.sub_succ_lt_self`, {name}`Nat.pred_lt_of_lt`, and {name}`Nat.pred_lt`, which handle common arithmetic goals * {tactic}`omega` * {tactic}`array_get_dec` and {tactic}`array_mem_dec`, which prove that the size of array elements is less than the size of the array * {tactic}`sizeOf_list_dec`, which proves that the size of list elements is less than the size of the list * {name}`String.Legacy.Iterator.sizeOf_next_lt_of_hasNext` and {name}`String.Legacy.Iterator.sizeOf_next_lt_of_atEnd`, to handle iteration through a string using {keywordOf Lean.Parser.Term.doFor}`for` This tactic is intended to be extended with further heuristics using {keywordOf Lean.Parser.Command.macro_rules}`macro_rules`. ::: :::example "No Backtracking of Lexicographic Order" A classic example of a recursive function that needs a more complex {tech}[measure] is the Ackermann function: ```lean -keep def ack : Nat → Nat → Nat | 0, n => n + 1 | m + 1, 0 => ack m 1 | m + 1, n + 1 => ack m (ack (m + 1) n) termination_by m n => (m, n) ``` The measure is a tuple, so every recursive call has to be on arguments that are lexicographically smaller than the parameters. The default {tactic}`decreasing_tactic` can handle this. 
In particular, note that the third recursive call has a second argument that is smaller than the second parameter and a first argument that is definitionally equal to the first parameter. This allowed {tactic}`decreasing_tactic` to apply {name}`Prod.Lex.right`. ```signature Prod.Lex.right {α β} {ra : α → α → Prop} {rb : β → β → Prop} (a : α) {b₁ b₂ : β} (h : rb b₁ b₂) : Prod.Lex ra rb (a, b₁) (a, b₂) ``` It fails, however, with the following modified function definition, where the third recursive call's first argument is provably smaller or equal to the first parameter, but not syntactically equal: ```lean -keep +error (name := synack) def synack : Nat → Nat → Nat | 0, n => n + 1 | m + 1, 0 => synack m 1 | m + 1, n + 1 => synack m (synack (m / 2 + 1) n) termination_by m n => (m, n) ``` ```leanOutput synack (whitespace := lax) failed to prove termination, possible solutions: - Use `have`-expressions to prove the remaining goals - Use `termination_by` to specify a different well-founded relation - Use `decreasing_by` to specify your own tactic for discharging this kind of goal case h m n : Nat ⊢ m / 2 + 1 < m + 1 ``` Because {name}`Prod.Lex.right` is not applicable, the tactic used {name}`Prod.Lex.left`, which resulted in the unprovable goal above. 
This function definition may require a manual proof that uses the more general theorem {name}`Prod.Lex.right'`, which allows the first component of the tuple (which must be of type {name}`Nat`) to be less or equal instead of strictly equal: ```signature Prod.Lex.right' {β} (rb : β → β → Prop) {a₂ : Nat} {b₂ : β} {a₁ : Nat} {b₁ : β} (h₁ : a₁ ≤ a₂) (h₂ : rb b₁ b₂) : Prod.Lex Nat.lt rb (a₁, b₁) (a₂, b₂) ``` ```lean -keep def synack : Nat → Nat → Nat | 0, n => n + 1 | m + 1, 0 => synack m 1 | m + 1, n + 1 => synack m (synack (m / 2 + 1) n) termination_by m n => (m, n) decreasing_by · apply Prod.Lex.left omega -- the next goal corresponds to the third recursive call · apply Prod.Lex.right' · omega · omega · apply Prod.Lex.left omega ``` The {tactic}`decreasing_tactic` tactic does not use the stronger {name}`Prod.Lex.right'` because it would require backtracking on failure. ::: # Inferring Well-Founded Recursion %%% tag := "inferring-well-founded-recursion" %%% If a recursive function definition does not indicate a termination {tech}[measure], Lean will attempt to discover one automatically. If neither {keywordOf Lean.Parser.Command.declaration}`termination_by` nor {keywordOf Lean.Parser.Command.declaration}`decreasing_by` is provided, Lean will try to {ref "inferring-structural-recursion"}[infer structural recursion] before attempting well-founded recursion. If a {keywordOf Lean.Parser.Command.declaration}`decreasing_by` clause is present, only well-founded recursion is attempted. To infer a suitable termination {tech}[measure], Lean considers multiple {deftech}_basic termination measures_, which are termination measures of type {name}`Nat`, and then tries all tuples of these measures. 
The basic termination measures considered are: * all parameters whose type has a non-trivial {name}`SizeOf` instance * the expression `e₂ - e₁` whenever the local context of a recursive call has an assumption of type `e₁ < e₂` or `e₁ ≤ e₂`, where `e₁` and `e₂` are of type {name}`Nat` and depend only on the function's parameters. {margin}[This approach is based on work by {citehere manolios2006}[].] * in a mutual group, an additional basic measure is used to distinguish between recursive calls to other functions in the group and recursive calls to the function being defined (for details, see {ref "mutual-well-founded-recursion"}[the section on mutual well-founded recursion]) {deftech}_Candidate measures_ are basic measures or tuples of basic measures. If any of the candidate measures allow all proof obligations to be discharged by the termination proof tactic (that is, the tactic specified by {keywordOf Lean.Parser.Command.declaration}`decreasing_by`, or {tactic}`decreasing_trivial` if there is no {keywordOf Lean.Parser.Command.declaration}`decreasing_by` clause), then an arbitrary such candidate measure is selected as the automatic termination measure. A {keyword}`termination_by?` clause causes the inferred termination annotation to be shown. It can be automatically added to the source file using the offered suggestion or code action. To avoid the combinatorial explosion of trying all tuples of measures, Lean first tabulates all {tech}[basic termination measures], determining whether the basic measure is decreasing, strictly decreasing, or non-decreasing. A decreasing measure is smaller for at least one recursive call and never increases at any recursive call, while a strictly decreasing measure is smaller at all recursive calls. A non-decreasing measure is one that the termination tactic could not show to be decreasing or strictly decreasing. A suitable tuple is chosen based on the table.{margin}[This approach is based on {citehere bulwahn2007}[].] 
This table shows up in the error message when no automatic measure could be found. {spliceContents Manual.RecursiveDefs.WF.GuessLexExample} ```lean -show section variable {e₁ e₂ i j : Nat} ``` :::example "Array Indexing" The purpose of considering expressions of the form {lean}`e₂ - e₁` as measures is to support the common idiom of counting up to some upper bound, in particular when traversing arrays in possibly interesting ways. In the following function, which performs binary search on a sorted array, this heuristic helps Lean to find the {lean}`j - i` measure. ```lean (name := binarySearch) def binarySearch (x : Int) (xs : Array Int) : Option Nat := go 0 xs.size where go (i j : Nat) (hj : j ≤ xs.size := by omega) := if h : i < j then let mid := (i + j) / 2 let y := xs[mid] if x = y then some mid else if x < y then go i mid else go (mid + 1) j else none termination_by? ``` The fact that the inferred termination argument uses some arbitrary measure, rather than an optimal or minimal one, is visible in the inferred measure, which contains a redundant `j`: ```leanOutput binarySearch Try this: [apply] termination_by (j, j - i) ``` ::: ```lean -show end ``` :::example "Termination Proof Tactics During Inference" The tactic indicated by {keywordOf Lean.Parser.Command.declaration}`decreasing_by` is used slightly differently when inferring the termination {tech}[measure] than it is in the actual termination proof. * During inference, it is applied to a _single_ goal, attempting to prove {name LT.lt}`<` or {name LE.le}`≤` on {name}`Nat`. * During the termination proof, it is applied to many simultaneous goals (one per recursive call), and the goals may involve the lexicographic ordering of pairs. 
A consequence is that a {keywordOf Lean.Parser.Command.declaration}`decreasing_by` block that addresses goals individually and which works successfully with an explicit termination argument can cause inference of the termination measure to fail: ```lean -keep +error def ack : Nat → Nat → Nat | 0, n => n + 1 | m + 1, 0 => ack m 1 | m + 1, n + 1 => ack m (ack (m + 1) n) decreasing_by · apply Prod.Lex.left omega · apply Prod.Lex.right omega · apply Prod.Lex.left omega ``` It is advisable to always include a {keywordOf Lean.Parser.Command.declaration}`termination_by` clause whenever an explicit {keywordOf Lean.Parser.Command.declaration}`decreasing_by` proof is given. ::: :::example "Inference too powerful" Because {tactic}`decreasing_tactic` avoids the need to backtrack by being incomplete with regard to lexicographic ordering, Lean may infer a termination {tech}[measure] that leads to goals that the tactic cannot prove. In this case, the error message is the one that results from the failing tactic rather than the one that results from being unable to find a measure. This is what happens in {lean}`notAck`: ```lean +error (name := badInfer) def notAck : Nat → Nat → Nat | 0, n => n + 1 | m + 1, 0 => notAck m 1 | m + 1, n + 1 => notAck m (notAck (m / 2 + 1) n) decreasing_by all_goals decreasing_tactic ``` ```leanOutput badInfer failed to prove termination, possible solutions: - Use `have`-expressions to prove the remaining goals - Use `termination_by` to specify a different well-founded relation - Use `decreasing_by` to specify your own tactic for discharging this kind of goal case h m n : Nat ⊢ m / 2 + 1 < m + 1 ``` In this case, explicitly stating the termination {tech}[measure] helps. ::: # Mutual Well-Founded Recursion %%% tag := "mutual-well-founded-recursion" %%% Lean supports the definition of {tech}[mutually recursive] functions using {tech}[well-founded recursion]. 
Mutual recursion may be introduced using a {tech}[mutual block], but it also results from {keywordOf Lean.Parser.Term.letrec}`let rec` expressions and {keywordOf Lean.Parser.Command.declaration}`where` blocks. The rules for mutual well-founded recursion are applied to a group of actually mutually recursive, lifted definitions, that results from the {ref "mutual-syntax"}[elaboration steps] for mutual groups. If any function in the mutual group has a {keywordOf Lean.Parser.Command.declaration}`termination_by` or {keywordOf Lean.Parser.Command.declaration}`decreasing_by` clause, well-founded recursion is attempted. If a termination {tech}[measure] is specified using {keywordOf Lean.Parser.Command.declaration}`termination_by` for _any_ function in the mutual group, then _all_ functions in the group must specify a termination measure, and they have to have the same type. If no termination argument is specified, the termination argument is {ref "inferring-well-founded-recursion"}[inferred, as described above]. In the case of mutual recursion, a third class of basic measures is considered during inference, namely for each function in the mutual group the measure that is `1` for that function and `0` for the others. This allows Lean to order the functions so that some calls from one function to another are allowed even if the parameters do not decrease. :::example "Mutual recursion without parameter decrease" In the following mutual function definitions, the parameter does not decrease in the call from {lean}`g` to {lean}`f`. Nonetheless, the definition is accepted due to the ordering imposed on the functions themselves by the additional basic measure. ```lean (name := fg) mutual def f : (n : Nat) → Nat | 0 => 0 | n + 1 => g n termination_by? def g (n : Nat) : Nat := (f n) + 1 termination_by? 
end ``` The inferred termination argument for {lean}`f` is: ```leanOutput fg Try this: [apply] termination_by n => (n, 0) ``` The inferred termination argument for {lean}`g` is: ```leanOutput fg Try this: [apply] termination_by (n, 1) ``` ::: # Preprocessing Function Definitions %%% tag := "well-founded-preprocessing" %%% Lean _preprocesses_ the function's body before determining the proof obligations at each call site, transforming it into an equivalent definition that may include additional information. This preprocessing step is primarily used to enrich the local context with additional assumptions that may be necessary in order to solve the termination proof obligations, freeing users from the need to perform equivalent transformations by hand. Preprocessing uses the {ref "the-simplifier"}[simplifier] and is extensible by the user. :::paragraph The preprocessing happens in three steps: 1. Lean annotates occurrences of a function's parameter, or a subterm of a parameter, with the {name}`wfParam` {tech}[gadget]. ```signature wfParam {α} (a : α) : α ``` More precisely, every occurrence of the function's parameters is wrapped in {name}`wfParam`. Whenever a {keywordOf Lean.Parser.Term.match}`match` expression has _any_ discriminant wrapped in {name}`wfParam`, the gadget is removed and every occurrence of a pattern match variable (regardless of whether it comes from the discriminant with the {name}`wfParam` gadget) is wrapped in {name}`wfParam`. The {name}`wfParam` gadget is additionally floated out of {tech}[projection function] applications. 2. The annotated function body is simplified using {ref "the-simplifier"}[the simplifier], using only simplification rules from the {attr}`wf_preprocess` {tech}[custom simp set]. 3. Finally, any left-over {name}`wfParam` markers are removed. Annotating function parameters that are used for well-founded recursion allows the preprocessing simplification rules to distinguish between parameters and other terms. 
::: :::syntax attr (title := "Preprocessing Simp Set for Well-Founded Recursion") ```grammar wf_preprocess ``` {includeDocstring Lean.Parser.Attr.wf_preprocess} ::: {docstring wfParam} Some rewrite rules in the {attr}`wf_preprocess` simp set apply generally, without heeding the {lean}`wfParam` marker. In particular, the theorem {name}`ite_eq_dite` is used to extend the context of an {ref "if-then-else"}[if-then-else] expression branch with an assumption about the condition:{margin}[This assumption's name should be an inaccessible name based on `h`, as is indicated by using {name}`binderNameHint` with the term {lean}`()`. Binder name hints are described in the {ref "bound-variable-name-hints"}[tactic language reference].] ```signature ite_eq_dite {P : Prop} {α : Sort u} {a b : α} [Decidable P] : (if P then a else b) = if h : P then binderNameHint h () a else binderNameHint h () b ``` ```lean -show section variable (xs : List α) (p : α → Bool) (f : α → β) (x : α) ``` :::paragraph Other rewrite rules use the {name}`wfParam` marker to restrict their applicability; they are used only when a function (like {name}`List.map`) is applied to a parameter or subterm of a parameter, but not otherwise. This is typically done in two steps: 1. A theorem such as {name}`List.map_wfParam` recognizes a call of {name}`List.map` on a function parameter (or subterm), and uses {name}`List.attach` to enrich the type of the list elements with the assertion that they are indeed elements of that list: ```signature List.map_wfParam (xs : List α) (f : α → β) : (wfParam xs).map f = xs.attach.unattach.map f ``` 2. A theorem such as {name}`List.map_unattach` makes that assertion available to the function parameter of {name}`List.map`. 
```signature List.map_unattach (P : α → Prop) (xs : List { x : α // P x }) (f : α → β) : xs.unattach.map f = xs.map fun ⟨x, h⟩ => binderNameHint x f <| binderNameHint h () <| f (wfParam x) ``` This theorem uses the {name}`binderNameHint` gadget to preserve a user-chosen binder name, should {lean}`f` be a lambda expression. By separating the introduction of {name}`List.attach` from the propagation of the introduced assumption, the desired {lean}`x ∈ xs` assumption is made available to {lean}`f` even in chains such as `(xs.reverse.filter p).map f`. ::: ```lean -show end ``` This preprocessing can be disabled by setting the option {option}`wf.preprocess` to {lean}`false`. To see the preprocessed function definition, before and after the removal of {name}`wfParam` markers, set the option {option}`trace.Elab.definition.wf` to {lean}`true`. {optionDocs trace.Elab.definition.wf} {spliceContents Manual.RecursiveDefs.WF.PreprocessExample} # Theory and Construction ```lean -show section variable {α : Type u} ``` This section gives a very brief glimpse into the mathematical constructions that underlie termination proofs via {tech}[well-founded recursion], which may surface occasionally. The elaboration of functions defined by well-founded recursion is based on the {name}`WellFounded.fix` operator. {docstring WellFounded.fix} The type {lean}`α` is instantiated with the function's (varying) parameters, packed into one type using {name}`PSigma`. The {name}`WellFounded` relation is constructed from the termination {tech}[measure] via {name}`invImage`. {docstring invImage} The function's body is passed to {name}`WellFounded.fix`, with parameters suitably packed and unpacked, and recursive calls are replaced with a call to the value provided by {name}`WellFounded.fix`. The termination proofs generated by the {keywordOf Lean.Parser.Command.declaration}`decreasing_by` tactics are inserted in the right place. 
Finally, the equational and unfolding theorems for the recursive function are proved from {name}`WellFounded.fix_eq`. These theorems hide the details of packing and unpacking arguments and describe the function's behavior in terms of the original definition. In the case of mutual recursion, an equivalent non-mutual function is constructed by combining the functions' arguments using {name}`PSum`, and pattern-matching on that sum type in the result type and the body. The definition of {name}`WellFounded` builds on the notion of _accessible elements_ of the relation: {docstring WellFounded} {docstring Acc} ::: example "Division by Iterated Subtraction: Termination Proof" The definition of division by iterated subtraction can be written explicitly using well-founded recursion. ```lean noncomputable def div (n k : Nat) : Nat := (inferInstanceAs (WellFoundedRelation Nat)).wf.fix (fun n r => if h : k = 0 then 0 else if h : k > n then 0 else 1 + (r (n - k) <| by show (n - k) < n omega)) n ``` The definition must be marked {keywordOf Lean.Parser.Command.declaration}`noncomputable` because well-founded recursion is not supported by the compiler. Like {tech}[recursors], it is part of Lean's logic. 
The definition of division should satisfy the following equations: * {lean}`∀{n k : Nat}, (k = 0) → div n k = 0` * {lean}`∀{n k : Nat}, (k > n) → div n k = 0` * {lean}`∀{n k : Nat}, (k ≠ 0) → (¬ k > n) → div n k = 1 + div (n - k) k` This reduction behavior does not hold {tech (key := "definitional equality")}[definitionally]: ```lean +error (name := nonDef) -keep theorem div.eq0 : div n 0 = 0 := by rfl ``` ```leanOutput nonDef Tactic `rfl` failed: The left-hand side div n 0 is not definitionally equal to the right-hand side 0 n : Nat ⊢ div n 0 = 0 ``` However, using `WellFounded.fix_eq` to unfold the well-founded recursion, the three equations can be proved to hold: ```lean theorem div.eq0 : div n 0 = 0 := by unfold div apply WellFounded.fix_eq theorem div.eq1 : k > n → div n k = 0 := by intro h unfold div rw [WellFounded.fix_eq] simp only [gt_iff_lt, dite_eq_ite, ite_eq_left_iff, Nat.not_lt] intros; omega theorem div.eq2 : ¬ k = 0 → ¬ (k > n) → div n k = 1 + div (n - k) k := by intros unfold div rw [WellFounded.fix_eq] simp_all only [ gt_iff_lt, Nat.not_lt, dite_false, dite_eq_ite, ite_false, ite_eq_right_iff ] omega ``` :::
reference-manual/Manual/RecursiveDefs/PartialFixpoint/Theory.lean
import VersoManual import Manual.Meta import Manual.Meta.Monotonicity open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode open Lean.Order #doc (Manual) "Theory and Construction" => %%% tag := "partial-fixpoint-theory" %%% The construction builds on a variant of the Knaster–Tarski theorem: In a chain-complete partial order, every monotone function has a least fixed point. The necessary theory is found in the `Lean.Order` namespace. This is not meant to be a general purpose library of order theoretic results. Instead, the definitions and theorems in `Lean.Order` are only intended as implementation details of the {keywordOf Lean.Parser.Command.declaration}`partial_fixpoint` feature, and they should be considered a private API that may change without notice. The notion of a partial order, and that of a chain-complete partial order, are represented by the type classes {name}`Lean.Order.PartialOrder` and {name}`Lean.Order.CCPO`, respectively. {docstring Lean.Order.PartialOrder +allowMissing} {docstring Lean.Order.CCPO +allowMissing} ```lean -show section open Lean.Order variable {α : Type u} {β : Type v} [PartialOrder α] [PartialOrder β] (f : α → β) (x y : α) ``` A function is monotone if it preserves partial orders. That is, if {lean}`x ⊑ y` then {lean}`f x ⊑ f y`. The operator `⊑` represents {name}`Lean.Order.PartialOrder.rel`. {docstring Lean.Order.monotone} The fixpoint of a monotone function can be taken using {name}`fix`, which indeed constructs a fixpoint, as shown by {name}`fix_eq`: {docstring Lean.Order.fix} {docstring Lean.Order.fix_eq} :::paragraph To construct the partial fixpoint, Lean first synthesizes a suitable {name}`CCPO` instance. 
```lean -show section universe u v variable (α : Type u) variable (β : α → Sort v) [∀ x, CCPO (β x)] variable (w : α) ``` * If the function's result type has a dedicated instance, like {name}`Option` has with {name}`instCCPOOption`, this is used together with the instance for the function type, {name}`instCCPOPi`, to construct an instance for the whole function's type. * Otherwise, if the function's type can be shown to be inhabited by a witness {lean}`w`, then the instance {name}`FlatOrder.instCCPO` for the wrapper type {lean}`FlatOrder w` is used. In this order, {lean}`w` is a least element and all other elements are incomparable. ```lean -show end ``` ::: Next, the recursive calls in the right-hand side of the function definitions are abstracted; the resulting function becomes the argument `f` of {name}`fix`. The monotonicity requirement is solved by the {tactic}`monotonicity` tactic, which applies compositional monotonicity lemmas in a syntax-driven way. ```lean -show section set_option linter.unusedVariables false variable {α : Sort u} {β : Sort v} [PartialOrder α] [PartialOrder β] (more : (x : α) → β) (x : α) local macro "…" x:term:arg "…" : term => `(more $x) ``` The tactic solves goals of the form {lean}`monotone (fun x => … x …)` using the following steps: * Applying {name}`monotone_const` when there is no dependency on {lean}`x` left. * Splitting on {keywordOf Lean.Parser.Term.match}`match` expressions. * Splitting on {keywordOf termIfThenElse}`if` expressions. * Moving {keywordOf Lean.Parser.Term.let}`let` expressions to the context, if the value and type do not depend on {lean}`x`. * Zeta-reducing a {keywordOf Lean.Parser.Term.let}`let` expression when the value and type do depend on {lean}`x`. 
* Applying lemmas annotated with {attr}`partial_fixpoint_monotone` ```lean -show end ``` The following monotonicity lemmas are registered, and should allow recursive calls under the given higher-order functions in the arguments indicated by `·` (but not the other arguments, shown as `_`). {monotonicityLemmas}
reference-manual/Manual/RecursiveDefs/Structural/CourseOfValuesExample.lean
import VersoManual import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Recursion Example (for inclusion elsewhere)" => :::example "Course-of-Values Tables" This definition is equivalent to {name}`List.below`: ```lean def List.below' {α : Type u} {motive : List α → Sort u} : List α → Sort (max (u + 1) u) | [] => PUnit | _ :: xs => motive xs ×' xs.below' (motive := motive) ``` ```lean -show theorem List.below_eq_below' : @List.below = @List.below' := by funext α motive xs induction xs <;> simp [below'] congr ``` In other words, for a given {tech}[motive], {lean}`List.below'` is a type that contains a realization of the motive for all suffixes of the list. More recursive arguments require further nested iterations of the product type. For instance, binary trees have two recursive occurrences. ```lean inductive Tree (α : Type u) : Type u where | leaf | branch (left : Tree α) (val : α) (right : Tree α) ``` Its corresponding course-of-values table contains the realizations of the motive for all subtrees: ```lean def Tree.below' {α : Type u} {motive : Tree α → Sort u} : Tree α → Sort (max (u + 1) u) | .leaf => PUnit | .branch left _val right => (motive left ×' left.below' (motive := motive)) ×' (motive right ×' right.below' (motive := motive)) ``` ```lean -show theorem Tree.below_eq_below' : @Tree.below = @Tree.below' := by funext α motive t induction t next => simp [Tree.below'] next ihl ihr => simp [Tree.below', ihl, ihr] ``` For both lists and trees, the `brecOn` operator expects just a single case, rather than one per constructor. This case accepts a list or tree along with a table of results for all smaller values; from this, it should satisfy the motive for the provided value. Dependent case analysis of the provided value automatically refines the type of the memo table, providing everything needed. 
The following definitions are equivalent to {name}`List.brecOn` and {name}`Tree.brecOn`, respectively. The primitive recursive helpers {name}`List.brecOnTable` and {name}`Tree.brecOnTable` compute the course-of-values tables along with the final results, and the actual definitions of the `brecOn` operators simply project out the result. ```lean def List.brecOnTable {α : Type u} {motive : List α → Sort u} (xs : List α) (step : (ys : List α) → ys.below' (motive := motive) → motive ys) : motive xs ×' xs.below' (motive := motive) := match xs with | [] => ⟨step [] PUnit.unit, PUnit.unit⟩ | x :: xs => let res := xs.brecOnTable (motive := motive) step let val := step (x :: xs) res ⟨val, res⟩ ``` ```lean def Tree.brecOnTable {α : Type u} {motive : Tree α → Sort u} (t : Tree α) (step : (ys : Tree α) → ys.below' (motive := motive) → motive ys) : motive t ×' t.below' (motive := motive) := match t with | .leaf => ⟨step .leaf PUnit.unit, PUnit.unit⟩ | .branch left val right => let resLeft := left.brecOnTable (motive := motive) step let resRight := right.brecOnTable (motive := motive) step let branchRes := ⟨resLeft, resRight⟩ let val := step (.branch left val right) branchRes ⟨val, branchRes⟩ ``` ```lean def List.brecOn' {α : Type u} {motive : List α → Sort u} (xs : List α) (step : (ys : List α) → ys.below' (motive := motive) → motive ys) : motive xs := (xs.brecOnTable (motive := motive) step).1 ``` ```lean def Tree.brecOn' {α : Type u} {motive : Tree α → Sort u} (t : Tree α) (step : (ys : Tree α) → ys.below' (motive := motive) → motive ys) : motive t := (t.brecOnTable (motive := motive) step).1 ``` ```lean -show -keep -- Proving the above-claimed equivalence is too time consuming, but evaluating a few examples will at least catch silly mistakes! 
/-- info: fun motive x y z step => step [x, y, z] ⟨step [y, z] ⟨step [z] ⟨step [] PUnit.unit, PUnit.unit⟩, step [] PUnit.unit, PUnit.unit⟩, step [z] ⟨step [] PUnit.unit, PUnit.unit⟩, step [] PUnit.unit, PUnit.unit⟩ -/ #check_msgs in #reduce fun motive x y z step => List.brecOn' (motive := motive) [x, y, z] step /-- info: fun motive x y z step => step [x, y, z] ⟨step [y, z] ⟨step [z] ⟨step [] PUnit.unit, PUnit.unit⟩, step [] PUnit.unit, PUnit.unit⟩, step [z] ⟨step [] PUnit.unit, PUnit.unit⟩, step [] PUnit.unit, PUnit.unit⟩ -/ #check_msgs in #reduce fun motive x y z step => List.brecOn (motive := motive) [x, y, z] step /-- info: fun motive x z step => step ((Tree.leaf.branch x Tree.leaf).branch z Tree.leaf) ⟨⟨step (Tree.leaf.branch x Tree.leaf) ⟨⟨step Tree.leaf PUnit.unit, PUnit.unit⟩, step Tree.leaf PUnit.unit, PUnit.unit⟩, ⟨step Tree.leaf PUnit.unit, PUnit.unit⟩, step Tree.leaf PUnit.unit, PUnit.unit⟩, step Tree.leaf PUnit.unit, PUnit.unit⟩ -/ #check_msgs in #reduce fun motive x z step => Tree.brecOn' (motive := motive) (.branch (.branch .leaf x .leaf) z .leaf) step /-- info: fun motive x z step => step ((Tree.leaf.branch x Tree.leaf).branch z Tree.leaf) ⟨⟨step (Tree.leaf.branch x Tree.leaf) ⟨⟨step Tree.leaf PUnit.unit, PUnit.unit⟩, step Tree.leaf PUnit.unit, PUnit.unit⟩, ⟨step Tree.leaf PUnit.unit, PUnit.unit⟩, step Tree.leaf PUnit.unit, PUnit.unit⟩, step Tree.leaf PUnit.unit, PUnit.unit⟩ -/ #check_msgs in #reduce fun motive x z step => Tree.brecOn (motive := motive) (.branch (.branch .leaf x .leaf) z .leaf) step ``` :::
reference-manual/Manual/RecursiveDefs/Structural/RecursorExample.lean
import VersoManual import Manual.Meta open Verso.Genre Manual open Verso.Genre Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Recursion Example (for inclusion elsewhere)" => ```lean -show section variable (n k : Nat) (mot : Nat → Sort u) ``` :::example "Recursion vs Recursors" Addition of natural numbers can be defined via recursion on the second argument. This function is straightforwardly structurally recursive. ```lean def add (n : Nat) : Nat → Nat | .zero => n | .succ k => .succ (add n k) ``` Defined using {name}`Nat.rec`, it is much further from the notations that most people are used to. ```lean def add' (n : Nat) := Nat.rec (motive := fun _ => Nat) n (fun k soFar => .succ soFar) ``` Structural recursive calls made on data that isn't the immediate child of the function parameter require either creativity or a complex yet systematic encoding. ```lean def half : Nat → Nat | 0 | 1 => 0 | n + 2 => half n + 1 ``` One way to think about this function is as a structural recursion that flips a bit at each call, only incrementing the result when the bit is set. ```lean def helper : Nat → Bool → Nat := Nat.rec (motive := fun _ => Bool → Nat) (fun _ => 0) (fun _ soFar => fun b => (if b then Nat.succ else id) (soFar !b)) def half' (n : Nat) : Nat := helper n false ``` ```lean (name := halfTest) #eval [0, 1, 2, 3, 4, 5, 6, 7, 8].map half' ``` ```leanOutput halfTest [0, 0, 1, 1, 2, 2, 3, 3, 4] ``` Instead of creativity, a general technique called {deftech}[course-of-values recursion] can be used. Course-of-values recursion uses helpers that can be systematically derived for every inductive type, defined in terms of the recursor; Lean derives them automatically. For every {lean}`Nat` {lean}`n`, the type {lean}`n.below (motive := mot)` provides a value of type {lean}`mot k` for all {lean}`k < n`, represented as an iterated {TODO}[xref sigma] dependent pair type. 
The course-of-values recursor {name}`Nat.brecOn` allows a function to use the result for any smaller {lean}`Nat`. Using it to define the function is inconvenient: ```lean noncomputable def half'' (n : Nat) : Nat := Nat.brecOn n (motive := fun _ => Nat) fun k soFar => match k, soFar with | 0, _ | 1, _ => 0 | _ + 2, ⟨_, ⟨h, _⟩⟩ => h + 1 ``` The function is marked {keywordOf Lean.Parser.Command.declaration}`noncomputable` because the compiler doesn't support generating code for course-of-values recursion, which is intended for reasoning rather than efficient code. The kernel can still be used to test the function, however: ```lean (name := halfTest2) #reduce [0,1,2,3,4,5,6,7,8].map half'' ``` ```leanOutput halfTest2 [0, 0, 1, 1, 2, 2, 3, 3, 4] ``` The dependent pattern matching in the body of {lean}`half''` can also be encoded using recursors (specifically, {name}`Nat.casesOn`), if necessary: ```lean noncomputable def half''' (n : Nat) : Nat := n.brecOn (motive := fun _ => Nat) fun k => k.casesOn (motive := fun k' => (k'.below (motive := fun _ => Nat)) → Nat) (fun _ => 0) (fun k' => k'.casesOn (motive := fun k'' => (k''.succ.below (motive := fun _ => Nat)) → Nat) (fun _ => 0) (fun _ soFar => soFar.2.1.succ)) ``` This definition still works. ```lean (name := halfTest3) #reduce [0,1,2,3,4,5,6,7,8].map half''' ``` ```leanOutput halfTest3 [0, 0, 1, 1, 2, 2, 3, 3, 4] ``` However, it is now far from the original definition and it has become difficult for most people to understand. Recursors are an excellent logical foundation, but not an easy way to write programs or proofs. ::: ```lean -show end ```
reference-manual/Manual/RecursiveDefs/WF/GuessLexExample.lean
import VersoManual import Manual.Meta /-! This is extracted into its own file because line numbers show up in the error message, and we don't want to update it over and over again as we edit the large file. -/ open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Termination failure (for inclusion elsewhere)" => :::example "Termination failure" If there is no {keywordOf Lean.Parser.Command.declaration}`termination_by` clause, Lean attempts to infer a measure for well-founded recursion. If it fails, then it prints the table mentioned above. In this example, the {keywordOf Lean.Parser.Command.declaration}`decreasing_by` clause simply prevents Lean from also attempting structural recursion; this keeps the error message specific. ```lean +error -keep (name := badwf) def f : (n m l : Nat) → Nat | n+1, m+1, l+1 => [ f (n+1) (m+1) (l+1), f (n+1) (m-1) (l), f (n) (m+1) (l) ].sum | _, _, _ => 0 decreasing_by all_goals decreasing_tactic ``` ```leanOutput badwf (whitespace := lax) Could not find a decreasing measure. The basic measures relate at each recursive call as follows: (<, ≤, =: relation proved, ? all proofs failed, _: no proof attempted) n m l 1) 32:6-25 = = = 2) 33:6-23 = < _ 3) 34:6-23 < _ _ Please use `termination_by` to specify a decreasing measure. ``` The three recursive calls are identified by their source positions. This message conveys the following facts: * In the first recursive call, all arguments are (provably) equal to the parameters * In the second recursive call, the first argument is equal to the first parameter and the second argument is provably smaller than the second parameter. The third parameter was not checked for this recursive call, because it was not necessary to determine that no suitable termination argument exists. * In the third recursive call, the first argument decreases strictly, and the other arguments were not checked. 
When termination proofs fail in this manner, a good technique to discover the problem is to explicitly indicate the expected termination argument using {keywordOf Lean.Parser.Command.declaration}`termination_by`. This will surface the messages from the failing tactic. :::
reference-manual/Manual/RecursiveDefs/WF/PreprocessExample.lean
import VersoManual import Manual.Meta /-! This is extracted into its own file because line numbers show up in the error message, and we don't want to update it over and over again as we edit the large file. -/ open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode set_option linter.constructorNameAsVariable false #doc (Manual) "Well-founded recursion preprocessing example (for inclusion elsewhere)" => ::::example "Preprocessing for a custom data type" This example demonstrates what is necessary to enable automatic well-founded recursion for a custom container type. The structure type {name}`Pair` is a homogeneous pair: it contains precisely two elements, both of which have the same type. It can be thought of as being similar to a list or array that always contains precisely two elements. As a container, {name}`Pair` can support a {name Pair.map}`map` operation. To support well-founded recursion in which recursive calls occur in the body of a function being mapped over a {name}`Pair`, some additional definitions are required, including a membership predicate, a theorem that relates the size of a member to the size of the containing pair, helpers to introduce and eliminate assumptions about membership, {attr}`wf_preprocess` rules to insert these helpers, and an extension to the {tactic}`decreasing_trivial` tactic. Each of these steps makes it easier to work with {name}`Pair`, but none are strictly necessary; there's no need to immediately implement all steps for every type. ```lean /-- A homogeneous pair -/ structure Pair (α : Type u) where fst : α snd : α /-- Mapping a function over the elements of a pair -/ def Pair.map (f : α → β) (p : Pair α) : Pair β where fst := f p.fst snd := f p.snd ``` Defining a nested inductive data type of binary trees that uses {name}`Pair` and attempting to define its {name Tree.map}`map` function demonstrates the need for preprocessing rules. 
```lean /-- A binary tree defined using `Pair` -/ inductive Tree (α : Type u) where | leaf : α → Tree α | node : Pair (Tree α) → Tree α ``` A straightforward definition of the {name Tree.map}`map` function fails: ```lean +error -keep (name := badwf) def Tree.map (f : α → β) : Tree α → Tree β | leaf x => leaf (f x) | node p => node (p.map (fun t' => t'.map f)) termination_by t => t ``` ```leanOutput badwf (whitespace := lax) failed to prove termination, possible solutions: - Use `have`-expressions to prove the remaining goals - Use `termination_by` to specify a different well-founded relation - Use `decreasing_by` to specify your own tactic for discharging this kind of goal α : Type u_1 p : Pair (Tree α) t' : Tree α ⊢ sizeOf t' < 1 + sizeOf p ``` :::paragraph ```lean -show section variable (t' : Tree α) (p : Pair (Tree α)) ``` Clearly the proof obligation is not solvable, because nothing connects {lean}`t'` to {lean}`p`. ```lean -show end ``` ::: The standard idiom to enable this kind of function definition is to have a function that enriches each element of a collection with a proof that they are, in fact, elements of the collection. Stating this property requires a membership predicate. ```lean inductive Pair.Mem (p : Pair α) : α → Prop where | fst : Mem p p.fst | snd : Mem p p.snd instance : Membership α (Pair α) where mem := Pair.Mem ``` Every inductive type automatically has a {name}`SizeOf` instance. An element of a collection should be smaller than the collection, but this fact must be proved before it can be used to construct a termination proof: ```lean theorem Pair.sizeOf_lt_of_mem {α} [SizeOf α] {p : Pair α} {x : α} (h : x ∈ p) : sizeOf x < sizeOf p := by cases h <;> cases p <;> (simp; omega) ``` The next step is to define {name Pair.attach}`attach` and {name Pair.unattach}`unattach` functions that enrich the elements of the pair with a proof that they are elements of the pair, or remove said proof. 
Here, the type of {name}`Pair.unattach` is more general and can be used with any {ref "Subtype"}[subtype]; this is a typical pattern. ```lean def Pair.attach (p : Pair α) : Pair {x : α // x ∈ p} where fst := ⟨p.fst, .fst⟩ snd := ⟨p.snd, .snd⟩ def Pair.unattach {P : α → Prop} : Pair {x : α // P x} → Pair α := Pair.map Subtype.val ``` {name Tree.map}`Tree.map` can now be defined by using {name}`Pair.attach` and {name}`Pair.sizeOf_lt_of_mem` explicitly: ```lean -keep def Tree.map (f : α → β) : Tree α → Tree β | leaf x => leaf (f x) | node p => node (p.attach.map (fun ⟨t', _⟩ => t'.map f)) termination_by t => t decreasing_by have := Pair.sizeOf_lt_of_mem ‹_› simp_all +arith omega ``` This transformation can be made fully automatic. The preprocessing feature of well-founded recursion can be used to automate the introduction of the {lean}`Pair.attach` function. This is done in two stages. First, when {name}`Pair.map` is applied to one of the function's parameters, it is rewritten to an {name Pair.attach}`attach`/{name Pair.unattach}`unattach` composition. Then, when a function is mapped over the result of {name}`Pair.unattach`, the function is rewritten to accept the proof of membership and bring it into scope. ```lean @[wf_preprocess] theorem Pair.map_wfParam (f : α → β) (p : Pair α) : (wfParam p).map f = p.attach.unattach.map f := by cases p simp [wfParam, Pair.attach, Pair.unattach, Pair.map] @[wf_preprocess] theorem Pair.map_unattach {P : α → Prop} (p : Pair (Subtype P)) (f : α → β) : p.unattach.map f = p.map fun ⟨x, h⟩ => binderNameHint x f <| f (wfParam x) := by cases p; simp [wfParam, Pair.unattach, Pair.map] ``` Now the function body can be written without extra considerations, and the membership assumption is still available to the termination proof. 
```lean -keep def Tree.map (f : α → β) : Tree α → Tree β | leaf x => leaf (f x) | node p => node (p.map (fun t' => t'.map f)) termination_by t => t decreasing_by have := Pair.sizeOf_lt_of_mem ‹_› simp_all omega ``` The proof can be made fully automatic by adding {name Pair.sizeOf_lt_of_mem}`sizeOf_lt_of_mem` to the {tactic}`decreasing_trivial` tactic, as is done for similar built-in theorems. ```lean macro "sizeOf_pair_dec" : tactic => `(tactic| with_reducible have := Pair.sizeOf_lt_of_mem ‹_› omega done) macro_rules | `(tactic| decreasing_trivial) => `(tactic| sizeOf_pair_dec) def Tree.map (f : α → β) : Tree α → Tree β | leaf x => leaf (f x) | node p => node (p.map (fun t' => t'.map f)) termination_by t => t ``` To keep the example short, the {tactic}`sizeOf_pair_dec` tactic is tailored to this particular recursion pattern and isn't really general enough for a general-purpose container library. It does, however, demonstrate that libraries can be just as convenient in practice as the container types in the standard library. ::::
reference-manual/Manual/Monads/Laws.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Laws" => %%% tag := "monad-laws" %%% ::::keepEnv ```lean -show section Laws universe u u' v axiom f : Type u → Type v axiom m : Type u → Type v variable [Functor f] variable [Applicative f] variable [Monad m] axiom α : Type u' axiom β : Type u' axiom γ : Type u' axiom x : f α ``` ```lean -show section F variable {f : Type u → Type v} [Functor f] {α β : Type u} {g : α → β} {h : β → γ} {x : f α} ``` Having {name Functor.map}`map`, {name Pure.pure}`pure`, {name Seq.seq}`seq`, and {name Bind.bind}`bind` operators with the appropriate types is not really sufficient to have a functor, applicative functor, or monad. These operators must additionally satisfy certain axioms, which are often called the {deftech}_laws_ of the type class. For a functor, the {name Functor.map}`map` operation must preserve identity and function composition. In other words, given a purported {name}`Functor` {lean}`f`, for all {lean}`x`​` : `​{lean}`f α`: * {lean}`id <$> x = x`, and * for all function {lean}`g` and {lean}`h`, {lean}`(h ∘ g) <$> x = h <$> g <$> x`. Instances that violate these assumptions can be very surprising! Additionally, because {lean}`Functor` includes {name Functor.mapConst}`mapConst` to enable instances to provide a more efficient implementation, a lawful functor's {name Functor.mapConst}`mapConst` should be equivalent to its default implementation. The Lean standard library does not require proofs of these properties in every instance of {name}`Functor`. Nonetheless, if an instance violates them, then it should be considered a bug. When proofs of these properties are necessary, an instance implicit parameter of type {lean}`LawfulFunctor f` can be used. The {name}`LawfulFunctor` class includes the necessary proofs. 
{docstring LawfulFunctor} ```lean -show end F ``` In addition to proving that the potentially-optimized {name}`SeqLeft.seqLeft` and {name}`SeqRight.seqRight` operations are equivalent to their default implementations, an applicative functor {lean}`f` must satisfy four laws. :::TODO Discuss the relationship between the traditional Applicative laws and this presentation ::: {docstring LawfulApplicative} The {deftech}[monad laws] specify that {name}`pure` followed by {name}`bind` should be equivalent to function application (that is, {name}`pure` has no effects), that {name}`bind` followed by {name}`pure` around a function application is equivalent to {name Functor.map}`map`, and that {name}`bind` is associative. {docstring LawfulMonad} {docstring LawfulMonad.mk'} ::::
reference-manual/Manual/Monads/Zoo.lean
import Manual.Monads.Zoo.State import Manual.Monads.Zoo.Reader import Manual.Monads.Zoo.Except import Manual.Monads.Zoo.Combined import Manual.Monads.Zoo.Id import Manual.Monads.Zoo.Option import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false -- set_option trace.SubVerso.Highlighting.Code true #doc (Manual) "Varieties of Monads" => %%% tag := "monad-varieties" %%% The {lean}`IO` monad has many, many effects, and is used for writing programs that need to interact with the world. It is described in {ref "io"}[its own section]. Programs that use {lean}`IO` are essentially black boxes: they are typically not particularly amenable to verification. Many algorithms are easiest to express with a much smaller set of effects. These effects can often be simulated; for example, mutable state can be simulated by passing around a tuple that contains both the program's value and the state. These simulated effects are easier to reason formally about, because they are defined using ordinary code rather than new language primitives. The standard library provides abstractions for working with commonly-used effects. Many frequently-used effects fall into a small number of categories: : {deftech}[State monads] have mutable state Computations that have access to some data that may be modified by other parts of the computation use _mutable state_. State can be implemented in a variety of ways, described in the section on {ref "state-monads"}[state monads] and captured in the {name}`MonadState` type class. 
: {deftech}[Reader monads] are parameterized computations Computations that can read the value of some parameter provided by a context exist in most programming languages, but many languages that feature state and exceptions as first-class features do not have built-in facilities for defining new parameterized computations. Typically, these computations are provided with a parameter value when invoked, and sometimes they can locally override it. Parameter values have _dynamic extent_: the value provided most recently in the call stack is the one that is used. They can be simulated by passing a value unchanged through a sequence of function calls; however, this technique can make code harder to read and introduces a risk that the values may be passed incorrectly to further calls by mistake. They can also be simulated using mutable state with a careful discipline surrounding the modification of the state. Monads that maintain a parameter, potentially allowing it to be overridden in a section of the call stack, are called _reader monads_. Reader monads are captured in the {lean}`MonadReader` type class. Additionally, reader monads that allow the parameter value to be locally overridden are captured in the {lean}`MonadWithReader` type class. : {deftech}[Exception monads] have exceptions Computations that may terminate early with an exceptional value use _exceptions_. They are typically modeled with a sum type that has a constructor for ordinary termination and a constructor for early termination with errors. Exception monads are described in the section on {ref "exception-monads"}[exception monads], and captured in the {name}`MonadExcept` type class. # Monad Type Classes Using type classes like {lean}`MonadState` and {lean}`MonadExcept` allow client code to be polymorphic with respect to monads. Together with automatic lifting, this allows programs to be reusable in many different monads and makes them more robust to refactoring. 
It's important to be aware that effects in a monad may not interact in only one way. For example, a monad with state and exceptions may or may not roll back state changes when an exception is thrown. If this matters for the correctness of a function, then it should use a more specific signature. ::::keepEnv :::example "Effect Ordering" The function {name}`sumNonFives` adds the contents of a list using a state monad, terminating early if it encounters a {lean}`5`. ```lean def sumNonFives {m} [Monad m] [MonadState Nat m] [MonadExcept String m] (xs : List Nat) : m Unit := do for x in xs do if x == 5 then throw "Five was encountered" else modify (· + x) ``` Running it in one monad returns the state at the time that {lean}`5` was encountered: ```lean (name := exSt) #eval sumNonFives (m := ExceptT String (StateM Nat)) [1, 2, 3, 4, 5, 6] |>.run |>.run 0 ``` ```leanOutput exSt (Except.error "Five was encountered", 10) ``` In another, the state is discarded: ```lean (name := stEx) #eval sumNonFives (m := StateT Nat (Except String)) [1, 2, 3, 4, 5, 6] |>.run 0 ``` ```leanOutput stEx Except.error "Five was encountered" ``` In the second case, an exception handler would roll back the state to its value at the start of the {keywordOf Lean.Parser.Term.termTry}`try`. The following function is thus incorrect: ```lean /-- Computes the sum of the non-5 prefix of a list. -/ def sumUntilFive {m} [Monad m] [MonadState Nat m] [MonadExcept String m] (xs : List Nat) : m Nat := do MonadState.set 0 try sumNonFives xs catch _ => pure () get ``` In one monad, the answer is correct: ```lean (name := exSt2) #eval sumUntilFive (m := ExceptT String (StateM Nat)) [1, 2, 3, 4, 5, 6] |>.run |>.run' 0 ``` ```leanOutput exSt2 Except.ok 10 ``` In the other, it is not: ```lean (name := stEx2) #eval sumUntilFive (m := StateT Nat (Except String)) [1, 2, 3, 4, 5, 6] |>.run' 0 ``` ```leanOutput stEx2 Except.ok 0 ``` ::: :::: A single monad may support multiple version of the same effect. 
For example, there might be a mutable {lean}`Nat` and a mutable {lean}`String` or two separate reader parameters. As long as they have different types, it should be convenient to access both. In typical use, some monadic operations that are overloaded in type classes have type information available for {tech (key := "synthesis")}[instance synthesis], while others do not. For example, the argument passed to {name MonadState.set}`set` determines the type of the state to be used, while {name MonadState.get}`get` takes no such argument. The type information present in applications of {name MonadState.set}`set` can be used to pick the correct instance when multiple states are available, which suggests that the type of the mutable state should be an input parameter or {tech}[semi-output parameter] so that it can be used to select instances. The lack of type information present in uses of {name MonadState.get}`get`, on the other hand, suggests that the type of the mutable state should be an {tech}[output parameter] in {lean}`MonadState`, so type class synthesis determines the state's type from the monad itself. This dichotomy is solved by having two versions of many of the effect type classes. The version with a semi-output parameter has the suffix `-Of`, and its operations take types explicitly as needed. Examples include {name}`MonadStateOf`, {name}`MonadReaderOf`, and {name}`MonadExceptOf`. The operations with explicit type parameters have names ending in `-The`, such as {name}`getThe`, {name}`readThe`, and {name}`tryCatchThe`. The name of the version with an output parameter is undecorated. The standard library exports a mix of operations from the `-Of` and undecorated versions of each type class, based on what has good inference behavior in typical use cases. 
:::table +header * * Operation * From Class * Notes * * {name}`get` * {name}`MonadState` * Output parameter improves type inference * * {name}`set` * {name}`MonadStateOf` * Semi-output parameter uses type information from {name}`set`'s argument * * {name}`modify` * {name}`MonadState` * Output parameter is needed to allow functions without annotations * * {name}`modifyGet` * {name}`MonadState` * Output parameter is needed to allow functions without annotations * * {name}`read` * {name}`MonadReader` * Output parameter is needed due to lack of type information from arguments * * {name}`readThe` * {name}`MonadReaderOf` * Semi-output parameter uses the provided type to guide synthesis * * {name}`withReader` * {name}`MonadWithReader` * Output parameter avoids the need for type annotations on the function * * {name}`withTheReader` * {name}`MonadWithReaderOf` * Semi-output parameter uses provided type to guide synthesis * * {name}`throw` * {name}`MonadExcept` * Output parameter enables the use of constructor dot notation for the exception * * {name}`throwThe` * {name}`MonadExceptOf` * Semi-output parameter uses provided type to guide synthesis * * {name}`tryCatch` * {name}`MonadExcept` * Output parameter enables the use of constructor dot notation for the exception * * {name}`tryCatchThe` * {name}`MonadExceptOf` * Semi-output parameter uses provided type to guide synthesis ::: ```lean -show example : @get = @MonadState.get := by rfl example : @set = @MonadStateOf.set := by rfl example {inst} (f : σ → σ) : @modify σ m inst f = @MonadState.modifyGet σ m inst PUnit fun (s : σ) => (PUnit.unit, f s) := by rfl example : @modifyGet = @MonadState.modifyGet := by rfl example : @read = @MonadReader.read := by rfl example : @readThe = @MonadReaderOf.read := by rfl example : @withReader = @MonadWithReader.withReader := by rfl example : @withTheReader = @MonadWithReaderOf.withReader := by rfl example : @throw = @MonadExcept.throw := by rfl example : @throwThe = @MonadExceptOf.throw := 
by rfl example : @tryCatch = @MonadExcept.tryCatch := by rfl example : @tryCatchThe = @MonadExceptOf.tryCatch := by rfl ``` :::example "State Types" The state monad {name}`M` has two separate states: a {lean}`Nat` and a {lean}`String`. ```lean abbrev M := StateT Nat (StateM String) ``` Because {name}`get` is an alias for {name}`MonadState.get`, the state type is an output parameter. This means that Lean selects a state type automatically, in this case the one from the outermost monad transformer: ```lean (name := getM) #check (get : M _) ``` ```leanOutput getM get : M Nat ``` Only the outermost may be used, because the type of the state is an output parameter. ```lean (name := getMStr) +error #check (get : M String) ``` ```leanOutput getMStr failed to synthesize instance of type class MonadState String M Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` Providing the state type explicitly using {name}`getThe` from {name}`MonadStateOf` allows both states to be read. ```lean (name := getTheM) #check ((getThe String, getThe Nat) : M String × M Nat) ``` ```leanOutput getTheM (getThe String, getThe Nat) : M String × M Nat ``` Setting a state works for either type, because the state type is a {tech}[semi-output parameter] on {name}`MonadStateOf`. ```lean (name := setNat) #check (set 4 : M Unit) ``` ```leanOutput setNat set 4 : M PUnit ``` ```lean (name := setStr) #check (set "Four" : M Unit) ``` ```leanOutput setStr set "Four" : M PUnit ``` ::: # Monad Transformers %%% tag := "monad-transformers" %%% A {deftech}_monad transformer_ is a function that, when provided with a monad, gives back a new monad. Typically, this new monad has all the effects of the original monad along with some additional ones. 
```lean -show variable {α : Type u} (T : (Type u → Type v) → Type u → Type w) (m : Type u → Type v) ``` A monad transformer consists of the following: * A function {lean}`T` that constructs the new monad's type from an existing monad * A `run` function that adapts a {lean}`T m α` into some variant of {lean}`m`, often requiring additional parameters and returning a more specific type under {lean}`m` * An instance of {lean}`[Monad m] → Monad (T m)` that allows the transformed monad to be used as a monad * An instance of {lean}`MonadLift` that allows the original monad's code to be used in the transformed monad * If possible, an instance of {lean}`MonadControl m (T m)` that allows actions from the transformed monad to be used in the original monad Typically, a monad transformer also provides instances of one or more type classes that describe the effects that it introduces. The transformer's {name}`Monad` and {name}`MonadLift` instances make it practical to write code in the transformed monad, while the type class instances allow the transformed monad to be used with polymorphic functions. ::::keepEnv ```lean -show universe u v variable {m : Type u → Type v} {α : Type u} ``` :::example "The Identity Monad Transformer " The identity monad transformer neither adds capabilities to nor removes capabilities from the transformed monad.
Its definition is the identity function, suitably specialized: ```lean def IdT (m : Type u → Type v) : Type u → Type v := m ``` Similarly, the {name IdT.run}`run` function requires no additional arguments and just returns an {lean}`m α`: ```lean def IdT.run (act : IdT m α) : m α := act ``` The monad instance relies on the monad instance for the transformed monad, selecting it via {tech}[type ascriptions]: ```lean instance [Monad m] : Monad (IdT m) where pure x := (pure x : m _) bind x f := (x >>= f : m _) ``` Because {lean}`IdT m` is definitionally equal to {lean}`m`, the {lean}`MonadLift m (IdT m)` instance doesn't need to modify the action being lifted: ```lean instance : MonadLift m (IdT m) where monadLift x := x ``` The {lean}`MonadControl` instance is similarly simple. ```lean instance [Monad m] : MonadControl m (IdT m) where stM α := α liftWith f := f (fun x => Id.run <| pure x) restoreM v := v ``` ::: :::: The Lean standard library provides transformer versions of many different monads, including {name}`ReaderT`, {name}`ExceptT`, and {name}`StateT`, along with variants using other representations such as {name}`StateCpsT`, {name StateRefT'}`StateRefT`, and {name}`ExceptCpsT`. Additionally, the {name}`EStateM` monad is equivalent to combining {name}`ExceptT` and {name}`StateT`, but it can use a more specialized representation to improve performance. {include 0 Monads.Zoo.Id} {include 0 Monads.Zoo.State} {include 0 Monads.Zoo.Reader} {include 0 Monads.Zoo.Option} {include 0 Monads.Zoo.Except} {include 0 Monads.Zoo.Combined}
reference-manual/Manual/Monads/Syntax.lean
import VersoManual import Manual.Meta import Manual.Papers import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false -- set_option trace.SubVerso.Highlighting.Code true set_option guard_msgs.diff true #doc (Manual) "Syntax" => Lean supports programming with functors, applicative functors, and monads via special syntax: * Infix operators are provided for the most common operations. * An embedded language called {tech}[{keywordOf Lean.Parser.Term.do}`do`-notation] allows the use of imperative syntax when writing programs in a monad. # Infix Operators Infix operators are primarily useful in smaller expressions, or when there is no {lean}`Monad` instance. ## Functors ```lean -show section FOps variable {f : Type u → Type v} [Functor f] {α β : Type u} {g : α → β} {x : f α} ``` There are two infix operators for {name}`Functor.map`. :::syntax term (title := "Functor Operators") {lean}`g <$> x` is short for {lean}`Functor.map g x`. ```grammar $_ <$> $_ ``` {lean}`x <&> g` is short for {lean}`Functor.map g x`. ```grammar $_ <&> $_ ``` ::: ```lean -show example : g <$> x = Functor.map g x := by rfl example : x <&> g = Functor.map g x := by rfl end FOps ``` ## Applicative Functors ```lean -show section AOps variable {f : Type u → Type v} [Applicative f] [Alternative f] {α β : Type u} {g : f (α → β)} {x e1 e e' : f α} {e2 : f β} ``` :::syntax term (title := "Applicative Operators") {lean}`g <*> x` is short for {lean}`Seq.seq g (fun () => x)`. The function is inserted to delay evaluation because control might not reach the argument. ```grammar $_ <*> $_ ``` {lean}`e1 *> e2` is short for {lean}`SeqRight.seqRight e1 (fun () => e2)`. ```grammar $_ *> $_ ``` {lean}`e1 <* e2` is short for {lean}`SeqLeft.seqLeft e1 (fun () => e2)`. ```grammar $_ <* $_ ``` ::: Many applicative functors also support failure and recovery via the {name}`Alternative` type class. 
This class also has an infix operator. :::syntax term (title := "Alternative Operators") {lean}`e <|> e'` is short for {lean}`OrElse.orElse e (fun () => e')`. The function is inserted to delay evaluation because control might not reach the argument. ```grammar $_ <|> $_ ``` ::: ```lean -show example : g <*> x = Seq.seq g (fun () => x) := by rfl example : e1 *> e2 = SeqRight.seqRight e1 (fun () => e2) := by rfl example : e1 <* e2 = SeqLeft.seqLeft e1 (fun () => e2) := by rfl example : (e <|> e') = (OrElse.orElse e (fun () => e')) := by rfl end AOps ``` :::::keepEnv ```lean structure User where name : String favoriteNat : Nat def main : IO Unit := pure () ``` ::::example "Infix `Functor` and `Applicative` Operators" A common functional programming idiom is to use a pure function in some context with effects by applying it via {name}`Functor.map` and {name}`Seq.seq`. The function is applied to its sequence of arguments using `<$>`, and the arguments are separated by `<*>`. In this example, the constructor {name}`User.mk` is applied via this idiom in the body of {lean}`main`. :::ioExample ```ioLean def getName : IO String := do IO.println "What is your name?" return (← (← IO.getStdin).getLine).trimAsciiEnd.copy partial def getFavoriteNat : IO Nat := do IO.println "What is your favorite natural number?" let line ← (← IO.getStdin).getLine if let some n := line.trimAscii.copy.toNat? then return n else IO.println "Let's try again." getFavoriteNat structure User where name : String favoriteNat : Nat deriving Repr def main : IO Unit := do let user ← User.mk <$> getName <*> getFavoriteNat IO.println (repr user) ``` When run with this input: ```stdin A. Lean User None 42 ``` it produces this output: ```stdout What is your name? What is your favorite natural number? Let's try again. What is your favorite natural number? { name := "A. Lean User", favoriteNat := 42 } ``` ::: :::: ::::: ## Monads Monads are primarily used via {tech}[{keywordOf Lean.Parser.Term.do}`do`-notation]. 
However, it can sometimes be convenient to describe monadic computations via operators. ```lean -show section MOps variable {m : Type u → Type v} [Monad m] {α β : Type u} {act : m α} {f : α → m β} {g : β → m γ} ``` :::syntax term (title := "Monad Operators") {lean}`act >>= f` is syntax for {lean}`Bind.bind act f`. ```grammar $_ >>= $_ ``` Similarly, the reversed operator {lean}`f =<< act` is syntax for {lean}`Bind.bind act f`. ```grammar $_ =<< $_ ``` The Kleisli composition operators {name}`Bind.kleisliRight` and {name}`Bind.kleisliLeft` also have infix operators. ```grammar $_ >=> $_ ``` ```grammar $_ <=< $_ ``` ::: ```lean -show example : act >>= f = Bind.bind act f := by rfl example : f =<< act = Bind.bind act f := rfl example : f >=> g = Bind.kleisliRight f g := by rfl example : g <=< f = Bind.kleisliLeft g f := by rfl end MOps ``` # `do`-Notation %%% tag := "do-notation" %%% Monads are primarily used via {deftech}[{keywordOf Lean.Parser.Term.do}`do`-notation], which is an embedded language for programming in an imperative style. It provides familiar syntax for sequencing effectful operations, early return, local mutable variables, loops, and exception handling. All of these features are translated to the operations of the {lean}`Monad` type class, with a few of them requiring addition instances of classes such as {lean}`ForIn` that specify iteration over containers. For more details about the design of {keywordOf Lean.Parser.Term.do}`do`-notation, please consult {citet doUnchained}[]. A {keywordOf Lean.Parser.Term.do}`do` term consists of the keyword {keywordOf Lean.Parser.Term.do}`do` followed by a sequence of {deftech}_{keywordOf Lean.Parser.Term.do}`do` items_. :::syntax term (title := "`do`-Notation") ```grammar do $stmt* ``` The items in a {keywordOf Lean.Parser.Term.do}`do` may be separated by semicolons; otherwise, each should be on its own line and they should have equal indentation. 
::: ```lean -show section variable {m : Type → Type} [Monad m] {α β γ: Type} {e1 : m Unit} {e : β} {es : m α} ``` ## Sequential Computations One form of {tech}[{keywordOf Lean.Parser.Term.do}`do` item] is a term. :::syntax Lean.Parser.Term.doSeqItem (title := "Terms in `do`-Notation") ```grammar $e:term ``` ::: A term followed by a sequence of items is translated to a use of {name}`bind`; in particular, {lean}`do e1; es` is translated to {lean}`e1 >>= fun () => do es`. :::table +header * * {keywordOf Lean.Parser.Term.do}`do` Item * Desugaring * * ```leanTerm do e1 es ``` * ```leanTerm e1 >>= fun () => do es ``` ::: ```lean -show -keep def ex1a := do e1; es def ex1b := e1 >>= fun () => do es example : @ex1a = @ex1b := by rfl ``` The result of the term's computation may also be named, allowing it to be used in subsequent steps. This is done using {keywordOf Lean.Parser.Term.doLet}`let`. ```lean -show section variable {e1 : m β} {e1? : m (Option β)} {fallback : m α} {e2 : m γ} {f : β → γ → m Unit} {g : γ → α} {h : β → m γ} ``` :::syntax Lean.Parser.Term.doSeqItem (title := "Data Dependence in `do`-Notation") There are two forms of monadic {keywordOf Lean.Parser.Term.doLet}`let`-binding in a {keywordOf Lean.Parser.Term.do}`do` block. The first binds an identifier to the result, with an optional type annotation: ```grammar let $x:ident$[:$e]? ← $e:term ``` The second binds a pattern to the result. The fallback clause, beginning with `|`, specifies the behavior when the pattern does not match the result. ```grammar let $x:term ← $e:term $[| $e]? ``` ::: This syntax is also translated to a use of {name}`bind`. {lean}`do let x ← e1; es` is translated to {lean}`e1 >>= fun x => do es`, and fallback clauses are translated to default pattern matches. {keywordOf Lean.Parser.Term.doLet}`let` may also be used with the standard definition syntax `:=` instead of `←`. 
This indicates a pure, rather than monadic, definition: :::syntax Lean.Parser.Term.doSeqItem (title := "Local Definitions in `do`-Notation") ```grammar let $v := $e:term ``` ::: {lean}`do let x := e; es` is translated to {lean}`let x := e; do es`. :::table +header * * {keywordOf Lean.Parser.Term.do}`do` Item * Desugaring * * ```leanTerm do let x ← e1 es ``` * ```leanTerm e1 >>= fun x => do es ``` * * ```leanTerm do let some x ← e1? | fallback es ``` * ```leanTerm e1? >>= fun | some x => do es | _ => fallback ``` * * ```leanTerm do let x := e es ``` * ```leanTerm let x := e do es ``` ::: ```lean -show -keep -- Test desugarings def ex1a := do let x ← e1 es def ex1b := e1 >>= fun x => do es example : @ex1a = @ex1b := by rfl def ex2a := do let some x ← e1? | fallback es def ex2b := e1? >>= fun | some x => do es | _ => fallback example : @ex2a = @ex2b := by rfl def ex3a := do let x := e es def ex3b := let x := e do es example : @ex3a = @ex3b := by rfl ``` Within a {keywordOf Lean.Parser.Term.do}`do` block, `←` may be used as a prefix operator. The expression to which it is applied is replaced with a fresh variable, which is bound using {name}`bind` just before the current step. This allows monadic effects to be used in positions that otherwise might expect a pure value, while still maintaining the distinction between _describing_ an effectful computation and actually _executing_ its effects. Multiple occurrences of `←` are processed from left to right, inside to outside. 
::::figure "Example Nested Action Desugarings" :::table +header * * Example {keywordOf Lean.Parser.Term.do}`do` Item * Desugaring * * ```leanTerm do f (← e1) (← e2) es ``` * ```leanTerm do let x ← e1 let y ← e2 f x y es ``` * * ```leanTerm do let x := g (← h (← e1)) es ``` * ```leanTerm do let y ← e1 let z ← h y let x := g z es ``` ::: :::: ```lean -show -keep -- Test desugarings def ex1a := do f (← e1) (← e2) es def ex1b := do let x ← e1 let y ← e2 f x y es example : @ex1a = @ex1b := by rfl def ex2a := do let x := g (← h (← e1)) es def ex2b := do let y ← e1 let z ← h y let x := g z es example : @ex2a = @ex2b := by rfl ``` In addition to convenient support for sequential computations with data dependencies, {keywordOf Lean.Parser.Term.do}`do`-notation also supports the local addition of a variety of effects, including early return, local mutable state, and loops with early termination. These effects are implemented via transformations of the entire {keywordOf Lean.Parser.Term.do}`do` block in a manner akin to {tech}[monad transformers], rather than via a local desugaring. ## Early Return %%% tag := "early-return" %%% Early return terminates a computation immediately with a given value. The value is returned from the closest containing {keywordOf Lean.Parser.Term.do}`do` block; however, this may not be the closest `do` keyword. The rules for determining the extent of a {keywordOf Lean.Parser.Term.do}`do` block are described {ref "closest-do-block"}[in their own section]. :::syntax Lean.Parser.Term.doSeqItem (title := "Early Return") ```grammar return $e ``` ```grammar return ``` ::: Not all monads include early return. Thus, when a {keywordOf Lean.Parser.Term.do}`do` block contains {keywordOf Lean.Parser.Term.doReturn}`return`, the code needs to be rewritten to simulate the effect. 
A program that uses early return to compute a value of type {lean}`α` in a monad {lean}`m` can be thought of as a program in the monad {lean}`ExceptT α m α`: early-returned values take the exception pathway, while ordinary returns do not. Then, an outer handler can return the value from either code paths. Internally, the {keywordOf Lean.Parser.Term.do}`do` elaborator performs a translation very much like this one. On its own, {keywordOf Lean.Parser.Term.doReturn}`return` is short for {keywordOf Lean.Parser.Term.doReturn}`return`​` `​{lean}`()`. ## Local Mutable State %%% tag := "let-mut" %%% Local mutable state is mutable state that cannot escape the {keywordOf Lean.Parser.Term.do}`do` block in which it is defined. The {keywordOf Lean.Parser.Term.doLet}`let mut` binder introduces a locally-mutable binding. :::syntax Lean.Parser.Term.doSeqItem (title := "Local Mutability") Mutable bindings may be initialized either with pure computations or with monadic computations: ```grammar let mut $x := $e ``` ```grammar let mut $x ← $e ``` Similarly, they can be mutated either with pure values or the results of monad computations: ```grammar (of := Lean.Parser.Term.doReassign) $x:ident$[: $_]? := $e:term ``` ```grammar (of := Lean.Parser.Term.doReassign) $x:term$[: $_]? := $e:term ``` ```grammar (of := Lean.Parser.Term.doReassignArrow) $x:ident$[: $_]? ← $e:term ``` ```grammar (of := Lean.Parser.Term.doReassignArrow) $x:term ← $e:term $[| $e]? ``` ::: These locally-mutable bindings are less powerful than a {tech}[state monad] because they are not mutable outside their lexical scope; this also makes them easier to reason about. When {keywordOf Lean.Parser.Term.do}`do` blocks contain mutable bindings, the {keywordOf Lean.Parser.Term.do}`do` elaborator transforms the expression similarly to the way that {lean}`StateT` would, constructing a new monad and initializing it with the correct values. 
## Control Structures %%% tag := "do-control-structures" %%% There are {keywordOf Lean.Parser.Term.do}`do` items that correspond to most of Lean's term-level control structures. When they occur as a step in a {keywordOf Lean.Parser.Term.do}`do` block, they are interpreted as {keywordOf Lean.Parser.Term.do}`do` items rather than terms. Each branch of the control structures is a sequence of {keywordOf Lean.Parser.Term.do}`do` items, rather than a term, and some of them are more syntactically flexible than their corresponding terms. :::syntax Lean.Parser.Term.doSeqItem (title := "Conditionals") In a {keywordOf Lean.Parser.Term.do}`do` block, {keywordOf Lean.Parser.Term.doIf}`if` statements may omit their {keywordOf Lean.Parser.Term.doIf}`else` branch. Omitting an {keywordOf Lean.Parser.Term.doIf}`else` branch is equivalent to using {name}`pure`{lean}` ()` as the contents of the branch. ```grammar if $[$h :]? $e then $e* $[else $_*]? ``` ::: Syntactically, the {keywordOf Lean.Parser.Term.doIf}`then` branch cannot be omitted. For these cases, {keywordOf Lean.Parser.Term.doUnless}`unless` only executes its body when the condition is false. The {keywordOf Lean.Parser.Term.do}`do` in {keywordOf Lean.Parser.Term.doUnless}`unless` is part of its syntax and does not induce a nested {keywordOf Lean.Parser.Term.do}`do` block. :::syntax Lean.Parser.Term.doSeqItem (title := "Reverse Conditionals") ```grammar unless $e do $e* ``` ::: When {keywordOf Lean.Parser.Term.doMatch}`match` is used in a {keywordOf Lean.Parser.Term.do}`do` block, each branch is considered to be part of the same block. Otherwise, it is equivalent to the {keywordOf Lean.Parser.Term.match}`match` term. :::syntax Lean.Parser.Term.doSeqItem (title := "Pattern Matching") ```grammar match $[$[$h :]? 
$e],* with $[| $t,* => $e*]* ``` ::: ## Iteration %%% tag := "monad-iteration-syntax" %%% Within a {keywordOf Lean.Parser.Term.do}`do` block, {keywordOf Lean.Parser.Term.doFor}`for`​`…`​{keywordOf Lean.Parser.Term.doFor}`in` loops allow iteration over a data structure. The body of the loop is part of the containing {keywordOf Lean.Parser.Term.do}`do` block, so local effects such as early return and mutable variables may be used. :::syntax Lean.Parser.Term.doSeqItem (title := "Iteration over Collections") ```grammar for $[$[$h :]? $x in $y],* do $e* ``` ::: A {keywordOf Lean.Parser.Term.doFor}`for`​`…`​{keywordOf Lean.Parser.Term.doFor}`in` loop requires at least one clause that specifies the iteration to be performed, which consists of an optional membership proof name followed by a colon (`:`), a pattern to bind, the keyword {keywordOf Lean.Parser.Term.doFor}`in`, and a collection term. The pattern, which may just be an {tech}[identifier], must match any element of the collection; patterns in this position cannot be used as implicit filters. Further clauses may be provided by separating them with commas. Each collection is iterated over at the same time, and iteration stops when any of the collections runs out of elements. :::example "Iteration Over Multiple Collections" When iterating over multiple collections, iteration stops when any of the collections runs out of elements. ```lean (name := earlyStop) #eval Id.run do let mut v := #[] for x in [0:43], y in ['a', 'b'] do v := v.push (x, y) return v ``` ```leanOutput earlyStop #[(0, 'a'), (1, 'b')] ``` ::: ::::keepEnv :::example "Iteration over Array Indices with {keywordOf Lean.Parser.Term.doFor}`for`" When iterating over the valid indices for an array with {keywordOf Lean.Parser.Term.doFor}`for`, naming the membership proof allows the tactic that searches for proofs that array indices are in bounds to succeed. 
```lean -keep def satisfyingIndices (p : α → Prop) [DecidablePred p] (xs : Array α) : Array Nat := Id.run do let mut out := #[] for h : i in [0:xs.size] do if p xs[i] then out := out.push i return out ``` Omitting the hypothesis name causes the array lookup to fail, because no proof is available in the context that the iteration variable is within the specified range. ```lean -keep -show -- test it /-- error: failed to prove index is valid, possible solutions: - Use `have`-expressions to prove the index is valid - Use `a[i]!` notation instead, runtime check is performed, and 'Panic' error message is produced if index is not valid - Use `a[i]?` notation instead, result is an `Option` type - Use `a[i]'h` notation instead, where `h` is a proof that index is valid m : Type → Type inst✝¹ : Monad m α β γ : Type e1✝ : m Unit e : β es : m α e1 : m β e1? : m (Option β) fallback : m α e2 : m γ f : β → γ → m Unit g : γ → α h : β → m γ p : α → Prop inst✝ : DecidablePred p xs : Array α out✝ : Array Nat := #[] i : Nat r✝ : Array Nat out : Array Nat := r✝ ⊢ i < xs.size -/ #check_msgs in def satisfyingIndices (p : α → Prop) [DecidablePred p] (xs : Array α) : Array Nat := Id.run do let mut out := #[] for i in [0:xs.size] do if p xs[i] then out := out.push i return out ``` ::: :::: :::::keepEnv ::::leanSection Iteration with `for`-loops is translated into uses of `ForIn.forIn`, which is an analogue of `ForM.forM` with added support for local mutations and early termination. {name}`ForIn.forIn` receives an initial value for the local mutable state and a monadic action as parameters, along with the collection being iterated over. 
The monadic action passed to {name}`ForIn.forIn` takes a current state as a parameter and, after carrying out actions in the monad {lean}`m`, returns either {name}`ForInStep.yield` to indicate that iteration should continue with an updated set of local mutable values, or {name}`ForInStep.done` to indicate that {keywordOf Lean.Parser.Term.doBreak}`break` or {keywordOf Lean.Parser.Term.doReturn}`return` was executed. When iteration is complete, {name}`ForIn.forIn` returns the final values of the local mutable values. The specific desugaring of a loop depends on how state and early termination are used in its body. Here are some examples: ```lean -show axiom «<B>» : Type u axiom «<b>» : β variable [Monad m] (xs : Coll) [ForIn m Coll α] [instMem : Membership α Coll] [ForIn' m Coll α instMem] variable (f : α → β → m β) (f' : (x : α) → x ∈ xs → β → m β) macro "…" : term => `((«<b>» : β)) ``` :::table +header * * {keywordOf Lean.Parser.Term.do}`do` Item * Desugaring * * ```leanTerm (type := "m α") do let mut b := … for x in xs do b ← f x b es ``` * ```leanTerm (type := "m α") do let b := … let b ← ForIn.forIn xs b fun x b => do let b ← f x b return ForInStep.yield b es ``` * * ```leanTerm (type := "m α") do let mut b := … for x in xs do b ← f x b break es ``` * ```leanTerm (type := "m α") do let b := … let b ← ForIn.forIn xs b fun x b => do let b ← f x b return ForInStep.done b es ``` * * ```leanTerm (type := "m α") do let mut b := … for h : x in xs do b ← f' x h b es ``` * ```leanTerm (type := "m α") do let b := … let b ← ForIn'.forIn' xs b fun x h b => do let b ← f' x h b return ForInStep.yield b es ``` * * ```leanTerm (type := "m α") do let mut b := … for h : x in xs do b ← f' x h b break es ``` * ```leanTerm (type := "m α") do let b := … let b ← ForIn'.forIn' xs b fun x h b => do let b ← f' x h b return ForInStep.done b es ``` ::: :::: ::::: The body of a {keywordOf Lean.doElemWhile_Do_}`while` loop is repeated while the condition remains true. 
It is possible to write infinite loops using them in functions that are not marked {keywordOf Lean.Parser.Command.declaration}`partial`. This is because the {keywordOf Lean.Parser.Command.declaration}`partial` modifier only applies to non-termination or infinite regress induced by the function being defined, and not by those that it calls. The translation of {keywordOf Lean.doElemWhile_Do_}`while` loops relies on a separate helper. :::syntax Lean.Parser.Term.doSeqItem (title := "Conditional Loops") ```grammar while $e do $e* ``` ```grammar while $h : $e do $e* ``` ::: The body of a {keywordOf Lean.doElemRepeat__Until_}`repeat`-{keywordOf Lean.doElemRepeat__Until_}`until` loop is always executed at least once. After each iteration, the condition is checked, and the loop is repeated when the condition is *false*. When the condition becomes true, iteration stops. :::syntax Lean.Parser.Term.doSeqItem (title := "Post-Tested Loops") ```grammar repeat $e* until $_ ``` ::: The body of a {keywordOf Lean.doElemRepeat_}`repeat` loop is repeated until a {keywordOf Lean.Parser.Term.doBreak}`break` statement is executed. Just like {keywordOf Lean.doElemWhile_Do_}`while` loops, these loops can be used in functions that are not marked {keywordOf Lean.Parser.Command.declaration}`partial`. :::syntax Lean.Parser.Term.doSeqItem (title := "Unconditional Loops") ```grammar repeat $e* ``` ::: The {keywordOf Lean.Parser.Term.doContinue}`continue` statement skips the rest of the body of the closest enclosing {keywordOf Lean.doElemRepeat_}`repeat`, {keywordOf Lean.doElemWhile_Do_}`while`, or {keywordOf Lean.Parser.Term.doFor}`for` loop, moving on to the next iteration. The {keywordOf Lean.Parser.Term.doBreak}`break` statement terminates the closest enclosing {keywordOf Lean.doElemRepeat_}`repeat`, {keywordOf Lean.doElemWhile_Do_}`while`, or {keywordOf Lean.Parser.Term.doFor}`for` loop, stopping iteration. 
:::syntax Lean.Parser.Term.doSeqItem (title := "Loop Control Statements") ```grammar continue ``` ```grammar break ``` ::: In addition to {keywordOf Lean.Parser.Term.doBreak}`break`, loops can always be terminated by effects in the current monad. Throwing an exception from a loop terminates the loop. :::example "Terminating Loops in the {lean}`Option` Monad" The {name}`failure` method from the {name}`Alternative` class can be used to terminate an otherwise-infinite loop in the {name}`Option` monad. ```lean (name := natBreak) #eval show Option Nat from do let mut i := 0 repeat if i > 1000 then failure else i := 2 * (i + 1) return i ``` ```leanOutput natBreak none ``` ::: ## Identifying `do` Blocks %%% tag := "closest-do-block" %%% Many features of {keywordOf Lean.Parser.Term.do}`do`-notation have an effect on the {deftech}[current {keywordOf Lean.Parser.Term.do}`do` block]. In particular, early return aborts the current block, causing it to evaluate to the returned value, and mutable bindings can only be mutated in the block in which they are defined. Understanding these features requires a precise definition of what it means to be in the “same” block. Empirically, this can be checked using the Lean language server. When the cursor is on a {keywordOf Lean.Parser.Term.doReturn}`return` statement, the corresponding {keywordOf Lean.Parser.Term.do}`do` keyword is highlighted. Attempting to mutate a mutable binding outside of the same {keywordOf Lean.Parser.Term.do}`do` block results in an error message. :::figure "Highlighting {keywordOf Lean.Parser.Term.do}`do`" ![Highlighting do from return](/static/screenshots/do-return-hl-1.png) ![Highlighting do from return with errors](/static/screenshots/do-return-hl-2.png) ::: The rules are as follows: * Each item immediately nested under the {keywordOf Lean.Parser.Term.do}`do` keyword that begins a block belongs to that block. 
* Each item immediately nested under the {keywordOf Lean.Parser.Term.do}`do` keyword that is an item in a containing {keywordOf Lean.Parser.Term.do}`do` block belongs to the outer block. * Items in the branches of an {keywordOf Lean.Parser.Term.doIf}`if`, {keywordOf Lean.Parser.Term.doMatch}`match`, or {keywordOf Lean.Parser.Term.doUnless}`unless` item belong to the same {keywordOf Lean.Parser.Term.do}`do` block as the control structure that contains them. The {keywordOf Lean.Parser.Term.doUnless}`do` keyword that is part of the syntax of {keywordOf Lean.Parser.Term.doUnless}`unless` does not introduce a new {keywordOf Lean.Parser.Term.do}`do` block. * Items in the body of {keywordOf Lean.doElemRepeat_}`repeat`, {keywordOf Lean.doElemWhile_Do_}`while`, and {keywordOf Lean.Parser.Term.doFor}`for` belong to the same {keywordOf Lean.Parser.Term.do}`do` block as the loop that contains them. The {keywordOf Lean.Parser.Term.doFor}`do` keyword that is part of the syntax of {keywordOf Lean.doElemWhile_Do_}`while` and {keywordOf Lean.Parser.Term.doFor}`for` does not introduce a new {keywordOf Lean.Parser.Term.do}`do` block. 
```lean -show -- Test nested `do` rules /-- info: ((), 6) -/ #check_msgs in #eval (·.run 0) <| show StateM Nat Unit from do set 5 do set 6 return /-- error: must be last element in a `do` sequence -/ #check_msgs in #eval (·.run 0) <| show StateM Nat Unit from do set 5 do set 6 return set 7 return /-- info: ((), 6) -/ #check_msgs in #eval (·.run 0) <| show StateM Nat Unit from do set 5 if true then set 6 do return set 7 return ``` ::::keepEnv :::example "Nested `do` and Branches" The following example outputs {lean}`6` rather than {lean}`7`: ```lean (name := nestedDo) def test : StateM Nat Unit := do set 5 if true then set 6 do return set 7 return #eval test.run 0 ``` ```leanOutput nestedDo ((), 6) ``` This is because the {keywordOf Lean.Parser.Term.doReturn}`return` statement under the {keywordOf Lean.Parser.Term.doIf}`if` belongs to the same {keywordOf Lean.Parser.Term.do}`do` as its immediate parent, which itself belongs to the same {keywordOf Lean.Parser.Term.do}`do` as the {keywordOf Lean.Parser.Term.doIf}`if`. If {keywordOf Lean.Parser.Term.do}`do` blocks that occurred as items in other {keywordOf Lean.Parser.Term.do}`do` blocks instead created new blocks, then the example would output {lean}`7`. 
::: :::: ```lean -show end ``` ```lean -show -- tests for this section set_option pp.all true /-- info: @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) Unit α e1 fun (x : PUnit.{1}) => es : m α -/ #check_msgs in #check do e1; es section variable {e1 : m β} /-- info: @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) β α e1 fun (x : β) => es : m α -/ #check_msgs in #check do let x ← e1; es end /-- info: let x : β := e; es : m α -/ #check_msgs in #check do let x := e; es variable {e1 : m β} {e2 : m γ} {f : β → γ → m Unit} {g : γ → α} {h : β → m γ} /-- info: @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) β α e1 fun (__do_lift : β) => @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) γ α e2 fun (__do_lift_1 : γ) => @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) Unit α (f __do_lift __do_lift_1) fun (x : PUnit.{1}) => es : m α -/ #check_msgs in #check do f (← e1) (← e2); es /-- info: @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) β α e1 fun (__do_lift : β) => @Bind.bind.{0, 0} m (@Monad.toBind.{0, 0} m inst✝) γ α (h __do_lift) fun (__do_lift : γ) => let x : α := g __do_lift; es : m α -/ #check_msgs in #check do let x := g (← h (← e1)); es end ``` ## Type Classes for Iteration To be used with {keywordOf Lean.Parser.Term.doFor}`for` loops without membership proofs, collections must implement the {name}`ForIn` type class. Implementing {lean}`ForIn'` additionally allows the use of {keywordOf Lean.Parser.Term.doFor}`for` loops with membership proofs. {docstring ForIn} {docstring ForIn'} {docstring ForInStep} {docstring ForInStep.value} {docstring ForM} {docstring ForM.forIn}
reference-manual/Manual/Monads/API.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "API Reference" => In addition to the general functions described here, there are some functions that are conventionally defined as part of the API in the namespace of each collection type: * `mapM` maps a monadic function. * `forM` maps a monadic function, throwing away the result. * `filterM` filters using a monadic predicate, returning the values that satisfy it. ::::example "Monadic Collection Operations" {name}`Array.filterM` can be used to write a filter that depends on a side effect. :::ioExample ```ioLean def values := #[1, 2, 3, 5, 8] def main : IO Unit := do let filtered ← values.filterM fun v => do repeat IO.println s!"Keep {v}? [y/n]" let answer := (← (← IO.getStdin).getLine).trimAscii.copy if answer == "y" then return true if answer == "n" then return false return false IO.println "These values were kept:" for v in filtered do IO.println s!" * {v}" ``` ```stdin y n oops y n y ``` ```stdout Keep 1? [y/n] Keep 2? [y/n] Keep 3? [y/n] Keep 3? [y/n] Keep 5? [y/n] Keep 8? [y/n] These values were kept: * 1 * 3 * 8 ``` ::: :::: # Discarding Results The {name}`discard` function is especially useful when using an action that returns a value only for its side effects. {docstring discard} # Control Flow {docstring guard} {docstring optional} # Lifting Boolean Operations {docstring andM} {docstring orM} {docstring notM} # Kleisli Composition {deftech}_Kleisli composition_ is the composition of monadic functions, analogous to {name}`Function.comp` for ordinary functions. {docstring Bind.kleisliRight} {docstring Bind.kleisliLeft} # Re-Ordered Operations Sometimes, it can be convenient to partially apply a function to its second argument. These functions reverse the order of arguments, making this easier. 
{docstring Functor.mapRev} {docstring Bind.bindLeft}
reference-manual/Manual/Monads/Lift.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Lifting Monads" => %%% tag := "lifting-monads" %%% ::::keepEnv ```lean -show variable {m m' n : Type u → Type v} [Monad m] [Monad m'] [Monad n] [MonadLift m n] variable {α β : Type u} ``` When one monad is at least as capable as another, then actions from the latter monad can be used in a context that expects actions from the former. This is called {deftech (key := "lift")}_lifting_ the action from one monad to another. Lean automatically inserts lifts when they are available; lifts are defined in the {name}`MonadLift` type class. Automatic monad lifting is attempted before the general {tech}[coercion] mechanism. {docstring MonadLift} {tech (key := "lift")}[Lifting] between monads is reflexive and transitive: * Any monad can run its own actions. * Lifts from {lean}`m` to {lean}`m'` and from {lean}`m'` to {lean}`n` can be composed to yield a lift from {lean}`m` to {lean}`n`. The utility type class {name}`MonadLiftT` constructs lifts via the reflexive and transitive closure of {name}`MonadLift` instances. Users should not define new instances of {name}`MonadLiftT`, but it is useful as an instance implicit parameter to a polymorphic function that needs to run actions from multiple monads in some user-provided monad. {docstring MonadLiftT} ```lean -show section variable {m : Type → Type u} ``` :::example "Monad Lifts in Function Signatures" The function {name}`IO.withStdin` has the following signature: ```signature IO.withStdin.{u} {m : Type → Type u} {α : Type} [Monad m] [MonadFinally m] [MonadLiftT BaseIO m] (h : IO.FS.Stream) (x : m α) : m α ``` Because it doesn't require its parameter to precisely be in {name}`IO`, it can be used in many monads, and the body does not need to restrict itself to {name}`IO`. 
The instance implicit parameter {lean}`MonadLiftT BaseIO m` allows the reflexive transitive closure of {name}`MonadLift` to be used to assemble the lift. ::: ```lean -show end ``` When a term of type {lean}`n β` is expected, but the provided term has type {lean}`m α`, and the two types are not definitionally equal, Lean attempts to insert lifts and coercions before reporting an error. There are the following possibilities: 1. If {lean}`m` and {lean}`n` can be unified to the same monad, then {lean}`α` and {lean}`β` are not the same. In this case, no monad lifts are necessary, but the value in the monad must be {tech (key := "coercion")}[coerced]. If the appropriate coercion is found, then a call to {name}`Lean.Internal.coeM` is inserted, which has the following signature: ```signature Lean.Internal.coeM.{u, v} {m : Type u → Type v} {α β : Type u} [(a : α) → CoeT α a β] [Monad m] (x : m α) : m β ``` 2. If {lean}`α` and {lean}`β` can be unified, then the monads differ. In this case, a monad lift is necessary to transform an expression with type {lean}`m α` to {lean}`n α`. If {lean}`m` can be lifted to {lean}`n` (that is, there is an instance of {lean}`MonadLiftT m n`) then a call to {name}`liftM`, which is an alias for {name}`MonadLiftT.monadLift`, is inserted. ```signature liftM.{u, v, w} {m : Type u → Type v} {n : Type u → Type w} [self : MonadLiftT m n] {α : Type u} : m α → n α ``` 3. If neither {lean}`m` and {lean}`n` nor {lean}`α` and {lean}`β` can be unified, but {lean}`m` can be lifted into {lean}`n` and {lean}`α` can be {tech (key := "coercion")}[coerced] to {lean}`β`, then a lift and a coercion can be combined. 
This is done by inserting a call to {name}`Lean.Internal.liftCoeM`: ```signature Lean.Internal.liftCoeM.{u, v, w} {m : Type u → Type v} {n : Type u → Type w} {α β : Type u} [MonadLiftT m n] [(a : α) → CoeT α a β] [Monad n] (x : m α) : n β ``` As their names suggest, {name}`Lean.Internal.coeM` and {name}`Lean.Internal.liftCoeM` are implementation details, not part of the public API. In the resulting terms, occurrences of {name}`Lean.Internal.coeM`, {name}`Lean.Internal.liftCoeM`, and coercions are unfolded. :::: ::::keepEnv :::example "Lifting `IO` Monads" There is an instance of {lean}`MonadLift BaseIO IO`, so any `BaseIO` action can be run in `IO` as well: ```lean def fromBaseIO (act : BaseIO α) : IO α := act ``` Behind the scenes, {name}`liftM` is inserted: ```lean (name := fromBase) #check fun {α} (act : BaseIO α) => (act : IO α) ``` ```leanOutput fromBase fun {α} act => liftM act : {α : Type} → BaseIO α → EIO IO.Error α ``` ::: :::: :::::keepEnv ::::example "Lifting Transformed Monads" There are also instances of {name}`MonadLift` for most of the standard library's {tech}[monad transformers], so base monad actions can be used in transformed monads without additional work. For example, state monad actions can be lifted across reader and exception transformers, allowing compatible monads to be intermixed freely: ```lean -keep def incrBy (n : Nat) : StateM Nat Unit := modify (· + n) def incrOrFail : ReaderT Nat (ExceptT String (StateM Nat)) Unit := do if (← read) > 5 then throw "Too much!" incrBy (← read) ``` Disabling lifting causes an error: ```lean (name := noLift) +error set_option autoLift false def incrBy (n : Nat) : StateM Nat Unit := modify (. + n) def incrOrFail : ReaderT Nat (ExceptT String (StateM Nat)) Unit := do if (← read) > 5 then throw "Too much!" 
incrBy (← read) ``` ```leanOutput noLift Type mismatch incrBy __do_lift✝ has type StateM Nat Unit but is expected to have type ReaderT Nat (ExceptT String (StateM Nat)) Unit ``` :::: ::::: Automatic lifting can be disabled by setting {option}`autoLift` to {lean}`false`. {optionDocs autoLift} # Reversing Lifts ```lean -show variable {m n : Type u → Type v} {α ε : Type u} ``` Monad lifting is not always sufficient to combine monads. Many operations provided by monads are higher order, taking an action _in the same monad_ as a parameter. Even if these operations are lifted to some more powerful monad, their arguments are still restricted to the original monad. There are two type classes that support this kind of “reverse lifting”: {name}`MonadFunctor` and {name}`MonadControl`. An instance of {lean}`MonadFunctor m n` explains how to interpret a fully-polymorphic function in {lean}`m` into {lean}`n`. This polymorphic function must work for _all_ types {lean}`α`: it has type {lean}`{α : Type u} → m α → n α`. Such a function can be thought of as one that may have effects, but can't do so based on specific values that are provided. An instance of {lean}`MonadControl m n` explains how to interpret an arbitrary action from {lean}`m` into {lean}`n`, while at the same time providing a “reverse interpreter” that allows the {lean}`m` action to run {lean}`n` actions. ## Monad Functors {docstring MonadFunctor} {docstring MonadFunctorT} ## Reversible Lifting with `MonadControl` {docstring MonadControl} {docstring MonadControlT} {docstring control} {docstring controlAt} ::::keepEnv :::example "Exceptions and Lifting" One example is {name}`Except.tryCatch`: ```signature Except.tryCatch.{u, v} {ε : Type u} {α : Type v} (ma : Except ε α) (handle : ε → Except ε α) : Except ε α ``` Both of its parameters are in {lean}`Except ε`. {name}`MonadLift` can lift the entire application of the handler. 
The function {lean}`getBytes`, which extracts the single bytes from an array of {lean}`Nat`s using state and exceptions, is written without {keywordOf Lean.Parser.Term.do}`do`-notation or automatic lifting in order to make its structure explicit. ```lean set_option autoLift false def getByte (n : Nat) : Except String UInt8 := if n < 256 then pure n.toUInt8 else throw s!"Out of range: {n}" def getBytes (input : Array Nat) : StateT (Array UInt8) (Except String) Unit := do input.forM fun i => liftM (Except.tryCatch (some <$> getByte i) fun _ => pure none) >>= fun | some b => modify (·.push b) | none => pure () ``` ```lean (name := getBytesEval1) #eval getBytes #[1, 58, 255, 300, 2, 1000000] |>.run #[] |>.map (·.2) ``` ```leanOutput getBytesEval1 Except.ok #[1, 58, 255, 2] ``` {name}`getBytes` uses an `Option` returned from the lifted action to signal the desired state updates. This quickly becomes unwieldy if there is more than one way to react to the inner action, such as saving handled exceptions. Ideally, state updates would be performed within the {name}`tryCatch` call directly. Attempting to save bytes and handled exceptions does not work, however, because the arguments to {name}`Except.tryCatch` have type {lean}`Except String Unit`: ```lean +error (name := getBytesErr) -keep def getBytes' (input : Array Nat) : StateT (Array String) (StateT (Array UInt8) (Except String)) Unit := do input.forM fun i => liftM (Except.tryCatch (getByte i >>= fun b => modifyThe (Array UInt8) (·.push b)) fun e => modifyThe (Array String) (·.push e)) ``` ```leanOutput getBytesErr failed to synthesize instance of type class MonadStateOf (Array String) (Except String) Hint: Type class instance resolution failures can be inspected with the `set_option trace.Meta.synthInstance true` command. ``` Because {name}`StateT` has a {name}`MonadControl` instance, {name}`control` can be used instead of {name}`liftM`. It provides the inner action with an interpreter for the outer monad. 
In the case of {name}`StateT`, this interpreter expects that the inner monad returns a tuple that includes the updated state, and takes care of providing the initial state and extracting the updated state from the tuple. ```lean def getBytes' (input : Array Nat) : StateT (Array String) (StateT (Array UInt8) (Except String)) Unit := do input.forM fun i => control fun run => (Except.tryCatch (getByte i >>= fun b => run (modifyThe (Array UInt8) (·.push b)))) fun e => run (modifyThe (Array String) (·.push e)) ``` ```lean (name := getBytesEval2) #eval getBytes' #[1, 58, 255, 300, 2, 1000000] |>.run #[] |>.run #[] |>.map (fun (((), bytes), errs) => (bytes, errs)) ``` ```leanOutput getBytesEval2 Except.ok (#["Out of range: 300", "Out of range: 1000000"], #[1, 58, 255, 2]) ``` ::: ::::
reference-manual/Manual/Monads/Zoo/Reader.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Reader" => %%% tag := "reader-monad" %%% {docstring MonadReader} {docstring MonadReaderOf} {docstring readThe} {docstring MonadWithReader} {docstring MonadWithReaderOf} {docstring withTheReader} {docstring ReaderT} {docstring ReaderM} {docstring ReaderT.run} {docstring ReaderT.read} {docstring ReaderT.adapt} {docstring ReaderT.pure} {docstring ReaderT.bind} {docstring ReaderT.orElse} {docstring ReaderT.failure}
reference-manual/Manual/Monads/Zoo/Option.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Option" => %%% tag := "option-monad" %%% Ordinarily, {lean}`Option` is thought of as data, similarly to a nullable type. It can also be considered as a monad, and thus a way of performing computations. The {lean}`Option` monad and its transformer {lean}`OptionT` can be understood as describing computations that may terminate early, discarding the results. Callers can check for early termination and invoke a fallback if desired using {name}`OrElse.orElse` or by treating it as a {lean}`MonadExcept Unit`. {docstring OptionT} {docstring OptionT.run} {docstring OptionT.lift} {docstring OptionT.mk} {docstring OptionT.pure} {docstring OptionT.bind} {docstring OptionT.fail} {docstring OptionT.orElse} {docstring OptionT.tryCatch}
reference-manual/Manual/Monads/Zoo/Except.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Exceptions" => %%% tag := "exception-monads" %%% Exception monads describe computations that terminate early (fail). Failing computations provide their caller with an _exception_ value that describes _why_ they failed. In other words, computations either return a value or an exception. The inductive type {name}`Except` captures this pattern, and is itself a monad. # Exceptions {docstring Except} {docstring Except.pure} {docstring Except.bind} {docstring Except.map} {docstring Except.mapError} {docstring Except.tryCatch} {docstring Except.orElseLazy} {docstring Except.isOk} {docstring Except.toOption} {docstring Except.toBool} # Type Class {docstring MonadExcept} {docstring MonadExcept.ofExcept} {docstring MonadExcept.orElse} {docstring MonadExcept.orelse'} {docstring MonadExceptOf} {docstring throwThe} {docstring tryCatchThe} # “Finally” Computations {docstring MonadFinally} # Transformer {docstring ExceptT} {docstring ExceptT.lift} {docstring ExceptT.run} {docstring ExceptT.pure} {docstring ExceptT.bind} {docstring ExceptT.bindCont} {docstring ExceptT.tryCatch} {docstring ExceptT.mk} {docstring ExceptT.map} {docstring ExceptT.adapt} # Exception Monads in Continuation Passing Style ```lean -show universe u variable (α : Type u) variable (ε : Type u) variable {m : Type u → Type v} ``` Continuation-passing-style exception monads represent potentially-failing computations as functions that take success and failure continuations, both of which return the same type, returning that type. They must work for _any_ return type. An example of such a type is {lean}`(β : Type u) → (α → β) → (ε → β) → β`. 
{lean}`ExceptCpsT` is a transformer that can be applied to any monad, so {lean}`ExceptCpsT ε m α` is actually defined as {lean}`(β : Type u) → (α → m β) → (ε → m β) → m β`. Exception monads in continuation passing style have different performance characteristics than {name}`Except`-based state monads; for some applications, it may be worth benchmarking them. ```lean -show /-- info: (β : Type u) → (α → m β) → (ε → m β) → m β -/ #check_msgs in #reduce (types := true) ExceptCpsT ε m α ``` {docstring ExceptCpsT} {docstring ExceptCpsT.runCatch} {docstring ExceptCpsT.runK} {docstring ExceptCpsT.run} {docstring ExceptCpsT.lift}
reference-manual/Manual/Monads/Zoo/Id.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Identity" => The identity monad {name}`Id` has no effects whatsoever. Both {name}`Id` and the corresponding implementation of {name}`pure` are the identity function, and {name}`bind` is reversed function application. The identity monad has two primary use cases: 1. It can be the type of a {keywordOf Lean.Parser.Term.do}`do` block that implements a pure function with local effects. 2. It can be placed at the bottom of a stack of monad transformers. ```lean -show -- Verify claims example : Id = id := rfl example : Id.run (α := α) = id := rfl example : (pure (f := Id)) = (id : α → α) := rfl example : (bind (m := Id)) = (fun (x : α) (f : α → Id β) => f x) := rfl ``` {docstring Id} {docstring Id.run} :::example "Local Effects with the Identity Monad" This code block implements a countdown procedure by using simulated local mutability in the identity monad. ```lean (name := idDo) #eval Id.run do let mut xs := [] for x in [0:10] do xs := x :: xs pure xs ``` ```leanOutput idDo [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] ``` :::
reference-manual/Manual/Monads/Zoo/Combined.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "Combined Error and State Monads" => ```lean -show variable (ε : Type u) (σ σ' : Type u) (α : Type u) ``` The {name}`EStateM` monad has both exceptions and mutable state. {lean}`EStateM ε σ α` is logically equivalent to {lean}`ExceptT ε (StateM σ) α`. While {lean}`ExceptT ε (StateM σ)` evaluates to the type {lean}`σ → Except ε α × σ`, the type {lean}`EStateM ε σ α` evaluates to {lean}`σ → EStateM.Result ε σ α`. {name}`EStateM.Result` is an inductive type that's very similar to {name}`Except`, except both constructors have an additional field for the state. In compiled code, this representation removes one level of indirection from each monadic bind. ```lean -show /-- info: σ → Except ε α × σ -/ #check_msgs in #reduce (types := true) ExceptT ε (StateM σ) α /-- info: σ → EStateM.Result ε σ α -/ #check_msgs in #reduce (types := true) EStateM ε σ α ``` {docstring EStateM} {docstring EStateM.Result} {docstring EStateM.run} {docstring EStateM.run'} {docstring EStateM.adaptExcept} {docstring EStateM.fromStateM +allowMissing} # State Rollback Composing {name}`StateT` and {name}`ExceptT` in different orders causes exceptions to interact differently with state. In one ordering, state changes are rolled back when exceptions are caught; in the other, they persist. The latter option matches the semantics of most imperative programming languages, but the former is very useful for search-based problems. Often, some but not all state should be rolled back; this can be achieved by “sandwiching” {name}`ExceptT` between two separate uses of {name}`StateT`. To avoid yet another layer of indirection via the use of {lean}`StateT σ (EStateM ε σ') α`, {name}`EStateM` offers the {name}`EStateM.Backtrackable` {tech}[type class]. 
This class specifies some part of the state that can be saved and restored. {name}`EStateM` then arranges for the saving and restoring to take place around error handling. {docstring EStateM.Backtrackable} There is a universally-applicable instance of {name EStateM.Backtrackable}`Backtrackable` that neither saves nor restores anything. Because instance synthesis chooses the most recent instance first, the universal instance is used only if no other instance has been defined. {docstring EStateM.nonBacktrackable} # Implementations These functions are typically not called directly, but rather are accessed through their corresponding type classes. {docstring EStateM.map} {docstring EStateM.pure} {docstring EStateM.bind} {docstring EStateM.orElse} {docstring EStateM.orElse'} {docstring EStateM.seqRight} {docstring EStateM.tryCatch} {docstring EStateM.throw} {docstring EStateM.get} {docstring EStateM.set} {docstring EStateM.modifyGet}
reference-manual/Manual/Monads/Zoo/State.lean
import VersoManual import Manual.Meta import Lean.Parser.Command open Manual open Verso.Genre open Verso.Genre.Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option linter.unusedVariables false #doc (Manual) "State" => %%% tag := "state-monads" %%% {tech}[State monads] provide access to a mutable value. The underlying implementation may use a tuple to simulate mutability, or it may use something like {name}`ST.Ref` to ensure mutation. Even those implementations that use a tuple may in fact use mutation at run-time due to Lean's use of mutation when there are unique references to values, but this requires a programming style that prefers {name}`modify` and {name}`modifyGet` over {name}`get` and {name}`set`. # General State API {docstring MonadState} {docstring get} {docstring modify} {docstring modifyGet} {docstring getModify} {docstring MonadStateOf} {docstring getThe} {docstring modifyThe} {docstring modifyGetThe} # Tuple-Based State Monads ```lean -show variable {α σ : Type u} ``` The tuple-based state monads represent a computation with states that have type {lean}`σ` yielding values of type {lean}`α` as functions that take a starting state and yield a value paired with a final state, e.g. {lean}`σ → α × σ`. The {name}`Monad` operations thread the state correctly through the computation. {docstring StateM} {docstring StateT} {docstring StateT.run} {docstring StateT.get} {docstring StateT.set} {docstring StateT.orElse} {docstring StateT.failure} {docstring StateT.run'} {docstring StateT.bind} {docstring StateT.modifyGet} {docstring StateT.lift} {docstring StateT.map} {docstring StateT.pure} # State Monads in Continuation Passing Style Continuation-passing-style state monads represent stateful computations as functions that, for any type whatsoever, take an initial state and a continuation (modeled as a function) that accepts a value and an updated state. 
An example of such a type is {lean}`(δ : Type u) → σ → (α → σ → δ) → δ`, though {lean}`StateCpsT` is a transformer that can be applied to any monad. State monads in continuation passing style have different performance characteristics than tuple-based state monads; for some applications, it may be worth benchmarking them. ```lean -show /-- info: (δ : Type u) → σ → (α → σ → Id δ) → δ -/ #check_msgs in #reduce (types := true) StateCpsT σ Id α ``` {docstring StateCpsT} {docstring StateCpsT.lift} {docstring StateCpsT.runK} {docstring StateCpsT.run'} {docstring StateCpsT.run} # State Monads from Mutable References ```lean -show variable {m : Type → Type} {σ ω : Type} [STWorld σ m] ``` The monad {lean}`StateRefT σ m` is a specialized state monad transformer that can be used when {lean}`m` is a monad to which {name}`ST` computations can be lifted. It implements the operations of {name}`MonadState` using an {name}`ST.Ref`, rather than pure functions. This ensures that mutation is actually used at run time. {name}`ST` and {name}`EST` require a phantom type parameter that's used together with {name}`runST`'s polymorphic function argument to encapsulate mutability. Rather than require this as a parameter to the transformer, an auxiliary type class {name}`STWorld` is used to propagate it directly from {lean}`m`. The transformer itself is defined as a {ref "syntax-ext"}[syntax extension] and an {ref "elaborators"}[elaborator], rather than an ordinary function. This is because {name}`STWorld` has no methods: it exists only to propagate information from the inner monad to the transformed monad. Nonetheless, its instances are terms; keeping them around could lead to unnecessarily large types. {docstring STWorld} :::syntax term (title := "`StateRefT`") The syntax for {lean}`StateRefT σ m` accepts two arguments: ```grammar StateRefT $_ $_ ``` Its elaborator synthesizes an instance of {lean}`STWorld ω m` to ensure that {lean}`m` supports mutable references. 
Having discovered the value of {lean}`ω`, it then produces the term {lean}`StateRefT' ω σ m`, discarding the synthesized instance. ::: {docstring StateRefT'} {docstring StateRefT'.get} {docstring StateRefT'.set} {docstring StateRefT'.modifyGet} {docstring StateRefT'.run} {docstring StateRefT'.run'} {docstring StateRefT'.lift}
reference-manual/Manual/Interaction/FormatRepr.lean
import VersoManual import Std.Data.HashSet import Manual.Meta import Manual.Papers open Lean.MessageSeverity open Verso.Genre Manual open Verso.Genre.Manual.InlineLean set_option pp.rawOnError true set_option verso.code.warnLineLength 72 set_option verso.docstring.allowMissing true #doc (Manual) "Formatted Output" => %%% tag := "format-repr" %%% The {name}`Repr` type class is used to provide a standard representation for data that can be parsed and evaluated to obtain an equivalent value. This is not a strict correctness criterion: for some types, especially those with embedded propositions, it is impossible to achieve. However, the output produced by a {name}`Repr` instance should be as close as possible to something that can be parsed and evaluated. :::paragraph In addition to being machine-readable, this representation should be convenient for humans to understand—in particular, lines should not be too long, and nested values should be indented. This is achieved through a two-step process: 1. The {name}`Repr` instance produces an intermediate document of type {name}`Std.Format`, which compactly represents a _set_ of strings that differ with respect to the placement of newlines and indentation. 2. A rendering process selects the “best” representative from the set, according to criteria such as a desired maximum line length. In particular, {name}`Std.Format` can be built compositionally, so {name}`Repr` instances don't need to take the surrounding indentation context into account. ::: # Format %%% tag := "Format" %%% ::::leanSection ```lean -show open Std (Format) open Std.Format variable {str : String} {indent : String} {n : Nat} ``` :::paragraph A {name}`Format`{margin}[The API described here is an adaptation of Wadler's ({citehere wadler2003}[]). It has been modified to be efficient in a strict language and with support for additional features such as metadata tags.] is a compact representation of a set of strings. 
The most important {name Std.Format}`Format` operations are: : Strings A {name}`String` can be made into a {name}`Format` using the {name}`text` constructor. This constructor is registered as a {ref "coercions"}[coercion] from {name}`String` to {name}`Format`, so it is often unnecessary to invoke it explicitly. {lean}`text str` represents the singleton set that contains only {lean}`str`. If the string contains newline characters ({lean}`'\n'`), then they are unconditionally inserted as newlines into the resulting output, regardless of groups. They are, however, indented according to the current indentation level. : Appending Two {name}`Format`s can be appended using the `++` operator from the {inst}`Append Format` instance. : Groups and Newlines The constructor {name}`line` represents the set that contains both {lean}`"\n" ++ indent` and {lean}`" "`, where {lean}`indent` is a string with enough spaces to indent the line correctly. Imperatively, it can be thought of as a newline that will be “flattened” to a space if there is sufficient room on the current line. Newlines occur in _groups_: the nearest enclosing application of the {name}`group` operator determines which group the newline belongs to. By default, either all {name}`line`s in a group represent {lean}`"\n"` or all represent {lean}`" "`; groups may also be configured to fill lines, in which case the minimal number of {name}`line`s in the group represent {lean}`"\n"`. Uses of {name}`line` that do not belong to a group always represent {lean}`"\n"`. : Indentation When a newline is inserted, the output is also indented. {lean}`nest n` increases the indentation of a document by {lean}`n` spaces. This is not sufficient to represent all Lean syntax, which sometimes requires that columns align exactly. {lean}`align` is a document that ensures that the output string is at the current indentation level, inserting just spaces if possible, or a newline followed by spaces if needed. 
: Tagging Lean's interactive features require the ability to associate output with the underlying values that they represent. This allows Lean development environments to present elaborated terms when hovering over terms, proof states, or error messages, for example. Documents can be _tagged_ with a {name}`Nat` value {lean}`n` using {lean}`tag n`; these {name}`Nat`s should be mapped to the underlying value in a side table. ::: :::: :::example "Widths and Newlines" ```imports -show import Std ``` ```lean open Std Format ``` The helper {name}`parenSeq` creates a parenthesized sequence, with grouping and indentation to make it responsive to different output widths. ```lean def parenSeq (xs : List Format) : Format := group <| nest 2 (text "(" ++ line ++ joinSep xs line) ++ line ++ ")" ``` This document represents a parenthesized sequence of numbers: ```lean def lst : Format := parenSeq nums where nums := [1, 2, 3, 4, 5].map (text s!"{·}") ``` ```lean -show -keep -- check statement in next paragraph /-- info: 120 -/ #check_msgs in #eval defWidth ``` Rendering it with the default line width of 120 characters places the entire sequence on one line: ```lean (name := lstp) #eval IO.println lst.pretty ``` ```leanOutput lstp ( 1 2 3 4 5 ) ``` Because all the {name}`line`s belong to the same {name}`group`, they will either all be rendered as spaces or all be rendered as newlines. If only 9 characters are available, all of the {name}`line`s in {name}`lst` become newlines: ```lean (name := lstp9) #eval IO.println (lst.pretty (width := 9)) ``` ```leanOutput lstp9 ( 1 2 3 4 5 ) ``` This document contains three copies of {name}`lst` in a further parenthesized sequence: ```lean def lsts := parenSeq [lst, lst, lst] ``` At the default width, it remains on one line: ```lean (name := lstsp) #eval IO.println lsts.pretty ``` ```leanOutput lstsp ( ( 1 2 3 4 5 ) ( 1 2 3 4 5 ) ( 1 2 3 4 5 ) ) ``` If only 20 characters are available, each occurrence of {name}`lst` ends up on its own line. 
This is because converting the outer {name}`group` to newlines is sufficient to keep the string within 20 columns: ```lean (name := lstsp20) #eval IO.println (lsts.pretty (width := 20)) ``` ```leanOutput lstsp20 ( ( 1 2 3 4 5 ) ( 1 2 3 4 5 ) ( 1 2 3 4 5 ) ) ``` If only 10 characters are available, each number must be on its own line: ```lean (name := lstsp10) #eval IO.println (lsts.pretty (width := 10)) ``` ```leanOutput lstsp10 ( ( 1 2 3 4 5 ) ( 1 2 3 4 5 ) ( 1 2 3 4 5 ) ) ``` ::: :::example "Grouping and Filling" ```lean open Std Format ``` The helper {name}`parenSeq` creates a parenthesized sequence, with each element placed on a new line and indented: ```lean def parenSeq (xs : List Format) : Format := nest 2 (text "(" ++ line ++ joinSep xs line) ++ line ++ ")" ``` {name}`nums` contains the numbers one through twenty, as a list of formats: ```lean def nums : List Format := Nat.fold 20 (init := []) fun i _ ys => text s!"{20 - i}" :: ys ``` ```lean (name := nums) #eval nums ``` Because {name}`parenSeq` does not introduce any groups, the resulting document is rendered with each element on its own line: ```lean #eval IO.println (pretty (parenSeq nums)) ``` This can be fixed by grouping them. {name}`grouped` does so with {name}`group`, while {name}`filled` does so with {name}`fill`. ```lean def grouped := group (parenSeq nums) def filled := fill (parenSeq nums) ``` Both grouping operators cause uses of {name}`line` to render as spaces. Given sufficient space, both render on a single line: ```lean (name := groupedp) #eval IO.println (pretty grouped) ``` ```leanOutput groupedp ( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ) ``` ```lean (name := filledp) #eval IO.println (pretty filled) ``` ```leanOutput filledp ( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ) ``` However, differences become apparent when there is not sufficient space on a single line. 
Unless _all_ newlines in a {name}`group` can be spaces, none can: ```lean (name := groupedp30) #eval IO.println (pretty (width := 30) grouped) ``` ```leanOutput groupedp30 ( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ) ``` Using {name}`fill`, on the other hand, only inserts newlines as required to avoid being too wide: ```lean (name := filledp30) #eval IO.println (pretty (width := 30) filled) ``` ```leanOutput filledp30 ( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ) ``` The behavior of {name}`fill` can be seen clearly with longer sequences: ```lean (name := filledbigp30) #eval IO.println <| pretty (width := 30) (fill (parenSeq (nums ++ nums ++ nums ++ nums))) ``` ```leanOutput filledbigp30 ( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ) ``` ::: ::::example "Newline Characters in Strings" Including a newline character in a string causes the rendering process to unconditionally insert a newline. These newlines do, however, respect the current indentation level. 
The document {name}`str` consists of an embedded string with two newlines: ```lean open Std Format def str : Format := text "abc\nxyz\n123" ``` :::paragraph Printing the string both with and without grouping results in the newlines being used: ```lean (name := str1) #eval IO.println str.pretty ``` ```leanOutput str1 abc xyz 123 ``` ```lean (name := str2) #eval IO.println (group str).pretty ``` ```leanOutput str2 abc xyz 123 ``` ::: :::paragraph Because the string does not terminate with a newline, the last line of the first string is on the same line as the first line of the second string: ```lean (name := str3) #eval IO.println (str ++ str).pretty ``` ```leanOutput str3 abc xyz 123abc xyz 123 ``` ::: :::paragraph Increasing the indentation level, however, causes all three lines of the string to begin at the same column: ```lean (name := str4) #eval IO.println (text "It is:" ++ indentD str).pretty ``` ```leanOutput str4 It is: abc xyz 123 ``` ```lean (name := str5) #eval IO.println (nest 8 <| text "It is:" ++ align true ++ str).pretty ``` ```leanOutput str5 It is: abc xyz 123 ``` ::: :::: ## Documents %%% tag := "format-api" %%% {docstring Std.Format} {docstring Std.Format.FlattenBehavior} {docstring Std.Format.fill} ## Empty Documents %%% tag := "format-empty" %%% :::paragraph The empty string does not have a single unique representative in {name}`Std.Format`. All of the following represent the empty string: * {lean (type := "Std.Format")}`.nil` * {lean (type := "Std.Format")}`.text ""` * {lean (type := "Std.Format")}`.text "" ++ .nil` * {lean (type := "Std.Format")}`.nil ++ .text ""` Use {name}`Std.Format.isEmpty` to check whether a document contains zero characters, and {name}`Std.Format.isNil` to specifically check whether it is the constructor {lean}`Std.Format.nil`. 
::: {docstring Std.Format.isEmpty} {docstring Std.Format.isNil} ## Sequences %%% tag := "format-join" %%% The operators in this section are useful when there is some kind of repeated content, such as the elements of a list. This is typically done by including {name Std.Format.line}`line` in their separator parameters, using a {ref "format-brackets"}[bracketing operator]. {docstring Std.Format.join} {docstring Std.Format.joinSep} {docstring Std.Format.prefixJoin} {docstring Std.Format.joinSuffix} ## Indentation %%% tag := "format-indent" %%% These operators make it easier to achieve a consistent indentation style on top of {name}`Std.Format.nest`. {docstring Std.Format.nestD} {docstring Std.Format.defIndent} {docstring Std.Format.indentD} ## Brackets and Parentheses %%% tag := "format-brackets" %%% These operators make it easier to achieve a consistent parenthesization style. {docstring Std.Format.bracket} {docstring Std.Format.sbracket} {docstring Std.Format.paren} {docstring Std.Format.bracketFill} ## Rendering %%% tag := "format-render" %%% The {inst}`ToString Std.Format` instance invokes {name}`Std.Format.pretty` with its default arguments. There are two ways to render a document: * Use {name Std.Format.pretty}`pretty` to construct a {name}`String`. The entire string must be constructed up front before any can be sent to a user. * Use {name Std.Format.prettyM}`prettyM` to incrementally emit the {name}`String`, using effects in some {name}`Monad`. As soon as each line is rendered, it is emitted. This is suitable for streaming output. {docstring Std.Format.pretty} {docstring Std.Format.defWidth} {docstring Std.Format.prettyM} {docstring Std.Format.MonadPrettyFormat} ## The `ToFormat` Class The {name}`Std.ToFormat` class is used to provide a standard means to format a value, with no expectation that this formatting be valid Lean syntax. These instances are used in error messages and by some of the {ref "format-join"}[sequence concatenation operators]. 
{docstring Std.ToFormat} # `Repr` %%% tag := "repr" %%% A {name}`Repr` instance describes how to represent a value as a {name}`Std.Format`. Because they should emit valid Lean syntax, these instances need to take {tech}[precedence] into account. Inserting the maximal number of parentheses would work, but it makes it more difficult for humans to read the resulting output. {docstring Repr} {docstring repr} {docstring reprStr} :::example "Maximal Parentheses" The type {name}`NatOrInt` can contain a {name}`Nat` or an {name}`Int`: ```lean inductive NatOrInt where | nat : Nat → NatOrInt | int : Int → NatOrInt ``` This {inst}`Repr NatOrInt` instance ensures that the output is valid Lean syntax by inserting many parentheses: ```lean instance : Repr NatOrInt where reprPrec x _ := .nestD <| .group <| match x with | .nat n => .text "(" ++ "NatOrInt.nat" ++ .line ++ "(" ++ repr n ++ "))" | .int i => .text "(" ++ "NatOrInt.int" ++ .line ++ "(" ++ repr i ++ "))" ``` Whether it contains a {name}`Nat`, a non-negative {name}`Int`, or a negative {name}`Int`, the result can be parsed: ```lean (name := parens) open NatOrInt in #eval do IO.println <| repr <| nat 3 IO.println <| repr <| int 5 IO.println <| repr <| int (-5) ``` ```leanOutput parens (NatOrInt.nat (3)) (NatOrInt.int (5)) (NatOrInt.int (-5)) ``` However, {lean}`(NatOrInt.nat (3))` is not particularly idiomatic Lean, and redundant parentheses can make it difficult to read large expressions. ::: The method {name}`Repr.reprPrec` has the following signature: ```signature Repr.reprPrec.{u} {α : Type u} [Repr α] : α → Nat → Std.Format ``` The first explicit parameter is the value to be represented, while the second is the {tech}[precedence] of the context in which it occurs. This precedence can be used to decide whether to insert parentheses: if the precedence of the syntax being produced by the instance is greater than that of its context, parentheses are necessary. 
## How To Write a `Repr` Instance %%% tag := "repr-instance-howto" %%% Lean can produce an appropriate {name}`Repr` instance for most types automatically using {ref "deriving-instances"}[instance deriving]. In some cases, however, it's necessary to write an instance by hand: * Some libraries provide functions as the primary instance to a type, rather than its constructors; in these cases, the {name}`Repr` instance should represent a call to these functions. For example, {name}`Std.HashSet.ofList` is used in the {inst}`Repr (HashSet α)` instance. * Some inductive types include well-formedness proofs. Because programs can't inspect proofs, they cannot be rendered directly. This is a common reason why a type would have an interface other than its constructors. * Types with special syntax, such as {name}`List`, should use this syntax in their {name}`Repr` instances. * The derived {name}`Repr` instance for structures uses {tech}[structure instance] notation. A hand-written instance can use the constructor's name explicitly or use {tech}[anonymous constructor syntax]. ```lean -show -keep /-- info: Std.HashSet.ofList [0, 3, 5] -/ #check_msgs in #eval IO.println <| repr (({} : Std.HashSet Nat).insert 3 |>.insert 5 |>.insert 0) ``` ```lean -show -keep structure S where x : Nat y : Nat deriving Repr /-- info: { x := 2, y := 3 } -/ #check_msgs in #eval IO.println <| repr <| S.mk 2 3 ``` When writing a custom {name}`Repr` instance, please follow these conventions: : Precedence Check precedence, adding parentheses as needed, and pass the correct precedence to the {name}`reprPrec` instances of embedded data. Each instance is responsible for surrounding itself in parentheses if needed; instances should generally not parenthesize recursive calls to {name}`reprPrec`. Function application has the maximum precedence, {lean}`max_prec`. 
The helpers {name}`Repr.addAppParen` and {name}`reprArg` respectively insert parentheses around applications when needed and pass the appropriate precedence to function arguments. : Fully-Qualified Names A {name}`Repr` instance does not have access to the set of open namespaces in a given position. All names of constants in the environment should be fully qualified to remove ambiguity. : Default Nesting Nested data should be indented using {name Std.Format.nestD}`nestD` to ensure consistent indentation across instances. : Grouping and Line Breaks The output of every {name}`Repr` instance that includes line breaks should be surrounded in a {name Std.Format.group}`group`. Furthermore, if the resulting code contains notional expressions that are nested, a {name Std.Format.group}`group` should be inserted around each nested level. Line breaks should usually be inserted in the following positions: * Between a constructor and each of its arguments * After `:=` * After `,` * Between the opening and closing braces of {tech}[structure instance] notation and its contents * After, but not before, an infix operator : Parentheses and Brackets Parentheses and brackets should be inserted using {name}`Std.Format.bracket` or its specializations {name}`Std.Format.paren` for parentheses and {name}`Std.Format.sbracket` for square brackets. These operators align the contents of the parenthesized or bracketed expression in the same way that Lean's pretty printer does. Trailing parentheses and brackets should not be placed on their own line, but rather stay with their contents. 
{docstring Repr.addAppParen} {docstring reprArg} :::example "Inductive Types with Constructors" The inductive type {name}`N.NatOrInt` can contain a {name}`Nat` or an {name}`Int`: ```lean namespace N inductive NatOrInt where | nat : Nat → NatOrInt | int : Int → NatOrInt ``` The {inst}`Repr NatOrInt` instance adheres to the conventions: * The right-hand side is a function application, so it uses {name}`Repr.addAppParen` to add parentheses if necessary. * Parentheses are wrapped around the entire body with no additional {name Std.Format.line}`line`s. * The entire function application is grouped, and it is nested the default amount. * The function is separated from its parameters by a use of {name Std.Format.line}`line`; this newline will usually be a space because the {inst}`Repr Nat` and {inst}`Repr Int` instances are unlikely to produce long output. * Recursive calls to {name}`reprPrec` pass {lean}`max_prec` because they are in function parameter positions, and function application has the highest precedence. 
```lean instance : Repr NatOrInt where reprPrec | .nat n => Repr.addAppParen <| .group <| .nestD <| "N.NatOrInt.nat" ++ .line ++ reprPrec n max_prec | .int i => Repr.addAppParen <| .group <| .nestD <| "N.NatOrInt.int" ++ .line ++ reprPrec i max_prec ``` ```lean (name := nat5) #eval IO.println (repr (NatOrInt.nat 5)) ``` ```leanOutput nat5 N.NatOrInt.nat 5 ``` ```lean (name := int5) #eval IO.println (repr (NatOrInt.int 5)) ``` ```leanOutput int5 N.NatOrInt.int 5 ``` ```lean (name := intm5) #eval IO.println (repr (NatOrInt.int (-5))) ``` ```leanOutput intm5 N.NatOrInt.int (-5) ``` ```lean (name := someintm5) #eval IO.println (repr (some (NatOrInt.int (-5)))) ``` ```leanOutput someintm5 some (N.NatOrInt.int (-5)) ``` ```lean (name := lstnat) #eval IO.println (repr <| (List.range 10).map (NatOrInt.nat)) ``` ```leanOutput lstnat [N.NatOrInt.nat 0, N.NatOrInt.nat 1, N.NatOrInt.nat 2, N.NatOrInt.nat 3, N.NatOrInt.nat 4, N.NatOrInt.nat 5, N.NatOrInt.nat 6, N.NatOrInt.nat 7, N.NatOrInt.nat 8, N.NatOrInt.nat 9] ``` ```lean (name := lstnat3) #eval IO.println <| Std.Format.pretty (width := 3) <| repr <| (List.range 10).map NatOrInt.nat ``` ```leanOutput lstnat3 [N.NatOrInt.nat 0, N.NatOrInt.nat 1, N.NatOrInt.nat 2, N.NatOrInt.nat 3, N.NatOrInt.nat 4, N.NatOrInt.nat 5, N.NatOrInt.nat 6, N.NatOrInt.nat 7, N.NatOrInt.nat 8, N.NatOrInt.nat 9] ``` ::: :::example "Infix Syntax" This example demonstrates the use of precedences to encode a left-associative pretty printer. The type {lean}`AddExpr` represents expressions with constants and addition: ```lean inductive AddExpr where | nat : Nat → AddExpr | add : AddExpr → AddExpr → AddExpr ``` The {name}`OfNat` and {name}`Add` instances provide a more convenient syntax for {name}`AddExpr`: ```lean instance : OfNat AddExpr n where ofNat := .nat n instance : Add AddExpr where add := .add ``` The {inst}`Repr AddExpr` instance should insert only the necessary parentheses. 
Lean's addition operator is left-associative, with precedence 65, so the recursive call to the left uses precedence 64 and the operator itself is parenthesized if the current context has precedence greater than or equal to 65: ```lean protected def AddExpr.reprPrec : AddExpr → Nat → Std.Format | .nat n, p => Repr.reprPrec n p | .add e1 e2, p => let out : Std.Format := .nestD <| .group <| AddExpr.reprPrec e1 64 ++ " " ++ "+" ++ .line ++ AddExpr.reprPrec e2 65 if p ≥ 65 then out.paren else out instance : Repr AddExpr := ⟨AddExpr.reprPrec⟩ ``` ```lean -show -keep -- Test that the guidelines provided for infix operators match Lean's own pretty printer /-- info: 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 : Nat -/ #check_msgs in #check 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 /-- info: 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 -/ #check_msgs in #eval (1 : AddExpr) + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 ``` Regardless of the input's parenthesization, this instance inserts only the necessary parentheses: ```lean (name := prec1) #eval IO.println (repr (((2 + 3) + 4) : AddExpr)) ``` ```leanOutput prec1 2 + 3 + 4 ``` ```lean (name:=prec2) #eval IO.println (repr ((2 + 3 + 4) : AddExpr)) ``` ```leanOutput prec2 2 + 3 + 4 ``` ```lean (name:=prec3) #eval IO.println (repr ((2 + (3 + 4)) : AddExpr)) ``` ```leanOutput prec3 2 + (3 + 4) ``` ```lean (name:=prec4) #eval IO.println (repr ([2 + (3 + 4), (2 + 3) + 4] : List AddExpr)) ``` ```leanOutput prec4 [2 + (3 + 4), 2 + 3 + 4] ``` The uses of {name Std.Format.group}`group`, {name Std.Format.nestD}`nestD`, and {name Std.Format.line}`line` in the implementation lead 
to the expected newlines and indentation in a narrow context: ```lean (name:=prec5) #eval ([2 + (3 + 4), (2 + 3) + 4] : List AddExpr) |> repr |>.pretty (width := 0) |> IO.println ``` ```leanOutput prec5 [2 + (3 + 4), 2 + 3 + 4] ``` ::: ## Atomic Types %%% tag := "ReprAtom" %%% When the elements of a list are sufficiently small, it can be both difficult to read and wasteful of space to render the list with one element per line. To improve readability, {name}`List` has two {name}`Repr` instances: one that uses {name}`Std.Format.bracket` for its contents, and one that uses {name}`Std.Format.bracketFill`. The latter is defined after the former and is thus selected when possible; however, it requires an instance of the empty type class {name}`ReprAtom`. If the {name}`Repr` instance for a type never generates spaces or newlines, then it should have a {name}`ReprAtom` instance. Lean has {name}`ReprAtom` instances for types such as {name}`String`, {name}`UInt8`, {name}`Nat`, {name}`Char`, and {name}`Bool`. ```lean -show open Lean Elab Command in #eval show CommandElabM Unit from for x in [``String, ``UInt8, ``Nat, ``Char, ``Bool] do runTermElabM fun _ => do discard <| Meta.synthInstance (.app (.const ``ReprAtom [0]) (.const x [])) Term.synthesizeSyntheticMVarsNoPostponing ``` {docstring ReprAtom} ::::example "Atomic Types and `Repr`" All constructors of the inductive type {name}`ABC` are without parameters: ```lean inductive ABC where | a | b | c deriving Repr ``` The derived {inst}`Repr ABC` instance is used to display lists: ```lean (name := abc1) def abc : List ABC := [.a, .b, .c] def abcs : List ABC := abc ++ abc ++ abc #eval IO.println ((repr abcs).pretty (width := 14)) ``` Because of the narrow width, line breaks are inserted: ```leanOutput abc1 [ABC.a, ABC.b, ABC.c, ABC.a, ABC.b, ABC.c, ABC.a, ABC.b, ABC.c] ``` :::paragraph However, converting the list to a {lean}`List Nat` leads to a differently-formatted result. 
```lean (name := abc2) def ABC.toNat : ABC → Nat | .a => 0 | .b => 1 | .c => 2 #eval IO.print ((repr (abcs.map ABC.toNat)).pretty (width := 14)) ``` There are far fewer line breaks: ```leanOutput abc2 [0, 1, 2, 0, 1, 2, 0, 1, 2] ``` ::: This is because of the existence of a {inst}`ReprAtom Nat` instance. Adding one for {name}`ABC` leads to similar behavior: ```lean (name := abc3) instance : ReprAtom ABC := ⟨⟩ #eval IO.println ((repr abcs).pretty (width := 14)) ``` ```leanOutput abc3 [ABC.a, ABC.b, ABC.c, ABC.a, ABC.b, ABC.c, ABC.a, ABC.b, ABC.c] ``` ::::
reference-manual/Manual/Grind/EMatching.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "E‑matching" => %%% tag := "e-matching" %%% {deftech}_E-matching_ is a procedure for efficiently instantiating quantified theorem statements with ground terms. It is widely employed in SMT solvers, and {tactic}`grind` uses it to instantiate theorems efficiently. It is especially effective when combined with {tech}[congruence closure], enabling {tactic}`grind` to discover non-obvious consequences of equalities and annotated theorems automatically. E-matching adds new facts to the metaphorical whiteboard, based on an index of theorems. When the whiteboard contains terms that match the index, the E-matching engine instantiates the corresponding theorems, and the resulting terms can feed further rounds of {tech}[congruence closure], {tech}[constraint propagation], and theory-specific solvers. Each fact added to the whiteboard by E-matching is referred to as an {deftech (key := "e-matching instance")}_instance_. Annotating theorems for E-matching, thus adding them to the index, is essential for enabling {tactic}`grind` to make effective use of a library. In addition to user-specified theorems, {tactic}`grind` uses automatically generated equations for {keywordOf Lean.Parser.Term.match}`match`-expressions as E-matching theorems. Behind the scenes, the {tech (key := "Lean elaborator")}[elaborator] generates auxiliary functions that implement pattern matches, along with equational theorems that specify their behavior. Using these equations with E-matching enables {tactic}`grind` to reduce these instances of pattern matching. # Patterns %%% tag := "e-matching-patterns" %%% The E-matching index is a table of _patterns_. 
When a term matches one of the patterns in the table, {tactic}`grind` attempts to instantiate and apply the corresponding theorem, giving rise to further facts and equalities. Selecting appropriate patterns is an important part of using {tactic}`grind` effectively: if the patterns are too restrictive, then useful theorems may not be applied; if they are too general, performance may suffer. ::::example "E-matching Patterns" Consider the following functions and theorems: ```lean def f (a : Nat) : Nat := a + 1 def g (a : Nat) : Nat := a - 1 @[grind =] theorem gf (x : Nat) : g (f x) = x := by simp [f, g] ``` ```lean -show variable {x a b : Nat} ``` The theorem {lean}`gf` asserts that {lean}`g (f x) = x` for all natural numbers {lean}`x`. The attribute {attr}`grind =` instructs {tactic}`grind` to use the left-hand side of the equation, {lean}`g (f x)`, as a pattern for heuristic instantiation via E-matching. This proof goal does not include an instance of {lean}`g (f x)`, but {tactic}`grind` is nonetheless able to solve it: ```lean example {a b} (h : f b = a) : g a = b := by grind ``` Although {lean}`g a` is not an instance of the pattern {lean}`g (f x)`, it becomes one modulo the equation {lean}`f b = a`. By substituting {lean}`a` with {lean}`f b` in {lean}`g a`, we obtain the term {lean}`g (f b)`, which matches the pattern {lean}`g (f x)` with the assignment `x := b`. Thus, the theorem {lean}`gf` is instantiated with `x := b`, and the new equality {lean}`g (f b) = b` is asserted. {tactic}`grind` then uses congruence closure to derive the implied equality {lean}`g a = g (f b)` and completes the proof. :::: The {keywordOf Lean.Parser.Command.grind_pattern}`grind_pattern` command can be used to manually select an E-matching pattern for a theorem. Enabling the option {option}`trace.grind.ematch.instance` causes {tactic}`grind` print a trace message for each theorem instance it generates, which can be helpful when determining E-matching patterns. 
:::syntax command (title := "E-matching Pattern Selection") ```grammar grind_pattern $_ => $_,* ``` Associates a theorem with one or more patterns. When multiple patterns are provided in a single {keywordOf Lean.Parser.Command.grind_pattern}`grind_pattern` command, _all_ of them must match a term before {tactic}`grind` will attempt to instantiate the theorem. ```grammar grind_pattern $_ => $_,* where $_ ``` The optional {keywordOf Lean.Parser.Command.grind_pattern}`where` clause specifies constraints that must be satisfied before {tactic}`grind` attempts to instantiate the theorem. Each constraint has the form `variable =/= value`, preventing instantiation when the pattern variable would be assigned the specified value. This is useful to avoid unbounded or excessive instantiations with problematic terms. ::: ::::example "Selecting Patterns" The {attr}`grind =` attribute uses the left side of the equality as the E-matching pattern for {lean}`gf`: ```lean def f (a : Nat) : Nat := a + 1 def g (a : Nat) : Nat := a - 1 @[grind =] theorem gf (x : Nat) : g (f x) = x := by simp [f, g] ``` For example, the pattern `g (f x)` is too restrictive in the following case: the theorem `gf` will not be instantiated because the goal does not even contain the function symbol `g`. In this example, {tactic}`grind` fails because the pattern is too restrictive: the goal does not contain the function symbol {lean}`g`. 
```lean +error (name := restrictivePattern) example (h₁ : f b = a) (h₂ : f c = a) : b = c := by grind ``` ```leanOutput restrictivePattern (expandTrace := eqc) `grind` failed case grind b a c : Nat h₁ : f b = a h₂ : f c = a h : ¬b = c ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] False propositions [prop] b = c [eqc] Equivalence classes [eqc] {a, f b, f c} ``` Using just `f x` as the pattern allows {tactic}`grind` to solve the goal automatically: ```lean grind_pattern gf => f x example {a b c} (h₁ : f b = a) (h₂ : f c = a) : b = c := by grind ``` Enabling {option}`trace.grind.ematch.instance` makes it possible to see the equalities found by E-matching: ```lean (name := ematchInstanceTrace) example (h₁ : f b = a) (h₂ : f c = a) : b = c := by set_option trace.grind.ematch.instance true in grind ``` ```leanOutput ematchInstanceTrace [grind.ematch.instance] gf: g (f c) = c [grind.ematch.instance] gf: g (f b) = b ``` After E-matching, the proof succeeds because congruence closure equates `g (f c)` with `g (f b)`, because both `f b` and `f c` are equal to `a`. Thus, `b` and `c` must be in the same equivalence class. :::: When multiple patterns are specified together, all of them must match in the current context before {tactic}`grind` attempts to instantiate the theorem. This is referred to as a {deftech}_multi-pattern_. This is useful for lemmas such as transitivity rules, where multiple premises must be simultaneously present for the rule to apply. A single theorem may be associated with multiple separate patterns by using multiple invocations of {keywordOf Lean.Parser.Command.grind_pattern}`grind_pattern` or the {attrs}`@[grind _=_]` attribute. If _any_ of these separate patterns match, the theorem will be instantiated. 
::::example "Multi-Patterns" {lean}`R` is a transitive binary relation over {lean}`Int`: ```lean opaque R : Int → Int → Prop axiom Rtrans {x y z : Int} : R x y → R y z → R x z ``` To use the fact that {lean}`R` is transitive, {tactic}`grind` must already be able to satisfy both premises. This is represented using a {tech}[multi-pattern]: ```lean grind_pattern Rtrans => R x y, R y z example {a b c d} : R a b → R b c → R c d → R a d := by grind ``` ```lean -show variable {x y z a b c d : Int} ``` The multi-pattern `R x y, R y z` instructs {tactic}`grind` to instantiate {lean}`Rtrans` only when both {lean}`R x y` and {lean}`R y z` are available in the context. In the example, {tactic}`grind` applies {lean}`Rtrans` to derive {lean}`R a c` from {lean}`R a b` and {lean}`R b c`, and can then repeat the same reasoning to deduce {lean}`R a d` from {lean}`R a c` and {lean}`R c d`. :::: ::::example "Pattern Constraints" Certain combinations of theorems can lead to unbounded instantiation, where E-matching repeatedly generates longer and longer terms. Consider theorems about {name}`List.flatMap` and {name}`List.reverse`. If {name}`List.flatMap_def`, {name}`List.flatMap_reverse`, and {name}`List.reverse_flatMap` are all annotated with {attrs}`@[grind =]`, then as soon as {name}`List.flatMap_reverse` is instantiated, the following chain of instantiations occurs, creating progressively longer function compositions with {name}`List.reverse`. 
This can be observed using the `#grind_lint` command: ``` attribute [local grind =] List.reverse_flatMap set_option trace.grind.ematch.instance true in #grind_lint inspect List.flatMap_reverse ``` The trace output shows the unbounded instantiation: ``` [grind.ematch.instance] List.flatMap_def: List.flatMap (List.reverse ∘ f) l = (List.map (List.reverse ∘ f) l).flatten [grind.ematch.instance] List.flatMap_def: List.flatMap f l.reverse = (List.map f l.reverse).flatten [grind.ematch.instance] List.flatMap_reverse: List.flatMap f l.reverse = (List.flatMap (List.reverse ∘ f) l).reverse [grind.ematch.instance] List.reverse_flatMap: (List.flatMap (List.reverse ∘ f) l).reverse = List.flatMap (List.reverse ∘ List.reverse ∘ f) l.reverse [grind.ematch.instance] List.flatMap_def: List.flatMap (List.reverse ∘ List.reverse ∘ f) l.reverse = (List.map (List.reverse ∘ List.reverse ∘ f) l.reverse).flatten ``` This pattern continues indefinitely, with each iteration adding another {name}`List.reverse` to the composition. The {keywordOf Lean.Parser.Command.grind_pattern}`where` clause prevents this by excluding problematic instantiations: ``` grind_pattern reverse_flatMap => (l.flatMap f).reverse where f =/= List.reverse ∘ _ ``` This instructs {tactic}`grind` to use the pattern `(l.flatMap f).reverse`, but only when `f` is not a composition with {name}`List.reverse`, preventing the unbounded chain of instantiations. You can use `#grind_lint check` to look for problematic patterns, or `#grind_lint check in List` or `#grind_lint check in module Std.Data` to look in specific namespaces or modules. :::: The {attr}`grind` attribute automatically generates an E-matching pattern or multi-pattern using a heuristic, instead of using {keywordOf Lean.Parser.Command.grindPattern}`grind_pattern` to explicitly specify a pattern. It includes a number of variants that select different heuristics. 
The {attr}`grind?` attribute displays an info message showing the pattern which was selected—this is very helpful for debugging! Patterns are subexpressions of theorem statements. A subexpression is {deftech}_indexable_ if it has an indexable constant as its head, and it is said to {deftech}_cover_ one of the theorem's arguments if it fixes the argument's value. Indexable constants are all constants other than {name}`Eq`, {name}`HEq`, {name}`Iff`, {name}`And`, {name}`Or`, and {name}`Not`. The set of arguments that are covered by a pattern or multi-pattern is referred to as its {deftech}_coverage_. Some constants are lower priority than others; in particular, the arithmetic operators {name}`HAdd.hAdd`, {name}`HSub.hSub`, {name}`HMul.hMul`, {name}`Dvd.dvd`, {name}`HDiv.hDiv`, and {name}`HMod.hMod` have low priority. An indexable subexpression is {deftech}_minimal_ if there is no smaller indexable subexpression whose head constant has at least as high priority. :::syntax attr (title := "Grind Patterns") When the {attr}`grind` attribute is added to a definition, it causes `grind` to unfold that definition to its body whenever it is encountered. When using the module system, if the body of the definition is not visible (e.g. via {attrs}`@[expose]`), then the {attr}`grind` attribute is ignored. ```grammar grind $[$_:grindMod]? ``` The {attr}`grind` attribute automatically generates an E-matching pattern for a theorem, using a strategy determined by the provided modifier. If no modifier is provided, then {attr}`grind` suggests suitable modifiers, displaying the resulting patterns. ```grammar grind! $[$_:grindMod]? ``` The {attr}`grind!` attribute automatically generates an E-matching pattern for a theorem, using a strategy determined by the provided modifier. It additionally enforces the condition that the selected pattern(s) should be minimal indexable subexpressions. ```grammar grind? $[$_:grindMod]? ``` The {attr}`grind?` displays the pattern that was generated. 
```grammar grind!? $[$_:grindMod]? ``` The {attr}`grind!?` attribute is equivalent to {attr}`grind!`, except it displays the resulting pattern for inspection. Without any modifier, {attrs}`@[grind]` traverses the conclusion and then the hypotheses from left to right, adding patterns as they increase the coverage, stopping when all arguments are covered. This default strategy can be explicitly requested using the {keywordOf Lean.Parser.Attr.grindDef}`.` modifier. In addition to using the default strategy, the attribute checks which other strategies could be applied, and displays all of the resulting patterns. ::: ```lean -keep -show -- This test will start failing if new grind modifiers are added. It's to make sure they're all -- documented (or at least that a decision has been made to _not_ document one of them). open Lean Parser Attr open Lean Elab Command deriving instance Repr for ParserDescr def getName : ParserDescr → CommandElabM String | .nodeWithAntiquot name .. => pure name | other => throwError m!"Expected a {.ofConstName ``nodeWithAntiquot}, got {repr other}" def getOrElse (descr : ParserDescr) : CommandElabM (Array ParserDescr) := do match descr with | .binary `orelse x y => return (← getOrElse x) ++ (← getOrElse y) | other => return #[other] def getGrindAlts (descr : ParserDescr) : CommandElabM (Array String) := do if let .nodeWithAntiquot "grindMod" ``grindMod d' := descr then let cases ← getOrElse d' return (← cases.mapM getName).qsort else throwError "Expected a {.ofConstName ``nodeWithAntiquot}, got {repr descr}" /-- info: `grindMod` can be these: grindBwd grindCases grindCasesEager grindDef grindEq grindEqBoth grindEqBwd grindEqRhs grindExt grindFunCC grindFwd grindGen grindInj grindIntro grindLR grindNorm grindRL grindSym grindUnfold grindUsr -/ #guard_msgs in #eval show CommandElabM Unit from do let allMods ← getGrindAlts grindMod IO.println "`grindMod` can be these:" for gmod in allMods do IO.println gmod ``` :::syntax Lean.Parser.Attr.grindMod 
(title := "Default Pattern") ```grammar . ``` ```grammar · ``` {includeDocstring Lean.Parser.Attr.grindDef} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Equality Rewrites") ```grammar = ``` {includeDocstring Lean.Parser.Attr.grindEq} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Backward Equality Rewrites") ```grammar =_ ``` {includeDocstring Lean.Parser.Attr.grindEqRhs} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Bidirectional Equality Rewrites") ```grammar _=_ ``` {includeDocstring Lean.Parser.Attr.grindEqBoth} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Forward Reasoning") ```grammar → ``` {includeDocstring Lean.Parser.Attr.grindFwd} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Backward Reasoning") ```grammar ← ``` {includeDocstring Lean.Parser.Attr.grindBwd} ::: It is important to inspect the patterns generated by the {attrs}`@[grind]` attribute to ensure that they match the correct parts of the lemma. If the pattern is too strict, the lemma will not be applied in situations where it would be relevant, leading to less automation. If it is too general, then performance will suffer as the lemma is tried in many situations where it is not helpful. There are also three less commonly used modifiers for lemmas: :::syntax Lean.Parser.Attr.grindMod (title := "Left-to-Right Traversal") ```grammar => ``` ```grammar ⇒ ``` {includeDocstring Lean.Parser.Attr.grindLR} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Right-to-Left Traversal") ```grammar <= ``` ```grammar ⇐ ``` {includeDocstring Lean.Parser.Attr.grindRL} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Backward Reasoning on Equality") ```grammar ←= ``` {includeDocstring Lean.Parser.Attr.grindEqBwd} ::: :::example "The `@[grind ←=]` Attribute" ```lean -show variable {α} {a b : α} [Inv α] ``` When attempting to prove that {lean}`a⁻¹ = b`, {tactic}`grind` uses {name}`inv_eq` due to the {attrs}`@[grind ←=]` annotation. 
```lean @[grind ←=] theorem inv_eq [One α] [Mul α] [Inv α] {a b : α} (w : a * b = 1) : a⁻¹ = b := sorry ``` ::: :::syntax Lean.Parser.Attr.grindMod (title := "Function-Valued Congruence Closure") ```grammar funCC ``` {includeDocstring Lean.Parser.Attr.grindFunCC} ::: Some additional modifiers can be used to add other kinds of lemmas to the index. This includes extensionality theorems, injectivity theorems for functions, and a shortcut to add all constructors of an inductively defined predicate to the index. :::syntax Lean.Parser.Attr.grindMod (title := "Extensionality") ```grammar ext ``` {includeDocstring Lean.Parser.Attr.grindExt} In addition, adding {attrs}`@[grind ext]` to a structure registers a its extensionality theorem. ::: ::::example "The `@[grind ext]` Attribute" {lean}`Point` is a structure with two fields: ```lean structure Point where x : Int y : Int ``` By default, {tactic}`grind` can solve goals like this one, because definitional equality includes {tech (key := "η-equivalence")}[η-equivalence] for product types: ```lean example (p : Point) : p = ⟨p.x, p.y⟩ := by grind ``` However, it can't solve goals like this one that require an appeal to propositional equalities: ```lean +error (name := noExt) example (p : Point) (a : Int) : a = p.x → p = ⟨a, p.y⟩ := by grind ``` ```leanOutput noExt `grind` failed case grind p : Point a : Int h : a = p.x h_1 : ¬p = { x := a, y := p.y } ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] False propositions [eqc] Equivalence classes ``` This kind of goal may come up when proving theorems like the fact that swapping the fields of a point twice is the identity: ```lean def Point.swap (p : Point) : Point := ⟨p.y, p.x⟩ ``` ```lean +error (name := noExt') theorem swap_swap_eq_id : Point.swap ∘ Point.swap = id := by unfold Point.swap grind ``` ```leanOutput noExt' `grind` failed case grind h : ¬((fun p => { x := p.y, y := p.x }) ∘ fun p => { x := p.y, y := p.x }) = id w : Point h_1 : ¬{ x := w.x, y := w.y } = 
id w ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [eqc] False propositions [eqc] Equivalence classes [cases] Case analyses [ematch] E-matching patterns [grind] Diagnostics ``` Adding the {attrs}`@[grind ext]` attribute to {name}`Point` enables {tactic}`grind` to solve both the original example and prove this theorem: ```lean attribute [grind ext] Point example (p : Point) (a : Int) : a = p.x → p = ⟨a, p.y⟩ := by grind theorem swap_swap_eq_id' : Point.swap ∘ Point.swap = id := by unfold Point.swap grind ``` :::: :::syntax Lean.Parser.Attr.grindMod (title := "Injectivity") ```grammar inj ``` {includeDocstring Lean.Parser.Attr.grindInj} ::: :::example "Injectivity Patterns" This function {name}`double` doubles its argument: ```lean def double (x : Nat) : Nat := x + x ``` By default, {tactic}`grind` cannot prove the following theorem: ```lean +error theorem A {n k : Nat} : double (n + 5) = double (k - 3) → n + 8 = k := by grind ``` However, {name}`double` is injective, and this fact can be registered for {tactic}`grind` using the {attr}`grind inj` attribute: ```lean @[grind inj] theorem double_inj : Function.Injective double := by simp only [double, Function.Injective] grind ``` This injectivity lemma suffices to prove the theorem: ```lean theorem B {n k : Nat} : double (n + 5) = double (k - 3) → n + 8 = k := by grind ``` ::: :::syntax Lean.Parser.Attr.grindMod (title := "Constructor Patterns") ```grammar intro ``` {includeDocstring Lean.Parser.Attr.grindIntro} ::: :::example "Patterns for Constructors" The predicate {name}`Decreasing` states that each of the values in a list of integers is less than the one before, and the function {name}`decreasing` checks this property, returning a {name}`Bool`. 
```lean inductive Decreasing : List Int → Prop | nil : Decreasing [] | singleton : Decreasing [x] | cons : Decreasing (x :: xs) → y > x → Decreasing (y :: x :: xs) def decreasing : List Int → Bool | [] | [_] => true | y :: x :: xs => y > x && decreasing (x :: xs) ``` The function is correct if it returns {name}`true` exactly when {name}`Decreasing` holds for its argument. Attempting to prove this fact using a combination of {tactic}`fun_induction` and {tactic}`grind` fails immediately, with none of the three cases proven: ```lean +error (name := decreasingCorrect1) def decreasingCorrect : decreasing xs = Decreasing xs := by fun_induction decreasing <;> grind ``` ```leanOutput decreasingCorrect1 `grind` failed case grind h : True = ¬Decreasing [] ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [eqc] False propositions ``` ```leanOutput decreasingCorrect1 `grind` failed case grind head : Int h : True = ¬Decreasing [head] ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [eqc] False propositions ``` ```leanOutput decreasingCorrect1 `grind` failed case grind.1 y x : Int xs : List Int ih1 : (decreasing (x :: xs) = true) = Decreasing (x :: xs) h : (-1 * y + x + 1 ≤ 0 ∧ decreasing (x :: xs) = true) = ¬Decreasing (y :: x :: xs) left : -1 * y + x + 1 ≤ 0 left_1 : decreasing (x :: xs) = true right_1 : ¬Decreasing (y :: x :: xs) ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [eqc] False propositions [eqc] Equivalence classes [cases] Case analyses [cutsat] Assignment satisfying linear constraints ``` Adding the {attr}`grind intro` attribute to {name}`Decreasing` results in E-matching patterns being added for each of the three constructors, after which {tactic}`grind` can prove the first two goals, and requires only a case analysis of a hypothesis to prove the final goal: ```lean attribute [grind intro] Decreasing def decreasingCorrect' : decreasing xs = Decreasing xs := by 
fun_induction decreasing <;> try grind case case3 y x xs ih => apply propext constructor . grind . intro | .cons hDec hLt => grind ``` Adding {attr}`grind cases` to {name}`Decreasing` enables this case analysis automatically, resulting in a fully automatic proof: ```lean attribute [grind cases] Decreasing def decreasingCorrect'' : decreasing xs = Decreasing xs := by fun_induction decreasing <;> grind ``` ::: :::syntax Lean.Parser.Attr.grindMod (title := "Unfolding During Preprocessing") ```grammar unfold ``` {includeDocstring Lean.Parser.Attr.grindUnfold} ::: :::syntax Lean.Parser.Attr.grindMod (title := "Normalization Rules") ```grammar norm ``` {includeDocstring Lean.Parser.Attr.grindNorm} ::: {TODO}[Document `gen` modifier for `grind` patterns] # Inspecting Patterns The {attr}`grind?` attribute is a version of the {attr}`grind` attribute that additionally displays the generated pattern or {tech}[multi-pattern]. Patterns and multi-patterns are displayed as lists of subexpressions, each of which is a pattern; ordinary patterns are displayed as singleton lists. In these displayed patterns, the names of defined constants are printed as-is. When the theorem's parameters occur in the pattern, they are displayed using numbers rather than names. In particular, they are numbered from right to left, starting at 0; this representation is referred to as {deftech}_de Bruijn indices_. :::example "Inspecting Patterns" (open := true) In order to use this proof that divisibility is transitive with {tactic}`grind`, it requires E-matching patterns: ```lean theorem div_trans {n k j : Nat} : n ∣ k → k ∣ j → n ∣ j := by intro ⟨d₁, p₁⟩ ⟨d₂, p₂⟩ exact ⟨d₁ * d₂, by rw [p₂, p₁, Nat.mul_assoc]⟩ ``` The right attribute to use is {attrs}`@[grind →]`, because there should be a pattern for each premise. Using {attrs}`@[grind? →]`, it is possible to see which patterns are generated: ```lean (name := grindHuh) attribute [grind? 
→] div_trans ``` There are two: ```leanOutput grindHuh div_trans: [@Dvd.dvd `[Nat] `[Nat.instDvd] #4 #3, @Dvd.dvd `[Nat] `[Nat.instDvd] #3 #2] ``` Arguments are numbered from right to left, so `#0` is the assumption that `k ∣ j`, while `#4` is `n`. Thus, these two patterns correspond to the terms `n ∣ k` and `k ∣ j`. ::: The rules for selecting patterns from subexpressions of the hypotheses and conclusion are subtle. :::TODO more text ::: :::example "Forward Pattern Generation" (open := true) ```lean axiom p : Nat → Nat axiom q : Nat → Nat ``` ```lean (name := h1) @[grind!? →] theorem h₁ (w : 7 = p (q x)) : p (x + 1) = q x := sorry ``` ```leanOutput h1 h₁: [q #1] ``` The pattern is `q x`. Counting from the right, parameter `#0` is the premise `w` and parameter `#1` is the implicit parameter `x`. Why did `@[grind →]`? select `q #1`? The attribute `@[grind →]` finds patterns by traversing the hypotheses (that is, parameters whose types are propositions) from left to right. In this case, there's only a single hypothesis: `7 = p (q x)`. The heuristic described above says that {attr}`grind` will search for a minimal {tech}[indexable] subexpression which {tech}[covers] a previously uncovered parameter. There's just one uncovered parameter, namely `x`. The whole hypothesis `p (q x) = 7` can't be used because {tactic}`grind` will not index on equality. The right-hand side `7` is not helpful, because it doesn't determine the value of `x`. `p (q x)` is not suitable because it is not minimal: it has `q x` inside of it, which is indexable (its head is the constant `q`), and it determines the value of `x`. The expression `q x` itself is minimal, because `x` is not indexable. Thus, `q x` is selected as the pattern. 
::: :::example "Backward Pattern Generation" (open := true) ```lean -show axiom p : Nat → Nat axiom q : Nat → Nat ``` In this example, the {keywordOf Lean.Parser.Attr.grindMod}`←` modifier indicates that the pattern should be found in the conclusion: ```lean (name := h2) set_option trace.grind.debug.ematch.pattern true in @[grind? ←] theorem h₂ (w : 7 = p (q x)) : p (x + 1) = q x := sorry ``` The left side of the equality is used because {name}`Eq` is not indexable and {name}`HAdd.hAdd` has lower priority than {lean}`p`. ```leanOutput h2 h₂: [p (#1 + 1)] ``` ::: :::example "Bidirectional Equality Pattern Generation" (open := true) ```lean -show axiom p : Nat → Nat axiom q : Nat → Nat ``` In this example, two separate E-matching patterns are generated from the equality conclusion. One matches the left-hand side, and the other matches the right-hand side. ```lean (name := h3) @[grind? _=_] theorem h₃ (w : 7 = p (q x)) : p (x + 1) = q x := sorry ``` ```leanOutput h3 h₃: [q #1] ``` The entire left side of the equality is used instead of just `x + 1` because {name}`HAdd.hAdd` has lower priority than {lean}`p`. ```leanOutput h3 h₃: [p (#1 + 1)] ``` ::: :::example "Patterns from Conclusion and Hypotheses" (open := true) ```lean -show axiom p : Nat → Nat axiom q : Nat → Nat ``` Without any modifiers, {attrs}`@[grind]` produces a multipattern by first checking the conclusion and then the premises: ```lean (name := h4) @[grind? .] theorem h₄ (w : p x = q y) : p (x + 2) = 7 := sorry ``` Here, argument `x` is `#2`, `y` is `#1`, and `w` is `#0`. The resulting multipattern contains the left-hand side of the equality, which is the only {tech}[minimal] {tech}[indexable] subexpression of the conclusion that covers an argument (namely `x`). It also contains `q y`, which is the only minimal indexable subexpression of the hypothesis `w` that covers an additional argument (namely `y`). 
```leanOutput h4 h₄: [p (#2 + 2), q #1] ``` ::: :::example "Failing Backward Pattern Generation" (open := true) ```lean -show axiom p : Nat → Nat axiom q : Nat → Nat ``` In this example, pattern generation fails because the theorem's conclusion doesn't mention the argument `y`. ```lean (name := h5) +error @[grind? ←] theorem h₅ (w : p x = q y) : p (x + 2) = 7 := sorry ``` ```leanOutput h5 `@[grind ←] theorem h₅` failed to find patterns in the theorem's conclusion, consider using different options or the `grind_pattern` command ``` ::: :::example "Left-to-Right Generation" (open := true) ```lean -show axiom p : Nat → Nat axiom q : Nat → Nat ``` In this example, the pattern is generated by traversing the premises from left to right, followed by the conclusion: ```lean (name := h6) @[grind? =>] theorem h₆ (_ : q (y + 2) = q y) (_ : q (y + 1) = q y) : p (x + 2) = 7 := sorry ``` In the patterns, `y` is argument `#3` and `x` is argument `#2`, because {tech}[automatic implicit parameters] are inserted from left to right and `y` occurs before `x` in the theorem statement. The premises are arguments `#1` and `#0`. In the resulting multipattern, `y` is covered by a subexpression of the first premise, and `z` is covered by a subexpression of the conclusion: ```leanOutput h6 h₆: [q (#3 + 2), p (#2 + 2)] ``` ::: # Resource Limits %%% tag := "grind-limits" %%% E-matching can generate an unbounded number of theorem {tech (key := "e-matching instance")}[instances]. For the sake of both efficiency and termination, {tactic}`grind` limits the number of times that E-matching can run using two mechanisms: : Generations Each term is assigned a {deftech}_generation_, and terms produced by E-matching have a generation that is one greater than the maximal generation of all the terms used to instantiate the theorem. E-matching only considers terms whose generation is beneath a configurable threshold. The `gen` option to {tactic}`grind` controls the generation threshold. 
: Round Limits Each invocation of the E-matching engine is referred to as a {deftech}_round_. Only a limited number of rounds of E-matching are performed. The `ematch` option to {tactic}`grind` controls the round limit. :::example "Too Many Instances" (open := true) E-matching can generate too many theorem {tech (key := "e-matching instance")}[instances]. Some patterns may even generate an unbounded number of instances. In this example, {name}`s_eq` is added to the index with the pattern `s x`: ```lean (name := ematchUnboundedPat) def s (x : Nat) := 0 @[grind? =] theorem s_eq (x : Nat) : s x = s (x + 1) := rfl ``` ```leanOutput ematchUnboundedPat s_eq: [s #0] ``` Attempting to use this theorem results in many facts about {lean}`s` applied to concrete values being generated. In particular, {lean}`s_eq` is instantiated with a new {lean}`Nat` in each of the five rounds. First, {tactic}`grind` instantiates {lean}`s_eq` with `x := 0`, which generates the term {lean}`s 1`. This matches the pattern `s x`, and is thus used to instantiate {lean}`s_eq` with `x := 1`, which generates the term {lean}`s 2`, and so on until the round limit is reached. 
```lean +error (name := ematchUnbounded) example : s 0 > 0 := by grind ``` ```leanOutput ematchUnbounded (expandTrace := limits) (expandTrace := ematch) (expandTrace := facts) `grind` failed case grind h : s 0 = 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [prop] s 0 = 0 [prop] s 0 = s 1 [prop] s 1 = s 2 [prop] s 2 = s 3 [prop] s 3 = s 4 [prop] s 4 = s 5 [eqc] Equivalence classes [ematch] E-matching patterns [thm] s_eq: [s #0] [cutsat] Assignment satisfying linear constraints [limits] Thresholds reached [limit] maximum number of E-matching rounds has been reached, threshold: `(ematch := 5)` [grind] Diagnostics ``` Increasing the round limit to 20 causes E-matching to terminate due to the default generation limit of 8: ```lean +error (name := ematchUnbounded2) example : s 0 > 0 := by grind (ematch := 20) ``` ```leanOutput ematchUnbounded2 (expandTrace := limits) (expandTrace := ematch) (expandTrace := facts) `grind` failed case grind h : s 0 = 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [prop] s 0 = 0 [prop] s 0 = s 1 [prop] s 1 = s 2 [prop] s 2 = s 3 [prop] s 3 = s 4 [prop] s 4 = s 5 [prop] s 5 = s 6 [prop] s 6 = s 7 [prop] s 7 = s 8 [eqc] Equivalence classes [ematch] E-matching patterns [thm] s_eq: [s #0] [cutsat] Assignment satisfying linear constraints [limits] Thresholds reached [limit] maximum term generation has been reached, threshold: `(gen := 8)` [grind] Diagnostics ``` ::: :::example "Increasing E-matching Limits" {lean}`iota` returns the list of all numbers strictly less than its argument, and the theorem {lean}`iota_succ` describes its behavior on {lean}`Nat.succ`: ```lean def iota : Nat → List Nat | 0 => [] | n + 1 => n :: iota n @[grind =] theorem iota_succ : iota (n + 1) = n :: iota n := rfl ``` The fact that {lean}`(iota 20).length > 10` can be proven by repeatedly instantiating {lean}`iota_succ` and {lean}`List.length_cons`. 
However, {tactic}`grind` does not succeed: ```lean +error (name := biggerGrindLimits) example : (iota 20).length > 10 := by grind ``` ```leanOutput biggerGrindLimits (expandTrace := limits) (expandTrace := facts) `grind` failed case grind h : (iota 20).length ≤ 10 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [prop] (iota 20).length ≤ 10 [prop] iota 20 = 19 :: iota 19 [prop] iota 19 = 18 :: iota 18 [prop] (19 :: iota 19).length = (iota 19).length + 1 [prop] iota 18 = 17 :: iota 17 [prop] (18 :: iota 18).length = (iota 18).length + 1 [prop] iota 17 = 16 :: iota 16 [prop] (17 :: iota 17).length = (iota 17).length + 1 [prop] iota 16 = 15 :: iota 15 [prop] (16 :: iota 16).length = (iota 16).length + 1 [eqc] True propositions [eqc] Equivalence classes [ematch] E-matching patterns [cutsat] Assignment satisfying linear constraints [ring] Ring `Lean.Grind.Ring.OfSemiring.Q Nat` [limits] Thresholds reached [limit] maximum number of E-matching rounds has been reached, threshold: `(ematch := 5)` [grind] Diagnostics ``` Due to the limited number of E-matching rounds, the chain of instantiations is not completed. Increasing these limits allows {tactic}`grind` to succeed: ```lean example : (iota 20).length > 10 := by grind (gen := 20) (ematch := 20) ``` When the option {option}`diagnostics` is set to {lean}`true`, {tactic}`grind` displays the number of instances that it generates for each theorem. This is useful to detect theorems that contain patterns that are triggering too many instances. 
In this case, the diagnostics show that {name}`iota_succ` is instantiated 12 times: ```lean (name := grindDiagnostics) set_option diagnostics true in set_option diagnostics.threshold 10 in example : (iota 20).length > 10 := by grind (gen := 20) (ematch := 20) ``` ```leanOutput grindDiagnostics (expandTrace := grind) (expandTrace := thm) [grind] Diagnostics [thm] E-Matching instances [thm] iota_succ ↦ 12 [thm] List.length_cons ↦ 11 [app] Applications [grind] Simplifier [simp] used theorems (max: 15, num: 2): [simp] tried theorems (max: 46, num: 1): use `set_option diagnostics.threshold <num>` to control threshold for reporting counters ``` ::: By default, {tactic}`grind` uses automatically generated equations for {keywordOf Lean.Parser.Term.match}`match`-expressions as E-matching theorems. This can be disabled by setting the `matchEqs` flag to {lean}`false`. :::example "E-matching and Pattern Matching" Enabling diagnostics shows that {tactic}`grind` uses one of the equations of the auxiliary matching function during E-matching: ```lean (name := gt1diag) theorem gt1 (x y : Nat) : x = y + 1 → 0 < match x with | 0 => 0 | _ + 1 => 1 := by set_option diagnostics true in grind ``` ```leanOutput gt1diag (expandTrace := grind) (expandTrace := thm) [grind] Diagnostics [thm] E-Matching instances [thm] gt1.match_1.congr_eq_2 ↦ 1 [app] Applications ``` The theorem has this type: ```lean (name := gt1matchtype) #check gt1.match_1.congr_eq_2 ``` ```leanOutput gt1matchtype gt1.match_1.congr_eq_2.{u_1} (motive : Nat → Sort u_1) (x✝ : Nat) (h_1 : Unit → motive 0) (h_2 : (n : Nat) → motive n.succ) (n✝ : Nat) (heq_1 : x✝ = n✝.succ) : (match x✝ with | 0 => h_1 () | n.succ => h_2 n) ≍ h_2 n✝ ``` Disabling the use of matcher function equations causes the proof to fail: ```lean +error (name := noMatchEqs) example (x y : Nat) : x = y + 1 → 0 < match x with | 0 => 0 | _+1 => 1 := by grind -matchEqs ``` ```leanOutput noMatchEqs `grind` failed case grind.2 x y : Nat h : x = y + 1 h_1 : (match 
x with | 0 => 0 | n.succ => 1) = 0 n : Nat h_2 : x = n + 1 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] Equivalence classes [cases] Case analyses [cutsat] Assignment satisfying linear constraints [ring] Rings [grind] Diagnostics ``` ::: {optionDocs trace.grind.ematch.instance} :::comment TBD * anti‑patterns * local vs global attributes * `gen` modifier? :::
reference-manual/Manual/Grind/Annotation.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Annotating Libraries for `grind`" => %%% tag := "grind-annotation" %%% To use {tactic}`grind` effectively with a library, it must be annotated by applying the {attr}`grind` attribute to suitable lemmas or declaring {keywordOf Lean.Parser.Command.grindPattern}`grind_pattern`s. These annotations direct {tactic}`grind`'s selection of theorems, which lead to further facts on the metaphorical whiteboard. With too few annotations, {tactic}`grind` will fail to use the lemmas; with too many, it may become slow or fail due to exhausting resource limitations. Annotations should generally be conservative: only add an annotation if you expect that {tactic}`grind` should _always_ instantiate the theorem once the patterns are matched. # Simp Lemmas Typically, many theorems that are annotated with {attrs}`@[simp]` should also be annotated with {attrs}`@[grind =]`. One significant exception is that typically we avoid having {attrs}`@[simp]` theorems that introduce an {keywordOf Lean.Parser.Term.if}`if` on the right hand side, instead preferring a pair of theorems with the positive and negative conditions as hypotheses. Because {tactic}`grind` is designed to perform case splitting, it is generally better to instead annotate the single theorem introducing the {keywordOf Lean.Parser.Term.if}`if` with {attrs}`@[grind =]`. Besides using {attrs}`@[grind =]` to encourage {tactic}`grind` to perform rewriting from left to right, you can also use {attrs}`@[grind _=_]` to “saturate”, allowing bidirectional rewriting whenever either side is encountered. # Backwards and Forwards Reasoning :::paragraph Use {attrs}`@[grind ←]` (which generates patterns from the conclusion of the theorem) for backwards reasoning theorems, i.e. 
theorems that should be tried when their conclusion matches a goal. Some examples of theorems in the standard library that are annotated with {attr}`grind ←` are: * ```signature Array.not_mem_empty (a : α) : ¬ a ∈ #[] ``` * ```signature Array.getElem_filter {xs : Array α} {p : α → Bool} {i : Nat} (h : i < (xs.filter p).size) : p (xs.filter p)[i] ``` * ```signature List.Pairwise.tail {l : List α} (h : Pairwise R l) : Pairwise R l.tail ``` In each case, the lemma is relevant when its conclusion matches a proof goal. ::: :::paragraph Use {attrs}`@[grind →]` (which generates patterns from the hypotheses) for forwards reasoning theorems, i.e. where facts should be propagated from existing facts on the whiteboard. Some examples of theorems in the standard library that are annotated with {attr}`grind →` are: * ```signature List.getElem_of_getElem? {l : List α} : l[i]? = some a → ∃ h : i < l.length, l[i] = a ``` * ```signature Array.mem_of_mem_erase [BEq α] {a b : α} {xs : Array α} (h : a ∈ xs.erase b) : a ∈ xs ``` * ```signature List.forall_none_of_filterMap_eq_nil (h : filterMap f xs = []) : ∀ x ∈ xs, f x = none ``` In these cases, the theorems' assumptions determine when they are relevant. ::: There are many uses for custom patterns created with the {keywordOf Lean.Parser.Command.grindPattern}`grind_pattern` command. One common use is to introduce inequalities about terms, or membership propositions. :::keepEnv ```lean -show section def count := @Array.count theorem countP_le_size [BEq α] {a : α} {xs : Array α} : count a xs ≤ xs.size := Array.countP_le_size notation "..." => countP_le_size ``` We might have ```lean variable [BEq α] theorem count_le_size {a : α} {xs : Array α} : count a xs ≤ xs.size := ... grind_pattern count_le_size => count a xs ``` ```lean -show variable {a : α} {xs : Array α} ``` which will register this inequality as soon as a {lean}`count a xs` term is encountered (even if the problem has not previously involved inequalities). 
```lean -show end ``` ::: We can also use multi-patterns to be more restrictive, e.g. only introducing an inequality about sizes if the whiteboard already contains facts about sizes: ```lean theorem size_pos_of_mem {xs : Array α} (h : a ∈ xs) : 0 < xs.size := sorry grind_pattern size_pos_of_mem => a ∈ xs, xs.size ``` :::leanSection ```lean -show variable {a : α} {xs : Array α} ``` Unlike a {attrs}`@[grind →]` attribute, which would cause this theorem to be instantiated whenever {lean}`a ∈ xs` is encountered, this pattern will only be used when {lean}`xs.size` is already on the whiteboard. (Note that this grind pattern could also be produced using the {attrs}`@[grind <=]` attribute, which looks at the conclusion first, then backwards through the hypotheses to select patterns. On the other hand, {attrs}`@[grind →]` would select only {lean}`a ∈ xs`.) ::: ::::keepEnv :::leanSection ```lean -show axiom R : Type axiom sin : R → R axiom cos : R → R @[instance] axiom instAdd : Add R @[instance] axiom instOfNatR : OfNat R n @[instance] axiom instHPowR : HPow R Nat R variable {x : R} axiom sin_sq_add_cos_sq' : sin x ^ 2 + cos x ^ 2 = 1 notation "..." => sin_sq_add_cos_sq' ``` In Mathlib we might want to enable polynomial reasoning about the sine and cosine functions, and so add a custom grind pattern ```lean theorem sin_sq_add_cos_sq : sin x ^ 2 + cos x ^ 2 = 1 := ... grind_pattern sin_sq_add_cos_sq => sin x, cos x ``` which will instantiate the theorem as soon as *both* {lean}`sin x` and {lean}`cos x` (with the same {lean}`x`) are encountered. This theorem will then automatically enter the Gröbner basis module, and be used to reason about polynomial expressions involving both {lean}`sin x` and {lean}`cos x`. One could alternatively, more aggressively, write two separate grind patterns so that this theorem is instantiated when either {lean}`sin x` or {lean}`cos x` is encountered. ::: ::::
reference-manual/Manual/Grind/CaseAnalysis.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Case Analysis" => %%% tag := "grind-split" %%% In addition to congruence closure and constraint propagation, {tactic}`grind` performs case analysis. During case analysis, {tactic}`grind` considers each possible way that a term could have been built, or each possible value of a particular term, in a manner similar to the {tactic}`cases` and {tactic}`split` tactics. This case analysis is not exhaustive: {tactic}`grind` only recursively splits cases up to a configured depth limit, and configuration options and annotations control which terms are candidates for splitting. # Selection Heuristics {tactic}`grind` decides which sub‑term to split on by combining three sources of signal: : Structural flags These configuration flags determine whether {tactic}`grind` performs certain case splits: : `splitIte` (default {lean}`true`) Every {keywordOf Lean.Parser.Term.ite}`if`-term should be split, as if by the {tactic}`split` tactic. : `splitMatch` (default {lean}`true`) Every {keywordOf Lean.Parser.Term.match}`match`-term should be split, as if by the {tactic}`split` tactic. : `splitImp` (default {lean}`false`) :::leanSection ```lean -show variable {A : Prop} {B : Sort u} ``` Hypotheses of the form {lean}`A → B` whose antecedent {lean}`A` is *propositional* are split by considering all possibilities for {lean}`A`. Arithmetic antecedents are special‑cased: if {lean}`A` is an arithmetic literal (that is, a proposition formed by operators such as `≤`, `=`, `¬`, {lean}`Dvd`, …) then {tactic}`grind` will split _even when `splitImp := false`_ so the integer solver can propagate facts. ::: : Global limits The {tactic}`grind` option `splits := n` caps the depth of the search tree. 
Once a branch performs `n` splits {tactic}`grind` stops splitting further in that branch; if the branch cannot be closed it reports that the split threshold has been reached. : Manual annotations Inductive predicates or structures may be tagged with the {attr}`grind cases` attribute. {tactic}`grind` treats every instance of that predicate as a candidate for splitting. :::syntax attr (title := "Case Analysis") ```grammar grind cases ``` {includeDocstring Lean.Parser.Attr.grindCases} ::: :::syntax attr (title := "Eager Case Analysis") ```grammar grind cases eager ``` {includeDocstring Lean.Parser.Attr.grindCasesEager} ::: :::example "Splitting Conditional Expressions" In this example, {tactic}`grind` proves the theorem by considering both cases for the conditional: ```lean example (c : Bool) (x y : Nat) (h : (if c then x else y) = 0) : x = 0 ∨ y = 0 := by grind ``` Disabling `splitIte` causes the proof to fail: ```lean +error (name := noSplitIte) example (c : Bool) (x y : Nat) (h : (if c then x else y) = 0) : x = 0 ∨ y = 0 := by grind -splitIte ``` In particular, it cannot make progress after discovering that the conditional expression is equal to {lean}`0`: ```leanOutput noSplitIte (expandTrace := eqc) `grind` failed case grind c : Bool x y : Nat h : (if c = true then x else y) = 0 left : ¬x = 0 right : ¬y = 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] False propositions [prop] x = 0 [prop] y = 0 [eqc] Equivalence classes [eqc] others [eqc] {0, if c = true then x else y} [cutsat] Assignment satisfying linear constraints ``` Forbidding all case splitting causes the proof to fail for the same reason: ```lean +error (name := noSplitsAtAll) example (c : Bool) (x y : Nat) (h : (if c then x else y) = 0) : x = 0 ∨ y = 0 := by grind (splits := 0) ``` ```leanOutput noSplitsAtAll (expandTrace := eqc) `grind` failed case grind c : Bool x y : Nat h : (if c = true then x else y) = 0 left : ¬x = 0 right : ¬y = 0 ⊢ False [grind] Goal diagnostics [facts] Asserted 
facts [eqc] False propositions [prop] x = 0 [prop] y = 0 [eqc] Equivalence classes [eqc] others [eqc] {0, if c = true then x else y} [cutsat] Assignment satisfying linear constraints [limits] Thresholds reached ``` Allowing just one split is sufficient: ```lean example (c : Bool) (x y : Nat) (h : (if c then x else y) = 0) : x = 0 ∨ y = 0 := by grind (splits := 1) ``` ::: :::example "Splitting Pattern Matching" Disabling case splitting on pattern matches causes {tactic}`grind` to fail in this example: ```lean +error (name := noSplitMatch) example (h : y = match x with | 0 => 1 | _ => 2) : y > 0 := by grind -splitMatch ``` ```leanOutput noSplitMatch (expandTrace := eqc) `grind` failed case grind y x : Nat h : y = match x with | 0 => 1 | x => 2 h_1 : y = 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [prop] (x = 0 → False) → (match x with | 0 => 1 | x => 2) = 2 [eqc] Equivalence classes [eqc] {y, 0} [eqc] {match x with | 0 => 1 | x => 2} [eqc] {x = 0 → False, (fun x_0 => x_0 = 0 → False) x, x = 0 → False} [ematch] E-matching patterns [cutsat] Assignment satisfying linear constraints [grind] Diagnostics ``` Enabling the option causes the proof to succeed: ```lean example (h : y = match x with | 0 => 1 | _ => 2) : y > 0 := by grind ``` ::: :::example "Splitting Predicates" {lean}`Not30` is a somewhat verbose way to state that a number is not {lean}`30`: ```lean inductive Not30 : Nat → Prop where | gt : x > 30 → Not30 x | lt : x < 30 → Not30 x ``` By default, {tactic}`grind` cannot show that {lean}`Not30` implies that a number is, in fact, not {lean}`30`: ```lean +error (name := not30fail) example : Not30 n → n ≠ 30 := by grind ``` This is because {tactic}`grind` does not consider both cases for {lean}`Not30` ```leanOutput not30fail (expandTrace := eqc) `grind` failed case grind n : Nat h : Not30 n h_1 : n = 30 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [prop] Not30 n [eqc] Equivalence classes [eqc] 
{n, 30} [cutsat] Assignment satisfying linear constraints ``` Adding the {attr}`grind cases` attribute to {lean}`Not30` allows the proof to succeed: ```lean attribute [grind cases] Not30 example : Not30 n → n ≠ 30 := by grind ``` Similarly, the {attr}`grind cases` attribute on {lean}`Even` allows {tactic}`grind` to perform case splits: ```lean (name := blah) @[grind cases] inductive Even : Nat → Prop | zero : Even 0 | step : Even n → Even (n + 2) attribute [grind cases] Even example (h : Even 5) : False := by grind set_option trace.grind.split true in example (h : Even (n + 2)) : Even n := by grind ``` ::: # Performance Case analysis is powerful, but computationally expensive: each level of case splitting multiplies the search space. It's important to be judicious and not perform unnecessary splits. In particular: * Increase `splits` *only* when the goal genuinely needs deeper branching; each extra level multiplies the search space. * Disable `splitMatch` when large pattern‑matching definitions explode the tree; this can be observed by setting the {option}`trace.grind.split`. * Flags can be combined, e.g. `by grind -splitMatch (splits := 10) +splitImp`. * The {attr}`grind cases` attribute is {ref "scoped-attributes"}_scoped_. The modifiers {keywordOf Lean.Parser.Term.attrKind}`local` and {keywordOf Lean.Parser.Term.attrKind}`scoped` restrict extra splitting to a section or namespace. {optionDocs trace.grind.split}
reference-manual/Manual/Grind/CongrClosure.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Congruence Closure" => %%% tag := "congruence-closure" %%% :::leanSection ```lean -show variable {a a' : α} {b b' : β} {f : α → β → γ} ``` {deftech}_Congruence closure_ maintains equivalence classes of terms under the reflexive, symmetric, and transitive closure of “is equal to” _and_ the rule that equal arguments yield equal function results. Formally, if {lean}`a = a'` and {lean}`b = b'`, then {lean}`f a b = f a' b'` is added. The algorithm merges equivalence classes until a fixed point is reached. If a contradiction is discovered, then the goal can be closed immediately. ::: ::::leanSection ```lean -show variable {t₁ t₂ : α} {h : t₁ = t₂} {a : α} {f : α → β} {g : β → β} ``` :::paragraph Using the analogy of the shared whiteboard: 1. Every hypothesis {typed}`h : t₁ = t₂` writes a line connecting {lean}`t₁` and {lean}`t₂`. 2. Whenever two terms are connected by one or more lines, they're considered to be equal. Soon, whole constellations ({lean}`f a`, {lean}`g (f a)`, …) are connected. 3. If two different constructors of the same inductive type are connected by one or more lines, then a contradiction is discovered and the goal is closed. For example, equating {lean}`True` and {lean}`False` or {lean (type := "Option Nat")}`none` and {lean}`some 1` would be a contradiction. ::: :::: :::example "Congruence Closure" (open := true) This theorem is proved using congruence closure: ```lean example {α} (f g : α → α) (x y : α) (h₁ : x = y) (h₂ : f y = g y) : f x = g x := by grind ``` Initially, `f y`, `g y`, `x`, and `y` are in separate equivalence classes. The congruence closure engine uses `h₁` to merge `x` and `y`, after which the equivalence classes are `{x, y}`, `f y`, and `g y`. 
Next, `h₂` is used to merge `f y` and `g y`, after which the classes are `{x, y}` and `{f y, g y}`. This is sufficient to prove that `f x = g x`, because `y` and `x` are in the same class. Similar reasoning is used for constructors: ```lean example (a b c : Nat) (h : a = b) : (a, c) = (b, c) := by grind ``` Because the pair constructor {name}`Prod.mk` obeys congruence, the tuples become equal as soon as `a` and `b` are placed in the same class. ::: # Congruence Closure vs. Simplification ::::leanSection ```lean -show variable {t₁ t₂ : α} {h : t₁ = t₂} {a : α} {f : α → β} {g : β → β} ``` :::paragraph Congruence closure is a fundamentally different operation from simplification: * {tactic}`simp` _rewrites_ a goal, replacing occurrences of {lean}`t₁` with {lean}`t₂` as soon as it sees {typed}`h : t₁ = t₂`. The rewrite is directional and destructive. * {tactic}`grind` _accumulates_ equalities bidirectionally. No term is rewritten; instead, both representatives live in the same class. All other engines ({tech}[E‑matching], theory solvers, {tech (key := "constraint propagation")}[propagation]) can query these classes and add new facts, then the closure updates incrementally. This makes congruence closure especially robust in the presence of symmetrical reasoning, mutual recursion, and large nestings of constructors where rewriting would duplicate work. ::: ::::
reference-manual/Manual/Grind/Cutsat.lean
import VersoManual import Lean.Parser.Term import Manual.Meta import Manual.Papers open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Verso.Code.External (lit) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Linear Integer Arithmetic" => %%% tag := "cutsat" %%% :::paragraph The linear integer arithmetic solver implements a model-based decision procedure for linear integer arithmetic. The solver can process four categories of linear polynomial constraints (where `p` is a [linear polynomial](https://en.wikipedia.org/wiki/Degree_of_a_polynomial)): : Equality `p = 0` : Divisibility `d ∣ p` : Inequality `p ≤ 0` : Disequality `p ≠ 0` It is complete for linear integer arithmetic, and natural numbers are supported by converting them to integers with {name}`Int.ofNat`. Support for additional types that can be embedded into {lean}`Int` can be added via instances of {name}`Lean.Grind.ToInt`. Nonlinear terms (e.g. `x * x`) are allowed, and are represented as variables. The solver is additionally capable of propagating information back to the metaphorical {tactic}`grind` whiteboard, which can trigger further progress from the other subsystems. By default, it is enabled; it can be disabled using the flag {lit}`-lia` ::: ::::example "Examples of Linear Integer Arithmetic" (open := true) All of these statements can be proved using the linear integer arithmetic solver. 
In the first example, the left-hand side must be a multiple of 2, and thus cannot be 5: ```lean example {x y : Int} : 2 * x + 4 * y ≠ 5 := by grind ``` The solver supports mixing equalities and inequalities: ```lean example {x y : Int} : 2 * x + 3 * y = 0 → 1 ≤ x → y < 1 := by grind ``` It also supports linear divisibility constraints: ```lean example (a b : Int) : 2 ∣ a + 1 → 2 ∣ b + a → ¬ 2 ∣ b + 2 * a := by grind ``` Without `lia`, {tactic}`grind` cannot prove the statement: ```lean +error (name := noLia) example (a b : Int) : 2 ∣ a + 1 → 2 ∣ b + a → ¬ 2 ∣ b + 2 * a := by grind -lia ``` ```leanOutput noLia `grind` failed case grind a b : Int h : 2 ∣ a + 1 h_1 : 2 ∣ a + b h_2 : 2 ∣ 2 * a + b ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [ematch] E-matching patterns [linarith] Linarith assignment for `Int` ``` :::: # Rational Solutions %%% tag := "cutsat-qlia" %%% The solver is complete for linear integer arithmetic. However, the search space can become vast even with very few constraints, and the solver was not designed to perform massive case-analysis. The `qlia` option to {tactic}`grind` reduces the search space by instructing the solver to accept rational solutions. With this option, the solver is likely to be faster, but it is incomplete. 
:::example "Rational Solutions" The following example has a rational solution, but does not have integer solutions: ```lean example {x y : Int} : 27 ≤ 13 * x + 11 * y → 13 * x + 11 * y ≤ 30 → -10 ≤ 9 * x - 7 * y → 9 * x - 7 * y > 4 := by grind ``` Because it uses the rational solution, {tactic}`grind` fails to refute the negation of the goal when `+qlia` is specified: ```lean +error (name := withqlia) example {x y : Int} : 27 ≤ 13 * x + 11 * y → 13 * x + 11 * y ≤ 30 → -10 ≤ 9 * x - 7 * y → 9 * x - 7 * y > 4 := by grind +qlia ``` ```leanOutput withqlia (expandTrace := cutsat) `grind` failed case grind x y : Int h : -13 * x + -11 * y + 27 ≤ 0 h_1 : 13 * x + 11 * y + -30 ≤ 0 h_2 : -9 * x + 7 * y + -10 ≤ 0 h_3 : 9 * x + -7 * y + -4 ≤ 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [cutsat] Assignment satisfying linear constraints [assign] x := 62/117 [assign] y := 2 ``` The rational model constructed by the solver is in the section `Assignment satisfying linear constraints` in the goal diagnostics. ::: # Nonlinear Constraints The solver currently does not support nonlinear constraints, and treats nonlinear terms such as `x * x` as variables. 
::::example "Nonlinear Terms" (open := true) The linear integer arithmetic solver fails to prove this theorem: ```lean +error (name := nonlinear) example (x : Int) : x * x ≥ 0 := by grind ``` ```leanOutput nonlinear `grind` failed case grind x : Int h : x * x + 1 ≤ 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [ematch] E-matching patterns [cutsat] Assignment satisfying linear constraints ``` From the perspective of the linear integer arithmetic solver, it is equivalent to: ```lean +error (name := nonlinear2) example {y : Int} (x : Int) : y ≥ 0 := by grind ``` ```leanOutput nonlinear `grind` failed case grind x : Int h : x * x + 1 ≤ 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [ematch] E-matching patterns [cutsat] Assignment satisfying linear constraints ``` :::paragraph This can be seen by setting the option {option}`trace.grind.lia.assert` to {lean}`true`, which traces all constraints processed by the solver. ```lean +error (name := liaDiag) example (x : Int) : x*x ≥ 0 := by set_option trace.grind.lia.assert true in grind ``` ```leanOutput liaDiag [grind.lia.assert] -1*「x ^ 2 + 1」 + 「x ^ 2」 + 1 = 0 [grind.lia.assert] 「x ^ 2」 + 1 ≤ 0 ``` The term `x ^ 2` is “quoted” in `「x ^ 2」 + 1 ≤ 0` to indicate that `x ^ 2` is treated as a variable. ::: :::: # Division and Modulus The solver supports linear division and modulo operations. :::example "Linear Division and Modulo" ```lean example (x y : Int) : x = y / 2 → y % 2 = 0 → y - 2 * x = 0 := by grind ``` ::: # Algebraic Processing The solver normalizes commutative (semi)ring expressions. 
:::example "Commutative (Semi)ring Normalization" Commutative ring normalization allows this goal to be solved: ```lean example (a b : Nat) (h₁ : a + 1 ≠ a * b * a) (h₂ : a * a * b ≤ a + 1) : b * a ^ 2 < a + 1 := by grind ``` ::: # Propagating Information %%% tag := "cutsat-mbtc" %%% The solver also implements {deftech}_model-based theory combination_, which is a mechanism for propagating equalities back to the metaphorical shared whiteboard. These additional equalities may in turn trigger new congruences. Model-based theory combination increases the size of the search space; it can be disabled using the option `grind -mbtc`. ::::example "Propagating Equalities" In the example above, the linear inequalities and disequalities imply `y = 0`: ```lean example (f : Int → Int) (x y : Int) : f x = 0 → 0 ≤ y → y ≤ 1 → y ≠ 1 → f (x + y) = 0 := by grind ``` Consequently `x = x + y`, so `f x = f (x + y)` by {tech (key := "congruence closure")}[congruence]. Without model-based theory combination, the proof gets stuck: ```lean +error (name := noMbtc) example (f : Int → Int) (x y : Int) : f x = 0 → 0 ≤ y → y ≤ 1 → y ≠ 1 → f (x + y) = 0 := by grind -mbtc ``` ```leanOutput noMbtc `grind` failed case grind f : Int → Int x y : Int h : f x = 0 h_1 : -1 * y ≤ 0 h_2 : y + -1 ≤ 0 h_3 : ¬y = 1 h_4 : ¬f (x + y) = 0 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [eqc] False propositions [eqc] Equivalence classes [cutsat] Assignment satisfying linear constraints [ring] Ring `Int` ``` :::: # Other Types %%% tag := "cutsat-ToInt" %%% The LIA solver can also process linear constraints that contain natural numbers. It converts them into integer constraints using `Int.ofNat`. :::example "Natural Numbers as Linear Integer Arithmetic" ```lean example (x y z : Nat) : x < y + z → y + 1 < z → z + x < 3 * z := by grind ``` ::: There is an extensible mechanism via the {lean}`Lean.Grind.ToInt` type class to tell the solver that a type embeds in the integers. 
Using this, we can solve goals such as: ```lean example (a b c : Fin 11) : a ≤ 2 → b ≤ 3 → c = a + b → c ≤ 5 := by grind example (a : Fin 2) : a ≠ 0 → a ≠ 1 → False := by grind example (a b c : UInt64) : a ≤ 2 → b ≤ 3 → c - a - b = 0 → c ≤ 5 := by grind ``` {docstring Lean.Grind.ToInt} {docstring Lean.Grind.IntInterval} # Implementation Notes ::::leanSection ```lean -show variable {x y : Int} ``` :::paragraph The implementation of the linear integer arithmetic solver is inspired by Section 4 of {citet cuttingToTheChase}[]. Compared to the paper, it includes several enhancements and modifications such as: * extended constraint support (equality and disequality), * an optimized encoding of the `Cooper-Left` rule using a “big”-disjunction instead of fresh variables, and * decision variable tracking for case splits (disequalities, `Cooper-Left`, `Cooper-Right`). ::: :::paragraph The solver procedure builds a model (that is, an assignment of the variables in the term) incrementally, resolving conflicts through constraint generation. For example, given a partial model `{x := 1}` and constraint {lean}`3 ∣ 3 * y + x + 1`: - The solver cannot extend the model to {lean}`y` because {lean}`3 ∣ 3 * y + 2` is unsatisfiable. - Thus, it resolves the conflict by generating the implied constraint {lean}`3 ∣ x + 1`. - The new constraint forces the solver to find a new assignment for {lean}`x`. ::: :::paragraph When assigning a variable `y`, the solver considers: - The best upper and lower bounds (inequalities). - A divisibility constraint. - All disequality constraints where `y` is the maximal variable. ::: :::: The `Cooper-Left` and `Cooper-Right` rules handle the combination of inequalities and divisibility. For unsatisfiable disequalities `p ≠ 0`, the solver generates the case split: `p + 1 ≤ 0 ∨ -p + 1 ≤ 0`. :::comment Planned future features: improved constraint propagation. :::
reference-manual/Manual/Grind/ConstraintPropagation.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode open Lean Lean.Grind Lean.Meta.Grind #doc (Manual) "Constraint Propagation" => %%% tag := "grind-propagation" %%% {deftech}[Constraint propagation] works on the {lean}`True` and {lean}`False` buckets of the whiteboard. Whenever a term is added to one of those buckets, {tactic}`grind` fires dozens of small {deftech}_forward rules_ that derive further information from its logical consequences: : Boolean connectives ::::leanSection ```lean -show variable {A B : Prop} ``` :::paragraph The truth tables of the Boolean connectives can be used to derive further true and false facts. For example: * If {lean}`A` is {lean}`True`, then {lean}`A ∨ B` becomes {lean}`True`. * If {lean}`A ∧ B` is {lean}`True`, then both {lean}`A` and {lean}`B` become {lean}`True`. * If {lean}`A ∧ B` is {lean}`False`, at least one of {lean}`A`, {lean}`B` becomes {lean}`False`. ::: :::: : Inductive Types If terms formed by applications of two different constructors of the same {tech}[inductive type] (e.g. {name}`none` and {name}`some`) are placed in the same equivalence class, a contradiction is derived. If two terms formed by applications of the same constructor are placed in the same equivalence class, then their arguments are also made equal. : Projections :::leanSection ```lean -show variable {x x' : α} {y y' : β} {h : (x, y) = (x', y')} {a : α} ``` From {typed}`h : (x, y) = (x', y')` we derive {lean}`x = x'` and {lean}`y = y'`. ::: : Casts :::leanSection ```lean -show variable {h : α = β} {a : α} ``` Any term {typed}`cast h a : β` is equated with {typed}`a : α` immediately (using {tech}[heterogeneous equality]). 
::: : Reduction ::::keepEnv :::leanSection ```lean -show variable {α : Type u} {β : Type v} {a : α} {b : β} structure S α β where x : α y : β variable {p : S α β} ``` Definitional reduction is propagated, so {lean}`(a, b).1` is equated with {lean}`a`. ::: :::: :::paragraph Below is a _representative slice_ of the propagators that demonstrates their overall style. Each follows the same skeleton. 1. It inspect the truth value of sub‑expressions. 2. If further facts can be derived, it either equates terms (connecting them on the metaphorical whiteboard) using ({lean}`pushEq`), or it indicates truth values using ({lean}`pushEqTrue` / {lean}`pushEqFalse`). These steps produce proof terms using internal helper lemmas such as {name}`Grind.and_eq_of_eq_true_left`. 3. If a contradiction arises, the goal is closed using ({lean}`closeGoal`). {deftech}_Upward propagation_ derives facts about a term from facts about sub-terms, while {deftech}_downward propagation_ derives facts about sub-terms from facts about a term. ::: ```lean -show namespace ExamplePropagators ``` ```lean -keep /-- Propagate equalities *upwards* for conjunctions. -/ builtin_grind_propagator propagateAndUp ↑And := fun e => do let_expr And a b := e | return () if (← isEqTrue a) then -- a = True ⇒ (a ∧ b) = b pushEq e b <| mkApp3 (mkConst ``Grind.and_eq_of_eq_true_left) a b (← mkEqTrueProof a) else if (← isEqTrue b) then -- b = True ⇒ (a ∧ b) = a pushEq e a <| mkApp3 (mkConst ``Grind.and_eq_of_eq_true_right) a b (← mkEqTrueProof b) else if (← isEqFalse a) then -- a = False ⇒ (a ∧ b) = False pushEqFalse e <| mkApp3 (mkConst ``Grind.and_eq_of_eq_false_left) a b (← mkEqFalseProof a) else if (← isEqFalse b) then -- b = False ⇒ (a ∧ b) = False pushEqFalse e <| mkApp3 (mkConst ``Grind.and_eq_of_eq_false_right) a b (← mkEqFalseProof b) /-- Truth flows *down* when the whole `And` is proven `True`. 
-/ builtin_grind_propagator propagateAndDown ↓And := fun e => do if (← isEqTrue e) then let_expr And a b := e | return () let h ← mkEqTrueProof e -- (a ∧ b) = True ⇒ a = True pushEqTrue a <| mkApp3 (mkConst ``Grind.eq_true_of_and_eq_true_left) a b h -- (a ∧ b) = True ⇒ B = True pushEqTrue b <| mkApp3 (mkConst ``Grind.eq_true_of_and_eq_true_right) a b h ``` ```lean -show end ExamplePropagators ``` Other frequently‑triggered propagators follow the same pattern: ::::leanSection ```lean -show variable {A B : Prop} {a b : α} ``` :::table +header * * Propagator * Handles * Notes * * {lean}`propagateOrUp` / {lean}`propagateOrDown` * {lean}`A ∨ B` * Uses the truth table for disjunction to derive further truth values * * {lean}`propagateNotUp` / {lean}`propagateNotDown` * {lean}`¬ A` * Ensures that {lean}`¬ A` and {lean}`A` have opposite truth values * * {lean}`propagateEqUp` / {lean}`propagateEqDown` * `a = b` * Bridges Booleans, detects constructor clash {TODO}[What does 'bridges booleans' mean? Find out] * * {lean}`propagateIte` / {lean}`propagateDIte` * {name}`ite` / {name}`dite` * Equates the term with the chosen branch once the condition's truth value is known * * `propagateEtaStruct` * Values of structures tagged `[grind ext]` * Generates η‑expansion `a = ⟨a.1, …⟩` ::: :::: :::comment TODO (@kim-em): we don't add the `{lean}` literal type to `propagateEtaStruct` above because it is private. ::: Many specialized variants for {lean}`Bool` mirror these rules exactly (e.g. {lean}`propagateBoolAndUp`). # Propagation‑Only Examples These goals are closed *purely* by constraint propagation—no case splits, no theory solvers: ```lean -- Boolean connective: a && !a is always false. example (a : Bool) : (a && !a) = false := by grind -- Conditional (ite): -- once the condition is true, ite picks the 'then' branch. example (c : Bool) (t e : Nat) (h : c = true) : (if c then t else e) = t := by grind -- Negation propagates truth downwards. 
example (a : Bool) (h : (!a) = true) : a = false := by grind ``` These snippets run instantly because the relevant propagators ({lean}`propagateBoolAndUp`, {lean}`propagateIte`, {lean}`propagateBoolNotDown`) fire as soon as the hypotheses are internalized. Setting the option {option}`trace.grind.eqc` to {lean}`true` causes {tactic}`grind` to print a line every time two equivalence classes merge, which is handy for seeing propagation in action. :::TODO This section should be uncommented when the command is implemented: ```lean -show -- Test to ensure that this section is uncommented when the command is implemented /-- error: elaboration function for `Lean.Parser.«command_Grind_propagator___(_):=_»` has not been implemented -/ #guard_msgs in grind_propagator ↑x(y) := _ ``` {tactic}`grind` is still under active development, and its implementation is likely to change. Until the API has stabilized we recommend _refraining from writing custom elaborators or satellite solvers_. If a project-local custom propagator is really needed, then it should be defined using the {keywordOf «command_Grind_propagator___(_):=_»}`grind_propagator` command, rather than {keywordOf «command_Builtin_grind_propagator____:=_»}`builtin_grind_propagator` (the latter is reserved for Lean’s own code). When adding new propagators, keep them *small and orthogonal*—they should fire in ≤1 µs and either push one fact or close the goal. This keeps the propagation phase predictable and easy to debug. ::: The set of propagation rules is expanded and refined over time, so the InfoView will show increasingly rich {lean}`True` and {lean}`False` buckets. The full equivalence classes are displayed automatically _only when {tactic}`grind` fails_, and only for the first subgoal that it could not close—use this output to inspect missing facts and understand why the subgoal remains open. 
:::example "Identifying Missing Facts" In this example, {tactic}`grind` fails: ```lean +error (name := missing) example : x = y ∧ y = z → w = x ∨ w = v → w = z := by grind ``` The resulting error message includes the identified equivalence classes along with the true and false propositions: ```leanOutput missing (expandTrace := eqc) `grind` failed case grind α : Sort u_1 x y z w v : α left : x = y right : y = z h_1 : w = x ∨ w = v h_2 : ¬w = z ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] True propositions [prop] w = x ∨ w = v [prop] w = v [eqc] False propositions [prop] w = x [prop] w = z [eqc] Equivalence classes [eqc] {x, y, z} [eqc] {w, v} ``` Both `x = y` and `y = z` were discovered by constraint propagation from the `x = y ∧ y = z` premise. In this proof, {tactic}`grind` performed a case split on `w = x ∨ w = v`. In the second branch, it could not place `w` and `z` in the same equivalence class. :::
reference-manual/Manual/Grind/Algebra.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode -- Due to Lean.Grind.Semiring.nsmul_eq_natCast_mul set_option verso.docstring.allowMissing true set_option maxHeartbeats 300000 #doc (Manual) "Algebraic Solver (Commutative Rings, Fields)" => %%% tag := "grind-ring" %%% The `ring` solver in {tactic}`grind` is inspired by Gröbner basis computation procedures and term rewriting completion. It views multivariate polynomials as rewriting rules. For example, the polynomial equality `x * y + x - 2 = 0` is treated as a rewriting rule `x * y ↦ -x + 2`. It uses superposition to ensure the rewriting system is confluent. The following examples demonstrate goals that can be decided by the `ring` solver. In these examples, the `Lean` and `Lean.Grind` namespaces are open: ```lean open Lean Grind ``` :::example "Commutative Rings" (open := true) ```lean -show open Lean.Grind ``` ```lean example [CommRing α] (x : α) : (x + 1) * (x - 1) = x ^ 2 - 1 := by grind ``` ::: :::example "Ring Characteristics" (open := true) The solver “knows” that `16*16 = 0` because the [ring characteristic](https://en.wikipedia.org/wiki/Characteristic_%28algebra%29) (that is, the minimum number of copies of the multiplicative identity that sum to the additive identity) is `256`, which is provided by the {name}`IsCharP` instance. ```lean -show open Lean.Grind ``` ```lean example [CommRing α] [IsCharP α 256] (x : α) : (x + 16)*(x - 16) = x^2 := by grind ``` ::: :::example "Standard Library Types" (open := true) ```lean -show open Lean.Grind ``` Types in the standard library are supported by the solver out of the box. `UInt8` is a commutative ring with characteristic `256`, and thus has instances of {inst}`CommRing UInt8` and {inst}`IsCharP UInt8 256`. 
```lean example (x : UInt8) : (x + 16) * (x - 16) = x ^ 2 := by grind ``` ::: :::example "More Commutative Ring Proofs" (open := true) ```lean -show open Lean.Grind ``` The axioms of a commutative ring are sufficient to prove these statements. ```lean example [CommRing α] (a b c : α) : a + b + c = 3 → a ^ 2 + b ^ 2 + c ^ 2 = 5 → a ^ 3 + b ^ 3 + c ^ 3 = 7 → a ^ 4 + b ^ 4 = 9 - c ^ 4 := by grind ``` ```lean example [CommRing α] (x y : α) : x ^ 2 * y = 1 → x * y ^ 2 = y → y * x = 1 := by grind ``` ::: :::example "Characteristic Zero" (open := true) ```lean -show open Lean.Grind ``` `ring` proves that `a + 1 = 2 + a` is unsatisfiable because the characteristic is known to be 0. ```lean example [CommRing α] [IsCharP α 0] (a : α) : a + 1 = 2 + a → False := by grind ``` ::: :::example "Inferred Characteristic" (open := true) ```lean -show open Lean.Grind ``` Even when the characteristic is not initially known, when `grind` discovers that `n = 0` for some numeral `n`, it makes inferences about the characteristic: ```lean example [CommRing α] (a b c : α) (h₁ : a + 6 = a) (h₂ : c = c + 9) (h : b + 3*c = 0) : 27*a + b = 0 := by grind ``` ::: # Solver Type Classes %%% tag := "grind-ring-classes" %%% :::paragraph Users can enable the `ring` solver for their own types by providing instances of the following {tech (key := "type class")}[type classes], all in the `Lean.Grind` namespace: * {name Lean.Grind.Semiring}`Semiring` * {name Lean.Grind.Ring}`Ring` * {name Lean.Grind.CommSemiring}`CommSemiring` * {name Lean.Grind.CommRing}`CommRing` * {name Lean.Grind.IsCharP}`IsCharP` * {name Lean.Grind.AddRightCancel}`AddRightCancel` * {name Lean.Grind.NoNatZeroDivisors}`NoNatZeroDivisors` * {name Lean.Grind.Field}`Field` The algebraic solvers will self-configure depending on the availability of these instances, so not all need to be provided. The capabilities of the algebraic solvers will, of course, degrade when some are not available. 
::: The Lean standard library contains the applicable instances for the types defined in the standard library. By providing these instances, other libraries can also enable {tactic}`grind`'s `ring` solver. For example, the Mathlib `CommRing` type class implements `Lean.Grind.CommRing` to ensure the `ring` solver works out-of-the-box. ## Algebraic Structures To enable the algebraic solver, a type should have an instance of the most specific possible algebraic structure that the solver supports. In order of increasing specificity, that is {name Lean.Grind.Semiring}`Semiring`, {name Lean.Grind.Ring}`Ring`, {name Lean.Grind.CommSemiring}`CommSemiring`, {name Lean.Grind.CommRing}`CommRing`, and {name Lean.Grind.Field}`Field`. {docstring Lean.Grind.Semiring} {docstring Lean.Grind.CommSemiring} {docstring Lean.Grind.Ring} {docstring Lean.Grind.CommRing} ### Fields %%% tag := "grind-ring-field" %%% :::leanSection ```lean -show variable {a b p : α} [Field α] ``` The `ring` solver also has support for {name}`Field`s. If a {name}`Field` instance is available, the solver preprocesses the term `a / b` into `a * b⁻¹`. It also rewrites every disequality `p ≠ 0` as the equality `p * p⁻¹ = 1`. ::: ::::example "Fields and `grind`" ```lean -show open Lean.Grind ``` This example requires its {name}`Field` instance: ```lean example [Field α] (a : α) : a ^ 2 = 0 → a = 0 := by grind ``` :::: {docstring Lean.Grind.Field} ## Ring Characteristics :::TODO write ::: {docstring Lean.Grind.IsCharP} ## Natural Number Zero Divisors %%% tag := "NoNatZeroDivisors" %%% The class `NoNatZeroDivisors` is used to control coefficient growth. For example, the polynomial `2 * x * y + 4 * z = 0` is simplified to `x * y + 2 * z = 0`. It also used when processing disequalities. 
:::example "Using `NoNatZeroDivisors`" ```lean -show open Lean.Grind ``` In this example, {tactic}`grind` relies on the {name}`NoNatZeroDivisors` instance to simplify the goal: ```lean example [CommRing α] [NoNatZeroDivisors α] (a b : α) : 2 * a + 2 * b = 0 → b ≠ -a → False := by grind ``` Without it, the proof fails: ```lean (name := NoNatZero) +error example [CommRing α] (a b : α) : 2 * a + 2 * b = 0 → b ≠ -a → False := by grind ``` ```leanOutput NoNatZero `grind` failed case grind α : Type u_1 inst : CommRing α a b : α h : 2 * a + 2 * b = 0 h_1 : ¬b = -a ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] False propositions [eqc] Equivalence classes [ring] Ring `α` ``` ::: {docstring Lean.Grind.NoNatZeroDivisors} {docstring Lean.Grind.NoNatZeroDivisors.mk'} The `ring` module also performs case-analysis for terms `a⁻¹` on whether `a` is zero or not. In the following example, if `2*a` is zero, then `a` is also zero since we have `NoNatZeroDivisors α`, and all terms are zero and the equality hold. Otherwise, `ring` adds the equalities `a*a⁻¹ = 1` and `2*a*(2*a)⁻¹ = 1`, and closes the goal. ```lean example [Field α] [NoNatZeroDivisors α] (a : α) : 1 / a + 1 / (2 * a) = 3 / (2 * a) := by grind ``` Without `NoNatZeroDivisors`, `grind` will perform case splits on numerals being zero as needed: ```lean example [Field α] (a : α) : (2 * a)⁻¹ = a⁻¹ / 2 := by grind ``` In the following example, `ring` does not need to perform any case split because the goal contains the disequalities `y ≠ 0` and `w ≠ 0`. ```lean example [Field α] {x y z w : α} : x / y = z / w → y ≠ 0 → w ≠ 0 → x * w = z * y := by grind (splits := 0) ``` You can disable the `ring` solver using the option `grind -ring`. 
```lean +error (name := noRing) example [CommRing α] (x y : α) : x ^ 2 * y = 1 → x * y ^ 2 = y → y * x = 1 := by grind -ring ``` ```leanOutput noRing `grind` failed case grind α : Type u_1 inst : CommRing α x y : α h : x ^ 2 * y = 1 h_1 : x * y ^ 2 = y h_2 : ¬y * x = 1 ⊢ False [grind] Goal diagnostics [facts] Asserted facts [eqc] False propositions [eqc] Equivalence classes [ematch] E-matching patterns [linarith] Linarith assignment for `α` ``` ### Right-Cancellative Addition %%% tag := "AddRightCancel" %%% The `ring` solver automatically embeds `CommSemiring`s into a `CommRing` envelope (using the construction `Lean.Grind.Ring.OfSemiring.Q`). However, the embedding is injective only when the `CommSemiring` implements the type class `AddRightCancel`. `Nat` is an example of a commutative semiring that implements `AddRightCancel`. ```lean example (x y : Nat) : x ^ 2 * y = 1 → x * y ^ 2 = y → y * x = 1 := by grind ``` {docstring Lean.Grind.AddRightCancel} # Resource Limits Gröbner basis computation can be very expensive. 
You can limit the number of steps performed by the `ring` solver using the option `grind (ringSteps := <num>)`.
reference-manual/Manual/Grind/ExtendedExamples.lean
import VersoManual import Lean.Parser.Term import Manual.Meta import Manual.Grind.ExtendedExamples.Integration import Manual.Grind.ExtendedExamples.IfElseNorm open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode open Lean.Grind #doc (Manual) "Bigger Examples" => %%% tag := "grind-bigger-examples" %%% :::TODO Properly link to tutorial section ::: {include 1 Manual.Grind.ExtendedExamples.Integration} {include 1 Manual.Grind.ExtendedExamples.IfElseNorm}
reference-manual/Manual/Grind/Linarith.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode -- Due to Lean.Grind.Semiring.nsmul_eq_natCast_mul set_option verso.docstring.allowMissing true open Lean.Grind #doc (Manual) "Linear Arithmetic Solver" => %%% tag := "grind-linarith" %%% The {tactic}`grind` tactic includes a linear arithmetic solver for arbitrary types, called `linarith`, that is used for types not supported by {ref "cutsat"}`cutsat`. Like the {ref "grind-ring"}`ring` solver, it can be used with any type that has instances of certain type classes. It self-configures depending on the availability of these type classes, so it is not necessary to provide all of them to use the solver; however, its capabilities are increased by the availability of more instances. This solver is useful for reasoning about the real numbers, ordered vector spaces, and other types that can't be embedded into {name}`Int`. The core functionality of `linarith` is a model-based solver for linear inequalities with integer coefficients. It can be disabled using the option `grind -linarith`. :::example "Goals Decided by `linarith`" (open := true) ```imports -show import Std ``` ```lean -show open Lean.Grind ``` All of these examples rely on instances of the following ordering notation and `linarith` classes: ```lean variable [LE α] [LT α] [Std.LawfulOrderLT α] [Std.IsLinearOrder α] variable [IntModule α] [OrderedAdd α] ``` Integer modules ({name}`IntModule`) are types with zero, addition, negation, subtraction, and scalar multiplication by integers that satisfy the expected properties of these operations. Linear orders ({name}`Std.IsLinearOrder`) are orders in which any pair of elements is ordered, and {name}`OrderedAdd` states that adding a constant to both sides preserves orderings. 
```lean example {a b : α} : 2 • a + b ≥ b + a + a := by grind example {a b : α} (h : a ≤ b) : 3 • a + b ≤ 4 • b := by grind example {a b c : α} : a = b + c → 2 • b ≤ c → 2 • a ≤ 3 • c := by grind example {a b c d e : α} : 2 • a + b ≥ 0 → b ≥ 0 → c ≥ 0 → d ≥ 0 → e ≥ 0 → a ≥ 3 • c → c ≥ 6 • e → d - 5 • e ≥ 0 → a + b + 3 • c + d + 2 • e < 0 → False := by grind ``` ::: :::example "Commutative Ring Goals Decided by `linarith`" (open := true) ```imports -show import Std ``` ```lean -show open Lean.Grind ``` For types that are commmutative rings (that is, types in which the multiplication operator is commutative) with {name}`CommRing` instances, `linarith` has more capabilities. ```lean variable [LE R] [LT R] [Std.IsLinearOrder R] [Std.LawfulOrderLT R] variable [CommRing R] [OrderedRing R] ``` The {inst}`CommRing R` instance allows `linarith` to perform basic normalization, such as identifying linear atoms `a * b` and `b * a`, and to account for scalar multiplication on both sides. The {inst}`OrderedRing R` instance allows the solver to support constants, because it has access to the fact that {lean}`(0 : R) < 1`. ```lean example (a b : R) (h : a * b ≤ 1) : b * 3 • a + 1 ≤ 4 := by grind example (a b c d e f : R) : 2 • a + b ≥ 1 → b ≥ 0 → c ≥ 0 → d ≥ 0 → e • f ≥ 0 → a ≥ 3 • c → c ≥ 6 • e • f → d - f * e * 5 ≥ 0 → a + b + 3 • c + d + 2 • e • f < 0 → False := by grind ``` ::: :::TODO Planned future features * Support for `NatModule` (by embedding in the Grothendieck envelope, as we already do for semirings), * Better communication between the `ring` and `linarith` solvers. There is currently very little communication between these two solvers. * Non-linear arithmetic over ordered rings. ::: # Supporting `linarith` %%% tag := "grind-linarith-classes" %%% To add support for a new type to `linarith`, the first step is to implement {name}`IntModule` if possible, or {name}`NatModule` otherwise. 
Every {name}`Ring` is already an {name}`IntModule`, and every {name}`Semiring` is already a {name}`NatModule`, so implementing one of those instances is also sufficient. Next, one of the order classes ({name}`Std.IsPreorder`, {name}`Std.IsPartialOrder`, or {name}`Std.IsLinearOrder`) should be implemented. Typically an {name Std.IsPreorder}`IsPreorder` instance is enough when the context already includes a contradiction, but an {name Std.IsLinearOrder}`IsLinearOrder` instance is required in order to prove linear inequality goals. Additional features are enabled by implementing {name}`OrderedAdd`, which expresses that the additive structure in a module is compatible with the order, and {name}`OrderedRing`, which improves support for constants. {docstring Lean.Grind.NatModule} {docstring Lean.Grind.IntModule} {docstring Lean.Grind.OrderedAdd} {docstring Lean.Grind.OrderedRing}
reference-manual/Manual/Grind/ExtendedExamples/IfElseNorm.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode open Lean.Grind #doc (Manual) "`if`-`then`-`else` Normalization" => %%% tag := "grind-if-then-else-norm" %%% ```lean -show open Std ``` This example is a showcase for the “out of the box” power of {tactic}`grind`. Later examples will explore adding {attrs}`@[grind]` annotations as part of the development process, to make {tactic}`grind` more effective in a new domain. This example does not rely on any of the algebra extensions to {tactic}`grind`, we're just using: * instantiation of annotated theorems from the library, * {tech}[congruence closure], and * case splitting. The solution here builds on an earlier formalization by Chris Hughes, but with some notable improvements: * the verification is separate from the code, * the proof is now a one-liner combining {tactic}`fun_induction` and {tactic}`grind`, * the proof is robust to changes in the code (e.g. swapping out {name}`HashMap` for {name}`TreeMap`) as well as changes to the precise verification conditions. # The problem Here is Rustan Leino's original description of the problem, as [posted by Leonardo de Moura](https://leanprover.zulipchat.com/#narrow/stream/113488-general/topic/Rustan's.20challenge) on the Lean Zulip: > The data structure is an expression with Boolean literals, variables, and if-then-else expressions. The goal is to normalize such expressions into a form where: a) No nested ifs: the condition part of an if-expression is not itself an if-expression b) No constant tests: the condition part of an if-expression is not a constant c) No redundant ifs: the then and else branches of an if are not the same d) Each variable is evaluated at most once: the free variables of the condition are disjoint from those in the then branch, and also disjoint from those in the else branch. 
One should show that a normalization function produces an expression satisfying these four conditions, and one should also prove that the normalization function preserves the meaning of the given expression. # The Formal Statement :::leanFirst To formalize the statement in Lean, we use an inductive type {name}`IfExpr`: ```lean /-- An if-expression is either boolean literal, a numbered variable, or an if-then-else expression where each subexpression is an if-expression. -/ inductive IfExpr | lit : Bool → IfExpr | var : Nat → IfExpr | ite : IfExpr → IfExpr → IfExpr → IfExpr deriving DecidableEq ``` ::: :::leanFirst and define some inductive predicates and an {name IfExpr.eval}`eval` function, so we can state the four desired properties: ```lean namespace IfExpr /-- An if-expression has a "nested if" if it contains an if-then-else where the "if" is itself an if-then-else. -/ def hasNestedIf : IfExpr → Bool | lit _ => false | var _ => false | ite (ite _ _ _) _ _ => true | ite _ t e => t.hasNestedIf || e.hasNestedIf /-- An if-expression has a "constant if" if it contains an if-then-else where the "if" is itself a literal. -/ def hasConstantIf : IfExpr → Bool | lit _ => false | var _ => false | ite (lit _) _ _ => true | ite i t e => i.hasConstantIf || t.hasConstantIf || e.hasConstantIf /-- An if-expression has a "redundant if" if it contains an if-then-else where the "then" and "else" clauses are identical. -/ def hasRedundantIf : IfExpr → Bool | lit _ => false | var _ => false | ite i t e => t == e || i.hasRedundantIf || t.hasRedundantIf || e.hasRedundantIf /-- All the variables appearing in an if-expressions, read left to right, without removing duplicates. -/ def vars : IfExpr → List Nat | lit _ => [] | var i => [i] | ite i t e => i.vars ++ t.vars ++ e.vars /-- A helper function to specify that two lists are disjoint. 
-/ def _root_.List.disjoint {α} [DecidableEq α] : List α → List α → Bool | [], _ => true | x::xs, ys => x ∉ ys && xs.disjoint ys /-- An if expression evaluates each variable at most once if for each if-then-else the variables in the "if" clause are disjoint from the variables in the "then" clause and the variables in the "if" clause are disjoint from the variables in the "else" clause. -/ def disjoint : IfExpr → Bool | lit _ => true | var _ => true | ite i t e => i.vars.disjoint t.vars && i.vars.disjoint e.vars && i.disjoint && t.disjoint && e.disjoint /-- An if expression is "normalized" if it has no nested, constant, or redundant ifs, and it evaluates each variable at most once. -/ def normalized (e : IfExpr) : Bool := !e.hasNestedIf && !e.hasConstantIf && !e.hasRedundantIf && e.disjoint /-- The evaluation of an if expression at some assignment of variables. -/ def eval (f : Nat → Bool) : IfExpr → Bool | lit b => b | var i => f i | ite i t e => bif i.eval f then t.eval f else e.eval f end IfExpr ``` ::: Using these we can state the problem. The challenge is to inhabit the following type (and to do so nicely!): ```lean def IfNormalization : Type := { Z : IfExpr → IfExpr // ∀ e, (Z e).normalized ∧ (Z e).eval = e.eval } ``` # Other solutions At this point, it's worth pausing and doing at least one of the following: :::comment TODO (@david-christiansen): We include a link here to live-lean and an externally hosted blob of code. There's no way to keep this in sync. :-( ::: * Try to prove this yourself! It's quite challenging for a beginner! You can [have a go](https://live.lean-lang.org/#project=lean-nightly&url=https%3A%2F%2Fgist.githubusercontent.com%2Fkim-em%2Ff416b31fe29de8a3f1b2b3a84e0f1793%2Fraw%2F75ca61230b50c126f8658bacd933ecf7bfcaa4b8%2Fgrind_ite.lean) in the Live Lean editor without any installation. 
* Read Chris Hughes's [solution](https://github.com/leanprover-community/mathlib4/blob/master/Archive/Examples/IfNormalization/Result.lean), which is included in the Mathlib Archive. This solution makes good use of Aesop, but is not ideal because 1. It defines the solution using a subtype, simultaneously giving the construction and proving properties about it. We think it's better stylistically to keep these separate. 2. Even with Aesop automation, there's still about 15 lines of manual proof work before we can hand off to Aesop. * Read Wojciech Nawrocki's [solution](https://leanprover.zulipchat.com/#narrow/channel/113488-general/topic/Rustan's.20challenge/near/398824748). This one uses less automation, at about 300 lines of proof work. # The solution using {tactic}`grind` Actually solving the problem is not that hard: we just need a recursive function that carries along a record of “already assigned variables”, and then, whenever performing a branch on a variable, adding a new assignment in each of the branches. It also needs to flatten nested if-then-else expressions which have another if-then-else in the “condition” position. (This is extracted from Chris Hughes's solution, but without the subtyping.) Let's work inside the `IfExpr` namespace. ```lean namespace IfExpr ``` :::keepEnv ```lean +error (name := failed_to_show_termination) def normalize (assign : Std.HashMap Nat Bool) : IfExpr → IfExpr | lit b => lit b | var v => match assign[v]? with | none => var v | some b => lit b | ite (lit true) t _ => normalize assign t | ite (lit false) _ e => normalize assign e | ite (ite a b c) t e => normalize assign (ite a (ite b t e) (ite c t e)) | ite (var v) t e => match assign[v]? 
with | none => let t' := normalize (assign.insert v true) t let e' := normalize (assign.insert v false) e if t' = e' then t' else ite (var v) t' e' | some b => normalize assign (ite (lit b) t e) ``` This is pretty straightforward, but it immediately runs into a problem: ```leanOutput failed_to_show_termination (stopAt := "Could not find a decreasing measure.") fail to show termination for IfExpr.normalize with errors failed to infer structural recursion: Cannot use parameter assign: the type HashMap Nat Bool does not have a `.brecOn` recursor Cannot use parameter #2: failed to eliminate recursive application normalize assign (a.ite (b.ite t e) (c.ite t e)) Could not find a decreasing measure. ``` Lean here is telling us that it can't see that the function is terminating. Often Lean is pretty good at working this out for itself, but for sufficiently complicated functions we need to step in to give it a hint. In this case we can see that it's the recursive call `ite (ite a b c) t e` which is calling {lean}`normalize` on `(ite a (ite b t e) (ite c t e))` where Lean is having difficulty. Lean has made a guess at a plausible termination measure, based on using automatically generated {name}`sizeOf` function, but can't prove the resulting goal, essentially because `t` and `e` appear multiple times in the recursive call. ::: To address problems like this, we nearly always want to stop using the automatically generated `sizeOf` function, and construct our own termination measure. We'll use ```lean @[simp] def normSize : IfExpr → Nat | lit _ => 0 | var _ => 1 | .ite i t e => 2 * normSize i + max (normSize t) (normSize e) + 1 ``` Many different functions would work here. The basic idea is to increase the “weight” of the “condition” branch (this is the multiplicative factor in the `2 * normSize i` ), so that as long the “condition” part shrinks a bit, the whole expression counts as shrinking even if the “then” and “else” branches have grown. 
We've annotated the definition with {attrs}`@[simp]` so Lean's automated termination checker is allowed to unfold the definition. With this in place, the definition goes through using the {keywordOf Lean.Parser.Command.declaration}`termination_by` clause: :::keepEnv ```lean def normalize (assign : Std.HashMap Nat Bool) : IfExpr → IfExpr | lit b => lit b | var v => match assign[v]? with | none => var v | some b => lit b | ite (lit true) t _ => normalize assign t | ite (lit false) _ e => normalize assign e | ite (ite a b c) t e => normalize assign (ite a (ite b t e) (ite c t e)) | ite (var v) t e => match assign[v]? with | none => let t' := normalize (assign.insert v true) t let e' := normalize (assign.insert v false) e if t' = e' then t' else ite (var v) t' e' | some b => normalize assign (ite (lit b) t e) termination_by e => e.normSize ``` Now it's time to prove some properties of this function. We're just going to package together all the properties we want: ```lean -keep theorem normalize_spec (assign : Std.HashMap Nat Bool) (e : IfExpr) : (normalize assign e).normalized ∧ (∀ f, (normalize assign e).eval f = e.eval fun w => assign[w]?.getD (f w)) ∧ ∀ (v : Nat), v ∈ vars (normalize assign e) → ¬ v ∈ assign := sorry ``` That is: * the result of {lean}`normalize` is actually normalized according to the initial definitions, * if we normalize an “if-then-else” expression using some assignments, and then evaluate the remaining variables, we get the same result as evaluating the original “if-then-else” using the composite of the two assignments, * and any variable appearing in the assignments no longer appears in the normalized expression. 
You might think that we should state these three properties as separate lemmas, but it turns out that proving them all at once is really convenient, because we can use the {tactic}`fun_induction` tactic to assume that all these properties hold for {lean}`normalize` in the recursive calls, and then {tactic}`grind` will just put all the facts together for the result: ```lean -- We tell `grind` to unfold our definitions above. attribute [local grind] normalized hasNestedIf hasConstantIf hasRedundantIf disjoint vars eval List.disjoint theorem normalize_spec (assign : Std.HashMap Nat Bool) (e : IfExpr) : (normalize assign e).normalized ∧ (∀ f, (normalize assign e).eval f = e.eval fun w => assign[w]?.getD (f w)) ∧ ∀ (v : Nat), v ∈ vars (normalize assign e) → ¬ v ∈ assign := by fun_induction normalize with grind ``` The fact that the {tactic}`fun_induction` plus {tactic}`grind` combination just works here is sort of astonishing. We're really excited about this, and we're hoping to see a lot more proofs in this style! A lovely consequence of highly automated proofs is that often you have some flexibility to change the statements, without changing the proof at all! As examples, the particular way that we asserted above that “any variable appearing in the assignments no longer appears in the normalized expression” could be stated in many different ways (although not omitted!). The variations really don't matter, and {tactic}`grind` can both prove, and use, any of them: Here we use `assign.contains v = false`: ```lean example (assign : Std.HashMap Nat Bool) (e : IfExpr) : (normalize assign e).normalized ∧ (∀ f, (normalize assign e).eval f = e.eval fun w => assign[w]?.getD (f w)) ∧ ∀ (v : Nat), v ∈ vars (normalize assign e) → assign.contains v = false := by fun_induction normalize with grind ``` and here we use `assign[v]? 
= none`: ```lean example (assign : Std.HashMap Nat Bool) (e : IfExpr) : (normalize assign e).normalized ∧ (∀ f, (normalize assign e).eval f = e.eval fun w => assign[w]?.getD (f w)) ∧ ∀ (v : Nat), v ∈ vars (normalize assign e) → assign[v]? = none := by fun_induction normalize with grind ``` In fact, it's also of no consequence to `grind` whether we use a {name}`HashMap` or a {name}`TreeMap` to store the assignments, we can simply switch that implementation detail out, without having to touch the proofs: ::: ```lean -show -- We have to repeat these annotations because we've rolled back the environment to before we defined `normalize`. attribute [local grind] normalized hasNestedIf hasConstantIf hasRedundantIf disjoint vars eval List.disjoint ``` ```lean def normalize (assign : Std.TreeMap Nat Bool) : IfExpr → IfExpr | lit b => lit b | var v => match assign[v]? with | none => var v | some b => lit b | ite (lit true) t _ => normalize assign t | ite (lit false) _ e => normalize assign e | ite (ite a b c) t e => normalize assign (ite a (ite b t e) (ite c t e)) | ite (var v) t e => match assign[v]? with | none => let t' := normalize (assign.insert v true) t let e' := normalize (assign.insert v false) e if t' = e' then t' else ite (var v) t' e' | some b => normalize assign (ite (lit b) t e) termination_by e => e.normSize theorem normalize_spec (assign : Std.TreeMap Nat Bool) (e : IfExpr) : (normalize assign e).normalized ∧ (∀ f, (normalize assign e).eval f = e.eval fun w => assign[w]?.getD (f w)) ∧ ∀ (v : Nat), v ∈ vars (normalize assign e) → ¬ v ∈ assign := by fun_induction normalize with grind ``` (The fact that we can do this relies on the fact that all the lemmas for both {name}`HashMap` and for {name}`TreeMap` that {tactic}`grind` needs have already be annotated in the standard library.) 
If you'd like to play around with this code, you can find the whole file [here](https://github.com/leanprover/lean4/blob/master/tests/lean/run/grind_ite.lean), or in fact [play with it with no installation](https://live.lean-lang.org/#project=lean-nightly&url=https%3A%2F%2Fraw.githubusercontent.com%2Fleanprover%2Flean4%2Frefs%2Fheads%2Fmaster%2Ftests%2Flean%2Frun%2Fgrind_ite.lean) in the Live Lean editor. ```lean -show end IfExpr ```
reference-manual/Manual/Grind/ExtendedExamples/Integration.lean
import VersoManual import Lean.Parser.Term import Manual.Meta open Verso.Genre Manual open Verso.Genre.Manual.InlineLean open Verso.Doc.Elab (CodeBlockExpander) open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode open Lean.Grind #doc (Manual) "Integrating `grind`'s Features" => :::paragraph This example demonstrates how the various submodules of {tactic}`grind` are seamlessly integrated. In particular we can: * instantiate theorems from the library, using custom patterns, * perform case splitting, * do linear integer arithmetic reasoning, including modularity conditions, and * do Gröbner basis reasoning all without providing explicit instructions to drive the interactions between these modes of reasoning. ::: For this example we'll begin with a “mocked up” version of the real numbers, and the `sin` and `cos` functions. Of course, this example works [without any changes](https://github.com/leanprover-community/mathlib4/blob/master/MathlibTest/grind/trig.lean) using Mathlib's versions of these! :::TODO A `sorry` for `instCommRingR` causes a run-time crash. It's unclear why. ::: ```lean axiom R : Type @[instance] axiom instCommRingR : Lean.Grind.CommRing R axiom sin : R → R axiom cos : R → R axiom trig_identity : ∀ x, (cos x)^2 + (sin x)^2 = 1 ``` :::paragraph Our first step is to tell grind to “put the trig identity on the whiteboard” whenever it sees a goal involving {name}`sin` or {name}`cos`: ```lean grind_pattern trig_identity => cos x grind_pattern trig_identity => sin x ``` Note here we use *two* different patterns for the same theorem, so the theorem is instantiated even if {tactic}`grind` sees just one of these functions. If we preferred to more conservatively instantiate the theorem only when both {name}`sin` and {name}`cos` are present, we could have used a multi-pattern: ```lean -keep grind_pattern trig_identity => cos x, sin x ``` For this example, either approach will work. 
::: ::::leanSection ```lean -show variable {x : R} ``` :::paragraph Because `grind` immediately notices the trig identity, we can prove goals like this: ```lean example : (cos x + sin x)^2 = 2 * cos x * sin x + 1 := by grind ``` Here {tactic}`grind` does the following: 1. It notices {lean}`cos x` and {lean}`sin x`, so instantiates the trig identity. 2. It notices that this is a polynomial in {inst}`CommRing R`, and sends it to the Gröbner basis module. No calculation happens at this point: it's the first polynomial relation in this ring, so the Gröbner basis is updated to {lean}`[(cos x)^2 + (sin x)^2 - 1]`. 3. It notices that the left and right hand sides of the goal are polynomials in {inst}`CommRing R`, and sends them to the Gröbner basis module for normalization. Since their normal forms modulo {lean}`(cos x)^2 + (sin x)^2 = 1` are equal, their equivalence classes are merged, and the goal is solved. ::: :::paragraph We can also do this sort of argument when {tech}[congruence closure] is needed: ```lean example (f : R → Nat) : f ((cos x + sin x)^2) = f (2 * cos x * sin x + 1) := by grind ``` ```lean -show variable (f : R → Nat) (n : Nat) ``` As before, {tactic}`grind` instantiates the trig identity, notices that {lean}`(cos x + sin x)^2` and {lean}`2 * cos x * sin x + 1` are equal modulo {lean}`(cos x)^2 + (sin x)^2 = 1`, puts those algebraic expressions in the same equivalence class, and then puts the function applications {lean}`f ((cos x + sin x)^2)` and {lean}`f (2 * cos x * sin x + 1)` in the same equivalence class, and closes the goal. ::: Notice that we've used an arbitrary function {typed}`f : R → Nat` here; let's check that `grind` can use some linear integer arithmetic reasoning after the Gröbner basis steps: ```lean example (f : R → Nat) : 4 * f ((cos x + sin x)^2) ≠ 2 + f (2 * cos x * sin x + 1) := by grind ``` Here {tactic}`grind` first works out that this goal reduces to {lean}`4 * n ≠ 2 + n` for some {typed}`n : Nat` (i.e. 
by identifying the two function applications as described above), and then uses modularity to derive a contradiction. Finally, we can also mix in some case splitting: ```lean example (f : R → Nat) : max 3 (4 * f ((cos x + sin x)^2)) ≠ 2 + f (2 * cos x * sin x + 1) := by grind ``` As before, {tactic}`grind` first does the instantiation and Gröbner basis calculations required to identify the two function applications. However the `cutsat` algorithm by itself can't do anything with {lean}`max 3 (4 * n) ≠ 2 + n`. Next, after instantiating {lean}`Nat.max_def` (automatically, because of an annotation in the standard library) which states {lean}`∀ {n m : Nat}, max n m = if n ≤ m then m else n`, {tactic}`grind` can then case split on the inequality. In the branch {lean}`3 ≤ 4 * n`, `cutsat` again uses modularity to prove `4 * n ≠ 2 + n`. In the branch {lean}`4 * n < 3`, `cutsat` quickly determines {lean}`n = 0`, and then notices that {lean}`4 * 0 ≠ 2 + 0`. This has been, of course, a quite artificial example! In practice, this sort of automatic integration of different reasoning modes is very powerful: the central “whiteboard” which tracks instantiated theorems and equivalence classes can hand off relevant terms and equalities to the appropriate modules (here, `cutsat` and Gröbner bases), which can then return new facts to the whiteboard. ::::
reference-manual/static/README.txt
The directory `katex` contains KaTeX v0.16.11 (MIT license)
reference-manual/static/fonts/source-serif/LICENSE.md
Copyright 2014 - 2023 Adobe (http://www.adobe.com/), with Reserved Font Name ‘Source’. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). 
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. 
TERMINATION This license becomes null and void if any of the above conditions are not met. DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
reference-manual/static/fonts/noto-sans-mono/OFL.txt
Copyright 2022 The Noto Project Authors (https://github.com/notofonts/latin-greek-cyrillic) This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: https://openfontlicense.org ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). "Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. 
"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. TERMINATION This license becomes null and void if any of the above conditions are not met. 
DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
reference-manual/static/fonts/source-code-pro/LICENSE.md
© 2023 Adobe (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). 
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. 
TERMINATION This license becomes null and void if any of the above conditions are not met. DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
reference-manual/static/fonts/source-sans/LICENSE.md
Copyright 2010-2024 Adobe (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL ----------------------------------------------------------- SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ----------------------------------------------------------- PREAMBLE The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. DEFINITIONS "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. "Reserved Font Name" refers to any names specified as such after the copyright statement(s). "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). 
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. PERMISSION & CONDITIONS Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. 
TERMINATION This license becomes null and void if any of the above conditions are not met. DISCLAIMER THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
reference-manual/figures/array.tex
% Figure source: in-memory layout of a Lean runtime array object
% (object header, size, capacity, and data fields), rendered as a
% single-row TikZ matrix of boxes with brace legends under each field.
% Compiled with LuaLaTeX (see the local-variables block at the end).
\documentclass{standalone}
\usepackage{fontspec}
\usepackage{sourcecodepro}
\usepackage{tikz}
\setmainfont{TeX Gyre Heros}
\setmonofont{Source Code Pro}
\usetikzlibrary{matrix, positioning, decorations.pathreplacing, calligraphy}
\tikzset{
  % `layout`: the row of field boxes, labelled in a monospaced font.
  layout/.style={
    matrix of nodes,
    thick,
    row sep=-\pgflinewidth,
    %column sep=-\pgflinewidth,
    column sep=2pt,
    nodes={rectangle, draw=black, align=center, font=\ttfamily},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
  },
  % `descr`: alternative boxed-matrix style; defined here but not
  % referenced anywhere below in this file.
  descr/.style={
    matrix of nodes,
    row sep=-\pgflinewidth,
    column sep=-\pgflinewidth,
    nodes={rectangle, align=right, draw=black},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
    column 1/.style={anchor=base east},
  },
  % Per-field box widths.
  header/.style={ text width=10em },
  sizet/.style={ text width=6.5em },
  arr/.style={ text width=10em },
  % `legend`: calligraphic brace plus caption drawn under a field box.
  legend/.style={
    decorate,
    decoration={calligraphic brace, amplitude=10pt, mirror},
    line width=0.5pt,
    % node options
    align=center,
    midway,
    below
  }
}
\begin{document}
\begin{tikzpicture}
  % One row of boxes: header | size | capacity | data.
  \matrix (array) [layout] {
    \node[header](array-1-1){m\_header}; &
    \node[sizet](array-1-2){m\_size}; &
    \node[sizet](array-1-3){m\_capacity}; &
    \node[arr](array-1-4){m\_data}; \\
  };
  % Brace legends naming each field and its C type.
  \draw[legend] ([yshift=-2pt] array-1-1.south west) -- ([yshift=-2pt]array-1-1.south east)
    node [midway, below, yshift=-1em] {Lean object header};
  \draw[legend] ([yshift=-2pt]array-1-2.south west) -- ([yshift=-2pt]array-1-2.south east)
    node [align=center,midway, below, yshift=-1em] {Size\\\texttt{size\_t}};
  \draw[legend] ([yshift=-2pt]array-1-3.south west) -- ([yshift=-2pt]array-1-3.south east)
    node [midway, below, yshift=-1em] {Allocated space\\\texttt{size\_t}};
  \draw[legend] ([yshift=-2pt]array-1-4.south west) -- ([yshift=-2pt]array-1-4.south east)
    node [midway, below, yshift=-1em] {Array data\\\texttt{lean\_object *} array};
\end{tikzpicture}
\end{document}
% Local Variables:
% TeX-engine: luatex
% End:
reference-manual/figures/thunk.tex
% Figure source: in-memory layout of a Lean runtime thunk object
% (object header, saved value, and closure fields), rendered as a
% single-row TikZ matrix of boxes with brace legends under each field.
% Compiled with LuaLaTeX (see the local-variables block at the end).
%
% NOTE(review): the matrix/node names previously said `string` — a
% leftover from the figure this one was derived from; renamed to `thunk`.
\documentclass{standalone}
\usepackage{fontspec}
\usepackage{sourcecodepro}
\usepackage{tikz}
\setmainfont{TeX Gyre Heros}
\setmonofont{Source Code Pro}
\usetikzlibrary{matrix, positioning, decorations.pathreplacing, calligraphy}
\tikzset{
  % `layout`: the row of field boxes, labelled in a monospaced font.
  layout/.style={
    matrix of nodes,
    thick,
    row sep=-\pgflinewidth,
    %column sep=-\pgflinewidth,
    column sep=2pt,
    nodes={rectangle, draw=black, align=center, font=\ttfamily},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
  },
  % `descr`: alternative boxed-matrix style shared with the sibling
  % figures; defined but not referenced below.
  descr/.style={
    matrix of nodes,
    row sep=-\pgflinewidth,
    column sep=-\pgflinewidth,
    nodes={rectangle, align=right, draw=black},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
    column 1/.style={anchor=base east},
  },
  % Per-field box widths (all equal in this figure).
  header/.style={ text width=10em },
  sizet/.style={ text width=10em },
  arr/.style={ text width=10em },
  % `legend`: calligraphic brace plus caption drawn under a field box.
  legend/.style={
    decorate,
    decoration={calligraphic brace, amplitude=10pt, mirror},
    line width=0.5pt,
    % node options
    align=center,
    midway,
    below
  }
}
\begin{document}
\begin{tikzpicture}
  % One row of boxes: header | value | closure.
  \matrix (thunk) [layout] {
    \node[header](thunk-1-1){m\_header}; &
    \node[sizet](thunk-1-2){m\_value}; &
    \node[sizet](thunk-1-3){m\_closure}; \\
  };
  % Brace legends naming each field and its C type.
  \draw[legend] ([yshift=-2pt] thunk-1-1.south west) -- ([yshift=-2pt]thunk-1-1.south east)
    node [midway, below, yshift=-1em] {Lean object header};
  \draw[legend] ([yshift=-2pt]thunk-1-2.south west) -- ([yshift=-2pt]thunk-1-2.south east)
    node [align=center,midway, below, yshift=-1em] {Saved value\\\texttt{lean\_object *}};
  \draw[legend] ([yshift=-2pt]thunk-1-3.south west) -- ([yshift=-2pt]thunk-1-3.south east)
    node [midway, below, yshift=-1em] {Closure\\\texttt{lean\_object *}};
\end{tikzpicture}
\end{document}
% Local Variables:
% TeX-engine: luatex
% End:
reference-manual/figures/coe-chain.tex
\documentclass{standalone}
\usepackage{fontspec}
\usepackage{sourcecodepro}
\usepackage{tikz}
\usepackage{unicode-math}
\setmainfont{TeX Gyre Heros}
\setmonofont{Source Code Pro}
\setmathfont{TeX Gyre Schola Math}
\usetikzlibrary{matrix, positioning, decorations.pathreplacing, calligraphy, calc}
\tikzset{
  % Row of (undrawn) cells holding the coercion classes in chain order.
  layout/.style={
    matrix of nodes,
    row sep=-\pgflinewidth,
    %column sep=-\pgflinewidth,
    column sep=2pt,
    nodes={rectangle, align=center, font=\ttfamily, text depth=0.25ex, text height=1em},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
  },
  % A free-standing class name outside the matrix.
  cls/.style={rectangle, align=center, font=\ttfamily, text depth=0.25ex, text height=1em},
  arr/.style={ text width=10em },
  % Small brace (single cell) and larger brace (cell spans); the node
  % options after the comment apply to the label node on the brace.
  slegend/.style={
    decorate,
    decoration={calligraphic brace, amplitude=3pt, mirror},
    line width=0.5pt,
    % node options
    align=center, midway, below, font=\ttfamily
  },
  legend/.style={
    decorate,
    decoration={calligraphic brace, amplitude=5pt, mirror},
    line width=0.5pt,
    % node options
    align=center, midway, below, font=\ttfamily, text depth=0.25ex, text height=1em
  }
}
\begin{document}
\begin{tikzpicture}
  % The coercion chain: CoeHead? CoeOut* Coe* CoeTail?, with braces
  % showing which prefix of the chain each auxiliary class covers.
  \matrix (chain) [layout] { %
    \node[](chain-1-1){CoeHead$?$}; &
    \node[](chain-1-2){CoeOut$^*$}; &
    \node[](chain-1-3){Coe$^*$}; &
    \node[](chain-1-4){CoeTail$?$};&\\%
  };
  \draw[slegend] ([yshift=-2pt]chain-1-3.south west) -- ([yshift=-2pt]chain-1-3.south east)
    node (coetc) [midway, below, yshift=-0.3em] {CoeTC};
  \draw[legend] ([yshift=-2.5em]chain-1-2.south west) -- ([yshift=-2.5em]chain-1-3.south east)
    node (coeotc) [midway, below, yshift=-0.5em] {CoeOTC};
  \draw[legend] ([yshift=-5em]chain-1-1.south west) -- ([yshift=-5em]chain-1-3.south east)
    node (coehtc) [midway, below, yshift=-0.5em] {CoeHTC};
  \draw[legend] ([yshift=-7.5em]chain-1-1.south west) -- ([yshift=-7.5em]chain-1-4.south east)
    node (coehtct) [midway, below, yshift=-0.5em] {CoeHTCT};
  % CoeT is either CoeHTCT or CoeDep; join both with an "or" node.
  \node[cls, right=9em of coehtct](coedep){CoeDep};
  \node(or) at ([yshift=-1.5em] $(coehtct)!0.5!(coedep)$){\textit{or}};
  \draw (coehtct.south) |- (or.west);
  \draw (coedep.south) |- (or.east);
  \node[cls, below=1em of or](coet){CoeT};
  \draw (or.south) -- (coet.north);
\end{tikzpicture}
\end{document}
% Local Variables:
% TeX-engine: luatex
% End:
reference-manual/figures/string.tex
\documentclass{standalone}
\usepackage{fontspec}
\usepackage{sourcecodepro}
\usepackage{tikz}
\setmainfont{TeX Gyre Heros}
\setmonofont{Source Code Pro}
\usetikzlibrary{matrix, positioning, decorations.pathreplacing, calligraphy}
\tikzset{
  % Single row of boxed cells depicting the fields of a heap object.
  layout/.style={
    matrix of nodes,
    thick,
    row sep=-\pgflinewidth,
    %column sep=-\pgflinewidth,
    column sep=2pt,
    nodes={rectangle, draw=black, align=center, font=\ttfamily},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
  },
  % Cell widths for the header field, size_t fields, and the data pointer.
  header/.style={ text width=10em },
  sizet/.style={ text width=6.5em },
  arr/.style={ text width=10em },
  % Calligraphic brace drawn under a cell; the node options after the
  % comment apply to the label node placed on the brace path.
  legend/.style={
    decorate,
    decoration={calligraphic brace, amplitude=10pt, mirror},
    line width=0.5pt,
    % node options
    align=center, midway, below
  }
}
\begin{document}
\begin{tikzpicture}
  % In-memory layout of a Lean string object; the trailing cell shows
  % the terminating NUL byte of the character data.
  \matrix (string) [layout] {
    \node[header](string-1-1){m\_header}; &
    \node[sizet](string-1-2){m\_size}; &
    \node[sizet](string-1-3){m\_capacity}; &
    \node[sizet](string-1-4){m\_length}; &
    \node[arr](string-1-5){m\_data}; &
    '\textbackslash{}0'\\
  };
  % One brace per field, giving its meaning and C type.
  \draw[legend] ([yshift=-2pt]string-1-1.south west) -- ([yshift=-2pt]string-1-1.south east)
    node [midway, below, yshift=-1em] {Lean object header};
  \draw[legend] ([yshift=-2pt]string-1-2.south west) -- ([yshift=-2pt]string-1-2.south east)
    node [align=center,midway, below, yshift=-1em] {Byte count\\\texttt{size\_t}};
  \draw[legend] ([yshift=-2pt]string-1-3.south west) -- ([yshift=-2pt]string-1-3.south east)
    node [midway, below, yshift=-1em] {Allocated space\\\texttt{size\_t}};
  \draw[legend] ([yshift=-2pt]string-1-4.south west) -- ([yshift=-2pt]string-1-4.south east)
    node [midway, below, yshift=-1em] {Characters\\\texttt{size\_t}};
  \draw[legend] ([yshift=-2pt]string-1-5.south west) -- ([yshift=-2pt]string-1-5.south east)
    node [midway, below, yshift=-1em] {String data\\\texttt{char} array};
\end{tikzpicture}
\end{document}
% Local Variables:
% TeX-engine: luatex
% End:
reference-manual/figures/pipeline-overview.tex
\documentclass{standalone}
\usepackage{fontspec}
\usepackage{fontawesome}
\usepackage[svgnames]{xcolor}
\usepackage{sourcecodepro}
\setmainfont{TeX Gyre Heros}
\setmonofont{Source Code Pro}
\usepackage{tikz}
\usetikzlibrary{matrix, positioning, decorations.pathreplacing, calligraphy, shapes, arrows.meta}
\makeatletter
% A ``document'' node shape: a rectangle whose top-right corner is
% folded over, drawn as a dog-ear.
\pgfdeclareshape{document}{
  \inheritsavedanchors[from=rectangle] % this is nearly a rectangle
  \inheritanchorborder[from=rectangle]
  \inheritanchor[from=rectangle]{center}
  \inheritanchor[from=rectangle]{north}
  \inheritanchor[from=rectangle]{south}
  \inheritanchor[from=rectangle]{west}
  \inheritanchor[from=rectangle]{east}
  % ... and possibly more
  \backgroundpath{% this is new
    % store lower right in xa/ya and upper right in xb/yb
    \southwest \pgf@xa=\pgf@x \pgf@ya=\pgf@y
    \northeast \pgf@xb=\pgf@x \pgf@yb=\pgf@y
    % compute corner of the ``flipped page''
    \pgf@xc=\pgf@xb \advance\pgf@xc by-18pt % this should be a parameter
    \pgf@yc=\pgf@yb \advance\pgf@yc by-18pt
    % construct main path
    \pgfpathmoveto{\pgfpoint{\pgf@xa}{\pgf@ya}}
    \pgfpathlineto{\pgfpoint{\pgf@xa}{\pgf@yb}}
    \pgfpathlineto{\pgfpoint{\pgf@xc}{\pgf@yb}}
    \pgfpathlineto{\pgfpoint{\pgf@xb}{\pgf@yc}}
    \pgfpathlineto{\pgfpoint{\pgf@xb}{\pgf@ya}}
    \pgfpathclose
    % add little corner
    \pgfpathmoveto{\pgfpoint{\pgf@xc}{\pgf@yb}}
    \pgfpathlineto{\pgfpoint{\pgf@xc}{\pgf@yc}}
    \pgfpathlineto{\pgfpoint{\pgf@xb}{\pgf@yc}}
    \pgfpathlineto{\pgfpoint{\pgf@xc}{\pgf@yc}}
  }
}
\makeatother
\tikzset{
  % Default arrow tip for pass edges.
  >={Stealth[width=3mm, length=3mm]},
  layout/.style={
    matrix of nodes,
    thick,
    row sep=-\pgflinewidth,
    %column sep=-\pgflinewidth,
    column sep=2pt,
    nodes={rectangle, draw=black, align=center, font=\ttfamily},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
  },
  descr/.style={
    matrix of nodes,
    row sep=-\pgflinewidth,
    column sep=-\pgflinewidth,
    nodes={rectangle, align=right, draw=black},
    minimum height=1.5em,
    text depth=0.5ex,
    text height=2ex,
    nodes in empty cells,
    column 1/.style={anchor=base east},
  },
  header/.style={ text width=10em },
  sizet/.style={ text width=6.5em },
  arr/.style={ text width=10em },
  legend/.style={
    decorate,
    decoration={calligraphic brace, amplitude=10pt, mirror},
    line width=0.5pt,
    % node options
    align=center, midway, below
  },
  % Scale a node's height relative to its minimum width.
  adjust height/.style={minimum height=#1*\pgfkeysvalueof{/pgf/minimum width}},
  % A source file (dog-eared document shape).
  doc/.style={
    draw, thick, align=center, color=black,
    shape=document, inner sep=2ex, adjust height=1.4
  },
  % An intermediate representation (plain rectangle).
  obj/.style={
    draw, thick, align=center, color=black,
    shape=rectangle, inner sep=2ex, text width=6em
  },
  % A compiler pass (labeled arrow between representations).
  pass/.style={ thick, arrows={->} },
}
\usepackage[sfdefault]{inter}
\begin{document}
\begin{tikzpicture}[node distance=2cm]
  % Stages of the Lean pipeline, top to bottom, with the kernel check
  % branching off to the right.
  \node(code) [doc, text width=5em, minimum width=6em]{Code.lean};
  \node(ast) [obj, draw, below=of code, text width=5em]{Syntax Tree};
  \node(core) [obj, draw, below=of ast]{Core Type Theory};
  \node(core2) [obj, draw, right=3cm of core, text width=6.8em]{Core Type Theory\\(no recursion)};
  \node(ok) [below=of core2]{\huge {\color{Green}\faCheck{}}/\faTimes{}};
  \node(exe) [obj, draw, below=of core]{Executable};
  % Passes connecting the stages.
  \draw[pass] (code) edge node[anchor=west]{Parsing} (ast);
  \draw[pass] (ast) edge node[anchor=west]{Elaboration} (core);
  \draw[pass] (core) edge node[anchor=west]{Compilation} (exe);
  \draw[pass] (core) edge node[anchor=north, text width=6em]{Recursion Elimination} (core2);
  \draw[pass] (core2) edge node[anchor=west, text width=4em, align=center]{Kernel Check} (ok);
  \draw[pass, loop left] (ast) edge node[] {Macro Expansion} (ast);
\end{tikzpicture}
\end{document}
% Local Variables:
% TeX-engine: luatex
% End:
reference-manual/figures/lake-workspace.tex
\documentclass{standalone}
\usepackage{fontspec}
\usepackage{sourcecodepro}
\usepackage{tikz}
\setmainfont{TeX Gyre Heros}
\setmonofont{Source Code Pro}
\usetikzlibrary{matrix, positioning, decorations.pathreplacing, calligraphy, fit, shapes}
\tikzset{
  % A directory: rounded box fitted around its contents.
  dir/.style={draw=black, rounded corners=0.15cm, inner sep=0.5cm},
  % A file entry; intentionally empty, overridden inside scopes below.
  file/.style={},
  % Put a label in the north-west corner of a fitted box and grow the
  % box vertically so the label does not overlap the contents.
  fit label/.style={
    yshift={(height("#1")+8pt)/2},
    inner ysep={(height("#1")+16pt)/2},
    label={[anchor=north west]north west:#1}
  }
}
\begin{document}
\begin{tikzpicture}[node distance=0.5cm]
  \node[file](toolchain){\texttt{lean-toolchain}};
  % Root package: configuration, libraries, executables, and manifest.
  \begin{scope}[node distance=0.5cm]
    \node[file,below=1.25cm of toolchain.south west, anchor=west, xshift=0.65cm](lakefile){Package configuration file (\texttt{lakefile.\{toml,lean\}})};
    \node[below=of lakefile.north west, anchor=north west](lib){Libraries};
    \node[below=of lib.north west, anchor=north west](exe){Executables};
    \node[below=of exe.north west, anchor=north west](manifest){Manifest (\texttt{lake-manifest.json})};
    \node[dir, fit=(lakefile)(lib)(exe)(manifest), fit label={Root package}](src){};
  \end{scope}
  % Dependency 1. (A stray `]' that followed these scope options has
  % been removed; it produced spurious content inside the picture.)
  \begin{scope}[node distance=0.4cm,
                dir/.style={draw, font=\footnotesize, rounded corners=0.15cm},
                file/.style={font=\footnotesize}]
    \node[file,below=2.25cm of src.south west, anchor=north west, xshift=1.25cm](lakefile1){Package configuration file};
    \node[file, below=of lakefile1.north west, anchor=north west](lib1){Libraries};
    \node[file, below=of lib1.north west, anchor=north west](exe1){Executables};
    \node[file, below=of exe1.north west, anchor=north west](art1){Artifacts};
    \node[dir, fit=(lakefile1)(lib1)(exe1)(art1), fit label={Dependency 1}](dep1){};
  \end{scope}
  % Dependency 2, laid out to the right of dependency 1.
  \begin{scope}[node distance=0.4cm,
                dir/.style={draw, font=\footnotesize, rounded corners=0.15cm},
                file/.style={font=\footnotesize}]
    \node[file,right=0.75cm of lakefile1.north east, anchor=north west](lakefile2){Package configuration file};
    \node[file, below=of lakefile2.north west, anchor=north west](lib2){Libraries};
    \node[file, below=of lib2.north west, anchor=north west](exe2){Executables};
    \node[file, below=of exe2.north west, anchor=north west](art2){Artifacts};
    \node[dir, fit=(lakefile2)(lib2)(exe2)(art2), fit label={Dependency 2}](dep2){};
  \end{scope}
  \node[file,below=of art1.south west, anchor=north west, xshift=-0.25cm](morepackages){\LARGE $\cdots$};
  \node[dir, fit=(dep1)(dep2)(morepackages), fit label={Packages}](pkgs){};
  % Build artifacts for the root package.
  \begin{scope}[node distance=0.5cm]
    \node[file, below=1cm of morepackages.south west, anchor=north west](builtlib){Built libraries};
    \node[file, below=of builtlib.north west, anchor=north west](builtexe){Built executables};
  \end{scope}
  \node[dir, fit=(builtlib)(builtexe), fit label={Artifacts}](arts){};
  % Nesting: .lake contains packages and artifacts; the workspace
  % contains the root package, toolchain file, and the .lake directory.
  \node[dir, fit=(pkgs)(arts), fit label={Lake Directory (\texttt{.lake})}](builddir){};
  \node[dir, fit=(src)(lakefile)(toolchain)(builddir), fit label={Workspace}](ws){};
\end{tikzpicture}
\end{document}
% Local Variables:
% TeX-engine: luatex
% End:
reference-manual/fix_diagnostics/README.md
# fix_diagnostics A Python tool to automate fixing Lean diagnostics via LSP code actions. This is mostly LLM-generated code that hasn't been extensively tested/checked, so please treat it with due caution. ## Overview `fix_diagnostics` helps you automatically apply code actions to fix diagnostics (errors, warnings, info messages) in your Lean projects. It works by: 1. Running `lake build` to find files with diagnostics 2. Using the Lean LSP server to get structured diagnostic information 3. Querying available code actions for each diagnostic (including widget-based "Try this:" suggestions) 4. Applying selected code actions to fix issues The tool has **no external dependencies** - it uses only Python's standard library with a manual JSON-RPC implementation for LSP communication. **Widget Support**: The tool queries Lean's interactive diagnostics via the RPC extension to discover code actions embedded in widget components (like "Try this:" suggestions). These appear as regular code actions that can be filtered and applied just like standard LSP code actions. ## Requirements - Python 3.7+ - Lean 4 with Lake (the `lake serve` command must be available) - Must be run from a Lake project directory (containing `lakefile.lean` or `lakefile.toml`) ## Quick Start ### CLI Usage **Typical workflow:** 1. List diagnostics and available actions with `--list` 2. Filter by patterns to find the fixes you want 3. Preview with dry-run (default) 4. Apply with `--no-dry-run` By default, the CLI applies **unique actions per diagnostic** (deduplicating by edit effect). Use `--minimal` to apply only the **smallest edit** for each diagnostic. 
```bash # Step 1: RECOMMENDED - List all diagnostics and their available code actions # (shows edit_size for each action) python3 -m fix_diagnostics --list # Filter diagnostics by pattern and list actions python3 -m fix_diagnostics --diagnostic-pattern "unused variable" --list # Preview fixes (dry-run, default behavior) - applies unique actions per diagnostic python3 -m fix_diagnostics --diagnostic-pattern "unused" --action-pattern "Remove" # Apply only the smallest edit for each diagnostic python3 -m fix_diagnostics --diagnostic-pattern "unused" --minimal --no-dry-run # Apply all unique fixes (default deduplication) python3 -m fix_diagnostics --diagnostic-pattern "unused" --action-pattern "Remove" --no-dry-run # Filter by severity (1=error, 2=warning, 3=info, 4=hint) python3 -m fix_diagnostics --severity 2 --action-pattern "Remove" # Preview ALL available code actions (no filters) # Warning: This can produce a lot of output! python3 -m fix_diagnostics ``` ### REPL Usage The tool is designed for interactive exploration in the Python REPL: ```python from fix_diagnostics import * # Step 1: Find files with diagnostics files = get_files() print(f"Found {len(files)} files with diagnostics") # Step 2: Get all diagnostics from LSP diagnostics = get_diagnostics(files) print(f"Got {len(diagnostics)} diagnostics") # Inspect diagnostics for d in diagnostics[:5]: print(f"{d.file.name}:{d.line}:{d.col} - {d.message}") # Step 3: Get code actions for diagnostics actions = get_code_actions(diagnostics) print(f"Found {len(actions)} code actions") # Filter actions unused_actions = [a for a in actions if 'unused' in a.diagnostic.message] remove_actions = [a for a in actions if 'Remove' in a.title] foo_file_actions = [a for a in actions if 'Foo.lean' in str(a.diagnostic.file)] # Step 4: Apply actions (dry-run by default) results = apply_code_actions(remove_actions, dry_run=True) # Review diffs for result in results: if result.success: print(f"\n{result.action.title}") 
print(result.diff) # Apply for real results = apply_code_actions(remove_actions, dry_run=False) print(f"Applied {sum(r.success for r in results)} actions") ``` ### Defaults Functions automatically call previous steps if inputs aren't provided: ```python # Get diagnostics without specifying files (calls get_files() automatically) diagnostics = get_diagnostics() # Get actions without specifying diagnostics (calls get_diagnostics() automatically) actions = get_code_actions() # This is equivalent to: # files = get_files() # diagnostics = get_diagnostics(files) # actions = get_code_actions(diagnostics) ``` ### Working with Single Items Both `get_code_actions()` and `apply_code_actions()` accept single items or lists: ```python from fix_diagnostics import get_diagnostics, get_code_actions, apply_code_actions diagnostics = get_diagnostics() # Pass a single diagnostic single_diag = diagnostics[0] actions = get_code_actions(single_diag) # Works! # Pass a single action single_action = actions[0] results = apply_code_actions(single_action, dry_run=True) # Works! # Both always return lists print(f"Got {len(results)} result(s)") # Always a list, even for single input ``` ### Persistent LSP Client for REPL For interactive work, use `lsp_start()` to create a persistent LSP client that's automatically reused across function calls. This avoids repeatedly starting and stopping the Lean server: ```python from fix_diagnostics import lsp_start, get_diagnostics, get_code_actions, lsp_stop # Start persistent client (call once) lsp_start() # All subsequent calls automatically reuse the global client diagnostics = get_diagnostics() actions = get_code_actions(diagnostics) # Filter and continue working - client is still alive unused_diags = [d for d in diagnostics if 'unused' in d.message] more_actions = get_code_actions(unused_diags) # You can call the same diagnostic multiple times - it works correctly same_diag_actions = get_code_actions(diagnostics[0]) # Works! 
again = get_code_actions(diagnostics[0]) # Still works! # Optional: manually stop when done (also happens automatically on exit) lsp_stop() ``` **Without `lsp_start()`**, each function call creates and destroys a temporary LSP client, which is slower but works fine for scripts. **With `lsp_start()`**, a single LSP client persists across all calls, dramatically speeding up interactive exploration. The client tracks which files are open and avoids reopening them, so repeated calls on the same diagnostics work correctly. ## API Reference ### Core Functions #### `get_files(build_cmd="lake build")` Find files with diagnostics from build output. **Returns:** `List[Path]` - Files with diagnostics, ordered by appearance in build output **Raises:** `FileNotFoundError` if not in a Lake project directory #### `lsp_start()` Start a persistent LSP client for REPL use. Creates a global LSP client that will be automatically reused by `get_diagnostics()` and `get_code_actions()`. The client is cleaned up automatically on exit. **Returns:** `LspClient` - The started global LSP client **Example:** ```python from fix_diagnostics import lsp_start, get_diagnostics lsp_start() diagnostics = get_diagnostics() # Reuses the global client ``` #### `lsp_stop()` Stop the global LSP client. Shuts down the persistent LSP client created by `lsp_start()`. Normally not needed as cleanup happens automatically on exit, but useful for restarting the server. **Example:** ```python from fix_diagnostics import lsp_start, lsp_stop lsp_start() # ... do work lsp_stop() # Explicitly stop lsp_start() # Can start again ``` #### `get_diagnostics(files=None, lsp_client=None)` Get diagnostics from LSP for files. If `lsp_start()` was called, automatically reuses the global client. Otherwise, creates a temporary client. 
**Args:** - `files`: `List[Path]` or `None` (calls `get_files()` if not provided) - `lsp_client`: Optional `LspClient` to reuse (overrides global client) **Returns:** `List[Diagnostic]` #### `get_code_actions(diagnostics=None, lsp_client=None)` Get code actions for diagnostics. If `lsp_start()` was called, automatically reuses the global client. Otherwise, creates a temporary client. **Args:** - `diagnostics`: Single `Diagnostic`, `List[Diagnostic]`, or `None` (calls `get_diagnostics()` if not provided) - `lsp_client`: Optional `LspClient` to reuse (overrides global client) **Returns:** `List[CodeAction]` - Each action has a back-pointer to its diagnostic #### `apply_code_actions(actions, dry_run=True)` Apply code actions to files. **Args:** - `actions`: Single `CodeAction` or `List[CodeAction]` - `dry_run`: If `True`, compute diffs but don't modify files (default: `True`) **Returns:** `List[ApplyResult]` - One result per action ### Data Classes #### `Diagnostic` ```python @dataclass class Diagnostic: file: Path # File containing the diagnostic line: int # Line number (0-based, LSP protocol) col: int # Column number (0-based, UTF-16 offset, LSP protocol) severity: int # 1=Error, 2=Warning, 3=Info, 4=Hint message: str # Diagnostic message code: Optional[str] # Diagnostic code source: Optional[str] # Source (e.g., "Lean", linter name) range: LspRange # Full LSP range structure (with UTF-16 offsets) range_size: int # Size in codepoints (Python characters) ``` #### `CodeAction` ```python @dataclass class CodeAction: title: str # Human-readable action title kind: Optional[str] # Action kind (e.g., "quickfix") edit: dict # LSP WorkspaceEdit (with UTF-16 offsets) diagnostic: Diagnostic # Back-pointer to diagnostic edit_size: int # Magnitude of edit in codepoints (Python characters) ``` #### `ApplyResult` ```python @dataclass class ApplyResult: success: bool # Whether action was applied successfully action: CodeAction # The code action that was applied diff: str # Unified 
diff showing changes error: Optional[str] # Error message if success=False ``` ### LSP Client #### `LspClient(server_cmd=None)` Manual LSP client using JSON-RPC over stdio. **Args:** - `server_cmd`: Command to start LSP server (default: `['lake', 'serve']`) **Usage:** ```python # Context manager (recommended) with LspClient() as lsp: lsp.did_open(file_path, text) diagnostics = lsp.get_diagnostics(file_path) actions = lsp.code_action(file_path, line, col, [diagnostic]) # Manual lifecycle lsp = LspClient() lsp.initialize() # ... use lsp ... lsp.shutdown() ``` ## CLI Options ``` --diagnostic-pattern PATTERN Regex to filter diagnostic messages --action-pattern PATTERN Regex to filter code action titles --severity {1,2,3,4} Filter by severity (can specify multiple) --list List diagnostics and actions without applying --dry-run Show diffs without applying (default) --no-dry-run Actually apply changes --minimal Apply only the smallest edit per diagnostic --build-cmd CMD Build command (default: "lake build") ``` **Default behavior (no options):** - Finds ALL diagnostics in the project - Gets ALL available code actions - Deduplicates actions per diagnostic by edit effect - Previews all unique actions in dry-run mode (shows diffs but doesn't modify files) - Recommended: Use `--list` first to see what actions are available ## Examples ### Fix all unused variable warnings ```bash python3 -m fix_diagnostics \ --diagnostic-pattern "unused variable" \ --action-pattern "Remove" \ --no-dry-run ``` ### Preview all available fixes for a specific file ```python from fix_diagnostics import get_diagnostics, get_code_actions from pathlib import Path # Get diagnostics for specific file all_diags = get_diagnostics() my_file_diags = [d for d in all_diags if d.file == Path("Manual/Foo.lean")] # Get and preview actions actions = get_code_actions(my_file_diags) for action in actions: print(f"{action.title} at line {action.diagnostic.line}") ``` ### Apply specific action to all matching 
diagnostics ```python from fix_diagnostics import get_code_actions, apply_code_actions # Get all actions actions = get_code_actions() # Filter to specific action type rename_actions = [a for a in actions if a.title.startswith("Rename to _")] # Apply results = apply_code_actions(rename_actions, dry_run=False) print(f"Applied {sum(r.success for r in results)}/{len(results)} actions") ``` ### Filter by edit size Each `CodeAction` has an `edit_size` field indicating the magnitude of the change, and each `Diagnostic` has a `range_size` field indicating the size of the diagnostic's range. You can use these to filter for small changes: ```python from fix_diagnostics import get_code_actions # Get all actions actions = get_code_actions() # Filter to actions that change less than 5 characters small_edits = [a for a in actions if a.edit_size < 5] # Filter to actions that change less than 5% of the diagnostic's range small_percentage_edits = [ a for a in actions if a.diagnostic.range_size > 0 and a.edit_size / a.diagnostic.range_size < 0.05 ] # Sort by edit size to see smallest changes first sorted_actions = sorted(actions, key=lambda a: a.edit_size) # Group by diagnostic and keep smallest action for each from itertools import groupby from operator import attrgetter actions_by_diag = {} for action in actions: key = (action.diagnostic.file, action.diagnostic.line, action.diagnostic.col) if key not in actions_by_diag or action.edit_size < actions_by_diag[key].edit_size: actions_by_diag[key] = action smallest_per_diag = list(actions_by_diag.values()) ``` ### Iteratively fix issues ```python from fix_diagnostics import get_diagnostics, get_code_actions, apply_code_actions max_iterations = 5 for i in range(max_iterations): # Get current diagnostics diagnostics = get_diagnostics() unused = [d for d in diagnostics if 'unused' in d.message] if not unused: print(f"All unused variables fixed after {i} iterations!") break # Get and apply fixes actions = get_code_actions(unused) remove = 
[a for a in actions if 'Remove' in a.title] if not remove: print(f"No more remove actions available") break results = apply_code_actions(remove, dry_run=False) print(f"Iteration {i+1}: applied {sum(r.success for r in results)} fixes") ``` ## How It Works 1. **Build Output Parsing**: Runs `lake build` and parses the output to identify files with diagnostics. Supports both formats: - `warning: path/to/file.lean:123:45: message` - `path/to/file.lean:123:45: error: message` 2. **LSP Communication**: Opens each file in the Lean LSP server using `textDocument/didOpen` and waits for file processing to complete. The tool uses Lean's `$/lean/fileProgress` notifications to know when the server has finished analyzing each file, ensuring diagnostics are complete before proceeding. 3. **Code Action Query**: For each diagnostic, queries the LSP server for available code actions using `textDocument/codeAction` with the diagnostic in the context. Additionally, queries Lean's interactive diagnostics via `$/lean/rpc/call` with method `Lean.Widget.getInteractiveDiagnostics` to extract code actions from widget components (like "Try this:" suggestions). The LSP client enables widget support by setting `initializationOptions.hasWidgets` to `true`. **RPC Session Management**: RPC sessions are cached per file URI and reused across multiple calls. During long-running operations (like waiting for large files to process), the tool automatically sends keepalive messages every 2.5 seconds to prevent RPC sessions from timing out. This ensures reliable operation on large codebases. 4. **Edit Application**: Applies LSP `WorkspaceEdit` changes to files, computing unified diffs to show what changed. In dry-run mode, computes diffs without modifying files. When running in an interactive terminal, diffs are displayed with color and token-level highlighting to show exactly which characters changed. ## Limitations - The tool only handles `WorkspaceEdit` in the `changes` or `documentChanges` format. 
- Processing time depends on how long the Lean server takes to analyze each file (typically 1-3 seconds per file). ## License This tool is part of the Lean reference manual and shares its license.
reference-manual/.github/PULL_REQUEST_TEMPLATE.md
# Read this section before submitting * Ensure your PR follows the [External Contribution Guidelines](https://github.com/leanprover/reference-manual/blob/master/CONTRIBUTING.md). * Help your PR get merged quickly by making it fast to review: * Please open multiple pull requests to fix multiple small issues * Don't make systematic changes to English usage without discussing them first * Don't submit large amounts of new content without discussing it first * If the issue does not already have approval from a developer, submit the PR as draft. * The PR title/description will become the commit message. Keep it up-to-date as the PR evolves. * Remove this section, up to and including the `---` before submitting. --- Closes #0000 (issue number fixed by this PR, if any)
reference-manual/.github/ISSUE_TEMPLATE/technical-mistake.md
--- name: Technical Mistake about: Point out an incorrect statement of fact title: '' labels: bug assignees: '' --- **Describe the bug** > Please quote the incorrect text here In what way is the content incorrect? **Demonstration** Please provide a self-contained Lean example that demonstrates the incorrectness of the statement.
reference-manual/.github/ISSUE_TEMPLATE/language-issue.md
---
name: Language Issue
about: Point out incorrect or confusing use of English
title: ''
labels: ''
assignees: ''
---

**Describe the error**

> Please quote the incorrect usage or confusing text here

Why is it incorrect and/or confusing?

Unless otherwise documented, this document is written in US English following the Chicago Manual of Style. However, usages that are specific to US English and not widely understood in other communities may also be replaced with more universally understood formulations.
reference-manual/.github/ISSUE_TEMPLATE/documentation-request.md
--- name: Documentation Request about: Request documentation title: '' labels: doc-request assignees: '' --- **What question should the reference manual answer?** Please describe what the reference manual should enable you to do that you can't do right now. Ideally, this is phrased in terms of a question, like "How are strings represented in compiled code?". **Additional context** Add any other context about the question here that will help us ensure that the documentation meets your needs.