Dataset schema:

| Column | Type | Values / lengths |
| --- | --- | --- |
| licenses | sequence | lengths 1-3 |
| version | string | 636 distinct values |
| tree_hash | string | length 40 |
| path | string | lengths 5-135 |
| type | string | 2 distinct values |
| size | string | lengths 2-8 |
| text | string | lengths 25-67.1M |
| package_name | string | lengths 2-41 |
| repo | string | lengths 33-86 |
[ "MIT" ]
1.0.0
0fd73bf40485c791e6c33672c643bf1303045e9a
src/BatchIterators.jl
code
3037
module BatchIterators

using Statistics

export BatchIterator
export choose_batchsize
export centered_batch_iterator

"""
    BatchIterator(X; batchsize = nothing, limit = size(X, 2))

Wrapper that allows iterating over batches of `batchsize` columns of `X`. `X` can be of
any type supporting `size` and 2d indexing. When `limit` is provided, iteration is
restricted to the columns of `X[:, 1:limit]`.
"""
struct BatchIterator{T}
    X::T
    length::Int # Number of batches
    bsz::Int    # Batch size
    limit::Int
    function BatchIterator(X; batchsize = nothing, limit = size(X, 2))
        @assert limit > 0 && limit ≤ size(X, 2)
        bsz = (batchsize === nothing) ? choose_batchsize(size(X, 1), limit) : batchsize
        nb = ceil(Int, limit / bsz)
        new{typeof(X)}(X, nb, bsz, limit)
    end
end

view_compatible(::Any) = false
view_compatible(::Array) = true
view_compatible(bi::BatchIterator) = view_compatible(bi.X)

#######################################################################
#                              Iteration                              #
#######################################################################

function Base.getindex(it::BatchIterator, i)
    d = i - it.length # > 0 means overflow, == 0 means last batch
    cbsz = (d == 0) ? mod(it.limit - 1, it.bsz) + 1 : it.bsz # Size of current batch
    if (i < 1 || d > 0)
        @error "Out of bounds."
    else
        # TODO using views here might impact type stability.
        view_compatible(it) ? (@view it.X[:, (i-1)*it.bsz+1:(i-1)*it.bsz+cbsz]) :
                              it.X[:, (i-1)*it.bsz+1:(i-1)*it.bsz+cbsz]
    end
end

Base.length(it::BatchIterator) = it.length

function Base.iterate(it::BatchIterator{T}, st = 0) where T
    st = st + 1 # new state
    d = st - it.length # > 0 means overflow, == 0 means last batch
    (d > 0) ? nothing : (it[st], st)
end

"""
    centered_batch_iterator(X; kwargs...)

Similar to `BatchIterator`, but first performs one pass over the data to compute the
mean, and centers the batches.
"""
function centered_batch_iterator(X; kwargs...)
    bi = BatchIterator(X; kwargs...)
    μ = vec(mean(mean(b, dims = 2) for b in BatchIterator(X)))
    (b .- μ for b in bi)
end

#######################################################################
#                              Utilities                              #
#######################################################################

"""
    choose_batchsize(d, n; maxmemGB = 1.0, maxbatchsize = 2^14, sizeoneB = d*sizeof(Float64))

Computes the size (number of columns) of a batch, so that each column of the batch can be
converted to a vector of size `sizeoneB` (in bytes) with total memory constrained by
`maxmemGB` (gigabytes).
"""
function choose_batchsize(d, n; maxmemGB = 1.0, maxbatchsize = 2^14,
                          sizeoneB = d * sizeof(Float64), forcepow2 = true)
    fullsizeGB = n * sizeoneB / 1024^3 # Size of the sketches of all samples
    batchsize = (fullsizeGB > maxmemGB) ? ceil(Int, n / ceil(Int, fullsizeGB / maxmemGB)) : n
    batchsize = min(batchsize, maxbatchsize)
    (forcepow2 && batchsize != n) ? prevpow(2, batchsize) : batchsize
end

end # module
BatchIterators
https://github.com/Djoop/BatchIterators.jl.git
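A minimal usage sketch of the module above (the matrix and batch size are made up for illustration; `BatchIterator`, `length`, and indexing are the package's actual API):

```julia
using BatchIterators

X = randn(100, 10_000)                  # 100-dimensional samples, one per column
bi = BatchIterator(X; batchsize = 256)  # ceil(10_000 / 256) == 40 batches

for b in bi
    @assert size(b, 1) == 100  # each batch is a view of ≤ 256 consecutive columns
end

length(bi)  # number of batches (here 40)
bi[1]       # batches can also be indexed directly
```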
[ "MIT" ]
1.0.0
0fd73bf40485c791e6c33672c643bf1303045e9a
README.md
docs
497
# Summary

Licence: MIT.

A very small package providing the constructor `BatchIterator(X; batchsize=…, limit=…)` and the function `centered_batch_iterator(X; kwargs…)`, which allow iteration over blocks of columns of `X`, for any object `X` supporting 2d indexing and for which the function `size` is defined. The function `choose_batchsize` helps find a good batch size while controlling memory usage. The package was originally designed to iterate over samples of an out-of-core dataset.
BatchIterators
https://github.com/Djoop/BatchIterators.jl.git
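A rough illustration of `choose_batchsize` (the dimensions are hypothetical; the arithmetic follows the implementation above with its default 1 GB budget):

```julia
using BatchIterators

# 10^6 columns of dimension 10^4 in Float64: one column is 80 kB, so the full
# dataset is ≈ 74.5 GB. That needs ceil(74.5) = 75 chunks of ≤ 1 GB, i.e.
# 13334 columns per chunk, rounded down to a power of two (forcepow2 = true).
bsz = choose_batchsize(10^4, 10^6)  # == 8192
```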
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/make.jl
code
1151
using PythonCallHelpers
using Documenter

DocMeta.setdocmeta!(PythonCallHelpers, :DocTestSetup, :(using PythonCallHelpers); recursive=true)

makedocs(;
    modules=[PythonCallHelpers],
    authors="singularitti <singularitti@outlook.com> and contributors",
    repo="https://github.com/singularitti/PythonCallHelpers.jl/blob/{commit}{path}#{line}",
    sitename="PythonCallHelpers.jl",
    format=Documenter.HTML(;
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://singularitti.github.io/PythonCallHelpers.jl",
        edit_link="main",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
        "Manual" => [
            "Installation guide" => "installation.md",
        ],
        "Public API" => "public.md",
        "Developer Docs" => [
            "Contributing" => "developers/contributing.md",
            "Style Guide" => "developers/style-guide.md",
            "Design Principles" => "developers/design-principles.md",
        ],
        "Troubleshooting" => "troubleshooting.md",
    ],
)

deploydocs(;
    repo="github.com/singularitti/PythonCallHelpers.jl",
    devbranch="main",
)
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
src/PythonCallHelpers.jl
code
3656
module PythonCallHelpers

using PythonCall: Py, pygetattr, pyhasattr, pyconvert

export @pyimmutable, @pymutable, @pycallable

# Code from https://github.com/stevengj/PythonPlot.jl/blob/d17c1d5/src/PythonPlot.jl#L26-L52
struct LazyHelp
    obj::Py
    keys::Tuple{Vararg{String}}
    LazyHelp(obj) = new(obj, ())
    LazyHelp(obj, key::AbstractString) = new(obj, (key,))
    LazyHelp(obj, key1::AbstractString, key2::AbstractString) = new(obj, (key1, key2))
    LazyHelp(obj, keys::AbstractString...) = new(obj, keys)
end
function Base.show(io::IO, ::MIME"text/plain", help::LazyHelp)
    obj = help.obj
    for key in help.keys
        obj = pygetattr(obj, key)
    end
    if pyhasattr(obj, "__doc__")
        print(io, pyconvert(String, obj.__doc__))
    else
        print(io, "no Python docstring found for ", obj)
    end
end
Base.show(io::IO, help::LazyHelp) = show(io, "text/plain", help)
function Base.Docs.catdoc(helps::LazyHelp...)
    Base.Docs.Text() do io
        for help in helps
            show(io, "text/plain", help)
        end
    end
end

# See https://github.com/rafaqz/DimensionalData.jl/blob/4814246/src/Dimensions/dimension.jl#L382-L398
function pybasic(type, field)
    return quote
        using PythonCall: pyhasattr
        import PythonCall: Py, pyconvert
        # Code from https://github.com/stevengj/PythonPlot.jl/blob/d58f6c4/src/PythonPlot.jl#L65-L72
        Py(x::$type) = getfield(x, $(QuoteNode(Symbol(field))))
        pyconvert(::Type{$type}, py::Py) = $type(py)
        Base.:(==)(x::$type, y::$type) = pyconvert(Bool, Py(x) == Py(y))
        Base.isequal(x::$type, y::$type) = isequal(Py(x), Py(y))
        Base.hash(x::$type, h::UInt) = hash(Py(x), h)
        Base.Docs.doc(x::$type) = Text(pyconvert(String, Py(x).__doc__))
        # Code from https://github.com/stevengj/PythonPlot.jl/blob/d58f6c4/src/PythonPlot.jl#L75-L80
        Base.getproperty(x::$type, s::Symbol) = getproperty(Py(x), s)
        Base.getproperty(x::$type, s::AbstractString) = getproperty(Py(x), Symbol(s))
        Base.hasproperty(x::$type, s::Symbol) = pyhasattr(Py(x), s)
        Base.propertynames(x::$type) = propertynames(Py(x))
    end
end

"""
    @pyimmutable type [supertype] [field]

Construct an immutable wrapper for a Python object, with a supertype and a default fieldname.
"""
macro pyimmutable(type, supertype=Any, field=:py)
    return esc(
        quote
            using PythonCall: Py
            struct $type <: $supertype
                $field::Py
            end
            $(pybasic(type, field))
        end,
    )
end

"""
    @pymutable type [supertype] [field]

Construct a mutable wrapper for a Python object, with a supertype and a default fieldname.
"""
macro pymutable(type, supertype=Any, field=:py)
    return esc(
        quote
            using PythonCall: Py
            mutable struct $type <: $supertype
                $field::Py
            end
            $(pybasic(type, field))
            # Code from https://github.com/stevengj/PythonPlot.jl/blob/d58f6c4/src/PythonPlot.jl#L77-L78
            Base.setproperty!(x::$type, s::Symbol, v) = setproperty!(Py(x), s, v)
            Base.setproperty!(x::$type, s::AbstractString, v) = setproperty!(Py(x), Symbol(s), v)
        end,
    )
end

# See https://github.com/stevengj/PythonPlot.jl/issues/19
"""
    @pycallable type

Make an existing type callable.
"""
macro pycallable(type)
    return quote
        using PythonCall: Py
        import PythonCall: pycall
        pycall(x::$type, args...; kws...) = pycall(Py(x), args...; kws...)
        (x::$type)(args...; kws...) = pycall(Py(x), args...; kws...)
    end
end

end
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
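A minimal usage sketch of the macros above (assuming a working PythonCall installation; wrapping a compiled pattern from Python's `re` module is just an illustration, not taken from the package docs):

```julia
using PythonCall
using PythonCallHelpers

# An immutable Julia wrapper whose `py` field holds the Python object.
@pyimmutable PyPattern Any py

pat = PyPattern(pyimport("re").compile("[a-z]+"))

# Property access is forwarded to the wrapped Python object:
m = pat.match("hello world")
pyconvert(String, m.group(0))  # "hello"
```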
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
test/runtests.jl
code
440
using PythonCallHelpers
using Test

@testset "PythonCallHelpers.jl" begin
    @testset "Test subtyping from `Any`" begin
        @pymutable T Any o
        @test supertype(T) == Any
        @test fieldnames(T) == (:o,)
    end
    @testset "Test subtyping from an abstract type" begin
        abstract type MyType end
        @pyimmutable My MyType o
        @test supertype(My) == MyType
        @test fieldnames(My) == (:o,)
    end
end
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
README.md
docs
4084
# PythonCallHelpers

| **Documentation** | **Build Status** | **Others** |
| :---------------: | :--------------: | :--------: |
| [![Stable][docs-stable-img]][docs-stable-url] [![Dev][docs-dev-img]][docs-dev-url] | [![Build Status][gha-img]][gha-url] [![Build Status][appveyor-img]][appveyor-url] [![Build Status][cirrus-img]][cirrus-url] [![pipeline status][gitlab-img]][gitlab-url] [![Coverage][codecov-img]][codecov-url] | [![GitHub license][license-img]][license-url] [![Code Style: Blue][style-img]][style-url] |

[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://singularitti.github.io/PythonCallHelpers.jl/stable
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://singularitti.github.io/PythonCallHelpers.jl/dev
[gha-img]: https://github.com/singularitti/PythonCallHelpers.jl/workflows/CI/badge.svg
[gha-url]: https://github.com/singularitti/PythonCallHelpers.jl/actions
[appveyor-img]: https://ci.appveyor.com/api/projects/status/github/singularitti/PythonCallHelpers.jl?svg=true
[appveyor-url]: https://ci.appveyor.com/project/singularitti/PythonCallHelpers-jl
[cirrus-img]: https://api.cirrus-ci.com/github/singularitti/PythonCallHelpers.jl.svg
[cirrus-url]: https://cirrus-ci.com/github/singularitti/PythonCallHelpers.jl
[gitlab-img]: https://gitlab.com/singularitti/PythonCallHelpers.jl/badges/main/pipeline.svg
[gitlab-url]: https://gitlab.com/singularitti/PythonCallHelpers.jl/-/pipelines
[codecov-img]: https://codecov.io/gh/singularitti/PythonCallHelpers.jl/branch/main/graph/badge.svg
[codecov-url]: https://codecov.io/gh/singularitti/PythonCallHelpers.jl
[license-img]: https://img.shields.io/github/license/singularitti/PythonCallHelpers.jl
[license-url]: https://github.com/singularitti/PythonCallHelpers.jl/blob/main/LICENSE
[style-img]: https://img.shields.io/badge/code%20style-blue-4495d1.svg
[style-url]: https://github.com/invenia/BlueStyle

The code is [hosted on GitHub](https://github.com/singularitti/PythonCallHelpers.jl), with some continuous integration services to test its validity. This repository is created and maintained by [@singularitti](https://github.com/singularitti). You are very welcome to contribute.

## Installation

The package can be installed with the Julia package manager. From the Julia REPL, type `]` to enter the Pkg REPL mode and run:

```
pkg> add PythonCallHelpers
```

Or, equivalently, via the [`Pkg` API](https://pkgdocs.julialang.org/v1/getting-started/):

```julia
julia> import Pkg; Pkg.add("PythonCallHelpers")
```

## Documentation

- [**STABLE**][docs-stable-url] — **documentation of the most recently tagged version.**
- [**DEV**][docs-dev-url] — _documentation of the in-development version._

## Project status

The package is tested against, and being developed for, Julia `1.6` and above on Linux, macOS, and Windows.

## Questions and contributions

You are welcome to post usage questions on [our discussion page][discussions-url]. Contributions are very welcome, as are feature requests and suggestions. Please open an [issue][issues-url] if you encounter any problems. The [Contributing](@ref) page has guidelines that should be followed when opening pull requests and contributing code.

[discussions-url]: https://github.com/singularitti/PythonCallHelpers.jl/discussions
[issues-url]: https://github.com/singularitti/PythonCallHelpers.jl/issues
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/index.md
docs
2021
```@meta
CurrentModule = PythonCallHelpers
```

# PythonCallHelpers

Documentation for [PythonCallHelpers](https://github.com/singularitti/PythonCallHelpers.jl).

See the [Index](@ref main-index) for the complete list of documented functions and types.

The code is [hosted on GitHub](https://github.com/singularitti/PythonCallHelpers.jl), with some continuous integration services to test its validity. This repository is created and maintained by [@singularitti](https://github.com/singularitti). You are very welcome to contribute.

## Installation

The package can be installed with the Julia package manager. From the Julia REPL, type `]` to enter the Pkg REPL mode and run:

```julia
pkg> add PythonCallHelpers
```

Or, equivalently, via the `Pkg` API:

```@repl
import Pkg; Pkg.add("PythonCallHelpers")
```

## Documentation

- [**STABLE**](https://singularitti.github.io/PythonCallHelpers.jl/stable) — **documentation of the most recently tagged version.**
- [**DEV**](https://singularitti.github.io/PythonCallHelpers.jl/dev) — _documentation of the in-development version._

## Project status

The package is tested against, and being developed for, Julia `1.6` and above on Linux, macOS, and Windows.

## Questions and contributions

Usage questions can be posted on [our discussion page](https://github.com/singularitti/PythonCallHelpers.jl/discussions). Contributions are very welcome, as are feature requests and suggestions. Please open an [issue](https://github.com/singularitti/PythonCallHelpers.jl/issues) if you encounter any problems. The [Contributing](@ref) page has a few guidelines that should be followed when opening pull requests and contributing code.

## Manual outline

```@contents
Pages = [
    "installation.md",
    "developers/contributing.md",
    "developers/style-guide.md",
    "developers/design-principles.md",
    "troubleshooting.md",
]
Depth = 3
```

## Library outline

```@contents
Pages = ["public.md"]
```

### [Index](@id main-index)

```@index
Pages = ["public.md"]
```
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/installation.md
docs
5269
# [Installation Guide](@id installation)

Here are the installation instructions for the package [PythonCallHelpers](https://github.com/singularitti/PythonCallHelpers.jl). If you have trouble installing it, please refer to our [Troubleshooting](@ref) page for more information.

## Install Julia

First, you should install [Julia](https://julialang.org/). We recommend downloading it from [its official website](https://julialang.org/downloads/). Please follow the detailed instructions on its website if you have to [build Julia from source](https://docs.julialang.org/en/v1/devdocs/build/build/). Some computing centers provide preinstalled Julia. Please contact your administrator for more information in that case. Here's some additional information on [how to set up Julia on HPC systems](https://github.com/hlrs-tasc/julia-on-hpc-systems).

If you have [Homebrew](https://brew.sh) installed, [open `Terminal.app`](https://support.apple.com/guide/terminal/open-or-quit-terminal-apd5265185d-f365-44cb-8b09-71a064a42125/mac) and type

```shell
brew install julia
```

to install it as a [formula](https://docs.brew.sh/Formula-Cookbook). If you are on macOS and want to install it as a prebuilt binary app, type

```shell
brew install --cask julia
```

instead.

If you want to install multiple Julia versions in the same operating system, a recommended way is to use a version manager such as [`juliaup`](https://github.com/JuliaLang/juliaup). First, [install `juliaup`](https://github.com/JuliaLang/juliaup#installation). Then, run

```shell
juliaup add release
juliaup default release
```

to configure the `julia` command to start the latest stable version of Julia (this is also the default value). There is a [short video introduction to `juliaup`](https://youtu.be/14zfdbzq5BM) made by its authors.

### Which version should I pick?

You can install the "Current stable release" or the "Long-term support (LTS) release".

- The "Current stable release" is the latest release of Julia. It has access to newer features, and is likely faster.
- The "Long-term support release" is an older version of Julia that has continued to receive bug and security fixes. However, it may not have the latest features or performance improvements.

For most users, you should install the "Current stable release", and whenever Julia releases a new version of the current stable release, you should update your version of Julia. Note that any code you write on one version of the current stable release will continue to work on all subsequent releases.

For users in restricted software environments (e.g., your enterprise IT controls what software you can install), you may be better off installing the long-term support release because you will not have to update Julia as frequently.

Versions higher than `v1.3`, especially `v1.6`, are strongly recommended. This package may not work on `v1.0` and below. Since the Julia team has set `v1.6` as the LTS release, we will gradually drop support for versions below `v1.6`.

Julia and Julia packages support multiple operating systems and CPU architectures; check [this table](https://julialang.org/downloads/#supported_platforms) to see if it can be installed on your machine. For Mac computers with M-series processors, this package and its dependencies may not work. Please install the Intel-compatible version of Julia (for macOS x86-64) if any platform-related error occurs.

## Install PythonCallHelpers

Here we use [macOS](https://en.wikipedia.org/wiki/MacOS) as the example platform for the following steps:

1. Open `Terminal.app`, and type `julia` to start an interactive session (known as the [REPL](https://docs.julialang.org/en/v1/stdlib/REPL/)).
2. Run the following commands and wait for them to finish:

   ```julia-repl
   julia> using Pkg

   julia> Pkg.update()

   julia> Pkg.add("PythonCallHelpers")
   ```
3. Run

   ```julia-repl
   julia> using PythonCallHelpers
   ```

   and have fun!
4. While using the package, please keep this Julia session alive; restarting a session costs some time.

If you want to install the latest in-development (probably buggy) version of PythonCallHelpers, type

```@repl
using Pkg
Pkg.update()
pkg"add https://github.com/singularitti/PythonCallHelpers.jl"
```

in the second step above.

## Update PythonCallHelpers

Please [watch](https://docs.github.com/en/account-and-profile/managing-subscriptions-and-notifications-on-github/setting-up-notifications/configuring-notifications#configuring-your-watch-settings-for-an-individual-repository) our [GitHub repository](https://github.com/singularitti/PythonCallHelpers.jl) for new releases. Once we release a new version, you can update PythonCallHelpers by typing

```@repl
using Pkg
Pkg.update("PythonCallHelpers")
Pkg.gc()
```

in the Julia REPL.

## Uninstall and reinstall PythonCallHelpers

Sometimes errors may occur if the package is not properly installed. In this case, you may want to uninstall and reinstall the package. Here is how to do that:

1. To uninstall, in a Julia session, run

   ```julia-repl
   julia> using Pkg

   julia> Pkg.rm("PythonCallHelpers")

   julia> Pkg.gc()
   ```
2. Press `ctrl+d` to quit the current session. Start a new Julia session and reinstall PythonCallHelpers.
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/public.md
docs
164
```@meta
CurrentModule = PythonCallHelpers
```

# API Reference

```@contents
Pages = ["public.md"]
Depth = 3
```

```@docs
@pyimmutable
@pymutable
@pycallable
```
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/troubleshooting.md
docs
2158
# Troubleshooting

```@contents
Pages = ["troubleshooting.md"]
Depth = 3
```

This page collects some errors you may encounter and tricks for fixing them. If you have questions about how to use this code, you are welcome to [discuss with us](https://github.com/singularitti/PythonCallHelpers.jl/discussions). If you have additional tips, please either [report an issue](https://github.com/singularitti/PythonCallHelpers.jl/issues/new) or [submit a PR](https://github.com/singularitti/PythonCallHelpers.jl/compare) with suggestions.

## Installation problems

### Cannot find the `julia` executable

Make sure you have Julia installed in your environment. Please download the latest [stable version](https://julialang.org/downloads/#current_stable_release) for your platform. If you are using a *nix system, the recommended way is to use [Juliaup](https://github.com/JuliaLang/juliaup). If you do not want to install Juliaup or you are using other platforms that Julia supports, download the corresponding binaries. Then, create a symbolic link to the Julia executable. If the path is not in your `$PATH` environment variable, export it to your `$PATH`.

Some clusters, like [Habanero](https://confluence.columbia.edu/confluence/display/rcs/Habanero+HPC+Cluster+User+Documentation), [Comet](https://www.sdsc.edu/support/user_guides/comet.html), or [Expanse](https://www.sdsc.edu/services/hpc/expanse/index.html), already have Julia installed as a module; you may just run `module load julia` to use it. If not, either install it yourself or contact your administrator.

## Loading PythonCallHelpers

### Julia compiles/loads slowly

First, we recommend you download the latest version of Julia. Usually, the newest version has the best performance.

If you just want Julia to do a simple task only once, you could start the Julia REPL with

```bash
julia --compile=min
```

to minimize compilation, or

```bash
julia --optimize=0
```

to minimize optimizations, or use both. Or you could make a system image (one way to build such an image is sketched below) and run Julia with

```bash
julia --sysimage custom-image.so
```

See [Fredrik Ekre's talk](https://youtu.be/IuwxE3m0_QQ?t=313) for details.
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
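One way to build the system image mentioned on the troubleshooting page above is PackageCompiler.jl (a sketch; baking PythonCallHelpers into the image is just an example, and the output name matches the `--sysimage` flag shown there):

```julia
using PackageCompiler

# Compile the listed packages into a custom system image; starting Julia
# afterwards with `julia --sysimage custom-image.so` skips their recompilation.
create_sysimage([:PythonCallHelpers]; sysimage_path="custom-image.so")
```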
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/developers/contributing.md
docs
8613
# Contributing

```@contents
Pages = ["contributing.md"]
Depth = 3
```

Welcome! This document explains some ways you can contribute to PythonCallHelpers.

## Code of conduct

This project and everyone participating in it is governed by the ["Contributor Covenant Code of Conduct"](https://github.com/MineralsCloud/.github/blob/main/CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code.

## Join the community forum

First up, join the [community forum](https://github.com/singularitti/PythonCallHelpers.jl/discussions). The forum is a good place to ask questions about how to use PythonCallHelpers. You can also use the forum to discuss possible feature requests and bugs before raising a GitHub issue (more on this below). Aside from asking questions, the easiest way you can contribute to PythonCallHelpers is to help answer questions on the forum!

## Improve the documentation

Chances are, if you asked (or answered) a question on the community forum, then it is a sign that the [documentation](https://singularitti.github.io/PythonCallHelpers.jl/dev/) could be improved. Moreover, since it is your question, you are probably the best-placed person to improve it! The docs are written in Markdown and are built using [Documenter.jl](https://github.com/JuliaDocs/Documenter.jl). You can find the source of all the docs [here](https://github.com/singularitti/PythonCallHelpers.jl/tree/main/docs).

If your change is small (like fixing typos, or one or two sentence corrections), the easiest way to do this is via GitHub's online editor. (GitHub has [help](https://help.github.com/articles/editing-files-in-another-user-s-repository/) on how to do this.) If your change is larger, or touches multiple files, you will need to make the change locally and then use Git to submit a [pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests). (See [Contribute code to PythonCallHelpers](@ref) below for more on this.)

## File a bug report

Another way to contribute to PythonCallHelpers is to file [bug reports](https://github.com/singularitti/PythonCallHelpers.jl/issues/new?template=bug_report.md). Make sure you read the info in the box where you write the body of the issue before posting. You can also find a copy of that info [here](https://github.com/singularitti/PythonCallHelpers.jl/blob/main/.github/ISSUE_TEMPLATE/bug_report.md).

!!! tip
    If you're unsure whether you have a real bug, post on the [community forum](https://github.com/singularitti/PythonCallHelpers.jl/discussions) first. Someone will either help you fix the problem, or let you know the most appropriate place to open a bug report.

## Contribute code to PythonCallHelpers

Finally, you can also contribute code to PythonCallHelpers!

!!! warning
    If you do not have experience with Git, GitHub, and Julia development, the first steps can be a little daunting. However, there are lots of tutorials available online, including:

    - [GitHub](https://guides.github.com/activities/hello-world/)
    - [Git and GitHub](https://try.github.io/)
    - [Git](https://git-scm.com/book/en/v2)
    - [Julia package development](https://docs.julialang.org/en/v1/stdlib/Pkg/#Developing-packages-1)

Once you are familiar with Git and GitHub, the workflow for contributing code to PythonCallHelpers is similar to the following:

### Step 1: decide what to work on

The first step is to find an [open issue](https://github.com/singularitti/PythonCallHelpers.jl/issues) (or open a new one) for the problem you want to solve. Then, _before_ spending too much time on it, discuss what you are planning to do in the issue to see if other contributors are fine with your proposed changes. Getting feedback early can improve code quality and avoid time spent writing code that does not get merged into PythonCallHelpers.

!!! tip
    At this point, remember to be patient and polite; you may get a _lot_ of comments on your issue! However, do not be afraid! Comments mean that people are willing to help you improve the code that you are contributing to PythonCallHelpers.

### Step 2: fork PythonCallHelpers

Go to [https://github.com/singularitti/PythonCallHelpers.jl](https://github.com/singularitti/PythonCallHelpers.jl) and click the "Fork" button in the top-right corner. This will create a copy of PythonCallHelpers under your GitHub account.

### Step 3: install PythonCallHelpers locally

Similar to [Installation](@ref), open the Julia REPL and run:

```@repl
using Pkg
Pkg.update()
Pkg.develop("PythonCallHelpers")
```

Then the package will be cloned to your local machine. On *nix systems, the default path is `~/.julia/dev/PythonCallHelpers` unless you modify the [`JULIA_DEPOT_PATH`](http://docs.julialang.org/en/v1/manual/environment-variables/#JULIA_DEPOT_PATH-1) environment variable. If you're on Windows, this will be `C:\\Users\\<my_name>\\.julia\\dev\\PythonCallHelpers`. In the following text, we will call it `PKGROOT`.

Go to `PKGROOT`, start a new Julia session, and run

```@repl
using Pkg
Pkg.instantiate()
```

to instantiate the project.

### Step 4: checkout a new branch

!!! note
    In the following, replace any instance of `GITHUB_ACCOUNT` with your GitHub username.

The next step is to check out a development branch. In a terminal (or command prompt on Windows), run:

```shell
cd ~/.julia/dev/PythonCallHelpers

git remote add GITHUB_ACCOUNT https://github.com/GITHUB_ACCOUNT/PythonCallHelpers.jl.git

git checkout main
git pull

git checkout -b my_new_branch
```

### Step 5: make changes

Now make any changes to the source code inside the `~/.julia/dev/PythonCallHelpers` directory.

Make sure you:

- Follow our [Style Guide](@ref style) and [run `JuliaFormatter.jl`](@ref formatter)
- Add tests and documentation for any changes or new features

!!! tip
    When you change the source code, you'll need to restart Julia for the changes to take effect. This is a pain, so install [Revise.jl](https://github.com/timholy/Revise.jl).

### Step 6a: test your code changes

To test that your changes work, run the PythonCallHelpers test suite by opening Julia and running:

```@repl
cd("~/.julia/dev/PythonCallHelpers")
using Pkg
Pkg.activate(".")
Pkg.test()
```

!!! warning
    Running the tests might take a long time.

!!! tip
    If you're using `Revise.jl`, you can also run the tests by calling `include`:

    ```julia
    include("test/runtests.jl")
    ```

    This can be faster if you want to re-run the tests multiple times.

### Step 6b: test your documentation changes

Open Julia, then run:

```@repl
cd("~/.julia/dev/PythonCallHelpers/docs")
using Pkg
Pkg.activate(".")
include("make.jl")
```

After a while, a folder `PKGROOT/docs/build` will appear. Open `PKGROOT/docs/build/index.html` with your favorite browser, and have fun!

!!! warning
    Building the documentation might take a long time.

!!! tip
    If there's a problem with the tests that you don't know how to fix, don't worry. Continue to step 7, and one of the PythonCallHelpers contributors will comment on your pull request telling you how to fix things.

### Step 7: make a pull request

Once you've made changes, you're ready to push the changes to GitHub. Run:

```shell
cd ~/.julia/dev/PythonCallHelpers
git add .
git commit -m "A descriptive message of the changes"
git push -u GITHUB_ACCOUNT my_new_branch
```

Then go to [https://github.com/singularitti/PythonCallHelpers.jl/pulls](https://github.com/singularitti/PythonCallHelpers.jl/pulls) and follow the instructions that pop up to open a pull request.

### Step 8: respond to comments

At this point, remember to be patient and polite; you may get a _lot_ of comments on your pull request! However, do not be afraid! A lot of comments means that people are willing to help you improve the code that you are contributing to PythonCallHelpers.

To respond to the comments, go back to step 5, make any changes, test the changes in step 6, and then make a new commit in step 7. Your PR will automatically update.

### Step 9: cleaning up

Once the PR is merged, clean up your Git repository, ready for the next contribution!

```shell
cd ~/.julia/dev/PythonCallHelpers
git checkout main
git pull
```

!!! note
    If you have suggestions to improve this guide, please make a pull request! It's particularly helpful if you do this after your first pull request, because you'll know all the parts that could be explained better.

Thanks for contributing to PythonCallHelpers!
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/developers/design-principles.md
docs
15492
# Design Principles

```@contents
Pages = ["design-principles.md"]
Depth = 3
```

We adopt some [`SciML`](https://sciml.ai/) design [guidelines](https://github.com/SciML/SciMLStyle) here. Please read them before contributing!

## Consistency vs adherence

According to PEP8:

> A style guide is about consistency. Consistency with this style guide is important.
> Consistency within a project is more important. Consistency within one module or
> function is the most important.
>
> However, know when to be inconsistent—sometimes style guide recommendations just aren't
> applicable. When in doubt, use your best judgment. Look at other examples and decide what
> looks best. And don't hesitate to ask!

## Community contribution guidelines

For a comprehensive set of community contribution guidelines, refer to [ColPrac](https://github.com/SciML/ColPrac). A relevant point to highlight is that PRs should do one thing. In the context of style, this means that PRs which update the style of a package's code should not be mixed with fundamental code contributions. This separation makes it easier to ensure that large style improvements are isolated from substantive (and potentially breaking) code changes.

## Open source contributions are allowed to start small and grow over time

If the standard for code contributions is that every PR needs to support every possible input type that anyone can think of, the barrier would be too high for newcomers. Instead, the principle is to be as correct as possible to begin with, and grow the generic support over time. All recommended functionality should be tested; any known generality issues should be documented in an issue (and with a `@test_broken` test when possible). However, a function which is known to not be GPU-compatible is not grounds to block merging; rather, it is an encouragement for a follow-up PR to improve the general type support!

## Generic code is preferred unless code is known to be specific

For example, the code:

```@repl
function f(A, B)
    for i in 1:length(A)
        A[i] = A[i] + B[i]
    end
end
```

would not be preferred for two reasons. One is that it assumes `A` uses one-based indexing, which would fail in cases like [OffsetArrays](https://github.com/JuliaArrays/OffsetArrays.jl) and [FFTViews](https://github.com/JuliaArrays/FFTViews.jl). Another issue is that it requires indexing, while not all array types support indexing (for example, [CuArrays](https://github.com/JuliaGPU/CuArrays.jl)). A more generic, compatible implementation of this function would be to use broadcast, for example:

```@repl
function f(A, B)
    @. A = A + B
end
```

which would allow support for a wider variety of array types.

## Internal types should match the types used by users when possible

If `f(A)` takes the input of some collections and computes an output from those collections, then it should be expected that if the user gives `A` as an `Array`, the computation should be done via `Array`s. If `A` was a `CuArray`, then it should be expected that the computation should be internally done using a `CuArray` (or appropriately error if not supported). For these reasons, constructing arrays via generic methods, like `similar(A)`, is preferred when writing `f` instead of using non-generic constructors like `Array(undef, size(A))` unless the function is documented as being non-generic.

## Trait definition and adherence to generic interface is preferred when possible

Julia provides many interfaces, for example:

- [Iteration](https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-iteration)
- [Indexing](https://docs.julialang.org/en/v1/manual/interfaces/#Indexing)
- [Broadcast](https://docs.julialang.org/en/v1/manual/interfaces/#man-interfaces-broadcasting)

Those interfaces should be followed when possible. For example, when defining broadcast overloads, one should implement a `BroadcastStyle` as suggested by the documentation instead of simply attempting to bypass the broadcast system via `copyto!` overloads.

When interface functions are missing, these should be added to Base Julia or an interface package, like [ArrayInterface.jl](https://github.com/JuliaArrays/ArrayInterface.jl). Such traits should be declared and used when appropriate. For example, if a line of code requires mutation, the trait `ArrayInterface.ismutable(A)` should be checked before attempting to mutate, and informative error messages should be written to capture the immutable case (or an alternative code path which does not mutate should be given).

One example of this principle is demonstrated in the generation of Jacobian matrices. In many scientific applications, one may wish to generate a Jacobian cache from the user's input `u0`. A naive way to generate this Jacobian is `J = similar(u0, length(u0), length(u0))`. However, this will generate a Jacobian `J` such that `J isa Matrix`, even when `u0` is a more specialized array type, so the cache no longer matches the user's input type.

## Macros should be limited and only be used for syntactic sugar

Macros define new syntax, and for this reason they tend to be less composable than other coding styles and require prior familiarity to be easily understood. One principle to keep in mind is, "can the person reading the code easily picture what code is being generated?". For example, a user of Soss.jl may not know what code is being generated by:

```julia
@model (x, α) begin
    σ ~ Exponential()
    β ~ Normal()
    y ~ For(x) do xj
        Normal(α + β * xj, σ)
    end
    return y
end
```

and thus using such a macro as the interface is not preferred when possible. However, a macro like [`@muladd`](https://github.com/SciML/MuladdMacro.jl) is trivial to picture in code (it recursively transforms `a*b + c` to `muladd(a,b,c)` for more [accuracy and efficiency](https://en.wikipedia.org/wiki/Multiply%E2%80%93accumulate_operation)), so using such a macro, for example:

```julia
julia> @macroexpand(@muladd k3 = f(t + c3 * dt, @. uprev + dt * (a031 * k1 + a032 * k2)))
:(k3 = f((muladd)(c3, dt, t), (muladd).(dt, (muladd).(a032, k2, (*).(a031, k1)), uprev)))
```

is recommended. Some macros in this category are:

- `@inbounds`
- [`@muladd`](https://github.com/SciML/MuladdMacro.jl)
- `@view`
- [`@named`](https://github.com/SciML/ModelingToolkit.jl)
- `@.`
- [`@..`](https://github.com/YingboMa/FastBroadcast.jl)

Some performance macros, like `@simd`, `@threads`, or [`@turbo` from LoopVectorization.jl](https://github.com/JuliaSIMD/LoopVectorization.jl), make an exception in that their generated code may be foreign to many users. However, they are still classified as appropriate uses, as they are syntactic sugar that does (or should) not change the behavior of the program in measurable ways other than performance.

## Errors should be caught as high as possible, and error messages should be contextualized for newcomers

Whenever possible, defensive programming should be used to check for potential errors before they are encountered deeper within a package. For example, if one knows that `f(u0, p)` will error unless `u0` is the size of `p`, this should be caught at the start of the function to throw a domain-specific error, for example "parameters and initial condition should be the same size".

## Subpackaging and interface packages is preferred over conditional modules via Requires.jl

Requires.jl should be avoided at all costs. If an interface package exists, such as [ChainRulesCore.jl](https://github.com/JuliaDiff/ChainRulesCore.jl) for defining automatic differentiation rules without requiring a dependency on the whole ChainRules.jl system, or [RecipesBase.jl](https://github.com/JuliaPlots/RecipesBase.jl) which allows for defining Plots.jl plot recipes without a dependency on Plots.jl, a direct dependency on these interface packages is preferred.

Otherwise, instead of resorting to a conditional dependency using Requires.jl, it is preferred that one creates subpackages, i.e. smaller independent packages kept within the same GitHub repository with independent versioning and package management. An example of this is seen in [Optimization.jl](https://github.com/SciML/Optimization.jl), which has subpackages like [OptimizationBBO.jl](https://github.com/SciML/Optimization.jl/tree/master/lib/OptimizationBBO) for BlackBoxOptim.jl support.

Some important interface packages to know about are:

- [ChainRulesCore.jl](https://github.com/JuliaDiff/ChainRulesCore.jl)
- [RecipesBase.jl](https://github.com/JuliaPlots/RecipesBase.jl)
- [ArrayInterface.jl](https://github.com/JuliaArrays/ArrayInterface.jl)
- [CommonSolve.jl](https://github.com/SciML/CommonSolve.jl)
- [SciMLBase.jl](https://github.com/SciML/SciMLBase.jl)

## Functions should either attempt to be non-allocating and reuse caches, or treat inputs as immutable

Mutating code and non-mutating code fall into different worlds. When code is fully immutable, the compiler can better reason about dependencies, optimize the code, and check for correctness. However, many times code making the fullest use of mutation can outperform even what the best compilers of today can generate. That said, the worst of all worlds is when code mixes mutation with non-mutating code. Not only is this a mishmash of coding styles, it has the potential non-locality and compiler-proof issues of mutating code while not fully benefiting from the mutation.

## Out-of-place and immutability is preferred when sufficiently performant

Mutation is used to get more performance by decreasing the amount of heap allocations. However, if it's not helpful for heap allocations in a given spot, do not use mutation. Mutation is scary and should be avoided unless it gives an immediate benefit. For example, if matrices are sufficiently large, then `A*B` is as fast as `mul!(C,A,B)`, and thus writing `A*B` is preferred (unless the rest of the function is being careful about being fully non-allocating, in which case this should be `mul!` for consistency).

Similarly, when defining types, using `struct` is preferred to `mutable struct` unless mutating the struct is a common occurrence. Even if mutating the struct is a common occurrence, see whether using [Setfield.jl](https://github.com/jw3126/Setfield.jl) is sufficient. The compiler will optimize the construction of immutable structs, and thus this can be more efficient if it's not too much of a code hassle.

## Tests should attempt to cover a wide gamut of input types

Code coverage numbers are meaningless if one does not consider the input types. For example, one can hit all the code with `Array`, but that does not test whether `CuArray` is compatible! Thus, it's always good to think of coverage not in terms of lines of code but in terms of type coverage. A good list of number types to think about are:

- `Float64`
- `Float32`
- `Complex`
- [`Dual`](https://github.com/JuliaDiff/ForwardDiff.jl)
- `BigFloat`

Array types to think about testing are:

- `Array`
- [`OffsetArray`](https://github.com/JuliaArrays/OffsetArrays.jl)
- [`CuArray`](https://github.com/JuliaGPU/CUDA.jl)

## When in doubt, a submodule should become a subpackage or separate package

Keep packages to one core idea. If there's something separate enough to be a submodule, could it instead be a separate, well-tested and documented package to be used by other packages? Most likely yes.

## Globals should be avoided whenever possible

Global variables should be avoided whenever possible. When required, global variables should be constants and have an all-uppercase name separated with underscores (e.g. `MY_CONSTANT`). They should be defined at the top of the file, immediately after imports and exports but before an `__init__` function. If you truly want mutable global style behavior, you may want to look into mutable containers.

## Type-stable and type-grounded code is preferred wherever possible

Type-stable and type-grounded code helps the compiler create not only more optimized code, but also code that is faster to compile. Always keep containers well-typed, functions specializing on the appropriate arguments, and types concrete.

## Closures should be avoided whenever possible

Closures can cause accidental type instabilities that are difficult to track down and debug; in the long run it saves time to always program defensively and avoid writing closures in the first place, even when a particular closure would not have been problematic. A similar argument applies to reading code with closures: if someone is looking for type instabilities, this is faster to do when code does not contain closures. Furthermore, if you want to update variables in an outer scope, do so explicitly with `Ref`s or self-defined structs. For example,

```julia
map(Base.Fix2(getindex, i), vector_of_vectors)
```

is preferred over

```julia
map(v -> v[i], vector_of_vectors)
```

or

```julia
[v[i] for v in vector_of_vectors]
```

## Numerical functionality should use the appropriate generic numerical interfaces

While you can use `A\b` to do a linear solve inside a package, that does not mean that you should. This interface is only sufficient for performing factorizations, and so that limits the scaling choices, the types of `A` that can be supported, etc. Instead, linear solves within packages should use LinearSolve.jl. Similarly, nonlinear solves should use NonlinearSolve.jl. Optimization should use Optimization.jl. And so on. This allows the full generic choice to be given to the user without depending on every solver package (effectively recreating the generic interfaces within each package).

## Functions should capture one underlying principle

Functions mean one thing. Every dispatch of `+` should be "the meaning of addition on these types". While in theory you could add dispatches to `+` that mean something different, that will fail in generic code for which `+` means addition. Thus, for generic code to work, code needs to adhere to one meaning for each function. Every dispatch should be an instantiation of that meaning.

## Internal choices should be exposed as options whenever possible

Whenever possible, numerical values and choices within scripts should be exposed as options to the user. This promotes code reusability beyond the few cases the author may have expected.

## Prefer code reuse over rewrites whenever possible

If a package has a function you need, use the package. Add a dependency if you need to. If the function is missing a feature, prefer to add that feature to said package and then add it as a dependency. If the dependency is potentially troublesome, for example because it has a high load time, prefer to spend time helping said package fix these issues and add the dependency. Only when it does not seem possible to make the package "good enough" should using the package be abandoned. If it is abandoned, consider building a new package for this functionality as you need it, and then make it a dependency.

## Prefer to not shadow functions

Two functions can have the same name in Julia by having different namespaces. For example, `X.f` and `Y.f` can be two different functions, with different dispatches, but the same name. This should be avoided whenever possible. Instead of creating `MyPackage.sort`, consider adding dispatches to `Base.sort` for your types if these new dispatches match the underlying principle of the function. If it doesn't, prefer to use a different name. While using `MyPackage.sort` is not conflicting, it is going to be confusing for most people unfamiliar with your code, so `MyPackage.special_sort` would be more helpful to newcomers reading the code (see the short sketch below).
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
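The sketch referenced at the end of the design-principles page above, with a hypothetical `Leaderboard` type (not part of any package here):

```julia
module MyPackage

export Leaderboard, special_sort

struct Leaderboard
    scores::Vector{Float64}
end

# Preferred: extend `Base.sort` when the new method matches sort's meaning.
Base.sort(lb::Leaderboard; kwargs...) = Leaderboard(sort(lb.scores; kwargs...))

# If the behavior differs from sort's meaning, use a distinct, descriptive
# name instead of shadowing with a conflicting `MyPackage.sort`.
special_sort(lb::Leaderboard) = Leaderboard(sort(lb.scores; rev=true))

end # module

using .MyPackage
sort(Leaderboard([3.0, 1.0, 2.0]))  # dispatches to the Base.sort extension
```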
[ "MIT" ]
0.1.0
6536386eaeea150a0b83074ea454fd70399ea9ae
docs/src/developers/style-guide.md
docs
2308
# [Style Guide](@id style)

```@contents
Pages = ["style.md"]
Depth = 3
```

This section describes the coding style rules that apply to our code and that we recommend you use as well. In some cases, our style guide diverges from Julia's official [Style Guide](https://docs.julialang.org/en/v1/manual/style-guide/) (please read it!). All such cases will be explicitly noted and justified.

Our style guide adopts many recommendations from the [BlueStyle](https://github.com/invenia/BlueStyle). Please read the [BlueStyle](https://github.com/invenia/BlueStyle) before contributing to this package. If you do not follow it, your pull requests may not be accepted.

!!! info
    The style guide is always a work in progress, and not all PythonCallHelpers code follows the rules. When modifying PythonCallHelpers, please fix the style violations of the surrounding code (i.e., leave the code tidier than when you started). If large changes are needed, consider separating them into another pull request.

## Formatting

### [Run JuliaFormatter](@id formatter)

PythonCallHelpers uses [JuliaFormatter](https://github.com/domluna/JuliaFormatter.jl) as an auto-formatting tool. We use the options contained in [`.JuliaFormatter.toml`](https://github.com/singularitti/PythonCallHelpers.jl/blob/main/.JuliaFormatter.toml).

To format your code, `cd` to the PythonCallHelpers directory, then run:

```@repl
using Pkg
Pkg.add("JuliaFormatter")
using JuliaFormatter: format
format("docs"); format("src"); format("test");
```

!!! info
    A continuous integration check verifies that all PRs made to PythonCallHelpers have passed the formatter.

The following sections outline extra style guide points that are not fixed automatically by JuliaFormatter.

### Use the Julia extension for Visual Studio Code

Please use [VS Code](https://code.visualstudio.com/) with the [Julia extension](https://marketplace.visualstudio.com/items?itemName=julialang.language-julia) to edit, format, and test your code. We do not recommend using other editors to edit your code for the time being. This extension already has [JuliaFormatter](https://github.com/domluna/JuliaFormatter.jl) integrated, so to format your code, follow the steps listed [here](https://www.julia-vscode.org/docs/stable/userguide/formatter/).
PythonCallHelpers
https://github.com/singularitti/PythonCallHelpers.jl.git
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
src/MatrixMarket.jl
code
208
module MatrixMarket

using SparseArrays
using LinearAlgebra

using TranscodingStreams, CodecZlib

export mmread, mmwrite, mminfo

include("mminfo.jl")
include("mmread.jl")
include("mmwrite.jl")

end # module
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
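A minimal round-trip sketch for the exported API (the file names are arbitrary):

```julia
using SparseArrays
using MatrixMarket

A = sparse([1, 2, 3], [2, 3, 1], [1.0, 2.5, -4.0], 3, 3)

mmwrite("example.mtx", A)  # coordinate/real/general format
B = mmread("example.mtx")  # returns a SparseMatrixCSC
@assert B == A

# A ".gz" suffix transparently compresses on write and decompresses on read:
mmwrite("example.mtx.gz", A)
```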
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
src/mminfo.jl
code
1825
""" mminfo(file) Read header information on the size and structure from file. The actual data matrix is not parsed. # Arguments - `file`: The filename or io stream. """ function mminfo(filename::String) stream = open(filename, "r") if endswith(filename, ".gz") stream = TranscodingStream(GzipDecompressor(), stream) end info = mminfo(stream) close(stream) return info end function mminfo(stream::IO) firstline = chomp(readline(stream)) if !startswith(firstline, "%%MatrixMarket") throw(FileFormatException("Expected start of header `%%MatrixMarket`")) end tokens = split(firstline) if length(tokens) != 5 throw(FileFormatException("Not enough words on first line, got $(length(tokens)) words")) end (head1, rep, field, symm) = map(lowercase, tokens[2:5]) if head1 != "matrix" throw(FileFormatException("Unknown MatrixMarket data type: $head1 (only `matrix` is supported)")) end dimline = readline(stream) # Skip all comments and empty lines while length(chomp(dimline)) == 0 || (length(dimline) > 0 && dimline[1] == '%') dimline = readline(stream) end rows, cols, entries = parse_dimension(dimline, rep) return rows, cols, entries, rep, field, symm end struct FileFormatException <: Exception msg::String end Base.showerror(io::IO, e::FileFormatException) = print(io, e.msg) function parse_dimension(line::String, rep::String) dims = map(x -> parse(Int, x), split(line)) if length(dims) < (rep == "coordinate" ? 3 : 2) throw(FileFormatException(string("Could not read in matrix dimensions from line: ", line))) end if rep == "coordinate" return dims[1], dims[2], dims[3] else return dims[1], dims[2], (dims[1] * dims[2]) end end
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
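A sketch of reading just the header (reusing the hypothetical `example.mtx` from the round-trip sketch above):

```julia
using MatrixMarket

rows, cols, entries, rep, field, symm = mminfo("example.mtx")
# (3, 3, 3, "coordinate", "real", "general") for the matrix written above;
# `entries` counts stored entries for coordinate files, rows*cols for dense ones.
println("$(rows)×$(cols) $field $symm matrix with $entries stored entries")
```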
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
src/mmread.jl
code
3866
""" mmread(filename, infoonly=false, retcoord=false) Read the contents of the Matrix Market file `filename` into a matrix, which will be either sparse or dense, depending on the Matrix Market format indicated by `coordinate` (coordinate sparse storage), or `array` (dense array storage). # Arguments - `filename::String`: The file to read. - `infoonly::Bool=false`: Only information on the size and structure is returned from reading the header. The actual data for the matrix elements are not parsed. - `retcoord::Bool`: If it is `true`, the rows, column and value vectors are returned along with the header information. """ function mmread(filename::String, infoonly::Bool=false, retcoord::Bool=false) stream = open(filename, "r") if endswith(filename, ".gz") stream = TranscodingStream(GzipDecompressor(), stream) end result = infoonly ? mminfo(stream) : mmread(stream, retcoord) close(stream) return result end function mmread(stream::IO, infoonly::Bool=false, retcoord::Bool=false) rows, cols, entries, rep, field, symm = mminfo(stream) infoonly && return rows, cols, entries, rep, field, symm T = parse_eltype(field) symfunc = parse_symmetric(symm) if rep == "coordinate" rn = Vector{Int}(undef, entries) cn = Vector{Int}(undef, entries) vals = Vector{T}(undef, entries) for i in 1:entries line = readline(stream) splits = find_splits(line, num_splits(T)) rn[i] = parse_row(line, splits) cn[i] = parse_col(line, splits, T) vals[i] = parse_val(line, splits, T) end result = retcoord ? (rn, cn, vals, rows, cols, entries, rep, field, symm) : symfunc(sparse(rn, cn, vals, rows, cols)) else vals = [parse(Float64, readline(stream)) for _ in 1:entries] A = reshape(vals, rows, cols) result = symfunc(A) end return result end function parse_eltype(field::String) if field == "real" return Float64 elseif field == "complex" return ComplexF64 elseif field == "integer" return Int64 elseif field == "pattern" return Bool else throw(FileFormatException("Unsupported field $field.")) end end function parse_symmetric(symm::String) if symm == "general" return identity elseif symm == "symmetric" || symm == "hermitian" return hermitianize! elseif symm == "skew-symmetric" return skewsymmetrize! else throw(FileFormatException("Unknown matrix symmetry: $symm.")) end end function hermitianize!(M::AbstractMatrix) M .+= tril(M, -1)' return M end function skewsymmetrize!(M::AbstractMatrix) M .-= tril(M, -1)' return M end parse_row(line, splits) = parse(Int, line[1:splits[1]]) parse_col(line, splits, ::Type{Bool}) = parse(Int, line[splits[1]:end]) parse_col(line, splits, eltype) = parse(Int, line[splits[1]:splits[2]]) function parse_val(line, splits, ::Type{ComplexF64}) real = parse(Float64, line[splits[2]:splits[3]]) imag = parse(Float64, line[splits[3]:length(line)]) return ComplexF64(real, imag) end parse_val(line, splits, ::Type{Bool}) = true parse_val(line, splits, ::Type{T}) where {T} = parse(T, line[splits[2]:length(line)]) num_splits(::Type{ComplexF64}) = 3 num_splits(::Type{Bool}) = 1 num_splits(elty) = 2 function find_splits(s::String, num) splits = Vector{Int}(undef, num) cur = 1 in_space = s[1] == '\t' || s[1] == ' ' @inbounds for i in 1:length(s) if s[i] == '\t' || s[i] == ' ' if !in_space in_space = true splits[cur] = i cur += 1 cur > num && break end else in_space = false end end splits end
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
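A sketch of the `retcoord` form, which hands back the raw coordinate data instead of an assembled matrix (same hypothetical file as above):

```julia
using MatrixMarket

rn, cn, vals, rows, cols, entries, rep, field, symm =
    mmread("example.mtx", false, true)

# rn, cn, vals are the row indices, column indices, and values of the stored
# entries, before any symmetry expansion; useful for custom assembly.
```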
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
src/mmwrite.jl
code
2067
""" mmwrite(filename, matrix) Write a sparse matrix to .mtx file format. # Arguments - `filename::String`: The file to write. - `matrix::SparseMatrixCSC`: The sparse matrix to write. """ function mmwrite(filename::String, matrix::SparseMatrixCSC) stream = open(filename, "w") if endswith(filename, ".gz") stream = TranscodingStream(GzipCompressor(), stream) end mmwrite(stream, matrix) close(stream) end function mmwrite(stream::IO, matrix::SparseMatrixCSC) nl = get_newline() elem = generate_eltype(eltype(matrix)) sym = generate_symmetric(matrix) # write header write(stream, "%%MatrixMarket matrix coordinate $elem $sym$nl") # only use lower triangular part of symmetric and Hermitian matrices if issymmetric(matrix) || ishermitian(matrix) matrix = tril(matrix) end # write matrix size and number of nonzeros write(stream, "$(size(matrix, 1)) $(size(matrix, 2)) $(nnz(matrix))$nl") rows = rowvals(matrix) vals = nonzeros(matrix) for i in 1:size(matrix, 2) for j in nzrange(matrix, i) entity = generate_entity(i, j, rows, vals, elem) write(stream, entity) end end end generate_eltype(::Type{<:Bool}) = "pattern" generate_eltype(::Type{<:Integer}) = "integer" generate_eltype(::Type{<:AbstractFloat}) = "real" generate_eltype(::Type{<:Complex}) = "complex" generate_eltype(elty) = error("Invalid matrix type") function generate_symmetric(m::AbstractMatrix) if issymmetric(m) return "symmetric" elseif ishermitian(m) return "hermitian" else return "general" end end function generate_entity(i, j, rows, vals, kind::String) nl = get_newline() if kind == "pattern" return "$(rows[j]) $i$nl" elseif kind == "complex" return "$(rows[j]) $i $(real(vals[j])) $(imag(vals[j]))$nl" else return "$(rows[j]) $i $(vals[j])$nl" end end function get_newline() if Sys.iswindows() return "\r\n" else return "\n" end end
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
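For reference, the writer above produces a plain-text file like this for the 3×3 matrix from the earlier round-trip sketch (header line, size line, then one `row col value` entry per stored nonzero, column by column):

```
%%MatrixMarket matrix coordinate real general
3 3 3
3 1 -4.0
1 2 1.0
2 3 2.5
```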
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
test/mtx.jl
code
3243
@testset "mtx" begin mtx_filename = joinpath(TEST_PATH, "data", "test.mtx") res = sparse( [5, 4, 1, 2, 6], [1, 5, 1, 4, 7], [1, 1, 1, 1, 1], 11, 12 ) testmatrices = download_unzip_nist_files() @testset "read/write mtx" begin rows, cols, entries, rep, field, symm = mminfo(mtx_filename) @test rows == 11 @test cols == 12 @test entries == 5 @test rep == "coordinate" @test field == "integer" @test symm == "general" A = mmread(mtx_filename) @test A isa SparseMatrixCSC @test A == res newfilename = replace(mtx_filename, "test.mtx" => "test_write.mtx") mmwrite(newfilename, res) f = open(mtx_filename) sha_test = bytes2hex(sha256(read(f, String))) close(f) f = open(newfilename) sha_new = bytes2hex(sha256(read(f, String))) close(f) @test sha_test == sha_new rm(newfilename) end @testset "read/write mtx.gz" begin gz_filename = mtx_filename * ".gz" rows, cols, entries, rep, field, symm = mminfo(gz_filename) @test rows == 11 @test cols == 12 @test entries == 5 @test rep == "coordinate" @test field == "integer" @test symm == "general" A = mmread(gz_filename) @test A isa SparseMatrixCSC @test A == res newfilename = replace(gz_filename, "test.mtx.gz" => "test_write.mtx.gz") mmwrite(newfilename, res) stream = GzipDecompressorStream(open(gz_filename)) adjusted_content = replace(read(stream, String), "\n" => get_newline()) sha_test = bytes2hex(sha256(adjusted_content)) close(stream) stream = GzipDecompressorStream(open(newfilename)) sha_new = bytes2hex(sha256(read(stream, String))) close(stream) @test sha_test == sha_new rm(newfilename) end @testset "read/write NIST mtx files" begin # verify mmread(mmwrite(A)) == A for filename in filter(t -> endswith(t, ".mtx"), readdir()) new_filename = replace(filename, ".mtx" => "_.mtx") A = MatrixMarket.mmread(filename) MatrixMarket.mmwrite(new_filename, A) new_A = MatrixMarket.mmread(new_filename) @test new_A == A rm(new_filename) end end @testset "read/write NIST mtx.gz files" begin for gz_filename in filter(t -> endswith(t, ".mtx.gz"), readdir()) mtx_filename = replace(gz_filename, ".mtx.gz" => ".mtx") # reading from .mtx and .mtx.gz must be identical A_gz = MatrixMarket.mmread(gz_filename) A = MatrixMarket.mmread(mtx_filename) @test A_gz == A # writing to .mtx and .mtx.gz must be identical new_filename = replace(gz_filename, ".mtx.gz" => "_.mtx.gz") mmwrite(new_filename, A) new_A = MatrixMarket.mmread(new_filename) @test new_A == A rm(new_filename) end end # clean up for filename in filter(t -> endswith(t, ".mtx"), readdir()) rm(filename) rm(filename * ".gz") end end
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
test/runtests.jl
code
319
using MatrixMarket using CodecZlib using Downloads using GZip using SparseArrays using SHA using Test include("test_utils.jl") const TEST_PATH = @__DIR__ const NIST_FILELIST = download_nist_filelist() tests = [ "mtx", ] @testset "MatrixMarket.jl" begin for t in tests include("$(t).jl") end end
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
test/test_utils.jl
code
1905
function get_newline()
    if Sys.iswindows()
        return "\r\n"
    else
        return "\n"
    end
end

function gunzip(fname)
    destname, ext = splitext(fname)
    if ext != ".gz"
        error("gunzip: $fname: unknown suffix, expected a .gz file")
    end

    open(destname, "w") do f
        GZip.open(fname) do g
            write(f, read(g, String))
        end
    end

    destname
end

function download_nist_filelist()
    isfile("matrices.html") ||
        Downloads.download("https://math.nist.gov/MatrixMarket/matrices.html", "matrices.html")

    matrixmarketdata = Any[]
    open("matrices.html") do f
        for line in readlines(f)
            if occursin("""<A HREF="/MatrixMarket/data/""", line)
                collectionname, setname, matrixname = split(split(line, '"')[2], '/')[4:6]
                matrixname = split(matrixname, '.')[1]
                push!(matrixmarketdata, (collectionname, setname, matrixname))
            end
        end
    end
    rm("matrices.html")

    return matrixmarketdata
end

function download_unzip_nist_files()
    # Download one matrix at random plus some specifically chosen ones.
    n = rand(1:length(NIST_FILELIST))
    testmatrices = [
        ("NEP", "mhd", "mhd1280b"),
        ("Harwell-Boeing", "acoust", "young4c"),
        ("Harwell-Boeing", "platz", "plsk1919"),
        NIST_FILELIST[n]
    ]

    for (collectionname, setname, matrixname) in testmatrices
        fn = string(collectionname, '_', setname, '_', matrixname)
        mtxfname = string(fn, ".mtx")
        if !isfile(mtxfname)
            url = "https://math.nist.gov/pub/MatrixMarket2/$collectionname/$setname/$matrixname.mtx.gz"
            gzfname = string(fn, ".mtx.gz")
            try
                Downloads.download(url, gzfname)
            catch
                continue
            end
            gunzip(gzfname)
        end
    end

    return testmatrices
end
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
[ "MIT" ]
0.5.0
903fd51345b94a3aa271118ca189d0c4a2cae2e3
README.md
docs
1344
# MatrixMarket [![Build Status](https://travis-ci.org/JuliaSparse/MatrixMarket.jl.svg?branch=master)](https://travis-ci.org/JuliaSparse/MatrixMarket.jl) Package to read/write matrices from/to files in the [Matrix Market native exchange format](http://math.nist.gov/MatrixMarket/formats.html#MMformat). The [Matrix Market](http://math.nist.gov/MatrixMarket/) is a NIST repository of "test data for use in comparative studies of algorithms for numerical linear algebra, featuring nearly 500 sparse matrices from a variety of applications, as well as matrix generation tools and services." Over time, the [Matrix Market's native exchange format](http://math.nist.gov/MatrixMarket/formats.html#MMformat) has become one of the _de facto_ standard file formats for exchanging matrix data. ## Usage ### Read using MatrixMarket M = MatrixMarket.mmread("myfile.mtx") `M` will be a sparse or dense matrix depending on whether the file contains a matrix in coordinate format or array format. The specific type of `M` may be `Symmetric` or `Hermitian` depending on the symmetry information contained in the file header. MatrixMarket.mmread("myfile.mtx", true) Returns raw data from the file header. Does not read in the actual matrix elements. ### Write MatrixMarket.mmwrite("myfile.mtx", M) `M` has to be a sparse matrix.
MatrixMarket
https://github.com/JuliaSparse/MatrixMarket.jl.git
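A round-trip sketch based on the usage above; the file name is a placeholder:

```julia
using MatrixMarket, SparseArrays

M = sprand(100, 100, 0.05)
MatrixMarket.mmwrite("roundtrip.mtx", M)   # placeholder file name
M2 = MatrixMarket.mmread("roundtrip.mtx")
@assert M2 == M
```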
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
benchmark/stack.jl
code
1709
using NiLangCore, BenchmarkTools

bg = BenchmarkGroup()

# pop!/push!
bg["NiLang"] = @benchmarkable begin
    @instr PUSH!(x)
    @instr POP!(x)
end seconds=1 setup=(x=3.0)

# @invcheckoff pop!/push!
bg["NiLang-@invcheckoff"] = @benchmarkable begin
    @instr @invcheckoff PUSH!(x)
    @instr @invcheckoff POP!(x)
end seconds=1 setup=(x=3.0)

# @invcheckoff @inbounds pop!/push!
bg["NiLang-@invcheckoff-@inbounds"] = @benchmarkable begin
    @instr @invcheckoff @inbounds PUSH!(x)
    @instr @invcheckoff @inbounds POP!(x)
end seconds=1 setup=(x=3.0)

# Julia pop!/push! on a typed vector
bg["Julia"] = @benchmarkable begin
    push!(stack, x)
    x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=Float64[])

# FastStack-inbounds-Any
bg["FastStack-inbounds-Any"] = @benchmarkable begin
    @inbounds push!(stack, x)
    @inbounds pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack(10))

# Julia pop!/push! on an untyped vector
bg["Julia-Any"] = @benchmarkable begin
    push!(stack, x)
    x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=Any[])

# setindex
bg["setindex"] = @benchmarkable begin
    stack[2] = x
    x = 0.0
    x = stack[2]
end seconds=1 setup=(x=3.0; stack=Float64[1.0, 2.0])

# setindex-inbounds
bg["setindex-inbounds"] = @benchmarkable begin
    @inbounds stack[2] = x
    x = 0.0
    @inbounds x = stack[2]
end seconds=1 setup=(x=3.0; stack=Float64[1.0, 2.0])

# FastStack
bg["FastStack"] = @benchmarkable begin
    push!(stack, x)
    x = 0.0
    x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack{Float64}(10))

# FastStack-inbounds
bg["FastStack-inbounds"] = @benchmarkable begin
    @inbounds push!(stack, x)
    x = 0.0
    @inbounds x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack{Float64}(10))

tune!(bg)
run(bg)
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
benchmark/try.jl
code
2297
using Zygote

f(x, y) = (x + exp(y), y)
invf(x, y) = (x - exp(y), y)

# ∂L/∂x2 = ∂L/∂x1*∂x1/∂x2 + ∂L/∂y1*∂y1/∂y2 = ∂L/∂x1*invf'(x2) + ∂L/∂y1*invf'(y2)
x1, y1 = 1.4, 4.4
x2, y2 = f(x1, y1)

function gf(x, y, gx, gy)
    x2, y2 = f(x, y)
    invJ1 = gradient((x2, y2) -> invf(x2, y2)[1], x2, y2)
    invJ2 = gradient((x2, y2) -> invf(x2, y2)[2], x2, y2)
    return (x2, y2, gx, gy)
end

gradient((x, y) -> invf(x, y)[1], x2, y2)

mutable struct A{T}
    x::T
end
Base.:*(x1::A, x2::A) = A(x1.x * x2.x)
Base.:+(x1::A, x2::A) = A(x1.x + x2.x)
Base.zero(::A{T}) where T = A(T(0))

struct A2{T}
    x::T
end
Base.:*(x1::A2, x2::A2) = A2(x1.x * x2.x)
Base.:+(x1::A2, x2::A2) = A2(x1.x + x2.x)
Base.zero(::A2{T}) where T = A2(T(0))

struct BG{T}
    x::T
    g::BG{T}
    BG(x::T) where T = new{T}(x)
end

mutable struct AG{T}
    x::T
    g::AG{T}
    AG(x::T) where T = new{T}(x)
    AG(x::T, g::TG) where {T,TG} = new{T}(x, T(g))
end
Base.:*(x1::AG, x2::AG) = AG(x1.x * x2.x)
Base.:+(x1::AG, x2::AG) = AG(x1.x + x2.x)
Base.zero(::AG{T}) where T = AG(T(0))
init(ag::AG{T}) where T = (ag.g = AG(T(0)))

using BenchmarkTools
ma = fill(A(1.0), 100, 100)
ma2 = fill(A2(1.0), 100, 100)

# naive matrix multiplication (renamed from `f` so it does not replace the
# two-argument method defined above)
function matmul(ma, mb)
    M, N, K = size(ma, 1), size(mb, 2), size(ma, 2)
    res = fill(zero(ma[1]), M, N)
    for i = 1:M
        for j = 1:N
            for k = 1:K
                @inbounds res[i, j] += ma[i, k] * mb[k, j]
            end
        end
    end
    return res
end

@benchmark matmul(ma, ma)
@benchmark matmul(ma2, ma2)

ma = fill(AG(1.0), 100, 100)
@benchmark ma * ma

a = A(0.4)
ag = AG(0.4)

using NiLangCore
@benchmark isdefined($ag, :g)
@benchmark $ag + $ag
ag.g = AG(0.0)
@benchmark $a + $a

struct SG{T}
    x::T
    g::Ref{T}
    SG(x::T) where T = new{T}(x)
end
Base.:*(x1::SG, x2::SG) = SG(x1.x * x2.x)
Base.:+(x1::SG, x2::SG) = SG(x1.x + x2.x)
Base.zero(::SG{T}) where T = SG(T(0))

ma = fill(SG(1.0), 100, 100)
@benchmark ma * ma

using NiLang, NiLang.AD

@i function test(x, one, N::Int)
    for i = 1:N
        x += one
    end
end

invcheckon(true)
@benchmark test'(Loss(0.0), 1.0, 1000000)
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
docs/make.jl
code
382
using Documenter, NiLangCore makedocs(; modules=[NiLangCore], format=Documenter.HTML(), pages=[ "Home" => "index.md", ], repo="https://github.com/GiggleLiu/NiLangCore.jl/blob/{commit}{path}#L{line}", sitename="NiLangCore.jl", authors="JinGuo Liu, thautwarm", assets=String[], ) deploydocs(; repo="github.com/GiggleLiu/NiLangCore.jl", )
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/Core.jl
code
6182
############# function properties #############
export isreversible, isreflexive, isprimitive
export protectf

"""
    isreversible(f, ARGT)

Return `true` if a function is reversible.
"""
isreversible(f, ::Type{ARGT}) where ARGT = hasmethod(~f, ARGT)

"""
    isreflexive(f)

Return `true` if a function is self-inverse.
"""
isreflexive(f) = (~f) === f

"""
    isprimitive(f)

Return `true` if `f` is an `instruction` that can not be decomposed anymore.
"""
isprimitive(f) = false

############# ancillas ################
export InvertibilityError, @invcheck

"""
    deanc(a, b)

Deallocate variable `a` with value `b`. It will throw an error if

* `a` and `b` are objects with different types,
* `a` is not equal to `b` (for floating point numbers, an error within
  `NiLangCore.GLOBAL_ATOL[]` is allowed).
"""
function deanc end

function deanc(a::T, b::T) where T <: AbstractFloat
    if a !== b && abs(b - a) > GLOBAL_ATOL[]
        throw(InvertibilityError("deallocate fail (floating point numbers): $a ≂̸ $b"))
    end
end
deanc(x::T, val::T) where T<:Tuple = deanc.(x, val)
deanc(x::T, val::T) where T<:AbstractArray = x === val || deanc.(x, val)
deanc(a::T, b::T) where T<:AbstractString = a === b || throw(InvertibilityError("deallocate fail (string): $a ≂̸ $b"))
function deanc(x::T, val::T) where T<:Dict
    if x !== val
        if length(x) != length(val)
            throw(InvertibilityError("deallocate fail (dict): length of dict not the same, got $(length(x)) and $(length(val))!"))
        else
            for (k, v) in x
                if haskey(val, k)
                    deanc(x[k], val[k])
                else
                    throw(InvertibilityError("deallocate fail (dict): key $k of dict does not exist!"))
                end
            end
        end
    end
end
deanc(a, b) = throw(InvertibilityError("deallocate fail (type mismatch): `$(typeof(a))` and `$(typeof(b))`"))
@generated function deanc(a::T, b::T) where T
    if isprimitivetype(T)
        :(a === b || throw(InvertibilityError("deallocate fail (primitive): $a ≂̸ $b")))
    else
        Expr(:block, [:($deanc(a.$NAME, b.$NAME)) for NAME in fieldnames(T)]...)
    end
end

"""
    InvertibilityError <: Exception
    InvertibilityError(ex)

The error for irreversible statements.
"""
struct InvertibilityError <: Exception
    ex
end

"""
    @invcheck x val

The macro version of `NiLangCore.deanc`, with a more informative error.
"""
macro invcheck(x, val)
    esc(_invcheck(x, val))
end

# the expression for reversibility checking
function _invcheck(x, val)
    Expr(:try, Expr(:block, :($deanc($x, $val))), :e,
        Expr(:block,
            :(println("deallocate fail `$($(QuoteNode(x))) → $($(QuoteNode(val)))`")),
            :(throw(e)))
    )
end
_invcheck(docheck::Bool, arg, res) = docheck ? _invcheck(arg, res) : nothing

"""
    chfield(x, field, val)

Change a `field` of an object `x`. The `field` can be a `Val` type

```jldoctest; setup=:(using NiLangCore)
julia> chfield(1+2im, Val(:im), 5)
1 + 5im
```

or a function

```jldoctest; setup=:(using NiLangCore)
julia> using NiLangCore

julia> struct GVar{T, GT}
           x::T
           g::GT
       end

julia> @fieldview xx(x::GVar) = x.x

julia> chfield(GVar(1.0, 0.0), xx, 2.0)
GVar{Float64, Float64}(2.0, 0.0)
```
"""
function chfield end

########### Inv ##########
export Inv, invtype

"""
    Inv{FT} <: Function
    Inv(f)

The inverse of a function.
"""
struct Inv{FT} <: Function
    f::FT
end
Inv(f::Inv) = f.f
@static if VERSION >= v"1.6"
    Base.:~(f::Base.ComposedFunction) = (~(f.inner)) ∘ (~(f.outer))
end
Base.:~(f::Function) = Inv(f)
Base.:~(::Type{Inv{T}}) where T = T  # for an inverse type, recover the constructor
Base.:~(::Type{T}) where T = Inv{T}  # for a type, it is a destructor
Base.show(io::IO, b::Inv) = print(io, "~$(b.f)")
Base.display(bf::Inv) = print(bf)

"""
    protectf(f)

Protect a function from being inverted, useful when using a callable object.
"""
protectf(x) = x
protectf(x::Inv) = x.f

invtype(::Type{T}) where T = Inv{<:T}

######### Infer
export PlusEq, MinusEq, XorEq, MulEq, DivEq

"""
    PlusEq{FT} <: Function
    PlusEq(f)

Called when executing `out += f(args...)` instruction. The following two statements are the same

```jldoctest; setup=:(using NiLangCore)
julia> x, y, z = 0.0, 2.0, 3.0
(0.0, 2.0, 3.0)

julia> x, y, z = PlusEq(*)(x, y, z)
(6.0, 2.0, 3.0)

julia> x, y, z = 0.0, 2.0, 3.0
(0.0, 2.0, 3.0)

julia> @instr x += y*z

julia> x, y, z
(6.0, 2.0, 3.0)
```
"""
struct PlusEq{FT} <: Function
    f::FT
end

"""
    MinusEq{FT} <: Function
    MinusEq(f)

Called when executing `out -= f(args...)` instruction. See `PlusEq` for detail.
"""
struct MinusEq{FT} <: Function
    f::FT
end

"""
    MulEq{FT} <: Function
    MulEq(f)

Called when executing `out *= f(args...)` instruction. See `PlusEq` for detail.
"""
struct MulEq{FT} <: Function
    f::FT
end

"""
    DivEq{FT} <: Function
    DivEq(f)

Called when executing `out /= f(args...)` instruction. See `PlusEq` for detail.
"""
struct DivEq{FT} <: Function
    f::FT
end

"""
    XorEq{FT} <: Function
    XorEq(f)

Called when executing `out ⊻= f(args...)` instruction. See `PlusEq` for detail.
"""
struct XorEq{FT} <: Function
    f::FT
end

const OPMX{FT} = Union{PlusEq{FT}, MinusEq{FT}, XorEq{FT}, MulEq{FT}, DivEq{FT}}

for (TP, OP) in [(:PlusEq, :+), (:MinusEq, :-), (:XorEq, :⊻)]
    @eval (inf::$TP)(out!, args...; kwargs...) = $OP(out!, inf.f(args...; kwargs...)), args...
    @eval (inf::$TP)(out!::Tuple, args...; kwargs...) = $OP.(out!, inf.f(args...; kwargs...)), args...  # e.g. allow `(x, y) += sincos(a)`
end

Base.:~(op::PlusEq) = MinusEq(op.f)
Base.:~(om::MinusEq) = PlusEq(om.f)
Base.:~(op::MulEq) = DivEq(op.f)
Base.:~(om::DivEq) = MulEq(om.f)
Base.:~(om::XorEq) = om

for (T, S) in [(:PlusEq, "+="), (:MinusEq, "-="), (:MulEq, "*="), (:DivEq, "/="), (:XorEq, "⊻=")]
    @eval Base.display(o::$T) = print($S, "(", o.f, ")")
    @eval Base.display(o::Type{$T}) = print($S)
    @eval Base.show_function(io::IO, o::$T, compact::Bool) = print(io, "$($S)($(o.f))")
    @eval Base.show_function(io::IO, ::MIME"text/plain", o::$T, compact::Bool) = Base.show(io, o)
end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
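A small sketch of the `PlusEq`/`MinusEq` pair and `~` inversion defined above; the values are chosen arbitrarily:

```julia
using NiLangCore

x, y, z = 0.0, 2.0, 3.0
x, y, z = PlusEq(*)(x, y, z)      # x += y * z  ->  (6.0, 2.0, 3.0)
x, y, z = (~PlusEq(*))(x, y, z)   # the inverse, x -= y * z  ->  (0.0, 2.0, 3.0)

NiLangCore.deanc(x, 0.0)          # passes silently: x was restored to 0.0
```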
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/NiLangCore.jl
code
414
module NiLangCore using MLStyle using TupleTools include("lens.jl") include("utils.jl") include("symboltable.jl") include("stack.jl") include("Core.jl") include("vars.jl") include("instr.jl") include("dualcode.jl") include("preprocess.jl") include("variable_analysis.jl") include("compiler.jl") include("checks.jl") if Base.VERSION >= v"1.4.2" include("precompile.jl") _precompile_() end end # module
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/checks.jl
code
1912
export check_inv, world_similar, almost_same

@nospecialize
"""
    check_inv(f, args; atol::Real=1e-8, verbose::Bool=false, kwargs...)

Return true if `f(args...; kwargs...)` is reversible.
"""
function check_inv(f, args; atol::Real=1e-8, verbose::Bool=false, kwargs...)
    args0 = deepcopy(args)
    args_ = f(args...; kwargs...)
    args = length(args) == 1 ? (args_,) : args_
    args_ = (~f)(args...; kwargs...)
    args = length(args) == 1 ? (args_,) : args_
    world_similar(args0, args, atol=atol, verbose=verbose)
end

function world_similar(a, b; atol::Real=1e-8, verbose::Bool=false)
    for (xa, xb) in zip(a, b)
        if !almost_same(xa, xb; atol=atol)
            verbose && println("$xa does not match $xb")
            return false
        end
    end
    return true
end
@specialize

"""
    almost_same(a, b; atol=GLOBAL_ATOL[], kwargs...) -> Bool

Return true if `a` and `b` are almost same w.r.t. `atol`.
"""
function almost_same(a::T, b::T; atol=GLOBAL_ATOL[], kwargs...) where T <: AbstractFloat
    a === b || abs(b - a) < atol
end

function almost_same(a::TA, b::TB; kwargs...) where {TA, TB}
    false
end

function almost_same(a::T, b::T; kwargs...) where {T<:Dict}
    length(a) != length(b) && return false
    for (k, v) in a
        haskey(b, k) && almost_same(v, b[k]; kwargs...) || return false
    end
    return true
end

@generated function almost_same(a::T, b::T; kwargs...) where T
    nf = fieldcount(a)
    if isprimitivetype(T)
        :(a === b)
    else
        quote
            res = true
            @nexprs $nf i-> res = res && almost_same(getfield(a, i), getfield(b, i); kwargs...)
            res
        end
    end
end

almost_same(x::T, y::T; kwargs...) where T<:AbstractArray = all(almost_same.(x, y; kwargs...))
almost_same(x::FastStack, y::FastStack; kwargs...) = all(almost_same.(x.data[1:x.top[]], y.data[1:y.top[]]; kwargs...))
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
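A quick sketch of `check_inv` on one of the built-in reversible instructions:

```julia
using NiLangCore

f = PlusEq(identity)           # implements `out! += identity(x)`
check_inv(f, (1.0, 2.0))       # true: (~f)(f(1.0, 2.0)...) restores the inputs
almost_same(1.0, 1.0 + 1e-12)  # true under the default tolerance
```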
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/compiler.jl
code
15496
struct CompileInfo invcheckon::Ref{Bool} end CompileInfo() = CompileInfo(Ref(true)) function compile_body(m::Module, body::AbstractVector, info) out = [] for ex in body ex_ = compile_ex(m, ex, info) ex_ !== nothing && push!(out, ex_) end return out end deleteindex!(d::AbstractDict, index) = delete!(d, index) @inline function map_func(x::Symbol) if x == :+= PlusEq, false elseif x == :.+= PlusEq, true elseif x == :-= MinusEq, false elseif x == :.-= MinusEq, true elseif x == :*= MulEq, false elseif x == :.*= MulEq, true elseif x == :/= DivEq, false elseif x == :./= DivEq, true elseif x == :⊻= XorEq, false elseif x == :.⊻= XorEq, true else error("`$x` can not be mapped to a reversible function.") end end # e.g. map `x += sin(z)` => `PlusEq(sin)(x, z)`. function to_standard_format(ex::Expr) head::Symbol = ex.head F, isbcast = map_func(ex.head) a, b = ex.args if !isbcast @match b begin :($f($(args...); $(kwargs...))) => :($F($f)($a, $(args...); $(kwargs...))) :($f($(args...))) => :($F($f)($a, $(args...))) :($x || $y) => :($F($logical_or)($a, $x, $y)) :($x && $y) => :($F($logical_and)($a, $x, $y)) _ => :($F(identity)($a, $b)) end else @match b begin :($f.($(args...); $(kwargs...))) => :($F($f).($a, $(args...); $(kwargs...))) :($f.($(args...))) => :($F($f).($a, $(args...))) :($f($(args...); $(kwargs...))) => :($F($(removedot(f))).($a, $(args...); $(kwargs...))) :($f($(args...))) => :($F($(removedot(f))).($a, $(args...))) _ => :($F(identity).($a, $b)) end end end logical_or(a, b) = a || b logical_and(a, b) = a && b """ compile_ex(m::Module, ex, info) Compile a NiLang statement to a regular julia statement. """ function compile_ex(m::Module, ex, info) @match ex begin :($a += $b) || :($a .+= $b) || :($a -= $b) || :($a .-= $b) || :($a *= $b) || :($a .*= $b) || :($a /= $b) || :($a ./= $b) || :($a ⊻= $b) || :($a .⊻= $b) => compile_ex(m, to_standard_format(ex), info) :(($t1=>$t2)($x)) => assign_ex(x, :(convert($t2, $x)), info.invcheckon[]) :(($t1=>$t2).($x)) => assign_ex(x, :(convert.($t2, $x)), info.invcheckon[]) # multi args expanded in preprocessing # general :($x ↔ $y) => begin e1 = isemptyvar(x) e2 = isemptyvar(y) if e1 && e2 nothing elseif e1 && !e2 _push_value(x, _pop_value(y), info.invcheckon[]) elseif !e1 && e2 _push_value(y, _pop_value(x), info.invcheckon[]) else tmp = gensym("temp") Expr(:block, :($tmp = $y), assign_ex(y, x, info.invcheckon[]), assign_ex(x, tmp, info.invcheckon[])) end end # stack :($s[end] → $x) => begin if info.invcheckon[] y = gensym("result") Expr(:block, :($y=$loaddata($x, $pop!($s))), _invcheck(y, x), assign_ex(x, y, info.invcheckon[])) else y = gensym("result") Expr(:block, :($y=$loaddata($x, $pop!($s))), assign_ex(x, y, info.invcheckon[])) end end :($s[end+1] ← $x) => :($push!($s, $_copy($x))) # dict :($x[$index] ← $tp) => begin assign_expr = :($x[$index] = $tp) if info.invcheckon[] Expr(:block, _assert_nokey(x, index), assign_expr) else assign_expr end end :($x[$index] → $tp) => begin delete_expr = :($(deleteindex!)($x, $index)) if info.invcheckon[] Expr(:block, _invcheck(:($x[$index]), tp), delete_expr) else delete_expr end end # general :($x ← $tp) => :($x = $tp) :($x → $tp) => begin if info.invcheckon[] _invcheck(x, tp) end end :($f($(args...))) => begin assignback_ex(ex, info.invcheckon[]) end :($f.($(allargs...))) => begin args, kwargs = seperate_kwargs(allargs) symres = gensym("results") ex = :($symres = $unzipped_broadcast($kwargs, $f, $(args...))) Expr(:block, ex, assign_vars(args, symres, info.invcheckon[]).args...) end Expr(:if, _...) 
=> compile_if(m, copy(ex), info) :(while ($pre, $post); $(body...); end) => begin whilestatement(pre, post, compile_body(m, body, info), info) end :(for $i=$range; $(body...); end) => begin forstatement(i, range, compile_body(m, body, info), info, nothing) end :(@simd $line for $i=$range; $(body...); end) => begin forstatement(i, range, compile_body(m, body, info), info, Symbol("@simd")=>line) end :(@threads $line for $i=$range; $(body...); end) => begin forstatement(i, range, compile_body(m, body, info), info, Symbol("@threads")=>line) end :(@avx $line for $i=$range; $(body...); end) => begin forstatement(i, range, compile_body(m, body, info), info, Symbol("@avx")=>line) end :(begin $(body...) end) => begin Expr(:block, compile_body(m, body, info)...) end :(@safe $line $subex) => subex :(@inbounds $line $subex) => Expr(:macrocall, Symbol("@inbounds"), line, compile_ex(m, subex, info)) :(@invcheckoff $line $subex) => begin state = info.invcheckon[] info.invcheckon[] = false ex = compile_ex(m, subex, info) info.invcheckon[] = state ex end :(@cuda $line $(args...)) => begin fcall = @match args[end] begin :($f($(args...))) => Expr(:call, Expr(:->, :(args...), Expr(:block, :($f(args...)), nothing ) ), args... ) _ => error("expect a function after @cuda, got $(args[end])") end Expr(:macrocall, Symbol("@cuda"), line, args[1:end-1]..., fcall) end :(@launchkernel $line $device $thread $ndrange $f($(args...))) => begin res = gensym("results") Expr(:block, :($res = $f($device, $thread)($(args...); ndrange=$ndrange)), :(wait($res)) ) end :(nothing) => ex ::Nothing => ex ::LineNumberNode => ex _ => error("statement not supported: `$ex`") end end function compile_if(m::Module, ex, info) pres = [] posts = [] ex = analyse_if(m, ex, info, pres, posts) Expr(:block, pres..., ex, posts...) end function analyse_if(m::Module, ex, info, pres, posts) var = gensym("branch") if ex.head == :if pre, post = ex.args[1].args ex.args[1] = var elseif ex.head == :elseif pre, post = ex.args[1].args[2].args ex.args[1].args[2] = var end push!(pres, :($var = $pre)) if info.invcheckon[] push!(posts, _invcheck(var, post)) end ex.args[2] = Expr(:block, compile_body(m, ex.args[2].args, info)...) if length(ex.args) == 3 if ex.args[3].head == :elseif ex.args[3] = analyse_if(m, ex.args[3], info, pres, posts) elseif ex.args[3].head == :block ex.args[3] = Expr(:block, compile_body(m, ex.args[3].args, info)...) end end ex end function whilestatement(precond, postcond, body, info) ex = Expr(:block, Expr(:while, precond, Expr(:block, body...), ), ) if info.invcheckon[] pushfirst!(ex.args, _invcheck(postcond, false)) push!(ex.args[end].args[end].args, _invcheck(postcond, true) ) end ex end function forstatement(i, range, body, info, mcr) assigns, checkers = compile_range(range) exf = Expr(:for, :($i=$range), Expr(:block, body...)) if !(mcr isa Nothing) exf = Expr(:macrocall, mcr.first, mcr.second, exf) end if info.invcheckon[] Expr(:block, assigns..., exf, checkers...) else exf end end _pop_value(x) = @match x begin :($s[end]) => :($pop!($s)) :($s[$ind]) => :($pop!($s, $ind)) # dict (notice pop over vector elements is not allowed.) :($x::$T) => :($(_pop_value(x))::$T) :(($(args...)),) => Expr(:tuple, _pop_value.(args)...) 
_ => x end _push_value(x, val, invcheck) = @match x begin :($s[end+1]) => :($push!($s, $val)) :($s[$arg]::∅) => begin ex = :($s[$arg] = $val) if invcheck Expr(:block, _assert_nokey(s, arg), ex) else ex end end _ => assign_ex(x, val, invcheck) end function _assert_nokey(x, index) str = "dictionary `$x` already has key `$index`" Expr(:if, :(haskey($x, $index)), :(throw(InvertibilityError($str)))) end _copy(x) = copy(x) _copy(x::Tuple) = copy.(x) export @code_julia """ @code_julia ex Get the interpreted expression of `ex`. ```julia julia> @code_julia x += exp(3.0) quote var"##results#267" = ((PlusEq)(exp))(x, 3.0) x = var"##results#267"[1] try (NiLangCore.deanc)(3.0, var"##results#267"[2]) catch e @warn "deallocate fail: `3.0 → var\"##results#267\"[2]`" throw(e) end end julia> @code_julia @invcheckoff x += exp(3.0) quote var"##results#257" = ((PlusEq)(exp))(x, 3.0) x = var"##results#257"[1] end ``` """ macro code_julia(ex) QuoteNode(compile_ex(__module__, ex, CompileInfo())) end compile_ex(m::Module, ex) = compile_ex(m, ex, CompileInfo()) export @i """ @i function fname(args..., kwargs...) ... end @i struct sname ... end Define a reversible function/type. ```jldoctest; setup=:(using NiLangCore) julia> @i function test(out!, x) out! += identity(x) end julia> test(0.2, 0.8) (1.0, 0.8) ``` See `test/compiler.jl` for more examples. """ macro i(ex) ex = gen_ifunc(__module__, ex) ex.args[1] = :(Base.@__doc__ $(ex.args[1])) esc(ex) end # generate the reversed function function gen_ifunc(m::Module, ex) mc, fname, args, ts, body = precom(m, ex) fname = _replace_opmx(fname) # implementations ftype = get_ftype(fname) head = :($fname($(args...)) where {$(ts...)}) dfname = dual_fname(fname) dftype = get_ftype(dfname) fdef1 = Expr(:function, head, Expr(:block, compile_body(m, body, CompileInfo())..., functionfoot(args))) dualhead = :($dfname($(args...)) where {$(ts...)}) fdef2 = Expr(:function, dualhead, Expr(:block, compile_body(m, dual_body(m, body), CompileInfo())..., functionfoot(args))) if mc !== nothing fdef1 = Expr(:macrocall, mc[1], mc[2], fdef1) fdef2 = Expr(:macrocall, mc[1], mc[2], fdef2) end #ex = :(Base.@__doc__ $fdef1; if $ftype != $dftype; $fdef2; end) ex = Expr(:block, fdef1, Expr(:if, :($ftype != $dftype), fdef2), ) end export nilang_ir """ nilang_ir(ex; reversed::Bool=false) Get the NiLang reversible IR from the function expression `ex`, return the reversed function if `reversed` is `true`. This IR is not directly executable on Julia, please use `macroexpand(Main, :(@i function .... end))` to get the julia expression of a reversible function. ```jldoctest; setup=:(using NiLangCore) julia> ex = :(@inline function f(x!::T, y) where T @routine begin anc ← zero(T) anc += identity(x!) end x! += y * anc ~@routine end); julia> NiLangCore.nilang_ir(Main, ex) |> NiLangCore.rmlines :(@inline function f(x!::T, y) where T begin anc ← zero(T) anc += identity(x!) end x! += y * anc begin anc -= identity(x!) anc → zero(T) end end) julia> NiLangCore.nilang_ir(Main, ex; reversed=true) |> NiLangCore.rmlines :(@inline function (~f)(x!::T, y) where T begin anc ← zero(T) anc += identity(x!) end x! -= y * anc begin anc -= identity(x!) 
anc → zero(T) end end) ``` """ function nilang_ir(m::Module, ex; reversed::Bool=false) mc, fname, args, ts, body = precom(m, ex) fname = _replace_opmx(fname) # implementations if reversed dfname = :(~$fname) # use fake head for readability head = :($dfname($(args...)) where {$(ts...)}) body = dual_body(m, body) else head = :($fname($(args...)) where {$(ts...)}) end fdef = Expr(:function, head, Expr(:block, body...)) if mc !== nothing fdef = Expr(:macrocall, mc[1], mc[2], fdef) end fdef end # seperate and return `args` and `kwargs` @inline function seperate_kwargs(args) if length(args) > 0 && args[1] isa Expr && args[1].head == :parameters args = args[2:end], args[1] else args, Expr(:parameters) end end # add a `return` statement to the end of the function body. function functionfoot(args) args = get_argname.(seperate_kwargs(args)[1]) if length(args) == 1 if args[1] isa Expr && args[1].head == :(...) args[1].args[1] else args[1] end else :(($(args...),)) end end # to provide the eye candy for defining `x += f(args...)` like functions _replace_opmx(ex) = @match ex begin :(:+=($f)) => :($(gensym())::PlusEq{typeof($f)}) :(:-=($f)) => :($(gensym())::MinusEq{typeof($f)}) :(:*=($f)) => :($(gensym())::MulEq{typeof($f)}) :(:/=($f)) => :($(gensym())::DivEq{typeof($f)}) :(:⊻=($f)) => :($(gensym())::XorEq{typeof($f)}) _ => ex end export @instr """ @instr ex Execute a reversible instruction. """ macro instr(ex) ex = precom_ex(__module__, ex, NiLangCore.PreInfo()) #variable_analysis_ex(ex, SymbolTable()) esc(Expr(:block, NiLangCore.compile_ex(__module__, ex, CompileInfo()), nothing)) end # the range of for statement compile_range(range) = @match range begin :($start:$step:$stop) => begin start_, step_, stop_ = gensym("start"), gensym("step"), gensym("stop") Any[:($start_ = $start), :($step_ = $step), :($stop_ = $stop)], Any[_invcheck(start_, start), _invcheck(step_, step), _invcheck(stop_, stop)] end :($start:$stop) => begin start_, stop_ = gensym("start"), gensym("stop") Any[:($start_ = $start), :($stop_ = $stop)], Any[_invcheck(start_, start), _invcheck(stop_, stop)] end :($list) => begin list_ = gensym("iterable") Any[:($list_ = deepcopy($list))], Any[_invcheck(list_, list)] end end """ get_ftype(fname) Return the function type, e.g. * `obj::ABC` => `ABC` * `f` => `typeof(f)` """ function get_ftype(fname) @match fname begin :($x::$tp) => tp _ => :($NiLangCore._typeof($fname)) end end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
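A minimal sketch of defining and inverting a reversible function with `@i`; the function name is chosen for illustration:

```julia
using NiLangCore

@i function muladd2(out!, x, y)
    out! += x * y
end

out, x, y = 0.0, 2.0, 3.0
out, x, y = muladd2(out, x, y)      # (6.0, 2.0, 3.0)
out, x, y = (~muladd2)(out, x, y)   # the inverse restores (0.0, 2.0, 3.0)
```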
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/dualcode.jl
code
5191
# get the expression of the inverse function
function dual_func(m::Module, fname, args, ts, body)
    :(function $(:(~$fname))($(args...)) where {$(ts...)};
        $(dual_body(m, body)...);
    end)
end

# get the function name of the inverse function
function dual_fname(op)
    @match op begin
        :($x::$tp) => :($x::$invtype($tp))
        :(~$x) => x
        _ => :($(gensym("~$op"))::$_typeof(~$op))
    end
end

_typeof(x) = typeof(x)
_typeof(x::Type{T}) where T = Type{T}

"""
    dual_ex(m::Module, ex)

Get the dual expression of `ex`.
"""
function dual_ex(m::Module, ex)
    @match ex begin
        :(($t1=>$t2)($x)) => :(($t2=>$t1)($x))
        :(($t1=>$t2).($x)) => :(($t2=>$t1).($x))
        :($x ↔ $y) => dual_swap(x, y)
        :($s[end+1] ← $x) => :($s[end] → $x)
        :($s[end] → $x) => :($s[end+1] ← $x)
        :($x → $val) => :($x ← $val)
        :($x ← $val) => :($x → $val)
        :($f($(args...))) => startwithdot(f) ? :($(getdual(removedot(f))).($(args...))) : :($(getdual(f))($(args...)))
        :($f.($(args...))) => :($(getdual(f)).($(args...)))
        :($a += $b) => :($a -= $b)
        :($a .+= $b) => :($a .-= $b)
        :($a -= $b) => :($a += $b)
        :($a .-= $b) => :($a .+= $b)
        :($a *= $b) => :($a /= $b)
        :($a .*= $b) => :($a ./= $b)
        :($a /= $b) => :($a *= $b)
        :($a ./= $b) => :($a .*= $b)
        :($a ⊻= $b) => :($a ⊻= $b)
        :($a .⊻= $b) => :($a .⊻= $b)
        Expr(:if, _...) => dual_if(m, copy(ex))
        :(while ($pre, $post); $(body...); end) => begin
            Expr(:while, :(($post, $pre)), Expr(:block, dual_body(m, body)...))
        end
        :(for $i=$start:$step:$stop; $(body...); end) => begin
            Expr(:for, :($i=$stop:(-$step):$start), Expr(:block, dual_body(m, body)...))
        end
        :(for $i=$start:$stop; $(body...); end) => begin
            j = gensym("j")
            Expr(:for, :($j=$start:$stop), Expr(:block, :($i ← $stop-$j+$start), dual_body(m, body)..., :($i → $stop-$j+$start)))
        end
        :(for $i=$itr; $(body...); end) => begin
            Expr(:for, :($i=Base.Iterators.reverse($itr)), Expr(:block, dual_body(m, body)...))
        end
        :(@safe $line $subex) => Expr(:macrocall, Symbol("@safe"), line, subex)
        :(@cuda $line $(args...)) => Expr(:macrocall, Symbol("@cuda"), line, args[1:end-1]..., dual_ex(m, args[end]))
        :(@launchkernel $line $(args...)) => Expr(:macrocall, Symbol("@launchkernel"), line, args[1:end-1]..., dual_ex(m, args[end]))
        :(@inbounds $line $subex) => Expr(:macrocall, Symbol("@inbounds"), line, dual_ex(m, subex))
        :(@simd $line $subex) => Expr(:macrocall, Symbol("@simd"), line, dual_ex(m, subex))
        :(@threads $line $subex) => Expr(:macrocall, Symbol("@threads"), line, dual_ex(m, subex))
        :(@avx $line $subex) => Expr(:macrocall, Symbol("@avx"), line, dual_ex(m, subex))
        :(@invcheckoff $line $subex) => Expr(:macrocall, Symbol("@invcheckoff"), line, dual_ex(m, subex))
        :(begin $(body...) end) => Expr(:block, dual_body(m, body)...)
        :(nothing) => ex
        ::LineNumberNode => ex
        ::Nothing => ex
        :() => ex
        _ => error("can not invert target expression $ex")
    end
end

function dual_if(m::Module, ex)
    _dual_cond(cond) = @match cond begin
        :(($pre, $post)) => :(($post, $pre))
    end
    if ex.head == :if
        ex.args[1] = _dual_cond(ex.args[1])
    elseif ex.head == :elseif
        ex.args[1].args[2] = _dual_cond(ex.args[1].args[2])
    end
    ex.args[2] = Expr(:block, dual_body(m, ex.args[2].args)...)
    if length(ex.args) == 3
        if ex.args[3].head == :elseif
            ex.args[3] = dual_if(m, ex.args[3])
        elseif ex.args[3].head == :block
            ex.args[3] = Expr(:block, dual_body(m, ex.args[3].args)...)
        end
    end
    ex
end

function dual_swap(x, y)
    e1 = isemptyvar(x)
    e2 = isemptyvar(y)
    if e1 && !e2 || !e1 && e2
        :($(_dual_swap_var(x)) ↔ $(_dual_swap_var(y)))
    else
        :($y ↔ $x)
    end
end

_dual_swap_var(x) = @match x begin
    :($s[end+1]) => :($s[end])
    :($x::∅) => :($x)
    :($s[end]) => :($s[end+1])
    _ => :($x::∅)
end

export @code_reverse

"""
    @code_reverse ex

Get the reversed expression of `ex`.

```jldoctest; setup=:(using NiLangCore)
julia> @code_reverse x += exp(3.0)
:(x -= exp(3.0))
```
"""
macro code_reverse(ex)
    QuoteNode(dual_ex(__module__, ex))
end

getdual(f) = @match f begin
    :(~$f) => f
    _ => :(~$f)
end

function dual_body(m::Module, body)
    out = []
    # fix function LineNumberNode
    if length(body) > 1 && body[1] isa LineNumberNode && body[2] isa LineNumberNode
        push!(out, body[1])
        start = 2
    else
        start = 1
    end
    ptr = length(body)
    # reverse the statements
    len = 0
    while ptr >= start
        if ptr-len==0 || body[ptr-len] isa LineNumberNode
            ptr-len != 0 && push!(out, body[ptr-len])
            for j=ptr:-1:ptr-len+1
                push!(out, dual_ex(m, body[j]))
            end
            ptr -= len+1
            len = 0
        else
            len += 1
        end
    end
    return out
end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
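Two sketches of `dual_ex` at work through `@code_reverse`; note how a `for` loop reverses its range:

```julia
using NiLangCore

@code_reverse x += exp(3.0)      # :(x -= exp(3.0))
@code_reverse x ← zero(Float64)  # :(x → zero(Float64))

# Loops are reversed as a whole; the range runs backwards in the dual:
ex = @code_reverse for i = 1:2:7
    x += identity(i)
end
# ex == :(for i = 7:-2:1; x -= identity(i); end), modulo line numbers
```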
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/instr.jl
code
5453
export @dual, @selfdual, @dualtype

"""
    @dual f invf

Define `f` and `invf` as a pair of dual instructions, i.e. the reverse of each other.
"""
macro dual(f, invf)
    esc(quote
        if !$NiLangCore.isprimitive($f)
            $NiLangCore.isprimitive(::typeof($f)) = true
        end
        if !$NiLangCore.isprimitive($invf)
            $NiLangCore.isprimitive(::typeof($invf)) = true
        end
        if Base.:~($f) !== $invf
            Base.:~(::typeof($f)) = $invf
        end
        if Base.:~($invf) !== $f
            Base.:~(::typeof($invf)) = $f
        end
    end)
end

macro dualtype(t, invt)
    esc(quote
        $invtype($t) === $invt || begin
            $NiLangCore.invtype(::Type{$t}) = $invt
            $NiLangCore.invtype(::Type{T}) where T<:$t = $invt{T.parameters...}
        end
        $invtype($invt) === $t || begin
            $NiLangCore.invtype(::Type{$invt}) = $t
            $NiLangCore.invtype(::Type{T}) where T<:$invt = $t{T.parameters...}
        end
    end)
end

@dualtype PlusEq MinusEq
@dualtype DivEq MulEq
@dualtype XorEq XorEq

"""
    @selfdual f

Define `f` as a self-dual instruction.
"""
macro selfdual(f)
    esc(:(@dual $f $f))
end

export @const
@eval macro $(:const)(ex)
    esc(ex)
end

export @skip!
macro skip!(ex)
    esc(ex)
end

export @assignback

# TODO: include control flows.
"""
    @assignback f(args...) [invcheck]

Assign input variables with output values: `args... = f(args...)`; turn off the invertibility error check if the second argument is false.
"""
macro assignback(ex, invcheck=true)
    ex = precom_ex(__module__, ex, PreInfo())
    esc(assignback_ex(ex, invcheck))
end

function assignback_ex(ex::Expr, invcheck::Bool)
    @match ex begin
        :($f($(args...))) => begin
            symres = gensym("results")
            ex = :($symres = $f($(args...)))
            res = assign_vars(seperate_kwargs(args)[1], symres, invcheck)
            pushfirst!(res.args, ex)
            return res
        end
        _ => error("assign back fail, got $ex")
    end
end

"""
    assign_vars(args, symres, invcheck)

Get the expression of assigning `symres` to `args`.
"""
function assign_vars(args, symres, invcheck)
    exprs = []
    for (i, arg) in enumerate(args)
        exi = @match arg begin
            :($ag...) => begin
                i != length(args) && error("`args...` like arguments should only appear as the last argument!")
                ex = :(ntuple(j->$symres[j+$(i-1)], length($ag)))
                assign_ex(ag, i==1 ? :(length($ag) == 1 ? ($symres,) : $ex) : ex, invcheck)
            end
            _ => if length(args) == 1
                assign_ex(arg, symres, invcheck)
            else
                assign_ex(arg, :($symres[$i]), invcheck)
            end
        end
        exi !== nothing && push!(exprs, exi)
    end
    Expr(:block, exprs...)
end

error_message_fcall(arg) = """
function arguments should not contain function calls on variables, got `$arg`, try to decompose it into elementary statements, e.g. statement `z += f(g(x))` should be written as

    y += g(x)
    z += y

If `g` is a dataview (a function map an object to its field or a bijective function), one can also use the pipeline like

    z += f(x |> g)
"""

assign_ex(arg, res, invcheck) = @match arg begin
    ::Number || ::String => _invcheck(invcheck, arg, res)
    ::Symbol || ::GlobalRef => _isconst(arg) ? _invcheck(invcheck, arg, res) : :($arg = $res)
    :(@skip! $line $x) => nothing
    :(@fields $line $x) => assign_ex(x, Expr(:call, default_constructor, :(typeof($x)), Expr(:..., res)), invcheck)
    :($x::∅) => assign_ex(x, res, invcheck)
    :($x::$T) => assign_ex(x, :($loaddata($T, $res)), invcheck)
    :($x.$k) => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield($x, $(Val(k)), $res)), invcheck)
    # tuples must be index through (x |> 1)
    :($a |> tget($x)) => assign_ex(a, :($(TupleTools.insertat)($a, $x, ($res,))), invcheck)
    :($a |> subarray($(ranges...))) => :(($res===view($a, $(ranges...))) || (view($a, $(ranges...)) .= $res))
    :($x |> $f) => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield($x, $f, $res)), invcheck)
    :($x .|> $f) => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield.($x, Ref($f), $res)), invcheck)
    :($x') => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield($x, adjoint, $res)), invcheck)
    :(-$x) => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield($x, -, $res)), invcheck)
    :($t{$(p...)}($(args...))) => begin
        if length(args) == 1
            assign_ex(args[1], :($getfield($res, 1)), invcheck)
        else
            assign_vars(args, :($type2tuple($res)), invcheck)
        end
    end
    :($f($(args...))) => all(_isconst, args) || error(error_message_fcall(arg))
    :($f.($(args...))) => all(_isconst, args) || error(error_message_fcall(arg))
    :($a[$(x...)]) => begin
        :($a[$(x...)] = $res)
    end
    :(($(args...),)) => begin
        # TODO: avoid possible repeated evaluation (not here, in swap)
        Expr(:block, [assign_ex(args[i], :($res[$i]), invcheck) for i=1:length(args)]...)
    end
    _ => _invcheck(invcheck, arg, res)
end

export @assign

"""
    @assign a b [invcheck]

Perform the assign `a = b` in a reversible program. Turn off the invertibility check if `invcheck` is false.
"""
macro assign(a, b, invcheck=true)
    esc(assign_ex(a, b, invcheck))
end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
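A sketch of registering a user-defined pair with `@dual`; the two helper functions are made up for the example:

```julia
using NiLangCore

incr(x) = x + 1   # hypothetical instruction
decr(x) = x - 1   # its inverse

@dual incr decr
(~incr)(2) == decr(2)          # true
NiLangCore.isprimitive(incr)   # true
```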
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/lens.jl
code
3188
export _zero, @fields # update a field of a struct. @inline @generated function field_update(main :: T, field::Val{Field}, value) where {T, Field} fields = fieldnames(T) Expr(:new, T, Any[field !== Field ? :(main.$field) : :value for field in fields]...) end # the default constructor of a struct @inline @generated function default_constructor(::Type{T}, fields::Vararg{Any,N}) where {T,N} Expr(:new, T, Any[:(fields[$i]) for i=1:N]...) end """ _zero(T) _zero(x::T) Create a `zero` of type `T` by recursively applying `zero` to its fields. """ @inline @generated function _zero(::Type{T}) where {T} Expr(:new, T, Any[:(_zero($field)) for field in T.types]...) end @inline @generated function _zero(x::T) where {T} Expr(:new, T, Any[:(_zero(x.$field)) for field in fieldnames(T)]...) end function lens_compile(ex, cache, value) @match ex begin :($a.$b.$c = $d) => begin updated = Expr(:let, Expr(:block, :($cache = $cache.$b), :($value = $d)), :($field_update($cache, $(Val(c)), $value))) lens_compile(:($a.$b = $updated), cache, value) end :($a.$b = $c) => begin Expr(:let, Expr(:block, :($cache = $a), :($value=$c)), :($field_update($cache, $(Val(b)), $value))) end _ => error("Malformed update notation $ex, expect the form like 'a.b = c'.") end end function with(ex) cache = gensym("cache") value = gensym("value") lens_compile(ex, cache, value) end """ e.g. `@with x.y = val` will return a new object similar to `x`, with the `y` field changed to `val`. """ macro with(ex) with(ex) |> esc end @inline @generated function _zero(::Type{T}) where {T<:Tuple} Expr(:tuple, Any[:(_zero($field)) for field in T.types]...) end _zero(::Type{T}) where T<:Real = zero(T) _zero(::Type{String}) = "" _zero(::Type{Symbol}) = Symbol("") _zero(::Type{Char}) = '\0' _zero(::Type{T}) where {ET,N,T<:AbstractArray{ET,N}} = reshape(ET[], ntuple(x->0, N)) _zero(::Type{T}) where {A,B,T<:Dict{A,B}} = Dict{A,B}() #_zero(x::T) where T = _zero(T) # not adding this line! _zero(x::T) where T<:Real = zero(x) _zero(::String) = "" _zero(::Symbol) = Symbol("") _zero(::Char) = '\0' _zero(x::T) where T<:AbstractArray = zero(x) function _zero(d::T) where {A,B,T<:Dict{A,B}} Dict{A,B}([x=>_zero(y) for (x,y) in d]) end @static if VERSION > v"1.6.100" @generated function chfield(x, ::Val{FIELD}, xval) where FIELD if ismutabletype(x) Expr(:block, :(x.$FIELD = xval), :x) else :(@with x.$FIELD = xval) end end else @generated function chfield(x, ::Val{FIELD}, xval) where FIELD if x.mutable Expr(:block, :(x.$FIELD = xval), :x) else :(@with x.$FIELD = xval) end end end @generated function chfield(x, f::Function, xval) Expr(:block, _invcheck(:(f(x)), :xval), :x) end # convert field of an object to a tuple @generated function type2tuple(x::T) where T Expr(:tuple, [:(x.$v) for v in fieldnames(T)]...) end macro fields(ex) esc(:($type2tuple($ex))) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
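A sketch of `chfield`, `_zero` and `@fields` on a throwaway struct defined just for the example:

```julia
using NiLangCore

struct Point   # example type, not part of the package
    x::Float64
    y::Float64
end

p = Point(1.0, 2.0)
chfield(p, Val(:x), 5.0)   # Point(5.0, 2.0); `p` itself is untouched
NiLangCore._zero(Point)    # Point(0.0, 0.0)
@fields p                  # (1.0, 2.0)
```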
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/precompile.jl
code
6932
function _precompile_() ccall(:jl_generating_output, Cint, ()) == 1 || return nothing Base.precompile(Tuple{typeof(gen_ifunc),Module,Expr}) # time: 1.9265794 Base.precompile(Tuple{typeof(render_arg),Expr}) # time: 0.045112614 Base.precompile(Tuple{typeof(dual_ex),Module,Expr}) # time: 0.04482814 Base.precompile(Tuple{typeof(memkernel),Expr}) # time: 0.042466346 Base.precompile(Tuple{typeof(functionfoot),Vector{Any}}) # time: 0.028968 Base.precompile(Tuple{PlusEq{typeof(cos)},Float64,Float64}) # time: 0.024845816 Base.precompile(Tuple{typeof(assign_ex),Expr,Expr,Bool}) # time: 0.02307638 Base.precompile(Tuple{typeof(pushvar!),Vector{Symbol},Expr}) # time: 0.008682777 Base.precompile(Tuple{typeof(precom_opm),Symbol,Symbol,Expr}) # time: 0.024082141 Base.precompile(Tuple{typeof(precom_opm),Symbol,Expr,Expr}) # time: 0.014433064 Base.precompile(Tuple{PlusEq{typeof(sin)},Float64,Float64}) # time: 0.004275546 Base.precompile(Tuple{typeof(get_argname),Expr}) # time: 0.00546523 Base.precompile(Tuple{typeof(forstatement),Symbol,Expr,Vector{Any},CompileInfo,Nothing}) # time: 0.003776189 Base.precompile(Tuple{typeof(julia_usevar!),SymbolTable,Expr}) # time: 0.049144164 Base.precompile(Tuple{typeof(julia_usevar!),SymbolTable,Symbol}) # time: 0.003254819 Base.precompile(Tuple{typeof(precom_ex),Module,LineNumberNode,PreInfo}) # time: 0.001450645 Base.precompile(Tuple{typeof(precom_body),Module,SubArray{Any, 1, Vector{Any}, Tuple{UnitRange{Int64}}, true},PreInfo}) # time: 0.001423381 Base.precompile(Tuple{typeof(precom_body),Module,Vector{Any},PreInfo}) # time: 0.001126582 Base.precompile(Tuple{typeof(variable_analysis_ex),Expr,SymbolTable}) # time: 0.31368348 Base.precompile(Tuple{typeof(precom_ex),Module,Expr,PreInfo}) # time: 0.20325074 Base.precompile(Tuple{typeof(compile_ex),Module,Expr,CompileInfo}) # time: 0.19640462 Base.precompile(Tuple{Core.kwftype(typeof(almost_same)),NamedTuple{(:atol,), Tuple{Float64}},typeof(almost_same),Vector{Int64},Vector{Int64}}) # time: 0.080980584 Base.precompile(Tuple{typeof(unzipped_broadcast),PlusEq{typeof(*)},Tuple{Float64, Float64},Tuple{Float64, Float64},Tuple{Float64, Float64}}) # time: 0.07962415 Base.precompile(Tuple{typeof(unzipped_broadcast),PlusEq{typeof(identity)},Vector{Float64},Vector{Float64}}) # time: 0.060837016 Base.precompile(Tuple{typeof(almost_same),Vector{Float64},Vector{Float64}}) # time: 0.043818954 Base.precompile(Tuple{typeof(functionfoot),Vector{Expr}}) # time: 0.03988443 Base.precompile(Tuple{typeof(assign_ex),Expr,Symbol,Bool}) # time: 0.03273888 Base.precompile(Tuple{typeof(assign_ex),Expr,Float64,Bool}) # time: 0.031806484 Base.precompile(Tuple{typeof(Base.show_function),IOContext{IOBuffer},PlusEq{typeof(abs2)},Bool}) # time: 0.025549678 Base.precompile(Tuple{typeof(precom_opm),Symbol,Symbol,Symbol}) # time: 0.023482375 Base.precompile(Tuple{PlusEq{typeof(^)},ComplexF64,ComplexF64,Vararg{ComplexF64, N} where N}) # time: 0.018600011 Base.precompile(Tuple{typeof(functionfoot),Vector{Symbol}}) # time: 0.018092046 Base.precompile(Tuple{PlusEq{typeof(tan)},Float64,Float64}) # time: 0.015275044 Base.precompile(Tuple{typeof(precom_if),Module,Expr,PreInfo}) # time: 0.010871046 Base.precompile(Tuple{MinusEq{typeof(^)},Union{Float64, ComplexF64},Union{Float64, ComplexF64},Union{Float64, ComplexF64}}) # time: 0.010170748 Base.precompile(Tuple{PlusEq{typeof(*)},Float64,Float64,Vararg{Float64, N} where N}) # time: 0.007921303 Base.precompile(Tuple{PlusEq{typeof(+)},Float64,Float64,Vararg{Float64, N} where N}) # time: 0.00782437 
Base.precompile(Tuple{typeof(_isconst),Expr}) # time: 0.007764913 Base.precompile(Tuple{typeof(swapvars!),SymbolTable,Expr,Expr}) # time: 0.007498742 Base.precompile(Tuple{typeof(Base.show_function),IOContext{IOBuffer},PlusEq{typeof(angle)},Bool}) # time: 0.007020253 Base.precompile(Tuple{typeof(precom_opm),Symbol,Expr,Int64}) # time: 0.006372358 Base.precompile(Tuple{typeof(precom_opm),Symbol,Symbol,Int64}) # time: 0.006353995 Base.precompile(Tuple{typeof(Base.show_function),IOContext{IOBuffer},PlusEq{typeof(abs)},Bool}) # time: 0.005321797 Base.precompile(Tuple{typeof(unzipped_broadcast),PlusEq{typeof(*)},Vector{Float64},Vector{Float64},Vector{Float64}}) # time: 0.005210431 Base.precompile(Tuple{typeof(isreversible),Function,Type{Tuple{Number, Any, Any}}}) # time: 0.003735743 Base.precompile(Tuple{typeof(removedot),Symbol}) # time: 0.003666485 Base.precompile(Tuple{PlusEq{typeof(*)},Float64,Int64,Vararg{Any, N} where N}) # time: 0.003475684 Base.precompile(Tuple{MinusEq{typeof(*)},Float64,Int64,Vararg{Any, N} where N}) # time: 0.003429493 Base.precompile(Tuple{PlusEq{typeof(*)},Float64,Float64,Vararg{Any, N} where N}) # time: 0.002768054 Base.precompile(Tuple{MinusEq{typeof(*)},Float64,Float64,Vararg{Any, N} where N}) # time: 0.002728645 Base.precompile(Tuple{typeof(assign_vars),SubArray{Any, 1, Vector{Any}, Tuple{UnitRange{Int64}}, true},Expr,Bool}) # time: 0.00237789 Base.precompile(Tuple{typeof(unzipped_broadcast),MinusEq{typeof(*)},Vector{Float64},Vector{Float64},Vector{Float64}}) # time: 0.002377491 Base.precompile(Tuple{typeof(almost_same),Tuple{Float64, Float64, Vector{Float64}, Vector{Float64}},Tuple{Float64, Float64, Vector{Float64}, Vector{Float64}}}) # time: 0.002303422 Base.precompile(Tuple{typeof(unzipped_broadcast),MinusEq{typeof(identity)},Vector{Float64},Vector{Float64}}) # time: 0.00225225 Base.precompile(Tuple{typeof(dual_swap),Expr,Expr}) # time: 0.002167955 Base.precompile(Tuple{PlusEq{typeof(sin)},ComplexF64,ComplexF64}) # time: 0.002135965 Base.precompile(Tuple{typeof(whilestatement),Expr,Expr,Vector{Any},CompileInfo}) # time: 0.001927632 Base.precompile(Tuple{typeof(variable_analysis_ex),LineNumberNode,SymbolTable}) # time: 0.001729012 Base.precompile(Tuple{typeof(compile_ex),Module,LineNumberNode,CompileInfo}) # time: 0.001608265 Base.precompile(Tuple{typeof(assign_ex),Int64,Expr,Bool}) # time: 0.001299157 Base.precompile(Tuple{typeof(_pop_value),Expr}) # time: 0.001298544 Base.precompile(Tuple{typeof(assign_ex),Symbol,Symbol,Bool}) # time: 0.001287453 Base.precompile(Tuple{typeof(assign_ex),Float64,Expr,Bool}) # time: 0.001275525 Base.precompile(Tuple{MinusEq{typeof(/)},Float64,Float64,Vararg{Any, N} where N}) # time: 0.001080575 Base.precompile(Tuple{typeof(_push_value),Expr,Expr,Bool}) # time: 0.00106306 Base.precompile(Tuple{MinusEq{typeof(*)},Float64,Float64,Vararg{Float64, N} where N}) # time: 0.001049093 Base.precompile(Tuple{PlusEq{typeof(/)},Float64,Float64,Vararg{Any, N} where N}) # time: 0.00102371 end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/preprocess.jl
code
7899
export precom

# precompiling information
struct PreInfo
    routines::Vector{Any}
end
PreInfo() = PreInfo([])

"""
    precom(module, ex)

Precompile a function, returns a tuple of (macros, function name, arguments, type parameters, function body).
"""
function precom(m::Module, ex)
    mc, fname, args, ts, body = match_function(ex)
    vars = Symbol[]
    newargs = map(args) do arg
        @match arg begin
            :(::$tp) => Expr(:(::), gensym(), tp)
            _ => arg
        end
    end
    for arg in newargs
        pushvar!(vars, arg)
    end
    info = PreInfo()
    body_out = precom_body(m, body, info)
    if !isempty(info.routines)
        error("`@routine` and `~@routine` must appear in pairs, missing `~@routine`!")
    end
    st = SymbolTable(vars, Symbol[], Symbol[])
    st_after = copy(st)
    variable_analysis_ex.(body_out, Ref(st_after))
    checksyms(st_after, st)
    mc, fname, newargs, ts, body_out
end

function precom_body(m::Module, body::AbstractVector, info)
    Any[precom_ex(m, ex, info) for ex in body]
end

# precompile `+=`, `-=`, `*=` and `/=`
function precom_opm(f, out, arg2)
    if f in [:(+=), :(-=), :(*=), :(/=)]
        @match arg2 begin
            :($x |> $view) => Expr(f, out, :(identity($arg2)))
            :($subf($(subargs...))) => Expr(f, out, arg2)
            _ => Expr(f, out, :(identity($arg2)))
        end
    elseif f in [:(.+=), :(.-=), :(.*=), :(./=)]
        @match arg2 begin
            :($x |> $view) || :($x .|> $view) => Expr(f, out, :(identity.($arg2)))
            :($subf.($(subargs...))) => Expr(f, out, arg2)
            :($subf($(subargs...))) => Expr(f, out, arg2)
            _ => Expr(f, out, :(identity.($arg2)))
        end
    end
end

# precompile `⊻=`
function precom_ox(f, out, arg2)
    if f == :(⊻=)
        @match arg2 begin
            :($x |> $view) => Expr(f, out, :(identity($arg2)))
            :($subf($(subargs...))) || :($a || $b) || :($a && $b) => Expr(f, out, arg2)
            _ => Expr(f, out, :(identity($arg2)))
        end
    elseif f == :(.⊻=)
        @match arg2 begin
            :($x |> $view) || :($x .|> $view) => Expr(f, out, :(identity.($arg2)))
            :($subf.($(subargs...))) => Expr(f, out, arg2)
            :($subf($(subargs...))) => Expr(f, out, arg2)
            _ => Expr(f, out, :(identity.($arg2)))
        end
    end
end

"""
    precom_ex(module, ex, info)

Precompile a single statement `ex`, where `info` is a `PreInfo` instance.
"""
function precom_ex(m::Module, ex, info)
    @match ex begin
        :($x ← $val) || :($x → $val) => ex
        :($x ↔ $y) => ex
        :($(xs...), $y ← $val) => precom_ex(m, :(($(xs...), $y) ← $val), info)
        :($(xs...), $y → $val) => precom_ex(m, :(($(xs...), $y) → $val), info)
        :($a += $b) => precom_opm(:+=, a, b)
        :($a -= $b) => precom_opm(:-=, a, b)
        :($a *= $b) => precom_opm(:*=, a, b)
        :($a /= $b) => precom_opm(:/=, a, b)
        :($a ⊻= $b) => precom_ox(:⊻=, a, b)
        :($a .+= $b) => precom_opm(:.+=, a, b)
        :($a .-= $b) => precom_opm(:.-=, a, b)
        :($a .*= $b) => precom_opm(:.*=, a, b)
        :($a ./= $b) => precom_opm(:./=, a, b)
        :($a .⊻= $b) => precom_ox(:.⊻=, a, b)
        Expr(:if, _...) => precom_if(m, copy(ex), info)
        :(while ($pre, $post); $(body...); end) => begin
            post = post == :~ ? pre : post
            info = PreInfo()
            Expr(:while, :(($pre, $post)), Expr(:block, precom_body(m, body, info)...))
        end
        :(@from $line $post while $pre; $(body...); end) => precom_ex(m, Expr(:while, :(($pre, !$post)), ex.args[4].args[2]), info)
        :(begin $(body...) end) => begin
            Expr(:block, precom_body(m, body, info)...)
        end
        # TODO: allow omitting the step.
        :(for $i=$range; $(body...); end) || :(for $i in $range; $(body...); end) => begin
            info = PreInfo()
            Expr(:for, :($i=$(precom_range(range))), Expr(:block, precom_body(m, body, info)...))
        end
        :(@safe $line $subex) => ex
        :(@cuda $line $(args...)) => ex
        :(@launchkernel $line $(args...)) => ex
        :(@inbounds $line $subex) => Expr(:macrocall, Symbol("@inbounds"), line, precom_ex(m, subex, info))
        :(@simd $line $subex) => Expr(:macrocall, Symbol("@simd"), line, precom_ex(m, subex, info))
        :(@threads $line $subex) => Expr(:macrocall, Symbol("@threads"), line, precom_ex(m, subex, info))
        :(@avx $line $subex) => Expr(:macrocall, Symbol("@avx"), line, precom_ex(m, subex, info))
        :(@invcheckoff $line $subex) => Expr(:macrocall, Symbol("@invcheckoff"), line, precom_ex(m, subex, info))
        :(@routine $line $expr) => begin
            precode = precom_ex(m, expr, info)
            push!(info.routines, precode)
            precode
        end
        :(~(@routine $line)) => begin
            if isempty(info.routines)
                error("`@routine` and `~@routine` must appear in pairs, missing `@routine`!")
            end
            precom_ex(m, dual_ex(m, pop!(info.routines)), info)
        end
        # 1. precompile to expand macros
        # 2. get dual expression
        # 3. precompile to analyze variables
        :(~$expr) => precom_ex(m, dual_ex(m, precom_ex(m, expr, PreInfo())), info)
        :($f($(args...))) => :($f($(args...)))
        :($f.($(args...))) => :($f.($(args...)))
        :(nothing) => ex
        Expr(:macrocall, _...) => precom_ex(m, macroexpand(m, ex), info)
        ::LineNumberNode => ex
        ::Nothing => ex
        _ => error("unsupported statement: $ex")
    end
end

precom_range(range) = @match range begin
    _ => range
end

function precom_if(m, ex, exinfo)
    _expand_cond(cond) = @match cond begin
        :(($pre, ~)) => :(($pre, $pre))
        :(($pre, $post)) => :(($pre, $post))
        :($pre) => :(($pre, $pre))
    end
    if ex.head == :if
        ex.args[1] = _expand_cond(ex.args[1])
    elseif ex.head == :elseif
        ex.args[1].args[2] = _expand_cond(ex.args[1].args[2])
    end
    info = PreInfo()
    ex.args[2] = Expr(:block, precom_body(m, ex.args[2].args, info)...)
    if length(ex.args) == 3
        if ex.args[3].head == :elseif
            ex.args[3] = precom_if(m, ex.args[3], exinfo)
        elseif ex.args[3].head == :block
            info = PreInfo()
            ex.args[3] = Expr(:block, precom_body(m, ex.args[3].args, info)...)
        else
            error("unknown statement following `if` $ex.")
        end
    end
    ex
end

export @code_preprocess

"""
    @code_preprocess ex

Preprocess `ex` and return the symmetric reversible IR.

```jldoctest; setup=:(using NiLangCore)
julia> NiLangCore.rmlines(@code_preprocess if (x < 3, ~) x += exp(3.0) end)
:(if (x < 3, x < 3)
      x += exp(3.0)
  end)
```
"""
macro code_preprocess(ex)
    QuoteNode(precom_ex(__module__, ex, PreInfo()))
end

precom_ex(m::Module, ex) = precom_ex(m, ex, PreInfo())

# push a new variable to variable set `x`, for allocating `target`
function pushvar!(x::Vector{Symbol}, target)
    @match target begin
        ::Symbol => begin
            if target in x
                throw(InvertibilityError("Symbol `$target` should not be used as the allocation target, it is an existing variable in the current scope."))
            else
                push!(x, target)
            end
        end
        :(($(tar...),)) => begin
            for t in tar
                pushvar!(x, t)
            end
        end
        :($tar = _) => pushvar!(x, tar)
        :($tar...) => pushvar!(x, tar)
        :($tar::$tp) => pushvar!(x, tar)
        Expr(:parameters, targets...) => begin
            for tar in targets
                pushvar!(x, tar)
            end
        end
        Expr(:kw, tar, val) => begin
            pushvar!(x, tar)
        end
        _ => error("unknown variable expression $(target)")
    end
    nothing
end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/stack.jl
code
1837
export FastStack, GLOBAL_STACK, FLOAT64_STACK, FLOAT32_STACK, COMPLEXF64_STACK, COMPLEXF32_STACK, BOOL_STACK, INT64_STACK, INT32_STACK

const GLOBAL_STACK = []

struct FastStack{T}
    data::Vector{T}
    top::Base.RefValue{Int}
end

function FastStack{T}(n::Int) where T
    FastStack{T}(Vector{T}(undef, n), Ref(0))
end

function FastStack(n::Int)
    FastStack{Any}(Vector{Any}(undef, n), Ref(0))
end

Base.show(io::IO, x::FastStack{T}) where T = print(io, "FastStack{$T}($(x.top[])/$(length(x.data)))")
Base.show(io::IO, ::MIME"text/plain", x::FastStack{T}) where T = show(io, x)
Base.length(stack::FastStack) = stack.top[]
Base.empty!(stack::FastStack) = (stack.top[] = 0; stack)

@inline function Base.push!(stack::FastStack, val)
    stack.top[] += 1
    @boundscheck stack.top[] <= length(stack.data) || throw(BoundsError(stack, stack.top[]))
    stack.data[stack.top[]] = val
    return stack
end

@inline function Base.pop!(stack::FastStack)
    @boundscheck stack.top[] > 0 || throw(BoundsError(stack, stack.top[]))
    val = stack.data[stack.top[]]
    stack.top[] -= 1
    return val
end

# default stack size is 10^6 (~8MB for Float64)
let empty_exprs = Expr[:($empty!($GLOBAL_STACK))]
    for DT in [:Float64, :Float32, :ComplexF64, :ComplexF32, :Int64, :Int32, :Bool]
        STACK = Symbol(uppercase(String(DT)), :_STACK)
        @eval const $STACK = FastStack{$DT}(1000000)
        # allow in-stack and out-stack types to differ, to support loading data to GVar.
        push!(empty_exprs, Expr(:call, empty!, STACK))
    end
    @eval function empty_global_stacks!()
        $(empty_exprs...)
    end
end

"""
    loaddata(t, x)

Load data `x`, matching type `t`.
"""
loaddata(::Type{T}, x::T) where T = x
loaddata(::Type{T1}, x::T) where {T1,T} = convert(T1, x)
loaddata(::T1, x::T) where {T1,T} = loaddata(T1, x)
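# A minimal usage sketch (illustrative, not part of the original source):
#
#   st = FastStack{Float64}(4)   # preallocate capacity for 4 elements
#   push!(st, 1.0); push!(st, 2.0)
#   pop!(st)                     # returns 2.0; the slot is reused, never freed
#   length(st)                   # 1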
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/symboltable.jl
code
4127
# * existing: the ancillas and input arguments in the local scope.
#   They should be protected to avoid duplicated allocation.
# * deallocated: the ancillas removed.
#   They should be recorded to avoid use after deallocation.
# * unclassified: the variables from the global scope.
#   They can not be allocation targets.
struct SymbolTable
    existing::Vector{Symbol}
    deallocated::Vector{Symbol}
    unclassified::Vector{Symbol}
end
function SymbolTable()
    SymbolTable(Symbol[], Symbol[], Symbol[])
end

Base.copy(st::SymbolTable) = SymbolTable(copy(st.existing), copy(st.deallocated), copy(st.unclassified))

# remove a variable from a list
function removevar!(lst::AbstractVector, var)
    index = findfirst(==(var), lst)
    deleteat!(lst, index)
end

# replace a variable in a list with the target variable
function replacevar!(lst::AbstractVector, var, var2)
    index = findfirst(==(var), lst)
    lst[index] = var2
end

# allocate a new variable
function allocate!(st::SymbolTable, var::Symbol)
    if var ∈ st.existing
        throw(InvertibilityError("Repeated allocation of variable `$(var)`"))
    elseif var ∈ st.deallocated
        removevar!(st.deallocated, var)
        push!(st.existing, var)
    elseif var ∈ st.unclassified
        throw(InvertibilityError("Variable `$(var)` used before allocation."))
    else
        push!(st.existing, var)
    end
    nothing
end

# find the list containing var
function findlist(st::SymbolTable, var::Symbol)
    if var ∈ st.existing
        return st.existing
    elseif var ∈ st.unclassified
        return st.unclassified
    elseif var in st.deallocated
        return st.deallocated
    else
        return nothing
    end
end

# using a variable
function operate!(st::SymbolTable, var::Symbol)
    if var ∈ st.existing || var ∈ st.unclassified
    elseif var ∈ st.deallocated
        throw(InvertibilityError("Operating on deallocated variable `$(var)`"))
    else
        push!(st.unclassified, var::Symbol)
    end
    nothing
end

# deallocate a variable
function deallocate!(st::SymbolTable, var::Symbol)
    if var ∈ st.deallocated
        throw(InvertibilityError("Repeated deallocation of variable `$(var)`"))
    elseif var ∈ st.existing
        removevar!(st.existing, var)
        push!(st.deallocated, var)
    elseif var ∈ st.unclassified
        throw(InvertibilityError("Deallocating an external variable `$(var)`"))
    else
        throw(InvertibilityError("Deallocating an external variable `$(var)`"))
    end
    nothing
end

# check the symbol table to make sure no symbol introduced in the local scope is left undeallocated.
# `a` is the symbol table after running the local scope, `b` is the symbol table before running the local scope.
function checksyms(a::SymbolTable, b::SymbolTable=SymbolTable())
    diff = setdiff(a.existing, b.existing)
    if !isempty(diff)
        error("Some variables are not deallocated correctly: $diff")
    end
end

function swapsyms!(st::SymbolTable, var1::Symbol, var2::Symbol)
    lst1 = findlist(st, var1)
    lst2 = findlist(st, var2)
    if lst1 !== nothing && lst2 !== nothing
        # exchange variables
        i1 = findfirst(==(var1), lst1)
        i2 = findfirst(==(var2), lst2)
        lst2[i2], lst1[i1] = lst1[i1], lst2[i2]
    elseif lst1 !== nothing
        replacevar!(lst1, var1, var2)
        operate!(st, var1)
    elseif lst2 !== nothing
        replacevar!(lst2, var2, var1)
        operate!(st, var2)
    else
        operate!(st, var1)
        operate!(st, var2)
    end
end

function swapsyms_asymetric!(st::SymbolTable, var1s::Vector, var2::Symbol)
    length(var1s) == 0 && return
    lst1 = findlist(st, var1s[1])
    for k = 2:length(var1s)
        if findlist(st, var1s[k]) !== lst1
            error("variable status not aligned: $var1s")
        end
    end
    lst2 = findlist(st, var2)
    if lst1 !== nothing
        removevar!.(Ref(lst1), var1s)
        push!(lst1, var2)
    else
        operate!(st, var2)
    end
    if lst2 !== nothing
        removevar!(lst2, var2)
        push!.(Ref(lst2), var1s)
    else
        operate!.(Ref(st), var1s)
    end
end
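# A minimal sketch of the intended lifecycle (illustrative, not part of the original source):
#
#   st = SymbolTable()
#   allocate!(st, :anc)     # :anc enters `existing`
#   operate!(st, :g)        # :g was never allocated here, so it lands in `unclassified`
#   deallocate!(st, :anc)   # :anc moves to `deallocated`; touching it again throws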
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/utils.jl
code
4048
const GLOBAL_ATOL = Ref(1e-8)

########### macro tools #############
startwithdot(sym::Symbol) = string(sym)[1] == '.'
startwithdot(sym::Expr) = false
startwithdot(sym) = false

function removedot(f)
    string(f)[1] == '.' || error("$f is not a broadcasting function.")
    Symbol(string(f)[2:end])
end

"""
    get_argname(ex)

Return the argument name of a function argument expression, e.g. `x::Float64 = 4` gives `x`.
"""
function get_argname(fname)
    @match fname begin
        ::Symbol => fname
        :($x::$t) => x
        :($x::$t=$y) => x
        :($x=$y) => x
        :($x...) => :($x...)
        :($x::$t...) => :($x...)
        Expr(:parameters, args...) => fname
        _ => error("can not get the function name of expression $fname.")
    end
end

"""
    match_function(ex)

Analyze a function expression; returns a tuple of `(macros, function name, arguments, type parameters (in where {...}), statements in the body)`.
"""
function match_function(ex)
    @match ex begin
        :(function $(fname)($(args...)) $(body...) end) ||
        :($fname($(args...)) = $(body...)) => (nothing, fname, args, [], body)
        Expr(:function, :($fname($(args...)) where {$(ts...)}), xbody) => (nothing, fname, args, ts, xbody.args)
        Expr(:macrocall, mcname, line, fdef) => ([mcname, line], match_function(fdef)[2:end]...)
        _ => error("must input a function, got $ex")
    end
end

"""
    rmlines(ex::Expr)

Remove line number nodes for pretty printing.
"""
rmlines(ex::Expr) = begin
    hd = ex.head
    if hd == :macrocall
        Expr(:macrocall, ex.args[1], nothing, rmlines.(ex.args[3:end])...)
    else
        tl = Any[rmlines(ex) for ex in ex.args if !(ex isa LineNumberNode)]
        Expr(hd, tl...)
    end
end
rmlines(@nospecialize(a)) = a

########### ordered dict ###############
struct MyOrderedDict{TK,TV}
    keys::Vector{TK}
    vals::Vector{TV}
end

function MyOrderedDict{K,V}() where {K,V}
    MyOrderedDict(K[], V[])
end

function Base.setindex!(d::MyOrderedDict, val, key)
    ind = findfirst(x->x===key, d.keys)
    if ind isa Nothing
        push!(d.keys, key)
        push!(d.vals, val)
    else
        @inbounds d.vals[ind] = val
    end
    return d
end

function Base.getindex(d::MyOrderedDict, key)
    ind = findfirst(x->x===key, d.keys)
    if ind isa Nothing
        throw(KeyError(key))
    else
        return d.vals[ind]
    end
end

function Base.delete!(d::MyOrderedDict, key)
    ind = findfirst(x->x==key, d.keys)
    if ind isa Nothing
        throw(KeyError(key))
    else
        deleteat!(d.vals, ind)
        deleteat!(d.keys, ind)
    end
end

Base.length(d::MyOrderedDict) = length(d.keys)

function Base.pop!(d::MyOrderedDict)
    k = pop!(d.keys)
    v = pop!(d.vals)
    k, v
end

Base.isempty(d::MyOrderedDict) = length(d.keys) == 0

########### broadcasting ###############
"""
    unzipped_broadcast(f, args...)

Unzipped broadcast for arrays and tuples, e.g. `SWAP.([1,2,3], [4,5,6])` will do an in-place element-wise swap and return `[4,5,6], [1,2,3]`.
"""
unzipped_broadcast(f) = error("must provide at least one argument in broadcasting!")
function unzipped_broadcast(f, arg::AbstractArray; kwargs...)
    arg .= f.(arg)
end
function unzipped_broadcast(f, arg::Tuple; kwargs...)
    f.(arg)
end
@generated function unzipped_broadcast(f, args::Vararg{<:AbstractArray,N}; kwargs...) where N
    argi = [:(args[$k][i]) for k=1:N]
    quote
        for i = 1:same_length(args)
            ($(argi...),) = f($(argi...); kwargs...)
        end
        return args
    end
end
@generated function unzipped_broadcast(f, args::Vararg{<:Tuple,N}; kwargs...) where N
    quote
        same_length(args)
        res = map(f, args...)
        ($([:($getindex.(res, $i)) for i=1:N]...),)
    end
end

function same_length(args)
    length(args) == 0 && error("cannot broadcast over an empty set of arguments.")
    l = length(args[1])
    for j = 2:length(args)
        @assert l == length(args[j]) "length of arguments should be the same `$(length(args[j])) != $l`"
    end
    return l
end
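# A minimal usage sketch of the ordered dict (illustrative, not part of the original source):
#
#   d = MyOrderedDict{Symbol,Int}()
#   d[:a] = 1; d[:b] = 2
#   d[:a]     # 1
#   pop!(d)   # (:b, 2); insertion order is preserved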
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/variable_analysis.jl
code
10585
function variable_analysis_ex(ex, syms::SymbolTable)
    use!(x) = usevar!(syms, x)
    allocate!(x) = allocatevar!(syms, x)
    deallocate!(x) = deallocatevar!(syms, x)
    @match ex begin
        :($x[$key] ← $val) || :($x[$key] → $val) => (use!(x); use!(key); use!(val))
        :($x ← $val) => allocate!(x)
        :($x → $val) => deallocate!(x)
        :($x ↔ $y) => swapvars!(syms, x, y)
        :($a += $f($(b...))) || :($a -= $f($(b...))) || :($a *= $f($(b...))) || :($a /= $f($(b...))) ||
        :($a .+= $f($(b...))) || :($a .-= $f($(b...))) || :($a .*= $f($(b...))) || :($a ./= $f($(b...))) ||
        :($a .+= $f.($(b...))) || :($a .-= $f.($(b...))) || :($a .*= $f.($(b...))) || :($a ./= $f.($(b...))) => begin
            ex.args[1] = render_arg(a)
            b .= render_arg.(b)
            use!(a)
            use!.(b)
            check_args(Any[a, b...])
        end
        :($a ⊻= $f($(b...))) || :($a .⊻= $f($(b...))) || :($a .⊻= $f.($(b...))) => begin
            ex.args[1] = render_arg(a)
            b .= render_arg.(b)
            use!(a)
            use!(b)
        end
        :($a ⊻= $b || $c) || :($a ⊻= $b && $c) => begin
            ex.args[1] = render_arg(a)
            ex.args[2].args .= render_arg.(ex.args[2].args)
            use!(a)
            use!(b)
        end
        Expr(:if, _...) => variable_analysis_if(ex, syms)
        :(while $condition; $(body...); end) => begin
            julia_usevar!(syms, condition)
            localsyms = SymbolTable(Symbol[], copy(syms.deallocated), Symbol[])
            variable_analysis_ex.(body, Ref(localsyms))
            checksyms(localsyms)
        end
        :(begin $(body...) end) => begin
            variable_analysis_ex.(body, Ref(syms))
        end
        # TODO: allow omitting the step.
        :(for $i=$range; $(body...); end) => begin
            julia_usevar!(syms, range)
            localsyms = SymbolTable(Symbol[], copy(syms.deallocated), Symbol[])
            variable_analysis_ex.(body, Ref(localsyms))
            checksyms(localsyms)
            ex
        end
        :(@safe $line $subex) => julia_usevar!(syms, subex)
        :(@cuda $line $(args...)) => variable_analysis_ex(args[end], syms)
        :(@launchkernel $line $(args...)) => variable_analysis_ex(args[end], syms)
        :(@inbounds $line $subex) => variable_analysis_ex(subex, syms)
        :(@simd $line $subex) => variable_analysis_ex(subex, syms)
        :(@threads $line $subex) => variable_analysis_ex(subex, syms)
        :(@avx $line $subex) => variable_analysis_ex(subex, syms)
        :(@invcheckoff $line $subex) => variable_analysis_ex(subex, syms)
        # 1. precompile to expand macros
        # 2. get the dual expression
        # 3. precompile to analyze variables
        :($f($(args...))) => begin
            args .= render_arg.(args)
            check_args(args)
            use!.(args)
        end
        :($f.($(args...))) => begin
            args .= render_arg.(args)
            check_args(args)
            use!.(args)
        end
        :(nothing) => nothing
        ::LineNumberNode => nothing
        ::Nothing => nothing
        _ => error("unsupported statement: $ex")
    end
end

function variable_analysis_if(ex, exsyms)
    syms = copy(exsyms)
    julia_usevar!(exsyms, ex.args[1])
    variable_analysis_ex.(ex.args[2].args, Ref(exsyms))
    checksyms(exsyms, syms)
    if length(ex.args) == 3
        if ex.args[3].head == :elseif
            variable_analysis_if(ex.args[3], exsyms)
        elseif ex.args[3].head == :block
            syms = copy(exsyms)
            variable_analysis_ex.(ex.args[3].args, Ref(exsyms))
            checksyms(exsyms, syms)
        else
            error("unknown statement following `if` $ex.")
        end
    end
end

usevar!(syms::SymbolTable, arg) = @match arg begin
    ::Number || ::String => nothing
    ::Symbol => _isconst(arg) || operate!(syms, arg)
    :(@skip! $line $x) => julia_usevar!(syms, x)
    :($x.$k) => usevar!(syms, x)
    :($a |> subarray($(ranges...))) => (usevar!(syms, a); julia_usevar!.(Ref(syms), ranges))
    :($x |> tget($f)) || :($x |> $f) || :($x .|> $f) || :($x::$f) => (usevar!(syms, x); julia_usevar!(syms, f))
    :($x') || :(-$x) => usevar!(syms, x)
    :($t{$(p...)}($(args...))) => begin
        usevar!(syms, t)
        usevar!.(Ref(syms), p)
        usevar!.(Ref(syms), args)
    end
    :($a[$(x...)]) => begin
        usevar!(syms, a)
        usevar!.(Ref(syms), x)
    end
    :(($(args...),)) => usevar!.(Ref(syms), args)
    _ => julia_usevar!(syms, arg)
end

julia_usevar!(syms::SymbolTable, ex) = @match ex begin
    ::Symbol => _isconst(ex) || operate!(syms, ex)
    :($a:$b:$c) => julia_usevar!.(Ref(syms), [a, b, c])
    :($a:$c) => julia_usevar!.(Ref(syms), [a, c])
    :($a && $b) || :($a || $b) || :($a[$b]) => julia_usevar!.(Ref(syms), [a, b])
    :($a.$b) => julia_usevar!(syms, a)
    :(($(v...),)) || :(begin $(v...) end) => julia_usevar!.(Ref(syms), v)
    :($f($(v...))) || :($f[$(v...)]) => begin
        julia_usevar!(syms, f)
        julia_usevar!.(Ref(syms), v)
    end
    :($args...) => julia_usevar!(syms, args)
    Expr(:parameters, targets...) => julia_usevar!.(Ref(syms), targets)
    Expr(:kw, tar, val) => julia_usevar!(syms, val)
    ::LineNumberNode => nothing
    _ => nothing
end

# push a new variable to variable set `x`, for allocating `target`
allocatevar!(st::SymbolTable, target) = @match target begin
    ::Symbol => allocate!(st, target)
    :(($(tar...),)) => begin
        for t in tar
            allocatevar!(st, t)
        end
    end
    :($tar = $y) => allocatevar!(st, y)
    :($tar...) => allocatevar!(st, tar)
    :($tar::$tp) => allocatevar!(st, tar)
    Expr(:parameters, targets...) => begin
        for tar in targets
            allocatevar!(st, tar)
        end
    end
    Expr(:kw, tar, val) => begin
        allocatevar!(st, tar)
    end
    _ => _isconst(target) || error("unknown variable expression $(target)")
end

# pop a variable from variable set `x`, for deallocating `target`
deallocatevar!(st::SymbolTable, target) = @match target begin
    ::Symbol => deallocate!(st, target)
    :(($(tar...),)) => begin
        for t in tar
            deallocatevar!(st, t)
        end
    end
    _ => error("unknown variable expression $(target)")
end

function swapvars!(st::SymbolTable, x, y)
    e1 = isemptyvar(x)
    e2 = isemptyvar(y)
    # check assertions
    for (e, v) in ((e1, x), (e2, y))
        e && dosymbol(v) do sv
            if sv ∈ st.existing || sv ∈ st.unclassified
                throw(InvertibilityError("can not assert variable to empty: $v"))
            end
        end
    end
    if e1 && e2
    elseif e1 && !e2
        dosymbol(sx -> allocate!(st, sx), x)
        dosymbol(sy -> deallocate!(st, sy), y)
        usevar!(st, x)
    elseif !e1 && e2
        dosymbol(sx -> deallocate!(st, sx), x)
        dosymbol(sy -> allocate!(st, sy), y)
        usevar!(st, y)
    else  # both are nonempty
        sx = dosymbol(identity, x)
        sy = dosymbol(identity, y)
        if sx === nothing || sy === nothing      # e.g. x.y ↔ k.c
            usevar!(st, x)
            usevar!(st, y)
        elseif sx isa Symbol && sy isa Symbol    # e.g. x ↔ y
            swapsyms!(st, sx, sy)
        elseif sx isa Vector && sy isa Vector    # e.g. (x, y) ↔ (a, b)
            @assert length(sx) == length(sy)
            swapsyms!.(Ref(st), sx, sy)
        elseif sx isa Vector && sy isa Symbol    # e.g. (x, y) ↔ args
            swapsyms_asymetric!(st, sx, sy)
        elseif sx isa Symbol && sy isa Vector    # e.g. args ↔ (x, y)
            swapsyms_asymetric!(st, sy, sx)
        end
    end
end

isemptyvar(ex) = @match ex begin
    :($x[end+1]) => true
    :($x::∅) => true
    _ => false
end

dosymbol(f, ex) = @match ex begin
    x::Symbol => f(x)
    :(@fields $line $sym) => dosymbol(f, sym)
    :($x::$T) => dosymbol(f, x)
    :(($(args...),)) => dosymbol.(Ref(f), args)
    _ => nothing
end

_isconst(x) = @match x begin
    ::Symbol => x ∈ Symbol[:im, :π, :Float64, :Float32, :Int, :Int64, :Int32, :Bool, :UInt8, :String, :Char, :ComplexF64, :ComplexF32, :(:), :end, :nothing]
    ::QuoteNode || ::Bool || ::Char || ::Number || ::String => true
    :($f($(args...))) => all(_isconst, args)
    :(@const $line $ex) => true
    _ => false
end

# avoid shared read/write
function check_args(args)
    args_kernel = []
    for i = 1:length(args)
        out = memkernel(args[i])
        if out isa Vector
            for o in out
                if o !== nothing
                    push!(args_kernel, o)
                end
            end
        elseif out !== nothing
            push!(args_kernel, out)
        end
    end
    # error on shared read or shared write.
    for i = 1:length(args_kernel)
        for j in i+1:length(args_kernel)
            if args_kernel[i] == args_kernel[j]
                throw(InvertibilityError("$i-th argument and $j-th argument share the same memory $(args_kernel[i]); shared read and shared write are not allowed!"))
            end
        end
    end
end

# Returns the memory `identifier`; it is used to avoid shared read/write.
memkernel(ex) = @match ex begin
    ::Symbol => ex
    :(@const $line $x) => memkernel(x)
    :($a |> subarray($(inds...))) || :($a[$(inds...)]) => :($(memkernel(a))[$(inds...)])
    :($x.$y) => :($(memkernel(x)).$y)
    :($a |> tget($x)) => :($(memkernel(a))[$x])
    :($x |> $f) || :($x .|> $f) || :($x') || :(-$x) || :($x...) => memkernel(x)
    :($t{$(p...)}($(args...))) || :(($(args...),)) => memkernel.(args)
    _ => nothing  # Julia scope, including `@skip!`, `f(x)`, etc.
end

# Modify the argument, e.g. `x.[1,3:5]` is rendered as `x |> subarray(1,3:5)`.
render_arg(ex) = @match ex begin
    ::Symbol => ex
    :(@skip! $line $x) => ex
    :(@const $line $x) => Expr(:macrocall, Symbol("@const"), line, render_arg(x))
    :($a.[$(inds...)]) => :($(render_arg(a)) |> subarray($(inds...)))
    :($a |> subarray($(inds...))) => :($(render_arg(a)) |> subarray($(inds...)))
    :($a[$(inds...)]) => :($(render_arg(a))[$(inds...)])
    :($x.$y) => :($(render_arg(x)).$y)
    :($a |> tget($x)) => :($(render_arg(a)) |> tget($x))
    :($x |> $f) => :($(render_arg(x)) |> $f)
    :($x .|> $f) => :($(render_arg(x)) .|> $f)
    :($x') => :($(render_arg(x))')
    :(-$x) => :(-$(render_arg(x)))
    :($ag...) => :($(render_arg(ag))...)
    :($t{$(p...)}($(args...))) => :($t{($p...)}($(render_arg.(args)...)))
    :(($(args...),)) => :(($(render_arg.(args)...),))
    _ => ex  # Julia scope, including `@skip!`, `f(x)`, etc.
end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
src/vars.jl
code
1440
using Base.Cartesian
export chfield

############# ancillas ################
export @fieldview

"""
    @fieldview fname(x::TYPE) = x.fieldname
    @fieldview fname(x::TYPE) = x[i]
    ...

Create a *field view* function that can be accessed by a reversible program.

```jldoctest; setup=:(using NiLangCore)
julia> struct GVar{T, GT}
           x::T
           g::GT
       end

julia> @fieldview xx(x::GVar) = x.x

julia> chfield(GVar(1.0, 0.0), xx, 2.0)
GVar{Float64, Float64}(2.0, 0.0)
```
"""
macro fieldview(ex)
    @match ex begin
        :($f($obj::$tp) = begin $line; $ex end) => begin
            xval = gensym("value")
            esc(Expr(:block,
                :(Base.@__doc__ $f($obj::$tp) = begin $line; $ex end),
                :($NiLangCore.chfield($obj::$tp, ::typeof($f), $xval) = $(Expr(:block, assign_ex(ex, xval, false), obj)))
            ))
        end
        _ => error("expect expression `f(obj::type) = obj.prop`, got $ex")
    end
end

chfield(a, b, c) = error("chfield($a, $b, $c) not defined!")
chfield(x, ::typeof(identity), xval) = xval
chfield(x::T, ::typeof(-), y::T) where T = -y
chfield(x::T, ::typeof(adjoint), y) where T = adjoint(y)

############ dataview patches ############
export tget, subarray

"""
    tget(i::Int)

Get the i-th entry of a tuple.
"""
tget(i::Int) = x::Tuple -> x[i]

"""
    subarray(ranges...)

Get a subarray, same as `view` in Base.
"""
subarray(args...) = x -> view(x, args...)
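# A minimal usage sketch of the data views (illustrative, not part of the original source):
#
#   tget(2)((1.0, 2.0, 3.0))   # 2.0
#   subarray(1:2)([1, 2, 3])   # a view of the first two elements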
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/Core.jl
code
621
using Test, NiLangCore @testset "basic" begin @test ~(~sin) === sin @test ~(~typeof(sin)) === typeof(sin) @test isreflexive(XorEq(NiLangCore.logical_or)) println(XorEq(*)) println(PlusEq(+)) println(MinusEq(-)) println(MulEq(*)) println(DivEq(/)) end @static if VERSION > v"1.5.100" @testset "composite function" begin @i function f1(x) x.:1 += x.:2 end @i function f2(x) x.:2 += cos(x.:1) end @i function f3(x) x.:1 ↔ x.:2 end x = (2.0, 3.0) y = (f3∘f2∘f1)(x) z = (~(f3∘f2∘f1))(y) @show x, z @test all(x .≈ z) end end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/compiler.jl
code
19887
using NiLangCore using Test using Base.Threads @testset "to_standard_format" begin for (OP, FUNC) in [(:+=, PlusEq), (:-=, MinusEq), (:*=, MulEq), (:/=, DivEq), (:⊻=, XorEq)] @test NiLangCore.to_standard_format(Expr(OP, :x, :y)) == :($FUNC(identity)(x, y)) @test NiLangCore.to_standard_format(Expr(OP, :x, :(sin(y; z=3)))) == :($FUNC(sin)(x, y; z=3)) OPD = Symbol(:., OP) @test NiLangCore.to_standard_format(Expr(OPD, :x, :y)) == :($FUNC(identity).(x, y)) @test NiLangCore.to_standard_format(Expr(OPD, :x, :(sin.(y)))) == :($FUNC(sin).(x, y)) @test NiLangCore.to_standard_format(Expr(OPD, :x, :(y .* z))) == :($FUNC(*).(x, y, z)) end @test NiLangCore.to_standard_format(Expr(:⊻=, :x, :(y && z))) == :($XorEq($(NiLangCore.logical_and))(x, y, z)) @test NiLangCore.to_standard_format(Expr(:⊻=, :x, :(y || z))) == :($XorEq($(NiLangCore.logical_or))(x, y, z)) end @testset "i" begin @i function test1(a::T, b, out) where T<:Number add(a, b) out += a * b end @i function tt(a, b) out ← 0.0 test1(a, b, out) (~test1)(a, b, out) a += b out → 0.0 end # compute (a+b)*b -> out x = 3.0 y = 4.0 out = 0.0 @test isreversible(test1, Tuple{Number, Any, Any}) @test check_inv(test1, (x, y, out)) @test check_inv(tt, (x, y)) @test check_inv(tt, (x, y)) end @testset "if statement 1" begin # compute (a+b)*b -> out @i function test1(a, b, out) add(a, b) if (a > 8, a > 8) out += a*b else end end x = 3 y = 4 out = 0 @instr test1(x, y, out) @test out==0 @test x==7 @instr (~test1)(x, y, out) @test out==0 @test x==3 end @testset "if statement error" begin x = 3 y = 4 out = 0 # compute (a+b)*b -> out @i function test1(a, b, out) add(a, b) if (out < 4, out < 4) out += a*b else end end @test_throws InvertibilityError test1(x, y, out) end @testset "if statement 3" begin x = 3 y = 4 out = 0 @i @inline function test1(a, b, out) add(a, b) if (a > 2, a > 2) out += a*b else end end x = 3 y = 4 out = 0 @instr test1(x, y, out) @test out==28 @instr (~test1)(x, y, out) @test out==0 end @testset "if statement 4" begin @i function test1(a, b, out) add(a, b) if a > 8.0 out += a*b end end @test test1(1.0, 8.0, 0.0)[3] == 72.0 @i function test2(a, b) add(a, b) if a > 8.0 a -= b^2 end end @test_throws InvertibilityError test2(1.0, 8.0) @test_throws LoadError macroexpand(Main, :(@i function test3(a, b) add(a, b) if a > 8.0 a -= b*b end end)) end @testset "for" begin @i function looper(x, y, k) for i=1:1:k x += y end end x = 0.0 y = 1.0 k = 3 @instr looper(x, y, k) @test x == 3 @instr (~looper)(x, y, k) @test x == 0.0 shiba = 18 @i function looper2(x, y, k) for i=1:1:k k += shiba x += y end end @test_throws InvertibilityError looper2(x, y, k) end @testset "while" begin @i function looper(x, y) while (x<100, x>0) x += y end end x = 0.0 y = 9 @instr looper(x, y) @test x == 108 @instr (~looper)(x, y) @test x == 0.0 @i function looper2(x, y) while (x<100, x>-10) x += y end end @test_throws InvertibilityError looper2(x, y) @test_throws LoadError macroexpand(@__MODULE__, :(@i function looper3(x, y) while (x<100, x>0) z ← 0 x += y z += 1 end end)) end @testset "ancilla" begin one, ten = 1, 10 @i function looper(x, y) z ← 0 x += y z += one z -= one z → 0 end x = 0.0 y = 9 @instr looper(x, y) @test x[] == 9 @instr (~looper)(x, y) @test x[] == 0.0 @i function looper2(x, y) z ← 0 x += y z += one z -= ten z → 0 end x = 0.0 y = 9 @test_throws InvertibilityError looper2(x, y) end @testset "broadcast" begin # compute (a+b)*b -> out @i function test1(a, b) a .+= b end x = [3, 1.0] y = [4, 2.0] @instr test1(x, y) @test x == [7, 3.0] @instr (~test1)(x, y) @test x == [3, 
1.0] @i function test2(a, b, out) a .+= identity.(b) out .+= (a .* b) end x = Array([3, 1.0]) y = [4, 2.0] out = Array([0.0, 1.0]) @instr test2(x, y, out) @test out==[28, 7] @test check_inv(test2, (x, y, out)) end @testset "broadcast arr" begin @i function f5(x, y, z, a, b) x += y + z b += a + x end @i function f4(x, y, z, a) x += y + z a += y + x end @i function f3(x, y, z) y += x + z end @i function f2(x, y) y += x end @i function f1(x) l ← zero(x) l += x x -= 2 * l l += x l → zero(x) end a = randn(10) b = randn(10) c = randn(10) d = randn(10) e = randn(10) aa = copy(a) @instr f1.(aa) @test aa ≈ -a aa = copy(a) bb = copy(b) @instr f2.(aa, bb) @test aa ≈ a @test bb ≈ b + a aa = copy(a) bb = copy(b) cc = copy(c) @instr f3.(aa, bb, cc) @test aa ≈ a @test bb ≈ b + a + c @test cc ≈ c aa = copy(a) bb = copy(b) cc = copy(c) dd = copy(d) @instr f4.(aa, bb, cc, dd) @test aa ≈ a + b + c @test bb ≈ b @test cc ≈ c @test dd ≈ a + 2b + c + d aa = copy(a) bb = copy(b) cc = copy(c) dd = copy(d) ee = copy(e) @instr f5.(aa, bb, cc, dd, ee) @test aa ≈ a + b + c @test bb ≈ b @test cc ≈ c @test dd ≈ d @test ee ≈ a + b + c + d + e x = randn(5) @test_throws AssertionError @instr x .+= c end @testset "broadcast tuple" begin @i function f5(x, y, z, a, b) x += y + z b += a + x end @i function f4(x, y, z, a) x += y + z a += y + x end @i function f3(x, y, z) y += x + z end @i function f2(x, y) y += x end @i function f1(x) l ← zero(x) l += x x -= 2 * l l += x l → zero(x) end a = (1,2) b = (3,1) c = (6,7) d = (1,11) e = (4,1) aa = a @instr f1.(aa) @test aa == -1 .* a aa = a bb = b @instr f2.(aa, bb) @test aa == a @test bb == b .+ a aa = a bb = b cc = c @instr f3.(aa, bb, cc) @test aa == a @test bb == b .+ a .+ c @test cc == c aa = a bb = b cc = c dd = d @instr f4.(aa, bb, cc, dd) @test aa == a .+ b .+ c @test bb == b @test cc == c @test dd == a .+ 2 .* b .+ c .+ d aa = a bb = b cc = c dd = d ee = e @instr f5.(aa, bb, cc, dd, ee) @test aa == a .+ b .+ c @test bb == b @test cc == c @test dd == d @test ee == a .+ b .+ c .+ d .+ e x = (2,1,5) @test_throws AssertionError @instr x .+= c end @testset "broadcast 2" begin # compute (a+b)*b -> out @i function test1(a, b) a += b end x = [3, 1.0] y = [4, 2.0] @instr test1.(x, y) @test x == [7, 3.0] @instr (~test1).(x, y) @test x == [3, 1.0] @i function test2(a, b, out) add(a, b) out += (a * b) end x = [3, 1.0] y = [4, 2.0] out = [0.0, 1.0] @instr test2.(x, y, out) @test out==[28, 7] @instr (~test2).(x, y, out) @test out==[0, 1.0] args = (x, y, out) @instr test2.(args...) 
@test args[3]==[28, 7] end @testset "neg sign" begin @i function test(out, x, y) out += x * (-y) end @test check_inv(test, (0.1, 2.0, -2.5); verbose=true) end @testset "@ibounds" begin @i function test(x, y) for i=1:length(x) @inbounds x[i] += y[i] end end @test test([1,2], [2,3]) == ([3,5], [2,3]) end @testset "kwargs" begin @i function test(out, x; y) out += x * (-y) end @test check_inv(test, (0.1, 2.0); y=0.5, verbose=true) end @testset "routines" begin @i function test(out, x) @routine begin out += x end ~@routine end out, x = 0.0, 1.0 @instr test(out, x) @test out == 0.0 end @testset "inverse a prog" begin @i function test(out, x) ~(out += x; out += x) ~for i=1:3 out += x end end out, x = 0.0, 1.0 @test check_inv(test, (out, x)) @instr test(out, x) @test out == -5.0 end @testset "invcheck" begin @i function test(out, x) anc ← 0 @invcheckoff for i=1:x[] x[] -= 1 end @invcheckoff while (anc<3, anc<3) anc += 1 end out += anc @invcheckoff anc → 0 end res = test(0, Ref(7)) @test res[1] == 3 @test res[2][] == 0 end @testset "nilang ir" begin ex = :( @inline function f(x!::T, y) where T anc ← zero(T) @routine anc += x! x! += y * anc ~@routine anc → zero(T) end ) ex2 = :( @inline function f(x!::T, y) where T anc ← zero(T) anc += identity(x!) x! += y * anc anc -= identity(x!) anc → zero(T) end) ex3 = :( @inline function (~f)(x!::T, y) where T anc ← zero(T) anc += identity(x!) x! -= y * anc anc -= identity(x!) anc → zero(T) end) @test nilang_ir(@__MODULE__, ex) |> NiLangCore.rmlines == ex2 |> NiLangCore.rmlines @test nilang_ir(@__MODULE__, ex; reversed=true) |> NiLangCore.rmlines == ex3 |> NiLangCore.rmlines end @testset "protectf" begin struct C<:Function end # protected @i function (a::C)(x) @safe @show a if (protectf(a) isa Inv, ~) add(x, 1.0) else sub(x, 1.0) end end a = C() @test (~a)(a(1.0)) == 1.0 # not protected @i function (a::C)(x) @safe @show a if (a isa Inv, ~) add(x, 1.0) else sub(x, 1.0) end end @test (~a)(a(1.0)) == -1.0 end @testset "ifelse statement" begin @i function f(x, y) if (x > 0, ~) y += 1 elseif (x < 0, ~) y += 2 else y += 3 end end @test f(1, 0) == (1, 1) @test f(-2, 0) == (-2, 2) @test f(0, 0) == (0, 3) @i function f2(x, y) if (x > 0, x < 0) y += 1 elseif (x < 0, x < 0) y += 2 else y += 3 end end @test_throws InvertibilityError f2(-1, 0) end @testset "skip!" begin x = 0.4 @instr (@skip! 
3) += x @test x == 0.4 y = 0.3 @instr x += @const y @test x == 0.7 @test y == 0.3 end @testset "for x in range" begin @i function f(x, y) for item in y x += item end end @test check_inv(f, (0.0, [1,2,5])) end @testset "@simd and @threads" begin @i function f(x) @threads for i=1:length(x) x[i] += 1 end end x = [1,2,3] @test f(x) == [2,3,4] @i function f2(x) @simd for i=1:length(x) x[i] += 1 end end x = [1,2,3] @test f2(x) == [2,3,4] end @testset "xor over ||" begin x = false @instr x ⊻= true || false @test x @instr x ⊻= true && false @test x end macro zeros(T, x, y) esc(:($x ← zero($T); $y ← zero($T))) end @testset "macro" begin @i function f(x) @zeros Float64 a b x += a * b ~@zeros Float64 a b end @test f(3.0) == 3.0 end @testset "allow nothing pass" begin @i function f(x) nothing end @test f(2) == 2 end @testset "ancilla check" begin ex1 = :(@i function f(x) x ← 0 end) @test_throws LoadError macroexpand(Main, ex1) ex2 = :(@i function f(x) y ← 0 y ← 0 end) @test_throws LoadError macroexpand(Main, ex2) ex3 = :(@i function f(x) y ← 0 y → 0 end) @test macroexpand(Main, ex3) isa Expr ex4 = :(@i function f(x; y=5) y ← 0 end) @test_throws LoadError macroexpand(Main, ex4) ex5 = :(@i function f(x) y → 0 end) @test_throws LoadError macroexpand(Main, ex5) ex6 = :(@i function f(x::Int) y ← 0 y → 0 end) @test macroexpand(Main, ex6) isa Expr ex7 = :(@i function f(x::Int) if x>3 y ← 0 y → 0 elseif x<-3 y ← 0 y → 0 else y ← 0 y → 0 end end) @test macroexpand(Main, ex7) isa Expr ex8 = :(@i function f(x; y=5) z ← 0 z → 0 end) @test macroexpand(Main, ex8) isa Expr ex9 = :(@i function f(x; y) z ← 0 z → 0 end) @test macroexpand(Main, ex9) isa Expr ex10 = :(@i function f(x; y) begin z ← 0 end ~begin z ← 0 end end) @test macroexpand(Main, ex10) isa Expr end @testset "dict access" begin d = Dict(3=>4) @instr d[3] → 4 @instr d[4] ← 3 @test d == Dict(4=>3) @test_throws InvertibilityError @instr d[4] → 5 @test (@instr @invcheckoff d[8] → 5; true) @test_throws InvertibilityError @instr d[4] ← 5 @test (@instr @invcheckoff d[4] ← 5; true) end @testset "@routine,~@routine" begin @test_throws LoadError macroexpand(Main, :(@i function f(x) @routine begin end end)) @test_throws LoadError macroexpand(Main, :(@i function f(x) ~@routine end)) @test macroexpand(Main, :(@i function f(x) @routine begin end ~@routine end)) !== nothing end @testset "@from post while pre" begin @i function f() x ← 5 z ← 0 @from z==0 while x > 0 x -= 1 z += 1 end z → 5 x → 0 end @test f() == () @test (~f)() == () end @testset "argument with function call" begin @test_throws LoadError @macroexpand @i function f(x, y) x += sin(exp(y)) end @i function f(x, y) x += sin(exp(0.4)) + y end end @testset "allocation multiple vars" begin info = NiLangCore.PreInfo() @test NiLangCore.precom_ex(NiLangCore, :(x,y ← var), info) == :((x, y) ← var) @test NiLangCore.precom_ex(NiLangCore, :(x,y → var), info) == :((x, y) → var) @test NiLangCore.precom_ex(NiLangCore, :((x,y) ↔ (a, b)), info) == :((x,y) ↔ (a,b)) @test (@code_reverse (x,y) ← var) == :((x, y) → var) @test (@code_reverse (x,y) → var) == :((x, y) ← var) @test (@code_julia (x,y) ← var) == :((x, y) = var) @test (@code_julia (x,y) → var) == :(try $(NiLangCore.deanc)((x, y), var) catch e $(:(println("deallocate fail `$($(QuoteNode(:((x, y))))) → $(:var)`")) |> NiLangCore.rmlines) throw(e) end) x = randn(2,4) @i function f(y, x) m, n ← size(x) (l, k) ← size(x) y += m*n y += l*k (l, k) → size(x) m, n → size(x) end twosize = f(0, x)[1] @test twosize == 16 @test (~f)(twosize, x)[1] == 0 @i function g(x) (m, n) ← 
size(x) (m, n) → (7, 5) end @test_throws InvertibilityError g(x) @test_throws InvertibilityError (~g)(x) end @testset "argument without argname" begin @i function f(::Complex) end @test f(1+2im) == 1+2im end @testset "tuple input" begin @i function f(x::Tuple{<:Tuple, <:Real}) f(x.:1) (x.:1).:1 += x.:2 end @i function f(x::Tuple{<:Real, <:Real}) x.:1 += x.:2 end @i function g(data) f(((data.:1, data.:2), data.:3)) end @test g((1,2,3)) == (6,2,3) end @testset "single/zero argument" begin @i function f(x) neg(x) end @i function g(x::Vector) neg.(x) end @test f(3) == -3 @test g([3, 2]) == [-3, -2] x = (3,) @instr f(x...) @test x == (-3,) x = ([3, 4],) @instr f.(x...) @test x == ([-3, -4],) @i function f() end x = () @instr f(x...) @test x == () end @testset "type constructor" begin @i function f(x, y, a, b) add(Complex{}(x, y), Complex{}(a, b)) end @test f(1,2, 3, 4) == (4, 6, 3, 4) @test_throws LoadError macroexpand(NiLangCore, :(@i function f(x, y, a, b) add(Complex(x, y), Complex{}(a, b)) end)) @i function g(x::Inv, y::Inv) add(x.f, y.f) end @i function g(x, y) g(Inv{}(x), Inv{}(y)) end @test g(2, 3) == (5, 3) end @testset "variable_analysis" begin # kwargs should not be assigned @test_throws LoadError macroexpand(@__MODULE__, :(@i function f1(x; y=4) y ← 5 y → 5 end)) # deallocated variables should not be used @test_throws LoadError macroexpand(@__MODULE__, :(@i function f1(x; y=4) z ← 5 z → 5 x += 2 * z end)) # deallocated variables should not be used in local scope @test_throws LoadError macroexpand(@__MODULE__, :(@i function f1(x; y=4) z ← 5 z → 5 for i=1:10 x += 2 * z end end)) end @testset "boolean" begin @i function f1(x, y, z) x ⊻= true y .⊻= z end @test f1(false, [true, false], [true, false]) == (true, [false, false], [true, false]) @i function f2(x, y, z) z[2] ⊻= true && y[1] z[1] ⊻= z[2] || x end @test f2(false, [true, false], [true, false]) == (false, [true, false], [false, true]) end @testset "swap ↔" begin @i function f1(x, y) j::∅ ↔ k::∅ # dummy swap a::∅ ↔ x a ↔ y a ↔ x::∅ # ↔ is symmetric end @test f1(2, 3) == (3, 2) @test check_inv(f1, (2, 3)) # stack @i function f2(x, y) x[end+1] ↔ y y ← 2 end @test f2([1,2,3], 4) == ([1,2,3,4], 2) @test check_inv(f2, ([1,2,3], 3)) @i function f4(x, y) y ↔ x[end+1] y ← 2 end @test f4([1,2,3], 4) == ([1,2,3,4], 2) @test check_inv(f4, ([1,2,3], 3)) @i function f3(x, y::TY, s) where TY y → _zero(TY) x[end] ↔ (y::TY)::∅ @safe @show x[2], s x[2] ↔ s end @test f3(Float32[1,2,3], 0.0, 4f0) == (Float32[1,4], 3.0, 2f0) @test check_inv(f3, (Float32[1,2,3], 0.0, 4f0)) end @testset "feed tuple and types" begin @i function f3(a, d::Complex) a.:1 += d.re d.re ↔ d.im end @i function f4(a, b, c, d, e) f3((a, b, c), Complex{}(d, e)) end @test f4(1,2,3,4,5) == (5,2,3,5,4) @test check_inv(f4, (1,2,3,4,5)) end @testset "exchange tuple and fields" begin @i function f1(x, y, z) (x, y) ↔ @fields z end @test f1(1,2, 3+4im) == (3,4,1+2im) @i function f2(re, x) r, i ← @fields x re += r r, i → @fields x end @test f2(0.0, 3.0+2im) == (3.0, 3.0 + 2.0im) @i function f3(x, y, z) (@fields z) ↔ (x, y) end @test f3(1,2, 3+4im) == (3,4,1+2im) @test_throws LoadError macroexpand(@__MODULE__, :(@i function f3(x, y, z) (x, y) ↔ (z, j) end)) @i function f4(x, y, z, j) (x, y) ↔ (z, j) end @test f4(1,2, 3, 4) == (3,4,1,2) @i function swap_fields(obj::Complex) (x, y)::∅ ↔ @fields obj x += y (x, y) ↔ (@fields obj)::∅ end @test swap_fields(1+2im) == (3+2im) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/instr.jl
code
4877
using NiLangCore using NiLangCore: compile_ex, dual_ex, precom_ex, memkernel, render_arg, check_args using Test import Base: +, - value(x) = x NiLangCore.chfield(x::T, ::typeof(value), y::T) where T = y function add(a!::Number, b::Number) a!+b, b end function neg(b::Number) -b end @selfdual neg @i function add(a!, b) add(a! |> value, b |> value) end function sub(a!::Number, b::Number) a!-b, b end @i function sub(a!, b) sub(a! |> value, b |> value) end @dual add sub function XOR(a!::Integer, b::Integer) xor(a!, b), b end @selfdual XOR #@nograd XOR @testset "boolean" begin x = false @instr x ⊻= true @test x @instr x ⊻= true || false @test !x @instr x ⊻= true && false @instr x ⊻= !false @test x end @testset "@dual" begin @test isreversible(add, Tuple{Any,Any}) @test isreversible(sub, Tuple{Any,Any}) @test !isreflexive(add) @test ~(add) == sub a=2.0 b=1.0 @instr add(a, b) @test a == 3.0 args = (1,2) @instr add(args...) @test args == (3,2) @instr sub(a, b) @test a == 2.0 @test check_inv(add, (a, b)) @test isprimitive(add) @test isprimitive(sub) end @testset "@selfdual" begin @test !isreversible(XOR, Tuple{Any, Any}) @test !isreversible(~XOR, Tuple{Any, Any}) @test isreversible(~XOR, Tuple{Integer, Integer}) @test isreversible(XOR, Tuple{Integer, Integer}) @test isreflexive(XOR) @test isprimitive(XOR) @test ~(XOR) == XOR a=2 b=1 @instr XOR(a, b) @test a == 3 @instr XOR(a, b) @test a == 2 end @testset "+=, -=" begin x = 1.0 y = 1.0 @instr PlusEq(exp)(y, x) @test x ≈ 1 @test y ≈ 1+exp(1.0) @instr (~PlusEq(exp))(y, x) @test x ≈ 1 @test y ≈ 1 end @testset "+= and const" begin x = 0.5 @instr x += π @test x == 0.5+π @instr x += log(π) @test x == 0.5 + π + log(π) @instr x += log(π)/2 @test x == 0.5 + π + 3*log(π)/2 @instr x += log(2*π)/2 @test x == 0.5 + π + 3*log(π)/2 + log(2π)/2 end @testset "+= keyword functions" begin g(x; y=2) = x^y z = 0.0 x = 2.0 @instr z += g(x; y=4) @test z == 16.0 end @testset "constant value" begin @test @const 2 == 2 @test NiLangCore._isconst(:(@const grad(x))) end @testset "+=, -=, *=, /=" begin @test compile_ex(@__MODULE__, :(x += y * z), NiLangCore.CompileInfo()).args[1].args[2] == :($PlusEq(*)(x, y, z)) @test compile_ex(@__MODULE__, dual_ex(@__MODULE__, :(x -= y * z)), NiLangCore.CompileInfo()).args[1].args[2] == :($PlusEq(*)(x, y, z)) @test compile_ex(@__MODULE__, :(x /= y * z), NiLangCore.CompileInfo()).args[1].args[2] == :($DivEq(*)(x, y, z)) @test compile_ex(@__MODULE__, dual_ex(@__MODULE__, :(x *= y * z)), NiLangCore.CompileInfo()).args[1].args[2] == :($DivEq(*)(x, y, z)) @test ~MulEq(*) == DivEq(*) @test ~DivEq(*) == MulEq(*) function (g::MulEq)(y, a, b) y * g.f(a, b), a, b end function (g::DivEq)(y, a, b) y / g.f(a, b), a, b end a, b, c = 1.0, 2.0, 3.0 @instr a *= b + c @test a == 5.0 @instr a /= b + c @test a == 1.0 end @testset "shared read write check" begin for (x, y) in [ (:((-x[3].g' |> NEG).k[5]) , :((x[3]).g.k[5])) (:((-(x |> subarray(3)).g' |> NEG).k[5]) , :((x[3]).g.k[5])) (:(@skip! x.g) , nothing) (:(@const x .|> g) , :x) (:(cos.(x[2])) , nothing) (:(cos(x[2])) , nothing) (:((x |> g)...) 
, :x) (:((x |> g, y.:1)) , [:x, :(y.:1)]) (:((x |> g, y |> tget(1))) , [:x, :(y[1])])] @test memkernel(deepcopy(x)) == y @test render_arg(deepcopy(x)) == x end @test render_arg(:(x.y.[2:3])) == :(x.y |> subarray(2:3)) @test memkernel(:(x.y |> subarray(2:3))) == (:(x.y[2:3])) @test render_arg(:(x.y.[2:3] |> value)) == :(x.y |> subarray(2:3) |> value) @test memkernel(:(x.y |> subarray(2:3) |> value)) == :(x.y[2:3]) @test_throws InvertibilityError check_args([:a, :(a |> grad)]) @test check_args([:(a.x), :(a.g |> grad)]) isa Nothing @test_throws InvertibilityError check_args([:(a.x), :(b[3]), :(b[3])]) @test_throws InvertibilityError check_args([:(a.x), :((b, a.x))]) isa Nothing # TODO: check variable on the same tree, like `a.b` and `a` end @testset "dual type" begin struct AddX{T} x::T end struct SubX{T} x::T end @dualtype AddX SubX @dualtype AddX SubX @i function (f::AddX)(x::Real) end @test hasmethod(AddX(3), Tuple{Real}) @test hasmethod(SubX(3), Tuple{Real}) for (TA, TB) in [(AddX, SubX), (MulEq, DivEq), (XorEq, XorEq), (PlusEq, MinusEq)] @test invtype(TA) == TB @test invtype(TA{typeof(*)}) == TB{typeof(*)} @test invtype(TB) == TA @test invtype(TB{typeof(*)}) == TA{typeof(*)} end end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/invtype.jl
code
1268
using NiLangCore using NiLangCore: type2tuple using Test struct NiTypeTest{T} <: IWrapper{T} x::T g::T end NiTypeTest(x) = NiTypeTest(x, zero(x)) @fieldview value(invtype::NiTypeTest) = invtype.x @fieldview gg(invtype::NiTypeTest) = invtype.g @testset "inv type" begin it = NiTypeTest(0.5) @test eps(typeof(it)) === eps(Float64) @test value(it) == 0.5 @test it ≈ NiTypeTest(0.5) @test it > 0.4 @test it < NiTypeTest(0.6) @test it < 7 @test 0.4 < it @test 7 > it @test chfield(it, value, 0.3) == NiTypeTest(0.3) it = chfield(it, Val(:g), 0.2) @test almost_same(NiTypeTest(0.5+1e-15), NiTypeTest(0.5)) @test !almost_same(NiTypeTest(1.0), NiTypeTest(1)) it = NiTypeTest(0.5) @test chfield(it, gg, 0.3) == NiTypeTest(0.5, 0.3) end @testset "mutable struct set field" begin mutable struct MS{T} x::T y::T z::T end ms = MS(0.5, 0.6, 0.7) @i function f(ms) ms.x += 1 ms.y += 1 ms.z -= ms.x ^ 2 end ms2 = f(ms) @test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55) struct IMS{T} x::T y::T z::T end ms = IMS(0.5, 0.6, 0.7) ms2 = f(ms) @test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/lens.jl
code
1130
using NiLangCore, Test @testset "update field" begin @test NiLangCore.field_update(1+2im, Val(:im), 4) == 1+4im struct TestUpdateField1{A, B} a::A end @test NiLangCore.field_update(TestUpdateField1{Int,Float64}(1), Val(:a), 4) == TestUpdateField1{Int,Float64}(4) struct TestUpdateField2{A} a::A function TestUpdateField2(a::T) where T new{T}(a) end end @test NiLangCore.field_update(TestUpdateField2(1), Val(:a), 4) == TestUpdateField2(4) @test NiLangCore.default_constructor(ComplexF64, 1.0, 2.0) == 1+2im end @testset "_zero" begin @test _zero(Tuple{Float64, Float32,String,Matrix{Float64},Char,Dict{Int,Int}}) == (0.0, 0f0, "", zeros(0,0), '\0', Dict{Int,Int}()) @test _zero(ComplexF64) == 0.0 + 0.0im @test _zero((1,2.0,"adsf",randn(2,2),'d',Dict(2=>5))) == (0, 0.0,"",zeros(2,2),'\0',Dict(2=>0)) @test _zero(1+2.0im) == 0.0 + 0.0im @test _zero(()) == () @test _zero((1,2)) == (0, 0) @test _zero(Symbol) == Symbol("") @test _zero(:x) == Symbol("") end @testset "fields" begin @test (@fields 1+3im) == (1,3) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/runtests.jl
code
480
using NiLangCore
using Test

@testset "Core.jl" begin
    include("Core.jl")
end

@testset "stack.jl" begin
    include("stack.jl")
end

@testset "lens.jl" begin
    include("lens.jl")
end

@testset "utils.jl" begin
    include("utils.jl")
end

@testset "symboltable.jl" begin
    include("symboltable.jl")
end

@testset "instr.jl" begin
    include("instr.jl")
end

@testset "vars.jl" begin
    include("vars.jl")
end

@testset "compiler.jl" begin
    include("compiler.jl")
end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/stack.jl
code
4790
using NiLangCore, Test @testset "stack" begin for (stack, x) in [ (FLOAT64_STACK, 0.3), (FLOAT32_STACK, 0f4), (INT64_STACK, 3), (INT32_STACK, Int32(3)), (COMPLEXF64_STACK, 4.0+0.3im), (COMPLEXF32_STACK, 4f0+0f3im), (BOOL_STACK, true), ] println(stack) push!(stack, x) @test pop!(stack) === x end end @testset "stack operations" begin z = 1.0 NiLangCore.empty_global_stacks!() @test_throws ArgumentError (@instr GLOBAL_STACK[end] ↔ y::∅) y = 4.0 @test_throws ArgumentError (@instr GLOBAL_STACK[end] → y) @test_throws BoundsError (@instr @invcheckoff GLOBAL_STACK[end] ↔ y) @test_throws ArgumentError (@instr @invcheckoff GLOBAL_STACK[end] → y) x = 0.3 NiLangCore.empty_global_stacks!() @instr GLOBAL_STACK[end+1] ↔ x @instr GLOBAL_STACK[end] ↔ x::∅ @test x === 0.3 @instr @invcheckoff GLOBAL_STACK[end+1] ↔ x y = 0.5 @instr GLOBAL_STACK[end+1] ↔ y @instr @invcheckoff GLOBAL_STACK[end] ↔ x::∅ @test x == 0.5 x =0.3 st = Float64[] @instr st[end+1] ↔ x @test length(st) == 1 @instr st[end] ↔ x::∅ @test length(st) == 0 @test x === 0.3 @instr st[end+1] ↔ x @test length(st) == 1 y = 0.5 @instr st[end+1] ↔ y @instr @invcheckoff st[end] ↔ x::∅ @test x == 0.5 @i function test(x) x2 ← zero(x) x2 += x^2 GLOBAL_STACK[end+1] ↔ x x::∅ ↔ x2 end @test test(3.0) == 9.0 l = length(NiLangCore.GLOBAL_STACK) @test check_inv(test, (3.0,)) @test length(NiLangCore.GLOBAL_STACK) == l @i function test2(x) x2 ← zero(x) x2 += x^2 @invcheckoff GLOBAL_STACK[end+1] ↔ x x::∅ ↔ x2 end @test test2(3.0) == 9.0 l = length(NiLangCore.GLOBAL_STACK) @test check_inv(test2, (3.0,)) @test length(NiLangCore.GLOBAL_STACK) == l x = 3.0 @instr GLOBAL_STACK[end+1] ↔ x NiLangCore.empty_global_stacks!() l = length(NiLangCore.GLOBAL_STACK) @test l == 0 end @testset "copied push/pop stack operations" begin NiLangCore.empty_global_stacks!() x =0.3 @instr GLOBAL_STACK[end+1] ← x @test x === 0.3 @instr GLOBAL_STACK[end] → x @test x === 0.3 @instr GLOBAL_STACK[end+1] ← x x = 0.4 @test_throws InvertibilityError @instr GLOBAL_STACK[end] → x y = 0.5 @instr GLOBAL_STACK[end+1] ← y @instr @invcheckoff GLOBAL_STACK[end] → x @test x == 0.5 st = [] x = [0.3] @instr st[end+1] ← x @test st[1] !== [0.3] @test st[1] ≈ [0.3] x =0.3 st = Float64[] @instr ~(st[end] → x) @test x === 0.3 @test length(st) == 1 @instr ~(st[end+1] ← x) @test length(st) == 0 @test x === 0.3 @instr @invcheckoff st[end+1] ← x @test length(st) == 1 x = 0.4 @test_throws InvertibilityError @instr st[end] → x @test length(st) == 0 y = 0.5 @instr st[end+1] ← y @instr @invcheckoff st[end] → x @test x == 0.5 @i function test(x, x2) x2 += x^2 GLOBAL_STACK[end+1] ← x x ↔ x2 end @test test(3.0, 0.0) == (9.0, 3.0) l = length(NiLangCore.GLOBAL_STACK) @test check_inv(test, (3.0, 0.0)) @test length(NiLangCore.GLOBAL_STACK) == l end @testset "dictionary & vector" begin # allocate and deallocate @i function f1(d, y) d["y"] ← y end d = Dict("x" => 34) @test f1(d, 3) == (Dict("x"=>34, "y"=>3), 3) @test_throws InvertibilityError f1(d, 3) d = Dict("x" => 34) @test check_inv(f1, (d, 3)) # not available on vectors @i function f2(d, y) d[2] ← y end @test_throws MethodError f2([1,2,3], 3) # swap @i function f3(d, y) d["y"] ↔ y end d = Dict("y" => 34) @test f3(d, 3) == (Dict("y"=>3), 34) d = Dict("z" => 34) @test_throws KeyError f3(d, 3) d = Dict("y" => 34) @test check_inv(f3, (d, 3)) # swap on vector @i function f4(d, y, x) d[2] ↔ y d[end] ↔ x end d = [11,12,13] @test f4(d, 1,2) == ([11,1,2],12,13) d = [11,12,13] @test check_inv(f4, (d, 1,2)) # swap to empty @i function f5(d, x::T) where T d["x"]::∅ ↔ x # swap in d["y"] 
↔ x::∅ # swap out end d = Dict("y" => 34) @test f5(d, 3) == (Dict("x"=>3), 34) d = Dict("y" => 34) @test check_inv(f5, (d, 3)) d = Dict("x" => 34) @test_throws InvertibilityError f5(d, 3) # not available on vectors @i function f6(d, y) d[2]::∅ ↔ y end @test_throws MethodError f6([1,2,3], 3) end @testset "inverse stack" begin @i function f(x) x[end+1] ← 1 end x = FastStack{Int}(3) @test check_inv(f, (x,)) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/symboltable.jl
code
2654
using Test, NiLangCore using NiLangCore: SymbolTable, allocate!, deallocate!, operate!, swapvars!, variable_analysis_ex @testset "variable analysis" begin st = SymbolTable() # allocate! : not exist allocate!(st, :x) allocate!(st, :y) @test st.existing == [:x, :y] # allocate! : existing @test_throws InvertibilityError allocate!(st, :x) @test st.existing == [:x, :y] # deallocate! : not exist @test_throws InvertibilityError deallocate!(st, :z) # deallocate! : existing deallocate!(st, :y) @test st.existing == [:x] @test st.deallocated == [:y] # deallocate! : deallocated @test_throws InvertibilityError deallocate!(st, :y) # operate! : deallocated @test_throws InvertibilityError operate!(st, :y) # allocate! : deallocated allocate!(st, :y) @test st.existing == [:x, :y] @test st.deallocated == [] # operate! : not exist operate!(st, :j) @test st.unclassified == [:j] # operate! : existing operate!(st, :y) @test st.unclassified == [:j] # allocate! unclassified @test_throws InvertibilityError allocate!(st, :j) # operate! : unclassified operate!(st, :j) @test st.unclassified == [:j] # deallocate! : unclassified @test_throws InvertibilityError deallocate!(st, :j) # swap both existing swapvars!(st, :j, :x) @test st.unclassified == [:x] @test st.existing == [:j, :y] # swap existing - nonexisting swapvars!(st, :j, :k) @test st.unclassified == [:x, :j] @test st.existing == [:k, :y] # swap nonexisting - existing swapvars!(st, :o, :x) @test st.unclassified == [:o, :j, :x] @test st.existing == [:k, :y] # swap both not existing swapvars!(st, :m, :n) @test st.unclassified == [:o, :j, :x, :m, :n] # push and pop variables end @testset "variable analysis" begin st = SymbolTable([:x, :y], [], []) ex = :((x,y) ↔ (a, b)) variable_analysis_ex(ex, st) @test st.existing == [:a, :b] @test st.unclassified == [:x, :y] st = SymbolTable([:x, :y], [], []) ex = :((x,y) ↔ b) variable_analysis_ex(ex, st) @test st.existing == [:b] @test st.unclassified == [:x, :y] ex = :(b ↔ (x,y)) variable_analysis_ex(ex, st) @test st.existing == [:x, :y] @test st.unclassified == [:b] st = SymbolTable([:x, :y], [], []) ex = :(b ↔ x) variable_analysis_ex(ex, st) @test st.existing == [:b, :y] @test st.unclassified == [:x] st = SymbolTable([], [], []) ex = :(b ↔ (x, y)) variable_analysis_ex(ex, st) @test st.existing == [] @test st.unclassified == [:b, :x, :y] end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/utils.jl
code
1670
using Test, NiLangCore using NiLangCore: get_argname, get_ftype, match_function, MyOrderedDict @testset "match function" begin ex = match_function(:(function f(x) x end)) @test ex[1] == nothing @test ex[2] == :f @test ex[3] == [:x] @test ex[4] == [] @test length(filter(x->!(x isa LineNumberNode), ex[5])) == 1 ex = match_function(:(@inline function f(x; y) x end)) @test ex[1][1] == Symbol("@inline") @test ex[1][2] isa LineNumberNode @test ex[2] == :f @test ex[3] == [Expr(:parameters, :y), :x] @test length(filter(x->!(x isa LineNumberNode), ex[5])) == 1 @test ex[4] == [] ex = match_function(:(function f(x::T) where T x end)) @test ex[2] == :f @test ex[3] == [:(x::T)] @test length(filter(x->!(x isa LineNumberNode), ex[5])) == 1 @test ex[4] == [:T] ex = match_function(:(f(x)=x)) @test ex[2] == :f @test ex[3] == [:x] @test length(ex[5]) == 2 @test ex[4] == [] end @testset "argname and type" begin @test get_argname(:(y=3)) == :y @test get_argname(:(y::Int)) == :y @test get_argname(:(y::Int=3)) == :y @test get_argname(:(f(; k::Int=4)).args[2]) == :(f(; k::Int=4)).args[2] end @testset "my ordered dict" begin od = MyOrderedDict{Any, Any}() od[:a] = 2 od[:b] = 4 od[:c] = 7 @test length(od) == 3 @test od[:b] == 4 od[:b] = 1 @test od[:b] == 1 delete!(od, :b) @test_throws KeyError od[:b] @test pop!(od) == (:c, 7) @test length(od) == 1 end @testset "unzipped broadcast" begin x = [1, 2, 3.0] res = NiLangCore.unzipped_broadcast(exp, x) @test res === x @test res ≈ exp.([1, 2, 3.0]) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
test/vars.jl
code
3544
using Test, NiLangCore using NiLangCore: type2tuple @testset "dataview" begin x = 1.0 @test_throws ErrorException chfield(x, "asdf", 3.0) @test chfield(x, identity, 2.0) === 2.0 @assign -x 0.1 @test x == -0.1 x = 1+2.0im @assign x' 0.1+1im @test x == 0.1-1im x = (3, 4) @instr (x.:1) += 3 @test x == (6, 4) x = 3 y = (4,) @instr x += y.:1 @test x == 7 x = [3, 4] y = ([4, 4],) @instr x .+= y.:1 @test x == [7.0, 8.0] x = true y = (true,) @instr x ⊻= y.:1 @test x == false x = [true, false] y = ([true, true],) @instr x .⊻= (y |> tget(1)) @test x == [false, true] x = ones(4) y = ones(2) @instr (x |> subarray(1:2)) += y @test x == [2,2,1,1] @instr (x |> subarray(1)) += (y |> subarray(1)) @test x == [3,2,1,1] end @testset "anc, deanc" begin @i function f(y) x ← y x → 1.0 end f(1.0) @test_throws InvertibilityError f(1.1) @i function f2(y) x ← y x → (1.0, 2.0) end f2((1.0, 2.0)) @test_throws InvertibilityError f2((1.1, 2.0)) @i function f3(y) x ← y x → [1.0, 2.0] end f3([1.0, 2.0]) @test_throws InvertibilityError f3([1.1, 2.0]) struct B a b end @i function f4(y) x ← y x → B(1.0, 2.0) end f4(B(1.0, 2.0)) @test_throws InvertibilityError f4(B(1.0, 1.1)) @i function f5(y) x ← y x → "" end f5("") @test_throws InvertibilityError f5("a") end @testset "inv and tuple output" begin a, b = false, false @instr ~(a ⊻= true) @test a == true @instr ~((a, b) ⊻= (true, true)) @test a == false @test b == true y = 1.0 x = 1.0 @instr ~(~(y += 1.0)) @test y == 2.0 @instr ~(~((x, y) += (1.0, 1.0))) @test y == 3.0 @test x == 2.0 @instr ~((x, y) += (1.0, 1.0)) @test y == 2.0 @test x == 1.0 @instr ~(y += 1.0) @test y == 1.0 z = [1.0, 2.0] @instr ~(~(z .+= [1.0, 2.0])) @test z ≈ [2.0, 4.0] end @testset "chfield" begin x = [1,2,3] @test chfield(x, length, 3) == x @test_throws InvertibilityError chfield(x, length, 2) end @testset "invcheck" begin @test (@invcheck 0.3 0.3) isa Any @test_throws InvertibilityError (@invcheck 0.3 0.4) @test_throws InvertibilityError (@invcheck 3 3.0) end @testset "dict" begin @i function f1() d ← Dict(1=>1, 2=>2) d → Dict(2=>2) end @i function f2() d ← Dict(1=>1) d → Dict(2=>1) end @i function f3() d ← Dict(1=>1) d → Dict(1=>2) end @i function f4() d ← Dict(1=>1) d → Dict(1=>1) end @test_throws InvertibilityError f1() @test_throws InvertibilityError f2() @test_throws InvertibilityError f3() @test f4() == () end @testset "fieldview" begin @fieldview first_real(x::Vector{ComplexF64}) = x[1].re x = [1.0im, 2+3im] @instr (x |> first_real) += 3 @test x == [3+1.0im, 2+3.0im] end @testset "mutable struct set field" begin mutable struct MS{T} x::T y::T z::T end ms = MS(0.5, 0.6, 0.7) @i function f(ms) ms.x += 1 ms.y += 1 ms.z -= ms.x ^ 2 end ms2 = f(ms) @test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55) struct IMS{T} x::T y::T z::T end ms = IMS(0.5, 0.6, 0.7) ms2 = f(ms) @test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55) end
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
README.md
docs
2844
# NiLangCore

The core package for the reversible eDSL NiLang.

![CI](https://github.com/GiggleLiu/NiLangCore.jl/workflows/CI/badge.svg)
[![codecov](https://codecov.io/gh/GiggleLiu/NiLangCore.jl/branch/master/graph/badge.svg?token=ReCkoV9Pgp)](https://codecov.io/gh/GiggleLiu/NiLangCore.jl)

**Warning** Requires Julia version >= 1.3.

## Examples

1. Define a pair of dual instructions

```julia
julia> using NiLangCore

julia> function ADD(a!::Number, b::Number)
           a! + b, b
       end
ADD (generic function with 3 methods)

julia> function SUB(a!::Number, b::Number)
           a! - b, b
       end
SUB (generic function with 3 methods)

julia> @dual ADD SUB
```

2. Define a reversible function

```julia
julia> @i function test(a, b)
           SUB(a, b)
       end
```

## Reversible IR

```julia
julia> using NiLangCore

julia> @code_reverse x += f(y)
:(x -= f(y))

julia> @code_reverse x .+= f.(y)
:(x .-= f.(y))

julia> @code_reverse x ⊻= f(y)
:(x ⊻= f(y))

julia> @code_reverse x ← zero(T)
:(x → zero(T))

julia> @code_reverse begin y += f(x) end
quote
    #= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:82 =#
    y -= f(x)
    #= REPL[52]:1 =#
end

julia> @code_reverse if (precond, postcond) y += f(x) else y += g(x) end
:(if (postcond, precond)
      #= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:69 =#
      y -= f(x)
      #= REPL[48]:1 =#
  else
      #= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:69 =#
      y -= g(x)
      #= REPL[48]:1 =#
  end)

julia> @code_reverse while (precond, postcond) y += f(x) end
:(@from !postcond while precond
      #= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:72 =#
      y -= f(x)
      #= REPL[49]:1 =#
  end)

julia> @code_reverse for i=start:step:stop y += f(x) end
:(for i = stop:-step:start
      #= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:76 =#
      y -= f(x)
      #= REPL[50]:1 =#
  end)

julia> @code_reverse @safe println(x)
:(#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:81 =# @safe println(x))
```

## A note on symbols

The `←` (\leftarrow + TAB) operation copies B to A; its inverse is `→` (\rightarrow + TAB).

* push onto a stack, `A[end+1] ← B` => `[A..., B], B`
* add a key-value pair to a dict, `A[i] ← B` => `{A..., i=>B}, B`
* allocate a new ancilla, `(A = ∅) ← B` => `(A = B), B`

The `↔` (\leftrightarrow + TAB) operation swaps B and A; it is self-reversible.

* swap two variables, `A ↔ B` => `B, A`
* transfer onto a stack, `A[end+1] ↔ B` => `[A..., B], ∅`
* transfer a key-value pair into a dict, `A[i] ↔ B` => `haskey ? {(A\A[i])..., i=>B}, A[i] : {A..., i=>B}, ∅`
* transfer the value between two variables, `(A = ∅) ↔ B` => `(A = B), ∅`

One can use `var::∅` to annotate `var` as a fresh new variable (only new variables can be allocated), `var[end+1]` to represent the stack top for a push, and `var[end]` for the stack top for a pop.
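For example, a minimal sketch (adapted from the package's test suite; the function name `push_and_reset` is illustrative) that uses `↔` to move a variable onto a stack and then re-allocates the emptied slot with `←`:

```julia
julia> @i function push_and_reset(stack, y)
           stack[end+1] ↔ y   # move y onto the stack; y becomes empty
           y ← 2              # the emptied slot can be allocated again
       end

julia> push_and_reset([1, 2, 3], 4)
([1, 2, 3, 4], 2)
```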
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
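To make the two README snippets concrete, here is a sketch that runs the dual pair end to end. That an `@i` function returns its updated arguments as a tuple and is inverted with `~` follows NiLang's usual conventions, but treat the exact calls as an assumption:

```julia
using NiLangCore

# Dual pair from the README: ADD is undone by SUB.
function ADD(a!::Number, b::Number)
    a! + b, b
end
function SUB(a!::Number, b::Number)
    a! - b, b
end
@dual ADD SUB

# A reversible function built from the pair.
@i function test(a, b)
    SUB(a, b)
end

a, b = test(3.0, 1.0)   # forward: (2.0, 1.0)
a, b = (~test)(a, b)    # inverse recovers the input: (3.0, 1.0)
```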
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
benchmark/README.md
docs
351
# Benchmark Report

### May 8th, 2021

```
"FastStack-inbounds" => Trial(3.136 ns)
"NiLang-@invcheckoff-@inbounds" => Trial(2.096 ns)
"NiLang-@invcheckoff" => Trial(5.341 ns)
"FastStack" => Trial(6.775 ns)
"NiLang" => Trial(22.935 ns)
"Julia" => Trial(12.062 ns)
"setindex-inbounds" => Trial(2.362 ns)
"setindex" => Trial(2.321 ns)
```
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "Apache-2.0" ]
0.10.6
d3bfb7acf19fca70751bb70b014c6d57e4dd9b18
docs/src/index.md
docs
72
# NiLangCore.jl

```@index
```

```@autodocs
Modules = [NiLangCore]
```
NiLangCore
https://github.com/GiggleLiu/NiLangCore.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
docs/make.jl
code
403
using AbstractMCMC
using Documenter
using Random

DocMeta.setdocmeta!(AbstractMCMC, :DocTestSetup, :(using AbstractMCMC); recursive=true)

makedocs(;
    sitename="AbstractMCMC",
    format=Documenter.HTML(),
    modules=[AbstractMCMC],
    pages=["Home" => "index.md", "api.md", "design.md"],
    checkdocs=:exports,
)

deploydocs(; repo="github.com/TuringLang/AbstractMCMC.jl.git", push_preview=true)
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/AbstractMCMC.jl
code
2207
module AbstractMCMC

using BangBang: BangBang
using ConsoleProgressMonitor: ConsoleProgressMonitor
using LogDensityProblems: LogDensityProblems
using LoggingExtras: LoggingExtras
using ProgressLogging: ProgressLogging
using StatsBase: StatsBase
using TerminalLoggers: TerminalLoggers
using Transducers: Transducers
using FillArrays: FillArrays

using Distributed: Distributed
using Logging: Logging
using Random: Random

# Reexport sample
using StatsBase: sample
export sample

# Parallel sampling types
export MCMCThreads, MCMCDistributed, MCMCSerial

"""
    AbstractChains

`AbstractChains` is an abstract type for an object that stores parameter samples generated
through a MCMC process.
"""
abstract type AbstractChains end

"""
    AbstractSampler

The `AbstractSampler` type is intended to be inherited from when implementing a custom
sampler. Any persistent state information should be saved in a subtype of `AbstractSampler`.

When defining a new sampler, you should also overload the function `transition_type`, which
tells the `sample` function what type of parameter it should expect to receive.
"""
abstract type AbstractSampler end

"""
    AbstractModel

An `AbstractModel` represents a generic model type that can be used to perform inference.
"""
abstract type AbstractModel end

"""
    AbstractMCMCEnsemble

An `AbstractMCMCEnsemble` algorithm represents a specific algorithm for sampling MCMC
chains in parallel.
"""
abstract type AbstractMCMCEnsemble end

"""
    MCMCThreads

The `MCMCThreads` algorithm allows users to sample MCMC chains in parallel using multiple
threads.
"""
struct MCMCThreads <: AbstractMCMCEnsemble end

"""
    MCMCDistributed

The `MCMCDistributed` algorithm allows users to sample MCMC chains in parallel using
multiple processes.
"""
struct MCMCDistributed <: AbstractMCMCEnsemble end

"""
    MCMCSerial

The `MCMCSerial` algorithm allows users to sample serially, with no thread or process
parallelism.
"""
struct MCMCSerial <: AbstractMCMCEnsemble end

include("samplingstats.jl")
include("logging.jl")
include("interface.jl")
include("sample.jl")
include("stepper.jl")
include("transducer.jl")
include("logdensityproblems.jl")

end # module AbstractMCMC
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
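A sketch of how the abstract types above are meant to be subclassed; `UniformModel`, `UniformSampler`, and the integer state are hypothetical stand-ins, not part of AbstractMCMC:

```julia
using AbstractMCMC
using Random

struct UniformModel <: AbstractMCMC.AbstractModel end
struct UniformSampler <: AbstractMCMC.AbstractSampler end

# One MCMC step: draw a uniform number; the integer state counts iterations.
function AbstractMCMC.step(
    rng::Random.AbstractRNG,
    ::UniformModel,
    ::UniformSampler,
    state::Union{Nothing,Int}=nothing;
    kwargs...,
)
    iteration = state === nothing ? 1 : state + 1
    return rand(rng), iteration
end

chain = sample(UniformModel(), UniformSampler(), 100)  # Vector of 100 draws
```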
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/interface.jl
code
3810
""" chainscat(c::AbstractChains...) Concatenate multiple chains. By default, the chains are concatenated along the third dimension by calling `cat(c...; dims=3)`. """ chainscat(c::AbstractChains...) = cat(c...; dims=3) """ chainsstack(c::AbstractVector) Stack chains in `c`. By default, the vector of chains is returned unmodified. If `eltype(c) <: AbstractChains`, then `reduce(chainscat, c)` is called. """ chainsstack(c) = c chainsstack(c::AbstractVector{<:AbstractChains}) = reduce(chainscat, c) """ bundle_samples(samples, model, sampler, state, chain_type[; kwargs...]) Bundle all `samples` that were sampled from the `model` with the given `sampler` in a chain. The final `state` of the `sampler` can be included in the chain. The type of the chain can be specified with the `chain_type` argument. By default, this method returns `samples`. """ function bundle_samples( samples, model::AbstractModel, sampler::AbstractSampler, state, ::Type{T}; kwargs... ) where {T} # dispatch to internal method for default implementations to fix # method ambiguity issues (see #120) return _bundle_samples(samples, model, sampler, state, T; kwargs...) end function _bundle_samples( samples, @nospecialize(::AbstractModel), @nospecialize(::AbstractSampler), @nospecialize(::Any), ::Type; kwargs..., ) return samples end function _bundle_samples( samples::Vector, @nospecialize(::AbstractModel), @nospecialize(::AbstractSampler), @nospecialize(::Any), ::Type{Vector{T}}; kwargs..., ) where {T} return map(samples) do sample convert(T, sample) end end """ step(rng, model, sampler[, state; kwargs...]) Return a 2-tuple of the next sample and the next state of the MCMC `sampler` for `model`. Samples describe the results of a single step of the `sampler`. As an example, a sample might include a vector of parameters sampled from a prior distribution. When sampling using [`sample`](@ref), every `step` call after the first has access to the current `state` of the sampler. """ function step end """ samples(sample, model, sampler[, N; kwargs...]) Generate a container for the samples of the MCMC `sampler` for the `model`, whose first sample is `sample`. The method can be called with and without a predefined number `N` of samples. """ function samples(sample, ::AbstractModel, ::AbstractSampler, N::Integer; kwargs...) ts = Vector{typeof(sample)}(undef, 0) sizehint!(ts, N) return ts end function samples(sample, ::AbstractModel, ::AbstractSampler; kwargs...) return Vector{typeof(sample)}(undef, 0) end """ save!!(samples, sample, iteration, model, sampler[, N; kwargs...]) Save the `sample` of the MCMC `sampler` at the current `iteration` in the container of `samples`. The function can be called with and without a predefined number `N` of samples. By default, AbstractMCMC uses `push!!` from the Julia package [BangBang](https://github.com/tkf/BangBang.jl) to append to the container, and widen its type if needed. """ function save!!( samples::Vector, sample, iteration::Integer, ::AbstractModel, ::AbstractSampler, N::Integer; kwargs..., ) s = BangBang.push!!(samples, sample) s !== samples && sizehint!(s, N) return s end function save!!( samples, sample, iteration::Integer, ::AbstractModel, ::AbstractSampler; kwargs... ) return BangBang.push!!(samples, sample) end # Deprecations Base.@deprecate transitions( transition, model::AbstractModel, sampler::AbstractSampler, N::Integer; kwargs... ) samples(transition, model, sampler, N; kwargs...) false Base.@deprecate transitions( transition, model::AbstractModel, sampler::AbstractSampler; kwargs... 
) samples(transition, model, sampler; kwargs...) false
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
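Continuing the hypothetical `UniformModel`/`UniformSampler` sketch from above, `bundle_samples` is the hook for converting the raw sample vector into a custom chain type; `MeanChain` is an assumed name:

```julia
using AbstractMCMC

struct MeanChain <: AbstractMCMC.AbstractChains  # hypothetical chain type
    samples::Vector{Float64}
    mean::Float64
end

# Called by `sample(...; chain_type=MeanChain)` with the final sampler `state`.
function AbstractMCMC.bundle_samples(
    samples::Vector{Float64},
    ::UniformModel,
    ::UniformSampler,
    state,
    ::Type{MeanChain};
    kwargs...,
)
    return MeanChain(samples, sum(samples) / length(samples))
end

chain = sample(UniformModel(), UniformSampler(), 1_000; chain_type=MeanChain)
```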
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/logdensityproblems.jl
code
4003
""" LogDensityModel <: AbstractMCMC.AbstractModel Wrapper around something that implements the LogDensityProblem.jl interface. Note that this does _not_ implement the LogDensityProblems.jl interface itself, but it simply useful for indicating to the `sample` and other `AbstractMCMC` methods that the wrapped object implements the LogDensityProblems.jl interface. # Fields - `logdensity`: The object that implements the LogDensityProblems.jl interface. """ struct LogDensityModel{L} <: AbstractModel logdensity::L function LogDensityModel{L}(logdensity::L) where {L} if LogDensityProblems.capabilities(logdensity) === nothing throw( ArgumentError( "The log density function does not support the LogDensityProblems.jl interface", ), ) end return new{L}(logdensity) end end LogDensityModel(logdensity::L) where {L} = LogDensityModel{L}(logdensity) # Fallbacks: Wrap log density function in a model """ sample( rng::Random.AbstractRNG=Random.default_rng(), logdensity, sampler::AbstractSampler, N_or_isdone; kwargs..., ) Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `sample` with the resulting model instead of `logdensity`. The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface. """ function StatsBase.sample( rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler, N_or_isdone; kwargs... ) return StatsBase.sample(rng, _model(logdensity), sampler, N_or_isdone; kwargs...) end """ sample( rng::Random.AbstractRNG=Random.default_rng(), logdensity, sampler::AbstractSampler, parallel::AbstractMCMCEnsemble, N::Integer, nchains::Integer; kwargs..., ) Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `sample` with the resulting model instead of `logdensity`. The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface. """ function StatsBase.sample( rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler, parallel::AbstractMCMCEnsemble, N::Integer, nchains::Integer; kwargs..., ) return StatsBase.sample( rng, _model(logdensity), sampler, parallel, N, nchains; kwargs... ) end """ steps( rng::Random.AbstractRNG=Random.default_rng(), logdensity, sampler::AbstractSampler; kwargs..., ) Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `steps` with the resulting model instead of `logdensity`. The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface. """ function steps(rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler; kwargs...) return steps(rng, _model(logdensity), sampler; kwargs...) end """ Sample( rng::Random.AbstractRNG=Random.default_rng(), logdensity, sampler::AbstractSampler; kwargs..., ) Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `Sample` with the resulting model instead of `logdensity`. The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface. """ function Sample(rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler; kwargs...) return Sample(rng, _model(logdensity), sampler; kwargs...) end function _model(logdensity) if LogDensityProblems.capabilities(logdensity) === nothing throw( ArgumentError( "the log density function does not support the LogDensityProblems.jl interface. Please implement the interface or provide a model of type `AbstractMCMC.AbstractModel`", ), ) end return LogDensityModel(logdensity) end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
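A sketch of the fallback path above: any object implementing the LogDensityProblems.jl interface (here a hypothetical `StdNormal`) can be passed where a model is expected:

```julia
using AbstractMCMC
using LogDensityProblems

# Hypothetical target density implementing the LogDensityProblems.jl interface.
struct StdNormal end
LogDensityProblems.logdensity(::StdNormal, x) = -sum(abs2, x) / 2
LogDensityProblems.dimension(::StdNormal) = 2
function LogDensityProblems.capabilities(::Type{StdNormal})
    return LogDensityProblems.LogDensityOrder{0}()
end

# The fallbacks wrap the bare object in a LogDensityModel, so a sampler with a
# `step` method for `LogDensityModel{StdNormal}` could be called as:
#     chain = sample(StdNormal(), MySampler(), 1_000)
model = AbstractMCMC.LogDensityModel(StdNormal())  # explicit wrapping also works
```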
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/logging.jl
code
1785
# avoid creating a progress bar with @withprogress if progress logging is disabled
# and add a custom progress logger if the current logger does not seem to be able to handle
# progress logs
macro ifwithprogresslogger(progress, exprs...)
    return esc(
        quote
            if $progress
                if $hasprogresslevel($Logging.current_logger())
                    $ProgressLogging.@withprogress $(exprs...)
                else
                    $with_progresslogger($Base.@__MODULE__, $Logging.current_logger()) do
                        $ProgressLogging.@withprogress $(exprs...)
                    end
                end
            else
                $(exprs[end])
            end
        end,
    )
end

# improved checks?
function hasprogresslevel(logger)
    return Logging.min_enabled_level(logger) ≤ ProgressLogging.ProgressLevel
end

# filter better, e.g., according to group?
function with_progresslogger(f, _module, logger)
    logger1 = LoggingExtras.EarlyFilteredLogger(progresslogger()) do log
        log._module === _module && log.level == ProgressLogging.ProgressLevel
    end
    logger2 = LoggingExtras.EarlyFilteredLogger(logger) do log
        log._module !== _module || log.level != ProgressLogging.ProgressLevel
    end

    return Logging.with_logger(f, LoggingExtras.TeeLogger(logger1, logger2))
end

function progresslogger()
    # detect if code is running under IJulia since TerminalLogger does not work with IJulia
    # https://github.com/JuliaLang/IJulia.jl#detecting-that-code-is-running-under-ijulia
    if (Sys.iswindows() && VERSION < v"1.5.3") ||
        (isdefined(Main, :IJulia) && Main.IJulia.inited)
        return ConsoleProgressMonitor.ProgressLogger()
    else
        return TerminalLoggers.TerminalLogger()
    end
end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/sample.jl
code
19542
# Default implementations of `sample`.
const PROGRESS = Ref(true)

"""
    setprogress!(progress::Bool; silent::Bool=false)

Enable progress logging globally if `progress` is `true`, and disable it otherwise.
Optionally disable informational message if `silent` is `true`.
"""
function setprogress!(progress::Bool; silent::Bool=false)
    if !silent
        @info "progress logging is $(progress ? "enabled" : "disabled") globally"
    end
    PROGRESS[] = progress
    return progress
end

function StatsBase.sample(
    model_or_logdensity, sampler::AbstractSampler, N_or_isdone; kwargs...
)
    return StatsBase.sample(
        Random.default_rng(), model_or_logdensity, sampler, N_or_isdone; kwargs...
    )
end

"""
    sample(
        rng::Random.AbstractRNG=Random.default_rng(),
        model::AbstractModel,
        sampler::AbstractSampler,
        N_or_isdone;
        kwargs...,
    )

Sample from the `model` with the Markov chain Monte Carlo `sampler` and return the samples.

If `N_or_isdone` is an `Integer`, exactly `N_or_isdone` samples are returned.

Otherwise, sampling is performed until a convergence criterion `N_or_isdone` returns
`true`. The convergence criterion has to be a function with the signature
```julia
isdone(rng, model, sampler, samples, state, iteration; kwargs...)
```
where `state` and `iteration` are the current state and iteration of the sampler,
respectively. It should return `true` when sampling should end, and `false` otherwise.
"""
function StatsBase.sample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    N_or_isdone;
    kwargs...,
)
    return mcmcsample(rng, model, sampler, N_or_isdone; kwargs...)
end

function StatsBase.sample(
    model_or_logdensity,
    sampler::AbstractSampler,
    parallel::AbstractMCMCEnsemble,
    N::Integer,
    nchains::Integer;
    kwargs...,
)
    return StatsBase.sample(
        Random.default_rng(), model_or_logdensity, sampler, parallel, N, nchains; kwargs...
    )
end

"""
    sample(
        rng::Random.AbstractRNG=Random.default_rng(),
        model::AbstractModel,
        sampler::AbstractSampler,
        parallel::AbstractMCMCEnsemble,
        N::Integer,
        nchains::Integer;
        kwargs...,
    )

Sample `nchains` Monte Carlo Markov chains from the `model` with the `sampler` in parallel
using the `parallel` algorithm, and combine them into a single chain.
"""
function StatsBase.sample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    parallel::AbstractMCMCEnsemble,
    N::Integer,
    nchains::Integer;
    kwargs...,
)
    return mcmcsample(rng, model, sampler, parallel, N, nchains; kwargs...)
end

# Default implementations of regular and parallel sampling.
function mcmcsample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    N::Integer;
    progress=PROGRESS[],
    progressname="Sampling",
    callback=nothing,
    discard_initial=0,
    thinning=1,
    chain_type::Type=Any,
    initial_state=nothing,
    kwargs...,
)
    # Check the number of requested samples.
    N > 0 || error("the number of samples must be ≥ 1")
    Ntotal = thinning * (N - 1) + discard_initial + 1

    # Start the timer
    start = time()
    local state

    @ifwithprogresslogger progress name = progressname begin
        # Determine threshold values for progress logging
        # (one update per 0.5% of progress)
        if progress
            threshold = Ntotal ÷ 200
            next_update = threshold
        end

        # Obtain the initial sample and state.
        sample, state = if initial_state === nothing
            step(rng, model, sampler; kwargs...)
        else
            step(rng, model, sampler, initial_state; kwargs...)
        end

        # Discard initial samples.
        for i in 1:discard_initial
            # Update the progress bar.
            if progress && i >= next_update
                ProgressLogging.@logprogress i / Ntotal
                next_update = i + threshold
            end

            # Obtain the next sample and state.
            sample, state = step(rng, model, sampler, state; kwargs...)
        end

        # Run callback.
        callback === nothing || callback(rng, model, sampler, sample, state, 1; kwargs...)

        # Save the sample.
        samples = AbstractMCMC.samples(sample, model, sampler, N; kwargs...)
        samples = save!!(samples, sample, 1, model, sampler, N; kwargs...)

        # Update the progress bar.
        itotal = 1 + discard_initial
        if progress && itotal >= next_update
            ProgressLogging.@logprogress itotal / Ntotal
            next_update = itotal + threshold
        end

        # Step through the sampler.
        for i in 2:N
            # Discard thinned samples.
            for _ in 1:(thinning - 1)
                # Obtain the next sample and state.
                sample, state = step(rng, model, sampler, state; kwargs...)

                # Update progress bar.
                if progress && (itotal += 1) >= next_update
                    ProgressLogging.@logprogress itotal / Ntotal
                    next_update = itotal + threshold
                end
            end

            # Obtain the next sample and state.
            sample, state = step(rng, model, sampler, state; kwargs...)

            # Run callback.
            callback === nothing ||
                callback(rng, model, sampler, sample, state, i; kwargs...)

            # Save the sample.
            samples = save!!(samples, sample, i, model, sampler, N; kwargs...)

            # Update the progress bar.
            if progress && (itotal += 1) >= next_update
                ProgressLogging.@logprogress itotal / Ntotal
                next_update = itotal + threshold
            end
        end
    end

    # Get the sample stop time.
    stop = time()
    duration = stop - start
    stats = SamplingStats(start, stop, duration)

    return bundle_samples(
        samples,
        model,
        sampler,
        state,
        chain_type;
        stats=stats,
        discard_initial=discard_initial,
        thinning=thinning,
        kwargs...,
    )
end

function mcmcsample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    isdone;
    chain_type::Type=Any,
    progress=PROGRESS[],
    progressname="Convergence sampling",
    callback=nothing,
    discard_initial=0,
    thinning=1,
    initial_state=nothing,
    kwargs...,
)
    # Start the timer
    start = time()
    local state

    @ifwithprogresslogger progress name = progressname begin
        # Obtain the initial sample and state.
        sample, state = if initial_state === nothing
            step(rng, model, sampler; kwargs...)
        else
            step(rng, model, sampler, initial_state; kwargs...)
        end

        # Discard initial samples.
        for _ in 1:discard_initial
            # Obtain the next sample and state.
            sample, state = step(rng, model, sampler, state; kwargs...)
        end

        # Run callback.
        callback === nothing || callback(rng, model, sampler, sample, state, 1; kwargs...)

        # Save the sample.
        samples = AbstractMCMC.samples(sample, model, sampler; kwargs...)
        samples = save!!(samples, sample, 1, model, sampler; kwargs...)

        # Step through the sampler until stopping.
        i = 2
        while !isdone(rng, model, sampler, samples, state, i; progress=progress, kwargs...)
            # Discard thinned samples.
            for _ in 1:(thinning - 1)
                # Obtain the next sample and state.
                sample, state = step(rng, model, sampler, state; kwargs...)
            end

            # Obtain the next sample and state.
            sample, state = step(rng, model, sampler, state; kwargs...)

            # Run callback.
            callback === nothing ||
                callback(rng, model, sampler, sample, state, i; kwargs...)

            # Save the sample.
            samples = save!!(samples, sample, i, model, sampler; kwargs...)

            # Increment iteration counter.
            i += 1
        end
    end

    # Get the sample stop time.
    stop = time()
    duration = stop - start
    stats = SamplingStats(start, stop, duration)

    # Wrap the samples up.
    return bundle_samples(
        samples,
        model,
        sampler,
        state,
        chain_type;
        stats=stats,
        discard_initial=discard_initial,
        thinning=thinning,
        kwargs...,
    )
end

function mcmcsample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    ::MCMCThreads,
    N::Integer,
    nchains::Integer;
    progress=PROGRESS[],
    progressname="Sampling ($(min(nchains, Threads.nthreads())) threads)",
    initial_params=nothing,
    initial_state=nothing,
    kwargs...,
)
    # Check if actually multiple threads are used.
    if Threads.nthreads() == 1
        @warn "Only a single thread available: MCMC chains are not sampled in parallel"
    end

    # Check if the number of chains is larger than the number of samples
    if nchains > N
        @warn "Number of chains ($nchains) is greater than number of samples per chain ($N)"
    end

    # Copy the random number generator, model, and sample for each thread
    nchunks = min(nchains, Threads.nthreads())
    chunksize = cld(nchains, nchunks)
    interval = 1:nchunks
    rngs = [deepcopy(rng) for _ in interval]
    models = [deepcopy(model) for _ in interval]
    samplers = [deepcopy(sampler) for _ in interval]

    # Create a seed for each chain using the provided random number generator.
    seeds = rand(rng, UInt, nchains)

    # Ensure that initial parameters and states are `nothing` or of the correct length
    check_initial_params(initial_params, nchains)
    check_initial_state(initial_state, nchains)

    # Set up a chains vector.
    chains = Vector{Any}(undef, nchains)

    @ifwithprogresslogger progress name = progressname begin
        # Create a channel for progress logging.
        if progress
            channel = Channel{Bool}(length(interval))
        end

        Distributed.@sync begin
            if progress
                # Update the progress bar.
                Distributed.@async begin
                    # Determine threshold values for progress logging
                    # (one update per 0.5% of progress)
                    threshold = nchains ÷ 200
                    nextprogresschains = threshold

                    progresschains = 0
                    while take!(channel)
                        progresschains += 1
                        if progresschains >= nextprogresschains
                            ProgressLogging.@logprogress progresschains / nchains
                            nextprogresschains = progresschains + threshold
                        end
                    end
                end
            end

            Distributed.@async begin
                try
                    Distributed.@sync for (i, _rng, _model, _sampler) in
                                          zip(1:nchunks, rngs, models, samplers)
                        chainidxs = if i == nchunks
                            ((i - 1) * chunksize + 1):nchains
                        else
                            ((i - 1) * chunksize + 1):(i * chunksize)
                        end
                        Threads.@spawn for chainidx in chainidxs
                            # Seed the chunk-specific random number generator with the pre-made seed.
                            Random.seed!(_rng, seeds[chainidx])

                            # Sample a chain and save it to the vector.
                            chains[chainidx] = StatsBase.sample(
                                _rng,
                                _model,
                                _sampler,
                                N;
                                progress=false,
                                initial_params=if initial_params === nothing
                                    nothing
                                else
                                    initial_params[chainidx]
                                end,
                                initial_state=if initial_state === nothing
                                    nothing
                                else
                                    initial_state[chainidx]
                                end,
                                kwargs...,
                            )

                            # Update the progress bar.
                            progress && put!(channel, true)
                        end
                    end
                finally
                    # Stop updating the progress bar.
                    progress && put!(channel, false)
                end
            end
        end
    end

    # Concatenate the chains together.
    return chainsstack(tighten_eltype(chains))
end

function mcmcsample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    ::MCMCDistributed,
    N::Integer,
    nchains::Integer;
    progress=PROGRESS[],
    progressname="Sampling ($(Distributed.nworkers()) processes)",
    initial_params=nothing,
    initial_state=nothing,
    kwargs...,
)
    # Check if actually multiple processes are used.
    if Distributed.nworkers() == 1
        @warn "Only a single process available: MCMC chains are not sampled in parallel"
    end

    # Check if the number of chains is larger than the number of samples
    if nchains > N
        @warn "Number of chains ($nchains) is greater than number of samples per chain ($N)"
    end

    # Ensure that initial parameters and states are `nothing` or of the correct length
    check_initial_params(initial_params, nchains)
    check_initial_state(initial_state, nchains)

    _initial_params =
        initial_params === nothing ? FillArrays.Fill(nothing, nchains) : initial_params
    _initial_state =
        initial_state === nothing ? FillArrays.Fill(nothing, nchains) : initial_state

    # Create a seed for each chain using the provided random number generator.
    seeds = rand(rng, UInt, nchains)

    # Set up worker pool.
    pool = Distributed.CachingPool(Distributed.workers())

    local chains
    @ifwithprogresslogger progress name = progressname begin
        # Create a channel for progress logging.
        if progress
            channel = Distributed.RemoteChannel(() -> Channel{Bool}(Distributed.nworkers()))
        end

        Distributed.@sync begin
            if progress
                # Update the progress bar.
                Distributed.@async begin
                    # Determine threshold values for progress logging
                    # (one update per 0.5% of progress)
                    threshold = nchains ÷ 200
                    nextprogresschains = threshold

                    progresschains = 0
                    while take!(channel)
                        progresschains += 1
                        if progresschains >= nextprogresschains
                            ProgressLogging.@logprogress progresschains / nchains
                            nextprogresschains = progresschains + threshold
                        end
                    end
                end
            end

            Distributed.@async begin
                try
                    function sample_chain(seed, initial_params, initial_state)
                        # Seed a new random number generator with the pre-made seed.
                        Random.seed!(rng, seed)

                        # Sample a chain.
                        chain = StatsBase.sample(
                            rng,
                            model,
                            sampler,
                            N;
                            progress=false,
                            initial_params=initial_params,
                            initial_state=initial_state,
                            kwargs...,
                        )

                        # Update the progress bar.
                        progress && put!(channel, true)

                        # Return the new chain.
                        return chain
                    end
                    chains = Distributed.pmap(
                        sample_chain, pool, seeds, _initial_params, _initial_state
                    )
                finally
                    # Stop updating the progress bar.
                    progress && put!(channel, false)
                end
            end
        end
    end

    # Concatenate the chains together.
    return chainsstack(tighten_eltype(chains))
end

function mcmcsample(
    rng::Random.AbstractRNG,
    model::AbstractModel,
    sampler::AbstractSampler,
    ::MCMCSerial,
    N::Integer,
    nchains::Integer;
    progressname="Sampling",
    initial_params=nothing,
    initial_state=nothing,
    kwargs...,
)
    # Check if the number of chains is larger than the number of samples
    if nchains > N
        @warn "Number of chains ($nchains) is greater than number of samples per chain ($N)"
    end

    # Ensure that initial parameters and states are `nothing` or of the correct length
    check_initial_params(initial_params, nchains)
    check_initial_state(initial_state, nchains)

    _initial_params =
        initial_params === nothing ? FillArrays.Fill(nothing, nchains) : initial_params
    _initial_state =
        initial_state === nothing ? FillArrays.Fill(nothing, nchains) : initial_state

    # Create a seed for each chain using the provided random number generator.
    seeds = rand(rng, UInt, nchains)

    # Sample the chains.
    function sample_chain(i, seed, initial_params, initial_state)
        # Seed a new random number generator with the pre-made seed.
        Random.seed!(rng, seed)

        # Sample a chain.
        return StatsBase.sample(
            rng,
            model,
            sampler,
            N;
            progressname=string(progressname, " (Chain ", i, " of ", nchains, ")"),
            initial_params=initial_params,
            initial_state=initial_state,
            kwargs...,
        )
    end
    chains = map(sample_chain, 1:nchains, seeds, _initial_params, _initial_state)

    # Concatenate the chains together.
    return chainsstack(tighten_eltype(chains))
end

tighten_eltype(x) = x
tighten_eltype(x::Vector{Any}) = map(identity, x)

@nospecialize check_initial_params(x, n) = throw(
    ArgumentError(
        "initial parameters must be specified as a vector of length equal to the number of chains or `nothing`",
    ),
)

check_initial_params(::Nothing, n) = nothing
function check_initial_params(x::AbstractArray, n)
    if length(x) != n
        throw(
            ArgumentError(
                "incorrect number of initial parameters (expected $n, received $(length(x))"
            ),
        )
    end

    return nothing
end

@nospecialize check_initial_state(x, n) = throw(
    ArgumentError(
        "initial states must be specified as a vector of length equal to the number of chains or `nothing`",
    ),
)

check_initial_state(::Nothing, n) = nothing
function check_initial_state(x::AbstractArray, n)
    if length(x) != n
        throw(
            ArgumentError(
                "incorrect number of initial states (expected $n, received $(length(x))"
            ),
        )
    end

    return nothing
end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
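A usage sketch for the keyword arguments and ensemble algorithms handled above, again assuming the hypothetical `UniformModel`/`UniformSampler` pair from earlier:

```julia
using AbstractMCMC
using Random

# `discard_initial` and `thinning` are consumed by `mcmcsample` above.
chain = sample(
    Random.Xoshiro(1), UniformModel(), UniformSampler(), 500;
    discard_initial=100, thinning=5,
)

# Four chains sampled in parallel and combined via `chainsstack`.
chains = sample(UniformModel(), UniformSampler(), MCMCThreads(), 500, 4)

# Open-ended sampling with a convergence criterion instead of a fixed N.
isdone(rng, model, sampler, samples, state, i; kwargs...) = i >= 1_000
chain2 = sample(UniformModel(), UniformSampler(), isdone)
```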
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/samplingstats.jl
code
404
""" SamplingStats A struct that tracks sampling information. The fields available are: - `start`: A `Float64` Unix timestamp indicating the start time of sampling. - `stop`: A `Float64` Unix timestamp indicating the stop time of sampling. - `duration`: The sampling time duration, defined as `stop - start`. """ struct SamplingStats start::Float64 stop::Float64 duration::Float64 end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/stepper.jl
code
2246
struct Stepper{A<:Random.AbstractRNG,M<:AbstractModel,S<:AbstractSampler,K}
    rng::A
    model::M
    sampler::S
    kwargs::K
end

# Initial sample.
function Base.iterate(stp::Stepper)
    # Unpack iterator.
    rng = stp.rng
    model = stp.model
    sampler = stp.sampler
    kwargs = stp.kwargs
    discard_initial = get(kwargs, :discard_initial, 0)::Int

    # Start sampling algorithm and discard initial samples if desired.
    sample, state = step(rng, model, sampler; kwargs...)
    for _ in 1:discard_initial
        sample, state = step(rng, model, sampler, state; kwargs...)
    end

    return sample, state
end

# Subsequent samples.
function Base.iterate(stp::Stepper, state)
    # Unpack iterator.
    rng = stp.rng
    model = stp.model
    sampler = stp.sampler
    kwargs = stp.kwargs
    thinning = get(kwargs, :thinning, 1)::Int

    # Return next sample, possibly after thinning the chain if desired.
    for _ in 1:(thinning - 1)
        _, state = step(rng, model, sampler, state; kwargs...)
    end

    return step(rng, model, sampler, state; kwargs...)
end

Base.IteratorSize(::Type{<:Stepper}) = Base.IsInfinite()
Base.IteratorEltype(::Type{<:Stepper}) = Base.EltypeUnknown()

function steps(model_or_logdensity, sampler::AbstractSampler; kwargs...)
    return steps(Random.default_rng(), model_or_logdensity, sampler; kwargs...)
end

"""
    steps(
        rng::Random.AbstractRNG=Random.default_rng(),
        model::AbstractModel,
        sampler::AbstractSampler;
        kwargs...,
    )

Create an iterator that returns samples from the `model` with the Markov chain Monte Carlo
`sampler`.

# Examples

```jldoctest; setup=:(using AbstractMCMC: steps)
julia> struct MyModel <: AbstractMCMC.AbstractModel end

julia> struct MySampler <: AbstractMCMC.AbstractSampler end

julia> function AbstractMCMC.step(rng, ::MyModel, ::MySampler, state=nothing; kwargs...)
           # all samples are zero
           return 0.0, state
       end

julia> iterator = steps(MyModel(), MySampler());

julia> collect(Iterators.take(iterator, 10)) == zeros(10)
true
```
"""
function steps(
    rng::Random.AbstractRNG, model::AbstractModel, sampler::AbstractSampler; kwargs...
)
    return Stepper(rng, model, sampler, kwargs)
end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
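A usage sketch for the iterator interface, assuming the hypothetical `UniformModel`/`UniformSampler` pair from earlier:

```julia
using AbstractMCMC

# The stepper is an infinite iterator, so pair it with `Iterators.take`;
# `discard_initial` and `thinning` are read from the stored keyword
# arguments by the `Base.iterate` methods above.
it = AbstractMCMC.steps(UniformModel(), UniformSampler(); discard_initial=10, thinning=2)
first_five = collect(Iterators.take(it, 5))
```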
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
src/transducer.jl
code
2887
struct Sample{A<:Random.AbstractRNG,M<:AbstractModel,S<:AbstractSampler,K} <:
       Transducers.Transducer
    rng::A
    model::M
    sampler::S
    kwargs::K
end

function Sample(model_or_logdensity, sampler::AbstractSampler; kwargs...)
    return Sample(Random.default_rng(), model_or_logdensity, sampler; kwargs...)
end

"""
    Sample(
        rng::Random.AbstractRNG=Random.default_rng(),
        model::AbstractModel,
        sampler::AbstractSampler;
        kwargs...,
    )

Create a transducer that returns samples from the `model` with the Markov chain Monte
Carlo `sampler`.

# Examples

```jldoctest; setup=:(using AbstractMCMC: Sample)
julia> struct MyModel <: AbstractMCMC.AbstractModel end

julia> struct MySampler <: AbstractMCMC.AbstractSampler end

julia> function AbstractMCMC.step(rng, ::MyModel, ::MySampler, state=nothing; kwargs...)
           # all samples are zero
           return 0.0, state
       end

julia> transducer = Sample(MyModel(), MySampler());

julia> collect(transducer(1:10)) == zeros(10)
true
```
"""
function Sample(
    rng::Random.AbstractRNG, model::AbstractModel, sampler::AbstractSampler; kwargs...
)
    return Sample(rng, model, sampler, kwargs)
end

# Initial sample.
function Transducers.start(rf::Transducers.R_{<:Sample}, result)
    # Unpack transducer.
    td = Transducers.xform(rf)
    rng = td.rng
    model = td.model
    sampler = td.sampler
    kwargs = td.kwargs
    discard_initial = get(kwargs, :discard_initial, 0)::Int

    # Start sampling algorithm and discard initial samples if desired.
    sample, state = step(rng, model, sampler; kwargs...)
    for _ in 1:discard_initial
        sample, state = step(rng, model, sampler, state; kwargs...)
    end

    return Transducers.wrap(
        rf, (sample, state), Transducers.start(Transducers.inner(rf), result)
    )
end

# Subsequent samples.
function Transducers.next(rf::Transducers.R_{<:Sample}, result, input)
    # Unpack transducer.
    td = Transducers.xform(rf)
    rng = td.rng
    model = td.model
    sampler = td.sampler
    kwargs = td.kwargs
    thinning = get(kwargs, :thinning, 1)::Int

    let rng = rng,
        model = model,
        sampler = sampler,
        kwargs = kwargs,
        thinning = thinning,
        inner_rf = Transducers.inner(rf)

        Transducers.wrapping(rf, result) do (sample, state), iresult
            iresult2 = Transducers.next(inner_rf, iresult, sample)

            # Perform thinning if desired.
            for _ in 1:(thinning - 1)
                _, state = step(rng, model, sampler, state; kwargs...)
            end

            return step(rng, model, sampler, state; kwargs...), iresult2
        end
    end
end

function Transducers.complete(rf::Transducers.R_{Sample}, result)
    _, inner_result = Transducers.unwrap(rf, result)
    return Transducers.complete(Transducers.inner(rf), inner_result)
end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
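A usage sketch for the transducer, assuming the hypothetical `UniformModel`/`UniformSampler` pair and Transducers.jl's `opcompose`/`Map`:

```julia
using AbstractMCMC
using Transducers

# Each element of the input range triggers one MCMC step; downstream
# transducers then see the samples.
xf = opcompose(AbstractMCMC.Sample(UniformModel(), UniformSampler()), Map(x -> 2 * x))
doubled = collect(xf(1:100))
```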
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
test/logdensityproblems.jl
code
3845
@testset "logdensityproblems.jl" begin # Add worker processes. # Memory requirements on Windows are ~4x larger than on Linux, hence number of processes is reduced # See, e.g., https://github.com/JuliaLang/julia/issues/40766 and https://github.com/JuliaLang/Pkg.jl/pull/2366 pids = addprocs(Sys.iswindows() ? div(Sys.CPU_THREADS::Int, 2) : Sys.CPU_THREADS::Int) # Load all required packages (`utils.jl` needs LogDensityProblems, Logging, and Random). @everywhere begin using AbstractMCMC using AbstractMCMC: sample using LogDensityProblems using Logging using Random include("utils.jl") end @testset "LogDensityModel" begin ℓ = MyLogDensity(10) model = @inferred AbstractMCMC.LogDensityModel(ℓ) @test model isa AbstractMCMC.LogDensityModel{MyLogDensity} @test model.logdensity === ℓ @test_throws ArgumentError AbstractMCMC.LogDensityModel(mylogdensity) end @testset "fallback for log densities" begin # Sample with log density dim = 10 ℓ = MyLogDensity(dim) Random.seed!(1234) N = 1_000 samples = sample(ℓ, MySampler(), N) # Samples are of the correct dimension and log density values are correct @test length(samples) == N @test all(length(x.a) == dim for x in samples) @test all(x.b ≈ LogDensityProblems.logdensity(ℓ, x.a) for x in samples) # Same chain as if LogDensityModel is used explicitly Random.seed!(1234) samples2 = sample(AbstractMCMC.LogDensityModel(ℓ), MySampler(), N) @test length(samples2) == N @test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples2)) # Same chain if sampling is performed with convergence criterion Random.seed!(1234) isdone(rng, model, sampler, state, samples, iteration; kwargs...) = iteration > N samples3 = sample(ℓ, MySampler(), isdone) @test length(samples3) == N @test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples3)) # Same chain if sampling is performed with iterator Random.seed!(1234) samples4 = collect(Iterators.take(AbstractMCMC.steps(ℓ, MySampler()), N)) @test length(samples4) == N @test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples4)) # Same chain if sampling is performed with transducer Random.seed!(1234) xf = AbstractMCMC.Sample(ℓ, MySampler()) samples5 = collect(xf(1:N)) @test length(samples5) == N @test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples5)) # Parallel sampling for alg in (MCMCSerial(), MCMCDistributed(), MCMCThreads()) chains = sample(ℓ, MySampler(), alg, N, 2) @test length(chains) == 2 samples = vcat(chains[1], chains[2]) @test length(samples) == 2 * N @test all(length(x.a) == dim for x in samples) @test all(x.b ≈ LogDensityProblems.logdensity(ℓ, x.a) for x in samples) end # Log density has to satisfy the LogDensityProblems interface @test_throws ArgumentError sample(mylogdensity, MySampler(), N) @test_throws ArgumentError sample(mylogdensity, MySampler(), isdone) @test_throws ArgumentError sample(mylogdensity, MySampler(), MCMCSerial(), N, 2) @test_throws ArgumentError sample(mylogdensity, MySampler(), MCMCThreads(), N, 2) @test_throws ArgumentError sample( mylogdensity, MySampler(), MCMCDistributed(), N, 2 ) @test_throws ArgumentError AbstractMCMC.steps(mylogdensity, MySampler()) @test_throws ArgumentError AbstractMCMC.Sample(mylogdensity, MySampler()) end # Remove workers rmprocs(pids...) end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
test/runtests.jl
code
596
using AbstractMCMC
using ConsoleProgressMonitor: ProgressLogger
using IJulia
using LogDensityProblems
using LoggingExtras: TeeLogger, EarlyFilteredLogger
using TerminalLoggers: TerminalLogger
using FillArrays: FillArrays
using Transducers

using Distributed
using Logging: Logging
using Random
using Statistics
using Test
using Test: collect_test_logs

const LOGGERS = Set()
const CURRENT_LOGGER = Logging.current_logger()

include("utils.jl")

@testset "AbstractMCMC" begin
    include("sample.jl")
    include("stepper.jl")
    include("transducer.jl")
    include("logdensityproblems.jl")
end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
test/sample.jl
code
25262
@testset "sample.jl" begin @testset "Basic sampling" begin @testset "REPL" begin empty!(LOGGERS) Random.seed!(1234) N = 1_000 chain = sample(MyModel(), MySampler(), N; loggers=true) @test length(LOGGERS) == 1 logger = first(LOGGERS) @test logger isa TeeLogger @test logger.loggers[1].logger isa (Sys.iswindows() && VERSION < v"1.5.3" ? ProgressLogger : TerminalLogger) @test logger.loggers[2].logger === CURRENT_LOGGER @test Logging.current_logger() === CURRENT_LOGGER # test output type and size @test chain isa Vector{<:MySample} @test length(chain) == N # test some statistical properties tail_chain = @view chain[2:end] @test mean(x.a for x in tail_chain) ≈ 0.5 atol = 6e-2 @test var(x.a for x in tail_chain) ≈ 1 / 12 atol = 5e-3 @test mean(x.b for x in tail_chain) ≈ 0.0 atol = 5e-2 @test var(x.b for x in tail_chain) ≈ 1 atol = 6e-2 # initial parameters chain = sample( MyModel(), MySampler(), 3; progress=false, initial_params=(b=3.2, a=-1.8) ) @test chain[1].a == -1.8 @test chain[1].b == 3.2 end @testset "IJulia" begin # emulate running IJulia kernel @eval IJulia begin inited = true end empty!(LOGGERS) Random.seed!(1234) N = 10 sample(MyModel(), MySampler(), N; loggers=true) @test length(LOGGERS) == 1 logger = first(LOGGERS) @test logger isa TeeLogger @test logger.loggers[1].logger isa ProgressLogger @test logger.loggers[2].logger === CURRENT_LOGGER @test Logging.current_logger() === CURRENT_LOGGER @eval IJulia begin inited = false end end @testset "Custom logger" begin empty!(LOGGERS) Random.seed!(1234) N = 10 logger = Logging.ConsoleLogger(stderr, Logging.LogLevel(-1)) Logging.with_logger(logger) do sample(MyModel(), MySampler(), N; loggers=true) end @test length(LOGGERS) == 1 @test first(LOGGERS) === logger @test Logging.current_logger() === CURRENT_LOGGER end @testset "Suppress output" begin logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), 100; progress=false) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # disable progress logging globally @test !(@test_logs (:info, "progress logging is disabled globally") AbstractMCMC.setprogress!( false )) @test !AbstractMCMC.PROGRESS[] logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample(MyModel(), MySampler(), 100) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # enable progress logging globally @test (@test_logs (:info, "progress logging is enabled globally") AbstractMCMC.setprogress!( true )) @test AbstractMCMC.PROGRESS[] end end @testset "Multithreaded sampling" begin if Threads.nthreads() == 1 warnregex = r"^Only a single thread available" @test_logs (:warn, warnregex) sample( MyModel(), MySampler(), MCMCThreads(), 10, 10 ) end # No dedicated chains type N = 10_000 chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000) @test chains isa Vector{<:Vector{<:MySample}} @test length(chains) == 1000 @test all(length(x) == N for x in chains) Random.seed!(1234) chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000; chain_type=MyChain) # test output type and size @test chains isa Vector{<:MyChain} @test length(chains) == 1000 @test all(x -> length(x.as) == length(x.bs) == N, chains) @test all(ismissing(x.as[1]) for x in chains) # test some statistical properties @test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains) @test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=1e-1), chains) 
# test reproducibility Random.seed!(1234) chains2 = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000; chain_type=MyChain) @test all(ismissing(x.as[1]) for x in chains2) @test all(c1.as[i] == c2.as[i] for (c1, c2) in zip(chains, chains2), i in 2:N) @test all(c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N) # Unexpected order of arguments. str = "Number of chains (10) is greater than number of samples per chain (5)" @test_logs (:warn, str) match_mode = :any sample( MyModel(), MySampler(), MCMCThreads(), 5, 10; chain_type=MyChain ) # Suppress output. logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample( MyModel(), MySampler(), MCMCThreads(), 10_000, 1000; progress=false, chain_type=MyChain, ) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # Smoke test for nchains < nthreads if Threads.nthreads() == 2 sample(MyModel(), MySampler(), MCMCThreads(), N, 1) end # initial parameters nchains = 100 initial_params = [(b=randn(), a=rand()) for _ in 1:nchains] chains = sample( MyModel(), MySampler(), MCMCThreads(), 3, nchains; progress=false, initial_params=initial_params, ) @test length(chains) == nchains @test all( chain[1].a == params.a && chain[1].b == params.b for (chain, params) in zip(chains, initial_params) ) initial_params = (a=randn(), b=rand()) chains = sample( MyModel(), MySampler(), MCMCThreads(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains), ) @test length(chains) == nchains @test all( chain[1].a == initial_params.a && chain[1].b == initial_params.b for chain in chains ) # Too many `initial_params` @test_throws ArgumentError sample( MyModel(), MySampler(), MCMCThreads(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains + 1), ) # Too few `initial_params` @test_throws ArgumentError sample( MyModel(), MySampler(), MCMCThreads(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains - 1), ) end @testset "Multicore sampling" begin if nworkers() == 1 warnregex = r"^Only a single process available" @test_logs (:warn, warnregex) sample( MyModel(), MySampler(), MCMCDistributed(), 10, 10; chain_type=MyChain ) end # Add worker processes. # Memory requirements on Windows are ~4x larger than on Linux, hence number of processes is reduced # See, e.g., https://github.com/JuliaLang/julia/issues/40766 and https://github.com/JuliaLang/Pkg.jl/pull/2366 pids = addprocs( Sys.iswindows() ? div(Sys.CPU_THREADS::Int, 2) : Sys.CPU_THREADS::Int ) # Load all required packages (`utils.jl` needs LogDensityProblems, Logging, and Random). @everywhere begin using AbstractMCMC using AbstractMCMC: sample using LogDensityProblems using Logging using Random include("utils.jl") end # No dedicated chains type N = 10_000 chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000) @test chains isa Vector{<:Vector{<:MySample}} @test length(chains) == 1000 @test all(length(x) == N for x in chains) Random.seed!(1234) chains = sample( MyModel(), MySampler(), MCMCDistributed(), N, 1000; chain_type=MyChain ) # Test output type and size. @test chains isa Vector{<:MyChain} @test all(ismissing(c.as[1]) for c in chains) @test length(chains) == 1000 @test all(x -> length(x.as) == length(x.bs) == N, chains) # Test some statistical properties. 
@test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains) @test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=1e-1), chains) # Test reproducibility. Random.seed!(1234) chains2 = sample( MyModel(), MySampler(), MCMCDistributed(), N, 1000; chain_type=MyChain ) @test all(ismissing(c.as[1]) for c in chains2) @test all(c1.as[i] == c2.as[i] for (c1, c2) in zip(chains, chains2), i in 2:N) @test all(c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N) # Unexpected order of arguments. str = "Number of chains (10) is greater than number of samples per chain (5)" @test_logs (:warn, str) match_mode = :any sample( MyModel(), MySampler(), MCMCDistributed(), 5, 10; chain_type=MyChain ) # Suppress output. logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample( MyModel(), MySampler(), MCMCDistributed(), 10_000, 100; progress=false, chain_type=MyChain, ) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # initial parameters nchains = 100 initial_params = [(a=randn(), b=rand()) for _ in 1:nchains] chains = sample( MyModel(), MySampler(), MCMCDistributed(), 3, nchains; progress=false, initial_params=initial_params, ) @test length(chains) == nchains @test all( chain[1].a == params.a && chain[1].b == params.b for (chain, params) in zip(chains, initial_params) ) initial_params = (b=randn(), a=rand()) chains = sample( MyModel(), MySampler(), MCMCDistributed(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains), ) @test length(chains) == nchains @test all( chain[1].a == initial_params.a && chain[1].b == initial_params.b for chain in chains ) # Too many `initial_params` @test_throws ArgumentError sample( MyModel(), MySampler(), MCMCDistributed(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains + 1), ) # Too few `initial_params` @test_throws ArgumentError sample( MyModel(), MySampler(), MCMCDistributed(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains - 1), ) # Remove workers rmprocs(pids...) end @testset "Serial sampling" begin # No dedicated chains type N = 10_000 chains = sample(MyModel(), MySampler(), MCMCSerial(), N, 1000; progress=false) @test chains isa Vector{<:Vector{<:MySample}} @test length(chains) == 1000 @test all(length(x) == N for x in chains) Random.seed!(1234) chains = sample( MyModel(), MySampler(), MCMCSerial(), N, 1000; chain_type=MyChain, progress=false, ) # Test output type and size. @test chains isa Vector{<:MyChain} @test all(ismissing(c.as[1]) for c in chains) @test length(chains) == 1000 @test all(x -> length(x.as) == length(x.bs) == N, chains) # Test some statistical properties. @test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains) @test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains) @test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=1e-1), chains) # Test reproducibility. Random.seed!(1234) chains2 = sample( MyModel(), MySampler(), MCMCSerial(), N, 1000; chain_type=MyChain, progress=false, ) @test all(ismissing(c.as[1]) for c in chains2) @test all(c1.as[i] == c2.as[i] for (c1, c2) in zip(chains, chains2), i in 2:N) @test all(c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N) # Unexpected order of arguments. 
str = "Number of chains (10) is greater than number of samples per chain (5)" @test_logs (:warn, str) match_mode = :any sample( MyModel(), MySampler(), MCMCSerial(), 5, 10; chain_type=MyChain ) # Suppress output. logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do sample( MyModel(), MySampler(), MCMCSerial(), 10_000, 100; progress=false, chain_type=MyChain, ) end @test all(l.level > Logging.LogLevel(-1) for l in logs) # initial parameters nchains = 100 initial_params = [(a=rand(), b=randn()) for _ in 1:nchains] chains = sample( MyModel(), MySampler(), MCMCSerial(), 3, nchains; progress=false, initial_params=initial_params, ) @test length(chains) == nchains @test all( chain[1].a == params.a && chain[1].b == params.b for (chain, params) in zip(chains, initial_params) ) initial_params = (b=rand(), a=randn()) chains = sample( MyModel(), MySampler(), MCMCSerial(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains), ) @test length(chains) == nchains @test all( chain[1].a == initial_params.a && chain[1].b == initial_params.b for chain in chains ) # Too many `initial_params` @test_throws ArgumentError sample( MyModel(), MySampler(), MCMCSerial(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains + 1), ) # Too few `initial_params` @test_throws ArgumentError sample( MyModel(), MySampler(), MCMCSerial(), 3, nchains; progress=false, initial_params=FillArrays.Fill(initial_params, nchains - 1), ) end @testset "Ensemble sampling: Reproducibility" begin N = 1_000 nchains = 10 # Serial sampling Random.seed!(1234) chains_serial = sample( MyModel(), MySampler(), MCMCSerial(), N, nchains; progress=false, chain_type=MyChain, ) @test all(ismissing(c.as[1]) for c in chains_serial) # Multi-threaded sampling Random.seed!(1234) chains_threads = sample( MyModel(), MySampler(), MCMCThreads(), N, nchains; progress=false, chain_type=MyChain, ) @test all(ismissing(c.as[1]) for c in chains_threads) @test all( c1.as[i] == c2.as[i] for (c1, c2) in zip(chains_serial, chains_threads), i in 2:N ) @test all( c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains_serial, chains_threads), i in 1:N ) # Multi-core sampling Random.seed!(1234) chains_distributed = sample( MyModel(), MySampler(), MCMCDistributed(), N, nchains; progress=false, chain_type=MyChain, ) @test all(ismissing(c.as[1]) for c in chains_distributed) @test all( c1.as[i] == c2.as[i] for (c1, c2) in zip(chains_serial, chains_distributed), i in 2:N ) @test all( c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains_serial, chains_distributed), i in 1:N ) end @testset "Chain constructors" begin chain1 = sample(MyModel(), MySampler(), 100) chain2 = sample(MyModel(), MySampler(), 100; chain_type=MyChain) @test chain1 isa Vector{<:MySample} @test chain2 isa MyChain end @testset "Sample stats" begin chain = sample(MyModel(), MySampler(), 1000; chain_type=MyChain) @test chain.stats.stop >= chain.stats.start @test chain.stats.duration == chain.stats.stop - chain.stats.start end @testset "Discard initial samples" begin # Create a chain and discard initial samples. Random.seed!(1234) N = 100 discard_initial = 50 chain = sample(MyModel(), MySampler(), N; discard_initial=discard_initial) @test length(chain) == N @test !ismissing(chain[1].a) # Repeat sampling without discarding initial samples. # On Julia < 1.6 progress logging changes the global RNG and hence is enabled here. 
# https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258 Random.seed!(1234) ref_chain = sample( MyModel(), MySampler(), N + discard_initial; progress=VERSION < v"1.6" ) @test all(chain[i].a == ref_chain[i + discard_initial].a for i in 1:N) @test all(chain[i].b == ref_chain[i + discard_initial].b for i in 1:N) end @testset "Thin chain by a factor of `thinning`" begin # Run a thinned chain with `N` samples thinned by factor of `thinning`. Random.seed!(100) N = 100 thinning = 3 chain = sample(MyModel(), MySampler(), N; thinning=thinning) @test length(chain) == N @test ismissing(chain[1].a) # Repeat sampling without thinning. # On Julia < 1.6 progress logging changes the global RNG and hence is enabled here. # https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258 Random.seed!(100) ref_chain = sample(MyModel(), MySampler(), N * thinning; progress=VERSION < v"1.6") @test all(chain[i].a == ref_chain[(i - 1) * thinning + 1].a for i in 2:N) @test all(chain[i].b == ref_chain[(i - 1) * thinning + 1].b for i in 1:N) end @testset "Sample without predetermined N" begin Random.seed!(1234) chain = sample(MyModel(), MySampler()) bmean = mean(x.b for x in chain) @test ismissing(chain[1].a) @test abs(bmean) <= 0.001 || length(chain) == 10_000 # Discard initial samples. Random.seed!(1234) discard_initial = 50 chain = sample(MyModel(), MySampler(); discard_initial=discard_initial) bmean = mean(x.b for x in chain) @test !ismissing(chain[1].a) @test abs(bmean) <= 0.001 || length(chain) == 10_000 # On Julia < 1.6 progress logging changes the global RNG and hence is enabled here. # https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258 Random.seed!(1234) N = length(chain) ref_chain = sample( MyModel(), MySampler(), N; discard_initial=discard_initial, progress=VERSION < v"1.6", ) @test all(chain[i].a == ref_chain[i].a for i in 1:N) @test all(chain[i].b == ref_chain[i].b for i in 1:N) # Thin chain by a factor of `thinning`. Random.seed!(1234) thinning = 3 chain = sample(MyModel(), MySampler(); thinning=thinning) bmean = mean(x.b for x in chain) @test ismissing(chain[1].a) @test abs(bmean) <= 0.001 || length(chain) == 10_000 # On Julia < 1.6 progress logging changes the global RNG and hence is enabled here. # https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258 Random.seed!(1234) N = length(chain) ref_chain = sample( MyModel(), MySampler(), N; thinning=thinning, progress=VERSION < v"1.6" ) @test all(chain[i].a == ref_chain[i].a for i in 2:N) @test all(chain[i].b == ref_chain[i].b for i in 1:N) end @testset "Sample vector of `NamedTuple`s" begin chain = sample(MyModel(), MySampler(), 1_000; chain_type=Vector{NamedTuple}) # Check output type @test chain isa Vector{<:NamedTuple} @test length(chain) == 1_000 @test all(keys(x) == (:a, :b) for x in chain) # Check some statistical properties @test ismissing(chain[1].a) @test mean(x.a for x in view(chain, 2:1_000)) ≈ 0.5 atol = 6e-2 @test var(x.a for x in view(chain, 2:1_000)) ≈ 1 / 12 atol = 1e-2 @test mean(x.b for x in chain) ≈ 0 atol = 0.11 @test var(x.b for x in chain) ≈ 1 atol = 0.15 end @testset "Testing callbacks" begin function count_iterations( rng, model, sampler, sample, state, i; iter_array, kwargs... 
) return push!(iter_array, i) end N = 100 it_array = Float64[] sample(MyModel(), MySampler(), N; callback=count_iterations, iter_array=it_array) @test it_array == collect(1:N) # sampling without predetermined N it_array = Float64[] chain = sample( MyModel(), MySampler(); callback=count_iterations, iter_array=it_array ) @test it_array == collect(1:size(chain, 1)) end @testset "Providing initial state" begin function record_state( rng, model, sampler, sample, state, i; states_channel, kwargs... ) return put!(states_channel, state) end initial_state = 10 @testset "sample" begin n = 10 states_channel = Channel{Int}(n) chain = sample( MyModel(), MySampler(), n; initial_state=initial_state, callback=record_state, states_channel=states_channel, ) # Extract the states. states = [take!(states_channel) for _ in 1:n] @test length(states) == n for i in 1:n @test states[i] == initial_state + i end end @testset "sample with $mode" for mode in [MCMCSerial(), MCMCThreads(), MCMCDistributed()] nchains = 4 initial_state = 10 states_channel = if mode === MCMCDistributed() # Need to use `RemoteChannel` for this. RemoteChannel(() -> Channel{Int}(nchains)) else Channel{Int}(nchains) end chain = sample( MyModel(), MySampler(), mode, 1, nchains; initial_state=FillArrays.Fill(initial_state, nchains), callback=record_state, states_channel=states_channel, ) # Extract the states. states = [take!(states_channel) for _ in 1:nchains] @test length(states) == nchains for i in 1:nchains @test states[i] == initial_state + 1 end end end end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
test/stepper.jl
code
2333
@testset "stepper.jl" begin @testset "Iterator sampling" begin Random.seed!(1234) as = [] bs = [] iter = AbstractMCMC.steps(MyModel(), MySampler()) iter = AbstractMCMC.steps(MyModel(), MySampler(); a=1.0) # `a` shouldn't do anything for (count, t) in enumerate(iter) if count >= 1000 break end # don't save missing values t.a === missing && continue push!(as, t.a) push!(bs, t.b) end @test length(as) == length(bs) == 998 @test mean(as) ≈ 0.5 atol = 2e-2 @test var(as) ≈ 1 / 12 atol = 5e-3 @test mean(bs) ≈ 0.0 atol = 5e-2 @test var(bs) ≈ 1 atol = 5e-2 @test Base.IteratorSize(iter) == Base.IsInfinite() @test Base.IteratorEltype(iter) == Base.EltypeUnknown() end @testset "Discard initial samples" begin # Create a chain of `N` samples after discarding some initial samples. Random.seed!(1234) N = 50 discard_initial = 10 iter = AbstractMCMC.steps(MyModel(), MySampler(); discard_initial=discard_initial) as = [] bs = [] for t in Iterators.take(iter, N) push!(as, t.a) push!(bs, t.b) end # Repeat sampling with `sample`. Random.seed!(1234) chain = sample( MyModel(), MySampler(), N; discard_initial=discard_initial, progress=false ) @test all(as[i] == chain[i].a for i in 1:N) @test all(bs[i] == chain[i].b for i in 1:N) end @testset "Thin chain by a factor of `thinning`" begin # Create a thinned chain with a thinning factor of `thinning`. Random.seed!(1234) N = 50 thinning = 3 iter = AbstractMCMC.steps(MyModel(), MySampler(); thinning=thinning) as = [] bs = [] for t in Iterators.take(iter, N) push!(as, t.a) push!(bs, t.b) end # Repeat sampling with `sample`. Random.seed!(1234) chain = sample(MyModel(), MySampler(), N; thinning=thinning, progress=false) @test as[1] === chain[1].a === missing @test all(as[i] == chain[i].a for i in 2:N) @test all(bs[i] == chain[i].b for i in 1:N) end end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
test/transducer.jl
code
3308
@testset "transducer.jl" begin Random.seed!(1234) @testset "Basic sampling" begin N = 1_000 local chain Logging.with_logger(TerminalLogger()) do xf = AbstractMCMC.Sample(MyModel(), MySampler(); sleepy=true, logger=true) chain = collect(xf(withprogress(1:N; interval=1e-3))) end # test output type and size @test chain isa Vector{<:MySample} @test length(chain) == N # test some statistical properties tail_chain = @view chain[2:end] @test mean(x.a for x in tail_chain) ≈ 0.5 atol = 6e-2 @test var(x.a for x in tail_chain) ≈ 1 / 12 atol = 5e-3 @test mean(x.b for x in tail_chain) ≈ 0.0 atol = 5e-2 @test var(x.b for x in tail_chain) ≈ 1 atol = 6e-2 end @testset "drop" begin xf = AbstractMCMC.Sample(MyModel(), MySampler()) chain = collect(Drop(1)(xf(1:10))) @test chain isa Vector{MySample{Float64,Float64}} @test length(chain) == 9 end # Reproduce iterator example @testset "iterator example" begin # filter missing values and split transitions xf = opcompose( AbstractMCMC.Sample(MyModel(), MySampler()), OfType(MySample{Float64,Float64}), Map(x -> (x.a, x.b)), ) as, bs = foldl(xf, 1:999; init=(Float64[], Float64[])) do (as, bs), (a, b) push!(as, a) push!(bs, b) as, bs end @test length(as) == length(bs) == 998 @test mean(as) ≈ 0.5 atol = 2e-2 @test var(as) ≈ 1 / 12 atol = 5e-3 @test mean(bs) ≈ 0.0 atol = 5e-2 @test var(bs) ≈ 1 atol = 5e-2 end @testset "Discard initial samples" begin # Create a chain of `N` samples after discarding some initial samples. Random.seed!(1234) N = 50 discard_initial = 10 xf = opcompose( AbstractMCMC.Sample(MyModel(), MySampler(); discard_initial=discard_initial), Map(x -> (x.a, x.b)), ) as, bs = foldl(xf, 1:N; init=([], [])) do (as, bs), (a, b) push!(as, a) push!(bs, b) as, bs end # Repeat sampling with `sample`. Random.seed!(1234) chain = sample( MyModel(), MySampler(), N; discard_initial=discard_initial, progress=false ) @test all(as[i] == chain[i].a for i in 1:N) @test all(bs[i] == chain[i].b for i in 1:N) end @testset "Thin chain by a factor of `thinning`" begin # Create a thinned chain with a thinning factor of `thinning`. Random.seed!(1234) N = 50 thinning = 3 xf = opcompose( AbstractMCMC.Sample(MyModel(), MySampler(); thinning=thinning), Map(x -> (x.a, x.b)), ) as, bs = foldl(xf, 1:N; init=([], [])) do (as, bs), (a, b) push!(as, a) push!(bs, b) as, bs end # Repeat sampling with `sample`. Random.seed!(1234) chain = sample(MyModel(), MySampler(), N; thinning=thinning, progress=false) @test as[1] === chain[1].a === missing @test all(as[i] == chain[i].a for i in 2:N) @test all(bs[i] == chain[i].b for i in 1:N) end end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
test/utils.jl
code
2891
struct MyModel <: AbstractMCMC.AbstractModel end struct MySample{A,B} a::A b::B end struct MySampler <: AbstractMCMC.AbstractSampler end struct AnotherSampler <: AbstractMCMC.AbstractSampler end struct MyChain{A,B,S} <: AbstractMCMC.AbstractChains as::Vector{A} bs::Vector{B} stats::S end MyChain(a, b) = MyChain(a, b, NamedTuple()) function AbstractMCMC.step( rng::AbstractRNG, model::MyModel, sampler::MySampler, state::Union{Nothing,Integer}=nothing; loggers=false, initial_params=nothing, kwargs..., ) # sample `a` is missing in the first step if not provided a, b = if state === nothing && initial_params !== nothing initial_params.a, initial_params.b else (state === nothing ? missing : rand(rng)), randn(rng) end loggers && push!(LOGGERS, Logging.current_logger()) _state = state === nothing ? 1 : state + 1 return MySample(a, b), _state end function AbstractMCMC.bundle_samples( samples::Vector{<:MySample}, model::MyModel, sampler::MySampler, ::Any, ::Type{MyChain}; stats=nothing, kwargs..., ) as = [t.a for t in samples] bs = [t.b for t in samples] return MyChain(as, bs, stats) end function isdone( rng::AbstractRNG, model::MyModel, s::MySampler, samples, state, iteration::Int; kwargs..., ) # Calculate the mean of x.b. bmean = mean(x.b for x in samples) return abs(bmean) <= 0.001 || iteration > 10_000 end # Set a default convergence function. function AbstractMCMC.sample(model, sampler::MySampler; kwargs...) return sample(Random.default_rng(), model, sampler, isdone; kwargs...) end function AbstractMCMC.chainscat( chain::Union{MyChain,Vector{<:MyChain}}, chains::Union{MyChain,Vector{<:MyChain}}... ) return vcat(chain, chains...) end # Conversion to NamedTuple Base.convert(::Type{NamedTuple}, x::MySample) = (a=x.a, b=x.b) # Gaussian log density (without additive constants) # Without LogDensityProblems.jl interface mylogdensity(x) = -sum(abs2, x) / 2 # With LogDensityProblems.jl interface struct MyLogDensity dim::Int end LogDensityProblems.logdensity(::MyLogDensity, x) = mylogdensity(x) LogDensityProblems.dimension(m::MyLogDensity) = m.dim function LogDensityProblems.capabilities(::Type{MyLogDensity}) return LogDensityProblems.LogDensityOrder{0}() end # Define "sampling" function AbstractMCMC.step( rng::AbstractRNG, model::AbstractMCMC.LogDensityModel{MyLogDensity}, ::MySampler, state::Union{Nothing,Integer}=nothing; kwargs..., ) # Sample from multivariate normal distribution ℓ = model.logdensity dim = LogDensityProblems.dimension(ℓ) θ = randn(rng, dim) logdensity_θ = LogDensityProblems.logdensity(ℓ, θ) _state = state === nothing ? 1 : state + 1 return MySample(θ, logdensity_θ), _state end
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
README.md
docs
1149
# AbstractMCMC.jl Abstract types and interfaces for Markov chain Monte Carlo methods. [![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://turinglang.github.io/AbstractMCMC.jl/stable) [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://turinglang.github.io/AbstractMCMC.jl/dev) [![CI](https://github.com/TuringLang/AbstractMCMC.jl/workflows/CI/badge.svg?branch=master)](https://github.com/TuringLang/AbstractMCMC.jl/actions?query=workflow%3ACI+branch%3Amaster) [![IntegrationTest](https://github.com/TuringLang/AbstractMCMC.jl/workflows/IntegrationTest/badge.svg?branch=master)](https://github.com/TuringLang/AbstractMCMC.jl/actions?query=workflow%3AIntegrationTest+branch%3Amaster) [![Codecov](https://codecov.io/gh/TuringLang/AbstractMCMC.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/TuringLang/AbstractMCMC.jl) [![Coveralls](https://coveralls.io/repos/github/TuringLang/AbstractMCMC.jl/badge.svg?branch=master)](https://coveralls.io/github/TuringLang/AbstractMCMC.jl?branch=master) [![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
docs/src/api.md
docs
4119
# API AbstractMCMC defines an interface for sampling Markov chains. ## Model ```@docs AbstractMCMC.AbstractModel AbstractMCMC.LogDensityModel ``` ## Sampler ```@docs AbstractMCMC.AbstractSampler ``` ## Sampling a single chain ```@docs AbstractMCMC.sample(::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler, ::Any) AbstractMCMC.sample(::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler, ::Any) ``` ### Iterator ```@docs AbstractMCMC.steps(::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler) AbstractMCMC.steps(::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler) ``` ### Transducer ```@docs AbstractMCMC.Sample(::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler) AbstractMCMC.Sample(::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler) ``` ## Sampling multiple chains in parallel ```@docs AbstractMCMC.sample( ::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler, ::AbstractMCMC.AbstractMCMCEnsemble, ::Integer, ::Integer, ) AbstractMCMC.sample( ::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler, ::AbstractMCMC.AbstractMCMCEnsemble, ::Integer, ::Integer, ) ``` Two algorithms are provided for parallel sampling with multiple threads and multiple processes, and a third allows the user to sample multiple chains in serial (no parallelization): ```@docs AbstractMCMC.MCMCThreads AbstractMCMC.MCMCDistributed AbstractMCMC.MCMCSerial ``` ## Common keyword arguments Common keyword arguments for regular and parallel sampling are: - `progress` (default: `AbstractMCMC.PROGRESS[]` which is `true` initially): toggles progress logging - `chain_type` (default: `Any`): determines the type of the returned chain - `callback` (default: `nothing`): if `callback !== nothing`, then `callback(rng, model, sampler, sample, state, iteration)` is called after every sampling step, where `sample` is the most recent sample of the Markov chain and `state` and `iteration` are the current state and iteration of the sampler - `discard_initial` (default: `0`): number of initial samples that are discarded - `thinning` (default: `1`): factor by which to thin samples. - `initial_state` (default: `nothing`): if `initial_state !== nothing`, the first call to [`AbstractMCMC.step`](@ref) is passed `initial_state` as the `state` argument. !!! info The common keyword arguments `progress`, `chain_type`, and `callback` are not supported by the iterator [`AbstractMCMC.steps`](@ref) and the transducer [`AbstractMCMC.Sample`](@ref). There is no "official" way to provide initial parameter values yet. However, multiple packages such as [EllipticalSliceSampling.jl](https://github.com/TuringLang/EllipticalSliceSampling.jl) and [AdvancedMH.jl](https://github.com/TuringLang/AdvancedMH.jl) support an `initial_params` keyword argument for setting the initial values when sampling a single chain. To ensure that sampling multiple chains "just works" when sampling of a single chain is implemented, [we decided to support `initial_params` in the default implementations of the ensemble methods](https://github.com/TuringLang/AbstractMCMC.jl/pull/94): - `initial_params` (default: `nothing`): if `initial_params isa AbstractArray`, then the `i`th element of `initial_params` is used as initial parameters of the `i`th chain. If one wants to use the same initial parameters `x` for every chain, one can specify e.g. `initial_params = FillArrays.Fill(x, N)`. Progress logging can be enabled and disabled globally with `AbstractMCMC.setprogress!(progress)`. 
```@docs AbstractMCMC.setprogress! ``` ## Chains The `chain_type` keyword argument allows setting the type of the returned chain. A common choice is to return chains of type `Chains` from [MCMCChains.jl](https://github.com/TuringLang/MCMCChains.jl). AbstractMCMC defines the abstract type `AbstractChains` for Markov chains. ```@docs AbstractMCMC.AbstractChains ``` For chains of this type, AbstractMCMC defines the following two methods. ```@docs AbstractMCMC.chainscat AbstractMCMC.chainsstack ```
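As a concrete illustration of these keyword arguments, here is a minimal sketch of single-chain and multi-chain sampling; `model`, `sampler`, and `x0` are hypothetical placeholders for a user-defined model, sampler, and initial parameter vector:

```julia
using AbstractMCMC, FillArrays, Random

rng = Random.default_rng()

# Single chain: discard 100 warm-up samples, then keep every 2nd sample.
chain = sample(rng, model, sampler, 1_000; discard_initial=100, thinning=2)

# Four parallel chains, each started from the same initial parameters `x0`
# (assuming the sampler supports the `initial_params` keyword argument).
x0 = randn(rng, 5)
chains = sample(rng, model, sampler, MCMCThreads(), 1_000, 4;
                initial_params=Fill(x0, 4))
```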
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
docs/src/design.md
docs
4016
# Design This page explains the default implementations and design choices of AbstractMCMC. It is not intended for users but for developers who want to implement the AbstractMCMC interface for Markov chain Monte Carlo sampling. The user-facing API is explained in [API](@ref). ## Overview AbstractMCMC provides a default implementation of the user-facing interface described in [API](@ref). You can ignore it entirely and define your own implementation of the interface. However, as described below, in most use cases the default implementation allows you to obtain support for parallel sampling, progress logging, callbacks, iterators, and transducers for free by just defining the sampling step of your inference algorithm, drastically reducing the amount of code you have to write. In general, the docstrings of the functions described below might be helpful if you intend to make use of the default implementations. ## Basic structure The simplified structure for regular sampling (the actual implementation contains some additional error checks and support for progress logging and callbacks) is ```julia function StatsBase.sample( rng::Random.AbstractRNG, model::AbstractMCMC.AbstractModel, sampler::AbstractMCMC.AbstractSampler, N::Integer; chain_type::Type = Any, kwargs... ) # Obtain the initial sample and state. sample, state = AbstractMCMC.step(rng, model, sampler; kwargs...) # Save the sample. samples = AbstractMCMC.samples(sample, model, sampler, N; kwargs...) samples = AbstractMCMC.save!!(samples, sample, 1, model, sampler, N; kwargs...) # Step through the sampler. for i in 2:N # Obtain the next sample and state. sample, state = AbstractMCMC.step(rng, model, sampler, state; kwargs...) # Save the sample. samples = AbstractMCMC.save!!(samples, sample, i, model, sampler, N; kwargs...) end return AbstractMCMC.bundle_samples(samples, model, sampler, state, chain_type; kwargs...) end ``` All other default implementations make use of the same structure and in particular call the same methods. ## Sampling step The only method for which no default implementation is provided (and hence which downstream packages *have* to implement) is [`AbstractMCMC.step`](@ref). It defines the sampling step of the inference method. ```@docs AbstractMCMC.step ``` ## Collecting samples !!! note This section does not apply to the iterator and transducer interface. After the initial sample is obtained, the default implementations for regular and parallel sampling (not for the iterator and the transducer since it is not needed there) create a container for all samples (the initial one and all subsequent samples) using `AbstractMCMC.samples`. ```@docs AbstractMCMC.samples ``` In each step, the sample is saved in the container by `AbstractMCMC.save!!`. The notation `!!` follows the convention of the package [BangBang.jl](https://github.com/JuliaFolds/BangBang.jl) which is used in the default implementation of `AbstractMCMC.save!!`. It indicates that the sample is pushed to the container but a "widening" fallback is used if the container type does not allow saving the sample. Therefore `AbstractMCMC.save!!` *always has* to return the container. ```@docs AbstractMCMC.save!! ``` For most use cases the default implementation of `AbstractMCMC.samples` and `AbstractMCMC.save!!` should work out of the box and hence need not be overloaded in downstream code. ## Creating chains !!! note This section does not apply to the iterator and transducer interface. 
At the end of the sampling procedure for regular and parallel sampling, we transform the collection of samples to the desired output type by calling `AbstractMCMC.bundle_samples`. ```@docs AbstractMCMC.bundle_samples ``` The default implementation should be fine in most use cases, but downstream packages could, e.g., save the final state of the sampler as well if they overload `AbstractMCMC.bundle_samples`.
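To make the division of labor concrete, the following is a minimal sketch of a sampler that implements only [`AbstractMCMC.step`](@ref); the toy model, sampler type, and Gaussian draws are hypothetical and not part of AbstractMCMC itself:

```julia
using AbstractMCMC, Random

struct ToyModel <: AbstractMCMC.AbstractModel end
struct ToySampler <: AbstractMCMC.AbstractSampler end

# The first step receives no state; subsequent steps receive the previous state.
function AbstractMCMC.step(
    rng::Random.AbstractRNG,
    model::ToyModel,
    ::ToySampler,
    state::Union{Nothing,Int}=nothing;
    kwargs...,
)
    x = randn(rng)                                # draw a (toy) sample
    newstate = state === nothing ? 1 : state + 1  # use the iteration count as state
    return x, newstate
end

# With only `step` defined, the default implementations provide the rest:
chain = sample(ToyModel(), ToySampler(), 100)  # Vector{Float64} of 100 draws
```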
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
5.2.0
b0489adc45a7c8cf0d8e2ddf764f89c1c3decebd
docs/src/index.md
docs
604
# AbstractMCMC.jl *Abstract types and interfaces for Markov chain Monte Carlo methods.* AbstractMCMC defines an interface for sampling and combining Markov chains. It comes with a default sampling algorithm that provides support for progress bars, parallel sampling (multithreaded and multicore), and user-provided callbacks out of the box. Typically developers only have to define the sampling step of their inference method in an iterator-like fashion to make use of this functionality. Additionally, the package defines an iterator and a transducer for sampling Markov chains based on the interface.
AbstractMCMC
https://github.com/TuringLang/AbstractMCMC.jl.git
[ "MIT" ]
0.1.3
d95b8e6e93a173b71406d24fce591fa44a8cf3f9
docs/make.jl
code
519
using StableMap using Documenter DocMeta.setdocmeta!(StableMap, :DocTestSetup, :(using StableMap); recursive=true) makedocs(; modules=[StableMap], authors="Chris Elrod <elrodc@gmail.com> and contributors", repo="https://github.com/chriselrod/StableMap.jl/blob/{commit}{path}#{line}", sitename="StableMap.jl", format=Documenter.HTML(; prettyurls=get(ENV, "CI", "false") == "true", edit_link="main", assets=String[], ), pages=[ "Home" => "index.md", ], )
StableMap
https://github.com/chriselrod/StableMap.jl.git
[ "MIT" ]
0.1.3
d95b8e6e93a173b71406d24fce591fa44a8cf3f9
src/StableMap.jl
code
3215
module StableMap using ArrayInterface using LinearAlgebra export stable_map, stable_map! function stable_map!(f, dst::AbstractArray, arg0) N = length(dst) eachindex(arg0) == Base.oneto(N) || throw(ArgumentError("All args must have same axes.")) @inbounds for i = 1:N dst[i] = f(arg0[i]) end return dst end function stable_map!( f, dst::AbstractArray{T}, arg0, args::Vararg{Any,K} ) where {K,T} N = length(dst) all(==(Base.oneto(N)), map(eachindex, (arg0, args...))) || throw(ArgumentError("All args must have same axes.")) @inbounds for i = 1:N # fᵢ = f(map(Base.Fix2(Base.unsafe_getindex, i), args)...) # dst[i] = convert(T, fᵢ)::T dst[i] = f(arg0[i], map(Base.Fix2(Base.unsafe_getindex, i), args)...) end return dst end function stable_map!(f, dst::AbstractArray) N = length(dst) @inbounds for i = 1:N # fᵢ = f(map(Base.Fix2(Base.unsafe_getindex, i), args)...) # dst[i] = convert(T, fᵢ)::T dst[i] = f() end return dst end function narrowing_map!( f, dst::AbstractArray{T}, start::Int, args::Vararg{Any,K} ) where {K,T} N = length(dst) all(==(Base.oneto(N)), map(eachindex, args)) || throw(ArgumentError("All args must have same axes.")) @inbounds for i = start:N xi = f(map(Base.Fix2(Base.unsafe_getindex, i), args)...) if xi isa T dst[i] = xi else Ti = typeof(xi) PT = promote_type(Ti, T) if PT === T dst[i] = convert(T, xi) elseif Base.isconcretetype(PT) dst_promote = Array{PT}(undef, size(dst)) copyto!( view(dst_promote, Base.OneTo(i - 1)), view(dst, Base.OneTo(i - 1)) ) dst_promote[i] = convert(PT, xi)::PT return narrowing_map!(f, dst_promote, i + 1, args...) else dst_union = Array{Union{T,Ti}}(undef, size(dst)) copyto!( view(dst_union, Base.OneTo(i - 1)), view(dst, Base.OneTo(i - 1)) ) dst_union[i] = xi return narrowing_map!(f, dst_union, i + 1, args...) end end end return dst end isconcreteunion(TU) = if TU isa Union isconcretetype(TU.a) && isconcreteunion(TU.b) else isconcretetype(TU) end function promote_return(f::F, args...) where {F} T = Base.promote_op(f, map(eltype, args)...) Base.isconcretetype(T) && return T T isa Union || return nothing TU = Base.promote_union(T) Base.isconcretetype(TU) && return TU isconcreteunion(TU) && return TU nothing end function stable_map(f::F, args::Vararg{AbstractArray,K}) where {K,F} # assume specialized implementation all(ArrayInterface.ismutable, args) || return map(f, args...) T = promote_return(f, args...) first_arg = first(args) T === nothing || return stable_map!(f, Array{T}(undef, size(first_arg)), args...) x = f(map(first, args)...) dst = similar(first_arg, typeof(x)) @inbounds dst[1] = x narrowing_map!(f, dst, 2, args...) end function stable_map(f, A::Diagonal{T}) where {T} B = Matrix{promote_type(T, Float32)}(undef, size(A)) @inbounds for i in eachindex(A) B[i] = f(A[i]) end return B end @inline stable_map(f::F, arg1::A, args::Vararg{A,K}) where {F,K,A} = map(f, arg1, args...) end
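Since the module itself carries no docstrings, here is a brief usage sketch grounded in the package's test suite; the data and the type-unstable function are illustrative:

```julia
using StableMap

x = rand(10)           # Vector{Float64}
y = rand(-10:10, 10)   # Vector{Int64}

# A type-unstable function: inference cannot see through the barrier.
unstablemax(a, b) = Base.inferencebarrier(a > b ? a : b)

# `stable_map` still returns a concretely typed Vector{Float64}.
res = stable_map(unstablemax, x, y)

# `stable_map!` fills a preallocated destination with matching axes.
dst = Vector{Float64}(undef, 10)
stable_map!(unstablemax, dst, x, y)
```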
StableMap
https://github.com/chriselrod/StableMap.jl.git
[ "MIT" ]
0.1.3
d95b8e6e93a173b71406d24fce591fa44a8cf3f9
test/runtests.jl
code
507
using StableMap using Test using ForwardDiff @testset "StableMap.jl" begin x = rand(10); @test stable_map(exp, x) ≈ map(exp, x) unstablemax(x,y) = Base.inferencebarrier(x > y ? x : y) y = rand(-10:10, 10); res = stable_map(unstablemax, x, y) @test res isa Vector{Float64} @test res == map(unstablemax, x, y) f(x) = Base.inferencebarrier(x > 1 ? x : 1.0) @test stable_map(f, [ForwardDiff.Dual(0f0,1f0), ForwardDiff.Dual(2f0,1f0)]) isa Vector{ForwardDiff.Dual{Nothing,Float64,1}} end
StableMap
https://github.com/chriselrod/StableMap.jl.git
[ "MIT" ]
0.1.3
d95b8e6e93a173b71406d24fce591fa44a8cf3f9
README.md
docs
4344
# StableMap [![Build Status](https://github.com/chriselrod/StableMap.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/chriselrod/StableMap.jl/actions/workflows/CI.yml?query=branch%3Amain) [![Coverage](https://codecov.io/gh/chriselrod/StableMap.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/chriselrod/StableMap.jl) The map that preserves the relative order of inputs mapped to outputs. So do other maps, of course. StableMap tries to return vectors that are as concretely typed as possible. For example: ```julia julia> using StableMap, ForwardDiff, BenchmarkTools [ Info: Precompiling StableMap [626594ce-0aac-4e81-a7f6-bc4bb5ff97e9] julia> f(x) = x > 1 ? x : 1.0 f (generic function with 1 method) julia> g(x) = Base.inferencebarrier(x > 1 ? x : 1.0) g (generic function with 1 method) julia> h(x) = Base.inferencebarrier(x) h (generic function with 1 method) julia> x = [ForwardDiff.Dual(0f0,1f0), ForwardDiff.Dual(2f0,1f0)]; julia> y = [ForwardDiff.Dual(2f0,1f0), ForwardDiff.Dual(0f0,1f0)]; julia> @btime map(f, $x) 208.010 ns (4 allocations: 176 bytes) 2-element Vector{Real}: 1.0 Dual{Nothing}(2.0,1.0) julia> @btime stable_map(f, $x) 93.329 ns (1 allocation: 96 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}: Dual{Nothing}(1.0,0.0) Dual{Nothing}(2.0,1.0) julia> @btime map(f, $y) 210.378 ns (4 allocations: 176 bytes) 2-element Vector{Real}: Dual{Nothing}(2.0,1.0) 1.0 julia> @btime stable_map(f, $y) 94.547 ns (1 allocation: 96 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}: Dual{Nothing}(2.0,1.0) Dual{Nothing}(1.0,0.0) julia> @btime map(g, $x) 890.247 ns (10 allocations: 272 bytes) 2-element Vector{Real}: 1.0 Dual{Nothing}(2.0,1.0) julia> @btime stable_map(g, $x) 3.221 μs (18 allocations: 800 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}: Dual{Nothing}(1.0,0.0) Dual{Nothing}(2.0,1.0) julia> @btime map(g, $y) 866.372 ns (10 allocations: 272 bytes) 2-element Vector{Real}: Dual{Nothing}(2.0,1.0) 1.0 julia> @btime stable_map(g, $y) 3.357 μs (18 allocations: 800 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}: Dual{Nothing}(2.0,1.0) Dual{Nothing}(1.0,0.0) julia> @btime map(h, $x) 531.503 ns (5 allocations: 144 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}: Dual{Nothing}(0.0,1.0) Dual{Nothing}(2.0,1.0) julia> @btime stable_map(h, $x) 810.656 ns (4 allocations: 128 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}: Dual{Nothing}(0.0,1.0) Dual{Nothing}(2.0,1.0) julia> @btime map(h, $y) 535.145 ns (5 allocations: 144 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}: Dual{Nothing}(2.0,1.0) Dual{Nothing}(0.0,1.0) julia> @btime stable_map(h, $y) 816.471 ns (4 allocations: 128 bytes) 2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}: Dual{Nothing}(2.0,1.0) Dual{Nothing}(0.0,1.0) ``` It can be faster at handling small unions than `Base.map`, but is currently slower for functions that return `Any`. However, in both cases, it has the benefit of returning arrays that are as concretely typed as possible. It will try to promote returned objects to the same type, and if this is not possible, it will return a small union. ```julia julia> m(x) = x > 1.0 ? 
x : [x] m (generic function with 1 method) julia> @btime map(m, $x) 257.890 ns (4 allocations: 208 bytes) 2-element Vector{Any}: ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)] Dual{Nothing}(2.0,1.0) julia> @btime stable_map(m, $x) 194.158 ns (3 allocations: 144 bytes) 2-element Vector{Union{ForwardDiff.Dual{Nothing, Float32, 1}, Vector{ForwardDiff.Dual{Nothing, Float32, 1}}}}: ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)] Dual{Nothing}(2.0,1.0) julia> @btime map(m, $y) 260.979 ns (4 allocations: 224 bytes) 2-element Vector{Any}: Dual{Nothing}(2.0,1.0) ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)] julia> @btime stable_map(m, $y) 190.128 ns (3 allocations: 144 bytes) 2-element Vector{Union{ForwardDiff.Dual{Nothing, Float32, 1}, Vector{ForwardDiff.Dual{Nothing, Float32, 1}}}}: Dual{Nothing}(2.0,1.0) ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)] ```
StableMap
https://github.com/chriselrod/StableMap.jl.git
[ "MIT" ]
0.1.3
d95b8e6e93a173b71406d24fce591fa44a8cf3f9
docs/src/index.md
docs
183
```@meta CurrentModule = StableMap ``` # StableMap Documentation for [StableMap](https://github.com/chriselrod/StableMap.jl). ```@index ``` ```@autodocs Modules = [StableMap] ```
StableMap
https://github.com/chriselrod/StableMap.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
docs/make.jl
code
614
using Documenter, KrylovPreconditioners makedocs( modules = [KrylovPreconditioners], doctest = true, linkcheck = true, format = Documenter.HTML(assets = ["assets/style.css"], ansicolor = true, prettyurls = get(ENV, "CI", nothing) == "true", collapselevel = 1), sitename = "KrylovPreconditioners.jl", pages = ["Home" => "index.md", "Reference" => "reference.md" ] ) deploydocs( repo = "github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git", push_preview = true, devbranch = "main", )
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/KrylovPreconditionersAMDGPUExt.jl
code
510
module KrylovPreconditionersAMDGPUExt using LinearAlgebra using SparseArrays using AMDGPU using AMDGPU.rocSPARSE using LinearAlgebra: checksquare, BlasReal, BlasFloat import LinearAlgebra: ldiv!, mul! import Base: size, eltype, unsafe_convert using KrylovPreconditioners const KP = KrylovPreconditioners using KernelAbstractions const KA = KernelAbstractions include("AMDGPU/ic0.jl") include("AMDGPU/ilu0.jl") include("AMDGPU/blockjacobi.jl") include("AMDGPU/operators.jl") include("AMDGPU/scaling.jl") end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/KrylovPreconditionersCUDAExt.jl
code
493
module KrylovPreconditionersCUDAExt using LinearAlgebra using SparseArrays using CUDA using CUDA.CUSPARSE using LinearAlgebra: checksquare, BlasReal, BlasFloat import LinearAlgebra: ldiv!, mul! import Base: size, eltype, unsafe_convert using KrylovPreconditioners const KP = KrylovPreconditioners using KernelAbstractions const KA = KernelAbstractions include("CUDA/ic0.jl") include("CUDA/ilu0.jl") include("CUDA/blockjacobi.jl") include("CUDA/operators.jl") include("CUDA/scaling.jl") end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/AMDGPU/blockjacobi.jl
code
1687
KP.BlockJacobiPreconditioner(J::rocSPARSE.ROCSparseMatrixCSR; options...) = BlockJacobiPreconditioner(SparseMatrixCSC(J); options...) function KP.create_blocklist(cublocks::ROCArray, npart) blocklist = Array{ROCArray{Float64,2}}(undef, npart) for b in 1:npart blocklist[b] = ROCMatrix{Float64}(undef, size(cublocks,1), size(cublocks,2)) end return blocklist end function _update_gpu(p, j_rowptr, j_colval, j_nzval, device::ROCBackend) nblocks = p.nblocks blocksize = p.blocksize fillblock_gpu_kernel! = KP._fillblock_gpu!(device) # Fill the dense blocks of the block-Jacobi preconditioner fillblock_gpu_kernel!( p.cublocks, size(p.id,1), p.cupartitions, p.cumap, j_rowptr, j_colval, j_nzval, p.cupart, p.culpartitions, p.id, ndrange=(nblocks, blocksize), ) KA.synchronize(device) # Invert the dense blocks in batch for b in 1:nblocks p.blocklist[b] .= p.cublocks[:,:,b] end AMDGPU.@sync pivot, info = AMDGPU.rocSOLVER.getrf_batched!(p.blocklist) AMDGPU.@sync pivot, info, p.blocklist = AMDGPU.rocSOLVER.getri_batched!(p.blocklist, pivot) for b in 1:nblocks p.cublocks[:,:,b] .= p.blocklist[b] end return end """ update!(p, J::ROCSparseMatrixCSR) Update the preconditioner `p` from the sparse Jacobian `J` in CSR format for ROCm: 1) The dense blocks `cuJs` are filled from the sparse Jacobian `J`. 2) The dense blocks are inverted in batch using rocSOLVER. 3) The preconditioner matrix `p.P` is extracted from the dense blocks `cuJs`. """ function KP.update!(p::BlockJacobiPreconditioner, J::rocSPARSE.ROCSparseMatrixCSR) _update_gpu(p, J.rowPtr, J.colVal, J.nzVal, p.device) end
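Grounded in the methods above, a minimal sketch of keeping a block-Jacobi preconditioner in sync with a changing Jacobian; `J_gpu` is a hypothetical square `ROCSparseMatrixCSR{Float64,Cint}`:

```julia
using AMDGPU, AMDGPU.rocSPARSE
import KrylovPreconditioners as KP

# Construct the preconditioner (partitioning options, if any, are forwarded
# to the CPU constructor via `options...`).
p = KP.BlockJacobiPreconditioner(J_gpu)

# ... after the nonzero values of J_gpu have been updated in place ...
KP.update!(p, J_gpu)  # refill and re-invert the dense blocks
```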
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/AMDGPU/ic0.jl
code
4395
mutable struct AMD_IC0{SM} <: AbstractKrylovPreconditioner n::Int desc::rocSPARSE.ROCMatrixDescriptor buffer::ROCVector{UInt8} info::rocSPARSE.MatInfo timer_update::Float64 P::SM end for (bname, aname, sname, T) in ((:rocsparse_scsric0_buffer_size, :rocsparse_scsric0_analysis, :rocsparse_scsric0, :Float32), (:rocsparse_dcsric0_buffer_size, :rocsparse_dcsric0_analysis, :rocsparse_dcsric0, :Float64), (:rocsparse_ccsric0_buffer_size, :rocsparse_ccsric0_analysis, :rocsparse_ccsric0, :ComplexF32), (:rocsparse_zcsric0_buffer_size, :rocsparse_zcsric0_analysis, :rocsparse_zcsric0, :ComplexF64)) @eval begin function KP.kp_ic0(A::ROCSparseMatrixCSR{$T,Cint}) P = copy(A) n = checksquare(P) desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O') info = rocSPARSE.MatInfo() buffer_size = Ref{Csize_t}() rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer) posit = Ref{Cint}(1) rocSPARSE.rocsparse_csric0_zero_pivot(rocSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer) return AMD_IC0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::AMD_IC0{ROCSparseMatrixCSR{$T,Cint}}, A::ROCSparseMatrixCSR{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer) return p end function KP.kp_ic0(A::ROCSparseMatrixCSC{$T,Cint}) P = copy(A) n = checksquare(P) desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O') info = rocSPARSE.MatInfo() buffer_size = Ref{Csize_t}() rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer) posit = Ref{Cint}(1) rocSPARSE.rocsparse_csric0_zero_pivot(rocSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer) return AMD_IC0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::AMD_IC0{ROCSparseMatrixCSC{$T,Cint}}, A::ROCSparseMatrixCSC{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer) return p end end end for ArrayType in (:(ROCVector{T}), :(ROCMatrix{T})) @eval begin function ldiv!(ic::AMD_IC0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat ldiv!(LowerTriangular(ic.P), x) # Forward substitution with L ldiv!(LowerTriangular(ic.P)', x) # Backward substitution with Lᴴ return x end function ldiv!(y::$ArrayType, ic::AMD_IC0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat ic.timer_update += @elapsed begin copyto!(y, x) ldiv!(ic, y) end return y end function ldiv!(ic::AMD_IC0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal 
ldiv!(UpperTriangular(ic.P)', x) # Forward substitution with L ldiv!(UpperTriangular(ic.P), x) # Backward substitution with Lᴴ return x end function ldiv!(y::$ArrayType, ic::AMD_IC0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal ic.timer_update += @elapsed begin copyto!(y, x) ldiv!(ic, y) end return y end end end
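A brief usage sketch of this preconditioner; `A_gpu` is a hypothetical symmetric positive definite `ROCSparseMatrixCSR{Float64,Cint}` already on the device:

```julia
using AMDGPU, AMDGPU.rocSPARSE, LinearAlgebra
import KrylovPreconditioners as KP

P = KP.kp_ic0(A_gpu)    # compute the incomplete Cholesky factorization once

x = ROCArray(rand(size(A_gpu, 1)))
y = similar(x)
ldiv!(y, P, x)          # y ← (L Lᴴ)⁻¹ x

# When the nonzero values of A_gpu change but the sparsity pattern does not:
KP.update!(P, A_gpu)    # refactorize in place, reusing the analysis buffer
```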
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/AMDGPU/ilu0.jl
code
4396
mutable struct AMD_ILU0{SM} <: AbstractKrylovPreconditioner n::Int desc::rocSPARSE.ROCMatrixDescriptor buffer::ROCVector{UInt8} info::rocSPARSE.MatInfo timer_update::Float64 P::SM end for (bname, aname, sname, T) in ((:rocsparse_scsrilu0_buffer_size, :rocsparse_scsrilu0_analysis, :rocsparse_scsrilu0, :Float32), (:rocsparse_dcsrilu0_buffer_size, :rocsparse_dcsrilu0_analysis, :rocsparse_dcsrilu0, :Float64), (:rocsparse_ccsrilu0_buffer_size, :rocsparse_ccsrilu0_analysis, :rocsparse_ccsrilu0, :ComplexF32), (:rocsparse_zcsrilu0_buffer_size, :rocsparse_zcsrilu0_analysis, :rocsparse_zcsrilu0, :ComplexF64)) @eval begin function KP.kp_ilu0(A::ROCSparseMatrixCSR{$T,Cint}) P = copy(A) n = checksquare(P) desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O') info = rocSPARSE.MatInfo() buffer_size = Ref{Csize_t}() rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer) posit = Ref{Cint}(1) rocSPARSE.rocsparse_csrilu0_zero_pivot(rocSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer) return AMD_ILU0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::AMD_ILU0{ROCSparseMatrixCSR{$T,Cint}}, A::ROCSparseMatrixCSR{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer) return p end function KP.kp_ilu0(A::ROCSparseMatrixCSC{$T,Cint}) P = copy(A) n = checksquare(P) desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O') info = rocSPARSE.MatInfo() buffer_size = Ref{Csize_t}() rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer) posit = Ref{Cint}(1) rocSPARSE.rocsparse_csrilu0_zero_pivot(rocSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer) return AMD_ILU0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::AMD_ILU0{ROCSparseMatrixCSC{$T,Cint}}, A::ROCSparseMatrixCSC{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer) return p end end end for ArrayType in (:(ROCVector{T}), :(ROCMatrix{T})) @eval begin function ldiv!(ilu::AMD_ILU0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat ldiv!(UnitLowerTriangular(ilu.P), x) # Forward substitution with L ldiv!(UpperTriangular(ilu.P), x) # Backward substitution with U return x end function ldiv!(y::$ArrayType, ilu::AMD_ILU0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat copyto!(y, x) ilu.timer_update += @elapsed begin ldiv!(ilu, y) end return y end function ldiv!(ilu::AMD_ILU0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T 
<: BlasReal ldiv!(LowerTriangular(ilu.P), x) # Forward substitution with L ldiv!(UnitUpperTriangular(ilu.P), x) # Backward substitution with U return x end function ldiv!(y::$ArrayType, ilu::AMD_ILU0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal copyto!(y, x) ilu.timer_update += @elapsed begin ldiv!(ilu, y) end return y end end end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/AMDGPU/operators.jl
code
9830
using AMDGPU.HIP mutable struct AMD_KrylovOperator{T} <: AbstractKrylovOperator{T} type::Type{T} m::Int n::Int nrhs::Int transa::Char descA::rocSPARSE.ROCSparseMatrixDescriptor buffer_size::Ref{Csize_t} buffer::ROCVector{UInt8} end eltype(A::AMD_KrylovOperator{T}) where T = T size(A::AMD_KrylovOperator) = (A.m, A.n) for (SparseMatrixType, BlasType) in ((:(ROCSparseMatrixCSR{T}), :BlasFloat), (:(ROCSparseMatrixCSC{T}), :BlasFloat), (:(ROCSparseMatrixCOO{T}), :BlasFloat)) @eval begin function KP.KrylovOperator(A::$SparseMatrixType; nrhs::Int=1, transa::Char='N') where T <: $BlasType m,n = size(A) alpha = Ref{T}(one(T)) beta = Ref{T}(zero(T)) descA = rocSPARSE.ROCSparseMatrixDescriptor(A, 'O') if nrhs == 1 descX = rocSPARSE.ROCDenseVectorDescriptor(T, n) descY = rocSPARSE.ROCDenseVectorDescriptor(T, m) algo = rocSPARSE.rocsparse_spmv_alg_default buffer_size = Ref{Csize_t}() if HIP.runtime_version() ≥ v"6-" rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), transa, alpha, descA, descX, beta, descY, T, algo, rocSPARSE.rocsparse_spmv_stage_buffer_size, buffer_size, C_NULL) else rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), transa, alpha, descA, descX, beta, descY, T, algo, buffer_size, C_NULL) end buffer = ROCVector{UInt8}(undef, buffer_size[]) if HIP.runtime_version() ≥ v"6-" rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), transa, alpha, descA, descX, beta, descY, T, algo, rocSPARSE.rocsparse_spmv_stage_preprocess, buffer_size, buffer) end return AMD_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer) else descX = rocSPARSE.ROCDenseMatrixDescriptor(T, n, nrhs) descY = rocSPARSE.ROCDenseMatrixDescriptor(T, m, nrhs) algo = rocSPARSE.rocsparse_spmm_alg_default buffer_size = Ref{Csize_t}() rocSPARSE.rocsparse_spmm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T, algo, rocSPARSE.rocsparse_spmm_stage_buffer_size, buffer_size, C_NULL) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.rocsparse_spmm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T, algo, rocSPARSE.rocsparse_spmm_stage_preprocess, buffer_size, buffer) return AMD_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer) end end function KP.update!(A::AMD_KrylovOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat descB = rocSPARSE.ROCSparseMatrixDescriptor(B, 'O') A.descA = descB return A end end end function LinearAlgebra.mul!(y::ROCVector{T}, A::AMD_KrylovOperator{T}, x::ROCVector{T}) where T <: BlasFloat (length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m")) (length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n")) (A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1")) descY = rocSPARSE.ROCDenseVectorDescriptor(y) descX = rocSPARSE.ROCDenseVectorDescriptor(x) algo = rocSPARSE.rocsparse_spmv_alg_default alpha = Ref{T}(one(T)) beta = Ref{T}(zero(T)) if HIP.runtime_version() ≥ v"6-" rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), A.transa, alpha, A.descA, descX, beta, descY, T, algo, rocSPARSE.rocsparse_spmv_stage_compute, A.buffer_size, A.buffer) else rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), A.transa, alpha, A.descA, descX, beta, descY, T, algo, A.buffer_size, A.buffer) end end function LinearAlgebra.mul!(Y::ROCMatrix{T}, A::AMD_KrylovOperator{T}, X::ROCMatrix{T}) where T <: BlasFloat mY, nY = size(Y) mX, nX = size(X) (mY != A.m) && throw(DimensionMismatch("mY != A.m")) (mX != A.n) && throw(DimensionMismatch("mX != A.n")) (nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs")) descY = 
rocSPARSE.ROCDenseMatrixDescriptor(Y) descX = rocSPARSE.ROCDenseMatrixDescriptor(X) algo = rocSPARSE.rocsparse_spmm_alg_default alpha = Ref{T}(one(T)) beta = Ref{T}(zero(T)) rocSPARSE.rocsparse_spmm(rocSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, beta, descY, T, algo, rocSPARSE.rocsparse_spmm_stage_compute, A.buffer_size, A.buffer) end mutable struct AMD_TriangularOperator{T} <: AbstractTriangularOperator{T} type::Type{T} m::Int n::Int nrhs::Int transa::Char descA::rocSPARSE.ROCSparseMatrixDescriptor buffer_size::Ref{Csize_t} buffer::ROCVector{UInt8} end eltype(A::AMD_TriangularOperator{T}) where T = T size(A::AMD_TriangularOperator) = (A.m, A.n) for (SparseMatrixType, BlasType) in ((:(ROCSparseMatrixCSR{T}), :BlasFloat), (:(ROCSparseMatrixCOO{T}), :BlasFloat)) @eval begin function KP.TriangularOperator(A::$SparseMatrixType, uplo::Char, diag::Char; nrhs::Int=1, transa::Char='N') where T <: $BlasType m,n = size(A) alpha = Ref{T}(one(T)) descA = rocSPARSE.ROCSparseMatrixDescriptor(A, 'O') rocsparse_uplo = Ref{rocSPARSE.rocsparse_fill_mode}(uplo) rocsparse_diag = Ref{rocSPARSE.rocsparse_diag_type}(diag) rocSPARSE.rocsparse_spmat_set_attribute(descA, rocSPARSE.rocsparse_spmat_fill_mode, rocsparse_uplo, Csize_t(sizeof(rocsparse_uplo))) rocSPARSE.rocsparse_spmat_set_attribute(descA, rocSPARSE.rocsparse_spmat_diag_type, rocsparse_diag, Csize_t(sizeof(rocsparse_diag))) if nrhs == 1 descX = rocSPARSE.ROCDenseVectorDescriptor(T, n) descY = rocSPARSE.ROCDenseVectorDescriptor(T, m) algo = rocSPARSE.rocsparse_spsv_alg_default buffer_size = Ref{Csize_t}() rocSPARSE.rocsparse_spsv(rocSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo, rocSPARSE.rocsparse_spsv_stage_buffer_size, buffer_size, C_NULL) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.rocsparse_spsv(rocSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo, rocSPARSE.rocsparse_spsv_stage_preprocess, buffer_size, buffer) return AMD_TriangularOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer) else descX = rocSPARSE.ROCDenseMatrixDescriptor(T, n, nrhs) descY = rocSPARSE.ROCDenseMatrixDescriptor(T, m, nrhs) algo = rocSPARSE.rocsparse_spsm_alg_default buffer_size = Ref{Csize_t}() rocSPARSE.rocsparse_spsm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo, rocSPARSE.rocsparse_spsm_stage_buffer_size, buffer_size, C_NULL) buffer = ROCVector{UInt8}(undef, buffer_size[]) rocSPARSE.rocsparse_spsm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo, rocSPARSE.rocsparse_spsm_stage_preprocess, buffer_size, buffer) return AMD_TriangularOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer) end end function KP.update!(A::AMD_TriangularOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat (B isa ROCSparseMatrixCOO) && rocSPARSE.rocsparse_coo_set_pointers(A.descA, B.rowInd, B.colInd, B.nzVal) (B isa ROCSparseMatrixCSR) && rocSPARSE.rocsparse_csr_set_pointers(A.descA, B.rowPtr, B.colVal, B.nzVal) return A end end end function LinearAlgebra.ldiv!(y::ROCVector{T}, A::AMD_TriangularOperator{T}, x::ROCVector{T}) where T <: BlasFloat (length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m")) (length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n")) (A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1")) descY = rocSPARSE.ROCDenseVectorDescriptor(y) descX = rocSPARSE.ROCDenseVectorDescriptor(x) algo = rocSPARSE.rocsparse_spsv_alg_default alpha = Ref{T}(one(T)) rocSPARSE.rocsparse_spsv(rocSPARSE.handle(), A.transa, alpha, A.descA, descX, descY, T, algo, 
rocSPARSE.rocsparse_spsv_stage_compute, A.buffer_size, A.buffer) end function LinearAlgebra.ldiv!(Y::ROCMatrix{T}, A::AMD_TriangularOperator{T}, X::ROCMatrix{T}) where T <: BlasFloat mY, nY = size(Y) mX, nX = size(X) (mY != A.m) && throw(DimensionMismatch("mY != A.m")) (mX != A.n) && throw(DimensionMismatch("mX != A.n")) (nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs")) descY = rocSPARSE.ROCDenseMatrixDescriptor(Y) descX = rocSPARSE.ROCDenseMatrixDescriptor(X) algo = rocSPARSE.rocsparse_spsm_alg_default alpha = Ref{T}(one(T)) rocSPARSE.rocsparse_spsm(rocSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, descY, T, algo, rocSPARSE.rocsparse_spsm_stage_compute, A.buffer_size, A.buffer) end
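A short sketch of how these operators might be used; the sparse matrix `A_gpu` is a hypothetical square `ROCSparseMatrixCSR{Float64,Cint}`:

```julia
using AMDGPU, AMDGPU.rocSPARSE, LinearAlgebra
import KrylovPreconditioners as KP

n = size(A_gpu, 1)
x = ROCArray(rand(n))
y = similar(x)

op = KP.KrylovOperator(A_gpu)   # wraps A_gpu with a preallocated SpMV buffer
mul!(y, op, x)                  # y ← A * x, reusing the buffer on every call

# Triangular solves with the lower triangle of A_gpu and a unit diagonal:
T_op = KP.TriangularOperator(A_gpu, 'L', 'U')
ldiv!(y, T_op, x)               # y ← L⁻¹ x
```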
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/AMDGPU/scaling.jl
code
98
KP.scaling_csr!(A::rocSPARSE.ROCSparseMatrixCSR, b::ROCVector) = scaling_csr!(A, b, ROCBackend())
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/CUDA/blockjacobi.jl
code
1671
KP.BlockJacobiPreconditioner(J::CUSPARSE.CuSparseMatrixCSR; options...) = BlockJacobiPreconditioner(SparseMatrixCSC(J); options...) function KP.create_blocklist(cublocks::CuArray, npart) blocklist = Array{CuArray{Float64,2}}(undef, npart) for b in 1:npart blocklist[b] = CuMatrix{Float64}(undef, size(cublocks,1), size(cublocks,2)) end return blocklist end function _update_gpu(p, j_rowptr, j_colval, j_nzval, device::CUDABackend) nblocks = p.nblocks blocksize = p.blocksize fillblock_gpu_kernel! = KP._fillblock_gpu!(device) # Fill the dense blocks of the block-Jacobi preconditioner fillblock_gpu_kernel!( p.cublocks, size(p.id,1), p.cupartitions, p.cumap, j_rowptr, j_colval, j_nzval, p.cupart, p.culpartitions, p.id, ndrange=(nblocks, blocksize), ) KA.synchronize(device) # Invert the dense blocks in batch for b in 1:nblocks p.blocklist[b] .= p.cublocks[:,:,b] end CUDA.@sync pivot, info = CUDA.CUBLAS.getrf_batched!(p.blocklist, true) CUDA.@sync pivot, info, p.blocklist = CUDA.CUBLAS.getri_batched(p.blocklist, pivot) for b in 1:nblocks p.cublocks[:,:,b] .= p.blocklist[b] end return end """ update!(p, J::CuSparseMatrixCSR) Update the preconditioner `p` from the sparse Jacobian `J` in CSR format for CUDA: 1) The dense blocks `cuJs` are filled from the sparse Jacobian `J`. 2) The dense blocks are inverted in batch using CUBLAS. 3) The preconditioner matrix `p.P` is extracted from the dense blocks `cuJs`. """ function KP.update!(p::BlockJacobiPreconditioner, J::CUSPARSE.CuSparseMatrixCSR) _update_gpu(p, J.rowPtr, J.colVal, J.nzVal, p.device) end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/CUDA/ic0.jl
code
4586
mutable struct IC0Info info::CUSPARSE.csric02Info_t function IC0Info() info_ref = Ref{CUSPARSE.csric02Info_t}() CUSPARSE.cusparseCreateCsric02Info(info_ref) obj = new(info_ref[]) finalizer(CUSPARSE.cusparseDestroyCsric02Info, obj) obj end end unsafe_convert(::Type{CUSPARSE.csric02Info_t}, info::IC0Info) = info.info mutable struct NVIDIA_IC0{SM} <: AbstractKrylovPreconditioner n::Int desc::CUSPARSE.CuMatrixDescriptor buffer::CuVector{UInt8} info::IC0Info timer_update::Float64 P::SM end for (bname, aname, sname, T) in ((:cusparseScsric02_bufferSize, :cusparseScsric02_analysis, :cusparseScsric02, :Float32), (:cusparseDcsric02_bufferSize, :cusparseDcsric02_analysis, :cusparseDcsric02, :Float64), (:cusparseCcsric02_bufferSize, :cusparseCcsric02_analysis, :cusparseCcsric02, :ComplexF32), (:cusparseZcsric02_bufferSize, :cusparseZcsric02_analysis, :cusparseZcsric02, :ComplexF64)) @eval begin function KP.kp_ic0(A::CuSparseMatrixCSR{$T,Cint}) P = copy(A) n = checksquare(P) desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O') info = IC0Info() buffer_size = Ref{Cint}() CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size) buffer = CuVector{UInt8}(undef, buffer_size[]) CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) posit = Ref{Cint}(1) CUSPARSE.cusparseXcsric02_zeroPivot(CUSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) return NVIDIA_IC0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::NVIDIA_IC0{CuSparseMatrixCSR{$T,Cint}}, A::CuSparseMatrixCSR{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer) return p end function KP.kp_ic0(A::CuSparseMatrixCSC{$T,Cint}) P = copy(A) n = checksquare(P) desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O') info = IC0Info() buffer_size = Ref{Cint}() CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size) buffer = CuVector{UInt8}(undef, buffer_size[]) CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) posit = Ref{Cint}(1) CUSPARSE.cusparseXcsric02_zeroPivot(CUSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) return NVIDIA_IC0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::NVIDIA_IC0{CuSparseMatrixCSC{$T,Cint}}, A::CuSparseMatrixCSC{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer) return p end end end for ArrayType in (:(CuVector{T}), :(CuMatrix{T})) @eval begin function ldiv!(ic::NVIDIA_IC0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat ldiv!(LowerTriangular(ic.P), x) # Forward substitution with L ldiv!(LowerTriangular(ic.P)', x) # Backward substitution with Lᴴ return x end function ldiv!(y::$ArrayType, ic::NVIDIA_IC0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat copyto!(y, x) 
ic.timer_update += @elapsed begin ldiv!(ic, y) end return y end function ldiv!(ic::NVIDIA_IC0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasFloat ldiv!(UpperTriangular(ic.P)', x) # Forward substitution with L ldiv!(UpperTriangular(ic.P), x) # Backward substitution with Lᴴ return x end function ldiv!(y::$ArrayType, ic::NVIDIA_IC0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal copyto!(y, x) ic.timer_update += @elapsed begin ldiv!(ic, y) end return y end end end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/CUDA/ilu0.jl
code
4643
mutable struct ILU0Info info::CUSPARSE.csrilu02Info_t function ILU0Info() info_ref = Ref{CUSPARSE.csrilu02Info_t}() CUSPARSE.cusparseCreateCsrilu02Info(info_ref) obj = new(info_ref[]) finalizer(CUSPARSE.cusparseDestroyCsrilu02Info, obj) obj end end unsafe_convert(::Type{CUSPARSE.csrilu02Info_t}, info::ILU0Info) = info.info mutable struct NVIDIA_ILU0{SM} <: AbstractKrylovPreconditioner n::Int desc::CUSPARSE.CuMatrixDescriptor buffer::CuVector{UInt8} info::ILU0Info timer_update::Float64 P::SM end for (bname, aname, sname, T) in ((:cusparseScsrilu02_bufferSize, :cusparseScsrilu02_analysis, :cusparseScsrilu02, :Float32), (:cusparseDcsrilu02_bufferSize, :cusparseDcsrilu02_analysis, :cusparseDcsrilu02, :Float64), (:cusparseCcsrilu02_bufferSize, :cusparseCcsrilu02_analysis, :cusparseCcsrilu02, :ComplexF32), (:cusparseZcsrilu02_bufferSize, :cusparseZcsrilu02_analysis, :cusparseZcsrilu02, :ComplexF64)) @eval begin function KP.kp_ilu0(A::CuSparseMatrixCSR{$T,Cint}) P = copy(A) n = checksquare(P) desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O') info = ILU0Info() buffer_size = Ref{Cint}() CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size) buffer = CuVector{UInt8}(undef, buffer_size[]) CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) posit = Ref{Cint}(1) CUSPARSE.cusparseXcsrilu02_zeroPivot(CUSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) return NVIDIA_ILU0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::NVIDIA_ILU0{CuSparseMatrixCSR{$T,Cint}}, A::CuSparseMatrixCSR{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer) return p end function KP.kp_ilu0(A::CuSparseMatrixCSC{$T,Cint}) P = copy(A) n = checksquare(P) desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O') info = ILU0Info() buffer_size = Ref{Cint}() CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size) buffer = CuVector{UInt8}(undef, buffer_size[]) CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) posit = Ref{Cint}(1) CUSPARSE.cusparseXcsrilu02_zeroPivot(CUSPARSE.handle(), info, posit) (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))") CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer) return NVIDIA_ILU0(n, desc, buffer, info, 0.0, P) end function KP.update!(p::NVIDIA_ILU0{CuSparseMatrixCSC{$T,Cint}}, A::CuSparseMatrixCSC{$T,Cint}) copyto!(p.P.nzVal, A.nzVal) CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer) return p end end end for ArrayType in (:(CuVector{T}), :(CuMatrix{T})) @eval begin function ldiv!(ilu::NVIDIA_ILU0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat ldiv!(UnitLowerTriangular(ilu.P), x) # Forward substitution with L ldiv!(UpperTriangular(ilu.P), x) # Backward substitution with U return x end function ldiv!(y::$ArrayType, ilu::NVIDIA_ILU0{CuSparseMatrixCSR{T,Cint}}, 
x::$ArrayType) where T <: BlasFloat copyto!(y, x) ilu.timer_update += @elapsed begin ldiv!(ilu, y) end return y end function ldiv!(ilu::NVIDIA_ILU0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal ldiv!(LowerTriangular(ilu.P), x) # Forward substitution with L ldiv!(UnitUpperTriangular(ilu.P), x) # Backward substitution with U return x end function ldiv!(y::$ArrayType, ilu::NVIDIA_ILU0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal copyto!(y, x) ilu.timer_update += @elapsed begin ldiv!(ilu, y) end return y end end end
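A usage sketch mirroring the CUDA path; `A_gpu` is a hypothetical square `CuSparseMatrixCSR{Float64,Cint}`:

```julia
using CUDA, CUDA.CUSPARSE, LinearAlgebra
import KrylovPreconditioners as KP

P = KP.kp_ilu0(A_gpu)   # incomplete LU factorization, stored in P.P

x = CuArray(rand(size(A_gpu, 1)))
y = similar(x)
ldiv!(y, P, x)          # y ← U⁻¹ L⁻¹ x (unit lower / upper triangular solves)

# Refresh the factorization after updating the nonzeros of A_gpu in place:
KP.update!(P, A_gpu)
```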
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/CUDA/operators.jl
code
8276
mutable struct NVIDIA_KrylovOperator{T} <: AbstractKrylovOperator{T}
    type::Type{T}
    m::Int
    n::Int
    nrhs::Int
    transa::Char
    descA::CUSPARSE.CuSparseMatrixDescriptor
    buffer::CuVector{UInt8}
end

eltype(A::NVIDIA_KrylovOperator{T}) where T = T
size(A::NVIDIA_KrylovOperator) = (A.m, A.n)

for (SparseMatrixType, BlasType) in ((:(CuSparseMatrixCSR{T}), :BlasFloat),
                                     (:(CuSparseMatrixCSC{T}), :BlasFloat),
                                     (:(CuSparseMatrixCOO{T}), :BlasFloat))
    @eval begin
        function KP.KrylovOperator(A::$SparseMatrixType; nrhs::Int=1, transa::Char='N') where T <: $BlasType
            m,n = size(A)
            alpha = Ref{T}(one(T))
            beta = Ref{T}(zero(T))
            descA = CUSPARSE.CuSparseMatrixDescriptor(A, 'O')
            if nrhs == 1
                descX = CUSPARSE.CuDenseVectorDescriptor(T, n)
                descY = CUSPARSE.CuDenseVectorDescriptor(T, m)
                algo = CUSPARSE.CUSPARSE_SPMV_ALG_DEFAULT
                buffer_size = Ref{Csize_t}()
                CUSPARSE.cusparseSpMV_bufferSize(CUSPARSE.handle(), transa, alpha, descA, descX, beta, descY, T, algo, buffer_size)
                buffer = CuVector{UInt8}(undef, buffer_size[])
                return NVIDIA_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer)
            else
                descX = CUSPARSE.CuDenseMatrixDescriptor(T, n, nrhs)
                descY = CUSPARSE.CuDenseMatrixDescriptor(T, m, nrhs)
                algo = CUSPARSE.CUSPARSE_SPMM_ALG_DEFAULT
                buffer_size = Ref{Csize_t}()
                CUSPARSE.cusparseSpMM_bufferSize(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T, algo, buffer_size)
                buffer = CuVector{UInt8}(undef, buffer_size[])
                if !(A isa CuSparseMatrixCOO)
                    CUSPARSE.cusparseSpMM_preprocess(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T, algo, buffer)
                end
                return NVIDIA_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer)
            end
        end

        function KP.update!(A::NVIDIA_KrylovOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat
            descB = CUSPARSE.CuSparseMatrixDescriptor(B, 'O')
            A.descA = descB
            return A
        end
    end
end

function LinearAlgebra.mul!(y::CuVector{T}, A::NVIDIA_KrylovOperator{T}, x::CuVector{T}) where T <: BlasFloat
    (length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
    (length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
    (A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
    descY = CUSPARSE.CuDenseVectorDescriptor(y)
    descX = CUSPARSE.CuDenseVectorDescriptor(x)
    algo = CUSPARSE.CUSPARSE_SPMV_ALG_DEFAULT
    alpha = Ref{T}(one(T))
    beta = Ref{T}(zero(T))
    CUSPARSE.cusparseSpMV(CUSPARSE.handle(), A.transa, alpha, A.descA, descX, beta, descY, T, algo, A.buffer)
end

function LinearAlgebra.mul!(Y::CuMatrix{T}, A::NVIDIA_KrylovOperator{T}, X::CuMatrix{T}) where T <: BlasFloat
    mY, nY = size(Y)
    mX, nX = size(X)
    (mY != A.m) && throw(DimensionMismatch("mY != A.m"))
    (mX != A.n) && throw(DimensionMismatch("mX != A.n"))
    (nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
    descY = CUSPARSE.CuDenseMatrixDescriptor(Y)
    descX = CUSPARSE.CuDenseMatrixDescriptor(X)
    algo = CUSPARSE.CUSPARSE_SPMM_ALG_DEFAULT
    alpha = Ref{T}(one(T))
    beta = Ref{T}(zero(T))
    CUSPARSE.cusparseSpMM(CUSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, beta, descY, T, algo, A.buffer)
end

mutable struct NVIDIA_TriangularOperator{T,S} <: AbstractTriangularOperator{T}
    type::Type{T}
    m::Int
    n::Int
    nrhs::Int
    transa::Char
    descA::CUSPARSE.CuSparseMatrixDescriptor
    descT::S
    buffer::CuVector{UInt8}
end

eltype(A::NVIDIA_TriangularOperator{T}) where T = T
size(A::NVIDIA_TriangularOperator) = (A.m, A.n)

for (SparseMatrixType, BlasType) in ((:(CuSparseMatrixCSR{T}), :BlasFloat),
                                     (:(CuSparseMatrixCOO{T}), :BlasFloat))
    @eval begin
        function KP.TriangularOperator(A::$SparseMatrixType, uplo::Char, diag::Char; nrhs::Int=1, transa::Char='N') where T <: $BlasType
            m,n = size(A)
            alpha = Ref{T}(one(T))
            descA = CUSPARSE.CuSparseMatrixDescriptor(A, 'O')
            cusparse_uplo = Ref{CUSPARSE.cusparseFillMode_t}(uplo)
            cusparse_diag = Ref{CUSPARSE.cusparseDiagType_t}(diag)
            CUSPARSE.cusparseSpMatSetAttribute(descA, 'F', cusparse_uplo, Csize_t(sizeof(cusparse_uplo)))
            CUSPARSE.cusparseSpMatSetAttribute(descA, 'D', cusparse_diag, Csize_t(sizeof(cusparse_diag)))
            if nrhs == 1
                descT = CUSPARSE.CuSparseSpSVDescriptor()
                descX = CUSPARSE.CuDenseVectorDescriptor(T, n)
                descY = CUSPARSE.CuDenseVectorDescriptor(T, m)
                algo = CUSPARSE.CUSPARSE_SPSV_ALG_DEFAULT
                buffer_size = Ref{Csize_t}()
                CUSPARSE.cusparseSpSV_bufferSize(CUSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo, descT, buffer_size)
                buffer = CuVector{UInt8}(undef, buffer_size[])
                CUSPARSE.cusparseSpSV_analysis(CUSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo, descT, buffer)
                return NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSVDescriptor}(T, m, n, nrhs, transa, descA, descT, buffer)
            else
                descT = CUSPARSE.CuSparseSpSMDescriptor()
                descX = CUSPARSE.CuDenseMatrixDescriptor(T, n, nrhs)
                descY = CUSPARSE.CuDenseMatrixDescriptor(T, m, nrhs)
                algo = CUSPARSE.CUSPARSE_SPSM_ALG_DEFAULT
                buffer_size = Ref{Csize_t}()
                CUSPARSE.cusparseSpSM_bufferSize(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo, descT, buffer_size)
                buffer = CuVector{UInt8}(undef, buffer_size[])
                CUSPARSE.cusparseSpSM_analysis(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo, descT, buffer)
                return NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSMDescriptor}(T, m, n, nrhs, transa, descA, descT, buffer)
            end
        end

        function KP.update!(A::NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSVDescriptor}, B::$SparseMatrixType) where T <: $BlasFloat
            CUSPARSE.version() ≥ v"12.2" || error("This operation is only supported by CUDA ≥ v12.3")
            descB = CUSPARSE.CuSparseMatrixDescriptor(B, 'O')
            A.descA = descB
            CUSPARSE.cusparseSpSV_updateMatrix(CUSPARSE.handle(), A.descT, B.nzVal, 'G')
            return A
        end

        function KP.update!(A::NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSMDescriptor}, B::$SparseMatrixType) where T <: $BlasFloat
            return error("This operation will be supported in CUDA v12.4")
        end
    end
end

function LinearAlgebra.ldiv!(y::CuVector{T}, A::NVIDIA_TriangularOperator{T}, x::CuVector{T}) where T <: BlasFloat
    (length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
    (length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
    (A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
    descY = CUSPARSE.CuDenseVectorDescriptor(y)
    descX = CUSPARSE.CuDenseVectorDescriptor(x)
    algo = CUSPARSE.CUSPARSE_SPSV_ALG_DEFAULT
    alpha = Ref{T}(one(T))
    CUSPARSE.cusparseSpSV_solve(CUSPARSE.handle(), A.transa, alpha, A.descA, descX, descY, T, algo, A.descT)
end

function LinearAlgebra.ldiv!(Y::CuMatrix{T}, A::NVIDIA_TriangularOperator{T}, X::CuMatrix{T}) where T <: BlasFloat
    mY, nY = size(Y)
    mX, nX = size(X)
    (mY != A.m) && throw(DimensionMismatch("mY != A.m"))
    (mX != A.n) && throw(DimensionMismatch("mX != A.n"))
    (nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
    descY = CUSPARSE.CuDenseMatrixDescriptor(Y)
    descX = CUSPARSE.CuDenseMatrixDescriptor(X)
    algo = CUSPARSE.CUSPARSE_SPSM_ALG_DEFAULT
    alpha = Ref{T}(one(T))
    CUSPARSE.cusparseSpSM_solve(CUSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, descY, T, algo, A.descT)
end
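# Usage sketch for the wrappers above, assuming a functional CUDA.jl setup;
# `A_cpu`, `opA`, `x` and `y` are hypothetical names, not part of the package API.
#
#   using CUDA, CUDA.CUSPARSE, SparseArrays, LinearAlgebra
#   using KrylovPreconditioners
#
#   A_cpu = sprand(Float64, 100, 100, 0.05) + 10I
#   A = CuSparseMatrixCSR(A_cpu)
#   opA = KrylovOperator(A)                      # allocates the cusparseSpMV buffer once
#   x = CUDA.rand(Float64, 100)
#   y = CUDA.zeros(Float64, 100)
#   mul!(y, opA, x)                              # y = A * x, reusing the buffer
#   update!(opA, CuSparseMatrixCSR(A_cpu + I))   # swap in new values, keep the buffer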
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
ext/CUDA/scaling.jl
code
96
KP.scaling_csr!(A::CUSPARSE.CuSparseMatrixCSR, b::CuVector) = scaling_csr!(A, b, CUDABackend())
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
src/KrylovPreconditioners.jl
code
1413
module KrylovPreconditioners

using LinearAlgebra, SparseArrays
using Adapt
using KernelAbstractions
const KA = KernelAbstractions

using LinearAlgebra: checksquare, BlasReal, BlasFloat
import LinearAlgebra: ldiv!

abstract type AbstractKrylovPreconditioner end
export AbstractKrylovPreconditioner

abstract type AbstractKrylovOperator{T} end
export AbstractKrylovOperator

abstract type AbstractTriangularOperator{T} end
export AbstractTriangularOperator

update!(p::AbstractKrylovPreconditioner, A::SparseMatrixCSC) = error("update!() for $(typeof(p)) is not implemented.")
update!(p::AbstractKrylovPreconditioner, A) = error("update!() for $(typeof(p)) is not implemented.")
update!(p::AbstractKrylovOperator, A::SparseMatrixCSC) = error("update!() for $(typeof(p)) is not implemented.")
update!(p::AbstractKrylovOperator, A) = error("update!() for $(typeof(p)) is not implemented.")
export update!, get_timer, reset_timer!

function get_timer(p::AbstractKrylovPreconditioner)
    return p.timer_update
end

function reset_timer!(p::AbstractKrylovPreconditioner)
    p.timer_update = 0.0
end

function KrylovOperator end
export KrylovOperator

function TriangularOperator end
export TriangularOperator

# Preconditioners
include("ic0.jl")
include("ilu0.jl")
include("blockjacobi.jl")

# Scaling
include("scaling.jl")
export scaling_csr!

# Ordering
# include("ordering.jl")

end # module KrylovPreconditioners
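# Usage sketch of the timer accounting exposed above; `P` and `A` are hypothetical
# names, with `P` any concrete preconditioner (e.g. a BlockJacobiPreconditioner).
#
#   P = BlockJacobiPreconditioner(A; nblocks=8)
#   update!(P, A)      # concrete methods live in src/ and the GPU extensions
#   get_timer(P)       # seconds accumulated while applying the preconditioner
#   reset_timer!(P)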
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
src/blockjacobi.jl
code
9993
export BlockJacobiPreconditioner

using LightGraphs, Metis

"""
    overlap(Graph, subset; level=1)

Given `subset` embedded within `Graph`, compute `subset2` such that
`subset2` contains `subset` and all of its adjacent vertices.
"""
function overlap(Graph, subset; level=1)
    @assert level > 0
    subset2 = [LightGraphs.neighbors(Graph, v) for v in subset]
    subset2 = reduce(vcat, subset2)
    subset2 = unique(vcat(subset, subset2))

    level -= 1
    if level == 0
        return subset2
    else
        return overlap(Graph, subset2, level=level)
    end
end

"""
    BlockJacobiPreconditioner

Overlapping-Schwarz preconditioner.

### Attributes

* `nblocks::Int64`: Number of partitions or blocks.
* `blocksize::Int64`: Size of each block.
* `partitions::Vector{Vector{Int64}}`: `npart` partitions stored as lists
* `cupartitions`: `partitions` transferred to the GPU
* `lpartitions::Vector{Int64}`: Length of each partition.
* `culpartitions::Vector{Int64}`: Length of each partition, on the GPU.
* `blocks`: Dense blocks of the block-Jacobi
* `cublocks`: `blocks` transferred to the GPU
* `map`: The partitions as a mapping to construct views
* `cumap`: `map` transferred to the GPU
* `part`: Partitioning as output by Metis
* `cupart`: `part` transferred to the GPU
"""
mutable struct BlockJacobiPreconditioner{AT,GAT,VI,GVI,GMT,MI,GMI} <: AbstractKrylovPreconditioner
    nblocks::Int64
    blocksize::Int64
    partitions::MI
    cupartitions::GMI
    lpartitions::VI
    culpartitions::GVI
    rest_size::VI
    curest_size::GVI
    blocks::AT
    cublocks::GAT
    map::VI
    cumap::GVI
    part::VI
    cupart::GVI
    id::GMT
    blocklist::Vector{GMT}
    timer_update::Float64
    device::KA.Backend
end

function create_blocklist(blocks::Array, npart)
    blocklist = Array{Array{Float64,2}}(undef, npart)
    for b in 1:npart
        blocklist[b] = Matrix{Float64}(undef, size(blocks,1), size(blocks,2))
    end
    return blocklist
end

function BlockJacobiPreconditioner(J, npart::Int64, device=CPU(), olevel=0)
    if npart < 2
        error("Number of partitions `npart` should be at least 2 for partitioning in Metis")
    end
    adj = build_adjmatrix(SparseMatrixCSC(J))
    g = LightGraphs.Graph(adj)
    part = Metis.partition(g, npart)
    partitions = Vector{Vector{Int64}}()
    for i in 1:npart
        push!(partitions, [])
    end
    for (i,v) in enumerate(part)
        push!(partitions[v], i)
    end
    # We keep track of the partition size pre-overlap.
    # This will allow us to implement the RAS update.
    rest_size = length.(partitions)
    # overlap
    if olevel > 0
        for i in 1:npart
            partitions[i] = overlap(g, partitions[i], level=olevel)
        end
    end
    lpartitions = length.(partitions)
    blocksize = maximum(length.(partitions))
    blocks = zeros(Float64, blocksize, blocksize, npart)
    # Get partitions into bit typed structure
    bpartitions = zeros(Int64, blocksize, npart)
    bpartitions .= 0.0
    for i in 1:npart
        bpartitions[1:length(partitions[i]),i] .= Vector{Int64}(partitions[i])
    end
    id = Matrix{Float64}(I, blocksize, blocksize)
    for i in 1:npart
        blocks[:,:,i] .= id
    end
    nmap = 0
    for b in partitions
        nmap += length(b)
    end
    map = zeros(Int64, nmap)
    part = zeros(Int64, nmap)
    for b in 1:npart
        for (i,el) in enumerate(partitions[b])
            map[el] = i
            part[el] = b
        end
    end

    id = adapt(device, id)
    cubpartitions = adapt(device, bpartitions)
    culpartitions = adapt(device, lpartitions)
    curest_size = adapt(device, rest_size)
    cublocks = adapt(device, blocks)
    cumap = adapt(device, map)
    cupart = adapt(device, part)
    blocklist = create_blocklist(cublocks, npart)
    return BlockJacobiPreconditioner(
        npart, blocksize, bpartitions, cubpartitions,
        lpartitions, culpartitions,
        rest_size, curest_size,
        blocks, cublocks,
        map, cumap,
        part, cupart,
        id, blocklist, 0.0,
        device,
    )
end

function BlockJacobiPreconditioner(J::SparseMatrixCSC; nblocks=-1, device=CPU(), noverlaps=0)
    n = size(J, 1)
    npartitions = if nblocks > 0
        nblocks
    else
        div(n, 32)
    end
    return BlockJacobiPreconditioner(J, npartitions, device, noverlaps)
end

Base.eltype(::BlockJacobiPreconditioner) = Float64

# NOTE: Custom kernel to implement blocks - vector multiplication.
# The blocks have very unbalanced sizes, leading to imbalances
# between the different threads.
# CUBLAS.gemm_strided_batched has been tested as well, but is
# overall 3x slower than this custom kernel: due to the various sizes
# of the blocks, gemm_strided performs too many unnecessary operations,
# impairing its performance.
@kernel function mblock_b_kernel!(y, b, p_len, rp_len, part, blocks)
    j, i = @index(Global, NTuple)
    @inbounds len = p_len[i]
    @inbounds rlen = rp_len[i]

    if j <= rlen
        accum = 0.0
        idxA = @inbounds part[j, i]
        for k=1:len
            idxB = @inbounds part[k, i]
            @inbounds accum = accum + blocks[j, k, i]*b[idxB]
        end
        @inbounds y[idxA] = accum
    end
end

@kernel function mblock_B_kernel!(y, b, p_len, rp_len, part, blocks)
    p = size(b, 2)
    i, j = @index(Global, NTuple)
    len = p_len[i]
    rlen = rp_len[i]

    if j <= rlen
        for ℓ=1:p
            accum = 0.0
            idxA = @inbounds part[j, i]
            for k=1:len
                idxB = @inbounds part[k, i]
                @inbounds accum = accum + blocks[j, k, i]*b[idxB,ℓ]
            end
            @inbounds y[idxA,ℓ] = accum
        end
    end
end

function LinearAlgebra.mul!(y, C::BlockJacobiPreconditioner, b::Vector{T}) where T
    n = size(b, 1)
    fill!(y, zero(T))
    for i=1:C.nblocks
        rlen = C.lpartitions[i]
        part = C.partitions[1:rlen, i]
        blck = C.blocks[1:rlen, 1:rlen, i]
        for j=1:C.rest_size[i]
            idx = part[j]
            y[idx] += dot(blck[j, :], b[part])
        end
    end
end

function LinearAlgebra.mul!(Y, C::BlockJacobiPreconditioner, B::Matrix{T}) where T
    n, p = size(B)
    fill!(Y, zero(T))
    for i=1:C.nblocks
        rlen = C.lpartitions[i]
        part = C.partitions[1:rlen, i]
        blck = C.blocks[1:rlen, 1:rlen, i]
        for rhs=1:p
            for j=1:C.rest_size[i]
                idx = part[j]
                Y[idx,rhs] += dot(blck[j, :], B[part,rhs])
            end
        end
    end
end

function LinearAlgebra.mul!(y, C::BlockJacobiPreconditioner, b::AbstractVector{T}) where T
    device = KA.get_backend(b)
    n = size(b, 1)
    fill!(y, zero(T))
    max_rlen = maximum(C.rest_size)
    ndrange = (max_rlen, C.nblocks)
    C.timer_update += @elapsed begin
        mblock_b_kernel!(device)(
            y, b, C.culpartitions, C.curest_size,
            C.cupartitions, C.cublocks,
            ndrange=ndrange,
        )
        KA.synchronize(device)
    end
end

function LinearAlgebra.mul!(Y, C::BlockJacobiPreconditioner, B::AbstractMatrix{T}) where T
    device = KA.get_backend(B)
    n, p = size(B)
    fill!(Y, zero(T))
    max_rlen = maximum(C.rest_size)
    ndrange = (C.nblocks, max_rlen)
    C.timer_update += @elapsed begin
        mblock_B_kernel!(device)(
            Y, B, C.culpartitions, C.curest_size,
            C.cupartitions, C.cublocks,
            ndrange=ndrange,
        )
        KA.synchronize(device)
    end
end

"""
    build_adjmatrix

Build the adjacency matrix of a matrix `A` corresponding to the undirected graph.
"""
function build_adjmatrix(A)
    rows = Int64[]
    cols = Int64[]
    vals = Float64[]
    rowsA = rowvals(A)
    m, n = size(A)
    for i = 1:n
        for j in nzrange(A, i)
            push!(rows, rowsA[j])
            push!(cols, i)
            push!(vals, 1.0)

            push!(rows, i)
            push!(cols, rowsA[j])
            push!(vals, 1.0)
        end
    end
    return sparse(rows, cols, vals, size(A,1), size(A,2))
end

"""
    _fillblock_gpu

Fill the dense blocks of the preconditioner from the sparse CSR matrix arrays.
"""
@kernel function _fillblock_gpu!(blocks, blocksize, partition, map, rowPtr, colVal, nzVal, part, lpartitions, id)
    b, k = @index(Global, NTuple)
    for i in 1:blocksize
        blocks[k, i, b] = id[k, i]
    end
    @synchronize
    @inbounds if k <= lpartitions[b]
        # select row
        i = partition[k, b]
        # iterate matrix
        for row_ptr in rowPtr[i]:(rowPtr[i + 1] - 1)
            # retrieve column value
            col = colVal[row_ptr]
            # iterate the partition list and check whether the column belongs to it
            for j in 1:lpartitions[b]
                if col == partition[j, b]
                    @inbounds blocks[k, j, b] = nzVal[row_ptr]
                end
            end
        end
    end
end

"""
    function update!(p, J::SparseMatrixCSC)

Update the preconditioner `p` from the sparse Jacobian `J` in CSC format for the CPU.

Note that this implements the same algorithm as for the GPU and becomes very slow on
the CPU as the number of blocks grows.
"""
function update!(p::BlockJacobiPreconditioner, J::SparseMatrixCSC)
    # TODO: Enabling threading leads to a crash here
    for b in 1:p.nblocks
        p.blocks[:,:,b] = p.id[:,:]
        for k in 1:p.lpartitions[b]
            i = p.partitions[k,b]
            for j in J.colptr[i]:J.colptr[i+1]-1
                if b == p.part[J.rowval[j]]
                    p.blocks[p.map[J.rowval[j]], p.map[i], b] = J.nzval[j]
                end
            end
        end
    end
    for b in 1:p.nblocks
        # Invert blocks
        p.blocks[:,:,b] .= inv(p.blocks[:,:,b])
    end
end

function Base.show(precond::BlockJacobiPreconditioner)
    # `nblocks` is the number of partitions and `blocksize` the dimension of each dense block.
    npartitions = precond.nblocks
    nblock = precond.blocksize
    println("#partitions: $npartitions, Blocksize: n = ", nblock,
            " Mbytes = ", (nblock*nblock*npartitions*8.0)/(1024.0*1024.0))
    println("Block Jacobi block size: $(precond.blocksize)")
end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
src/ic0.jl
code
79
export kp_ic0

kp_ic0(A) = error("Not implemented for this type $(typeof(A))")
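# The concrete methods are provided by the GPU extensions; a sketch of the call
# pattern used in the package tests (`A`, `A_gpu`, `b_gpu` are hypothetical names):
#
#   P = kp_ic0(CuSparseMatrixCSR(A))             # incomplete Cholesky, zero fill-in
#   x, stats = cg(A_gpu, b_gpu, M=P, ldiv=true)  # Krylov.jl solver, ldiv-style preconditioning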
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
src/ilu0.jl
code
81
export kp_ilu0

kp_ilu0(A) = error("Not implemented for this type $(typeof(A))")
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
src/scaling.jl
code
605
@kernel function scaling_csr_kernel!(rowPtr, nzVal, b)
    m = @index(Global, Linear)
    maxval = 0.0
    @inbounds for i = rowPtr[m]:(rowPtr[m + 1] - 1)
        absnzVal = abs(nzVal[i])
        # Somehow this works better in ExaPF; it was initially thought to be a bug.
        # absnzVal = nzVal[i]
        if absnzVal > maxval
            maxval = absnzVal
        end
    end
    if maxval < 1.0
        b[m] /= maxval
        @inbounds for i = rowPtr[m]:(rowPtr[m + 1] - 1)
            nzVal[i] /= maxval
        end
    end
end

function scaling_csr!(A, b, backend::KA.Backend)
    scaling_csr_kernel!(backend)(A.rowPtr, A.nzVal, b; ndrange=length(b))
    synchronize(backend)
end
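# Behaviour note: each CSR row whose largest absolute entry is below one is
# divided by that maximum, together with the matching entry of the right-hand
# side, so the scaled system keeps the same solution. The equivalent arithmetic
# on a hypothetical dense matrix `A` and vector `b`, for illustration only:
#
#   for m in 1:length(b)
#       s = maximum(abs, view(A, m, :))
#       if s < 1.0
#           A[m, :] ./= s
#           b[m] /= s
#       end
#   end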
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
test/runtests.jl
code
539
using AMDGPU
using CUDA
# using oneAPI

using Test

@testset "KrylovPreconditioners" begin
  if AMDGPU.functional()
    @info "Testing AMDGPU backend"
    @testset "Testing AMDGPU backend" begin
      include("gpu/amd.jl")
    end
  end
  if CUDA.functional()
    @info "Testing CUDA backend"
    @testset "Testing CUDA backend" begin
      include("gpu/nvidia.jl")
    end
  end
  # if oneAPI.functional()
  #   @info "Testing oneAPI backend"
  #   @testset "Testing oneAPI backend" begin
  #     include("gpu/intel.jl")
  #   end
  # end
end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
test/gpu/amd.jl
code
1926
using AMDGPU, AMDGPU.rocSPARSE, AMDGPU.rocSOLVER

_get_type(J::ROCSparseMatrixCSR) = ROCArray{Float64, 1, AMDGPU.Mem.HIPBuffer}
_is_csr(J::ROCSparseMatrixCSR) = true
_is_csc(J::ROCSparseMatrixCSR) = false

include("gpu.jl")

@testset "AMD -- AMDGPU.jl" begin
  @test AMDGPU.functional()
  AMDGPU.allowscalar(false)

  @testset "IC(0)" begin
    @testset "ROCSparseMatrixCSC -- $FC" for FC in (Float64,)
      test_ic0(FC, ROCVector{FC}, ROCSparseMatrixCSC{FC})
    end
    @testset "ROCSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_ic0(FC, ROCVector{FC}, ROCSparseMatrixCSR{FC})
    end
  end

  @testset "ILU(0)" begin
    @testset "ROCSparseMatrixCSC -- $FC" for FC in (Float64,)
      test_ilu0(FC, ROCVector{FC}, ROCSparseMatrixCSC{FC})
    end
    @testset "ROCSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_ilu0(FC, ROCVector{FC}, ROCSparseMatrixCSR{FC})
    end
  end

  @testset "KrylovOperator" begin
    @testset "ROCSparseMatrixCOO -- $FC" for FC in (Float64, ComplexF64)
      test_operator(FC, ROCVector{FC}, ROCMatrix{FC}, ROCSparseMatrixCOO{FC})
    end
    @testset "ROCSparseMatrixCSC -- $FC" for FC in (Float64, ComplexF64)
      test_operator(FC, ROCVector{FC}, ROCMatrix{FC}, ROCSparseMatrixCSC{FC})
    end
    @testset "ROCSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_operator(FC, ROCVector{FC}, ROCMatrix{FC}, ROCSparseMatrixCSR{FC})
    end
  end

  @testset "TriangularOperator" begin
    @testset "ROCSparseMatrixCOO -- $FC" for FC in (Float64, ComplexF64)
      test_triangular(FC, ROCVector{FC}, ROCMatrix{FC}, ROCSparseMatrixCOO{FC})
    end
    @testset "ROCSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_triangular(FC, ROCVector{FC}, ROCMatrix{FC}, ROCSparseMatrixCSR{FC})
    end
  end

  @testset "Block Jacobi preconditioner" begin
    test_block_jacobi(ROCBackend(), ROCArray, ROCSparseMatrixCSR)
  end
end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
test/gpu/gpu.jl
code
5907
using SparseArrays, Random, Test
using LinearAlgebra, Krylov, KrylovPreconditioners

Random.seed!(666)

function test_ic0(FC, V, M)
  n = 100
  R = real(FC)
  A_cpu = rand(FC, n, n)
  A_cpu = A_cpu * A_cpu'
  A_cpu = sparse(A_cpu)
  b_cpu = rand(FC, n)

  A_gpu = M(A_cpu)
  b_gpu = V(b_cpu)
  P = kp_ic0(A_gpu)
  x_gpu, stats = cg(A_gpu, b_gpu, M=P, ldiv=true)
  r_gpu = b_gpu - A_gpu * x_gpu
  @test stats.niter ≤ 5
  if (FC <: ComplexF64) && V.body.name.name == :ROCArray
    @test_broken norm(r_gpu) ≤ 1e-6
  else
    @test norm(r_gpu) ≤ 1e-8
  end

  A_gpu = M(A_cpu + 200*I)
  update!(P, A_gpu)
  x_gpu, stats = cg(A_gpu, b_gpu, M=P, ldiv=true)
  r_gpu = b_gpu - A_gpu * x_gpu
  @test stats.niter ≤ 5
  if (FC <: ComplexF64) && V.body.name.name == :ROCArray
    @test_broken norm(r_gpu) ≤ 1e-6
  else
    @test norm(r_gpu) ≤ 1e-8
  end
end

function test_ilu0(FC, V, M)
  n = 100
  R = real(FC)
  A_cpu = rand(FC, n, n)
  A_cpu = sparse(A_cpu)
  b_cpu = rand(FC, n)

  A_gpu = M(A_cpu)
  b_gpu = V(b_cpu)
  P = kp_ilu0(A_gpu)
  x_gpu, stats = gmres(A_gpu, b_gpu, N=P, ldiv=true)
  r_gpu = b_gpu - A_gpu * x_gpu
  @test stats.niter ≤ 5
  @test norm(r_gpu) ≤ 1e-8

  A_gpu = M(A_cpu + 200*I)
  update!(P, A_gpu)
  x_gpu, stats = gmres(A_gpu, b_gpu, N=P, ldiv=true)
  r_gpu = b_gpu - A_gpu * x_gpu
  @test stats.niter ≤ 5
  @test norm(r_gpu) ≤ 1e-8
end

function test_operator(FC, V, DM, SM)
  m = 200
  n = 100
  A_cpu = rand(FC, n, n)
  A_cpu = sparse(A_cpu)
  b_cpu = rand(FC, n)

  A_gpu = SM(A_cpu)
  b_gpu = V(b_cpu)
  opA_gpu = KrylovOperator(A_gpu)
  x_gpu, stats = gmres(opA_gpu, b_gpu)
  r_gpu = b_gpu - A_gpu * x_gpu
  @test stats.solved
  @test norm(r_gpu) ≤ 1e-8

  A_cpu = rand(FC, m, n)
  A_cpu = sparse(A_cpu)
  A_gpu = SM(A_cpu)
  opA_gpu = KrylovOperator(A_gpu)
  for i = 1:5
    y_cpu = rand(FC, m)
    x_cpu = rand(FC, n)
    mul!(y_cpu, A_cpu, x_cpu)
    y_gpu = V(y_cpu)
    x_gpu = V(x_cpu)
    mul!(y_gpu, opA_gpu, x_gpu)
    @test collect(y_gpu) ≈ y_cpu
  end
  for j = 1:5
    y_cpu = rand(FC, m)
    x_cpu = rand(FC, n)
    A_cpu2 = A_cpu + j*I
    mul!(y_cpu, A_cpu2, x_cpu)
    y_gpu = V(y_cpu)
    x_gpu = V(x_cpu)
    A_gpu2 = SM(A_cpu2)
    update!(opA_gpu, A_gpu2)
    mul!(y_gpu, opA_gpu, x_gpu)
    @test collect(y_gpu) ≈ y_cpu
  end

  nrhs = 3
  opA_gpu = KrylovOperator(A_gpu; nrhs)
  for i = 1:5
    Y_cpu = rand(FC, m, nrhs)
    X_cpu = rand(FC, n, nrhs)
    mul!(Y_cpu, A_cpu, X_cpu)
    Y_gpu = DM(Y_cpu)
    X_gpu = DM(X_cpu)
    mul!(Y_gpu, opA_gpu, X_gpu)
    @test collect(Y_gpu) ≈ Y_cpu
  end
  for j = 1:5
    Y_cpu = rand(FC, m, nrhs)
    X_cpu = rand(FC, n, nrhs)
    A_cpu2 = A_cpu + j*I
    mul!(Y_cpu, A_cpu2, X_cpu)
    Y_gpu = DM(Y_cpu)
    X_gpu = DM(X_cpu)
    A_gpu2 = SM(A_cpu2)
    update!(opA_gpu, A_gpu2)
    mul!(Y_gpu, opA_gpu, X_gpu)
    @test collect(Y_gpu) ≈ Y_cpu
  end
end

function test_triangular(FC, V, DM, SM)
  n = 100
  for (uplo, diag, triangle) in [('L', 'U', UnitLowerTriangular),
                                 ('L', 'N', LowerTriangular),
                                 ('U', 'U', UnitUpperTriangular),
                                 ('U', 'N', UpperTriangular)]
    A_cpu = rand(FC, n, n)
    A_cpu = uplo == 'L' ? tril(A_cpu) : triu(A_cpu)
    A_cpu = diag == 'U' ? A_cpu - Diagonal(A_cpu) + I : A_cpu
    A_cpu = sparse(A_cpu)
    b_cpu = rand(FC, n)

    A_gpu = SM(A_cpu)
    b_gpu = V(b_cpu)
    opA_gpu = TriangularOperator(A_gpu, uplo, diag)
    for i = 1:5
      y_cpu = rand(FC, n)
      x_cpu = rand(FC, n)
      ldiv!(y_cpu, triangle(A_cpu), x_cpu)
      y_gpu = V(y_cpu)
      x_gpu = V(x_cpu)
      ldiv!(y_gpu, opA_gpu, x_gpu)
      @test collect(y_gpu) ≈ y_cpu
    end
    for j = 1:5
      y_cpu = rand(FC, n)
      x_cpu = rand(FC, n)
      A_cpu2 = A_cpu + j*tril(A_cpu,-1) + j*triu(A_cpu,1)
      ldiv!(y_cpu, triangle(A_cpu2), x_cpu)
      y_gpu = V(y_cpu)
      x_gpu = V(x_cpu)
      A_gpu2 = SM(A_cpu2)
      update!(opA_gpu, A_gpu2)
      ldiv!(y_gpu, opA_gpu, x_gpu)
      @test collect(y_gpu) ≈ y_cpu
    end

    nrhs = 3
    opA_gpu = TriangularOperator(A_gpu, uplo, diag; nrhs)
    for i = 1:5
      Y_cpu = rand(FC, n, nrhs)
      X_cpu = rand(FC, n, nrhs)
      ldiv!(Y_cpu, triangle(A_cpu), X_cpu)
      Y_gpu = DM(Y_cpu)
      X_gpu = DM(X_cpu)
      ldiv!(Y_gpu, opA_gpu, X_gpu)
      @test collect(Y_gpu) ≈ Y_cpu
    end
    if V.body.name.name != :CuArray
      for j = 1:5
        Y_cpu = rand(FC, n, nrhs)
        X_cpu = rand(FC, n, nrhs)
        A_cpu2 = A_cpu + j*tril(A_cpu,-1) + j*triu(A_cpu,1)
        ldiv!(Y_cpu, triangle(A_cpu2), X_cpu)
        Y_gpu = DM(Y_cpu)
        X_gpu = DM(X_cpu)
        A_gpu2 = SM(A_cpu2)
        update!(opA_gpu, A_gpu2)
        ldiv!(Y_gpu, opA_gpu, X_gpu)
        @test collect(Y_gpu) ≈ Y_cpu
      end
    end
  end
end

_get_type(J::SparseMatrixCSC) = Vector{Float64}

function generate_random_system(n::Int, m::Int)
  # Add a diagonal term for conditioning
  A = randn(n, m) + 15I
  x♯ = randn(m)
  b = A * x♯
  # Be careful: all algorithms work with sparse matrices
  spA = sparse(A)
  return spA, b, x♯
end

function test_block_jacobi(device, AT, SMT)
  n, m = 100, 100
  A, b, x♯ = generate_random_system(n, m)
  # Transfer data to device
  A = A |> SMT
  b = b |> AT
  x♯ = x♯ |> AT
  x = similar(b); r = similar(b)
  nblocks = 2
  if _is_csr(A)
    scaling_csr!(A, b, device)
  end
  precond = BlockJacobiPreconditioner(A, nblocks, device)
  update!(precond, A)

  S = _get_type(A)
  linear_solver = Krylov.BicgstabSolver(n, m, S)
  Krylov.bicgstab!(
    linear_solver, A, b;
    N=precond,
    atol=1e-10,
    rtol=1e-10,
    verbose=0,
    history=true,
  )
  n_iters = linear_solver.stats.niter
  copyto!(x, linear_solver.x)
  r = b - A * x
  resid = norm(r) / norm(b)
  @test resid ≤ 1e-6
  @test x ≈ x♯
  @test n_iters ≤ n
end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
test/gpu/intel.jl
code
130
using oneAPI

include("gpu.jl")

@testset "Intel -- oneAPI.jl" begin
  @test oneAPI.functional()
  oneAPI.allowscalar(false)
end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
test/gpu/nvidia.jl
code
1879
using CUDA, CUDA.CUSPARSE, CUDA.CUSOLVER

_get_type(J::CuSparseMatrixCSR) = CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}
_is_csr(J::CuSparseMatrixCSR) = true
_is_csc(J::CuSparseMatrixCSR) = false

include("gpu.jl")

@testset "Nvidia -- CUDA.jl" begin
  @test CUDA.functional()
  CUDA.allowscalar(false)

  @testset "IC(0)" begin
    @testset "CuSparseMatrixCSC -- $FC" for FC in (Float64,)
      test_ic0(FC, CuVector{FC}, CuSparseMatrixCSC{FC})
    end
    @testset "CuSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_ic0(FC, CuVector{FC}, CuSparseMatrixCSR{FC})
    end
  end

  @testset "ILU(0)" begin
    @testset "CuSparseMatrixCSC -- $FC" for FC in (Float64,)
      test_ilu0(FC, CuVector{FC}, CuSparseMatrixCSC{FC})
    end
    @testset "CuSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_ilu0(FC, CuVector{FC}, CuSparseMatrixCSR{FC})
    end
  end

  @testset "KrylovOperator" begin
    @testset "CuSparseMatrixCOO -- $FC" for FC in (Float64, ComplexF64)
      test_operator(FC, CuVector{FC}, CuMatrix{FC}, CuSparseMatrixCOO{FC})
    end
    @testset "CuSparseMatrixCSC -- $FC" for FC in (Float64, ComplexF64)
      test_operator(FC, CuVector{FC}, CuMatrix{FC}, CuSparseMatrixCSC{FC})
    end
    @testset "CuSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_operator(FC, CuVector{FC}, CuMatrix{FC}, CuSparseMatrixCSR{FC})
    end
  end

  @testset "TriangularOperator" begin
    @testset "CuSparseMatrixCOO -- $FC" for FC in (Float64, ComplexF64)
      test_triangular(FC, CuVector{FC}, CuMatrix{FC}, CuSparseMatrixCOO{FC})
    end
    @testset "CuSparseMatrixCSR -- $FC" for FC in (Float64, ComplexF64)
      test_triangular(FC, CuVector{FC}, CuMatrix{FC}, CuSparseMatrixCSR{FC})
    end
  end

  @testset "Block Jacobi preconditioner" begin
    test_block_jacobi(CUDABackend(), CuArray, CuSparseMatrixCSR)
  end
end
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
README.md
docs
1668
# KrylovPreconditioners.jl

| **Documentation** | **CI** | **Coverage** | **Downloads** |
|:-----------------:|:------:|:------------:|:-------------:|
| [![docs-stable][docs-stable-img]][docs-stable-url] [![docs-dev][docs-dev-img]][docs-dev-url] | [![build-gh][build-gh-img]][build-gh-url] [![build-cirrus][build-cirrus-img]][build-cirrus-url] | [![codecov][codecov-img]][codecov-url] | [![downloads][downloads-img]][downloads-url] |

[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://JuliaSmoothOptimizers.github.io/KrylovPreconditioners.jl/stable
[docs-dev-img]: https://img.shields.io/badge/docs-dev-purple.svg
[docs-dev-url]: https://JuliaSmoothOptimizers.github.io/KrylovPreconditioners.jl/dev
[build-gh-img]: https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl/workflows/CI/badge.svg?branch=main
[build-gh-url]: https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl/actions
[build-cirrus-img]: https://img.shields.io/cirrus/github/JuliaSmoothOptimizers/KrylovPreconditioners.jl?logo=Cirrus%20CI
[build-cirrus-url]: https://cirrus-ci.com/github/JuliaSmoothOptimizers/KrylovPreconditioners.jl
[codecov-img]: https://codecov.io/gh/JuliaSmoothOptimizers/KrylovPreconditioners.jl/branch/main/graph/badge.svg
[codecov-url]: https://app.codecov.io/gh/JuliaSmoothOptimizers/KrylovPreconditioners.jl
[downloads-img]: https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/KrylovPreconditioners
[downloads-url]: https://pkgs.genieframework.com?packages=KrylovPreconditioners

The best sidekick of [Krylov.jl](https://github.com/JuliaSmoothOptimizers/Krylov.jl)
└(^o^ )X( ^o^)┘
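## Example

A minimal usage sketch, assuming a CUDA-capable GPU; it follows the call pattern
exercised in the package tests, and `A` and `b` are hypothetical CPU inputs:

```julia
using CUDA, CUDA.CUSPARSE, Krylov, KrylovPreconditioners

A_gpu = CuSparseMatrixCSR(A)   # `A` is a square sparse CPU matrix
b_gpu = CuVector(b)
P = kp_ilu0(A_gpu)             # incomplete LU with zero fill-in
x, stats = gmres(A_gpu, b_gpu, N=P, ldiv=true)
```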
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git
[ "MPL-2.0" ]
0.2.2
3372f4dfa2499b0aa0b478a5082aff34915532e8
docs/src/index.md
docs
982
# [KrylovPreconditioners.jl documentation](@id Home)

This package provides a collection of preconditioners.

## How to Install

KrylovPreconditioners.jl can be installed and tested through the Julia package manager:

```julia
julia> ]
pkg> add KrylovPreconditioners
pkg> test KrylovPreconditioners
```

# Bug reports and discussions

If you think you found a bug, feel free to open an [issue](https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl/issues).
Focused suggestions and requests can also be opened as issues. Before opening a pull request, please start an issue or a discussion on the topic.

If you want to ask a question not suited for a bug report, feel free to start a discussion [here](https://github.com/JuliaSmoothOptimizers/Organization/discussions). This forum is for general discussion about this repository and the [JuliaSmoothOptimizers](https://github.com/JuliaSmoothOptimizers) organization, so questions about any of our packages are welcome.
KrylovPreconditioners
https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git