| licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.4.7 | 1e03704320016415ef5dcc2c54edc3f20ee37bca | code | 12386 | using DataScienceTraits
using CategoricalArrays
using ColorTypes
using CoDa
using Distributions
using Meshes
using Dates
using Test
# import to avoid conflicts
import DynamicQuantities
import Unitful
const DST = DataScienceTraits
@testset "DataScienceTraits.jl" begin
@testset "scitype" begin
# Continuous
@test scitype(Float32) <: DST.Continuous
@test scitype(Float64) <: DST.Continuous
@test scitype(ComplexF32) <: DST.Continuous
@test scitype(ComplexF64) <: DST.Continuous
@test scitype(1.0f0) <: DST.Continuous
@test scitype(1.0) <: DST.Continuous
@test scitype(1.0f0 + 2im) <: DST.Continuous
@test scitype(1.0 + 2im) <: DST.Continuous
# Categorical
@test scitype(Int) <: DST.Categorical
@test scitype(Char) <: DST.Categorical
@test scitype(String) <: DST.Categorical
@test scitype(1) <: DST.Categorical
@test scitype('a') <: DST.Categorical
@test scitype("a") <: DST.Categorical
# Tensorial
@test scitype(Vector) <: DST.Tensorial
@test scitype(Matrix) <: DST.Tensorial
@test scitype(Array) <: DST.Tensorial
@test scitype(rand(2)) <: DST.Tensorial
@test scitype(rand(2, 2)) <: DST.Tensorial
@test scitype(rand(2, 2, 2)) <: DST.Tensorial
# Unknown
@test scitype(Nothing) <: DST.Unknown
@test scitype(Union{}) <: DST.Unknown
@test scitype(nothing) <: DST.Unknown
end
@testset "elscitype" begin
# Continuous
@test elscitype(Vector{Float32}) <: DST.Continuous
@test elscitype(Vector{Float64}) <: DST.Continuous
@test elscitype(NTuple{3,ComplexF32}) <: DST.Continuous
@test elscitype(NTuple{3,ComplexF64}) <: DST.Continuous
@test elscitype((1.0f0, 2.0f0, 3.0f0)) <: DST.Continuous
@test elscitype(1:0.1:3) <: DST.Continuous
@test elscitype([1.0f0, 2.0f0, 3.0f0] .+ 2im) <: DST.Continuous
@test elscitype([1.0, 2.0, 3.0] .+ 2im) <: DST.Continuous
# Categorical
@test elscitype(Vector{Int}) <: DST.Categorical
@test elscitype(NTuple{3,Char}) <: DST.Categorical
@test elscitype(NTuple{3,String}) <: DST.Categorical
@test elscitype((1, 2, 3)) <: DST.Categorical
@test elscitype('a':'c') <: DST.Categorical
@test elscitype(["a", "b", "c"]) <: DST.Categorical
# Unknown
@test elscitype(Vector{Nothing}) <: DST.Unknown
@test elscitype(Tuple{}) <: DST.Unknown
@test elscitype(fill(nothing, 3)) <: DST.Unknown
@test elscitype(()) <: DST.Unknown
end
@testset "sciconvert" begin
# fallback: Continuous
for x in (1.0f0, 1.0, 1.0f0 + 2im, 1.0 + 2im)
@test DST.sciconvert(DST.Continuous, x) === x
@test scitype(DST.sciconvert(DST.Continuous, x)) <: DST.Continuous
end
# fallback: Categorical
for x in ('a', "a")
@test DST.sciconvert(DST.Categorical, x) === x
@test scitype(DST.sciconvert(DST.Categorical, x)) <: DST.Categorical
end
# fallback: Unknown
@test DST.sciconvert(DST.Unknown, :a) === :a
@test scitype(DST.sciconvert(DST.Unknown, :a)) <: DST.Unknown
@test DST.sciconvert(DST.Unknown, nothing) === nothing
@test scitype(DST.sciconvert(DST.Unknown, nothing)) <: DST.Unknown
# Integer to Continuous
@test DST.sciconvert(DST.Continuous, 1) === 1.0
@test scitype(DST.sciconvert(DST.Continuous, 1)) <: DST.Continuous
# Symbol to Categorical
@test DST.sciconvert(DST.Categorical, :a) === "a"
@test scitype(DST.sciconvert(DST.Categorical, :a)) <: DST.Categorical
# Number to Categorical
@test DST.sciconvert(DST.Categorical, 1.0) === 1
@test scitype(DST.sciconvert(DST.Categorical, 1.0)) <: DST.Categorical
# no conversion: Integer to Categorical
@test DST.sciconvert(DST.Categorical, Int32(1)) === Int32(1)
@test scitype(DST.sciconvert(DST.Categorical, Int32(1))) <: DST.Categorical
# throws
@test_throws ArgumentError DST.sciconvert(DST.Continuous, nothing)
end
@testset "coerce" begin
@test DST.coerce(DST.Continuous, [1, 2, 3]) == [1.0, 2.0, 3.0]
@test elscitype(DST.coerce(DST.Continuous, [1, 2, 3])) <: DST.Continuous
@test DST.coerce(DST.Continuous, (1, 2, 3)) == (1.0, 2.0, 3.0)
@test elscitype(DST.coerce(DST.Continuous, (1, 2, 3))) <: DST.Continuous
@test DST.coerce(DST.Categorical, [:a, :b, :c]) == ["a", "b", "c"]
@test elscitype(DST.coerce(DST.Categorical, [:a, :b, :c])) <: DST.Categorical
@test DST.coerce(DST.Categorical, (:a, :b, :c)) == ("a", "b", "c")
@test elscitype(DST.coerce(DST.Categorical, (:a, :b, :c))) <: DST.Categorical
@test DST.coerce(DST.Categorical, [1.0, 2.0, 3.0]) == [1, 2, 3]
@test elscitype(DST.coerce(DST.Categorical, [1.0, 2.0, 3.0])) <: DST.Categorical
@test DST.coerce(DST.Categorical, (1.0, 2.0, 3.0)) == (1, 2, 3)
@test elscitype(DST.coerce(DST.Categorical, (1.0, 2.0, 3.0))) <: DST.Categorical
end
@testset "isordered" begin
@test !DST.isordered([1, 2, 3])
@test !DST.isordered(['a', 'b', 'c'])
@test !DST.isordered(("a", "b", "c"))
# throws
@test_throws ArgumentError DST.isordered([1.0, 2.0, 3.0])
end
@testset "missing values" begin
@test scitype(Missing) <: DST.Unknown
@test scitype(Union{Float64,Missing}) <: DST.Continuous
@test scitype(Union{Int,Missing}) <: DST.Categorical
@test elscitype(fill(missing, 3)) <: DST.Unknown
@test elscitype([1.0, missing, 3.0]) <: DST.Continuous
@test elscitype([1, missing, 3]) <: DST.Categorical
@test isequal(DST.coerce(DST.Continuous, [1, missing, 3]), [1.0, missing, 3.0])
@test elscitype(DST.coerce(DST.Continuous, [1, missing, 3])) <: DST.Continuous
@test isequal(DST.coerce(DST.Categorical, [:a, missing, :c]), ["a", missing, "c"])
@test elscitype(DST.coerce(DST.Categorical, [:a, missing, :c])) <: DST.Categorical
@test isequal(DST.coerce(DST.Categorical, [1.0, missing, 3.0]), [1, missing, 3])
@test elscitype(DST.coerce(DST.Categorical, [1.0, missing, 3.0])) <: DST.Categorical
end
@testset "Unitful" begin
u = Unitful.u"m"
q1 = 1 * u
q2 = 1.0 * u
Q1 = typeof(q1)
Q2 = typeof(q2)
@test scitype(Q1) <: DST.Categorical
@test scitype(Q2) <: DST.Continuous
@test scitype(q1) <: DST.Categorical
@test scitype(q2) <: DST.Continuous
@test elscitype(Vector{Q1}) <: DST.Categorical
@test elscitype(Vector{Q2}) <: DST.Continuous
@test elscitype([1, 2, 3] * u) <: DST.Categorical
@test elscitype([1.0, 2.0, 3.0] * u) <: DST.Continuous
@test elscitype([1, missing, 3] * u) <: DST.Categorical
@test elscitype([1.0, missing, 3.0] * u) <: DST.Continuous
# Quantity{Integer} to Continuous
@test DST.sciconvert(DST.Continuous, 1 * u) === 1.0 * u
@test scitype(DST.sciconvert(DST.Continuous, 1 * u)) <: DST.Continuous
# Quantity{Number} to Categorical
@test DST.sciconvert(DST.Categorical, 1.0 * u) === 1 * u
@test scitype(DST.sciconvert(DST.Categorical, 1.0 * u)) <: DST.Categorical
# no conversion: Quantity{Integer} to Categorical
@test DST.sciconvert(DST.Categorical, Int32(1) * u) === Int32(1) * u
@test scitype(DST.sciconvert(DST.Categorical, Int32(1) * u)) <: DST.Categorical
# coercion
@test DST.coerce(DST.Continuous, [1, 2, 3] * u) == [1.0, 2.0, 3.0] * u
@test elscitype(DST.coerce(DST.Continuous, [1, 2, 3] * u)) <: DST.Continuous
@test isequal(DST.coerce(DST.Continuous, [1, missing, 3] * u), [1.0, missing, 3.0] * u)
@test elscitype(DST.coerce(DST.Continuous, [1, missing, 3] * u)) <: DST.Continuous
@test DST.coerce(DST.Categorical, [1.0, 2.0, 3.0] * u) == [1, 2, 3] * u
@test elscitype(DST.coerce(DST.Categorical, [1.0, 2.0, 3.0] * u)) <: DST.Categorical
@test isequal(DST.coerce(DST.Categorical, [1.0, missing, 3.0] * u), [1, missing, 3] * u)
@test elscitype(DST.coerce(DST.Categorical, [1.0, missing, 3.0] * u)) <: DST.Categorical
end
@testset "DynamicQuantities" begin
uf = DynamicQuantities.u"m"
ui = DynamicQuantities.Quantity{Int}(uf)
q1 = 1 * ui
q2 = 1.0 * uf
Q1 = typeof(q1)
Q2 = typeof(q2)
@test scitype(Q1) <: DST.Categorical
@test scitype(Q2) <: DST.Continuous
@test scitype(q1) <: DST.Categorical
@test scitype(q2) <: DST.Continuous
@test elscitype(Vector{Q1}) <: DST.Categorical
@test elscitype(Vector{Q2}) <: DST.Continuous
@test elscitype([1, 2, 3] .* ui) <: DST.Categorical
@test elscitype([1.0, 2.0, 3.0] .* uf) <: DST.Continuous
@test elscitype([1 * ui, missing, 3 * ui]) <: DST.Categorical
@test elscitype([1.0 * uf, missing, 3.0 * uf]) <: DST.Continuous
# Quantity{Integer} to Continuous
@test DST.sciconvert(DST.Continuous, 1 * ui) === 1.0 * uf
@test scitype(DST.sciconvert(DST.Continuous, 1 * ui)) <: DST.Continuous
# Quantity{Number} to Categorical
@test DST.sciconvert(DST.Categorical, 1.0 * uf) === 1 * ui
@test scitype(DST.sciconvert(DST.Categorical, 1.0 * uf)) <: DST.Categorical
# no conversion: Quantity{Integer} to Categorical
q3 = DynamicQuantities.Quantity{Int32}(q1)
@test DST.sciconvert(DST.Categorical, q3) === q3
@test scitype(DST.sciconvert(DST.Categorical, q3)) <: DST.Categorical
# coercion
@test DST.coerce(DST.Continuous, [1, 2, 3] .* ui) == [1.0, 2.0, 3.0] .* uf
@test elscitype(DST.coerce(DST.Continuous, [1, 2, 3] .* ui)) <: DST.Continuous
@test isequal(DST.coerce(DST.Continuous, [1 * ui, missing, 3 * ui]), [1.0 * uf, missing, 3.0 * uf])
@test elscitype(DST.coerce(DST.Continuous, [1 * ui, missing, 3 * ui])) <: DST.Continuous
@test DST.coerce(DST.Categorical, [1.0, 2.0, 3.0] .* uf) == [1, 2, 3] .* ui
@test elscitype(DST.coerce(DST.Categorical, [1.0, 2.0, 3.0] .* uf)) <: DST.Categorical
@test isequal(DST.coerce(DST.Categorical, [1.0 * uf, missing, 3.0 * uf]), [1 * ui, missing, 3 * ui])
@test elscitype(DST.coerce(DST.Categorical, [1.0 * uf, missing, 3.0 * uf])) <: DST.Categorical
end
@testset "CategoricalArrays" begin
carr = categorical([1, 2, 3])
cval = first(carr)
CV = typeof(cval)
CA = typeof(carr)
@test scitype(CV) <: DST.Categorical
@test scitype(cval) <: DST.Categorical
@test elscitype(CA) <: DST.Categorical
@test elscitype(carr) <: DST.Categorical
@test elscitype(categorical([1, missing, 3])) <: DST.Categorical
@test !DST.isordered(carr)
carr = categorical([1, 3, 2], ordered=true)
@test DST.isordered(carr)
end
@testset "Colors" begin
c1 = Gray(0)
c2 = RGB(0,0,0)
@test scitype(Gray) <: DST.Colorful
@test scitype(c1) <: DST.Colorful
@test scitype(RGB) <: DST.Colorful
@test scitype(c2) <: DST.Colorful
@test elscitype(Vector{Colorant}) <: DST.Colorful
@test elscitype([c1, c2]) <: DST.Colorful
@test elscitype([c1, missing, c2]) <: DST.Colorful
end
@testset "CoDa" begin
c1 = Composition(a=0.2, b=0.8)
c2 = Composition(a=0.5, b=0.5)
@test scitype(Composition{2,(:a, :b)}) <: DST.Compositional
@test scitype(c1) <: DST.Compositional
@test elscitype(Vector{Composition{2,(:a, :b)}}) <: DST.Compositional
@test elscitype([c1, c2]) <: DST.Compositional
@test elscitype([c1, missing, c2]) <: DST.Compositional
end
@testset "Distributions" begin
@test scitype(Normal()) <: DST.Distributional
@test scitype(Exponential()) <: DST.Distributional
@test elscitype(fill(Normal(), 3)) <: DST.Distributional
@test elscitype(fill(Exponential(), 3)) <: DST.Distributional
@test elscitype([Normal(), missing, Normal()]) <: DST.Distributional
@test elscitype([Exponential(), missing, Exponential()]) <: DST.Distributional
end
@testset "Meshes" begin
@test scitype(rand(Point)) <: DST.Geometrical
@test scitype(rand(Triangle)) <: DST.Geometrical
@test elscitype([rand(Point)]) <: DST.Geometrical
@test elscitype([rand(Triangle)]) <: DST.Geometrical
@test elscitype([Point(0, 0), missing, Point(1, 1)]) <: DST.Geometrical
@test elscitype([Triangle((0, 0), (1, 0), (1, 1)), missing, Point(1, 1)]) <: DST.Geometrical
end
@testset "Dates" begin
@test scitype(Date(2023, 1, 1)) <: DST.Temporal
@test scitype(Time(1, 0, 0)) <: DST.Temporal
@test scitype(DateTime(2023, 1, 1)) <: DST.Temporal
@test elscitype(fill(Date(2023, 1, 1), 3)) <: DST.Temporal
@test elscitype([Date(2023, 1, 1), missing, Time(1, 0, 0)]) <: DST.Temporal
end
end
| DataScienceTraits | https://github.com/JuliaML/DataScienceTraits.jl.git |
|
[
"MIT"
] | 0.4.7 | 1e03704320016415ef5dcc2c54edc3f20ee37bca | docs | 1065 | # DataScienceTraits.jl
[](https://github.com/JuliaML/DataScienceTraits.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/JuliaML/DataScienceTraits.jl)
This package provides a lightweight alternative to
[ScientificTypes.jl](https://github.com/JuliaAI/ScientificTypes.jl)
with a more sensible set of defaults for data science.
See https://github.com/JuliaAI/ScientificTypes.jl/issues/180.
## Usage
This package is intended for developers of statistical packages
that need to dispatch different algorithms depending on scientific
types:
```julia
import DataScienceTraits as DST
reduction(::DST.Continuous) = sum
reduction(::DST.Categorical) = first
```
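The traits can also be queried and converted directly. A small sketch based
on the package's test suite:
```julia
import DataScienceTraits as DST

DST.scitype(1.0) <: DST.Continuous           # true: floats are Continuous
DST.elscitype([1, 2, 3]) <: DST.Categorical  # true: integers default to Categorical
DST.coerce(DST.Continuous, [1, 2, 3])        # [1.0, 2.0, 3.0]
```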
Extensions are provided for third-party types such as CoDa.jl
and CategoricalArrays.jl in order to facilitate the integration
with existing software stacks.
| DataScienceTraits | https://github.com/JuliaML/DataScienceTraits.jl.git |
|
[
"MIT"
] | 0.4.0 | 1f68a51d48babc46940b8f39ca0b7883cac6f972 | code | 3678 | module CUDAatomics
using CUDAnative, LLVM
using LLVM.Interop
let type_map = Dict( Int32 => ("u32", "r", "b32", "s32"), UInt32 => ("u32", "r", "b32", "u32"), Int64 => ("u64", "l", "b64", "s64"), UInt64 => ("u64", "l", "b64", "s64"), Float32 => ("f32", "f", "b32", "f32"), Float64 => ("f64", "d", "b64", "f64") ), atomiclist = [("add",2,1), ("exch",2,3), ("min",2,4), ("max",2,4), ("inc",2,1), ("dec",2,1), ("cas",3,3), ("and",2,3), ("or",2,3), ("xor",2,3)]
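# `cvt(T, b)` is a @generated function emitting a single PTX `cvt` instruction that
# converts `b` to type `T`, adding a rounding modifier ("rn." or "rzi.") when the
# source/destination combination requires one.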
global @generated function cvt( a::Type{T}, b::U ) where {T,U}
round_str = ""
if U==Float32
if !(T in [Float32, Float64])
round_str = "rzi."
end
elseif U==Float64
if T==Float32
round_str = "rn."
elseif T!=Float64
round_str = "rzi."
end
else
if T in [Float32,Float64]
round_str = "rn."
end
end
call_string = string("cvt.",round_str,type_map[T][1],".",type_map[U][1], " \$0, \$1;")
ex = :(@asmcall)
append!(ex.args, [call_string, string("=",type_map[T][2],",",type_map[U][2]), false, Symbol(T), :(Tuple{$U}), :b])
return :(Base.@_inline_meta; $ex)
end
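# Build the expression for a @generated `atomic<instr>` function: `nargs` is the number
# of arguments of the underlying PTX instruction, and `typeindex` selects the PTX type
# suffix from `type_map` (e.g. "b32" for bitwise ops vs "s32" for signed arithmetic).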
function atomicexpression(instr::String, nargs, typeindex)
fex = :(@generated function $(Symbol("atomic"*instr))(a) end)
for i=1:(nargs-1)
push!(fex.args[3].args[1].args, Symbol("a"*"$i"))
end
push!(fex.args[3].args[1].args, Expr(:kw, :index, 1))
push!(fex.args[3].args[1].args, Expr(:kw, :field, Val(nothing)))
fargs = fex.args[3].args[2].args
append!(fargs, (quote
type_map = $type_map
fieldsym = field.parameters[1]
if fieldsym == nothing
base_type = a.parameters[1]
offset = 0
else
field_index = findfirst(fieldnames(a.parameters[1]) .== fieldsym)
base_type = a.parameters[1].types[field_index]
offset = (cumsum(sizeof.(a.parameters[1].types)) .- sizeof.(a.parameters[1].types))[field_index]
end
ASstr = a.parameters[3] == CUDAnative.AS.Shared ? "shared" : "global"
end).args)
append!(fargs, (quote
call_string = string( "cvta.to.",ASstr,".u64 %rd1, \$1;\natom.",ASstr,".",$instr,".", type_map[base_type][$typeindex], " \$0, [%rd1],")
call_string = string(call_string, string(string(string.(" \$", collect(2:$nargs), ",")...)[1:end-1],";"))
end).args)
for i=1:(nargs-1)
varsym = Symbol("a"*"$i")
subex = :($(Symbol("a"*"$i"*"val")) = $varsym==base_type ? $(QuoteNode(:($varsym))) : :(cvt($base_type, $varsym)))
subex.args[end].args[end].args[end].args[end] = varsym
push!(fargs, subex)
end
append!(fargs, (quote
ex = :(@asmcall)
constraint_list = Array{String}(["=", type_map[base_type][2], ",l"])
argtype_expr = :(Tuple{UInt64})
for i=2:$nargs
push!(constraint_list,string(","*type_map[base_type][2]))
push!(argtype_expr.args,Symbol(base_type))
end
append!(ex.args, [call_string, string(constraint_list...), true, Symbol(base_type), argtype_expr, :( UInt64(pointer(a)) + $offset + $(sizeof(base_type))*(index-1)), a1val])
return :(Base.@_inline_meta; $ex)
end).args)
for i=2:(nargs-1)
push!(fargs[end-2].args[end].args, Symbol("a"*"$i"*"val"))
end
return fex
end
for atomicfunc in atomiclist
eval(atomicexpression(atomicfunc[1], atomicfunc[2], atomicfunc[3]))
eval(:(export $(Symbol("atomic"*atomicfunc[1]))))
end
end
function atomicsub(a::CuDeviceArray{Int32, N, A}, b, index=1, field=Val(nothing)) where {N,A}
atomicadd(a, -b, index, field)
end
export atomicsub
export cvt
end
| CUDAatomics | https://github.com/alandion/CUDAatomics.jl.git |
|
[
"MIT"
] | 0.4.0 | 1f68a51d48babc46940b8f39ca0b7883cac6f972 | code | 3281 | using Test
using CUDAnative, CuArrays
using CUDAatomics
if CUDAnative.functional()
function kern_atomicadd( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicadd(a,b[i], 2)
return nothing
end
d_a = CuArray(zeros(Float32, 2))
d_b = CuArray(Float32.(collect(1:1024)))
@cuda threads=32 blocks=32 kern_atomicadd(d_a, d_b)
@test abs(Array(d_a)[2] - sum(Array(d_b))) < 1.0f-7
function kern_atomicsub( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicsub(a,b[i])
return nothing
end
d_a = CuArray(zeros(Int32, 1))
d_b = CuArray(Int32.(collect(1:1024)))
@cuda threads=32 blocks=32 kern_atomicsub(d_a, d_b)
@test Array(d_a)[1] == -524800
function kern_atomicmin( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicmin(a,b[i])
return nothing
end
d_a = CuArray(Array{Int32}([1025]))
d_b = CuArray(Int32.(collect(1:1024)))
@cuda threads=32 blocks=32 kern_atomicmin(d_a, d_b)
@test Array(d_a)[1] == 1
function kern_atomicmax( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicmax(a,b[i])
return nothing
end
d_a = CuArray(Array{Int32}([1]))
d_b = CuArray(Int32.(collect(1:1024)))
@cuda threads=32 blocks=32 kern_atomicmax(d_a, d_b)
@test Array(d_a)[1] == 1024
function kern_atomicinc( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicinc(a,b[i])
return nothing
end
d_a = CuArray(Array{UInt32}([UInt32(0)]))
d_b = CuArray(repeat(Array{UInt32}([UInt32(1024)]), 1024))
@cuda threads=32 blocks=32 kern_atomicinc(d_a, d_b)
@test Array(d_a)[1] == 0x00000400
function kern_atomicdec( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicdec(a,b[i])
return nothing
end
d_a = CuArray(Array{UInt32}([UInt32(1025)]))
d_b = CuArray(repeat(Array{UInt32}([UInt32(1025)]), 1024))
@cuda threads=32 blocks=32 kern_atomicdec(d_a, d_b)
@test Array(d_a)[1] == 0x00000001
function kern_atomicand( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicand(a,b[i])
return nothing
end
d_a = CuArray(Array{UInt32}([UInt32(1389)]))
d_b = CuArray(repeat(Array{UInt32}([UInt32(1023)]), 1024))
@cuda threads=32 blocks=32 kern_atomicand(d_a, d_b)
@test Array(d_a)[1] == 0x0000016d
function kern_atomicexch( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomicexch(a,b[i])
return nothing
end
d_a = CuArray(zeros(Float32, 1))
d_b = CuArray(Float32.(collect(1:1024)))
@cuda threads=32 blocks=32 kern_atomicexch(d_a, d_b)
@test findfirst( Array(d_b).==Array(d_a) ) < 1025
function kern_atomiccas( a, b )
i = (blockIdx().x-1) * blockDim().x + threadIdx().x
atomiccas(a, i, b[i])
return nothing
end
d_a = CuArray(Array{Int32}([17]))
d_b = CuArray(Array{Int32}(collect(1025:2048)))
@cuda threads=32 blocks=32 kern_atomiccas(d_a, d_b)
@test findfirst(Array(d_b).==Array(d_a)) == 17
end
| CUDAatomics | https://github.com/alandion/CUDAatomics.jl.git |
|
[
"MIT"
] | 0.4.0 | 1f68a51d48babc46940b8f39ca0b7883cac6f972 | docs | 1896 | # CUDAatomics
Support for atomic operations in [CUDAnative](https://github.com/JuliaGPU/CUDAnative.jl) kernels
## Usage
The functions implemented closely follow the functions in the [CUDA C Programming Guide](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions). All functions and types included in the guide are supported, and function names are the same as in the guide except that only lowercase characters are used.
The first argument to each function is a `CuDeviceArray` (instead of a pointer as in the C guide). In addition to the arguments of each function in the C guide, two optional arguments are provided for each atomic function:
```
atomicadd(ary, value, [index=1, fieldname=Val(nothing)])
```
`index` specifies which element of the array should be atomically updated, and defaults to the first element. `fieldname` is a value type used for extending the atomic functions to user-defined types (see below).
## Automatic type conversion
When calling an atomic function such that the `eltype` of the `ary` does not match the type of `value`, the `value` will be automatically converted to the `eltype` of `ary` before the atomic operation is performed. For instance, one might want to use `threadIdx().x` (which has type `Int64`) to perform a compare-and-swap with a 32-bit integer.
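For example (a sketch from the test suite), the `Int64` thread index `i` below is converted automatically before the compare-and-swap on an `Int32` array:
```
function kern_atomiccas( a, b )
    i = (blockIdx().x-1) * blockDim().x + threadIdx().x
    atomiccas(a, i, b[i])   # i::Int64 is converted to eltype(a) == Int32
    return nothing
end
```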
## Extending to user-defined types
The optional `fieldname` argument can be used to specify the field to be updated of a user-defined type. For instance, to extend `atomicadd` to a dual-number type, one could do
```
struct DualFloat
value::Float32
partial::Float32
end
function CUDAatomics.atomicadd(a::CuDeviceArray{DualFloat, N, A}, b::DualFloat, index=1) where {N,A}
CUDAatomics.atomicadd(a, b.value, index, Val(:value))
CUDAatomics.atomicadd(a, b.partial, index, Val(:partial))
end
```
| CUDAatomics | https://github.com/alandion/CUDAatomics.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | code | 160 | # execute this file in the docs directory with this
# julia --color=yes --project make.jl
using Documenter, SimpleRandom
makedocs(; sitename = "SimpleRandom")
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | code | 4563 | import Base: getindex, setindex!, (+), (*), (-), length, setindex!, show
export RV, RV_types, E, Var, validate!
export vals, probs, report
export Uniform_RV, Binomial_RV, Bernoulli_RV
"""
`RV` represents a discrete random variable with finite support.
"""
mutable struct RV{S<:Number,T<:Real}
data::Dict{S,T}
valid::Bool
function RV{S,T}() where {S,T}
d = Dict{S,T}()
new(d, false)
end
end
RV_types(X::RV{S,T}) where {S,T} = (S, T)
"""
`length(X::RV)` returns the number of values in the random variable `X`.
"""
length(X::RV) = length(X.data)
function show(io::IO, X::RV)
S, T = RV_types(X)
print(io, "RV{$S,$T} with $(length(X.data)) values")
end
"""
`vals(X::RV)` returns an iterator of the values this random variable
can take. Use `X[v]` to get the associate probability of the
value `v`.
"""
vals(X::RV) = keys(X.data)
"""
`probs(X::RV)` returns an iterator of the probabilities associated with
the values in `X`.
"""
function probs(X::RV)
validate!(X)
return values(X.data)
end
"""
`validate!(X)` ensures that the probabilies of the values in `X`
sum to one. If not, they are rescaled.
"""
function validate!(X::RV)
if !X.valid
u = sum(values(X.data))
for x in keys(X.data)
X.data[x] /= u
end
X.valid = true
end
nothing
end
"""
`E(X)` is the expected value of `X`.
"""
function E(X::RV{S,T}) where {S,T}
@assert length(X) > 0 "Cannot compute the expected value: no values!"
validate!(X)
return sum(k * X.data[k] for k in keys(X.data))
end
"""
`Var(Y)` is the variance of `Y`.
"""
function Var(X::RV{S,T}) where {S,T}
@assert length(X) > 0 "Cannot compute the variance: no values!"
validate!(X)
exex = E(X)^2
exx = sum(k * k * X.data[k] for k in keys(X.data))
return exx - exex
end
"""
`Bernoulli_RV(p)` makes a single coin flip RV.
"""
function Bernoulli_RV(p::T) where {T}
@assert 0 <= p && p <= 1 "p must be in [0,1]"
X = RV{Int,T}()
X[1] = p
X[0] = 1 - p
X.valid = true
return X
end
"""
`Binomial_RV(n,p)` returns a binomial random variable.
"""
function Binomial_RV(n::S, p::T) where {S<:Integer,T}
@assert n >= 0 "n must be nonnegative"
@assert 0 <= p && p <= 1 "probability must be in [0,1]"
X = RV{S,T}()
for k = 0:n
X[k] = binomial(n, k) * (p^k) * (1 - p)^(n - k)
end
return X
end
"""
`Uniform_RV(n)` returns the uniform distribution on `{1,2,...,n}`.
"""
function Uniform_RV(n::Int)
X = RV{Int,Rational{Int}}()
for k = 1:n
X[k] = 1 // n
end
return X
end
"""
`X[v]` returns the probability of `v` in the random variable `X`.
Note that we validate `X` (with `validate!`) before retrieving
the value.
"""
function getindex(X::RV{S,T}, k::S) where {S,T}
validate!(X)
try
return X.data[k]
catch
return zero(T)
end
end
function setindex!(X::RV{S,T}, p::Real, k::S) where {S,T}
@assert p >= 0 "Probability must be nonnegative"
X.data[k] = T(p)
X.valid = false
return p
end
"""
`X+Y`: sum of independent random variables.
"""
function (+)(X::RV, Y::RV)
S = typeof(first(vals(X)) + first(vals(Y)))
T = typeof(first(probs(X)) + first(probs(Y)))
Z = RV{S,T}()
for a in keys(X.data)
for b in keys(Y.data)
if !haskey(Z.data, a + b)
Z.data[a+b] = 0
end
Z.data[a+b] += X.data[a] * Y.data[b]
end
end
validate!(Z)
return Z
end
"""
`X-Y`: difference of independent random variables.
"""
(-)(X::RV, Y::RV) = X + (-Y)
"""
`a*X`: scalar multiple of the random variable `X`.
"""
function (*)(a::Number, X::RV)
S = typeof(first(vals(X)) * a)
T = typeof(first(probs(X)))
aX = RV{typeof(a),T}()
for k in vals(X)
aX[a*k] = X[k]
end
return aX
end
"""
`-X`: negative of a random variable.
"""
function (-)(X::RV{S,T}) where {S,T}
negone = -one(S)
return negone * X
end
# This implementation is somewhat inefficient
"""
`random_choice(X)` for a random variable `X` returns a value of `X`
according to its probability distribution. That is, the probability
a value `v` is returned is `X[v]`.
"""
function random_choice(X::RV)
validate!(X)
return random_choice(X.data)
end
"""
`report(X)` prints out a list of the values of `X` and their
associated probabilities
"""
function report(X::RV)
A = collect(vals(X))
try
sort!(A)
finally
for a in A
println("$a\t$(X[a])")
end
end
nothing
end
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | code | 2765 | module SimpleRandom
using LinearAlgebra
import Random.randperm
include("RV.jl")
export random_unit_vector, random_subset
"""
`random_unit_vector(d)` returns a random `d`-dimensional unit vector.
"""
function random_unit_vector(d::Int)
v = randn(d)
return v / norm(v)
end
"""
`random_subset` is used to create random subsets as follows:
+ `random_subset(A)`: random subset of `A` with each element
chosen with probability 1/2.
+ `random_subset(A,k)`: random `k`-element subset of `A`.
+ `random_subset(n)`: random subset of `1:n`.
+ `random_subset(n,k)`: random `k`-element subset of `1:n`.
"""
function random_subset(A::Union{Set,BitSet})
T = typeof(A)
B = T()
for a in A
if rand() < 0.5
push!(B, a)
end
end
return B
end
random_subset(n::Int) = random_subset(Set(1:n))
function random_subset(A::Union{Set,BitSet}, k::Int)
n = length(A)
if k < 0 || k > n
error("k = $k is out of range")
end
T = typeof(A)
B = T()
elements = collect(A)
p = randperm(n)
for j = 1:k
push!(B, elements[p[j]])
end
return B
end
function random_subset(n::Int, k::Int)
if n < 0 || k < 0 || k > n
error("n = $n and/or k = $k invalid")
end
x = randperm(n)
y = x[1:k]
return Set(y)
end
export random_choice
"""
`random_choice(weights)` randomly chooses a value from `1` to `n`,
where `n` is the number of elements in `weights`. The probability
that `k` is chosen is proportional to `weights[k]`. The `weights`
must be nonnegative and not all zero.
`random_choice(dict)` chooses a random key `k` from `dict` with weight
proportional to `dict[k]`. Thus, `dict` must be of type
`Dict{S, T<:Real}`.
"""
function random_choice(weights::Vector{T}) where {T<:Real}
vals = cumsum(weights)
vals /= vals[end]
idx = rand()
for k = 1:length(vals)
@inbounds if idx <= vals[k]
return k
end
end
error("Impropper input")
end
function random_choice(d::Dict{S,T}) where {S,T<:Real}
ks = collect(keys(d))
n = length(ks)
wts = [d[ks[j]] for j = 1:n]
idx = random_choice(wts)
return ks[idx]
end
import Distributions
export binom_rv, poisson_rv, exp_rv
"""
`binom_rv(n,p)` generates a binomial random value.
`p` defaults to `0.5`.
"""
binom_rv(n::Int, p::Real = 0.5) = rand(Distributions.Binomial(n, p))
"""
`poisson_rv(lambda)` generates a Poisson random value with
mean `lamba` (which defaults to `1.0`).
"""
poisson_rv(lambda::Real = 1.0) = rand(Distributions.Poisson(lambda))
"""
`exp_rv(theta)` returns an exponential random value with
mean `theta` (which defaults to `1.0`).
"""
exp_rv(theta::Real = 1.0) = rand(Distributions.Exponential(theta))
end # end of module
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | code | 144 | using PyPlot
"""
`histplot(x)` plots a histogram of the values in `x` and
`histplot(x,n)` gives a plot with `n` bins.
"""
histplot = plt[:hist]
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | code | 740 | using Test
using SimpleRandom, LinearAlgebra
A = random_subset(100)
@test length(A) <= 100
B = random_subset(Set(1:20), 5)
@test length(B) == 5
A = random_subset(20, 15)
@test length(A) == 15
wt = [1 / 2, 1 / 3, 1 / 6]
t = random_choice(wt)
@test 0 < t < 4
d = Dict{String,Float64}()
d["alpha"] = 0.5
d["gamma"] = 0.5
t = random_choice(d)
@test length(t) == 5
x = binom_rv(10)
@test 0 <= x <= 10
x = poisson_rv(0.25)
@test x >= 0
x = exp_rv(1.2)
@test x >= 0
X = RV{Int,Rational{Int}}()
X[1] = 1 // 2
X[2] = 1 // 3
X[3] = 1 // 6
@test E(X) == 1 // 2 + 2 // 3 + 3 // 6
@test Var(X) == 5 // 9
@test length(X) == 3
a = random_choice(X)
@test 0 < a < 4
@test sum(probs(X)) == 1
v = random_unit_vector(5)
@test 0.9999 <= norm(v) <= 1.0001
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | docs | 6452 | # SimpleRandom
This is a collection of Julia functions to make random things.
## Random Unit Vector
`random_unit_vector(d)` returns a random `d`-dimensional unit vector.
## Random Subsets
`random_subset` creates a random subset with the following variations:
+ `random_subset(A)`: create a random subset of `A` with each element
included with probability 0.5.
+ `random_subset(A,k)`: create a random `k`-element
subset of `A`.
+ `random_subset(n)`: create a random subset of `1:n`.
+ `random_subset(n,k)`: create a random `k`-element
subset of `1:n`.
## Random Selection
`random_choice` is used to select a number or object at random
according to some (finite, discrete distribution). We provide two
variants:
+ `random_choice(weights)` randomly chooses a value from `1` to `n`,
where `n` is the number of elements in `weights`. The probability
that `k` is chosen is proportional to `weights[k]`. The `weights`
must be nonnegative and not all zero.
+ `random_choice(dict)` chooses a random key `k` from `dict` with weight
proportional to `dict[k]`. Thus, `dict` must be of type `Dict{S, T<:Real}`.
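For example (illustrative):
```
julia> wt = [1/2, 1/3, 1/6];

julia> random_choice(wt)   # returns 1, 2, or 3 with the given probabilities
```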
### Notes
+ No error checking is done on the input. An error
might be raised for bad input, but that's not
guaranteed.
+ The implementation might be improved. If the size
of the argument is small, this is efficient enough.
But if `wts` (or `d`) has many elements, I probably
should do some sort of binary search through the vector
of cumulative sums.
## Histogram
The function `histplot(x)` creates a `PyPlot` bar chart giving a histogram
of the values in the list `x`. Called as `histplot(x,n)` creates such
a plot with `n` bins.
**Note**: This function has been moved to a separate file `histplot.jl` in
the `src` directory. I've been having some issues with `PyPlot` and
this function doesn't really apply to creating random things (but
rather to visualizing them).
## Distributions
**Note**: I'm just wrapping stuff found in `Distributions`.
Probably better just to use that package directly.
### Binomial
`binom_rv(n,p)` generates a random binomial random value. `p` defaults
to `0.5`.
### Poisson
`poisson_rv(lambda)` returns a Poisson random value with mean `lambda`
(which defaults to `1.0`).
### Exponential
`exp_rv(theta)` returns an exponential random value with
mean `theta` (which defaults to `1.0`).
# Random Variable Type
The `RV` type represents a random variable *with finite support*; that is,
the set of possible values produced by the random variable is finite. This
rules out continuous random variables and discrete random variables with
infinite support such as Poisson random variables.
## Defining a Random Variable
The user needs to specify the value type of the random variable
(which needs to be a `Number` type) and the data type for the probabilities
(which needs to be a `Real` type such as `Float64` or `Rational{Int}`).
For example, to define a random variable whose values are integers and
whose probabilities are rational numbers, we do this:
```
julia> using SimpleRandom
julia> X = RV{Int, Rational{Int}}()
RV{Int64,Rational{Int64}} with 0 values
```
Now let's imagine that we want the values of `X` to be in the
set {1,2,3} with probabilities 1/2, 1/4, and 1/4 respectively.
We can specify this in two ways.
First, we can directly enter the probabilities like this:
```
julia> X = RV{Int, Rational{Int}}()
RV{Int64,Rational{Int64}} with 0 values
julia> X[1]=1//2
1//2
julia> X[2]=1//4
1//4
julia> X[3]=1//4
1//4
julia> report(X)
1 1//2
2 1//4
3 1//4
```
Alternatively, we can enter values and have them automatically scaled
so that they add to 1.
```
julia> X = RV{Int, Rational{Int}}()
RV{Int64,Rational{Int64}} with 0 values
julia> X[1] = 2
2
julia> X[2] = 1
1
julia> X[3] = 1
1
julia> report(X)
1 1//2
2 1//4
3 1//4
```
Rescaling happens automatically any time the user/computer wants to
access the probability associated with a value. In this case, the
`report` function prints out the probabilities associated with each
value, so the rescaling took place behind the scenes when it was invoked.
Continuing this example, if we now enter `X[4]=1//2`, the probabilities
no longer sum to 1, so if we request the probability associated with a value,
the rescaling takes place.
```
julia> X[4] = 1//2
1//2
julia> X[4]
1//3
julia> report(X)
1 1//3
2 1//6
3 1//6
4 1//3
```
In summary, `X[v]=p` assigns probability `p` to the value `v`. Retrieving
a value invokes a rescaling operation (if needed) before the value is
returned. Note that if `v` is a value that has not been assigned a
probability, then `0` is returned.
## Functions
The following functions are provided:
+ `E(X)` returns the expected value of `X`.
+ `Var(X)` returns the variance of `X`.
+ `length(X)` returns the number of values to which probabilities have been assigned.
+ `vals(X)` returns an iterator to the values associated with `X`.
+ `probs(X)` returns an iterator to the probabilities associated with values in `X`.
+ `report(X)` prints a table consisting of the values and their associated probabilities.
+ `random_choice(X)` returns a random value `v` of `X` at random
with probability `X[v]`. This function is not efficient. Compare these
timings for generating an array of ten thousand binomial random
values:
```
julia> X = Binomial_RV(20,.5)
RV{Int64,Float64} with 21 values
julia> @time A = [ random_choice(X) for _=1:10_000 ];
0.024765 seconds (60.78 k allocations: 10.015 MiB, 83.96% compilation time)
julia> @time B = [ binom_rv(20,.5) for _=1:10_000];
0.009486 seconds (27.78 k allocations: 1.928 MiB, 91.27% compilation time)
```
## Operations
+ `a*X` where `a` is a number creates a new random variable by multiplying the values in `X` by `a`.
+ `X+Y` creates a new random variable that represents the sum of the random variables `X` and `Y` considered as independent.
Note that `2*X` is *not* the same as `X+X`.
+ `X-Y` is the difference of independent `X` and `Y`.
## Pre-made Random Variables
+ `Uniform_RV(n)` creates a random variable whose values are
in `1:n` each with probability `1//n`.
+ `Bernoulli_RV(p)` creates a random variable whose value is `0`
with probability `1-p` and `1` with probability `p`.
+ `Binomial_RV(n,p)` creates a random variable whose values are in `0:n`
with probability given by the binomial distribution. That is, the value
`k` has probability `binomial(n,k)*p^k*(1-p)^(n-k)`.
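For example (illustrative):
```
julia> X = Bernoulli_RV(1//4)
RV{Int64,Rational{Int64}} with 2 values

julia> E(X)
1//4
```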
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.3.2 | bb4f42b25b87f124478207a82f5b02dfafdb3e63 | docs | 6452 | # SimpleRandom
This is a collection of Julia functions to make random things.
## Random Unit Vector
`random_unit_vector(d)` returns a random `d`-dimensional unit vector.
## Random Subsets
`random_subset` creates a random subset with the following variations:
+ `random_subset(A)`: create a random subset of `A` with each element
included with probability 0.5.
+ `random_subset(A,k)`: create a random `k`-element
subset of `A`.
+ `random_subset(n)`: create a random subset of `1:n`.
+ `random_subset(n,k)`: create a random `k`-element
subset of `1:n`.
## Random Selection
`random_choice` is used to select a number or object at random
according to some (finite, discrete distribution). We provide two
variants:
+ `random_choice(weights)` randomly chooses a value from `1` to `n`,
where `n` is the number of elements in `weights`. The probability
that `k` is chosen is proportional to `weights[k]`. The `weights`
must be nonnegative and not all zero.
+ `random_choice(dict)` chooses a random key `k` from `dict` with weight
proportional to `dict[k]`. Thus, `dict` must be of type `Dict{S, T<:Real}`.
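For example (illustrative):
```
julia> wt = [1/2, 1/3, 1/6];

julia> random_choice(wt)   # returns 1, 2, or 3 with the given probabilities
```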
### Notes
+ No error checking is done on the input. An error
might be raised for bad input, but that's not
guaranteed.
+ The implementation might be improved. If the size
of the argument is small, this is efficient enough.
But if `wts` (or `d`) has many elements, I probably
should do some sort of binary search through the vector
of cumulative sums.
## Histogram
The function `histplot(x)` creates a `PyPlot` bar chart giving a histogram
of the values in the list `x`. Called as `histplot(x,n)` creates such
a plot with `n` bins.
**Note**: This function has been moved to a separate file `histplot.jl` in
the `src` directory. I've been having some issues with `PyPlot` and
this function doesn't really apply to creating random things (but
rather to visualizing them).
## Distributions
**Note**: I'm just wrapping stuff found in `Distributions`.
Probably better just to use that package directly.
### Binomial
`binom_rv(n,p)` generates a random binomial random value. `p` defaults
to `0.5`.
### Poisson
`poisson_rv(lambda)` returns a Poisson random value with mean `lambda`
(which defaults to `1.0`).
### Exponential
`exp_rv(theta)` returns an exponential random value with
mean `theta` (which defaults to `1.0`).
# Random Variable Type
The `RV` type represents a random variable *with finite support*; that is,
the set of possible values produced by the random variable is finite. This
rules out continuous random variables and discrete random variables with
infinite support such as Poisson random variables.
## Defining a Random Variable
The user needs to specify the value type of the random variable
(which needs to be a `Number` type) and the data type for the probabilities
(which needs to be a `Real` type such as `Float64` or `Rational{Int}`).
For example, to define a random variable whose values are integers and
whose probabilities are rational numbers, we do this:
```
julia> using SimpleRandom
julia> X = RV{Int, Rational{Int}}()
RV{Int64,Rational{Int64}} with 0 values
```
Now let's imagine that we want the values of `X` to be in the
set {1,2,3} with probabilities 1/2, 1/4, and 1/4 respectively.
We can specify this in two ways.
First, we can directly enter the probabilities like this:
```
julia> X = RV{Int, Rational{Int}}()
RV{Int64,Rational{Int64}} with 0 values
julia> X[1]=1//2
1//2
julia> X[2]=1//4
1//4
julia> X[3]=1//4
1//4
julia> report(X)
1 1//2
2 1//4
3 1//4
```
Alternatively, we can enter values and have them automatically scaled
so that they add to 1.
```
julia> X = RV{Int, Rational{Int}}()
RV{Int64,Rational{Int64}} with 0 values
julia> X[1] = 2
2
julia> X[2] = 1
1
julia> X[3] = 1
1
julia> report(X)
1 1//2
2 1//4
3 1//4
```
Rescaling happens automatically any time the user/computer wants to
access the probability associated with a value. In this case, the
`report` function prints out the probabilities associated with each
value, so the rescaling took place behind the scenes when it was invoked.
Continuing this example, if we now enter `X[4]=1//2`, the probabilities
no longer sum to 1, so if we request the probability associated with a value,
the rescaling takes place.
```
julia> X[4] = 1//2
1//2
julia> X[4]
1//3
julia> report(X)
1 1//3
2 1//6
3 1//6
4 1//3
```
In summary, `X[v]=p` assigns probability `p` to the value `v`. Retrieving
a value invokes a rescaling operation (if needed) before the value is
returned. Note that if `v` is a value that has not been assigned a
probability, then `0` is returned.
## Functions
The following functions are provided:
+ `E(X)` returns the expected value of `X`.
+ `Var(X)` returns the variance of `X`.
+ `length(X)` returns the number of values to which probabilities have been assigned.
+ `vals(X)` returns an iterator to the values associated with `X`.
+ `probs(X)` returns an iterator to the probabilities associated with values in `X`.
+ `report(X)` prints a table consisting of the values and their associated probabilities.
+ `random_choice(X)` returns a random value `v` of `X` at random
with probability `X[v]`. This function is not efficient. Compare these
timings for generating an array of ten thousand binomial random
values:
```
julia> X = Binomial_RV(20,.5)
RV{Int64,Float64} with 21 values
julia> @time A = [ random_choice(X) for _=1:10_000 ];
0.024765 seconds (60.78 k allocations: 10.015 MiB, 83.96% compilation time)
julia> @time B = [ binom_rv(20,.5) for _=1:10_000];
0.009486 seconds (27.78 k allocations: 1.928 MiB, 91.27% compilation time)
```
## Operations
+ `a*X` where `a` is a number creates a new random variable by multiplying the values in `X` by `a`.
+ `X+Y` creates a new random variable that represents the sum of the random variables `X` and `Y` considered as independent.
Note that `2*X` is *not* the same as `X+X`.
+ `X-Y` is the difference of independent `X` and `Y`.
## Pre-made Random Variables
+ `Uniform_RV(n)` creates a random variable whose values are
in `1:n` each with probability `1//n`.
+ `Bernoulli_RV(p)` creates a random variable whose value is `0`
with probability `1-p` and `1` with probability `p`.
+ `Binomial_RV(n,p)` creates a random variable whose values are in `0:n`
with probability given by the binomial distribution. That is, the value
`k` has probability `binomial(n,k)*p^k*(1-p)^(n-k)`.
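For example (illustrative):
```
julia> X = Bernoulli_RV(1//4)
RV{Int64,Rational{Int64}} with 2 values

julia> E(X)
1//4
```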
| SimpleRandom | https://github.com/scheinerman/SimpleRandom.jl.git |
|
[
"MIT"
] | 0.1.2 | ed381befe9b68a1e82fdd4357ba1127281fb8fa7 | code | 2484 | using Revise
using MathOptInterface
const MOI = MathOptInterface
export YasolVariable, YasolConstraint
# JUMP extensions
# variable extension
struct YasolVariable
info::JuMP.VariableInfo
quantifier::String
block::Int64
end
function JuMP.build_variable(
_error::Function,
info::JuMP.VariableInfo,
::Type{YasolVariable};
quantifier::String,
block::Int64,
kwargs...,
)
return YasolVariable(
info,
quantifier,
block,
)
end
function JuMP.add_variable(
model::JuMP.Model,
yasolVar::YasolVariable,
name::String,
)
var = JuMP.add_variable(
model,
JuMP.ScalarVariable(yasolVar.info),
name,
)
# add variable attributes to variable
MOI.set(model, YasolSolver.VariableAttribute("quantifier"), var, yasolVar.quantifier)
MOI.set(model, YasolSolver.VariableAttribute("block"), var, yasolVar.block)
# raise an error if a variable in the first block is not existential
if(yasolVar.block == 1 && yasolVar.quantifier != "exists")
@error string("Variables in the first block need to be existential! Please add a dummy variable!")
return
end
# check if quantifier is "exists" or "all"
if((yasolVar.quantifier != "exists") && (yasolVar.quantifier != "all"))
@error string("Variable quantifier has to be either 'exists' or 'all'!")
end
# check if block is an integer
if(!isinteger(yasolVar.block))
@error string("Variable blocks need to be of type integer!")
end
return var
end
# constraint extension
struct YasolConstraint
f::AffExpr
s::MOI.AbstractScalarSet
quantifier::String
end
function JuMP.build_constraint(
_error::Function,
f::AffExpr,
s::MOI.AbstractScalarSet,
::Type{YasolConstraint};
quantifier::String,
)
return YasolConstraint(f, s, quantifier)
end
function JuMP.add_constraint(
model::Model,
yasolCon::YasolConstraint,
name::String,
)
con = JuMP.add_constraint(
model,
ScalarConstraint(yasolCon.f, yasolCon.s),
name,
)
# add constraint attributes to constraint
MOI.set(model, YasolSolver.ConstraintAttribute("quantifier"), con, yasolCon.quantifier)
# check if quantifier is "exists" or "all"
if((yasolCon.quantifier != "exists") && (yasolCon.quantifier != "all"))
@error string("Constraint quantifier has to be either 'exists' or 'all'!")
end
return con
end
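# Example usage (illustrative sketch; `model`, `x`, and `y` are placeholders):
#
# @variable(model, x, YasolVariable, quantifier="exists", block=1)
# @constraint(model, x + y <= 1, YasolConstraint, quantifier="exists")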
| YasolSolver | https://github.com/MichaelHartisch/YasolSolver.jl.git |
|
[
"MIT"
] | 0.1.2 | ed381befe9b68a1e82fdd4357ba1127281fb8fa7 | code | 2651 | module YasolSolver
using Revise
import MathOptInterface
const MOI = MathOptInterface
using EzXML
include("MOI_wrapper/MOI_wrapper.jl")
include("JuMP.jl")
# change parameter in Yasol.ini file
function setInitialParameter(yasolDir::String, parameter::String, value::Int64)
# create temp copy
mv(joinpath(yasolDir, "Yasol.ini"), joinpath(yasolDir, "Yasol_temp.ini"))
# clear original file
open(joinpath(yasolDir, "Yasol.ini"), "w") do f
write(f, "")
end
# copy updated values
open(joinpath(yasolDir, "Yasol_temp.ini"), "r") do f
open(joinpath(yasolDir, "Yasol.ini"), "a") do i
while !eof(f)
x = readline(f)
par = rsplit(x, "=")
if par[1] == parameter
# overwrite parameter value
write(i, parameter * "=" * string(value) * "\n")
else
write(i, x * "\n")
end
end
end
end
# delete temp file
rm(joinpath(yasolDir, "Yasol_temp.ini"))
end
# return all initial parameter
function getInitialParameter(yasolDir::String)
result = []
open(joinpath(yasolDir, "Yasol.ini")) do file
for ln in eachline(file)
if ln != "END"
push!(result, ln)
end
end
end
return result
end
# import and return solution
function importSolution(solPath::String)
doc = readxml(solPath)
objective_value = 0.0
runtime = 0.0
solutionStatus = ""
gap = 0.0
values = Dict{String,Float64}()
for node in eachelement(doc.root)
if node.name == "header"
objective_value = parse(Float64, node["ObjectiveValue"])
runtime = parse(Float64, rsplit(node["Runtime"], " ")[1])
elseif node.name == "quality"
solutionStatus = node["SolutionStatus"]
gap = parse(Float64, node["Gap"])
elseif node.name == "variables"
for var in eachelement(node)
values[var["name"]] = parse(Float64, var["value"])
end
end
end
res = _Results(objective_value, runtime, solutionStatus, gap, values)
return res
end
# print solution
function printSolution(res::YasolSolver._Results)
println("---Solution---")
println("Objective value: " * string(res.objective_value))
println("Runtime: " * string(res.runtime))
println("Solution status: " * string(res.solutionStatus))
println("Gap: " * string(res.gap))
println("Variable values: ")
for (key, value) in res.values
println(key * ": " * string(value))
end
println("---End---")
end
end
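# Example usage (illustrative; the solver directory, parameter name, and solution
# path below are placeholders):
#
# YasolSolver.setInitialParameter("C:/Yasol", "someParameter", 1)
# sol = YasolSolver.importSolution("C:/Yasol/model.qlp.sol")
# YasolSolver.printSolution(sol)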
| YasolSolver | https://github.com/MichaelHartisch/YasolSolver.jl.git |
|
[
"MIT"
] | 0.1.2 | ed381befe9b68a1e82fdd4357ba1127281fb8fa7 | code | 25972 | using Revise
using MathOptInterface
const MOI = MathOptInterface
using JuMP
using DataStructures
### ============================================================================
### Objective expression
### ============================================================================
mutable struct _Objective
# constant value
constant::Float64
# terms
terms::Vector{MOI.ScalarAffineTerm{Float64}}
function _Objective(constant::Float64, terms::Vector{MOI.ScalarAffineTerm{Float64}})
return new(constant, terms)
end
end
### ============================================================================
### Variables
### ============================================================================
mutable struct _VariableInfo
# Index of the variable
index::MOI.VariableIndex
# The block that the variable appears in.
block::Int64
# The quantifier of the variable.
quantifier::String
function _VariableInfo(index::MOI.VariableIndex, block::Int64, quantifier::String)
return new(index, block, quantifier)
end
end
### ============================================================================
### Variable constraints
### ============================================================================
struct _VariableConstraintInfo
# Constraint index
index::MOI.ConstraintIndex
# Variable Index
vindex::MOI.VariableIndex
# Constraint set
conSet::Any
function _VariableConstraintInfo(ind::MOI.ConstraintIndex, vind::MOI.VariableIndex, set)
return new(ind, vind, set)
end
end
### ============================================================================
### Constraints
### ============================================================================
struct _ConstraintInfo
# Constraint index
index::MOI.ConstraintIndex
# Constraint ScalarAffineFunction
scalarAffineFunction::MOI.ScalarAffineFunction{Float64}
# Constraint set
conSet::Any
# Constraint quantifier
quantifier::String
function _ConstraintInfo(ind::MOI.ConstraintIndex, scalarAffFunc::MOI.ScalarAffineFunction{Float64}, set, quantifier)
return new(ind, scalarAffFunc, set, quantifier)
end
end
### ============================================================================
### Results
### ============================================================================
struct _Results
# status
#raw_status_string::String
#termination_status::MOI.TerminationStatusCode
objective_value::Float64
runtime::Float64
#decisionNodes::Int64
#propagationSteps::Int64
#learntConstraints::Int64
# quality
solutionStatus::String
gap::Float64
# variable values
values::Dict{String, Float64}
function _Results(obj::Float64, runtime::Float64, solStatus::String, gap::Float64, values::Dict{String, Float64})
return new(obj, runtime, solStatus, gap, values)
end
end
"""
AbstractSolverCommand
An abstract type that allows overriding the call behavior of the solver.
"""
abstract type AbstractSolverCommand end
"""
call_solver(
solver::AbstractSolverCommand,
qip_filename::String,
options::Vector{String},
stdin::IO,
stdout::IO,
)::String
Execute the `solver` given the QIP file at `qip_filename`, a vector of `options`,
and `stdin` and `stdout`. Return the filename of the resulting `.sol` file.
"""
function call_solver end
struct _DefaultSolverCommand{F} <: AbstractSolverCommand
f::F
end
function call_solver(
solver::_DefaultSolverCommand,
solverPath::String,
qip_filename::String,
options::Vector{Int64},
stdin::IO,
stdout::IO,
output::String,
)
# parse optimizer attributes, value for time_limit is -1 if not supposed to be set
cmd = ``
if options[2] == -1
cmd = `$(solverPath) $(qip_filename) $(options[1])`
else
cmd = `$(solverPath) $(qip_filename) $(options[1]) $(options[2])`
end
solver.f() do solver_path
ret = run(
pipeline(
cmd,
stdin = stdin,
stdout = output,
append=true,
stderr= output,
),
)
if ret.exitcode != 0
error("Nonzero exit code: $(ret.exitcode)")
end
end
end
_solver_command(x::String) = _DefaultSolverCommand(f -> f(x))
_solver_command(x::Function) = _DefaultSolverCommand(x)
_solver_command(x::AbstractSolverCommand) = x
### ============================================================================
### Optimizer
### ============================================================================
mutable struct Optimizer <: MOI.AbstractOptimizer
solver_command::AbstractSolverCommand
stdin::Any
stdout::Any
# result information
results::_Results
# solve time
solve_time::Float64
# Store MOI.Name().
name::String
# The objective expression.
o::_Objective
sense::MOI.OptimizationSense
#is_objective_set::Bool
# A vector of variable constraints
vc::Vector{_VariableConstraintInfo}
# A vector of constraints
c::Vector{_ConstraintInfo}
cNumber::Int64
# A dictionary of info for the variables.
v::Dict{MOI.VariableIndex,_VariableInfo}
# was optimizer called
optimize_not_called::Bool
# termination status
termination_status::MOI.TerminationStatusCode
# solver specific attributes
# time limit in seconds
time_limit::Int64
# output information level
output_info::Int64
# problem file name
problem_file::String
# name of solver .exe
solver_path::String
end
"""
Optimizer(
solver_command::Union{String,Function},
stdin::Any = stdin,
stdout::Any = stdout,
)
Create a new Optimizer object.
`solver_command`:
* A `String` of the full path of a Yasol executable.
Redirect IO using `stdin` and `stdout`. These arguments are passed to
`Base.pipeline`. See the Julia documentation for more details.
"""
function Optimizer(
solver_command::Union{AbstractSolverCommand,String,Function} = "",
stdin::Any = stdin,
stdout::Any = stdout,
)
return Optimizer(
_solver_command(solver_command),
stdin,
stdout,
_Results(
0.0,
0.0,
"",
0.0,
Dict{String,Float64}()
),
NaN,
"",
_Objective(0.0, MOI.ScalarAffineTerm{Float64}[]),
MOI.FEASIBILITY_SENSE,
_VariableConstraintInfo[],
_ConstraintInfo[],
0,
Dict{MOI.VariableIndex,_VariableInfo}(),
true,
MOI.OPTIMIZE_NOT_CALLED,
-1,
-1,
"",
""
)
end
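# Example (illustrative sketch; the executable path is a placeholder, while the raw
# attribute names are the ones read in `MOI.copy_to` below):
#
# using JuMP
# model = Model(() -> YasolSolver.Optimizer("/path/to/Yasol"))
# set_optimizer_attribute(model, "problem file name", "model.qlp")
# set_optimizer_attribute(model, "time limit", 60)
# set_optimizer_attribute(model, "output info", 1)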
Base.show(io::IO, ::Optimizer) = print(io, "A YASOL model")
MOI.get(model::Optimizer, ::MOI.SolverName) = "YASOL"
MOI.get(model::Optimizer, ::MOI.RawSolver) = model
MOI.supports(::Optimizer, ::MOI.Name) = true
MOI.get(model::Optimizer, ::MOI.Name) = model.name
MOI.get(model::Optimizer, ::MOI.NumberOfVariables) = length(model.v)
function MOI.empty!(model::Optimizer)
#model.results = _QIPResults()
model.solve_time = NaN
model.o = _Objective(0.0, MOI.ScalarAffineTerm{Float64}[])
model.sense = MOI.FEASIBILITY_SENSE
model.vc = _VariableConstraintInfo[]
model.c = _ConstraintInfo[]
model.cNumber = 0
model.v = Dict{MOI.VariableIndex,_VariableInfo}()
model.time_limit = -1
model.output_info = -1
model.problem_file = ""
model.optimize_not_called = true
return
end
function MOI.is_empty(model::Optimizer)
if isempty(model.vc) && isempty(model.c) && isempty(model.v)
return true
else
return false
end
end
# ========================================
# Supported constraints and objectives
# ========================================
const _SCALAR_FUNCTIONS = Union{
MOI.VariableIndex,
MOI.ScalarAffineFunction{Float64},
}
const _SCALAR_SETS = Union{
MOI.LessThan{Float64},
MOI.GreaterThan{Float64},
MOI.EqualTo{Float64},
MOI.Integer,
MOI.ZeroOne,
}
function MOI.supports_constraint(
::Optimizer,
::Type{<:_SCALAR_FUNCTIONS},
::Type{<:_SCALAR_SETS},
)
return true
end
MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true
MOI.supports(::Optimizer, ::MOI.ObjectiveFunction{<:MOI.ScalarAffineFunction}) = true
# ========================================
# Copy_to functionality. No incremental modification supported.
# ========================================
MOI.Utilities.supports_default_copy_to(::Optimizer, ::Bool) = false
function MOI.copy_to(
dest::Optimizer,
model::MOI.ModelLike;
copy_names::Bool = false,
)
mapping = MOI.Utilities.IndexMap()
# copy optimizer attributes
try
dest.time_limit = MOI.get(model, MOI.RawOptimizerAttribute("time limit"))
catch
dest.time_limit = -1
end
try
dest.output_info = MOI.get(model, MOI.RawOptimizerAttribute("output info"))
catch
dest.output_info = 1
end
try
dest.problem_file = MOI.get(model, MOI.RawOptimizerAttribute("problem file name"))
catch
@error string("Please provide a problem file name! No problem file could be written!")
return
end
# copy objective sense
dest.sense = MOI.get(model, MOI.ObjectiveSense())
# copy variables
# save the quantifier and block of the last variable
last_block = 0
last_quantifiers = []
for v in MOI.get(model, MOI.ListOfVariableIndices())
try
quantifier = MOI.get(model, YasolSolver.VariableAttribute("quantifier"), v)
block = MOI.get(model, YasolSolver.VariableAttribute("block"), v)
if quantifier !== nothing && block !== nothing
dest.v[v] = _VariableInfo(v, block, quantifier)
if block > last_block
last_block = block
end
else
@error string("You need to set a quantifier and a block for each variable when using Yasol solver!")
return
end
catch err
#println("You need to set a quantifier and a block for each variable when using Yasol solver!")
#@warn string("You need to set a quantifier and a block for each variable when using Yasol solver!")
@warn string(err)
end
mapping[v] = v
end
# raise an error if a variable in the last block is not existential
for v in MOI.get(model, MOI.ListOfVariableIndices())
try
quantifier = MOI.get(model, YasolSolver.VariableAttribute("quantifier"), v)
block = MOI.get(model, YasolSolver.VariableAttribute("block"), v)
if block == last_block
push!(last_quantifiers, quantifier)
end
catch err
@warn string(err)
end
end
for q in last_quantifiers
if q != "exists"
@error string("The variable in the last block needs to be existential! Please add a dummy variable!")
return
end
end
# copy objective function
F = MOI.get(model, MOI.ObjectiveFunctionType())
obj = MOI.get(model, MOI.ObjectiveFunction{F}())
temp = _Objective(obj.constant, obj.terms)
dest.o = temp
# copy constraints
for (F, S) in MOI.get(model, MOI.ListOfConstraintTypesPresent())
for ci in MOI.get(model, MOI.ListOfConstraintIndices{F,S}())
mapping[ci] = ci
f = MOI.get(model, MOI.ConstraintFunction(), ci)
s = MOI.get(model, MOI.ConstraintSet(), ci)
# get constraint quantifier
q = ""
q_temp = MOI.get(model, YasolSolver.ConstraintAttribute("quantifier"), ci)
if q_temp !== nothing
q = q_temp
else
q = ""
end
if typeof(f) == MathOptInterface.VariableIndex
vcon = _VariableConstraintInfo(ci, f, s)
push!(dest.vc, vcon)
else
con = _ConstraintInfo(ci, f, s, q)
push!(dest.c, con)
end
end
end
# count constraints
dest.cNumber = length(dest.c)
return mapping
end
# ========================================
# Write model to file.
# ========================================
function Base.write(io::IO, qipmodel::Optimizer)
# print objective sense
if qipmodel.sense == MOI.MIN_SENSE
println(io, "MINIMIZE")
elseif qipmodel.sense == MOI.MAX_SENSE
println(io, "MAXIMIZE")
end
# number of variables
numVar = length(qipmodel.v)
exist = []
all = []
binaries = []
generals = []
# print objective function
for term in qipmodel.o.terms
# print coefficient and variables
if term.coefficient < 0.0
print(io, string(term.coefficient) * "x" * string(term.variable.value) * " ")
elseif term.coefficient > 0.0
print(io, "+" * string(term.coefficient) * "x" * string(term.variable.value) * " ")
end
end
# print constant value if != 0
if qipmodel.o.constant < 0.0
print(io, string(qipmodel.o.constant))
elseif qipmodel.o.constant > 0.0
print(io, "+ " * string(qipmodel.o.constant))
end
println(io, "")
# print constraints
println(io, "SUBJECT TO")
for con in qipmodel.c
# print quantifier if set
if con.quantifier !== ""
if con.quantifier === "exists"
print(io, "E_Constraint" * string(con.index.value) * ": ")
elseif con.quantifier === "all"
print(io, "U_Constraint" * string(con.index.value) * ": ")
end
end
# print terms
for term in con.scalarAffineFunction.terms
if term.coefficient < 0.0
print(io, string(term.coefficient) * "x" * string(term.variable.value) * " ")
elseif term.coefficient > 0.0
print(io, "+" * string(term.coefficient) * "x" * string(term.variable.value) * " ")
end
end
temp = -con.scalarAffineFunction.constant
"""
# print constant
if con.scalarAffineFunction.constant < 0.0
#print(io, string(con.scalarAffineFunction.constant))
temp+= (con.scalarAffineFunction.constant)*-1
else con.scalarAffineFunction.constant > 0.0
#print(io, "+ " * string(con.scalarAffineFunction.constant))
temp+= (con.scalarAffineFunction.constant)*-1
end
"""
# print set
if typeof(con.conSet) == MathOptInterface.GreaterThan{Float64}
if temp != 0.0
print(io, ">= " * string((con.conSet.lower) + temp))
else
print(io, ">= " * string((con.conSet.lower)))
end
elseif typeof(con.conSet) == MathOptInterface.LessThan{Float64}
if temp != 0.0
print(io, "<= " * string((con.conSet.upper) + temp))
else
print(io, "<= " * string((con.conSet.upper)))
end
end
println(io, "")
end
# print variable bounds
println(io, "BOUNDS")
for i in 1:numVar
lower = -9999.9
upper = 9999.9
type = nothing
for varCon in qipmodel.vc
if varCon.vindex.value == i
if typeof(varCon.conSet) == MathOptInterface.Integer
push!(generals, "x"*string(i))
type = "int"
elseif typeof(varCon.conSet) == MathOptInterface.ZeroOne
push!(binaries, "x"*string(i))
type = "binary"
elseif typeof(varCon.conSet) == MathOptInterface.GreaterThan{Float64}
lower = varCon.conSet.lower
elseif typeof(varCon.conSet) == MathOptInterface.LessThan{Float64}
upper = varCon.conSet.upper
end
end
end
# show warning, if variable has no lower or upper bound
if (lower == -9999.9 || upper == 9999.9)
@error string("Every variable needs to be bounded from above and below (binary variables as well)!")
return
end
# write bounds
println(io, string(lower) * " <= " * "x" * string(i) * " <= " * string(upper))
end
# check, if all variables are integer or binary
for a in all
if !(a in binaries) && !(a in generals)
@error string("All variables need to be binary or integer!")
return
end
end
# print binaries
println(io, "BINARIES")
bin = ""
for b in binaries
bin *= b * " "
end
println(io, bin)
# print generals
println(io, "GENERALS")
gen = ""
for g in generals
gen *= g * " "
end
println(io, gen)
# print exists
println(io, "EXISTS")
exists = ""
for i in 1:MOI.get(qipmodel, MOI.NumberOfVariables())
if qipmodel.v[MOI.VariableIndex(i)].quantifier === "exists"
exists = exists * "x" * string(i) * " "
end
end
println(io, exists)
# print all
println(io, "ALL")
all = ""
for i in 1:MOI.get(qipmodel, MOI.NumberOfVariables())
if qipmodel.v[MOI.VariableIndex(i)].quantifier === "all"
all = all * "x" * string(i) * " "
end
end
println(io, all)
# print order
println(io, "ORDER")
order = ""
tempDict = OrderedDict{Int64,Int64}()
for i in 1:MOI.get(qipmodel, MOI.NumberOfVariables())
tempDict[i] = qipmodel.v[MathOptInterface.VariableIndex(i)].block
end
# sort temp dict
last_block = 0
last_block_variables = []
sorted = sort!(tempDict, byvalue=true)
# build order string
for (key, value) in sorted
order = order * "x" * string(key) * " "
if value > last_block
last_block = value
end
end
# save all last block variables
for (key, value) in sorted
if value == last_block
push!(last_block_variables, key)
end
end
# make sure that continuous variables are only allowed in the last block
for (key, value) in sorted
if value != last_block
if !("x"*string(key) in generals) && !("x"*string(key) in binaries)
@error string("Continuos variables are only allowed in the last block")
return
end
end
end
#@warn string(last_block)
#@warn string(last_block_variables)
println(io, order)
println(io, "END")
end
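# For reference, `Base.write` emits Yasol's QLP text layout; a written model
# looks roughly like this (the values below are illustrative only, not taken
# from a specific model):
#
# MAXIMIZE
# +1.0x1 +1.0x2
# SUBJECT TO
# U_Constraint2: +1.0x1 +1.0x2 <= 2.0
# BOUNDS
# 0.0 <= x1 <= 1.0
# 0.0 <= x2 <= 1.0
# BINARIES
# x1 x2
# GENERALS
#
# EXISTS
# x1
# ALL
# x2
# ORDER
# x1 x2
# END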
# ========================================
# Call yasol solver with problem file and parameters.
# ========================================
function MOI.optimize!(model::Optimizer)
# check if problem file name is set, show warning otherwise
if model.problem_file == ""
@error string("Please provide a problem file name! No problem file could be written!")
return
end
# check if Yasol.ini file is given in the solver folder
path = joinpath(pwd(), "Yasol.ini")
if !isfile(String(path))
@warn string("No Yasol.ini file was found in the solver folder!")
end
# check if Yasol .exe is available under given path
if !isfile(String(model.solver_path*".exe")) && !isfile(String(model.solver_path))
@error string("No Yasol executable was found under the given path!")
return
end
model.optimize_not_called = false
options = [model.output_info, model.time_limit]
start_time = time()
# create problem file
qlp_file = joinpath(pwd(), model.problem_file)
output_file = joinpath(pwd(), (rsplit(model.problem_file, ".")[1]) * "_output.txt")
touch(output_file)
open(io -> write(io, model), qlp_file, "w")
# call solver
try
call_solver(
model.solver_command,
model.solver_path,
qlp_file,
options,
model.stdin,
model.stdout,
output_file,
)
# read solution & set results
model.results = YasolSolver.importSolution(qlp_file * ".sol")
if model.results.solutionStatus == "OPTIMAL"
model.termination_status = MOI.OPTIMAL
end
catch err
# TODO show error in results
@warn string(err)
end
model.solve_time = time() - start_time
return
end
function MOI.write_to_file(model::Optimizer, filename::String)
open(io -> write(io, model), filename, "w")
return
end
# ========================================
# Model attributes
# ========================================
MOI.supports(::Optimizer, ::MOI.RawOptimizerAttribute) = true
function MOI.set(model::Optimizer, param::MOI.RawOptimizerAttribute, value)
if param == MOI.RawOptimizerAttribute("output info")
model.output_info = Int64(value)
elseif param == MOI.RawOptimizerAttribute("time limit")
model.time_limit = Int64(value)
elseif param == MOI.RawOptimizerAttribute("problem file name")
model.problem_file = String(value)
# check if problem file already exists
if isfile(String(value))
@warn "A file with the chosen name already exists. You are about to overwrite that file."
end
# check if solution file already exists
if isfile(String(value) * ".sol")
@warn "A solution file for the problem already exists. If you create another solution with the same name, you cannot import the new solution using JuMP."
end
elseif param == MOI.RawOptimizerAttribute("solver path")
model.solver_path = String(value)
end
return
end
function MOI.get(model::Optimizer, param::MOI.RawOptimizerAttribute)
if param == MOI.RawOptimizerAttribute("output info")
return model.output_info
elseif param == MOI.RawOptimizerAttribute("time limit")
return model.time_limit
elseif param == MOI.RawOptimizerAttribute("problem file name")
return model.problem_file
elseif param == MOI.RawOptimizerAttribute("solver path")
return model.solver_path
end
end
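# Usage sketch (attribute names as handled above; `model` is an `Optimizer`):
#
#     MOI.set(model, MOI.RawOptimizerAttribute("time limit"), 60)
#     MOI.get(model, MOI.RawOptimizerAttribute("time limit"))  # -> 60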
# ========================================
# Variable attributes
# ========================================
struct VariableAttribute <: MOI.AbstractVariableAttribute
name::String
end
function MOI.supports(
model::Optimizer,
attr::VariableAttribute,
::Type{MOI.VariableIndex},
)
if attr.name === "quantifier" || attr.name === "block"
return true
else
return false
end
end
function MOI.get(
model::Optimizer,
attr::VariableAttribute,
vi::MOI.VariableIndex,
)
if attr === "quantifier"
return model.v[vi].quantifier
elseif attr === "block"
return model.v[vi].block
end
end
# variable attribute 'quantifier'
function MOI.set(
model::Optimizer,
attr::VariableAttribute,
vi::MOI.VariableIndex,
value::String,
)
if attr === "quantifier" && value === "all"
model.v[vi].quantifier = "all"
elseif attr === "quantifier" && value === "exists"
model.v[vi].quantifier = "exists"
end
return
end
# variable attribute 'block'
function MOI.set(
model::Optimizer,
attr::VariableAttribute,
vi::MOI.VariableIndex,
value::Int,
)
if attr === "block"
model.v[vi].block = Int64(value)
end
return
end
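# Usage sketch (with a hypothetical variable index `vi`):
#
#     MOI.set(model, VariableAttribute("quantifier"), vi, "all")
#     MOI.set(model, VariableAttribute("block"), vi, 2)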
# ========================================
# Constraint attributes
# ========================================
struct ConstraintAttribute <: MOI.AbstractConstraintAttribute
name::String
end
function MOI.supports(
model::Optimizer,
attr::ConstraintAttribute,
::Type{<:MOI.ConstraintIndex},
)
if attr.name === "quantifier"
return true
else
return false
end
end
function MOI.get(
model::Optimizer,
attr::ConstraintAttribute,
ci::MOI.ConstraintIndex,
)
if attr === "quantifier"
for con in model.c
if con.index == ci
return con.quantifier
end
end
end
end
function MOI.set(
model::Optimizer,
attr::ConstraintAttribute,
ci::MOI.ConstraintIndex,
value::String,
)
if attr === "quantifier"
for con in model.c
if con.index == ci
con.quantifier = value
end
end
end
return
end
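# Usage sketch (with a hypothetical constraint index `ci`):
#
#     MOI.set(model, ConstraintAttribute("quantifier"), ci, "exists")
#     MOI.get(model, ConstraintAttribute("quantifier"), ci)  # -> "exists"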
# ========================================
# Solution & TerminationStatus
# ========================================
MOI.supports(::Optimizer, ::MOI.TerminationStatus) = true
MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true
# return termination status
function MOI.get(model::Optimizer, attr::MOI.TerminationStatus)
try
if model.optimize_not_called
return MOI.OPTIMIZE_NOT_CALLED
elseif model.results.solutionStatus == "OPTIMAL"
return MOI.OPTIMAL
elseif model.results.solutionStatus == "INFEASIBLE"
return MOI.INFEASIBLE
elseif model.results.solutionStatus == "INCUMBENT"
return MOI.TIME_LIMIT
else
return MOI.OTHER_ERROR
end
catch err
println(err)
end
end
# return solve time
function MOI.get(model::Optimizer, attr::MOI.SolveTimeSec)
try
return Float64(model.results.runtime)
catch err
println(err)
end
end
# return objective value
function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)
try
return model.results.objective_value
catch err
println(err)
end
end
# return variable value
function MOI.get(
model::Optimizer,
attr::MOI.VariablePrimal,
x::MOI.VariableIndex,
)
try
return model.results.values["x" * string(x.value)]
catch err
println(err)
end
end
module TestMOIWrapper
using Test
using YasolSolver
using JuMP
using MathOptInterface
const MOI = MathOptInterface
function runtests()
for name in names(@__MODULE__; all = true)
if !startswith("$(name)", "test_")
continue
end
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
function test_rawOptimizerAttributes()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "FullTest.qlp")
@test get_optimizer_attribute(model, "time limit") == 60
@test get_optimizer_attribute(model, "output info") == 1
@test get_optimizer_attribute(model, "problem file name") == "FullTest.qlp"
end
function test_solverName()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@test MOI.get(model, MOI.SolverName()) == "YASOL"
end
function test_numberOfVariables()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@test MOI.get(model, MOI.NumberOfVariables()) == 0
@variable(model, x1, integer=true, lower_bound=0, upper_bound=2)
@test MOI.get(model, MOI.NumberOfVariables()) == 1
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
@test MOI.get(model, MOI.NumberOfVariables()) == 2
end
function test_OptSense()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@variable(model, x1, integer=true, lower_bound=0, upper_bound=2)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
@constraint(model, con1, x1 + x2 <= 10)
@objective(model, Max, x1+2x2)
@test MOI.get(model, MOI.ObjectiveSense()) == MAX_SENSE
end
function test_OptFunctionType()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@variable(model, x1, integer=true, lower_bound=0, upper_bound=2)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
@constraint(model, con1, x1 + x2 <= 10)
@objective(model, Max, x1+2x2)
@test MOI.get(model, MOI.ObjectiveFunctionType()) == MathOptInterface.ScalarAffineFunction{Float64}
end
function test_variableAttributes()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
@test MOI.get(model, YasolSolver.VariableAttribute("quantifier"), x1) == "all"
@test MOI.get(model, YasolSolver.VariableAttribute("block"), x1) == 2
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=1)
@test MOI.get(model, YasolSolver.VariableAttribute("quantifier"), x2) == "exists"
@test MOI.get(model, YasolSolver.VariableAttribute("block"), x2) == 1
end
function test_constraintAttributes()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=1)
@constraint(model, con1, -1*x1 +2*x2<=10, YasolConstraint, quantifier="all")
@constraint(model, con2, -1*x1 +2*x2<=20, YasolConstraint, quantifier="exists")
@test MOI.get(model, YasolSolver.ConstraintAttribute("quantifier"), con1) == "all"
@test MOI.get(model, YasolSolver.ConstraintAttribute("quantifier"), con2) == "exists"
end
function test_status()
model = Model(() -> YasolSolver.Optimizer("C:/Yasol/Yasol_CLP"))
@test MOI.get(model, MOI.TerminationStatus()) == OPTIMIZE_NOT_CALLED
end
end
TestMOIWrapper.runtests()
"""
For local testing
# change yasol initial parameter
yasol.setInitialParameter("C:/Yasol", "writeOutputFile", 1)
# get initial parameter
@show yasol.getInitialParameter("C:/Yasol")
MAXIMIZE
1x1 +1x2 +1x3
SUBJECT TO
-2x2 -1x3 <= -2
-1x1 +2x2 +1x3 <= 2
2x1 + 4x2 <= 6
BOUNDS
0 <= x1 <= 2
0 <= x2 <= 1
0 <= x3 <= 2
GENERAL
x1
BINARY
x2
EXISTS
x1 x3
ALL
x2
ORDER
x1 x2 x3
END
# define model
cd("C:/Yasol")
model = Model(() -> yasol.Optimizer("C:/Yasol/Yasol_CLP"))
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "Test08122021.qlp")
@variable(model, x1, integer=true, lower_bound=0, upper_bound=2)
MOI.set(model, yasol.VariableAttribute("quantifier"), x1, "exists")
MOI.set(model, yasol.VariableAttribute("block"), x1, 1)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1)
MOI.set(model, yasol.VariableAttribute("quantifier"), x2, "all")
MOI.set(model, yasol.VariableAttribute("block"), x2, 2)
@variable(model, x3, lower_bound=0, upper_bound=2)
MOI.set(model, yasol.VariableAttribute("quantifier"), x3, "exists")
MOI.set(model, yasol.VariableAttribute("block"), x3, 3)
@constraint(model, con1, -2*x2 -1x3 <= -2)
@constraint(model, con2, -1*x1 +2*x2 +1*x3 <= 2)
@constraint(model, con3, 2x1 + 4x2 <= 6)
@objective(model, Max, 1*x1 +1*x2 +1*x3)
optimize!(model)
# import solution
solution = yasol.importSolution("C:/Yasol/Test08122021.qlp.sol")
@show solution
yasol.printSolution(solution)
@show termination_status(model)
@show value(x1)
@show objective_value(model)
@show solve_time(model)
# access model parameters
print(model.moi_backend.optimizer.model)
print(model.moi_backend.optimizer.model.optimizer.is_objective_set)
print(model.moi_backend.model_cache.optattr.keys)
print(model.moi_backend.model)
MOI.get(model, yasol.VariableAttribute("quantifier"), x1)
MOI.get(model, yasol.VariableAttribute("block"), x1)
MOI.get(model, yasol.ConstraintAttribute("quantifier"), con3)
get_optimizer_attribute(model, "time limit")
get_optimizer_attribute(model, "output info")
# using constraints that have attributes
MINIMIZE
-x1 -2x2 +2x3 +x4
SUBJECT TO
E_Constraint1: x1 -2x2 +x3 -x4 <= 1
E_Constraint2: x1 +x2 +x3 -x4 <= 2
U_Constraint1: x1 +x2 +x3 <= 2
BOUNDS
0 <= x1 <= 1
0 <= x2 <= 1
0 <= x3 <= 1
0 <= x4 <= 1
BINARIES
x1 x2 x3 x4
EXISTS
x1 x2 x4
ALL
x3
ORDER
x1 x2 x3 x4
END
# define model
cd("C:/Yasol")
model = Model(() -> yasol.Optimizer("C:/Yasol/Yasol_CLP"))
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "ConstraintExt.qlp")
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1)
MOI.set(model, yasol.VariableAttribute("quantifier"), x1, "exists")
MOI.set(model, yasol.VariableAttribute("block"), x1, 1)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1)
MOI.set(model, yasol.VariableAttribute("quantifier"), x2, "exists")
MOI.set(model, yasol.VariableAttribute("block"), x2, 2)
@variable(model, x3, binary=true, lower_bound=0, upper_bound=1)
MOI.set(model, yasol.VariableAttribute("quantifier"), x3, "all")
MOI.set(model, yasol.VariableAttribute("block"), x3, 3)
@variable(model, x4, binary=true, lower_bound=0, upper_bound=1)
MOI.set(model, yasol.VariableAttribute("quantifier"), x4, "exists")
MOI.set(model, yasol.VariableAttribute("block"), x4, 4)
@constraint(model, con1, 1*x1 -2*x2 +1*x3 -1*x4 <= 1)
MOI.set(model, yasol.ConstraintAttribute("quantifier"), con1, "exists")
@constraint(model, con2, 1*x1 + 1*x2 +1*x3 -1*x4 <= 2)
MOI.set(model, yasol.ConstraintAttribute("quantifier"), con2, "exists")
@constraint(model, con3, 1*x1 + 1*x2 +1*x3 <= 2)
MOI.set(model, yasol.ConstraintAttribute("quantifier"), con3, "all")
@objective(model, Min, -1*x1 -2*x2 +2*x3 +1x4)
optimize!(model)
# import solution
solution = yasol.importSolution("C:/Yasol/ConstraintExt.qlp.sol")
@show solution
yasol.printSolution(solution)
#print(solver_name(model))
#@show model
#print(model)
#write_to_file(model, "model.mps")
# using JuMP extension
cd("C:/Yasol")
model = Model(() -> yasol.Optimizer("C:/Yasol/Yasol_CLP"))
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "Test2-23_11.qlp")
@variable(model, x1, integer=true, lower_bound=0, upper_bound=2, YasolVariable, quantifier="exists", block=1)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
@variable(model, x3, lower_bound=0, upper_bound=2, YasolVariable, quantifier="exists", block=3)
@constraint(model, con1, -2*x2 -1x3 + 8 <= -2 + 2*x2)
@constraint(model, con2, -1*x1 +2*x2 +1*x3 >= -6 + 2*x2, YasolConstraint, quantifier="all")
@constraint(model, con3, 2x1 + 4x2 <= 6)
@objective(model, Max, 1*x1 +1*x2 +1*x3)
optimize!(model)
# import solution
solution = yasol.importSolution("C:/Yasol/Test25112021.qlp.sol")
@show solution
yasol.printSolution(solution)
@show termination_status(model)
@show value(x3)
@show objective_value(model)
@show solve_time(model)
# use full JuMP extension
MINIMIZE
-x1 -2x2 +2x3 +x4
SUBJECT TO
E_Constraint1: x1 -2x2 +x3 -x4 <= 1
E_Constraint2: x1 +x2 +x3 -x4 <= 2
U_Constraint1: x1 +x2 +x3 <= 2
BOUNDS
0 <= x1 <= 1
0 <= x2 <= 1
0 <= x3 <= 1
0 <= x4 <= 1
BINARIES
x1 x2 x3 x4
EXISTS
x1 x2 x4
ALL
x3
ORDER
x1 x2 x3 x4
END
# define model
cd("C:/Yasol")
model = Model(() -> yasol.Optimizer("C:/Yasol/Yasol_CLP"))
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "ConstraintExt.qlp")
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=1)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=2)
@variable(model, x3, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=3)
@variable(model, x4, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=4)
@constraint(model, con1, 1*x1 -2*x2 +1*x3 -1*x4 +5 <= -1*x1 -3, YasolConstraint, quantifier="exists")
@constraint(model, con2, 1*x1 + 1*x2 +1*x3 -1*x4 -2 <= 2 - 2*x2, YasolConstraint, quantifier="exists")
@constraint(model, con3, 1*x1 + 1*x2 +1*x3 <= 1 + x3, YasolConstraint, quantifier="all")
@objective(model, Min, -1*x1 -2*x2 +2*x3 +1x4 - 5)
optimize!(model)
@show termination_status(model)
@show value(x1)
@show objective_value(model)
@show solve_time(model)
"""
using YasolSolver
using Test
using JuMP
using MathOptInterface
const MOI = MathOptInterface
@testset "YasolSolver.jl" begin
include("MOI_wrapper.jl")
"""
cd("C:/Yasol")
model = Model(() -> YasolSolver.Optimizer())
set_optimizer_attribute(model, "solver path", "C:/Yasol/Yasol_CLP_VersionX")
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "Test3.qlp")
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=1)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=2)
@variable(model, x3, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=3)
@variable(model, x4, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=4)
@constraint(model, con1, 1*x1 -2*x2 +1*x3 -1*x4 +5 <= -1*x1 -3, YasolConstraint, quantifier="exists")
@constraint(model, con2, 1*x1 + 1*x2 +1*x3 -1*x4 -2 <= 2 - 2*x2, YasolConstraint, quantifier="exists")
@constraint(model, con3, 1*x1 + 1*x2 +1*x3 <= 1 + x3, YasolConstraint, quantifier="all")
@objective(model, Min, -1*x1 -2*x2 +2*x3 +1x4)
optimize!(model)
cd("C:/Yasol")
model = Model(() -> YasolSolver.Optimizer())
set_optimizer_attribute(model, "solver path", "C:/Yasol/Yasol_CLP_VersionX")
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "Test.qlp")
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=1)
@variable(model, x2, integer=true, lower_bound=0, upper_bound=5, YasolVariable, quantifier="exists", block=2)
@variable(model, x3, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=3)
@variable(model, x4, integer=true, lower_bound=0, upper_bound=8, YasolVariable, quantifier="exists", block=4)
@constraint(model, con1, 1*x1 -2*x2 +1*x3 -1*x4 +5 <= -1*x1 -3, YasolConstraint, quantifier="exists")
@constraint(model, con2, 1*x1 + 1*x2 +1*x3 -1*x4 -2 <= 2 - 2*x2, YasolConstraint, quantifier="exists")
@constraint(model, con3, 1*x1 + 1*x2 +1*x3 <= 1 + x3, YasolConstraint, quantifier="all")
@objective(model, Min, -1*x1 -2*x2 +2*x3 +1x4 - 5)
optimize!(model)
"""
end
# YasolSolver.jl
[](https://github.com/hendrikbecker99/YasolSolver.jl/actions/workflows/CI.yml)
[](https://ci.appveyor.com/project/hendrikbecker99/yasolsolver-jl)
[](https://codecov.io/gh/hendrikbecker99/YasolSolver.jl)
YasolSolver.jl is an interface between [MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl)
and the [Yasol solver](http://tm-server-2.wiwi.uni-siegen.de/t3-q-mip/index.php?id=2).
Please consult the [solver's website](http://tm-server-2.wiwi.uni-siegen.de/t3-q-mip/index.php?id=1) for further information about the solver and how to build models.
## Installation
First, download the Yasol solver from [here](http://tm-server-2.wiwi.uni-siegen.de/t3-q-mip/index.php?id=4).
Second, install the Yasol interface using `Pkg.add`.
```julia
import Pkg
Pkg.add("YasolSolver")
```
## Use with JuMP
Models can be built using the [JuMP.jl](https://github.com/jump-dev/JuMP.jl) package and will be solved using the Yasol interface and the Yasol solver.
This can be done using the ``YasolSolver.Optimizer`` object. Here is how to create a
*JuMP* model that uses Yasol as the solver.
```julia
using JuMP, YasolSolver
cd("C:/Yasol") # change path to Yasol .exe directory
model = Model(() -> YasolSolver.Optimizer()) # create a JuMP model backed by the Yasol optimizer
set_optimizer_attribute(model, "solver path", "C:/Yasol/Yasol_CLP")
set_optimizer_attribute(model, "time limit", 60)
set_optimizer_attribute(model, "output info", 1)
set_optimizer_attribute(model, "problem file name", "Example.qlp")
```
The solver supports four attributes that can be used with JuMP:
* solver path -> defines the path to the Yasol executable
* time limit -> defines the time limit in seconds
* output info -> defines output level (1 is recommended)
* problem file name -> defines filename of model; solution file will have the same name
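For example, the attribute values set in the snippet above can be read back:
```julia
get_optimizer_attribute(model, "time limit")        # 60
get_optimizer_attribute(model, "output info")       # 1
get_optimizer_attribute(model, "problem file name") # "Example.qlp"
```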
Further, the solver-specific parameters saved in *Yasol.ini* can be set and retrieved as follows:
```julia
# change Yasol initial parameter
# format: solver directory, parameter name, value
YasolSolver.setInitialParameter("C:/Yasol", "writeOutputFile", 1)
# get initial parameter
# format: solver directory
@show YasolSolver.getInitialParameter("C:/Yasol")
```
**Note: Do not change the default parameters without knowing their purpose!**
## Build and solve a JuMP model
Do the following to build and solve a JuMP model using Yasol solver:
```julia
@variable(model, x1, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=1)
@variable(model, x2, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=2)
@variable(model, x3, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=3)
@variable(model, x4, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="exists", block=4)
@constraint(model, con1, 1*x1 -2*x2 +1*x3 -1*x4 <= 1, YasolConstraint, quantifier="exists")
@constraint(model, con2, 1*x1 + 1*x2 +1*x3 -1*x4 <= 2, YasolConstraint, quantifier="exists")
@constraint(model, con3, 1*x1 + 1*x2 +1*x3 <= 2, YasolConstraint, quantifier="all")
@objective(model, Min, -1*x1 -2*x2 +2*x3 +1x4)
optimize!(model)
```
## Solver specific variable and constraint extensions
The package provides two JuMP extensions that are used in the example above:
##### YasolVariable
To use Yasol variables, the keyword ``YasolVariable`` needs to be used, followed by the parameter
``quantifier``, which can take the values 'exists' or 'all', and the parameter ``block``, which needs to be an integer >= 1.
Every variable can either be a binary or an integer variable.
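For example (a minimal sketch; `model` is set up as in the examples above):
```julia
@variable(model, y, binary=true, lower_bound=0, upper_bound=1, YasolVariable, quantifier="all", block=2)
```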
##### YasolConstraint
To use Yasol constraints, the keyword ``YasolConstraint`` needs to be used, followed by the parameter ``quantifier``, which can take the values 'exists' or 'all'. Constraints can also be used without the constraint extension.
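For example (a minimal sketch using variables declared as above):
```julia
@constraint(model, con, 1*x1 + 1*y <= 2, YasolConstraint, quantifier="all")
```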
## Read solution
After calling the optimize function, the solution will be available in the selected project directory. Additionally, the solution can be accessed using JuMP the following way:
```julia
@show termination_status(model)
@show value(x1)
@show objective_value(model)
@show solve_time(model)
```
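The written solution file can also be imported and inspected directly (a sketch, assuming the problem file name `Example.qlp` from above):
```julia
solution = YasolSolver.importSolution("Example.qlp.sol")
YasolSolver.printSolution(solution)
```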
# Use
#
# DOCUMENTER_DEBUG=true julia --color=yes make.jl local [nonstrict] [fixdoctests]
#
# for local builds.
using Documenter
using ValueShapes
makedocs(
sitename = "ValueShapes",
modules = [ValueShapes],
format = Documenter.HTML(
prettyurls = !("local" in ARGS),
canonical = "https://oschulz.github.io/ValueShapes.jl/stable/"
),
pages=[
"Home" => "index.md",
"API" => "api.md",
"LICENSE" => "LICENSE.md",
],
doctest = ("fixdoctests" in ARGS) ? :fix : true,
linkcheck = !("nonstrict" in ARGS),
warnonly = ("nonstrict" in ARGS),
)
deploydocs(
repo = "github.com/oschulz/ValueShapes.jl.git",
forcepush = true,
push_preview = true,
)
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
__precompile__(true)
"""
ValueShapes
Provides a Julia API to describe the shape of values, like scalars, arrays
and structures.
"""
module ValueShapes
using Base: @propagate_inbounds
using ArgCheck
using ArraysOfArrays
using ChangesOfVariables
using Distributions
using ElasticArrays
using FillArrays
using InverseFunctions
using Random
using Statistics
using StatsBase
using DensityInterface
import Adapt
import ChainRulesCore
import IntervalSets
import Tables
import TypedTables
# Long-term, ChainRulesCore should be sufficient:
import ZygoteRules
using ChainRulesCore: AbstractTangent, Tangent, AbstractZero, NoTangent, ZeroTangent
using ChainRulesCore: AbstractThunk, ProjectTo, unthunk, backing
using Random123: Philox4x
include("tangent_utils.jl")
include("value_shape.jl")
include("value_accessor.jl")
include("scalar_shape.jl")
include("array_shape.jl")
include("const_value_shape.jl")
include("named_tuple_shape.jl")
include("functions.jl")
include("distributions.jl")
include("const_value_dist.jl")
include("named_tuple_dist.jl")
include("reshaped_dist.jl")
end # module
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
ArrayShape{T,N} <: AbstractValueShape
Describes the shape of `N`-dimensional arrays of type `T` and a given size.
Constructor:
ArrayShape{T}(dims::NTuple{N,Integer}) where {T,N}
ArrayShape{T}(dims::Integer...) where {T}
e.g.
shape = ArrayShape{Real}(2, 3)
See also the documentation of [`AbstractValueShape`](@ref).
"""
struct ArrayShape{T,N} <: AbstractValueShape
dims::NTuple{N,Int}
end
export ArrayShape
ArrayShape{T}(dims::NTuple{N,Integer}) where {T,N} = ArrayShape{T,N}(map(Int, dims))
ArrayShape{T}(dims::Integer...) where {T} = ArrayShape{T}(dims)
@inline Base.size(shape::ArrayShape) = shape.dims
Base.length(shape::ArrayShape) = prod(size(shape))
@inline Base.:(<=)(a::ArrayShape{T,N}, b::ArrayShape{U,N}) where {T,U,N} = T<:U && size(a) == size(b)
@inline _valshapeoftype(T::Type{<:AbstractArray}) = throw(ArgumentError("Type $T does not have a fixed shape"))
@inline default_unshaped_eltype(shape::ArrayShape{T}) where {T} =
default_unshaped_eltype(_valshapeoftype(T))
@inline shaped_type(shape::ArrayShape{T,N}, ::Type{U}) where {T,N,U<:Real} =
Array{shaped_type(_valshapeoftype(T),U),N}
@inline function valshape(x::AbstractArray{T}) where T
_valshapeoftype(T) # ensure T has a fixed shape
ArrayShape{T}(size(x))
end
@inline function valshape(x::AbstractArray{T,0}) where T
_valshapeoftype(T) # ensure T has a fixed shape
ScalarShape{T}()
end
# Possible extension: valshape(x::AbstractArrayOfSimilarArrays{...})
totalndof(shape::ArrayShape{T}) where{T} =
prod(size(shape)) * totalndof(_valshapeoftype(T))
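# Example (sketch): `totalndof(ArrayShape{Real}(2, 3)) == 6`, since each `Real`
# element contributes one flat degree of freedom.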
(shape::ArrayShape{T,N})(::UndefInitializer) where {T,N} = Array{default_datatype(T),N}(undef, size(shape)...)
function _check_unshaped_compat(A::AbstractArray{T,N}, shape::ArrayShape{U,N}) where {T<:Real,U<:Real,N}
Telem = eltype(A)
Telem <: U || throw(ArgumentError("Element type $Telem of array not compatible with element type $U of given shape"))
size(A) == size(shape) || throw(ArgumentError("Size of array differs from size of given shape"))
end
function unshaped(A::AbstractArray{T,1}, shape::ArrayShape{U,1}) where {T<:Real,U<:Real}
_check_unshaped_compat(A, shape)
A
end
function unshaped(A::AbstractArray{T,N}, shape::ArrayShape{U,N}) where {T<:Real,U<:Real,N}
_check_unshaped_compat(A, shape)
reshape(view(A, ntuple(_ -> :, Val(N))...), prod(size(A)))
end
function unshaped(A::Base.ReshapedArray{T,N,<:AbstractArray{T,1}}, shape::ArrayShape{U,N}) where {T<:Real,U<:Real,N}
_check_unshaped_compat(A, shape)
parent(A)
end
replace_const_shapes(f::Function, shape::ArrayShape) = shape
@inline function _apply_shape_to_data(shape::ArrayShape{<:Real,1}, data::AbstractVector{<:Real})
@boundscheck _checkcompat(shape, data)
data
end
const ArrayAccessor{T,N} = ValueAccessor{ArrayShape{T,N}} where {T,N}
const RealScalarOrVectorAccessor = ValueAccessor{<:Union{ScalarShape{<:Real},ArrayShape{<:Real,1}}}
Base.@propagate_inbounds vs_getindex(data::AbstractVector{<:Real}, va::ArrayAccessor{<:Real}) = copy(view(data, va))
@static if VERSION < v"1.4"
# To avoid ambiguity with Julia v1.0 (and v1.1 to v1.3?)
Base.@propagate_inbounds vs_getindex(data::AbstractVector{<:Real}, va::ArrayAccessor{<:Real,1}) = copy(view(data, va))
end
Base.@propagate_inbounds function vs_getindex(
data::AbstractArray{<:Real,N},
idxs::Vararg{RealScalarOrVectorAccessor,N}
) where N
idxs_mapped = map(view_idxs, axes(data), idxs)
getindex(data, idxs_mapped...)
end
Base.@propagate_inbounds vs_unsafe_view(data::AbstractVector{<:Real}, va::ArrayAccessor{<:Real,1}) =
Base.unsafe_view(data, view_idxs(axes(data, 1), va))
Base.@propagate_inbounds vs_unsafe_view(data::AbstractVector{<:Real}, va::ArrayAccessor{<:Real,N}) where {N} =
reshape(Base.unsafe_view(data, view_idxs(axes(data, 1), va)), size(va.shape)...)
Base.@propagate_inbounds function vs_unsafe_view(
data::AbstractArray{<:Real,N},
idxs::Vararg{RealScalarOrVectorAccessor,N}
) where N
idxs_mapped = map(view_idxs, axes(data), idxs)
Base.view(data, idxs_mapped...)
end
Base.@propagate_inbounds vs_setindex!(data::AbstractVector{<:Real}, v, va::ArrayAccessor{<:Real}) =
setindex!(data, v, view_idxs(axes(data, 1), va))
@static if VERSION < v"1.4"
# To avoid ambiguity with Julia v1.0 (and v1.1 to v1.3?)
Base.@propagate_inbounds vs_setindex!(data::AbstractVector{<:Real}, v, va::ArrayAccessor{<:Real,1}) =
setindex!(data, v, view_idxs(axes(data, 1), va))
end
Base.@propagate_inbounds function vs_setindex!(
data::AbstractArray{<:Real,N},
v,
idxs::Vararg{RealScalarOrVectorAccessor,N}
) where N
idxs_mapped = map(view_idxs, axes(data), idxs)
setindex!(data, v, idxs_mapped...)
end
Base.@propagate_inbounds function _bcasted_view(data::AbstractVector{<:AbstractVector{<:Real}}, va::ArrayAccessor)
_bcasted_view(convert(VectorOfSimilarVectors, data), va)
end
Base.@propagate_inbounds function _bcasted_view(data::AbstractVectorOfSimilarVectors{<:Real}, va::ArrayAccessor{T,1}) where {T}
flat_data = flatview(data)
idxs = view_idxs(axes(flat_data, 1), va)
fpview = view(flat_data, idxs, :)
VectorOfSimilarVectors(fpview)
end
Base.@propagate_inbounds function _bcasted_view(data::AbstractVectorOfSimilarVectors{<:Real}, va::ArrayAccessor{T,N}) where {T,N}
flat_data = flatview(data)
idxs = view_idxs(axes(flat_data, 1), va)
fpview = view(flat_data, idxs, :)
VectorOfSimilarArrays(reshape(fpview, size(va.shape)..., :))
end
Base.Broadcast.broadcasted(::typeof(getindex), A::AbstractVectorOfSimilarVectors{<:Real}, acc::Ref{<:ArrayAccessor}) =
copy(_bcasted_view(A, acc[]))
Base.Broadcast.broadcasted(::typeof(view), A::AbstractVectorOfSimilarVectors{<:Real}, acc::Ref{<:ArrayAccessor}) =
_bcasted_view(A, acc[])
function _bcasted_view_unchanged(data::AbstractArray{<:AbstractVector{T}}, shape::ArrayShape{U,1}) where {T<:Real,U>:T}
_checkcompat_inner(shape, data)
data
end
Base.Broadcast.broadcasted(vs::ArrayShape{T,1}, A::AbstractArray{<:AbstractVector{<:Real},N}) where {T,N} =
_bcasted_view_unchanged(A, vs)
@inline _bcasted_unshaped(A::AbstractArrayOfSimilarVectors{<:Real}) = A
@inline _bcasted_unshaped(A::AbstractArray{<:AbstractVector{<:Real}}) = convert(ArrayOfSimilarVectors, A)
# Specialize unshaped.(::AbstractArray{<:AbstractVector{<:Real}}):
Base.Broadcast.broadcasted(::typeof(unshaped), A::AbstractArray{<:AbstractVector{<:Real}}) =
_bcasted_unshaped(A)
function Base.Broadcast.broadcasted(::typeof(unshaped), A::AbstractArray{<:AbstractVector{<:Real}}, vsref::Ref{<:AbstractValueShape})
elshape(A) <= vsref[] || throw(ArgumentError("Shape of value not compatible with given shape"))
Base.Broadcast.broadcasted(unshaped, A)
end
# TODO: Add support for StaticArray.
# Possible extension: variable/flexible array shapes?
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
ConstValueDist <: Distributions.Distribution
Represents a delta distribution for a constant value of arbritrary type.
Calling `varshape` on a `ConstValueDist` will yield a
[`ConstValueShape`](@ref).
"""
struct ConstValueDist{VF<:VariateForm,T} <: Distribution{VF,Discrete}
value::T
end
export ConstValueDist
ConstValueDist(x::T) where {T<:Real} = ConstValueDist{Univariate,T}(x)
ConstValueDist(x::T) where {T<:AbstractVector{<:Real}} = ConstValueDist{Multivariate,T}(x)
ConstValueDist(x::T) where {T<:AbstractMatrix{<:Real}} = ConstValueDist{Matrixvariate,T}(x)
@static if isdefined(Distributions, :ArrayLikeVariate)
ConstValueDist(x::T) where {T<:AbstractArray{<:Real,N}} where N = ConstValueDist{ArrayLikeVariate{N},T}(x)
end
ConstValueDist(x::NamedTuple{names}) where names = ConstValueDist{NamedTupleVariate{names},typeof(x)}(x)
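# Example (sketch):
#
#     d = ConstValueDist(4.2)
#     rand(Random.default_rng(), d)  # == 4.2
#     pdf(d, 4.2)                    # == 1.0
#     pdf(d, 0.0)                    # == 0.0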
_pdf_impl(d::ConstValueDist, x) = d.value == x ? float(eltype(d))(1) : float(eltype(d))(0)
_logpdf_impl(d::ConstValueDist, x) = d.value == x ? float(eltype(d))(0) : float(eltype(d))(-Inf)
Distributions.pdf(d::ConstValueDist{Univariate}, x::Real) = _pdf_impl(d, x)
Distributions.logpdf(d::ConstValueDist{Univariate}, x::Real) = _logpdf_impl(d, x)
@static if isdefined(Distributions, :ArrayLikeVariate)
Distributions._pdf(d::ConstValueDist{<:ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where N = _pdf_impl(d, x)
Distributions._logpdf(d::ConstValueDist{<:ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where N = _logpdf_impl(d, x)
end
# Explicit defintions for Multivariate and Matrixvariate to avoid ambiguities with Distributions:
Distributions._pdf(d::ConstValueDist{Multivariate}, x::AbstractVector{<:Real}) = _pdf_impl(d, x)
Distributions._logpdf(d::ConstValueDist{Multivariate}, x::AbstractVector{<:Real}) = log(pdf(d, x))
Distributions._pdf(d::ConstValueDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = _pdf_impl(d, x)
Distributions._logpdf(d::ConstValueDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = log(pdf(d, x))
Distributions.pdf(d::ConstValueDist{<:NamedTupleVariate{names}}, x::NamedTuple{names}) where names = _pdf_impl(d, x)
Distributions.logpdf(d::ConstValueDist{<:NamedTupleVariate{names}}, x::NamedTuple{names}) where names = log(pdf(d, x))
Distributions.insupport(d::ConstValueDist{Univariate}, x::Real) = x == d.value
@static if isdefined(Distributions, :ArrayLikeVariate)
Distributions.insupport(d::ConstValueDist{<:ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where N = x == d.value
else
Distributions.insupport(d::ConstValueDist{Multivariate}, x::AbstractVector{<:Real}) = x == d.value
Distributions.insupport(d::ConstValueDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = x == d.value
end
Distributions.insupport(d::ConstValueDist{<:NamedTupleVariate{names}}, x::NamedTuple{names}) where names = x == d.value
Distributions.cdf(d::ConstValueDist{Univariate}, x::Real) = d.value <= x ? Float32(1) : Float32(0)
Distributions.quantile(d::ConstValueDist{Univariate}, q::Real) = d.value # Sensible?
Distributions.minimum(d::ConstValueDist{Univariate}) = d.value
Distributions.maximum(d::ConstValueDist{Univariate}) = d.value
StatsBase.mean(d::ConstValueDist) = d.value
StatsBase.mode(d::ConstValueDist) = d.value
Base.size(d::ConstValueDist{<:PlainVariate}) = size(d.value)
Base.length(d::ConstValueDist{<:PlainVariate}) = prod(size(d))
Base.eltype(d::ConstValueDist{<:PlainVariate}) = eltype(d.value)
Random.rand(rng::AbstractRNG, d::ConstValueDist) = d.value
@static if isdefined(Distributions, :ArrayLikeVariate)
Distributions._rand!(rng::AbstractRNG, d::ConstValueDist{<:ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where N = copyto!(x, d.value)
else
Distributions._rand!(rng::AbstractRNG, d::ConstValueDist{<:Multivariate}, x::AbstractVector{<:Real}) = copyto!(x, d.value)
Distributions._rand!(rng::AbstractRNG, d::ConstValueDist{<:Matrixvariate}, x::AbstractMatrix{<:Real}) = copyto!(x, d.value)
end
Random.rand(rng::AbstractRNG, d::ConstValueDist{<:StructVariate}, dims::Dims) = Fill(d.value, dims)
Random.rand!(rng::AbstractRNG, d::ConstValueDist{<:StructVariate}, A::AbstractArray) = fill!(A, d.value)
ValueShapes.varshape(d::ConstValueDist) = ConstValueShape(d.value)
Statistics.var(d::ConstValueDist) = zero(d.value)
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
ConstValueShape{T} <: AbstractValueShape
A `ConstValueShape` describes the shape of constant values of type `T`.
Constructor:
ConstValueShape(value)
`value` may be of arbitrary type, e.g. a constant scalar value or array:
ConstValueShape(4.2)
ConstValueShape([11 21; 12 22])
Shapes of constant values have zero degrees of freedom (see
[`totalndof`](@ref)).
See also the documentation of [`AbstractValueShape`](@ref).
"""
struct ConstValueShape{T, strict} <: AbstractValueShape
value::T
end
export ConstValueShape
ConstValueShape{T}(x::T) where T = ConstValueShape{T,true}(x)
ConstValueShape(x::T) where T = ConstValueShape{T,true}(x)
Base.:(==)(a::ConstValueShape, b::ConstValueShape) = a.value == b.value
Base.isequal(a::ConstValueShape, b::ConstValueShape) = isequal(a.value, b.value)
Base.isapprox(a::ConstValueShape, b::ConstValueShape; kwargs...) = isapprox(a.value, b.value; kwargs...)
Base.hash(x::ConstValueShape, h::UInt) = hash(x.value, hash(:ConstValueShape, hash(:ValueShapes, h)))
@inline Base.size(shape::ConstValueShape) = size(shape.value)
@inline Base.length(shape::ConstValueShape) = length(shape.value)
@inline Base.:(<=)(a::ConstValueShape{T}, b::ConstValueShape{U}) where {T,U} = T<:U && a.value ≈ b.value
@inline default_unshaped_eltype(shape::ConstValueShape) = Int32
@inline shaped_type(shape::ConstValueShape, ::Type{T}) where {T<:Real} = typeof(shape.value)
@inline totalndof(::ConstValueShape) = 0
# ToDo/Decision: Return a copy instead?
(shape::ConstValueShape)(::UndefInitializer) = shape.value
function unshaped(x::Any, shape::ConstValueShape)
x == shape.value || throw(ArgumentError("Given value does not match value of ConstValueShape"))
Float32[]
end
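# Example (sketch): constant shapes carry no degrees of freedom, so
#
#     shape = ConstValueShape(4.2)
#     totalndof(shape)      # == 0
#     unshaped(4.2, shape)  # == Float32[]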
@inline _valshapeoftype(::Type{Nothing}) = ConstValueShape(nothing)
"""
const_zero_shape(shape::ConstValueShape)
Get the equivalent of a constant zero shape for shape `shape` that will
only allow zero values to be set via an accessor.
"""
const_zero_shape(shape::ConstValueShape) = ConstValueShape(const_zero(shape.value))
"""
nonstrict_const_zero_shape(shape::ConstValueShape)
Get the equivalent of a constant zero shape for shape `shape` that will
ignore any attempt to set a value via an accessor.
Useful as a gradient/tangent varshape of constants, as they can ignore
attempts to set non-zero values.
"""
function nonstrict_const_zero_shape(shape::ConstValueShape)
x = const_zero(shape.value)
ConstValueShape{typeof(x),false}(x)
end
replace_const_shapes(f::Function, shape::ConstValueShape) = f(shape)
const ConstAccessor{T,strict} = ValueAccessor{ConstValueShape{T,strict}}
@inline vs_getindex(data::AbstractVector{<:Real}, va::ConstAccessor) = va.shape.value
@inline vs_unsafe_view(::AbstractVector{<:Real}, va::ConstAccessor) = va.shape.value
# Zygote has a generic `@adjoint getindex(x::AbstractArray, inds...)` and same for view that
# will result in overwriting va.shape.value with dy without these custom adjoints:
ZygoteRules.@adjoint function getindex(x::AbstractVector{<:Real}, va::ConstAccessor)
getindex(x, va), dy -> nothing, nothing
end
ZygoteRules.@adjoint function view(x::AbstractVector{<:Real}, va::ConstAccessor)
view(x, va), dy -> nothing, nothing
end
function vs_setindex!(data::AbstractVector{<:Real}, v, va::ConstAccessor{T,true}) where T
v == va.shape.value || throw(ArgumentError("Cannot set constant value to a different value"))
data
end
function vs_setindex!(data::AbstractVector{<:Real}, v, va::ConstAccessor{T,false}) where T
data
end
@inline _bcasted_view(data::AbstractArrayOfSimilarVectors{<:Real,N}, va::ConstAccessor) where N =
Fill(va.shape.value, size(data)...)
Base.Broadcast.broadcasted(::typeof(getindex), A::AbstractArrayOfSimilarVectors{<:Real,N}, acc::Ref{<:ConstAccessor}) where N =
_bcasted_view(A, acc[])
Base.Broadcast.broadcasted(::typeof(view), A::AbstractArrayOfSimilarVectors{<:Real,N}, acc::Ref{<:ConstAccessor}) where N =
_bcasted_view(A, acc[])
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
varshape(d::Distributions.Distribution)::AbstractValueShape
Get the value shape of the variates of distribution `d`.
"""
function varshape end
export varshape
varshape(d::Distribution{Univariate}) = ScalarShape{Real}()
@static if isdefined(Distributions, :ArrayLikeVariate)
varshape(d::Distribution{<:ArrayLikeVariate}) = ArrayShape{Real}(size(d)...)
else
varshape(d::Distribution{Multivariate}) = ArrayShape{Real}(size(d)...)
varshape(d::Distribution{Matrixvariate}) = ArrayShape{Real}(size(d)...)
end
"""
unshaped(d::Distributions.Distribution)
Turns `d` into a `Distributions.Distribution{Multivariate}` based on
`varshape(d)`.
"""
function unshaped(d::UnivariateDistribution)
# ToDo: Replace with `reshape(d, 1)` when result of `reshape(::UnivariateDistribution, 1)`
# becomes fully functional in Distributions:
product_distribution(Fill(d, 1))
end
_unshaped_uv_pullback(ΔΩ) = NoTangent(), only(unthunk(ΔΩ).v)
ChainRulesCore.rrule(::typeof(unshaped), d::UnivariateDistribution) = unshaped(d), _unshaped_uv_pullback
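# Example (sketch): `unshaped(Normal())` behaves like a one-dimensional
# multivariate distribution, so `logpdf(unshaped(Normal()), [0.5])` equals
# `logpdf(Normal(), 0.5)`.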
unshaped(d::Distribution{Multivariate}) = d
@static if isdefined(Distributions, :ReshapedDistribution)
unshaped(d::Distribution{<:ArrayLikeVariate}) = reshape(d, length(d))
else
unshaped(d::MatrixReshaped) = d.d
end
@static if isdefined(Distributions, :ArrayLikeVariate)
const PlainVariate = ArrayLikeVariate
else
const PlainVariate = Union{Univariate,Multivariate,Matrixvariate}
end
struct StructVariate{T} <: VariateForm end
const NamedTupleVariate{names} = StructVariate{NamedTuple{names}} # ToDo: Use StructVariate{<:NamedTuple{names}} instead?
function _rand_flat_impl(rng::AbstractRNG, d::Distribution{NamedTupleVariate{names}}) where names
shape = varshape(d)
X = Vector{default_unshaped_eltype(shape)}(undef, totalndof(varshape(d)))
(shape, rand!(rng, unshaped(d), X))
end
function Random.rand(rng::AbstractRNG, d::Distribution{NamedTupleVariate{names}}) where names
shape, X = _rand_flat_impl(rng, d)
shape(X)
end
function Random.rand(rng::AbstractRNG, d::Distribution{NamedTupleVariate{names}}, dims::Tuple{}) where names
shape, X = _rand_flat_impl(rng, d)
shape.(Fill(X))
end
function Random.rand(rng::AbstractRNG, d::Distribution{NamedTupleVariate{names}}, dims::Dims) where names
shape = varshape(d)
X_flat = Array{default_unshaped_eltype(shape)}(undef, totalndof(varshape(d)), dims...)
X = ArrayOfSimilarVectors(X_flat)
rand!(rng, unshaped(d), X)
shape.(X)
end
function Random.rand!(d::Distribution{NamedTupleVariate{names}}, x::ShapedAsNT{names}) where names
rand!(Random.default_rng(), d, x)
end
function Random.rand!(rng::AbstractRNG, d::Distribution{NamedTupleVariate{names}}, x::ShapedAsNT{names}) where names
valshape(x) >= varshape(d) || throw(ArgumentError("Shapes of variate and value are not compatible"))
rand!(rng, unshaped(d), unshaped(x))
x
end
function _aov_rand_impl!(rng::AbstractRNG, d::Distribution{Multivariate}, X::ArrayOfSimilarVectors{<:Real})
rand!(rng, unshaped(d), flatview(X))
end
# Workaround for current limitations of ArraysOfArrays.unshaped for standard arrays of vectors
function _aov_rand_impl!(rng::AbstractRNG, d::Distribution{Multivariate}, X::AbstractArray{<:AbstractVector{<:Real}})
rand!.(Ref(rng), Ref(unshaped(d)), X)
end
function Random.rand!(rng::AbstractRNG, d::Distribution{<:NamedTupleVariate}, X::ShapedAsNTArray)
elshape(X) >= varshape(d) || throw(ArgumentError("Shapes of variate and value are not compatible"))
_aov_rand_impl!(rng, unshaped(d), unshaped.(X))
X
end
function Distributions.logpdf(d::Distribution{NamedTupleVariate{names}}, x::NamedTuple{names}) where names
logpdf(unshaped(d), unshaped(x, varshape(d)))
end
function Distributions.logpdf(d::Distribution{NamedTupleVariate{names}}, x::ShapedAsNT{names}) where names
@argcheck valshape(x) <= varshape(d)
logpdf(unshaped(d), unshaped(x))
end
function Distributions.logpdf(d::Distribution{NamedTupleVariate{names}}, x::AbstractArray{<:NamedTuple{names},0}) where names
logpdf(unshaped(d), unshaped(x, varshape(d)))
end
function Distributions.pdf(d::Distribution{NamedTupleVariate{names}}, x::NamedTuple{names}) where names
pdf(unshaped(d), unshaped(x, varshape(d)))
end
function Distributions.pdf(d::Distribution{NamedTupleVariate{names}}, x::ShapedAsNT{names}) where names
@argcheck valshape(x) <= varshape(d)
pdf(unshaped(d), unshaped(x))
end
function Distributions.pdf(d::Distribution{NamedTupleVariate{names}}, x::AbstractArray{<:NamedTuple{names},0}) where names
pdf(unshaped(d), unshaped(x, varshape(d)))
end
function Distributions.insupport(d::Distribution{NamedTupleVariate{names}}, x::NamedTuple{names}) where names
insupport(unshaped(d), unshaped(x, varshape(d)))
end
function Distributions.insupport(d::Distribution{NamedTupleVariate{names}}, x::ShapedAsNT{names}) where names
@argcheck valshape(x) <= varshape(d)
insupport(unshaped(d), unshaped(x))
end
function Distributions.insupport(d::Distribution{NamedTupleVariate{names}}, x::AbstractArray{<:NamedTuple{names},0}) where names
insupport(unshaped(d), unshaped(x, varshape(d)))
end
function Distributions.insupport(d::Distribution{NamedTupleVariate{names}}, X::AbstractArray{<:NamedTuple{names},N}) where {N,names}
Distributions.insupport!(BitArray(undef, size(X)), d, X)
end
function Distributions.insupport!(r::AbstractArray{Bool,N}, d::Distribution{NamedTupleVariate{names}}, X::AbstractArray{<:NamedTuple{names},N}) where {N,names}
r .= insupport.(Ref(d), X)
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
resultshape(f, vs::AbstractValueShape)
Return the shape of values returned by `f` when applied to values of shape
`vs`.
Returns `missing` if the shape of the function result cannot be determined.
"""
resultshape(f, vs::AbstractValueShape) = missing
export resultshape
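# Example (a hypothetical specialization, sketch only): a callable that sums
# an array could advertise a scalar result shape:
#
#     struct MySum end
#     (::MySum)(x::AbstractArray{<:Real}) = sum(x)
#     ValueShapes.resultshape(::MySum, ::ArrayShape{<:Real}) = ScalarShape{Real}()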
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
_ntd_dist_and_shape(d::Distribution) = (d, varshape(d))
_ntd_dist_and_shape(s::ConstValueShape) = (ConstValueDist(s.value), s)
_ntd_dist_and_shape(s::IntervalSets.AbstractInterval) = _ntd_dist_and_shape(Uniform(minimum(s), maximum(s)))
_ntd_dist_and_shape(xs::AbstractVector{<:IntervalSets.AbstractInterval}) = _ntd_dist_and_shape(Product((s -> Uniform(minimum(s), maximum(s))).(xs)))
_ntd_dist_and_shape(xs::AbstractVector{<:Distribution}) = _ntd_dist_and_shape(product_distribution(xs))
_ntd_dist_and_shape(x::Number) = _ntd_dist_and_shape(ConstValueShape(x))
_ntd_dist_and_shape(x::AbstractArray{<:Number}) = _ntd_dist_and_shape(ConstValueShape(x))
"""
NamedTupleDist <: MultivariateDistribution
A distribution with `NamedTuple`-typed variates.
`NamedTupleDist` provides an effective mechanism to specify the distribution
of each variable/parameter in a set of named variables/parameters.
Calling `varshape` on a `NamedTupleDist` will yield a
[`NamedTupleShape`](@ref).
"""
struct NamedTupleDist{
names,
DT <: (NTuple{N,Distribution} where N),
AT <: (NTuple{N,ValueShapes.ValueAccessor} where N),
VT
} <: Distribution{NamedTupleVariate{names},Continuous}
_internal_distributions::NamedTuple{names,DT}
_internal_shape::NamedTupleShape{names,AT,VT}
end
export NamedTupleDist
function NamedTupleDist(::Type{VT}, dists::NamedTuple{names}) where {VT,names}
dsb = map(_ntd_dist_and_shape, dists)
NamedTupleDist(
map(x -> x[1], dsb),
NamedTupleShape(VT, map(x -> x[2], dsb))
)
end
NamedTupleDist(dists::NamedTuple) = NamedTupleDist(NamedTuple, dists)
@inline NamedTupleDist(::Type{VT} ;named_dists...) where VT = NamedTupleDist(VT, values(named_dists))
@inline NamedTupleDist(;named_dists...) = NamedTupleDist(NamedTuple, values(named_dists))
@inline Base.convert(::Type{NamedTupleDist}, named_dists::NamedTuple) = NamedTupleDist(;named_dists...)
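# Example (sketch; `Normal` and `Uniform` come from Distributions.jl):
#
#     d = NamedTupleDist(a = Normal(), b = 4.2, c = Uniform(0, 1))
#     keys(d)      # == (:a, :b, :c)
#     varshape(d)  # a NamedTupleShape; the constant `b` maps to a ConstValueShape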
@inline _distributions(d::NamedTupleDist) = getfield(d, :_internal_distributions)
@inline _shape(d::NamedTupleDist) = getfield(d, :_internal_shape)
function Base.show(io::IO, d::NamedTupleDist)
print(io, Base.typename(typeof(d)).name, "(")
show(io, _distributions(d))
print(io, ")")
end
@inline Base.keys(d::NamedTupleDist) = keys(_distributions(d))
@inline Base.values(d::NamedTupleDist) = values(_distributions(d))
@inline Base.getindex(d::NamedTupleDist, k::Symbol) = _distributions(d)[k]
@inline function Base.getproperty(d::NamedTupleDist, s::Symbol)
# Need to include internal fields of NamedTupleShape to make Zygote happy (ToDo: still true?):
if s == :_internal_distributions
getfield(d, :_internal_distributions)
elseif s == :_internal_shape
getfield(d, :_internal_shape)
else
getproperty(_distributions(d), s)
end
end
@inline function Base.propertynames(d::NamedTupleDist, private::Bool = false)
names = propertynames(_distributions(d))
if private
(names..., :_internal_distributions, :_internal_shape)
else
names
end
end
@inline Base.map(f, dist::NamedTupleDist) = map(f, _distributions(dist))
Base.merge(a::NamedTuple, dist::NamedTupleDist{names}) where {names} = merge(a, _distributions(dist))
Base.merge(a::NamedTupleDist) = a
Base.merge(a::NamedTupleDist{names,DT,AT,VT}, b::NamedTupleDist, cs::NamedTupleDist...) where {names,DT,AT,VT} =
merge(NamedTupleDist(VT; a..., b...), cs...)
function Base.merge(a::NamedTupleDist{names,DT,AT,VT}, b::Union{NamedTupleDist,NamedTuple}, cs::Union{NamedTupleDist,NamedTuple}...) where {names,DT,AT,VT}
merge(a, convert(NamedTupleDist, b), map(x -> convert(NamedTupleDist, x), cs)...)
end
varshape(d::NamedTupleDist) = _shape(d)
struct UnshapedNTD{NTD<:NamedTupleDist} <: Distribution{Multivariate,Continuous}
shaped::NTD
end
_ntd_length(d::Distribution) = length(d)
_ntd_length(d::ConstValueDist) = 0
function Base.length(ud::UnshapedNTD)
d = ud.shaped
len = sum(_ntd_length, values(d))
@assert len == totalndof(varshape(d))
len
end
Base.eltype(ud::UnshapedNTD) = default_unshaped_eltype(varshape(ud.shaped))
unshaped(d::NamedTupleDist) = UnshapedNTD(d)
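# Example (sketch, continuing the `NamedTupleDist` example above): with
# `ud = unshaped(d)`, `length(ud) == totalndof(varshape(d))` holds, and
# constant components such as `b` contribute zero flat entries.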
function _ntd_logpdf(
dist::ConstValueDist,
acc::ValueShapes.ValueAccessor{<:ConstValueShape},
x::AbstractVector{<:Real}
)
float(zero(eltype(x)))
end
function _ntd_logpdf(
dist::Distribution,
acc::ValueShapes.ValueAccessor,
x::AbstractVector{<:Real}
)
logpdf(dist, float(x[acc]))
end
function _ntd_logpdf(d::NamedTupleDist, x::AbstractVector{<:Real})
distributions = values(d)
accessors = values(varshape(d))
sum(map((dist, acc) -> _ntd_logpdf(dist, acc, x), distributions, accessors))
end
# ConstValueDist has no dof, so NamedTupleDist logpdf contribution must be zero:
_ntd_logpdf(dist::ConstValueDist, x::Any) = zero(Float32)
_ntd_logpdf(dist::Distribution, x::Any) = logpdf(dist, x)
function _ntd_logpdf(d::NamedTupleDist{names}, x::NamedTuple{names}) where names
distributions = values(d)
parvalues = values(x)
sum(map((dist, d) -> _ntd_logpdf(dist, d), distributions, parvalues))
end
Distributions.logpdf(d::NamedTupleDist{names}, x::NamedTuple{names}) where names = _ntd_logpdf(d, x)
Distributions.pdf(d::NamedTupleDist{names}, x::NamedTuple{names}) where names = exp(logpdf(d, x))
Distributions._logpdf(ud::UnshapedNTD, x::AbstractVector{<:Real}) = _ntd_logpdf(ud.shaped, x)
Distributions.logpdf(d::NamedTupleDist{names}, x::ShapedAsNT{names}) where names = _ntd_logpdf(d, convert(NamedTuple, x))
Distributions.pdf(d::NamedTupleDist{names}, x::ShapedAsNT{names}) where names = exp(logpdf(d, convert(NamedTuple, x)))
function _ntd_insupport(
dist::Distribution,
acc::ValueShapes.ValueAccessor,
x::AbstractVector{<:Real}
)
insupport(dist, float(x[acc]))
end
function _ntd_insupport(d::NamedTupleDist, x::AbstractVector{<:Real})
distributions = values(d)
accessors = values(varshape(d))
prod(map((dist, acc) -> _ntd_insupport(dist, acc, x), distributions, accessors))
end
# ConstValueDist has no dof, so its NamedTupleDist insupport contribution is always true:
_ntd_insupport(dist::ConstValueDist, x::Any) = true
_ntd_insupport(dist::Distribution, x::Any) = insupport(dist, x)
function _ntd_insupport(d::NamedTupleDist{names}, x::NamedTuple{names}) where names
distributions = values(d)
parvalues = values(x)
prod(map((dist, d) -> _ntd_insupport(dist, d), distributions, parvalues))
end
Distributions.insupport(d::NamedTupleDist{names}, x::NamedTuple{names}) where names = _ntd_insupport(d, x)
Distributions.insupport(d::NamedTupleDist{names}, x::ShapedAsNT{names}) where names = _ntd_insupport(d, convert(NamedTuple, x))
Distributions.insupport(ud::UnshapedNTD, x::AbstractVector{<:Real}) = _ntd_insupport(ud.shaped, x)
function _ntd_rand!(
rng::AbstractRNG, dist::ConstValueDist,
acc::ValueShapes.ValueAccessor{<:ConstValueShape},
x::AbstractVector{<:Real}
)
nothing
end
function _ntd_rand!(
rng::AbstractRNG, dist::Distribution{Univariate},
acc::ValueShapes.ValueAccessor,
x::AbstractVector{<:Real}
)
x_view = view(x, acc)
idxs = eachindex(x_view)
@assert length(idxs) == 1
x_view[first(idxs)] = rand(rng, dist)
nothing
end
function _ntd_rand!(
rng::AbstractRNG, dist::Distribution{<:PlainVariate},
acc::ValueShapes.ValueAccessor,
x::AbstractVector{<:Real}
)
rand!(rng, dist, view(x, acc))
nothing
end
function _ntd_rand!(rng::AbstractRNG, d::NamedTupleDist, x::AbstractVector{<:Real})
distributions = values(d)
accessors = values(varshape(d))
map((dist, acc) -> _ntd_rand!(rng, dist, acc, x), distributions, accessors)
x
end
@inline Distributions._rand!(rng::AbstractRNG, ud::UnshapedNTD, x::AbstractVector{<:Real}) = _ntd_rand!(rng, ud.shaped, x)
function _ntd_mode!(
dist::ConstValueDist,
acc::ValueShapes.ValueAccessor{<:ConstValueShape},
params::AbstractVector{<:Real}
)
nothing
end
function _ntd_mode!(
dist::Distribution,
acc::ValueShapes.ValueAccessor,
params::AbstractVector{<:Real}
)
view(params, acc) .= mode(dist)
nothing
end
# Workaround, Distributions.jl doesn't define mode for Product:
function _ntd_mode!(
dist::Distributions.Product,
acc::ValueShapes.ValueAccessor,
params::AbstractVector{<:Real}
)
view(params, acc) .= map(mode, dist.v)
nothing
end
function _ntd_mode!(x::AbstractVector{<:Real}, d::NamedTupleDist)
distributions = values(d)
shape = varshape(d)
accessors = values(shape)
map((dist, acc) -> _ntd_mode!(dist, acc, x), distributions, accessors)
nothing
end
function _ntd_mode(d::NamedTupleDist)
x = Vector{default_unshaped_eltype(varshape(d))}(undef, varshape(d))
_ntd_mode!(x, d)
x
end
# ToDo/Decision: Return NamedTuple or ShapedAsNT?
StatsBase.mode(d::NamedTupleDist) = varshape(d)(mode(unshaped(d)))
StatsBase.mode(ud::UnshapedNTD) = _ntd_mode(ud.shaped)
_ntd_mean(dist::ConstValueDist) = Float32[]
_ntd_mean(dist::Distribution) = mean(unshaped(dist))
# ToDo/Decision: Return NamedTuple or ShapedAsNT?
Statistics.mean(d::NamedTupleDist) = varshape(d)(mean(unshaped(d)))
function Statistics.mean(ud::UnshapedNTD)
d = ud.shaped
vcat(map(d -> _ntd_mean(d), values(ValueShapes._distributions(d)))...)
end
_ntd_var(dist::ConstValueDist) = Float32[]
_ntd_var(dist::Distribution) = var(unshaped(dist))
# ToDo/Decision: Return NamedTuple or ShapedAsNT?
Statistics.var(d::NamedTupleDist) = variance_shape(varshape(d))(var(unshaped(d)))
function Statistics.var(ud::UnshapedNTD)
d = ud.shaped
vcat(map(d -> _ntd_var(d), values(ValueShapes._distributions(d)))...)
end
function _ntd_var_or_cov!(A_cov::AbstractArray{<:Real,0}, dist::Distribution{Univariate})
A_cov[] = var(dist)
nothing
end
function _ntd_var_or_cov!(A_cov::AbstractArray{<:Real,2}, dist::Distribution{Multivariate})
A_cov[:, :] = cov(dist)
nothing
end
function _ntd_cov!(
dist::ConstValueDist,
acc::ValueShapes.ValueAccessor{<:ConstValueShape},
A_cov::AbstractMatrix{<:Real}
)
nothing
end
function _ntd_cov!(
dist::Distribution,
acc::ValueShapes.ValueAccessor,
A_cov::AbstractMatrix{<:Real}
)
_ntd_var_or_cov!(view(A_cov, acc, acc), dist)
nothing
end
function _ntd_cov!(A_cov::AbstractMatrix{<:Real}, d::NamedTupleDist)
distributions = values(d)
accessors = values(varshape(d))
map((dist, acc) -> _ntd_cov!(dist, acc, A_cov), distributions, accessors)
A_cov
end
function _ntd_cov(d::NamedTupleDist)
n = totalndof(varshape(d))
A_cov = zeros(n, n)
_ntd_cov!(A_cov, d)
end
Statistics.cov(ud::UnshapedNTD) = _ntd_cov(ud.shaped)
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
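# Cumulative flat-vector offsets of components with the given lengths, e.g.
# _varoffset_cumsum((1, 6, 2)) == (0, 1, 7). The @generated branch unrolls
# the running sum for small tuples to keep it type-stable.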
function _varoffset_cumsum(x::Tuple{Vararg{Integer,N}}) where N
if @generated
if N < 1000
vars = [Symbol("s$i") for i in 0:N-1]
exprs = [:($(vars[i+1]) = $(vars[i]) + x[$i]) for i in 1:N-1]
quote
s0 = 0
$(exprs...)
($(vars...),)
end
else
:((0, cumsum([x...][begin:end-1])...))
end
else
(0, cumsum([x...][begin:end-1])...)
end
end
"""
NamedTupleShape{names,...} <: AbstractValueShape
Defines the shape of a `NamedTuple` (resp. set of variables, parameters,
etc.).
Constructors:
NamedTupleShape(name1 = shape1::AbstractValueShape, ...)
NamedTupleShape(named_shapes::NamedTuple)
Example:
```julia
shape = NamedTupleShape(
a = ScalarShape{Real}(),
b = ArrayShape{Real}(2, 3),
c = ConstValueShape(42)
)
data = VectorOfSimilarVectors{Float64}(shape)
resize!(data, 10)
rand!(flatview(data))
table = shape.(data)
fill!(table.a, 4.2)
all(x -> x == 4.2, view(flatview(data), 1, :))
```
See also the documentation of [`AbstractValueShape`](@ref).
"""
struct NamedTupleShape{names,AT<:(NTuple{N,ValueAccessor} where N),VT} <: AbstractValueShape
_accessors::NamedTuple{names,AT}
_flatdof::Int
@inline function NamedTupleShape{names,AT,VT}(
_accessors::NamedTuple{names,AT}, _flatdof::Int
) where {names,AT,VT}
new{names,AT,VT}(_accessors, _flatdof)
end
@inline function NamedTupleShape(::Type{VT}, shape::NamedTuple{names,<:NTuple{N,AbstractValueShape}}) where {VT,names,N}
labels = keys(shape)
shapes = values(shape)
shapelengths = map(totalndof, shapes)
offsets = _varoffset_cumsum(shapelengths)
accessors = map(ValueAccessor, shapes, offsets)
# acclengths = map(x -> x.len, accessors)
# @assert shapelengths == acclengths
n_flattened = sum(shapelengths)
named_accessors = NamedTuple{labels}(accessors)
new{names,typeof(accessors),VT}(named_accessors, n_flattened)
end
end
export NamedTupleShape
@inline NamedTupleShape(::Type{VT}; named_shapes...) where VT = NamedTupleShape(VT, values(named_shapes))
@inline NamedTupleShape(shape::NamedTuple{names,<:NTuple{N,AbstractValueShape}}) where {names,N} = NamedTupleShape(NamedTuple, shape)
@inline NamedTupleShape(;named_shapes...) = NamedTupleShape(NamedTuple;named_shapes...)
@inline _accessors(x::NamedTupleShape) = getfield(x, :_accessors)
@inline _flatdof(x::NamedTupleShape) = getfield(x, :_flatdof)
function Base.show(io::IO, shape::NamedTupleShape{names,AT,VT}) where {names,AT,VT}
print(io, Base.typename(typeof(shape)).name, "(")
if !(VT <: NamedTuple)
show(io, VT)
print(io, ", ")
end
show(io, (;shape...))
print(io, ")")
end
Base.:(==)(a::NamedTupleShape, b::NamedTupleShape) = _accessors(a) == _accessors(b)
Base.isequal(a::NamedTupleShape, b::NamedTupleShape) = isequal(_accessors(a), _accessors(b))
#Base.isapprox(a::NamedTupleShape, b::NamedTupleShape; kwargs...) = ...
Base.hash(x::NamedTupleShape, h::UInt) = hash(_accessors(x), hash(:NamedTupleShape, hash(:ValueShapes, h)))
@inline totalndof(shape::NamedTupleShape) = _flatdof(shape)
@inline Base.keys(shape::NamedTupleShape) = keys(_accessors(shape))
@inline Base.values(shape::NamedTupleShape) = values(_accessors(shape))
@inline Base.getindex(d::NamedTupleShape, k::Symbol) = _accessors(d)[k]
@inline function Base.getproperty(shape::NamedTupleShape, p::Symbol)
# Need to include internal fields of NamedTupleShape to make Zygote happy (ToDo: still true?):
if p == :_accessors
getfield(shape, :_accessors)
elseif p == :_flatdof
getfield(shape, :_flatdof)
else
getproperty(_accessors(shape), p)
end
end
@inline function Base.propertynames(shape::NamedTupleShape, private::Bool = false)
names = propertynames(_accessors(shape))
if private
(names..., :_flatdof, :_accessors)
else
names
end
end
@inline Base.length(shape::NamedTupleShape) = length(_accessors(shape))
@inline Base.getindex(shape::NamedTupleShape, i::Integer) = getindex(_accessors(shape), i)
@inline Base.map(f, shape::NamedTupleShape) = map(f, _accessors(shape))
function Base.merge(a::NamedTuple, shape::NamedTupleShape{names}) where {names}
merge(a, NamedTuple{names}(map(x -> valshape(x), values(shape))))
end
Base.merge(a::NamedTupleShape) = a
function Base.merge(a::NamedTupleShape{names,AT,VT}, b::NamedTupleShape, cs::NamedTupleShape...) where {names,AT,VT}
merge(NamedTupleShape(VT; a..., b...), cs...)
end
function Base.:(<=)(a::NamedTupleShape{names}, b::NamedTupleShape{names}) where {names}
all(map((a, b) -> a.offset == b.offset && a.shape <= b.shape, values(a), values(b)))
end
@inline Base.:(<=)(a::NamedTupleShape, b::NamedTupleShape) = false
valshape(x::NamedTuple) = NamedTupleShape(NamedTuple, map(valshape, x))
(shape::NamedTupleShape)(::UndefInitializer) = map(x -> valshape(x)(undef), shape)
@inline _multi_promote_type() = Nothing
@inline _multi_promote_type(T::Type) = T
@inline _multi_promote_type(T::Type, U::Type, rest::Type...) = promote_type(T, _multi_promote_type(U, rest...))
@inline default_unshaped_eltype(shape::NamedTupleShape) =
_multi_promote_type(map(default_unshaped_eltype, values(shape))...)
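# e.g. a shape whose components are all ScalarShape{Real}/ArrayShape{Real}
# flattens to Float64 by default (the promotion of the component defaults).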
function unshaped(x::NamedTuple{names}, shape::NamedTupleShape{names}) where names
# ToDo: Improve performance of return type inference
T = default_unshaped_eltype(shape)
U = default_unshaped_eltype(valshape(x))
R = promote_type(T, U)
x_unshaped = Vector{R}(undef, totalndof(shape)...)
sntshape = NamedTupleShape(ShapedAsNT; shape...)
sntshape(x_unshaped)[] = x
x_unshaped
end
function unshaped(x::AbstractArray{<:NamedTuple{names},0}, shape::NamedTupleShape{names}) where names
unshaped(x[], shape)
end
function replace_const_shapes(f::Function, shape::NamedTupleShape{names,AT,VT}) where {names,AT,VT}
NamedTupleShape(VT, map(s -> replace_const_shapes(f, s), (;shape...)))
end
"""
ShapedAsNT{names,...}
View of an `AbstractVector{<:Real}` as a mutable named tuple (though not a
`NamedTuple`, exactly), according to a specified [`NamedTupleShape`](@ref).
Constructors:
ShapedAsNT(data::AbstractVector{<:Real}, shape::NamedTupleShape)
shape(data)
The resulting `ShapedAsNT` shares memory with `data`:
```julia
x = (a = 42, b = rand(1:9, 2, 3))
shape = NamedTupleShape(
ShapedAsNT,
a = ScalarShape{Real}(),
b = ArrayShape{Real}(2, 3)
)
data = Vector{Int}(undef, shape)
y = shape(data)
@assert y isa ShapedAsNT
y[] = x
@assert y[] == x
y.a = 22
@assert shape(data) == y
@assert unshaped(y) === data
```
Use `unshaped(x)` to access `data` directly.
See also [`ShapedAsNTArray`](@ref).
"""
struct ShapedAsNT{names,D<:AbstractVector{<:Real},S<:NamedTupleShape{names}}
__internal_data::D
__internal_valshape::S
function ShapedAsNT{names,D,S}(__internal_data::D, __internal_valshape::S) where {names,D<:AbstractVector{<:Real},S<:NamedTupleShape{names}}
new{names,D,S}(__internal_data, __internal_valshape)
end
Base.@propagate_inbounds function ShapedAsNT(data::D, shape::S) where {T<:Real,D<:AbstractVector{T},names,S<:NamedTupleShape{names}}
@boundscheck _checkcompat(shape, data)
fixed_shape = _snt_ntshape(shape)
new{names,D,typeof(fixed_shape)}(data, fixed_shape)
end
end
export ShapedAsNT
_snt_ntshape(vs::NamedTupleShape{names,AT,<:ShapedAsNT}) where {names,AT} = vs
_snt_ntshape(vs::NamedTupleShape{names,AT}) where {names,AT} = NamedTupleShape{names,AT,ShapedAsNT}(_accessors(vs), _flatdof(vs))
@inline shaped_type(shape::NamedTupleShape{names,AT,<:NamedTuple}, ::Type{T}) where {names,AT,T<:Real} =
NamedTuple{names,Tuple{map(acc -> shaped_type(acc.shape, T), values(_accessors(shape)))...}}
@inline shaped_type(shape::NamedTupleShape{names,AT,<:ShapedAsNT}, ::Type{T}) where {names,AT,T<:Real} =
ShapedAsNT{names,Vector{T},typeof(shape)}
Base.@propagate_inbounds (shape::NamedTupleShape{names,AT,<:NamedTuple})(
data::AbstractVector{<:Real}
) where {names,AT} = ShapedAsNT(data, shape)[]
Base.@propagate_inbounds (shape::NamedTupleShape{names,AT,<:ShapedAsNT})(
data::AbstractVector{<:Real}
) where {names,AT} = ShapedAsNT(data, shape)
@inline _data(A::ShapedAsNT) = getfield(A, :__internal_data)
@inline _valshape(A::ShapedAsNT) = getfield(A, :__internal_valshape)
@inline valshape(A::ShapedAsNT) = _valshape(A)
@inline unshaped(A::ShapedAsNT) = _data(A)
function unshaped(x::ShapedAsNT{names}, shape::NamedTupleShape{names}) where names
valshape(x) <= shape || throw(ArgumentError("Shape of value not compatible with given shape"))
unshaped(x)
end
@inline _shapedasnt_getprop(data::AbstractArray{<:Real}, va::ValueAccessor) = view(data, va)
@inline _shapedasnt_getprop(data::AbstractArray{<:Real}, va::ScalarAccessor) = getindex(data, va)
# ToDo: Move index calculation to separate function with no-op custom pullback to increase performance?
Base.@propagate_inbounds function Base.getproperty(A::ShapedAsNT, p::Symbol)
# Need to include internal fields of ShapedAsNT to make Zygote happy (ToDo: still true?):
if p == :__internal_data
getfield(A, :__internal_data)
elseif p == :__internal_valshape
getfield(A, :__internal_valshape)
else
data = _data(A)
shape = _valshape(A)
va = getproperty(_accessors(shape), p)
_shapedasnt_getprop(data, va)
end
end
Base.@propagate_inbounds function Base.setproperty!(A::ShapedAsNT, p::Symbol, x)
data = _data(A)
shape = _valshape(A)
va = getproperty(_accessors(shape), p)
setindex!(data, x, va)
A
end
Base.@propagate_inbounds function Base.setproperty!(A::ShapedAsNT, p::Symbol, x::ZeroTangent)
data = _data(A)
shape = _valshape(A)
va = getproperty(_accessors(shape), p)
idxs = view_idxs(eachindex(data), va)
fill!(view(data, idxs), zero(eltype(data)))
A
end
@inline function Base.propertynames(A::ShapedAsNT, private::Bool = false)
names = Base.propertynames(_valshape(A))
if private
(names..., :__internal_data, :__internal_valshape)
else
names
end
end
Base.size(x::ShapedAsNT) = ()
Base.axes(x::ShapedAsNT) = ()
Base.length(x::ShapedAsNT) = 1
Base.isempty(x::ShapedAsNT) = false
Base.ndims(x::ShapedAsNT) = 0
Base.ndims(::Type{<:ShapedAsNT}) = 0
Base.iterate(r::ShapedAsNT) = (r[], nothing)
Base.iterate(r::ShapedAsNT, s) = nothing
Base.IteratorSize(::Type{<:ShapedAsNT}) = HasShape{0}()
Base.@propagate_inbounds function Base.getindex(x::ShapedAsNT{names}) where names
if @generated
Expr(:tuple, map(p -> :($p = x.$p), names)...)
else
# Shouldn't be used, ideally
@assert false
accessors = _accessors(_valshape(x))
data = _data(x)
map(va -> getindex(data, va), accessors)
end
end
@inline Base.getindex(d::ShapedAsNT, k::Symbol) = getproperty(d, k)
Base.@propagate_inbounds Base.view(A::ShapedAsNT) = A
Base.@propagate_inbounds function Base.setindex!(A::ShapedAsNT{names}, x::NamedTuple{names}) where {names}
if @generated
Expr(:block, map(p -> :(A.$p = x.$p), names)...)
else
# Shouldn't be used, ideally
@assert false
map(n -> setproperty!(A, n, getproperty(x, n)), names)
end
A
end
Base.@propagate_inbounds Base.setindex!(A::ShapedAsNT{names}, x) where {names} = setindex!(A, convert(NamedTuple{names}, x))
Base.@propagate_inbounds function Base.setindex!(A::ShapedAsNT, x, i::Integer)
@boundscheck Base.checkbounds(A, i)
setindex!(A, x)
end
Base.NamedTuple(A::ShapedAsNT) = A[]
Base.NamedTuple{names}(A::ShapedAsNT{names}) where {names} = A[]
Base.convert(::Type{NamedTuple}, A::ShapedAsNT) = A[]
Base.convert(::Type{NamedTuple{names}}, A::ShapedAsNT{names}) where {names} = A[]
function Base.convert(::Type{ShapedAsNT{names,D_a,S}}, A::ShapedAsNT{names,D_b,S}) where {names,D_a,D_b,S}
ShapedAsNT{names,D_a,S}(convert(D_a,_data(A)), valshape(A))
end
realnumtype(::Type{<:ShapedAsNT{<:Any,<:AbstractArray{T}}}) where {T<:Real} = T
stripscalar(A::ShapedAsNT) = A[]
Base.show(io::IO, ::MIME"text/plain", A::ShapedAsNT) = show(io, A)
function Base.show(io::IO, A::ShapedAsNT)
print(io, Base.typename(typeof(A)).name, "(")
show(io, A[])
print(io, ")")
end
Base.:(==)(A::ShapedAsNT, B::ShapedAsNT) = _data(A) == _data(B) && _valshape(A) == _valshape(B)
Base.isequal(A::ShapedAsNT, B::ShapedAsNT) = isequal(_data(A), _data(B)) && _valshape(A) == _valshape(B)
Base.isapprox(A::ShapedAsNT, B::ShapedAsNT; kwargs...) = isapprox(_data(A), _data(B); kwargs...) && _valshape(A) == _valshape(B)
Base.copy(A::ShapedAsNT) = ShapedAsNT(copy(_data(A)),_valshape(A))
function Adapt.adapt_structure(to, x::ShapedAsNT)
ShapedAsNT(Adapt.adapt(to, _data(x)), _valshape(x))
end
# Required for accumulation during automatic differentiation:
function Base.:(+)(A::ShapedAsNT{names}, B::ShapedAsNT{names}) where names
@argcheck _valshape(A) == _valshape(B)
ShapedAsNT(_data(A) + _data(B), _valshape(A))
end
# Required for accumulation during automatic differentiation:
function ChainRulesCore.add!!(A::ShapedAsNT{names}, B::ShapedAsNT{names}) where names
@argcheck _valshape(A) == _valshape(B)
ChainRulesCore.add!!(_data(A), _data(B))
return A
end
function ChainRulesCore.Tangent(x::T, unshaped_dx::AbstractVector{<:Real}) where {T<:ShapedAsNT}
vs = valshape(x)
gs = gradient_shape(vs)
contents = (__internal_data = unshaped_dx, __internal_valshape = gs)
Tangent{T,typeof(contents)}(contents)
end
struct GradShapedAsNTProjector{VS<:NamedTupleShape} <: Function
gradshape::VS
end
ChainRulesCore.ProjectTo(x::ShapedAsNT) = GradShapedAsNTProjector(gradient_shape(valshape(x)))
_check_ntgs_tangent_compat(a::NamedTupleShape, ::NoTangent) = nothing
function _check_ntgs_tangent_compat(a::NamedTupleShape{names}, b::NamedTupleShape{names}) where names
a >= b || error("Incompatible tangent NamedTupleShape")
end
_snt_from_tangent(data::AbstractVector{<:Real}, gs::NamedTupleShape) = ShapedAsNT(data, gs)
_snt_from_tangent(data::_ZeroLike, gs::NamedTupleShape) = _az_tangent(data)
function (project::GradShapedAsNTProjector{<:NamedTupleShape{names}})(data::NamedTuple{(:__internal_data, :__internal_valshape)}) where names
gs = project.gradshape
_check_ntgs_tangent_compat(gs, data.__internal_valshape)
_snt_from_tangent(data.__internal_data, project.gradshape)
end
function (project::GradShapedAsNTProjector{<:NamedTupleShape{names}})(tangent::Tangent{<:ShapedAsNT{names}}) where names
project(_backing(tangent))
end
function (project::GradShapedAsNTProjector{<:NamedTupleShape{names}})(tangent::ShapedAsNT{names}) where names
tangent
end
(project::GradShapedAsNTProjector{<:NamedTupleShape})(tangent::_ZeroLike) = _az_tangent(tangent)
_getindex_tangent(x::ShapedAsNT, dy::_ZeroLike) = _az_tangent(dy)
function _getindex_tangent(x::ShapedAsNT, dy::NamedTuple)
tangent = Tangent(x, _tangent_array(unshaped(x)))
dx_unshaped, gs = _backing(tangent)
ShapedAsNT(dx_unshaped, gs)[] = _notangent_to_zerotangent(dy)
tangent
end
function ChainRulesCore.rrule(::typeof(getindex), x::ShapedAsNT)
shapedasnt_getindex_pullback(ΔΩ) = (NoTangent(), ProjectTo(x)(_getindex_tangent(x, _unpack_tangent(ΔΩ))))
return x[], shapedasnt_getindex_pullback
end
_unshaped_tangent(x::ShapedAsNT, dy::AbstractArray{<:Real}) = Tangent(x, dy)
_unshaped_tangent(x::ShapedAsNT, dy::_ZeroLike) = _az_tangent(dy)
function ChainRulesCore.rrule(::typeof(unshaped), x::ShapedAsNT)
unshaped_nt_pullback(ΔΩ) = (NoTangent(), ProjectTo(x)(_unshaped_tangent(x, _unpack_tangent(ΔΩ))))
return unshaped(x), unshaped_nt_pullback
end
function ChainRulesCore.rrule(::typeof(unshaped), x::ShapedAsNT, vs::NamedTupleShape)
unshaped_nt_pullback(ΔΩ) = (NoTangent(), ProjectTo(x)(_unshaped_tangent(x, _unpack_tangent(ΔΩ))), NoTangent())
return unshaped(x, vs), unshaped_nt_pullback
end
function _unshaped_tangent(x::NamedTuple, vs::NamedTupleShape, dy::AbstractArray{<:Real})
gs = gradient_shape(vs)
# gs(dy) can be a NamedTuple or a ShapedAsNT, depending on vs:
dx = convert(NamedTuple, gs(dy))
Tangent{typeof(x),typeof(dx)}(dx)
end
_unshaped_tangent(x::NamedTuple, vs::NamedTupleShape, dy::_ZeroLike) = _az_tangent(dy)
function ChainRulesCore.rrule(::typeof(unshaped), x::NamedTuple, vs::NamedTupleShape)
unshaped_nt_pullback(ΔΩ) = (NoTangent(), _unshaped_tangent(x, vs, _unpack_tangent(ΔΩ)), NoTangent())
return unshaped(x, vs), unshaped_nt_pullback
end
_shapedasnt_tangent(dy::_ZeroLike, vs::NamedTupleShape{names}) where names = _az_tangent(dy)
_shapedasnt_tangent(dy::ShapedAsNT{names}, vs::NamedTupleShape{names}) where names = unshaped(dy)
function _shapedasnt_tangent(
dy::Tangent{<:NamedTuple{names},<:NamedTuple{names}},
vs::NamedTupleShape{names}
) where names
unshaped(_backing(dy), gradient_shape(vs))
end
function _shapedasnt_tangent(
dy::Tangent{<:Any,<:NamedTuple{(:__internal_data, :__internal_valshape)}},
vs::NamedTupleShape{names}
) where names
_backing(dy).__internal_data
end
function ChainRulesCore.rrule(::Type{ShapedAsNT}, A::AbstractVector{<:Real}, vs::NamedTupleShape{names}) where names
shapedasnt_pullback(ΔΩ) = (NoTangent(), _shapedasnt_tangent(unthunk(ΔΩ), vs), NoTangent())
return ShapedAsNT(A, vs), shapedasnt_pullback
end
"""
ShapedAsNTArray{T<:NamedTuple,...} <: AbstractArray{T,N}
View of an `AbstractArray{<:AbstractVector{<:Real},N}` as an array of
`NamedTuple`s, according to a specified [`NamedTupleShape`](@ref).
`ShapedAsNTArray` implements the `Tables` API.
Constructors:
ShapedAsNTArray(
data::AbstractArray{<:AbstractVector{<:Real}},
shape::NamedTupleShape
)
shape.(data)
The resulting `ShapedAsNTArray` shares memory with `data`:
```julia
using ValueShapes, ArraysOfArrays, Tables, TypedTables
X = [
(a = 42, b = rand(1:9, 2, 3))
(a = 11, b = rand(1:9, 2, 3))
]
shape = valshape(X[1])
data = nestedview(Array{Int}(undef, totalndof(shape), 2))
Y = shape.(data)
@assert Y isa ShapedAsNTArray
Y[:] = X
@assert Y[1] == X[1] == shape(data[1])
@assert Y.a == [42, 11]
Tables.columns(Y)
@assert unshaped.(Y) === data
@assert Table(Y) isa TypedTables.Table
```
Use `unshaped.(Y)` to access `data` directly.
`Tables.columns(Y)` will return a `NamedTuple` of columns. They will contain
a copy of the data, using a memory layout that is as contiguous as possible
for each column.
"""
struct ShapedAsNTArray{T,N,D<:AbstractArray{<:AbstractVector{<:Real},N},S<:NamedTupleShape} <: AbstractArray{T,N}
__internal_data::D
__internal_elshape::S
end
export ShapedAsNTArray
function ShapedAsNTArray(data::D, shape::S) where {N,T<:Real,D<:AbstractArray{<:AbstractVector{T},N},S<:NamedTupleShape}
NT_T = shaped_type(shape, T)
ShapedAsNTArray{NT_T,N,D,S}(data, shape)
end
Base.Broadcast.broadcasted(vs::NamedTupleShape, A::AbstractArray{<:AbstractVector{<:Real}}) =
ShapedAsNTArray(A, vs)
@inline _data(A::ShapedAsNTArray) = getfield(A, :__internal_data)
@inline _elshape(A::ShapedAsNTArray) = getfield(A, :__internal_elshape)
@inline elshape(A::ShapedAsNTArray) = _elshape(A)
realnumtype(::Type{<:ShapedAsNTArray{<:Any,N,<:AbstractArray{<:AbstractArray{T}}}}) where {T<:Real,N} = T
Base.Broadcast.broadcasted(::typeof(identity), A::ShapedAsNTArray) = A
Base.Broadcast.broadcasted(::typeof(unshaped), A::ShapedAsNTArray) = _data(A)
function Base.Broadcast.broadcasted(::typeof(unshaped), A::ShapedAsNTArray, vsref::Ref{<:AbstractValueShape})
@_adignore elshape(A) <= vsref[] || throw(ArgumentError("Shape of value not compatible with given shape"))
_data(A)
end
@inline function Base.getproperty(A::ShapedAsNTArray, p::Symbol)
# Need to include internal fields of ShapedAsNTArray to make Zygote happy (ToDo: still true?):
if p == :__internal_data
getfield(A, :__internal_data)
elseif p == :__internal_elshape
getfield(A, :__internal_elshape)
else
data = _data(A)
shape = _elshape(A)
va = getproperty(_accessors(shape), p)
view.(data, Ref(va))
end
end
@inline function Base.propertynames(A::ShapedAsNTArray, private::Bool = false)
names = Base.propertynames(_elshape(A))
if private
(names..., :__internal_data, :__internal_elshape)
else
names
end
end
Base.:(==)(A::ShapedAsNTArray, B::ShapedAsNTArray) = _data(A) == _data(B) && _elshape(A) == _elshape(B)
Base.isequal(A::ShapedAsNTArray, B::ShapedAsNTArray) = isequal(_data(A), _data(B)) && _elshape(A) == _elshape(B)
Base.isapprox(A::ShapedAsNTArray, B::ShapedAsNTArray; kwargs...) = isapprox(_data(A), _data(B); kwargs...) && _elshape(A) == _elshape(B)
@inline Base.size(A::ShapedAsNTArray) = size(_data(A))
@inline Base.axes(A::ShapedAsNTArray) = axes(_data(A))
@inline Base.IndexStyle(::Type{<:ShapedAsNTArray{T,N,D}}) where {T,N,D} = IndexStyle(D)
ShapedAsNT(A::ShapedAsNTArray{T,0}) where T = _elshape(A)(first(_data(A)))
ShapedAsNT{names}(A::ShapedAsNTArray{<:NamedTuple{names},0}) where names = _elshape(A)(first(_data(A)))
Base.convert(::Type{ShapedAsNT}, A::ShapedAsNTArray{T,0}) where T = ShapedAsNT(A)
Base.convert(::Type{ShapedAsNT{names}}, A::ShapedAsNTArray{<:NamedTuple{names},0}) where names = ShapedAsNT{names}(A)
Base.@propagate_inbounds _apply_ntshape_copy(data::AbstractVector{<:Real}, shape::NamedTupleShape) = shape(data)
Base.@propagate_inbounds _apply_ntshape_copy(data::AbstractArray{<:AbstractVector{<:Real}}, shape::NamedTupleShape) =
ShapedAsNTArray(data, shape)
Base.getindex(A::ShapedAsNTArray, idxs...) = _apply_ntshape_copy(getindex(_data(A), idxs...), _elshape(A))
Base.view(A::ShapedAsNTArray, idxs...) = ShapedAsNTArray(view(_data(A), idxs...), _elshape(A))
function Base.setindex!(A::ShapedAsNTArray, x, idxs::Integer...)
A_idxs = ShapedAsNT(getindex(_data(A), idxs...), _elshape(A))
setindex!(A_idxs, x)
end
function Base.similar(A::ShapedAsNTArray{T}, ::Type{T}, dims::Dims) where T
data = _data(A)
U = eltype(data)
newdata = similar(data, U, dims)
# In case newdata is not something like an ArrayOfSimilarVectors:
if !isempty(newdata) && !isassigned(newdata, firstindex(newdata))
for i in eachindex(newdata)
newdata[i] = similar(data[firstindex(data)])
end
end
ShapedAsNTArray(newdata, _elshape(A))
end
Base.empty(A::ShapedAsNTArray{T,N,D,S}) where {T,N,D,S} =
ShapedAsNTArray{T,N,D,S}(empty(_data(A)), _elshape(A))
# `TypedTables.columnnames` is a separate function from `Tables.columnnames` and
# doesn't support arrays of ShapedAsNT, so define:
TypedTables.columnnames(A::ShapedAsNTArray) = Tables.columnnames(A)
Base.show(io::IO, ::MIME"text/plain", A::ShapedAsNTArray) = TypedTables.showtable(io, A)
function Base.show(io::IO, ::MIME"text/plain", A::ShapedAsNTArray{T,0}) where T
println(io, "0-dimensional ShapedAsNTArray:")
show(io, A[])
end
Base.copy(A::ShapedAsNTArray) = ShapedAsNTArray(copy(_data(A)), _elshape(A))
Base.pop!(A::ShapedAsNTArray) = _elshape(A)(pop!(_data(A)))
# Base.push!(A::ShapedAsNTArray, x::Any) # ToDo
Base.popfirst!(A::ShapedAsNTArray) = _elshape(A)(popfirst!(_data(A)))
# Base.pushfirst!(A::ShapedAsNTArray, x::Any) # ToDo
function Base.append!(A::ShapedAsNTArray, B::ShapedAsNTArray)
_elshape(A) == _elshape(B) || throw(ArgumentError("Can't append ShapedAsNTArray instances with different element shapes"))
append!(_data(A), _data(B))
A
end
# Base.append!(A::ShapedAsNTArray, B::AbstractArray) # ToDo
function Base.prepend!(A::ShapedAsNTArray, B::ShapedAsNTArray)
_elshape(A) == _elshape(B) || throw(ArgumentError("Can't prepend ShapedAsNTArray instances with different element shapes"))
prepend!(_data(A), _data(B))
A
end
# Base.prepend!(A::ShapedAsNTArray, B::AbstractArray) # ToDo
function Base.deleteat!(A::ShapedAsNTArray, i)
deleteat!(_data(A), i)
A
end
# Base.insert!(A::ShapedAsNTArray, i::Integer, x::Any) # ToDo
Base.splice!(A::ShapedAsNTArray, i) = _elshape(A)(splice!(_data(A), i))
# Base.splice!(A::ShapedAsNTArray, i, replacement) # ToDo
function Base.vcat(A::ShapedAsNTArray, B::ShapedAsNTArray)
_elshape(A) == _elshape(B) || throw(ArgumentError("Can't vcat ShapedAsNTArray instances with different element shapes"))
ShapedAsNTArray(vcat(_data(A), _data(B)), _elshape(A))
end
# Base.vcat(A::ShapedAsNTArray, B::AbstractArray) # ToDo
# Base.hcat(A::ShapedAsNTArray, B) # ToDo
Base.vec(A::ShapedAsNTArray{T,1}) where T = A
Base.vec(A::ShapedAsNTArray) = ShapedAsNTArray(vec(_data(A)), _elshape(A))
Tables.istable(::Type{<:ShapedAsNTArray}) = true
Tables.rowaccess(::Type{<:ShapedAsNTArray}) = true
Tables.columnaccess(::Type{<:ShapedAsNTArray}) = true
Tables.schema(A::ShapedAsNTArray{T}) where {T} = Tables.Schema(T)
function Tables.columns(A::ShapedAsNTArray)
data = _data(A)
accessors = _accessors(_elshape(A))
# Copy columns to make each column as contiguous in memory as possible:
map(va -> getindex.(data, Ref(va)), accessors)
end
@inline Tables.rows(A::ShapedAsNTArray) = A
function Adapt.adapt_structure(to, x::ShapedAsNTArray)
ShapedAsNTArray(Adapt.adapt(to, _data(x)), _elshape(x))
end
const _AnySNTArray{names} = ShapedAsNTArray{<:Union{NamedTuple{names},ShapedAsNT{names}}}
# For accumulation during automatic differentiation:
function Base.:(+)(A::_AnySNTArray{names}, B::_AnySNTArray{names}) where names
@argcheck elshape(A) == elshape(B)
ShapedAsNTArray(_data(A) + _data(B), elshape(A))
end
# For accumulation during automatic differentiation:
function ChainRulesCore.add!!(A::_AnySNTArray{names}, B::_AnySNTArray{names}) where names
@argcheck elshape(A) == elshape(B)
ChainRulesCore.add!!(_data(A), _data(B))
return A
end
function ChainRulesCore.Tangent(X::T, unshaped_dX::AbstractArray{<:AbstractVector{<:Real}}) where {T<:ShapedAsNTArray}
vs = elshape(X)
gs = gradient_shape(vs)
contents = (__internal_data = unshaped_dX, __internal_elshape = gs)
Tangent{T,typeof(contents)}(contents)
end
struct GradShapedAsNTArrayProjector{VS<:NamedTupleShape} <: Function
gradshape::VS
end
ChainRulesCore.ProjectTo(X::ShapedAsNTArray) = GradShapedAsNTArrayProjector(gradient_shape(elshape(X)))
function (project::GradShapedAsNTArrayProjector{<:NamedTupleShape{names}})(data::NamedTuple{(:__internal_data, :__internal_elshape)}) where names
_check_ntgs_tangent_compat(project.gradshape, data.__internal_elshape)
_keep_zerolike(ShapedAsNTArray, data.__internal_data, project.gradshape)
end
(project::GradShapedAsNTArrayProjector{<:NamedTupleShape{names}})(tangent::Tangent{<:_AnySNTArray}) where names = project(_backing(tangent))
(project::GradShapedAsNTArrayProjector{<:NamedTupleShape{names}})(tangent::_AnySNTArray) where names = tangent
(project::GradShapedAsNTArrayProjector{<:NamedTupleShape})(tangent::_ZeroLike) = _az_tangent(tangent)
function (project::GradShapedAsNTArrayProjector{<:NamedTupleShape{names}})(
tangent::AbstractArray{<:Union{Tangent{<:Any,<:NamedTuple{names}},ShapedAsNT{names}}}
) where names
data = _shapedasntarray_tangent(tangent, project.gradshape)
ShapedAsNTArray(data, project.gradshape)
end
_tablecols_tangent(X::ShapedAsNTArray, dY::_ZeroLike) = _az_tangent(dY)
function _write_snta_col!(data::AbstractArray{<:AbstractVector{<:Real}}, va::ValueAccessor, A::AbstractArray)
B = view.(data, Ref(va))
B .= A
end
function _write_snta_col!(data::ArrayOfSimilarVectors{<:Real}, va::ValueAccessor, A::_ZeroLike)
flat_data = flatview(data)
idxs = view_idxs(axes(flat_data, 1), va)
fill!(view(flat_data, idxs, :), zero(eltype(flat_data)))
end
_write_snta_col!(data::AbstractArray{<:AbstractVector{<:Real}}, va::ConstAccessor, A) = nothing
function _tablecols_tangent(X::_AnySNTArray, dY::NamedTuple{names}) where names
tangent = Tangent(X, _tangent_array(_data(X)))
dx_unshaped, gs = _backing(tangent)
# ToDo: Re-check safety of this after nested-NT arrays are implemented:
map((va_i, dY_i) -> _write_snta_col!(dx_unshaped, va_i, dY_i), _accessors(gs), _notangent_to_zerotangent(dY))
tangent
end
function ChainRulesCore.rrule(::typeof(Tables.columns), X::ShapedAsNTArray)
tablecols_pullback(ΔΩ) = begin
(NoTangent(), ProjectTo(X)(_tablecols_tangent(X, _unpack_tangent(ΔΩ))))
end
return Tables.columns(X), tablecols_pullback
end
_data_tangent(X::ShapedAsNTArray, dY::AbstractArray{<:AbstractVector{<:Real}}) = Tangent(X, dY)
_data_tangent(X::ShapedAsNTArray, dY::_ZeroLike) = _az_tangent(dY)
function ChainRulesCore.rrule(::typeof(_data), X::ShapedAsNTArray)
_data_pullback(ΔΩ) = (NoTangent(), ProjectTo(X)(_data_tangent(X, _unpack_tangent(ΔΩ))))
return _data(X), _data_pullback
end
_shapedasntarray_tangent(dY::_ZeroLike, vs::NamedTupleShape{names}) where names = _az_tangent(dY)
_shapedasntarray_tangent(dY::_AnySNTArray, vs::NamedTupleShape{names}) where names = unshaped.(dY)
function _shapedasntarray_tangent(
dY::AbstractArray{<:Tangent{<:Any,<:NamedTuple{names}}},
vs::NamedTupleShape{names}
) where names
ArrayOfSimilarArrays(unshaped.(ValueShapes._backing.(dY), Ref(gradient_shape(vs))))
end
function _shapedasntarray_tangent(
dY::AbstractArray{<:ShapedAsNT{names}},
vs::NamedTupleShape{names}
) where names
ArrayOfSimilarArrays(unshaped.(dY))
end
function _shapedasntarray_tangent(
dY::Tangent{<:Any,<:NamedTuple{(:__internal_data, :__internal_elshape)}},
vs::NamedTupleShape{names}
) where names
_backing(dY).__internal_data
end
function ChainRulesCore.rrule(::Type{ShapedAsNTArray}, A::AbstractArray{<:AbstractVector{<:Real}}, vs::NamedTupleShape{names}) where names
shapedasntarray_pullback(ΔΩ) = (NoTangent(), _shapedasntarray_tangent(unthunk(ΔΩ), vs), NoTangent())
return ShapedAsNTArray(A, vs), shapedasntarray_pullback
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
@static if hasmethod(reshape, Tuple{Distribution{Multivariate,Continuous}, Int, Int})
_reshape_arraylike_dist(d::Distribution, sz::Integer...) = reshape(d, sz)
else
_reshape_arraylike_dist(d::Distribution, sz1::Integer, sz2::Integer) = MatrixReshaped(d, sz1, sz2)
end
"""
ReshapedDist <: Distribution
A multivariate distribution reshaped using a given
[`AbstractValueShape`](@ref).
Constructors:
```julia
ReshapedDist(dist::MultivariateDistribution, shape::AbstractValueShape)
```
In addition, `MultivariateDistribution`s can be reshaped via
```julia
(shape::AbstractValueShape)(dist::MultivariateDistribution)
```
with the difference that
```julia
(shape::ArrayShape{T,1})(dist::MultivariateDistribution)
```
will return the original `dist` instead of a `ReshapedDist`.
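Example (a sketch; assumes Distributions.jl is loaded):
```julia
using Distributions
d6 = product_distribution(fill(Normal(), 6)) # a 6-dimensional distribution
rd = ReshapedDist(d6, ArrayShape{Real}(2, 3)) # view variates as 2×3 matrices
x = rand(rd)
size(x) == (2, 3)
logpdf(rd, x) ≈ logpdf(d6, vec(x))
```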
"""
struct ReshapedDist{
VF <: VariateForm,
VS <: ValueSupport,
D <: Distribution{Multivariate,VS},
S <: AbstractValueShape
} <: Distribution{VF,VS}
dist::D
shape::S
end
export ReshapedDist
_variate_form(shape::ScalarShape) = Univariate
@static if isdefined(Distributions, :ArrayLikeVariate)
_variate_form(shape::ArrayShape{T,N}) where {T,N} = ArrayLikeVariate{N}
else
_variate_form(shape::ArrayShape{T,1}) where T = Multivariate
_variate_form(shape::ArrayShape{T,2}) where T = Matrixvariate
end
_variate_form(shape::NamedTupleShape{names}) where names = NamedTupleVariate{names}
_with_zeroconst(shape::AbstractValueShape) = replace_const_shapes(const_zero_shape, shape)
function ReshapedDist(dist::MultivariateDistribution{VS}, shape::AbstractValueShape) where {VS}
@argcheck totalndof(varshape(dist)) == totalndof(shape)
VF = _variate_form(shape)
D = typeof(dist)
S = typeof(shape)
ReshapedDist{VF,VS,D,S}(dist, shape)
end
function (shape::ArrayShape{<:Real,1})(dist::MultivariateDistribution)
@argcheck totalndof(varshape(dist)) == totalndof(shape)
dist
end
(shape::ArrayShape{<:Real})(dist::MultivariateDistribution) = _reshape_arraylike_dist(dist, size(shape)...)
# ToDo: Enable when `reshape(::MultivariateDistribution, ())` becomes fully functional in Distributions:
#(shape::ScalarShape{<:Real})(dist::MultivariateDistribution) = _reshape_arraylike_dist(dist, size(shape)...)
(shape::AbstractValueShape)(dist::MultivariateDistribution) = ReshapedDist(dist, shape)
@inline varshape(rd::ReshapedDist) = rd.shape
@inline unshaped(rd::ReshapedDist) = rd.dist
Random.rand(rng::AbstractRNG, rd::ReshapedDist{Univariate}) = varshape(rd)(rand(rng, unshaped(rd)))
function Distributions._rand!(rng::AbstractRNG, rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real})
Distributions._rand!(rng, unshaped(rd), x)
end
@static if isdefined(Distributions, :ArrayLikeVariate)
function Distributions._rand!(rng::AbstractRNG, rd::ReshapedDist{<:ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where N
Distributions._rand!(rng, _reshape_arraylike_dist(unshaped(rd), size(rd)...), x)
end
else
function Distributions._rand!(rng::AbstractRNG, rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real})
Distributions._rand!(rng, _reshape_arraylike_dist(unshaped(rd), size(rd)...), x)
end
end
Base.size(rd::ReshapedDist{<:PlainVariate}) = size(varshape(rd))
Base.length(rd::ReshapedDist{<:PlainVariate}) = prod(size(rd))
Statistics.mean(rd::ReshapedDist) = varshape(rd)(mean(unshaped(rd)))
StatsBase.mode(rd::ReshapedDist) = varshape(rd)(mode(unshaped(rd)))
Statistics.var(rd::ReshapedDist) = _with_zeroconst(varshape(rd))(var(unshaped(rd)))
Statistics.cov(rd::ReshapedDist{Multivariate}) = cov(unshaped(rd))
Distributions.pdf(rd::ReshapedDist{Univariate}, x::Real) = pdf(unshaped(rd), unshaped(x))
Distributions._pdf(rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real}) = pdf(unshaped(rd), x)
Distributions._pdf(rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = pdf(_reshape_arraylike_dist(unshaped(rd), size(rd)...), x)
Distributions.logpdf(rd::ReshapedDist{Univariate}, x::Real) = logpdf(unshaped(rd), unshaped(x))
Distributions._logpdf(rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real}) = logpdf(unshaped(rd), x)
Distributions._logpdf(rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = logpdf(_reshape_arraylike_dist(unshaped(rd), size(rd)...), x)
Distributions.insupport(rd::ReshapedDist{Univariate}, x::Real) = insupport(unshaped(rd), unshaped(x))
Distributions.insupport(rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real}) = insupport(unshaped(rd), x)
Distributions.insupport(rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = insupport(_reshape_arraylike_dist(unshaped(rd), size(rd)...), x)
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
AbstractScalarShape{T} <: AbstractValueShape
"""
abstract type AbstractScalarShape{T} <: AbstractValueShape end
export AbstractScalarShape
@inline Base.size(::AbstractScalarShape) = ()
@inline Base.length(::AbstractScalarShape) = 1
@inline default_unshaped_eltype(shape::AbstractScalarShape{T}) where {T<:Real} =
default_datatype(T)
@inline default_unshaped_eltype(shape::AbstractScalarShape{Complex}) = default_datatype(Real)
@inline default_unshaped_eltype(shape::AbstractScalarShape{<:Complex{T}}) where {T} =
default_unshaped_eltype(_valshapeoftype(T))
@inline shaped_type(shape::AbstractScalarShape{<:Real}, ::Type{T}) where {T<:Real} = T
@inline shaped_type(shape::AbstractScalarShape{<:Complex}, ::Type{T}) where {T<:Real} = Complex{T}
"""
ScalarShape{T} <: AbstractScalarShape{T}
A `ScalarShape` describes the shape of scalar values of a given type.
Constructor:
ScalarShape{T::Type}()
T may be an abstract type, a Union, or a specific type, e.g.
ScalarShape{Real}()
ScalarShape{Integer}()
ScalarShape{Float32}()
ScalarShape{Complex}()
Scalar shapes may have a total number of degrees of freedom
(see [`totalndof`](@ref)) greater than one, e.g. shapes of complex-valued
scalars:
totalndof(ScalarShape{Real}()) == 1
totalndof(ScalarShape{Complex}()) == 2
See also the documentation of [`AbstractValueShape`](@ref).
"""
struct ScalarShape{T} <: AbstractScalarShape{T} end
export ScalarShape
@inline Base.:(<=)(a::ScalarShape{T}, b::ScalarShape{U}) where {T,U} = T<:U
@inline _valshapeoftype(T::Type{<:Number}) = ScalarShape{T}()
@inline totalndof(::ScalarShape{T}) where {T <: Real} = 1
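# For non-Real scalar types (e.g. structs), the total dof is the sum of the
# dofs of all fields, computed at compile time via @generated when possible: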
@inline function totalndof(::ScalarShape{T}) where {T <: Any}
if @generated
fieldtypes = ntuple(i -> fieldtype(T, i), Val(fieldcount(T)))
l = sum(U -> totalndof(_valshapeoftype(U)), fieldtypes)
quote $l end
else
# Shouldn't be used, ideally
@assert false
fieldtypes = ntuple(i -> fieldtype(T, i), Val(fieldcount(T)))
sum(U -> totalndof(_valshapeoftype(U)), fieldtypes)
end
end
(shape::ScalarShape{T})(::UndefInitializer) where {T<:Number} = zero(default_datatype(T))
function unshaped(x::Union{T,AbstractArray{T,0}}, shape::ScalarShape{U}) where {T<:Real,U<:Real}
T <: U || throw(ArgumentError("Element type $T of scalar value not compatible with type $U of given scalar shape"))
unshaped(x)
end
replace_const_shapes(f::Function, shape::ScalarShape) = shape
const ScalarAccessor{T} = ValueAccessor{ScalarShape{T}} where {T}
# Scalar accessors should return value, not 0-dim array view:
@inline _apply_accessor_to_data(acc::ScalarAccessor, data::AbstractVector{<:Real}) = getindex(data, acc)
@inline view_idxs(idxs::AbstractUnitRange{<:Integer}, va::ScalarAccessor{<:Real}) = first(idxs) + va.offset
# ToDo: view_idxs for scalars with dof greater than 1 (complex, etc.)
Base.@propagate_inbounds function _bcasted_view(data::AbstractArrayOfSimilarVectors{<:Real,N}, va::ScalarAccessor) where N
flat_data = flatview(data)
idxs = view_idxs(axes(flat_data, 1), va)
colons = map(_ -> :, Base.tail(axes(flat_data)))
view(flat_data, idxs, colons...)
end
Base.Broadcast.broadcasted(::typeof(getindex), A::AbstractArrayOfSimilarVectors{<:Real,N}, acc::Ref{<:ScalarAccessor}) where N =
copy(_bcasted_view(A, acc[]))
Base.Broadcast.broadcasted(::typeof(view), A::AbstractArrayOfSimilarVectors{<:Real,N}, acc::Ref{<:ScalarAccessor}) where N =
_bcasted_view(A, acc[])
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
@inline _adignore_call(f) = f()
@inline _adignore_call_pullback(@nospecialize ΔΩ) = (NoTangent(), NoTangent())
ChainRulesCore.rrule(::typeof(_adignore_call), f) = _adignore_call(f), _adignore_call_pullback
macro _adignore(expr)
:(_adignore_call(() -> $(esc(expr))))
end
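# Usage: `@_adignore expr` evaluates `expr` normally but blocks gradients from
# flowing through it during reverse-mode AD (used e.g. for shape checks).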
_backing(x::Any) = x
_backing(x::Tangent) = backing(x)
_unpack_tangent(x::Any) = _backing(unthunk(x))
const _ZeroLike = Union{AbstractZero,Nothing}
_az_tangent(x::AbstractZero) = x
_az_tangent(::Nothing) = ZeroTangent() # Still necessary? Return NoTangent() instead?
_notangent_to_zerotangent(x::Any) = x
_notangent_to_zerotangent(x::Union{NoTangent,Nothing}) = ZeroTangent()
_notangent_to_zerotangent(x::Union{Tuple,NamedTuple}) = map(_notangent_to_zerotangent, x)
function _tangent_array(A::AbstractArray{T}) where T
dA = similar(A, float(T))
fill!(dA, NaN) # For safety
return dA
end
function _tangent_array(A::ArrayOfSimilarVectors{T}) where T
dA = similar(A, Vector{float(T)})
fill!(flatview(dA), NaN) # For safety
return dA
end
@inline _keep_zerolike(::Type{T}, x, xs...) where T = T(x, xs...)
@inline _keep_zerolike(::Type{T}, x::_ZeroLike, xs...) where T = _az_tangent(x)
@inline _keep_zerolike(f::F, x, xs...) where F = f(x, xs...)
@inline _keep_zerolike(f::F, x::_ZeroLike, xs...) where F = _az_tangent(x)
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
ValueAccessor{S<:AbstractValueShape}
A value accessor provides a means to access a value with a given shape
stored in a flat real-valued data vector with a given offset position.
Constructor:
ValueAccessor{S}(shape::S, offset::Int)
The offset is relative to the first index of a flat data array, so if
the value is stored at the beginning of the array, the offset will be zero.
A `ValueAccessor` can be used to index into a given flat data array.
Example:
```julia
acc = ValueAccessor(ArrayShape{Real}(2,3), 2)
valshape(acc) == ArrayShape{Real,2}((2, 3))
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
data[acc] == acc(data) == [3 5 7; 4 6 8]
```
Note: Subtypes of [`AbstractValueShape`](@ref) should specialize
[`ValueShapes.vs_getindex`](@ref), [`ValueShapes.vs_unsafe_view`](@ref) and
[`ValueShapes.vs_setindex!`](@ref) for their `ValueAccessor{...}`.
Specializing `Base.getindex`, `Base.view`, `Base.unsafe_view` or
`Base.setindex!` directly may result in method ambiguities with custom array
types that specialize these functions in a very generic fashion.
"""
struct ValueAccessor{S<:AbstractValueShape}
shape::S
offset::Int
len::Int
ValueAccessor{S}(shape::S, offset::Int) where {S<:AbstractValueShape} =
new{S}(shape, offset, totalndof(shape))
end
export ValueAccessor
ValueAccessor(shape::S, offset::Int) where {S<:AbstractValueShape} = ValueAccessor{S}(shape, offset)
Base.:(==)(a::ValueAccessor, b::ValueAccessor) = a.shape == b.shape && a.offset == b.offset && a.len == b.len
Base.isequal(a::ValueAccessor, b::ValueAccessor) = isequal(a.shape, b.shape) && isequal(a.offset, b.offset) && isequal(a.len, b.len)
# Base.isapprox(a::ValueAccessor, b::ValueAccessor) = ...
Base.hash(x::ValueAccessor, h::UInt) = hash(x.len, hash(x.offset, hash(x.shape, hash(:ValueAccessor, hash(:ValueShapes, h)))))
@inline _apply_accessor_to_data(acc::ValueAccessor, data::AbstractVector{<:Real}) = view(data, acc)
@inline function (acc::ValueAccessor)(data::AbstractVector{<:Real})
_apply_accessor_to_data(acc, data)
end
# Reserve broadcasting semantics for value accessors:
@inline Base.Broadcast.broadcastable(va::ValueAccessor) =
throw(ArgumentError("broadcasting over `ValueAccessor`s is reserved"))
default_unshaped_eltype(va::ValueAccessor) = default_unshaped_eltype(va.shape)
Base.size(va::ValueAccessor) = size(va.shape)
Base.length(va::ValueAccessor) = length(va.shape)
# Can't use `idxs::Vararg{ValueAccessor,N}`, would cause ambiguities with
# Base for N == 0.
Base.to_indices(A::AbstractArray{T,1}, I::Tuple{ValueAccessor}) where {T<:Real} = I
Base.to_indices(A::AbstractArray{T,2}, I::Tuple{ValueAccessor,ValueAccessor}) where {T<:Real} = I
Base.to_indices(A::AbstractArray{T,3}, I::Tuple{ValueAccessor,ValueAccessor,ValueAccessor}) where {T<:Real} = I
Base.checkindex(::Type{Bool}, inds::AbstractUnitRange, i::ValueAccessor) =
checkindex(Bool, inds, view_idxs(inds, i))
valshape(va::ValueAccessor) = va.shape
# Would this be useful?
# AbstractValueShape(va::ValueAccessor) = valshape(va)
# Base.convert(::Type{AbstractValueShape}, va::ValueAccessor) = AbstractValueShape(va)
function view_range end
@inline function view_range(idxs::AbstractUnitRange{<:Integer}, va::ValueAccessor)
from = first(idxs) + va.offset
to = from + va.len - 1
from:to
end
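# Illustrative example: with va = ValueAccessor(ArrayShape{Real}(2, 3), 2),
# view_range(1:9, va) == 3:8 (six flat entries, starting after the offset).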
function view_idxs end
@inline view_idxs(idxs::AbstractUnitRange{<:Integer}, va::ValueAccessor) = view_range(idxs, va)
"""
ValueShapes.vs_getindex(data::AbstractArray{<:Real}, idxs::ValueAccessor...)
Specialize `ValueShapes.vs_getindex` instead of `Base.getindex` for
[`ValueShapes.ValueAccessor`](@ref)s, to avoid method ambiguities with
certain custom array types.
"""
function vs_getindex end
# Can't use `idxs::Vararg{ValueAccessor,N}`, would cause ambiguities with
# Base for N == 0.
Base.@propagate_inbounds Base._getindex(::IndexStyle, data::AbstractArray{<:Real,1}, idx1::ValueAccessor) =
vs_getindex(data, idx1)
Base.@propagate_inbounds Base._getindex(::IndexStyle, data::AbstractArray{<:Real,2}, idx1::ValueAccessor, idx2::ValueAccessor) =
vs_getindex(data, idx1, idx2)
Base.@propagate_inbounds Base._getindex(::IndexStyle, data::AbstractArray{<:Real,3}, idx1::ValueAccessor, idx2::ValueAccessor, idx3::ValueAccessor) =
vs_getindex(data, idx1, idx2, idx3)
"""
ValueShapes.vs_unsafe_view(data::AbstractArray{<:Real}, idxs::ValueAccessor...)
Specialize `ValueShapes.vs_unsafe_view` instead of `Base.view` or
`Base.unsafe_view` for [`ValueShapes.ValueAccessor`](@ref)s, to avoid method
ambiguities with certain custom array types.
"""
function vs_unsafe_view end
# Can't use `idxs::Vararg{ValueAccessor,N}`, would cause ambiguities with
# Base for N == 0.
Base.@propagate_inbounds Base.unsafe_view(data::AbstractArray{<:Real,1}, idx1::ValueAccessor) =
vs_unsafe_view(data, idx1)
Base.@propagate_inbounds Base.unsafe_view(data::AbstractArray{<:Real,2}, idx1::ValueAccessor, idx2::ValueAccessor) =
vs_unsafe_view(data, idx1, idx2)
Base.@propagate_inbounds Base.unsafe_view(data::AbstractArray{<:Real,3}, idx1::ValueAccessor, idx2::ValueAccessor, idx3::ValueAccessor) =
vs_unsafe_view(data, idx1, idx2, idx3)
"""
ValueShapes.vs_setindex!(data::AbstractArray{<:Real}, v, idxs::ValueAccessor...)
Specialize `ValueShapes.vs_setindex!` instead of `Base.setindex!` for
[`ValueShapes.ValueAccessor`](@ref)s, to avoid method ambiguities with
certain custom array types.
"""
function vs_setindex! end
# Can't use `idxs::Vararg{ValueAccessor,N}`, would cause ambiguities with
# Base for N == 0.
Base.@propagate_inbounds Base._setindex!(::IndexStyle, data::AbstractArray{<:Real,1}, v, idx1::ValueAccessor) =
vs_setindex!(data, v, idx1)
Base.@propagate_inbounds Base._setindex!(::IndexStyle, data::AbstractArray{<:Real,2}, v, idx1::ValueAccessor, idx2::ValueAccessor) =
vs_setindex!(data, v, idx1, idx2)
Base.@propagate_inbounds Base._setindex!(::IndexStyle, data::AbstractArray{<:Real,3}, v, idx1::ValueAccessor, idx2::ValueAccessor, idx3::ValueAccessor) =
vs_setindex!(data, v, idx1, idx2, idx3)
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
"""
realnumtype(T::Type)
Return the underlying numerical type of T that's a subtype of `Real`.
Uses type promotion among the underlying `Real` types in `T`.
e.g.
```julia
A = fill(fill(rand(Float32, 5), 10), 5)
realnumtype(typeof(A)) == Float32
```
"""
function realnumtype end
export realnumtype
realnumtype(::Type{T}) where T = throw(ArgumentError("Can't derive numeric type for type $T"))
realnumtype(::Type{T}) where {T<:Real} = T
realnumtype(::Type{<:Complex{T}}) where {T<:Real} = T
realnumtype(::Type{<:Enum{T}}) where {T<:Real} = T
realnumtype(::Type{<:AbstractArray{T}}) where {T} = realnumtype(T)
realnumtype(::Type{<:NamedTuple{names,T}}) where {names,T} = realnumtype(T)
realnumtype(::Type{NTuple{N,T}}) where {N,T} = realnumtype(T)
@generated function realnumtype(::Type{T}) where {T<:Tuple}
:(promote_type(map(realnumtype, $((T.parameters...,)))...))
end
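# e.g. realnumtype(Tuple{Float32,Vector{Float64}}) == Float64 (via promotion)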
# Use a fake numtype for non-numerical types that may be used to express
# default and missing values, string/symbol options, etc.:
realnumtype(::Type{Nothing}) = Bool
realnumtype(::Type{Missing}) = Bool
realnumtype(::Type{Tuple{}}) = Bool
realnumtype(::Type{Symbol}) = Bool
realnumtype(::Type{<:AbstractString}) = Bool
"""
ValueShapes.default_datatype(T::Type)
Return a default specific type U that is more specific than T, with U <: T.
e.g.
ValueShapes.default_datatype(Real) == Float64
ValueShapes.default_datatype(Complex) == Complex{Float64}
"""
function default_datatype end
@inline default_datatype(::Type{>:Int}) = Int
@inline default_datatype(::Type{>:Float64}) = Float64
@inline default_datatype(::Type{>:Real}) = Float64
@inline default_datatype(::Type{>:Complex{Float64}}) = Complex{Float64}
@inline default_datatype(T::Type) = T
"""
abstract type AbstractValueShape
An `AbstractValueShape` combines type and size information.
Subtypes are defined for shapes of scalars (see [`ScalarShape`](@ref)),
arrays (see [`ArrayShape`](@ref)), constant values
(see [`ConstValueShape`](@ref)) and `NamedTuple`s (see
[`NamedTupleShape`](@ref)).
Subtypes of `AbstractValueShape` must support `eltype`, `size` and
[`totalndof`](@ref).
Value shapes can be used as constructors to generate values of the given
shape with undefined content. If the element type of the shape is an abstract
or union type, a suitable concrete type will be chosen automatically, if
possible (see [`ValueShapes.default_datatype`](@ref)):
```julia
shape = ArrayShape{Real}(2,3)
A = shape(undef)
typeof(A) == Array{Float64,2}
size(A) == (2, 3)
valshape(A) == ArrayShape{Float64}(2,3)
```
Use
(shape::AbstractValueShape)(data::AbstractVector{<:Real})::eltype(shape)
to view a flat vector of anonymous real values
as a value of the given shape:
```julia
data = [1, 2, 3, 4, 5, 6]
shape(data) == [1 3 5; 2 4 6]
```
Conversely,
Base.Vector{<:Real}(undef, shape::AbstractValueShape)
will create a suitable uninitialized vector of the right length to hold such
flat data for the given shape. If no type `T` is given, a suitable data
type will be chosen automatically.
When dealing with multiple vectors of flattened data, use
shape.(data::ArraysOfArrays.AbstractVectorOfSimilarVectors)
ValueShapes supports this via specialized broadcasting.
Conversely,
ArraysOfArrays.VectorOfSimilarVectors{<:Real}(shape::AbstractValueShape)
will create a suitable vector (of length zero) of vectors that can hold
flattened data for the given shape. The result will be a
`VectorOfSimilarVectors` wrapped around a 2-dimensional `ElasticArray`.
This way, all data is stored in a single contiguous chunk of memory.
`AbstractValueShape`s can be compared with `<=` and `>=`, with semantics that
are similar to comparing types with `<:` and `>:`:
```julia
a::AbstractValueShape <= b::AbstractValueShape == true
```
implies that values of shape `a` can be used in contexts that expect
values of shape `b`. E.g.:
```julia
(ArrayShape{Float64}(4,5) <= ArrayShape{Real}(4,5)) == true
(ArrayShape{Float64}(4,5) <= ArrayShape{Integer}(4,5)) == false
(ArrayShape{Float64}(2,2) <= ArrayShape{Float64}(3,3)) == false
(ScalarShape{Real}() >= ScalarShape{Int}()) == true
```
"""
abstract type AbstractValueShape end
export AbstractValueShape
@inline Base.:(>=)(a::AbstractValueShape, b::AbstractValueShape) = b <= a
vs_cmp_pullback(ΔΩ) = (NoTangent(), NoTangent(), NoTangent())
ChainRulesCore.rrule(::typeof(Base.:(==)), a::AbstractValueShape, b::AbstractValueShape) = (a == b, vs_cmp_pullback)
ChainRulesCore.rrule(::typeof(Base.:(<=)), a::AbstractValueShape, b::AbstractValueShape) = (a <= b, vs_cmp_pullback)
ChainRulesCore.rrule(::typeof(Base.:(>=)), a::AbstractValueShape, b::AbstractValueShape) = (a >= b, vs_cmp_pullback)
# Reserve broadcasting semantics for value shapes:
@inline Base.Broadcast.broadcastable(shape::AbstractValueShape) =
throw(ArgumentError("broadcasting over `AbstractValueShape`s is reserved"))
function _valshapeoftype end
"""
ValueShapes.default_unshaped_eltype(shape::AbstractValueShape)
Returns the default real array element type to use for unshaped
representations of data with shape `shape`.
Subtypes of `AbstractValueShape` must implement
`ValueShapes.default_unshaped_eltype`.
"""
function default_unshaped_eltype end
"""
ValueShapes.shaped_type(shape::AbstractValueShape, ::Type{T}) where {T<:Real}
ValueShapes.shaped_type(shape::AbstractValueShape)
Returns the type that will result from reshaping a real-valued vector (of
element type `T`, if specified) with `shape`.
Subtypes of `AbstractValueShape` must implement
ValueShapes.shaped_type(shape::AbstractValueShape, ::Type{T}) where {T<:Real}
"""
function shaped_type end
shaped_type(shape::AbstractValueShape) = shaped_type(shape, default_unshaped_eltype(shape))
"""
valshape(x)::AbstractValueShape
valshape(acc::ValueAccessor)::AbstractValueShape
Get the value shape of an arbitrary value, resp. the shape a `ValueAccessor`
is based on, or the shape of the variates for a `Distribution`.
"""
function valshape end
export valshape
@inline valshape(x::T) where T = _valshapeoftype(T)
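# e.g. valshape(4.2) == ScalarShape{Float64}()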
"""
elshape(x)::AbstractValueShape
Get the shape of the elements of x
"""
function elshape end
export elshape
@inline elshape(x::T) where T = _valshapeoftype(eltype(T))
@inline elshape(A::AbstractArray{<:AbstractArray}) = ArrayShape{eltype(eltype(A))}(innersize(A)...)
"""
totalndof(shape::AbstractValueShape)
Get the total number of degrees of freedom of values of the given shape.
Equivalent to the length of a vector that would result from flattening the
data into a sequence of real numbers, excluding any constant values.
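For example:
```julia
totalndof(ArrayShape{Real}(3)) == 3
totalndof(ConstValueShape(4.2)) == 0  # constant values carry no degrees of freedom
```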
"""
function totalndof end
export totalndof
# Support for missing varshapes:
totalndof(::Missing) = missing
"""
unshaped(x)::AbstractVector{<:Real}
unshaped(x, shape::AbstractValueShape)::AbstractVector{<:Real}
Retrieve the unshaped underlying data of `x`, assuming `x` is a structured view
(based on some [`AbstractValueShape`](@ref)) of a flat/unstructured
real-valued data vector.
If `shape` is given, ensures that the shape of `x` is compatible with it.
Specifying a shape may be necessary if the correct shape of `x` cannot be
inferred from `x`, e.g. because `x` is assumed to have fewer degrees of
freedom (because of constant components) than would be inferred from
the plain value of `x`.
Example:
```julia
shape = NamedTupleShape(
a = ScalarShape{Real}(),
b = ArrayShape{Real}(2, 3)
)
data = [1, 2, 3, 4, 5, 6, 7]
x = shape(data)
@assert unshaped(x, shape) == data
@assert unshaped(x.a) == view(data, 1:1)
@assert unshaped(x.b) == view(data, 2:7)
```
"""
function unshaped end
export unshaped
unshaped(x::Real) = Fill(x, 1)
unshaped(x::AbstractArray{<:Real,0}) = view(x, firstindex(x):firstindex(x))
unshaped(x::SubArray{<:Real,0}) = view(parent(x), x.indices[1]:x.indices[1])
unshaped(x::AbstractArray{<:Real,1}) = x
unshaped(x::Base.ReshapedArray{T,N,<:AbstractArray{T,1}}) where {T<:Real,N} = parent(x)
const _InvValueShape = Base.Fix2{typeof(unshaped),<:AbstractValueShape}
@inline function Base.Broadcast.broadcasted(inv_vs::_InvValueShape, xs)
Base.Broadcast.broadcasted(unshaped, xs, Ref(inv_vs.x))
end
InverseFunctions.inverse(vs::AbstractValueShape) = Base.Fix2(unshaped, vs)
InverseFunctions.inverse(inv_vs::_InvValueShape) = inv_vs.x
function ChangesOfVariables.with_logabsdet_jacobian(vs::AbstractValueShape, flat_x)
x = vs(flat_x)
x, zero(float(eltype(flat_x)))
end
function ChangesOfVariables.with_logabsdet_jacobian(inv_vs::_InvValueShape, x)
flat_x = inv_vs(x)
flat_x, zero(float(eltype(flat_x)))
end
const _BroadcastValueShape = Base.Fix1{typeof(broadcast),<:AbstractValueShape}
const _BroadcastInvValueShape = Base.Fix1{typeof(broadcast),<:_InvValueShape}
const _BroadcastUnshaped = Base.Fix1{typeof(broadcast),typeof(unshaped)}
function ChangesOfVariables.with_logabsdet_jacobian(bc_vs::_BroadcastValueShape, ao_flat_x)
ao_x = bc_vs(ao_flat_x)
ao_x, zero(float(realnumtype(typeof(ao_flat_x))))
end
function ChangesOfVariables.with_logabsdet_jacobian(bc_inv_vs::Union{_BroadcastInvValueShape,_BroadcastUnshaped}, ao_x)
ao_flat_x = bc_inv_vs(ao_x)
ao_flat_x, zero(float(realnumtype(typeof(ao_flat_x))))
end
const _VSTrafo = Union{
AbstractValueShape, _InvValueShape, typeof(unshaped),
_BroadcastValueShape, _BroadcastInvValueShape, _BroadcastUnshaped
}
Base.:(∘)(::typeof(identity), f::_VSTrafo) = f
Base.:(∘)(f::_VSTrafo, ::typeof(identity)) = f
"""
stripscalar(x)
Dereference value `x`.
If `x` is a scalar-like object, like a 0-dimensional array or a `Ref`,
`stripscalar` returns its inner value. Otherwise, `x` is returned unchanged.
Useful to strip shaped scalar-like views of their 0-dim array semantics
(if present), but leave array-like views unchanged.
Example:
```julia
data = [1, 2, 3]
shape1 = NamedTupleShape(a = ScalarShape{Real}(), b = ArrayShape{Real}(2))
x1 = shape1(data)
@assert x1 isa NamedTuple
shape2 = ArrayShape{Real}(3)
x2 = shape2(data)
@assert x2 isa AbstractArray{Int,1}
@assert stripscalar(fill(4.2)) == 4.2        # 0-dim array: inner value returned
@assert stripscalar([1, 2, 3]) == [1, 2, 3]  # array-like: unchanged
```
"""
function stripscalar end
export stripscalar
stripscalar(x::Any) = x
stripscalar(x::Ref) = x[]
stripscalar(x::AbstractArray{T,0}) where T = x[]
function _checkcompat(shape::AbstractValueShape, data::AbstractVector{<:Real})
n_shape = totalndof(shape)
n_data = length(eachindex(data))
if n_shape != n_data
throw(ArgumentError("Data vector of length $(n_data) incompatible with value shape with $(n_shape) degrees of freedom"))
end
nothing
end
function _checkcompat_inner(shape::AbstractValueShape, data::AbstractArray{<:AbstractVector{<:Real}})
n_shape = totalndof(shape)
n_data = prod(innersize(data))
if n_shape != n_data
throw(ArgumentError("Data vector of length $(n_data) incompatible with value shape with $(n_shape) degrees of freedom"))
end
nothing
end
@inline function _apply_shape_to_data(shape::AbstractValueShape, data::AbstractVector{<:Real})
@boundscheck _checkcompat(shape, data)
_apply_accessor_to_data(ValueAccessor(shape, 0), data)
end
@inline function (shape::AbstractValueShape)(data::AbstractVector{<:Real})
_apply_shape_to_data(shape, data)
end
Base.Vector{T}(::UndefInitializer, shape::AbstractValueShape) where {T <: Real} =
Vector{T}(undef, totalndof(shape))
Base.Vector{<:Real}(::UndefInitializer, shape::AbstractValueShape) =
Vector{default_unshaped_eltype(shape)}(undef, shape)
ArraysOfArrays.VectorOfSimilarVectors{T}(shape::AbstractValueShape) where {T<:Real} =
VectorOfSimilarVectors(ElasticArray{T}(undef, totalndof(shape), 0))
# Specialize (::AbstractValueShape).(::AbstractVector{<:AbstractVector{<:Real}}):
Base.Broadcast.broadcasted(vs::AbstractValueShape, A::AbstractArray{<:AbstractVector{<:Real},N}) where N =
broadcast(view, A, Ref(ValueAccessor(vs, 0)))
# Specialize unshaped for real vectors (semantically vectors of scalar-shaped values)
function Base.Broadcast.broadcasted(::typeof(unshaped), x::AbstractVector{<:Real})
nestedview(reshape(view(x, :), 1, length(eachindex(x))))
end
function Base.Broadcast.broadcasted(::typeof(unshaped), x::AbstractVector{<:Real}, vsref::Ref{<:AbstractValueShape})
elshape(x) <= vsref[] || throw(ArgumentError("Shape of value not compatible with given shape"))
Base.Broadcast.broadcasted(unshaped, x)
end
# Specialize unshaped for real vectors that are array slices:
const _MatrixSliceFirstDim{T} = SubArray{T,1,<:AbstractArray{T,2},<:Tuple{Int,AbstractArray{Int}}}
function Base.Broadcast.broadcasted(::typeof(unshaped), x::_MatrixSliceFirstDim{<:Real})
nestedview(view(parent(x), x.indices[1]:x.indices[1], x.indices[2]))
end
function Base.Broadcast.broadcasted(::typeof(unshaped), x::_MatrixSliceFirstDim{<:Real}, vsref::Ref{<:AbstractValueShape})
elshape(x) <= vsref[] || throw(ArgumentError("Shape of value not compatible with given shape"))
Base.Broadcast.broadcasted(unshaped, x)
end
function _zerodim_array(x::T) where T
A = Array{T,0}(undef)
A[] = x
A
end
"""
const_zero(x::Any)
Get the equivalent of a constant zero for values of the same type as `x`.
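For example:
```julia
const_zero(2.5) == 0.0
const_zero([1.0, 2.0]) == Fill(0.0, 2)  # a FillArrays.Fill of zeros
```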
"""
function const_zero end
const_zero(x::Number) = zero(x)
const_zero(A::AbstractArray{T}) where T <: Number = Fill(zero(T), size(A)...)
"""
replace_const_shapes(f::Function, shape::AbstractValueShape)
If `shape` is, or contains, one or more [`ConstValueShape`](@ref) shapes,
recursively replace them with the result of `f(s::ConstValueShape)`.
"""
function replace_const_shapes end
export replace_const_shapes
"""
gradient_shape(argshape::AbstractValueShape)
Return the value shape of the gradient of functions that take values of
shape `argshape` as an input.
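For example (a sketch mirroring the package's tests; assumes `Zygote` and
`LinearAlgebra.norm` are available):
```julia
vs = NamedTupleShape(ShapedAsNT, a = ScalarShape{Real}(), b = ArrayShape{Real}(2))
Zygote.gradient(x -> x[].a^2 + norm(x[].b)^2, vs([3, 4, 5])) ==
    (gradient_shape(vs)([6, 8, 10]),)
```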
"""
function gradient_shape end
gradient_shape(vs::AbstractValueShape) = replace_const_shapes(nonstrict_const_zero_shape, vs)
export gradient_shape
"""
variance_shape(variate_shape::AbstractValueShape)
Return the value shape of the variance of a distribution whose variates have
the value shape `variate_shape`.
"""
function variance_shape end
variance_shape(vs::AbstractValueShape) = replace_const_shapes(const_zero_shape, vs)
export variance_shape
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
import Test
Test.@testset "Package ValueShapes" begin
include("test_value_shape.jl")
include("test_value_accessor.jl")
include("test_scalar_shape.jl")
include("test_array_shape.jl")
include("test_const_value_shape.jl")
include("test_named_tuple_shape.jl")
include("test_functions.jl")
include("test_distributions.jl")
include("test_const_value_dist.jl")
include("test_named_tuple_dist.jl")
include("test_reshaped_dist.jl")
end # testset
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Random
using ElasticArrays, ArraysOfArrays
@testset "array_shape" begin
@test_throws ArgumentError @inferred(ValueShapes._valshapeoftype(Vector{Int}))
@test @inferred(valshape(rand(3))) == ArrayShape{Float64,1}((3,))
@test @inferred(valshape(rand(3, 4, 5))) == ArrayShape{Float64,3}((3, 4, 5))
@test @inferred(ValueShapes.default_unshaped_eltype(ArrayShape{Complex,3}((3, 4, 5)))) == Float64
@test @inferred(ValueShapes.default_unshaped_eltype(ArrayShape{Complex{Float32},3}((3, 4, 5)))) == Float32
@test @inferred(ValueShapes.shaped_type(ArrayShape{Real}(2, 3, 4))) == Array{Float64, 3}
@test @inferred(ValueShapes.shaped_type(ArrayShape{Complex}(2))) == Array{Complex{Float64}, 1}
@test @inferred(ValueShapes.shaped_type(ArrayShape{Complex{Real}}(2), Float32)) == Array{Complex{Float32}, 1}
@test @inferred(ValueShapes.shaped_type(ArrayShape{Complex{Int16}}(2, 3))) == Array{Complex{Int16}, 2}
@test @inferred(totalndof(ArrayShape{Float64,1}((3,)))) == 3
@test @inferred(totalndof(ArrayShape{Complex,3}((3, 4, 5)))) == 120
@test size(@inferred(Vector{Float64}(undef, ArrayShape{Complex}((2, 1, 3))))) == (2 * 2*1*3,)
@test size(flatview(@inferred(VectorOfSimilarVectors{Float32}(ArrayShape{Complex}((2, 1, 3)))))) == (2 * 2*1*3, 0)
shape = ArrayShape{Real}(2,3)
A = @inferred(shape(undef))
@test typeof(A) == Array{Float64,2}
@test size(A) == (2, 3)
@test @inferred(length(shape)) == shape.dims[1]*shape.dims[2]
@test @inferred(length(shape)) == prod(shape.dims)
@test @inferred(valshape(A)) == ArrayShape{Float64}(2,3)
@test @inferred(shape([1, 2, 3, 4, 5, 6])) == [1 3 5; 2 4 6]
@test_throws ArgumentError shape([1, 2, 3, 4, 5, 6, 7])
@test @inferred(ArrayShape{Float64}(4,5) <= ArrayShape{Real}(4,5)) == true
@test @inferred(ArrayShape{Real}(4,5) >= ArrayShape{Float64}(4,5)) == true
@test (ArrayShape{Float64}(4,5) <= ArrayShape{Integer}(4,5)) == false
@test (ArrayShape{Float64}(2,2) <= ArrayShape{Float64}(3,3)) == false
@test eltype(@inferred(Vector{Float32}(undef, shape))) == Float32
@test eltype(eltype(@inferred(VectorOfSimilarVectors{Float32}(shape)))) == Float32
@test valshape(shape.(push!(@inferred(VectorOfSimilarVectors{Float64}(shape)), @inferred(Vector{Float64}(undef, shape))))[1]) == valshape(shape(undef))
let A = [2.2, 4.4, 3.3]
@test @inferred(unshaped(A, ArrayShape{Real}(3))) === A
@test_throws ArgumentError unshaped(A, ArrayShape{Real}(2))
@test_throws ArgumentError unshaped(A, ArrayShape{Integer}(3))
end
@test @inferred(unshaped([1 2; 3 4; 5 6], ArrayShape{Real}(3,2))) == [1, 3, 5, 2, 4, 6]
let shape = ArrayShape{Real}(3,2), UA = [1, 3, 5, 2, 4, 6]
@test @inferred(unshaped(shape(UA), shape)) == UA
end
let
A = collect(1:8)
ac = ValueAccessor(ArrayShape{Real}(3), 2)
@test @inferred(getindex(A, ac)) == [3, 4, 5]
@test @inferred(view(A, ac)) == [3, 4, 5]
@test @inferred(setindex!(A, [7, 2, 6], ac)) === A
@test A[ac] == [7, 2, 6]
end
let
A = collect(1:6*6)
ac = ValueAccessor(ArrayShape{Real}(2,3), 17)
@test @inferred(getindex(A, ac)) == [18 20 22; 19 21 23]
@test @inferred(view(A, ac)) == [18 20 22; 19 21 23]
@test @inferred(setindex!(A, [6 4 3; 2 1 5], ac)) === A
@test A[ac] == [6 4 3; 2 1 5]
end
let
A = collect(reshape(1:6*6, 6, 6))
ac1 = ValueAccessor(ArrayShape{Real}(2), 2)
ac2 = ValueAccessor(ArrayShape{Real}(3), 3)
@test @inferred(getindex(A, ac1, ac2)) == [21 27 33; 22 28 34]
@test @inferred(view(A, ac1, ac2)) == [21 27 33; 22 28 34]
@test @inferred(setindex!(A, [6 4 3; 2 1 5], ac1, ac2)) === A
@test A[ac1, ac2] == [6 4 3; 2 1 5]
end
let A = collect(reshape(1:5*5*5, 5, 5, 5))
ac1 = ValueAccessor(ArrayShape{Real}(size(A)[1]), 0)
ac2 = ValueAccessor(ArrayShape{Real}(1), 0)
ac3 = ValueAccessor(ArrayShape{Real}(1), 1)
ac4 = ValueAccessor(ArrayShape{Real}(1), 2)
@test @inferred(getindex(A, ac1, ac1, ac1)) == A
@test @inferred(getindex(A, ac2, ac2, ac2))[1] == A[1]
@test @inferred(getindex(A, ac4, ac4, ac4))[1] == 63
@test @inferred(view(A, ac1, ac1, ac1)) == A
@test @inferred(view(A, ac2, ac2, ac2))[1] == A[1]
@test @inferred(view(A, ac4, ac4, ac4))[1] == 63
first_layer = @inferred(getindex(A, ac1, ac1, ac2))
setindex!(A, first_layer, ac1, ac1, ac3)
@test @inferred(A[:,:,1]) == @inferred(A[:,:,2])
end
let d1 = [11, 12, 13, 14], d2 = [21, 22]
d = vcat(d1, d2)
reshaped = shape(d)
@test reshaped == reshape(d, (2,3))
end
end
@testset "broadcasting and copy" begin
data1d = [rand(4), rand(4), rand(4), rand(4)]
data2d = [[rand(4,)] [rand(4,)] [rand(4,)]]
VoV = VectorOfVectors(data1d)
shape1 = ArrayShape{Float64, 1}((4,))
shape2 = ArrayShape{Float64}(1,4)
shape3 = ArrayShape{Float64}(2,2)
shape1_VoV_bcast = broadcast(shape1, VoV)
shape2_VoV_bcast = broadcast(shape2, VoV)
shape3_VoV_bcast = broadcast(shape3, VoV)
shape1_bcast = broadcast(shape1, data1d)
shape1_data_bcast = broadcast(shape1, data1d)
shape1_data_dcast = shape1.(data1d)
shape2_data_bcast = broadcast(shape2, data1d)
shape3_data_bcast = broadcast(shape3, data1d)
@test shape1_data_dcast == shape1_data_bcast
@test isapprox(shape1_VoV_bcast, shape1_data_bcast)
@test isapprox(shape2_VoV_bcast, shape2_data_bcast)
@test isapprox(shape3_VoV_bcast, shape3_data_bcast)
AoSV = ArrayOfSimilarVectors{Float64}(data1d)
AoSA = ArrayOfSimilarArrays{Float64}(data2d)
shaped_AoSV_bcast = broadcast(shape1, AoSV)
shaped_AoSV_dcast = shape1.(AoSV)
shaped_AoSA_bcast = broadcast(shape1, AoSA)
shaped_AoSA_dcast = shape1.(AoSA)
@test shaped_AoSV_bcast == AoSV
@test shaped_AoSV_dcast == shaped_AoSV_bcast
@test shape1.(VoV) == VoV
shape1_bcast = broadcast(shape1, data2d)
@test shape1_bcast == shape1.(data2d)
for i in 1:length(data2d)
@test shape1_bcast[i] == data2d[i]
end
unshaped1 = unshaped.(shaped_AoSA_bcast)
@test unshaped1 == data2d
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Distributions
using Random
@testset "const_value_dist" begin
@test @inferred(ConstValueDist(4.2)) isa Distribution{Univariate}
@test @inferred(ConstValueDist([1, 2, 3])) isa Distribution{Multivariate}
@test @inferred(ConstValueDist([1 2; 3 4])) isa Distribution{Matrixvariate}
@test @inferred(length(ConstValueDist([4.2]))) == 1
@test @inferred(length(ConstValueDist([1, 2, 3]))) == 3
@test @inferred(length(ConstValueDist([1 2; 3 4]))) == 4
@test @inferred(pdf(ConstValueDist(4.2), 4.2)) == 1
@test @inferred(logpdf(ConstValueDist(4.2), 4.2)) == 0
@test @inferred(pdf(ConstValueDist(4.2), 3.7)) == 0
@test @inferred(logpdf(ConstValueDist(4.2), 3.7)) == -Inf
@test @inferred(broadcast(pdf, ConstValueDist(4.2), [4.2, 3.7])) == [1, 0]
@test @inferred(broadcast(logpdf, ConstValueDist(4.2), [4.2, 3.7])) == [0, -Inf]
@test @inferred(pdf(ConstValueDist([1, 2, 3]), [1, 2, 3])) == 1
@test @inferred(logpdf(ConstValueDist([1, 2, 3]), [1, 2, 3])) == 0
@test @inferred(pdf(ConstValueDist([1, 2, 3]), [2, 3, 4])) == 0
@test @inferred(logpdf(ConstValueDist([1, 2, 3]), [2, 3, 4])) == -Inf
@test (pdf(ConstValueDist([1, 2, 3]), hcat([1, 2, 3], [2, 3, 4]))) == [1, 0]
@test (logpdf(ConstValueDist([1, 2, 3]), hcat([1, 2, 3], [2, 3, 4]))) == [0, -Inf]
@test @inferred(pdf(ConstValueDist([1 2; 3 4]), [1 2; 3 4])) == 1
@test @inferred(logpdf(ConstValueDist([1 2; 3 4]), [1 2; 3 4])) == 0
@test @inferred(pdf(ConstValueDist([1 2; 3 4]), [4 5; 6 7])) == 0
@test @inferred(logpdf(ConstValueDist([1 2; 3 4]), [4 5; 6 7])) == -Inf
@test @inferred(insupport(ConstValueDist(4.2), 4.2)) == true
@test @inferred(insupport(ConstValueDist(4.2), 4.19)) == false
@test @inferred(insupport(ConstValueDist([1, 2, 3]), [1, 2, 3])) == true
@test @inferred(insupport(ConstValueDist([1, 2, 3]), [2, 3, 4])) == false
@test @inferred(insupport(ConstValueDist([1 2; 3 4]), [1 2; 3 4])) == true
@test @inferred(insupport(ConstValueDist([1 2; 3 4]), [4 5; 6 7])) == false
@test @inferred(rand(ConstValueDist(4.2))) == 4.2
@test @inferred(rand(ConstValueDist(4.2), 3)) == [4.2, 4.2, 4.2]
@test @inferred(rand(ConstValueDist([1, 2, 3]))) == [1, 2, 3]
@test @inferred(rand(ConstValueDist([1, 2, 3]), 2)) == hcat([1, 2, 3], [1, 2, 3])
@test @inferred(rand(ConstValueDist([1 2; 3 4]))) == [1 2; 3 4]
@test @inferred(rand(ConstValueDist([1 2; 3 4]), 2)) == [[1 2; 3 4], [1 2; 3 4]]
univariate_cvd = @inferred(ConstValueDist(Int64(42)))
shape = varshape(univariate_cvd)
@test @inferred(totalndof(shape)) == 0
@test @inferred(size(univariate_cvd)) == ()
@test @inferred(length(univariate_cvd)) == 1
@test @inferred(minimum(univariate_cvd)) == 42
@test @inferred(maximum(univariate_cvd)) == 42
@test @inferred(pdf(univariate_cvd, 42)) == 1
@test @inferred(cdf(univariate_cvd, 41.999)) == 0
@test @inferred(cdf(univariate_cvd, 42)) == 1
@test @inferred(cdf(univariate_cvd, Inf)) == 1
@test @inferred(mean(univariate_cvd)) == 42
@test @inferred(mode(univariate_cvd)) == 42
@test @inferred(eltype(univariate_cvd)) == Int64
μ1, μ2 = rand(2)
cvd_from_named_tuple = @inferred(ConstValueDist((a=μ1, b=μ2)))
@test @inferred(log(@inferred(pdf(cvd_from_named_tuple, (a=μ1, b=μ2))))) == @inferred(logpdf(cvd_from_named_tuple, (a=μ1, b=μ2))) == 0
@test @inferred(insupport(cvd_from_named_tuple, (a=μ1, b=μ2))) == true
@test @inferred(insupport(cvd_from_named_tuple, (a=μ1+eps(μ1), b=μ2+eps(μ2)))) == false
@test @inferred(insupport(cvd_from_named_tuple, (a=μ1+eps(μ1), b=μ2))) == false
@test @inferred(insupport(cvd_from_named_tuple, (a=μ1, b=μ2+eps(μ2)))) == false
n_samples = 100
samples = @inferred(rand(cvd_from_named_tuple, n_samples))
emptied_samples = @inferred(similar(samples))
@test emptied_samples != samples
@test samples == @inferred(fill((a=μ1, b=μ2), n_samples))
@test @inferred(rand!(cvd_from_named_tuple, emptied_samples)) == samples
@test samples == emptied_samples
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using ArraysOfArrays
@testset "const_value_shape" begin
@test @inferred(size(ConstValueShape(42))) == ()
@test @inferred(eltype(ConstValueShape(42))) == Int
@test @inferred(totalndof(ConstValueShape(42))) == 0
@test @inferred(size(ConstValueShape(rand(2,3)))) == (2,3)
@test @inferred(ValueShapes.default_unshaped_eltype(ConstValueShape(rand(Float32,2,3)))) == Float32
@test @inferred(totalndof(ConstValueShape(rand(2,3)))) == 0
@test @inferred(ConstValueShape([1 4; 3 2])(undef)) == [1 4; 3 2]
@test @inferred(ConstValueShape([1 4; 3 2])(Int[])) == [1 4; 3 2]
data = [1 4; 3 2]
shape = ConstValueShape([1 4; 3 2])
@test typeof(@inferred(Vector{Int32}(undef, shape))) == Vector{Int32}
@test size(@inferred(Vector{Int32}(undef, shape))) == (0,)
@test @inferred(length(shape)) == 4
@test @inferred(ValueShapes.shaped_type(shape, Real)) == typeof(data)
@test @inferred (ConstValueShape(4.0) <= ConstValueShape(4.0)) == true
@test @inferred (ConstValueShape(4.0) >= ConstValueShape{AbstractFloat}(4.0)) == false
@test @inferred (ConstValueShape{AbstractFloat}(4.0) >= ConstValueShape(4.0)) == true
@test @inferred (ConstValueShape(4) <= ConstValueShape(5)) == false
@test @inferred(unshaped(4.2, ConstValueShape(4.2))) == Float32[]
@test_throws ArgumentError unshaped(4.3, ConstValueShape(4.2))
vecs_of_vecs = VectorOfSimilarVectors(reshape(collect(1:22), 11, 2))
va = ValueAccessor(ArrayShape{Real}(11,1), 0)
bcv = ValueShapes._bcasted_view(vecs_of_vecs, va)
for (index,value) in enumerate(bcv[1])
@test value == vecs_of_vecs[1][index]
end
aosv = ArrayOfSimilarVectors([ [1,2,3] [4,5,6] ])
va = ValueAccessor(ConstValueShape{Real}(1), 0)
inds = getindex.(aosv, Ref(va))
for i in inds
@test i == 1
end
views = view.(aosv, Ref(va))
@test views === inds
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Distributions
@testset "distributions" begin
@test @inferred(varshape(Normal())) == ScalarShape{Real}()
@test @inferred(varshape(MvNormal([2. 1.; 1. 3.]))) == ArrayShape{Real}(2)
@test @inferred(varshape(MatrixBeta(4, 6, 6))) == ArrayShape{Real}(4, 4)
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using LinearAlgebra
@testset "functions" begin
f(x) = [x]
ValueShapes.resultshape(::typeof(f), ::Real) = ArrayShape{Real}(1)
@test @inferred(resultshape(f, 42)) >= valshape(f(42))
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Statistics, Random
using StatsBase, Distributions, ArraysOfArrays, IntervalSets
@testset "NamedTupleDist" begin
dist = @inferred NamedTupleDist(
ShapedAsNT,
a = 5, b = Weibull(2, 1),
c = -4..5,
d = MvNormal([1.2 0.5; 0.5 2.1]),
x = [3 4; 2 5],
e = [Normal(1.1, 0.2)]
)
@test typeof(@inferred varshape(dist)) <: NamedTupleShape
shape = varshape(dist)
shapes = map(varshape, dist)
for k in keys(dist)
@test getproperty(shapes, k) == varshape(getproperty(dist, k))
end
@test dist[:d] == dist.d
X_unshaped = [0.2, -0.4, 0.3, -0.5, 0.9]
X_shaped = shape(X_unshaped)
@test (@inferred logpdf(unshaped(dist), X_unshaped)) == logpdf(Weibull(2, 1), 0.2) + logpdf(Uniform(-4, 5), -0.4) + logpdf(MvNormal([1.2 0.5; 0.5 2.1]), [0.3, -0.5]) + logpdf(Normal(1.1, 0.2), 0.9)
@test (@inferred logpdf(dist, X_shaped)) == logpdf(unshaped(dist), X_unshaped)
@test (@inferred logpdf(dist, X_shaped[])) == logpdf(unshaped(dist), X_unshaped)
@test (@inferred mode(unshaped(dist))) == [mode(dist.b), 0.5, 0.0, 0.0, 1.1]
@test (@inferred mode(dist)) == shape(mode(unshaped(dist)))
@test (@inferred mean(unshaped(dist))) == [mean(dist.b), 0.5, 0.0, 0.0, 1.1]
@test (@inferred mean(dist)) == shape(mean(unshaped(dist)))
@test @inferred(var(unshaped(dist))) ≈ [var(dist.b), 6.75, 1.2, 2.1, 0.04]
@test @inferred(var(dist))[] == (a = 0, b = var(dist.b), c = var(dist.c), d = var(dist.d), x = var(dist.x), e = var(dist.e))
@test begin
ref_cov =
[var(dist.b) 0.0 0.0 0.0 0.0;
0.0 6.75 0.0 0.0 0.0;
0.0 0.0 1.2 0.5 0.0;
0.0 0.0 0.5 2.1 0.0;
0.0 0.0 0.0 0.0 0.04 ]
(@inferred cov(unshaped(dist))) ≈ ref_cov
end
@test @inferred(rand(dist)) isa ShapedAsNT
@test @inferred(rand(dist)[]) isa NamedTuple
@test pdf(dist, rand(dist)) > 0
@test @inferred(rand(dist, ())) isa ShapedAsNTArray{T,0} where T
@test pdf(dist, rand(dist)) > 0
@test @inferred(rand(dist, 100)) isa ShapedAsNTArray
@test all(x -> x > 0, pdf.(Ref(dist), rand(dist, 10^3)))
let X = varshape(dist).(nestedview(Array{eltype(unshaped(dist))}(undef, length(unshaped(dist)), 14)))
@test @inferred(rand!(dist, X[1])) == X[1]
@test @inferred(rand!(dist, X)) === X
end
let X = varshape(dist).([Array{Float64}(undef, totalndof(varshape(dist))) for i in 1:11])
@test @inferred(rand!(dist, X[1])) == X[1]
@test @inferred(rand!(dist, X)) === X
end
testrng() = MersenneTwister(0xaef035069e01e678)
let X = rand(unshaped(dist), 10), Xn = nestedview(X)
@test @inferred(Distributions._pdf(unshaped(dist), Xn[1])) == @inferred(pdf(unshaped(dist), Xn[1]))
@test @inferred(pdf(unshaped(dist), X)) == @inferred(broadcast(pdf, Ref(unshaped(dist)), Xn))
@test @inferred(Distributions._logpdf(unshaped(dist), Xn[1])) == @inferred(logpdf(unshaped(dist), Xn[1]))
@test @inferred(logpdf(unshaped(dist), X)) == @inferred(broadcast(logpdf, Ref(unshaped(dist)), Xn))
@test @inferred(insupport(unshaped(dist), Xn[1])) == true
@test @inferred(insupport(unshaped(dist), fill(-Inf, length(Xn)))) == false
@test @inferred(insupport(unshaped(dist), X)) == fill(true, length(Xn))
end
@test @inferred(rand(unshaped(dist))) isa Vector{Float64}
@test shape(@inferred(rand(testrng(), unshaped(dist)))) == @inferred(rand(testrng(), dist, ()))[] == @inferred(rand(testrng(), dist))
@test @inferred(rand(unshaped(dist), 10^3)) isa Matrix{Float64}
@test shape.(nestedview(@inferred(rand(testrng(), unshaped(dist), 10^3)))) == @inferred(rand(testrng(), dist, 10^3))
propnames = propertynames(dist, true)
@test propnames == (:a, :b, :c, :d, :x, :e, :_internal_distributions, :_internal_shape)
@test @inferred(keys(dist)) == propertynames(dist, false)
internaldists = getproperty(dist, :_internal_distributions)
internalshape = getproperty(dist, :_internal_shape)
@test all(i -> typeof(getindex(internaldists, i)) == typeof(getproperty(dist, keys(dist)[i])), 1:length(internaldists))
@test all(key -> getproperty(getproperty(internalshape, key), :shape) == valshape(getproperty(internalshape, key)), keys(internalshape))
@test @inferred(convert(NamedTupleDist, (x = 5, z = Normal()))) == NamedTupleDist(x = 5, z = Normal())
@test @inferred(merge((a = 42,), NamedTupleDist(x = 5, z = Normal()))) == (a = 42, x = ConstValueDist(5), z = Normal())
@test @inferred(NamedTupleDist(ShapedAsNT, (;dist...))) == dist
@test @inferred(merge(dist)) === dist
@test @inferred(merge(
NamedTupleDist(x = Normal(), y = 42),
NamedTupleDist(y = Normal(4, 5)),
NamedTupleDist(z = Exponential(3), a = 4.2),
)) == NamedTupleDist(x = Normal(), y = Normal(4, 5), z = Exponential(3), a = 4.2)
@test @inferred(merge(NamedTupleDist(a = 42,), (x = 5, z = Normal()))) == NamedTupleDist(a = 42, x = ConstValueDist(5), z = Normal())
@test @inferred(merge(
NamedTupleDist(x = Normal(), y = 42),
(y = Normal(4, 5),),
NamedTupleDist(z = Exponential(3), a = 4.2),
)) == NamedTupleDist(x = Normal(), y = Normal(4, 5), z = Exponential(3), a = 4.2)
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Random
using ArraysOfArrays
import TypedTables
import Tables
using LinearAlgebra, FillArrays
using ChainRulesCore: Tangent, NoTangent, ZeroTangent, AbstractThunk, Thunk, ProjectTo, rrule, backing, unthunk
import Zygote, ForwardDiff
@testset "named_tuple_shape" begin
@testset "functionality" begin
get_y(x) = x.y
data = VectorOfSimilarVectors(reshape(collect(1:22), 11, 2))
ref_table = TypedTables.Table(
a = [[1 3 5; 2 4 6], [12 14 16; 13 15 17]],
b = [7, 18],
c = [4.2, 4.2],
x = Matrix[[11 21; 12 22], [11 21; 12 22]],
y = [[8, 9, 10, 11], [19, 20, 21, 22]]
)
named_shapes = (
a = ArrayShape{Real}(2, 3),
b = ScalarShape{Real}(),
c = ConstValueShape(4.2),
x = ConstValueShape([11 21; 12 22]),
y = ArrayShape{Real}(4)
)
shape = @inferred NamedTupleShape(;named_shapes...)
sntshape = @inferred NamedTupleShape(ShapedAsNT; named_shapes...)
@test @inferred(NamedTupleShape(named_shapes)) == shape
@test @inferred(length(shape)) == 5
@test @inferred(propertynames(shape)) == keys(shape)
@test propertynames(shape, true) == (propertynames(shape)..., :_flatdof, :_accessors)
@test shape == deepcopy(shape)
@test isequal(shape, deepcopy(shape))
@test @inferred(unshaped(shape(data[1]), shape)) == data[1]
@test @inferred(unshaped(sntshape(data[1]), sntshape)) == data[1]
@test shape[:y] == shape.y
let flatdof = 0, accs = getproperty(shape, :_accessors)
for i in 1:length(keys(shape))
ishape = getindex(shape, i).shape
flatdof += accs[i].len
@test ishape == named_shapes[i]
@test ishape == accs[i].shape
end
@test getproperty(shape, :_flatdof) == flatdof
end
@test ValueShapes.default_unshaped_eltype(NamedTupleShape(a=ScalarShape{Int}())) == Int
@test @inferred(ValueShapes.default_unshaped_eltype(NamedTupleShape(a = ScalarShape{Int}(), b = ArrayShape{Float32}(2, 3)))) == Float32
@test @inferred(ValueShapes.default_unshaped_eltype(shape)) == Float64
@test @inferred(ValueShapes.shaped_type(shape)) == NamedTuple{(:a, :b, :c, :x, :y),Tuple{Array{Float64,2},Float64,Float64,Array{Int,2},Array{Float64,1}}}
@test @inferred(ValueShapes.shaped_type(shape, Float32)) == NamedTuple{(:a, :b, :c, :x, :y),Tuple{Array{Float32,2},Float32,Float64,Array{Int,2},Array{Float32,1}}}
@test @inferred(get_y(shape)) === ValueShapes._accessors(shape).y
@test @inferred(Base.propertynames(shape)) == (:a, :b, :c, :x, :y)
@test @inferred(totalndof(shape)) == 11
@test @inferred(shape(data[1])) == ref_table[1]
@test @inferred(broadcast(shape, data)) == ref_table
@test @inferred(merge((foo = 42,), shape)) == merge((foo = 42,), named_shapes)
@test @inferred(NamedTupleShape(;shape...)) == shape
@test @inferred(merge(shape)) === shape
@test @inferred(merge(
NamedTupleShape(x = ScalarShape{Real}(), y = ScalarShape{Real}()),
NamedTupleShape(y = ArrayShape{Real}(3)),
NamedTupleShape(z = ScalarShape{Real}(), a = ArrayShape{Real}(2, 4)),
)) == NamedTupleShape(x = ScalarShape{Real}(), y = ArrayShape{Real}(3), z = ScalarShape{Real}(), a = ArrayShape{Real}(2, 4))
@test typeof(@inferred(shape(undef))) == NamedTuple{(:a, :b, :c, :x, :y),Tuple{Array{Float64,2},Float64,Float64,Array{Int,2},Array{Float64,1}}}
@test typeof(@inferred(valshape(shape(undef)))) <: NamedTupleShape
@test typeof(valshape(shape(undef))(undef)) == NamedTuple{(:a, :b, :c, :x, :y),Tuple{Array{Float64,2},Float64,Float64,Array{Int,2},Array{Float64,1}}}
@test @inferred(shape(collect(1:11))) == (a = [1 3 5; 2 4 6], b = 7, c = 4.2, x = [11 21; 12 22], y = [8, 9, 10, 11])
@test_throws ArgumentError shape(collect(1:12))
@test valshape(shape.(push!(@inferred(VectorOfSimilarVectors{Float64}(shape)), @inferred(Vector{Float64}(undef, shape))))[1]) == valshape(shape(undef))
let
a = NamedTupleShape(x = ScalarShape{Int}(), y = ArrayShape{AbstractFloat}(2))
b1 = NamedTupleShape(x = ScalarShape{Real}(), y = ArrayShape{Real}(2))
b2 = NamedTupleShape(x = ScalarShape{Real}(), y = ArrayShape{Real}(3))
c = NamedTupleShape(x = ScalarShape{Real}(), z = ArrayShape{Real}(2))
@test @inferred(a <= b1) == true
@test @inferred(a >= b1) == false
@test @inferred(b1 >= a) == true
@test @inferred(a <= b2) == false
@test @inferred(a <= c) == false
end
@test @inferred(unshaped(sntshape(data[1]), sntshape)) === data[1]
@test @inferred(unshaped(sntshape(data[1])[], sntshape)) == data[1]
@testset "ValueShapes.ShapedAsNT" begin
UA = copy(data[1])
@test @inferred(size(@inferred(ValueShapes.ShapedAsNT(UA, shape)))) == ()
A = ValueShapes.ShapedAsNT(UA, shape)
@test @inferred(getproperty(A, :__internal_data) == data[1])
@test @inferred(getproperty(A, :__internal_valshape) == valshape(A))
@test @inferred(propertynames(A)) == (:a, :b, :c, :x, :y)
@test propertynames(A, true) == (:a, :b, :c, :x, :y, :__internal_data, :__internal_valshape)
@test @inferred(get_y(A)) == [8, 9, 10, 11]
@test typeof(A.b) <: Integer
@test @inferred(valshape(A)) === NamedTupleShape(ShapedAsNT; shape...)
@test @inferred(realnumtype(typeof(A))) == Int
@test @inferred(unshaped(A)) === UA
@test @inferred(unshaped(A.a)) == view(UA, 1:6)
@test @inferred(unshaped(A.b)) == view(UA, 7:7)
@test @inferred(unshaped(A.y)) == view(UA, 8:11)
@test @inferred(copy(A)) == A
@test typeof(copy(A)) == typeof(A)
@test @inferred((X -> (Y = copy(X); Y.a = [5 3 5; 9 4 5]; unshaped(Y)))(A)) == [5, 9, 3, 4, 5, 5, 7, 8, 9, 10, 11]
@test @inferred((X -> (Y = copy(X); Y.b = 9; unshaped(Y)))(A)) == [1, 2, 3, 4, 5, 6, 9, 8, 9, 10, 11]
@test @inferred((X -> (Y = copy(X); Y.c = 4.2; unshaped(Y)))(A)) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
@test_throws ArgumentError (X -> (Y = copy(X); Y.c = 4.3; unshaped(Y)))(A)
@test @inferred((X -> (Y = copy(X); Y.y = [4, 7, 5, 6]; unshaped(Y)))(A)) == [1, 2, 3, 4, 5, 6, 7, 4, 7, 5, 6]
x = (a = [5 3 5; 9 4 5], b = 9, c = 4.2, x = [11 21; 12 22], y = [4, 7, 5, 6])
@test (B = copy(A); B[] = x; B[]) == x
@test_throws ArgumentError copy(A)[] = (a = [5 3 5; 9 4 5], b = 9, c = 4.2, x = [11 21; 12 23], y = [4, 7, 5, 6])
@testset "rrules" begin
# Base.Returns is Julia >= v1.7 only, so define:
struct ReturnsValue{T} <: Function; value::T; end
(f::ReturnsValue)(args...; kw...) = f.value
vs_x = NamedTupleShape(ShapedAsNT, a = ScalarShape{Real}(), b = ArrayShape{Real}(2), c = ScalarShape{Real}(), d = ArrayShape{Real}(2), e = ArrayShape{Real}(1), f = ArrayShape{Real}(1), g = ConstValueShape([0.4, 0.5, 0.6]))
vs_dx = NamedTupleShape(ShapedAsNT, a = ScalarShape{Real}(), b = ArrayShape{Real}(2), c = ScalarShape{Real}(), d = ArrayShape{Real}(2), e = ArrayShape{Real}(1), f = ArrayShape{Real}(1), g = ConstValueShape{typeof(Fill(1.0, 3)),false}(Fill(0.0, 3)))
@test @inferred(gradient_shape(vs_x)) == vs_dx
x_unshaped = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
x = vs_x(x_unshaped)
dx_unshaped = [0.0, 1.2, 1.3, 1.4, 0.0, 0.0, 0.0, 0.0]
dx = vs_dx(dx_unshaped)
dx_contents = (__internal_data = unshaped(dx), __internal_valshape = valshape(dx))
dx_tangent = Tangent{typeof(x),typeof(dx_contents)}(dx_contents)
dx_nttangent = Tangent{typeof(x[])}(;ProjectTo(x)(dx_tangent)[]...)
@test @inferred(Tangent(x, dx_unshaped)) == dx_tangent
zdx_unshaped = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
zdx = vs_dx(zdx_unshaped)
zdx_contents = (__internal_data = unshaped(zdx), __internal_valshape = valshape(zdx))
zdx_tangent = Tangent{typeof(x),typeof(zdx_contents)}(zdx_contents)
@test @inferred(Tangent(x, zdx_unshaped)) == zdx_tangent
dy = (a = nothing, b = [1.2, 1.3], c = 1.4, d = NoTangent(), e = ZeroTangent(), f = nothing, g = [2.1, 2.2, 2.3])
dy_unshaped = dx_unshaped
dy_tangent = Tangent{typeof(dy),typeof(dy)}(dy)
ref_dx_tangent(ΔΩ::AbstractThunk) = ref_dx_tangent(unthunk(ΔΩ))
ref_dx_tangent(::Union{ZeroTangent,Nothing}) = ZeroTangent()
ref_dx_tangent(::NoTangent) = NoTangent()
ref_dx_tangent(::Any) = dx_tangent
ref_ntdx_tangent(ΔΩ::AbstractThunk) = ref_ntdx_tangent(unthunk(ΔΩ))
ref_ntdx_tangent(::Union{ZeroTangent,Nothing}) = ZeroTangent()
ref_ntdx_tangent(::NoTangent) = NoTangent()
ref_ntdx_tangent(ΔΩ::Any) = dx_nttangent
@test @inferred(ProjectTo(x)(dx_tangent)) == dx
@test @inferred(ProjectTo(x)(backing(dx_tangent))) == dx
@test @inferred(rrule(getindex, x))[1] == x[]
for unthunked_ΔΩ in [dy, dy_tangent, ZeroTangent(), NoTangent(), nothing]
for ΔΩ in [unthunked_ΔΩ, Thunk(ReturnsValue(unthunked_ΔΩ))]
@test @inferred(rrule(getindex, x)[2](ΔΩ)) == (NoTangent(), ProjectTo(x)(ref_dx_tangent(ΔΩ)))
end
end
@test rrule(unshaped, x)[1] == x_unshaped
@test rrule(unshaped, x, vs_x)[1] == x_unshaped
@test rrule(unshaped, x[], vs_x)[1] == x_unshaped
for unthunked_ΔΩ in [dy_unshaped, ZeroTangent(), NoTangent(), nothing]
for ΔΩ in [unthunked_ΔΩ, Thunk(ReturnsValue(unthunked_ΔΩ))]
@test @inferred(rrule(unshaped, x)[2](ΔΩ)) == (NoTangent(), ProjectTo(x)(ref_dx_tangent(ΔΩ)))
@test @inferred(rrule(unshaped, x, vs_x)[2](ΔΩ)) == (NoTangent(), ProjectTo(x)(ref_dx_tangent(ΔΩ)), NoTangent())
@test @inferred(rrule(unshaped, x[], vs_x)[2](ΔΩ)) == (NoTangent(), ref_ntdx_tangent(ΔΩ), NoTangent())
end
end
ref_flatdx_tangent(ΔΩ::AbstractThunk) = ref_flatdx_tangent(unthunk(ΔΩ))
ref_flatdx_tangent(::Union{ZeroTangent,Nothing}) = ZeroTangent()
ref_flatdx_tangent(::NoTangent) = NoTangent()
ref_flatdx_tangent(::Any) = dx_unshaped
@test rrule(ShapedAsNT, x_unshaped, vs_x)[1] == x
for unthunked_ΔΩ in [dx, dx_tangent, dx_nttangent, ZeroTangent(), NoTangent(), nothing]
for ΔΩ in [unthunked_ΔΩ, Thunk(ReturnsValue(unthunked_ΔΩ))]
@test @inferred(rrule(ShapedAsNT, x_unshaped, vs_x)[2](ΔΩ)) == (NoTangent(), ref_flatdx_tangent(ΔΩ), NoTangent())
end
end
end
@testset "Zygote support" begin
using Zygote
vs = NamedTupleShape(ShapedAsNT, a = ScalarShape{Real}(), b = ArrayShape{Real}(2))
# ToDo: Make this work with @inferred:
@test Zygote.gradient(x -> x[].a^2 + norm(x[].b)^2, vs([3, 4, 5])) == (gradient_shape(vs)([6, 8, 10]),)
# ToDo: Pullbacks for getproperty:
#@test Zygote.gradient(x_flat -> (x = vs(x_flat); norm(x.a)^2 + norm(x.b)^2), [3, 4, 5]) == ([6, 8, 10],)
@test Zygote.gradient(x_flat -> (x = vs(x_flat); norm(x[].a)^2 + norm(x[].b)^2), [3, 4, 5]) == ([6, 8, 10],)
foo(x::NamedTuple) = sum(map(x -> norm(x)^2, values(x)))
# ToDo: Make this work with @inferred:
@test Zygote.gradient(x -> foo(vs(x)[]), [3, 4, 5]) == ([6, 8, 10],)
let
function foo(x)
vs = valshape(x)
ux = unshaped(x, vs)
x2 = vs(ux)
sum(x2.a) + sum(x2.b) + sum(x2.c)
end
vs = NamedTupleShape(a = ArrayShape{Real}(2), b = ConstValueShape([5, 6]), c = ArrayShape{Real}(2))
x = ShapedAsNT([1.1, 2.2, 3.3, 4.4], vs)
@test Zygote.gradient(foo, x)[1] == gradient_shape(NamedTupleShape(ShapedAsNT; vs...))([1.0, 1.0, 1.0, 1.0])
end
end
end
@testset "ValueShapes.ShapedAsNTArray" begin
UA = Array(data)
@test @inferred(size(@inferred(ValueShapes.ShapedAsNTArray(UA, shape)))) == (2,)
A = ValueShapes.ShapedAsNTArray(UA, shape)
@test @inferred(broadcast(identity, A)) === A
@test typeof(@inferred(broadcast(shape, data))) == typeof(A)
@test shape.(data) == A
@test @inferred(broadcast(unshaped, shape.(data))) == data
@test @inferred(propertynames(A)) == (:a, :b, :c, :x, :y)
@test propertynames(A, true) == (:a, :b, :c, :x, :y, :__internal_data, :__internal_elshape)
@test @inferred(get_y(A)) == [[8, 9, 10, 11], [19, 20, 21, 22]]
@test @inferred(elshape(A)) === shape
@test @inferred(realnumtype(typeof(A))) == Int
@test @inferred(broadcast(unshaped, A)) === UA
@test @inferred(A[1]) == (a = [1 3 5; 2 4 6], b = 7, c = 4.2, x = [11 21; 12 22], y = [8, 9, 10, 11])
@test @inferred(view(A, 2)) isa ShapedAsNTArray{T,0} where T
@test @inferred(view(A, 2)[] == A[2])
@test @inferred(view(A, 2:2)) isa ShapedAsNTArray
@test @inferred(view(A, 2:2) == A[2:2])
@test @inferred(append!(copy(A), copy(A)))[3:4] == @inferred(A[1:2])
@test @inferred(vcat(A, A))[3:4] == @inferred(A[1:2])
@test size(@inferred similar(A)) == size(A)
@test copy(A) == A
@test typeof(copy(A)) == typeof(A)
@test @inferred(TypedTables.Table(A)) == A
@test typeof(@inferred flatview(TypedTables.Table(shape.(data)).y)) == Array{Int,2}
A_zero() = shape.(nestedview(zeros(totalndof(shape), 2)))
@test (B = A_zero(); B[:] = A; B) == A
@test (B = A_zero(); B[:] = TypedTables.Table(A); B) == A
@test unshaped.(A) == data
let newshape = NamedTupleShape(a=ArrayShape{Real}(9,1), b=ArrayShape{Real}(1,2))
newA = ValueShapes.ShapedAsNTArray(data, newshape)
vecA = vec(newA)
@test newA == vecA
end
let vecA = vec(A)
@test A == vecA
end
@test getproperty(A, :__internal_data) == UA
@test getproperty(A, :__internal_elshape) == shape
@test @inferred(IndexStyle(A)) == IndexStyle(getproperty(A, :__internal_data))
@test @inferred(axes(A)[1].stop == size(data)[1])
@test A == copy(A)
let B = empty(A)
for p in propertynames(B)
@test @inferred(isempty(getproperty(B, p)))
end
end
let B = copy(A), C = copy(A), D = copy(A)
for i in 1:length(A)-1
b = pop!(B)
c = popfirst!(C)
d = splice!(D, i)
@test c == d
end
@test C == D
@test B[end] == A[1]
@test @inferred(length(A) - length(B)) == 1
@test C[1] == A[end]
@test @inferred(length(A) - length(C)) == 1
B = empty(B)
prepend!(B, A)
@test B == A
D = copy(A)
prepend!(D, A)
prepend!(D, A)
prepend!(D, A)
deleteat!(D, 1:length(A):length(D))
for i in 1:length(D)-1
@test @inferred( D[i] == D[i+1])
end
end
@test @inferred(Tables.istable(typeof(A))) == true
@test @inferred(Tables.rowaccess(typeof(A))) == @inferred(Tables.rowaccess(A))
@test @inferred(Tables.rowaccess(A)) == true
@test @inferred(Tables.columnaccess(typeof(A))) == true
@test @inferred(Tables.schema(A).names == propertynames(A))
@test @inferred(Tables.rows(A)) == A
# d = [[11 12 13; 14 15 16], [21. 22. 23.; 24. 25. 26.]]
# nt = (a = d[1], b = d[2])
# typeof(d) <: AbstractArray{<:AbstractVector{<:Real}}
d = [[rand(4,)] [rand(4,)] [rand(4,)]]
nt = (a=ArrayShape{Int64, 2}((2,3)), b=ArrayShape{Float64, 2}((3,2)))
end
end
@testset "examples" begin
@test begin
shape = NamedTupleShape(
a = ArrayShape{Real}(2, 3),
b = ScalarShape{Real}(),
c = ConstValueShape(4.2),
x = ConstValueShape([11 21; 12 22]),
y = ArrayShape{Real}(4)
)
data = VectorOfSimilarVectors{Int}(shape)
resize!(data, 10)
rand!(flatview(data), 0:99)
table = shape.(data)
fill!(table.b, 42)
all(x -> x == 42, view(flatview(data), 7, :))
end
end
@testset "gradients" begin
sntvs = NamedTupleShape(
ShapedAsNT,
a = ScalarShape{Real}(),
b = ConstValueShape([4.2, 3.3]),
c = ScalarShape{Real}(),
d = ArrayShape{Real}(2),
e = ScalarShape{Real}(),
f = ArrayShape{Real}(2)
)
ntvs = NamedTupleShape(;sntvs...)
f = let vs = sntvs
v_unshaped_0 -> begin
v_shaped_1 = vs(v_unshaped_0)
v_unshaped_1 = unshaped(v_shaped_1)
v_shaped_2 = vs(v_unshaped_1)
v_unshaped_2 = unshaped(v_shaped_2, vs)
v_shaped_3 = vs(v_unshaped_2)
v_nt_1 = v_shaped_3[]
v_unshaped_3 = unshaped(v_nt_1, vs)
v_shaped_4 = vs(v_unshaped_3)
v_unshaped_4 = unshaped(v_shaped_4, vs)
v_shaped_5 = vs(v_unshaped_4)
v_nt_2 = v_shaped_5[]
x = v_nt_2
sqrt(norm(x.a)^2 + norm(x.b)^2 + norm(x.d)^2 + norm(x.f)^2)
end
end
g = let vs = sntvs
v_shaped -> f(unshaped(v_shaped, vs))
end
for vs in (sntvs,)
v = randn(totalndof(vs))
@test @inferred(f(v)) isa Real
@test ForwardDiff.gradient(f, v) isa AbstractVector{<:Real}
grad_f_fw = ForwardDiff.gradient(f, v)
@test @inferred(Zygote.gradient(f, v)[1]) isa AbstractVector{<:Real}
grad_f_zg = Zygote.gradient(f, v)[1]
@test grad_f_fw ≈ grad_f_zg
@test @inferred(Zygote.gradient(g, vs(v))[1]) isa ShapedAsNT
@test unshaped(Zygote.gradient(g, vs(v))[1], gradient_shape(vs)) == grad_f_zg
@test @inferred(Zygote.gradient(g, vs(v)[])[1]) isa NamedTuple
@test unshaped(Zygote.gradient(g, vs(v)[])[1], gradient_shape(vs)) == grad_f_zg
end
for vs in (sntvs, ntvs)
X = nestedview(rand(totalndof(vs), 10))
f_X = let vs = vs; X -> sum(norm.(norm.(values(Tables.columns((vs.(X))))))); end
@test Zygote.gradient(f_X, X)[1] ≈ nestedview(ForwardDiff.gradient(f_X∘nestedview, flatview(X)))
sX = vs.(X)
f_sX = sX -> norm(flatview(unshaped.(sX)))
@test unshaped.(Zygote.gradient(f_sX, sX)[1], Ref(gradient_shape(vs))) ≈ nestedview(ForwardDiff.gradient(X_flat -> f_sX(vs.(nestedview(X_flat))), flatview(X)))
end
end
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using LinearAlgebra, Random, Statistics
using StatsBase, Distributions, ArraysOfArrays
@testset "reshaped_dist" begin
scshape = ScalarShape{Real}()
a1shape = ArrayShape{Real}(3)
a2shape = ArrayShape{Real}(2, 3)
ntshape = NamedTupleShape(
ShapedAsNT,
a = ArrayShape{Real}(2, 3),
b = ScalarShape{Real}(),
c = ConstValueShape(4.2),
x = ConstValueShape([11 21; 12 22]),
y = ArrayShape{Real}(4)
)
nms = (:a, :b, :c, :x, :y)
_mvndist(n::Integer) = MvNormal(fill(2.0, n), Diagonal(fill(3.0, n)))
_mvnormalrd(shape::AbstractValueShape) = ReshapedDist(_mvndist(totalndof(shape)), shape)
_mvnormalrd2(shape::AbstractValueShape) = shape(_mvndist(totalndof(shape)))
@testset "ctors" begin
@test @inferred(_mvnormalrd(scshape)) isa Distribution{Univariate, Continuous}
@test @inferred(_mvnormalrd(a1shape)) isa Distribution{Multivariate, Continuous}
@test @inferred(_mvnormalrd(a2shape)) isa Distribution{Matrixvariate, Continuous}
@test @inferred(_mvnormalrd(ntshape)) isa Distribution{ValueShapes.NamedTupleVariate{nms}, Continuous}
@test_throws ArgumentError ReshapedDist(MvNormal(Diagonal(fill(3.0, 2))), a2shape)
@test @inferred(_mvnormalrd2(scshape)) isa ReshapedDist{Univariate, Continuous}
@test @inferred(_mvnormalrd2(a1shape)) isa MvNormal
@test @inferred(_mvnormalrd2(a2shape)) isa MatrixReshaped
@test @inferred(_mvnormalrd2(ntshape)) isa ReshapedDist{ValueShapes.NamedTupleVariate{nms}, Continuous}
@test @inferred(varshape(_mvnormalrd2(ntshape))) == ntshape
@test @inferred(unshaped(_mvnormalrd2(ntshape))) == MvNormal(fill(2.0, totalndof(ntshape)), Diagonal(fill(3.0, totalndof(ntshape))))
end
@testset "rand" begin
@test @inferred(rand(_mvnormalrd(scshape))) isa Real
@test @inferred(rand(_mvnormalrd(scshape), ())) isa AbstractArray{<:Real,0}
@test @inferred(rand(_mvnormalrd(scshape), 7)) isa AbstractArray{<:Real,1}
@test @inferred(rand(_mvnormalrd(scshape), (7,))) isa AbstractArray{<:Real,1}
@test size(rand(_mvnormalrd(scshape), 7)) == (7,)
let d = _mvndist(totalndof(a1shape))
@test @inferred(a1shape(d)) === d
end
@test @inferred(rand(_mvnormalrd(a1shape))) isa AbstractVector{<:Real}
@test @inferred(rand(_mvnormalrd(a1shape), ())) isa AbstractArray{<:AbstractVector{<:Real},0}
@test @inferred(rand(_mvnormalrd(a1shape), 7)) isa AbstractArray{<:Real,2}
@test @inferred(rand(_mvnormalrd(a1shape), (7,))) isa AbstractArray{<:AbstractVector{<:Real},1}
@test size(rand(_mvnormalrd(a1shape), 7)) == (3, 7)
@test size(rand(_mvnormalrd(a1shape), (7,))) == (7,)
let d = _mvndist(totalndof(a2shape))
@test @inferred(a2shape(d)) isa MatrixReshaped
@test unshaped(a2shape(d)) === d
end
@test @inferred(rand(_mvnormalrd(a2shape))) isa AbstractMatrix{<:Real}
@test @inferred(rand(_mvnormalrd(a2shape), 7)) isa AbstractArray{<:AbstractMatrix{<:Real},1}
@test size(rand(_mvnormalrd(a2shape), 7)) == (7,)
@test @inferred(rand(_mvnormalrd(ntshape))) isa ShapedAsNT{nms}
@test @inferred(rand(_mvnormalrd(ntshape), ())) isa ShapedAsNTArray{<:ShapedAsNT{nms},0}
@test @inferred(rand(_mvnormalrd(ntshape), 7)) isa ShapedAsNTArray{<:ShapedAsNT{nms},1}
@test size(rand(_mvnormalrd(ntshape), 7)) == (7,)
let X = rand(_mvnormalrd(ntshape), 7)
@test @inferred(rand!(_mvnormalrd(ntshape), view(X, 1))) == view(X, 1)
@test @inferred(rand!(_mvnormalrd(ntshape), X)) === X
end
end
@testset "stats functions" begin
@test @inferred(mean(_mvnormalrd(scshape))) ≈ 2
@test @inferred(mode(_mvnormalrd(scshape))) ≈ 2
@test @inferred(var(_mvnormalrd(scshape))) ≈ 3
let rd = _mvnormalrd(scshape), ux = rand(unshaped(rd)), vs = varshape(rd)
@test @inferred(pdf(rd, vs(ux)[])) == pdf(unshaped(rd), ux)
@test @inferred(pdf(rd, vs(ux))) == pdf(unshaped(rd), ux)
@test @inferred(logpdf(rd, vs(ux)[])) == logpdf(unshaped(rd), ux)
@test @inferred(logpdf(rd, vs(ux))) == logpdf(unshaped(rd), ux)
@test @inferred(insupport(rd, vs(ux)[])) == true
end
@test @inferred(mean(_mvnormalrd(a1shape))) ≈ fill(2, 3)
@test @inferred(mode(_mvnormalrd(a1shape))) ≈ fill(2, 3)
@test @inferred(var(_mvnormalrd(a1shape))) ≈ fill(3, 3)
@test @inferred(cov(_mvnormalrd(a1shape))) ≈ Diagonal(fill(3.0, 3))
let rd = _mvnormalrd(a1shape), ux = rand(unshaped(rd)), vs = varshape(rd)
@test @inferred(pdf(rd, vs(ux))) == pdf(unshaped(rd), ux)
@test @inferred(logpdf(rd, vs(ux))) == logpdf(unshaped(rd), ux)
@test @inferred(insupport(rd, vs(ux))) == true
end
@test @inferred(mean(_mvnormalrd(a2shape))) ≈ fill(2, 2, 3)
@test @inferred(mode(_mvnormalrd(a2shape))) ≈ fill(2, 2, 3)
@test @inferred(var(_mvnormalrd(a2shape))) ≈ fill(3, 2, 3)
let rd = _mvnormalrd(a2shape), ux = rand(unshaped(rd)), vs = varshape(rd)
@test @inferred(pdf(rd, vs(ux))) == pdf(unshaped(rd), ux)
@test @inferred(logpdf(rd, vs(ux))) == logpdf(unshaped(rd), ux)
@test @inferred(insupport(rd, vs(ux))) == true
end
@test @inferred(mean(_mvnormalrd(ntshape)))[] == (a = [2.0 2.0 2.0; 2.0 2.0 2.0], b = 2.0, c = 4.2, x = [11 21; 12 22], y = [2.0, 2.0, 2.0, 2.0])
@test @inferred(mode(_mvnormalrd(ntshape)))[] == (a = [2.0 2.0 2.0; 2.0 2.0 2.0], b = 2.0, c = 4.2, x = [11 21; 12 22], y = [2.0, 2.0, 2.0, 2.0])
@test @inferred(var(_mvnormalrd(ntshape)))[] == (a = [3.0 3.0 3.0; 3.0 3.0 3.0], b = 3.0, c = 0.0, x = [0 0; 0 0], y = [3.0, 3.0, 3.0, 3.0])
let rd = _mvnormalrd(ntshape), ux = rand(unshaped(rd)), vs = varshape(rd)
@test @inferred(pdf(rd, vs(ux)[])) == pdf(unshaped(rd), ux)
@test @inferred(pdf(rd, vs(ux))) == pdf(unshaped(rd), ux)
@test @inferred(logpdf(rd, vs(ux)[])) == logpdf(unshaped(rd), ux)
@test @inferred(logpdf(rd, vs(ux))) == logpdf(unshaped(rd), ux)
@test @inferred(insupport(rd, vs(ux)[])) == true
@test @inferred(insupport(rd, vs(ux))) == true
end
let rd = ReshapedDist(_mvndist(5), ArrayShape{Real}(5)), X = rand(rd, 10), Xn = nestedview(X)
@test @inferred(Distributions._pdf(rd, Xn[1])) == pdf(rd, Xn[1])
@test @inferred(pdf(rd, X)) == pdf.(Ref(rd), Xn)
@test @inferred(Distributions._logpdf(rd, Xn[1])) == logpdf(rd, Xn[1])
@test @inferred(logpdf(rd, X)) == logpdf.(Ref(rd), Xn)
end
end
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Random
using ArraysOfArrays
import TypedTables
@testset "scalar_shape" begin
@test @inferred(ValueShapes._valshapeoftype(Int)) == ScalarShape{Int}()
@test @inferred(ValueShapes._valshapeoftype(Complex{Float64})) == ScalarShape{Complex{Float64}}()
@test @inferred(valshape(3)) == ScalarShape{Int}()
@test @inferred(size(ScalarShape{Real}())) == ()
@test @inferred(size(ScalarShape{Complex}())) == ()
@test @inferred(ValueShapes.default_unshaped_eltype(ScalarShape{Real}())) == Float64
@test @inferred(ValueShapes.default_unshaped_eltype(ScalarShape{Complex{Real}}())) == Float64
@test @inferred(ValueShapes.default_unshaped_eltype(ScalarShape{Complex{Int32}}())) == Int32
@test @inferred(ValueShapes.shaped_type(ScalarShape{Real}())) == Float64
@test @inferred(ValueShapes.shaped_type(ScalarShape{Real}(), Float32)) == Float32
@test @inferred(ValueShapes.shaped_type(ScalarShape{Complex}())) == Complex{Float64}
@test @inferred(ValueShapes.shaped_type(ScalarShape{Complex{Real}}(), Float32)) == Complex{Float32}
@test @inferred(ValueShapes.shaped_type(ScalarShape{Complex{Int16}}())) == Complex{Int16}
@test @inferred(totalndof(ScalarShape{Int}())) == 1
@test @inferred(totalndof(ScalarShape{Complex{Float64}}())) == 2
@test @inferred(ScalarShape{Real}()(undef)) === zero(Float64)
@test @inferred(ScalarShape{Complex}()(undef)) === zero(Complex{Float64})
@test typeof(@inferred(ScalarShape{Real}()([42]))) <: Integer
@test @inferred(ScalarShape{Real}()([42])[]) == 42
@test @inferred (ScalarShape{Int}() <= ScalarShape{Real}()) == true
@test @inferred (ScalarShape{Real}() <= ScalarShape{Int}()) == false
@test @inferred (ScalarShape{Real}() >= ScalarShape{Int}()) == true
let shape = ScalarShape{Real}(), data = [4.2]
@test @inferred(unshaped(shape(data), shape)) == data
@test @inferred(unshaped(shape(data)[], shape)) == data
@test_throws ArgumentError unshaped(shape([3, 4]), shape)
@test_throws ArgumentError unshaped(shape(data), ScalarShape{Integer}())
end
@test let
shape = ScalarShape{Real}()
valshape(shape.(push!(@inferred(VectorOfSimilarVectors{Float64}(shape)), @inferred(Vector{Float64}(undef, shape))))[1]) == valshape(shape(undef))
end
let
A = collect(1:6)
ac = ValueAccessor(ScalarShape{Real}(), 2)
@test @inferred(getindex(A, ac)) == 3
@test @inferred(view(A, ac))[] == 3
@test view(A, ac) isa AbstractArray{<:Real,0}
@test @inferred(setindex!(A, 7, ac)) === A
@test A[ac] == 7
end
let
A = collect(reshape(1:6*6, 6, 6))
ac1 = ValueAccessor(ScalarShape{Real}(), 2)
ac2 = ValueAccessor(ScalarShape{Real}(), 4)
@test @inferred(getindex(A, ac1, ac2)) == 27
@test @inferred(view(A, ac1, ac2))[] == 27
@test view(A, ac1, ac2) isa AbstractArray{<:Real,0}
@test @inferred(setindex!(A, 42, ac1, ac2)) === A
@test A[ac1, ac2] == 42
end
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
@testset "valueaccessor" begin
acc = @inferred ValueAccessor(ArrayShape{Real}(2,3), 2)
@test @inferred(valshape(acc)) == ArrayShape{Real,2}((2, 3))
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
@test @inferred(data[acc]) == [3 5 7; 4 6 8]
@test_throws ArgumentError Broadcast.broadcastable(acc)
@test @inferred(size(acc)) == getproperty(getproperty(acc, :shape), :dims)
@test @inferred(length(acc)) == length(getproperty(acc, :shape))
@test @inferred(ValueShapes.default_unshaped_eltype(acc)) == @inferred(ValueShapes.default_unshaped_eltype(getproperty(acc, :shape)))
end
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).
using ValueShapes
using Test
using Random
using ElasticArrays
using ArraysOfArrays
using FillArrays
using InverseFunctions, ChangesOfVariables
using ChainRulesCore: rrule, NoTangent
import TypedTables
import Dates
@testset "abstract_value_shape" begin
@testset "default_datatype" begin
@test @inferred(ValueShapes.default_datatype(Integer)) == Int
@test @inferred(ValueShapes.default_datatype(AbstractFloat)) == Float64
@test @inferred(ValueShapes.default_datatype(Real)) == Float64
@test @inferred(ValueShapes.default_datatype(Complex)) == Complex{Float64}
@test @inferred(elshape(Complex(1.0, 2.0))) == ScalarShape{Complex{Float64}}()
@test @inferred(elshape([[3, 5], [3, 2]])) == ArrayShape{Int,1}((2,))
@test ValueShapes.stripscalar(Ref(ScalarShape{Real})) == ScalarShape{Real}
# @test Vector{Real}(undef, ArrayShape{Real}((2,1))) == # weird typing going on with default_unshaped_eltype
arrshape = ArrayShape{Real, 2}((2,3))
v = Vector{Real}(undef, arrshape)
@test @inferred(length(v)) == 6
@test @inferred(size(v)) == (6,)
data1 = [1;2;3;4;7;8;9]
scalarshape = ScalarShape{Real}()
ntshape = NamedTupleShape(a=arrshape, b=scalarshape)
shapedasnt = ntshape(data1)
@test stripscalar(Ref(shapedasnt)) == Ref(shapedasnt)[]
@test_throws ArgumentError Broadcast.broadcastable(ntshape)
named_shapes = (
a = ArrayShape{Real}(2, 3),
b = ScalarShape{Real}(),
c = ConstValueShape(4.2),
x = ConstValueShape([11 21; 12 22]),
y = ArrayShape{Real}(4)
)
shape = NamedTupleShape(;named_shapes...)
sntshape = NamedTupleShape(ShapedAsNT; named_shapes...)
data2 = VectorOfSimilarVectors(reshape(collect(1:22), 11, 2))
@test_throws ArgumentError ValueShapes._checkcompat_inner(ntshape, data2)
@test ValueShapes._checkcompat_inner(shape, data2) == nothing
let vs = sntshape, x = rand(totalndof(vs)), xs = nestedview(rand(totalndof(vs), 5))
vs_jacobian(f, x) = 1
InverseFunctions.test_inverse(vs, x)
ChangesOfVariables.test_with_logabsdet_jacobian(vs, x, vs_jacobian)
ChangesOfVariables.test_with_logabsdet_jacobian(inverse(vs), vs(x), vs_jacobian)
bc_vs = Base.Fix1(broadcast, vs)
bc_unshaped = Base.Fix1(broadcast, unshaped)
InverseFunctions.test_inverse(bc_vs, xs)
@test with_logabsdet_jacobian(bc_vs, xs)[1] isa ShapedAsNTArray
ChangesOfVariables.test_with_logabsdet_jacobian(bc_vs, xs, vs_jacobian)
@test with_logabsdet_jacobian(inverse(bc_vs), vs.(xs))[1] isa ArrayOfSimilarArrays
ChangesOfVariables.test_with_logabsdet_jacobian(inverse(bc_vs), vs.(xs), vs_jacobian)
@test with_logabsdet_jacobian(bc_unshaped, vs.(xs))[1] isa ArrayOfSimilarArrays
ChangesOfVariables.test_with_logabsdet_jacobian(bc_unshaped, vs.(xs), vs_jacobian)
for f in [vs, inverse(vs), bc_vs, inverse(bc_vs), unshaped]
@test @inferred(identity ∘ f) === f
@test @inferred(f ∘ identity) === f
end
end
@test @inferred(unshaped(4.2)) isa Fill{Float64,1}
@test unshaped(4.2) == [4.2]
@test @inferred(unshaped(view([4.2], 1))) isa SubArray{Float64,1,Vector{Float64}}
@test unshaped(view([4.2], 1)) == [4.2]
@test @inferred(unshaped(Array(view([4.2], 1)))) isa SubArray{Float64,1,Vector{Float64}}
@test unshaped(Array(view([4.2], 1))) == [4.2]
let x = rand(15)
@test @inferred(unshaped(x)) === x
@test @inferred(unshaped(Base.ReshapedArray(x, (3, 5), ()))) === x
@test @inferred(broadcast(unshaped, x)) isa ArrayOfSimilarArrays{Float64,1,1,2,<:Base.ReshapedArray}
@test broadcast(unshaped, x) == nestedview(reshape(x, 1, 15))
end
let A = rand(1,15)
@test @inferred(broadcast(unshaped, view(A, 1, :))) isa ArrayOfSimilarArrays{Float64,1,1,2,<:SubArray}
@test broadcast(unshaped, view(A, 1, :)) == nestedview(A)
end
end
@testset "realnumtype" begin
a = 4.2f0
b = Complex(a, a)
A = fill(fill(rand(Float32, 5), 10), 5)
B = fill(rand(Float32, 5), 10)
C = ArrayOfSimilarArrays(B)
nt = (a = 4.2f0, b = 42)
tpl = (4.2f0, Float16(2.3))
deepnt = (a = a, b = b, A = A, B = B, C = C, nt = nt, tpl = tpl)
for x in [a, A, B, C, nt, tpl, deepnt]
@test @inferred (realnumtype(typeof(x))) == Float32
end
@test @inferred(realnumtype(typeof(Dates.TWENTYFOURHOUR))) <: Integer
for x in [nothing, missing, (), :foo, "foo"]
@test @inferred(realnumtype(typeof(x))) == Bool
end
end
@testset "value shape comparison" begin
for (T, U) in [(Real, Float64), (Float64, Real), (Real, Real), (Float64, Float64)]
@test @inferred(ScalarShape{T}() <= ScalarShape{U}())[1] == (T <: U)
@test @inferred(rrule(Base.:(<=), ScalarShape{T}(), ScalarShape{U}()))[1] == (T <: U)
@test @inferred(rrule(Base.:(<=), ScalarShape{T}(), ScalarShape{U}()))[2](T <: U) == (NoTangent(), NoTangent(), NoTangent())
@test @inferred(ScalarShape{T}() >= ScalarShape{U}())[1] == (T >: U)
@test @inferred(rrule(Base.:(>=), ScalarShape{T}(), ScalarShape{U}()))[1] == (T >: U)
@test @inferred(rrule(Base.:(>=), ScalarShape{T}(), ScalarShape{U}()))[2](T >: U) == (NoTangent(), NoTangent(), NoTangent())
@test @inferred(ScalarShape{T}() == ScalarShape{U}())[1] == (T == U)
@test @inferred(rrule(Base.:(==), ScalarShape{T}(), ScalarShape{U}()))[1] == (T == U)
@test @inferred(rrule(Base.:(==), ScalarShape{T}(), ScalarShape{U}()))[2](T == U) == (NoTangent(), NoTangent(), NoTangent())
end
end
end
| ValueShapes | https://github.com/oschulz/ValueShapes.jl.git |
|
[
"MIT"
] | 0.11.3 | 6ae70bb512c43266b7f425a135be4b65d9930216 | docs | 2138 | # ValueShapes.jl
[](https://oschulz.github.io/ValueShapes.jl/stable)
[](https://oschulz.github.io/ValueShapes.jl/dev)
[](LICENSE.md)
[](https://github.com/oschulz/ValueShapes.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/oschulz/ValueShapes.jl)
## Documentation
ValueShapes provides Julia types to describe the shape of values, like
scalars, arrays and structures.
Shapes provide a generic way to construct uninitialized values (e.g.
multidimensional arrays) without using templates.
Shapes also act as a bridge between structured and flat data representations:
Mathematical and statistical algorithms (e.g. optimizers, fitters, solvers,
etc.) often represent variables/parameters as flat vectors of nameless real
values. But user code will usually be more concise and readable if
variables/parameters can have names (e.g. via `NamedTuple`s) and non-scalar
shapes. ValueShapes provides a duality of view between the two different data
representations.
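For example, a brief sketch (see the documentation for the full API):

```julia
using ValueShapes

shape = NamedTupleShape(a = ScalarShape{Real}(), b = ArrayShape{Real}(2, 3))
totalndof(shape) == 7      # flat representation: 7 real-valued degrees of freedom
x = rand(totalndof(shape))
shape(x).b                 # named, structured 2x3 view into the flat vector `x`
```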
See the documentation for details:
* [Documentation for stable version](https://oschulz.github.io/ValueShapes.jl/stable)
* [Documentation for development version](https://oschulz.github.io/ValueShapes.jl/dev)
ValueShapes is designed to compose well with
[ElasticArrays](https://github.com/JuliaArrays/ElasticArrays.jl),
[ArraysOfArrays](https://github.com/oschulz/ArraysOfArrays.jl) and
[TypedTables](https://github.com/FugroRoames/TypedTables.jl) (and similar
table packages). The ValueShapes package has some overlap in functionality
with [TransformVariables](https://github.com/tpapp/TransformVariables.jl), but
provides a duality of view instead of transformations (and therefore uses data
views instead of data copies, where possible).
| ValueShapes | https://github.com/oschulz/ValueShapes.jl.git |
|
[
"MIT"
] | 0.11.3 | 6ae70bb512c43266b7f425a135be4b65d9930216 | docs | 361 | # API
```@meta
DocTestSetup = quote
using ValueShapes
end
```
## Modules
```@index
Order = [:module]
```
## Types and constants
```@index
Order = [:type, :constant]
```
## Functions and macros
```@index
Order = [:macro, :function]
```
# Documentation
```@autodocs
Modules = [ValueShapes]
Order = [:module, :type, :constant, :macro, :function]
```
| ValueShapes | https://github.com/oschulz/ValueShapes.jl.git |
|
[
"MIT"
] | 0.11.3 | 6ae70bb512c43266b7f425a135be4b65d9930216 | docs | 3431 | # ValueShapes.jl
ValueShapes provides Julia types to describe the shape of values, like
scalars, arrays and structures.
Shapes provide a generic way to construct uninitialized values (e.g.
multidimensional arrays) without using templates.
Shapes also act as a bridge between structured and flat data representations:
Mathematical and statistical algorithms (e.g. optimizers, fitters, solvers,
etc.) often represent variables/parameters as flat vectors of nameless real
values. But user code will usually be more concise and readable if
variables/parameters can have names (e.g. via `NamedTuple`s) and non-scalar
shapes. ValueShapes provides a duality of view between the two different data
representations.
ValueShapes defines the shape of a value as the combination of its data
type (resp. element type, in the case of arrays) and the size of the value
(relevant if the value is an array), e.g.
```julia
using ValueShapes
ScalarShape{Real}()
ArrayShape{Real}(2, 3)
ConstValueShape([1 2; 3 4])
```
Array shapes can be used to construct a compatible real-valued data vector:
```julia
Vector{Float64}(undef, ArrayShape{Real}(2, 3)) isa Vector{Float64}
```
ValueShapes also provides a way to define the shape of a `NamedTuple`.
This can be used to specify the names and shapes of a set of variables or
parameters. Consider a fitting problem with the following parameters: A
scalar `a`, a 2x3 array `b` and an array `c` pinned to a fixed value. This
set of parameters can be specified as
```julia
parshapes = NamedTupleShape(
a = ScalarShape{Real}(),
b = ArrayShape{Real}(2, 3),
c = ConstValueShape([1 2; 3 4])
)
```
This set of parameters has
```julia
totalndof(parshapes) == 7
```
total degrees of freedom (the constant `c` does not contribute). The flat data
representation for this `NamedTupleShape` is a vector of length 7:
```julia
using Random
data = Vector{Float64}(undef, parshapes)
size(data) == (7,)
rand!(data)
```
which can again be viewed as a `NamedTuple` described by `parshapes` via
```julia
data_as_ntuple = parshapes(data)
```
Note: The macro `@unpack` provided by the package
[UnPack](https://github.com/mauro3/UnPack.jl) is very handy for
unpacking `NamedTuple`s selectively.
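For example, a small sketch continuing from the snippet above:

```julia
using UnPack
@unpack a, b = data_as_ntuple
size(b) == (2, 3)
```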
ValueShapes can also handle multiple values for sets of variables and
is designed to compose well with
[ArraysOfArrays.jl](https://github.com/oschulz/ArraysOfArrays.jl) and
[Tables.jl](https://github.com/JuliaData/Tables.jl)
(and similar table packages). Broadcasting a shape over a vector of
real-valued vectors will create a view that implements the Tables.jl API:
```julia
using ArraysOfArrays, Tables, TypedTables
multidata = VectorOfSimilarVectors{Int}(parshapes)
resize!(multidata, 10)
rand!(flatview(multidata), 0:99)
A = parshapes.(multidata)
keys(Tables.columns(A)) == (:a, :b, :c)
```
ValueShapes supports this via specialized broadcasting. `A` is now a
table-like view into the data (see [`ShapedAsNTArray`](@ref)), and shares
memory with `multidata`. To create an independent `NamedTuple` of columns,
with a contiguous memory layout for each column, use `Tables.columns(A)`:
```julia
tcols = Tables.columns(A)
flatview(tcols.b) isa Array{Int}
```
Constructing a `TypedTables.Table`, `DataFrames.DataFrame` or similar from `A`
will have the same effect:
```julia
using TypedTables
flatview(Table(A).b) isa AbstractArray{Int}
using DataFrames
flatview(DataFrame(A).b) isa AbstractArray{Int}
```
| ValueShapes | https://github.com/oschulz/ValueShapes.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | code | 744 | using StaticPermutations
using Documenter
DocMeta.setdocmeta!(StaticPermutations, :DocTestSetup, :(using StaticPermutations);
recursive=true)
makedocs(;
modules=[StaticPermutations],
authors="Juan Ignacio Polanco <jipolanc@gmail.com> and contributors",
repo="https://github.com/jipolanco/StaticPermutations.jl/blob/{commit}{path}#L{line}",
sitename="StaticPermutations.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://jipolanco.github.io/StaticPermutations.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/jipolanco/StaticPermutations.jl.git",
forcepush=true,
)
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | code | 254 | module StaticPermutations
export AbstractPermutation, Permutation, NoPermutation
export
identity_permutation,
isidentity,
append,
prepend
import Base: ==, *, /, \
include("types.jl")
include("operations.jl")
include("arrays.jl")
end
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | code | 528 | """
PermutedDimsArray(A, perm::AbstractPermutation) -> B
Alternative `PermutedDimsArray` constructor taking a static permutation.
In contrast to the base constructors, the returned type is fully inferred here.
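# Examples
Permuting the dimensions of a matrix:
```jldoctest
julia> A = rand(3, 5);

julia> B = PermutedDimsArray(A, Permutation(2, 1));

julia> size(B)
(5, 3)
```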
"""
function Base.PermutedDimsArray(
A::AbstractArray{T,N}, perm::Permutation{p,N}) where {T,p,N}
iperm = inv(perm)
PermutedDimsArray{T, N, Tuple(perm), Tuple(iperm), typeof(A)}(A)
end
Base.PermutedDimsArray(A::AbstractArray, ::NoPermutation) =
PermutedDimsArray(A, identity_permutation(A))
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | code | 6211 | """
Tuple(perm::Permutation)
Extract tuple representation of Permutation.
The result can be passed to `permutedims` and `PermutedDimsArray`.
Returns `nothing` if `perm` is a `NoPermutation`.
# Examples
```jldoctest
julia> Tuple(Permutation(3, 2, 1))
(3, 2, 1)
julia> Tuple(NoPermutation()) === nothing
true
```
"""
Base.Tuple(::Permutation{p}) where {p} = p
Base.Tuple(::NoPermutation) = nothing
"""
length(perm::AbstractPermutation)
Returns length of permutation.
For `NoPermutation`, returns `nothing`.
# Examples
```jldoctest
julia> length(Permutation(3, 2, 1))
3
julia> length(NoPermutation()) === nothing
true
```
"""
Base.length(::Permutation{p}) where {p} = length(p)
Base.length(::NoPermutation) = nothing
"""
isperm(perm::AbstractPermutation) -> Bool
Returns `true` if `perm` is a valid permutation, `false` otherwise.
The result is known at compile time.
# Examples
```jldoctest
julia> isperm(Permutation(3, 1, 2))
true
julia> isperm(Permutation(4, 1, 2))
false
julia> isperm(NoPermutation())
true
```
"""
Base.isperm(::NoPermutation) = true
function Base.isperm(::Permutation{P}) where {P}
P :: Tuple
if @generated
# Call isperm tuple implementation in base Julia
pp = isperm(P)
:( $pp )
else
isperm(P)
end
end
"""
*(p::AbstractPermutation, collection)
Apply permutation to the given collection.
The collection may be a `Tuple` or a `CartesianIndex` to be reordered. If `p` is
a [`Permutation`](@ref), the collection must have the same length as `p`.
# Examples
```jldoctest
julia> p = Permutation(2, 3, 1);
julia> p * (36, 42, 14)
(42, 14, 36)
julia> p * CartesianIndex(36, 42, 14)
CartesianIndex(42, 14, 36)
```
"""
*(::NoPermutation, t::Tuple) = t
@inline function *(::Permutation{p,N}, t::Tuple{Vararg{Any,N}}) where {N,p}
@inbounds ntuple(i -> t[p[i]], Val(N))
end
@inline *(p::AbstractPermutation, I::CartesianIndex) = CartesianIndex(p * Tuple(I))
"""
*(p::AbstractPermutation, q::AbstractPermutation)
Compose two permutations: apply permutation `p` to permutation `q`.
Note that permutation composition is non-commutative.
# Examples
```jldoctest
julia> p = Permutation(2, 3, 1);
julia> q = Permutation(1, 3, 2);
julia> p * q
Permutation(3, 2, 1)
julia> q * p
Permutation(2, 1, 3)
julia> p * inv(p)
Permutation(1, 2, 3)
julia> inv(p) * p
Permutation(1, 2, 3)
```
"""
*(p::Permutation, q::Permutation) = Permutation(p * Tuple(q))
*(::NoPermutation, q::AbstractPermutation) = q
*(p::AbstractPermutation, ::NoPermutation) = p
*(p::NoPermutation, ::NoPermutation) = p
"""
/(y::AbstractPermutation, x::AbstractPermutation)
Get relative permutation needed to get from `x` to `y`. That is, the permutation
`p` such that `p * x == y`.
# Examples
```jldoctest
julia> x = Permutation(3, 1, 2);
julia> y = Permutation(2, 1, 3);
julia> p = y / x
Permutation(3, 2, 1)
julia> p * x == y
true
```
"""
function /(::Permutation{q,N}, ::Permutation{p,N}) where {p, q, N}
if @generated
perm = map(v -> findfirst(==(v), p)::Int, q)
@assert Permutation(perm) * p === q
:( Permutation($perm) )
else
perm = map(v -> findfirst(==(v), p)::Int, q)
@assert Permutation(perm) * p === q
Permutation(perm)
end
end
/(y::AbstractPermutation, ::NoPermutation) = y
# In this case, the result is the inverse permutation of `x`, such that
# `perm * x == (1, 2, 3, ...)`.
/(::NoPermutation, x::Permutation{p,N}) where {p,N} =
identity_permutation(Val(N)) / x
"""
\\(p::AbstractPermutation, x)
Undo permutation `p` from permuted collection `x`.
In other words, apply inverse of permutation `p` to `x`. This is effectively
equivalent to `inv(p) * x`.
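# Examples
```jldoctest
julia> p = Permutation(2, 3, 1);

julia> x = p * (36, 42, 14)
(42, 14, 36)

julia> p \\ x
(36, 42, 14)
```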
"""
\(p::AbstractPermutation, x) = inv(p) * x
"""
inv(p::Permutation)
invperm(p::Permutation)
Returns the inverse permutation of `p`.
Functionally equivalent to Julia's `invperm`, with the advantage that the result
is a compile time constant.
See also [`/`](@ref).
# Examples
```jldoctest
julia> p = Permutation(2, 3, 1);
julia> q = inv(p)
Permutation(3, 1, 2)
julia> t_orig = (36, 42, 14);
julia> t_perm = p * t_orig
(42, 14, 36)
julia> q * t_perm === t_orig
true
```
"""
Base.inv(x::AbstractPermutation) = NoPermutation() / x
Base.invperm(x::AbstractPermutation) = inv(x)
"""
identity_permutation(::Val{N})
identity_permutation(A::AbstractArray{T,N})
Returns the identity permutation `Permutation(1, 2, ..., N)`.
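# Examples
```jldoctest
julia> identity_permutation(Val(4))
Permutation(1, 2, 3, 4)
```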
"""
identity_permutation(::Val{N}) where {N} = Permutation(ntuple(identity, Val(N)))
identity_permutation(A::AbstractArray) = identity_permutation(Val(ndims(A)))
"""
isidentity(p::Permutation)
Returns `true` if `p` is an identity permutation, i.e. if it is equivalent to
`(1, 2, 3, ...)`.
```jldoctest
julia> isidentity(Permutation(1, 2, 3))
true
julia> isidentity(Permutation(1, 3, 2))
false
julia> isidentity(NoPermutation())
true
```
"""
isidentity(::NoPermutation) = true
function isidentity(perm::Permutation)
N = length(perm)
perm === identity_permutation(Val(N))
end
# Comparisons: (1, 2, ..., N) is considered equal to NoPermutation, for any N.
==(::Permutation{p}, ::Permutation{q}) where {p,q} = p === q
==(::NoPermutation, ::NoPermutation) = true
==(p::Permutation, ::NoPermutation) = isidentity(p)
==(np::NoPermutation, p::Permutation) = p == np
"""
append(p::Permutation, ::Val{M})
Append `M` non-permuted dimensions to the given permutation.
# Examples
```jldoctest
julia> append(Permutation(2, 3, 1), Val(2))
Permutation(2, 3, 1, 4, 5)
julia> append(NoPermutation(), Val(2))
NoPermutation()
```
"""
function append(::Permutation{p}, ::Val{M}) where {p, M}
N = length(p)
Permutation(p..., ntuple(i -> N + i, Val(M))...)
end
append(np::NoPermutation, ::Val) = np
"""
prepend(p::Permutation, ::Val{M})
Prepend `M` non-permuted dimensions to the given permutation.
# Examples
```jldoctest
julia> prepend(Permutation(2, 3, 1), Val(2))
Permutation(1, 2, 4, 5, 3)
julia> prepend(NoPermutation(), Val(2))
NoPermutation()
```
"""
function prepend(::Permutation{p}, ::Val{M}) where {p, M}
Permutation(ntuple(identity, Val(M))..., (M .+ p)...)
end
prepend(np::NoPermutation, ::Val) = np
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | code | 1373 | """
AbstractPermutation
Abstract type representing a compile-time permutation.
Subtypes are [`Permutation`](@ref) and [`NoPermutation`](@ref).
"""
abstract type AbstractPermutation end
"""
Permutation{p} <: AbstractPermutation
Describes a compile-time dimension permutation.
The type parameter `p` should be a valid permutation such as `(3, 1, 2)`.
---
Permutation(perm::Vararg{Int})
Permutation(perm::NTuple{N,Int})
Constructs a `Permutation`.
# Example
Both are equivalent:
```julia
p1 = Permutation(3, 4)
p2 = Permutation((3, 4))
```
"""
struct Permutation{p,N} <: AbstractPermutation
@inline Permutation(perm::Vararg{Int}) = new{perm, length(perm)}()
end
@inline Permutation(perm::Tuple) = Permutation(perm...)
@inline Base.getindex(::Permutation{p}, ::Val{i}) where {p,i} = p[i]
@inline Base.getindex(p::Permutation, i::Integer) = p[Val(i)]
Base.show(io::IO, ::Permutation{p}) where {p} = print(io, "Permutation", p)
"""
NoPermutation <: AbstractPermutation
Represents an identity permutation.
It is functionally equivalent to `Permutation(1, 2, 3, ...)`, and is included
for convenience.
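# Examples
```jldoctest
julia> NoPermutation() * (36, 42, 14)
(36, 42, 14)

julia> NoPermutation() == Permutation(1, 2, 3)
true
```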
"""
struct NoPermutation <: AbstractPermutation end
@inline Base.getindex(::NoPermutation, ::Val{i}) where {i} = i
@inline Base.getindex(::NoPermutation, i::Integer) = i
Base.show(io::IO, ::NoPermutation) = print(io, "NoPermutation()")
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | code | 4877 | using StaticPermutations
using Test
@testset "StaticPermutations.jl" begin
perm = Permutation(2, 3, 1)
noperm = NoPermutation()
@testset "Constructors" begin
@test perm === Permutation((2, 3, 1))
@inferred (() -> Permutation(2, 3, 1))()
@inferred (() -> Permutation((2, 3, 1)))()
@inferred NoPermutation()
end
@testset "getindex!" begin
@test perm[2] == perm[Val(2)] == 3
@test noperm[2] == noperm[Val(2)] == 2
valgettwo(p) = Val(p[2])
@inferred valgettwo(perm)
@inferred valgettwo(noperm)
end
@testset "I/O" begin
@test string(perm) == "Permutation(2, 3, 1)"
@test string(noperm) == "NoPermutation()"
end
@testset "Base operations" begin
@test Tuple(perm) === (2, 3, 1)
@test Tuple(noperm) === nothing
@test length(perm) === 3
@test length(noperm) === nothing
end
@testset "Permutation checks" begin
@test isperm(perm)
@test isperm(noperm)
@test !isperm(Permutation(2, 5, 3))
# Check that result is fully inferred
ispermval(p) = Val(isperm(p))
@inferred ispermval(perm)
@inferred ispermval(noperm)
@inferred ispermval(Permutation(2, 5, 3))
end
@testset "Composition" begin
p = Permutation(2, 3, 1)
q = Permutation(1, 3, 2)
@inferred p * q
@inferred p * NoPermutation()
@inferred NoPermutation() * p
@test p * q === Permutation(3, 2, 1)
@test q === p \ Permutation(3, 2, 1)
@test q * p === Permutation(2, 1, 3)
@test p === q \ Permutation(2, 1, 3)
@test p * NoPermutation() === p
@test NoPermutation() * p === p
@test NoPermutation() * NoPermutation() === NoPermutation()
@test p * inv(p) == NoPermutation()
@test inv(p) * p == NoPermutation()
end
iperm = inv(perm)
@testset "Inverse permutation" begin
@test perm * iperm == NoPermutation()
@test iperm * perm == NoPermutation()
@test invperm(perm) === iperm
@inferred inv(perm)
end
@testset "Identity permutation" begin
@inferred identity_permutation(Val(4))
id = identity_permutation(Val(4))
@test id === Permutation(1, 2, 3, 4)
@test id == NoPermutation() # they're functionally equivalent
idval(p) = Val(isidentity(p))
@test (@inferred idval(perm)) === Val(false)
@test (@inferred idval(noperm)) === Val(true)
@test (@inferred idval(id)) === Val(true)
@inferred idval(perm)
@test !isidentity(perm)
@test isidentity(NoPermutation())
@test isidentity(id)
@test isidentity(perm * iperm)
end
@testset "Left division operator" begin
a = Permutation(2, 3, 1, 4)
b = Permutation(3, 1, 4, 2)
@inferred b / a
r = b / a
@test r * a == b
np = NoPermutation()
@test a / np === a
@test np / a === inv(a)
@test np / np === np
end
@testset "Comparisons" begin
@test Permutation(1, 3, 2) == Permutation(1, 3, 2)
@test Permutation(1, 3, 2) != Permutation(3, 1, 2)
@test Permutation(2, 1, 3) != Permutation(2, 1)
@test Permutation(1, 2, 3) == NoPermutation()
@test NoPermutation() == Permutation(1, 2, 3)
@test Permutation(1, 3, 2) != NoPermutation()
@test NoPermutation() == NoPermutation()
end
@testset "Apply permutations" begin
ind = (20, 30, 10)
ind_perm = (30, 10, 20)
permval(perm) = Val(perm * (:aaa, :bbb, :ccc))
@inferred permval(perm)
@inferred permval(iperm)
@inferred permval(noperm)
@test noperm * ind === ind
@test ind === noperm \ ind
@test perm * ind === ind_perm
@test ind === perm \ ind_perm
@test perm * CartesianIndex(ind) === CartesianIndex(ind_perm)
@test noperm * CartesianIndex(ind) === CartesianIndex(ind)
end
@testset "Prepend / append" begin
@inferred prepend(Permutation(2, 3, 1), Val(2))
@inferred append(Permutation(2, 3, 1), Val(2))
@test prepend(Permutation(2, 3, 1), Val(2)) === Permutation(1, 2, 4, 5, 3)
@test prepend(NoPermutation(), Val(2)) === NoPermutation()
@test append(Permutation(2, 3, 1), Val(2)) === Permutation(2, 3, 1, 4, 5)
@test append(NoPermutation(), Val(2)) === NoPermutation()
end
@testset "PermutedDimsArray" begin
x = rand(3, 5, 4)
@inferred PermutedDimsArray(x, perm)
@inferred PermutedDimsArray(x, noperm)
# Compare new and original constructors
@test PermutedDimsArray(x, perm) === PermutedDimsArray(x, Tuple(perm))
@test PermutedDimsArray(x, noperm) === PermutedDimsArray(x, (1, 2, 3))
end
end
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | docs | 1957 | # StaticPermutations
[](https://jipolanco.github.io/StaticPermutations.jl/stable) [](https://jipolanco.github.io/StaticPermutations.jl/dev) [](https://github.com/jipolanco/StaticPermutations.jl/actions) [](https://codecov.io/gh/jipolanco/StaticPermutations.jl)
Tools for dealing with compile-time dimension permutations of Julia arrays.
This package defines a `Permutation` type describing a permutation of dimensions.
Permutations can be composed, inverted, applied to collections and reverted, among other operations.
All these operations have zero runtime cost, since they are performed using the static information encoded in the `Permutation` type.
See the [documentation](https://jipolanco.github.io/StaticPermutations.jl/dev) for a list of implemented methods.
## Quick start
```julia
julia> using StaticPermutations
julia> perm = Permutation(2, 3, 1)
Permutation(2, 3, 1)
julia> typeof(perm)
Permutation{(2, 3, 1),3}
```
Permutations can be inverted and composed.
The resulting permutation is always fully inferred.
```julia
julia> inv(perm) # same as invperm(perm)
Permutation(3, 1, 2)
julia> q = Permutation(3, 2, 1);
# Composition is performed using the `*` operator.
julia> perm * q
Permutation(2, 1, 3)
# Note that composition is non-commutative.
julia> q * perm
Permutation(1, 3, 2)
```
Permutations are applied to collections using the `*` operator:
```julia
julia> x = (42, 12, 32) # these may be array indices, for instance
(42, 12, 32)
julia> y = perm * x
(12, 32, 42)
```
Permutations may be reverted using the `\` operator:
```julia
julia> x′ = perm \ y # same as inv(perm) * y
(42, 12, 32)
julia> x′ == x
true
```
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.3.0 | 193c3daa18ff3e55c1dae66acb6a762c4a3bdb0b | docs | 134 | ```@meta
CurrentModule = StaticPermutations
```
# StaticPermutations
```@index
```
```@autodocs
Modules = [StaticPermutations]
```
| StaticPermutations | https://github.com/jipolanco/StaticPermutations.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | code | 512 | using Documenter
using FieldFlags
DocMeta.setdocmeta!(FieldFlags, :DocTestSetup, :(using FieldFlags); recursive=true)
makedocs(modules=[FieldFlags],
sitename = "FieldFlags.jl",
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true"),
pages = [
"index.md",
"examples.md",
"api.md"
])
!isinteractive() && deploydocs(
repo = "github.com/Seelengrab/FieldFlags.jl.git",
devbranch = "main",
push_preview = true
)
| FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | code | 20198 | module FieldFlags
export @bitflags, @bitfield
"""
propertyoffset(::Type{T}, s::Symbol) -> Int
Gives the offset (in bits) the field `s` is placed at in objects of type `T`.
See also [`FieldFlags.fieldsize`](@ref).
```jldoctest
julia> @bitflags mutable struct MyFlags
flagA
_ # padding
flagB
end
julia> FieldFlags.propertyoffset(MyFlags, :flagA)
0
julia> FieldFlags.propertyoffset(MyFlags, :flagB)
2
```
"""
function propertyoffset end
"""
fieldsize(::Type{T}, s::Symbol) -> Int
Gives the size (in bits) the field `s` takes up in objects of type `T`.
See also [`FieldFlags.propertyoffset`](@ref).
```jldoctest
julia> @bitfield mutable struct MyBits
a:2
_ # padding
b:3
end
julia> FieldFlags.fieldsize(MyBits, :a)
2
julia> FieldFlags.fieldsize(MyBits, :b)
3
```
"""
function fieldsize end
"""
bitfieldnames(::Type{T}) -> NTuple{N, Symbol}
bitfieldnames(::T) -> NTuple{N, Symbol}
Gives the field names of the given bitfield object or type.
```jldoctest
julia> @bitfield mutable struct MyBits
a:2
_ # padding
b:3
end
julia> FieldFlags.bitfieldnames(MyBits)
(:a, :b)
julia> FieldFlags.bitfieldnames(MyBits(1,2))
(:a, :b)
```
"""
function bitfieldnames end
"""
cast_or_extend(T::DataType, x) -> T
Takes an object `x` of a primitive type and either bitcasts it to `T`
(if their sizes are egal) or zero-extends the bitrepresentation of `x`
to the size of `T`. `sizeof(x) <= sizeof(T)` must hold.
Returns a `T`.
See also [`FieldFlags.cast_extend_truncate`](@ref).
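# Examples
```jldoctest
julia> FieldFlags.cast_or_extend(UInt16, 0x01) # smaller input: zero-extend
0x0001

julia> FieldFlags.cast_or_extend(UInt8, 0x01) # equal sizes: plain bitcast
0x01
```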
"""
function cast_or_extend(T::DataType, x)
if sizeof(T) === sizeof(x)
Core.Intrinsics.bitcast(T, x)
else # can only be larger - if sizeof(T) is zero, we threw
Core.Intrinsics.zext_int(T, x)
end
end
"""
cast_extend_truncate(T::DataType, x) -> T
Takes an object `x` of a primitive type and either bitcasts it to type `T`
(if their sizes are equal), zero-extends the bitrepresentation of `x` to the
size of `T`, or truncates the bitrepresentation of `x` to `sizeof(T)`.
Returns a `T`.
See also [`FieldFlags.cast_or_extend`](@ref).
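# Examples
```jldoctest
julia> FieldFlags.cast_extend_truncate(UInt8, 0x0101) # larger input: truncate
0x01

julia> FieldFlags.cast_extend_truncate(UInt16, 0x01) # smaller input: zero-extend
0x0001
```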
"""
function cast_extend_truncate(T::DataType, x)
if sizeof(x) < sizeof(T)
# zero extension is always fine
Core.Intrinsics.zext_int(T, x)
elseif sizeof(x) > sizeof(T)
# we can't do anything other than truncating here
# we need at least sizeof(T) bits in x to represent
# all shifts/offsets we can think of - larger stuff
# is swallowed
Core.Intrinsics.trunc_int(T, x)
else # ==
# no extension/truncation needed, just bitcast
Core.Intrinsics.bitcast(T, x)
end
end
"""
bitfield(expr::Expr)
Takes an `Expr(:struct)` of the form
struct MyStruct
a:x
b:y
_
_:z
end
where `a` and `b` are field names, `x`, `y`, and `z` are the desired bitwidths
for those fields as integer literals, and `_` is padding, and returns the following expression:
Expr(:block,
typedefs,
typefuncs,
conv,
eqhash,
shows,
propsize,
propoffset,
getprop,
setprop
)
Where `typedefs` are the new user-facing type definition and the internal type definitions,
`typefuncs` are type-related functions from Base for the new types, `conv` are `convert` methods to those
types, `eqhash` defines `==` and `hash`, `shows` defines the `show` methods, `propsize` is the
implementation for [`FieldFlags.fieldsize`](@ref), `propoffset` is the
implementation for [`FieldFlags.propertyoffset`](@ref), `getprop` is the definition for the
`getproperty` overload for the user-facing type, and `setprop` is the definition for the
`setproperty!` overload for the user-facing type.
See also [`FieldFlags.bitflags`](@ref).
"""
function bitfield(expr::Expr)
expr.head == :struct || throw(ArgumentError("`@bitfields` needs a struct definition!"))
mutable = expr.args[1]
typedeclr = expr.args[2]
suptype = if typedeclr isa Symbol
typedeclr
elseif typedeclr isa Expr && typedeclr.head === :(<:)
Expr(:(<:), typedeclr.args[1], esc.(typedeclr.args[2:end])...)
else
throw(ArgumentError("Given supertype declaration is invalid: `$typedeclr`"))
end
typename = if suptype isa Symbol
suptype
elseif suptype isa Expr
suptype.args[1]
end
typename_internal = Symbol(typename, :_fields)
T = esc(typename)
Ti = esc(typename_internal)
fields = Pair{Symbol, Int}[]
numbits = 0 # aggregate number of bits of all fields
for ex in expr.args[3].args
# special case single padding
if ex === :_
numbits += 1
push!(fields, :_ => 1)
continue
end
!(ex isa Expr) && continue
(ex.head == :call
&& length(ex.args) == 3
&& first(ex.args) === :(:)) || continue # only Intx bitfields supported right now
fieldname = ex.args[2]
fieldname isa Symbol || throw(ArgumentError("Name of field is not a symbol: `$fieldname`"))
fieldsize = ex.args[3]
fieldsize isa Integer || throw(ArgumentError("Declared size of field `$fieldname` is not an integer literal!"))
numbits += fieldsize
push!(fields, fieldname => fieldsize)
end
fieldtuple = ntuple(x -> first(fields[x]), length(fields))
isempty(fieldtuple) && throw(ArgumentError("`@bitfields` needs at least one field."))
allunique(filter(!=(:_), fieldtuple)) || throw(ArgumentError("Fields need to be uniquely identifiable!"))
# `primitive type` currently requires a multiple of 8
# also makes accessing the bits later easier
# don't want to oversize when we have exactly 8,16,.. fields
typesize = 8*div(numbits, 8, RoundUp)
# This primitive type is intentionally not an Integer
# It's a composite type, there is no arithmetic here
# Also helps the compiler/LLVM later on to not mix up types
# The primitive type is explicitly wrapped in a `mutable struct`
# which ends up providing the `setindex!` interface, if the
# requested struct is declared `mutable` as well
internal_constructor = Expr(:(=), Expr(:call, typename, Expr(:(::), :t, Ti)), Expr(:new, T, :t))
newob = Expr(:new, T, :(FieldFlags.cast_or_extend($Ti, 0x0)))
# wire `newob` into a zero-arg constructor: all fields initialized to zero (cf. `Base.zero`)
zeroarg_constructor = Expr(:(=), Expr(:call, typename), newob)
struct_body = Expr(:block, Expr(:(::), :fields, Ti), internal_constructor, zeroarg_constructor)
mutstruct = Expr(:struct, mutable, suptype, struct_body)
typedefs = Expr(:block, :(primitive type $typename_internal $typesize end), mutstruct)
# make the properties accessible
filterednames = filter(!=(:_), fieldtuple)
typefuncs = :(
FieldFlags.bitfieldnames(::$T) = $filterednames;
FieldFlags.bitfieldnames(::Type{$T}) = $filterednames;
Base.propertynames(::$T) = FieldFlags.bitfieldnames($T);
Base.zero(::Type{$T}) = $T(FieldFlags.cast_or_extend($Ti, 0x0))
)
# prepare our `getproperty` overload
# build constructor together with `getproperty`
callargs = Any[T]
bodyargs = Any[]
# initialize return value of constructor
push!(bodyargs, :(ret = FieldFlags.cast_or_extend($Ti, 0x0)))
running_offset = 0
sizeexpr = origsize = Expr(:if)
offsetexpr = origoffset = Expr(:if)
getpropexpr = origgetprop = Expr(:if)
setpropexpr = origsetprop = Expr(:if)
for (fieldname,fieldsize) in fields
casttype = if isone(fieldsize)
Bool
elseif 2 <= fieldsize <= 8
UInt8
elseif 9 <= fieldsize <= 16
UInt16
elseif 17 <= fieldsize <= 32
UInt32
elseif 33 <= fieldsize <= 64
UInt64
else
UInt128
end
push!(sizeexpr.args, :(s === $(QuoteNode(fieldname))))
push!(sizeexpr.args, :(return $fieldsize))
nsize = Expr(:elseif)
push!(sizeexpr.args, nsize)
sizeexpr = nsize
push!(offsetexpr.args, :(s === $(QuoteNode(fieldname))))
push!(offsetexpr.args, :(return $running_offset))
noffexpr = Expr(:elseif)
push!(offsetexpr.args, noffexpr)
offsetexpr = noffexpr
running_offset += fieldsize
fieldname === :_ && continue
push!(getpropexpr.args, :(s === $(QuoteNode(fieldname))))
ifbody = :(
offsetshift = FieldFlags.cast_extend_truncate($Ti, FieldFlags.propertyoffset($T, s));
shifted = Core.Intrinsics.lshr_int(data, offsetshift);
maskshift = FieldFlags.cast_extend_truncate($Ti, FieldFlags.fieldsize($T, s));
mask = Core.Intrinsics.not_int(Core.Intrinsics.shl_int(maskbase, maskshift));
masked = Core.Intrinsics.and_int(shifted, mask);
return FieldFlags.cast_extend_truncate($casttype, masked);
)
push!(getpropexpr.args, ifbody)
ngetprop = Expr(:elseif)
push!(getpropexpr.args, ngetprop)
getpropexpr = ngetprop
# only build the expression if we actually need to
if mutable
push!(setpropexpr.args, :(s === $(QuoteNode(fieldname))))
ifbody = :(
offsetshift = FieldFlags.cast_extend_truncate($Ti, FieldFlags.propertyoffset($T, s));
shifted = Core.Intrinsics.shl_int(val, offsetshift);
mask = Core.Intrinsics.not_int(Core.Intrinsics.shl_int(maskbase, FieldFlags.fieldsize($T, s)));
mask = Core.Intrinsics.not_int(Core.Intrinsics.shl_int(mask, FieldFlags.propertyoffset($T, s)));
cleareddata = Core.Intrinsics.and_int(getfield(x, :fields), mask);
newdata = Core.Intrinsics.or_int(cleareddata, shifted);
setfield!(x, :fields, newdata);
return maskeddata;
)
push!(setpropexpr.args, ifbody)
nsetprop = Expr(:elseif)
push!(setpropexpr.args, nsetprop)
setpropexpr = nsetprop
end
# constructor args
push!(callargs, Expr(:(::), fieldname, Union{Bool, Base.BitInteger}))
cast_f = Symbol(fieldname, :_cast)
shift_f = Symbol(fieldname, :_shift)
mask_f = Symbol(fieldname, :_mask)
body = :(
# shift argument into the correct field position
$mask_f = $fieldname & ~((~zero($fieldname)) << FieldFlags.fieldsize($T, $(QuoteNode(fieldname))));
$cast_f = FieldFlags.cast_extend_truncate($Ti, $mask_f);
$shift_f = FieldFlags.cast_extend_truncate($Ti, FieldFlags.propertyoffset($T, $(QuoteNode(fieldname))));
$cast_f = Core.Intrinsics.shl_int($cast_f, $shift_f);
# `or` it into the result
ret = Core.Intrinsics.or_int(ret, $cast_f)
)
push!(bodyargs, body)
end
push!(bodyargs, Expr(:return, Expr(:call, :new, :ret)))
nosuchfieldstr = "Objects of type `$typename` have no field `"
fielderrstr = "type $typename has no field fields"
errexpr = :(ArgumentError(LazyString($nosuchfieldstr, s, "`")))
if !(:fields in fieldtuple)
# we can just branch & error here, to disallow
# property access to the internal field
# this will unfortunately cause a false positive in report_package
push!(getpropexpr.args, :(s === :fields))
push!(getpropexpr.args, :(error($fielderrstr)))
push!(getpropexpr.args, :(getfield(x, s)))
push!(setpropexpr.args, :(s === :fields))
push!(setpropexpr.args, :(error($fielderrstr)))
# this will unfortunately cause a false positive in report_package
push!(setpropexpr.args, :(setfield!(x, s, v))) # setfield!(x, name::Symbol, v)
else
# there is a user defined field :fields
# so just change the last else block from
# another elseif to a call to getfield,
# which will produce an error that JET.jl
# reports even without mode=:sound
getpropexpr.head = :call
push!(getpropexpr.args, :getfield, :x, :s)
setpropexpr.head = :call
push!(setpropexpr.args, :setfield!, :x, :s, :v)
end
sizeexpr.head = :call
push!(sizeexpr.args, :throw, errexpr)
offsetexpr.head = :call
push!(offsetexpr.args, :throw, errexpr)
call = Expr(:call, callargs...)
block = Expr(:block, bodyargs...)
constr = Expr(:function, call, block)
push!(struct_body.args, constr)
propsize = :(
function FieldFlags.fieldsize(_::Type{$T}, s::Symbol)
$origsize
end
)
propoffset = :(
function FieldFlags.propertyoffset(_::Type{$T}, s::Symbol)
$origoffset
end
)
getprop = :(
function Base.getproperty(x::$T, s::Symbol)
data = getfield(x, :fields)
maskbase = Core.Intrinsics.not_int(FieldFlags.cast_or_extend($Ti, 0x0))
$origgetprop
end
)
setprop = :(
function Base.setproperty!(x::$T, s::Symbol, v::W) where W
maskbase = Core.Intrinsics.not_int(FieldFlags.cast_or_extend($Ti, 0x0))
maskeddata = v & ~(~zero(W) << FieldFlags.fieldsize($T, s))
val = FieldFlags.cast_extend_truncate($Ti, maskeddata)
$origsetprop
end
)
conv = :(
Base.convert(::Type{$T}, t::$T) = t;
function Base.convert(::Type{$T}, x::X) where X
if !isprimitivetype(X)
throw(ArgumentError(LazyString("Cannot convert objects of type ", X, " to objects of type ", $T,".")))
else
$T(FieldFlags.cast_extend_truncate($Ti, x))
end
end
)
eqhash = :(
Base.:(==)(x::$T, y::$T) = getfield(x, :fields) == getfield(y, :fields);
Base.hash(x::$T, h::UInt) = hash(getfield(x, :fields), h)
)
shows = :(
function Base.show(io::IO, x::$T)
show(io, $T)
write(io, '(')
names = propertynames(x)
for i in eachindex(names)
show(io, getproperty(x, names[i]))
i != lastindex(names) && write(io, ", ")
end
write(io, ')')
end;
function Base.show(io::IO, m::MIME"text/plain", x::$T)
show(io, m, $T)
write(io, '(')
names = propertynames(x)
for i in eachindex(names)
prop = getproperty(x, names[i])
write(io, names[i])
write(io, ": ")
FieldFlags.truncshow(io, prop)
i != lastindex(names) && write(io, ", ")
end
write(io, ')')
end
)
###
return Expr(:block,
typedefs,
typefuncs,
conv,
eqhash,
shows,
propsize,
propoffset,
getprop,
setprop
)
end
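# Internal helpers for the generated 3-arg `show` methods: `truncshow` prints unsigned
# integers without leading zeros (e.g. `0x0001` -> `0x1`) and falls back to `show` otherwise.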
truncshow(io::IO, x) = show(io, MIME"text/plain"(), x)
function truncshow(io::IO, x::Unsigned)
p = @view repr(x)[3:end]
idx = something(findfirst(!=('0'), p), Some(lastindex(p)))
write(io, "0x")
_p = @view p[idx:end]
write(io, _p)
return nothing
end
"""
@bitfield [mutable] struct MyBits
a:2
b:3
_[:3] # padding; width is assumed 1 bit if the length is omitted
c:1
end
Construct a struct representing various fields, with their size specified in bits.
The struct can optionally be marked `mutable`.
See also [`@bitflags`](@ref).
# Extended Help
The fields are stored in a compact format where each field only takes up the specified number of bits.
Field access gives an unsigned integer, whose lower bits are the bits of the accessed field. The upper
bits are zeroed. As a special case, fields with size `1` return a `Bool`.
Explicit padding can be specified by naming a field `_`, with freely chosen width.
Field names (other than padding) need to be unique. The specified number of bits must be `>= 0`.
The order the fields are given in is the order the fields are stored in. The first field occupies
the least significant bits, followed by the second field, up to the last field, which is stored in
the most significant bits.
For example, the struct given above has this layout:
|MSB | | | | | | | |LSB |
|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
|c |_ |_ |_ |b |b |b |a |a |
where `_` is padding, with undefined value.
The constructor created for structs defined with `@bitfield` takes any type in
`Union{Int128, Int16, Int32, Int64, Int8, UInt128, UInt16, UInt32, UInt64, UInt8, Bool}` and converts it
to the correct size by truncating the upper bits, before storing the truncated value in the object.
This truncation also occurs when writing to a field of a mutable object.
!!! warning "Struct size"
Due to compiler limitations, the size of the resulting object will (currently) always be a
multiple of 8 bits. The additional bits added due to this are considered padding and can
not be relied on to exist. They may be removed in a future release without notice.
If you need padding up to a given size, explicitly specify a trailing padding field.
!!! warning "Field type"
As there are no variable sized integers in Julia, it is only guaranteed that the return type
on field access is large enough to hold all bits required by that field. Currently, fields of
size `1` return a `Bool`, while wider fields return the smallest unsigned integer type
(`UInt8` through `UInt128`) that can hold them; this mapping is not guaranteed and may be
changed in the future.
# Examples
```jldoctest
julia> @bitfield struct MyBits
a:2
b:3
_:3 # padding
c:1
end
julia> bits = MyBits(1,2,3)
MyBits(a: 0x1, b: 0x2, c: true)
julia> bits.a
0x01
julia> bits.b
0x02
julia> bits.c
true
```
"""
macro bitfield(expr::Expr)
bitfield(expr)
end
#####
# Flagstructs
#####
"""
bitflags(::Expr)
The given `Expr(:struct) has the following format
struct MyFlags
a
b
_
end
which is turned into
struct MyFlags
a:1
b:1
_:1
end
before being passed to `FieldFlags.bitfield`.
Some minimal expression filtering is performed.
See also [`@bitflags`](@ref), [`@bitfield`](@ref).
"""
function bitflags(expr::Expr)
expr.head == :struct || throw(ArgumentError("`@bitflags` needs a struct definition!"))
exprfields = expr.args[3].args
fields = identity.(filter(s -> s isa Symbol, exprfields))
isempty(fields) && throw(ArgumentError("`@bitflags` needs at least one field."))
allunique(filter(!=(:_), fields)) || throw(ArgumentError("Fields need to be uniquely identifiable!"))
# we do the heavy lifting in @bitfield, so that @bitflags is just an easier interface
for n in eachindex(exprfields)
arg = exprfields[n]
arg isa Expr && arg.head == :call && first(arg.args) == :(:) && throw(ArgumentError("`@bitflags` doesn't take per-field bitwidths!"))
arg isa Symbol || continue
exprfields[n] = Expr(:call, :(:), arg, 1)
end
bitfield(expr)
end
"""
@bitflags [mutable] struct MyFlags
flagA
flagB
_ # padding
flagC
end
Construct a struct representing various boolean flags, stored in a compact format where each flag
takes up a single bit. Field access gives a `Bool`, explicit padding can be declared by naming a field
`_`. Field names (other than padding) need to be unique. The struct can optionally be marked `mutable`.
See also [`@bitfield`](@ref).
!!! warning "Struct size"
Due to compiler limitations, the size of the resulting object will (currently) always be a
multiple of 8 bits. The additional bits added due to this are considered padding and can
not be relied on to exist. They may be removed in a future release without notice.
# Examples
```jldoctest
julia> @bitflags mutable struct MyFlags
flagA
flagB
_ # padding
flagC
end
julia> flags = MyFlags(true, false, true)
MyFlags(flagA: true, flagB: false, flagC: true)
julia> flags.flagA
true
julia> flags.flagB
false
julia> flags.flagB = true
true
julia> flags.flagB
true
julia> sizeof(flags)
1
```
"""
macro bitflags(expr)
bitflags(expr)
end
end # module FieldFlags
| FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | code | 11889 | using Test
using FieldFlags
using JET
using Random
const pos_fields = (Symbol.('a':('a'+9))...,)
function test_trunc_show(io, val)
mark(io)
FieldFlags.truncshow(io, val)
reset(io)
read(io, String)
end
@testset "All Tests" begin
@testset "truncshow" begin
io = IOBuffer()
@test test_trunc_show(io, 0x0) == "0x0"
@test test_trunc_show(io, 0x1) == "0x1"
@test test_trunc_show(io, 0x01) == "0x1"
@test test_trunc_show(io, 0x0101) == "0x101"
end
@testset "show" for nfields in (7,8,9)
fields = pos_fields[1:nfields]
name = Symbol("struct_" * randstring(5) * string(nfields))
:(
@bitflags struct $name
$(fields...)
end
) |> eval
args = rand(Bool, nfields)
obj = eval(:($name($(args...))))
@testset "2-arg show" begin
@test eval(Meta.parse(repr(obj))) == obj
end
mime_repr = repr(MIME"text/plain"(), obj)
@testset "text/plain" for f in eachindex(fields)
teststr = string(isone(f) ? "" : ", ", fields[f], ':')
@test occursin(teststr, mime_repr)
end
end
@testset "Failing convert" begin
struct IntWrap
x::Int
end
@bitfield struct ConvertTest
a:3
end
err = ArgumentError("Cannot convert objects of type IntWrap to objects of type ConvertTest.")
@test_throws err convert(ConvertTest, IntWrap(1))
end
@testset "Identity convert in a wrapper struct #10" begin
@bitfield struct Convert_10
a:2
end
# the type parameter is unused, but necessary to induce the identity `convert` call
struct Wrapper_10{T}
c::Convert_10
end
obj = zero(Convert_10)
@test Wrapper_10{Int}(obj) isa Wrapper_10{Int}
end
@testset "Subtyping" begin
abstract type AbstractSupertype end
@bitfield struct Concrete <: AbstractSupertype
a:2
end
f(as::AbstractSupertype) = iseven(as.a)
@test f(Concrete(2))
end
abstract type TestAbstract end
@testset "@bitflags" begin
@testset for nfields in (7,8,9)
@testset for sup in (true, false)
@testset "mutable: $mut" for mut in (true, false)
fields = pos_fields[1:nfields]
name = Symbol("struct_" * randstring(5) * string(nfields) * "_$(mut)_$sup")
if sup
supexpr = :($name <: TestAbstract)
structexpr = Expr(:struct, mut, supexpr, Expr(:block, fields...))
else
structexpr = Expr(:struct, mut, name, Expr(:block, fields...))
end
eval(:(@bitflags $structexpr))
T = eval(name)
@test if sup
supertype(T) === TestAbstract
else
supertype(T) === Any
end
args = rand(Bool, nfields)
obj = T(args...)
@test sizeof(obj) == ceil(Int, nfields/8)
# these two should always pass/fail together
@test !hasproperty(obj, :dummy)
@test_throws ErrorException("type $name has no field dummy") getproperty(obj, :dummy)
@test propertynames(obj) == fields
@testset for f in 1:nfields
# these two should always pass/fail together
@test hasproperty(obj, fields[f])
@test getproperty(obj, fields[f]) == args[f]
if mut
val = rand(Bool)
setproperty!(obj, fields[f], val)
@test getproperty(obj, fields[f]) == val
end
end
end
end
end # nfields
@testset "Empty bits" begin
:(
@bitflags struct EmptyFields
a
b
_
c
_
d
_
_
e
end
) |> eval
@test sizeof(EmptyFields) == 2
args = (rand(Bool, 5)...,)
obj = EmptyFields(args...)
fields = (:a,:b,:c,:d,:e)
@test propertynames(obj) == fields
@test !hasproperty(obj, :_)
@testset for f in 1:5
@test hasproperty(obj, fields[f])
@test getproperty(obj, fields[f]) == args[f]
end
end
@testset "No per-field bitwidths" begin
argerr = ArgumentError("`@bitflags` doesn't take per-field bitwidths!")
@test_throws argerr FieldFlags.bitflags(:(
struct PerFieldBitwidths
a:2
_
b
end))
end
end # end @bitflags
@testset "@bitfields" begin
@testset "non-pow-2 size" begin
@bitfield struct NonPow2
f:24
end
# This REALLY ought not to be how this works...
if Sys.WORD_SIZE > 24
@test 8*sizeof(NonPow2) == nextpow(2, 24)
else
@test 8*sizeof(NonPow2) == nextpow(Sys.WORD_SIZE, 24)
end
end
@testset "mutable: $mut" for mut in (true, false)
@testset for sup in (true, false)
@testset for nfields in (7,8,9)
fields = [ :($p:$n) for (n,p) in zip(shuffle(rand(2:4, nfields)), pos_fields[1:nfields]) ]
name = Symbol("struct_" * randstring(5) * string(nfields) * '_' * string(mut)* '_' * string(sup))
if sup
supexpr = :($name <: TestAbstract)
structexpr = Expr(:struct, mut, supexpr, Expr(:block, fields...))
else
structexpr = Expr(:struct, mut, name, Expr(:block, fields...))
end
eval(:(@bitfield $structexpr))
args = rand(Bool, nfields)
T = eval(:($name))
@test if sup
supertype(T) === TestAbstract
else
supertype(T) === Any
end
obj = T(args...)
sumfields = sum(x -> x.args[3], fields)
@test sizeof(getfield(obj, :fields)) == div(sumfields, 8, RoundUp)
# these two should always pass/fail together
@test !hasproperty(obj, :dummy)
@test_throws ErrorException("type $name has no field dummy") getproperty(obj, :dummy)
@test propertynames(obj) == ntuple(f -> fields[f].args[2], nfields)
zeroobj = convert(T, 0)
oneobj = convert(T, -1)
@testset for f in 1:nfields
# the empty convert preserves emptiness
@test iszero(getproperty(zeroobj, fields[f].args[2]))
# the full convert preserves fullness
@test ndigits(getproperty(oneobj, fields[f].args[2]); base=2) === fields[f].args[3]
# these two should always pass/fail together
@test hasproperty(obj, fields[f].args[2])
@test getproperty(obj, fields[f].args[2]) == args[f]
rand_set = rand(Bool)
if mut
@test setproperty!(obj, fields[f].args[2], rand_set) == rand_set
@test getproperty(obj, fields[f].args[2]) == rand_set
else
@test_throws ErrorException("setfield!: immutable struct of type $name cannot be changed") setproperty!(obj, fields[f].args[2], rand_set)
end
end
end # dense bitfields
end # supertype
@testset "Empty fields" begin
name = Symbol("EmptyBitFields_" * string(mut))
str = if mut
:(
@bitfield mutable struct $name
a:1
b:2
_:1
c:2
_:4
d:1
_:3
_:2
e:1
end)
else
:(@bitfield struct $name
a:1
b:2
_:1
c:2
_:4
d:1
_:3
_:2
e:1
end)
end
eval(str)
args = (rand(Bool, 5)...,)
obj = eval(:($name($(args...))))
@test sizeof(typeof(obj)) == 4
fields = (:a,:b,:c,:d,:e)
@test propertynames(obj) == fields
@test !hasproperty(obj, :_)
offsets = (0,1,4,10,16)
@testset for f in 1:5
@test hasproperty(obj, fields[f])
if isone(FieldFlags.fieldsize(typeof(obj), fields[f]))
@test getproperty(obj, fields[f]) isa Bool
end
@test FieldFlags.propertyoffset(typeof(obj), fields[f]) == offsets[f]
@test getproperty(obj, fields[f]) == args[f]
end
end # empty fields
end # mutability
@testset "Implicit single padding bit" begin
@bitfield struct ImplicitPaddingBitsize
a:2
_
b:4
end
@test FieldFlags.propertyoffset(ImplicitPaddingBitsize, :a) == 0
@test FieldFlags.propertyoffset(ImplicitPaddingBitsize, :b) == 2+1 # size of a + 1 from padding
end
end # end @bitfields
@testset "Propertyaccess to internal field" begin
@testset "`fields` field has been specified by the user" begin
@bitflags struct FieldsField
_
fields
end
args = rand(Bool)
obj = FieldsField(args)
@test hasfield(FieldsField, :fields)
@test hasproperty(obj, :fields)
@test obj.fields == args
@test obj.fields isa Bool
@test !(getfield(obj, :fields) isa Bool)
# one gives the field, the other gives the internal object
@test obj.fields != getfield(obj, :fields)
end
@testset "`fields` has NOT been specified by the user" begin
@bitflags struct FooField
_
foo
end
args = rand(Bool)
obj = FooField(args)
@test hasfield(FooField, :fields)
@test !(getfield(obj, :fields) isa Bool)
@test !hasproperty(obj, :fields)
@test_throws ErrorException("type FooField has no field fields") obj.fields
end
end
@testset "Effects" begin
@bitflags struct JETStruct
a
_
b
end
@testset "foldableAccess" begin
foldableAccess(j::JETStruct) = j.a
@static if VERSION >= v"1.9"
@test_call foldableAccess(JETStruct(false, true))
else
res = JET.report_call(foldableAccess, (JETStruct,))
@test isempty(JET.get_reports(res))
end
effects = Base.infer_effects(foldableAccess, (JETStruct,))
@test Core.Compiler.is_foldable(effects)
@inferred Bool foldableAccess(JETStruct(true, false))
end
@testset "erroringAccess" begin
erroringAccess(j::JETStruct) = j.z
reps = JET.get_reports(report_call(erroringAccess, (JETStruct,)))
@test !isempty(reps)
@test reps[1].msg == "type JETStruct has no field z"
effects = Base.infer_effects(erroringAccess, (JETStruct,))
@test !Core.Compiler.is_nothrow(effects)
rettypes = Base.return_types(erroringAccess, (JETStruct,))
@test only(rettypes) == Union{}
end
@testset "effects of getproperty of :fields" begin
erroringFields(j::FooField) = j.fields
reps = JET.get_reports(report_call(erroringFields, (FooField,)))
@test !isempty(reps)
# there's gotta be a better way of testing that,
# since this test will break if the internals change
# @test reps[1].vst[2].sig._sig[3].val == "type FooField has no field fields"
effects = Base.infer_effects(erroringFields, (FooField,))
@test !Core.Compiler.is_nothrow(effects)
rettypes = Base.return_types(erroringFields, (FooField,))
@test only(rettypes) == Union{}
# now what if we DO have a `fields` field?
foldableFields(j::FieldsField) = j.fields
@static if VERSION >= v"1.9"
@test_call foldableFields(FieldsField(true))
else
res = JET.report_call(foldableFields, (FieldsField,))
@test isempty(JET.get_reports(res))
end
effects = Base.infer_effects(foldableFields, (FieldsField,))
@test Core.Compiler.is_foldable(effects)
@inferred Bool foldableFields(FieldsField(true))
end
end
@testset "DSL constraints" begin
@testset "Field names" begin
expr = :(struct Foo
(a,b):1
end)
@test_throws ArgumentError("Name of field is not a symbol: `(a, b)`") FieldFlags.bitfield(expr)
end
@testset "Non-literal field sizes" begin
expr = :(struct Foo
a:b
end)
@test_throws ArgumentError("Declared size of field `a` is not an integer literal!") FieldFlags.bitfield(expr)
@testset "Allowed field size literals" for T in Base.BitInteger_types
fieldsize = one(T)
expr = :(struct Foo
a:$fieldsize
end)
@test FieldFlags.bitfield(expr) isa Expr
end
end
end
end # end All Tests | FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | docs | 1026 | # FieldFlags.jl
[](https://github.com/Seelengrab/FieldFlags.jl/actions/workflows/ci.yml)
[](https://github.com/Seelengrab/FieldFlags.jl/actions/workflows/nightly.yml)
[](https://seelengrab.github.io/FieldFlags.jl/stable)
[](https://seelengrab.github.io/FieldFlags.jl/dev)
[](https://codecov.io/github/Seelengrab/FieldFlags.jl)
FieldFlags.jl is a small package for declaring [bitfield](https://en.wikipedia.org/wiki/Bit_field)-like structs, without
having to manually mask out bits. For more information, check out the [documentation](https://seelengrab.github.io/FieldFlags.jl/)! | FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | docs | 1270 | # API Reference
## Public API
The following symbols are considered API for the purposes of semver.
### Macros
```@docs
@bitflags
@bitfield
```
### Functions
These functions are explicitly not exported, to prevent confusion with `Base.fieldoffset` and similar field
and property related functions.
```@docs
FieldFlags.propertyoffset
FieldFlags.fieldsize
```
### Additional Supported API
These functions are listed because they are supported, but their docstrings can't be displayed without
having an instance of a type created via [`@bitfield`](@ref) or [`@bitflags`](@ref).
* `Base.propertynames`
* Gives a tuple of the properties given in the original expression given to [`@bitfield`](@ref) or [`@bitflags`](@ref).
* `convert(::T, x::Union{Bool, Base.BitInteger})`
* Converts `x` to a `T`, originally created via the macros of this package. If the sizes don't match, `x` is either truncated or its bitrepresentation is zero-extended to fit the size of `T`.
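For example, a small sketch of the truncating `convert` (`Nibble` is a made-up example type):

```julia
julia> @bitfield struct Nibble
           a:4
       end

julia> convert(Nibble, 0xff) # the upper 4 bits of 0xff are truncated away
Nibble(a: 0xf)
```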
## Internal API
The following symbols are NOT considered API for the purposes of semver. They are documented here
as a useful reference, not as a statement of semver guarantees.
```@docs
FieldFlags.bitflags
FieldFlags.bitfield
FieldFlags.cast_extend_truncate
FieldFlags.cast_or_extend
``` | FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | docs | 5650 | # Examples
This page contains some examples on how to use `@bitflags` and `@bitfields`, though if you are familiar with regular structs,
the usage should be familiar, as the interfaces are modeled after them.
## Basic bitflags
Starting with [`@bitflags`](@ref), which is used for tightly-packed boolean storage, which can be used like so:
```@repl basicBitflags
using FieldFlags
@bitflags struct MyFlags
flagA
flagB
_ # padding
flagC
end
```
The above defines a struct `MyFlags` with three fields, `flagA`, `flagB` and `flagC`. As the comment indicates, `_` is for specifying padding.
All fields specified in the `struct` take up a single bit - even the padding. The minimum size for the above is thus 4 bits. The fields are stored
from least significant bit to most significant bit, starting with `fieldA`.
While the minimum bitsize for the above struct is 4 bits, due to an implementation detail/compiler requirement, all struct sizes are rounded
up to the next multiple of 8 bits. `MyFlags` is thus 8 bits (1 byte) in size:
```@repl basicBitflags
sizeof(MyFlags)
```
That is, an instance of `MyFlags` has these bits:
|MSB | | | |LSB |
|:----:|:----:|:----:|:----:|:----:|
|5-8 |flagC |_ |flagB |flagA |
With the 4 bits higher than `flagC` being implicit padding as well.
`@bitflags` gives us two default constructors; a zero-arg constructor as well as an `n`-arg constructor.
The zero-arg constructor allows us to construct an instance of `MyFlags` with all fields set to `false`:
```@repl basicBitflags
mf = MyFlags()
mf.flagA == mf.flagB == mf.flagC == false
```
As can be seen above, individual fields can be accessed with regular dot-syntax.
!!! note "Fields vs. Properties"
Technically speaking, neither `@bitflags` nor `@bitfield` gives a struct with actual _fields_ - dot-syntax access is only simulating fields, by overloading `getproperty`.
That is, a call like `getfield(mf, :flagA)` cannot succeed - use `getproperty(mf, :flagA)` instead, which handles the field unpacking for you. This is a technicality though,
and as such `property` and `field` are used interchangeably in this documentation.
In contrast, the `n`-arg constructor takes one argument for each field:
```@repl basicBitflags
mf = MyFlags(true, false, true)
mf.flagA == mf.flagC == true
mf.flagB == false
```
## Mutability
While immutability can be useful, sometimes it is more convenient to mutate a flag in-place. This can be achieved by marking the struct given to `@bitflags`
as mutable:
```@repl mutableFlags
using FieldFlags
@bitflags mutable struct MutableFlags
a
_
b
_
c
end
```
The above struct requires at least 5 bits, which means the bitlayout is like so:
|MSB | | | | |LSB |
|:----:|:----:|:----:|:----:|:----:|:----:|
|6-8 |c |_ |b |_ |a |
The remaining upper 2 bits are once again implicit padding, while the overall size of the objects stays the same:
```@repl mutableFlags
sizeof(MutableFlags)
```
The available constructors are also once again the same:
```@repl mutableFlags
methods(MutableFlags)
```
The only difference is that we are now able to set individual fields in an object:
```@repl mutableFlags
mutf = MutableFlags(false, false, false)
mutf.a == false
mutf.a = true
mutf.a == true
```
which we weren't able to do earlier:
```@repl basicBitflags
mf.flagA = true
```
!!! warning "Allocations"
One limitation of allowing fields to be set is that the object is declared as `mutable`, which has the same effect
as with regular structs that are marked as mutable. For example, `mutable` structs aren't guaranteed to be stored inline
in other objects like wrapper structs or arrays, which may require additional allocations. Setting/reading flags of
mutable objects does not lead to allocations - these stay allocation-free.
## Subtyping
On top of mutability, we can also specify an abstract supertype as usual:
```@repl supertypes
using FieldFlags
abstract type MyAbstract end
@bitflags struct MyConcrete <: MyAbstract
foo
_
bar
baz
end
supertype(MyConcrete) == MyAbstract
```
This allows for defining common fallback methods for `@bitfield` or `@bitflags` structs that may share some common fields or other invariants:
```@repl supertypes
@bitflags struct OtherConcrete <: MyAbstract
foo
_
bak
end
fallback(ma::MyAbstract) = ma.foo
fallback(MyConcrete(true, false, false)) == true
fallback(OtherConcrete(false, true)) == false
```
## [`@bitfield`](@ref) structs
Structs defined with `@bitfield` are, in regards to mutability, bitsize and subtyping behavior, identical to those defined by `@bitflags`. The major difference is
that while `@bitflags` structs only hold one bit per field, `@bitfield` can hold multiple bits per field:
```@repl bitfield
using FieldFlags
@bitfield mutable struct MyField
a:1
_:2
b:3
_
c:2
end
```
The above defines a struct `MyField`, with three fields `a`, `b` and `c`, with sizes (in bits) `1`, `3` and `2` respectively. There are also two definitions of explicit padding
between fields, the first being `2` bits in size and the second being `1` bit in size, taken implicitly from `_` not having a size annotation. The layout of the above struct is
like so:
|MSB | | | | | | | | |LSB |
|:----:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:----:|
|10-16 |c |c |_ |b |b |b |_ |_ |a |
With the additional padding bits, we come to a total of 9 bits. This is again rounded up to the next multiple of 8, which is 16 bits or 2 bytes:
```@repl bitfield
sizeof(MyField)
``` | FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.4.0 | 82bea8689c73967dd238d511bb4397412d52af36 | docs | 2803 | # FieldFlags.jl Documentation
FieldFlags.jl is a small package without dependencies, giving users the ability
to create structs containing packed integers of various bitsizes.
The two main exports of this package are the two macros [`@bitflags`](@ref) and
[`@bitfield`](@ref), for creating bit packed boolean flag structs and bit packed
integer field structs respectively.
This package is heavily inspired by C-style bitfields, though there are some limitations.
```@contents
Pages=["index.md", "examples.md", "api.md"]
Depth=3
```
## Goals
* Low/Negligible overhead
* I can get a [`bextract`](https://www.felixcloutier.com/x86/bextr) on my machine from extremely high level julia code
* High performance
* Good optimization by the compiler (constant folding, elimination of error paths)
* The package should "feel" as if there were no special implementation
* Good debuggability with JET.jl
## Limitations
* Thread safety
* Accessing the objects produced by this package is not thread safe and atomic access is not planned to be supported. Users are advised to use proper locking to ensure safety.
* Size of the objects
* Due to a compiler limitation, the size of all objects created by this package is a multiple of 8 bits. This restriction may be removed in the future.
* Type parameters cannot be supported - the size of a field needs to be known at definition time, so that the various bitshifts and masking operations done internally can be compiled away.
* The widest a field can currently be is `8*sizeof(UInt)` bits, as `UInt` is currently the default return type for fields (other than those of width `1`, which return a `Bool`); see the sketch after this list.
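A minimal sketch of these return-type rules (the struct and field names are illustrative only, not part of the API):
```julia
using FieldFlags

@bitfield struct Sizes
    a:3
    b:1
end

s = Sizes(5, 1)
s.a isa UInt # fields wider than one bit currently return a `UInt`
s.b isa Bool # one-bit fields return a `Bool`
```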
## Planned Features
* Custom field type annotations
* This would look like a regular field type annotation for exact sizes (i.e. `a::UInt16`), or like e.g. `a:3::UInt16` for objects that want to store the lower 3 bits of an `UInt16` and want to get that type back out when accessing the field.
* Due to the nature of how these objects are stored internally, the types will need to be at least `isbitstype`, possibly even `isprimitivetype`, as it's unclear whether the padding potentially contained in an `isbitstype` is legal to observe (I suspect it isn't).
* `<: Signed` types will need to be at least 2 bits in size, to store the sign bit.
* See [#9](https://github.com/Seelengrab/FieldFlags.jl/issues/9) for the issue tracking this.
* Narrower field types
* Currently, all field accesses (unless accessing a single bit field) return an `UInt`. This is not guaranteed, and may be narrowed in the future, such that a field annotated with width `2` returns an `UInt8` by default, width `9` an `UInt16` etc.
* See [#7](https://github.com/Seelengrab/FieldFlags.jl/issues/7) for the issue tracking this.
| FieldFlags | https://github.com/Seelengrab/FieldFlags.jl.git |
|
[
"MIT"
] | 0.1.2 | be91ed269639deabdaf3fc8d988a8c2b7c72a874 | code | 392 | module NonconvexSearch
export MTS, MTSAlg, LS1Alg, MTSOptions, LS1Options
using Reexport, Parameters
using Random: randperm
@reexport using NonconvexCore
using NonconvexCore: @params, AbstractOptimizer, AbstractResult
using NonconvexCore: AbstractModel, VecModel, debugging, getdim
using NonconvexCore: clamp_and_evaluate!
import NonconvexCore: optimize!, Workspace
include("mts.jl")
end
| NonconvexSearch | https://github.com/JuliaNonconvex/NonconvexSearch.jl.git |
|
[
"MIT"
] | 0.1.2 | be91ed269639deabdaf3fc8d988a8c2b7c72a874 | code | 14476 |
# MTS Algorithms
# MTS: Multiple Trajectory Search for Large Scale Global Optimization,
# By [Lin-Yu Tseng and Chun Chen, 2008](https://sci2s.ugr.es/sites/default/files/files/TematicWebSites/EAMHCO/contributionsCEC08/tseng08mts.pdf)
# MTS Implementation
# Algs
struct MTSAlg <: AbstractOptimizer end
struct LS1Alg <: AbstractOptimizer end
# Options
@with_kw struct MTSOptions
M = 100
maxiter=200
search_range_tol=1e-15
n_foreground = 80
n_local_search = 200
n_local_search_test = 3
n_local_search_best = 300
BONUS1 = 10
BONUS2 = 1
a_min = 0.4
a_max = 0.5
b_min = 0.1
b_max = 0.3
c_min = 0
c_max = 1
# Fixed parameters
REDUCE_SEARCH_RANGE_FACTOR = 2.5
SEARCH_RANGE_DEGERATE_FACTOR = 2
Y1_INCR = 0.1
Y1_DECR = 0.1
X2_INCR = 0.2
end
@with_kw struct LS1Options
M = 100
maxiter=200
search_range_tol=1e-15
# Fixed parameters
REDUCE_SEARCH_RANGE_FACTOR = 2.5
SEARCH_RANGE_DEGERATE_FACTOR = 2
Y1_INCR = 0.1
Y1_DECR = 0.1
X2_INCR = 0.2
# Dummy parameters
BONUS1 = 10
BONUS2 = 1
end
# Workspaces
@params mutable struct MTSWorkspace <: Workspace
model::VecModel
x0::AbstractVector
x::AbstractVector
options::MTSOptions
enable::BitVector
improve::BitVector
search_range::AbstractVector
    # Volatile variables
optimal_x::AbstractVector
optimal_ind::Int
optimal_val::Real
end
@params mutable struct LS1Workspace <: Workspace
model::VecModel
x0::AbstractVector
x::AbstractVector
options::LS1Options
enable::BitVector
improve::BitVector
search_range::AbstractVector
    # Volatile variables
optimal_x::AbstractVector
optimal_ind::Int
optimal_val::Real
end
# Workspace constructors
function MTSWorkspace(model::VecModel, x0::AbstractVector, options::MTSOptions; kwargs...)
@unpack box_min, box_max = model
M = options.M
    # Initialize improve and search range
enable = trues(M)
improve = trues(M)
search_range = [(box_max-box_min) ./ 2 for _ in 1:M]
MTSWorkspace(model, x0, copy(x0), options, enable, improve, search_range, x0[1], -1, Inf)
end
function LS1Workspace(model::VecModel, x0::AbstractVector, options::LS1Options; kwargs...)
@unpack box_min, box_max = model
M = options.M
    # Initialize improve and search range
enable = trues(M)
improve = trues(M)
search_range = [(box_max-box_min) ./ 2 for _ in 1:M]
LS1Workspace(model, x0, copy(x0), options, enable, improve, search_range, x0[1], -1, Inf)
end
# Exposed workspace constructors
function Workspace(model::VecModel, optimizer::LS1Alg, x0::AbstractVector; options::LS1Options=LS1Options(), kwargs...,)
@assert length(x0) > 0 && x0[1] isa AbstractVector
if length(model.ineq_constraints) > 0 || length(model.eq_constraints) > 0
@warn "LS1 does not support (in)equality constraints. Your input would be ignored. "
end
return LS1Workspace(model, x0, options)
end
# LS1 Workspace constructor without x0 (use method in paper to initialize)
function Workspace(model::VecModel, optimizer::LS1Alg; options::LS1Options=LS1Options(), kwargs...)
x0 = initialize_x(model, options)
return Workspace(model, optimizer, x0; options=options)
end
@params struct MTSResult <: AbstractResult
minimum
minimizer
end
# Tool functions
function initialize_x(model::VecModel, options::Union{MTSOptions, LS1Options})
@unpack box_min, box_max = model
@unpack M = options
n_vars = getdim(model)[2]
SOA = build_SOA(M, n_vars, M)
x0 = Array{Real,2}(undef, M, n_vars)
for i in 1:M
for j in 1:n_vars
            # To be confirmed: at the 5th line of the "Multiple Trajectory Search" algorithm in the paper, I do think (u_i, l_i) should be (u_j, l_j)
x0[i, j] = box_min[j] + (box_max[j]-box_min[j])*(SOA[i, j]/(M-1))
end
end
[x0[i, :] for i in 1:size(x0, 1)]
end
function reduce_search_range(search_range, k, n_vars, box_min, box_max, search_range_tol, REDUCE_SEARCH_RANGE_FACTOR)
search_range[k] ./= 2
for i in 1:n_vars
if search_range[k][i] < search_range_tol
search_range[k][i] = (box_max[i] - box_min[i]) / REDUCE_SEARCH_RANGE_FACTOR
end
end
end
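# Returns a closure that memoizes `clamp_and_evaluate!`: results are cached
# keyed by (model, x), so repeated evaluations of the same point within one
# local search pass are computed only once.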
function get_buffered_clamp_and_evaluate!()
buffer = Dict()
function buffered_clamp_and_evaluate!(model::AbstractModel, x::AbstractVector)
if !haskey(buffer, (model, x))
buffer[((model, x))] = clamp_and_evaluate!(model::AbstractModel, x::AbstractVector)
end
return buffer[((model, x))]
end
return buffered_clamp_and_evaluate!
end
# Subalgorithms
function _localsearch1(workspace, k)
@unpack model, options = workspace
@unpack x, improve, search_range = workspace
@unpack BONUS1, BONUS2, search_range_tol = options
@unpack SEARCH_RANGE_DEGERATE_FACTOR, REDUCE_SEARCH_RANGE_FACTOR = options
@unpack box_min, box_max = model
n_vars = getdim(model)[2]
grade = 0
# search_range in paper is one-dimensional. Expand it to multidimensional.
if improve[k] == false
reduce_search_range(search_range, k, n_vars, box_min, box_max, search_range_tol, REDUCE_SEARCH_RANGE_FACTOR)
end
improve[k] = false
buffered_clamp_and_evaluate! = get_buffered_clamp_and_evaluate!()
for i in 1:n_vars
# Original value
_xk = copy(x[k])
xk_val = buffered_clamp_and_evaluate!(model, _xk)
update_xki = true
_xk[i] -= search_range[k][i]
# Value after update
_xk_val = buffered_clamp_and_evaluate!(model, _xk)
# Better than current best solution
if _xk_val < workspace.optimal_val
grade += BONUS1
workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk), k, _xk_val
end
# Value stays the same
if _xk_val == xk_val
# Restore
update_xki = false
else
# Value degenerates
if _xk_val > xk_val
# Restore x_k
_xk = copy(x[k])
_xk[i] += search_range[k][i] / SEARCH_RANGE_DEGERATE_FACTOR
# Current value
_xk_val = buffered_clamp_and_evaluate!(model, _xk)
if _xk_val < workspace.optimal_val
grade += BONUS1
workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk), k, _xk_val
end
if _xk_val >= xk_val
# Restore
update_xki = false
else
grade += BONUS2
improve[k] = true
end
else
grade += BONUS2
improve[k] = true
end
end
if update_xki
x[k][i] = _xk[i]
end
end
return grade
end
function _localsearch2(workspace, k)
@unpack model, options = workspace
@unpack x, improve, search_range = workspace
@unpack BONUS1, BONUS2, search_range_tol = options
@unpack SEARCH_RANGE_DEGERATE_FACTOR, REDUCE_SEARCH_RANGE_FACTOR = options
@unpack box_min, box_max = model
n_vars = getdim(model)[2]
grade = 0
# search_range in paper is one-dimensional. Expand it to multidimensional.
if improve[k] == false
reduce_search_range(search_range, k, n_vars, box_min, box_max, search_range_tol, REDUCE_SEARCH_RANGE_FACTOR)
end
improve[k] = false
D = zeros(Int, n_vars)
r = zeros(Int, n_vars)
buffered_clamp_and_evaluate! = get_buffered_clamp_and_evaluate!()
for i in 1:n_vars
# Original value
xk_val = buffered_clamp_and_evaluate!(model, x[k])
update_xk = true
D .= rand.(Ref([-1, 1]))
r .= rand.(Ref([0, 1, 2, 3]))
# Value after update
_xk = copy(x[k])
_xk .= [r[_i] == 0 ? _xk[_i]-search_range[k][i]*D[_i] : _xk[_i] for _i in 1:length(r)]
_xk_val = buffered_clamp_and_evaluate!(model, _xk)
if _xk_val < workspace.optimal_val
grade += BONUS1
workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk), k, _xk_val
end
# Value stays the same
if _xk_val == xk_val
# Restore
update_xk = false
else
# Value degenerates
if _xk_val > xk_val
# Restore x_k
_xk = copy(x[k])
_xk .= [r[_i] == 0 ? _xk[_i]+(search_range[k][i]*D[_i] / SEARCH_RANGE_DEGERATE_FACTOR) : _xk[_i] for _i in 1:length(r)]
_xk_val = buffered_clamp_and_evaluate!(model, _xk)
if _xk_val < workspace.optimal_val
grade += BONUS1
workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk), k, _xk_val
end
if _xk_val >= xk_val
update_xk = false
else
grade += BONUS2
improve[k] = true
end
else
grade += BONUS2
improve[k] = true
end
end
if update_xk
x[k] = _xk
end
end
return grade
end
function _localsearch3(workspace, k)
@unpack model, options = workspace
@unpack x = workspace
@unpack BONUS1, BONUS2 = options
@unpack Y1_INCR, Y1_DECR, X2_INCR = options
@unpack a_min, a_max, b_min, b_max, c_min, c_max = options
n_vars = getdim(model)[2]
grade = 0
_xk = copy(x[k])
update_xk = false
buffered_clamp_and_evaluate! = get_buffered_clamp_and_evaluate!()
for i in 1:n_vars
# Original value
_xk_val = buffered_clamp_and_evaluate!(model, _xk)
update_xk = true
# Heuristic search
_xk_x1, _xk_y1, _xk_x2 = copy(_xk), copy(_xk), copy(_xk)
_xk_x1[i] += Y1_INCR
_xk_y1[i] -= Y1_DECR
_xk_x2[i] += X2_INCR
_xk_x1_val, _xk_y1_val, _xk_x2_val = buffered_clamp_and_evaluate!(model, _xk_x1), buffered_clamp_and_evaluate!(model, _xk_y1), buffered_clamp_and_evaluate!(model, _xk_x2)
        if _xk_x1_val < workspace.optimal_val
            grade += BONUS1
            # Record the perturbed point that triggered the improvement
            workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk_x1), k, _xk_x1_val
        end
        if _xk_y1_val < workspace.optimal_val
            grade += BONUS1
            workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk_y1), k, _xk_y1_val
        end
        if _xk_x2_val < workspace.optimal_val
            grade += BONUS1
            workspace.optimal_x, workspace.optimal_ind, workspace.optimal_val = copy(_xk_x2), k, _xk_x2_val
        end
D1, D2, D3 = _xk_val - _xk_x1_val, _xk_val - _xk_y1_val, _xk_val - _xk_x2_val
grade += ((D1>0) + (D2>0) + (D3>0)) * BONUS2
if update_xk == false
update_xk = (D1>0) || (D2>0) || (D3>0)
end
a = (rand() * (a_max - a_min)) + a_min
b = (rand() * (b_max - b_min)) + b_min
        c = (rand() * (c_max - c_min)) + c_min
_xk[i] += a*(D1 - D2) + b*(D3 - 2*D1) + c
end
if update_xk
# Update x[k]
x[k] = _xk
end
return grade
end
const LOCAL_SEARCH_METHODS = [_localsearch1, _localsearch2, _localsearch3]
function mts(workspace::MTSWorkspace)
# Multiple Trajectory Search
@unpack options, enable = workspace
@unpack M, n_foreground, n_local_search, n_local_search_test, n_local_search_best = options
grade_x = [-Inf for _ in 1:M]
for i in 1:M
if !enable[i]
continue
end
LS_testgrades = [0 for _ in 1:length(LOCAL_SEARCH_METHODS)]
LS_testgrades[1] += _localsearch1(workspace, i)
LS_testgrades[2] += _localsearch2(workspace, i)
LS_testgrades[3] += _localsearch3(workspace, i)
max_ind = findmax(LS_testgrades)[2]
if max_ind == 1
_best_local_search = _localsearch1
elseif max_ind == 2
_best_local_search = _localsearch2
else
_best_local_search = _localsearch3
end
grade_x[i] = 0
for _ in 1:n_local_search_best
grade_x[i] += _best_local_search(workspace, i)
end
end
for _ in 1:n_local_search
_localsearch1(workspace, workspace.optimal_ind)
end
enable_ind = reverse(sortperm(grade_x))[begin:n_foreground]
enable[:] .= false
enable[enable_ind] .= true
end
function optimize!(workspace::MTSWorkspace)
options = workspace.options
for iter in 1:options.maxiter
#if debugging[] && iter % 50 == 0
# println("Iter ", iter, " max iter: ", options.maxiter)
#end
mts(workspace)
end
MTSResult(workspace.optimal_val, workspace.optimal_x)
end
# Build a SOA (Simulated Orthogonal Array). Refers to section 2 of the paper posted above.
function build_SOA(m, k, q)
SOA = Array{Int,2}(undef, m, k)
for c in 1:k
q_perm = randperm(q) .- 1
p = 0
for r in randperm(m)
p = (p%q)+1
SOA[r, c] = q_perm[p]
end
end
SOA
end
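# Example: build_SOA(5, 3, 5) yields a 5×3 matrix in which every column is a
# random permutation of 0:4, so all q levels appear exactly once per column.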
# mts Workspace constructor with x0
function Workspace(model::VecModel, optimizer::MTSAlg, x0::AbstractVector; options::MTSOptions=MTSOptions(), kwargs...,)
@assert length(x0) > 0 && x0[1] isa AbstractVector
if length(model.ineq_constraints) > 0 || length(model.eq_constraints) > 0
@warn "MTS does not support (in)equality constraints. Your input would be ignored. "
end
return MTSWorkspace(model, x0, options)
end
# mts Workspace constructor without x0 (use method in paper to initialize)
function Workspace(model::VecModel, optimizer::MTSAlg; options::MTSOptions=MTSOptions(), kwargs...)
x0 = initialize_x(model, options)
return Workspace(model, optimizer, x0; options=options)
end
# Export localsearch1 independently
function localsearch1(workspace::Union{MTSWorkspace, LS1Workspace})
M = workspace.options.M
for i in 1:M
_localsearch1(workspace, i)
end
end
# Export LS1 independently
function optimize!(workspace::LS1Workspace)
options = workspace.options
for iter in 1:options.maxiter
#if debugging[] && iter % 50 == 0
# println("Iter ", iter, " max iter: ", options.maxiter)
#end
localsearch1(workspace)
end
MTSResult(workspace.optimal_val, workspace.optimal_x)
end
| NonconvexSearch | https://github.com/JuliaNonconvex/NonconvexSearch.jl.git |
|
[
"MIT"
] | 0.1.2 | be91ed269639deabdaf3fc8d988a8c2b7c72a874 | code | 3916 | # Test code adapted from MultistartOptimization.jl:
# Copyright (c) 2019—2021: Tamas K. Papp.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#####
##### Some multivariate test problems
#####
####
#### generic code
####
"""
A type for test functions with uniform bounds.
All subtypes (test functions) support:
1. being called with a vector of arbitrary length
2. `minimum_location`,
3. `lower_bounds`, `upper_bounds` (defined via `lu_bounds`),
4. having a global minimum *value* of `0`.
See [`TEST_FUNCTIONS`](@ref).
"""
abstract type UniformBounds end
lower_bounds(f::UniformBounds, n::Integer) = fill(Float64(lu_bounds(f)[1]), n)
upper_bounds(f::UniformBounds, n::Integer) = fill(Float64(lu_bounds(f)[2]), n)
####
#### shifted quadratic
####
"A test function with global minimum at [a, …]. For sanity checks."
struct ShiftedQuadratic{T <: Real} <: UniformBounds
a::T
end
(f::ShiftedQuadratic)(x) = (a = f.a; sum(x -> abs2(x - a), x))
minimum_location(f::ShiftedQuadratic, n::Integer) = fill(f.a, n)
lu_bounds(f::ShiftedQuadratic) = (f.a - 50, f.a + 100)
const SHIFTED_QUADRATIC = ShiftedQuadratic(0.5)
####
#### Griewank
####
"""
The Griewank problem.
From Ali, Khompatraporn, and Zabinsky (2005, p 559).
"""
struct Griewank{T <: Real} <: UniformBounds
a::T
end
function (f::Griewank)(x)
sum(abs2, x) / f.a - prod(((i, x),) -> cos(x / √i), enumerate(x)) + 1
end
minimum_location(::Griewank, n::Integer) = zeros(n)
lu_bounds(::Griewank) = (-50, 100)
const GRIEWANK = Griewank(200.0)
####
#### Levy Montalvo 2.
####
"""
Levy and Montalvo 2 problem.
From Ali, Khompatraporn, and Zabinsky (2005, p 662).
"""
struct LevyMontalvo2 <: UniformBounds end
function (::LevyMontalvo2)(x)
xn = last(x)
0.1 * abs2(sinpi(3 * first(x))) + abs2(xn - 1) * (1 + abs2(sinpi(2 * xn))) +
sum(@. abs2($(x[1:(end - 1)]) - 1) * (1 + abs2(sinpi(3 * $(x[2:end])))))
end
minimum_location(::LevyMontalvo2, n::Integer) = ones(n)
lu_bounds(::LevyMontalvo2) = (-10, 15)
const LEVY_MONTALVO2 = LevyMontalvo2()
####
#### Rastrigin
####
"""
Rastrigin problem.
From Ali, Khompatraporn, and Zabinsky (2005, p 665).
"""
struct Rastrigin <: UniformBounds end
function (::Rastrigin)(x)
10 * length(x) + sum(@. abs2(x) - 10 * cospi(2 * x))
end
minimum_location(::Rastrigin, n::Integer) = zeros(n)
lu_bounds(::Rastrigin) = (-5.12, 5.12)
const RASTRIGIN = Rastrigin()
####
#### Rosenbrock
####
"""
Rosenbrock problem.
From Ali, Khompatraporn, and Zabinsky (2005, p 666).
"""
struct Rosenbrock <: UniformBounds end
function (::Rosenbrock)(x)
x1 = x[1:(end - 1)]
x2 = x[2:end]
sum(@. 100 * abs2(x2 - abs2(x1)) + abs2(x1 - 1))
end
minimum_location(::Rosenbrock, n::Integer) = ones(n)
lu_bounds(::Rosenbrock) = (-30, 30)
const ROSENBROCK = Rosenbrock()
####
#### helper code
####
"A tuple of all test functions."
const TEST_FUNCTIONS = (SHIFTED_QUADRATIC, GRIEWANK, LEVY_MONTALVO2, RASTRIGIN, ROSENBROCK) | NonconvexSearch | https://github.com/JuliaNonconvex/NonconvexSearch.jl.git |
|
[
"MIT"
] | 0.1.2 | be91ed269639deabdaf3fc8d988a8c2b7c72a874 | code | 1372 | using NonconvexSearch, Test
include("problems.jl")
@testset "test function sanity checks" begin
for F in TEST_FUNCTIONS
@test F(minimum_location(F, 10)) ≈ 0
end
end
d_tol = Dict(SHIFTED_QUADRATIC=>1e-6, GRIEWANK=>1e-6, LEVY_MONTALVO2=>1e-6, RASTRIGIN=>1e-6, ROSENBROCK=>1e-1)
tol(F) = (d_tol[F])
test_dim = 2
@testset "MTS" begin
# Temporary disable ROSENBROCK
println("Testing MTS... ")
for F in setdiff(TEST_FUNCTIONS, (ROSENBROCK, ))
println("Testing nonconvex function: ", F)
m = Model(x -> F(x))
lb = [lu_bounds(F)[1] for _ in 1:test_dim]
ub = [lu_bounds(F)[2] for _ in 1:test_dim]
addvar!(m, lb, ub)
alg = MTSAlg()
r = optimize(m, alg, options=MTSOptions())
println(r.minimizer)
println(r.minimum)
@test abs(r.minimum) < tol(F)
end
end
@testset "Localsearch1" begin
println("Testing Localsearch1... ")
for F in setdiff(TEST_FUNCTIONS, (ROSENBROCK, ))
println("Testing nonconvex function: ", F)
m = Model(x -> F(x))
lb = [lu_bounds(F)[1] for _ in 1:test_dim]
ub = [lu_bounds(F)[2] for _ in 1:test_dim]
addvar!(m, lb, ub)
alg = LS1Alg()
r = optimize(m, alg, options=LS1Options())
println(r.minimizer)
println(r.minimum)
@test abs(r.minimum) < tol(F)
end
end
| NonconvexSearch | https://github.com/JuliaNonconvex/NonconvexSearch.jl.git |
|
[
"MIT"
] | 0.1.2 | be91ed269639deabdaf3fc8d988a8c2b7c72a874 | docs | 334 | # NonconvexSearch
[](https://github.com/JuliaNonconvex/NonconvexSearch.jl/actions)
[](https://codecov.io/gh/JuliaNonconvex/NonconvexSearch.jl)
| NonconvexSearch | https://github.com/JuliaNonconvex/NonconvexSearch.jl.git |
|
[
"MIT"
] | 0.2.4 | 1d9321a3ab42c1532db4985c0e983208d4c7a990 | code | 351 | module PointBasedValueIteration
using POMDPs
using POMDPTools
using POMDPLinter
using LinearAlgebra
using Distributions
using FiniteHorizonPOMDPs
import POMDPs: Solver, solve
import Base: ==, hash, convert
import FiniteHorizonPOMDPs: InStageDistribution, FixedHorizonPOMDPWrapper
export
PBVISolver,
solve
include("pbvi.jl")
end # module
| PointBasedValueIteration | https://github.com/JuliaPOMDP/PointBasedValueIteration.jl.git |
|
[
"MIT"
] | 0.2.4 | 1d9321a3ab42c1532db4985c0e983208d4c7a990 | code | 8056 | """
PBVISolver <: Solver
Solver options for Point-Based Value Iteration for POMDPs.
# Fields
- `max_iterations::Int64` the maximal number of iterations the solver runs. Default: 10
- `ϵ::Float64` the maximal gap between alpha vector improvement steps. Default: 0.01
- `verbose::Bool` switch for solver text output. Default: false
"""
struct PBVISolver <: Solver
max_iterations::Int64
ϵ::Float64
verbose::Bool
end
function PBVISolver(;max_iterations::Int64=10, ϵ::Float64=0.01, verbose::Bool=false)
return PBVISolver(max_iterations, ϵ, verbose)
end
"""
AlphaVec
Pair of alpha vector and corresponding action.
# Fields
- `alpha` α vector
- `action` action corresponding to α vector
"""
struct AlphaVec
alpha::Vector{Float64}
action::Any
end
==(a::AlphaVec, b::AlphaVec) = (a.alpha,a.action) == (b.alpha, b.action)
Base.hash(a::AlphaVec, h::UInt) = hash(a.alpha, hash(a.action, h))
function _argmax(f, X)
return X[argmax(map(f, X))]
end
# adds probabilities of terminals in b to b′ and normalizes b′
function belief_norm(pomdp, b, b′, terminals, not_terminals)
if sum(b′[not_terminals]) != 0.
if !isempty(terminals)
b′[not_terminals] = b′[not_terminals] / (sum(b′[not_terminals]) / (1. - sum(b[terminals]) - sum(b′[terminals])))
b′[terminals] += b[terminals]
else
b′[not_terminals] /= sum(b′[not_terminals])
end
else
b′[terminals] += b[terminals]
b′[terminals] /= sum(b′[terminals])
end
return b′
end
# Backs up the belief b, returning the α vector (with its action) that maximizes the dot product with b
function backup_belief(pomdp::POMDP, Γ, b)
S = ordered_states(pomdp)
A = ordered_actions(pomdp)
O = ordered_observations(pomdp)
γ = discount(pomdp)
r = StateActionReward(pomdp)
Γa = Vector{Vector{Float64}}(undef, length(A))
not_terminals = [stateindex(pomdp, s) for s in S if !isterminal(pomdp, s)]
terminals = [stateindex(pomdp, s) for s in S if isterminal(pomdp, s)]
for a in A
Γao = Vector{Vector{Float64}}(undef, length(O))
trans_probs = dropdims(sum([pdf(transition(pomdp, S[is], a), sp) * b.b[is] for sp in S, is in not_terminals], dims=2), dims=2)
if !isempty(terminals) trans_probs[terminals] .+= b.b[terminals] end
for o in O
# update beliefs
obs_probs = pdf.(map(sp -> observation(pomdp, a, sp), S), [o])
b′ = obs_probs .* trans_probs
if sum(b′) > 0.
b′ = DiscreteBelief(pomdp, b.state_list, belief_norm(pomdp, b.b, b′, terminals, not_terminals))
else
b′ = DiscreteBelief(pomdp, b.state_list, zeros(length(S)))
end
# extract optimal alpha vector at resulting belief
Γao[obsindex(pomdp, o)] = _argmax(α -> α ⋅ b′.b, Γ)
end
# construct new alpha vectors
Γa[actionindex(pomdp, a)] = [r(s, a) + (!isterminal(pomdp, s) ? (γ * sum(pdf(transition(pomdp, s, a), sp) * pdf(observation(pomdp, s, a, sp), o) * Γao[i][j]
for (j, sp) in enumerate(S), (i, o) in enumerate(O))) : 0.)
for s in S]
end
# find the optimal alpha vector
idx = argmax(map(αa -> αa ⋅ b.b, Γa))
alphavec = AlphaVec(Γa[idx], A[idx])
return alphavec
end
# Iteratively improves α vectors until the gap between steps is less than ϵ
function improve(pomdp, B, Γ, solver)
alphavecs = nothing
while true
Γold = Γ
alphavecs = [backup_belief(pomdp, Γold, b) for b in B]
Γ = [alphavec.alpha for alphavec in alphavecs]
prec = max([sum(abs.(dot(α1, b.b) .- dot(α2, b.b))) for (α1, α2, b) in zip(Γold, Γ, B)]...)
if solver.verbose println(" Improving alphas, maximum gap between old and new α vector: $(prec)") end
prec > solver.ϵ || break
end
return Γ, alphavecs
end
# Returns all possible, not yet visited successors of current belief b
function successors(pomdp, b, Bs)
S = ordered_states(pomdp)
not_terminals = [stateindex(pomdp, s) for s in S if !isterminal(pomdp, s)]
terminals = [stateindex(pomdp, s) for s in S if isterminal(pomdp, s)]
succs = []
for a in actions(pomdp)
trans_probs = dropdims(sum([pdf(transition(pomdp, S[is], a), sp) * b[is] for sp in S, is in not_terminals], dims=2), dims=2)
if !isempty(terminals) trans_probs[terminals] .+= b[terminals] end
for o in observations(pomdp)
#update belief
obs_probs = pdf.(map(sp -> observation(pomdp, a, sp), S), [o])
b′ = obs_probs .* trans_probs
if sum(b′) > 0.
b′ = belief_norm(pomdp, b, b′, terminals, not_terminals)
if !in(b′, Bs)
push!(succs, b′)
end
end
end
end
return succs
end
# Computes the maximal 1-norm distance between the successor bp and the beliefs in B
function succ_dist(pomdp, bp, B)
dist = [norm(bp - b.b, 1) for b in B]
return max(dist...)
end
# Expands the belief space with the most distant belief vector
# Returns the new belief space, the set of beliefs, and an early-termination flag
function expand(pomdp, B, Bs)
B_new = copy(B)
for b in B
succs = successors(pomdp, b.b, Bs)
if length(succs) > 0
b′ = succs[argmax([succ_dist(pomdp, bp, B) for bp in succs])]
push!(B_new, DiscreteBelief(pomdp, b′))
push!(Bs, b′)
end
end
return B_new, Bs, length(B) == length(B_new)
end
# 1: B ← {b0}
# 2: while V has not converged to V∗ do
# 3: Improve(V, B)
# 4: B ← Expand(B)
function solve(solver::PBVISolver, pomdp::POMDP)
S = ordered_states(pomdp)
A = ordered_actions(pomdp)
γ = discount(pomdp)
r = StateActionReward(pomdp)
# best action worst state lower bound
α_init = 1 / (1 - γ) * maximum(minimum(r(s, a) for s in S) for a in A)
Γ = [fill(α_init, length(S)) for a in A]
    # Initialize belief; if given a distribution, convert it to a vector
init = initialize_belief(DiscreteUpdater(pomdp), initialstate(pomdp))
B = [init]
Bs = Set([init.b])
if solver.verbose println("Running PBVI solver on $(typeof(pomdp)) problem with following settings:\n max_iterations = $(solver.max_iterations), ϵ = $(solver.ϵ), verbose = $(solver.verbose)\n+----------------------------------------------------------+") end
# original code should run until V converges to V*, this yet needs to be implemented
# for example as: while max(@. abs(newV - oldV)...) > solver.ϵ
# However this probably would not work, as newV and oldV have different number of elements (arrays of alphas)
alphavecs = nothing
for i in 1:solver.max_iterations
Γ, alphavecs = improve(pomdp, B, Γ, solver)
B, Bs, early_term = expand(pomdp, B, Bs)
if solver.verbose println("Iteration $(i) executed, belief set contains $(length(Bs)) belief vectors.") end
if early_term
if solver.verbose println("Belief space did not expand. \nTerminating early.") end
break
end
end
if solver.verbose println("+----------------------------------------------------------+") end
acts = [alphavec.action for alphavec in alphavecs]
return AlphaVectorPolicy(pomdp, Γ, acts)
end
@POMDPLinter.POMDP_require solve(solver::PBVISolver, pomdp::POMDP) begin
P = typeof(pomdp)
S = statetype(P)
A = actiontype(P)
O = obstype(P)
@req discount(::P) # discount factor
@subreq ordered_states(pomdp)
@subreq ordered_actions(pomdp)
@subreq ordered_observations(pomdp)
@req transition(::P,::S,::A)
@req reward(::P,::S,::A)
ss = states(pomdp)
as = actions(pomdp)
os = observations(pomdp)
@req length(::typeof(ss))
s = first(iterator(ss))
a = first(iterator(as))
dist = transition(pomdp, s, a)
D = typeof(dist)
@req pdf(::D,::S)
odist = observation(pomdp, a, s)
OD = typeof(odist)
@req pdf(::OD,::O)
end
| PointBasedValueIteration | https://github.com/JuliaPOMDP/PointBasedValueIteration.jl.git |
|
[
"MIT"
] | 0.2.4 | 1d9321a3ab42c1532db4985c0e983208d4c7a990 | code | 4063 | using Test
using POMDPModels
using POMDPs
using SARSOP
using POMDPTools
using FiniteHorizonPOMDPs
using PointBasedValueIteration
@testset "Comparison with SARSOP" begin
pomdps = [TigerPOMDP(), BabyPOMDP(), MiniHallway()]
for pomdp in pomdps
solver = PBVISolver(10, typeof(pomdp) == MiniHallway ? 0.05 : 0.01, false)
policy = solve(solver, pomdp)
sarsop = SARSOPSolver(verbose=false)
sarsop_policy = solve(sarsop, pomdp)
@testset "$(typeof(pomdp)) Value function comparison" begin
B = []
if typeof(pomdp) == MiniHallway
B = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.083337, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.083333, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
else
for _ in 1:100
r = rand(length(states(pomdp)))
push!(B, DiscreteBelief(pomdp, r/sum(r)))
end
end
pbvi_vals = [value(policy, b) for b in B]
sarsop_vals = [value(sarsop_policy, b) for b in B]
@test isapprox(sarsop_vals, pbvi_vals, rtol=0.1)
end
@testset "$(typeof(pomdp)) Simulation results comparison" begin
no_simulations = typeof(pomdp) == MiniHallway ? 1 : 10_000
for s in states(pomdp)
# println(s)
# @show value(policy, Deterministic(s))
# @show value(sarsop_policy, Deterministic(s))
#
# @show action(policy, Deterministic(s))
# @show action(sarsop_policy, Deterministic(s))
#
# @show mean([simulate(RolloutSimulator(max_steps = 100), pomdp, policy, updater(policy), Deterministic(s)) for i in 1:no_simulations])
# @show mean([simulate(RolloutSimulator(max_steps = 100), pomdp, sarsop_policy, updater(sarsop_policy), Deterministic(s)) for i in 1:no_simulations])
# In this state the PBVI outputs better results than SARSOP, because SARSOP does not evaluate this state, thus having sub-optimal result
if s == 5 && typeof(pomdp) == MiniHallway
continue
else
@test isapprox(value(policy, Deterministic(s)), value(sarsop_policy, Deterministic(s)), rtol=0.1)
@test isapprox( mean([simulate(RolloutSimulator(max_steps = 100), pomdp, policy, updater(policy), Deterministic(s)) for i in 1:no_simulations]),
mean([simulate(RolloutSimulator(max_steps = 100), pomdp, sarsop_policy, updater(sarsop_policy), Deterministic(s)) for i in 1:no_simulations]),
rtol=0.1)
end
end
end
end
end
| PointBasedValueIteration | https://github.com/JuliaPOMDP/PointBasedValueIteration.jl.git |
|
[
"MIT"
] | 0.2.4 | 1d9321a3ab42c1532db4985c0e983208d4c7a990 | docs | 1485 | # Point-based value iteration
[](https://github.com/JuliaPOMDP/PointBasedValueIteration.jl/actions/workflows/CI.yml)
[](http://codecov.io/github/JuliaPOMDP/PointBasedValueIteration.jl?branch=master)
Point-based value iteration solver ([Pineau et al., 2003](http://www.fore.robot.cc/papers/Pineau03a.pdf), [Shani et al., 2012](https://link.springer.com/content/pdf/10.1007/s10458-012-9200-2.pdf)) for the [POMDPs.jl](https://github.com/JuliaPOMDP/POMDPs.jl) framework.
## Installation
This package is available from Julia's General package registry.
```
using Pkg
Pkg.add("PointBasedValueIteration")
```
## Usage
```
using PointBasedValueIteration
using POMDPModels
pomdp = TigerPOMDP() # initialize POMDP
solver = PBVISolver() # set the solver
policy = solve(solver, pomdp) # solve the POMDP
```
The function `solve` returns an `AlphaVectorPolicy` as defined in [POMDPTools](https://juliapomdp.github.io/POMDPs.jl/latest/POMDPTools/policies/).
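The returned policy can be queried like any other POMDPs.jl policy. As a small sketch, reusing the `pomdp` and `policy` from above:
```
using POMDPs, POMDPTools
up = DiscreteUpdater(pomdp)
b = initialize_belief(up, initialstate(pomdp)) # initial belief
action(policy, b) # greedy action at belief b
value(policy, b) # estimated value of belief b
```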
## References
- Pineau, J., Gordon, G., & Thrun, S. (2003, August). Point-based value iteration: An anytime algorithm for POMDPs. In IJCAI (Vol. 3, pp. 1025-1032).
- Shani, G., Pineau, J. & Kaplow, R. A survey of point-based POMDP solvers. Auton Agent Multi-Agent Syst 27, 1–51 (2013). https://doi.org/10.1007/s10458-012-9200-2
| PointBasedValueIteration | https://github.com/JuliaPOMDP/PointBasedValueIteration.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 396 | using Documenter, Coils
makedocs(
modules = [Coils],
sitename = "Coils.jl",
pages = Any[
"Coils.jl"=>"index.md",
"API references"=>Any[
"api/coil.md",
"api/current_loop.md",
"api/helical.md",
"api/helmholtz.md",
"api/current_loops.md",
],
],
)
deploydocs(repo = "github.com/rydyb/Coils.jl.git") | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 121 | module Coils
include("types.jl")
include("conductor_length.jl")
include("magnetic_flux_density.jl")
end # module Coils
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 430 | export conductor_length
conductor_length(::AbstractCoil) = throw(ErrorException("not implemented"))
conductor_length(c::RectangularLoop) = 2 * (c.width + c.height)
conductor_length(c::CircularLoop) = typeof(c.radius)(2π) * c.radius
conductor_length(c::Displace) = conductor_length(c.coil)
conductor_length(c::Reverse) = conductor_length(c.coil)
conductor_length(c::Superposition) = sum(conductor_length(c) for c in c.coils)
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 2846 | using Elliptic
using DynamicQuantities.Constants: mu_0
using LinearAlgebra: norm
export magnetic_flux_density
magnetic_flux_density(::AbstractCoil, args...) = throw(ErrorException("not implemented"))
function magnetic_flux_density(c::CircularLoop, ρ::AbstractQuantity, z::AbstractQuantity)
R = c.radius
I = c.current
B0 = typeof(c.current)(0u"Gauss")
if iszero(z) && R ≈ ρ
return B0, B0, B0
end
α² = R^2 + ρ^2 + z^2 - 2R * ρ
β = √(R^2 + ρ^2 + z^2 + 2R * ρ)
k² = 1 - ustrip(α² / β^2)
E = Elliptic.E(k²)
K = Elliptic.K(k²)
C = mu_0 * I / typeof(c.current)(2π)
# Bρ diverges for ρ -> 0
if iszero(ρ)
Bρ = B0
else
Bρ = (C / (α² * β)) * ((R^2 + ρ^2 + z^2) * E - α² * K) * (z / ρ)
end
Bz = (C / (α² * β)) * ((R^2 - ρ^2 - z^2) * E + α² * K)
return Bρ, B0, Bz
end
function magnetic_flux_density(
c::CircularLoop,
x::AbstractQuantity,
y::AbstractQuantity,
z::AbstractQuantity,
)
    # The cylindrical method already returns the full (Bρ, Bφ, Bz) tuple
    return magnetic_flux_density(c, norm((x, y)), z)
end
function magnetic_flux_density(
c::RectangularLoop,
x::AbstractQuantity,
y::AbstractQuantity,
z::AbstractQuantity,
)
I = c.current
ax = c.height / 2
ay = c.width / 2
C = mu_0 * I / typeof(c.current)(4π)
r1 = norm((x + ax, y + ay, z))
r2 = norm((x - ax, y + ay, z))
r3 = norm((x + ax, y - ay, z))
r4 = norm((x - ax, y - ay, z))
f(r, s) = 1 / (r * (r - s))
Bx = f(r1, y + ay)
Bx -= f(r2, y + ay)
Bx -= f(r3, y - ay)
Bx += f(r4, y - ay)
Bx *= -C * z
By = f(r1, x + ax)
By -= f(r2, x - ax)
By -= f(r3, x + ax)
By += f(r4, x - ax)
By *= -C * z
Bz = (x + ax) * f(r1, y + ay)
Bz += (y + ay) * f(r1, x + ax)
Bz -= (x - ax) * f(r2, y + ay)
Bz -= (y + ay) * f(r2, x - ax)
Bz -= (x + ax) * f(r3, y - ay)
Bz -= (y - ay) * f(r3, x + ax)
Bz += (x - ax) * f(r4, y - ay)
Bz += (y - ay) * f(r4, x - ax)
Bz *= C
return Bx, By, Bz
end
function magnetic_flux_density(c::Displace, x, y, z)
    x′ = x - c.x
    y′ = y - c.y
    z′ = z - c.z
    B = magnetic_flux_density(c.coil, x′, y′, z′)
    if c.coil isa CircularLoop
        # The circular loop returns cylindrical components (Bρ, Bφ, Bz);
        # convert them to Cartesian components about the displaced axis.
        ρ = sqrt(x′^2 + y′^2)
        if !iszero(ρ)
            θ = atan(y′, x′)
            Bρ = B[1]
            Bz = B[3]
            Bx = Bρ * cos(θ)
            By = Bρ * sin(θ)
            return Bx, By, Bz
        end
    end
    return B
end
function magnetic_flux_density(c::Reverse, x, y, z)
return magnetic_flux_density(c.coil, x, y, z) .* -1
end
function magnetic_flux_density(c::Superposition, x, y, z)
Bx = 0.0u"Gauss"
By = 0.0u"Gauss"
Bz = 0.0u"Gauss"
for coil in c.coils
B = magnetic_flux_density(coil, x, y, z)
Bx += B[1]
By += B[2]
Bz += B[3]
end
return Bx, By, Bz
end
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 1970 | using DynamicQuantities
export AbstractCoil
export CircularLoop, RectangularLoop
export Displace, Reverse, Superposition
abstract type AbstractCoil end
struct CircularLoop{T<:AbstractQuantity} <: AbstractCoil
current::T
radius::T
function CircularLoop(; current::T, radius::T) where {T}
@assert dimension(current) === dimension(u"A") "current must have units of current"
@assert dimension(radius) === dimension(u"m") "radius must have units of length"
@assert ustrip(radius) > 0 "radius must be positive"
new{T}(current, radius)
end
end
struct RectangularLoop{T<:AbstractQuantity} <: AbstractCoil
current::T
width::T
height::T
function RectangularLoop(; current::T, width::T, height::T) where {T}
@assert dimension(current) === dimension(u"A") "current must have units of current"
@assert dimension(width) === dimension(u"m") "width must have units of length"
@assert dimension(height) === dimension(u"m") "height must have units of length"
@assert ustrip(width) > 0 && ustrip(height) > 0 "width and height must be positive"
new{T}(current, width, height)
end
end
struct Displace{C<:AbstractCoil,T<:AbstractQuantity} <: AbstractCoil
coil::C
x::T
y::T
z::T
function Displace(coil::C; x::T = zero(u"m"), y::T = zero(u"m"), z::T = zero(u"m")) where {C,T}
@assert dimension(x) === dimension(u"m") "x must have units of length"
@assert dimension(y) === dimension(u"m") "y must have units of length"
@assert dimension(z) === dimension(u"m") "z must have units of length"
new{C,T}(coil, x, y, z)
end
end
struct Reverse{C<:AbstractCoil} <: AbstractCoil
coil::C
function Reverse(coil::C) where {C}
new{C}(coil)
end
end
struct Superposition{C<:AbstractCoil} <: AbstractCoil
coils::Vector{C}
function Superposition(coils::Vector{C}) where {C}
new{C}(coils)
end
end
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 1247 | @testset "conductor_length" begin
cloop = CircularLoop(current = 1u"A", radius = 10u"mm")
@testset "CircularLoop" begin
@test conductor_length(cloop) == 2π * 10u"mm"
end
rloop = RectangularLoop(current = 1u"A", height = 10u"mm", width = 20u"mm")
@testset "RectangularLoop" begin
@test conductor_length(rloop) == 60u"mm"
end
@testset "Displace" begin
@test conductor_length(Displace(rloop; z = 10u"m")) == conductor_length(rloop)
end
@testset "Reverse" begin
@test conductor_length(Reverse(rloop)) == conductor_length(rloop)
end
@testset "Superposition" begin
@test conductor_length(
Superposition(
[
CircularLoop(current = 1u"A", radius = 10u"mm")
CircularLoop(current = 1u"A", radius = 20u"mm")
],
),
) ≈ 2π * 30u"mm" rtol = 1e-4
@test conductor_length(
Superposition(
[
RectangularLoop(current = 1u"A", height = 10u"mm", width = 20u"mm")
RectangularLoop(current = 1u"A", height = 5u"mm", width = 30u"mm")
],
),
) == 130u"mm"
end
end
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 2777 | using LinearAlgebra: norm
using DynamicQuantities.Constants: mu_0
@testset "magnetic_flux_density" begin
@testset "Coil" begin
struct Coil <: AbstractCoil
current::AbstractQuantity
end
coil = Coil(1u"A")
@test_throws ErrorException magnetic_flux_density(coil, 0u"m", 0u"m")
end
@testset "CircularLoop" begin
height = 26.5u"mm"
loop = CircularLoop(current = 300u"A", radius = 39.6u"mm")
# should equal results from Comsol 5.5 simulation (wire diameter = 1.0 mm)
comsol = [
(0u"mm", -5u"mm", 22.81u"Gauss"),
(0u"mm", -4u"mm", 23.67u"Gauss"),
(0u"mm", -3u"mm", 24.54u"Gauss"),
(0u"mm", -2u"mm", 25.45u"Gauss"),
(0u"mm", -1u"mm", 26.37u"Gauss"),
(0u"mm", 0u"mm", 27.31u"Gauss"),
(0u"mm", 1u"mm", 28.28u"Gauss"),
(0u"mm", 2u"mm", 29.26u"Gauss"),
(0u"mm", 3u"mm", 30.26u"Gauss"),
(0u"mm", 4u"mm", 31.27u"Gauss"),
(0u"mm", 5u"mm", 32.29u"Gauss"),
(0u"mm", 0u"mm", 27.31u"Gauss"),
(1u"mm", 0u"mm", 27.31u"Gauss"),
(2u"mm", 0u"mm", 27.31u"Gauss"),
(3u"mm", 0u"mm", 27.30u"Gauss"),
(4u"mm", 0u"mm", 27.30u"Gauss"),
(5u"mm", 0u"mm", 27.29u"Gauss"),
(6u"mm", 0u"mm", 27.28u"Gauss"),
(7u"mm", 0u"mm", 27.26u"Gauss"),
(8u"mm", 0u"mm", 27.26u"Gauss"),
(9u"mm", 0u"mm", 27.24u"Gauss"),
(10u"mm", 0u"mm", 27.23u"Gauss"),
]
for (ρ, z, B) in comsol
@test norm(magnetic_flux_density(loop, ρ, z - height)) ≈ B rtol = 1e-3
end
end
@testset "RectangularLoop" begin
loop = RectangularLoop(current = 1u"A", width = 1u"m", height = 1u"m")
Bx, By, Bz = magnetic_flux_density(loop, 0u"m", 0u"m", 0u"m")
@test Bx ≈ 0.0u"Gauss" rtol = 1e-3
@test By ≈ 0.0u"Gauss" rtol = 1e-3
@test Bz ≈ √2 * mu_0 * loop.current / (1π * 0.5u"m") rtol = 1e-3
end
@testset "Helmholtz" begin
# https://de.wikipedia.org/wiki/Helmholtz-Spule#Berechnung_der_magnetischen_Flussdichte
loop = CircularLoop(current = 1u"A", radius = 1u"m")
loops = Superposition([Displace(loop; z = 0.5u"m"), Displace(loop; z = -0.5u"m")])
B = magnetic_flux_density(loops, 0u"m", 0u"m", 0u"m")
@test B[3] ≈ 0.899e-6u"T" rtol = 1e-3
end
@testset "Anti-Helmholtz" begin
loop = CircularLoop(current = 1u"A", radius = 1u"m")
loops = Superposition([Displace(loop; z = 0.5u"m"), Displace(Reverse(loop); z = -0.5u"m")])
B = magnetic_flux_density(loops, 0u"m", 0u"m", 0u"m")
@test B[3] ≈ 0.0u"T" rtol = 1e-3
end
end
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 135 | using Coils
using Test
using DynamicQuantities
include("types.jl")
include("conductor_length.jl")
include("magnetic_flux_density.jl")
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 1577 | @testset "CircularLoop" begin
loop = CircularLoop(current = 10u"mA", radius = 100u"mm")
@test loop.current == 10u"mA"
@test loop.radius == 100u"mm"
@test_throws AssertionError CircularLoop(current = 10u"mA", radius = -100u"mm")
@test_throws AssertionError CircularLoop(current = 10u"m", radius = 100u"mm")
@test_throws AssertionError CircularLoop(current = 10u"A", radius = 100u"V")
end
@testset "RectangularLoop" begin
loop = RectangularLoop(current = 10u"mA", width = 20u"m", height = 30u"m")
@test loop.current == 10u"mA"
@test loop.width == 20u"m"
@test loop.height == 30u"m"
@test_throws AssertionError RectangularLoop(current = 10u"mA", width = -20u"m", height = 30u"m")
@test_throws AssertionError RectangularLoop(current = 10u"mA", width = 20u"m", height = -30u"m")
@test_throws AssertionError RectangularLoop(current = 10u"m", width = 20u"m", height = 30u"m")
@test_throws AssertionError RectangularLoop(current = 10u"mA", width = 20u"V", height = 30u"m")
@test_throws AssertionError RectangularLoop(current = 10u"mA", width = 20u"m", height = 30u"V")
end
@testset "Displace" begin
loop = CircularLoop(current = 10u"mA", radius = 100u"mm")
disp = Displace(loop; x = 10u"m", y = 20u"m", z = 30u"m")
@test disp.coil == loop
@test disp.x == 10u"m"
@test disp.y == 20u"m"
@test disp.z == 30u"m"
@test_throws AssertionError Displace(loop; x = 10u"A")
@test_throws AssertionError Displace(loop; y = 10u"A")
@test_throws AssertionError Displace(loop; z = 10u"A")
end
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | code | 293 | @testset "qconvert" begin
@test qconvert(1u"m", u"m") == 1u"m"
@test_throws IncompatibleUnitsError qconvert(1u"m", u"s")
@test qconvert(1, u"m") == 1u"m"
@test qconvert(1Unitful.u"m", u"m") == 1u"m"
@test_throws IncompatibleUnitsError qconvert(1Unitful.u"m", u"s")
end
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 1174 | # Coils.jl
| **Build Status** | **Code Coverage** |
|:-----------------------------------------:|:-------------------------------:|
| [![][CI-img]][CI-url] | [![][codecov-img]][codecov-url] |
Coils.jl is a Julia library that provides various types and functions for engineering magnetic coils made of rectangular hollow core wire.
## Install
Coils.jl is a registered package and you can install it over the REPL:
```julia
julia> ]add Coils
```
## Usage
See the notebooks in [example](example/). Clone or download those files. You can run them with [Pluto.jl](https://github.com/fonsp/Pluto.jl). Execute the following in the REPL:
```julia
julia> using Pluto
julia> Pluto.run()
```
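For a quick impression of the API, here is a minimal sketch (the numbers are arbitrary):
```julia
using Coils
using DynamicQuantities

loop = CircularLoop(current = 1u"A", radius = 10u"mm")

conductor_length(loop) # 2π × 10 mm
magnetic_flux_density(loop, 0u"mm", 5u"mm") # (Bρ, Bφ, Bz) on the axis, 5 mm above the loop plane
```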
## Testing
To run the tests, run `julia` and enable the package mode by typing `]`.
Then, run the following command:
```julia
activate .
test
```
[CI-img]: https://github.com/rydyb/Coils.jl/actions/workflows/ci.yml/badge.svg
[CI-url]: https://github.com/rydyb/Coils.jl/actions/workflows/ci.yml
[codecov-img]: https://codecov.io/gh/rydyb/Coils.jl/branch/main/graph/badge.svg?token=CNF55N4HDZ
[codecov-url]: https://codecov.io/gh/rydyb/Coils.jl
| Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 172 | # Coils.jl
Coils.jl is a Julia library that provides various types and functions for engineering magnetic coils.
## Installation
```julia
using Pkg; Pkg.add("Coils")
``` | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 134 | # Coil
```@docs
Coils.Coil
```
```@docs
Coils.mfd
```
```@docs
Coils.conductor_coordinates
```
```@docs
Coils.conductor_length
``` | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 95 | # CurrentLoop
```@docs
Coils.CurrentLoop
```
```@docs
Coils.mfd
```
```@docs
Coils.mfd_z
``` | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 197 | # CurrentLoops
Converts a coil to a vector of current loops.
```@docs
Coils.CurrentLoops
```
```@docs
Coils.conductor_coordinates
```
```@docs
Coils.conductor_length
```
```@docs
Coils.mfd
``` | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 94 | # Helical
```@docs
Coils.Helical
```
```@docs
Coils.Pancake
```
```@docs
Coils.Solenoid
``` | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.4.0 | ef9bd76270a3b52c65d1abf5b3eacba6aaca2cf8 | docs | 75 | # Helmholtz
```@docs
Coils.Helmholtz
```
```@docs
Coils.AntiHelmholtz
``` | Coils | https://github.com/rydyb/Coils.jl.git |
|
[
"MIT"
] | 0.1.4 | 68a1812db09470298cf4b01852dd904d777d0b2d | code | 2296 | using Documenter
using EnergyExpressions
using AtomicLevels
makedocs(
modules = [EnergyExpressions],
sitename = "EnergyExpressions",
pages = [
"Home" => "index.md",
"Theory" => [
"Notation" => "notation.md",
"Energy Expressions" => "energy_expressions.md",
"Calculus of Variations" => "calculus_of_variations.md"
],
"Implementation" => [
"Conjugate orbitals" => "conjugate_orbitals.md",
"Slater determinants" => "slater_determinants.md",
"N-body operators" => "nbody_operators.md",
"N-body matrix elements" => "nbody_matrix_elements.md",
"Common N-body operators" => "common_operators.md",
"N-body equations" => "equations.md",
"Variation" => "variations.md",
"System of equations" => "system_of_equations.md",
"Miscellaneous" => "misc.md"
]
],
format = Documenter.HTML(
mathengine = MathJax2(Dict(:TeX => Dict(
:equationNumbers => Dict(:autoNumber => "AMS"),
:Macros => Dict(
:defd => "≝",
:ket => ["|#1\\rangle",1],
:bra => ["\\langle#1|",1],
:braket => ["\\langle#1|#2\\rangle",2],
:matrixel => ["\\langle#1|#2|#3\\rangle",3],
:vec => ["\\mathbf{#1}",1],
:mat => ["\\mathsf{#1}",1],
:conj => ["#1^*",1],
:im => "\\mathrm{i}",
:operator => ["\\mathfrak{#1}",1],
:Hamiltonian => "\\operator{H}",
:hamiltonian => "\\operator{h}",
:Lagrangian => "\\operator{L}",
:fock => "\\operator{f}",
:lagrange => ["\\epsilon_{#1}",1],
:vary => ["\\delta_{#1}",1],
:onebody => ["(#1|#2)",2],
:twobody => ["[#1|#2]",2],
:twobodydx => ["[#1||#2]",2],
:direct => ["{\\operator{J}_{#1}}",1],
:exchange => ["{\\operator{K}_{#1}}",1],
:diff => ["\\mathrm{d}#1\\,",1],
),
))),
),
doctest = false
)
deploydocs(
repo = "github.com/JuliaAtoms/EnergyExpressions.jl.git",
push_preview = true,
)
| EnergyExpressions | https://github.com/JuliaAtoms/EnergyExpressions.jl.git |
|
[
"MIT"
] | 0.1.4 | 68a1812db09470298cf4b01852dd904d777d0b2d | code | 629 | module EnergyExpressions
using AtomicLevels
using LinearAlgebra
using SparseArrays
using Combinatorics
using UnicodeFun
using Formatting
using ProgressMeter
include("key_tracker.jl")
include("conjugate_orbitals.jl")
include("slater_determinants.jl")
include("show_coefficients.jl")
include("nbody_operators.jl")
include("loop_macros.jl")
include("minors.jl")
include("compare_vectors.jl")
include("nbody_matrix_elements.jl")
include("bit_configurations.jl")
include("common_operators.jl")
include("equations.jl")
include("variations.jl")
include("multi_configurational_equations.jl")
include("invariant_sets.jl")
end # module
| EnergyExpressions | https://github.com/JuliaAtoms/EnergyExpressions.jl.git |
|
[
"MIT"
] | 0.1.4 | 68a1812db09470298cf4b01852dd904d777d0b2d | code | 17376 | # * Binary utilities
function showbin(io::IO, c::BitVector)
# Printing order = LSB ... MSB
for (i,e) in enumerate(c)
write(io, e ? "1" : ".")
i%4 == 0 && write(io, " ")
end
end
showbin(c::BitVector) = showbin(stdout, c)
showbin(io::IO, i::Integer) = showbin(io, BitVector(digits(i, base=2)))
struct BitPattern{O}
obj::O
end
Base.show(io::IO, b::BitPattern) = showbin(io, b.obj)
macro showbin(var)
name = string(var)
quote
println($name, " = ", BitPattern($(esc(var))))
end
end
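# Example (bits print LSB-first, in groups of four):
#
#     julia> BitPattern(BitVector([1, 0, 1, 1, 0, 0, 0, 1]))
#     1.11 ...1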
# * Orbitals
"""
Orbitals(orbitals, overlaps, has_overlap, non_orthogonalities)
Structure storing a common set of `orbitals`, along with possible
`overlaps` between them, in case of non-orthogonalities. `has_overlap`
is a boolean matrix indicates if a pair of orbitals have overlap,
either due to non-orthogonality or if they are the same
orbital. `non_orthogonalities` is a boolean vector that indicates if a
specific orbital is non-orthogonal to _any_ other orbital in the set
of orbitals. This structure is used internally by
[`BitConfigurations`](@ref).
"""
struct Orbitals{O,Overlaps,HasOverlap,NonOrthogonalities}
orbitals::Vector{O}
overlaps::Overlaps
has_overlap::HasOverlap
non_orthogonalities::NonOrthogonalities
function Orbitals(orbitals::Vector{O}, overlaps::AbstractVector{<:OrbitalOverlap}) where O
orb_map = Dict(o => i for (i,o) in enumerate(orbitals))
n = length(orbitals)
S = spdiagm(n,n,0 => fill(one(NBodyTerm), n))
for oo in overlaps
i = orb_map[oo.a]
j = orb_map[oo.b]
S[i,j] = OrbitalOverlap(oo.a, oo.b)
S[j,i] = OrbitalOverlap(oo.b, oo.a)
end
N = falses(n,n)
for oo in overlaps
i = orb_map[oo.a]
j = orb_map[oo.b]
N[i,j] = true
N[j,i] = true
end
has_overlap = N .| Matrix(I, size(N))
non = map(|, vec(reduce(|, N, dims=1)), vec(reduce(|, N, dims=2)))
new{O,typeof(S),BitMatrix,BitVector}(orbitals, S, has_overlap, non)
end
function Orbitals(orbitals::Vector{O}) where O
no = length(orbitals)
has_overlap = Matrix(I,no,no)
new{O,UniformScaling{Bool},BitMatrix,Nothing}(orbitals, I, has_overlap, nothing)
end
end
Base.length(orbitals::Orbitals) = length(orbitals.orbitals)
Base.eachindex(orbitals::Orbitals) = eachindex(orbitals.orbitals)
Base.iterate(orbitals::Orbitals, args...) = iterate(orbitals.orbitals, args...)
Base.getindex(orbitals::Orbitals, i) = orbitals.orbitals[i]
AtomicLevels.orbitals(orbitals::Orbitals) = orbitals.orbitals
Base.:(==)(a::Orbitals, b::Orbitals) =
a.orbitals == b.orbitals &&
a.overlaps == b.overlaps &&
a.has_overlap == b.has_overlap &&
a.non_orthogonalities == b.non_orthogonalities
# * BitConfiguration
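# Represents a single configuration over a common set of `orbitals`;
# `occupations[i]` is true when `orbitals[i]` is occupied, with bit 1 (the
# LSB) corresponding to the first orbital.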
struct BitConfiguration{Orbitals,Occupations}
orbitals::Orbitals
occupations::Occupations
end
Base.show(io::IO, c::BitConfiguration, sel=eachindex(c.orbitals)) =
write(io, join(string.(c.orbitals[filter(∈(sel), orbital_numbers(c))]), " "))
showbin(io::IO, c::BitConfiguration) = showbin(io, c.occupations)
showbin(c::BitConfiguration) = showbin(c.occupations)
Base.:(==)(a::BitConfiguration, b::BitConfiguration) =
a.orbitals == b.orbitals &&
a.occupations == b.occupations
num_particles(c::BitConfiguration) = count(c.occupations)
orbital_numbers(c::BitConfiguration) = findall(c.occupations)
get_orbitals(c::BitConfiguration) = c.orbitals[orbital_numbers(c)]
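# Maps the orbitals selected by `mask` to their ordinal numbers among the
# orbitals occupied in `c`, counting from the least-significant bit. E.g. if
# `c` occupies orbitals {2,5,7} and `mask` selects orbitals {5,7}, the result
# is [2, 3]. Assumes `mask` only selects orbitals occupied in `c`.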
function configuration_orbital_numbers(c::BitConfiguration, mask::BitVector)
occ = copy(c.occupations)
n = 0
ons = Vector{Int}()
while !iszero(occ)
occ[1] && (n += 1)
mask[1] && push!(ons, n)
occ <<= 1
mask <<= 1
end
ons
end
for f in (:(&), :(|), :xor)
@eval begin
Base.$(f)(a::BitConfiguration, b::BitConfiguration) = map($f, a.occupations, b.occupations)
Base.$(f)(a::BitConfiguration, b::BitVector) = map($f, a.occupations, b)
Base.$(f)(a::BitVector, b::BitConfiguration) = map($f, a, b.occupations)
end
end
#=
Adapted from
- Scemama, A., & Giner, E. (2013). An efficient implementation of
Slater–Condon rules. CoRR, arXiv:1311.6244.
=#
num_excitations(a::BitConfiguration, b::BitConfiguration) = count(a ⊻ b)
holes_mask(a::BitConfiguration, b::BitConfiguration) = (a ⊻ b) & a
holes(a::BitConfiguration, b::BitConfiguration) = findall(holes_mask(a, b))
particles_mask(a::BitConfiguration, b::BitConfiguration) = (a ⊻ b) & b
particles(a::BitConfiguration, b::BitConfiguration) = findall(particles_mask(a, b))
function holes_particles_mask(a::BitConfiguration, b::BitConfiguration)
x = a ⊻ b
x, x & a, x & b
end
function holes_particles(a::BitConfiguration, b::BitConfiguration)
x, hm, pm = holes_particles_mask(a, b)
findall(hm), findall(pm)
end
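# Example: if `a` occupies orbitals {1,2} and `b` occupies orbitals {1,3},
# then a ⊻ b selects orbitals {2,3}; orbital 2 is a hole and orbital 3 a
# particle, i.e. holes_particles(a, b) == ([2], [3]). For this pair,
# relative_sign(a, b, [2], [3]) == 1, since no occupied orbital lies between
# positions 2 and 3.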
function relative_sign(a::BitConfiguration, b::BitConfiguration,
holes, particles; verbosity=0)
isempty(holes) && return 1
perm = 0
av = copy(a.occupations)
n = length(av)
if verbosity > 0
print(" ")
@showbin a
print(" ")
@showbin b
end
@inbounds for (h,p) in zip(holes, particles)
lo,hi = minmax(h,p)
mask = vcat(falses(lo), trues(hi-lo-1), falses(n-hi+1))
perm += count(map(&, mask, av))
if verbosity > 0
@show h,p,lo,hi
ma = map(&, mask, av)
print(" ")
@showbin av
@showbin mask
print(" ")
@showbin ma
end
av[h] = false
av[p] = true
end
@assert av == b.occupations
iseven(perm) ? 1 : -1
end
# * BitConfigurations
"""
BitConfigurations(orbitals, configurations)
Represent collection of `configurations` as bit vectors, where `true`
values indicate that specific `orbitals` are occupied.
# Example
```julia-repl
julia> bcs = BitConfigurations([[:a,:b,:c], [:x,:b,:c], [:a,:y,:c], [:a,:b,:z]])
6-orbital 4-configuration BitConfigurations
1: a b c
2: a -> x
3: b -> y
4: c -> z
julia> h = FieldFreeOneBodyHamiltonian()
ĥ₀
julia> Matrix(bcs, h)
4×4 SparseMatrixCSC{NBodyMatrixElement, Int64} with 10 stored entries:
 (a|a) + (b|b) + (c|c)  (a|x)                  - (b|y)                (c|z)
 (x|a)                  (b|b) + (c|c) + (x|x)  ⋅                      ⋅
 - (y|b)                ⋅                      (a|a) + (c|c) + (y|y)  ⋅
 (z|c)                  ⋅                      ⋅                      (a|a) + (b|b) + (z|z)
```
"""
struct BitConfigurations{Orbitals}
orbitals::Orbitals
configurations::BitMatrix
end
get_orbitals(c::Configuration) = c.orbitals
get_orbitals(v::AbstractVector) = v
AtomicLevels.orbitals(bcs::BitConfigurations) = orbitals(bcs.orbitals)
function BitConfigurations(orbitals::Orbitals,
configurations::AbstractVector)
orb_map = Dict(o => i for (i,o) in enumerate(orbitals))
C = falses(length(orbitals), length(configurations))
p = Progress(prod(size(C)))
for (j,c) in enumerate(configurations)
for o in get_orbitals(c)
C[orb_map[o],j] = true
ProgressMeter.next!(p)
end
end
BitConfigurations(orbitals, C)
end
function BitConfigurations(configurations::AbstractVector,
overlaps=OrbitalOverlap[])
orbitals = (!isempty(configurations) ?
unique(reduce(vcat, map(get_orbitals, configurations))) :
[])
BitConfigurations(Orbitals(orbitals, overlaps),
configurations)
end
Base.getindex(m::BitConfigurations, i::Integer) = BitConfiguration(m.orbitals, m.configurations[:,i])
Base.getindex(m::BitConfigurations, i) = BitConfigurations(m.orbitals, m.configurations[:,i])
Base.length(m::BitConfigurations) = size(m.configurations, 2)
Base.isempty(m::BitConfigurations) = length(m) == 0
Base.:(==)(a::BitConfigurations, b::BitConfigurations) =
a.orbitals == b.orbitals &&
a.configurations == b.configurations
function AtomicLevels.core(m::BitConfigurations)
isempty(m) && return 1:0
# Find all initial orbitals which are occupied in all
# configurations.
i = something(findfirst(.!vec(reduce(&, m.configurations, dims=2))),
length(m.orbitals)+1) - 1
sel = 1:i
if i > 0 && !isnothing(m.orbitals.overlaps)
# We require that the core orbitals are all canonical ones,
# i.e. no non-orthogonalities are allowed. This simplifies the
# energy expression, since the core can then only contribute
# through Slater–Condon rules.
non = m.orbitals.non_orthogonalities[sel]
imax = sel[end]
sel = 1:min(imax, something(findfirst(non), imax+1))
end
sel
end
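# A minimal sketch (no explicit overlaps, so all orbitals are expected to
# be canonical): :a and :b are occupied in every configuration and should
# form the common core.
let bcs = BitConfigurations([[:a, :b, :c], [:a, :b, :d]])
    @assert core(bcs) == 1:2
end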
function Base.show(io::IO, m::BitConfigurations)
norb = length(m.orbitals)
ncfg = length(m)
write(io, "$(norb)-orbital $(ncfg)-configuration BitConfigurations")
end
function Base.show(io::IO, ::MIME"text/plain", m::BitConfigurations)
show(io, m)
println(io)
c = core(m)
!isempty(c) && write(io, "Common core: $(c)")
isempty(m) && return
println(io)
ncfg = length(m)
fmt = FormatExpr("{1:$(length(digits(ncfg)))d}: ")
printfmt(io, fmt, 1)
ref = m[1]
sel = (isempty(c) || length(m) == 1 ? 0 : c[end])+1:length(m.orbitals)
show(io, ref, sel)
show_conf = i -> begin
println(io)
exc = m[i]
h,p = holes_particles(ref, exc)
printfmt(io, fmt, i)
write(io, join(string.(m.orbitals[h]), " "))
write(io, " -> ")
write(io, join(string.(m.orbitals[p]), " "))
end
screen_rows = fld(max(3,first(displaysize(io))-6), 2)
first_block = 2:min(ncfg, screen_rows)
second_block = max(1,ncfg - screen_rows):ncfg
if !isempty(first_block) && isempty((2:first_block[end]+2) ∩ second_block)
foreach(show_conf, first_block)
print(io, "\n⋮")
foreach(show_conf, second_block)
else
foreach(show_conf, 2:second_block[end])
end
end
function orbital_overlap_matrix(sd::BitConfigurations, i, j)
a = sd[i]
b = sd[j]
sd.orbitals.overlaps[orbital_numbers(a), orbital_numbers(b)]
end
"""
non_zero_cofactors(sd, N, i, j)
Find all non-zero cofactors of the orbital overlap matrix between the
Slater determinants `i` & `j` of `sd`, generated when striking out `N`
rows & columns. This routine is tailored to the case when few
non-orthogonalities are present, i.e. when their number is roughly
proportional to the number of orbitals.
Non-orthogonality between spin-orbitals is handled by dividing them
into two subspaces:
1. The orthogonal spin-orbitals that are common to both Slater
determinants (core orbitals),
2. All non-orthogonal orbitals, and the orbitals which differ between
the Slater determinants (i.e. holes of `i` and particles of `j`).
The relative phase between the Slater determinants is determined by
group `2` alone: by permuting the particles to the positions of the
holes, we find this phase. We can then formally permute them together
to a diagonal block at the lower-right corner of the orbital overlap
matrix without incurring a phase change, since we need to permute the
same number of rows and columns. We thus get this structure:
                    ╷   ╷
                    │ 1 │   │
    det(Sᵢⱼ) = (-)ᵏ │───┼───│
                    │   │ 2 │
                    ╵   ╵
where `k` is decided by the permutation necessary to put the particles
in the positions of the holes.
Obviously, the determinant of the orbital matrix is now given by
`det(Sᵢⱼ) = (-)ᵏ*det(2)`, since we trivially have `det(1)==1`.
Depending on the rank of `2` (determined by the number of
hole–particle pairs and which spin-orbitals are non-orthogonal), we
need to strike out at least `size(2,1)-rank(2)` rows/columns from `2`,
and at most `min(N,size(2,1))`, i.e. for each value of `n ∈
size(2,1)-rank(2):min(N,size(2,1))`, we additionally need to strike
out `m = N - n` rows/columns from `1`, but since the determinant of
subspace `1` is unity, regardless of how many rows/columns we've
stricken out, this is a trivial exercise. Of course, we also require
that `m ≤
size(1,1)`.
"""
function non_zero_cofactors(sd::BitConfigurations, N, i, j; verbosity=0)
verbosity > 0 && @show N
a = sd[i]
b = sd[j]
np = num_particles(a)
np == num_particles(b) ||
throw(ArgumentError("Configurations do not have the same number of particles"))
ks = Vector{Vector{Int}}()
ls = Vector{Vector{Int}}()
Ds = Vector{NBodyMatrixElement}()
# First deal with trivial special cases.
N > np && return ks, ls, Ds
aon = orbital_numbers(a)
bon = orbital_numbers(b)
if N == np
push!(ks, aon)
push!(ls, bon)
push!(Ds, one(NBodyMatrixElement))
return ks, ls, Ds
end
isnothing(sd.orbitals.non_orthogonalities) &&
error("Straight Slater–Condon not yet implemented")
if verbosity > 0
S = orbital_overlap_matrix(sd, i, j)
println("Overlap matrix:")
show(stdout, "text/plain", S)
println()
i == j && @show det(S)
end
v = sd.orbitals.non_orthogonalities
excitations, hmask, pmask = holes_particles_mask(a, b)
common = a & b
# For these, we may employ Slater–Condon rules
canonical_orbitals = findall(map(&, common, .~v))
verbosity > 1 && @show canonical_orbitals
# For these, we have to apply the Löwdin rules, but we can
# precompute the determinant of this subspace once, for all those
# determinant minors which do not strike out rows/columns in this
# subspace.
x = map(|, v, excitations)
anon = findall(a & x)
bnon = findall(b & x)
h = findall(hmask)
p = findall(pmask)
S2 = sd.orbitals.overlaps[anon, bnon]
non2 = sd.orbitals.has_overlap[anon, bnon]
if verbosity > 0
show(stdout, "text/plain", S2)
println()
show(stdout, "text/plain", sparse(non2))
println()
isempty(S2) && println("We're essentially in Slater–Condon land")
end
# Minimum order of Γ⁽ᵖ⁾
prmin = count(iszero, reduce(|, non2, dims=2))
pcmin = count(iszero, reduce(|, non2, dims=1))
pmin = max(prmin, pcmin)
if N < pmin
verbosity > 0 && println("Too many vanishing rows/columns")
return ks, ls, Ds
end
verbosity > 0 && println("We need to strike out $(pmin)..$(min(N, length(anon))) rows/columns from S2")
rs = relative_sign(a, b, h, p, verbosity=verbosity)
verbosity > 0 && @show rs
if N == 0
# As a special case, for the zero-body operator, return the
# properly phased determinant of S2.
push!(ks, [])
push!(ls, [])
push!(Ds, rs*det(S2))
return ks, ls, Ds
end
for n = pmin:min(N, length(anon))
# n is how many rows/columns we're striking out from S2;
# consequently m is how many rows/columns we have to strike
# out from S1
m = N - n
verbosity > 0 && @show n, m
kk,ll = nonzero_minors(n, S2)
for (k,l) in zip(kk,ll)
verbosity > 0 && @show k,l
Dkl = cofactor(k, l, S2)
iszero(Dkl) && continue
verbosity > 0 && @show Dkl
# We now employ the Slater–Condon rules for S1
for co in combinations(canonical_orbitals, m)
ko = vcat(anon[k], co)
lo = vcat(bnon[l], co)
push!(ks, ko)
push!(ls, lo)
push!(Ds, rs*Dkl)
end
end
end
ks, ls, Ds
end
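# A minimal sketch exercising the trivial N == num_particles branch above:
# all orbital indices are struck out and the single remaining cofactor is
# unity.
let bcs = BitConfigurations([[:a, :b], [:a, :c]])
    ks, ls, Ds = non_zero_cofactors(bcs, 2, 1, 2)
    @assert ks == [[1, 2]] && ls == [[1, 3]] && length(Ds) == 1
end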
function NBodyMatrixElement(bcs::BitConfigurations, op::NBodyOperator{N}, i, j) where N
orbitals = bcs.orbitals.orbitals
ks,ls,Ds = non_zero_cofactors(bcs, N, i, j)
NBodyMatrixElement(orbitals, op, orbitals,
zip(ks, ls, Ds))
end
function NBodyMatrixElement(bcs::BitConfigurations, op::LinearCombinationOperator, i, j)
terms = NBodyTerm[]
for (o,coeff) in op.operators
iszero(coeff) && continue
nbme = coeff*NBodyMatrixElement(bcs, o, i, j)
!iszero(nbme) && append!(terms, nbme.terms)
end
NBodyMatrixElement(terms)
end
function Base.Matrix(bcs::BitConfigurations, rows, op::QuantumOperator, cols; verbosity=0, kwargs...)
m,n = length(rows), length(cols)
I = [Int[] for i in 1:Threads.nthreads()]
J = [Int[] for i in 1:Threads.nthreads()]
V = [NBodyMatrixElement[] for i in 1:Threads.nthreads()]
p = if verbosity > 0
@info "Generating energy expression"
Progress(m*n)
end
Threads.@threads for i in eachindex(rows)
r = rows[i]
tid = Threads.threadid()
for (j,c) in enumerate(cols)
me = NBodyMatrixElement(bcs, op, r, c)
!isnothing(p) && ProgressMeter.next!(p)
iszero(me) && continue
push!(I[tid], i)
push!(J[tid], j)
push!(V[tid], me)
end
end
sparse(reduce(vcat, I),
reduce(vcat, J),
reduce(vcat, V), m, n)
end
function Base.Matrix(bcs::BitConfigurations, op::QuantumOperator; left=Colon(), right=Colon(), kwargs...)
ms = 1:length(bcs)
Matrix(bcs, ms[left], op, ms[right]; kwargs...)
end
export BitConfigurations
# * Common operators
"""
OneBodyHamiltonian
The one-body Hamiltonian, may include external fields. It is diagonal
in spin, i.e. it does not couple orbitals of opposite spin.
"""
struct OneBodyHamiltonian <: OneBodyOperator end
Base.show(io::IO, ::OneBodyHamiltonian) = write(io, "ĥ")
Base.show(io::IO, me::OrbitalMatrixElement{1,A,OneBodyHamiltonian,B}) where{A,B} =
write(io, "(", join(string.(me.a), " "), "|", join(string.(me.b), " "), ")")
"""
iszero(me::EnergyExpressions.OrbitalMatrixElement{1,<:SpinOrbital{<:Orbital},OneBodyHamiltonian,<:SpinOrbital{<:Orbital}})
The matrix element vanishes if the spin-orbitals do not have the same spin.
"""
Base.iszero(me::OrbitalMatrixElement{1,A,OneBodyHamiltonian,B}) where {A<:SpinOrbital{<:Orbital},B<:SpinOrbital{<:Orbital}} =
me.a[1].m[2] != me.b[1].m[2]
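# A hypothetical sketch (assuming AtomicLevels' SpinOrbital(orb, mℓ, ms)
# constructor and `half` from HalfIntegers): one-body matrix elements
# between orbitals of opposite spin vanish identically.
let α = SpinOrbital(o"1s", 0, half(1)),
    β = SpinOrbital(o"1s", 0, -half(1))
    @assert iszero(OrbitalMatrixElement((α,), OneBodyHamiltonian(), (β,)))
    @assert !iszero(OrbitalMatrixElement((α,), OneBodyHamiltonian(), (α,)))
end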
"""
FieldFreeOneBodyHamiltonian
The one-body Hamiltonian, with no external fields. It is diagonal in
the orbital symmetry.
"""
struct FieldFreeOneBodyHamiltonian <: OneBodyOperator end
Base.show(io::IO, ::FieldFreeOneBodyHamiltonian) = write(io, "ĥ₀")
Base.show(io::IO, me::OrbitalMatrixElement{1,A,FieldFreeOneBodyHamiltonian,B}) where{A,B} =
write(io, "(", join(string.(me.a), " "), "|", join(string.(me.b), " "), ")")
Base.iszero(me::OrbitalMatrixElement{1,<:SpinOrbital,FieldFreeOneBodyHamiltonian,<:SpinOrbital}) =
symmetry(me.a[1]) != symmetry(me.b[1])
"""
CoulombInteraction
Two-body Hamiltonian, representing the mutual Coulombic repulsion
between two electrons. Is diagonal in spin, i.e. the spin of the
orbitals associated with the same coordinate must be the same.
# Examples
```jldoctest
julia> EnergyExpressions.OrbitalMatrixElement((:a,:b), CoulombInteraction(), (:c,:d))
[a b|c d]
julia> EnergyExpressions.OrbitalMatrixElement((:a,:b), CoulombInteraction(), (:b,:a))
G(a,b)
```
"""
struct CoulombInteraction{O} <: TwoBodyOperator
o::O # This can be used to indicate e.g. approximate Coulomb interaction
end
CoulombInteraction() = CoulombInteraction(nothing)
Base.hash(g::CoulombInteraction, h::UInt) = hash(g.o, h)
const CoulombPotential{A,B} = ContractedOperator{1,2,1,A,<:CoulombInteraction,B}
Base.show(io::IO, ::CoulombInteraction) = write(io, "ĝ")
function Base.show(io::IO, me::OrbitalMatrixElement{2,A,<:CoulombInteraction,B}) where {A,B}
if me.a == me.b # Direct interaction
write(io, "F($(me.a[1]),$(me.a[2]))")
elseif me.a[1] == me.b[2] && me.a[2] == me.b[1] # Exchange interaction
write(io, "G($(me.a[1]),$(me.a[2]))")
else # General case
write(io, "[", join(string.(me.a), " "), "|", join(string.(me.b), " "), "]")
end
end
Base.show(io::IO, co::CoulombPotential{A,B}) where {A,B} =
write(io, "[$(co.a[1])|$(co.b[1])]")
# The Coulomb matrix elements are symmetric upon interchange of the
# coordinates, however not upon interchange of the bra and ket side,
# since we allow complex orbitals in general [this is in contrast to
# the real Rᵏ integrals mentioned in section 12.4 of the BSR manual by
# Zatsarinny (2006)].
Base.:(==)(a::OrbitalMatrixElement{2,<:Any,<:CoulombInteraction,<:Any},
b::OrbitalMatrixElement{2,<:Any,<:CoulombInteraction,<:Any}) =
a.o == b.o &&
(a.a == b.a && a.b == b.b ||
a.a == reverse(b.a) && a.b == reverse(b.b))
function Base.hash(ome::OrbitalMatrixElement{2,<:Any,<:CoulombInteraction,<:Any}, h::UInt)
p = sortperm(ome.a)
hash(hash(hash(hash(ome.a[p]), hash(ome.o)), hash(ome.b[p])), h)
end
"""
iszero(me::EnergyExpressions.OrbitalMatrixElement{2,<:SpinOrbital{<:Orbital},CoulombInteraction,<:SpinOrbital{<:Orbital}})
The matrix element vanishes if the (non-relativistic) spin-orbitals
associated with the same coordinate do not have the same spin.
"""
Base.iszero(me::OrbitalMatrixElement{2,A,<:CoulombInteraction,B}) where {A<:SpinOrbital{<:Orbital},B<:SpinOrbital{<:Orbital}} =
me.a[1].m[2] != me.b[1].m[2] || me.a[2].m[2] != me.b[2].m[2]
export OneBodyHamiltonian, FieldFreeOneBodyHamiltonian, CoulombInteraction, CoulombPotential
function compare_vectors(a, b)
na = length(a)
nb = length(b)
# We don't bother with the case when one element in a is formally the
# same as the sum of multiple elements in b.
na == nb == 0 && return true
# We need to keep track of which elements we have already matched,
# such that we do not use the same element in b multiple times to
# match different elements in a.
is = Vector(1:nb)
test_element(t) = findfirst(i -> t == b[i], is)
for e in a
iszero(e) && continue
i = test_element(e)
isnothing(i) && return false
deleteat!(is, i)
end
isempty(is) || all(i -> iszero(b[i]), is)
end
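# A minimal sketch: zero elements are ignored and ordering is irrelevant,
# but multiplicity matters.
@assert compare_vectors([1, 2, 0], [2, 1])
@assert compare_vectors([1], [1, 0])
@assert !compare_vectors([1, 1], [1])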
import AtomicLevels: AbstractOrbital, symmetry
"""
Conjugate(orbital)
Type representing the conjugation of an `orbital`.
# Examples
```jldoctest
julia> Conjugate(:a)
:a†
```
"""
struct Conjugate{O}
orbital::O
end
function Base.show(io::IO, co::Conjugate{O}) where O
show(io, co.orbital)
write(io, "†")
end
"""
conj(o::AbstractOrbital)
Convenience function to conjugate an `AbstractOrbital`.
# Examples
```jldoctest
julia> conj(o"1s")
1s†
```
"""
Base.conj(o::O) where {O<:AbstractOrbital} = Conjugate(o)
"""
conj(o::Conjugate)
Convenience function to unconjugate a conjugated orbital.
# Examples
```jldoctest
julia> conj(Conjugate(:a))
:a
```
"""
Base.conj(co::Conjugate{O}) where O = co.orbital
AtomicLevels.symmetry(co::Conjugate{O}) where O = symmetry(co.orbital)
export Conjugate
"""
NBodyEquation{N,O}(orbital, operator::NBodyOperator[, factor::NBodyTerm])
Equation for an `orbital`, acted upon by an operator, which may be a
single-particle operator, or an N-body operator, contracted over all
coordinates but one, and optionally multiplied by an
[`NBodyTerm`](@ref), corresponding to overlaps/matrix elements between
other orbitals.
"""
struct NBodyEquation{QO<:OneBodyOperator,O}
orbital::O
operator::QO
factor::NBodyTerm
NBodyEquation(orbital::O, operator::QO,
factor::NBodyTerm=one(NBodyTerm)) where {QO<:OneBodyOperator,O} =
new{QO,O}(orbital, operator, factor)
end
Base.iszero(nbe::NBodyEquation) = iszero(nbe.factor)
Base.zero(::Type{NBodyEquation}) = NBodyEquation(:vac, IdentityOperator{1}(), zero(NBodyTerm))
Base.:(*)(nbe::NBodyEquation, factor) =
NBodyEquation(nbe.orbital, nbe.operator, nbe.factor*factor)
Base.:(*)(factor, nbe::NBodyEquation) = nbe*factor
Base.:(-)(nbe::NBodyEquation) = -1*nbe
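# A minimal sketch (assuming multiplication of an NBodyTerm by a plain
# number yields an NBodyTerm again): scaling an equation only touches its
# factor, not the orbital or the operator.
let eq = NBodyEquation(:a, OneBodyHamiltonian()), eq2 = 2eq
    @assert eq2.orbital == eq.orbital && eq2.operator == eq.operator
    @assert eq2.factor == 2 * eq.factor
end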
function Base.show(io::IO, eq::NBodyEquation)
show(io, eq.factor, show_sign=true)
eq.orbital isa Conjugate && write(io, "⟨$(conj(eq.orbital))|")
show(io, eq.operator)
!(eq.orbital isa Conjugate) && write(io, "|$(eq.orbital)⟩")
end
function Base.:(+)(a::NBodyEquation, b::NBodyEquation)
factor = a.factor + b.factor
if a.orbital == b.orbital && a.operator == b.operator && factor isa NBodyTerm
NBodyEquation(a.orbital, a.operator, factor)
else
LinearCombinationEquation([a]) + b
end
end
Base.:(==)(a::NBodyEquation, b::NBodyEquation) =
a.orbital == b.orbital && a.operator == b.operator && a.factor == b.factor
function add_equations!(equations::AbstractVector{<:NBodyEquation}, eq::NBodyEquation)
i = findfirst(a -> a.orbital == eq.orbital && a.operator == eq.operator,
equations)
if isnothing(i)
push!(equations, eq)
else
factor = eq.factor+equations[i].factor
if iszero(factor)
deleteat!(equations, i)
elseif factor isa NBodyTerm
# If the resulting multiplicative factor is something
# simple, like a plain number, we can combine the terms
# into one.
equations[i] = NBodyEquation(eq.orbital, eq.operator, factor)
else
# However, if we end with something complicated —
# e.g. variation of R⁰(a,a;a,b)⟨a|a⟩ + R⁰(a,a;a,b) with
# respect to ⟨a| would yield five terms
#
# + ⟨a|a⟩r⁻¹×Y⁰(a,b)|a⟩ + ⟨a|a⟩r⁻¹×Y⁰(a,a)|b⟩ + R⁰(a,a;a,b)𝐈₁|a⟩ + 1r⁻¹×Y⁰(a,b)|a⟩ + 1r⁻¹×Y⁰(a,a)|b⟩,
#
# two of which we could group as
#
# + 1r⁻¹×Y⁰(a,b)|a⟩ (1 + ⟨a|a⟩)
#
# — we simply treat them as separate terms. We do this
# because it's easier to implement, but also because we
# probably want to handle the terms separately (or do
# we?). In any case, this should be pretty rare.
push!(equations, eq)
end
end
equations
end
"""
LinearCombinationEquation(equations)
A type representing a linear combination of
[`NBodyEquation`](@ref)s. Typically arises when varying a multi-term
energy expression.
"""
struct LinearCombinationEquation
equations::Vector{<:NBodyEquation}
end
Base.zero(::Type{LinearCombinationEquation}) = LinearCombinationEquation(NBodyEquation[])
Base.zero(::LinearCombinationEquation) = zero(LinearCombinationEquation)
Base.iszero(eq::LinearCombinationEquation) = isempty(eq.equations) || all(iszero, eq.equations)
Base.convert(::Type{LinearCombinationEquation}, eq::NBodyEquation) =
LinearCombinationEquation([eq])
function Base.show(io::IO, eq::LinearCombinationEquation)
if iszero(eq)
write(io, "0")
return
end
noprintterms = 2
terms = if get(io, :limit, false) && length(eq.equations) > 2noprintterms
vcat(eq.equations[1:noprintterms], "…", eq.equations[end-(noprintterms-1):end])
else
eq.equations
end
write(io, join(string.(terms), " "))
end
function Base.:(*)(eq::LinearCombinationEquation, factor)
equations = [NBodyEquation(nbe.orbital, nbe.operator, nbe.factor*factor)
for nbe in eq.equations]
LinearCombinationEquation(equations)
end
Base.:(*)(factor, eq::LinearCombinationEquation) = eq*factor
Base.:(-)(eq::LinearCombinationEquation) = -1*eq
function Base.:(+)(a::LinearCombinationEquation, b::NBodyEquation)
T = promote_type(eltype(a.equations), typeof(b))
LinearCombinationEquation(add_equations!(Vector{T}(copy(a.equations)), b))
end
Base.:(+)(a::NBodyEquation, b::LinearCombinationEquation) = b + a
function add_equations!(equations::AbstractVector{<:NBodyEquation}, eqs::LinearCombinationEquation)
foreach(Base.Fix1(add_equations!, equations), eqs.equations)
equations
end
function Base.:(+)(a::LinearCombinationEquation, b::LinearCombinationEquation)
T = promote_type(eltype(a.equations), eltype(b.equations))
LinearCombinationEquation(add_equations!(Vector{T}(copy(a.equations)), b))
end
Base.:(==)(a::LinearCombinationEquation, b::LinearCombinationEquation) =
compare_vectors(a.equations, b.equations)
function Base.:(==)(a::LinearCombinationEquation, b::NBodyEquation)
length(a.equations) == 0 && iszero(b) && return true
length(a.equations) == 1 || return false
a.equations[1] == b
end
Base.:(==)(a::NBodyEquation, b::LinearCombinationEquation) = (b == a)
function Base.:(-)(a::LinearCombinationEquation, b::LinearCombinationEquation)
iszero(b) && return a
iszero(a) && return -b
eqs = Vector{NBodyEquation}(copy(a.equations))
for e in b.equations
i = findfirst(ae -> ae.orbital == e.orbital && ae.operator == e.operator, eqs)
if isnothing(i)
push!(eqs, -e)
else
factor = eqs[i].factor - e.factor
if iszero(factor)
deleteat!(eqs, i)
elseif factor isa NBodyTerm
eqs[i] = NBodyEquation(e.orbital, e.operator, factor)
else
push!(eqs, -e)
end
end
end
LinearCombinationEquation(eqs)
end
Base.:(-)(a::LinearCombinationEquation, b::NBodyEquation) = a - LinearCombinationEquation([b])
Base.:(-)(a::NBodyEquation, b::LinearCombinationEquation) = LinearCombinationEquation([a]) - b
Base.:(-)(a::NBodyEquation, b::NBodyEquation) = LinearCombinationEquation([a]) - LinearCombinationEquation([b])
export NBodyEquation, LinearCombinationEquation
"""
coupled_states(E[; i₀=1])
Find all states coupled by the energy expression `E`, starting from
the state with index `i₀`. This can be useful to reduce the necessary
basis or to generate invariant sets for split-operator propagation.
"""
function coupled_states(E::AbstractSparseMatrix; i₀=1)
m = size(E,1)
visited = falses(m)
visited[i₀] = true
rows = rowvals(E)
istack = [i₀]
while !isempty(istack)
icur = pop!(istack)
neighbours = rows[nzrange(E, icur)]
for n in neighbours
if !visited[n]
push!(istack, n)
visited[n] = true
end
end
end
visited
end
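# A minimal sketch: states 1 and 2 are mutually coupled, state 3 is only
# coupled to itself.
let E = sparse([1 1 0; 1 1 0; 0 0 1])
    @assert coupled_states(E) == [true, true, false]
    @assert coupled_states(E; i₀=3) == [false, false, true]
end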
"""
invariant_sets(E)
Generate a list of all invariant sets, i.e. configurations that are
coupled through the matrix elements of `E`.
# Example
```julia-repl
julia> E = sparse([1 1 0; 1 1 0; 0 0 1])
3×3 SparseMatrixCSC{Int64, Int64} with 5 stored entries:
1 1 ⋅
1 1 ⋅
⋅ ⋅ 1
julia> invariant_sets(E)
2-element Vector{Vector{Int64}}:
[1, 2]
[3]
```
"""
function invariant_sets(E::AbstractSparseMatrix)
m = size(E,1)
visited = falses(m)
sets = Vector{Vector{Int}}()
while !all(visited)
icur = findfirst(.!visited)
set = coupled_states(E; i₀=icur)
visited[:] .|= set
j = findall(set)
# If the state icur is the only one in its set, it may be that
# it is actually not coupled to anything.
length(j) == 1 && iszero(E[icur,j]) && continue
push!(sets, j)
end
sets
end
export coupled_states, invariant_sets
# A wrapper around a Dict that remembers at which index a certain key
# was inserted, and returns this index when queried. When asked for
# all keys, they are returned in insertion order. Should probably be
# deprecated in favour of OrderedDict at some point.
struct KeyTracker{T,Data<:AbstractDict{T,Int}}
data::Data
end
KeyTracker{T}() where T = KeyTracker(Dict{T,Int}())
Base.get!(kt::KeyTracker, i) =
get!(kt.data, i, length(kt.data)+1)
function Base.keys(kt::KeyTracker)
kv = collect(pairs(kt.data))
first.(kv)[sortperm(last.(kv))]
end
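# A minimal sketch: keys are numbered in insertion order, and re-inserting
# an existing key returns its original index.
let kt = KeyTracker{Symbol}()
    @assert get!(kt, :x) == 1
    @assert get!(kt, :y) == 2
    @assert get!(kt, :x) == 1
    @assert keys(kt) == [:x, :y]
end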
struct LockedDict{K,V,Lock} <: AbstractDict{K,V}
d::Dict{K,V}
lock::Lock
end
LockedDict{K,V}() where {K,V} = LockedDict(Dict{K,V}(), ReentrantLock())
Base.pairs(d::LockedDict) =
lock(d.lock) do
pairs(d.d)
end
Base.length(d::LockedDict) =
lock(d.lock) do
length(d.d)
end
Base.isempty(d::LockedDict) =
lock(d.lock) do
isempty(d.d)
end
Base.iterate(d::LockedDict, args...) =
lock(d.lock) do
iterate(d.d, args...)
end
function Base.get!(d::LockedDict, i, default)
    # Reading d.d outside the lock would race with concurrent insertions
    # (which may rehash the table), so we always acquire the lock.
    lock(d.lock) do
        get!(d.d, i, default)
    end
end
"""
above_diagonal_loop(N, itersym, imax, args...)
Generate `N` Cartesian loops for the iteration variables
`itersym_{1:N}`, where `itersym_N ∈ 1:imax`, `itersym_{N-1} ∈
itersym_N+1:imax`, etc, i.e. above the hyper-diagonal of the
`N`-dimensional hypercube with the side `imax`. `args...` is passed on
to `Base.Cartesian._nloops`. `above_diagonal_loop` is nestable.
# Examples
```jldoctest
julia> @above_diagonal_loop 2 i 3 begin
println("==================================")
println("i = ", Base.Cartesian.@ntuple 2 i)
@above_diagonal_loop 2 j 3 begin
println("j = ", Base.Cartesian.@ntuple 2 j)
end
end
==================================
i = (2, 1)
j = (2, 1)
j = (3, 1)
j = (3, 2)
==================================
i = (3, 1)
j = (2, 1)
j = (3, 1)
j = (3, 2)
==================================
i = (3, 2)
j = (2, 1)
j = (3, 1)
j = (3, 2)
```
"""
macro above_diagonal_loop(N, itersym, imax, args...)
lim = Expr(:call, :(:), Expr(:call, :(+), Expr(:curly, Symbol("$(itersym)_"), Expr(:call, :(+), :d, 1)), 1), imax)
rng = Expr(:(->), :d, Expr(:block, Expr(:if, :(d == $N), :(1:$imax), lim)))
Base.Cartesian._nloops(N, itersym, rng, args...)
end
"""
anti_diagonal_loop(N, itersym, imax, args...)
Generate `N` Cartesian loops for the iteration variables
`itersym_{1:N}`, where `itersym_N ∈ 1:imax`, `itersym_{N-1} ∈
1:imax\\itersym_N`, etc, i.e. no two iteration variables have the same
values simultaneously. `args...` is passed on to
`Base.Cartesian._nloops`; however, `preexpr` is already used to skip
the diagonal elements. `anti_diagonal_loop` is nestable.
# Examples
```jldoctest
julia> @anti_diagonal_loop 3 i 3 begin
println("-----------------------------")
t = (Base.Cartesian.@ntuple 3 i)
println("\$t: ", allunique(t))
@anti_diagonal_loop 2 j 2 begin
u = (Base.Cartesian.@ntuple 2 j)
println("\$u: ", allunique(u))
end
end
-----------------------------
(3, 2, 1): true
(2, 1): true
(1, 2): true
-----------------------------
(2, 3, 1): true
(2, 1): true
(1, 2): true
-----------------------------
(3, 1, 2): true
(2, 1): true
(1, 2): true
-----------------------------
(1, 3, 2): true
(2, 1): true
(1, 2): true
-----------------------------
(2, 1, 3): true
(2, 1): true
(1, 2): true
-----------------------------
(1, 2, 3): true
(2, 1): true
(1, 2): true
```
"""
macro anti_diagonal_loop(N, itersym, imax, args...)
rng = :(d -> 1:$imax)
# The preexpr body generates tests for inner loops that are true
# if the inner loop variable equals any of the outer ones; then
# that iteration is skipped.
prebody = Expr(:(->), :d,
Expr(:call, :(==),
Symbol("$(itersym)_e"),
Expr(:curly, Symbol("$(itersym)_"), :($N - d + 1))))
pre = :(e -> (Base.Cartesian.@nany $N-e $prebody) && continue)
Base.Cartesian._nloops(N, itersym, rng, pre, args...)
end
count_nonzeros(IJ, mn) = [count(isequal(ij), IJ) for ij in 1:mn]
"""
nonzero_minors(N, overlap) -> (ks,ls)
Find all (distinct) minor determinants of order `N` of the orbital
`overlap` matrix that do not vanish, i.e. all non-vanishing minors are
guaranteed to be present, but not all of the returned minors are
guaranteed to be non-zero. Vanishing minors returned arise when the
overlap matrix is rank deficient, which is unlikely to happen when
computing energy expressions, but must still be guarded against. This
is most easily checked by actually calculating the [`cofactor`](@ref),
which is most likely desired anyway.
"""
function nonzero_minors(N::Integer, overlap::AbstractSparseMatrix)
ks = Vector{Vector{Int}}()
ls = Vector{Vector{Int}}()
(I,J,V) = findnz(overlap)
m,n = size(overlap)
m == n || throw(ArgumentError("Overlap matrix must be square"))
# First deal with trivial special cases.
N > m && return ks, ls
if N == m
push!(ks, collect(1:m))
push!(ls, collect(1:m))
return ks, ls
end
# Number of non-zero matrix elements for each row/column.
nzrowcount = count_nonzeros(I, m)
nzcolcount = count_nonzeros(J, n)
# Vanishing rows/columns.
zrows = findall(iszero, nzrowcount)
zcols = findall(iszero, nzcolcount)
# Minimum order of Γ⁽ᵖ⁾
prmin = length(zrows)
pcmin = length(zcols)
pmin = max(prmin, pcmin)
# Then deal with next set of trivial special cases.
N < pmin && return ks, ls
if N == prmin && N == pcmin
push!(ks, zrows)
push!(ls, zcols)
return ks, ls
end
# The general case.
Nr = N - prmin
Nc = N - pcmin
# Find all rows supporting a specific column j.
I′ = [I[findall(isequal(j), J)] for j in 1:n]
# Find all columns supporting a specific row i.
J′ = [J[findall(isequal(i), I)] for i in 1:m]
for k in combinations(unique(I),Nr)
# Find all columns supported by any of the rows in the
# combination k.
cols = unique(vcat([J′[i] for i in k]...))
# For each combination k of rows, we can at most strike
# out Nc columns.
colbudget = Nc
colsmuststrike = Vector{Int}()
for j in cols
# Test if column j is solely supported by the row
# combination k.
colsupport = setdiff(I′[j], k)
if isempty(colsupport)
colbudget -= 1
colbudget < 0 && break
push!(colsmuststrike, j)
end
end
colbudget < 0 && continue
Ncm = length(colsmuststrike)
# The columns we must strike out fill the column budget.
if Ncm == Nc
push!(ks, sort(vcat(k,zrows)))
push!(ls, sort(vcat(colsmuststrike,zcols)))
continue
end
# Find all combinations of the remaining columns.
colchoices = combinations(setdiff(unique(J), colsmuststrike), Nc-Ncm)
for l in colchoices
# Find all rows that would be affected if column
# combination l is stricken out.
colsupport = unique(vcat([I′[j] for j in l]...))
supported = true
for i in colsupport
# If the row i supporting one of the columns in the
# column combination l is already a candidate to be
# stricken out, it does not matter if its support
# vanishes.
i ∈ k && continue
# Otherwise, if its support vanishes, then l is an
# unviable column combination.
rowsupport = setdiff(J′[i], l)
if isempty(rowsupport)
supported = false
break
end
end
if supported
push!(ks, sort(vcat(k,zrows)))
push!(ls, sort(vcat(l,colsmuststrike,zcols)))
end
end
end
ks, ls
end
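# A minimal structural sketch: diagonal overlaps plus a single
# non-orthogonality ⟨2|3⟩; only the sparsity pattern matters here. For
# N = 1, four minors survive (compared order-independently below, since
# the iteration order is an implementation detail).
let S = sparse([1 0 0; 0 1 1; 0 0 1])
    ks, ls = nonzero_minors(1, S)
    @assert Set(zip(ks, ls)) ==
        Set([([1], [1]), ([2], [2]), ([3], [2]), ([3], [3])])
end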
"""
MCTerm(i, j, coeff, operator, source_orbital, integrals=[])
Represents one term in the multi-configurational expansion. `i` and
`j` are indices in the mixing-coefficient vector c (which is subject
to optimization, and thus has to be referred to), `coeff` is an
additional coefficient, and `integrals` is a list of indices into the
vector of common integrals, the values of which should be multiplied
to form the overall coefficient.
"""
struct MCTerm{T,QO,O}
i::Int
j::Int
coeff::T
operator::QO
source_orbital::O
integrals::Vector{Int}
end
Base.:(==)(a::MCTerm, b::MCTerm) =
a.i == b.i && a.j == b.j &&
a.coeff == b.coeff && a.operator == b.operator &&
a.source_orbital == b.source_orbital &&
sort(a.integrals) == sort(b.integrals)
"""
OrbitalEquation(orbital, equation,
one_body, direct_terms, exchange_terms, source_terms)
Represents the integro-differential equation for `orbital`, expressed
as a linear combination of the different terms, with pointers to the
list of common integrals that is stored by the encompassing
[`MCEquationSystem`](@ref) object.
"""
struct OrbitalEquation{O,Equation}
orbital::O
equation::Equation
terms::Vector{Pair{Int,Vector{MCTerm}}}
end
Base.:(==)(a::OrbitalEquation, b::OrbitalEquation) =
a.orbital == b.orbital &&
a.equation == b.equation &&
a.terms == b.terms
function Base.show(io::IO, oeq::OrbitalEquation)
write(io, "OrbitalEquation($(oeq.orbital)): ")
show(io, oeq.equation)
write(io, "\n")
end
Base.iszero(oeq::OrbitalEquation) = iszero(oeq.equation)
"""
MCEquationSystem(equations, integrals)
Represents a coupled system of integro-differential `equations`,
resulting from the variation of a multi-configurational
[`EnergyExpression`](@ref) with respect to all constituent
orbitals. All `integrals` that are in common between the `equations`
need only be computed once per iteration, for efficiency.
"""
struct MCEquationSystem
equations
integrals
end
function Base.show(io::IO, mceqs::MCEquationSystem)
neq = length(mceqs.equations)
nnzeq = count(!iszero, mceqs.equations)
nint = length(mceqs.integrals)
write(io, "$(neq)-equation MCEquationSystem, $(nnzeq) non-zero equations, $(nint) common integrals")
end
function Base.show(io::IO, mime::MIME"text/plain", mceqs::MCEquationSystem)
show(io, mceqs)
println(io)
nd = length(digits(length(mceqs.equations)))
for (i,eq) in enumerate(mceqs.equations)
iszero(eq) && continue
printfmt(io, "- {1:>$(nd)d}: ", i)
show(io, mime, eq)
end
end
"""
pushifmissing!(vector, element)
Push `element` to the end of `vector`, if not already present. Returns
the index of `element` in `vector`.
"""
function pushifmissing!(v::Vector, element)
i = findfirst(isequal(element), v)
isnothing(i) || return i
push!(v, element)
length(v)
end
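# A minimal sketch: the returned index always points at `element`, whether
# it was already present or has just been appended.
let v = [:a, :b]
    @assert pushifmissing!(v, :b) == 2 && v == [:a, :b]
    @assert pushifmissing!(v, :c) == 3 && v == [:a, :b, :c]
end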
"""
orbital_equation(E::EnergyExpression, orbital, integrals::Vector)
Generate the [`OrbitalEquation`](@ref) governing `orbital` by varying
the [`EnergyExpression`](@ref) `E`, and storing common expressions in
`integrals`.
"""
function orbital_equation(E::EM, orbital::O, integrals::KeyTracker) where {EM<:EnergyExpression,O}
equation = diff(E, orbital)
# Complementary orbital
comp_orbital = orbital isa Conjugate ? orbital.orbital : Conjugate(orbital)
terms = Dict{Int,Vector{MCTerm}}()
# Invert matrix coordinate -> equation mapping, i.e. gather all
# coordinates for which a specific NBodyEquation appears.
for (i,j,eq) in zip(findnz(equation)...)
for subeq = eq.equations
operator = subeq.operator
source_orbital = subeq.orbital
coeff = MCTerm(i,j,subeq.factor.coeff,
operator, source_orbital,
map(factor -> get!(integrals, factor),
subeq.factor.factors) |> Vector{Int})
# If the operator isa ContractedOperator, an integral has
# to be performed, which can potentially be reused among
# common terms. This is only possible, however, if the
# orbital under consideration is not inside the integrand;
# if it is, we are dealing with an integral operator,
# which has to be reevaluated each time it is applied to
# an orbital.
integral = (operator isa ContractedOperator && comp_orbital ∉ operator) ?
get!(integrals, operator) : 0
terms[integral] = push!(get(terms, integral, MCTerm[]), coeff)
end
end
OrbitalEquation(comp_orbital, equation, collect(pairs(terms)))
end
"""
diff(energy_expression, orbitals)
Derive the integro-differential equations for all `orbitals`, from
`energy_expression`. Returns a [`MCEquationSystem`](@ref), that
gathers information on which integrals are common to all equations,
for efficient equation solving.
"""
function Base.diff(E::EM, orbitals::VO; verbosity=0) where {EM<:EnergyExpression, O,VO<:AbstractVector{O}}
# Vector of common integrals
integrals = KeyTracker(LockedDict{Any,Int}())
norb = length(orbitals)
p = if verbosity > 0
@info "Deriving equations for $(norb) orbitals"
Progress(norb)
end
equations = [OrbitalEquation[] for i in 1:Threads.nthreads()]
# Derive the equations in parallel; `integrals` is a thread-safe
# LockedDict and each thread pushes to its own bucket.
Threads.@threads for i = 1:length(orbitals)
tid = Threads.threadid()
orbital = orbitals[i]
eq = orbital_equation(E, orbital, integrals)
isnothing(p) || ProgressMeter.next!(p)
push!(equations[tid], eq)
end
equations = reduce(vcat, equations)
MCEquationSystem(equations, keys(integrals))
end
"""
diff(fun!, energy_expression, orbitals)
Derive the integro-differential equations for all `orbitals`, from
`energy_expression`; after each orbital equation has been generated
`fun!` is applied to `energy_expression` with the current orbital as
argument, which allows gradual modification of the energy
expression. Returns a [`MCEquationSystem`](@ref), that gathers
information on which integrals are common to all equations, for
efficient equation solving.
"""
function Base.diff(fun!::Function, E::EM, orbitals::VO; verbosity=0) where {EM<:EnergyExpression, O,VO<:AbstractVector{O}}
# Vector of common integrals
integrals = KeyTracker{Any}()
E = deepcopy(E)
norb = length(orbitals)
p = if verbosity > 0
@info "Deriving equations for $(norb) orbitals"
Progress(norb)
end
equations = map(orbitals) do orbital
eq = orbital_equation(E, orbital, integrals)
fun!(E, orbital)
isnothing(p) || ProgressMeter.next!(p)
eq
end
MCEquationSystem(equations, keys(integrals))
end