{"size":4945,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"#Make comparison plot with theoretical model\nfunction make_energy_plot( p::Param ; \n theory_comparison = :true,\n gamma_comparison = :false,\n gamma_list = nothing, \n gamma_legend = nothing, \n mscale = log10 ,\n oname = \"default_name\",\n xlims = nothing,\n ylims = nothing ,\n energy_data_every = 10)\n\n #Welcome information\n println(\"\\n \\n\") \n println(\"___________________________________________________________\") \n println(\" \")\n println(\" Making Energy Plot from file: \/data\" , p.folder ,p.fname)\n println(\" \")\n\n #Default values\n my_xlims = xlims\n my_ylims = ylims\n\n if( xlims === nothing )\n my_xlims = [0 , p.tmax]\n end\n if( ylims === nothing )\n my_ylims = [1E-4 , 5.0]\n end\n\n #Activatecairo\n set_my_cairo_theme()\n\n #Create figure\n fig = Figure(resolution = (650, 400))\n ax1 = fig[1, 1] = Axis(fig , yscale = mscale)\n\n if( theory_comparison)\n \n #Theoretical model\n x , y = get_simple_theoretical_model_data(p0 , my_xlims[2])\n lines!(ax1 , x , y , linewidth = 1 , linestyle = :dash , color = \"red\" , label = \"Model\")\n\n end\n\n if(gamma_comparison)\n\n for (i,g) in enumerate(gamma_list)\n \n mcolor = [ \"green\" , \"blue\", \"red\"]\n \n #Theoretical model\n x = 0:0.1:my_xlims[2]\n y = exp.((-2*g).*x)\n lines!(ax1 , x , y, linewidth = 1 , linestyle = :dash , color = mcolor[i] , label = gamma_legend[i] )#(\"\u03b3 = \" * \"$(g).\") )\n \n end\n end\n\n #Actual data \n x , y = get_energy(p , energy_data_every , p.tmax)\n lines!(ax1 , x , y\/y[1] , linewidth = 1 , linestyle = :solid , color = \"black\", label = \"Simulation\")\n\n #Configure plot\n ax1.xlabel = \"Time\"\n ax1.ylabel = \"Energy Density [ \u03f5(t) \/ \u03f5(0) ]\"\n\n limits!(ax1 , my_xlims[1] , my_xlims[2] , my_ylims[1] , my_ylims[2] ) \n\n #Legend\n axislegend(ax1 , position = :lb)\n \n fdata = \".\/data\"\n outname = fdata * p.folder * \"$(oname).png\"\n\n #outname = abspath(p.folder, p.fname * \".png\")\n\n save(outname, fig, px_per_unit = 10)\n\n println(\" Image saved to: \" , outname )\n\n println(\"___________________________________________________________\") \n\nend\n\n#Make plot for several parameters\nfunction make_energy_plot_params( s , legends ; \n gamma_comparison = :false,\n gamma_list = nothing, \n gamma_legend = nothing, \n mscale = log10 ,\n oname = \"default_name\",\n xlims = nothing,\n ylims = nothing ,\n energy_data_every = 10,\n plot_title = \"\")\n\n #Welcome information\n println(\"\\n \\n\") \n println(\"___________________________________________________________\") \n println(\" \")\n println(\" Making Energy Plot from files: \")\n \n for p in s\n println(\"\/data\" , p.folder ,p.fname)\n end\n \n println(\" \")\n\n #Default values\n my_xlims = xlims\n my_ylims = ylims\n\n if( xlims === nothing )\n my_xlims = [0 , s[1].tmax]\n end\n \n if( ylims === nothing )\n my_ylims = [1E-5 , 5.0]\n end\n\n #Activatecairo\n set_my_cairo_theme()\n \n #Create figure\n fig = Figure(resolution = (650, 400) )\n ax1 = fig[1, 1] = Axis(fig , yscale = mscale , title = plot_title )\n\n pallete = [\"#111111\", \"#65ADC2\", \"#233B43\", \"#E84646\", \"#C29365\", \"#362C21\", \"#316675\", \"#168E7F\", \"#109B37\"]\n #colormap( \"Oranges\" , 10) \n \n for (i , p) in enumerate(s)\n\n x , y = get_energy(p , energy_data_every , p.tmax)\n \n lines!(ax1 , x , y.\/y[1] , color = pallete[4+i] , linewidth = 1, label = legends[i])\n\n end\n\n #Gamma comparison\n if(gamma_comparison)\n\n for (i,g) in 
enumerate(gamma_list)\n \n #Theoretical model\n x = 0:0.1:3000\n y = exp.((-2*g).*x)\n lines!(ax1 , x , y, linewidth = 1 , linestyle = :dash , color = pallete[4+i] )#(\"\u03b3 = \" * \"$(g).\") )\n \n end\n end\n\n\n #Configure plot\n ax1.xlabel = \"Time\"\n ax1.ylabel = \"Energy Density [ \u03f5(t) \/ \u03f5(0) ]\"\n\n limits!(ax1 , my_xlims[1] , my_xlims[2] , my_ylims[1] , my_ylims[2] ) \n\n #Legend\n axislegend(ax1 ,position = :rt)\n \n fdata = \".\/data\/\"\n outname = fdata * \"$(oname).png\"\n save(outname, fig, px_per_unit = 10)\n\n println(\" Image saved to: \" , outname )\n\n println(\"___________________________________________________________\") \n\nend\n","avg_line_length":28.75,"max_line_length":136,"alphanum_fraction":0.5023255814} {"size":1921,"ext":"jl","lang":"Julia","max_stars_count":183.0,"content":"## Plot recipe\n## define a hueristic to work around asymptotes\n## just sort of succesful\n@recipe function f(pq::AbstractRationalFunction{T}, a=nothing, b=nothing) where {T}\n\n xlims = get(plotattributes, :xlims, (nothing,nothing))\n ylims = get(plotattributes, :ylims, (nothing, nothing))\n rational_function_trim(pq, a, b, xlims, ylims) \n\nend\n\nisapproxreal(x::Real) = true\nisapproxreal(x::Complex{T}) where {T} = imag(x) <= sqrt(eps(real(T)))\nfunction toobig(pq)\n x -> begin\n y = pq(x)\n isinf(y) && return true\n isnan(y) && return true\n abs(y) > 1e8 && return true\n return false\n end\nend\n\nfunction rational_function_trim(pq, a, b, xlims, ylims)\n\n p,q = lowest_terms(\/\/(pq...), method=:numerical)\n dpq = derivative(p\/\/q)\n p\u2032,q\u2032 = lowest_terms(dpq)\n\n \u03bbs = Multroot.multroot(q).values\n \u03bbs = isempty(\u03bbs) ? \u03bbs : real.(filter(isapproxreal, \u03bbs))\n\n cps = Multroot.multroot(p\u2032).values\n cps = isempty(cps) ? cps : real.(filter(isapproxreal, cps))\n cps = isempty(cps) ? cps : filter(!toobig(pq), cps)\n\n a = a == nothing ? xlims[1] : a\n b = b == nothing ? xlims[2] : b\n \n if a==nothing && b==nothing\n u= poly_interval(p)\n v= poly_interval(q)\n a,b = min(first(u), first(v)), max(last(u), last(v))\n\n if !isempty(\u03bbs)\n a,b = min(a, real(minimum(\u03bbs))), max(b, real(maximum(\u03bbs)))\n end\n if !isempty(cps)\n a,b = min(a, real(minimum(cps))), max(b, real(maximum(cps)))\n end\n a *= (a > 0 ? 1\/1.5 : 1.25)\n b *= (b < 0 ? 1\/1.5 : 1.25)\n end\n\n n = 601\n xs = range(a,stop=b, length=n)\n ys = pq.(xs)\n Mcps = isempty(cps) ? 5 : 3*maximum(abs, pq.(cps))\n M = max(5, Mcps, 1.25*maximum(abs, pq.((a,b))))\n\n lo = ylims[1] == nothing ? -M : ylims[1]\n hi = ylims[2] == nothing ? M : ylims[2]\n ys\u2032 = [lo <= y\u1d62 <= hi ? y\u1d62 : NaN for y\u1d62 \u2208 ys]\n xs, ys\u2032\n\nend\n\n","avg_line_length":28.25,"max_line_length":83,"alphanum_fraction":0.5570015617} {"size":1120,"ext":"jl","lang":"Julia","max_stars_count":7.0,"content":"export iauFal03\n\"\"\"\nFundamental argument, IERS Conventions (2003):\nmean anomaly of the Moon.\n\nThis function is part of the International Astronomical Union's\nSOFA (Standards Of Fundamental Astronomy) software collection.\n\nStatus: canonical model.\n\nGiven:\n t double TDB, Julian centuries since J2000.0 (Note 1)\n\nReturned (function value):\n double l, radians (Note 2)\n\nNotes:\n\n 1. Though t is strictly TDB, it is usually more convenient to use\n TT, which makes no significant difference.\n\n 2. The expression used is as adopted in IERS Conventions (2003) and\n is from Simon et al. (1994).\n\nReferences:\n\n McCarthy, D. D., Petit, G. 
(eds.), IERS Conventions (2003),\n IERS Technical Note No. 32, BKG (2004)\n\n Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M.,\n Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683\n\nThis revision: 2013 June 18\n\nSOFA release 2018-01-30\n\nCopyright (C) 2018 IAU SOFA Board. See notes at end.\n\"\"\"\n\n# double iauFal03(double t)\nfunction iauFal03(t::Real)\n return ccall((:iauFal03, libsofa_c), Cdouble, (Cdouble,), convert(Float64, t))\nend","avg_line_length":26.0465116279,"max_line_length":81,"alphanum_fraction":0.7044642857} {"size":2142,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using Documenter\nusing Catalyst, ModelingToolkit\n\n\n# supposed hack to get mhchem but doesn't seem to work yet...\n# const katex_version = \"0.11.1\"\n# function Documenter.Writers.HTMLWriter.RD.mathengine!(r::Documenter.Utilities.JSDependencies.RequireJS, engine::Documenter.KaTeX)\n# push!(r, Documenter.Utilities.JSDependencies.RemoteLibrary(\n# \"katex\",\n# \"https:\/\/cdnjs.cloudflare.com\/ajax\/libs\/KaTeX\/$(katex_version)\/katex.min.js\"\n# ))\n# push!(r, Documenter.Utilities.JSDependencies.RemoteLibrary(\n# \"mhchem\",\n# \"https:\/\/cdnjs.cloudflare.com\/ajax\/libs\/KaTeX\/$(katex_version)\/contrib\/mhchem.min.js\"\n# ))\n# push!(r, Documenter.Utilities.JSDependencies.RemoteLibrary(\n# \"katex-auto-render\",\n# \"https:\/\/cdnjs.cloudflare.com\/ajax\/libs\/KaTeX\/$(katex_version)\/contrib\/auto-render.min.js\",\n# deps = [\"katex\"],\n# ))\n# push!(r, Documenter.Utilities.JSDependencies.Snippet(\n# [\"jquery\", \"katex\", \"mhchem\", \"katex-auto-render\"],\n# [\"\\$\", \"katex\", \"mhchem\", \"renderMathInElement\"],\n# \"\"\"\n# \\$(document).ready(function() {\n# renderMathInElement(\n# document.body,\n# $(Documenter.Utilities.JSDependencies.json_jsescape(engine.config, 2))\n# );\n# })\n# \"\"\"\n# ))\n# end\n\nmakedocs(\n sitename = \"Catalyst.jl\",\n authors = \"Samuel Isaacson\",\n format = Documenter.HTML(mathengine=Documenter.Writers.HTMLWriter.MathJax(), prettyurls = (get(ENV, \"CI\", nothing) == \"true\")),\n modules = [Catalyst,ModelingToolkit],\n doctest = false,\n clean = true,\n pages = Any[\n \"Home\" => \"index.md\",\n \"Tutorials\" => Any[\n \"tutorials\/using_catalyst.md\",\n \"tutorials\/basics.md\",\n \"tutorials\/models.md\",\n \"tutorials\/basic_examples.md\",\n \"tutorials\/advanced.md\",\n \"tutorials\/generated_systems.md\",\n \"tutorials\/advanced_examples.md\"\n ],\n \"API\" => Any[\n \"api\/catalyst_api.md\"\n ]\n ]\n)\n\ndeploydocs(\n repo = \"github.com\/SciML\/Catalyst.jl.git\";\n push_preview = true\n)","avg_line_length":34.5483870968,"max_line_length":131,"alphanum_fraction":0.6013071895} {"size":3467,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"\r\nfunction calc_doppler_component{T}(lambda::AbstractArray{T,1}, flux::AbstractArray{T,1};\r\n sigmasq_obs::AbstractArray{T,1} = 1e-16*ones(length(lambda)),\tsigmasq_cor::T = 1.0, rho::T = 1.0,\r\n half_chunck_size::Integer = 100)\r\n lambda.*calc_gp_on_segments(predict_deriv,lambda, flux, sigmasq_obs=sigmasq_obs,\tsigmasq_cor=sigmasq_cor, rho=rho, half_chunck_size=half_chunck_size)\r\nend\r\n\r\nfunction calc_gp_on_segments{T}(predict_gp::Function, lambda::AbstractArray{T,1}, flux::AbstractArray{T,1};\r\n sigmasq_obs::AbstractArray{T,1} = 1e-16*ones(length(lambda)),\tsigmasq_cor::T = 1.0, rho::T = 1.0,\r\n half_chunck_size::Integer = 100)\r\n @assert length(lambda) == length(flux) == length(sigmasq_obs)\r\n output = Array(Float64,length(lambda))\r\n num_seg = 
convert(Int64,ceil(length(lambda)\/half_chunck_size)-1)\r\n for i in 1:num_seg\r\n idx_begin = 1+half_chunck_size*(i-1)\r\n idx_end = min(half_chunck_size*(i+1), length(lambda))\r\n write_idx_begin = idx_begin + div(half_chunck_size,2)\r\n write_idx_end = idx_end - div(half_chunck_size,2)\r\n if i==1 write_idx_begin=1 end\r\n if i==num_seg\r\n idx_begin = max(1,idx_end-2*half_chunck_size)\r\n write_idx_end=length(lambda)\r\n end\r\n #println(\"# i= \",i,\": \", idx_begin, \" - \", idx_end, \" -> \", write_idx_begin, \" - \", write_idx_end)\r\n #output[write_idx_begin:write_idx_end] = predict_gp(view(lambda,idx_begin:idx_end), view(flux,idx_begin:idx_end), view(lambda,write_idx_begin:write_idx_end), sigmasq_obs=view(sigmasq_obs,idx_begin:idx_end), sigmasq_cor=sigmasq_cor, rho=rho)\r\n output[write_idx_begin:write_idx_end] = predict_gp(lambda[idx_begin:idx_end], flux[idx_begin:idx_end], lambda[write_idx_begin:write_idx_end], sigmasq_obs=sigmasq_obs[idx_begin:idx_end], sigmasq_cor=sigmasq_cor, rho=rho)\r\n end\r\n output\r\nend\r\n\r\nfunction calc_doppler_component{T}(lambda::AbstractArray{T,1}, flux::AbstractArray{T,2};\r\n sigmasq_obs::AbstractArray{T,1} = 1e-16*ones(length(xobs)),\tsigmasq_cor::T = 1.0, rho::T = 1.0)\r\n doppler_basis = calc_doppler_component(lambda,vec(mean(flux,2)),sigmasq_obs=sigmasq_obs,sigmasq_cor=sigmasq_cor,rho=rho)\r\nend\r\n\r\nfunction calc_doppler_quadratic_term{T}(lambda::AbstractArray{T,1}, flux::AbstractArray{T,1};\r\n sigmasq_obs::AbstractArray{T,1} = 1e-16*ones(length(lambda)),\tsigmasq_cor::T = 1.0, rho::T = 1.0,\r\n half_chunck_size::Integer = 100)\r\n 0.5*lambda.^2.*calc_gp_on_segments(predict_deriv2,lambda, flux, sigmasq_obs=sigmasq_obs,\tsigmasq_cor=sigmasq_cor, rho=rho, half_chunck_size=half_chunck_size)\r\nend\r\n\r\nfunction calc_doppler_quadratic_term{T}(lambda::AbstractArray{T,1}, flux::AbstractArray{T,2};\r\n sigmasq_obs::AbstractArray{T,1} = 1e-16*ones(length(xobs)),\tsigmasq_cor::T = 1.0, rho::T = 1.0)\r\n doppler_quad_term = calc_doppler_quadratic_term(lambda,vec(mean(flux,2)),sigmasq_obs=sigmasq_obs,sigmasq_cor=sigmasq_cor,rho=rho)\r\nend\r\n\r\ndoppler_comp = calc_doppler_component(lambda,obs, sigmasq_obs=1\/150000.0*ones(length(lambda)), rho=0.1)\r\ngenpca_out = fit_gen_pca_eford(obs,doppler_comp)\r\n\r\ndoppler_quadratic_term = calc_doppler_quadratic_term(lambda,obs, sigmasq_obs=1\/150000.0*ones(length(lambda)), rho=0.1)\r\ngen2pca_out = fit_gen2_pca_eford(obs,doppler_comp,doppler_quadratic_term)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","avg_line_length":57.7833333333,"max_line_length":245,"alphanum_fraction":0.6945486011} {"size":189,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"using GapFindFill, Test\n# using JuMP, Gurobi, CPLEX, GLPK\n\nprintln(\"Starting tests....................\")\n@time begin include(\"testGapFind.jl\") end\n@time begin include(\"testGapFill.jl\") end\n","avg_line_length":27.0,"max_line_length":45,"alphanum_fraction":0.6772486772} {"size":6800,"ext":"jl","lang":"Julia","max_stars_count":17.0,"content":"using Parameters: @unpack, @with_kw\n\n\"\"\"\n as(self, ::Type{T}) :: T\n\nManual Go-style type [embedding] as a replacement of \"inheritance from\nconcrete type\".\n\n[embedding]: https:\/\/golang.org\/doc\/effective_go.html?#embedding\n\"\"\"\nas(self, ::Type{T}) where {T} = as(self.super, T) :: T\nas(self::T, ::Type{T}) where {T} = self\n\n\nabstract type AbstractContinuationSolver end\n\nmutable struct ContinuationSolver{P <: AbstractContinuationProblem,\n C <: ContinuationCache,\n S <: 
ContinuationSolution,\n } <: AbstractContinuationSolver\n prob::P\n opts::ContinuationOptions\n cache::C\n sol::S\n i::Int\nend\n\nfunction ContinuationSolver(prob, opts)\n cache = ContinuationCache(prob, opts.h0, opts.direction)\n sol = ContinuationSolution(typeof(cache.u), prob)\n return ContinuationSolver(prob, opts, cache, sol, 0)\nend\n\ninit(prob::AbstractContinuationProblem; kwargs...) =\n ContinuationSolver(prob, ContinuationOptions(; kwargs...))\n\nsolve(prob::AbstractContinuationProblem; kwargs...) =\n solve!(init(prob; kwargs...)).sol\n\nLastPoint(solver::ContinuationSolver) =\n ContinuationLastPoint(solver.sol.sweeps[end], solver.cache.simple_bifurcation)\nLastPoint(solver::AbstractContinuationSolver) =\n LastPoint(as(solver, ContinuationSolver)) # TODO: don't\n\nfunction step!(solver::ContinuationSolver)\n predictor_corrector_step!(solver.cache, solver.opts)\n\n # Errors are now thrown in predictor_corrector_step!. I need to\n # reconsider the error handling...\n if ! solver.cache.adaptation_success\n error(\"Failed to adapt steplength h.\")\n end\n if ! solver.cache.corrector_success\n error(\"Failed in corrector loop.\")\n end\n @debug \"$(solver.i)-th step succeeded\"\n\n record!(solver.sol, solver.cache)\n solver.i += 1\nend\n\nfunction record!(sol, cache)\n push_point!(sol, cache)\nend\n\nfunction step!(f, wrapper::AbstractContinuationSolver, max_steps)\n solver = as(wrapper, ContinuationSolver)\n cache = solver.cache\n cache.h = solver.opts.h0\n @progress_if solver.opts.verbose for _ in 1:max_steps\n step!(wrapper)\n f(LastPoint(wrapper))\n if ! isindomain(cache.u, cache.prob_cache)\n return true\n end\n end\n return false\nend\n\nstruct SweepSetup{uType}\n direction::Int\n u0::uType\n past_points::Array{uType}\n max_steps::Int\nend\n\nSweepSetup(solver::ContinuationSolver;\n direction = solver.opts.direction,\n u0 = get_u0(solver.cache.prob_cache.prob),\n past_points = [],\n max_steps = solver.opts.max_samples,\n h = nothing, # currently ignored # TODO: use it?\n ) =\n SweepSetup(direction, u0, past_points, max_steps)\n\nSweepSetup(solver::AbstractContinuationSolver; kwargs...) =\n SweepSetup(as(solver, ContinuationSolver); kwargs...)\n\nfunction new_sweep!(solver::ContinuationSolver, setup::SweepSetup)\n @unpack direction, u0, past_points = setup\n cache = solver.cache\n cache.direction = direction\n cache.u = u0\n\n new_sweep!(solver.sol, direction)\n for u in past_points\n push_point!(solver.sol, u)\n end\n push_point!(solver.sol, u0)\nend\n\nfunction sweep!(f, solver::AbstractContinuationSolver; kwargs...)\n setup = SweepSetup(solver; kwargs...)\n new_sweep!(solver, setup)\n f(LastPoint(solver))\n step!(f, solver, setup.max_steps)\nend\n\n@with_kw struct NonRootException <: Exception\n msg::String = \"Not a root\"\n H::AbstractVector\n u::AbstractVector\nend\n\nfunction Base.showerror(io::IO, e::NonRootException)\n println(io, e.msg, \": norm(H) = \", norm(e.H))\n println(io, \"H = \", e.H)\n println(io, \"u = \", e.u)\nend\n\nfunction pre_solve!(wrapper::AbstractContinuationSolver)\n solver = as(wrapper, ContinuationSolver)\n opts = solver.opts\n cache = solver.cache\n H = residual!(cache.H, cache.u, cache.prob_cache)\n if ! 
isalmostzero(H, opts.atol)\n if opts.start_from_nearest_root\n @debug \"Finding the nearest root...\"\n cache.u = nearest_root!(cache, opts)\n else\n throw(NonRootException(\"Initial point is not a root\", H, cache.u))\n end\n end\n return wrapper\nend\n\nsolve!(solver::AbstractContinuationSolver) = solving!(donothing, solver)\ndonothing(_) = nothing\n\n\"\"\"\n solving!(f, solver::AbstractContinuationSolver)\n\nSame as `solve!(solve)` but call `f` after each continuation step with\nthe [`LastPoint`](@ref).\n\n# Examples\n```julia\nsolving!(solver) do point\n @show point.i_sweep point.i_point point.u[1] point.u[end]\nend\n```\n\"\"\"\nfunction solving!(f, wrapper::AbstractContinuationSolver)\n pre_solve!(wrapper)\n solver = as(wrapper, ContinuationSolver)\n opts = solver.opts\n cache = solver.cache\n\n @debug \"Starting the first sweep...\"\n u0 = copy(cache.u)\n sweep!(f, wrapper; u0=u0)\n if opts.bidirectional_first_sweep\n @debug \"Starting the second sweep...\"\n sweep!(f, wrapper; u0=u0, direction = solver.opts.direction * -1)\n end\n\n # TODO: Detect the case that the solution is isomorphic to the\n # circle.\n\n bifurcations = vcat(\n (s.simple_bifurcation for s in reverse(solver.sol.sweeps))...\n )\n for _ in 1:opts.max_branches\n if isempty(bifurcations)\n break\n end\n sbint = popfirst!(bifurcations)\n for (u0, u1, direction, h) in new_branches!(cache, opts, sbint)\n if ! (isindomain(u0, cache.prob_cache) &&\n isindomain(u1, cache.prob_cache))\n # Stepped outside the domain. Skip it.\n continue\n end\n @debug \"Starting $(length(solver.sol.sweeps))-th sweep...\"\n sweep!(f, wrapper;\n u0 = u1,\n past_points = [u0],\n direction = direction,\n h = h)\n append!(bifurcations, solver.sol.sweeps[end].simple_bifurcation)\n end\n end\n\n return wrapper\nend\n\n\nfunction residual(u::AbstractArray, cache::AbstractProblemCache)\n H = similar(u[1:end-1])\n return residual!(H, u, cache)\nend\n\nfunction residual(u::SVector, cache::AbstractProblemCache)\n return residual!(nothing, u, cache)\nend\n\n\nfunction residual_jacobian(u::AbstractArray, cache::AbstractProblemCache)\n H = similar(u[1:end-1])\n J = similar(H, (length(H), length(u)))\n return residual_jacobian!(H, J, u, cache)\nend\n\nfunction residual_jacobian(u::SVector, cache::AbstractProblemCache)\n return residual_jacobian!(nothing, nothing, u, cache)\nend\n\n\nfunction residual_jacobian!(cache::ContinuationCache, u::AbstractArray)\n @unpack H, J, prob_cache = cache\n H, J = residual_jacobian!(H, J, u, prob_cache)\n cache.u = u\n cache.H = H\n cache.J = J\n return H, J\nend\n","avg_line_length":28.6919831224,"max_line_length":82,"alphanum_fraction":0.6526470588} {"size":4139,"ext":"jl","lang":"Julia","max_stars_count":27.0,"content":"#g=g_u(uvec)\nf_target(x,Amp,h1,Lt,Ht) = Amp^2*((abs(x[1])Threshold(\u03b2,\u03b7,flag_t,pf))\u2218pfh\n A_mat = MatrixA(ph,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9)\n B_vec = MatrixB(x0,\u03b4,2*\u03c0*Amp,V,d\u03a9,d\u0393)\n u_vec = A_mat\\B_vec\n u_vec\nend\n\n#pf = pf_p(p)\nfunction pf_p(p;r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d,tags,design_tag)\n pvec = p_vec(p,P,tags,design_tag)\n pf = Filter(pvec,r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d)\n pf\nend\n# Chain Rule : dg\/dp = dg\/dg*dg\/du*du\/dpf*dpf\/dp\n# dg\/du=dg\/dg*dg\/du\nfunction rrule(::typeof(g_u),u_vec;Amp,h1,Lt,Ht,U,V,d\u03a9_t)\nfunction g_pullback(dgdg)\n NO_FIELDS, dgdg*Dgdu(u_vec,Amp,h1,Lt,Ht,U,V,d\u03a9_t)\nend\ng_u(u_vec;Amp,h1,Lt,Ht,U,V,d\u03a9_t), g_pullback\nend\n\nfunction Dgdu(u_vec,Amp,h1,Lt,Ht,U,V,d\u03a9_t)\nIt(x) 
= f_target(x,Amp,h1,Lt,Ht)\nuh_t = FEFunction(U,u_vec)\nl_temp(du)=\u222b(4*uh_t*(abs2(uh_t)-It)*du)d\u03a9_t\nassemble_vector(l_temp,V)\nend\n\n# dg\/dpf=dg\/du*du\/dpf\nfunction rrule(::typeof(u_pf),pf;x0,\u03b4,Amp,P,Pf,\u03b2,\u03b7,flag_t,flag_f,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9,d\u0393)\nu_vec = u_pf(pf;x0,\u03b4,Amp,P,Pf,\u03b2,\u03b7,flag_t,flag_f,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9,d\u0393)\nfunction u_pullback(dgdu)\n NO_FIELDS, Dgdpf(dgdu,u_vec,pf,P,Pf,\u03b2,\u03b7,flag_t,flag_f,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9)\nend\nu_vec, u_pullback\nend\n\nD\u03bedp(pf,\u03f5min,\u03f5max,\u03b2,\u03b7,flag_t)=(\u03f5max-\u03f5min)*(!flag_t+flag_t*\u03b2*(1.0-tanh(\u03b2*(pf-\u03b7))^2)\/(tanh(\u03b2*\u03b7)+tanh(\u03b2*(1.0-\u03b7))))\ndG(pfh,u,v,dp,\u03f5min,\u03f5max,k,\u03b2,\u03b7,flag_t) = real(k^2*((pf->D\u03bedp(pf,\u03f5min,\u03f5max,\u03b2,\u03b7,flag_t))\u2218pfh)*(v*u)*dp)\n\nfunction Dgdpf(dgdu,u_vec,pf,P,Pf,\u03b2,\u03b7,flag_t,flag_f,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9)\n if (flag_f)\n pfh = FEFunction(Pf,pf)\n ph = (pf->Threshold(\u03b2,\u03b7,flag_t,pf))\u2218pfh\n A_mat = MatrixA(ph,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9)\n \u03bb_vec = A_mat'\\dgdu\n \n uh = FEFunction(U,u_vec)\n \u03bbh = FEFunction(V,conj(\u03bb_vec))\n l_temp(dp) = \u222b(dG(pfh,uh,\u03bbh,dp,\u03f51,\u03f52,k,\u03b2,\u03b7,flag_t))*d\u03a9\n dgdpf = assemble_vector(l_temp,Pf)\n return dgdpf\n else\n pfh = FEFunction(P,pf)\n ph = (pf->Threshold(\u03b2,\u03b7,flag_t,pf))\u2218pfh\n A_mat = MatrixA(ph,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9)\n \u03bb_vec = A_mat'\\dgdu\n \n uh = FEFunction(U,u_vec)\n \u03bbh = FEFunction(V,conj(\u03bb_vec))\n l_temp(dp) = \u222b(dG(pfh,uh,\u03bbh,dp,\u03f51,\u03f52,k,\u03b2,\u03b7,flag_t))*d\u03a9\n dgdpf = assemble_vector(l_temp,P)\n return dgdpf\n end\nend\n\n# dg\/dp=dg\/dpf*dpf\/dp\nfunction rrule(::typeof(pf_p),p;r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d,tags,design_tag)\nfunction pf_pullback(dgdpf)\n NO_FIELDS, Dgdp(dgdpf,p,r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d,tags,design_tag)\nend\npf_p(p;r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d,tags,design_tag), pf_pullback\nend\n\nfunction Dgdp(dgdpf,p,r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d,tags,design_tag)\nnp = length(p)\nif (flag_f)\n A = assemble_matrix(Pf,Qf) do u, v\n \u222b( a_f(r,u,v))d\u03a9\n end\n \u03bbvec = A'\\dgdpf\n \u03bbh = FEFunction(Pf,\u03bbvec)\n l_temp(dp) = \u222b(\u03bbh*dp)*d\u03a9\n return extract_design(assemble_vector(l_temp,P),np,tags,design_tag)\nelse\n return extract_design(dgdpf,np,tags,design_tag)\nend\nend\n\n# Final objective function\nfunction g_p(p::Vector;x0,\u03b4,Amp,r,flag_f,P,Pf,Qf,\u03b2,\u03b7,flag_t,\n \u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,d\u03a9,d\u0393,d\u03a9_t,d\u0393_d,tags,design_tag)\n pf = pf_p(p;r,flag_f,P,Pf,Qf,d\u03a9,d\u0393_d,tags,design_tag)\n u_vec=u_pf(pf;x0,\u03b4,Amp,P,Pf,\u03b2,\u03b7,flag_t,flag_f,\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,U,V,d\u03a9,d\u0393)\n g_u(u_vec;Amp,h1,Lt,Ht,U,V,d\u03a9_t)\nend\n\nfunction g_p(p::Vector,grad::Vector;x0,\u03b4,Amp,r,flag_f,P,Pf,Qf,\u03b2,\u03b7,flag_t,\n \u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,d\u03a9,d\u0393,d\u03a9_t,d\u0393_d,tags,design_tag)\n if length(grad) > 0\n dgdp, = Zygote.gradient(p->g_p(p;x0,\u03b4,Amp,r,flag_f,P,Pf,Qf,\u03b2,\u03b7,flag_t,\n 
\u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,d\u03a9,d\u0393,d\u03a9_t,d\u0393_d,tags,design_tag),p)\n grad[:] = dgdp\n end\n g_value = g_p(p;x0,\u03b4,Amp,r,flag_f,P,Pf,Qf,\u03b2,\u03b7,flag_t,\n \u03f51,\u03f52,\u03bc,\u03c3s,k,LHp,LHn,dpml,hd,h1,Lt,Ht,U,V,d\u03a9,d\u0393,d\u03a9_t,d\u0393_d,tags,design_tag)\n @show g_value\n return g_value\nend","avg_line_length":32.8492063492,"max_line_length":111,"alphanum_fraction":0.6610292341} {"size":12164,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"\n\"\"\"\n lineararray(xrange, yrange, kx, ky, k0=0) \n \nEvaluate values of linear function ``kx\u22c5x + ky\u22c5y + k0`` on array ``xrange \u00d7 yrange``.\n \n lineararray(size, kx, ky, k0=0) \n\nUse `1:size` as `xrange` and `yrange`.\n\n lineararray(xrange, yrange, a::Vector, k0=0) \n \nUse first and second components of vector `a` as `kx` and `ky`.\n\n\"\"\"\nfunction lineararray(size, kx, ky, k0=0)\n x = (1:size)'\n y = 1:size\n return kx*x .+ ky*y .+ k0\nend\n\nfunction lineararray(xrange::AbstractRange, yrange::AbstractRange, kx, ky, k0=0)\n return kx*xrange' .+ ky*yrange .+ k0\nend\n\nlineararray(xrange::AbstractRange, yrange::AbstractRange, a::Vector, k0=0) = lineararray(xrange,yrange,a...,k0)\n\n\n\"\"\"\n rescale(array) \n\nRescale array between 0 and 1\n\"\"\"\nfunction rescale(array)\n amin, amax = extrema(array)\n s = amin \u2248 amax ? 1 : amax-amin\n return (array .- amin) \/ s\nend\n\n\"\"\"\n rescale_minmax(array) \n\nRescale array between 0 and 1 and return the rescaled array and the min, max values\n\"\"\"\nfunction rescale_minmax(array)\n amin, amax = extrema(array)\n s = amin \u2248 amax ? 1 : amax-amin\n return (array .- amin) \/ s, amin, amax\nend\n\n\n\n\"\"\"\n logrescale(array, \u03b1 = 5)\n\nRescale in log-scalse: maximum will correspond to 1, ``10^{-\u03b1}`` will correspond to 0.\n\"\"\"\nfunction logrescale(array, \u03b1 = 5)\n amin, amax = extrema(array)\n threshold = amax * 10. ^(-\u03b1)\n small = array .< threshold\n ret = copy(array)\n ret[small] .= threshold\n return rescale(log10.(ret))\nend\n\n\"Wrap Phase\" phwrap(x::Float64) = isnan(x) ? NaN : rem2pi(x, RoundNearest)\n# phwrap(::Val{NaN}) = NaN\n\nfunction rotationmatrix(\u03b1)\n [cos(\u03b1) -sin(\u03b1); sin(\u03b1) cos(\u03b1)]\nend\n\n\n\"\"\"\n Generate quadratic array\n\"\"\"\nfunction quadratic(arrsize::Tuple)\n middle = (1 .+ arrsize).\/2\n return reshape(1-middle[1]:arrsize[1]-middle[1], (:,1)).^2 .+ reshape(1-middle[2]:arrsize[2]-middle[2], (1,:)).^2;\nend\n\nquadratic(arrsize::Real) = quadratic((arrsize, arrsize))\n\n\"\"\"\n Create array of zeroes with disk of ones of the relative diameter r\n\"\"\"\nfunction diskmatrix(gridsize::Integer, r=1)\n x = range(-1, 1, length=gridsize)\n y = range(-1, 1, length=gridsize)\n \u03b4 = 0. # tuning of the aperture size\n rd =r + \u03b4 \/gridsize \n ap = [ (xc^2 + yc^2) <= r^2 ? 1 : 0 for yc \u2208 y, xc \u2208 x]\n # area = +(ap[:]...)\n phmask = [ (xc^2 + yc^2) <= r^2 ? 1 : NaN for yc \u2208 y, xc \u2208 x]\n return(ap, phmask)\nend\n\n\n\"\"\"\n aperture(xrange, yrange, d, o=0)\n aperture(dom::CartesianDomain2D, d, o)\n\nCreate circular aperture in array `xrange \u00d7 yrange` with diameter `d` and center at `o`.\n\"\"\"\n\n\nfunction aperture(xrange::AbstractRange, yrange::AbstractRange, d, o=(0,0))\n ap = [ ((xc-o[1])^2 + (yc-o[2])^2) <= d^2 \/4 ? 1. : 0. for yc \u2208 yrange, xc \u2208 xrange]\n # area = +(ap[:]...)\n phmask = [ ((xc-o[1])^2 + (yc-o[2])^2) <= d^2 \/4 ? 1. 
: NaN for yc \u2208 yrange, xc \u2208 xrange]\n return(ap, phmask)\nend\n\naperture(dom::CartesianDomain2D, d, o=(0,0)) = aperture(dom.xrange, dom.yrange, d,o)\n\n\n\"\"\"\nsubdivide_sum(arr,Q)\n\nDivide `arr`ay in quadratic cells of size `Q \u00d7 Q` and sum the elements with the same indexes in each cell.\n\"\"\"\nfunction subdivide_sum(arr,Q)\n if Q == 1\n return arr\n else\n m,n = size(arr) .\u00f7 Q\n ret = zeros(eltype(arr), m,n)\n\n for q in 0:Q^2-1\n a = q%Q\n b = q\u00f7Q\n ret += @view arr[(m*a + 1) : m*(a+1) , (n*b + 1) : n*(b+1)]\n end\n\n return ret\n end\nend \n\n\"\"\"\n tile(arr, Q)\n\nDivide `arr`ay in quadratic cells of size `Q \u00d7 Q` and stack them along the third dimension.\n\"\"\"\nfunction tile(arr, Q :: Integer) \n m,n = size(arr) .\u00f7 Q\n B = reshape(arr, (Q,m,Q,n))\n C = PermutedDimsArray(B,[1,3,2,4])\n D = reshape(C, (Q,Q,:))\n \nend\n\nfunction tile(arr, Q :: Tuple) \n m,n = size(arr) .\u00f7 Q # TODO rewrite for any dimension\n B = reshape(arr, (Q[1],m,Q[2],n))\n C = PermutedDimsArray(B,[1,3,2,4])\n D = reshape(C, (Q[1],Q[2],:))\n \nend\n\n\n\"\"\"\n subdivide(arr, Q)\n\nSubdivide array of dimension MQ x NQ in Q^2 stacked tiles of size M x N.\n\"\"\"\nfunction subdivide(arr, Q)\n Q1 = size(arr) .\u00f7 Q \n return tile(arr, Q1) \nend\n\nfunction upscaleFactor(s, f, apertureD, \u03bb )\n q = apertureD* s \/(f *\u03bb)\n upscale = ceil(Int,q)\nend\n\nfunction upscaleFactor(ims::ImagingSensor, \u03bb )\n upscaleFactor(ims.cam.pixelsize, ims.lens.focallength, ims.lens.aperture, \u03bb )\nend\n\nmake_centered_domain2D(ims::ImagingSensor) = make_centered_domain2D(ims.cam.imagesize..., ims.cam.pixelsize)\n\n\n\n\n\"\"\"\nappsftoPR(ap,psfimage) constructs a phase retrieval problem from two real arrays, representing the pupil and the focal planes intensity\ndistributions. 
\n\nThe aperture and PSF are assumed to be centred in the array.\n\nExamples\n====\n```jldoctest\n\njulia> ap,_ = PhaseRetrieval.aperture(-1:.2:1,-1:.2:1,.8)\n([0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0 1.0 1.0 1.0 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0], [NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN; NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN; NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN; NaN NaN NaN NaN NaN 1.0 NaN NaN NaN NaN NaN; NaN NaN NaN NaN 1.0 1.0 1.0 NaN NaN NaN NaN; NaN NaN NaN 1.0 1.0 1.0 1.0 1.0 NaN NaN NaN; NaN NaN NaN NaN 1.0 1.0 1.0 NaN NaN NaN NaN; NaN NaN NaN NaN NaN 1.0 NaN NaN NaN NaN NaN; NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN; NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN; NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN])\n\njulia> psfimage = psf(ap)\n[17.720305330423212 17.720305330423212 2.830830026003772 0.7990467619253477 1.7153703234534299 0.002318499057199975 0.8566413660014238 0.0023184990571999695 1.7153703234534299 0.7990467619253475 2.830830026003772; 17.720305330423212 17.720305330423212 2.830830026003772 0.7990467619253475 1.7153703234534299 0.0023184990571999695 0.8566413660014238 0.002318499057199975 1.7153703234534299 0.7990467619253477 2.830830026003772; 2.830830026003773 2.830830026003773 0.2240431494891658 5.881503709461158 4.671643508435733 0.0810140527710051 0.6181197482998197 0.0810140527710051 4.671643508435733 5.881503709461155 0.2240431494891658; 0.7990467619253475 0.7990467619253477 5.881503709461158 11.063720826850961 3.6825070656623606 0.6902785321094302 4.96008586865757 0.6902785321094302 3.6825070656623606 11.063720826850961 5.881503709461158; 1.7153703234534294 1.7153703234534299 4.671643508435735 3.6825070656623615 0.5365498748309357 19.64548752112056 38.22662768629444 19.64548752112056 0.5365498748309356 3.682507065662362 4.671643508435733; 0.00231849905719993 0.002318499057199919 0.08101405277100532 0.6902785321094301 19.64548752112056 78.45538081840574 118.33852533074673 78.45538081840574 19.64548752112056 0.6902785321094301 0.08101405277100517; 0.8566413660014239 0.8566413660014239 0.6181197482998197 4.96008586865757 38.22662768629444 118.33852533074673 169.0 118.33852533074673 38.22662768629444 4.96008586865757 0.6181197482998197; 0.002318499057199919 0.00231849905719993 0.08101405277100517 0.6902785321094301 19.64548752112056 78.45538081840574 118.33852533074673 78.45538081840574 19.64548752112056 0.6902785321094301 0.08101405277100532; 1.7153703234534299 1.7153703234534294 4.671643508435733 3.682507065662362 0.5365498748309356 19.64548752112056 38.22662768629444 19.64548752112056 0.5365498748309357 3.6825070656623615 4.671643508435735; 0.7990467619253477 0.7990467619253475 5.881503709461158 11.063720826850961 3.6825070656623606 0.6902785321094302 4.96008586865757 0.6902785321094302 3.6825070656623606 11.063720826850961 5.881503709461158; 2.830830026003773 2.830830026003773 0.2240431494891658 5.881503709461155 4.671643508435733 0.0810140527710051 0.6181197482998197 0.0810140527710051 4.671643508435733 5.881503709461158 0.2240431494891658]\n\njulia> pr = 
appsftoPR(ap,psfimage);\nAlternatingProjections.TwoSetsFP(AlternatingProjections.ConstrainedByAmplitude{Float64, 2}([1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0; 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 1.0]), AlternatingProjections.FourierTransformedSet{AlternatingProjections.ConstrainedByAmplitude{Float64, 2}, FFTW.cFFTWPlan{ComplexF64, -1, false, 2, UnitRange{Int64}}, AbstractFFTs.ScaledPlan{ComplexF64, FFTW.cFFTWPlan{ComplexF64, 1, true, 2, UnitRange{Int64}}, Float64}}(AlternatingProjections.ConstrainedByAmplitude{Float64, 2}([13.0 10.878351222990862 6.182768610120748 2.2271250231312947 0.7862059197817196 0.9255492239753778 0.9255492239753778 0.7862059197817196 2.2271250231312947 6.182768610120748 10.878351222990862; 10.878351222990862 8.857504209336044 4.43232303889513 0.830830026003773 0.28462967654657045 0.048150794979936934 0.04815079497993703 0.2846296765465702 0.830830026003773 4.43232303889513 8.857504209336044; 6.182768610120748 4.43232303889513 0.7324956483358354 1.9189859472289947 2.1613985075491597 1.3097214678905702 1.30972146789057 2.1613985075491593 1.9189859472289947 0.7324956483358352 4.43232303889513; 2.2271250231312947 0.8308300260037731 1.9189859472289943 3.3262171947801247 2.425181170440913 0.893894155884995 0.8938941558849948 2.425181170440913 3.3262171947801247 1.9189859472289943 0.8308300260037731; 0.7862059197817196 0.28462967654657007 2.1613985075491593 2.425181170440913 0.4733319654208511 1.6825070656623624 1.6825070656623624 0.4733319654208511 2.4251811704409127 2.1613985075491593 0.28462967654657007; 0.9255492239753776 0.04815079497993745 1.3097214678905702 0.8938941558849948 1.6825070656623622 4.209549302529098 4.209549302529098 1.6825070656623622 0.893894155884995 1.3097214678905702 0.04815079497993751; 0.9255492239753776 0.04815079497993751 1.3097214678905702 0.893894155884995 1.6825070656623622 4.209549302529098 4.209549302529098 1.6825070656623622 0.8938941558849948 1.3097214678905702 0.04815079497993745; 0.7862059197817196 0.28462967654657007 2.1613985075491593 2.4251811704409127 0.4733319654208511 1.6825070656623624 1.6825070656623624 0.4733319654208511 2.425181170440913 2.1613985075491593 0.28462967654657007; 2.2271250231312947 0.8308300260037731 1.9189859472289943 3.3262171947801247 2.425181170440913 0.8938941558849948 0.893894155884995 2.425181170440913 3.3262171947801247 1.9189859472289943 0.8308300260037731; 6.182768610120748 4.43232303889513 0.7324956483358352 1.9189859472289947 2.1613985075491593 1.30972146789057 1.3097214678905702 2.1613985075491597 1.9189859472289947 0.7324956483358354 4.43232303889513; 10.878351222990862 8.857504209336044 4.43232303889513 0.830830026003773 0.2846296765465702 0.04815079497993703 0.048150794979936934 0.28462967654657045 0.830830026003773 4.43232303889513 8.857504209336044]), FFTW forward plan for 11\u00d711 array of ComplexF64\n(dft-rank>=2\/1\n (dft-direct-11-x11 \"n1fv_11_avx2_128\")\n (dft-direct-11-x11 \"n1fv_11_avx2_128\")), 0.008264462809917356 * FFTW in-place backward plan for 11\u00d711 array of ComplexF64\n(dft-rank>=2\/1\n (dft-direct-11-x11 \"n1bv_11_avx2_128\")\n (dft-direct-11-x11 
\"n1bv_11_avx2_128\"))))\n\n```\n\n\"\"\"\nfunction appsftoPR(ap,psfimage)\n size(ap) == size(psfimage) || error(\"Array sizes do not match\")\n a = sqrt.(fftshift(ap))\n\n A = sqrt.(fftshift(psfimage))\n # normalise A to ap independent from FFT definition\n A = A .\/ sqrt(sum(abs2,A)) .* sqrt(sum(abs2,fft(a)))\n\n return PRproblem(a, A)\n\nend\n\nfunction removepiston(\u03d5)\n return \u03d5 .- mean(phwrap.(filter(!isnan,\u03d5)))\nend\n\nfunction twinphase(\u03d5)\n shifts = map(x->1-mod(x,2), size(\u03d5))\n return circshift(reverse(-\u03d5),shifts)\nend","avg_line_length":52.2060085837,"max_line_length":3231,"alphanum_fraction":0.7190891154} {"size":843,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"\"\"\"\n density(sp::AbstractSparseMatrix)\n\nCompute the density of `sp`, the fraction of elements that\nare non-zero. This assumes that there are no stored values equal to zero.\n\"\"\"\nfunction density(sp::AbstractSparseMatrix)\n return SparseArrays.nnz(sp) \/ length(sp)\nend\n\n\"\"\"\n summarystats(sp::SparseMatrixCSC)\n\nPrint some statistics for `sp`. The line\n\"number of non-zeros\" counts structural non-zeros.\n\"\"\"\nfunction summarystats(sp::AbstractSparseMatrix)\n padding = 18\n for (description, statistic) in (\n (\"size\", size(sp)),\n (\"num. elements\", length(sp)),\n (\"num. non-zeros\", nnz(sp)),\n (\"density\", Printf.@sprintf(\"%.4e\", density(sp))),\n (\"num. stored zeros\", count(iszero, SparseArrays.nonzeros(sp))))\n println(rpad(description, padding), \": \", statistic)\n end\n return nothing\nend\n","avg_line_length":29.0689655172,"max_line_length":73,"alphanum_fraction":0.6642941874} {"size":254,"ext":"jl","lang":"Julia","max_stars_count":51.0,"content":"function count_increases(numbers::Vector{Int}, window::Int)\n count(numbers[1:end-window] .< numbers[1+window:end])\nend\n\nnumbers = [parse(Int, number) for number in readlines()]\nprintln(count_increases(numbers, 1))\nprintln(count_increases(numbers, 3))\n","avg_line_length":31.75,"max_line_length":59,"alphanum_fraction":0.7519685039} {"size":5319,"ext":"jl","lang":"Julia","max_stars_count":6.0,"content":"@testset \"util\" begin\n x = [1, 2, 3]\n X = [1 0 0; 0 1 0; 0 0 1]\n\n @test Dolang._unpack_var(x, 1) == 1\n @test Dolang._unpack_var(x, 2) == 2\n @test Dolang._unpack_var(x, 3) == 3\n\n @test Dolang._unpack_var(X, 1) == [1, 0 ,0]\n @test Dolang._unpack_var(X, 2) == [0, 1 ,0]\n @test Dolang._unpack_var(X, 3) == [0, 0 ,1]\n\n Dolang._assign_var(x, 3, 1)\n Dolang._assign_var(x, 1, 2)\n Dolang._assign_var(x, 2, 3)\n @test x == [3, 1, 2]\n\n Dolang._assign_var(X, [0, 1 ,0], 3)\n Dolang._assign_var(X, [0, 0 ,1], 1)\n Dolang._assign_var(X, [1, 0 ,0], 2)\n @test X == [0 1 0; 0 0 1; 1 0 0]\n\n @test Dolang._output_size(4, x) == (4,)\n @test Dolang._output_size(4, x, x, x, x) == (4,)\n\n @test Dolang._output_size(4, X) == (3, 4)\n @test Dolang._output_size(4, X, x) == (3, 4)\n @test Dolang._output_size(4, x, X) == (3, 4)\n @test Dolang._output_size(4, X, x, X) == (3, 4)\n @test Dolang._output_size(4, x, X, x) == (3, 4)\n\n @test_throws DimensionMismatch Dolang._output_size(4, X, rand(4, 4))\n\n foo = Dolang._allocate_out(Int, 4, x)\n @test isa(foo, Vector{Int})\n @test size(foo) == (4,)\n\n foo = Dolang._allocate_out(Int, 4, x, x, x, x)\n @test isa(foo, Vector{Int})\n @test size(foo) == (4,)\n\n foo = Dolang._allocate_out(Int, 4, X)\n @test isa(foo, Matrix{Int})\n @test size(foo) == (3,4)\n\n foo = Dolang._allocate_out(Int, 4, X, x)\n @test isa(foo, Matrix{Int})\n @test size(foo) == (3,4)\n\n foo = 
Dolang._allocate_out(Int, 4, x, X)\n @test isa(foo, Matrix{Int})\n @test size(foo) == (3,4)\n\n foo = Dolang._allocate_out(Int, 4, x, X, x)\n @test isa(foo, Matrix{Int})\n @test size(foo) == (3,4)\n\n foo = Dolang._allocate_out(Int, 4, X, x, X)\n @test isa(foo, Matrix{Int})\n @test size(foo) == (3,4)\n\n @testset \"_to_expr\" begin\n @test Dolang._to_expr(\"foo\") == Expr(:block, :foo)\n @test Dolang._to_expr(100) == Expr(:block, 100)\n @test Dolang._to_expr(:bar) == Expr(:block, :bar)\n @test Dolang._to_expr(:(x+y)) == :(x+y)\n end\n\n @testset \"inf_to_Inf\" begin\n x = rand()\n @test Dolang.inf_to_Inf(x) == x\n @test Dolang.inf_to_Inf(Inf) == Inf\n @test Dolang.inf_to_Inf(:inf) == Inf\n @test Dolang.inf_to_Inf(:(-inf)) == :(-$(Inf))\n @test Dolang.inf_to_Inf(:(x-inf)) == :(x-$(Inf))\n end\n\n @testset \"solve_triangular_system\" begin\n # very simple case\n d1 = Dict(:x => 1.0, :y => :(x+1))\n sol1 = OrderedDict{Symbol,Number}(:x => 1.0, :y => 2.0)\n @test Dolang.solve_triangular_system(d1) == sol1\n\n # fully specified numerical dict\n d2 = Dict(:x => 1.0, :y => 2.0)\n sol2 = OrderedDict{Symbol,Number}(:x => 1.0, :y => 2.0)\n @test Dolang.solve_triangular_system(d2) == sol2\n\n # unknown variable w\n d3 = Dict(:x => 1.0, :y => :(x+1), :z=> :(100*w))\n @test_throws ErrorException Dolang.solve_triangular_system(d3)\n\n # non-triangular system\n d4 = Dict(:x => :(y+1), :y => :(x-1))\n @test_throws ErrorException Dolang.solve_triangular_system(d4)\n end\n\n\nend # @testset \"util\"\n\n\n@testset \"triangular systems\" begin\n\n @testset \"solve_dependencies\" begin\n system_ints = Dict{Int,Set{Int}}(\n 1=>Set([2,3,4]),\n 2=>Set([3,4]),\n 3=>Set([4]),\n 4=>Set([])\n )\n sol = Dolang.solve_dependencies(system_ints)\n @test sol==[4,3,2,1]\n\n system_syms = Dict{Any,Set{Any}}(\n :d=>Set([(:a,0),:b,:c]),\n :c=>Set([(:a,0),:b]),\n :b=>Set([(:a,0)]),\n (:a,0)=>Set([])\n )\n sol = Dolang.solve_dependencies(system_syms)\n @test sol==[(:a,0),:b,:c,:d]\n\n system_error = Dict{Any,Set{Any}}(\n :d=>Set([(:a,0),:b,:c]),\n :c=>Set([(:a,0),:b]),\n :b=>Set([(:a,0)]),\n (:a,0)=>Set([:d])\n )\n @test_throws Dolang.TriangularSystemException Dolang.solve_dependencies(system_error)\n end\n\n @testset \"Reorder triangular block\" begin\n eqs = Dict(\n (:k,0)=>:((1-delta)*k(-1)+i(-1)),\n (:y,0)=>:(A(0)*k(-1)^theta),\n (:A,0)=>:(rho*A(-1)+epsilon),\n )\n eqs_reordered = OrderedDict(\n (:A, 0) => :(rho * A(-1) + epsilon),\n (:y, 0) => :(A(0) * k(-1) ^ theta),\n (:k, 0) => :((1 - delta) * k(-1) + i(-1))\n )\n @test eqs_reordered == Dolang.reorder_triangular_block(eqs)\n end\n\n @testset \"Solve definitions\" begin\n\n defs = Dict(\n (:rho, 0) => :(c[t] \/ c[t-1]),\n (:V, 0) => :(c[t] ^ (1 - gamma) \/ (1 - gamma)),\n (:c, 0) => :(y[t] ^ theta - i[t]),\n )\n solved_defs = OrderedDict(\n (:V, 0) => :(c[t] ^ (1 - gamma) \/ (1 - gamma)),\n (:c, 0) => :(y[t] ^ theta - i[t]),\n (:c, -1) => :(y[t-1] ^ theta - i[t-1]),\n (:rho, 0) => :(c[t] \/ c[t-1])\n )\n @test solved_defs == Dolang.solve_definitions(defs)\n\n solved_defs_reduced = OrderedDict(\n (:c, 1) => :(y[t+1] ^ theta - i[t+1]),\n (:c, 0) => :(y[t] ^ theta - i[t]),\n (:rho, 1) => :(c[t+1] \/ c[t])\n )\n\n @test solved_defs_reduced == Dolang.solve_definitions(defs, [(:rho,1)])\n\n end\nend\n","avg_line_length":31.1052631579,"max_line_length":93,"alphanum_fraction":0.4920097763} {"size":3141,"ext":"jl","lang":"Julia","max_stars_count":307.0,"content":"include(joinpath(@__DIR__, \"testcommon.jl\"))\n\n# Wrap the functions defined in C++\nmodule ParametricTypes\n\nusing 
CxxWrap\n@wrapmodule(CxxWrap.CxxWrapCore.libparametric())\n\nfunction __init__()\n @initcxx\nend\n\n@cxxdereference dereftemplate1(x::TemplateType) = get_first(x)\n\nend\n\nimport .ParametricTypes.TemplateType, .ParametricTypes.NonTypeParam\n\np1 = TemplateType{ParametricTypes.P1, ParametricTypes.P2}()\np2 = TemplateType{ParametricTypes.P2, ParametricTypes.P1}()\n\nprintln(\"Dumping object p1:\")\ndump(p1)\n\n@testset \"$(basename(@__FILE__)[1:end-3])\" begin\n\n@test ParametricTypes.get_first(p1) == 1\n@test ParametricTypes.get_second(p2) == 1\n@test typeof(ParametricTypes.get_first(p1)) == Int32\n@test typeof(ParametricTypes.get_second(p2)) == Int32\n\n@test ParametricTypes.dereftemplate1(p1) == 1\n@test ParametricTypes.dereftemplate1(CxxRef(p1)) == 1\n\n@test ParametricTypes.get_first(CxxRef(p1)) == 1\n@test ParametricTypes.get_first(CxxRef(p1)[]) == 1\n@test length(typeof(CxxRef(p1)[]).parameters) == 2\n\n@test ParametricTypes.get_first(p2) == 10.\n@test ParametricTypes.get_second(p1) == 10.\n@test typeof(ParametricTypes.get_first(p2)) == Float64\n@test typeof(ParametricTypes.get_second(p1)) == Float64\n\n@test ParametricTypes.TemplateDefaultType{ParametricTypes.P1}() != nothing\n\nnontype1 = ParametricTypes.NonTypeParam{Int32, Int32(1)}()\n@test ParametricTypes.get_nontype(nontype1) == 1\n\nnontype2 = ParametricTypes.NonTypeParam{UInt32, UInt32(2)}()\n@test ParametricTypes.get_nontype(nontype2) == UInt32(2)\n\nnontype3 = ParametricTypes.NonTypeParam{Int32, Int32(1)}(3)\n@test ParametricTypes.get_nontype(nontype3) == 3\n\nnontype4 = ParametricTypes.NonTypeParam{CxxLong, CxxLong(64)}()\n@test ParametricTypes.get_nontype(nontype4) == CxxLong(64)\n\nconcr = ParametricTypes.ConcreteTemplate{Float64}()\n@test isa(concr, ParametricTypes.AbstractTemplate{Float64})\n@test isa(concr, ParametricTypes.AbstractTemplate)\n@test isa(concr, ParametricTypes.ConcreteTemplate)\nabst = ParametricTypes.to_base(CxxPtr(concr))\n@test isa(abst[], ParametricTypes.AbstractTemplate{Float64})\n@test isa(abst[], ParametricTypes.AbstractTemplate)\n\nf3 = ParametricTypes.Foo3{Int32, CxxWrap.CxxBool, Float32}()\n@test length(methods(ParametricTypes.foo3_method)) == 6\nf2 = ParametricTypes.Foo2{Float64}()\n@test length(methods(ParametricTypes.foo2_method)) == 2\n\n@test length(methods(ParametricTypes.foo3_free_method)) == 6\nParametricTypes.foo3_free_method(f3)\n\n@test supertype(ParametricTypes.Foo3{Float64,ParametricTypes.P1,Float32}) == ParametricTypes.AbstractTemplate{Float64}\n\ndarr = [1.0, 2.0, 3.0]\ncarr = Complex{Float32}[1+2im, 3+4im]\nvec1 = ParametricTypes.CppVector{Float64}(pointer(darr), 3)\nvec2 = ParametricTypes.CppVector2{Float64, Float32}()\nvec3 = ParametricTypes.CppVector{Complex{Float32}}(pointer(carr), 2)\n@test isa(vec1, AbstractVector{Float64})\n@test isa(vec2, AbstractVector{Float64})\n@test isa(vec3, AbstractVector{Complex{Float32}})\n@test ParametricTypes.get(vec1,0)[] == 1.0\n@test ParametricTypes.get(vec1,1)[] == 2.0\n@test ParametricTypes.get(vec1,2)[] == 3.0\n@test ParametricTypes.get(vec3,0)[] == 1+2im\n@test ParametricTypes.get(vec3,1)[] == 3+4im\n\nend","avg_line_length":34.9,"max_line_length":118,"alphanum_fraction":0.7730022286} {"size":13822,"ext":"jl","lang":"Julia","max_stars_count":30.0,"content":"using Compat\nusing Distances\nusing Ripserer\nusing SparseArrays\nusing Random\nusing StaticArrays\nusing Suppressor\nusing Test\n\nusing Ripserer:\n distances, births, adjacency_matrix, edges, nv, unsafe_simplex, ChainElement, 
Chain\n\ninclude(\"..\/testdatasets.jl\")\ninclude(\"interfacetest.jl\")\n\n@testset \"distances\" begin\n for points in (\n [(0, 0), (0, 1), (1, 1), (1, 0)],\n [SVector(0, 0), SVector(0, 1), SVector(1, 1), SVector(1, 0)],\n )\n @test distances(points) \u2248 [0 1 \u221a2 1; 1 0 1 \u221a2; \u221a2 1 0 1; 1 \u221a2 1 0]\n @test distances(points, Cityblock()) == [0 1 2 1; 1 0 1 2; 2 1 0 1; 1 2 1 0]\n end\nend\n\n@testset \"Rips points constructor, sparse=false\" begin\n filtration = Rips([(sin(x), cos(x)) for x in range(0, 2\u03c0; length=101)[1:(end - 1)]])\n adj = adjacency_matrix(filtration)\n @test all(x -> x > 0, adj[i, j] for i in 1:100 for j in (i + 1):100)\n @test eltype(edges(filtration)) === Simplex{1,Float64,Int}\n\n filtration = Rips{Int32}([\n (sin(x), cos(x)) for x in range(0.0f0, 2.0f0\u03c0; length=101)[1:(end - 1)]\n ])\n adj = adjacency_matrix(filtration)\n @test !issparse(adj)\n @test all(x -> x > 0, adj[i, j] for i in 1:100 for j in (i + 1):100)\n @test eltype(edges(filtration)) === Simplex{1,Float32,Int32}\nend\n\n@testset \"Rips points constructor, sparse=true\" begin\n filtration = Rips(\n [(sin(x), cos(x)) for x in range(0, 2\u03c0; length=101)[1:(end - 1)]];\n threshold=0.1,\n sparse=true,\n )\n adj = adjacency_matrix(filtration)\n @test issparse(adj)\n @test maximum(adj) \u2264 0.1\n @test maximum(adj) \u2264 threshold(filtration)\nend\n\n@testset \"Construction does not alter input\" begin\n dist = [\n 0 9 1 2\n 9 0 3 4\n 1 3 0 4\n 2 4 4 0\n ]\n orig_dist = copy(dist)\n Rips(dist; threshold=1)\n @test dist == orig_dist\n Rips(dist; threshold=1, sparse=true)\n @test dist == orig_dist\n\n dist = sparse(dist)\n orig_dist = copy(dist)\n Rips(dist; threshold=1)\n @test dist == orig_dist\nend\n\n@testset \"Warns with duplicate points\" begin\n pts = [(1, 0), (1, 1), (0, 1), (0, 0), (0, 0)]\n @test @capture_err(Rips(pts)) \u2260 \"\"\n @test @capture_err(Rips(pts[1:4])) == \"\"\n @test nv(@suppress Rips(pts)) == 4\nend\n\n@testset \"Errors\" begin\n @testset \"Non-square matrices throw an error\" begin\n @test_throws DimensionMismatch Rips(zeros(3, 2))\n @test_throws DimensionMismatch Rips(zeros(3, 2); sparse=true)\n end\n @testset \"Asymmetric matrices throw an error\" begin\n @test_throws ArgumentError Rips([1 1 1; 1 1 1; 1 2 1])\n @test_throws ArgumentError Rips([1 1 1; 2 1 1; 1 2 1]; sparse=true)\n end\n @testset \"Edge births must be larger than vertex births\" begin\n @test_throws ArgumentError Rips([1 1 1; 1 1 1; 1 1 2])\n @test_throws ArgumentError Rips([1 1 1; 1 2 1; 1 1 1]; sparse=true)\n end\nend\n\n@testset \"ripserer\" begin\n @testset \"Dense\" begin\n @testset \"Icosahedron\" begin\n d0, d1, d2 = ripserer(icosahedron; dim_max=2)\n @test d0 == [fill((0.0, 1.0), 11); (0.0, Inf)]\n @test d1 == []\n @test d2 == [(1.0, 2.0)]\n end\n @testset \"Cycle with various fields\" begin\n d0_2, d1_2, d2_2, d3_2 = ripserer(Rips{Int32}, cycle; dim_max=3)\n d0_7, d1_7, d2_7, d3_7 = ripserer(Rips(cycle); dim_max=3, modulus=7)\n d0_r, d1_r, d2_r, d3_r = ripserer(cycle; dim_max=3, field=Rational{Int})\n\n @test d0_2 == d0_7 == d0_r == [fill((0, 1), size(cycle, 1) - 1); (0, Inf)]\n @test d1_2 == d1_7 == d1_r == [(1, 6)]\n @test d2_2 == d2_7 == d2_r == fill((6, 7), 5)\n @test d3_2 == d3_7 == d3_r == [(7, 8)]\n end\n @testset \"RP2 with various fields\" begin\n _, d1_2, d2_2 = ripserer(Rips(projective_plane); dim_max=2)\n _, d1_3, d2_3 = ripserer(Rips, projective_plane; dim_max=2, modulus=3)\n _, d1_331, d2_331 = ripserer(projective_plane; dim_max=2, field=Mod{5})\n _, d1_r, d2_r 
= ripserer(projective_plane; dim_max=2, field=Rational{Int})\n @test d1_2 == [(1, 2)]\n @test d2_2 == [(1, 2)]\n @test d1_3 == d1_331 == d1_r == []\n @test d2_3 == d2_331 == d1_r == []\n end\n @testset \"Icosahedron, threshold=1\" begin\n d0, d1, d2 = ripserer(Rips(icosahedron; threshold=1); dim_max=2)\n @test d0 == [fill((0.0, 1.0), 11); (0.0, Inf)]\n @test d1 == []\n @test d2 == [(1.0, Inf)]\n end\n @testset \"Icosahedron, threshold=0.5\" begin\n d0, d1, d2 = ripserer(icosahedron; dim_max=2, threshold=0.5)\n @test d0 == fill((0.0, Inf), 12)\n @test d1 == []\n @test d2 == []\n end\n @testset \"RP2 with various fields, threshold=1\" begin\n _, d1_2, d2_2 = ripserer(Rips, projective_plane; dim_max=2, threshold=1)\n _, d1_3, d2_3 = ripserer(projective_plane; dim_max=2, modulus=3, threshold=1)\n _, d1_331, d2_331 = ripserer(\n projective_plane; dim_max=2, field=Mod{5}, threshold=1\n )\n _, d1_r, d2_r = ripserer(\n projective_plane; dim_max=2, field=Rational{Int}, threshold=1\n )\n @test d1_2 == [(1, Inf)]\n @test d2_2 == [(1, Inf)]\n @test d1_3 == d1_331 == d1_r == []\n @test d2_3 == d2_331 == d1_r == []\n end\n @testset \"Points as input\" begin\n for metric in (Euclidean(), Cityblock())\n pts = torus_points(9)\n @test ripserer(pts; metric=metric) == ripserer(Rips(pts; metric=metric))\n end\n end\n @testset \"Cutoff\" begin\n d0, d1 = ripserer(rand_dist_matrix(20); cutoff=0.5)\n @test all(persistence.(d0) .> 0.5)\n @test all(persistence.(d1) .> 0.5)\n end\n end\n\n @testset \"Sparse\" begin\n @testset \"Icosahedron\" begin\n d0, d1, d2 = ripserer(sparse(icosahedron); dim_max=2)\n @test d0 == [fill((0.0, 1.0), 11); (0.0, Inf)]\n @test d1 == []\n @test d2 == [(1.0, 2.0)]\n end\n @testset \"RP2 with various fields, threshold=1\" begin\n _, d1_2, d2_2 = ripserer(sparse(projective_plane); dim_max=2, threshold=1)\n _, d1_3, d2_3 = ripserer(\n Rips(projective_plane; threshold=1, sparse=true); dim_max=2, modulus=3\n )\n _, d1_331, d2_331 = ripserer(\n Rips, sparse(projective_plane); dim_max=2, field=Mod{5}, threshold=1\n )\n _, d1_r, d2_r = ripserer(\n Rips,\n projective_plane;\n sparse=true,\n dim_max=2,\n field=Rational{Int},\n threshold=1,\n )\n @test d1_2 == [(1, Inf)]\n @test d2_2 == [(1, Inf)]\n @test d1_3 == d1_331 == d1_r == []\n @test d2_3 == d2_331 == d1_r == []\n end\n @testset \"Equal to Rips\" begin\n for thresh in (nothing, 1, 0.5, 0.126)\n data = torus_points(100)\n r_res = ripserer(data; threshold=thresh, dim_max=2)\n s_res_1 = ripserer(Rips, data; threshold=thresh, sparse=true, dim_max=2)\n\n # Add zeros to diagonal. 
Adding ones first actually changes the structure of\n # the matrix.\n data2 = sparse(Ripserer.distances(data))\n for i in axes(data2, 1)\n data2[i, i] = 1\n data2[i, i] = 0\n end\n s_res_2 = ripserer(data2; threshold=thresh, dim_max=2)\n\n @test r_res == s_res_1 == s_res_2\n end\n end\n end\n\n @testset \"Representatives\" begin\n @testset \"Known example\" begin\n # This example was generated by getting representatives from ripser.\n _, d1, d2 = ripserer(projective_plane; dim_max=2, reps=true)\n\n @test simplex.(representative(only(d1))) == [\n Simplex{1}((11, 10), 1),\n Simplex{1}((10, 7), 1),\n Simplex{1}((10, 6), 1),\n Simplex{1}((8, 1), 1),\n Simplex{1}((7, 3), 1),\n Simplex{1}((7, 1), 1),\n Simplex{1}((6, 2), 1),\n Simplex{1}((5, 1), 1),\n Simplex{1}((2, 1), 1),\n ]\n @test coefficient.(representative(only(d1))) == fill(Mod{2}(1), 9)\n @test simplex.(representative(only(d2))) == [Simplex{2}((6, 2, 1), 1)]\n @test coefficient.(representative(only(d2))) == [Mod{2}(1)]\n end\n @testset \"Types\" begin\n d0, d1, d2, d3 = ripserer(cycle; dim_max=3, reps=true)\n @test d1[1].representative isa Chain\n\n d0, d1, d2, d3 = ripserer(cycle; dim_max=3, reps=true, field=Rational{Int})\n @test d3[1].representative isa Chain\n end\n @testset \"Infinite interval very low threshold\" begin\n _, d1 = ripserer(cycle; dim_max=1, reps=true, threshold=1, field=Rational{Int})\n rep = representative(only(d1))\n @test simplex(only(rep)) == birth_simplex(only(d1))\n @test rep isa Chain{Rational{Int},Simplex{1,Int,Int}}\n end\n @testset \"Infinite interval higher threshold\" begin\n _, d1 = ripserer(cycle; dim_max=1, reps=true, threshold=3, field=Rational{Int})\n rep = representative(only(d1))\n @test !isempty(rep)\n @test rep isa Chain{Rational{Int},Simplex{1,Int,Int}}\n end\n @testset \"Critical simplices\" begin\n result = ripserer(torus(100); reps=true, threshold=0.5)\n for diag in result\n @test birth.(diag) == birth.(birth_simplex.(diag))\n finite = filter(isfinite, diag)\n @test death.(finite) == birth.(death_simplex.(finite))\n infinite = filter(!isfinite, diag)\n @test all(isnothing, death_simplex.(infinite))\n end\n end\n end\n\n @testset \"Diagram metadata\" begin\n filtration = Rips(cycle)\n d0, d1, d2, d3 = ripserer(filtration; dim_max=3, reps=true, field=Rational{Int})\n @test d0.dim == 0\n @test d1.dim == 1\n @test d2.dim == 2\n @test d3.dim == 3\n thresh = Float64(threshold(Rips(cycle)))\n @test d0.threshold \u2261 thresh\n @test d1.threshold \u2261 thresh\n @test d2.threshold \u2261 thresh\n @test d3.threshold \u2261 thresh\n field = Rational{Int}\n @test d0.field \u2261 field\n @test d1.field \u2261 field\n @test d2.field \u2261 field\n @test d3.field \u2261 field\n @test d0.filtration == filtration\n @test d1.filtration == filtration\n @test d2.filtration == filtration\n @test d3.filtration == filtration\n end\n\n @testset \"Zero-dimensional sublevel set persistence\" begin\n @testset \"with sparse matrix\" begin\n data = [1, 0, 1, 2, 3, 4, 3, 2, 3, 2, 1, 2]\n\n # Create distance matrix from data, where neighboring points are connected by edges\n # and the edge weights are equal to the max of both vertex births.\n n = length(data)\n dists = spzeros(n, n)\n for i in 1:n\n dists[i, i] = data[i]\n end\n for i in 1:(n - 1)\n j = i + 1\n dists[i, j] = dists[j, i] = max(dists[i, i], dists[j, j])\n end\n # 0-dimensional persistence should find values of minima and maxima of our data.\n d0 = ripserer(dists; dim_max=0)[1]\n @test d0 == [(2, 3), (1, 4), (0, Inf)]\n end\n\n @testset \"with Rips\" 
begin\n data = [1, 0, 1, 2, 3, 4, 3, 2, 3, 2, 1, 2]\n\n n = length(data)\n dists = zeros(Int, (n, n))\n for i in 1:n, j in 1:n\n if abs(i - j) \u2264 1\n dists[i, j] = max(data[i], data[j])\n else\n dists[i, j] = 5\n end\n end\n d0 = ripserer(dists; dim_max=0)[1]\n @test d0 == [(2, 3), (1, 4), (0, Inf)]\n end\n end\n\n @testset \"Homology and explicit cohomology\" begin\n @testset \"Representative cycle\" begin\n res_hom = ripserer(cycle; alg=:homology, reps=true, dim_max=3)\n @test vertices.(simplex.(representative(res_hom[2][1]))) ==\n sort!(vcat([(i + 1, i) for i in 1:17], [(18, 1)]))\n end\n @testset \"Infinite intervals\" begin\n @test_broken ripserer(\n Rips(cycle; threshold=2); alg=:homology, implicit=true\n )[2][1] == (1.0, Inf)\n @test_broken ripserer(\n Rips, cycle; alg=:homology, threshold=2, implicit=false\n )[2][1] == (1.0, Inf)\n @test ripserer(cycle; alg=:involuted, threshold=2)[2][1] == (1.0, Inf)\n end\n end\n\n @testset \"Interface\" begin\n test_filtration(Rips, cycle; modulus=3, dim_max=2)\n end\n\n @testset \"Errors\" begin\n @testset \"Overflow checking\" begin\n @test_throws OverflowError ripserer(Rips{Int16}(ones(1000, 1000)))\n end\n @testset \"Unsupported algirithms\" begin\n @test_throws ArgumentError ripserer(ones(5, 5); alg=:something)\n end\n @testset \"Int or Float64 field type\" begin\n @test_throws ErrorException ripserer(ones(5, 5); field=Int)\n @test_throws ErrorException ripserer(ones(5, 5); field=Float64)\n @test_throws ErrorException ripserer(ones(5, 5); field=UInt8)\n end\n @testset \"Explicit cohomology reperesentatives unsupported\" begin\n @test_throws ErrorException ripserer(cycle; implicit=false, reps=true)\n end\n end\nend\n","avg_line_length":38.6089385475,"max_line_length":95,"alphanum_fraction":0.5348719433} {"size":691,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"# Autogenerated wrapper script for libblastrampoline_jll for aarch64-linux-gnu\nexport libblastrampoline\n\nJLLWrappers.@generate_wrapper_header(\"libblastrampoline\")\nJLLWrappers.@declare_library_product(libblastrampoline, \"libblastrampoline.so\")\nfunction __init__()\n JLLWrappers.@generate_init_header()\n JLLWrappers.@init_library_product(\n libblastrampoline,\n \"lib\/libblastrampoline.so\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@generate_init_footer()\n @static if VERSION < v\"1.7.0-DEV.641\"\n ccall((:lbt_forward, libblastrampoline), Int32, (Cstring, Int32, Int32),\n Libdl.dlpath(Base.libblas_name) , 1, 0)\n end\n\nend # __init__()\n","avg_line_length":32.9047619048,"max_line_length":80,"alphanum_fraction":0.7308248915} {"size":574,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"{\"score\": 7.73, \"score_count\": 38085, \"timestamp\": 1569423377.0}\n{\"score\": 7.73, \"score_count\": 37197, \"timestamp\": 1566275460.0}\n{\"score\": 7.72, \"score_count\": 29663, \"timestamp\": 1545149834.0}\n{\"score\": 7.72, \"score_count\": 18918, \"timestamp\": 1522422851.0}\n{\"score\": 7.96, \"score_count\": 641, \"timestamp\": 1496459804.0}\n{\"score\": 7.98, \"score_count\": 514, \"timestamp\": 1495852926.0}\n{\"score\": 8.03, \"score_count\": 327, \"timestamp\": 1495246840.0}\n{\"score\": 8.07, \"score_count\": 152, \"timestamp\": 1494641237.0}\n{\"score\": 7.89, \"score_count\": 59, \"timestamp\": 1494025917.0}\n","avg_line_length":57.4,"max_line_length":64,"alphanum_fraction":0.6707317073} {"size":4773,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"\n#################### Post order traversal ####################\n\"\"\"\n 
post_order(root::T, traversal::Vector{T})::Vector{T} where T<:AbstractNode\n\nThis function performs a post order traversal through the tree. It is assumed that `root` is the\nroot of the tree. Thus, if `root` is not the root, the subtree defined by the root `root` is\nused for the post order traversal.\n\nReturns vector of Nodes.\n\n* `root` : root Node of tree.\n\n* `traversal` : vector of Nodes; nodes are pushed to this vector as the tree is traversed.\n\"\"\"\nfunction post_order(root::T, traversal::Vector{T}) where T<:AbstractNode\n if root.nchild != 0\n for child in root.children\n post_order(child, traversal)\n end\n end # if\n push!(traversal, root)\n return traversal\nend # function post_order_trav\n\n\n\"\"\"\n post_order(root::T)::Vector{T} where T<:AbstractNode\n\nThis function does post order traversal. Only the root node needs to be supplied.\n\nReturns vector of Nodes.\n\n* `root` : root Node of tree.\n\"\"\"\nfunction post_order(root::T)::Vector{T} where T<:AbstractNode\n t::Vector{T} = []\n post_order(root, t)\n return t\nend # function post_order\n\n\"\"\"\n get_leaves(root::T, traversal::Vector{T})::Vector{T} where T<:AbstractNode\n\nThis function returns leaf nodes of a tree. It is assumed that `root` is the\nroot of the tree. Thus, if `root` is not the root, the subtree defined by the root `root` is\nused.\n\nReturns a vector of leaf Nodes.\n\n* `root` : root Node of tree to traverse.\n\n* `traversal` : vector of Nodes; leaf Nodes, once found, are pushed to this vector.\n\n\"\"\"\nfunction get_leaves(root::T, traversal::Vector{T})::Vector{T} where T<:AbstractNode\n if root.nchild != 0\n for child in root.children\n get_leaves(child, traversal)\n end\n else\n push!(traversal, root)\n end # if\n\n return traversal\nend # function post_order_trav\n\n\n\"\"\"\n get_leaves(root::T)::Vector{T} where T<:AbstractNode\n\nThis function returns the leaves of a tree. Only the root node needs to be supplied.\n\nReturns vector of leaf Nodes.\n\n* `root` : root Node of tree.\n\"\"\"\nfunction get_leaves(root::T)::Vector{T} where T<:AbstractNode\n t::Vector{T} = []\n get_leaves(root, t)\n return t\nend # function post_order\n\n\n\n\n#################### Pre order traversal ####################\n\n\"\"\"\n pre_order(root::T, traversal::Vector{T})::Vector{T} where T<:AbstractNode\n\nThis function performs a pre order traversal through the tree. It is assumed that `root` is the\nroot of the tree. Thus, if `root` is not the root, the subtree defined by the root `root` is\nused for the pre order traversal.\n\nReturns vector of Nodes.\n\n* `root` : root Node of tree.\n\n* `traversal` : vector of Nodes; nodes are pushed to this vector as the tree is traversed.\n\"\"\"\nfunction pre_order(root::T, traversal::Vector{T})::Vector{T} where T<:AbstractNode\n push!(traversal, root)\n if root.nchild != 0\n for child in root.children\n pre_order(child, traversal)\n end\n end # if\n return traversal\nend # function pre_order!\n\n\n\"\"\"\n pre_order(root::T)::Vector{T} where T<:AbstractNode\n\nThis function does pre order traversal. Only the root node needs to be supplied.\n\nReturns vector of Nodes.\n\n* `root` : root Node of tree.\n\"\"\"\nfunction pre_order(root::T)::Vector{T} where T<:AbstractNode\n t::Vector{T} = []\n pre_order(root, t)\n return t\nend # function pre_order\n\n#################### Level order traversal ####################\n\n\"\"\"\n level_order(node::T)::Array{T} where T<:AbstractNode\n\nThis function does level order traversal. 
Only the root node needs to be supplied.\n\nReturns Array of Nodes.\n\n* `node` : root Node of tree.\n\"\"\"\nfunction level_order(node::T)::Array{T} where T<:AbstractNode\n level = 1\n stack::Array{T} = []\n while level_traverse(node, level, stack)\n level += 1\n end # while\n stack\nend # function level_order\n\n\"\"\"\n level_traverse(node::T, level::Int64, stack::Array{T})::Bool where T <:AbstractNode\n\nThis function traverses a level of the tree specified through `node`. The level\nis specified via the `level` argument and the nodes visited are stored in the\n`stack`.\nThis function is intended as the internal worker for the level_order function.\n\"\"\"\nfunction level_traverse(node::T, level::Int64, stack::Array{T})::Bool where T <:AbstractNode\n\n if level == 1\n # level which needs to be traversed right now\n push!(stack, node)\n return true\n else\n # move down the tree to the correct level\n boolqueue = [false] # this is used to look for the correct level\n for child in node.children\n push!(boolqueue, level_traverse(child, level-1, stack))\n end # for\n return reduce(|, boolqueue)\n end # if\nend # function level_traverse\n","avg_line_length":27.9122807018,"max_line_length":96,"alphanum_fraction":0.6681332495} {"size":8727,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# Trust-region\n# ------------------------------------------------------------\nconst TRConstraint = CI{VectorAffineDecisionFunction{Float64}, MOI.NormInfinityCone}\n@with_kw mutable struct TRData{T <: AbstractFloat}\n Q\u0303::T = 1e10\n \u0394::MOI.VariableIndex = MOI.VariableIndex(0)\n constraint::TRConstraint = TRConstraint(0)\n c\u0394::Int = 0\n incumbent::Int = 1\n major_iterations::Int = 0\n minor_iterations::Int = 0\nend\n\n@with_kw mutable struct TRParameters{T <: AbstractFloat}\n \u03b3::T = 1e-4\n \u0394::T = 1.0\n \u0394\u0305::T = 1000.0\nend\n\n\"\"\"\n TrustRegion\n\nFunctor object for using trust-region regularization in an L-shaped algorithm. 
Create by supplying a [`TR`](@ref) object through `regularize ` in the `LShapedSolver` factory function and then pass to a `StochasticPrograms.jl` model.\n\n...\n# Parameters\n- `\u03b3::T = 1e-4`: Relative tolerance for deciding if a minor iterate should be accepted as a new major iterate.\n- `\u0394::AbstractFloat = 1.0`: Initial size of \u221e-norm trust-region.\n- `\u0394\u0305::AbstractFloat = 1000.0`: Maximum size of \u221e-norm trust-region.\n...\n\"\"\"\nstruct TrustRegion{T <: AbstractFloat, A <: AbstractVector} <: AbstractRegularization\n data::TRData{T}\n parameters::TRParameters{T}\n\n decisions::Decisions\n projection_targets::Vector{MOI.VariableIndex}\n \u03be::Vector{Decision{T}}\n\n Q\u0303_history::A\n \u0394_history::A\n incumbents::Vector{Int}\n\n function TrustRegion(decisions::Decisions, \u03be\u2080::AbstractVector; kw...)\n T = promote_type(eltype(\u03be\u2080), Float32)\n A = Vector{T}\n \u03be = map(\u03be\u2080) do val\n KnownDecision(val, T)\n end\n return new{T, A}(TRData{T}(),\n TRParameters{T}(; kw...),\n decisions,\n Vector{MOI.VariableIndex}(undef, length(\u03be\u2080)),\n \u03be,\n A(),\n A(),\n Vector{Int}())\n end\nend\n\nfunction initialize_regularization!(lshaped::AbstractLShaped, tr::TrustRegion{T}) where T <: AbstractFloat\n n = length(tr.\u03be) + 1\n # Add projection targets\n add_projection_targets!(tr, lshaped.master)\n # Add trust region\n name = string(:\u0394)\n trust_region = KnownDecision(tr.parameters.\u0394, T)\n set = SingleDecisionSet(1, trust_region, NoSpecifiedConstraint(), false)\n tr.data.\u0394, _ = MOI.add_constrained_variable(lshaped.master, set)\n set_decision!(tr.decisions, tr.data.\u0394, trust_region)\n MOI.set(lshaped.master, MOI.VariableName(), tr.data.\u0394, name)\n x = VectorOfDecisions(all_decisions(tr.decisions))\n \u03be = VectorOfDecisions(tr.projection_targets)\n \u0394 = SingleDecision(tr.data.\u0394)\n # Add trust-region constraint\n f = MOIU.operate(vcat, T, \u0394, x) -\n MOIU.operate(vcat, T, zero(tr.parameters.\u0394), \u03be)\n tr.data.constraint =\n MOI.add_constraint(lshaped.master, f,\n MOI.NormInfinityCone(n))\n return nothing\nend\n\nfunction restore_regularized_master!(lshaped::AbstractLShaped, tr::TrustRegion)\n # Delete trust region constraint\n if !iszero(tr.data.constraint.value)\n MOI.delete(lshaped.master, tr.data.constraint)\n tr.data.constraint = TRConstraint(0)\n end\n # Delete trust region\n if !iszero(tr.data.\u0394.value)\n MOI.delete(lshaped.master, tr.data.\u0394)\n StochasticPrograms.remove_decision!(tr.decisions, tr.data.\u0394)\n tr.data.\u0394 = MOI.VariableIndex(0)\n end\n # Delete projection targets\n for var in tr.projection_targets\n MOI.delete(lshaped.master, var)\n StochasticPrograms.remove_decision!(tr.decisions, var)\n end\n empty!(tr.projection_targets)\n return nothing\nend\n\nfunction filter_variables!(tr::TrustRegion, list::Vector{MOI.VariableIndex})\n # Filter projection targets\n filter!(vi -> !(vi in tr.projection_targets), list)\n # Filter \u0394\n i = something(findfirst(isequal(tr.data.\u0394), list), 0)\n if !iszero(i)\n MOI.deleteat!(list, i)\n end\n return nothing\nend\n\nfunction filter_constraints!(tr::TrustRegion, list::Vector{<:CI})\n # Filter trust-region constraint\n i = something(findfirst(isequal(tr.data.constraint), list), 0)\n if !iszero(i)\n MOI.deleteat!(list, i)\n end\n return nothing\nend\n\nfunction log_regularization!(lshaped::AbstractLShaped, tr::TrustRegion)\n @unpack Q\u0303, \u0394, incumbent = tr.data\n push!(tr.Q\u0303_history, Q\u0303)\n 
push!(tr.\u0394_history, StochasticPrograms.decision(tr.decisions, \u0394).value)\n push!(tr.incumbents, incumbent)\n return nothing\nend\n\nfunction log_regularization!(lshaped::AbstractLShaped, t::Integer, tr::TrustRegion)\n @unpack Q\u0303,\u0394,incumbent = tr.data\n tr.Q\u0303_history[t] = Q\u0303\n tr.\u0394_history[t] = StochasticPrograms.decision(tr.decisions, \u0394).value\n tr.incumbents[t] = incumbent\n return nothing\nend\n\nfunction take_step!(lshaped::AbstractLShaped, tr::TrustRegion)\n @unpack Q,\u03b8 = lshaped.data\n @unpack \u03c4 = lshaped.parameters\n @unpack Q\u0303 = tr.data\n @unpack \u03b3 = tr.parameters\n need_update = false\n t = timestamp(lshaped)\n Q\u0303t = incumbent_objective(lshaped, t, tr)\n if Q + \u03c4 <= Q\u0303 && (tr.data.major_iterations == 1 || Q <= Q\u0303t - \u03b3*abs(Q\u0303t-\u03b8))\n need_update = true\n enlarge_trustregion!(lshaped, tr)\n tr.data.c\u0394 = 0\n x = current_decision(lshaped)\n for i in eachindex(tr.\u03be)\n tr.\u03be[i].value = x[i]\n end\n tr.data.Q\u0303 = Q\n tr.data.incumbent = timestamp(lshaped)\n tr.data.major_iterations += 1\n else\n need_update = reduce_trustregion!(lshaped, tr)\n tr.data.minor_iterations += 1\n end\n if need_update\n update_trustregion!(lshaped, tr)\n end\n return nothing\nend\n\nfunction process_cut!(lshaped::AbstractLShaped, cut::HyperPlane{FeasibilityCut}, tr::TrustRegion)\n @unpack \u03c4 = lshaped.parameters\n if !satisfied(cut, decision(lshaped), \u03c4)\n # Project decision to ensure prevent master infeasibility\n A = [I cut.\u03b4Q; cut.\u03b4Q' 0*I]\n b = [zeros(length(tr.\u03be)); -gap(cut, decision(lshaped))]\n t = A\\b\n for i in eachindex(tr.\u03be)\n tr.\u03be[i].value += t[i]\n end\n update_trustregion!(lshaped, tr)\n end\n return nothing\nend\n\nfunction update_trustregion!(lshaped::AbstractLShaped, tr::TrustRegion)\n @unpack \u0394 = tr.data\n # Update projection targets\n for vi in tr.projection_targets\n ci = CI{MOI.SingleVariable,SingleDecisionSet{Float64}}(vi.value)\n MOI.modify(lshaped.master,\n ci,\n KnownValuesChange())\n end\n # Update trust-region\n ci = CI{MOI.SingleVariable,SingleDecisionSet{Float64}}(\u0394.value)\n MOI.modify(lshaped.master,\n ci,\n KnownValuesChange())\n return nothing\nend\n\nfunction enlarge_trustregion!(lshaped::AbstractLShaped, tr::TrustRegion)\n @unpack Q,\u03b8 = lshaped.data\n @unpack \u03c4, = lshaped.parameters\n @unpack \u0394 = tr.data\n @unpack \u0394\u0305 = tr.parameters\n t = timestamp(lshaped)\n \u0394\u0303 = incumbent_trustregion(lshaped, t, tr)\n \u03be = incumbent_decision(lshaped, t, tr)\n Q\u0303 = incumbent_objective(lshaped, t, tr)\n if Q\u0303 - Q >= 0.5*(Q\u0303 - \u03b8) && abs(norm(\u03be - lshaped.x, Inf) - \u0394\u0303) <= \u03c4\n # Enlarge the trust-region radius\n \u0394 = StochasticPrograms.decision(tr.decisions, tr.data.\u0394)\n \u0394.value = max(\u0394.value, min(\u0394\u0305, 2 * \u0394\u0303))\n return true\n else\n return false\n end\nend\n\nfunction reduce_trustregion!(lshaped::AbstractLShaped, tr::TrustRegion)\n @unpack Q,\u03b8 = lshaped.data\n @unpack Q\u0303,\u0394,c\u0394 = tr.data\n t = timestamp(lshaped)\n \u0394\u0303 = incumbent_trustregion(lshaped, t, tr)\n Q\u0303 = incumbent_objective(lshaped, t, tr)\n \u03c1 = min(1, \u0394\u0303)*(Q-Q\u0303)\/(Q\u0303-\u03b8)\n if \u03c1 > 0\n tr.data.c\u0394 += 1\n end\n if \u03c1 > 3 || (c\u0394 >= 3 && 1 < \u03c1 <= 3)\n # Reduce the trust-region radius\n tr.data.c\u0394 = 0\n \u0394 = StochasticPrograms.decision(tr.decisions, tr.data.\u0394)\n \u0394.value = 
min(\u0394.value, (1\/min(\u03c1,4))*\u0394\u0303)\n return true\n else\n return false\n end\nend\n\n# API\n# ------------------------------------------------------------\n\"\"\"\n TR\n\nFactory object for [`TrustRegion`](@ref). Pass to `regularize ` in the `LShapedSolver` factory function. Equivalent factory calls: `TR`, `WithTR`, `TrustRegion`, `WithTrustRegion`. See ?TrustRegion for parameter descriptions.\n\n\"\"\"\nmutable struct TR <: AbstractRegularizer\n parameters::TRParameters{Float64}\nend\nTR(; kw...) = TR(TRParameters(kw...))\nWithTR(; kw...) = TR(TRParameters(kw...))\nTrustRegion(; kw...) = TR(TRParameters(kw...))\nWithTrustRegion(; kw...) = TR(TRParameters(kw...))\n\nfunction (tr::TR)(decisions::Decisions, x::AbstractVector)\n return TrustRegion(decisions, x; type2dict(tr.parameters)...)\nend\n\nfunction str(::TR)\n return \"L-shaped using trust-region\"\nend\n","avg_line_length":32.9320754717,"max_line_length":232,"alphanum_fraction":0.6321760055} {"size":43563,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This file is auto-generated by AWSMetadata.jl\nusing AWS\nusing AWS.AWSServices: frauddetector\nusing AWS.Compat\nusing AWS.UUIDs\n\"\"\"\n BatchCreateVariable()\n\nCreates a batch of variables.\n\n# Required Parameters\n- `variableEntries`: The list of variables for the batch create variable request.\n\n# Optional Parameters\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nbatch_create_variable(variableEntries; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"BatchCreateVariable\", Dict{String, Any}(\"variableEntries\"=>variableEntries); aws_config=aws_config)\nbatch_create_variable(variableEntries, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"BatchCreateVariable\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"variableEntries\"=>variableEntries), args)); aws_config=aws_config)\n\n\"\"\"\n BatchGetVariable()\n\nGets a batch of variables.\n\n# Required Parameters\n- `names`: The list of variable names to get.\n\n\"\"\"\n\nbatch_get_variable(names; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"BatchGetVariable\", Dict{String, Any}(\"names\"=>names); aws_config=aws_config)\nbatch_get_variable(names, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"BatchGetVariable\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"names\"=>names), args)); aws_config=aws_config)\n\n\"\"\"\n CreateDetectorVersion()\n\nCreates a detector version. The detector version starts in a DRAFT status.\n\n# Required Parameters\n- `detectorId`: The ID of the detector under which you want to create a new version.\n- `rules`: The rules to include in the detector version.\n\n# Optional Parameters\n- `description`: The description of the detector version.\n- `externalModelEndpoints`: The Amazon Sagemaker model endpoints to include in the detector version.\n- `modelVersions`: The model versions to include in the detector version.\n- `ruleExecutionMode`: The rule execution mode for the rules included in the detector version. You can define and edit the rule mode at the detector version level, when it is in draft status. If you specify FIRST_MATCHED, Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud dectector then provides the outcomes for that single rule. If you specifiy ALL_MATCHED, Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules. 
The default behavior is FIRST_MATCHED.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\ncreate_detector_version(detectorId, rules; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateDetectorVersion\", Dict{String, Any}(\"detectorId\"=>detectorId, \"rules\"=>rules); aws_config=aws_config)\ncreate_detector_version(detectorId, rules, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateDetectorVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"rules\"=>rules), args)); aws_config=aws_config)\n\n\"\"\"\n CreateModel()\n\nCreates a model using the specified model type.\n\n# Required Parameters\n- `eventTypeName`: The name of the event type.\n- `modelId`: The model ID.\n- `modelType`: The model type. \n\n# Optional Parameters\n- `description`: The model description. \n- `tags`: A collection of key and value pairs.\n\"\"\"\n\ncreate_model(eventTypeName, modelId, modelType; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateModel\", Dict{String, Any}(\"eventTypeName\"=>eventTypeName, \"modelId\"=>modelId, \"modelType\"=>modelType); aws_config=aws_config)\ncreate_model(eventTypeName, modelId, modelType, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateModel\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"eventTypeName\"=>eventTypeName, \"modelId\"=>modelId, \"modelType\"=>modelType), args)); aws_config=aws_config)\n\n\"\"\"\n CreateModelVersion()\n\nCreates a version of the model using the specified model type and model id. \n\n# Required Parameters\n- `modelId`: The model ID. \n- `modelType`: The model type.\n- `trainingDataSchema`: The training data schema.\n- `trainingDataSource`: The training data source location in Amazon S3. \n\n# Optional Parameters\n- `externalEventsDetail`: Details for the external events data used for model version training. Required if trainingDataSource is EXTERNAL_EVENTS.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\ncreate_model_version(modelId, modelType, trainingDataSchema, trainingDataSource; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateModelVersion\", Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType, \"trainingDataSchema\"=>trainingDataSchema, \"trainingDataSource\"=>trainingDataSource); aws_config=aws_config)\ncreate_model_version(modelId, modelType, trainingDataSchema, trainingDataSource, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateModelVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType, \"trainingDataSchema\"=>trainingDataSchema, \"trainingDataSource\"=>trainingDataSource), args)); aws_config=aws_config)\n\n\"\"\"\n CreateRule()\n\nCreates a rule for use with the specified detector. 
\n\n# Required Parameters\n- `detectorId`: The detector ID for the rule's parent detector.\n- `expression`: The rule expression.\n- `language`: The language of the rule.\n- `outcomes`: The outcome or outcomes returned when the rule expression matches.\n- `ruleId`: The rule ID.\n\n# Optional Parameters\n- `description`: The rule description.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\ncreate_rule(detectorId, expression, language, outcomes, ruleId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateRule\", Dict{String, Any}(\"detectorId\"=>detectorId, \"expression\"=>expression, \"language\"=>language, \"outcomes\"=>outcomes, \"ruleId\"=>ruleId); aws_config=aws_config)\ncreate_rule(detectorId, expression, language, outcomes, ruleId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateRule\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"expression\"=>expression, \"language\"=>language, \"outcomes\"=>outcomes, \"ruleId\"=>ruleId), args)); aws_config=aws_config)\n\n\"\"\"\n CreateVariable()\n\nCreates a variable.\n\n# Required Parameters\n- `dataSource`: The source of the data.\n- `dataType`: The data type.\n- `defaultValue`: The default value for the variable when no value is received.\n- `name`: The name of the variable.\n\n# Optional Parameters\n- `description`: The description.\n- `tags`: A collection of key and value pairs.\n- `variableType`: The variable type. For more information see Variable types. Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT \n\"\"\"\n\ncreate_variable(dataSource, dataType, defaultValue, name; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateVariable\", Dict{String, Any}(\"dataSource\"=>dataSource, \"dataType\"=>dataType, \"defaultValue\"=>defaultValue, \"name\"=>name); aws_config=aws_config)\ncreate_variable(dataSource, dataType, defaultValue, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"CreateVariable\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"dataSource\"=>dataSource, \"dataType\"=>dataType, \"defaultValue\"=>defaultValue, \"name\"=>name), args)); aws_config=aws_config)\n\n\"\"\"\n DeleteDetector()\n\nDeletes the detector. Before deleting a detector, you must first delete all detector versions and rule versions associated with the detector.\n\n# Required Parameters\n- `detectorId`: The ID of the detector to delete.\n\n\"\"\"\n\ndelete_detector(detectorId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteDetector\", Dict{String, Any}(\"detectorId\"=>detectorId); aws_config=aws_config)\ndelete_detector(detectorId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteDetector\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId), args)); aws_config=aws_config)\n\n\"\"\"\n DeleteDetectorVersion()\n\nDeletes the detector version. 
You cannot delete detector versions that are in ACTIVE status.\n\n# Required Parameters\n- `detectorId`: The ID of the parent detector for the detector version to delete.\n- `detectorVersionId`: The ID of the detector version to delete.\n\n\"\"\"\n\ndelete_detector_version(detectorId, detectorVersionId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteDetectorVersion\", Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId); aws_config=aws_config)\ndelete_detector_version(detectorId, detectorVersionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteDetectorVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId), args)); aws_config=aws_config)\n\n\"\"\"\n DeleteEvent()\n\nDeletes the specified event.\n\n# Required Parameters\n- `eventId`: The ID of the event to delete.\n- `eventTypeName`: The name of the event type.\n\n\"\"\"\n\ndelete_event(eventId, eventTypeName; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteEvent\", Dict{String, Any}(\"eventId\"=>eventId, \"eventTypeName\"=>eventTypeName); aws_config=aws_config)\ndelete_event(eventId, eventTypeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteEvent\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"eventId\"=>eventId, \"eventTypeName\"=>eventTypeName), args)); aws_config=aws_config)\n\n\"\"\"\n DeleteRule()\n\nDeletes the rule. You cannot delete a rule if it is used by an ACTIVE or INACTIVE detector version.\n\n# Required Parameters\n- `rule`: \n\n\"\"\"\n\ndelete_rule(rule; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteRule\", Dict{String, Any}(\"rule\"=>rule); aws_config=aws_config)\ndelete_rule(rule, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DeleteRule\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"rule\"=>rule), args)); aws_config=aws_config)\n\n\"\"\"\n DescribeDetector()\n\nGets all versions for a specified detector.\n\n# Required Parameters\n- `detectorId`: The detector ID.\n\n# Optional Parameters\n- `maxResults`: The maximum number of results to return for the request.\n- `nextToken`: The next token from the previous response.\n\"\"\"\n\ndescribe_detector(detectorId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DescribeDetector\", Dict{String, Any}(\"detectorId\"=>detectorId); aws_config=aws_config)\ndescribe_detector(detectorId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DescribeDetector\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId), args)); aws_config=aws_config)\n\n\"\"\"\n DescribeModelVersions()\n\nGets all of the model versions for the specified model type or for the specified model type and model ID. You can also get details for a single, specified model version. 
\n\n# Optional Parameters\n- `maxResults`: The maximum number of results to return.\n- `modelId`: The model ID.\n- `modelType`: The model type.\n- `modelVersionNumber`: The model version number.\n- `nextToken`: The next token from the previous results.\n\"\"\"\n\ndescribe_model_versions(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DescribeModelVersions\"; aws_config=aws_config)\ndescribe_model_versions(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"DescribeModelVersions\", args; aws_config=aws_config)\n\n\"\"\"\n GetDetectorVersion()\n\nGets a particular detector version. \n\n# Required Parameters\n- `detectorId`: The detector ID.\n- `detectorVersionId`: The detector version ID.\n\n\"\"\"\n\nget_detector_version(detectorId, detectorVersionId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetDetectorVersion\", Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId); aws_config=aws_config)\nget_detector_version(detectorId, detectorVersionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetDetectorVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId), args)); aws_config=aws_config)\n\n\"\"\"\n GetDetectors()\n\nGets all detectors or a single detector if a detectorId is specified. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetDetectorsResponse as part of your request. A null pagination token fetches the records from the beginning. \n\n# Optional Parameters\n- `detectorId`: The detector ID.\n- `maxResults`: The maximum number of objects to return for the request.\n- `nextToken`: The next token for the subsequent request.\n\"\"\"\n\nget_detectors(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetDetectors\"; aws_config=aws_config)\nget_detectors(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetDetectors\", args; aws_config=aws_config)\n\n\"\"\"\n GetEntityTypes()\n\nGets all entity types or a specific entity type if a name is specified. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetEntityTypesResponse as part of your request. A null pagination token fetches the records from the beginning. \n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request.\n- `name`: The name.\n- `nextToken`: The next token for the subsequent request.\n\"\"\"\n\nget_entity_types(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetEntityTypes\"; aws_config=aws_config)\nget_entity_types(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetEntityTypes\", args; aws_config=aws_config)\n\n\"\"\"\n GetEventPrediction()\n\nEvaluates an event against a detector version. 
If a version ID is not provided, the detector\u2019s (ACTIVE) version is used.\n\n# Required Parameters\n- `detectorId`: The detector ID.\n- `entities`: The entity type (associated with the detector's event type) and specific entity ID representing who performed the event. If an entity id is not available, use \\\"UNKNOWN.\\\"\n- `eventId`: The unique ID used to identify the event.\n- `eventTimestamp`: Timestamp that defines when the event under evaluation occurred.\n- `eventTypeName`: The event type associated with the detector specified for the prediction.\n- `eventVariables`: Names of the event type's variables you defined in Amazon Fraud Detector to represent data elements and their corresponding values for the event you are sending for evaluation.\n\n# Optional Parameters\n- `detectorVersionId`: The detector version ID.\n- `externalModelEndpointDataBlobs`: The Amazon SageMaker model endpoint input data blobs.\n\"\"\"\n\nget_event_prediction(detectorId, entities, eventId, eventTimestamp, eventTypeName, eventVariables; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetEventPrediction\", Dict{String, Any}(\"detectorId\"=>detectorId, \"entities\"=>entities, \"eventId\"=>eventId, \"eventTimestamp\"=>eventTimestamp, \"eventTypeName\"=>eventTypeName, \"eventVariables\"=>eventVariables); aws_config=aws_config)\nget_event_prediction(detectorId, entities, eventId, eventTimestamp, eventTypeName, eventVariables, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetEventPrediction\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"entities\"=>entities, \"eventId\"=>eventId, \"eventTimestamp\"=>eventTimestamp, \"eventTypeName\"=>eventTypeName, \"eventVariables\"=>eventVariables), args)); aws_config=aws_config)\n\n\"\"\"\n GetEventTypes()\n\nGets all event types or a specific event type if name is provided. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetEventTypesResponse as part of your request. A null pagination token fetches the records from the beginning. \n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request.\n- `name`: The name.\n- `nextToken`: The next token for the subsequent request.\n\"\"\"\n\nget_event_types(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetEventTypes\"; aws_config=aws_config)\nget_event_types(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetEventTypes\", args; aws_config=aws_config)\n\n\"\"\"\n GetExternalModels()\n\nGets the details for one or more Amazon SageMaker models that have been imported into the service. This is a paginated API. If you provide a null maxResults, this actions retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetExternalModelsResult as part of your request. A null pagination token fetches the records from the beginning. 
\n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request.\n- `modelEndpoint`: The Amazon SageMaker model endpoint.\n- `nextToken`: The next page token for the request.\n\"\"\"\n\nget_external_models(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetExternalModels\"; aws_config=aws_config)\nget_external_models(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetExternalModels\", args; aws_config=aws_config)\n\n\"\"\"\n GetKMSEncryptionKey()\n\nGets the encryption key if a Key Management Service (KMS) customer master key (CMK) has been specified to be used to encrypt content in Amazon Fraud Detector.\n\n\"\"\"\n\nget_kmsencryption_key(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetKMSEncryptionKey\"; aws_config=aws_config)\nget_kmsencryption_key(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetKMSEncryptionKey\", args; aws_config=aws_config)\n\n\"\"\"\n GetLabels()\n\nGets all labels or a specific label if name is provided. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 50 records per page. If you provide a maxResults, the value must be between 10 and 50. To get the next page results, provide the pagination token from the GetGetLabelsResponse as part of your request. A null pagination token fetches the records from the beginning. \n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request.\n- `name`: The name of the label or labels to get.\n- `nextToken`: The next token for the subsequent request.\n\"\"\"\n\nget_labels(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetLabels\"; aws_config=aws_config)\nget_labels(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetLabels\", args; aws_config=aws_config)\n\n\"\"\"\n GetModelVersion()\n\nGets the details of the specified model version.\n\n# Required Parameters\n- `modelId`: The model ID.\n- `modelType`: The model type.\n- `modelVersionNumber`: The model version number.\n\n\"\"\"\n\nget_model_version(modelId, modelType, modelVersionNumber; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetModelVersion\", Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType, \"modelVersionNumber\"=>modelVersionNumber); aws_config=aws_config)\nget_model_version(modelId, modelType, modelVersionNumber, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetModelVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType, \"modelVersionNumber\"=>modelVersionNumber), args)); aws_config=aws_config)\n\n\"\"\"\n GetModels()\n\nGets one or more models. Gets all models for the AWS account if no model type and no model id provided. Gets all models for the AWS account and model type, if the model type is specified but model id is not provided. Gets a specific model if (model type, model id) tuple is specified. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 1 and 10. To get the next page results, provide the pagination token from the response as part of your request. 
A null pagination token fetches the records from the beginning.\n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request. \n- `modelId`: The model ID.\n- `modelType`: The model type.\n- `nextToken`: The next token for the subsequent request.\n\"\"\"\n\nget_models(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetModels\"; aws_config=aws_config)\nget_models(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetModels\", args; aws_config=aws_config)\n\n\"\"\"\n GetOutcomes()\n\nGets one or more outcomes. This is a paginated API. If you provide a null maxResults, this actions retrieves a maximum of 100 records per page. If you provide a maxResults, the value must be between 50 and 100. To get the next page results, provide the pagination token from the GetOutcomesResult as part of your request. A null pagination token fetches the records from the beginning. \n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request. \n- `name`: The name of the outcome or outcomes to get.\n- `nextToken`: The next page token for the request. \n\"\"\"\n\nget_outcomes(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetOutcomes\"; aws_config=aws_config)\nget_outcomes(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetOutcomes\", args; aws_config=aws_config)\n\n\"\"\"\n GetRules()\n\nGet all rules for a detector (paginated) if ruleId and ruleVersion are not specified. Gets all rules for the detector and the ruleId if present (paginated). Gets a specific rule if both the ruleId and the ruleVersion are specified. This is a paginated API. Providing null maxResults results in retrieving maximum of 100 records per page. If you provide maxResults the value must be between 50 and 100. To get the next page result, a provide a pagination token from GetRulesResult as part of your request. Null pagination token fetches the records from the beginning.\n\n# Required Parameters\n- `detectorId`: The detector ID.\n\n# Optional Parameters\n- `maxResults`: The maximum number of rules to return for the request.\n- `nextToken`: The next page token.\n- `ruleId`: The rule ID.\n- `ruleVersion`: The rule version.\n\"\"\"\n\nget_rules(detectorId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetRules\", Dict{String, Any}(\"detectorId\"=>detectorId); aws_config=aws_config)\nget_rules(detectorId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetRules\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId), args)); aws_config=aws_config)\n\n\"\"\"\n GetVariables()\n\nGets all of the variables or the specific variable. This is a paginated API. Providing null maxSizePerPage results in retrieving maximum of 100 records per page. If you provide maxSizePerPage the value must be between 50 and 100. To get the next page result, a provide a pagination token from GetVariablesResult as part of your request. Null pagination token fetches the records from the beginning. \n\n# Optional Parameters\n- `maxResults`: The max size per page determined for the get variable request. \n- `name`: The name of the variable. \n- `nextToken`: The next page token of the get variable request. 
\n\"\"\"\n\nget_variables(; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetVariables\"; aws_config=aws_config)\nget_variables(args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"GetVariables\", args; aws_config=aws_config)\n\n\"\"\"\n ListTagsForResource()\n\nLists all tags associated with the resource. This is a paginated API. To get the next page results, provide the pagination token from the response as part of your request. A null pagination token fetches the records from the beginning. \n\n# Required Parameters\n- `resourceARN`: The ARN that specifies the resource whose tags you want to list.\n\n# Optional Parameters\n- `maxResults`: The maximum number of objects to return for the request. \n- `nextToken`: The next token from the previous results.\n\"\"\"\n\nlist_tags_for_resource(resourceARN; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"ListTagsForResource\", Dict{String, Any}(\"resourceARN\"=>resourceARN); aws_config=aws_config)\nlist_tags_for_resource(resourceARN, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"ListTagsForResource\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"resourceARN\"=>resourceARN), args)); aws_config=aws_config)\n\n\"\"\"\n PutDetector()\n\nCreates or updates a detector. \n\n# Required Parameters\n- `detectorId`: The detector ID. \n- `eventTypeName`: The name of the event type.\n\n# Optional Parameters\n- `description`: The description of the detector.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nput_detector(detectorId, eventTypeName; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutDetector\", Dict{String, Any}(\"detectorId\"=>detectorId, \"eventTypeName\"=>eventTypeName); aws_config=aws_config)\nput_detector(detectorId, eventTypeName, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutDetector\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"eventTypeName\"=>eventTypeName), args)); aws_config=aws_config)\n\n\"\"\"\n PutEntityType()\n\nCreates or updates an entity type. An entity represents who is performing the event. As part of a fraud prediction, you pass the entity ID to indicate the specific entity who performed the event. An entity type classifies the entity. Example classifications include customer, merchant, or account.\n\n# Required Parameters\n- `name`: The name of the entity type.\n\n# Optional Parameters\n- `description`: The description.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nput_entity_type(name; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutEntityType\", Dict{String, Any}(\"name\"=>name); aws_config=aws_config)\nput_entity_type(name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutEntityType\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"name\"=>name), args)); aws_config=aws_config)\n\n\"\"\"\n PutEventType()\n\nCreates or updates an event type. An event is a business activity that is evaluated for fraud risk. With Amazon Fraud Detector, you generate fraud predictions for events. An event type defines the structure for an event sent to Amazon Fraud Detector. This includes the variables sent as part of the event, the entity performing the event (such as a customer), and the labels that classify the event. 
Example event types include online payment transactions, account registrations, and authentications.\n\n# Required Parameters\n- `entityTypes`: The entity type for the event type. Example entity types: customer, merchant, account.\n- `eventVariables`: The event type variables.\n- `name`: The name.\n\n# Optional Parameters\n- `description`: The description of the event type.\n- `labels`: The event type labels.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nput_event_type(entityTypes, eventVariables, name; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutEventType\", Dict{String, Any}(\"entityTypes\"=>entityTypes, \"eventVariables\"=>eventVariables, \"name\"=>name); aws_config=aws_config)\nput_event_type(entityTypes, eventVariables, name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutEventType\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"entityTypes\"=>entityTypes, \"eventVariables\"=>eventVariables, \"name\"=>name), args)); aws_config=aws_config)\n\n\"\"\"\n PutExternalModel()\n\nCreates or updates an Amazon SageMaker model endpoint. You can also use this action to update the configuration of the model endpoint, including the IAM role and\/or the mapped variables. \n\n# Required Parameters\n- `inputConfiguration`: The model endpoint input configuration.\n- `invokeModelEndpointRoleArn`: The IAM role used to invoke the model endpoint.\n- `modelEndpoint`: The model endpoints name.\n- `modelEndpointStatus`: The model endpoint\u2019s status in Amazon Fraud Detector.\n- `modelSource`: The source of the model.\n- `outputConfiguration`: The model endpoint output configuration.\n\n# Optional Parameters\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nput_external_model(inputConfiguration, invokeModelEndpointRoleArn, modelEndpoint, modelEndpointStatus, modelSource, outputConfiguration; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutExternalModel\", Dict{String, Any}(\"inputConfiguration\"=>inputConfiguration, \"invokeModelEndpointRoleArn\"=>invokeModelEndpointRoleArn, \"modelEndpoint\"=>modelEndpoint, \"modelEndpointStatus\"=>modelEndpointStatus, \"modelSource\"=>modelSource, \"outputConfiguration\"=>outputConfiguration); aws_config=aws_config)\nput_external_model(inputConfiguration, invokeModelEndpointRoleArn, modelEndpoint, modelEndpointStatus, modelSource, outputConfiguration, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutExternalModel\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"inputConfiguration\"=>inputConfiguration, \"invokeModelEndpointRoleArn\"=>invokeModelEndpointRoleArn, \"modelEndpoint\"=>modelEndpoint, \"modelEndpointStatus\"=>modelEndpointStatus, \"modelSource\"=>modelSource, \"outputConfiguration\"=>outputConfiguration), args)); aws_config=aws_config)\n\n\"\"\"\n PutKMSEncryptionKey()\n\nSpecifies the Key Management Service (KMS) customer master key (CMK) to be used to encrypt content in Amazon Fraud Detector.\n\n# Required Parameters\n- `kmsEncryptionKeyArn`: The KMS encryption key ARN.\n\n\"\"\"\n\nput_kmsencryption_key(kmsEncryptionKeyArn; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutKMSEncryptionKey\", Dict{String, Any}(\"kmsEncryptionKeyArn\"=>kmsEncryptionKeyArn); aws_config=aws_config)\nput_kmsencryption_key(kmsEncryptionKeyArn, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutKMSEncryptionKey\", Dict{String, 
Any}(mergewith(_merge, Dict{String, Any}(\"kmsEncryptionKeyArn\"=>kmsEncryptionKeyArn), args)); aws_config=aws_config)\n\n\"\"\"\n PutLabel()\n\nCreates or updates label. A label classifies an event as fraudulent or legitimate. Labels are associated with event types and used to train supervised machine learning models in Amazon Fraud Detector. \n\n# Required Parameters\n- `name`: The label name.\n\n# Optional Parameters\n- `description`: The label description.\n- `tags`: \n\"\"\"\n\nput_label(name; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutLabel\", Dict{String, Any}(\"name\"=>name); aws_config=aws_config)\nput_label(name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutLabel\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"name\"=>name), args)); aws_config=aws_config)\n\n\"\"\"\n PutOutcome()\n\nCreates or updates an outcome. \n\n# Required Parameters\n- `name`: The name of the outcome.\n\n# Optional Parameters\n- `description`: The outcome description.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nput_outcome(name; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutOutcome\", Dict{String, Any}(\"name\"=>name); aws_config=aws_config)\nput_outcome(name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"PutOutcome\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"name\"=>name), args)); aws_config=aws_config)\n\n\"\"\"\n TagResource()\n\nAssigns tags to a resource.\n\n# Required Parameters\n- `resourceARN`: The resource ARN.\n- `tags`: The tags to assign to the resource.\n\n\"\"\"\n\ntag_resource(resourceARN, tags; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"TagResource\", Dict{String, Any}(\"resourceARN\"=>resourceARN, \"tags\"=>tags); aws_config=aws_config)\ntag_resource(resourceARN, tags, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"TagResource\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"resourceARN\"=>resourceARN, \"tags\"=>tags), args)); aws_config=aws_config)\n\n\"\"\"\n UntagResource()\n\nRemoves tags from a resource.\n\n# Required Parameters\n- `resourceARN`: The ARN of the resource from which to remove the tag.\n- `tagKeys`: The resource ARN.\n\n\"\"\"\n\nuntag_resource(resourceARN, tagKeys; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UntagResource\", Dict{String, Any}(\"resourceARN\"=>resourceARN, \"tagKeys\"=>tagKeys); aws_config=aws_config)\nuntag_resource(resourceARN, tagKeys, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UntagResource\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"resourceARN\"=>resourceARN, \"tagKeys\"=>tagKeys), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateDetectorVersion()\n\n Updates a detector version. The detector version attributes that you can update include models, external model endpoints, rules, rule execution mode, and description. You can only update a DRAFT detector version.\n\n# Required Parameters\n- `detectorId`: The parent detector ID for the detector version you want to update.\n- `detectorVersionId`: The detector version ID. \n- `externalModelEndpoints`: The Amazon SageMaker model endpoints to include in the detector version.\n- `rules`: The rules to include in the detector version.\n\n# Optional Parameters\n- `description`: The detector version description. 
\n- `modelVersions`: The model versions to include in the detector version.\n- `ruleExecutionMode`: The rule execution mode to add to the detector. If you specify FIRST_MATCHED, Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud dectector then provides the outcomes for that single rule. If you specifiy ALL_MATCHED, Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules. You can define and edit the rule mode at the detector version level, when it is in draft status. The default behavior is FIRST_MATCHED.\n\"\"\"\n\nupdate_detector_version(detectorId, detectorVersionId, externalModelEndpoints, rules; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateDetectorVersion\", Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId, \"externalModelEndpoints\"=>externalModelEndpoints, \"rules\"=>rules); aws_config=aws_config)\nupdate_detector_version(detectorId, detectorVersionId, externalModelEndpoints, rules, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateDetectorVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId, \"externalModelEndpoints\"=>externalModelEndpoints, \"rules\"=>rules), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateDetectorVersionMetadata()\n\nUpdates the detector version's description. You can update the metadata for any detector version (DRAFT, ACTIVE, or INACTIVE). \n\n# Required Parameters\n- `description`: The description.\n- `detectorId`: The detector ID.\n- `detectorVersionId`: The detector version ID. \n\n\"\"\"\n\nupdate_detector_version_metadata(description, detectorId, detectorVersionId; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateDetectorVersionMetadata\", Dict{String, Any}(\"description\"=>description, \"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId); aws_config=aws_config)\nupdate_detector_version_metadata(description, detectorId, detectorVersionId, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateDetectorVersionMetadata\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"description\"=>description, \"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateDetectorVersionStatus()\n\nUpdates the detector version\u2019s status. You can perform the following promotions or demotions using UpdateDetectorVersionStatus: DRAFT to ACTIVE, ACTIVE to INACTIVE, and INACTIVE to ACTIVE.\n\n# Required Parameters\n- `detectorId`: The detector ID. \n- `detectorVersionId`: The detector version ID. \n- `status`: The new status.\n\n\"\"\"\n\nupdate_detector_version_status(detectorId, detectorVersionId, status; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateDetectorVersionStatus\", Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId, \"status\"=>status); aws_config=aws_config)\nupdate_detector_version_status(detectorId, detectorVersionId, status, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateDetectorVersionStatus\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"detectorId\"=>detectorId, \"detectorVersionId\"=>detectorVersionId, \"status\"=>status), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateModel()\n\nUpdates a model. 
You can update the description attribute using this action.\n\n# Required Parameters\n- `modelId`: The model ID.\n- `modelType`: The model type.\n\n# Optional Parameters\n- `description`: The new model description.\n\"\"\"\n\nupdate_model(modelId, modelType; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateModel\", Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType); aws_config=aws_config)\nupdate_model(modelId, modelType, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateModel\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateModelVersion()\n\nUpdates a model version. Updating a model version retrains an existing model version using updated training data and produces a new minor version of the model. You can update the training data set location and data access role attributes using this action. This action creates and trains a new minor version of the model, for example version 1.01, 1.02, 1.03.\n\n# Required Parameters\n- `majorVersionNumber`: The major version number.\n- `modelId`: The model ID.\n- `modelType`: The model type.\n\n# Optional Parameters\n- `externalEventsDetail`: The event details.\n- `tags`: A collection of key and value pairs.\n\"\"\"\n\nupdate_model_version(majorVersionNumber, modelId, modelType; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateModelVersion\", Dict{String, Any}(\"majorVersionNumber\"=>majorVersionNumber, \"modelId\"=>modelId, \"modelType\"=>modelType); aws_config=aws_config)\nupdate_model_version(majorVersionNumber, modelId, modelType, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateModelVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"majorVersionNumber\"=>majorVersionNumber, \"modelId\"=>modelId, \"modelType\"=>modelType), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateModelVersionStatus()\n\nUpdates the status of a model version. You can perform the following status updates: Change the TRAINING_COMPLETE status to ACTIVE. Change ACTIVEto INACTIVE. \n\n# Required Parameters\n- `modelId`: The model ID of the model version to update.\n- `modelType`: The model type.\n- `modelVersionNumber`: The model version number.\n- `status`: The model version status.\n\n\"\"\"\n\nupdate_model_version_status(modelId, modelType, modelVersionNumber, status; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateModelVersionStatus\", Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType, \"modelVersionNumber\"=>modelVersionNumber, \"status\"=>status); aws_config=aws_config)\nupdate_model_version_status(modelId, modelType, modelVersionNumber, status, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateModelVersionStatus\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"modelId\"=>modelId, \"modelType\"=>modelType, \"modelVersionNumber\"=>modelVersionNumber, \"status\"=>status), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateRuleMetadata()\n\nUpdates a rule's metadata. 
The description attribute can be updated.\n\n# Required Parameters\n- `description`: The rule description.\n- `rule`: The rule to update.\n\n\"\"\"\n\nupdate_rule_metadata(description, rule; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateRuleMetadata\", Dict{String, Any}(\"description\"=>description, \"rule\"=>rule); aws_config=aws_config)\nupdate_rule_metadata(description, rule, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateRuleMetadata\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"description\"=>description, \"rule\"=>rule), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateRuleVersion()\n\nUpdates a rule version resulting in a new rule version. Updates a rule version resulting in a new rule version (version 1, 2, 3 ...). \n\n# Required Parameters\n- `expression`: The rule expression.\n- `language`: The language.\n- `outcomes`: The outcomes.\n- `rule`: The rule to update.\n\n# Optional Parameters\n- `description`: The description.\n- `tags`: The tags to assign to the rule version.\n\"\"\"\n\nupdate_rule_version(expression, language, outcomes, rule; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateRuleVersion\", Dict{String, Any}(\"expression\"=>expression, \"language\"=>language, \"outcomes\"=>outcomes, \"rule\"=>rule); aws_config=aws_config)\nupdate_rule_version(expression, language, outcomes, rule, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateRuleVersion\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"expression\"=>expression, \"language\"=>language, \"outcomes\"=>outcomes, \"rule\"=>rule), args)); aws_config=aws_config)\n\n\"\"\"\n UpdateVariable()\n\nUpdates a variable.\n\n# Required Parameters\n- `name`: The name of the variable.\n\n# Optional Parameters\n- `defaultValue`: The new default value of the variable.\n- `description`: The new description.\n- `variableType`: The variable type. 
For more information see Variable types.\n\"\"\"\n\nupdate_variable(name; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateVariable\", Dict{String, Any}(\"name\"=>name); aws_config=aws_config)\nupdate_variable(name, args::AbstractDict{String, <:Any}; aws_config::AWSConfig=global_aws_config()) = frauddetector(\"UpdateVariable\", Dict{String, Any}(mergewith(_merge, Dict{String, Any}(\"name\"=>name), args)); aws_config=aws_config)\n","avg_line_length":60.6727019499,"max_line_length":631,"alphanum_fraction":0.7800426968} {"size":3792,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using DrWatson\nquickactivate(@__DIR__)\n@show projectdir()\n\n## ============================================================================\n@info \"load packages\"\nusing CovidSurvey\nusing Random, Dates, Turing\nusing Base.Threads\nusing StatsPlots\nusing PrettyTables\nusing DataFrames\nusing BSON\nusing ColorSchemes\ncolors = ColorSchemes.tableau_10.colors\n\nusing Memoization\nusing ReverseDiff\nsetadbackend(:reversediff)\nTuring.setrdcache(true)\n\nplotlyjs()\n@info \"number of threads available: $(nthreads())\"\n## ============================================================================\n\nps = (\n warmup = 100,\n model = \"deaths\",\n steps = 100,\n seed = nothing,\n observ = \"2021-03-01\",#\"2021-02-06\",#\"2021-03-25\"\n chains = 1,\n preds = \"CS,MG,MA\",#\"CF,CC,CR,CS\"\n semipara=true,\n rwstep = 7,\n include= false,\n std = false,\n)\n## ============================================================================\n# load data\n@info \"load data\"\n\ndata_params = (\n observationsend = ps.observ\n , predictors = ps.preds|> CovidSurvey.parse_predictors\n # , cases_start = ps.cases\n # , hospitmodel = Regional.HospitInit1(obs_stop=\"2020-07-01\")\n , casemodel = Regional.CaseInit(obs_start=\"2020-06-01\")\n , seromodel = Regional.SeroInit(delay=0, std=1.)\n , rwstep = ps.rwstep\n , epidemicstart = 20\n , numimpute = 6\n , include_early_dynamic = ps.include\n , link = KLogistic(4.5)\n , invlink = KLogit(4.5)\n , predictive = false\n , covariates_kwargs = Dict(\n :semiparametric => ps.semipara,\n :fname => [projectdir(\"data\/contacts\/dk\/\", \"averaged_contact_rates_region=$(r).csv\" ) for r in 1:Regional.nregions],#[projectdir(\"data\", \"mean_contact_rates_region=$r.csv\" ) for r in 1:Regional.nregions],\n :shift => -1,\n :startdate => \"2020-11-10\", # >= 1000 cases \/ day\n :datecol => \"dates\",\n :aggregation => ps.rwstep,\n :standartize => ps.std,\n :mobility => [projectdir(\"data\/mobility\/mobility_region=$(Regional.regions[i]).csv\") for i in 1:Regional.nregions]\n )\n )\ndata = Regional.load_data(; data_params... 
)\nd = turing_data = data.turing_data;\n\ngethospits(d,i) = DataFrame( :hospits => d.hospits[i] )\ngetdeaths(d,i) = DataFrame( :deaths => d.deaths[i] )\ngetcases(d,i) = DataFrame( :cases => d.cases[i] )\ngetdates(d,i) = DataFrame( :date => d.dates[i] )\n\nfunction getrw(d, i)\n no = d.num_observations[i]\n cs = d.covariates_starts[i]\n ri = d.rt_step_indices[i]\n li = d.lockdown_indices[i]\n\n rw = ones(Int64, no)\n rw[li+1:cs-1] = ri .+ 1\n rw[cs:no] .= rw[cs-1]\n return DataFrame(:rw => rw)\nend\n\nfunction getcovariates(d, i)\n no = d.num_observations[i]\n cs = d.covariates_starts[i]\n names = data.predictors\n co = d.covariates[i]\n np = length(names)\n\n dat = zeros(no, np)\n dat[cs:no, :] = co\n return DataFrame(dat, names)\nend\n\nfunction epidata(d, i, r)\n df = hcat(\n getdates(d,i)\n , getcases(d,i)\n , gethospits(d,i)\n , getdeaths(d,i)\n , getcovariates(d, i)\n , getrw(d, i)\n )\n df[!,:region] .= r\n return df\nend\n\nfunction epidata(data)\n return vcat( [epidata(data.turing_data, i, r) for (i, r) in enumerate(data.regions)]... )\nend\n\nepid = epidata(data)\nCSV.write(projectdir(\"data\/epidemia\", \"denmark_predictors=CS-MG-MA.csv\"), epid)\n\n\nplotlyjs()\nlet\n p = plot()\n for (i, r) in enumerate(data.regions)\n df = epidata(d, i, Regional.regions[i])\n plot!(df.date, cumsum(df.hospit), hover=[\"$i, cases: $n\" for (i,n) in enumerate(cumsum(df.hospits))], c=colors[i])\n vline!([df.date[20]], c=colors[i])\n end\n hline!([30], c=colors[end])\n p\nend\n","avg_line_length":28.7272727273,"max_line_length":219,"alphanum_fraction":0.5683016878} {"size":241,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"module Esolangs\nexport Boolfuck, Brainfuck, PortableMinskyMachineNotation\n\ninclude(\"Boolfuck\/src\/Boolfuck.jl\")\ninclude(\"Brainfuck\/src\/Brainfuck.jl\")\ninclude(\"PortableMinskyMachineNotation\/src\/PortableMinskyMachineNotation.jl\")\n\nend # module\n","avg_line_length":26.7777777778,"max_line_length":77,"alphanum_fraction":0.8423236515} {"size":578,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# Autogenerated wrapper script for CoinUtils_jll for x86_64-w64-mingw32-cxx03\nexport libCoinUtils\n\nusing OpenBLAS32_jll\nusing CompilerSupportLibraries_jll\nJLLWrappers.@generate_wrapper_header(\"CoinUtils\")\nJLLWrappers.@declare_library_product(libCoinUtils, \"libCoinUtils-3.dll\")\nfunction __init__()\n JLLWrappers.@generate_init_header(OpenBLAS32_jll, CompilerSupportLibraries_jll)\n JLLWrappers.@init_library_product(\n libCoinUtils,\n \"bin\\\\libCoinUtils-3.dll\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@generate_init_footer()\nend # __init__()\n","avg_line_length":32.1111111111,"max_line_length":83,"alphanum_fraction":0.7889273356} {"size":3813,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# Autogenerated wrapper script for lib4ti2_jll for armv6l-linux-gnueabihf-cxx03\nexport circuits, exe4ti2gmp, exe4ti2int32, exe4ti2int64, genmodel, gensymm, graver, groebner, hilbert, lib4ti2, lib4ti2int32, lib4ti2int64, libzsolve, markov, minimize, normalform, output, ppi, qsolve, rays, walk, zbasis, zsolve\n\nusing GMP_jll\nusing 
GLPK_jll\nJLLWrappers.@generate_wrapper_header(\"lib4ti2\")\nJLLWrappers.@declare_file_product(circuits)\nJLLWrappers.@declare_executable_product(exe4ti2gmp)\nJLLWrappers.@declare_executable_product(exe4ti2int32)\nJLLWrappers.@declare_executable_product(exe4ti2int64)\nJLLWrappers.@declare_executable_product(genmodel)\nJLLWrappers.@declare_executable_product(gensymm)\nJLLWrappers.@declare_file_product(graver)\nJLLWrappers.@declare_file_product(groebner)\nJLLWrappers.@declare_file_product(hilbert)\nJLLWrappers.@declare_library_product(lib4ti2, \"lib4ti2gmp.so.0\")\nJLLWrappers.@declare_library_product(lib4ti2int32, \"lib4ti2int32.so.0\")\nJLLWrappers.@declare_library_product(lib4ti2int64, \"lib4ti2int64.so.0\")\nJLLWrappers.@declare_library_product(libzsolve, \"libzsolve.so.0\")\nJLLWrappers.@declare_file_product(markov)\nJLLWrappers.@declare_file_product(minimize)\nJLLWrappers.@declare_file_product(normalform)\nJLLWrappers.@declare_executable_product(output)\nJLLWrappers.@declare_executable_product(ppi)\nJLLWrappers.@declare_file_product(qsolve)\nJLLWrappers.@declare_file_product(rays)\nJLLWrappers.@declare_file_product(walk)\nJLLWrappers.@declare_file_product(zbasis)\nJLLWrappers.@declare_executable_product(zsolve)\nfunction __init__()\n JLLWrappers.@generate_init_header(GMP_jll, GLPK_jll)\n JLLWrappers.@init_file_product(\n circuits,\n \"bin\/circuits\",\n )\n\n JLLWrappers.@init_executable_product(\n exe4ti2gmp,\n \"bin\/4ti2gmp\",\n )\n\n JLLWrappers.@init_executable_product(\n exe4ti2int32,\n \"bin\/4ti2int32\",\n )\n\n JLLWrappers.@init_executable_product(\n exe4ti2int64,\n \"bin\/4ti2int64\",\n )\n\n JLLWrappers.@init_executable_product(\n genmodel,\n \"bin\/genmodel\",\n )\n\n JLLWrappers.@init_executable_product(\n gensymm,\n \"bin\/gensymm\",\n )\n\n JLLWrappers.@init_file_product(\n graver,\n \"bin\/graver\",\n )\n\n JLLWrappers.@init_file_product(\n groebner,\n \"bin\/groebner\",\n )\n\n JLLWrappers.@init_file_product(\n hilbert,\n \"bin\/hilbert\",\n )\n\n JLLWrappers.@init_library_product(\n lib4ti2,\n \"lib\/lib4ti2gmp.so\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@init_library_product(\n lib4ti2int32,\n \"lib\/lib4ti2int32.so\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@init_library_product(\n lib4ti2int64,\n \"lib\/lib4ti2int64.so\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@init_library_product(\n libzsolve,\n \"lib\/libzsolve.so\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@init_file_product(\n markov,\n \"bin\/markov\",\n )\n\n JLLWrappers.@init_file_product(\n minimize,\n \"bin\/minimize\",\n )\n\n JLLWrappers.@init_file_product(\n normalform,\n \"bin\/normalform\",\n )\n\n JLLWrappers.@init_executable_product(\n output,\n \"bin\/output\",\n )\n\n JLLWrappers.@init_executable_product(\n ppi,\n \"bin\/ppi\",\n )\n\n JLLWrappers.@init_file_product(\n qsolve,\n \"bin\/qsolve\",\n )\n\n JLLWrappers.@init_file_product(\n rays,\n \"bin\/rays\",\n )\n\n JLLWrappers.@init_file_product(\n walk,\n \"bin\/walk\",\n )\n\n JLLWrappers.@init_file_product(\n zbasis,\n \"bin\/zbasis\",\n )\n\n JLLWrappers.@init_executable_product(\n zsolve,\n \"bin\/zsolve\",\n )\n\n JLLWrappers.@generate_init_footer()\nend # __init__()\n","avg_line_length":24.9215686275,"max_line_length":228,"alphanum_fraction":0.6931549961} {"size":4104,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"@testset \"Basic Figures\" begin\n fig = Figure()\n @test current_figure() === fig\n\n fig2 = Figure()\n @test fig !== fig2\n @test current_figure() === fig2\n\n 
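    # Make `fig` the current figure again; `current_figure()` should then return it.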
current_figure!(fig)\n @test current_figure() === fig\nend\n\n@testset \"FigureAxisPlot\" begin\n fap = scatter(rand(100, 2))\n @test fap isa Makie.FigureAxisPlot\n fig, ax, p = scatter(rand(100, 2))\n @test fig isa Figure\n @test ax isa Axis\n @test p isa Scatter\n\n fig2, ax2, p2 = scatter(rand(100, 3))\n @test fig2 isa Figure\n @test ax2 isa LScene # 3d plot\n @test p2 isa Scatter\nend\n\n@testset \"AxisPlot and Axes\" begin\n fig = Figure()\n @test current_axis() === nothing\n @test current_figure() === fig\n\n figurepos = fig[1, 1]\n @test figurepos isa Makie.FigurePosition\n ap = scatter(figurepos, rand(100, 2))\n @test ap isa Makie.AxisPlot\n @test current_axis() === ap.axis\n\n ax2, p2 = scatter(fig[1, 2], rand(100, 2))\n @test ax2 isa Axis\n @test p2 isa Scatter\n @test current_axis() === ax2\n\n ax3, p3 = scatter(fig[1, 3], rand(100, 3))\n @test ax3 isa LScene\n @test p3 isa Scatter\n @test current_axis() === ax3\n\n @test ap.axis in fig.content\n @test ax2 in fig.content\n @test ax3 in fig.content\n\n current_axis!(fig, ax2)\n @test current_axis(fig) === ax2\n @test current_axis() === ax2\n\n fig2 = Figure()\n @test current_figure() === fig2\n @test current_axis() === nothing\n @test current_axis(fig) === ax2\n\n # current axis can also switch current figure when called without figure argument\n current_axis!(ax2)\n @test current_axis() === ax2\n @test current_figure() === fig\nend\n\n@testset \"Deleting from figures\" begin\n fig = Figure()\n ax = fig[1, 1] = Axis(fig)\n @test current_axis() === ax\n @test ax in fig.content\n delete!(ax)\n @test !(ax in fig.content)\n @test ax.parent === nothing\n @test current_axis() === nothing\nend\n\n@testset \"Getting figure content\" begin\n fig = Figure()\n ax = fig[1, 1] = Axis(fig)\n @test contents(fig[1, 1], exact = true) == [ax]\n @test contents(fig[1, 1], exact = false) == [ax]\n @test contents(fig[1:2, 1:2], exact = true) == []\n @test contents(fig[1:2, 1:2], exact = false) == [ax]\n\n @test content(fig[1, 1]) == ax\n @test_throws ErrorException content(fig[2, 2])\n @test_throws ErrorException content(fig[1:2, 1:2])\n\n label = fig[1, 1] = Label(fig)\n @test contents(fig[1, 1], exact = true) == [ax, label]\n @test contents(fig[1, 1], exact = false) == [ax, label]\n @test contents(fig[1:2, 1:2], exact = true) == []\n @test contents(fig[1:2, 1:2], exact = false) == [ax, label]\n\n @test_throws ErrorException content(fig[1, 1])\n\n ax2 = fig[1, 2][1, 1] = Axis(fig)\n @test contents(fig[1, 2][1, 1], exact = true) == [ax2]\n @test contents(fig[1, 2][1, 1], exact = false) == [ax2]\n @test contents(fig[1, 2][1:2, 1:2], exact = true) == []\n @test contents(fig[1, 2][1:2, 1:2], exact = false) == [ax2]\n @test contents(fig[1:2, 1:2][1, 1]) == []\n\n label2 = fig[1, 2][1, 1] = Label(fig)\n @test contents(fig[1, 2][1, 1], exact = true) == [ax2, label2]\n @test contents(fig[1, 2][1, 1], exact = false) == [ax2, label2]\n @test contents(fig[1, 2][1:2, 1:2], exact = true) == []\n @test contents(fig[1, 2][1:2, 1:2], exact = false) == [ax2, label2]\n\n @test_throws ErrorException content(fig[1, 2][1, 1])\nend\n\n@testset \"Nested axis assignment\" begin\n fig = Figure()\n @test Axis(fig[1, 1]) isa Axis\n @test Axis(fig[1, 1][2, 3]) isa Axis\n @test Axis(fig[1, 1][2, 3][4, 5]) isa Axis\n @test_throws ErrorException scatter(fig[1, 1])\n @test_throws ErrorException scatter(fig[1, 1][2, 3])\n @test_throws ErrorException scatter(fig[1, 1][2, 3][4, 5])\n @test scatter(fig[1, 2], 1:10) isa Makie.AxisPlot\n @test scatter(fig[1, 1][1, 1], 1:10) isa 
Makie.AxisPlot\n @test scatter(fig[1, 1][1, 1][1, 1], 1:10) isa Makie.AxisPlot\n\n fig = Figure()\n fig[1, 1] = GridLayout()\n @test Axis(fig[1, 1][1, 1]) isa Axis\n fig[1, 1] = GridLayout()\n @test_throws ErrorException Axis(fig[1, 1][1, 1])\nend\n","avg_line_length":31.0909090909,"max_line_length":85,"alphanum_fraction":0.5967348928} {"size":16434,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This file is a companion to `src\/api.jl` --- it defines the raw ccall wrappers, while\n# here small normalizations are made to make the calls more Julian.\n# For instance, many property getters return values through pointer output arguments,\n# so the methods here handle making the appropriate `Ref`s and return them (as tuples).\n\n###\n### HDF5 General library functions\n###\n\nfunction h5_get_libversion()\n majnum, minnum, relnum = Ref{Cuint}(), Ref{Cuint}(), Ref{Cuint}()\n h5_get_libversion(majnum, minnum, relnum)\n VersionNumber(majnum[], minnum[], relnum[])\nend\n\nfunction h5_is_library_threadsafe()\n is_ts = Ref{Cuint}()\n h5_is_library_threadsafe(is_ts)\n return is_ts[] > 0\nend\n\n###\n### Attribute Interface\n###\n\nfunction h5a_get_name(attr_id)\n len = h5a_get_name(attr_id, 0, C_NULL)\n buf = StringVector(len)\n h5a_get_name(attr_id, len+1, buf)\n return String(buf)\nend\n\nfunction h5a_get_name_by_idx(loc_id, obj_name, idx_type, order, idx, lapl_id)\n len = h5a_get_name_by_idx(loc_id, obj_name, idx_type, order, idx, C_NULL, 0, lapl_id)\n buf = StringVector(len)\n h5a_get_name_by_idx(loc_id, obj_name, idx_type, order, idx, buf, len + 1, lapl_id)\n return String(buf)\nend\n\n# libhdf5 supports proper closure environments, so we use that support rather than\n# emulating it with the less desirable form of creating closure handles directly in\n# `@cfunction` with `$f`.\n# This helper translates between the two preferred forms for each respective language.\nfunction h5a_iterate_helper(loc_id::hid_t, attr_name::Ptr{Cchar}, ainfo::Ptr{H5A_info_t}, @nospecialize(f::Any))::herr_t\n return f(loc_id, attr_name, ainfo)\nend\n\n\"\"\"\n h5a_iterate(f, loc_id, idx_type, order, idx = 0) -> hsize_t\n\nExecutes [`h5a_iterate`](@ref h5a_iterate(::hid_t, ::Cint, ::Cint, ::Ptr{hsize_t}, ::Ptr{Cvoid}, ::Ptr{Cvoid}))\nwith the user-provided callback function `f`, returning the index where iteration ends.\n\nThe callback function must correspond to the signature\n```\n f(loc::HDF5.API.hid_t, name::Ptr{Cchar}, info::Ptr{HDF5.API.H5A_info_t}) -> HDF5.API.herr_t\n```\nwhere a negative return value halts iteration abnormally, a positive value halts iteration\nsuccessfully, and zero continues iteration.\n\n# Examples\n```julia-repl\njulia> HDF5.API.h5a_iterate(obj, HDF5.API.H5_INDEX_NAME, HDF5.API.H5_ITER_INC) do loc, name, info\n println(unsafe_string(name))\n return HDF5.API.herr_t(0)\n end\n```\n\"\"\"\nfunction h5a_iterate(@nospecialize(f), obj_id, idx_type, order, idx = 0)\n idxref = Ref{hsize_t}(idx)\n fptr = @cfunction(h5a_iterate_helper, herr_t, (hid_t, Ptr{Cchar}, Ptr{H5A_info_t}, Any))\n h5a_iterate(obj_id, idx_type, order, idxref, fptr, f)\n return idxref[]\nend\n\n###\n### Dataset Interface\n###\n\n\"\"\"\n h5d_vlen_get_buf_size(dataset_id, type_id, space_id)\n\nHelper method to determines the number of bytes required to store the variable length data from the dataset. 
Returns a value of type `HDF5.API.hsize_t`.\n\"\"\"\nfunction h5d_vlen_get_buf_size(dataset_id, type_id, space_id)\n sz = Ref{hsize_t}()\n h5d_vlen_get_buf_size(dataset_id, type_id, space_id, sz)\n return sz[]\nend\n\n\"\"\"\n h5d_get_chunk_info(dataset_id, fspace_id, index)\n h5d_get_chunk_info(dataset_id, index; fspace_id = HDF5.API.H5S_ALL)\n\nHelper method to retrieve chunk information.\n\nReturns a `NamedTuple{(:offset, :filter_mask, :addr, :size), Tuple{HDF5.API.hsize_t, UInt32, HDF5.API.haddr_t, HDF5.API.hsize_t}}`.\n\"\"\"\nfunction h5d_get_chunk_info(dataset_id, fspace_id, index)\n offset = Vector{hsize_t}(undef, ndims(dataset_id))\n filter_mask = Ref{UInt32}()\n addr = Ref{haddr_t}()\n size = Ref{hsize_t}()\n h5d_get_chunk_info(dataset_id, fspace_id, index, offset, filter_mask, addr, size)\n return (offset = offset, filter_mask = filter_mask[], addr = addr[], size = size[])\nend\nh5d_get_chunk_info(dataset_id, index; fspace_id = H5S_ALL) = h5d_get_chunk_info(dataset_id, fspace_id, index)\n\n\"\"\"\n h5d_get_chunk_info_by_coord(dataset_id, offset)\n\nHelper method to read chunk information by coordinate. Returns a `NamedTuple{(:filter_mask, :addr, :size), Tuple{UInt32, HDF5.API.haddr_t, HDF5.API.hsize_t}}`.\n\"\"\"\nfunction h5d_get_chunk_info_by_coord(dataset_id, offset)\n filter_mask = Ref{UInt32}()\n addr = Ref{haddr_t}()\n size = Ref{hsize_t}()\n h5d_get_chunk_info_by_coord(dataset_id, offset, filter_mask, addr, size)\n return (filter_mask = filter_mask[], addr = addr[], size = size[])\nend\n\n\"\"\"\n h5d_get_chunk_storage_size(dataset_id, offset)\n\nHelper method to retrieve the chunk storage size in bytes. Returns an integer of type `HDF5.API.hsize_t`.\n\"\"\"\nfunction h5d_get_chunk_storage_size(dataset_id, offset)\n chunk_nbytes = Ref{hsize_t}()\n h5d_get_chunk_storage_size(dataset_id, offset, chunk_nbytes)\n return chunk_nbytes[]\nend\n\n@static if v\"1.10.5\" \u2264 _libhdf5_build_ver\n \"\"\"\n h5d_get_num_chunks(dataset_id, fspace_id = H5S_ALL)\n\n Helper method to retrieve the number of chunks. 
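    For instance, `HDF5.API.h5d_get_num_chunks(dset_id)` counts the chunks written for an open chunked
    dataset `dset_id` (a placeholder name here); by default the whole dataspace (`H5S_ALL`) is queried.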
Returns an integer of type `HDF5.API.hsize_t`.\n \"\"\"\n function h5d_get_num_chunks(dataset_id, fspace_id = H5S_ALL)\n nchunks = Ref{hsize_t}()\n h5d_get_num_chunks(dataset_id, fspace_id, nchunks)\n return nchunks[]\n end\nend\n\n\"\"\"\n h5d_get_space_status(dataset_id)\n\nHelper method to retrieve the status of the dataset space.\nReturns a `HDF5.API.H5D_space_status_t` (`Cint`) indicating the status, see `HDF5.API.H5D_SPACE_STATUS_`* constants.\n\"\"\"\nfunction h5d_get_space_status(dataset_id)\n r = Ref{H5D_space_status_t}()\n h5d_get_space_status(dataset_id, r)\n return r[]\nend\n\n\n###\n### Error Interface\n###\n\nfunction h5e_get_auto(estack_id)\n func = Ref{Ptr{Cvoid}}()\n client_data = Ref{Ptr{Cvoid}}()\n h5e_get_auto(estack_id, func, client_data)\n return func[], client_data[]\nend\n\n\"\"\"\n mesg_type, mesg = h5e_get_msg(meshg_id)\n\n\"\"\"\nfunction h5e_get_msg(mesg_id)\n mesg_type = Ref{Cint}()\n mesg_len = h5e_get_msg(mesg_id, mesg_type, C_NULL, 0)\n buffer = StringVector(mesg_len)\n h5e_get_msg(mesg_id, mesg_type, buffer, mesg_len+1)\n resize!(buffer, mesg_len)\n return mesg_type[], String(buffer)\nend\n\n\n# See explanation for h5a_iterate above.\nfunction h5e_walk_helper(n::Cuint, err_desc::Ptr{H5E_error2_t}, @nospecialize(f::Any))::herr_t\n f(n, err_desc)\n return herr_t(0)\nend\nfunction h5e_walk(f::Function, stack_id, direction)\n fptr = @cfunction(h5e_walk_helper, herr_t, (Cuint, Ptr{H5E_error2_t}, Any))\n h5e_walk(stack_id, direction, fptr, f)\nend\n\n###\n### File Interface\n###\n\nfunction h5f_get_intent(file_id)\n intent = Ref{Cuint}()\n h5f_get_intent(file_id, intent)\n return intent[]\nend\n\nfunction h5f_get_name(loc_id)\n len = h5f_get_name(loc_id, C_NULL, 0)\n buf = StringVector(len)\n h5f_get_name(loc_id, buf, len+1)\n return String(buf)\nend\n\nfunction h5f_get_obj_ids(file_id, types)\n sz = h5f_get_obj_count(file_id, types)\n hids = Vector{hid_t}(undef, sz)\n sz2 = h5f_get_obj_ids(file_id, types, sz, hids)\n sz2 != sz && resize!(hids, sz2)\n return hids\nend\n\nfunction h5f_get_vfd_handle(file_id, fapl)\n file_handle = Ref{Ptr{Cvoid}}()\n h5f_get_vfd_handle(file_id, fapl, file_handle)\n return file_handle[]\nend\n\n###\n### Group Interface\n###\n\nfunction h5g_get_info(loc_id)\n ginfo = Ref{H5G_info_t}()\n h5g_get_info(loc_id, ginfo)\n return ginfo[]\nend\n\nfunction h5g_get_num_objs(loc_id)\n num_objs = Ref{hsize_t}()\n h5g_get_num_objs(loc_id, num_objs)\n return num_objs[]\nend\n\n###\n### Identifier Interface\n###\n\nfunction h5i_get_name(loc_id)\n len = h5i_get_name(loc_id, C_NULL, 0)\n buf = StringVector(len)\n h5i_get_name(loc_id, buf, len+1)\n return String(buf)\nend\n\n###\n### Link Interface\n###\n\nfunction h5l_get_info(link_loc_id, link_name, lapl_id)\n info = Ref{H5L_info_t}()\n h5l_get_info(link_loc_id, link_name, info, lapl_id)\n return info[]\nend\n\nfunction h5l_get_name_by_idx(loc_id, group_name, idx_type, order, idx, lapl_id)\n len = h5l_get_name_by_idx(loc_id, group_name, idx_type, order, idx, C_NULL, 0, lapl_id)\n buf = StringVector(len)\n h5l_get_name_by_idx(loc_id, group_name, idx_type, order, idx, buf, len + 1, lapl_id)\n return String(buf)\nend\n\n# See explanation for h5a_iterate above.\nfunction h5l_iterate_helper(group::hid_t, name::Ptr{Cchar}, info::Ptr{H5L_info_t}, @nospecialize(f::Any))::herr_t\n return f(group, name, info)\nend\n\"\"\"\n h5l_iterate(f, group_id, idx_type, order, idx = 0) -> hsize_t\n\nExecutes [`h5l_iterate`](@ref h5l_iterate(::hid_t, ::Cint, ::Cint, ::Ptr{hsize_t}, ::Ptr{Cvoid}, ::Ptr{Cvoid}))\nwith the 
user-provided callback function `f`, returning the index where iteration ends.\n\nThe callback function must correspond to the signature\n```\n f(group::HDF5.API.hid_t, name::Ptr{Cchar}, info::Ptr{HDF5.API.H5L_info_t}) -> HDF5.API.herr_t\n```\nwhere a negative return value halts iteration abnormally, a positive value halts iteration\nsuccessfully, and zero continues iteration.\n\n# Examples\n```julia-repl\njulia> HDF5.API.h5l_iterate(hfile, HDF5.API.H5_INDEX_NAME, HDF5.API.H5_ITER_INC) do group, name, info\n println(unsafe_string(name))\n return HDF5.API.herr_t(0)\n end\n```\n\"\"\"\nfunction h5l_iterate(@nospecialize(f), group_id, idx_type, order, idx = 0)\n idxref = Ref{hsize_t}(idx)\n fptr = @cfunction(h5l_iterate_helper, herr_t, (hid_t, Ptr{Cchar}, Ptr{H5L_info_t}, Any))\n h5l_iterate(group_id, idx_type, order, idxref, fptr, f)\n return idxref[]\nend\n\n###\n### Object Interface\n###\n\nfunction h5o_get_info(loc_id)\n oinfo = Ref{H5O_info_t}()\n h5o_get_info(loc_id, oinfo)\n return oinfo[]\nend\n\n###\n### Property Interface\n###\n\nfunction h5p_get_alignment(fapl_id)\n threshold = Ref{hsize_t}()\n alignment = Ref{hsize_t}()\n h5p_get_alignment(fapl_id, threshold, alignment)\n return threshold[], alignment[]\nend\n\nfunction h5p_get_alloc_time(plist_id)\n alloc_time = Ref{Cint}()\n h5p_get_alloc_time(plist_id, alloc_time)\n return alloc_time[]\nend\n\nfunction h5p_get_char_encoding(plist_id)\n encoding = Ref{Cint}()\n h5p_get_char_encoding(plist_id, encoding)\n return encoding[]\nend\n\nfunction h5p_get_chunk(plist_id)\n ndims = h5p_get_chunk(plist_id, 0, C_NULL)\n dims = Vector{hsize_t}(undef, ndims)\n h5p_get_chunk(plist_id, ndims, dims)\n return dims, ndims\nend\n\nfunction h5p_get_create_intermediate_group(plist_id)\n cig = Ref{Cuint}()\n h5p_get_create_intermediate_group(plist_id, cig)\n return cig[]\nend\n\nfunction h5p_get_dxpl_mpio(dxpl_id)\n xfer_mode = Ref{Cint}()\n h5p_get_dxpl_mpio(dxpl_id, xfer_mode)\n return xfer_mode[]\nend\n\nfunction h5p_get_fclose_degree(fapl_id)\n out = Ref{Cint}()\n h5p_get_fclose_degree(fapl_id, out)\n return out[]\nend\n\nfunction h5p_get_libver_bounds(plist_id)\n low = Ref{Cint}()\n high = Ref{Cint}()\n h5p_get_libver_bounds(plist_id, low, high)\n return low[], high[]\nend\n\nfunction h5p_get_local_heap_size_hint(plist_id)\n size_hint = Ref{Csize_t}()\n h5p_get_local_heap_size_hint(plist_id, size_hint)\n return size_hint[]\nend\n\nfunction h5p_get_obj_track_times(plist_id)\n track_times = Ref{UInt8}()\n h5p_get_obj_track_times(plist_id, track_times)\n return track_times[] != 0x0\nend\n\nfunction h5p_get_userblock(plist_id)\n len = Ref{hsize_t}()\n h5p_get_userblock(plist_id, len)\n return len[]\nend\n\n# Note: The following function(s) implement direct ccalls because the binding generator\n# cannot (yet) do the string wrapping and memory freeing.\n\n\"\"\"\n h5p_get_class_name(pcid::hid_t) -> String\n\nSee `libhdf5` documentation for [`H5P_GET_CLASS_NAME`](https:\/\/portal.hdfgroup.org\/display\/HDF5\/H5P_GET_CLASS_NAME).\n\"\"\"\nfunction h5p_get_class_name(pcid)\n pc = ccall((:H5Pget_class_name, libhdf5), Ptr{UInt8}, (hid_t,), pcid)\n if pc == C_NULL\n @h5error(\"Error getting class name\")\n end\n s = unsafe_string(pc)\n h5_free_memory(pc)\n return s\nend\n\n###\n### Reference Interface\n###\n\n###\n### Dataspace Interface\n###\n\nfunction h5s_get_regular_hyperslab(space_id)\n n = h5s_get_simple_extent_ndims(space_id)\n start = Vector{hsize_t}(undef, n)\n stride = Vector{hsize_t}(undef, n)\n count = Vector{hsize_t}(undef, n)\n block = 
Vector{hsize_t}(undef, n)\n h5s_get_regular_hyperslab(space_id, start, stride, count, block)\n return start, stride, count, block\nend\n\nfunction h5s_get_simple_extent_dims(space_id)\n n = h5s_get_simple_extent_ndims(space_id)\n dims = Vector{hsize_t}(undef, n)\n maxdims = Vector{hsize_t}(undef, n)\n h5s_get_simple_extent_dims(space_id, dims, maxdims)\n return dims, maxdims\nend\nfunction h5s_get_simple_extent_dims(space_id, ::Nothing)\n n = h5s_get_simple_extent_ndims(space_id)\n dims = Vector{hsize_t}(undef, n)\n h5s_get_simple_extent_dims(space_id, dims, C_NULL)\n return dims\nend\n\n\n###\n### Datatype Interface\n###\n\nfunction h5t_get_array_dims(type_id)\n nd = h5t_get_array_ndims(type_id)\n dims = Vector{hsize_t}(undef, nd)\n h5t_get_array_dims(type_id, dims)\n return dims\nend\n\nfunction h5t_get_fields(type_id)\n spos = Ref{Csize_t}()\n epos = Ref{Csize_t}()\n esize = Ref{Csize_t}()\n mpos = Ref{Csize_t}()\n msize = Ref{Csize_t}()\n h5t_get_fields(type_id, spos, epos, esize, mpos, msize)\n return (spos[], epos[], esize[], mpos[], msize[])\nend\n\n# Note: The following two functions implement direct ccalls because the binding generator\n# cannot (yet) do the string wrapping and memory freeing.\n\"\"\"\n h5t_get_member_name(type_id::hid_t, index::Cuint) -> String\n\nSee `libhdf5` documentation for [`H5Oopen`](https:\/\/portal.hdfgroup.org\/display\/HDF5\/H5T_GET_MEMBER_NAME).\n\"\"\"\nfunction h5t_get_member_name(type_id, index)\n pn = ccall((:H5Tget_member_name, libhdf5), Ptr{UInt8}, (hid_t, Cuint), type_id, index)\n if pn == C_NULL\n @h5error(\"Error getting name of compound datatype member #$index\")\n end\n s = unsafe_string(pn)\n h5_free_memory(pn)\n return s\nend\n\n\"\"\"\n h5t_get_tag(type_id::hid_t) -> String\n\nSee `libhdf5` documentation for [`H5Oopen`](https:\/\/portal.hdfgroup.org\/display\/HDF5\/H5T_GET_TAG).\n\"\"\"\nfunction h5t_get_tag(type_id)\n pc = ccall((:H5Tget_tag, libhdf5), Ptr{UInt8}, (hid_t,), type_id)\n if pc == C_NULL\n @h5error(\"Error getting opaque tag\")\n end\n s = unsafe_string(pc)\n h5_free_memory(pc)\n return s\nend\n\n###\n### Optimized Functions Interface\n###\n\n###\n### HDF5 Lite Interface\n###\n\nfunction h5lt_dtype_to_text(dtype_id)\n len = Ref{Csize_t}()\n h5lt_dtype_to_text(dtype_id, C_NULL, 0, len)\n buf = StringVector(len[] - 1)\n h5lt_dtype_to_text(dtype_id, buf, 0, len)\n return String(buf)\nend\n\n###\n### Table Interface\n###\n\nfunction h5tb_get_table_info(loc_id, table_name)\n nfields = Ref{hsize_t}()\n nrecords = Ref{hsize_t}()\n h5tb_get_table_info(loc_id, table_name, nfields, nrecords)\n return nfields[], nrecords[]\nend\n\nfunction h5tb_get_field_info(loc_id, table_name)\n nfields, = h5tb_get_table_info(loc_id, table_name)\n field_sizes = Vector{Csize_t}(undef, nfields)\n field_offsets = Vector{Csize_t}(undef, nfields)\n type_size = Ref{Csize_t}()\n # pass C_NULL to field_names argument since libhdf5 does not provide a way to determine if the\n # allocated buffer is the correct length, which is thus susceptible to a buffer overflow if\n # an incorrect buffer length is passed. 
Instead, we manually compute the column names using the\n # same calls that h5tb_get_field_info internally uses.\n h5tb_get_field_info(loc_id, table_name, C_NULL, field_sizes, field_offsets, type_size)\n did = h5d_open(loc_id, table_name, H5P_DEFAULT)\n tid = h5d_get_type(did)\n h5d_close(did)\n field_names = [h5t_get_member_name(tid, i-1) for i in 1:nfields]\n h5t_close(tid)\n return field_names, field_sizes, field_offsets, type_size[]\nend\n\n###\n### Filter Interface\n###\n\n\n\n###\n### MPIO\n###\n\nh5p_set_fapl_mpio(fapl_id, comm::Hmpih32, info::Hmpih32) =\n h5p_set_fapl_mpio32(fapl_id, comm, info)\nh5p_set_fapl_mpio(fapl_id, comm::Hmpih64, info::Hmpih64) =\n h5p_set_fapl_mpio64(fapl_id, comm, info)\n\n\nh5p_get_fapl_mpio(fapl_id, comm::Ref{Hmpih32}, info::Ref{Hmpih32}) =\n h5p_get_fapl_mpio32(fapl_id, comm, info)\nh5p_get_fapl_mpio(fapl_id, comm::Ref{Hmpih64}, info::Ref{Hmpih64}) =\n h5p_get_fapl_mpio64(fapl_id, comm, info)\n\nfunction h5p_get_fapl_mpio(fapl_id, ::Type{Hmpih}) where {Hmpih<:Union{Hmpih32,Hmpih64}}\n comm, info = Ref{Hmpih}(), Ref{Hmpih}()\n h5p_get_fapl_mpio(fapl_id, comm, info)\n return comm[], info[]\nend\n","avg_line_length":29.3989266547,"max_line_length":159,"alphanum_fraction":0.7174759645} {"size":480,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using Scanivalve\nusing Documenter\n\nDocMeta.setdocmeta!(Scanivalve, :DocTestSetup, :(using Scanivalve); recursive=true)\n\nmakedocs(;\n modules=[Scanivalve],\n authors=\"Paulo Jabardo \",\n repo=\"https:\/\/github.com\/pjsjipt\/Scanivalve.jl\/blob\/{commit}{path}#{line}\",\n sitename=\"Scanivalve.jl\",\n format=Documenter.HTML(;\n prettyurls=get(ENV, \"CI\", \"false\") == \"true\",\n assets=String[],\n ),\n pages=[\n \"Home\" => \"index.md\",\n ],\n)\n","avg_line_length":25.2631578947,"max_line_length":83,"alphanum_fraction":0.6333333333} {"size":150,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using Polynomials\n\np = Poly([-42,0,-12,1])\nq = Poly([-3,1])\n\nd, r = divrem(p,q)\n\nprintln(p, \" divided by \", q, \" is \", d, \" with remainder \", r, \".\")\n","avg_line_length":16.6666666667,"max_line_length":68,"alphanum_fraction":0.5266666667} {"size":24732,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"\"\"\" Generalized algebraic theories (GATs) in Julia.\n\"\"\"\nmodule GAT\nexport @theory, @signature, @instance, theory, invoke_term\n\nusing Base.Meta: ParseError\nusing AutoHashEquals\nusing DataStructures: OrderedDict\nusing Logging\nusing MLStyle: @match\n\nusing ..Meta\n\n# Data types\n############\n\nconst Context = OrderedDict{Symbol,Expr0}\n\n\"\"\" Type constructor in a GAT.\n\"\"\"\n@auto_hash_equals struct TypeConstructor\n name::Symbol\n params::Vector{Symbol}\n context::Context\n doc::Union{String,Nothing}\n\n function TypeConstructor(name::Symbol, params::Vector,\n context::Context, doc=nothing)\n new(name, params, context, doc)\n end\nend\n\n\"\"\" Term constructor in a GAT.\n\"\"\"\n@auto_hash_equals struct TermConstructor\n name::Symbol\n params::Vector{Symbol}\n typ::Expr0\n context::Context\n doc::Union{String,Nothing}\n\n function TermConstructor(name::Symbol, params::Vector, typ::Expr0,\n context::Context, doc=nothing)\n new(name, params, typ, context, doc)\n end\nend\n\n\"\"\" Axiom constructor in a GAT.\n\"\"\"\n@auto_hash_equals struct AxiomConstructor\n name::Symbol\n left::Expr0\n right::Expr0\n context::Context\n doc::Union{String,Nothing}\n\n function AxiomConstructor(name::Symbol, left::Expr0, right::Expr0,\n context::Context, doc=nothing)\n 
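    # Store the axiom's name, left- and right-hand expressions, typing context, and optional docstring.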
new(name, left, right, context, doc)\n end\nend\n\n\"\"\" Data structure for a generalized algebraic theory (GAT).\n\"\"\"\n@auto_hash_equals struct Theory\n types::Vector{TypeConstructor}\n terms::Vector{TermConstructor}\n axioms::Vector{AxiomConstructor}\n aliases::Dict{Symbol,Symbol}\nend\n\nstruct TheoryBinding\n name::Symbol\n params::Vector{Symbol}\nend\nstruct TheoryHead\n main::TheoryBinding\n base::Vector{TheoryBinding}\n TheoryHead(main, base=[]) = new(main, base)\nend\n\n# Theories\n##########\n\n\"\"\" Define a generalized algebraic theory (GAT).\n\nFour kinds of things can go in the theory body:\n\n1. Type constructors, indicated by the special type `TYPE`, e.g.,\n `Hom(X::Ob,Y::Ob)::TYPE`\n2. Term constructors, e.g.,\n `id(X::Ob)::Hom(X,X)`\n3. Function aliases, e.g.,\n `@op Hom :\u2192`\n4. Equality axioms, e.g.,\n `f \u22c5 id(B) == f \u22a3 (A::Ob, B::Ob, f::(A \u2192 B))`\n\nA theory can extend existing theories (at present only one).\n\"\"\"\nmacro theory(head, body)\n theory_builder(head, body)\nend\n\n\"\"\" Define a signature for a generalized algebraic theory (GAT).\n\nA signature is the same as a theory, except it may not contain axioms, and\ntherefore only three kinds of things can go in the signature body:\n\n1. Type constructors, indicated by the special type `TYPE`, e.g.,\n `Hom(X::Ob,Y::Ob)::TYPE`\n2. Term constructors, e.g.,\n `id(X::Ob)::Hom(X,X)`\n3. Function aliases, e.g.,\n `@op Hom :\u2192`\n\nA signature can extend existing theories (at present only one).\n\"\"\"\nmacro signature(head, body)\n theory_builder(head, body, signature=true)\nend\n\n\"\"\" Retrieve generalized algebraic theory associated with abstract type.\n\nFor example, if `Category` is imported from `Catlab.Theories`, then\n`theory(Category)`returns the theory of a category.\n\"\"\"\nfunction theory end\n\n\"\"\" Define how a theory is built, set up as a separate function to allow both\n the signature and theory macros to share code and throw an error if any\n axioms are defined in a signature.\n\"\"\"\nfunction theory_builder(head, body; signature=false)\n # Parse theory header.\n head = parse_theory_head(head)\n @assert all(param in head.main.params\n for base in head.base for param in base.params)\n @assert length(head.base) <= 1 \"Multiple theory extension not supported\"\n base_name = isempty(head.base) ? 
nothing : only(head.base).name\n\n # Parse theory body: GAT types\/terms and function aliases.\n types, terms, axioms, aliases = parse_theory_body(body)\n if signature && length(axioms) > 0\n throw(ParseError(\"@signature macro does not allow axioms to be defined\"))\n end\n theory = Theory(types, terms, axioms, aliases)\n\n # We must generate and evaluate the code at *run time* because the base\n # theory, if specified, is not available at *parse time*.\n expr = :(theory_code($head, $theory, $(esc(base_name))))\n Expr(:block,\n Expr(:call, esc(:eval), expr),\n :(Core.@__doc__ $(esc(head.main.name))))\nend\n\nfunction theory_code(head, theory, base_type)\n # Add types\/terms\/aliases from base theory, if provided.\n if !isnothing(base_type)\n base_theory = GAT.theory(base_type)\n base_params = [ type.name for type in base_theory.types ]\n bindings = Dict(zip(base_params, only(head.base).params))\n base_theory = replace_types(bindings, base_theory)\n theory = Theory([base_theory.types; theory.types],\n [base_theory.terms; theory.terms],\n [base_theory.axioms; theory.axioms],\n merge(base_theory.aliases, theory.aliases))\n end\n theory = replace_types(theory.aliases, theory)\n\n # Names of generic functions in interface defined by theory.\n names = unique!(vcat(\n [ param for type in theory.types for param in type.params ], # Accessors.\n [ term.name for term in theory.terms ], # Term constructors.\n collect(keys(theory.aliases)) # Unicode aliases.\n )) \n \n # Generate block with abstract type definition, registration of theory,\n # and stubs for generic functions.\n Expr(:block,\n Expr(:abstract, head.main.name),\n Expr(:(=),\n Expr(:call, GlobalRef(GAT, :theory),\n Expr(:(::), Expr(:curly, :Type, head.main.name))),\n theory),\n (Expr(:function, name) for name in names)...,\n )\nend\n\nfunction parse_theory_head(expr::Expr)::TheoryHead\n parse = parse_theory_binding\n parse_jl = parse_theory_binding_jlstyle\n parse_either = parse_theory_binding_either\n @match expr begin\n (Expr(:call, :(=>), Expr(:tuple, bases), main)\n => TheoryHead(parse(main), map(parse, bases)))\n (Expr(:(<:), main, Expr(:tuple,bases))\n => TheoryHead(parse_jl(main), map(parse_jl, bases)))\n Expr(:call, :(=>), base, main) => TheoryHead(parse(main), [parse(base)])\n Expr(:(<:), main, base) => TheoryHead(parse_jl(main), [parse_jl(base)])\n _ => TheoryHead(parse_either(expr))\n end\nend\n\nfunction parse_theory_binding(expr::Expr)::TheoryBinding\n @warn \"Using Haskell-style theory declaration with parentheses is deprecated,\" *\n \" use Julia-style with curly braces.\"\n @match expr begin\n Expr(:call, name::Symbol, params...) => TheoryBinding(name, params)\n _ => throw(ParseError(\"Ill-formed theory binding $expr\"))\n end\nend\n\nfunction parse_theory_binding_jlstyle(expr::Expr)::TheoryBinding\n @match expr begin\n Expr(:curly, name::Symbol, params...) => TheoryBinding(name, params)\n _ => throw(ParseError(\"Ill-formed theory binding $expr\"))\n end\nend\n\nfunction parse_theory_binding_either(expr::Expr)::TheoryBinding\n @match expr begin\n Expr(:call, name::Symbol, params...) => TheoryBinding(name, params)\n Expr(:curly, name::Symbol, params...) 
=> TheoryBinding(name, params)\n _ => throw(ParseError(\"Ill-formed theory binding $expr\"))\n end\nend\n\n\"\"\" Parse the body of a GAT declaration.\n\"\"\"\nfunction parse_theory_body(expr::Expr)\n @assert expr.head == :block\n aliases = Dict{Symbol, Symbol}()\n types = OrderedDict{Symbol,TypeConstructor}()\n terms = TermConstructor[]\n axioms = AxiomConstructor[]\n for elem in strip_lines(expr).args\n elem = strip_lines(elem)\n head = last(parse_docstring(elem)).head\n if head in (:(::), :call, :comparison, :where)\n cons = parse_constructor(elem)\n if isa(cons, TypeConstructor)\n if haskey(types, cons.name)\n throw(ParseError(\"Duplicate type constructor $elem\"))\n else\n types[cons.name] = cons\n end\n elseif isa(cons, TermConstructor)\n push!(terms, cons)\n else\n push!(axioms, cons)\n end\n elseif head == :macrocall && elem.args[1] == Symbol(\"@op\")\n if elem.args[2].head == :(:=)\n aliases[elem.args[2].args[1]] = elem.args[2].args[2]\n elseif elem.args[2].head == :block\n merge!(aliases, Dict(map(x -> if x.head == :(:=)\n x.args[1] => x.args[2]\n else\n throw(ParseError(\"Ill-formed alias $x\"))\n end, strip_lines(elem.args[2]).args)))\n else\n throw(ParseError(\"Ill-formed alias $elem\"))\n end\n else\n throw(ParseError(\"Ill-formed theory element $elem\"))\n end\n end\n return (collect(values(types)), terms, axioms, aliases)\nend\n\n\"\"\" Get type constructor by name.\n\nUnlike term constructors, type constructors cannot be overloaded, so there is at\nmost one type constructor with a given name.\n\"\"\"\nfunction get_type(theory::Theory, name::Symbol)::TypeConstructor\n indices = findall(cons -> cons.name == name, theory.types)\n length(indices) < 1 && error(\"Malformed GAT definition type constructor for $name is missing\")\n length(indices) > 1 && error(\"Malformed GAT definition type constructor for $name cannot be overloaded\")\n theory.types[indices[1]]\nend\nfunction has_type(theory::Theory, name::Symbol)::Bool\n findfirst(cons -> cons.name == name, theory.types) != nothing\nend\n\n\"\"\" Add a type-valued first argument to a Julia function signature.\n\nWe need this to avoid ambiguity in method dispatch when a term constructor has\nno arguments (e.g., `munit()`) or more generally has no arguments that are types\nin the signature (e.g., object generators in a category).\n\nThe fundamental reason why these cases must be treated specially is that Julia\ndoes not (yet?) support\n[dispatching on return type](https:\/\/github.com\/JuliaLang\/julia\/issues\/19206).\n\"\"\"\nfunction add_type_dispatch(call_expr::Expr, type_expr::Expr0)::Expr\n @match call_expr begin\n (Expr(:call, name, args...) =>\n Expr(:call, name, :(::Type{$type_expr}), args...))\n _ => throw(ParseError(\"Ill-formed call expression $call_expr\"))\n end\nend\n\n# GAT expressions\n#################\n\n\"\"\" Parse a raw expression in a GAT.\n\nA \"raw expression\" is a just composition of function and constant symbols.\n\"\"\"\nfunction parse_raw_expr(expr)\n @match expr begin\n Expr(:call, args...) => map(parse_raw_expr, args)\n head::Symbol => nothing\n _ => throw(ParseError(\"Ill-formed raw expression $expr\"))\n end\n expr # Return the expression unmodified. This function just checks syntax.\nend\n\n\"\"\" Parse context for term or type in a GAT.\n\"\"\"\nfunction parse_context(expr::Expr)::Context\n context = Context()\n args = expr.head == :tuple ? 
expr.args : [ expr ]\n for arg in args\n name, type = @match arg begin\n Expr(:(::), name::Symbol, type) => (name, parse_raw_expr(type))\n name::Symbol => (name, :Any)\n _ => throw(ParseError(\"Ill-formed context expression $expr\"))\n end\n if haskey(context, name)\n throw(ParseError(\"Name $name already defined\"))\n end\n context[name] = type\n end\n context\nend\n\n\"\"\" Parse type or term constructor in a GAT.\n\"\"\"\nfunction parse_constructor(expr::Expr)::Union{TypeConstructor,TermConstructor,\n AxiomConstructor}\n # Context is optional.\n doc, expr = parse_docstring(expr)\n cons_expr, context = @match expr begin\n Expr(:call, :<=, inner, context) => (inner, parse_context(context))\n Expr(:call, :\u22a3, inner, context) => (inner, parse_context(context))\n Expr(:comparison, cons_left, cons_sym, cons_right, :\u22a3, context) => (\n Expr(:call, cons_sym, cons_left, cons_right), parse_context(context))\n Expr(:where, inner, context) => (inner, parse_context(context))\n _ => (expr, Context())\n end\n\n # Allow abbreviated syntax where tail of context is included in parameters.\n function parse_param(param::Expr0)::Symbol\n name, type = @match param begin\n Expr(:(::), name::Symbol, type) => (name, parse_raw_expr(type))\n name::Symbol => (name, :Any)\n _ => throw(ParseError(\"Ill-formed type\/term parameter $param\"))\n end\n if !haskey(context, name)\n context[name] = type\n end\n name\n end\n\n @match cons_expr begin\n (Expr(:(::), name::Symbol, :TYPE)\n => TypeConstructor(name, [], context, doc))\n (Expr(:(::), Expr(:call, name::Symbol, params...), :TYPE)\n => TypeConstructor(name, map(parse_param, params), context, doc))\n (Expr(:(::), Expr(:call, name::Symbol, params...), type)\n => TermConstructor(name, map(parse_param, params), parse_raw_expr(type),\n context, doc))\n (Expr(:call, :(==), left, right)\n => AxiomConstructor(:(==), left, right, context, doc))\n _ => throw(ParseError(\"Ill-formed type\/term constructor $cons_expr\"))\n end\nend\n\n\"\"\" Replace names of type constructors in a GAT.\n\"\"\"\nfunction replace_types(bindings::Dict, theory::Theory)::Theory\n Theory([ replace_types(bindings, t) for t in theory.types ],\n [ replace_types(bindings, t) for t in theory.terms ],\n [ replace_types(bindings, t) for t in theory.axioms ],\n replace_types(bindings, theory.aliases))\nend\nfunction replace_types(bindings::Dict, cons::TypeConstructor)::TypeConstructor\n TypeConstructor(replace_symbols(bindings, cons.name), cons.params,\n replace_types(bindings, cons.context), cons.doc)\nend\nfunction replace_types(bindings::Dict, cons::TermConstructor)::TermConstructor\n TermConstructor(cons.name, cons.params,\n replace_symbols(bindings, cons.typ),\n replace_types(bindings, cons.context), cons.doc)\nend\nfunction replace_types(bindings::Dict, cons::AxiomConstructor)::AxiomConstructor\n AxiomConstructor(cons.name,\n replace_symbols(bindings, cons.left),\n replace_symbols(bindings, cons.right),\n replace_types(bindings, cons.context), cons.doc)\nend\nfunction replace_types(bindings::Dict, aliases::Dict)::Dict\n Dict(a => replace_symbols(bindings, aliases[a])\n for a in keys(aliases))\nend\nfunction replace_types(bindings::Dict, context::Context)::Context\n GAT.Context(((name => @match expr begin\n (Expr(:call, sym::Symbol, args...) 
=>\n Expr(:call, replace_symbols(bindings, sym), args...))\n sym::Symbol => replace_symbols(bindings, sym)\n end) for (name, expr) in context))\nend\n\n\"\"\" Remove type parameters from dependent type.\n\"\"\"\nfunction strip_type(expr)::Symbol\n @match expr begin\n Expr(:call, head::Symbol, args...) => head\n sym::Symbol => sym\n end\nend\n\n# GAT expressions\n#################\n\n\"\"\" Expand context variables that occur implicitly in an expression.\n\nReference: (Cartmell, 1986, Sec 10: 'Informal syntax').\n\"\"\"\nfunction expand_in_context(expr, params::Vector{Symbol},\n context::Context, theory::Theory)\n @match expr begin\n Expr(:call, name::Symbol, args...) => begin\n expanded = [expand_in_context(e, params, context, theory) for e in args]\n Expr(:call, name, expanded...)\n end\n name::Symbol => begin\n if name in params\n name\n elseif haskey(context, name)\n expand_symbol_in_context(name, params, context, theory)\n else\n error(\"Name $name missing from context $context\")\n end\n end\n _ => throw(ParseError(\"Ill-formed raw expression $expr\"))\n end\nend\nfunction expand_symbol_in_context(sym::Symbol, params::Vector{Symbol},\n context::Context, theory::Theory)\n # This code expands symbols that occur as direct arguments to type\n # constructors. If there are term constructors in between, it does not work:\n # indeed, it cannot work in general because the term constructors are not\n # necessarily injective. For example, we can expand :X in\n # (:X => :Ob, :f => :(Hom(X)))\n # but not in\n # (:X => :Ob, :Y => :Ob, :f => :(Hom(otimes(X,Y))))\n names = collect(keys(context))\n start = findfirst(names .== sym)\n for name in names[start+1:end]\n expr = context[name]\n if isa(expr, Expr) && expr.head == :call && sym in expr.args[2:end]\n cons = get_type(theory, expr.args[1])\n accessor = cons.params[findfirst(expr.args[2:end] .== sym)]\n expanded = Expr(:call, accessor, name)\n return expand_in_context(expanded, params, context, theory)\n end\n end\n error(\"Name $sym does not occur explicitly among $params in context $context\")\nend\n\n\"\"\" Expand context variables that occur implicitly in the type expression\nof a term constructor.\n\"\"\"\nfunction expand_term_type(cons::TermConstructor, theory::Theory)\n isa(cons.typ, Symbol) ? cons.typ :\n expand_in_context(cons.typ, cons.params, cons.context, theory)\nend\n\n\"\"\" Implicit equations defined by a context.\n\nThis function allows a generalized algebraic theory (GAT) to be expressed as\nan essentially algebraic theory, i.e., as partial functions whose domains are\ndefined by equations.\n\nReferences:\n - (Cartmell, 1986, Sec 6: \"Essentially algebraic theories and categories with\n finite limits\")\n - (Freyd, 1972, \"Aspects of topoi\")\n\"\"\"\nfunction equations(context::Context, theory::Theory)::Vector{Pair}\n # The same restrictions as `expand_symbol_in_context` apply here.\n eqs = Pair[]\n names = collect(keys(context))\n for (start, var) in enumerate(names)\n for name in names[start+1:end]\n expr = context[name]\n if isa(expr, Symbol) && !has_type(theory, expr)\n # If the constructor is a symbol and there isn't a matching type in\n # the theory, assume it's a Julia type. For now, these are\n # completely ignored by the syntax system.\n continue\n end\n expr = isa(expr, Symbol) ? 
Expr(:call, expr) : expr\n cons = get_type(theory, expr.args[1])\n accessors = cons.params[findall(expr.args[2:end] .== var)]\n append!(eqs, (Expr(:call, a, name) => var for a in accessors))\n end\n end\n eqs\nend\n\n\"\"\" Implicit equations defined by context, allowing for implicit variables.\n\"\"\"\nfunction equations(params::Vector{Symbol}, context::Context,\n theory::Theory)::Vector{Pair}\n eqs = [ (expand_in_context(lhs, params, context, theory) =>\n expand_in_context(rhs, params, context, theory))\n for (lhs, rhs) in equations(context, theory) ]\n # Remove tautologies (expr == expr) resulting from expansions.\n # FIXME: Should we worry about redundancies from the symmetry of equality,\n # i.e., (expr1 == expr2) && (expr2 == expr1)?\n filter(eq -> eq.first != eq.second, eqs)\nend\n\n\"\"\" Implicit equations for term constructor.\n\"\"\"\nfunction equations(cons::TermConstructor, theory::Theory)::Vector{Pair}\n equations(cons.params, cons.context, theory)\nend\n\n# Instances\n###########\n\n\"\"\" Define an *instance* of a generalized algebraic theory (GAT).\n\"\"\"\nmacro instance(head, body)\n # Parse the instance definition.\n head = parse_theory_binding_either(head)\n functions, ext_functions = parse_instance_body(body)\n\n # We must generate and evaluate the code at *run time* because the theory\n # type is not defined at *parse time*.\n # Also, we \"throw away\" any docstring.\n # FIXME: Is there a better place to put the docstring?\n expr = :(instance_code($(esc(head.name)), $(esc(head.params)), $functions, $ext_functions))\n Expr(:block,\n Expr(:call, esc(:eval), expr),\n :(Core.@__doc__ abstract type $(esc(gensym(:instance_doc))) end)) # \/dev\/null\nend\nfunction instance_code(theory_type, instance_types, instance_fns, external_fns)\n code = Expr(:block)\n theory = GAT.theory(theory_type)\n bindings = Dict(zip([type.name for type in theory.types], instance_types))\n bound_fns = [ replace_symbols(bindings, f) for f in interface(theory) ]\n bound_fns = OrderedDict(parse_function_sig(f) => f for f in bound_fns)\n instance_fns = Dict(parse_function_sig(f) => f for f in instance_fns)\n for (sig, f) in bound_fns\n if sig.name in external_fns\n continue\n elseif haskey(instance_fns, sig)\n f_impl = instance_fns[sig]\n elseif !isnothing(f.impl)\n f_impl = f\n else\n error(\"Method $(f.call_expr) not implemented in $(nameof(mod)) instance\")\n end\n push!(code.args, generate_function(f_impl))\n end\n return code\nend\n\n\"\"\" Parse the body of a GAT instance definition.\n\"\"\"\nfunction parse_instance_body(expr::Expr)\n @assert expr.head == :block\n funs = JuliaFunction[]\n ext_funs = Symbol[]\n for elem in strip_lines(expr).args\n elem = strip_lines(elem)\n head = elem.head\n if head == :macrocall && elem.args[1] == Symbol(\"@import\")\n ext_funs = @match elem.args[2] begin\n sym::Symbol => [ext_funs; [sym]]\n Expr(:tuple, args...) 
=> [ext_funs; Symbol[args...]]\n end\n else\n push!(funs, parse_function(elem))\n end\n end\n return (funs, ext_funs)\nend\n\n\"\"\" Complete set of Julia functions for a theory.\n\"\"\"\nfunction interface(theory::Theory)::Vector{JuliaFunction}\n [ accessors(theory);\n constructors(theory);\n alias_functions(theory) ]\nend\n\n\"\"\" Julia functions for type parameter accessors.\n\"\"\"\nfunction accessors(theory::Theory)::Vector{JuliaFunction}\n vcat(map(accessors, theory.types)...)\nend\nfunction accessors(cons::TypeConstructor)::Vector{JuliaFunction}\n [ JuliaFunction(Expr(:call, param, Expr(:(::), cons.name)),\n strip_type(cons.context[param]))\n for param in cons.params ]\nend\n\n\"\"\" Julia functions for term constructors of GAT.\n\"\"\"\nfunction constructors(theory::Theory)::Vector{JuliaFunction}\n [ constructor(cons, theory) for cons in theory.terms ]\nend\nfunction constructor(cons::Union{TypeConstructor,TermConstructor},\n theory::Theory)::JuliaFunction\n arg_names = cons.params\n arg_types = [ strip_type(cons.context[name]) for name in arg_names ]\n args = [ Expr(:(::), name, typ) for (name,typ) in zip(arg_names, arg_types) ]\n return_type = cons isa TermConstructor ? strip_type(cons.typ) : cons.name\n\n call_expr = Expr(:call, cons.name, args...)\n if !any(has_type(theory, typ) for typ in arg_types)\n call_expr = add_type_dispatch(call_expr, return_type)\n end\n JuliaFunction(call_expr, return_type)\nend\n\n\"\"\" Julia functions for term and type aliases of GAT.\n\"\"\"\nfunction alias_functions(theory::Theory)::Vector{JuliaFunction}\n # collect all of the types and terms from the theory\n terms_types = [theory.types; theory.terms]\n # iterate over the specified aliases\n collect(Iterators.flatten(map(collect(theory.aliases)) do alias\n # collect all of the destination function definitions to alias\n # allows an alias to overite all the type definitions of a function\n dests = filter(i -> i.name == last(alias), map(x -> x, terms_types))\n # If there are no matching functions, throw a parse error\n if isempty(dests)\n throw(ParseError(\"Cannot alias undefined type or term $alias\"))\n end\n # For each destination, create a Julia function\n map(dests) do dest\n fun = constructor(dest, theory)\n fun.call_expr.args[1] = first(alias)\n # Extract arguments from function header, handling special case of\n # created by `add_type_dispatch`.\n args = map(fun.call_expr.args[2:end]) do arg\n @match arg begin\n # Special case: dispatch on return type.\n Expr(:(::), Expr(:curly, :Type, type)) => type\n # Main case: typed parameter.\n Expr(:(::), param, type) => param\n _ => throw(ParseError(\"Cannot parse argument $arg for alias $alias\"))\n end\n end\n body = Expr(:call, dest.name, args...)\n JuliaFunction(fun.call_expr, fun.return_type, body)\n end\n end))\nend\n\n\"\"\" Invoke a term constructor by name on an instance.\n\nThis method provides reflection for GATs. In everyday use the generic\nmethod for the constructor should be called directly, not through this function.\n\nCf. 
Julia's builtin `invoke()` function.\n\"\"\"\nfunction invoke_term(theory_type::Type, instance_types::Tuple,\n constructor_name::Symbol, args...)\n # Get the corresponding Julia method from the parent module.\n method = getfield(parentmodule(theory_type), constructor_name)\n args = collect(Any, args)\n\n # Add dispatch on return type, if necessary.\n if !any(typeof(arg) <: typ for typ in instance_types for arg in args)\n # Case 1: Name refers to type constructor, e.g., generator constructor\n # in syntax system.\n theory = GAT.theory(theory_type)\n index = findfirst(cons -> cons.name == constructor_name, theory.types)\n if isnothing(index)\n # Case 2: Name refers to term constructor.\n # FIXME: Terms constructors can be overloaded, so there may be multiple\n # term constructors with the same name. Distinguishing them requires type\n # inference. I am punting on that right now.\n constructor = theory.terms[\n findfirst(cons -> cons.name == constructor_name, theory.terms)]\n return_name = strip_type(constructor.typ)\n index = findfirst(cons -> cons.name == return_name, theory.types)\n end\n insert!(args, 1, instance_types[index])\n end\n\n # Invoke the method!\n method(args...)\nend\n\nend\n","avg_line_length":35.0311614731,"max_line_length":106,"alphanum_fraction":0.674389455} {"size":6776,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"export MagneticFieldMeasurement, saveMagneticFieldAsHDF5, loadMagneticFieldMeasurement, loadMagneticField, addMeasuredPosition\n\nBase.@kwdef mutable struct MagneticFieldMeasurement\n \"Description of the dataset.\"\n description::Union{String, Nothing} = nothing\n \"Positions of the measured field values.\"\n positions::Union{Positions, Missing} = missing\n \"Field values at the specific positions.\"\n fields::Union{Array{typeof(1.0u\"T\"), 2}, Missing} = missing\n \"Error of the measured field values.\"\n fieldsError::Union{Array{typeof(1.0u\"T\"), 2}, Nothing} = nothing\n \"Frequency of the measured field.\"\n fieldsFrequency::Union{Vector{typeof(1.0u\"Hz\")}, Nothing} = nothing\n \"Applied current while measuring the matching position.\"\n currents::Union{Array{typeof(1.0u\"A\"), 2}, Nothing} = nothing\n \"Timestamp of the matching position.\"\n timestamp::Union{Vector{DateTime}, Nothing} = nothing\n \"Offset of the hall probe's active areas.\"\n sensorOffset::Union{Vector{typeof(1.0u\"m\")}, Nothing} = nothing\n \"Temperature at the start of the measurement or, if defined as a vector, the temperature when measuring the matching position.\"\n temperature::Union{typeof(1.0u\"\u00b0C\"), Vector{typeof(1.0u\"\u00b0C\")}, Nothing} = nothing\nend\n\nfunction MagneticFieldMeasurement(filename::String)\n return loadMagneticFieldMeasurement(filename)\nend\n\nfunction MagneticFieldMeasurement(file::HDF5.File)\n return loadMagneticFieldMeasurement(file)\nend\n\nfunction saveMagneticFieldAsHDF5(measurement::MagneticFieldMeasurement, filename::String)\n h5open(filename, \"w\") do file\n saveMagneticFieldAsHDF5(measurement, file)\n end\nend\n\nfunction saveMagneticFieldAsHDF5(measurement::MagneticFieldMeasurement, file::HDF5.File)\n write(file, measurement.positions)\n\n if !isnothing(description(measurement))\n write(file, \"\/description\", description(measurement))\n end\n\n if !ismissing(fields(measurement))\n write(file, \"\/fields\", ustrip.(fields(measurement)))\n end\n\n if !isnothing(fieldsError(measurement))\n write(file, \"\/fieldsError\", ustrip.(fieldsError(measurement)))\n end\n\n if !isnothing(fieldsFrequency(measurement))\n write(file, 
\"\/fieldsFrequency\", ustrip.(fieldsFrequency(measurement)))\n end\n\n if !isnothing(currents(measurement))\n write(file, \"\/currents\", ustrip.(currents(measurement)))\n end\n\n if !isnothing(timestamp(measurement))\n write(file, \"\/timestamp\", string.(timestamp(measurement)))\n end\n\n if !isnothing(sensorOffset(measurement))\n write(file, \"\/sensorOffset\", ustrip.(sensorOffset(measurement)))\n end\n\n if !isnothing(temperature(measurement))\n write(file, \"\/temperature\", ustrip.(temperature(measurement)))\n end\nend\n\nfunction loadMagneticFieldMeasurement(filename::String)\n h5open(filename, \"r\") do file\n return loadMagneticFieldMeasurement(file)\n end\nend\n\nfunction loadMagneticFieldMeasurement(file::HDF5.File)\n splattingDict = Dict{Symbol, Any}()\n splattingDict[:positions] = Positions(file)\n\n if haskey(file, \"fields\")\n splattingDict[:fields] = read(file, \"fields\")u\"T\"\n else\n error(\"The HDF5 file for a magnetic field measurement must contain a field vector.\")\n end\n\n if typeof(splattingDict[:positions]) == MeanderingGridPositions\n splattingDict[:fields] = splattingDict[:fields][:, getPermutation(splattingDict[:positions]), :]\n splattingDict[:positions] = splattingDict[:positions].grid\n end\n\n if haskey(file, \"description\")\n splattingDict[:description] = read(file, \"description\")\n end\n\n if haskey(file, \"fieldsError\")\n splattingDict[:fieldsError] = read(file, \"fieldsError\")u\"T\"\n end\n\n if haskey(file, \"fieldsFrequency\")\n splattingDict[:fieldsFrequency] = read(file, \"fieldsFrequency\")u\"Hz\"\n end\n\n if haskey(file, \"currents\")\n splattingDict[:currents] = read(file, \"currents\")u\"A\"\n end\n\n if haskey(file, \"timestamp\")\n splattingDict[:timestamp] = DateTime.(read(file, \"timestamp\"))\n end\n\n if haskey(file, \"sensorOffset\")\n splattingDict[:sensorOffset] = read(file, \"sensorOffset\")u\"m\"\n end\n\n if haskey(file, \"temperature\")\n splattingDict[:temperature] = read(file, \"temperature\")u\"\u00b0C\"\n end\n\n return MagneticFieldMeasurement(;splattingDict...)\nend\n\n# Alias for backwards compatibility\nloadMagneticField(filename::String) = loadMagneticFieldMeasurement(filename)\n\n# Create getter and setter for all fields of `MagneticFieldMeasurement`\nfor (fieldname, fieldtype) in zip(fieldnames(MagneticFieldMeasurement), fieldtypes(MagneticFieldMeasurement))\n fieldnameStr = string(fieldname)\n\n # At the moment, this should be a Union\n missingOrNothing = (fieldtype.b <: Union{Missing, Nothing}) ? fieldtype.b : fieldtype.a\n fieldtype = (fieldtype.b <: Union{Missing, Nothing}) ? 
fieldtype.a : fieldtype.b\n\n #@info \"\" fieldnameStr missingOrNothing fieldtype\n\n @eval begin\n export $fieldname\n\n # TODO: Add docstring from struct; I did not yet find a way to retrieve it\n function $(fieldname)(measurement::MagneticFieldMeasurement)::Union{$fieldtype, $missingOrNothing}\n return measurement.$fieldname\n end\n \n # TODO: Add docstring from struct; I did not yet find a way to retrieve it\n function $(fieldname)(measurement::MagneticFieldMeasurement, value::Union{$fieldtype, $missingOrNothing})\n measurement.$fieldname = value\n end\n end\nend\n\nfunction addMeasuredPosition(measurement::MagneticFieldMeasurement, pos::Vector; field=nothing, fieldError=nothing, fieldFrequency=nothing, current=nothing, timestamp=nothing, temperature=nothing)\n idx = posToLinIdx(measurement.positions, pos)\n\n if !isnothing(field)\n if ismissing(measurement.fields)\n measurement.fields = fill(0.0u\"T\", (length(measurement.positions), 3))\n end\n\n measurement.fields[idx, :] .= field\n end\n\n if !isnothing(fieldError)\n if isnothing(measurement.fieldsError)\n measurement.fieldsError = fill(0.0u\"T\", (length(measurement.positions), 3))\n end\n\n measurement.fieldsError[idx, :] .= fieldError\n end\n\n if !isnothing(fieldFrequency)\n if isnothing(measurement.fieldsFrequency)\n measurement.fieldsFrequency = fill(0.0u\"Hz\", length(measurement.positions))\n end\n\n measurement.fieldsFrequency[idx] = fieldFrequency\n end\n\n if !isnothing(current)\n if isnothing(measurement.currents)\n measurement.currents = fill(0.0u\"A\", (length(measurement.positions), length(current)))\n end\n \n measurement.currents[idx, :] .= current\n end\n\n if !isnothing(timestamp)\n if isnothing(measurement.timestamp)\n measurement.timestamp = fill(now(), length(measurement.positions))\n end\n\n measurement.timestamp[idx] = timestamp\n end\n\n if !isnothing(temperature)\n if isnothing(measurement.temperature)\n measurement.temperature = fill(-273.0u\"\u00b0C\", length(measurement.positions))\n end\n\n measurement.temperature[idx] = temperature\n end\nend","avg_line_length":33.2156862745,"max_line_length":196,"alphanum_fraction":0.7415879575} {"size":3690,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"#=\nFunctionalities to take a string corresponding to Julia code and evaluate\nthat code in a given module while capturing stdout and redirecting it to\na file.\n=#\n\n\"\"\"\n$SIGNATURES\n\nConsumes a string with Julia code, returns a vector of expression(s).\n\nNote: this function was adapted from the `parse_input` function from Weave.jl.\n\"\"\"\nfunction parse_code(code::AS)\n exs = Any[] # Expr, Symbol or Any Julia core value\n n = sizeof(code)\n pos = 1\n while pos \u2264 n\n ex, pos = Meta.parse(code, pos)\n isnothing(ex) && continue\n push!(exs, ex)\n end\n exs\nend\n\n\"\"\"\n$SIGNATURES\n\nReturns only the stack traces which are related to the user's code.\nThis means removing stack traces pointing to Franklin's code.\n\"\"\"\nfunction trim_stacktrace(s::String)\n first_match_start = first(findfirst(STACKTRACE_TRIM_PAT, s))\n # Keep only everything before the regex match.\n return s[1:first_match_start-3]\nend\n\n\"\"\"\n$SIGNATURES\n\nRun some code in a given module while redirecting stdout to a given path.\nReturn the result of the evaluation or `nothing` if the code was empty or\nthe evaluation failed.\nIf the evaluation errors, the error is printed to output then a warning is\nshown.\n\n## Arguments\n\n1. `mod`: the module in which to evaluate the code,\n1. 
`code`: string corresponding to the code,\n1. `out_path`: path where stdout should be redirected\n\n## Keywords\n\n* `warn_err=true`: whether to show a warning in the REPL if there was an error\n running the code.\n* `strip=false`: whether to strip the code, this may already have been done.\n\"\"\"\nfunction run_code(mod::Module, code::AS, out_path::AS;\n warn_err::Bool=true, strip_code::Bool=true)\n isempty(code) && return nothing\n strip_code && (code = strip(code))\n exs = parse_code(strip(code))\n ne = length(exs)\n res = nothing # to capture final result\n err = nothing\n stacktrace = nothing\n ispath(out_path) || mkpath(dirname(out_path))\n open(out_path, \"w\") do outf\n if !FD_ENV[:SILENT_MODE]\n rprint(\"\u2192 evaluating code [$(out_path |> basename |> splitext |> first)] in ($(locvar(\"fd_rpath\")))\")\n end\n redirect_stdout(outf) do\n e = 1\n while e <= ne\n try\n res = Core.eval(mod, exs[e])\n catch e\n io = IOBuffer()\n showerror(io, e)\n println(String(take!(io)))\n err = typeof(e)\n\n exc, bt = last(Base.catch_stack())\n stacktrace = sprint(showerror, exc, bt)\n\n break\n end\n e += 1\n end\n end\n end\n # if there was an error, return nothing and possibly show warning\n if !isnothing(err)\n FD_ENV[:SILENT_MODE] || print(\"\\n\")\n warn_err && print_warning(\"\"\"\n There was an error of type '$err' when running a code block.\n Checking the output files '$(splitext(out_path)[1]).(out|res)'\n might be helpful to understand and solve the issue.\n \\nRelevant pointers:\n $POINTER_EVAL\n \\nDetails:\n $(trim_stacktrace(stacktrace))\n \"\"\")\n res = nothing\n end\n # if last bit ends with `;` return nothing (no display)\n endswith(code, r\";\\s*\") && return nothing\n # if last line is a Julia value return\n isa(exs[end], Expr) || return res\n # if last line of the code is a `show`\n if length(exs[end].args) > 1 && exs[end].args[1] == Symbol(\"@show\")\n return nothing\n end\n # otherwise return the result of the last expression\n return res\nend\n","avg_line_length":31.2711864407,"max_line_length":113,"alphanum_fraction":0.6056910569} {"size":430,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# Autogenerated wrapper script for nPth_jll for x86_64-linux-gnu\nexport libnpth\n\nJLLWrappers.@generate_wrapper_header(\"nPth\")\nJLLWrappers.@declare_library_product(libnpth, \"libnpth.so.0\")\nfunction __init__()\n JLLWrappers.@generate_init_header()\n JLLWrappers.@init_library_product(\n libnpth,\n \"lib\/libnpth.so\",\n RTLD_LAZY | RTLD_DEEPBIND,\n )\n\n JLLWrappers.@generate_init_footer()\nend # __init__()\n","avg_line_length":26.875,"max_line_length":64,"alphanum_fraction":0.7372093023} {"size":9559,"ext":"jl","lang":"Julia","max_stars_count":221.0,"content":"using Base.GC\n\nstruct TestStruct\n a::cl.CL_int\n b::cl.CL_float\nend\n\n@testset \"OpenCL.Buffer\" begin\n @testset \"OpenCL.Buffer constructors\" begin\n for device in cl.devices()\n\n ctx = cl.Context(device)\n testarray = zeros(Float32, 1000)\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_ALLOC_HOST_PTR | cl.CL_MEM_READ_ONLY,\n length(testarray)) != nothing\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_ALLOC_HOST_PTR | cl.CL_MEM_WRITE_ONLY,\n length(testarray)) != nothing\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_ALLOC_HOST_PTR | cl.CL_MEM_READ_WRITE,\n length(testarray)) != nothing\n\n buf = cl.Buffer(Float32, ctx, cl.CL_MEM_ALLOC_HOST_PTR | cl.CL_MEM_READ_WRITE, length(testarray))\n @test length(buf) == length(testarray)\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_COPY_HOST_PTR | cl.CL_MEM_READ_ONLY,\n 
hostbuf=testarray) != nothing\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_COPY_HOST_PTR | cl.CL_MEM_WRITE_ONLY,\n hostbuf=testarray) != nothing\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_COPY_HOST_PTR | cl.CL_MEM_READ_WRITE,\n hostbuf=testarray) != nothing\n\n buf = cl.Buffer(Float32, ctx, cl.CL_MEM_COPY_HOST_PTR | cl.CL_MEM_READ_WRITE, hostbuf=testarray)\n @test length(buf) == length(testarray)\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_USE_HOST_PTR | cl.CL_MEM_READ_ONLY,\n hostbuf=testarray) != nothing\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_USE_HOST_PTR | cl.CL_MEM_WRITE_ONLY,\n hostbuf=testarray) != nothing\n\n @test cl.Buffer(Float32, ctx, cl.CL_MEM_USE_HOST_PTR | cl.CL_MEM_READ_WRITE,\n hostbuf=testarray) != nothing\n\n buf = cl.Buffer(Float32, ctx, cl.CL_MEM_USE_HOST_PTR | cl.CL_MEM_READ_WRITE, hostbuf=testarray)\n @test length(buf) == length(testarray)\n\n # invalid buffer size should throw error\n @test_throws cl.CLError cl.Buffer(Float32, ctx, cl.CL_MEM_ALLOC_HOST_PTR, +0)\n @test_throws InexactError cl.Buffer(Float32, ctx, cl.CL_MEM_ALLOC_HOST_PTR, -1)\n\n # invalid flag combinations should throw error\n @test_throws cl.CLError cl.Buffer(Float32, ctx, cl.CL_MEM_USE_HOST_PTR | cl.CL_MEM_ALLOC_HOST_PTR,\n hostbuf=testarray)\n\n # invalid host pointer should throw error\n @test_throws TypeError cl.Buffer(Float32, ctx, cl.CL_MEM_COPY_HOST_PTR,\n hostbuf=C_NULL)\n\n @test_throws TypeError cl.Buffer(Float32, ctx, cl.CL_MEM_USE_HOST_PTR,\n hostbuf=C_NULL)\n end\n end\n\n @testset \"OpenCL.Buffer constructors symbols\" begin\n for device in cl.devices()\n ctx = cl.Context(device)\n\n for mf1 in [:rw, :r, :w]\n for mf2 in [:copy, :use, :alloc, :null]\n for mtype in [cl.CL_char,\n cl.CL_uchar,\n cl.CL_short,\n cl.CL_ushort,\n cl.CL_int,\n cl.CL_uint,\n cl.CL_long,\n cl.CL_ulong,\n cl.CL_half,\n cl.CL_float,\n cl.CL_double,\n #TODO: bool, vector_types, struct_types...\n ]\n testarray = zeros(mtype, 100)\n if mf2 == :copy || mf2 == :use\n @test cl.Buffer(mtype, ctx, (mf1, mf2), hostbuf=testarray) != nothing\n buf = cl.Buffer(mtype, ctx, (mf1, mf2), hostbuf=testarray)\n @test length(buf) == length(testarray)\n elseif mf2 == :alloc\n @test cl.Buffer(mtype, ctx, (mf1, mf2),\n length(testarray)) != nothing\n buf = cl.Buffer(mtype, ctx, (mf1, mf2), length(testarray))\n @test length(buf) == length(testarray)\n end\n end\n end\n end\n\n test_array = Vector{TestStruct}(undef, 100)\n @test cl.Buffer(TestStruct, ctx, :alloc, length(test_array)) != nothing\n @test cl.Buffer(TestStruct, ctx, :copy, hostbuf=test_array) != nothing\n\n # invalid buffer size should throw error\n @test_throws cl.CLError cl.Buffer(Float32, ctx, :alloc, +0)\n @test_throws InexactError cl.Buffer(Float32, ctx, :alloc, -1)\n\n # invalid flag combinations should throw error\n @test_throws ArgumentError cl.Buffer(Float32, ctx, (:use, :alloc), hostbuf=test_array)\n\n # invalid host pointer should throw error\n @test_throws TypeError cl.Buffer(Float32, ctx, :copy, hostbuf=C_NULL)\n\n @test_throws TypeError cl.Buffer(Float32, ctx, :use, hostbuf=C_NULL)\n\n end\n end\n\n @testset \"OpenCL.Buffer fill\" begin\n for device in cl.devices()\n if occursin(\"Portable\", device[:platform][:name])\n # the pocl platform claims to implement v1.2 of the spec, but does not\n @warn(\"Skipping test OpenCL.Buffer fill for POCL Platform\")\n continue\n end\n ctx = cl.Context(device)\n queue = cl.CmdQueue(ctx)\n testarray = zeros(Float32, 1000)\n buf = cl.Buffer(Float32, ctx, (:rw, :copy), hostbuf=testarray)\n @test length(buf) == 
length(testarray)\n\n v = cl.opencl_version(device)\n if v.major == 1 && v.minor < 2\n platform_name = device[:platform][:name]\n @info(\"Skipping OpenCL.Buffer fill for $platform_name: fill is a v1.2 command\")\n continue\n end\n cl.fill!(queue, buf, 1f0)\n readback = cl.read(queue, buf)\n @test all(x -> x == 1.0, readback)\n @test all(x -> x == 0.0, testarray)\n @test buf.valid == true\n end\n end\n\n @testset \"OpenCL.Buffer write!\" begin\n for device in cl.devices()\n ctx = cl.Context(device)\n queue = cl.CmdQueue(ctx)\n testarray = zeros(Float32, 1000)\n buf = cl.Buffer(Float32, ctx, (:rw, :copy), hostbuf=testarray)\n @test length(buf) == length(testarray)\n cl.write!(queue, buf, ones(Float32, length(testarray)))\n readback = cl.read(queue, buf)\n @test all(x -> x == 1.0, readback) == true\n @test buf.valid == true\n end\n end\n\n @testset \"OpenCL.Buffer empty_like\" begin\n for device in cl.devices()\n ctx = cl.Context(device)\n queue = cl.CmdQueue(ctx)\n testarray = zeros(Float32, 1000)\n buf = cl.Buffer(Float32, ctx, (:rw, :copy), hostbuf=testarray)\n\n @test sizeof(cl.empty_like(ctx, buf)) == sizeof(testarray)\n end\n end\n\n @testset \"OpenCL.Buffer copy!\" begin\n for device in cl.devices()\n ctx = cl.Context(device)\n queue = cl.CmdQueue(ctx)\n test_array = fill(2f0, 1000)\n a_buf = cl.Buffer(Float32, ctx, length(test_array))\n b_buf = cl.Buffer(Float32, ctx, length(test_array))\n c_arr = Vector{Float32}(undef, length(test_array))\n # host to device buffer\n cl.copy!(queue, a_buf, test_array)\n # device buffer to device buffer\n cl.copy!(queue, b_buf, a_buf)\n # device buffer to host\n cl.copy!(queue, c_arr, b_buf)\n @test all(x -> isapprox(x, 2.0), c_arr) == true\n end\n end\n\n @testset \"OpenCL.Buffer map\/unmap\" begin\n for device in cl.devices()\n ctx = cl.Context(device)\n queue = cl.CmdQueue(ctx)\n b = cl.Buffer(Float32, ctx, :rw, 100)\n for f in (:r, :w, :rw)\n a, evt = cl.enqueue_map_mem(queue, b, f, 0, (10,10))\n cl.wait(evt)\n @test size(a) == (10,10)\n @test typeof(a) == Array{Float32,2}\n\n # cannot unmap a buffer without same host array\n bad = similar(a)\n @test_throws ArgumentError cl.unmap!(queue, b, bad)\n\n @test cl.ismapped(b) == true\n cl.unmap!(queue, b, a)\n @test cl.ismapped(b) == false\n\n # cannot unmap an unmapped buffer\n @test_throws ArgumentError cl.unmap!(queue, b, a)\n\n # gc here quickly force any memory errors\n Base.GC.gc()\n end\n @test cl.ismapped(b) == false\n a, evt = cl.enqueue_map_mem(queue, b, :rw, 0, (10,10))\n @test cl.ismapped(b) == true\n evt = cl.enqueue_unmap_mem(queue, b, a, wait_for=evt)\n cl.wait(evt)\n @test cl.ismapped(b) == false\n end\n end\nend\n","avg_line_length":42.4844444444,"max_line_length":110,"alphanum_fraction":0.5100951982} {"size":578,"ext":"jl","lang":"Julia","max_stars_count":28.0,"content":"module TestMacrosAggregateVector\n\nimport Test: @testset, @test\nimport Volcanito: @group_by, @aggregate_vector, materialize\nimport DataFrames: DataFrame\nimport Statistics: mean\n\n@testset \"@aggregate_vector\" begin\n df = DataFrame(\n a = [1, 2, 3, 4],\n b = [2, 3, 4, 5],\n c = [0, 0, 1, 1],\n )\n\n plan1 = @group_by(df, c)\n gdf = materialize(plan1)\n\n plan2 = @aggregate_vector(gdf, m = mean(a + b))\n res = materialize(plan2)\n @test res == DataFrame(\n c = [0, 1],\n m = [mean([1 + 2, 2 + 3]), mean([3 + 4, 4 + 5])],\n )\nend\n\nend\n","avg_line_length":21.4074074074,"max_line_length":59,"alphanum_fraction":0.5674740484} {"size":10704,"ext":"jl","lang":"Julia","max_stars_count":329.0,"content":"using 
LoopVectorization, OffsetArrays\nusing Test\n\n@testset \"dot\" begin\n dotq = :(for i \u2208 eachindex(a,b)\n s += a[i]*b[i]\n end)\n lsdot = LoopVectorization.loopset(dotq);\n @test LoopVectorization.choose_order(lsdot) == (Symbol[:i], :i, Symbol(\"##undefined##\"), :i, 4, -1)\n function mydot(a::AbstractVector, b::AbstractVector)\n s = zero(eltype(a))\n za = OffsetArray(a, OffsetArrays.Origin(0))\n zb = OffsetArray(b, OffsetArrays.Origin(0))\n @inbounds @simd for i \u2208 LoopVectorization.CloseOpen(min(length(a),length(b)))\n s += za[i]*zb[i]\n end\n s\n end\n function mydotavx(a::AbstractVector, b::AbstractVector)\n s = zero(eltype(a))\n za = OffsetArray(a, OffsetArrays.Origin(0))\n zb = OffsetArray(b, OffsetArrays.Origin(0))\n @turbo for i \u2208 LoopVectorization.CloseOpen(min(length(a),length(b)))\n s += za[i]*zb[i]\n end\n s\n end\n @test LoopVectorization.ArrayInterface.static_step(LoopVectorization.CloseOpen(-5,10)) === LoopVectorization.One()\n function mydot(a, b)\n s = zero(eltype(a))\n @inbounds @simd for i \u2208 eachindex(a,b)\n s += a[i]*b[i]\n end\n s\n end\n function mydotavx(a, b)\n s = zero(eltype(a))\n @turbo for i \u2208 eachindex(a,b)\n s += a[i]*b[i]\n end\n s\n end\n \n function mydot_avx(a, b)\n s = zero(eltype(a))\n @_avx for i \u2208 eachindex(a,b)\n a\u1d62 = getindex(a, i)\n s += a\u1d62*b[i]\n end\n s\n end\n \n\n selfdotq = :(for i \u2208 indices(a,1)\n s += a[i]*a[i]\n end)\n lsselfdot = LoopVectorization.loopset(selfdotq);\n @test LoopVectorization.choose_order(lsselfdot) == (Symbol[:i], :i, Symbol(\"##undefined##\"), :i, 8, -1)\n\n function myselfdot(a)\n s = zero(eltype(a))\n @inbounds @simd for i \u2208 eachindex(a)\n s += getindex(a, i) * a[i]\n end\n s\n end\n function myselfdotavx(a)\n s = zero(eltype(a))\n @turbo for i \u2208 eachindex(a)\n s += a[i]*a[i]\n end\n s\n end\n function myselfdotavx_v2(a)\n s = zero(eltype(a))\n @turbo for i \u2208 1:length(a)\n s += a[i]*a[i]\n end\n s\n end\n function myselfdotavx_range(a)\n s = zero(eltype(a))\n rng = axes(a, 1)\n @turbo for i \u2208 rng\n s += a[i]*a[i]\n end\n s\n end\n function myselfdot_avx(a)\n s = zero(eltype(a))\n @_avx for i \u2208 eachindex(a)\n s += a[i]*a[i]\n end\n s\n end\n function dot_unroll2avx(x::Vector{T}, y::Vector{T}) where {T<:Number}\n z = zero(T)\n @turbo unroll=2 for i \u2208 1:length(x)\n z += x[i]*y[i]\n end\n z\n end\n function dot_unroll3avx(x::Vector{T}, y::Vector{T}) where {T<:Number}\n z = zero(T)\n @turbo unroll=3 for i \u2208 1:length(x)\n z += x[i]*y[i]\n end\n z\n end\n # @macroexpand @turbo inline=false unroll=2 for i \u2208 1:length(x)\n # z += x[i]*y[i]\n # end\n\n function dot_unroll2avx_noinline(x::Vector{T}, y::Vector{T}) where {T<:Number}\n z = zero(T)\n @turbo inline=true unroll=2 for i \u2208 1:length(x)\n z += x[i]*y[i]\n end\n z\n end\n function dot_unroll3avx_inline(x::Vector{T}, y::Vector{T}) where {T<:Number}\n z = zero(T)\n @turbo unroll=3 inline=true check_empty=true for i \u2208 1:length(x)\n z += x[i]*y[i]\n end\n z\n end\n function dot_unroll2_avx(x::Vector{T}, y::Vector{T}) where {T<:Number}\n z = zero(T)\n @_avx unroll=2 for i \u2208 1:length(x)\n z += x[i]*y[i]\n end\n z\n end\n function dot_unroll3_avx(x::Vector{T}, y::Vector{T}) where {T<:Number}\n z = zero(T)\n @_avx unroll=3 for i \u2208 1:length(x)\n z += x[i]*y[i]\n end\n z\n end\n function complex_dot_soa(\n xre::AbstractVector{T}, xim::AbstractVector{T},\n yre::AbstractVector{T}, yim::AbstractVector{T}\n ) where {T}\n zre = zero(T)\n zim = zero(T)\n @turbo for i \u2208 1:length(xre)\n 
zre += xre[i]*yre[i] - xim[i]*yim[i]\n zim += xre[i]*yim[i] + xim[i]*yre[i]\n end\n Complex{T}(zre,zim)\n end\n qc = :(for i \u2208 1:length(xre)\n zre += xre[i]*yre[i] - xim[i]*yim[i]\n zim += xre[i]*yim[i] + xim[i]*yre[i]\n end);\n lsc = LoopVectorization.loopset(qc);\n function complex_mul_with_index_offset!(c_re, c_im, a_re, a_im, b_re, b_im)\n @inbounds @simd ivdep for i = 1:length(a_re) - 1\n c_re[i] = b_re[i] * a_re[i + 1] - b_im[i] * a_im[i + 1]\n c_im[i] = b_re[i] * a_im[i + 1] + b_im[i] * a_re[i + 1]\n end\n end\n function complex_mul_with_index_offsetavx!(c_re, c_im, a_re, a_im, b_re, b_im)\n @turbo for i = 1:length(a_re) - 1\n c_re[i] = b_re[i] * a_re[i + 1] - b_im[i] * a_im[i + 1]\n c_im[i] = b_re[i] * a_im[i + 1] + b_im[i] * a_re[i + 1]\n end\n end\n function complex_mul_with_index_offset_avx!(c_re, c_im, a_re, a_im, b_re, b_im)\n @_avx for i = 1:length(a_re) - 1\n c_re[i] = b_re[i] * a_re[i + 1] - b_im[i] * a_im[i + 1]\n setindex!(c_im, b_re[i] * a_im[i + 1] + b_im[i] * a_re[i + 1], i)\n end\n end\n # q = :(for i = 1:length(a_re) - 1\n # c_re[i] = b_re[i] * a_re[i + 1] - b_im[i] * a_im[i + 1]\n # c_im[i] = b_re[i] * a_im[i + 1] + b_im[i] * a_re[i + 1]\n # end);\n # ls = LoopVectorization.loopset(q)\n\n function mcpi(x, y)\n acc = 0\n @inbounds @simd for i \u2208 eachindex(x)\n acc += (x[i]*x[i] + y[i]*y[i]) < 1.0\n end\n 4acc\/length(x)\n end\n function mcpiavx(x, y)\n acc = 0\n @turbo for i \u2208 eachindex(x)\n acc += (x[i]*x[i] + y[i]*y[i]) < 1.0\n end\n 4acc\/length(x)\n end\n function mcpiavx_u4(x, y)\n acc = 0\n @turbo unroll=4 for i \u2208 eachindex(x)\n acc += (x[i]*x[i] + y[i]*y[i]) < 1.0\n end\n 4acc\/length(x)\n end\n function mcpi_avx(x, y)\n acc = 0\n @_avx for i \u2208 eachindex(x)\n acc += (x[i]*x[i] + y[i]*y[i]) < 1.0\n end\n 4acc\/length(x)\n end\n function mcpi_avx_u4(x, y)\n acc = 0\n @_avx unroll=4 for i \u2208 1:length(x)\n acc += (x[i]*x[i] + y[i]*y[i]) < 1.0\n end\n 4acc\/length(x)\n end\n\n function dotloopinductvarpow(x::AbstractArray{T}) where {T}\n s = zero(T)\n for i \u2208 eachindex(x)\n s += x[i] * T(i)^3\n end\n s\n end\n function dotloopinductvarpowavx(x)\n s = zero(eltype(x))\n @turbo for i \u2208 eachindex(x)\n s += x[i] * i^3\n end\n s\n end\n function dot_from_n_to_100(a, b, n)\n s = zero(eltype(a))\n @turbo for i \u2208 n:100\n s += a[i] * b[i]\n end\n s\n end\n function dot33(a,b)\n s = zero(eltype(a))\n @turbo for i \u2208 1:33\n s += a[i] * b[i]\n end\n s\n end\n function dot17(a,b)\n s = zero(eltype(a))\n @turbo for i \u2208 1:17\n s += a[i] * b[i]\n end\n s\n end\n # @macroexpand @_avx for i = 1:length(a_re) - 1\n # c_re[i] = b_re[i] * a_re[i + 1] - b_im[i] * a_im[i + 1]\n # c_im[i] = b_re[i] * a_im[i + 1] + b_im[i] * a_re[i + 1]\n # end\n\n # a = rand(400);\n for T \u2208 (Float32, Float64, Int32, Int64)\n @show T, @__LINE__\n N = 143\n R = T <: Integer ? 
(T(-100):T(100)) : T\n a = rand(R, N); b = rand(R, N);\n ao = OffsetArray(a, -60:N-61); bo = OffsetArray(b, -60:N-61);\n s = mydot(a, b)\n @test mydotavx(a,b) \u2248 s\n @test mydot_avx(a,b) \u2248 s\n @test mydotavx(ao,bo) \u2248 s\n @test mydot_avx(ao,bo) \u2248 s\n @test dot_unroll2avx(a,b) \u2248 s\n @test dot_unroll3avx(a,b) \u2248 s\n @test dot_unroll2_avx(a,b) \u2248 s\n @test dot_unroll3_avx(a,b) \u2248 s\n @test dot_unroll2avx_noinline(a,b) \u2248 s\n @test dot_unroll3avx_inline(a,b) \u2248 s\n s = myselfdot(a)\n @test myselfdotavx(a) \u2248 s\n @test myselfdotavx_v2(a) \u2248 s\n @test myselfdotavx_range(a) \u2248 s\n @test myselfdot_avx(a) \u2248 s\n @test myselfdotavx(a) \u2248 s\n\n A = OffsetArray(rand(37, 61), -5, 10);\n s = myselfdot(A);\n @test myselfdotavx(A) \u2248 myselfdotavx(A') \u2248 s\n @test myselfdotavx_v2(A) \u2248 myselfdotavx_v2(A') \u2248 s\n # @test myselfdot_avx(A) \u2248 myselfdot_avx(A') \u2248 s\n\n @test dot17(a,b) \u2248 @view(a[1:17])' * @view(b[1:17])\n @test dot33(a,b) \u2248 @view(a[1:33])' * @view(b[1:33])\n\n if T <: Union{Float32,Float64}\n \u03c0est = mcpi(a, b)\n @test \u03c0est \u2248 mcpiavx(a, b)\n @test \u03c0est \u2248 mcpiavx_u4(a, b)\n @test \u03c0est \u2248 mcpi_avx(a, b)\n @test \u03c0est \u2248 mcpi_avx_u4(a, b)\n end\n\n if !(!Bool(LoopVectorization.VectorizationBase.has_feature(Val(:x86_64_avx2))) && T === Int32)\n @test dotloopinductvarpow(a) \u2248 dotloopinductvarpowavx(a)\n end\n @test dot_from_n_to_100(a, b, 33) == @views mydotavx(a[33:100], b[33:100])\n\n a_re = rand(R, N); a_im = rand(R, N);\n b_re = rand(R, N); b_im = rand(R, N);\n ac = Complex.(a_re, a_im);\n bc = Complex.(b_re, b_im);\n\n @test mydot(ac, bc) \u2248 complex_dot_soa(a_re, a_im, b_re, b_im)\n\n c_re1 = similar(a_re); c_im1 = similar(a_im);\n c_re2 = similar(a_re); c_im2 = similar(a_im);\n # b_re = rand(R, length(a_re) + 1); b_im = rand(R, length(a_im) + 1);\n complex_mul_with_index_offset!(c_re1, c_im1, a_re, a_im, b_re, b_im)\n complex_mul_with_index_offsetavx!(c_re2, c_im2, a_re, a_im, b_re, b_im)\n c_re1v, c_im1v, c_re2v, c_im2v = @views c_re1[1:end-1], c_im1[1:end-1], c_re2[1:end-1], c_im2[1:end-1];\n @test c_re1v \u2248 c_re2v\n @test c_im1v \u2248 c_im2v\n fill!(c_re2, -999999); fill!(c_im2, 99999999);\n complex_mul_with_index_offset_avx!(c_re2, c_im2, a_re, a_im, b_re, b_im)\n @test c_re1v \u2248 c_re2v\n @test c_im1v \u2248 c_im2v\n\n # Float32 is not accurate enough\n # Alternatively, loosen approx requirement?\n R == Float32 && continue\n A = rand(R, N, N, N);\n B = rand(R, N, N, N);\n @test mydot(A, B) \u2248 mydotavx(A, B)\n # test CartesianIndices\n for i \u2208 [3, :, 1:N-1], j \u2208 [5, :, 1:N-2], k \u2208 [:, 1:N-3]\n Av = view(A, i, j, k);\n Bv = view(B, i, j, k);\n # @show i, j, k\n @test mydot(Av, Bv) \u2248 mydotavx(Av, Bv) \n end\n end\nend\n\n","avg_line_length":31.298245614,"max_line_length":118,"alphanum_fraction":0.4961696562} {"size":7023,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This file is a part of Julia. License is MIT: https:\/\/julialang.org\/license\n\nusing Random, Sockets\n\nconst STDLIB_DIR = joinpath(Sys.BINDIR, \"..\", \"share\", \"julia\", \"stdlib\", \"v$(VERSION.major).$(VERSION.minor)\")\nconst STDLIBS = readdir(STDLIB_DIR)\n\n\"\"\"\n\n`tests, net_on, exit_on_error, seed = choosetests(choices)` selects a set of tests to be\nrun. 
`choices` should be a vector of test names; if empty or set to\n`[\"all\"]`, all tests are selected.\n\nThis function also supports \"test collections\": specifically, \"linalg\"\n refers to collections of tests in the correspondingly-named\ndirectories.\n\nUpon return:\n - `tests` is a vector of fully-expanded test names,\n - `net_on` is true if networking is available (required for some tests),\n - `exit_on_error` is true if an error in one test should cancel\n remaining tests to be run (otherwise, all tests are run unconditionally),\n - `seed` is a seed which will be used to initialize the global RNG for each\n test to be run.\n\nThree options can be passed to `choosetests` by including a special token\nin the `choices` argument:\n - \"--skip\", which makes all tests coming after be skipped,\n - \"--exit-on-error\" which sets the value of `exit_on_error`,\n - \"--seed=SEED\", which sets the value of `seed` to `SEED`\n (parsed as an `UInt128`); `seed` is otherwise initialized randomly.\n This option can be used to reproduce failed tests.\n\"\"\"\nfunction choosetests(choices = [])\n testnames = [\n \"subarray\", \"core\", \"compiler\", \"worlds\",\n \"keywordargs\", \"numbers\", \"subtype\",\n \"char\", \"strings\", \"triplequote\", \"unicode\", \"intrinsics\",\n \"dict\", \"hashing\", \"iobuffer\", \"staged\", \"offsetarray\",\n \"arrayops\", \"tuple\", \"reduce\", \"reducedim\", \"abstractarray\",\n \"intfuncs\", \"simdloop\", \"vecelement\", \"rational\",\n \"bitarray\", \"copy\", \"math\", \"fastmath\", \"functional\", \"iterators\",\n \"operators\", \"path\", \"ccall\", \"parse\", \"loading\", \"bigint\",\n \"bigfloat\", \"sorting\", \"statistics\", \"spawn\", \"backtrace\",\n \"file\", \"read\", \"version\", \"namedtuple\",\n \"mpfr\", \"broadcast\", \"complex\",\n \"floatapprox\", \"stdlib\", \"reflection\", \"regex\", \"float16\",\n \"combinatorics\", \"sysinfo\", \"env\", \"rounding\", \"ranges\", \"mod2pi\",\n \"euler\", \"show\",\n \"errorshow\", \"sets\", \"goto\", \"llvmcall\", \"llvmcall2\", \"grisu\",\n \"some\", \"meta\", \"stacktraces\", \"docs\",\n \"misc\", \"threads\",\n \"enums\", \"cmdlineargs\", \"int\",\n \"checked\", \"bitset\", \"floatfuncs\", \"precompile\", \"inline\",\n \"boundscheck\", \"error\", \"ambiguous\", \"cartesian\", \"osutils\",\n \"channels\", \"iostream\", \"specificity\", \"codegen\",\n \"reinterpretarray\", \"syntax\", \"logging\", \"missing\", \"asyncmap\"\n ]\n\n tests = []\n skip_tests = []\n exit_on_error = false\n seed = rand(RandomDevice(), UInt128)\n\n for (i, t) in enumerate(choices)\n if t == \"--skip\"\n skip_tests = choices[i + 1:end]\n break\n elseif t == \"--exit-on-error\"\n exit_on_error = true\n elseif startswith(t, \"--seed=\")\n seed = parse(UInt128, t[8:end])\n else\n push!(tests, t)\n end\n end\n\n if tests == [\"all\"] || isempty(tests)\n tests = testnames\n end\n\n\n unicodetests = [\"unicode\/utf8\"]\n if \"unicode\" in skip_tests\n filter!(x -> (x != \"unicode\" && !(x in unicodetests)), tests)\n elseif \"unicode\" in tests\n # specifically selected case\n filter!(x -> x != \"unicode\", tests)\n prepend!(tests, unicodetests)\n end\n\n stringtests = [\"strings\/basic\", \"strings\/search\", \"strings\/util\",\n \"strings\/io\", \"strings\/types\"]\n if \"strings\" in skip_tests\n filter!(x -> (x != \"strings\" && !(x in stringtests)), tests)\n elseif \"strings\" in tests\n # specifically selected case\n filter!(x -> x != \"strings\", tests)\n prepend!(tests, stringtests)\n end\n\n # do subarray before sparse but after 
linalg\n if \"subarray\" in skip_tests\n filter!(x -> x != \"subarray\", tests)\n elseif \"subarray\" in tests\n filter!(x -> x != \"subarray\", tests)\n prepend!(tests, [\"subarray\"])\n end\n\n compilertests = [\"compiler\/compiler\", \"compiler\/validation\"]\n\n if \"compiler\" in skip_tests\n filter!(x -> (x != \"compiler\" && !(x in compilertests)), tests)\n elseif \"compiler\" in tests\n # specifically selected case\n filter!(x -> x != \"compiler\", tests)\n prepend!(tests, compilertests)\n end\n\n if \"stdlib\" in skip_tests\n filter!(x -> (x != \"stdlib\" && !(x in STDLIBS)) , tests)\n elseif \"stdlib\" in tests\n filter!(x -> (x != \"stdlib\" && !(x in STDLIBS)) , tests)\n prepend!(tests, STDLIBS)\n end\n\n # do ambiguous first to avoid failing if ambiguities are introduced by other tests\n if \"ambiguous\" in skip_tests\n filter!(x -> x != \"ambiguous\", tests)\n elseif \"ambiguous\" in tests\n filter!(x -> x != \"ambiguous\", tests)\n prepend!(tests, [\"ambiguous\"])\n end\n\n if startswith(string(Sys.ARCH), \"arm\")\n # Remove profile from default tests on ARM since it currently segfaults\n # Allow explicitly adding it for testing\n @warn \"Skipping Profile tests\"\n filter!(x -> (x != \"Profile\"), tests)\n end\n\n net_required_for = [\"Sockets\", \"LibGit2\"]\n net_on = true\n try\n ipa = getipaddr()\n catch\n @warn \"Networking unavailable: Skipping tests [\" * join(net_required_for, \", \") * \"]\"\n net_on = false\n end\n\n if !net_on\n filter!(!in(net_required_for), tests)\n end\n\n if ccall(:jl_running_on_valgrind,Cint,()) != 0 && \"rounding\" in tests\n @warn \"Running under valgrind: Skipping rounding tests\"\n filter!(x -> x != \"rounding\", tests)\n end\n\n # The shift and invert solvers need SuiteSparse for sparse input\n Base.USE_GPL_LIBS || filter!(x->x != \"IterativeEigensolvers\", STDLIBS)\n\n filter!(!in(skip_tests), tests)\n\n explicit_pkg = \"OldPkg\/pkg\" in tests\n explicit_pkg3 = \"Pkg\/pkg\" in tests\n explicit_libgit2 = \"LibGit2\/online\" in tests\n new_tests = String[]\n for test in tests\n if test in STDLIBS\n testfile = joinpath(STDLIB_DIR, test, \"test\", \"testgroups\")\n if isfile(testfile)\n testgroups = readlines(testfile)\n length(testgroups) == 0 && error(\"no testgroups defined for $test\")\n prepend!(new_tests, (test * \"\/\") .* testgroups)\n else\n push!(new_tests, test)\n end\n end\n end\n filter!(x -> (x != \"stdlib\" && !(x in STDLIBS)) , tests)\n append!(tests, new_tests)\n explicit_pkg || filter!(x -> x != \"OldPkg\/pkg\", tests)\n explicit_pkg3 || filter!(x -> x != \"Pkg\/pkg\", tests)\n explicit_libgit2 || filter!(x -> x != \"LibGit2\/online\", tests)\n\n # Filter out tests from the test groups in the stdlibs\n filter!(!in(skip_tests), tests)\n\n tests, net_on, exit_on_error, seed\nend\n","avg_line_length":36.578125,"max_line_length":111,"alphanum_fraction":0.5977502492} {"size":4444,"ext":"jl","lang":"Julia","max_stars_count":25.0,"content":"using ..Pickle: Memo, PickleStack, HierarchicalTable, load, isdefer\n\nusing DataStructures\nusing Strided\nusing ZipFile\n\nconst DEFAULT_PROTO = 2\n\nconst MAGIC = BigInt(0x1950a86a20f9469cfc6c)\nconst TORCH_PROTOCOL = 1001\n\nstruct TorchPickler{PROTO} <: AbstractPickle\n memo::Memo\n stack::PickleStack\n mt::HierarchicalTable\n storage::StorageManager\nend\n\nfunction TorchPickler(proto=DEFAULT_PROTO, memo=Dict())\n st = StorageManager()\n mt = HierarchicalTable()\n\n # some corresponding methods\n mt[\"collections.OrderedDict\"] = OrderedDict\n mt[\"torch._utils._rebuild_tensor_v2\"] = 
(arg...) -> build_tensor(st, arg...)\n\n mt[\"__julia__.__torch__.rebuild_tensor\"] = \"torch._utils._rebuild_tensor_v2\"\n mt[\"__julia__.OrderedDict\"] = \"collections.OrderedDict\"\n mt[\"__julia__.__torch__.StorageType.Float32\"] = \"torch.FloatStorage\"\n mt[\"__julia__.__torch__.StorageType.Float64\"] = \"torch.DoubleStorage\"\n mt[\"__julia__.__torch__.StorageType.Float16\"] = \"torch.HalfStorage\"\n mt[\"__julia__.__torch__.StorageType.UInt8\"] = \"torch.ByteStorage\"\n mt[\"__julia__.__torch__.StorageType.Int8\"] = \"torch.CharStorage\"\n mt[\"__julia__.__torch__.StorageType.Int16\"] = \"torch.ShortStorage\"\n mt[\"__julia__.__torch__.StorageType.Int32\"] = \"torch.IntStorage\"\n mt[\"__julia__.__torch__.StorageType.Int64\"] = \"torch.LongStorage\"\n mt[\"__julia__.__torch__.StorageType.Bool\"] = \"torch.BoolStorage\"\n\n # ingore state_dict version number\n mt[\"__build__.OrderedCollections.OrderedDict\"] = (od, _meta) -> od\n\n TorchPickler{proto}(Memo(memo), PickleStack(), mt, st)\nend\n\nprotocol(::TorchPickler{P}) where {P} = P\nisbinary(pklr::TorchPickler) = protocol(pklr) >= 1\n\n\"\"\"\n THload(file::AbstractString)\n\nload data that saved by `torch.save`. `torch.tensor` \nwill be load as `Array` or `Strided.StridedView` \ndependent on the memory layout of that tensor.\n\"\"\"\nTHload(file::AbstractString) = open(file) do io\n THload(TorchPickler(), io)\nend\n\nfunction THload(tp::TorchPickler, io)\n if peek(io) == 0x80\n return legacy_load(tp, io)\n elseif read(io, 4) == b\"PK\\x03\\x04\"\n z = ZipFile.Reader(io)\n if any(x->x.name==\"constants.pkl\", z.files)\n error(\"TorchScript archive not support.\")\n end\n return zip_load(tp, z)\n else\n error(\"Unkown file format. Is this really a file from `torch.save`?\")\n end\nend\n\nfunction get_record(zipfile, name)\n zipfile.files[findfirst(x->endswith(x.name, name), zipfile.files)]\nend\n\nfunction zip_load(tp::TorchPickler, zipfile)\n typeinfo = load(tp, get_record(zipfile, \"data.pkl\"))\n load_tensor_zip!(zipfile, tp.storage)\n return typeinfo\nend\n\nfunction unchecked_legacy_load(tp::TorchPickler, io)\n magic = load(tp, io)\n magic != MAGIC && error(\"Invalid magic number; corrupt file?\")\n torch_protocol = load(tp, io)\n torch_protocol != TORCH_PROTOCOL && error(\"Invalid protocol version: $torch_protocol\")\n\n _sys_info = load(tp, io)\n\n typeinfo = load(tp, io)\n tensor_key = load(tp, io)\n load_tensor!(io, tp.storage, tensor_key)\n return typeinfo\nend\n\nfunction legacy_load(tp::TorchPickler, io)\n typeinfo = unchecked_legacy_load(tp, io)\n @assert !isdefer(typeinfo)\n return typeinfo\nend\n\nfunction build_tensor(sm::StorageManager, fake_storage, offset, tsize, tstride, grad, _)\n @assert length(tsize) == length(tstride)\n @assert fake_storage.head == :persistent_load\n header, thtype, key, device, numel, = fake_storage.args[1]\n @assert header == \"storage\"\n dtype = thtype2dtype(thtype)\n jltype = dtype2jltype(dtype)\n tlength = prod(tsize)\n\n if haskey(sm, key)\n storage = sm[key][end]\n else\n storage = Array{jltype}(undef, numel)\n setindex!(sm, (dtype, numel, device, storage), key)\n end\n\n if (tlength == numel) && (isone(length(tsize)) || isempty(tsize) || isone(first(tstride)))\n tensor = reshape(storage, tsize) # f-contiguous\n else # otherwise use strided\n tensor = StridedView(storage, tsize, tstride, offset)\n end\n\n return tensor\nend\n\nfunction load_tensor!(io::IO, sm::StorageManager, tensor_key)\n for key in tensor_key\n type, numel, device, storage = sm[key]\n tsize = read(io, Int64)\n 
@assert tsize == numel\n tdata = read(io, tsize * bytewidth(type))\n storage .= reinterpret(dtype2jltype(type), tdata)\n end\nend\n\nfunction load_tensor_zip!(zipfile, sm::StorageManager)\n for (key, values) in pairs(sm)\n type, numel, device, storage = values\n zf = get_record(zipfile, \"\/$key\")\n tdata = read(zf)\n storage .= reinterpret(dtype2jltype(type), tdata)\n end\nend\n","avg_line_length":30.4383561644,"max_line_length":92,"alphanum_fraction":0.7196219622} {"size":3269,"ext":"jl","lang":"Julia","max_stars_count":17.0,"content":"import DataFrames\nimport Statistics\n\n\"\"\"\n\"\"\"\nfunction risk_score_cutoff_values(\n estimator::AbstractFittable,\n features_df::DataFrames.AbstractDataFrame,\n labels_df::DataFrames.AbstractDataFrame,\n single_label_name::Symbol,\n positive_class::AbstractString;\n multiply_by::Real = 1.0,\n average_function = Statistics.mean,\n )\n #\n ytrue = Int.(\n singlelabelbinaryytrue(\n labels_df[single_label_name],\n positive_class,\n )\n )\n #\n predictedprobabilitiesalllabels =\n predict_proba(estimator, features_df)\n yscore = Float64.(\n singlelabelbinaryyscore(\n predictedprobabilitiesalllabels[single_label_name],\n positive_class,\n )\n )\n #\n cutoffs, risk_group_prevalences = risk_score_cutoff_values(\n ytrue,\n yscore;\n multiply_by = multiply_by,\n average_function = average_function,\n )\n return cutoffs, risk_group_prevalences\nend\n\n\"\"\"\n\"\"\"\nfunction risk_score_cutoff_values(\n ytrue::AbstractVector{<:Integer},\n yscore::AbstractVector{<:AbstractFloat};\n multiply_by::Real = 1.0,\n average_function = Statistics.mean,\n )\n true_negative_rows = findall(\n ytrue .== 0\n )\n true_positive_rows = findall(\n ytrue .== 1\n )\n #\n average_score_true_negatives = average_function(\n yscore[true_negative_rows]\n )\n average_score_true_positives = average_function(\n yscore[true_positive_rows]\n )\n #\n lower_cutoff = multiply_by * average_score_true_negatives\n higher_cutoff = multiply_by * average_score_true_positives\n #\n cutoffs = (lower_cutoff, higher_cutoff,)\n #\n low_risk_group_rows = findall(\n yscore .<= average_score_true_negatives\n )\n medium_risk_group_rows = findall(\n average_score_true_negatives .<=\n yscore .<=\n average_score_true_positives\n )\n high_risk_group_rows = findall(\n average_score_true_positives .<= yscore\n )\n #\n risk_group_prevalences = DataFrames.DataFrame()\n risk_group_prevalences[:Risk_group] = [\n \"Low risk\",\n \"Medium risk\",\n \"High risk\",\n ]\n risk_group_prevalences[:User_supplied_average_function] = [\n average_function( ytrue[low_risk_group_rows] ),\n average_function( ytrue[medium_risk_group_rows] ),\n average_function( ytrue[high_risk_group_rows] ),\n ]\n risk_group_prevalences[:Arithmetic_mean] = [\n Statistics.mean( ytrue[low_risk_group_rows] ),\n Statistics.mean( ytrue[medium_risk_group_rows] ),\n Statistics.mean( ytrue[high_risk_group_rows] ),\n ]\n risk_group_prevalences[:Median] = [\n Statistics.median( ytrue[low_risk_group_rows] ),\n Statistics.median( ytrue[medium_risk_group_rows] ),\n Statistics.median( ytrue[high_risk_group_rows] ),\n ]\n if average_function==Statistics.mean || average_function==Statistics.median\n DataFrames.deletecols!(\n risk_group_prevalences,\n [:User_supplied_average_function],\n )\n end\n return cutoffs, risk_group_prevalences\nend\n\n","avg_line_length":29.7181818182,"max_line_length":79,"alphanum_fraction":0.6393392475} {"size":4863,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using 
Documenter,DiffEqBase,DiffEqProblemLibrary,DiffEqBiological\n\nODEProblemLibrary = DiffEqProblemLibrary.ODEProblemLibrary\nODEProblemLibrary.importodeproblems()\n\nSDEProblemLibrary = DiffEqProblemLibrary.SDEProblemLibrary\nSDEProblemLibrary.importsdeproblems()\n\nDDEProblemLibrary = DiffEqProblemLibrary.DDEProblemLibrary\nDDEProblemLibrary.importddeproblems()\n\nDAEProblemLibrary = DiffEqProblemLibrary.DAEProblemLibrary\nDAEProblemLibrary.importdaeproblems()\n\nmakedocs(modules=[DiffEqBase,DiffEqProblemLibrary,DiffEqBiological,ODEProblemLibrary,SDEProblemLibrary,DDEProblemLibrary,DAEProblemLibrary],\n doctest=false, clean=true,\n format = Documenter.HTML(analytics = \"UA-90474609-3\",\n assets = [\"assets\/favicon.ico\"],\n canonical=\"https:\/\/diffeq.sciml.ai\/stable\/\"),\n sitename=\"DifferentialEquations.jl\",\n authors=\"Chris Rackauckas\",\n pages = Any[\n \"DifferentialEquations.jl: Scientific Machine Learning (SciML) Enabled Simulation and Estimation\" => \"index.md\",\n \"Tutorials\" => Any[\n \"tutorials\/ode_example.md\",\n \"tutorials\/advanced_ode_example.md\",\n \"tutorials\/sde_example.md\",\n \"tutorials\/rode_example.md\",\n \"tutorials\/dde_example.md\",\n \"tutorials\/dae_example.md\",\n \"tutorials\/discrete_stochastic_example.md\",\n \"tutorials\/jump_diffusion.md\",\n \"tutorials\/bvp_example.md\",\n \"tutorials\/additional.md\"\n ],\n \"Basics\" => Any[\n \"basics\/overview.md\",\n \"basics\/common_solver_opts.md\",\n \"basics\/solution.md\",\n \"basics\/plot.md\",\n \"basics\/integrator.md\",\n \"basics\/problem.md\",\n \"basics\/faq.md\",\n \"basics\/compatibility_chart.md\"\n ],\n \"Problem Types\" => Any[\n \"types\/discrete_types.md\",\n \"types\/ode_types.md\",\n \"types\/dynamical_types.md\",\n \"types\/split_ode_types.md\",\n \"types\/steady_state_types.md\",\n \"types\/bvp_types.md\",\n \"types\/sde_types.md\",\n \"types\/rode_types.md\",\n \"types\/dde_types.md\",\n \"types\/dae_types.md\",\n \"types\/jump_types.md\",\n ],\n \"Solver Algorithms\" => Any[\n \"solvers\/discrete_solve.md\",\n \"solvers\/ode_solve.md\",\n \"solvers\/dynamical_solve.md\",\n \"solvers\/split_ode_solve.md\",\n \"solvers\/steady_state_solve.md\",\n \"solvers\/bvp_solve.md\",\n \"solvers\/jump_solve.md\",\n \"solvers\/sde_solve.md\",\n \"solvers\/rode_solve.md\",\n \"solvers\/dde_solve.md\",\n \"solvers\/dae_solve.md\",\n \"solvers\/benchmarks.md\"\n ],\n \"Additional Features\" => Any[\n \"features\/performance_overloads.md\",\n \"features\/diffeq_arrays.md\",\n \"features\/diffeq_operator.md\",\n \"features\/noise_process.md\",\n \"features\/linear_nonlinear.md\",\n \"features\/callback_functions.md\",\n \"features\/callback_library.md\",\n \"features\/ensemble.md\",\n \"features\/io.md\",\n \"features\/low_dep.md\",\n \"features\/progress_bar.md\"\n ],\n \"Analysis Tools\" => Any[\n \"analysis\/parameterized_functions.md\",\n \"analysis\/parameter_estimation.md\",\n \"analysis\/bifurcation.md\",\n \"analysis\/sensitivity.md\",\n \"analysis\/global_sensitivity.md\",\n \"analysis\/uncertainty_quantification.md\",\n \"analysis\/neural_networks.md\",\n \"analysis\/dev_and_test.md\"\n ],\n \"Domain Modeling Tools\" => Any[\n \"models\/multiscale.md\",\n \"models\/physical.md\",\n \"models\/financial.md\",\n \"models\/biological.md\",\n \"models\/external_modeling.md\"\n ],\n \"APIs\" => Any[\n \"apis\/diffeqbio.md\"\n ],\n \"Extra Details\" => Any[\n \"extras\/timestepping.md\",\n \"extras\/sensitivity_math.md\",\n ]\n ])\n\n#Redirect old links\n# cd(joinpath(@__DIR__, \"build\")) 
do\n# for (root, dirs, files) in walkdir(\".\")\n# for file in files\n# path = relpath(joinpath(root, file), \".\")\n# m = match(r\"(.+)\/index\\.html$\", path)\n# m === nothing && continue\n# redirect = \"$(m[1]).html\"\n# @info \"Adding redirect for $(m[1]) from $(redirect)\"\n# isfile(redirect) && (@warn \"$redirect exists, skip\"; continue)\n# open(redirect, \"w\") do io\n# write(io, \"\"\"\n# \n# \"\"\")\n# end\n# end\n# end\n# end\n\ndeploydocs(\n repo = \"github.com\/SciML\/DiffEqDocs.jl.git\"\n)\n","avg_line_length":36.2910447761,"max_line_length":140,"alphanum_fraction":0.5671396257} {"size":2168,"ext":"jl","lang":"Julia","max_stars_count":11.0,"content":"function all_1e_ints(bfs::BasisSet,mol::Molecule)\n n = length(bfs.bfs)\n S = Array(Float64,(n,n))\n T = Array(Float64,(n,n))\n V = Array(Float64,(n,n))\n for (i,j) in pairs(n)\n a,b = bfs.bfs[i],bfs.bfs[j]\n S[i,j] = S[j,i] = overlap(a,b)\n T[i,j] = T[j,i] = kinetic(a,b)\n V[i,j] = V[j,i] = nuclear_attraction(a,b,mol)\n end\n return S,T,V\nend\n\nfunction all_twoe_ints(bflist,ERI=coulomb)\n n = length(bflist.bfs)\n totlen = div(n*(n+1)*(n*n+n+2),8)\n ints2e = Array(Float64,totlen)\n for (i,j,k,l) in iiterator(n)\n ints2e[iindex(i,j,k,l)] = ERI(bflist.bfs[i],bflist.bfs[j],bflist.bfs[k],bflist.bfs[l])\n end\n return ints2e\nend\n\nfunction make2JmK(D::Array{Float64,2},Ints::Array{Float64,1})\n n = size(D,1)\n G = Array(Float64,(n,n))\n D1 = reshape(D,n*n)\n temp = Array(Float64,n*n)\n for (i,j) in pairs(n)\n kl = 1\n for (k,l) in pairs(n,\"rect\")\n temp[kl] = 2*Ints[iindex(i,j,k,l)]-Ints[iindex(i,k,j,l)]\n kl += 1\n end\n G[i,j] = G[j,i] = dot(D1,temp)\n end\n return G\nend\n\ndmat(U::Array{Float64,2},nocc::Int64) = U[:,1:nocc]*U[:,1:nocc]'\n\n\nfunction rhf(mol::Molecule,MaxIter::Int64=8,verbose::Bool=false)\n @printf(\"Starting RHF calculation:\")\n\n @printf(\"Building basis ...\")\n bfs = build_basis(mol)\n @printf(\"... done\\n\\n\")\n\n @printf(\"1e integrals ...\")\n S,T,V = all_1e_ints(bfs,mol)\n @printf(\"... done\\n\\n\")\n\n @printf(\"2e integrals ...\")\n Ints = all_twoe_ints(bfs)\n @printf(\"... 
done\\n\\n\")\n\n h = T+V\n E,U = eig(h,S)\n Enuke = nuclear_repulsion(mol)\n nclosed,nopen = divrem(nel(mol),2)\n Eold = 0\n Energy = 0\n println(\"Nel=$(nel(mol)) Nclosed=$nclosed\")\n if verbose\n println(\"S=\\n$S\")\n println(\"h=\\n$h\")\n println(\"T=\\n$T\")\n println(\"V=\\n$V\")\n println(\"E: $E\")\n println(\"U: $U\")\n println(\"2e ints:\\n$Ints\")\n end\n for iter in 1:MaxIter\n D = dmat(U,nclosed)\n if verbose\n println(\"D=\\n$D\")\n end\n G = make2JmK(D,Ints)\n H = h+G\n E,U = eig(H,S)\n Eone = trace2(D,h)\n Etwo = trace2(D,H)\n Energy = Enuke + Eone + Etwo\n println(\"HF: $iter $Energy : $Enuke $Eone $Etwo\")\n if isapprox(Energy,Eold)\n break\n end\n Eold = Energy\n end\n return Energy,E,U\nend\n","avg_line_length":23.0638297872,"max_line_length":90,"alphanum_fraction":0.5793357934} {"size":345,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"import accrue.cryptoerase.runtime.Condition;\n\nclass H {\n final C c;\n public int{L \/c.c H} y; \n public H(C c) {\n\tthis.c = c;\n\tc.c.set();\n }\n}\n\nclass C {\n static final Condition c = new Condition();\n int{L \/c H} x = 0;\n\n public static void main(String[] args) {\n\tC c = new C();\n\tH h = new H(c);\n\tc.x = 42;\n\th.y = c.x;\n }\n}\n","avg_line_length":15.0,"max_line_length":47,"alphanum_fraction":0.5333333333} {"size":217,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"import LinearAlgebra: Symmetric\n@explicit_intercepts Symmetric Tuple{\u2207Array}\n\u2207(::typeof(Symmetric), ::Type{Arg{1}}, p, Y::\u2207Array, Y\u0304::\u2207Array, X::\u2207Array) =\n UpperTriangular(Y\u0304) + LowerTriangular(Y\u0304)' - Diagonal(Y\u0304)\n","avg_line_length":43.4,"max_line_length":77,"alphanum_fraction":0.6866359447} {"size":50,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"using Plots\np = plot(rand(5), rand(5))\ndisplay(p)\n","avg_line_length":12.5,"max_line_length":26,"alphanum_fraction":0.66} {"size":500,"ext":"jl","lang":"Julia","max_stars_count":9.0,"content":"add_noise(X, d, N, noiseVar = 1e-10) = X .+ (noiseVar .* rand(d, N))\n\n# Return the kNN nearest point.\nfunction knn_nearest_point(X::Array{Float64, 2}, knnTree, point, k)\n indices, distances = nearest(knnTree, point, k+1)\n sorted = sortperm(distances)\n X[:,indices[sorted[end]]]\nend\n\n# Return the kNN nearest point.\nfunction knn_nearest_distance(X::Array{Float64, 2}, knnTree, point, k)\n indices, distances = nearest(knnTree, point, k+1)\n sorted = sortperm(distances)\n distances[sorted[end]]\nend","avg_line_length":33.3333333333,"max_line_length":70,"alphanum_fraction":0.71} {"size":770,"ext":"jl","lang":"Julia","max_stars_count":5.0,"content":"# Example julia lambda function\n# Couting words in a file in S3 path\n\n# See following resources for S3 in julia\n# - https:\/\/juliacloud.github.io\/AWSCore.jl\/build\/AWSS3.html\n# - https:\/\/github.com\/JuliaCloud\/AWSS3.jl\n\nmodule word_count\n\nusing AWSLambdaJuliaRuntime\nusing AWSS3\nusing AWSCore\n\n#= The handler function =#\nfunction handler(event_data::InvocationRequest)\n\t# download some file from S3 path\n\t# count number of unique words\n\t# put result back to S3 path or send in response\n\n\tprintln(\"Hello World : $(event_data.payload)\")\n\trun(`ls`)\n\n\ti_am_good = true\n\tif i_am_good\n\t\treturn success_invocation_response(\"\"\"{\"msg\": \"Me Rockz!\"}\"\"\", \"application\/json\")\n\telse\n\t\treturn failure_invocation_response(\"Me Suckz!\", \"some_error_type\")\n\tend\nend\n\nend # module 
word_count\n","avg_line_length":24.0625,"max_line_length":84,"alphanum_fraction":0.7519480519} {"size":1394,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"struct LinearCache{TA,Tb,Tp,Talg,Tc,Tr,Tl}\n A::TA\n b::Tb\n p::Tp\n alg::Talg\n cacheval::Tc\n isfresh::Bool\n Pr::Tr\n Pl::Tl\nend\n\nfunction set_A(cache, A)\n @set! cache.A = A\n @set! cache.isfresh = true\nend\n\nfunction set_b(cache, b)\n @set! cache.b = b\nend\n\nfunction set_p(cache, p)\n @set! cache.p = p\n # @set! cache.isfresh = true\nend\n\nfunction set_cacheval(cache::LinearCache, alg)\n if cache.isfresh\n @set! cache.cacheval = alg\n @set! cache.isfresh = false\n end\n return cache\nend\n\nfunction SciMLBase.init(\n prob::LinearProblem,\n alg,\n args...;\n alias_A = false,\n alias_b = false,\n kwargs...,\n)\n @unpack A, b, p = prob\n if alg isa LUFactorization\n fact = lu_instance(A)\n Tfact = typeof(fact)\n else\n fact = nothing\n Tfact = Any\n end\n Pr = nothing\n Pl = nothing\n\n A = alias_A ? A : copy(A)\n b = alias_b ? b : copy(b)\n\n cache = LinearCache{\n typeof(A),\n typeof(b),\n typeof(p),\n typeof(alg),\n Tfact,\n typeof(Pr),\n typeof(Pl),\n }(\n A,\n b,\n p,\n alg,\n fact,\n true,\n Pr,\n Pl,\n )\n return cache\nend\n\nSciMLBase.solve(prob::LinearProblem, alg, args...; kwargs...) =\n solve(init(prob, alg, args...; kwargs...))\n\nSciMLBase.solve(cache) = solve(cache, cache.alg)\n","avg_line_length":17.2098765432,"max_line_length":63,"alphanum_fraction":0.5337159254} {"size":5405,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"module C_constraints\n include(dirname(@__FILE__)*\"\\\\Utils_cons.jl\")\n include(dirname(dirname(@__FILE__))*\"\\\\Moments\\\\Moments.jl\")\n using .Utils_cons\n using .Moments\n using LinearAlgebra\n const la = LinearAlgebra\n const uc = Utils_cons\n const mom = Moments\n\n export make_mon_expo_keys,\n make_PSD_con,\n make_ord4_con,\n make_loc_cons_S_inf,\n make_loc_cons_S\u2082,\n make_loc_cons_S\u2082\u2081,\n make_G\u1d3f_con\n\n make_mon_expo_keys(d,t::Int) = Moments.make_mon_expo(d,t*2)\n\n \"\"\"L([x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u1d40) \u2ab0 0\"\"\"\n function make_PSD_con(d,t,Lx;noBlock=false)\n MMex\u1d3f,MMCoef\u1d3f = Moments.get_\u2102_block_diag(d,t;noBlock=noBlock)\n println(keys(MMex\u1d3f))\n return Utils_cons.idx2var_dic(Lx,MMex\u1d3f,MMCoef\u1d3f)\n end\n\n\n \"\"\"L(xx*\u2297yy*) \u2ab0 0 \u27fa \"\"\"\n make_ord4_con(d,Lx) = uc.idx2varxx\u0304\u1d40tyy\u0304\u1d40(Lx,mom.make_xx\u0304\u1d40_tens_yy\u0304\u1d40(d))\n\n \"\"\" L\u1d3f(g\u1d3f\u22c5[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u208b\u2081[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u208b\u2081\u1d40) \u2ab0 0\n g\u1d3f = \u221a(\u03c1\u2098\u2090\u2093) - ((x\u1d63\u2091)\u1d62\u00b2 + (x\u1d62\u2098)\u1d62\u00b2) , i \u2208 [d_1],\n or \u221a(\u03c1\u2098\u2090\u2093) - ((y\u1d63\u2091)\u2c7c\u00b2 + (y\u1d62\u2098)\u2c7c\u00b2) , j \u2208 [d_2] \"\"\"\n function make_loc_con(Lx,sq\u03c1,e\u1d64,e\u1d65,B,C)\n Lx\u1d62x\u2c7c = uc.idx2var_arr(Lx,B,C,[2*e\u1d64]) + uc.idx2var_arr(Lx,B,C,[2*e\u1d65]) # L((x\u1d63\u2091)\u1d62\u00b2+ (x\u1d62\u2098)\u1d62\u00b2\u22c5\u03b7\u209c\u208b\u2081)\n return sq\u03c1*uc.idx2var_arr(Lx,B,C) - Lx\u1d62x\u2c7c # L((\u221a\u03c1\u2098\u2090\u2093 - ((x\u1d63\u2091)\u1d62\u00b2+(x\u1d62\u2098)\u1d62\u00b2))\u22c5\u03b7\u209c\u208b\u2081)\n end\n function make_loc_cons_S_inf(\u03c1,d,t,Lx;noBlock=false)\n 
d\u2081,d\u2082 = d ; n = sum(2 .*d) ; sq\u03c1 = sqrt(maximum(norm.(\u03c1)))\n MMex\u1d3f,MMCoef\u1d3f = mom.get_\u2102_block_diag(d,t .- 1;noBlock=noBlock)\n loc_con = Dict()\n for b in keys(MMCoef\u1d3f)\n for k in 1:d\u2081 # Constraint: L\u1d3f( (\u221a\u03c1\u2098\u2090\u2093-((x\u1d63\u2091)\u1d62\u00b2-(x\u1d62\u2098)\u1d62\u00b2))\u22c5\u03b7\u209c\u208b\u2081)) \u2ab0 0 for k \u2208 [d\u2081]\n loc_con[(b,\"x\u00b2_$k\")] = make_loc_con(Lx,sq\u03c1,uc.e\u1d62(n,k),uc.e\u1d62(n,k+d\u2081),MMex\u1d3f[b],MMCoef\u1d3f[b])\n end\n for k in 1:d\u2082 # Constraint: L\u1d3f( (\u221a\u03c1\u2098\u2090\u2093-((y\u1d63\u2091)\u1d62\u00b2+(y\u1d62\u2098)\u1d62\u00b2))\u22c5\u03b7\u209c\u208b\u2081) \u2ab0 0 for k \u2208 [d\u2082]\n loc_con[(b,\"y\u00b2_$k\")] = make_loc_con(Lx,sq\u03c1,uc.e\u1d62(n,k+2*d\u2081),uc.e\u1d62(n,k+2*d\u2081+d\u2082),MMex\u1d3f[b],MMCoef\u1d3f[b])\n end\n end\n return loc_con\n end\n\n \"\"\"L\u1d3f(g\u1d3f\u22c5[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u208b\u2081[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u208b\u2081\u1d40) \u2ab0 0\n g\u1d3f = \u221aTr(\u03c1) - \u2211\u1d48\u1d62((x\u1d63\u2091)\u1d62\u00b2 + (x\u1d62\u2098)\u1d62\u00b2),\n \u221aTr(\u03c1) - \u2211\u1d48\u1d62((y\u1d63\u2091)\u1d62\u00b2 + (y\u1d62\u2098)\u1d62\u00b2)\"\"\"\n tmp(Lx,B,C,n,s,l) = sum([uc.idx2var_arr(Lx,B,C,[2* uc.e\u1d62(n,k)] ) for k in s:(s+l)]) #### SOmething is wrong\n function make_loc_cons_S\u2082(\u03c1,d,t,Lx;noBlock=false)\n d\u2081,d\u2082 = d ; n = sum(2 .*d) ; sqrt_tr_\u03c1 = sqrt(real(tr(\u03c1)))\n MMex\u1d3f,MMCoef\u1d3f = mom.get_\u2102_block_diag(d, t.- 1,noBlock=noBlock)\n\n loc_con = Dict(); g\u2082 = Dict()\n for b in keys(MMCoef\u1d3f)\n xRterm = tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,1,d\u2081) + tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,d\u2081,d\u2081) # L\u1d3f(\u2211\u1d48\u1d62((x\u1d63\u2091)\u1d62\u00b2 + (x\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 )\n yRterm = tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,2*d\u2081,d\u2082) + tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,2*d\u2081+d\u2082,d\u2082) # L\u1d3f(\u2211\u1d48\u1d62((y\u1d63\u2091)\u1d62\u00b2 + (y\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 )\n loc_con[b,\"x\"] = sqrt_tr_\u03c1*uc.idx2var_arr(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b]) - xRterm # \u221aTr(\u03c1) \u22c5 L\u1d3f(\u03b7\u209c\u208b\u2081) - L\u1d3f(\u2211\u1d48\u1d62((x\u1d63\u2091)\u1d62\u00b2 + (x\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 )\n # loc_con[b,\"y\"] = sqrt_tr_\u03c1*uc.idx2var_arr(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b]) - yRterm # \u221aTr(\u03c1) \u22c5 L\u1d3f(\u03b7\u209c\u208b\u2081) - L\u1d3f(\u2211\u1d48\u1d62((y\u1d63\u2091)\u1d62\u00b2 + (y\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 )\n g\u2082[b] = xRterm - yRterm\n end\n return loc_con, g\u2082\n end\n\n \"\"\"L\u1d3f(g\u1d3f\u22c5[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u208b\u2081[x\u1d63\u2091,x\u1d62\u2098,y\u1d63\u2091,y\u1d62\u2098]\u209c\u208b\u2081\u1d40) \u2ab0 0\n g^\u211d = Tr(\u03c1) - \u2211\u1d48\u1d62(u\u2093\u1d62\u00b2 + v\u2093\u1d62\u00b2),\n \u2211\u1d48\u1d62(u_y\u2c7c\u00b2 + v_y\u2c7c\u00b2) - 1\"\"\"\n function make_loc_cons_S\u2082\u2081(\u03c1,d,t,Lx;noBlock=false)\n d\u2081,d\u2082 = d\n n = sum(2 .*d)\n # MB = mom.get_\u2102_block_diag(d,t .- 1)\n MMex\u1d3f,MMCoef\u1d3f = mom.get_\u2102_block_diag(d, t.- 1,noBlock=noBlock)\n tr_\u03c1 = real(tr(\u03c1))\n\n loc_con = Dict()\n loc_con_eq = Dict()\n for b in 
keys(MMCoef\u1d3f)\n xRterm = tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,1,d\u2081) + tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,d\u2081,d\u2081) # L(\u2211\u1d48\u1d62((x\u1d63\u2091)\u1d62\u00b2 + (x\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 )\n yRterm = tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,2*d\u2081,d\u2082) + tmp(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b],n,2*d\u2081+d\u2082,d\u2082) # L(\u2211\u1d48\u1d62((y\u1d63\u2091)\u1d62\u00b2 + (y\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 )\n loc_con[b] = tr_\u03c1*uc.idx2var_arr(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b]) - xRterm # Tr(\u03c1)\u22c5L(\u03b7\u209c\u208b\u2081) - L(\u2211\u1d48\u1d62((x\u1d63\u2091)\u1d62\u00b2 + (x\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 ) \u2ab0 0\n loc_con_eq[b] = uc.idx2var_arr(Lx, MMex\u1d3f[b],MMCoef\u1d3f[b]) - yRterm # 1\u22c5L(\u03b7\u209c\u208b\u2081) - L(\u2211\u1d48\u1d62((y\u1d63\u2091)\u1d62\u00b2 + (y\u1d62\u2098)\u1d62\u00b2 ) \u22c5 \u03b7\u209c\u208b\u2081 ) = 0\n end\n return loc_con, loc_con_eq\n end\n\n \"\"\"\u03c1\u2297L(\u03b7) - L( (xx*\u2297yy*) \u2297 \u03b7 )\u2ab0 0 \u2200 \u03b7 \u2208 blocks of ([x,\u0304x,y,\u0304y]\u209c\u208b\u2082[x,\u0304x,y,\u0304y]*\u209c\u208b\u2082)\n L( \u03c1\u211d \u2297 \u03b7) - L( G\u211d[k] \u2297 \u03b7)\u2ab0 0 \u2200 \u03b7 \u2208 blocks of [u\u2093,v\u2093,u_y,v_y]\u209c\u208b\u2082[u\u2093,v\u2093,u_y,v_y]^T\u209c\u208b\u2082 \"\"\"\n\n\n\n function make_G\u1d3f_con(\u03c1,d,t,Lx;noBlock=false)\n n = sum(2 .*d) ; D = prod(d) ; \u03c1\u1d3f = [real(\u03c1) -imag(\u03c1); imag(\u03c1) real(\u03c1)]\n G\u1d3f,sm = uc.get_G\u1d3f(mom.make_xx\u0304\u1d40_tens_yy\u0304\u1d40(d)) #\n MMex\u1d3f,MMCoef\u1d3f = mom.get_\u2102_block_diag(d, t.- 2,noBlock=noBlock)\n\n LG\u211d\u03b7 = Dict()\n tmp2(B,C,k) = la.kron(sm[k],ones(D .*size(B))) .*\n uc.idx2var_arr(\n Lx,\n mom.var_kron_C(G\u1d3f[k],B),\n mom.var_kron_C(fill(0.0,size(G\u1d3f[k])...),C)) # L(G\u1d3f[k]\u22c5\u03b7\u209c\u208b\u2082)\n for b in keys(MMCoef\u1d3f)\n TEMP1 = sum([tmp2(MMex\u1d3f[b],MMCoef\u1d3f[b],k) for k in 1:8]) # \u2211\u2096L(G\u1d3f[k]\u22c5\u03b7\u209c\u208b\u2082)\n TEMP2 = la.kron(\u03c1\u1d3f,uc.idx2var_arr(Lx,MMex\u1d3f[b],MMCoef\u1d3f[b])) #\n LG\u211d\u03b7[b] = TEMP2 - TEMP1 # L(\u03c1\u1d3f \u2297 \u03b7\u209c\u208b\u2082) - \u2211\u2096L(G\u1d3f[k] \u2297 \u03b7\u209c\u208b\u2082)\n end\n return LG\u211d\u03b7\n end\nend\n","avg_line_length":45.8050847458,"max_line_length":147,"alphanum_fraction":0.5248843663} {"size":113,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using DynamicFactorModeling\nusing Test\n\n@testset \"DynamicFactorModeling.jl\" begin\n @test sayhi() == \"Hi!\"\nend\n","avg_line_length":16.1428571429,"max_line_length":41,"alphanum_fraction":0.7433628319} {"size":555,"ext":"jl","lang":"Julia","max_stars_count":23.0,"content":"@testset \"Eps\" begin\n @testset for T in (Posit8, Posit16, Posit32,\n # pX1_mul is not supported yet by C library\n # Posit8_1, Posit16_1, Posit24_1,\n Posit8_2, Posit16_2, Posit24_2)\n\n @test eps(T) == eps(one(T))\n @test floatmin(T) == eps(zero(T))\n if ~(T == Posit8)\n # Excluding Posit(8,0) as this test is only true for exponent bits > 0\n @test_broken floatmax(T) == eps(floatmax(T))\n end\n @test notareal(T) == eps(notareal(T))\n end\nend\n","avg_line_length":34.6875,"max_line_length":82,"alphanum_fraction":0.5351351351} {"size":6205,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"export fitness_evaluate, select_indexes\n\nusing Random\n\n\"\"\"\n 
null_evaluate(i::CGPInd, j::CGPInd)\n\nDefault evaluation function for two CGP individuals setting minimum fitness.\n\"\"\"\nfunction null_evaluate(i::CGPInd, j::CGPInd)\n return -Inf\nend\n\n\"\"\"\n\tfitness_evaluate(e::NSGA2Evo, fitness::Function)\n\nEvaluation function for the NSGA-II algorithm.\n\"\"\"\nfunction fitness_evaluate(e::NSGA2Evo{T}, fitness::Function) where T\n\t@sync @inbounds for i in eachindex(e.population)\n\t\tThreads.@spawn begin\n\t\t\te.population[i].fitness .= fitness(e.population[i], e.gen, e.atari_games[i])\n\t\tend\n end\nend\n\nfunction fitness_evaluate_two_atari_runs(e::NSGA2Evo{T}, fitness::Function) where T\n\tn = length(e.population)\n\to1, f1 = zeros(n), zeros(Int64, n)\n\to2, f2 = zeros(n), zeros(Int64, n)\n\t@sync @inbounds for i in eachindex(e.population)\n\t\tThreads.@spawn begin\n\t\t\to1[i], f1[i] = fitness(e.population[i], e.gen, e.atari_games[i][1])\n\t\tend\n\t\tThreads.@spawn begin\n\t\t\to2[i], f2[i] = fitness(e.population[i], 25000+e.gen, e.atari_games[i][2])\n\t\tend\n end\n\t@inbounds for i in eachindex(e.population)\n\t\te.population[i].fitness .= [o1[i], o2[i]]\n\t\te.population[i].reached_frames = f1[i] + f2[i]\n\tend\nend\n\n\"\"\"\n fitness_evaluate(e::CGPEvolution; fitness::Function=null_evaluate)\n\nSets the fitness of each individual to the Array of values returned by fitness.\nMultithreaded option enabled in this version.\n\"\"\"\nfunction fitness_evaluate(e::CGPEvolution, fitness::Function)\n @sync for i in eachindex(e.population)\n Threads.@spawn begin\n e.population[i].fitness .= fitness(e.population[i])\n end\n end\nend\n\nfunction fitness_evaluate_ij(\n\te::DualCGPEvolution,\n\tf::Array{Float64, 2},\n\ti::Int64,\n\tj::Int64,\n\tfitness::Function=null_evaluate\n)\n\tenco_i = IPCGPInd(e.encoder_config, e.encoder_population[i].chromosome)\n\tcont_j = CGPInd(e.controller_config, e.controller_population[j].chromosome)\n\tf[i,j] = fitness(enco_i, cont_j)[1] # Currently, only pick 1st fitness dimension\nend\n\n\"\"\"\n fitness_evaluate(e::DualCGPEvolution, fitness::Function=null_evaluate)\n\nSets the fitness of each individual to the maximum value of the fitness matrix\nin the dual CGP evolution framework.\n\"\"\"\nfunction fitness_evaluate(e::DualCGPEvolution, fitness::Function=null_evaluate)\n n_enco = e.encoder_config.n_population\n n_cont = e.controller_config.n_population\n fitness_matrix = zeros(n_enco, n_cont)\n\n\t# FORMER method with @sync\n #=\n\t@sync for i in 1:n_enco\n for j in 1:n_cont\n encoder_i = IPCGPInd(e.encoder_config, e.encoder_population[i].chromosome)\n controller_j = CGPInd(e.controller_config, e.controller_population[j].chromosome)\n Threads.@spawn begin\n fitness_matrix[i, j] = fitness(encoder_i, controller_j)[1] # Currently, only pick 1st fitness dimension\n end\n end\n end\n\t=#\n\n\t# NEW method with Threads.@threads\n\tindexes = [(i, j) for i in 1:n_enco for j in 1:n_cont]\n Threads.@threads for l in 1:(n_enco*n_cont)\n i, j = indexes[l]\n\t\tfitness_evaluate_ij(e, fitness_matrix, i, j, fitness)\n end\n\n for i in eachindex(e.encoder_population)\n e.encoder_population[i].fitness[1] = maximum(fitness_matrix[i,:])\n end\n for j in eachindex(e.controller_population)\n e.controller_population[j].fitness[1] = maximum(fitness_matrix[:,j])\n end\nend\n\n\"\"\"\n\tselect_indexes(e::DualCGPGAEvo)\n\nSelect the indexes of the pairs to be evaluated for this generation.\n\t- 1. Select the elite pairs\n\t- 2. Select at least one pair per row\/col\n\t- 3. 
Select random pairs until e.n_eval pairs have been selected\n\"\"\"\nfunction select_indexes(e::DualCGPGAEvo)\n\tindexes = Vector{Tuple{Int64, Int64}}()\n\tnrows = size(e.elites_matrix, 1)\n\tncols = size(e.elites_matrix, 2)\n\tnmax = max(nrows, ncols)\n\t# 1. Select elites and maybe other pairs set in eval_matrix (e.g. mutant)\n\tfor i in 1:nrows\n\t\tfor j in 1:ncols\n\t\t\tif e.eval_matrix[i, j]\n\t\t\t\tpush!(indexes, (i, j))\n\t\t\tend\n\t\tend\n\tend\n\t# 2. Select at least one pair per row\/col\n\tshuffledrows = shuffle(collect(1:nrows))\n\tshuffledcols = shuffle(collect(1:ncols))\n\tcandidates = Vector{Tuple{Int64, Int64}}()\n\tfor i in 1:nmax\n\t\ti_modrow = i-nrows*divrem(i-1,nrows)[1]\n\t\ti_modcol = i-ncols*divrem(i-1,ncols)[1]\n\t\tc = (shuffledrows[i_modrow], shuffledcols[i_modcol])\n\t\tpush!(candidates, c)\n\tend\n\teliterows = [i[1] for i in indexes]\n\telitecols = [i[2] for i in indexes]\n\tfor c in candidates # Only push candidate if the row or col is not evaluated\n\t\tif c[1] \u2209 eliterows || c[2] \u2209 elitecols\n\t\t\tpush!(indexes, c)\n\t\tend\n\tend\n\t# 3. Select additional random evaluations\n\twhile length(indexes) < e.n_eval\n\t\tc = (rand(1:nrows), rand(1:ncols))\n\t\tif c \u2209 indexes\n\t\t\tpush!(indexes, c)\n\t\tend\n\tend\n\tindexes\nend\n\nfunction fitness_evaluate_ij!(\n\te::DualCGPGAEvo,\n\ti::Int64,\n\tj::Int64,\n\tfitness::Function\n)\n\tenco_i = IPCGPInd(e.encoder_config, e.encoder_sympop[i].chromosome)\n\tcont_j = CGPInd(e.controller_config, e.controller_sympop[j].chromosome)\n\te.fitness_matrix[i, j] = fitness(enco_i, cont_j, e.gen)[1]\nend\n\nfunction set_ind_fitnesses!(e::DualCGPGAEvo)\n\tfor i in eachindex(e.encoder_sympop)\n e.encoder_sympop[i].fitness[1] = maximum(e.fitness_matrix[i,:])\n end\n for j in eachindex(e.controller_sympop)\n e.controller_sympop[j].fitness[1] = maximum(e.fitness_matrix[:,j])\n end\nend\n\nfunction set_elites!(e::DualCGPGAEvo)\n\te.elites_matrix = falses(size(e.elites_matrix)...) # zero elites matrix\n\tci = CartesianIndices(size(e.fitness_matrix))\n # p = sortperm(vec(e.fitness_matrix))[end-e.n_elite+1:end]\n\tp = partialsortperm(vec(e.fitness_matrix), 1:e.n_elite; rev=true)\n elite_indexes = ci[p]\n\tfor i in elite_indexes\n\t\te.elites_matrix[i] = true\n\tend\nend\n\n\"\"\"\n\tfitness_evaluate(e::DualCGPGAEvo, fitness::Function=null_evaluate)\n\nGA sparse fitness evaluation method.\n\"\"\"\nfunction fitness_evaluate(e::DualCGPGAEvo, fitness::Function=null_evaluate)\n\t# 1. Select indexes of individuals to evaluate\n\tindexes = select_indexes(e)\n\t# 2. Evaluate those individuals\n\tThreads.@threads for l in eachindex(indexes)\n i, j = indexes[l]\n\t\tfitness_evaluate_ij!(e, i, j, fitness)\n end\n\t# 3. Set individual's fitnesses\n\tset_ind_fitnesses!(e)\n\t# 4. 
Set elites\n\tset_elites!(e)\nend\n","avg_line_length":29.9758454106,"max_line_length":119,"alphanum_fraction":0.7107171636} {"size":4741,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using LatexTables\nusing Test\n\nbody = reshape([\"v\", 1, 2, 3], 1, :)\n#body = vcat([\"v\" 1 2 3], [\"a\" 2 1 0.5])\nn_row = size(body)[1]\n\nrun_finished(s::String) = return true\ncompare_strings(s1::String, s2::String) = remove_spaces(s1) == remove_spaces(s2)\ncompare_strings_rows(s1::String, s2::String) = split_string(s1) == split_string(s2)\n\nfunction compare_strings_rows_subset(s1::String, s2::String)\n s1_spl = split_string(s1)\n s2_spl = split_string(s2)\n return is_subset(s1_spl, s2_spl) && !is_subset(s2_spl, s1_spl)\nend\n\nfunction compile_success(file_name, s::String)\n\n dir_name = \"output\"\n file_pdf = file_name * \".pdf\"\n file_tex = file_name * \".tex\"\n\n isfile(joinpath(dir_name, file_pdf)) && rm(joinpath(dir_name, file_pdf))\n\n make_tex_file(file_tex, s; dir_name=dir_name, compile=true)\n\n return isfile(joinpath(dir_name, file_pdf))\nend\n\n\nresult1 = \"\\\\begin{table}[!ht] \\n\\\\centering \\n\\\\caption{cap} \\n\\\\label{lab} \\n\\\\begin{tabular}{@{} llll @{}} \\n\\\\toprule \\nv & \\$1\\$ & \\$2\\$ & \\$3\\$ \\\\\\\\ \\n\\\\bottomrule \\n\\\\end{tabular} \\n\\\\end{table} \\n\"\nresult2 = \"\\\\begin{table}[!ht] \\n\\\\centering \\n\\\\caption{cap} \\n\\\\label{lab} \\n\\\\begin{tabular}{@{} llll @{}} \\n\\\\hline \\nv & \\$1\\$ & \\$2\\$ & \\$3\\$ \\\\\\\\ \\n\\\\hline \\n\\\\end{tabular} \\n\\\\end{table} \\n\"\n\nfunction split_string(s::String)\n s = remove_spaces(s)\n s = sort(split(s, \"\\n\"))\n return s[.!isempty.(s)]\nend\n\nfunction remove_spaces(s::String)\n ii = [t[1] for t in collect.(findall(\" \", s))]\n jj = setdiff(1:length(s), ii)\n return s[jj]\nend\n\nfunction is_subset(s1::AbstractArray, s2::AbstractArray)\n all([any(x1 .== s2) for x1 in s1])\nend\n\n@testset \"Introduction\" begin\n @test_throws Exception error(\"Kdo testuje, neveri koderum.\")\nend\n\n@testset \"Highlighting\" begin\n @test_throws Exception table_to_tex(body; highlight_max_row=true, highlight_max_col=true)\n @test_throws Exception table_to_tex(body; highlight_min_row=true, highlight_min_col=true)\n @test run_finished(table_to_tex(body; highlight_max_row=true, highlight_max_style=Color(:blue)))\n @test run_finished(table_to_tex(body; highlight_max_row=true, highlight_max_style=[Color(:blue), Style(:italic), CellColor(:red)]))\n @test run_finished(table_to_tex(body; highlight_min_row=true, highlight_min_style=Color(:blue)))\n @test run_finished(table_to_tex(body; highlight_min_row=true, highlight_min_style=[Color(:blue), Style(:italic), CellColor(:red)]))\nend\n\n@testset \"Types\" begin\n @test_throws Exception table_to_tex(body; col_format=[\"\", \"a\"])\n @test run_finished(table_to_tex(body; col_format=[\"s\", \"3f\", \"2d\", 'd']))\nend\n\n@testset \"Leading col\" begin\n @test run_finished(table_to_tex(body; leading_col=repeat([\"1\"], n_row), alignment=\"\"))\n @test run_finished(table_to_tex(body; leading_col=repeat([1], n_row), alignment='l'))\n @test run_finished(table_to_tex(body; leading_col=repeat([\"1\"], n_row), alignment=\"l\"))\n @test run_finished(table_to_tex(body; leading_col=repeat([1], n_row), alignment=\"llll\"))\n @test run_finished(table_to_tex(body; leading_col=repeat([\"1\"], n_row), alignment=\"lllll\"))\nend\n\n@testset \"Comparison\" begin\n @test compare_strings_rows(result1, table_to_tex(body; table_type=:booktabs, position=\"!ht\", 
caption=\"cap\", label=\"lab\", centering=true, alignment=\"l\", caption_position_top=true))\n @test compare_strings_rows(result1, table_to_tex(body; table_type=:booktabs, position=\"!ht\", caption=\"cap\", label=\"lab\", centering=true, alignment=\"l\", caption_position_top=false))\n @test compare_strings_rows(result2, table_to_tex(body; table_type=:tabular, position=\"!ht\", caption=\"cap\", label=\"lab\", centering=true, alignment=\"l\", caption_position_top=true))\n @test compare_strings_rows(result2, table_to_tex(body; table_type=:tabular, position=\"!ht\", caption=\"cap\", label=\"lab\", centering=true, alignment=\"l\", caption_position_top=false))\n @test !compare_strings_rows_subset(result1, table_to_tex(body; table_type=:booktabs, position=\"!ht\", centering=true, alignment=\"l\", caption_position_top=true, floating_table=false))\n @test compare_strings_rows_subset(table_to_tex(body; table_type=:booktabs, position=\"!ht\", centering=true, alignment=\"l\", caption_position_top=true, floating_table=false), result1)\nend\n\n@testset \"Compile\" begin\n @test compile_success(\"test12344321\", table_to_tex(body; table_type=:booktabs, position=\"!ht\", caption=\"cap\", label=\"lab\", centering=true, alignment=\"l\", caption_position_top=true, highlight_max_row=true, highlight_min_row=true))\n @test compile_success(\"test12344322\", table_to_tex(body; table_type=:booktabs, position=\"!ht\", caption=\"cap\", label=\"lab\", centering=true, alignment=\"l\", caption_position_top=true, highlight_max_col=true, highlight_min_col=true))\nend\n","avg_line_length":52.6777777778,"max_line_length":233,"alphanum_fraction":0.7194684666} {"size":5349,"ext":"jl","lang":"Julia","max_stars_count":17.0,"content":"using Mill, JSON, Flux, JsonGrinder, Test\nusing HierarchicalUtils\nimport HierarchicalUtils: NodeType, children, InnerNode, LeafNode, printtree\nusing JsonGrinder: DictEntry, suggestextractor, schema\nusing Mill: reflectinmodel\n\nj1 = JSON.parse(\"\"\"{\"a\": 4, \"b\": {\"a\":[1,2,3], \"b\": 1},\"c\": { \"a\": {\"a\":[1,2,3],\"b\":[4,5,6]}}}\"\"\",inttype=Float64)\nj2 = JSON.parse(\"\"\"{\"a\": 3, \"c\": {\"a\":{\"a\":[2,3], \"b\":[5,6]}}}\"\"\")\nj3 = JSON.parse(\"\"\"{\"a\": 2, \"b\": {\"a\":[1,2,3,4], \"b\": 1}}\"\"\")\nj4 = JSON.parse(\"\"\"{\"a\": 4, \"b\": {}}\"\"\")\nj5 = JSON.parse(\"\"\"{\"b\": {}}\"\"\")\nj6 = JSON.parse(\"\"\"{}\"\"\")\n\nsch = schema([j1,j2,j3,j4,j5,j6])\next = suggestextractor(sch, testing_settings)\n\n@testset \"printtree\" begin\n @test buf_printtree(ext, trav=true) == \"\"\"\n Dict [\"\"]\n \u251c\u2500\u2500 a: Float32 [\"E\"]\n \u251c\u2500\u2500 b: Dict [\"U\"]\n \u2502 \u251c\u2500\u2500 a: Array of [\"Y\"]\n \u2502 \u2502 \u2514\u2500\u2500 Float32 [\"a\"]\n \u2502 \u2514\u2500\u2500 b: Float32 [\"c\"]\n \u2514\u2500\u2500 c: Dict [\"k\"]\n \u2514\u2500\u2500 a: Dict [\"s\"]\n \u251c\u2500\u2500 a: Array of [\"u\"]\n \u2502 \u2514\u2500\u2500 Float32 [\"v\"]\n \u2514\u2500\u2500 b: Array of [\"w\"]\n \u2514\u2500\u2500 Float32 [\"x\"]\n \"\"\"\n e = JsonGrinder.key_as_field(sch[:b], testing_settings, path = \"b\")\n\text2 = deepcopy(ext)\n\text2.dict[:b] = e\n\t@test buf_printtree(ext2, trav=true) == \"\"\"\n Dict [\"\"]\n \u251c\u2500\u2500 a: Float32 [\"E\"]\n \u251c\u2500\u2500 b: KeyAsField [\"U\"]\n \u2502 \u251c\u2500\u2500 String [\"Y\"]\n \u2502 \u2514\u2500\u2500 MultiRepresentation [\"c\"]\n \u2502 \u251c\u2500\u2500 e1: Array of [\"d\"]\n \u2502 \u2502 \u2514\u2500\u2500 Float32 [\"dU\"]\n \u2502 \u2514\u2500\u2500 e2: Float32 [\"e\"]\n 
\u2514\u2500\u2500 c: Dict [\"k\"]\n \u2514\u2500\u2500 a: Dict [\"s\"]\n \u251c\u2500\u2500 a: Array of [\"u\"]\n \u2502 \u2514\u2500\u2500 Float32 [\"v\"]\n \u2514\u2500\u2500 b: Array of [\"w\"]\n \u2514\u2500\u2500 Float32 [\"x\"]\n \"\"\"\nend\n\n@testset \"nnodes\" begin\n @test nnodes(ext) == 12\n @test nnodes(ext[:a]) == 1\n @test nnodes(ext[:b]) == 4\n @test nnodes(ext[:c]) == 6\nend\n\n@testset \"nleafs\" begin\n @test nleafs(ext[:a]) + nleafs(ext[:b]) + nleafs(ext[:c]) == nleafs(ext)\nend\n\n@testset \"children\" begin\n @test children(ext) == (; :a=>ext[:a], :b=>ext[:b], :c=>ext[:c])\n @test children(ext[:a]) == ()\n @test children(ext[:b]) == (; :a=>ext[:b][:a], :b=>ext[:b][:b])\n @test children(ext[:b][:a]) == (ext[:b][:a].item,)\n @test children(ext[:b][:b]) == ()\n @test children(ext[:c]) == (; :a=>ext[:c][:a])\n @test children(ext[:c][:a]) == (; :a=>ext[:c][:a][:a], :b=>ext[:c][:a][:b])\n @test children(ext[:c][:a][:a]) == (ext[:c][:a][:a].item,)\n @test children(ext[:c][:a][:b]) == (ext[:c][:a][:b].item,)\nend\n\n@testset \"nchildren\" begin\n @test nchildren(ext) == 3\n @test nchildren(ext[:a]) == 0\n @test nchildren(ext[:b]) == 2\n @test nchildren(ext[:b][:a]) == 1\n @test nchildren(ext[:b][:b]) == 0\n @test nchildren(ext[:c]) == 1\n @test nchildren(ext[:c][:a]) == 2\n @test nchildren(ext[:c][:a][:a]) == 1\n @test nchildren(ext[:c][:a][:b]) == 1\nend\n\n@testset \"getindex on strings\" begin\n @test ext[\"\"] == ext\n @test ext[\"E\"] == ext[:a]\n @test ext[\"U\"] == ext[:b]\n @test ext[\"Y\"] == ext[:b][:a]\n @test ext[\"a\"] == ext[:b][:a].item\n @test ext[\"c\"] == ext[:b][:b]\n @test ext[\"k\"] == ext[:c]\n @test ext[\"s\"] == ext[:c][:a]\n @test ext[\"u\"] == ext[:c][:a][:a]\n @test ext[\"v\"] == ext[:c][:a][:a].item\n @test ext[\"w\"] == ext[:c][:a][:b]\n @test ext[\"x\"] == ext[:c][:a][:b].item\nend\n\n@testset \"NodeIterator\" begin\n @test collect(NodeIterator(ext)) == [ext[\"\"], ext[\"E\"], ext[\"U\"], ext[\"Y\"], ext[\"a\"], ext[\"c\"],\n ext[\"k\"], ext[\"s\"], ext[\"u\"], ext[\"v\"], ext[\"w\"], ext[\"x\"]]\nend\n\n@testset \"LeafIterator\" begin\n @test collect(LeafIterator(ext)) == [ext[\"E\"], ext[\"a\"], ext[\"c\"], ext[\"v\"], ext[\"x\"]]\nend\n\n@testset \"TypeIterator\" begin\n @test collect(TypeIterator(ExtractArray, ext)) == [ext[\"Y\"], ext[\"u\"], ext[\"w\"]]\nend\n\n@testset \"show\" begin\n e = ExtractCategorical([\"a\",\"b\"])\n @test buf_printtree(e) == \"\"\"Categorical d = 3\n \"\"\"\n\n dict = Dict(\"a\" => ExtractArray(ExtractScalar(Float64,2,3)),\"b\" => ExtractArray(ExtractScalar(Float64,2,3)))\n br = ExtractDict(dict)\n @test buf_printtree(br, trav=true) ==\n \"\"\"\n Dict [\"\"]\n \u251c\u2500\u2500 a: Array of [\"E\"]\n \u2502 \u2514\u2500\u2500 Float64 [\"M\"]\n \u2514\u2500\u2500 b: Array of [\"U\"]\n \u2514\u2500\u2500 Float64 [\"c\"]\n \"\"\"\n\n dict = Dict(\"a\" => ExtractScalar(Float64,2,3),\"b\" => ExtractScalar(Float64), \"c\" => ExtractArray(ExtractScalar(Float64,2,3)))\n br = ExtractDict(dict)\n @test buf_printtree(br, trav=true) ==\n \"\"\"\n Dict [\"\"]\n \u251c\u2500\u2500 a: Float64 [\"E\"]\n \u251c\u2500\u2500 b: Float64 [\"U\"]\n \u2514\u2500\u2500 c: Array of [\"k\"]\n \u2514\u2500\u2500 Float64 [\"s\"]\n \"\"\"\n\n other1 = Dict(\"a\" => ExtractArray(ExtractScalar(Float64,2,3)),\"b\" => ExtractArray(ExtractScalar(Float64,2,3)))\n br1 = ExtractDict(other1)\n other = Dict(\"a\" => ExtractArray(br1), \"b\" => ExtractScalar(Float64,2,3))\n br = ExtractDict(other)\n @test buf_printtree(br, trav=true) ==\n \"\"\"\n Dict 
[\"\"]\n \u251c\u2500\u2500 a: Array of [\"E\"]\n \u2502 \u2514\u2500\u2500 Dict [\"M\"]\n \u2502 \u251c\u2500\u2500 a: Array of [\"O\"]\n \u2502 \u2502 \u2514\u2500\u2500 Float64 [\"P\"]\n \u2502 \u2514\u2500\u2500 b: Array of [\"Q\"]\n \u2502 \u2514\u2500\u2500 Float64 [\"R\"]\n \u2514\u2500\u2500 b: Float64 [\"U\"]\n \"\"\"\nend\n","avg_line_length":33.641509434,"max_line_length":129,"alphanum_fraction":0.4548513741} {"size":275,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using GalerkinSparseGrids\nusing Base.Test\nusing Cubature\nusing ODE\n\ntests = [\n\t\"elementary.jl\",\n\t\"hier_DG.jl\",\n\t\"vhier_DG.jl\",\n\t\"differentiation.jl\",\n\t\"solvers.jl\"\n]\n\nfor filename in tests\n\tname = first(splitext(filename))\n\t@testset \"$name\" begin\n\t\tinclude(filename)\n\tend\nend","avg_line_length":14.4736842105,"max_line_length":33,"alphanum_fraction":0.7272727273} {"size":19972,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This file is a part of Julia. License is MIT: https:\/\/julialang.org\/license\n\nimport Libdl\n\n# helper function for passing input to stdin\n# and returning the stdout result\nfunction writereadpipeline(input, exename)\n p = open(exename, \"w+\")\n @async begin\n write(p.in, input)\n close(p.in)\n end\n return read(p.out, String)\nend\n\n# helper function for returning stderr and stdout\n# from running a command (ignoring failure status)\nfunction readchomperrors(exename::Cmd)\n out = Base.PipeEndpoint()\n err = Base.PipeEndpoint()\n p = run(exename, devnull, out, err, wait=false)\n o = @async(readchomp(out))\n e = @async(readchomp(err))\n return (success(p), fetch(o), fetch(e))\nend\n\n\nlet exename = `$(Base.julia_cmd()) --sysimage-native-code=yes --startup-file=no`\n # --version\n let v = split(read(`$exename -v`, String), \"julia version \")[end]\n @test Base.VERSION_STRING == chomp(v)\n end\n @test read(`$exename -v`, String) == read(`$exename --version`, String)\n\n # --help\n let header = \"julia [switches] -- [programfile] [args...]\"\n @test startswith(read(`$exename -h`, String), header)\n @test startswith(read(`$exename --help`, String), header)\n end\n\n # --quiet, --banner\n let t(q,b) = \"Base.JLOptions().quiet == $q && Base.JLOptions().banner == $b\"\n @test success(`$exename -e $(t(0, -1))`)\n @test success(`$exename -q -e $(t(1, 0))`)\n @test success(`$exename --quiet -e $(t(1, 0))`)\n @test success(`$exename --banner=no -e $(t(0, 0))`)\n @test success(`$exename --banner=yes -e $(t(0, 1))`)\n @test success(`$exename -q --banner=no -e $(t(1, 0))`)\n @test success(`$exename -q --banner=yes -e $(t(1, 1))`)\n @test success(`$exename --banner=no -q -e $(t(1, 0))`)\n @test success(`$exename --banner=yes -q -e $(t(1, 1))`)\n end\n\n # --home\n @test success(`$exename -H $(Sys.BINDIR)`)\n @test success(`$exename --home=$(Sys.BINDIR)`)\n\n # --eval\n @test success(`$exename -e \"exit(0)\"`)\n @test !success(`$exename -e \"exit(1)\"`)\n @test success(`$exename --eval=\"exit(0)\"`)\n @test !success(`$exename --eval=\"exit(1)\"`)\n @test !success(`$exename -e`)\n @test !success(`$exename --eval`)\n # --eval --interactive (replaced --post-boot)\n @test success(`$exename -i -e \"exit(0)\"`)\n @test !success(`$exename -i -e \"exit(1)\"`)\n\n # --print\n @test read(`$exename -E \"1+1\"`, String) == \"2\\n\"\n @test read(`$exename --print=\"1+1\"`, String) == \"2\\n\"\n @test !success(`$exename -E`)\n @test !success(`$exename --print`)\n\n # --load\n let testfile = tempname()\n try\n write(testfile, \"testvar = 
:test\\nprintln(\\\"loaded\\\")\\n\")\n @test read(`$exename -i --load=$testfile -e \"println(testvar)\"`, String) == \"loaded\\ntest\\n\"\n @test read(`$exename -i -L $testfile -e \"println(testvar)\"`, String) == \"loaded\\ntest\\n\"\n # multiple, combined\n @test read(```$exename\n -e 'push!(ARGS, \"hi\")'\n -E \"1+1\"\n -E \"2+2\"\n -L $testfile\n -E '3+3'\n -L $testfile\n -E 'pop!(ARGS)'\n -e 'show(ARGS); println()'\n 9 10\n ```, String) == \"\"\"\n 2\n 4\n loaded\n 6\n loaded\n \"hi\"\n [\"9\", \"10\"]\n \"\"\"\n finally\n rm(testfile)\n end\n end\n # -L, --load requires an argument\n @test !success(`$exename -L`)\n @test !success(`$exename --load`)\n\n # --cpu-target (requires LLVM enabled)\n @test !success(`$exename -C invalidtarget`)\n @test !success(`$exename --cpu-target=invalidtarget`)\n\n # --procs\n @test readchomp(`$exename -q -p 2 -e \"println(nworkers())\"`) == \"2\"\n @test !success(`$exename -p 0`)\n @test !success(`$exename --procs=1.0`)\n\n # --machine-file\n # this does not check that machine file works,\n # only that the filename gets correctly passed to the option struct\n let fname = tempname()\n touch(fname)\n fname = realpath(fname)\n try\n @test readchomp(`$exename --machine-file $fname -e\n \"println(unsafe_string(Base.JLOptions().machine_file))\"`) == fname\n finally\n rm(fname)\n end\n end\n\n # -i, isinteractive\n @test readchomp(`$exename -E \"isinteractive()\"`) == \"false\"\n @test readchomp(`$exename -E \"isinteractive()\" -i`) == \"true\"\n\n # --color\n @test readchomp(`$exename --color=yes -E \"Base.have_color\"`) == \"true\"\n @test readchomp(`$exename --color=no -E \"Base.have_color\"`) == \"false\"\n @test !success(`$exename --color=false`)\n\n # --history-file\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().historyfile)\"\n --history-file=yes`) == \"true\"\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().historyfile)\"\n --history-file=no`) == \"false\"\n @test !success(`$exename --history-file=false`)\n\n # --code-coverage\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().code_coverage)\"`) == \"false\"\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().code_coverage)\"\n --code-coverage=none`) == \"false\"\n\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().code_coverage)\"\n --code-coverage`) == \"true\"\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().code_coverage)\"\n --code-coverage=user`) == \"true\"\n\n # --track-allocation\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().malloc_log)\"`) == \"false\"\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().malloc_log)\"\n --track-allocation=none`) == \"false\"\n\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().malloc_log)\"\n --track-allocation`) == \"true\"\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().malloc_log)\"\n --track-allocation=user`) == \"true\"\n\n # --optimize\n @test readchomp(`$exename -E \"Base.JLOptions().opt_level\"`) == \"2\"\n @test readchomp(`$exename -E \"Base.JLOptions().opt_level\" -O`) == \"3\"\n @test readchomp(`$exename -E \"Base.JLOptions().opt_level\" --optimize`) == \"3\"\n @test readchomp(`$exename -E \"Base.JLOptions().opt_level\" -O0`) == \"0\"\n\n # -g\n @test readchomp(`$exename -E \"Base.JLOptions().debug_level\" -g`) == \"2\"\n let code = read(`$exename -g0 -i -e \"code_llvm(stdout, +, (Int64, Int64), false, true); exit()\"`, String)\n @test contains(code, \"llvm.module.flags\")\n @test !contains(code, \"llvm.dbg.cu\")\n @test !contains(code, \"int.jl\")\n @test !contains(code, \"Int64\")\n end\n let 
code = read(`$exename -g1 -i -e \"code_llvm(stdout, +, (Int64, Int64), false, true); exit()\"`, String)\n @test contains(code, \"llvm.module.flags\")\n @test contains(code, \"llvm.dbg.cu\")\n @test contains(code, \"int.jl\")\n @test !contains(code, \"Int64\")\n end\n let code = read(`$exename -g2 -i -e \"code_llvm(stdout, +, (Int64, Int64), false, true); exit()\"`, String)\n @test contains(code, \"llvm.module.flags\")\n @test contains(code, \"llvm.dbg.cu\")\n @test contains(code, \"int.jl\")\n @test contains(code, \"\\\"Int64\\\"\")\n end\n\n # --check-bounds\n let JL_OPTIONS_CHECK_BOUNDS_DEFAULT = 0,\n JL_OPTIONS_CHECK_BOUNDS_ON = 1,\n JL_OPTIONS_CHECK_BOUNDS_OFF = 2\n @test parse(Int,readchomp(`$exename -E \"Int(Base.JLOptions().check_bounds)\"`)) ==\n JL_OPTIONS_CHECK_BOUNDS_DEFAULT\n @test parse(Int,readchomp(`$exename -E \"Int(Base.JLOptions().check_bounds)\"\n --check-bounds=yes`)) == JL_OPTIONS_CHECK_BOUNDS_ON\n @test parse(Int,readchomp(`$exename -E \"Int(Base.JLOptions().check_bounds)\"\n --check-bounds=no`)) == JL_OPTIONS_CHECK_BOUNDS_OFF\n end\n # check-bounds takes yes\/no as argument\n @test !success(`$exename -E \"exit(0)\" --check-bounds=false`)\n\n # --depwarn\n @test readchomp(`$exename --depwarn=no -E \"Base.JLOptions().depwarn\"`) == \"0\"\n @test readchomp(`$exename --depwarn=yes -E \"Base.JLOptions().depwarn\"`) == \"1\"\n @test !success(`$exename --depwarn=false`)\n # test deprecated syntax\n @test !success(`$exename -e \"foo (x::Int) = x * x\" --depwarn=error`)\n # test deprecated method\n @test !success(`$exename -e \"\n foo() = :foo; bar() = :bar\n @deprecate foo() bar()\n foo()\n \" --depwarn=error`)\n\n # test deprecated bindings, #13269\n let code = \"\"\"\n module Foo\n import Base: @deprecate_binding\n\n const NotDeprecated = true\n @deprecate_binding Deprecated NotDeprecated\n end\n\n Foo.Deprecated\n \"\"\"\n\n @test !success(`$exename -E \"$code\" --depwarn=error`)\n\n @test readchomperrors(`$exename -E \"$code\" --depwarn=yes`) ==\n (true, \"true\", \"WARNING: Foo.Deprecated is deprecated, use NotDeprecated instead.\\n likely near no file:5\")\n\n @test readchomperrors(`$exename -E \"$code\" --depwarn=no`) ==\n (true, \"true\", \"\")\n end\n\n # --inline\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().can_inline)\"`) == \"true\"\n @test readchomp(`$exename --inline=yes -E \"Bool(Base.JLOptions().can_inline)\"`) == \"true\"\n @test readchomp(`$exename --inline=no -E \"Bool(Base.JLOptions().can_inline)\"`) == \"false\"\n # --inline takes yes\/no as argument\n @test !success(`$exename --inline=false`)\n\n # --polly\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().polly)\"`) == \"true\"\n @test readchomp(`$exename --polly=yes -E \"Bool(Base.JLOptions().polly)\"`) == \"true\"\n @test readchomp(`$exename --polly=no -E \"Bool(Base.JLOptions().polly)\"`) == \"false\"\n # --polly takes yes\/no as argument\n @test !success(`$exename --polly=false`)\n\n # --fast-math\n let JL_OPTIONS_FAST_MATH_DEFAULT = 0,\n JL_OPTIONS_FAST_MATH_ON = 1,\n JL_OPTIONS_FAST_MATH_OFF = 2\n @test parse(Int,readchomp(`$exename -E\n \"Int(Base.JLOptions().fast_math)\"`)) == JL_OPTIONS_FAST_MATH_DEFAULT\n @test parse(Int,readchomp(`$exename --math-mode=user -E\n \"Int(Base.JLOptions().fast_math)\"`)) == JL_OPTIONS_FAST_MATH_DEFAULT\n @test parse(Int,readchomp(`$exename --math-mode=ieee -E\n \"Int(Base.JLOptions().fast_math)\"`)) == JL_OPTIONS_FAST_MATH_OFF\n @test parse(Int,readchomp(`$exename --math-mode=fast -E\n \"Int(Base.JLOptions().fast_math)\"`)) == 
JL_OPTIONS_FAST_MATH_ON\n end\n\n # --worker takes default \/ custom as argument (default\/custom arguments\n # tested in test\/parallel.jl)\n @test !success(`$exename --worker=true`)\n\n # test passing arguments\n mktempdir() do dir\n testfile = joinpath(dir, tempname())\n # write a julia source file that just prints ARGS to stdout\n write(testfile, \"\"\"\n println(ARGS)\n \"\"\")\n mkpath(joinpath(dir, \".julia\", \"config\"))\n cp(testfile, joinpath(dir, \".julia\", \"config\", \"startup.jl\"))\n\n withenv((Sys.iswindows() ? \"USERPROFILE\" : \"HOME\") => dir) do\n output = \"[\\\"foo\\\", \\\"-bar\\\", \\\"--baz\\\"]\"\n @test readchomp(`$exename $testfile foo -bar --baz`) == output\n @test readchomp(`$exename $testfile -- foo -bar --baz`) == output\n @test readchomp(`$exename -L $testfile -e 'exit(0)' -- foo -bar --baz`) ==\n output\n @test readchomp(`$exename --startup-file=yes -e 'exit(0)' -- foo -bar --baz`) ==\n output\n\n output = \"String[]\\nString[]\"\n @test readchomp(`$exename -L $testfile $testfile`) == output\n @test readchomp(`$exename --startup-file=yes $testfile`) == output\n\n @test !success(`$exename --foo $testfile`)\n @test readchomp(`$exename -L $testfile -e 'exit(0)' -- foo -bar -- baz`) ==\n \"[\\\"foo\\\", \\\"-bar\\\", \\\"--\\\", \\\"baz\\\"]\"\n end\n end\n\n # test the program name remains constant\n mktempdir() do dir\n a = joinpath(dir, \"a.jl\")\n b = joinpath(dir, \"b.jl\")\n c = joinpath(dir, \".julia\", \"config\", \"startup.jl\")\n\n write(a, \"\"\"\n println(@__FILE__)\n println(PROGRAM_FILE)\n include(\\\"$(escape_string(b))\\\")\n \"\"\")\n write(b, \"\"\"\n println(@__FILE__)\n println(PROGRAM_FILE)\n \"\"\")\n mkpath(dirname(c))\n cp(b, c)\n\n readsplit(cmd) = split(readchomp(cmd), '\\n')\n\n withenv((Sys.iswindows() ? 
\"USERPROFILE\" : \"HOME\") => dir) do\n @test readsplit(`$exename $a`) ==\n [a, a,\n b, a]\n @test readsplit(`$exename -L $b -e 'exit(0)'`) ==\n [realpath(b), \"\"]\n @test readsplit(`$exename -L $b $a`) ==\n [realpath(b), a,\n a, a,\n b, a]\n @test readsplit(`$exename --startup-file=yes -e 'exit(0)'`) ==\n [c, \"\"]\n @test readsplit(`$exename --startup-file=yes -L $b -e 'exit(0)'`) ==\n [c, \"\",\n realpath(b), \"\"]\n @test readsplit(`$exename --startup-file=yes -L $b $a`) ==\n [c, a,\n realpath(b), a,\n a, a,\n b, a]\n end\n end\n\n # issue #10562\n @test readchomp(`$exename -e 'println(ARGS);' ''`) == \"[\\\"\\\"]\"\n\n # issue #12679\n @test readchomperrors(`$exename --startup-file=no --compile=yes -ioo`) ==\n (false, \"\", \"ERROR: unknown option `-o`\")\n @test readchomperrors(`$exename --startup-file=no -p`) ==\n (false, \"\", \"ERROR: option `-p\/--procs` is missing an argument\")\n @test readchomperrors(`$exename --startup-file=no --inline`) ==\n (false, \"\", \"ERROR: option `--inline` is missing an argument\")\n @test readchomperrors(`$exename --startup-file=no -e \"@show ARGS\" -now -- julia RUN.jl`) ==\n (false, \"\", \"ERROR: unknown option `-n`\")\n\n # --compiled-modules={yes|no}\n @test readchomp(`$exename -E \"Bool(Base.JLOptions().use_compiled_modules)\"`) == \"true\"\n @test readchomp(`$exename --compiled-modules=yes -E\n \"Bool(Base.JLOptions().use_compiled_modules)\"`) == \"true\"\n @test readchomp(`$exename --compiled-modules=no -E\n \"Bool(Base.JLOptions().use_compiled_modules)\"`) == \"false\"\n @test !success(`$exename --compiled-modules=foo -e \"exit(0)\"`)\n\n # issue #12671, starting from a non-directory\n # rm(dir) fails on windows with Permission denied\n # and was an upstream bug in llvm <= v3.3\n if !Sys.iswindows() && Base.libllvm_version > v\"3.3\"\n testdir = mktempdir()\n cd(testdir) do\n rm(testdir)\n @test success(`$exename -e \"exit(0)\"`)\n end\n end\nend\n\n\n# Find the path of libjulia (or libjulia-debug, as the case may be)\n# to use as a dummy shlib to open\nlibjulia = abspath(Libdl.dlpath((ccall(:jl_is_debugbuild, Cint, ()) != 0) ? \"libjulia-debug\" : \"libjulia\"))\n\n# test error handling code paths of running --sysimage\nlet exename = joinpath(Sys.BINDIR, Base.julia_exename()),\n sysname = unsafe_string(Base.JLOptions().image_file)\n for nonexist_image in (\n joinpath(@__DIR__, \"nonexistent\"),\n \"$sysname.nonexistent\",\n )\n let err = Pipe(),\n p = run(pipeline(`$exename --sysimage=$nonexist_image`, stderr=err), wait=false)\n close(err.in)\n let s = read(err, String)\n @test contains(s, \"ERROR: could not load library \\\"$nonexist_image\\\"\\n\")\n @test !contains(s, \"Segmentation fault\")\n @test !contains(s, \"EXCEPTION_ACCESS_VIOLATION\")\n end\n @test !success(p)\n @test !Base.process_signaled(p)\n @test p.exitcode == 1\n end\n end\n let err = Pipe(),\n p = run(pipeline(`$exename --sysimage=$libjulia`, stderr=err), wait=false)\n close(err.in)\n let s = read(err, String)\n @test s == \"ERROR: System image file failed consistency check: maybe opened the wrong version?\\n\"\n end\n @test !success(p)\n @test !Base.process_signaled(p)\n @test p.exitcode == 1\n end\nend\n\nlet exename = `$(Base.julia_cmd()) --sysimage-native-code=yes`\n # --startup-file\n let JL_OPTIONS_STARTUPFILE_ON = 1,\n JL_OPTIONS_STARTUPFILE_OFF = 2\n # `HOME=$tmpdir` to avoid errors in the user startup.jl, which hangs the tests. 
Issue #17642\n mktempdir() do tmpdir\n withenv(\"HOME\"=>tmpdir) do\n @test parse(Int,readchomp(`$exename -E \"Base.JLOptions().startupfile\" --startup-file=yes`)) == JL_OPTIONS_STARTUPFILE_ON\n end\n end\n @test parse(Int,readchomp(`$exename -E \"Base.JLOptions().startupfile\"\n --startup-file=no`)) == JL_OPTIONS_STARTUPFILE_OFF\n end\n @test !success(`$exename --startup-file=false`)\nend\n\n# Make sure `julia --lisp` doesn't break\nrun(pipeline(devnull, `$(joinpath(Sys.BINDIR, Base.julia_exename())) --lisp`, devnull))\n\n# Test that `julia [some other option] --lisp` is disallowed\n@test readchomperrors(`$(joinpath(Sys.BINDIR, Base.julia_exename())) -Cnative --lisp`) ==\n (false, \"\", \"ERROR: --lisp must be specified as the first argument\")\n\n# --sysimage-native-code={yes|no}\nlet exename = `$(Base.julia_cmd()) --startup-file=no`\n @test readchomp(`$exename --sysimage-native-code=yes -E\n \"Bool(Base.JLOptions().use_sysimage_native_code)\"`) == \"true\"\n @test readchomp(`$exename --sysimage-native-code=no -E\n \"Bool(Base.JLOptions().use_sysimage_native_code)\"`) == \"false\"\nend\n\n# backtrace contains type and line number info (esp. on windows #17179)\nfor precomp in (\"yes\", \"no\")\n success, out, bt = readchomperrors(`$(Base.julia_cmd()) --startup-file=no --sysimage-native-code=$precomp\n -E 'include(\"____nonexistent_file\")'`)\n @test !success\n @test out == \"\"\n @test contains(bt, \"include_relative(::Module, ::String) at $(joinpath(\".\", \"loading.jl\"))\")\n lno = match(r\"at \\.[\\\/\\\\]loading\\.jl:(\\d+)\", bt)\n @test length(lno.captures) == 1\n @test parse(Int, lno.captures[1]) > 0\nend\n\n# PR #23002\nlet exename = `$(Base.julia_cmd()) --startup-file=no`\n for (mac, flag, pfix, msg) in [(\"@test_nowarn\", ``, \"_1\", \"\"),\n (\"@test_warn\", `--warn-overwrite=yes`, \"_2\", \"\\\"WARNING: Method definition\\\"\")]\n str = \"\"\"\n using Test\n try\n # issue #18725\n $mac $msg @eval Main begin\n f18725$(pfix)(x) = 1\n f18725$(pfix)(x) = 2\n end\n @test Main.f18725$(pfix)(0) == 2\n # PR #23030\n $mac $msg @eval Main module Module23030$(pfix)\n f23030$(pfix)(x) = 1\n f23030$(pfix)(x) = 2\n end\n catch\n exit(-1)\n end\n exit(0)\n \"\"\"\n run(`$exename $flag -e $str`)\n end\nend\n\n# issue #6310\nlet exename = `$(Base.julia_cmd()) --startup-file=no`\n @test writereadpipeline(\"2+2\", exename) == \"4\\n\"\n @test writereadpipeline(\"2+2\\n3+3\\n4+4\", exename) == \"4\\n6\\n8\\n\"\n @test writereadpipeline(\"\", exename) == \"\"\n @test writereadpipeline(\"print(2)\", exename) == \"2\"\n @test writereadpipeline(\"print(2)\\nprint(3)\", exename) == \"23\"\n let infile = tempname()\n touch(infile)\n try\n @test read(pipeline(exename, stdin=infile), String) == \"\"\n write(infile, \"(1, 2+3)\")\n @test read(pipeline(exename, stdin=infile), String) == \"(1, 5)\\n\"\n write(infile, \"1+2\\n2+2\\n1-2\\n\")\n @test read(pipeline(exename, stdin=infile), String) == \"3\\n4\\n-1\\n\"\n write(infile, \"print(2)\")\n @test read(pipeline(exename, stdin=infile), String) == \"2\"\n write(infile, \"print(2)\\nprint(3)\")\n @test read(pipeline(exename, stdin=infile), String) == \"23\"\n finally\n rm(infile)\n end\n end\nend\n","avg_line_length":39.084148728,"max_line_length":136,"alphanum_fraction":0.5606348888} {"size":806,"ext":"jl","lang":"Julia","max_stars_count":4.0,"content":"using QCMaterial\nusing Documenter\n\nmakedocs(;\n modules=[QCMaterial],\n authors=\"Rihito Sakurai and contributors\",\n 
repo=\"https:\/\/github.com\/sakurairihito\/QCMaterialNew\/blob\/{commit}{path}#L{line}\",\n sitename=\"QCMaterialNew\",\n format=Documenter.HTML(;\n prettyurls=get(ENV, \"CI\", \"false\") == \"true\",\n canonical=\"https:\/\/github.com\/sakurairihito\/QCMaterialNew\",\n assets=String[],\n ),\n pages=[\n \"Home\" => \"index.md\",\n \"Chapter1\"=>[\n \"Introduction\" => \"chapter1\/intro.md\",\n \"About Goma-chan\" => \"chapter1\/goma.md\",\n ],\n \"Chapter2\"=>\"chapter2\/azarashi.md\",\n ],\n)\n\ndeploydocs(;\n devbranch=\"vqs\",\n target=\"build\",\n repo=\"github.com\/sakurairihito\/QCMaterialNew\",\n versions=nothing\n)\n","avg_line_length":26.8666666667,"max_line_length":86,"alphanum_fraction":0.6129032258} {"size":5441,"ext":"jl","lang":"Julia","max_stars_count":193.0,"content":"using CImGui\nusing CImGui.ImGuiGLFWBackend\nusing CImGui.ImGuiGLFWBackend.LibCImGui\nusing CImGui.ImGuiGLFWBackend.LibGLFW\nusing CImGui.ImGuiOpenGLBackend\nusing CImGui.ImGuiOpenGLBackend.ModernGL\n# using CImGui.ImGuiGLFWBackend.GLFW\nusing CImGui.CSyntax\n\n# include(joinpath(@__DIR__, \"demo_window.jl\"))\n\nglfwDefaultWindowHints()\nglfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3)\nglfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2)\nif Sys.isapple()\n glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE) # 3.2+ only\n glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE) # required on Mac\nend\n\n# create window\nwindow = glfwCreateWindow(1280, 720, \"Demo\", C_NULL, C_NULL)\n@assert window != C_NULL\nglfwMakeContextCurrent(window)\nglfwSwapInterval(1) # enable vsync\n\n# create OpenGL and GLFW context\nwindow_ctx = ImGuiGLFWBackend.create_context(window)\ngl_ctx = ImGuiOpenGLBackend.create_context()\n\n# setup Dear ImGui context\nctx = CImGui.CreateContext()\n\n# enable docking and multi-viewport\nio = CImGui.GetIO()\nio.ConfigFlags = unsafe_load(io.ConfigFlags) | CImGui.ImGuiConfigFlags_DockingEnable\nio.ConfigFlags = unsafe_load(io.ConfigFlags) | CImGui.ImGuiConfigFlags_ViewportsEnable\n\n# setup Dear ImGui style\nCImGui.StyleColorsDark()\n# CImGui.StyleColorsClassic()\n# CImGui.StyleColorsLight()\n\n# When viewports are enabled we tweak WindowRounding\/WindowBg so platform windows can look identical to regular ones.\nstyle = Ptr{ImGuiStyle}(CImGui.GetStyle())\nif unsafe_load(io.ConfigFlags) & ImGuiConfigFlags_ViewportsEnable == ImGuiConfigFlags_ViewportsEnable\n style.WindowRounding = 5.0f0\n col = CImGui.c_get(style.Colors, CImGui.ImGuiCol_WindowBg)\n CImGui.c_set!(style.Colors, CImGui.ImGuiCol_WindowBg, ImVec4(col.x, col.y, col.z, 1.0f0))\nend\n\n# load Fonts\n# - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use `CImGui.PushFont\/PopFont` to select them.\n# - `CImGui.AddFontFromFileTTF` will return the `Ptr{ImFont}` so you can store it if you need to select the font among multiple.\n# - If the file cannot be loaded, the function will return C_NULL. Please handle those errors in your application (e.g. 
use an assertion, or display an error and quit).\n# - The fonts will be rasterized at a given size (w\/ oversampling) and stored into a texture when calling `CImGui.Build()`\/`GetTexDataAsXXXX()``, which `ImGui_ImplXXXX_NewFrame` below will call.\n# - Read 'fonts\/README.txt' for more instructions and details.\nfonts_dir = joinpath(@__DIR__, \"..\", \"fonts\")\nfonts = unsafe_load(CImGui.GetIO().Fonts)\n# default_font = CImGui.AddFontDefault(fonts)\n# CImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Cousine-Regular.ttf\"), 15)\n# CImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"DroidSans.ttf\"), 16)\n# CImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Karla-Regular.ttf\"), 10)\n# CImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"ProggyTiny.ttf\"), 10)\n# CImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Roboto-Medium.ttf\"), 16)\nCImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Recursive Mono Casual-Regular.ttf\"), 16)\nCImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Recursive Mono Linear-Regular.ttf\"), 16)\nCImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Recursive Sans Casual-Regular.ttf\"), 16)\nCImGui.AddFontFromFileTTF(fonts, joinpath(fonts_dir, \"Recursive Sans Linear-Regular.ttf\"), 16)\n# @assert default_font != C_NULL\n\n# creat texture for image drawing\n# img_width, img_height = 256, 256\n# image_id = ImGui_ImplOpenGL3_CreateImageTexture(img_width, img_height)\n\n# setup Platform\/Renderer bindings\nImGuiGLFWBackend.init(window_ctx)\nImGuiOpenGLBackend.init(gl_ctx)\n\ntry\n demo_open = true\n clear_color = Cfloat[0.45, 0.55, 0.60, 1.00]\n while glfwWindowShouldClose(window) == 0\n glfwPollEvents()\n # start the Dear ImGui frame\n ImGuiOpenGLBackend.new_frame(gl_ctx)\n ImGuiGLFWBackend.new_frame(window_ctx)\n CImGui.NewFrame()\n\n demo_open && @c CImGui.ShowDemoWindow(&demo_open)\n\n # # show image example\n # CImGui.Begin(\"Image Demo\")\n # image = rand(GLubyte, 4, img_width, img_height)\n # ImGui_ImplOpenGL3_UpdateImageTexture(image_id, image, img_width, img_height)\n # CImGui.Image(Ptr{Cvoid}(image_id), (img_width, img_height))\n # CImGui.End()\n\n # rendering\n CImGui.Render()\n glfwMakeContextCurrent(window)\n\n width, height = Ref{Cint}(), Ref{Cint}() #! 
need helper fcn\n glfwGetFramebufferSize(window, width, height)\n display_w = width[]\n display_h = height[]\n\n glViewport(0, 0, display_w, display_h)\n glClearColor(clear_color...)\n glClear(GL_COLOR_BUFFER_BIT)\n ImGuiOpenGLBackend.render(gl_ctx)\n\n if unsafe_load(igGetIO().ConfigFlags) & ImGuiConfigFlags_ViewportsEnable == ImGuiConfigFlags_ViewportsEnable\n backup_current_context = glfwGetCurrentContext()\n igUpdatePlatformWindows()\n GC.@preserve gl_ctx igRenderPlatformWindowsDefault(C_NULL, pointer_from_objref(gl_ctx))\n glfwMakeContextCurrent(backup_current_context)\n end\n\n glfwSwapBuffers(window)\n end\ncatch e\n @error \"Error in renderloop!\" exception=e\n Base.show_backtrace(stderr, catch_backtrace())\nfinally\n ImGuiOpenGLBackend.shutdown(gl_ctx)\n ImGuiGLFWBackend.shutdown(window_ctx)\n CImGui.DestroyContext(ctx)\n glfwDestroyWindow(window)\nend\n","avg_line_length":41.8538461538,"max_line_length":194,"alphanum_fraction":0.7559272193} {"size":6849,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"\n################################################################################\n# Fundamental \"neural\" data types\n################################################################################\nstruct Spike\n\tn::UInt64\n\tt::Float64\nend\nBase.show(io::IO, s::Spike) = print(io, \"(Neuron $(s.n): $(s.t) s)\")\n\nstruct Synapse\n\tstrength::Float64\n\tsource::UInt64\n\tdest::UInt64\n\tdelay::UInt16 \t# in number of samples\nend\nBase.show(io::IO, syn::Synapse) = print(io, \"(Source: $(syn.source), Destination: $(syn.dest), Delay: $(syn.delay), Strength: $(syn.strength))\")\n\nabstract type Neuron end\n\n\n################################################################################\n# Izhikevich neuron\n################################################################################\nstruct SimpleNeuron <: Neuron\n\ta::Float64\n\tb::Float64\n\tc::Float64\n\td::Float64\nend\n\nstruct SimpleNeuronType{T} end\nSimpleNeuronType(s::Symbol) = SimpleNeuronType{s}()\nSimpleNeuronType(s::AbstractString) = SimpleNeuronType{Symbol(s)}()\n\nSimpleNeuron(s::Symbol) = SimpleNeuron(SimpleNeuronType{Symbol(s)}())\nSimpleNeuron(s::AbstractString) = SimpleNeuron(SimpleNeuronType(s))\n\n# Regular Spiking (Exc)\nSimpleNeuron(::SimpleNeuronType{:rs}) = SimpleNeuron(0.02, 0.2, -65.0, 8.0)\nSimpleNeuron(::SimpleNeuronType{:RS}) = SimpleNeuron(0.02, 0.2, -65.0, 8.0)\n# Intrinsically Bursting (Exc)\nSimpleNeuron(::SimpleNeuronType{:ib}) = SimpleNeuron(0.02, 0.2, -55.0, 4.0)\nSimpleNeuron(::SimpleNeuronType{:IB}) = SimpleNeuron(0.02, 0.2, -55.0, 4.0)\n# Chattering (Exc)\nSimpleNeuron(::SimpleNeuronType{:ch}) = SimpleNeuron(0.02, 0.2, -50.0, 2.0)\nSimpleNeuron(::SimpleNeuronType{:CH}) = SimpleNeuron(0.02, 0.2, -50.0, 2.0)\n# Fast Spiking (Inh)\nSimpleNeuron(::SimpleNeuronType{:fs}) = SimpleNeuron(0.1, 0.2, -65.0, 2.0)\nSimpleNeuron(::SimpleNeuronType{:FS}) = SimpleNeuron(0.1, 0.2, -65.0, 2.0)\n# Low-Threshold Spiking (Inh)\nSimpleNeuron(::SimpleNeuronType{:lts}) = SimpleNeuron(0.02, 0.25, -65.0, 2.0)\nSimpleNeuron(::SimpleNeuronType{:LTS}) = SimpleNeuron(0.02, 0.25, -65.0, 2.0)\n# Thalamo-Cortical \nSimpleNeuron(::SimpleNeuronType{:tc}) = SimpleNeuron(0.02, 0.25, -65.0, 0.05)\nSimpleNeuron(::SimpleNeuronType{:TC}) = SimpleNeuron(0.02, 0.25, -65.0, 0.05)\n# Resonator \nSimpleNeuron(::SimpleNeuronType{:rz}) = SimpleNeuron(0.1, 0.26, -65.0, 2.0)\nSimpleNeuron(::SimpleNeuronType{:RZ}) = SimpleNeuron(0.1, 0.26, -65.0, 2.0)\n\n# Random Excitatory\nSimpleNeuron(::SimpleNeuronType{:exc}) = 
begin\n\tc = rand()^2.0\n\tSimpleNeuron(0.02, 0.2, -65.0+15.0c, 8.0-6.0c)\nend\nSimpleNeuron(::SimpleNeuronType{:EXC}) = SimpleNeuron(SimpleNeuronType(:exc))\n# Random Inhibitory\nSimpleNeuron(::SimpleNeuronType{:inh}) = begin\n\tr = rand()\n\tSimpleNeuron(0.02+0.08r, 0.25-0.05r, -65.0, 2.0)\nend\nSimpleNeuron(::SimpleNeuronType{:INH}) = SimpleNeuron(SimpleNeuronType(:inh))\n\n\n################################################################################\n# Gerstner neuron\n################################################################################\nmutable struct SRMNeuron <: Neuron\n\tu::Float64\n\tu_max::Float64\n\tu_rest::Float64\n\t\u0398::Float64\n\t\u03c4_refr::Float64\n\t\u03c4_memb::Float64\n\tt_lastevent::Float64\n\tt_lastspike::Float64\n\t\u03f5_last::Float64\nend\n\nSRMNeuron() = SRMNeuron(-65.0, 8.0, -65.0, -50.0, 7.0\/1000.0, 3.0\/1000.0,\n\t\t\t\t\t\t0.0, 0.0, 0.0)\n\nSRMNeuron(\u03c4_membrane) = SRMNeuron(-65.0, 8.0, -65.0, -50.0, 7.0\/1000.0, \u03c4_membrane, \n\t\t\t\t\t\t\t\t 0.0, 0.0, 0.0)\n\n\n######################################################################\n# Delay lines \n######################################################################\n\nmutable struct DelayLine\n\tnumstored::Int64\n\tdelaylen::Int64\n\tcounts::Vector{Int64}\nend\n\nDelayLine(delaylen::Int64, maxevents::Int64) =\n\tDelayLine(0, delaylen, zeros(Int64, maxevents))\n\nDelayLine(delaylen::Int64) = DelayLine(0, delaylen, zeros(Int64, 64))\n\nDelayLine(numstored::Int64, delaylen::Int64, maxevents::Int64) = \n\tDelayLine(numstored, delaylen, zeros(Int64, maxevents))\n\n\n################################################################################\n# Types for managing experiments\n################################################################################\nstruct ModelParams\n\tfs::Float64\n\n\t# Graph (revise later) -- now specified directly, but still used here\n\tnum_neurons::UInt64\n\tp_contact::Float64\n\tp_exc::Float64\n\tmaxdelay::Float64 # ms\n\n\t# STDP\n\tsynmax::Float64\n\ttau_pre::Float64\n\ttau_post::Float64\n\ta_pre::Float64\n\ta_post::Float64\nend\nModelParams() = ModelParams(2000.0, 1000, 0.1, 0.8, 20.0, 10.0,\n\t\t\t\t\t\t\t0.02, 0.02, 1.20, 1.0)\n\nstruct TrialParams\n\tdur::Float64\n\tlambda::Float64\n\trandspikesize::Float64\n\trandinput::Bool\n\tinhibition::Bool\n stdp::Bool\n\tinputmode::UInt64\n\tmultiinputmode::UInt64\n\tinputweight::Float64\n\trecordstart::Float64\n\trecordstop::Float64\n\tlambdainput::Float64\n\tinputrefractorytime::Float64\nend\n\nTrialParams() = TrialParams(1.0, 3.0, 20.0, 1, 1, 1, 1, 1, 20.0, 0.0, 1.0, 0.5, 0.0)\n\nfunction Base.show(io::IO, tp::TrialParams)\n\tprintln(io, \"\\tInput mode: $(tp.inputmode == 1 ? 
\"periodic\" : \"poissonian\")\")\n\tprintln(io, \"\\tInput timing density (if random): $(tp.lambdainput) (1\/s)\")\n\tprintln(io, \"\\tInput size: $(tp.inputweight)\")\n\tprintln(io, \"\\tDuration: $(tp.dur) (s)\")\t\n\tprintln(io, \"\\tRandom input: $(tp.randinput)\")\n\tprintln(io, \"\\tRand spike density (\u03bb): $(tp.lambda) (1\/s)\")\n\tprintln(io, \"\\tRandom spike size: $(tp.randspikesize)\")\n\tprintln(io, \"\\tInhibition: $(tp.inhibition)\")\n\tprintln(io, \"\\tSTDP: $(tp.stdp)\")\n\tprintln(io, \"\\tRecord start: $(tp.recordstart)\")\n\tprintln(io, \"\\tRecord stop: $(tp.recordstop)\")\nend\n\nstruct Experiment\n\tname::String\n\tmp::ModelParams\n\ttp::TrialParams\n\tinput::Array{Spike,1}\nend\n\nstruct Model\n\tparams::ModelParams\n\tdelgraph::Array{Int64,2}\n\tsyngraph::Array{Float64,2}\n\tneurons::Array{SimpleNeuron,1}\nend\n\nstruct ExperimentOutput\n\tmodelname::AbstractString\n\ttrialname::AbstractString\n\ttp::TrialParams\n\tinput::Array{Array{Spike,1},1}\n\toutput::Array{Spike,1}\n\tinputtimes::Array{Float64,1}\n\tinputids::Array{Int64,1}\n\tsynapses::Array{Synapse,1}\nend\n\n\nfunction Base.show(io::IO, exp::ExperimentOutput)\n\tprintln(IO, \"Model Name: $(exp.modelname)\")\n\tprintln(IO, \"Trial Name: $(exp.trialname)\")\n\tprintln(IO, \"Trial Parameters\")\n\tBase.show(io, exp.tp)\nend\n\nstruct ExperimentType{T} end\n\n\nExperimentType(s::AbstractString) = ExperimentType{Symbol(s)}()\n\n################################################################################\n# Delay learning types\n################################################################################\nstruct CategoricalSpiker\n\tdls::Array{DelayLine,1}\n\tneuron::SRMNeuron\n\tweight::Float64\nend\n\nCategoricalSpiker(inputs::T, possibledelays::T, weight;\n\t\t\t\t \u03c4_membrane=3.0\/1000.0,\n\t\t\t\t maxevents=64) where T <: Union{Array{Int64,1}, UnitRange{Int64}} = begin\n\tdls = [DelayLine(rand(possibledelays), maxevents) for _ \u2208 inputs]\n\tneuron = SRMNeuron(\u03c4_membrane)\n\tCategoricalSpiker(dls, neuron, weight)\nend\n\n","avg_line_length":30.44,"max_line_length":144,"alphanum_fraction":0.6110381078} {"size":4112,"ext":"jl","lang":"Julia","max_stars_count":3.0,"content":"include(\".\/representation.jl\")\ninclude(\".\/parent_selection.jl\")\ninclude(\".\/recombination.jl\")\ninclude(\".\/mutation.jl\")\ninclude(\".\/metrics.jl\")\ninclude(\".\/functions_collection.jl\")\ninclude(\".\/utils.jl\")\ninclude(\".\/survivor_selection.jl\")\ninclude(\".\/graphing_utilities.jl\")\n\nusing .Representation, .ParentSelection, .Recombination, .Mutation, .Metrics, .FunctionsCollection\nusing .SurvivorSelection, .Utils, Statistics, .GraphingUtilities\n\nparameters = Utils.get_algo_params()\nx_values = Utils.generate_linespace(parameters[\"x_range\"][1], parameters[\"x_range\"][2], parameters[\"population_size\"])\ntrue_values = parameters[\"evaluate_function\"](x_values)\npopulation = Representation.generate_population(parameters[\"population_size\"], parameters[\"n_kernels\"], parameters[\"\u03c3_initial\"])\nextra_params_error = Dict(\"threshold\"=>parameters[\"threshold\"],\n \"lower_weight\"=>parameters[\"lower_weight\"],\n \"upper_weight\"=>parameters[\"upper_weight\"])\n\nMetrics.calc_population_fitness!(population,\n true_values,\n parameters[\"error_function\"],\n extra_params_error)\n\nall_fitness_values = Array{Array{Float64, 1}, 1}[]\nall_hit_ratios = Array{Array{Float64, 1}, 1}[]\nfor run in 1:parameters[\"num_runs\"]\n fitness_values = Array{Float64, 1}[]\n hit_ratios = 
Array{Float64, 1}[]\n for i in 1:parameters[\"num_generations\"]\n println(\"Generation: \", i)\n println(\"Selecting parents...\")\n parents_groups = ParentSelection.random_parent_selection(population, parameters[\"\u03bb_children\"],\n parameters[\"\u03bc_parents\"])\n println(\"Applying recombination...\")\n offspring = Recombination.apply_recombination(parents_groups, parameters[\"\u03bc_parents\"],\n parameters[\"recombination_type\"])\n println(\"Applying mutation\")\n offspring_population = Representation.Population(offspring)\n mutated_population = parameters[\"mutation_type\"](offspring_population)\n println(\"Calculating fitness...\")\n Metrics.calc_population_fitness!(mutated_population,\n true_values,\n parameters[\"error_function\"],\n extra_params_error)\n println(\"Selecting survivors...\")\n next_generation = nothing\n if parameters[\"survivor_selection\"] == \"children\"\n next_generation = SurvivorSelection.rank_selection(mutated_population, parameters[\"\u03bc_parents\"])\n elseif parameters[\"survivor_selection\"] == \"all\"\n next_generation = SurvivorSelection.rank_selection(mutated_population, population, parameters[\"\u03bc_parents\"])\n else\n throw(ArgumentError(\"Survivor selection requested not available. Options are: all, children\"))\n end\n push!(fitness_values, [member.fitness for member in next_generation.members])\n push!(hit_ratios, [member.hit_ratio for member in next_generation.members])\n global population = next_generation\n end\n push!(all_fitness_values, fitness_values)\n push!(all_hit_ratios, hit_ratios)\nend\nprintln(\"Finished training...\")\nmean_runs_fitness = mean(all_fitness_values)\nmean_hit_ratio = mean(all_hit_ratios)\n\noutput_name = nothing\nif parameters[\"output_file\"] != nothing\n output_name = \"outputs\/\" * parameters[\"output_file\"]\nend\n\nGraphingUtilities.plot_all(mean_runs_fitness,\n parameters[\"num_generations\"],\n population,\n true_values,\n x_values,\n mean_hit_ratio,\n parameters[\"population_size\"],\n output_name)\n\n\nopen(output_name * \".txt\", \"w\") do file\n write(file, string([mean(values) for values in mean_runs_fitness]))\n write(file, \"\\n\")\n write(file, string([mean(values) for values in mean_hit_ratio]))\n write(file, \"\\n\")\nend\n","avg_line_length":46.202247191,"max_line_length":128,"alphanum_fraction":0.6381322957} {"size":4460,"ext":"jl","lang":"Julia","max_stars_count":72.0,"content":"## draw: unify rand, sample\n\ndraw!(x::Distribution, o) = rand!(x, o)\n\ndraw!(x::AbstractVector, o::AbstractVector) = sample!(x, o)\n\nfunction draw!(x::AbstractMatrix, o::AbstractMatrix)\n idx = sample(1:nobs(x), nobs(o))\n for (to, from) in enumerate(idx)\n o[to,:] = x[from,:]\n end\n return o\nend\n\nfunction draw!(x::AbstractDataFrame, o::AbstractDataFrame)\n idx = sample(1:nobs(x), nobs(o))\n for column in names(x)\n o[!,column] = x[idx,column]\n end\n return o\nend\n\npick(x::AbstractVector, i::AbstractVector) = x[i]\n\npick(x::AbstractArray, i::AbstractVector) = copy(selectdim(x, 1, i))\n\npick(x::AbstractDataFrame, i::AbstractVector) = x[i,:]\n\n\"\"\"\n MaximumEntropyCache{T<:Real}\n\nA cache type for storing precomputed values for a particular input array.\nThis is intended to minimize memory allocations and time when drawing random samples.\n\n# Fields\n- `n::Int`: Number of elements in the input array.\n- `t::Type{T}`: `eltype` of the input elements.\n- `inds::Vector{Int}`: Indicies of the sorted data.\n- `values::Vector{T}`: Sorted values of the input data.\n- `Z::Vector{T}`: Intermediate points for 
the ordered values.\n- `mtrm::T`: Trimmed mean of deviations.\n- `m::Vector{T}`: Mean of the maximum entropy density within each interval.\n- `U::Vector{T}`: Preallocated array for the random noise values.\n- `quantiles::Vector{T}`: Preallocated array for sample quantiles.\n- `v::Vector{T}`\n\"\"\"\nmutable struct MaximumEntropyCache{T <: Real}\n n::Int\n inds::Vector{Int}\n vals::Vector{T}\n Z::Vector{T}\n mtrm::T\n m::Vector{T}\n U::Vector{T}\n quantiles::Vector{T}\n v::Vector{T}\nend\n\nfunction MaximumEntropyCache()\n MaximumEntropyCache(0,\n Int[],\n Float64[],\n Float64[],\n 0.0,\n Float64[],\n Float64[],\n Float64[],\n Float64[],\n )\nend\n\nfunction init!(cache::MaximumEntropyCache{T}, x::AbstractArray) where T\n cache.n = length(x)\n sorted!(cache, x)\n trimmed!(cache, x)\n intermediates!(cache)\n med!(cache)\n cache.U = zeros(T, cache.n)\n cache.quantiles = zeros(T, cache.n)\n cache.v = [y \/ cache.n for y in 0:cache.n]\n return nothing\nend\n\n\"\"\"\n sorted!(c::MaximumEntropyCache, x::AbstractArray)\n\nSets the sorted indices and values for `x`.\n\"\"\"\nfunction sorted!(c::MaximumEntropyCache, x::AbstractArray)\n c.inds = sortperm(x)\n c.vals = x[c.inds]\nend\n\n\"\"\"\n trimmed!(c::MaximumEntropyCache, x::AbstractArray)\n\nCompute our trimmed mean of deviations.\n\"\"\"\nfunction trimmed!(c::MaximumEntropyCache, x::AbstractArray)\n c.mtrm = mean([abs(x[i] - x[i - 1]) for i in 2:c.n])\nend\n\n\"\"\"\n intermediates!(c::MaximumEntropyCache)\n\nCompute our intermediate points for the ordered values.\n\"\"\"\nfunction intermediates!(c::MaximumEntropyCache{T}) where T\n c.Z = zeros(T, c.n + 1)\n for i in 2:c.n\n c.Z[i] = (c.vals[i - 1] + c.vals[i]) \/ 2\n end\n\n # Insert our lower and upper tails using our trimmed mean\n c.Z[1] = (c.vals[1] - c.mtrm)\n c.Z[end] = (c.vals[end] + c.mtrm)\nend\n\n\"\"\"\n med!(c::MaximumEntropyCache)\n\nCompute the mean of the maximum entropy density within each interval such that\nthe \u2018mean-preserving constraint\u2019 is satisfied.\n\"\"\"\nfunction med!(c::MaximumEntropyCache{T}) where T\n c.m = zeros(T, c.n)\n c.m[1] = 0.75 * c.vals[1] + 0.25 * c.vals[1]\n\n for k in 2:(c.n - 1)\n c.m[k] = 0.25 * c.vals[k - 1] + 0.5 * c.vals[k] + 0.25 * c.vals[k + 1]\n end\n\n c.m[end] = 0.25 * c.vals[end - 1] + 0.75 * c.vals[end]\nend\n\nfunction draw!(cache::MaximumEntropyCache, x::T, o::T) where T <: AbstractArray\n # Generate random numbers from the [0, 1] uniform interval.\n sort!(rand!(cache.U))\n\n # Compute sample quantiles of the ME density at those points and sort them.\n for k in 1:cache.n\n ind = argmin(abs.(cache.v .- cache.U[k]))\n\n if cache.v[ind] > cache.U[k]\n ind -= 1\n end\n\n c = (2 * cache.m[ind] - cache.Z[ind] - cache.Z[ind + 1]) \/ 2\n y0 = cache.Z[ind] + c\n y1 = cache.Z[ind + 1] + c\n cache.quantiles[k] =\n y0 + (cache.U[k] - cache.v[ind]) * (y1 - y0) \/ (cache.v[ind + 1] - cache.v[ind])\n end\n\n # Reorder the sorted sample quantiles by using the ordering index.\n # This recovers the time dependence relationships of the originally observed data.\n recovered = sortperm(cache.inds)\n for i in 1:cache.n\n idx = recovered[i]\n o[i] = cache.quantiles[idx]\n end\n\n return o\nend\n","avg_line_length":27.0303030303,"max_line_length":92,"alphanum_fraction":0.6228699552} {"size":2668,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using DiffEqBase, Test\n\nfunction f(du,u,p,t)\n du[1] = 0.2u[1]\n du[2] = 0.4u[2]\nend\nu0 = ones(2)\ntspan = (0,1.0)\n\nprob = ODEProblem(f,u0,tspan)\n@test typeof(prob.tspan) == 
Tuple{Float64,Float64}\nprob = ODEProblem{true}(f,u0,tspan)\n@test typeof(prob.tspan) == Tuple{Float64,Float64}\nprob = ODEProblem(ODEFunction{true}(f),u0,tspan)\n@test typeof(prob.tspan) == Tuple{Float64,Float64}\n@test isinplace(prob) == true\nprob = ODEProblem{false}(f,u0,tspan)\n@test isinplace(prob) == false\n\n@test_broken @inferred ODEProblem{true}(f,u0,tspan)\n@test_broken @inferred ODEProblem(f,u0,tspan)\n\nfunction f(dv,u,v,p,t)\n dv .= 2.0 .* v\nend\nu0 = ones(2)\nv0 = ones(2)\ntspan = (0,1.0)\nprob = SecondOrderODEProblem(f,u0,v0,tspan)\n\nprob = SDEProblem((u,p,t) -> 1.01u,(u,p,t) -> 0.87u,1\/2,(0.0,1.0))\n\nfunction f(du,u,p,t)\n du[1] = 0.2u[1]\n du[2] = 0.4u[2]\nend\nfunction g(du,u,p,t)\n du[1] = 0.2u[1]\n du[2] = 0.4u[2]\nend\nu0 = ones(2)\ntspan = (0,1.0)\nprob = SDEProblem(f,g,u0,tspan)\nprob = SDEProblem{true}(f,g,u0,tspan)\n\n@test_broken @inferred SDEProblem(f,g,u0,tspan)\n@test_broken @inferred SDEProblem{true}(f,g,u0,tspan)\n\nf_1delay = function (du,u,h,p,t)\n du[1] = - h(t-1)[1]\nend\nprob = DDEProblem(f_1delay,ones(1),t->zeros(1),(0.0, 10.0),constant_lags = ones(1))\nprob = DDEProblem{true}(f_1delay,ones(1),t->zeros(1),(0.0, 10.0),dependent_lags = ones(1))\n\n@test_broken @inferred DDEProblem(f_1delay,ones(1),t->zeros(1),(0.0, 10.0),constant_lags = ones(1))\n@test_broken @inferred DDEProblem{true}(f_1delay,ones(1),t->zeros(1),(0.0, 10.0),dependent_lags = ones(1))\n\nfunction f(r, yp, y, p,tres)\n r[1] = -0.04*y[1] + 1.0e4*y[2]*y[3]\n r[2] = -r[1] - 3.0e7*y[2]*y[2] - yp[2]\n r[1] -= yp[1]\n r[3] = y[1] + y[2] + y[3] - 1.0\nend\nu0 = [1.0, 0, 0]\ndu0 = [-0.04, 0.04, 0.0]\nprob_dae_resrob = DAEProblem(f,du0,u0,(0.0,100000.0))\nprob_dae_resrob = DAEProblem{true}(f,du0,u0,(0.0,100000.0))\n\n@test_broken @inferred DAEProblem(f,du0,u0,(0.0,100000.0))\n@test_broken @inferred DAEProblem{true}(f,du0,u0,(0.0,100000.0))\n\nf(u,t,W) = 1.01u.+0.87u.*W\nu0 = 1.00\ntspan = (0.0,1.0)\nprob = RODEProblem(f,u0,tspan)\nprob = RODEProblem{false}(f,u0,tspan)\n\n@test_broken @inferred RODEProblem(f,u0,tspan)\n@test_broken @inferred RODEProblem{false}(f,u0,tspan)\n\nDiscreteProblem(ones(1),tspan)\nf(t,u) = 0.5\nDiscreteProblem{false}(f,ones(1),tspan)\n\n@test_broken @inferred DiscreteProblem(ones(1),tspan)\n@test_broken @inferred DiscreteProblem{false}(f,ones(1),tspan)\n\nfunction f(du,u,t)\n du[1] = 2 - 2u[1]\n du[2] = u[1] - 4u[2]\nend\nu0 = zeros(2)\nprob = SteadyStateProblem(f,u0)\n\n@test_broken @inferred SteadyStateProblem(f,u0)\n@test SteadyStateProblem(ODEProblem(f,u0,tspan,:param)).p == :param\n","avg_line_length":27.5051546392,"max_line_length":106,"alphanum_fraction":0.6600449775} {"size":2559,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# Specific intervals tests\n\n# Promotion behaviors -- we only allow concrete endpoints of the same type\n@test 1.0 .. 2 === 1.0 .. 2.0\n@test 1\/\/2 .. 3.5 === 0.5 .. 3.5\n@test_throws ArgumentError :a .. \"b\"\n@test_throws ArgumentError 1 .. (2,3)\n\nv = [1 .. 2, 3.0 .. 4.0]\n@test v[1] === 1.0 .. 2.0\n@test v[2] === 3.0 .. 4.0\n\n# Test simple arithmetic, with promotion behaviors\n@test (1.0 .. 2.0) + 1 === (2.0 .. 3.0)\n@test (1 .. 2) + 1.0 === (2.0 .. 3.0)\n@test (1 .. 2) + (1.0 .. 2.0) === (2.0 .. 4.0)\n@test (1 .. 2) - (1 .. 2) === (-1 .. 1)\n@test +(1 .. 2) === (1 .. 2)\n@test -(1 .. 2) === (-2 .. -1)\n\n@test (1..2)*3 === 3..6\n@test (-1..1)*3 === -3..3\n@test (2..4)\/2 === 1.0 .. 2.0\n@test 1\/(2..4) === 1\/4 .. 1\/2\n\n@test 3.2 in 3..4\n@test 4 in 2.0 .. 6.0\n@test 4 in 4.0 .. 4.0\n@test 4 in 4.0 .. 
5\n@test (1..2) in (0.5 .. 2.5)\n@test !((1..2) in (1.5 .. 2.5))\n\n@test maximum(1..2) === 2\n@test minimum(1..2) === 1\n\n# Comparisons are \"for-all\" like, with <= and >= allowing overlap\n@test 0 <= 1 .. 2\n@test !(0 >= 1 .. 2)\n@test 1 <= 1 .. 2\n@test !(1 >= 1 .. 2)\n@test !(2 <= 1 .. 2)\n@test 2 >= 1 .. 2\n@test !(3 <= 1 .. 2)\n@test 3 >= 1 .. 2\n\n@test 0 < 1 .. 2\n@test !(0 > 1 .. 2)\n@test !(1 < 1 .. 2)\n@test !(1 > 1 .. 2)\n@test !(2 < 1 .. 2)\n@test !(2 > 1 .. 2)\n@test !(3 < 1 .. 2)\n@test 3 > 1 .. 2\n\n# Test dictionary lookup by numeric value\nd = Dict(1..2 => 1, 2.0..3.0 => 2)\n@test d[1..2] === 1\n@test d[1.0..2.0] === 1\n@test d[2..3] === 2\n@test d[2.0..3.0] === 2\nd[0x1 .. 0x2] = 3\n@test d[1..2] === 3\n@test length(d) == 2\n\n# Test repeated intervals:\n@test (1..2) + [1,2,3] == [(1..2)+i for i in [1,2,3]]\n@test (1..2) + (1:3) == [(1..2)+i for i in 1:3]\n@test (1..2) - [1,2,3] == [(1..2)-i for i in [1,2,3]]\n@test (1..2) - (1:3) == [(1..2)-i for i in 1:3]\n\n@test [1,2,3] + (1..2)== [i+(1..2) for i in [1,2,3]]\n@test (1:3) + (1..2)== [i+(1..2) for i in 1:3]\n@test [1,2,3] - (1..2)== [i-(1..2) for i in [1,2,3]]\n@test (1:3) - (1..2)== [i-(1..2) for i in 1:3]\n\n# And intervals at indices\n@test atindex(1..2, [3,4,5]) == [atindex(1..2, i) for i in [3,4,5]]\n@test atindex(1..2, 3:5) == [atindex(1..2, i) for i in 3:5]\n\n# Ensure comparisons are exact (and not lossy)\n@assert 0.2 > 2\/\/10 # 0.2 == 2.0000000000000001\n@test !(0.1 .. 0.2 <= 2\/\/10)\n\n# Conversion and construction:\n@test 1 .. 2 === ClosedInterval(1, 2) === ClosedInterval{Int}(1.0, 2.0) === ClosedInterval{Int}(1.0 .. 2.0)\n@test 1.0 .. 2.0 === ClosedInterval(1.0, 2) === ClosedInterval{Float64}(1, 2) === ClosedInterval{Float64}(1 .. 2)\n@test 1 .. 1 === ClosedInterval(1, 1) === ClosedInterval{Int}(1.0, 1.0)\n","avg_line_length":29.0795454545,"max_line_length":113,"alphanum_fraction":0.4861273935} {"size":662,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using HDF5\nconst PRINT_MEMORY = `ps -p $(getpid()) -o rss=`\nconst DATA = zeros(1000)\nmacro memtest(ex)\n @info :Memory\n quote\n for i in 1:100\n for _ in 1:100\n $ex\n end\n # HDF5.h5_garbage_collect()\n GC.gc(); print(rpad(i, 8)); run(PRINT_MEMORY)\n end\n end\nend\n@memtest h5open(\"\/tmp\/memtest.h5\", \"w\") do file\n dset = d_create(file, \"A\", datatype(DATA), dataspace(DATA), chunk=(100,))\n dset[:] = DATA[:]\nend\n@memtest h5open(\"\/tmp\/memtest.h5\", \"w\") do file\n file[\"A\", chunk=(100,)] = DATA[:]\nend\n@memtest h5open(\"\/tmp\/memtest.h5\", \"r\") do file\n file[\"A\",\"dxpl_mpio\", 0]\nend\n","avg_line_length":25.4615384615,"max_line_length":77,"alphanum_fraction":0.5543806647} {"size":3711,"ext":"jl","lang":"Julia","max_stars_count":5.0,"content":"include(\"plotting.jl\")\n\n\"\"\"\nStructure for storing summary statistics\n\"\"\"\nstruct sts \n median::Float64\n intwidth::Float64\nend\n\nfunction cross_validate(train::trainingData{A, B}, range\u03b8::B, range\u03bb::B, transform, quadtype = \"Gaussian\", priortype = \"Uniform\", num_pts=100, strt = 0, endpt=1.5) where A<:Array{Float64, 2} where B<:Array{Float64, 1}\n X = train.X\n x = collect(range(strt, stop = endpt, length = num_pts)) #define mesh \n Xs = repeat(x, outer = [1, length(z)]) #preallocate space \n Ys = Array{Float64}(undef, num_pts, length(z))\n for i=1:length(z)\n println(i)\n ind = [collect(1:i-1); collect(i+1:length(z))]\n @time begin\n train_cur = trainingData(s[ind, :], X[ind, :], z[ind]) \n test_cur = testingData(s[i:i, :], X[i:i, :])\n pdf, cdf = 
getBtgDensity(train_cur, test_cur, range\u03b8, range\u03bb, transform, quadtype, priortype)\n end\n for j = 1:num_pts\n Ys[j, i] = pdf(x[j])\n end\n end\n return Xs, Ys\nend\n\n\"\"\"\nSingle point cross-validation. Currently does not use confidence intervals or median finding. \n\"\"\"\nfunction cross_validate_artifact(X, s, g, gprime, p\u03b8, p\u03bb, z, range\u03b8, range\u03bb, num_pts=200, strt = 0, endpt=20)\n ss = Array{sts}(undef, length(z))\n x = collect(range(strt, stop = endpt, length = num_pts)) #define mesh \n Xs = repeat(x, outer = [1, length(z)]) #preallocate space \n Ys = Array{Float64}(undef, num_pts, length(z))\n for i=1:length(z)\n println(i)\n ind = [collect(1:i-1); collect(i+1:length(z))]\n @time begin\n X0 = size(X, 2)==1 ? X[1:1, :] : X[i:i, :] # constant mean?\n pdf, cdf = model(X[ind, :], X0, s[ind, :], s[i:i, :], boxCox, boxCoxPrime, p\u03b8, p\u03bb, z[ind], range_theta, range_lambda) #X0 is [1]\n constant = cdf(30)\n pdfn = x -> pdf(x)\/constant\n end\n #@time begin\n #med = bisection(x -> cdfn(x)-0.5, 1e-3, 50, 1e-3, 15) #compute median \n #end\n #@time begin\n #intwidth = confidence(pdfn, med) #compute width of credible interval corresponding to 95% confidence level\n #end\n #st = sts(med, intwidth); ss[i]=st\n for j = 1:num_pts\n Ys[j, i] = pdfn(x[j])\n end\n end\n return ss, Xs, Ys\nend\n\n\"\"\"\nDelete-one-group cross validation.\n\"\"\"\nfunction cross_validate_groups(X, s, g, gprime, p\u03b8, p\u03bb, z, range\u03b8, range\u03bb, num_group = 5, num_pts=200, strt = 0, endpt=20)\n if num_group>length(z) \n error(\"number of splits must be less than or equal to number of data points\")\n end\n x = collect(range(strt, stop = endpt, length = num_pts)) #define mesh \n Xs = repeat(x, outer = [1, length(z)]) #preallocate space to store \n Ys = Array{Float64}(undef, num_pts, length(z)) \n num_per_group = Integer(ceil(length(z)\/num_group))\n for i=1:num_group\n ind_train = [collect(1:(i-1)*num_per_group); collect(i*num_per_group+1:length(z))] #delete the ith group \n println(ind_train) \n for k=1:num_per_group \n cur = (i-1)*num_per_group + k\n println(cur)\n if cur <= length(z)\n X0 = size(X, 2)==1 ? X[1:1, :] : X[cur:cur, :] # set design matrix \n @time begin\n pdf, cdf = model(X[ind_train, :], X0, s[ind_train, :], s[cur:cur, :], boxCox, boxCoxPrime, p\u03b8, p\u03bb, z[ind_train], range_theta, range_lambda) \n constant = cdf(30) #compute unnormalized cdf\n cdfn = x -> cdf(x)\/constant; pdfn = x -> pdf(x)\/constant #compute normalized pdf and cdf\n end\n for j = 1:num_pts\n Ys[j, cur] = pdfn(x[j])\n end\n end\n end\n end\n return Xs, Ys\nend","avg_line_length":39.9032258065,"max_line_length":217,"alphanum_fraction":0.5758555645} {"size":5034,"ext":"jl","lang":"Julia","max_stars_count":3.0,"content":"include(\"simAn.jl\")\n\n\"\"\"\n assemble!(ata_model::AbstractModel; solver=\"jump\", starting_design=Matrix{Float64}(undef, 0, 0), results_folder=\"results\", start_temp=0.1, geom_temp=0.1, n_item_sample=1, n_test_sample=1, opt_feas=0.0, n_fill=1, max_time=1000.00, max_conv=2, feas_nh=0, opt_nh=5, verbosity=2, optimizer_constructor=\"GLPK\", optimizer_attributes=[(\"tm_lim\", 1000)])\n\n# Description\n\nAssemble the tests.\n\n# Arguments\n\n- **`ata_model::AbstractModel`** : Required. The model built with ATA fuctions. \n- **`solver`** : Optional. Default: `\"jump\"`. Values: `\"jump\"`, `\"siman\"`. The solving interface to be used (JuMP or internal solver based on Simulated Annealing).\n- **`starting_design`** : Optional. 
Default: `Matrix{Float64}(undef, 0, 0)`. The starting design matrix. Must be a `Matrix{Float64}`.\n- **`results_folder`** : Optional. Default: `\"results\"`. The folder in which the output is stored.\n\n## siman arguments\n\n  - **`start_temp`** : Optional. Default: `0.1`. Values: `[0, Inf]`. Starting temperature, set to minimum for short journeys (if 0 worse solutions will never be accepted).\n  - **`geom_temp`** : Optional. Default: `0.1`. Values: `[0, Inf)`. Decreasing geometric factor.\n  - **`n_item_sample`** : Optional. Default: `1`. Values: `[1, Inf]`. Number of items to alter. Set to minimum for a shallow analysis, set to maximum for a deep analysis of the neighbourhoods.\n  - **`n_test_sample`** : Optional. Default: `1`. Values: `[1, Inf]`. Number of tests to alter. Set to minimum for a shallow analysis, set to maximum for a deep analysis of the neighbourhoods.\n  - **`opt_feas`** : Optional. Default: `0.0`. Values: `[0, Inf)`. Optimality\/feasibility balancer, if 0 only feasibility of solution is analysed. Vice versa, if 1, only optimality is considered (unconstrained model). All the other values are accepted but produce uninterpretable results.\n  - **`n_fill`** : Optional. Default: `1`. Values: `[0, Inf)`. Number of fill-up phases, usually 1 is sufficient, if start_temp is high it can be higher. If a starting_design is supplied, it can be set to 0.\n  - **`verbosity`** : Optional. Default: `2`. Values: `1` (minimal), `2` (detailed). Verbosity level. In the console '+' stands for improvement, '_' for accepting worse solution. The dots are the fill-up improvement steps.\n  \n  * Termination criteria\n\n  - **`max_time`** : Optional. Default: `1000.0`. Values: `[0, Inf)`. Time limit in seconds.\n  - **`max_conv`** : Optional. Default: `2`. Values: `[1, Inf)`. Maximum convergence, stop when, after max_conv rounds, no improvements have been found. Set to minimum for shallow analysis, increase it for deep analysis of neighbourhoods.\n  - **`feas_nh`** : Optional. Default: `0`. Values: `[1, Inf)`. Maximum number of Feasibility neighbourhoods to explore, set to the minimum if the model is small or not highly constrained.\n  - **`opt_nh`** : Optional. Default: `5`. Values: `[1, Inf)`. Maximum number of Optimality neighbourhoods to explore, set to the minimum if the model is highly constrained.\n\n## jump arguments\n\n  - **`optimizer_constructor`** : Optional. Default: `\"GLPK\"`. Values: `\"GLPK\"`, `\"Knitro\"`, `\"Gurobi\"`, `\"Cbc\"`, `\"CPLEX\"`, `\"Xpress\"`, `\"SCIP\"`, `\"Juniper\"`. JuMP solver selection. Remember to load the required package before assemble!.\n  - **`optimizer_attributes`** : Optional. Default: `[(\"tm_lim\", 1000)]`. Values: An array of pairs `(attribute, value)`. Attributes and related values for the JuMP solver.\n\n## other keyword arguments\n  - **`kwargs...`** : Optional. Any remaining keyword arguments are forwarded to the internal `siman!` or `jump!` call.
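\n\n# Example\n\nA minimal, illustrative call; the keyword values shown here are arbitrary placeholders rather than recommended settings, and `ata_model` stands for a model already built with the ATA functions:\n\n    assemble!(ata_model; solver = \"siman\", max_time = 30.0, opt_feas = 0.1)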
\n\"\"\"\nfunction assemble!(\n ata_model::AbstractModel;\n solver = \"jump\",\n starting_design = Matrix{Float64}(undef, 0, 0),\n results_folder = \"results\",\n start_temp = 0.1,\n geom_temp = 0.1,\n n_item_sample = 1,\n n_test_sample = 1,\n opt_feas = 0.0,\n n_fill = 1,\n max_time = 1000.0,\n max_conv = 2,\n feas_nh = 0,\n opt_nh = 5,\n verbosity = 2,\n optimizer_constructor = \"GLPK\",\n optimizer_attributes = [(\"tm_lim\", 1000)],\n kwargs...,\n)\n _group_by_friends!(ata_model)\n if solver == \"siman\"\n siman!(\n ata_model;\n results_folder = results_folder,\n starting_design = starting_design,\n start_temp = start_temp,\n geom_temp = geom_temp,\n max_time = max_time,\n n_item_sample = n_item_sample,\n n_test_sample = n_test_sample,\n max_conv = max_conv,\n verbosity = verbosity,\n opt_feas = opt_feas,\n n_fill = n_fill,\n feas_nh = feas_nh,\n opt_nh = opt_nh,\n kwargs...,\n )\n elseif solver == \"jump\"\n jump!(\n ata_model;\n results_folder = results_folder,\n starting_design = starting_design,\n optimizer_constructor = optimizer_constructor,\n optimizer_attributes = optimizer_attributes,\n kwargs...,\n )\n else\n error!(ata_model, \"only \\\"siman\\\" and \\\"jump\\\" are supported.\")\n end\n return nothing\nend\n","avg_line_length":52.9894736842,"max_line_length":350,"alphanum_fraction":0.6551450139} {"size":12120,"ext":"jl","lang":"Julia","max_stars_count":122.0,"content":"# new Sandshark example\n# add more julia processes\n# nprocs() < 7 ? addprocs(7-nprocs()) : nothing\n\nusing Caesar, RoME #, KernelDensityEstimate, IncrementalInference\nusing Interpolations\nusing Distributions\nusing DelimitedFiles\n\nusing RoMEPlotting\nusing Gadfly, DataFrames\nusing ProgressMeter\n\nconst TU = TransformUtils\n\nGadfly.set_default_plot_size(35cm,25cm)\n\ninclude(joinpath(@__DIR__,\"Plotting.jl\"))\ninclude(joinpath(@__DIR__,\"SandsharkUtils.jl\"))\n\n\n# Step: Selecting a subset for processing and build up a cache of the factors.\nepochs = timestamps[50:2:100]\nlastepoch = 0\nfor ep in epochs\n global lastepoch\n if lastepoch != 0\n # @show interp_yaw(ep)\n deltaAng = interp_yaw(ep) - interp_yaw(lastepoch)\n\n wXi = TU.SE2([interp_x(lastepoch);interp_y(lastepoch);interp_yaw(lastepoch)])\n wXj = TU.SE2([interp_x(ep);interp_y(ep);interp_yaw(ep)])\n iDXj = se2vee(wXi\\wXj)\n NAV[ep] = iDXj\n # println(\"$(iDXj[1]), $(iDXj[2]), $(iDXj[3])\")\n\n odoDict[ep] = Pose2Pose2(MvNormal(NAV[ep], Matrix(Diagonal([0.1;0.1;0.005].^2))))\n end\n rangepts = rangedata[ep][:]\n rangeprob = kde!(rangepts)\n azipts = azidata[ep][:,1]\n aziprob = kde!(azipts)\n\n # prep the factor functions\n ppbrDict[ep] = Pose2Point2BearingRange(aziprob, rangeprob)\n lastepoch = ep\nend\n\n\n## Step: Building the factor graph\nfg = initfg()\n# Add a central beacon with a prior\naddVariable!(fg, :l1, Point2)\n# Pinger location is (0.6; -16)\naddFactor!(fg, [:l1], PriorPose2( MvNormal([0.6; -16], Matrix(Diagonal([0.1; 0.1].^2)) ) ), autoinit=false)\n\nindex = 0\nfor ep in epochs\n global index\n curvar = Symbol(\"x$index\")\n addVariable!(fg, curvar, Pose2)\n\n # xi -> l1 - nonparametric factor\n # addFactor!(fg, [curvar; :l1], ppbrDict[ep], autoinit=false)\n\n if ep != epochs[1]\n # Odo factor x(i-1) -> xi\n addFactor!(fg, [Symbol(\"x$(index-1)\"); curvar], odoDict[ep], autoinit=false)\n else\n # Prior to the first pose location (a \"GPS\" prior)\n initLoc = [interp_x(ep);interp_y(ep);interp_yaw(ep)]\n println(\"Adding a prior at $curvar, $initLoc\")\n addFactor!(fg, [curvar], PriorPose2( MvNormal(initLoc, 
Matrix(Diagonal([0.1;0.1;0.05].^2))) ), autoinit=false)\n end\n # Heading partial prior\n # addFactor!(fg, [curvar], RoME.PartialPriorYawPose2(Normal(interp_yaw(ep), deg2rad(3))), autoinit=false)\n index+=1\nend\n\n# Just adding the first one...\naddFactor!(fg, [:x0; :l1], ppbrDict[epochs[1]], autoinit=false)\n\naddFactor!(fg, [:x5; :l1], ppbrDict[epochs[6]], autoinit=false)\n\naddFactor!(fg, [:x10; :l1], ppbrDict[epochs[11]], autoinit=false)\n\naddFactor!(fg, [:x13; :l1], ppbrDict[epochs[14]], autoinit=false)\n\naddFactor!(fg, [:x15; :l1], ppbrDict[epochs[16]], autoinit=false)\n\naddFactor!(fg, [:x17; :l1], ppbrDict[epochs[18]], autoinit=false)\naddFactor!(fg, [:x18; :l1], ppbrDict[epochs[19]], autoinit=false)\naddFactor!(fg, [:x19; :l1], ppbrDict[epochs[20]], autoinit=false)\naddFactor!(fg, [:x20; :l1], ppbrDict[epochs[21]], autoinit=false)\naddFactor!(fg, [:x21; :l1], ppbrDict[epochs[22]] , autoinit=false) # breaks it, autoinit=false!\naddFactor!(fg, [:x22; :l1], ppbrDict[epochs[23]], autoinit=false)\naddFactor!(fg, [:x23; :l1], ppbrDict[epochs[24]], autoinit=false)\naddFactor!(fg, [:x24; :l1], ppbrDict[epochs[25]], autoinit=false)\naddFactor!(fg, [:x25; :l1], ppbrDict[epochs[26]], autoinit=false)\n\n\ngetSolverParams(fg).drawtree = true\ngetSolverParams(fg).showtree = true\n\ntree = solveTree!(fg, recordcliqs=ls(fg))\n\n\n\nplotKDE(ppbrDict[epochs[26]].bearing)\nplotKDE(ppbrDict[epochs[26]].range)\n\nplotKDE(ppbrDict[epochs[23]].bearing)\nplotKDE(ppbrDict[epochs[23]].range)\n\n\nplotKDE([ppbrDict[epochs[21]].bearing; ppbrDict[epochs[22]].bearing; ppbrDict[epochs[23]].bearing], c=[\"red\";\"black\";\"green\"])\nplotKDE([ppbrDict[epochs[21]].range; ppbrDict[epochs[22]].range; ppbrDict[epochs[23]].range], c=[\"red\";\"black\";\"green\"])\n\n\nwriteGraphPdf(fg, engine=\"dot\")\n\ntree = solveTree!(fg) #, tree) # for incremental solution\n\ndrawPosesLandms(fg)\n\n\n# IIF.wipeBuildNewTree!(fg, drawpdf=true)\n# run(`evince bt.pdf`)\n# run(`evince \/tmp\/caesar\/bt.pdf`)\n\n\nendDogLeg = [interp_x[epochs[end]]; interp_y[epochs[end]]]\nestDogLeg = [get2DPoseMeans(fg)[1][end]; get2DPoseMeans(fg)[2][end]]\nendDogLeg - estDogLeg\n\ndrawPosesLandms(fg)\n\n\n\nls(fg, :x25)\n\n#To Boldly Believe... The Good, the Bad, and the Unbeliefable\nX25 = getBelief(fg, :x25)\n\n# i\npts, = predictbelief(fg, :x21, [:x20x21f1; :x21l1f1])\nplotKDE([kde!(pts);X25], dims=[1;2], levels=1, c=[\"red\";\"green\"])\n\n\npts, = predictbelief(fg, :x25, :)\nplotKDE([kde!(pts);X25], dims=[1;2], levels=1, c=[\"red\";\"green\"])\nplotKDE([kde!(pts);X25], dims=[3], levels=1, c=[\"red\";\"green\"])\n\n\n# Solvery! 
Roll dice for solvery check\n# writeGraphPdf(fg)\n# ensureAllInitialized!(fg)\nt = string(now())\nsavejld(fg, file=\"presolve_$t.jld\")\nsolveTree!(fg) #, N=100\nsavejld(fg, file=\"postsolve_$t.jld\")\n\n# pl = drawPoses(fg, spscale=2.75) # Just for odo plot\n# Roll again for inspiration check\n## PLOT BEAM PATTERNS\nGadfly.push_theme(:default)\npla = drawPosesLandmarksAndOdo(fg, ppbrDict, navkeys, X, Y, lblkeys, lblX, lblY)\nGadfly.draw(PDF(\"sandshark-beacon_$t.pdf\", 12cm, 15cm), pla)\nGadfly.draw(PNG(\"sandshark-beacon_$t.png\", 12cm, 15cm), pla)\n\n\n\n\n## resolve funky 21\n\n\n# shows all the proposals based on the clique in the tree -- similar to plotLocalProduct on factor graph\nplotTreeProduct(fg, tree, :x25)\n#\n# using IncrementalInference\n#\n#\n# import RoMEPlotting: spyCliqMat\n#\n# function spyCliqMat(cliq::Graphs.ExVertex; showmsg=true)\n# mat = deepcopy(IIF.getCliqMat(cliq, showmsg=showmsg))\n# # TODO -- add improved visualization here, iter vs skip\n# mat = map(Float64, mat)*2.0-1.0\n# numlcl = size(IIF.getCliqAssocMat(cliq),1)\n# mat[(numlcl+1):end,:] *= 0.9\n# mat[(numlcl+1):end,:] -= 0.1\n# numfrtl1 = floor(Int,length(cliq.attributes[\"data\"].frontalIDs)+1)\n# mat[:,numfrtl1:end] *= 0.9\n# mat[:,numfrtl1:end] -= 0.1\n# @show cliq.attributes[\"data\"].itervarIDs\n# @show cliq.attributes[\"data\"].directvarIDs\n# @show cliq.attributes[\"data\"].msgskipIDs\n# @show cliq.attributes[\"data\"].directFrtlMsgIDs\n# @show cliq.attributes[\"data\"].directPriorMsgIDs\n# sp = Gadfly.spy(mat)\n# push!(sp.guides, Gadfly.Guide.title(\"$(cliq.attributes[\"label\"]) || $(cliq.attributes[\"data\"].frontalIDs) :$(cliq.attributes[\"data\"].separatorIDs)\"))\n# push!(sp.guides, Gadfly.Guide.xlabel(\"fmcmcs $(cliq.attributes[\"data\"].itervarIDs)\"))\n# push!(sp.guides, Gadfly.Guide.ylabel(\"lcl=$(numlcl) || msg=$(size(IIF.getCliqMsgMat(cliq),1))\" ))\n# return sp\n# end\n# function spyCliqMat(bt::BayesTree, lbl::Symbol; showmsg=true)\n# spyCliqMat(IIF.getClique(bt,lbl), showmsg=showmsg)\n# end\n#\n\n\n\ntree = wipeBuildNewTree!(fg, drawpdf=true, show=true)\n\n\nimport IncrementalInference: getCliqMat\n\nsym = :x25\n# get clique sym is in\ngetClique(tree, sym).attributes[\"label\"]\nspyCliqMat(tree, sym)\n\n# get all variables in clique\nsyms = union(getCliqSymbols(tree, sym)...)\nvarnum = findfirst(syms, sym)\nwhichpot = getData(getClique(tree, sym)).cliqAssocMat[:,varnum]\n\n# get factor ids\nfids = getData(getClique(tree, sym)).potentials[whichpot]\n\n# get all factors in clique\nfsyms = Symbol[]\nfor factor in getVert.(fg, fids)\n push!(fsyms, Symbol(factor.label))\nend\n\n# get KDEs for the factors\npp = kde!.(approxConv.(fg, fsyms, sym))\n\n# plot the actual KDE\nplotKDE(pp, dims=[1;2], levels=2)\n\nplotKDE(pp, dims=[1;2], levels=2, legend=string.(fsyms))\n\n\n# Colors should not appear more than once\n# in at base\/\n# in #plotKDE#13 at KernelDensityEstimatePlotting\/src\/KernelDensityEstimatePlotting.jl:299\n# in Gadfly.Guide.ManualColorKey at Gadfly\/src\/guide.jl:501\n\nls(fg, :x25)\n\n\ngetData(fg, :x25f1, nt=:fnc).fnc.usrfnc!\n\n\n\ngetData(getClique(tree, sym))\n\n\n\n0\n\n\n\n\n\n\n\n\n\n\n\n\n\n#### DEBUG PPBRDict ====================\n\nLL = [0.6; -16; 0]\nfunction expectedLocalBearing(curPose, LL)\n v = [LL[1] - curPose[1]; LL[2] - curPose[2]]\n world = atan2(v[2], v[1])\n loc = rad2deg(TU.wrapRad(world - curPose[3]))\n while loc < 0\n loc += 360\n end\n return loc\nend\n# println(\"$(interp_x[ep]), $(interp_y[ep]), world yaw = $(interp_yaw[ep] * 180 \/ pi)\")\n# XX = 
[interp_x(ep); interp_y[ep]; interp_yaw[ep]]\n# expectedLocalBearing(XX, LL)\n# println(\"Expected local yaw = $(expectedLocalBearing(XX, LL)). Sonar local yaw = $(rad2deg(getKDEMax(ppbr.bearing)[1]))\")\n# xyt = se2vee(SE2(XX[1:3]) \\ SE2(LL))\n# bear = rad2deg(TU.wrapRad(atan2(xyt[2],xyt[1]) -XX[3]))\n\nbearExp = []\nbearAcoustics = []\nfor ep in epochs\n XX = [interp_x(ep); interp_y[ep]; interp_yaw[ep]]\n ppbr = ppbrDict[ep]\n push!(bearExp, expectedLocalBearing(XX, LL))\n push!(bearAcoustics, rad2deg(getKDEMax(ppbr.bearing)[1]))\n println(\"local beacon heading: Expected = $(expectedLocalYaw(XX, LL)). Sonar = $(rad2deg(getKDEMax(ppbr.bearing)[1]))\")\nend\n\nlayers = []\npush!(layers, Gadfly.layer(x=1:length(bearExp), y=bearExp, Geom.path())[1], Theme(default_color=color(\"red\")))\npush!(layers, Gadfly.layer(x=1:length(bearAcoustics), y=bearAcoustics, Geom.path(), Theme(default_color=color(\"blue\"))))\nGadfly.plot(layers...)\n# end\n\n####################################\n\n\n\n######################################################\n\n# pl = drawPoses(fg, spscale=2.75)\n# drawPosesLandms(fg, spscale=0.75) #Means so we don't run into MM == Union() || Dict{} in\n# You rolled 20!\n\n# Roll again for debug check\n## Debugging strange headings\n# posePts = get2DPoseMeans(fg)\n# landPts = get2DLandmMeans(fg)\n## Debugging landmark bearing range\n\n# ppbrDict[epoch_slice[1]]\n# getSample(ppbrDict[epoch_slice[1]],100)\n#\n#\n# addVariable!(fg, :l1, Point2)\n# addFactor!(fg, [:x0; :l1], ppbrDict[epoch_slice[1]], autoinit=false)\n#\n# ls(fg, :l1)\n# pts = IIF.approxConv(fg, :x0l1f1, :l1)\n#\n# fct = getData(getVert(fg, :x0l1f1, nt=:fnc))\n#\n# fct.fnc.zDim\n\n# dev\n# ep = epochs[1]\n# # azidata[ep][:,1]\n# pll = layerBeamPatternRose(ppbrDict[ep].bearing)\n# Gadfly.plot(pll...)\n#\n# pll = layerBeamPatternRose(ppbrDict[ep].bearing, wRr=TU.R(pi\/2))\n# Gadfly.plot(pll...)\n\n#\n# diff(epochs[[end;1]])\n\n\n## GADFLY EXAMPLE\n# plotLocalProduct(fg, :x49, dims=[1;2])\n# stuff = IncrementalInference.localProduct(fg, :x49)\n# plotKDE(stuff[2], dims=[1;2], levels=10)\n#\n# # STUFF\n# fsym = :x49l1f1\n# const TU = TransformUtils\n# XX, LL = (KDE.getKDEMax.(IIF.getBelief.(fg, IIF.lsf(fg, fsym)))...)\n# @show xyt = se2vee(SE2(XX[1:3]) \\ SE2([LL[1:2];0.0]))\n# bear= rad2deg(TU.wrapRad(atan2(-xyt[2],xyt[1]) -XX[3]))\n# b = IncrementalInference.getData(fg, fsym, nt=:fnc).fnc.usrfnc!\n# b.bearing\n# p = plotKDE(b.range)\n# Gadfly.draw(PDF(\"multimodal_bearing_range.pdf\", 12cm, 15cm), p)\n#\n# # SAVE THIS PLOT\n# # epochs = timestamps[11:2:200]\n# g = getBelief.(fg, [:l1, :x49])\n# m1 = KDE.marginal(g[1], [1;2])\n# m2 = KDE.marginal(g[2], [1;2])\n# norm(diff(KDE.getKDEMax.([m1; m2])))\n#\n# kde!(rand(Normal(4.0, 0.1),100))\n#\n# ###############\n#\n# PL = []\n#\n# push!(PL, Gadfly.layer(x=collect(1:10), y=randn(10), Geom.path, Theme(default_color=colorant\"red\"))[1])\n# push!(PL, Gadfly.layer(x=collect(1:10), y=randn(10), Geom.path, Theme(default_color=colorant\"green\"))[1])\n# push!(PL, Coord.Cartesian(xmin=-1.0,xmax=11, ymin=-5.0, ymax=5.0))\n# push!(PL, Guide.ylabel(\"test\"))\n# push!(PL, Guide.ylabel(\"test\"))\n# pl = drawPoses(fg)\n# push!(pl.layers, )\n#\n# pl = drawPoses(fg)\n# pl = Gadfly.plot(pl.layers...)\n#\n#\n#\n# pl = Gadfly.plot(PL...)\n#\n# @show fieldnames(pl)\n# push!(pl.layers, Gadfly.layer(x=collect(1:10), y=randn(10), Geom.path, Theme(default_color=colorant\"magenta\"))[1] )\n#\n# pl\n# # vstasck, hstack\n#\n#\n# Gadfly.push_theme(:default)\n# # SVG, PDF, PNG\n# 
Gadfly.draw(PDF(\"\/tmp\/testfig.pdf\", 12cm, 8cm), pl)\n# run(`evince \/tmp\/testfig.pdf`)\n#\n#\n# ### PATH + Poses\n#\n# pl = drawPosesLandms(fg, spscale=2.5)\n# # Add odo\n# navdf = DataFrame(\n# ts = navkeys,\n# x = X,\n# y = Y\n# )\n# # pl = Gadfly.layer(navdf, x=:x, y=:y, Geom.path())\n# push!(pl.layers, Gadfly.layer(navdf, x=:x, y=:y, Geom.path())[1])\n# Gadfly.plot(pl.layers)\n\nfsym = :x49l1f1\nconst TU = TransformUtils\nXX, LL = (KDE.getKDEMax.(IIF.getBelief.(fg, IIF.lsf(fg, fsym)))...)\n@show xyt = se2vee(SE2(XX[1:3]) \\ SE2([LL[1:2];0.0]))\nbear= rad2deg(TU.wrapRad(atan2(-xyt[2],xyt[1]) -XX[3]))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n","avg_line_length":27.2359550562,"max_line_length":153,"alphanum_fraction":0.6646864686} {"size":1782,"ext":"jl","lang":"Julia","max_stars_count":3.0,"content":"module Unfold\n\nusing PyMNE\nusing SparseArrays\nusing StatsModels\nusing StatsBase\nusing IterativeSolvers\nusing DataFrames\nusing MixedModels\nusing Missings\nusing StatsBase\nusing LinearAlgebra\nusing Tables # not sure we need it\nusing GLM # not sure we need it\nimport MixedModels.FeMat # extended for sparse femats, type piracy => issue on MixedModels.jl github\nusing TimerOutputs # debugging fitting times etc. not strictly needed\nusing DSP\nusing StatsModels\nusing StaticArrays # for MixedModels extraction of parametrs (inherited from MixedModels.jl, not strictly needed )\nusing ProgressMeter\nusing DocStringExtensions # for Docu\nusing MLBase # for crossVal\nusing BSplines # for spline predictors\n#using CategoricalArrays\nimport StatsBase: fit\nimport StatsBase: coef\nimport StatsBase: fit!\nimport StatsBase: coefnames\nimport StatsBase: modelmatrix\n#using IncompleteLU\nimport Base.(+) # overwrite for DesignMatrices\nusing Distributions: Gamma, pdf # TODO replace this with direct implementation (used in basisfunction.jl)\n\ninclude(\"linearmodels.jl\")\ninclude(\"basisfunctions.jl\")\ninclude(\"designmatrix.jl\")\ninclude(\"fit.jl\")\ninclude(\"utilities.jl\")\ninclude(\"condense.jl\")\ninclude(\"solver.jl\")\ninclude(\"predict.jl\")\ninclude(\"splinepredictors.jl\")\ninclude(\"clusterpermutation.jl\")\n#include(\"plot.jl\") # don't include for now\nexport fit, fit!, designmatrix!\nexport firbasis, hrfbasis, condense_long\nexport UnfoldLinearModel,\n UnfoldLinearMixedModel,\n UnfoldModel,\n UnfoldLinearMixedModelContinuousTime,\n UnfoldLinearModelContinuousTime\nexport FIRBasis, HRFBasis, SplineBasis\nexport modelmatrix\nexport formula, design, designmatrix, coef\nexport coeftable\nexport unfoldfit # might be renamend to fit! 
in the future\nexport predict, spl\nexport cluster_permutation_test\nend # module\n","avg_line_length":29.7,"max_line_length":114,"alphanum_fraction":0.8136924804} {"size":1731,"ext":"jl","lang":"Julia","max_stars_count":10.0,"content":"function search_db(str::AbstractString)\n strs = split(str)\n\n N = length(strs)\n elems = String[]\n for i in 1:N\n str = strs[i]\n try\n PeriodicTable.elements[Symbol(str)]\n catch\n continue\n end\n push!(elems, str)\n end\n kwrds = filter(str -> !in(str, elems), strs)\n\n elems = join(elems, ',')\n res = HTTP.get(\"http:\/\/avdwgroup.engin.brown.edu\/getdbid.php?element=\" * elems)\n dicts = try\n JSON.parse(String(res.body))\n catch\n Dict[]\n end\n\n for kwrd in kwrds\n dicts = filter(dict -> occursin(kwrd, string(dict)), dicts)\n end\n\n if isempty(dicts)\n println(\"No database found\")\n return nothing\n end\n\n N = length(dicts)\n for i in 1:N\n print(\"[$i] \")\n dict = dicts[i]\n\n auth = get(dict, \"authoryear\", \"\")\n print(auth * \" \")\n\n elems = get(dict, \"element\", \"\")\n elems = join(elems, ',')\n print(elems * \" \")\n\n print('\\n')\n end\n\n if length(dicts) > 1\n print(\"Select a database to read: \")\n str = readline()\n isempty(str) && return nothing\n i = parse(Int, str)\n else\n i = 1\n end\n\n if i > length(dicts)\n println(\"\\nInvalid index: $i\")\n return nothing\n end\n\n url = get(dicts[i], \"tdburl\", \"\")\n file = HTTP.download(url)\n\n if endswith(file, \".zip\")\n archive = ZipFile.Reader(file)\n @assert length(archive.files) == 1\n io = archive.files[1]\n db = read_tdb(io)\n close(archive)\n elseif endswith(file, \".tdb\")\n read_tdb(file)\n else\n @error \"Unsupported filetype: $url\"\n end\n\n Base.Filesystem.rm(file)\n\n return db\nend\n\n","avg_line_length":20.8554216867,"max_line_length":83,"alphanum_fraction":0.5228191797} {"size":238,"ext":"jl","lang":"Julia","max_stars_count":9.0,"content":"using Base.Test, SDDiP, JuMP, GLPKMathProgInterface, Ipopt\n\n@testset \"Lagrangian\" begin\n include(\"Lagrangian.jl\")\nend\n\n@testset \"SDDiP\" begin\n include(\"SDDiP.jl\")\nend\n\n@testset \"@binarystate\" begin\n include(\"binarystate.jl\")\nend\n","avg_line_length":17.0,"max_line_length":58,"alphanum_fraction":0.7226890756} {"size":10061,"ext":"jl","lang":"Julia","max_stars_count":7.0,"content":"\"\"\"\nHalf pipe of aluminum, with an open through crack.\n\nThe pipe corner is excited by a concentrated force in the form of a\nHann-windowed sinusoidal pulse.\n\nThe crack configuration is not exactly the same one as in the book.\nIt is not described unambiguously in the book.\n\nReference:\nOstachowicz W, Kudela P, Krawczuk M, Zak A. Guided Waves in Structures for SHM: The Time - domain Spectral Element Method. 
A John Wiley & Sons, Ltd., 2011.\n\"\"\"\nmodule cracked_half_pipe_examples\n\nusing LinearAlgebra\nusing SparseArrays\nusing Arpack\nusing FinEtools\nusing FinEtoolsDeforLinear\nusing FinEtoolsFlexStructures\nusing FinEtoolsFlexStructures.FESetShellT3Module: FESetShellT3\nusing FinEtoolsFlexStructures.AssemblyModule\nusing FinEtoolsFlexStructures.FEMMShellT3FFModule\nusing FinEtoolsFlexStructures.RotUtilModule: initial_Rfield, update_rotation_field!\nusing SymRCM\nusing VisualStructures: default_layout_3d, plot_nodes, plot_midline, render, plot_space_box, plot_midsurface, space_aspectratio, save_to_json\nusing PlotlyJS\nusing Gnuplot; # @gp \"clear\"\nusing FinEtools.MeshExportModule.VTKWrite: vtkwritecollection\nusing ThreadedSparseCSR\nusing UnicodePlots\nusing InteractiveUtils\nusing BenchmarkTools\nusing FinEtools.MeshExportModule.VTKWrite: vtkwritecollection, vtkwrite\n\n\nconst E = 72.7*phun(\"GPa\");\nconst nu = 0.33;\nconst rho = 2700.0*phun(\"kg\/m^3\")\nconst d1 = pi*phun(\"rad\")\nconst d2 = 6.0\/180*pi*phun(\"rad\")\nconst d5 = (180-40.0)\/180*pi*phun(\"rad\") - d2\/2\nconst d6 = d1 - d5 - d2\nconst nd5 = 19\nconst nd2 = 2\nconst nd6 = 2\nconst nd3 = 25\nconst nd4 = 25\nconst halfcirc = 500*phun(\"mm\")\nconst radius = halfcirc \/ pi\nconst d3 = 1000*phun(\"mm\")\nconst d4 = d3\/2\nconst thickness = 10*phun(\"mm\")\nconst ksi = 0.0\nconst omegad = 1000*phun(\"Hz\")\nconst carrier_frequency = 75*phun(\"kilo*Hz\")\nconst modulation_frequency = carrier_frequency\/4\nconst totalforce = 1*phun(\"N\")\nconst forcepatchradius = 3*phun(\"mm\")\nconst forcedensity = totalforce\/(pi*forcepatchradius^2)\nconst distributedforce = !true\nconst tend = 0.5*phun(\"milli*s\")\n\nconst visualize = true\nconst visualizeclear = true\nconst visualizevtk = !true\nconst color = \"black\"\n\nfunction _pwr(K, M, maxit = 30, rtol = 1\/10000)\n invM = fill(0.0, size(M, 1))\n invM .= 1.0 .\/ (vec(diag(M)))\n v = rand(size(M, 1))\n w = fill(0.0, size(M, 1))\n everyn = Int(round(maxit \/ 50)) + 1\n lambda = lambdap = 0.0\n for i in 1:maxit\n ThreadedSparseCSR.bmul!(w, K, v)\n wn = norm(w)\n w .*= (1.0\/wn)\n v .= invM .* w\n vn = norm(v)\n v .*= (1.0\/vn)\n if i % everyn == 0\n lambda = sqrt((v' * (K * v)) \/ (v' * M * v))\n # @show i, abs(lambda - lambdap) \/ lambda\n if abs(lambda - lambdap) \/ lambda < rtol\n break\n end\n lambdap = lambda\n end\n end\n return lambda\nend\n\nfunction _cd_loop!(M, K, ksi, U0, V0, tend, dt, force!, peek)\n # Central difference integration loop, for mass-proportional Rayleigh damping.\n U = similar(U0)\n V = similar(U0)\n A = similar(U0)\n F = similar(U0)\n E = similar(U0)\n C = similar(U0)\n C .= (ksi*2*omegad) .* vec(diag(M))\n invMC = similar(U0)\n invMC .= 1.0 .\/ (vec(diag(M)) .+ (dt\/2) .* C)\n nsteps = Int64(round(tend\/dt))\n if nsteps*dt < tend\n dt = tend \/ (nsteps+1)\n end\n dt2_2 = ((dt^2)\/2)\n dt_2 = (dt\/2)\n\n t = 0.0\n @. U = U0; @. V = V0; # Initial Conditions\n A .= invMC .* force!(F, t); # Compute initial acceleration\n peek(0, U, V, t)\n for step in 1:nsteps\n t = t + dt\n @. U += dt*V + dt2_2*A; # Displacement update\n force!(F, t); # External forces are computed\n ThreadedSparseCSR.bmul!(E, K, U) # Evaluate elastic restoring forces\n @. F -= E + C * (V + dt_2 * A) # Calculate total force\n @. V += dt_2 * A; # Update the velocity, part one: add old acceleration\n @. A = invMC * F # Compute the acceleration\n @. 
V += dt_2 * A; # Update the velocity, part two: add new acceleration\n peek(step, U, V, t)\n end\nend\n\nfunction _execute(nref = 2, nthr = 0, color = \"red\")\n tolerance = min(d2\/nd2, d3\/nd3, d4\/nd4, d5\/nd5)\/nref\/10\n\n xs = sort(unique(vcat(\n linearspace(0.0, d5, nd5), \n linearspace(d5, d5+d2, nd2), \n linearspace(d5+d2, d1, nd6)\n )))\n ys = collect(linearspace(0.0, d4, nd4))\n \n fens1, fes1 = T3blockx(xs, ys); # Mesh\n for r in 1:nref\n fens1, fes1 = T3refine(fens1, fes1)\n end\n \n ys = collect(linearspace(d4, d3, nd3))\n \n fens2, fes2 = T3blockx(xs, ys); # Mesh\n for r in 1:nref\n fens2, fes2 = T3refine(fens2, fes2)\n end\n \n fens, fes1, fes2 = mergemeshes(fens1, fes1, fens2, fes2, 0.0)\n fes = cat(fes1, fes2)\n \n offset = min(d2 \/ nd2 \/ nref \/ 10, 4*tolerance)\n l1 = selectnode(fens; box = [0 d5+offset d4 d4], inflate = tolerance)\n l2 = selectnode(fens; box = [d5+d2-offset d1 d4 d4], inflate = tolerance)\n la = selectnode(fens; box = [-Inf Inf d4 d4], inflate = tolerance)\n\n \n candidates = vcat(l1, l2)\n fens, fes = mergenodes(fens, fes, tolerance, candidates)\n bfes = meshboundary(fes)\n @info \"Mesh $(count(fens)) nodes, $(count(fes)) elements\"\n\n fens.xyz = xyz3(fens)\n\n for i in 1:count(fens)\n a, y = fens.xyz[i, 1:2]\n fens.xyz[i, :] .= cos(pi-a)*radius, y, sin(pi-a)*radius\n end\n\n vtkwrite(\"cracked_half_pipe.vtu\", fens, fes)\n vtkwrite(\"cracked_half_pipe-boundary.vtu\", fens, bfes)\n\n # Renumber the nodes\n femm = FEMMBase(IntegDomain(fes, TriRule(1)))\n C = connectionmatrix(femm, count(fens))\n perm = symrcm(C)\n \n mater = MatDeforElastIso(DeforModelRed3D, rho, E, nu, 0.0)\n \n sfes = FESetShellT3()\n accepttodelegate(fes, sfes)\n femm = FEMMShellT3FFModule.make(IntegDomain(fes, TriRule(1), thickness), mater)\n # Set up\n femm.drilling_stiffness_scale = 1.0\n\n # Construct the requisite fields, geometry and displacement\n # Initialize configuration variables\n geom0 = NodalField(fens.xyz)\n u0 = NodalField(zeros(size(fens.xyz,1), 3))\n Rfield0 = initial_Rfield(fens)\n dchi = NodalField(zeros(size(fens.xyz,1), 6))\n\n # No EBC's\n applyebc!(dchi)\n numberdofs!(dchi, perm);\n # numberdofs!(dchi);\n\n # Assemble the system matrix\n FEMMShellT3FFModule.associategeometry!(femm, geom0)\n SM = FinEtoolsFlexStructures.AssemblyModule\n K = FEMMShellT3FFModule.stiffness(femm, SM.SysmatAssemblerSparseCSRSymm(0.0), geom0, u0, Rfield0, dchi);\n M = FEMMShellT3FFModule.mass(femm, SysmatAssemblerSparseDiag(), geom0, dchi);\n \n omega_max = _pwr(K, M, Int(round(count(fens) \/ 1000)))\n omega_max = max(omega_max, 20*2*pi*carrier_frequency)\n @show dt = Float64(0.99 * 2\/omega_max)\n\n U0 = gathersysvec(dchi)\n V0 = deepcopy(U0)\n \n mpoint = selectnode(fens; nearestto=[radius 0 0.0])[1]\n cpoint = selectnode(fens; nearestto=[radius d3 0])[1]\n \n mpointdof1 = dchi.dofnums[mpoint, 1]\n mpointdof3 = dchi.dofnums[mpoint, 3]\n cpointdof = dchi.dofnums[cpoint, 3]\n \n # Four cycles of the carrier frequency\n\n function computetrac!(forceout::FFltVec, XYZ::FFltMat, tangents::FFltMat, fe_label::FInt)\n dx = XYZ[1] - fens.xyz[mpoint, 1]\n dy = XYZ[2] - fens.xyz[mpoint, 2]\n dz = XYZ[3] - fens.xyz[mpoint, 3]\n d = (dx^2+dy^2+dz^2)\/forcepatchradius\n forceout[1:3] .= 0.0\n if d < 1.0\n forceout[1] = forcedensity*exp(-20*d^2\/forcepatchradius^2)\n end\n forceout[4:6] .= 0.0\n return forceout\n end\n\n if distributedforce\n lfemm = FEMMBase(IntegDomain(fes, TriRule(6)))\n fi = ForceIntensity(FFlt, 6, computetrac!);\n Fmag = distribloads(lfemm, geom0, dchi, fi, 2);\n else\n 
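# Concentrated-load branch: apply the excitation as a point force on the drive-point DOF instead of the distributed patch above.\n        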
Fmag = fill(0.0, dchi.nfreedofs)\n Fmag[mpointdof1] = -totalforce\/4 # only a quarter of the plate is modeled\n end\n\n function force!(F, t)\n mul = 0.0\n if t <= 1\/modulation_frequency\n mul = 0.5 * (1 - cos(2*pi*modulation_frequency*t)) * sin(2*pi*carrier_frequency*t)\n end\n F .= mul .* Fmag\n return F\n end\n\n nsteps = Int(round(tend\/dt))\n cdeflections = fill(0.0, nsteps+1)\n mdeflections = fill(0.0, nsteps+1)\n displacements = []\n nbtw = Int(round(nsteps\/100))\n\n\n peek(step, U, V, t) = begin\n cdeflections[step+1] = U[cpointdof]\n mdeflections[step+1] = U[mpointdof1]\n if rem(step+1, nbtw) == 0\n push!(displacements, deepcopy(U))\n end\n nothing\n end\n\n @info \"$nsteps steps\"\n @time _cd_loop!(M, K, ksi, U0, V0, nsteps*dt, dt, force!, peek)\n \n if visualize\n # @gp \"set terminal windows 0 \" :-\n # @gp \"clear\"\n @gp :- collect(0.0:dt:(nsteps*dt)) cdeflections \" lw 2 lc rgb '$color' with lines title 'Deflection at the center' \" :-\n @gp :- collect(0.0:dt:(nsteps*dt)) mdeflections \" lw 4 lc rgb '$color' with lines title 'Deflection at the source' \" :-\n @gp :- \"set xlabel 'Time'\" :-\n @gp :- \"set ylabel 'Deflection'\" :-\n @gp :- \"set title 'Free-floating plate'\"\n\n\n # Visualization\n @info \"Dumping visualization\"\n times = Float64[]\n vectors = []\n for i in 1:length(displacements)\n scattersysvec!(dchi, displacements[i])\n push!(vectors, (\"U\", deepcopy(dchi.values[:, 1:3])))\n push!(times, i*dt*nbtw)\n end\n vtkwritecollection(\"cracked_half_pipe_$nref\", fens, fes, times; vectors = vectors)\n end\nend\n\nfunction test(nrefs = [4], nthr = 0, color = \"red\")\n @info \"Cracked half pipe, nrefs = $nrefs: parallel CSR\"\n for nref in nrefs\n _execute(nref, nthr, color)\n end\n return true\nend\n\nfunction allrun(nrefs = [4], nthr = 0, color = \"red\")\n println(\"#####################################################\")\n println(\"# test \")\n test(nrefs, nthr, color)\n return true\nend # function allrun\n\n@info \"All examples may be executed with \"\nprintln(\"using .$(@__MODULE__); $(@__MODULE__).allrun()\")\n\nend # module\nnothing\n","avg_line_length":31.8386075949,"max_line_length":155,"alphanum_fraction":0.619322135} {"size":3601,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This file contans the common interface for LightGraphs.\n\n_NI(m) = error(\"Not implemented: $m\")\n\n\"\"\"\n AbstractEdge\n\nAn absract type representing a single edge between two vertices of a graph.\n\"\"\"\nabstract type AbstractEdge end\n\n\"\"\"\n AbstractEdgeIter\n\nAn abstract type representing an edge iterator.\n\"\"\"\nabstract type AbstractEdgeIter end\n\n\"\"\"\n AbstractGraph\n\nAn abstract type representing a graph.\n\"\"\"\nabstract type AbstractGraph end\n\n\n@traitdef IsDirected{G<:AbstractGraph}\n@traitimpl IsDirected{G} <- is_directed(G)\n\n\n#\n# Interface for AbstractEdges\n#\n\n\"\"\"\n src(e)\n\nReturn the source vertex of edge `e`.\n\"\"\"\nsrc(e::AbstractEdge) = _NI(\"src\")\n\n\"\"\"\n dst(e)\n\nReturn the destination vertex of edge `e`.\n\"\"\"\ndst(e::AbstractEdge) = _NI(\"dst\")\n\nPair(e::AbstractEdge) = _NI(\"Pair\")\nTuple(e::AbstractEdge) = _NI(\"Tuple\")\n\n\"\"\"\n reverse(e)\n\nCreate a new edge from `e` with source and destination vertices reversed.\n\"\"\"\nreverse(e::AbstractEdge) = _NI(\"reverse\")\n\n==(e1::AbstractEdge, e2::AbstractEdge) = _NI(\"==\")\n\n\n#\n# Interface for AbstractGraphs\n#\n\"\"\"\n edgetype(g)\n\nReturn the type of graph `g`'s edge\n\"\"\"\nedgetype(g::AbstractGraph) = _NI(\"edgetype\")\n\n\"\"\"\n 
eltype(g)\n\nReturn the type of the graph's vertices (must be <: Integer)\n\"\"\"\neltype(g::AbstractGraph) = _NI(\"eltype\")\n\n\"\"\"\n    nv(g)\n\nReturn the number of vertices in `g`.\n\"\"\"\nnv(g::AbstractGraph) = _NI(\"nv\")\n\n\"\"\"\n    ne(g)\n\nReturn the number of edges in `g`.\n\"\"\"\nne(g::AbstractGraph) = _NI(\"ne\")\n\n\"\"\"\n    vertices(g)\n\nReturn (an iterator to or collection of) the vertices of a graph.\n\n### Implementation Notes\nA returned iterator is valid for one pass over the vertices, and\nis invalidated by changes to `g`.\n\n\"\"\"\nvertices(g::AbstractGraph) = _NI(\"vertices\")\n\n\"\"\"\n    edges(g)\n\nReturn (an iterator to or collection of) the edges of a graph.\n\n### Implementation Notes\nA returned iterator is valid for one pass over the edges, and\nis invalidated by changes to `g`.\n\"\"\"\nedges(x...) = _NI(\"edges\")\n\nis_directed(x...) = _NI(\"is_directed\")\nis_directed{T}(::Type{T}) = _NI(\"is_directed\")\n\"\"\"\n    add_vertex!(g)\n\nAdd a new vertex to the graph `g`.\nReturn true if the vertex was added successfully, false otherwise.\n\"\"\"\nadd_vertex!(x...) = _NI(\"add_vertex!\")\n\n\"\"\"\n    add_edge!(g, e)\n\nAdd a new edge `e` to `g`. Return false if add fails\n(e.g., if vertices are not in the graph, or edge already exists), true otherwise.\n\"\"\"\nadd_edge!(x...) = _NI(\"add_edge!\")\n\n\"\"\"\n    rem_vertex!(g, v)\n\nRemove the vertex `v` from graph `g`. Return false if removal fails\n(e.g., if vertex is not in the graph), true otherwise.\n\"\"\"\nrem_vertex!(x...) = _NI(\"rem_vertex!\")\n\n\"\"\"\n    rem_edge!(g, e)\n\nRemove the edge `e` from `g`. Return false if edge removal fails\n(e.g., if edge does not exist), true otherwise.\n\"\"\"\nrem_edge!(x...) = _NI(\"rem_edge!\")\n\n\"\"\"\n    has_vertex(g, v)\n\nReturn true if `v` is a vertex of `g`.\n\"\"\"\nhas_vertex(x...) = _NI(\"has_vertex\")\n\n\"\"\"\n    has_edge(g, e)\n\nReturn true if the graph `g` has an edge `e`.\n\"\"\"\nhas_edge(x...) = _NI(\"has_edge\")\n\n\"\"\"\n    in_neighbors(g, v)\n\nReturn a list of all neighbors connected to vertex `v` by an incoming edge.\n\n### Implementation Notes\nReturns a reference, not a copy. Do not modify result.\n\"\"\"\nin_neighbors(x...) = _NI(\"in_neighbors\")\n\n\"\"\"\n    out_neighbors(g, v)\n\nReturn a list of all neighbors connected to vertex `v` by an outgoing edge.\n\n### Implementation Notes\nReturns a reference, not a copy. Do not modify result.\n\"\"\"\nout_neighbors(x...) 
= _NI(\"out_neighbors\")\n\n\"\"\"\n zero(g)\n\nReturn a zero-vertex, zero-edge version of the same type of graph as `g`.\n\"\"\"\nzero(g::AbstractGraph) = _NI(\"zero\")\n","avg_line_length":19.0529100529,"max_line_length":81,"alphanum_fraction":0.6667592335} {"size":339,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using Lumira \nusing Gadfly\nusing Base.Test\n\n\nplot = plot([sin, cos], 0, 25)\n\nsetToWriteToFile(false) \n\nexportToSVG(plot)\n\nsetId(\"trigo\")\nsetName(\"HelloWorld\")\n\n@test getId() == \"trigo\"\n@test getName() == \"HelloWorld\"\n@test createTemplate(\"dummyPath\") == true\n@test createChartCode(\"dummyPath\") == true\n@test createLumiraExtension() == true","avg_line_length":17.8421052632,"max_line_length":42,"alphanum_fraction":0.7138643068} {"size":12592,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# TODO The mutability of the coefficient type `T` is currently not exploited yet.\n# We first need to make sure that it is copied with `MA.copy_if_mutable` when it\n# is passed from one function to a mutable one.\n\nMA.mutability(::Type{<:TypedLike}) = MA.IsMutable()\n\nfunction MA.mutable_copy(func::MOI.ScalarAffineFunction)\n terms = [\n MOI.ScalarAffineTerm(\n MA.copy_if_mutable(t.coefficient),\n t.variable_index,\n ) for t in func.terms\n ]\n return MOI.ScalarAffineFunction(terms, MA.copy_if_mutable(func.constant))\nend\nfunction MA.mutable_copy(func::MOI.ScalarQuadraticFunction)\n affine_terms = [\n MOI.ScalarAffineTerm(\n MA.copy_if_mutable(t.coefficient),\n t.variable_index,\n ) for t in func.affine_terms\n ]\n quadratic_terms = [\n MOI.ScalarQuadraticTerm(\n MA.copy_if_mutable(t.coefficient),\n t.variable_index_1,\n t.variable_index_2,\n ) for t in func.quadratic_terms\n ]\n return MOI.ScalarQuadraticFunction(\n affine_terms,\n quadratic_terms,\n MA.copy_if_mutable(func.constant),\n )\nend\n\nfunction MA.isequal_canonical(\n f::F,\n g::F,\n) where {\n F<:Union{\n MOI.ScalarAffineFunction,\n MOI.ScalarQuadraticFunction,\n MOI.VectorAffineFunction,\n MOI.VectorQuadraticFunction,\n },\n}\n return MA.isequal_canonical(MOI.constant(f), MOI.constant(g)) && all(\n MOI.dict_compare.(MOI._dicts(f), MOI._dicts(g), MA.isequal_canonical),\n )\nend\n\nfunction MA.iszero!(f::TypedScalarLike)\n return iszero(MOI.constant(f)) && _is_constant(canonicalize!(f))\nend\n\nfunction MA.scaling(f::TypedScalarLike{T}) where {T}\n g = canonical(f)\n if !_is_constant(g)\n throw(InexactError(:convert, T, f))\n end\n return MA.scaling(MOI.constant(g))\nend\n\nfunction MA.promote_operation(\n ::Union{typeof(zero),typeof(one)},\n F::Type{<:TypedScalarLike},\n)\n return F\nend\n\n# To avoid type piracy, we add at least one `ScalarLike` outside of the `...`.\nconst PROMOTE_IMPLEMENTED_OP = Union{typeof(+),typeof(-),typeof(*),typeof(\/)}\nfunction MA.promote_operation(\n op::PROMOTE_IMPLEMENTED_OP,\n F::Type{<:ScalarLike{T}},\n G::Type{<:ScalarLike{T}},\n) where {T,N}\n return promote_operation(op, T, F, G)\nend\nfunction MA.promote_operation(\n op::PROMOTE_IMPLEMENTED_OP,\n F::Type{T},\n G::Type{<:TypedLike{T}},\n) where {T,N}\n return promote_operation(op, T, F, G)\nend\nfunction MA.promote_operation(\n op::PROMOTE_IMPLEMENTED_OP,\n F::Type{<:TypedLike{T}},\n G::Type{T},\n) where {T}\n return promote_operation(op, T, F, G)\nend\nfunction MA.promote_operation(\n op::PROMOTE_IMPLEMENTED_OP,\n F::Type{<:Number},\n G::Type{<:Union{MOI.SingleVariable,MOI.VectorOfVariables}},\n)\n return promote_operation(op, F, F, G)\nend\nfunction MA.promote_operation(\n 
op::PROMOTE_IMPLEMENTED_OP,\n F::Type{<:Union{MOI.SingleVariable,MOI.VectorOfVariables}},\n G::Type{<:Number},\n)\n return promote_operation(op, G, F, G)\nend\n\nfunction MA.mutable_operate!(\n op::Union{typeof(zero),typeof(one)},\n f::MOI.ScalarAffineFunction,\n)\n empty!(f.terms)\n f.constant = op(f.constant)\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(zero),typeof(one)},\n f::MOI.ScalarQuadraticFunction,\n)\n empty!(f.affine_terms)\n empty!(f.quadratic_terms)\n f.constant = op(f.constant)\n return f\nend\n\nfunction MA.mutable_operate!(::typeof(-), f::MOI.ScalarQuadraticFunction)\n operate_terms!(-, f.quadratic_terms)\n operate_terms!(-, f.affine_terms)\n f.constant = -f.constant\n return f\nend\nfunction MA.mutable_operate!(::typeof(-), f::MOI.ScalarAffineFunction)\n operate_terms!(-, f.terms)\n f.constant = -f.constant\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarAffineFunction{T},\n g::T,\n) where {T}\n f.constant = op(f.constant, g)\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarAffineFunction{T},\n g::MOI.SingleVariable,\n) where {T}\n push!(f.terms, MOI.ScalarAffineTerm(op(one(T)), g.variable))\n return f\nend\nfunction MA.mutable_operate_to!(\n output::MOI.ScalarAffineFunction{T},\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarAffineFunction{T},\n g::MOI.ScalarAffineFunction{T},\n) where {T}\n empty!(output.terms)\n append!(output.terms, f.terms)\n append!(output.terms, operate_terms(op, g.terms))\n output.constant = op(f.constant, g.constant)\n return output\nend\nfunction MA.mutable_operate_to!(\n output::MOI.ScalarQuadraticFunction{T},\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarQuadraticFunction{T},\n g::MOI.ScalarQuadraticFunction{T},\n) where {T}\n empty!(output.affine_terms)\n append!(output.affine_terms, f.affine_terms)\n append!(output.affine_terms, operate_terms(op, g.affine_terms))\n empty!(output.quadratic_terms)\n append!(output.quadratic_terms, f.quadratic_terms)\n append!(output.quadratic_terms, operate_terms(op, g.quadratic_terms))\n output.constant = op(f.constant, g.constant)\n return output\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarAffineFunction{T},\n g::MOI.ScalarAffineFunction{T},\n) where {T}\n append!(f.terms, operate_terms(op, g.terms))\n f.constant = op(f.constant, g.constant)\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarQuadraticFunction{T},\n g::T,\n) where {T}\n f.constant = op(f.constant, g)\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarQuadraticFunction{T},\n g::MOI.SingleVariable,\n) where {T}\n push!(f.affine_terms, MOI.ScalarAffineTerm(op(one(T)), g.variable))\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarQuadraticFunction{T},\n g::MOI.ScalarAffineFunction{T},\n) where {T}\n append!(f.affine_terms, operate_terms(op, g.terms))\n f.constant = op(f.constant, g.constant)\n return f\nend\nfunction MA.mutable_operate!(\n op::Union{typeof(+),typeof(-)},\n f::MOI.ScalarQuadraticFunction{T},\n g::MOI.ScalarQuadraticFunction{T},\n) where {T}\n append!(f.affine_terms, operate_terms(op, g.affine_terms))\n append!(f.quadratic_terms, operate_terms(op, g.quadratic_terms))\n f.constant = op(f.constant, g.constant)\n return f\nend\n\n_constant(::Type{T}, \u03b1::T) where {T} = \u03b1\n_constant(::Type{T}, ::MOI.SingleVariable) where {T} = 
zero(T)\n_constant(::Type{T}, func::TypedScalarLike{T}) where {T} = MOI.constant(func)\n\n_affine_terms(f::MOI.ScalarAffineFunction) = f.terms\n_affine_terms(f::MOI.ScalarQuadraticFunction) = f.affine_terms\n\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n \u03b1::T,\n f::MOI.SingleVariable,\n \u03b2::T,\n) where {T}\n push!(terms, MOI.ScalarAffineTerm(op(\u03b1 * \u03b2), f.variable))\n return\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n f::MOI.SingleVariable,\n \u03b2::T,\n) where {T}\n push!(terms, MOI.ScalarAffineTerm(op(\u03b2), f.variable))\n return\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n \u03b1::T,\n f::MOI.SingleVariable,\n) where {T}\n push!(terms, MOI.ScalarAffineTerm(op(\u03b1), f.variable))\n return\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n f::MOI.SingleVariable,\n) where {T}\n push!(terms, MOI.ScalarAffineTerm(op(one(T)), f.variable))\n return\nend\n\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n \u03b1::T,\n f::TypedScalarLike{T},\n \u03b2::T,\n) where {T}\n for t in _affine_terms(f)\n push!(terms, operate_term(op, operate_term(*, \u03b1, t, \u03b2)))\n end\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n f::TypedScalarLike{T},\n \u03b2::T,\n) where {T}\n for t in _affine_terms(f)\n push!(terms, operate_term(op, operate_term(*, t, \u03b2)))\n end\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n \u03b1::T,\n f::TypedScalarLike{T},\n) where {T}\n for t in _affine_terms(f)\n push!(terms, operate_term(op, operate_term(*, \u03b1, t)))\n end\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n f::TypedScalarLike{T},\n) where {T}\n append!(terms, operate_terms(op, _affine_terms(f)))\n return\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n args::Vararg{T,N},\n) where {T,N}\n return\nend\nfunction _add_sub_affine_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarAffineTerm{T}},\n \u03b1::T,\n \u03b2::T,\n args::Vararg{ScalarQuadraticLike,N},\n) where {T,N}\n return _add_sub_affine_terms(op, terms, \u03b1 * \u03b2, args...)\nend\n\nfunction MA.mutable_operate!(\n op::MA.AddSubMul,\n f::MOI.ScalarAffineFunction{T},\n args::Vararg{ScalarAffineLike{T},N},\n) where {T,N}\n f.constant = op(f.constant, _constant.(T, args)...)\n _add_sub_affine_terms(MA.add_sub_op(op), f.terms, args...)\n return f\nend\n\nfunction _add_quadratic_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarQuadraticTerm{T}},\n \u03b1::ScalarAffineLike{T},\n f::MOI.ScalarQuadraticFunction{T},\n \u03b2::ScalarAffineLike{T},\n) where {T}\n for t in f.quadratic_terms\n push!(\n terms,\n operate_term(\n op,\n operate_term(*, _constant(T, \u03b1), t, _constant(T, \u03b2)),\n ),\n )\n end\nend\nfunction _add_quadratic_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarQuadraticTerm{T}},\n f::MOI.ScalarQuadraticFunction{T},\n \u03b2::ScalarAffineLike{T},\n) where {T}\n for t in f.quadratic_terms\n push!(terms, operate_term(op, operate_term(*, t, _constant(T, \u03b2))))\n 
end\nend\nfunction _add_quadratic_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarQuadraticTerm{T}},\n \u03b1::ScalarAffineLike{T},\n f::MOI.ScalarQuadraticFunction{T},\n) where {T}\n for t in f.quadratic_terms\n push!(terms, operate_term(op, operate_term(*, _constant(T, \u03b1), t)))\n end\nend\nfunction _add_quadratic_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarQuadraticTerm{T}},\n f::MOI.ScalarQuadraticFunction{T},\n) where {T}\n append!(terms, operate_terms(op, f.quadratic_terms))\n return\nend\nfunction _add_quadratic_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarQuadraticTerm{T}},\n # Compiler fails in StackOverflowError on Julia v1.1\n #args::Vararg{ScalarAffineLike{T}, N}) where {T, N}\n args::ScalarAffineLike{T},\n) where {T}\n return\nend\nfunction _merge_constants(\n ::Type{T},\n \u03b1::ScalarAffineLike{T},\n \u03b2::ScalarAffineLike{T},\n args::Vararg{Any,N},\n) where {T,N}\n return (_constant(T, \u03b1) * _constant(T, \u03b2), args...)\nend\nfunction _add_quadratic_terms(\n op::Union{typeof(+),typeof(-)},\n terms::Vector{MOI.ScalarQuadraticTerm{T}},\n args::Vararg{Any,N},\n) where {T,N}\n return _add_quadratic_terms(op, terms, _merge_constants(T, args...)...)\nend\n\n_num_function_with_terms(::Type{T}, ::T) where {T} = 0\n_num_function_with_terms(::Type{T}, ::ScalarLike{T}) where {T} = 1\nfunction _num_function_with_terms(\n ::Type{T},\n f::ScalarQuadraticLike{T},\n args::Vararg{ScalarQuadraticLike{T},N},\n) where {T,N}\n return _num_function_with_terms(T, f) + _num_function_with_terms(T, args...)\nend\nfunction MA.mutable_operate!(\n op::MA.AddSubMul,\n f::MOI.ScalarQuadraticFunction{T},\n args::Vararg{ScalarQuadraticLike{T},N},\n) where {T,N}\n if isone(_num_function_with_terms(T, args...))\n f.constant = op(f.constant, _constant.(T, args)...)\n _add_sub_affine_terms(MA.add_sub_op(op), f.affine_terms, args...)\n _add_quadratic_terms(MA.add_sub_op(op), f.quadratic_terms, args...)\n return f\n else\n return MA.mutable_operate!(MA.add_sub_op(op), f, *(args...))\n end\nend\n# `args` could be `(x', a)` where `a` is a vector of constants and `x` a vector\n# of affine functions for instance.\nfunction MA.mutable_operate!(\n op::MA.AddSubMul,\n f::TypedScalarLike,\n args::Vararg{Any,N},\n) where {N}\n return MA.mutable_operate!(MA.add_sub_op(op), f, *(args...))\nend\n","avg_line_length":28.6833712984,"max_line_length":85,"alphanum_fraction":0.6646283355} {"size":10025,"ext":"jl","lang":"Julia","max_stars_count":7.0,"content":"\"\"\"\nCalculates Karcher mean of a collection of curves using the elastic square-root\nvelocity (srvf) framework.\n\n curve_karcher_mean(beta; mode='O', maxit=20)\n :param beta: array (n,T,N) for N number of curves\n :param mode: Open ('O') or Closed ('C') curves\n :param maxit: maximum number of iterations\n :param wscale: with scale (false)\n\n :return mu: mean srvf\n :return betamean: mean curve\n :return v: shooting vectors\n :return q: array of srvfs\n\"\"\"\nfunction curve_karcher_mean(beta::Array{Float64, 3}; mode='O', maxit=20,\n wscale=false)\n n, T, N = size(beta)\n q = zeros(n, T, N);\n for ii = 1:N\n q[:, :, ii] = curve_to_q(beta[:, :, ii])\n end\n\n # Initialize mu as one of the shapes\n mu = q[:, :, 1];\n betamean = beta[:, :, 1];\n\n delta = 0.5;\n tolv = 1e-4;\n told = 5*1e-3;\n itr = 1;\n sumd = zeros(maxit+1);\n v = zeros(n,T,N);\n normvbar = zeros(maxit+1);\n\n while itr < maxit\n @printf(\"Iteration: %d\\n\", itr)\n\n mu \/= sqrt(innerprod_q2(mu,mu));\n\n sumv = zeros(2, T);\n 
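        # Descriptive note: each pass computes a shooting vector v_i from the current
        # mean srvf `mu` to every curve, averages them into `vbar`, and, while the
        # update is still significant (normv > tolv and the summed squared distances
        # in `sumd` keep changing by more than told), steps `mu` along `vbar` on the
        # unit sphere before recomputing `betamean`.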
sumd[itr] = 0.;\n\n # TODO: parallelize\n for i = 1:N\n v1, d = karcher_calc(beta[:,:,i],q[:,:,i], betamean, mu, mode=mode,\n wscale=wscale);\n v[:,:,i] = v1;\n sumd[itr+1] = sumd[itr+1] + d^2;\n end\n\n sumv = sum(v,3);\n\n # compute average direction of tangent vectors v_i\n vbar = sumv.\/float(N);\n\n normvbar[itr] = sqrt(innerprod_q2(vbar,vbar));\n normv = normvbar[itr];\n\n if (normv > tolv && abs(sumd[itr+1]-sumd[itr])>told)\n # update mu in direction of vbar\n mu = cos(delta*normvbar[itr])*mu + sin(delta*normvbar[itr]) *\n vbar\/normvbar[itr];\n\n if mode == 'C'\n mu = project_curve(mu);\n end\n\n x = q_to_curve(mu);\n a = -1 * calculatecentroid(x);\n betamean = x + repeat(a,1,T);\n else\n break\n end\n\n itr += 1;\n\n end\n\n return (mu, betamean, v, q)\nend\n\n\n\"\"\"\nAligns a collection of curves using the elastic square-root velocity (srvf)\nframework.\n\n curve_srvf_align(beta; mode='O', maxit=20)\n\n :param beta: array (n,T,N) for N number of curves\n :param mode: Open ('O') or Closed ('C') curves\n :param maxit: maximum number of iterations\n\n :return betan: aligned curves\n :return qn: aligned srvfs\n :return betamean: mean curve\n :return q_mu: mean srvf\n\"\"\"\nfunction curve_srvf_align(beta::Array{Float64, 3}; mode='O', maxit=20)\n n, T, N = size(beta);\n # find mean\n mu, betamean, v, q = curve_karcher_mean(beta, mode=mode, maxit=maxit);\n\n qn = zeros(n,T,N);\n betan = zeros(n,T,N);\n centroid2 = calculatecentroid(betamean);\n betamean -= repeat(centroid2, 1, T);\n q_mu = curve_to_q(betamean);\n\n # align to mean\n for ii = 1:N\n beta1 = beta[:,:,ii];\n centroid1 = calculatecentroid(beta1);\n beta1 -= repeat(centroid1, 1, T);\n\n # Iteratively optimzie over SO(n) x Gamma\n # Optimize over SO(n) x Gamma\n gam, R, tau = optimum_reparam(betamean, beta1);\n gamI = invert_gamma(gam);\n beta1 = R * shift_f(beta1, tau);\n\n # Applying optimal re-parameterization to the second curve\n beta1 = group_action_by_gamma_coord(beta1, gamI);\n\n # Optimize over SO(n)\n beta1, R, tau = find_rotation_seed_coord(betamean, beta1);\n qn[:,:,ii] = curve_to_q(beta1);\n betan[:,:,ii] = beta1;\n end\n\n return betan, qn, betamean, q_mu\nend\n\n\n\"\"\"\nCalculate Karcher Covariance of a set of curves\n\n curve_karcher_cov(betamean, beta; mode='O')\n :param betamean: array (n,T) of mean curve\n :param beta: array (n,T,N) for N number of curves\n :param mode: Open ('O') or Closed ('C') curves\n\n :return K: covariance matrix\n\"\"\"\nfunction curve_karcher_cov(betamean::Array{Float64,2}, beta::Array{Float64,3};\n mode='O')\n n, T, N = size(beta);\n\n # Compute Karcher covariance of uniformly sampled mean\n betamean = resamplecurve(betamean, T);\n mu = curve_to_q(betamean);\n if mode == 'C'\n mu = project_curve(mu);\n basis = find_basis_normal(mu);\n end\n\n v = zeros(n, T, N);\n for i = 1:N\n beta1 = beta[:, :, i];\n\n w, dist = inverse_exp_coord(betamean, beta1);\n # Project to the tangent space of manifold to obtain v_i\n if mode == 'O'\n v[:,:,i] = w;\n else\n v[:,:,i] = project_tangent(w, mu, basis);\n end\n end\n\n K = zeros(2*T, 2*T);\n\n for i = 1:N\n w = v[:,:,i];\n w = [vec(w[1,:]); vec(w[2,:])];\n K = K + w*w';\n end\n\n K \/= (N-1);\n\n return K\nend\n\n\n\"\"\"\nCalculate principal directions of a set of curves\n\n curve_principal_directions(betamean, mu, K; mode='O', no=3, N=5)\n :param betamean: array (n,T) of mean curve\n :param mu: array (n,T) of mean srvf\n :param K: array (T,T) covariance matrix\n :param mode: Open ('O') or Closed ('C') curve\n :param no: number of 
components\n :param N: number of samples on each side of mean\n\n :return pd: array describing principal directions\n\"\"\"\nfunction curve_principal_directions(betamean::Array{Float64, 2}, mu, K;\n mode='O', no=3, N=5)\n n, T = size(betamean);\n U, s, V = svd(K);\n\n qarray = Array{Any}(undef, (no, 2*N+1));\n qarray1 = Array{Any}(undef, N);\n qarray2 = Array{Any}(undef, N);\n pd = Array{Any}(undef, no, 2*N+1);\n pd1 = Array{Any}(undef, N);\n pd2 = Array{Any}(undef, N);\n\n for m = 1:no\n princDir = [U[1:T,m]'; U[T+1:2*T,m]'];\n v = sqrt(s[m])*princDir;\n q1 = copy(mu);\n epsilon = 2.0\/N;\n\n # Forward direction from mean\n for i = 1:N\n normv = sqrt(innerprod_q2(v,v));\n\n if normv < 1e-4\n q2 = copy(mu);\n else\n q2 = cos(epsilon*normv)*q1 + sin(epsilon*normv)*v\/normv;\n if mode == 'C'\n q2 = project_curve(q2);\n end\n end\n\n qarray1[i] = q2;\n p = q_to_curve(q2);\n centroid1 = -1 * calculatecentroid(p);\n beta_scaled, scale = scale_curve(p + repeat(centroid1,1, T));\n pd1[i] = beta_scaled;\n\n # Parallel translate tangent vector\n basis2 = find_basis_normal(q2);\n v = parallel_translate(v, q1, q2, basis2, mode);\n\n q1 = copy(q2);\n end\n\n # Backward direction from mean\n v = -sqrt(s[m])*princDir;\n q1 = mu;\n for i = 1:N\n normv = sqrt(innerprod_q2(v,v));\n\n if normv < 1e-4\n q2 = copy(mu);\n else\n q2 = cos(epsilon*normv)*q1 + sin(epsilon*normv)*v\/normv;\n if mode == 'C'\n q2 = project_curve(q2);\n end\n end\n\n qarray2[i] = q2;\n p = q_to_curve(q2);\n centroid1 = -1*calculatecentroid(p);\n beta_scaled, scale = scale_curve(p + repeat(centroid1, 1, T));\n pd2[i] = beta_scaled;\n\n # parallel translate tangent vector\n basis2 = find_basis_normal(q2);\n v = parallel_translate(v, q1, q2, basis2, mode);\n\n q1 = copy(q2);\n end\n\n for i = 1:N\n qarray[m,i] = qarray2[N+1-i];\n pd[m, i] = pd2[N+1-i];\n end\n\n qarray[m,N+1] = mu;\n centroid1 = -1 * calculatecentroid(betamean);\n beta_scaled, scale = scale_curve(betamean + repeat(centroid1,1, T));\n\n pd[m, N] = beta_scaled;\n\n for i = 1:N;\n qarray[m,i+N+1] = qarray1[i];\n pd[m,i+N+1] = pd1[i];\n end\n\n end\n\n return pd\nend\n\n\n\"\"\"\nSample shapes from model\n\n sample_shapes(mu, K; mode='O', no=3, numSamp=10)\n :param mu: array (n,T) mean srvf\n :param K: array (T,T) covariance matrix\n :param mode: Open ('O') or Closed ('C') curves\n :param no: number of principal components\n :param numSamp: number of samples\n\n :return samples: array (n,T,numSamp) of sample curves\n\"\"\"\nfunction sample_shapes(mu::Array{Float64,2}, K; mode='O', no=3, numSamp=10)\n n, T = size(mu);\n\n U,s,V = svd(K);\n\n if mode == 'O'\n N = 2;\n else\n N = 10;\n end\n\n epsilon = 1.0\/(N-1);\n\n q1 = copy(mu);\n q2 = copy(mu);\n samples = Array{Any}(undef, numSamp);\n for i = 1:numSamp\n v = zeros(2,T);\n for m = 1:no\n v = v + randn()*sqrt(s[m])*[U[1:T,m]';U[T+1:2*T,m]'];\n end\n\n q1 = copy(mu);\n for j = 1:N-1\n normv = sqrt(innerprod_q2(v,v));\n\n if normv < 1e-4\n q2 = copy(mu);\n else\n q2 = cos(epsilon*normv)*q1 + sin(epsilon*normv)*v\/normv;\n if mode == 'C'\n q2 = project_curve(q2);\n end\n end\n\n # Parallel translate tangent vector\n basis2 = find_basis_normal(q2);\n v = parallel_translate(v, q1, q2, basis2, mode);\n\n q1 = copy(q2);\n end\n\n samples[i] = q_to_curve(q2);\n end\n\n return samples\nend\n\n\n\"\"\"\nkarcher mean calculation function\n karcher_calc(beta, q, betamean, mu; mode='O')\n :param beta: array (n,T)\n :param q: array (n,T)\n :param betamean: array (n,T)\n :param mu: array (n,T)\n :param mode: Open ('O') or Closed ('C') 
curves\n :param wscale: with scale ('false')\n\n :return v: shooting vector\n :return d: elastic distance\n\"\"\"\nfunction karcher_calc(beta, q, betamean, mu; mode='O', wscale=false)\n if mode == 'C'\n basis = find_basis_normal(mu);\n end\n # Compute shooting vector from mu to q_i\n w, d = inverse_exp_coord(betamean, beta, wscale=wscale);\n\n # Project to tangent space of manifold to obtain v_i\n if mode == 'O'\n v = copy(w);\n else\n v = project_tangent(w, q, basis);\n end\n\n return v,d\nend\n","avg_line_length":26.312335958,"max_line_length":79,"alphanum_fraction":0.5301745636} {"size":3188,"ext":"jl","lang":"Julia","max_stars_count":5.0,"content":"mutable struct CoefplotOption\n xticklabel_angle::Float64\n xticklabel_size::Float64\n yticklabel_angle::Float64\n yticklabel_size::Float64\n other_coefplot_options::PGFPlotsX.Options\nend\n\nmutable struct SinglecoefplotOption\n dotsize::Float64\n dotcolor::Color\n linewidth::Float64\n linecolor::Color\n other_dot_options::PGFPlotsX.Options\n other_line_options::PGFPlotsX.Options\nend\n\ndefault_singlecoefplot_options() = SinglecoefplotOption(4, first(COLOR_PALATTE), 0.5, first(COLOR_PALATTE), PGFPlotsX.Options(), PGFPlotsX.Options())\n\ndefault_coefplot_options() = CoefplotOption(0, 5, 0, 5, PGFPlotsX.Options())\n\nget_line_options(option::SinglecoefplotOption) = merge(option.other_line_options,\n color_as_option(option.linecolor),\n PGFPlotsX.Options(Symbol(\"line width\") => option.linewidth))\n\nget_dot_options(option::SinglecoefplotOption) = merge(option.other_dot_options,\n color_as_fill_option(option.dotcolor),\n PGFPlotsX.Options(:circle => nothing, # TODO consider make this flexible \n Symbol(\"inner sep\") => 0,\n Symbol(\"minimum size\") => option.dotsize))\n\nget_coefplot_options(option::CoefplotOption) = merge(option.other_coefplot_options,\n PGFPlotsX.Options(Symbol(\"xticklabel style\")=> \"font=\\\\fontsize{$(option.xticklabel_size)}{$(option.xticklabel_size)}\\\\selectfont, rotate=$(option.xticklabel_angle)\"),\n PGFPlotsX.Options(Symbol(\"yticklabel style\")=> \"font=\\\\fontsize{$(option.yticklabel_size)}{$(option.yticklabel_size)}\\\\selectfont, rotate=$(option.yticklabel_angle)\"))\n\nBase.:(==)(o1::PGFPlotsX.Options,o2::PGFPlotsX.Options) = (o1.dict == o2.dict) && (o1.print_empty == o2.print_empty)\nfunction Base.:(==)(so1::SinglecoefplotOption,so2::SinglecoefplotOption)\n for fieldname in fieldnames(typeof(so1))\n if getfield(so1,fieldname) == getfield(so2,fieldname)\n continue\n else\n return false\n end\n end\n return true\nend\nfunction all_equal(v::Vector{SinglecoefplotOption})\n for i in 1:(length(v)-1)\n if v[i] == v[i+1]\n continue\n else\n return false\n end\n end\n return true\nend\n\nmutable struct LegendsOption\n fontsize::Real\n at::Tuple{Real, Real}\n anchor::Symbol\n\n function LegendsOption(fontsize::Real=5,at::Tuple{Real, Real}=(0.98,0.98),anchor::Symbol=Symbol(\"north east\"))\n new(fontsize,at,anchor)\n end\nend\n\nfunction gen_legend_options(lo::LegendsOption)\n legend_stype = PGFPlotsX.Options(:font => \"\\\\fontsize{$(lo.fontsize)}{$(lo.fontsize)}\\\\selectfont\",\n :at => \"($(first(lo.at)),$(last(lo.at)))\",\n :anchor => string(lo.anchor))\n PGFPlotsX.Options(Symbol(\"legend style\") => legend_stype)\nend","avg_line_length":43.6712328767,"max_line_length":220,"alphanum_fraction":0.5994353827} {"size":6133,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"@testset \"4.1.4.1 (a+b sin)^m (A+B sin+C sin^2)\" begin\n (A, B, C, a, b, e, f, m, x, ) = @variables A B C a b e f m x\n\n #= 
::Package:: =#\n\n #= ::Title::Closed:: =#\n #=Integrands*of*the*form*(b*sin(e+f*x))^m*(A+C*sin(e+f*x)^2)=#\n\n\n #= ::Section::Closed:: =#\n #=Integrands*of*the*form*(b*sin(e+f*x))^m*(A+C*sin(e+f*x)^2)*when*A*(m+2)+C*(m+1)=0=#\n\n\n @test_int [sin(e + f*x)^m*(1 + m - (2 + m)*sin(e + f*x)^2), x, 1, (cos(e + f*x)*sin(e + f*x)^(1 + m))\/f]\n\n @test_int [sin(e + f*x)^5*(6 - 7*sin(e + f*x)^2), x, 1, (cos(e + f*x)*sin(e + f*x)^6)\/f]\n @test_int [sin(e + f*x)^4*(5 - 6*sin(e + f*x)^2), x, 1, (cos(e + f*x)*sin(e + f*x)^5)\/f]\n @test_int [sin(e + f*x)^3*(4 - 5*sin(e + f*x)^2), x, 1, (cos(e + f*x)*sin(e + f*x)^4)\/f]\n @test_int [sin(e + f*x)^2*(3 - 4*sin(e + f*x)^2), x, 1, (cos(e + f*x)*sin(e + f*x)^3)\/f]\n @test_int [sin(e + f*x)^1*(2 - 3*sin(e + f*x)^2), x, 1, (cos(e + f*x)*sin(e + f*x)^2)\/f]\n @test_int [sin(e + f*x)^0*(1 - 2*sin(e + f*x)^2), x, 3, (cos(e + f*x)*sin(e + f*x))\/f]\n @test_int [csc(e + f*x)^1*(0 - 1*sin(e + f*x)^2), x, 1, cos(e + f*x)\/f]\n @test_int [csc(e + f*x)^2*(-1 - 0*sin(e + f*x)^2), x, 2, cot(e + f*x)\/f]\n @test_int [csc(e + f*x)^3*(-2 + 1*sin(e + f*x)^2), x, 1, (cot(e + f*x)*csc(e + f*x))\/f]\n @test_int [csc(e + f*x)^4*(-3 + 2*sin(e + f*x)^2), x, 1, (cot(e + f*x)*csc(e + f*x)^2)\/f]\n @test_int [csc(e + f*x)^5*(-4 + 3*sin(e + f*x)^2), x, 1, (cot(e + f*x)*csc(e + f*x)^3)\/f]\n\n\n #= ::Title::Closed:: =#\n #=Integrands*of*the*form*(a+b*sin(e+f*x))^m*(A+C*sin(e+f*x)^2)=#\n\n\n #= ::Section::Closed:: =#\n #=Integrands*of*the*form*(a+a*sin(e+f*x))^m*(A+C*sin(e+f*x)^2)=#\n\n\n @test_int [(a + a*sin(e + f*x))^m*(A + C*sin(e + f*x)^2), x, 4, If(13>=8, (C*cos(e + f*x)*(a + a*sin(e + f*x))^m)\/(f*(2 + 3*m + m^2)) - (2^(1\/2 + m)*(C*(1 + m + m^2) + A*(2 + 3*m + m^2))*cos(e + f*x)*HypergeometricFunctions._\u2082F\u2081(1\/2, 1\/2 - m, 3\/2, (1\/2)*(1 - sin(e + f*x)))*(1 + sin(e + f*x))^(-(1\/2) - m)*(a + a*sin(e + f*x))^m)\/(f*(1 + m)*(2 + m)) - (C*cos(e + f*x)*(a + a*sin(e + f*x))^(1 + m))\/(a*f*(2 + m)), (C*cos(e + f*x)*(a + a*sin(e + f*x))^m)\/(f*(2 + 3*m + m^2)) - (2^(1\/2 + m)*(C*(1 + m + m^2) + A*(2 + 3*m + m^2))*cos(e + f*x)*HypergeometricFunctions._\u2082F\u2081(1\/2, 1\/2 - m, 3\/2, (1\/2)*(1 - sin(e + f*x)))*(1 + sin(e + f*x))^(-(1\/2) - m)*(a + a*sin(e + f*x))^m)\/(f*(2 + 3*m + m^2)) - (C*cos(e + f*x)*(a + a*sin(e + f*x))^(1 + m))\/(a*f*(2 + m)))]\n\n\n #= ::Section::Closed:: =#\n #=Integrands*of*the*form*(a+b*sin(e+f*x))^m*(A+C*sin(e+f*x)^2)=#\n\n\n @test_int [(a + b*sin(e + f*x))^m*(A - A*sin(e + f*x)^2), x, 7, (4*sqrt(2)*A*AppellF1(1\/2, -(3\/2), -m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(f*sqrt(1 + sin(e + f*x)))) - (4*sqrt(2)*A*AppellF1(1\/2, -(1\/2), -m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(f*sqrt(1 + sin(e + f*x))))]\n\n\n @test_int [(a + b*sin(e + f*x))^m*(A + C*sin(e + f*x)^2), x, 8, -((C*cos(e + f*x)*(a + b*sin(e + f*x))^(1 + m))\/(b*f*(2 + m))) + (sqrt(2)*a*(a + b)*C*AppellF1(1\/2, 1\/2, -1 - m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(b^2*f*(2 + m)*sqrt(1 + sin(e + f*x)))) - (sqrt(2)*(a^2*C + b^2*(C*(1 + m) + A*(2 + m)))*AppellF1(1\/2, 1\/2, -m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(b^2*f*(2 + m)*sqrt(1 + sin(e + f*x))))]\n\n\n @test_int [sin(e + f*x)^5*(A 
+ C*sin(e + f*x)^2), x, 3, -(((A + C)*cos(e + f*x))\/f) + ((2*A + 3*C)*cos(e + f*x)^3)\/(3*f) - ((A + 3*C)*cos(e + f*x)^5)\/(5*f) + (C*cos(e + f*x)^7)\/(7*f)]\n\n\n #= ::Title::Closed:: =#\n #=Integrands*of*the*form*(a+b*sin(e+f*x))^m*(A+B*sin(e+f*x)+C*sin(e+f*x)^2)=#\n\n\n #= ::Section::Closed:: =#\n #=Integrands*of*the*form*(a+a*sin(e+f*x))^m*(A+B*sin(e+f*x)+C*sin(e+f*x)^2)=#\n\n\n @test_int [(a + a*sin(e + f*x))^m*(A + B*sin(e + f*x) + C*sin(e + f*x)^2), x, 4, If(13>=8, ((C - B*(2 + m))*cos(e + f*x)*(a + a*sin(e + f*x))^m)\/(f*(1 + m)*(2 + m)) - (2^(1\/2 + m)*(B*m*(2 + m) + C*(1 + m + m^2) + A*(2 + 3*m + m^2))*cos(e + f*x)*HypergeometricFunctions._\u2082F\u2081(1\/2, 1\/2 - m, 3\/2, (1\/2)*(1 - sin(e + f*x)))*(1 + sin(e + f*x))^(-(1\/2) - m)*(a + a*sin(e + f*x))^m)\/(f*(1 + m)*(2 + m)) - (C*cos(e + f*x)*(a + a*sin(e + f*x))^(1 + m))\/(a*f*(2 + m)), ((C - B*(2 + m))*cos(e + f*x)*(a + a*sin(e + f*x))^m)\/(f*(1 + m)*(2 + m)) - (2^(1\/2 + m)*(B*m*(2 + m) + C*(1 + m + m^2) + A*(2 + 3*m + m^2))*cos(e + f*x)*HypergeometricFunctions._\u2082F\u2081(1\/2, 1\/2 - m, 3\/2, (1\/2)*(1 - sin(e + f*x)))*(1 + sin(e + f*x))^(-(1\/2) - m)*(a + a*sin(e + f*x))^m)\/(f*(2 + 3*m + m^2)) - (C*cos(e + f*x)*(a + a*sin(e + f*x))^(1 + m))\/(a*f*(2 + m)))]\n\n\n #= ::Section::Closed:: =#\n #=Integrands*of*the*form*(a+b*sin(e+f*x))^m*(A+B*sin(e+f*x)+C*sin(e+f*x)^2)=#\n\n\n @test_int [(a + b*sin(e + f*x))^m*(A + (A + C)*sin(e + f*x) + C*sin(e + f*x)^2), x, 7, -((4*sqrt(2)*C*AppellF1(1\/2, -(3\/2), -m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(f*sqrt(1 + sin(e + f*x))))) - (2*sqrt(2)*(A - C)*AppellF1(1\/2, -(1\/2), -m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(f*sqrt(1 + sin(e + f*x))))]\n\n\n @test_int [(a + b*sin(e + f*x))^m*(A + B*sin(e + f*x) + C*sin(e + f*x)^2), x, 8, -((C*cos(e + f*x)*(a + b*sin(e + f*x))^(1 + m))\/(b*f*(2 + m))) + (sqrt(2)*(a + b)*(a*C - b*B*(2 + m))*AppellF1(1\/2, 1\/2, -1 - m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(b^2*f*(2 + m)*sqrt(1 + sin(e + f*x)))) - (sqrt(2)*(a^2*C + b^2*C*(1 + m) + A*b^2*(2 + m) - a*b*B*(2 + m))*AppellF1(1\/2, 1\/2, -m, 3\/2, (1\/2)*(1 - sin(e + f*x)), (b*(1 - sin(e + f*x)))\/(a + b))*cos(e + f*x)*(a + b*sin(e + f*x))^m)\/(((a + b*sin(e + f*x))\/(a + b))^m*(b^2*f*(2 + m)*sqrt(1 + sin(e + f*x))))]\n end\n","avg_line_length":84.0136986301,"max_line_length":829,"alphanum_fraction":0.4066525355} {"size":347,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using ReachabilityAnalysis\nDict([\n \"opts\" => Dict(:T=>10.0, :alg=>TMJets(abstol=1e-15, orderT=8, orderQ=1, maxsteps=50_000)),\n \"X0\" => Hyperrectangle(low=[1.3, 1.3, -0.2, -0.2], high=[1.7, 1.7, 0.2, 0.2]),\n \"info\" => Dict(\n \"name\" => \"robot_arm\",\n \"dim\" => 2,\n \"linear\" => false,\n \"hybrid\" => false\n )\n])\n","avg_line_length":28.9166666667,"max_line_length":94,"alphanum_fraction":0.5014409222} {"size":510,"ext":"jl","lang":"Julia","max_stars_count":11.0,"content":"using Rcl\nusing Test\n\n# run test in the whole directory, latest modified files\n# are run first, this makes waiting time shorter when writing\n# or modifying unit-tests\nfunction run_tests(dir)\n files = readdir(dir)\n\n filter!(files) do x\n occursin(r\".*\\.jl$\", x)\n end\n\n sort!(files; by = fn -> stat(joinpath(dir, fn)).mtime, 
rev = true)\n\n map(files) do file\n include(joinpath(dir, file))\n end\nend\n\n\n@testset \"Rcl Test\" begin\n run_tests(joinpath(dirname(@__FILE__), \"unit\"))\nend\n","avg_line_length":20.4,"max_line_length":70,"alphanum_fraction":0.6607843137} {"size":1183,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using StatFiles, StatsModels, DataFrames, DataFramesMeta, GLM\ndf = DataFrame(load(raw\"d:\\OneDrive\\Documents\\Macros\\nlsw88.dta\"))[:,[:wage; :tenure; :ttl_exp; :collgrad; :industry]]\ndropmissing!(df)\ndesc = describe(df, :eltype)\nfor i in axes(desc, 1) # needed only for lm()\n if desc[i,:eltype] == Float32\n sym = desc[i,:variable]\n @transform!(df, @byrow $sym=Float64($sym))\n end\nend\nf = @formula(wage ~ 1 + tenure + ttl_exp * collgrad)\nf = apply_schema(f, schema(f, df))\nresp, predexog = modelcols(f, df)\n\nfit = lm(f, df)\n\nisa(fit.model, LinearModel)\n_terms = fit.mf.f.rhs.terms\nk = length(_terms)\n_I = zeros(k,k); _I[diagind(_I)] .= 1.\n\ntermcols = Dict(t => _I[:,i] for (i,t) in enumerate(_terms))\nnewtermcols = Dict{AbstractTerm, Vector{Float64}}()\nfor pair in termcols\n if isa(pair.first, InteractionTerm)\n newtermcols[InteractionTerm((pair.first.terms[2], pair.first.terms[1]))] = pair.second\n end\nend\nmerge!(termcols, newtermcols)\n\nHstr = \"wage + 2 * tenure ==3\"\nH = Meta.parse(Hstr)\n\ndump(Meta.parse(\"1 + ttl_exp & collgrad\"))\n\nfunction processH(H::Expr)\n if H.head==:call\n if H.args[1] == :(==)\n return Expr(:(-), H.args[2], H.args[3])\n end\n end\nend\n","avg_line_length":27.511627907,"max_line_length":118,"alphanum_fraction":0.663567202} {"size":866,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"const config = Dict{String, Any}(\n # whether to debug AWS requests and responses\n \"dbg\" => false,\n\n # AWS credentials to connect with\n # If not set, they are taken from environment or .aws\/* files\n #\"id\" => \"\",\n #\"key\" => \"\",\n #\"region\" => AWS.US_WEST_2,\n #\"availability-zones\" => [\"us-west-2a\", \"us-west-2b\"],\n\n # AMI and instance specifications to use for EC2 tests\n # a generic AMI to use to test ec2 operations\n # any linux AMI would do (e.g. 
ami-835b4efa - Ubuntu 16.04 64 bit us-west-2)\n # \"ami-generic\" => \"ami-835b4efa\",\n # the instance type to launch\n # \"insttype\" => \"m3.medium\",\n # tags to tag the instances with\n # \"ownertag\" => \"awstest\",\n # \"nametag\" => \"awstest\",\n # key to use to launch instances\n # \"keyname\" => \"awstest\",\n\n # AWS Account ID (required for SQS)\n #\"awsAccountID\" => \"\"\n)\n","avg_line_length":32.0740740741,"max_line_length":80,"alphanum_fraction":0.6004618938} {"size":58,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"julia> sizeof(Int8)\n1\n\njulia> t = 1\n1\n\njulia> sizeof(t)\n8\n","avg_line_length":6.4444444444,"max_line_length":19,"alphanum_fraction":0.6379310345} {"size":4300,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"interval_indicators = [\"VWAP\", \"AD\", \"OBV\", \n \"TRANGE\", \"STOCH\", \"STOCHF\", \"BOP\",\n \"ULTOSC\", \"SAR\", \"ADOSC\"]\n\nfor func in interval_indicators\n fname = Symbol(func)\n @eval begin\n function ($fname)(symbol::String, interval::String; datatype::String=\"json\", kwargs...)\n @argcheck in(interval, [\"1min\", \"5min\", \"15min\", \"30min\", \"60min\", \"daily\", \"weekly\", \"monthly\"])\n \n requiredParams = \"&symbol=$symbol&interval=$interval&datatype=$datatype&\"\n optionalParams = _parse_params(kwargs)\n \n uri = _form_uri_head(uppercase($func)) * requiredParams * optionalParams * \"&apikey=\" * ENV[\"ALPHA_VANTAGE_API_KEY\"]\n data = _get_request(uri)\n return _parse_response(data, datatype)\n end\n export $fname\n end\nend\n\ninterval_seriestype_indicators = [\"HT_TRENDLINE\", \"HT_SINE\", \"HT_TRENDMODE\",\n \"HT_DCPERIOD\", \"HT_DCPHASE\", \"HT_PHASOR\", \n \"MACD\", \"MAMA\", \"MACDEXT\", \"APO\", \"PPO\"]\n\nfor func in interval_seriestype_indicators\n fname = Symbol(func)\n @eval begin\n function ($fname)(symbol::String, interval::String, series_type::String; datatype::String=\"json\", kwargs...)\n @argcheck in(interval, [\"1min\", \"5min\", \"15min\", \"30min\", \"60min\", \"daily\", \"weekly\", \"monthly\"])\n @argcheck in(series_type, [\"open\", \"high\", \"low\", \"close\"])\n\n requiredParams = \"&symbol=$symbol&interval=$interval&series_type=$series_type&datatype=$datatype&\"\n optionalParams = _parse_params(kwargs)\n\n uri = _form_uri_head(uppercase($func)) * requiredParams * optionalParams * \"&apikey=\" * ENV[\"ALPHA_VANTAGE_API_KEY\"]\n data = _get_request(uri)\n return _parse_response(data, datatype)\n end\n export $fname\n end\nend\n\ninterval_timeperiod_indicators = [\"ADX\", \"ADXR\", \n \"AROON\", \"NATR\", \"WILLR\", \n \"CCI\", \"AROONOSC\", \"MFI\", \n \"DX\", \"MINUS_DI\", \"PLUS_DI\", \n \"MINUS_DM\", \"PLUS_DM\", \"ATR\"]\n\nfor func in interval_timeperiod_indicators\n fname = Symbol(func)\n @eval begin\n function ($fname)(symbol::String, interval::String, time_period::Int64; datatype::String=\"json\", kwargs...)\n @argcheck in(interval, [\"1min\", \"5min\", \"15min\", \"30min\", \"60min\", \"daily\", \"weekly\", \"monthly\"])\n @argcheck time_period > 0\n \n requiredParams = \"&symbol=$symbol&interval=$interval&time_period=$time_period&datatype=$datatype&\"\n optionalParams = _parse_params(kwargs)\n\n uri = _form_uri_head(uppercase($func)) * requiredParams * optionalParams * \"&apikey=\" * ENV[\"ALPHA_VANTAGE_API_KEY\"]\n data = _get_request(uri)\n return _parse_response(data, datatype)\n end\n export $fname\n end\nend\n\ninterval_timeperiod_seriestype_indicators = [\"EMA\", \"SMA\", \"WMA\", \n \"DEMA\", \"TEMA\", \"TRIMA\", \n \"KAMA\", \"RSI\", \"T3\", \n \"STOCHRSI\", 
\"MOM\", \"CMO\", \n \"ROC\", \"ROCR\", \"TRIX\", \n \"BBANDS\", \"MIDPOINT\", \"MIDPRICE\"]\n\nfor func in interval_timeperiod_seriestype_indicators\n fname = Symbol(func)\n @eval begin \n function ($fname)(symbol::String, interval::String, time_period::Int64, series_type::String; datatype::String=\"json\", kwargs...)\n @argcheck in(interval, [\"1min\", \"5min\", \"15min\", \"30min\", \"60min\", \"daily\", \"weekly\", \"monthly\"])\n @argcheck in(series_type, [\"open\", \"high\", \"low\", \"close\"])\n @argcheck time_period > 0\n\n requiredParams = \"&symbol=$symbol&interval=$interval&time_period=$time_period&series_type=$series_type&datatype=$datatype&\"\n optionalParams = _parse_params(kwargs)\n uri = _form_uri_head(uppercase($func)) * requiredParams * optionalParams * \"&apikey=\" * ENV[\"ALPHA_VANTAGE_API_KEY\"]\n data = _get_request(uri)\n return _parse_response(data, datatype)\n end\n export $fname\n end\nend","avg_line_length":47.2527472527,"max_line_length":136,"alphanum_fraction":0.5593023256} {"size":598,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"\nfunction _default(mesh::TOrSignal{M}, s::Style, data::Dict) where M <: GeometryBasics.Mesh\n return @gen_defaults! data begin\n shading = true\n backlight = 0f0\n main = mesh\n vertex_color = Vec4f0(0)\n texturecoordinates = Vec2f0(0)\n image = nothing => Texture\n matcap = nothing => Texture\n fetch_pixel = false\n uv_scale = Vec2f0(1)\n shader = GLVisualizeShader(\n \"fragment_output.frag\", \"util.vert\", \"standard.vert\", \"standard.frag\",\n view = Dict(\"light_calc\" => light_calc(shading))\n )\n end\nend\n","avg_line_length":31.4736842105,"max_line_length":90,"alphanum_fraction":0.5936454849} {"size":1227,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This software is licensed under the MIT License (MIT).\n\nimport Pkg\n\nsrcdir = @__DIR__\n\nPkg.activate(srcdir)\n\n# Need to have PackageCompiler installed\nPkg.add(\"PackageCompiler\", preserve = Pkg.PRESERVE_ALL)\n\nprj_file = Pkg.project().path\nprj_dir = dirname(prj_file)\n\n@info(\"Building custom system image in project \\\"$prj_dir\\\".\")\n\n@info(\"Instantiating project.\")\nPkg.instantiate()\n\n@info(\"Precompiling packages.\")\nPkg.precompile()\n\nexcluded = [\n \"PyCall\", \n \"SymPy\", \n \"StatsPlots\",\n \"ImageMagick\",\n \"StatsBase\",\n \"PackageCompiler\",\n \"Ipopt\",\n \"SpecialFunctions\",\n]\n\nprj_sysimage = get(Pkg.TOML.parsefile(prj_file), \"deps\", Dict{String,Any}())\nsysimg_pkgs = filter(x -> x \u2209 excluded, sort(collect(keys(prj_sysimage))))\n@info(\"Package to include in system image: $(join(sysimg_pkgs, \" \"))\")\n\n@info(\"Building system image.\")\n\nimport PackageCompiler, Libdl\n\nsysimage_path = joinpath(srcdir, \"JuliaSysimage.\" * Libdl.dlext)\n\nPackageCompiler.create_sysimage(\n Symbol.(sysimg_pkgs),\n sysimage_path = sysimage_path,\n precompile_execution_file = joinpath(srcdir, \"precompile_exec.jl\"),\n cpu_target = PackageCompiler.default_app_cpu_target(),\n replace_default = false\n)\n\n@info \"Created custom Julia system image\"","avg_line_length":23.5961538462,"max_line_length":76,"alphanum_fraction":0.7294213529} {"size":4103,"ext":"jl","lang":"Julia","max_stars_count":3.0,"content":"module RxNav\n\nusing EzXML\nusing HTTP\n\nexport rcui, drugs, interaction, interaction_within_list, prescribable, interact\n\ninclude(\"util.jl\")\ninclude(\"API\/RxClassAPI.jl\")\ninclude(\"API\/RxNormAPI.jl\")\ninclude(\"API\/RxTermsAPI.jl\")\ninclude(\"API\/DrugInteractionAPI.jl\")\n\n\"\"\"\n rcui(name)\n\nTake 
a name of an NDC drug, return its rxcui as String.\n\"\"\"\nfunction rcui(name)\n try\n doc = getdoc(\"rcui\", HTTP.URIs.escapeuri(name))\n idstring = nodecontent(findfirst(\"\/\/idGroup\/rxnormId\", doc))\n return idstring\n catch y\n @warn y\n return \"\"\n end\nend\n\n\"\"\"\n drugs(name)\n\nGiven a drug name, return a list of all available dosing forms of the drug.\n\"\"\"\nfunction drugs(name)\n try\n doc = getdoc(\"drugs\", HTTP.URIs.escapeuri(name))\n nameelements = findall(\"\/\/drugGroup\/conceptGroup\/conceptProperties\/name\", doc)\n return nodecontent.(nameelements)\n catch y\n @warn y\n return String[]\n end\nend\n\n\"\"\"\n interact(list::Vector)\n interact(s1::String, severeonly::Bool=true)\n interact(s1::String, s2::String, args...)\n\nGet a list of interactions for a single drug (or rxcui drug id) or pairwise interactions for more than one drug (or rxcuid).\n\"\"\"\ninteract(list::Vector) = if length(list) > 1 interaction_within_list(list); else interact(first(list)) end\ninteract(s1::String, severeonly::Bool=true) = interaction(s1; ONCHigh=severeonly)\ninteract(s1::String, s2::String, args...) = interact([[s1, s2]; [x for x in args]])\n\n\"\"\"\n interaction(id; ONCHigh = true)\n\nGiven a drug name or rxcui id string, return known drug interations for that drug.\nIf ONCHigh is true only return the ONCHigh database entries, which returns fewer\nentries, tending to list only the more significant interactions. Set ONCHigh\nto false to get all known interactions, which can be multiple and sometimes redundant.\nReturns a `Vector` of `NamedTuple`s as in (drug1, drug2, severity, description)\n\"\"\"\nfunction interaction(id; ONCHigh = true)\n if !is_in_rxcui_format(id)\n id = rcui(id)\n end\n interactions = NamedTuple[]\n try\n tail = HTTP.URIs.escapeuri(id) * (ONCHigh ? \"&sources=ONCHigh\" : \"\")\n doc = getdoc(\"interaction\", tail)\n pairs = findall(\"\/\/interactionTypeGroup\/interactionType\/interactionPair\", doc)\n for p in pairs\n sev = nodecontent(findfirst(\"severity\", p))\n desc = nodecontent(findfirst(\"description\", p))\n enames = findall(\"interactionConcept\/minConceptItem\/name\", p)\n if !isempty(enames)\n names = nodecontent.(enames)\n push!(interactions, (drug1=names[1], drug2=names[2], severity=sev, description=desc))\n end\n end\n catch y\n @warn y\n end\n return interactions\nend\n\n\"\"\"\n interaction_within_list(idlist::Vector{String})\n \nGiven a list of drug names or rxcui id strings, return known drug interations for \nthat combination of drugs. 
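
For example (hypothetical drug names; the interactions returned depend on the live RxNav service):

    interaction_within_list(["fluconazole", "warfarin"])
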
Results are organized pairwise, so if A, B, and C have\nmutual interactions this will be reported for example as A with B, A with C, B with C.\nReturns a `Vector` of `NamedTuple`s as in (drug1, drug2, severity, description)\n\"\"\"\nfunction interaction_within_list(idlist::Vector{String})\n for (i, id) in enumerate(idlist)\n if !is_in_rxcui_format(id)\n idlist[i] = rcui(id)\n end\n end\n interactions = NamedTuple[]\n try\n arg = join(map(x -> HTTP.URIs.escapeuri(x), idlist), \"+\")\n doc = getdoc(\"interactionlist\", arg)\n pairs = findall(\"\/\/fullInteractionTypeGroup\/fullInteractionType\/interactionPair\", doc)\n for p in pairs\n sev = nodecontent(findfirst(\"severity\", p))\n desc = nodecontent(findfirst(\"description\", p))\n enames = findall(\"interactionConcept\/minConceptItem\/name\", p)\n if !isempty(enames)\n names = nodecontent.(enames)\n push!(interactions, (drug1=names[1], drug2=names[2], severity=sev, description=desc))\n end\n end\n catch y\n @warn y\n end\n return interactions\nend\n\nend # module\n","avg_line_length":32.824,"max_line_length":124,"alphanum_fraction":0.6597611504} {"size":10597,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"export nagents, AbstractAgent, ABM, AgentBasedModel,\nrandom_activation, by_id, fastest, partial_activation, random_agent,\nproperty_activation, allagents\n\nabstract type AbstractSpace end\n\n\"\"\"\nAll agents must be a mutable subtype of `AbstractAgent`.\nYour agent type **must have** the `id` field as first field.\nDepending on the space structure there might be a `pos` field of appropriate type\nand a `vel` field of appropriate type.\n\nYour agent type may have other additional fields relevant to your system,\nfor example variable quantities like \"status\" or other \"counters\".\n\n## Examples\nImagine agents who have extra properties `weight, happy`. For a [`GraphSpace`](@ref)\nwe would define them like\n```julia\nmutable struct ExampleAgent <: AbstractAgent\n id::Int\n pos::Int\n weight::Float64\n happy::Bool\nend\n```\nwhile for e.g. a [`ContinuousSpace`](@ref) we would use\n```julia\nmutable struct ExampleAgent <: AbstractAgent\n id::Int\n pos::NTuple{2, Float64}\n vel::NTuple{2, Float64}\n weight::Float64\n happy::Bool\nend\n```\nwhere `vel` is optional, useful if you want to use [`move_agent!`](@ref) in continuous\nspace.\n\"\"\"\nabstract type AbstractAgent end\n\nfunction correct_pos_type(n, model)\n if typeof(model.space) <: GraphSpace\n return coord2vertex(n, model)\n elseif typeof(model.space) <: GridSpace\n return vertex2coord(n, model)\n end\nend\n\nSpaceType=Union{Nothing, AbstractSpace}\n\nstruct AgentBasedModel{A<:AbstractAgent, S<:SpaceType, F, P}\n agents::Dict{Int,A}\n space::S\n scheduler::F\n properties::P\nend\nconst ABM = AgentBasedModel\nagenttype(::ABM{A}) where {A} = A\nspacetype(::ABM{A, S}) where {A, S} = S\n\n\"\"\"\n AgentBasedModel(AgentType [, space]; scheduler, properties) \u2192 model\nCreate an agent based model from the given agent type and `space`.\nYou can provide an agent _instance_ instead of type, and the type will be deduced.\n `ABM` is equivalent with `AgentBasedModel`.\n\nThe agents are stored in a dictionary that maps unique ids (integers)\nto agents. 
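
For example, a minimal sketch (reusing the `ExampleAgent` type from the `AbstractAgent` docstring above):
```julia
model = AgentBasedModel(ExampleAgent; scheduler = random_activation)
```
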
Use `model[id]` to get the agent with the given `id`.\n\n`space` is a subtype of `AbstractSpace`: [`GraphSpace`](@ref), [`GridSpace`](@ref) or\n[`ContinuousSpace`](@ref).\nIf it is ommited then all agents are virtually in one node and have no spatial structure.\n\n**Note:** Spaces are mutable objects and are not designed to be shared between models.\nCreate a fresh instance of a space with the same properties if you need to do this.\n\n`properties = nothing` is additional model-level properties (typically a dictionary)\nthat can be accessed as `model.properties`. However, if `properties` is a dictionary with\nkey type `Symbol`, or of it is a struct, then the syntax\n`model.name` is short hand for `model.properties[:name]` (or `model.properties.name`\nfor structs).\nThis syntax can't be used for `name` being `agents, space, scheduler, properties`,\nwhich are the fields of `AgentBasedModel`.\n\n`scheduler = fastest` decides the order with which agents are activated\n(see e.g. [`by_id`](@ref) and the scheduler API).\n\nType tests for `AgentType` are done, and by default\nwarnings are thrown when appropriate. Use keyword `warn=false` to supress that.\n\"\"\"\nfunction AgentBasedModel(\n ::Type{A}, space::S = nothing;\n scheduler::F = fastest, properties::P = nothing, warn = true\n ) where {A<:AbstractAgent, S<:SpaceType, F, P}\n agent_validator(A, space, warn)\n\n agents = Dict{Int, A}()\n return ABM{A, S, F, P}(agents, space, scheduler, properties)\nend\n\nfunction AgentBasedModel(agent::AbstractAgent, args...; kwargs...)\n return ABM(typeof(agent), args...; kwargs...)\nend\n\nfunction Base.show(io::IO, abm::ABM{A}) where {A}\n n = isconcretetype(A) ? nameof(A) : string(A)\n s = \"AgentBasedModel with $(nagents(abm)) agents of type $(n)\"\n if abm.space == nothing\n s*= \"\\n no space\"\n else\n s*= \"\\n space: $(sprint(show, abm.space))\"\n end\n s*= \"\\n scheduler: $(nameof(abm.scheduler))\"\n print(io, s)\n if abm.properties \u2260 nothing\n print(io, \"\\n properties: \", abm.properties)\n end\nend\n\n\"\"\"\n getindex(model::ABM, id::Integer)\n\nReturn an agent given its ID.\n\"\"\"\nBase.getindex(m::ABM, id::Integer) = m.agents[id]\n\n\"\"\"\n setindex!(model::ABM, agent::AbstractAgent, id::Int)\n\nAdd an `agent` to the `model` at a given index: `id`. Note this method will return an error if the `id` requested is not equal to `agent.id`.\n\"\"\"\nfunction Base.setindex!(m::ABM, a::AbstractAgent, id::Int)\n a.id \u2260 id && throw(ArgumentError(\"You are adding an agent to an ID not equal with the agent's ID!\"))\n m.agents[id] = a\nend\n\n\"\"\"\n getproperty(model::ABM, prop::Symbol)\n\nReturn a property from the current `model`. Possibilities are\n- `:agents`, list of all agents present\n- `:space`, current space information\n- `:scheduler`, which sheduler is being used\n- `:properties`, dictionary of all model properties\n- Any symbol that exists within the model properties dictionary\n\nAlternatively, all of these values can be returned using the `model.x` syntax.\nFor example, if a model has the set of properties `Dict(:weight => 5, :current => false)`, retrieving these values can be obtained via `model.properties` as well as the `getproperty()` method. 
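
For instance (a sketch reusing `ExampleAgent` from above):
```julia
model = ABM(ExampleAgent; properties = Dict(:weight => 5, :current => false))
model.properties[:weight]   # 5
```
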
Equivalently, we can use `getproperty(model, :weight)` and `model.weight`.\n\"\"\"\nfunction Base.getproperty(m::ABM{A, S, F, P}, s::Symbol) where {A, S, F, P}\n if s === :agents\n return getfield(m, :agents)\n elseif s === :space\n return getfield(m, :space)\n elseif s === :scheduler\n return getfield(m, :scheduler)\n elseif s === :properties\n return getfield(m, :properties)\n elseif P <: Dict\n return getindex(getfield(m, :properties), s)\n else # properties is assumed to be a struct\n return getproperty(getfield(m, :properties), s)\n end\nend\n\nfunction Base.setproperty!(m::ABM{A, S, F, P}, s::Symbol, x) where {A, S, F, P}\n properties = getfield(m, :properties)\n if properties != nothing && haskey(properties, s)\n properties[s] = x\n else\n throw(ErrorException(\"Cannot set $(s) in this manner. Please use the `AgentBasedModel` constructor.\"))\n end\nend\n\n\"\"\"\n agent_validator(agent, space)\nValidate the user supplied agent (subtype of `AbstractAgent`).\nChecks for mutability and existence and correct types for fields depending on `SpaceType`.\n\"\"\"\nfunction agent_validator(::Type{A}, space::S, warn::Bool) where {A<:AbstractAgent, S<:SpaceType}\n # Check A for required properties & fields\n if warn\n isconcretetype(A) || @warn \"AgentType is not concrete. If your agent is parametrically typed, you're probably seeing this warning because you gave `Agent` instead of `Agent{Float64}` (for example) to this function. You can also create an instance of your agent and pass it to this function. If you want to use `Union` types for mixed agent models, you can silence this warning.\"\n isbitstype(A) && @warn \"AgentType should be mutable. Try adding the `mutable` keyword infront of `struct` in your agent definition.\"\n end\n isconcretetype(A) || return # no checks can be done for union types\n (any(isequal(:id), fieldnames(A)) && fieldnames(A)[1] == :id) || throw(ArgumentError(\"First field of Agent struct must be `id` (it should be of type `Int`).\"))\n fieldtype(A, :id) <: Integer || throw(ArgumentError(\"`id` field in Agent struct must be of type `Int`.\"))\n if space != nothing\n (any(isequal(:pos), fieldnames(A)) && fieldnames(A)[2] == :pos) || throw(ArgumentError(\"Second field of Agent struct must be `pos` when using a space.\"))\n # Check `pos` field in A has the correct type\n pos_type = fieldtype(A, :pos)\n space_type = typeof(space)\n if space_type <: GraphSpace && !(pos_type <: Integer)\n throw(ArgumentError(\"`pos` field in Agent struct must be of type `Int` when using GraphSpace.\"))\n elseif space_type <: GridSpace && !(pos_type <: NTuple{D, Integer} where {D})\n throw(ArgumentError(\"`pos` field in Agent struct must be of type `NTuple{Int}` when using GridSpace.\"))\n elseif space_type <: ContinuousSpace\n if !(pos_type <: NTuple{D, <:AbstractFloat} where {D})\n throw(ArgumentError(\"`pos` field in Agent struct must be of type `NTuple{<:AbstractFloat}` when using ContinuousSpace.\"))\n end\n if warn && any(isequal(:vel), fieldnames(A)) && !(fieldtype(A, :vel) <: NTuple{D, <:AbstractFloat} where {D})\n @warn \"`vel` field in Agent struct should be of type `NTuple{<:AbstractFloat}` when using ContinuousSpace.\"\n end\n end\n end\nend\n\n\"\"\"\n random_agent(model)\nReturn a random agent from the model.\n\"\"\"\nrandom_agent(model) = model[rand(keys(model.agents))]\n\n\"\"\"\n nagents(model::ABM)\nReturn the number of agents in the `model`.\n\"\"\"\nnagents(model::ABM) = length(model.agents)\n\n\"\"\"\n allagents(model)\nReturn an iterator over all agents of the 
model.\n\"\"\"\nallagents(model) = values(model.agents)\n\n####################################\n# Schedulers\n####################################\n\"\"\"\n fastest\nActivate all agents once per step in the order dictated by the agent's container,\nwhich is arbitrary (the keys sequence of a dictionary).\nThis is the fastest way to activate all agents once per step.\n\"\"\"\nfastest(model) = keys(model.agents)\n\n\"\"\"\n by_id\nActivate agents at each step according to their id.\n\"\"\"\nfunction by_id(model::ABM)\n agent_ids = sort(collect(keys(model.agents)))\n return agent_ids\nend\n\n@deprecate as_added by_id\n\n\"\"\"\n random_activation\nActivate agents once per step in a random order.\nDifferent random ordering is used at each different step.\n\"\"\"\nfunction random_activation(model::ABM)\n order = shuffle(collect(keys(model.agents)))\nend\n\n\"\"\"\n partial_activation(p)\nAt each step, activate only `p` percentage of randomly chosen agents.\n\"\"\"\nfunction partial_activation(p::Real)\n function partial(model::ABM{A, S, F, P}) where {A, S, F, P}\n ids = collect(keys(model.agents))\n return randsubseq(ids, p)\n end\n return partial\nend\n\n\"\"\"\n property_activation(property)\nAt each step, activate the agents in an order dictated by their `property`,\nwith agents with greater `property` acting first. `property` is a `Symbol`, which\njust dictates which field the agents to compare.\n\"\"\"\nfunction property_activation(p::Symbol)\n function by_property(model::ABM{A, S, F, P}) where {A, S, F, P}\n ids = collect(keys(model.agents))\n properties = [getproperty(model.agents[id], p) for id in ids]\n s = sortperm(properties)\n return ids[s]\n end\nend\n","avg_line_length":37.1824561404,"max_line_length":386,"alphanum_fraction":0.6853826555} {"size":2495,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"# This file was generated by the Julia Swagger Code Generator\n# Do not modify this file directly. 
Modify the swagger specification instead.\n\n\n\nmutable struct IoK8sApiAppsV1beta2DeploymentList <: SwaggerModel\n apiVersion::Any # spec type: Union{ Nothing, String } # spec name: apiVersion\n items::Any # spec type: Union{ Nothing, Vector{IoK8sApiAppsV1beta2Deployment} } # spec name: items\n kind::Any # spec type: Union{ Nothing, String } # spec name: kind\n metadata::Any # spec type: Union{ Nothing, IoK8sApimachineryPkgApisMetaV1ListMeta } # spec name: metadata\n\n function IoK8sApiAppsV1beta2DeploymentList(;apiVersion=nothing, items=nothing, kind=nothing, metadata=nothing)\n o = new()\n validate_property(IoK8sApiAppsV1beta2DeploymentList, Symbol(\"apiVersion\"), apiVersion)\n setfield!(o, Symbol(\"apiVersion\"), apiVersion)\n validate_property(IoK8sApiAppsV1beta2DeploymentList, Symbol(\"items\"), items)\n setfield!(o, Symbol(\"items\"), items)\n validate_property(IoK8sApiAppsV1beta2DeploymentList, Symbol(\"kind\"), kind)\n setfield!(o, Symbol(\"kind\"), kind)\n validate_property(IoK8sApiAppsV1beta2DeploymentList, Symbol(\"metadata\"), metadata)\n setfield!(o, Symbol(\"metadata\"), metadata)\n o\n end\nend # type IoK8sApiAppsV1beta2DeploymentList\n\nconst _property_map_IoK8sApiAppsV1beta2DeploymentList = Dict{Symbol,Symbol}(Symbol(\"apiVersion\")=>Symbol(\"apiVersion\"), Symbol(\"items\")=>Symbol(\"items\"), Symbol(\"kind\")=>Symbol(\"kind\"), Symbol(\"metadata\")=>Symbol(\"metadata\"))\nconst _property_types_IoK8sApiAppsV1beta2DeploymentList = Dict{Symbol,String}(Symbol(\"apiVersion\")=>\"String\", Symbol(\"items\")=>\"Vector{IoK8sApiAppsV1beta2Deployment}\", Symbol(\"kind\")=>\"String\", Symbol(\"metadata\")=>\"IoK8sApimachineryPkgApisMetaV1ListMeta\")\nBase.propertynames(::Type{ IoK8sApiAppsV1beta2DeploymentList }) = collect(keys(_property_map_IoK8sApiAppsV1beta2DeploymentList))\nSwagger.property_type(::Type{ IoK8sApiAppsV1beta2DeploymentList }, name::Symbol) = Union{Nothing,eval(Meta.parse(_property_types_IoK8sApiAppsV1beta2DeploymentList[name]))}\nSwagger.field_name(::Type{ IoK8sApiAppsV1beta2DeploymentList }, property_name::Symbol) = _property_map_IoK8sApiAppsV1beta2DeploymentList[property_name]\n\nfunction check_required(o::IoK8sApiAppsV1beta2DeploymentList)\n (getproperty(o, Symbol(\"items\")) === nothing) && (return false)\n true\nend\n\nfunction validate_property(::Type{ IoK8sApiAppsV1beta2DeploymentList }, name::Symbol, val)\nend\n","avg_line_length":63.9743589744,"max_line_length":255,"alphanum_fraction":0.7687374749} {"size":2012,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"using BlockArrays\nusing ControlSystems\nusing LinearAlgebra\nusing PredictiveControl\n\nconst MPC = PredictiveControl\n\n# Some utilities for testing\ninclude( \"..\/..\/src\/utilities.jl\" )\ninclude( \"..\/testUtils.jl\" )\n\n\n# Create a sample system\nA = [1.0 0.0 0.0 0.0;\n 0.0 2.0 0.0 0.0;\n 0.0 0.0 3.0 0.0;\n 0.0 0.0 0.0 4.0]\nB = [1.0 0.0;\n 0.0 0.0;\n 0.0 1.0;\n 0.0 0.0]\nC = to_matrix( Float64, I, 4 )\n\nsys = StateSpace( A, B, C, 0, 0.1 )\n\nN = 5\nnx = ControlSystems.nstates( sys )\nnu = ControlSystems.ninputs( sys )\n\n###################################################################\n# Test the propagation of the input through the system\n###################################################################\n\n# Horizon must be greater than 1\n@test_throws DomainError MPC.initialpropagation( sys, -1 )\n@test_throws DomainError MPC.initialpropagation( sys, 0 )\n\n# Form a propagation matrix to test\nmat = MPC.initialpropagation( sys, N )\n\n@test blocksize( mat ) == (N, 
1)\n@test size( mat ) == (nx*N, nx)\n\n# Make sure the blocks have the proper values for the system\nfor i = 1:N\n b = view( mat, Block( i, 1 ) )\n\n @test size( b ) == (nx, nx)\n @test b == sys.A^i\nend\n\n\n###################################################################\n# Test the propagation of the initial state through the controlled\n# system to form the new input sequence\n###################################################################\nK = [1.0 0.0 1.0 0.0;\n 0.0 1.0 0.0 1.0]\n\ncsys = StateSpace( A - B*K, B, C, 0, 0.1 )\n\n# Horizon must be greater than 1\n@test_throws DomainError MPC.inputinitialpropagation( csys, K, -1 )\n@test_throws DomainError MPC.inputinitialpropagation( csys, K, 0 )\n\n# Form a propagation matrix to test\nmat = MPC.inputinitialpropagation( csys, K, N )\n\n@test blocksize( mat ) == (N, 1)\n@test size( mat ) == (nu*N, nx)\n\n# Make sure the blocks have the proper values for the system\nfor i = 1:N\n b = view( mat, Block( i, 1 ) )\n\n @test size( b ) == (nu, nx)\n @test b == -K*csys.A^(i-1)\nend\n","avg_line_length":25.4683544304,"max_line_length":67,"alphanum_fraction":0.5586481113} {"size":795,"ext":"jl","lang":"Julia","max_stars_count":213.0,"content":"#################### File I\/O ####################\n\nfunction readcoda(output::AbstractString, index::AbstractString)\n out = readdlm(output, Any)\n ind = readdlm(index, Any)\n\n firstind = ind[:, 2]\n firstiter = out[firstind, 1]\n lastind = ind[:, 3]\n lastiter = out[lastind, 1]\n\n thin = Int((lastiter[1] - firstiter[1]) \/ (lastind[1] - firstind[1]))\n window = maximum(firstiter):thin:minimum(lastiter)\n startind = firstind + (first(window) - firstiter) \/ step(window)\n stopind = lastind - (lastiter - last(window)) \/ step(window)\n\n names = AbstractString[ind[:, 1]...]\n\n value = Array{Float64}(length(window), length(names))\n for i in 1:size(value, 2)\n inds = Int(startind[i]):Int(stopind[i])\n value[:, i] = out[inds, 2]\n end\n\n Chains(value; iterations=window, names=names)\nend\n","avg_line_length":29.4444444444,"max_line_length":71,"alphanum_fraction":0.6251572327} {"size":1677,"ext":"jl","lang":"Julia","max_stars_count":3.0,"content":"\n#created in including file\n#comm = MPIComm{UInt64, UInt16, UInt32}()\n\npid = myPid(comm)\n\nmacro MPIMapTests()\n @test isa(map, BlockMap{UInt64, UInt16, UInt32})\n\n @test uniqueGIDs(map)\n\n for i = 1:5\n @test myLID(map, i)\n @test pid*5 + i - 5 == gid(map, i)\n end\n\n for i = 1:20\n if cld(i, 5) == pid\n @test myGID(map, i)\n @test (i-1)%5+1 == lid(map, i)\n else\n @test !myGID(map, i)\n @test 0 == lid(map, i)\n end\n end\n\n\n @test !myLID(map, -1)\n @test !myLID(map, 0)\n @test !myLID(map, 6)\n @test !myLID(map, 46)\n @test !myGID(map, -1)\n @test !myGID(map, 0)\n @test !myGID(map, 21)\n @test !myGID(map, 46)\n\n @test 0 == lid(map, -1)\n @test 0 == lid(map, 0)\n @test 0 == lid(map, 21)\n @test 0 == lid(map, 46)\n @test 0 == gid(map, -1)\n @test 0 == gid(map, 0)\n @test 0 == gid(map, 6)\n @test 0 == gid(map, 46)\n\n @test distributedGlobal(map)\n\n @test linearMap(map)\n\n @test 20 == numGlobalElements(map)\n @test 5 == numMyElements(map)\n\n @test pid*5 - 4 == minMyGID(map)\n @test pid*5 == maxMyGID(map)\n @test 1 == minAllGID(map)\n @test 20 == maxAllGID(map)\n @test 1 == minLID(map)\n @test 5 == maxLID(map)\n\n @test ([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4],\n [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5]) == remoteIDList(map, collect(1:20))\n\n @test collect((1:5) .+ 5*(pid - 1)) == 
myGlobalElementIDs(map)\nend\n\nmap = BlockMap(20, comm)\n@MPIMapTests\n\nmap = BlockMap(20, 5, comm)\n@MPIMapTests\n\nmap = BlockMap(5*numProc(comm), collect((1:5) .+ 5*(pid - 1)), comm)\n@MPIMapTests\n","avg_line_length":22.6621621622,"max_line_length":109,"alphanum_fraction":0.5163983304} {"size":1674,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"type ScatterPlot <: Graph\n\tdataGroups::Vector{DataGroup}\nend\n\nScatterPlot(dg::DataGroup) = ScatterPlot([dg])\n\nfunction plotData(p::PlotFrame, g::ScatterPlot, showLegend::Bool)\n\tglobal dgCount\n\n\tfor x in 1:length(g.dataGroups)\n\t\tdg = g.dataGroups[x]\n\t\tif dg.lineColor == \"blank\"\n\t\t\tdg.lineColor = goodColors[mod(x, length(goodColors))]\n\t\tend\n\n\t\tif dg.markerColor == \"blank\"\n\t\t\tdg.markerColor = goodColors[mod(x, length(goodColors))]\n\t\tend\n\n\t\tif dg.markerType == \"blank\"\n\t\t\tdg.markerType = goodSymbols[mod(x, length(goodSymbols))]\n\t\tend\n\n\t\tplotPoints(p, dg)\n\n\t\tif showLegend\n\t\t\tdgCount += 1\n\t\t\tif dg.legend.label == \"Data Group \"\n\t\t\t\tdg.legend.label = dg.legend.label*string(dgCount)\n\t\t\tend\n\t\tend\n\tend\nend\n\nfunction scatterplot(dgs::Vector{DataGroup})\n\tplot(PlotFrame(ScatterPlot(dgs)))\nend\n\nfunction scatterplot(dg::DataGroup)\n\tplot(PlotFrame(ScatterPlot(dg)))\nend\n\nfunction scatterplot(dfs::Vector{DataFrame})\n\tdgs = DataGroup[]\n\tfor df in dfs\n\t\tpush!(dgs, DataGroup(df))\n\tend\n\tscatterplot(dgs)\nend\n\nfunction scatterplot(df::DataFrame)\n\tscatterplot(DataGroup(df))\nend\n\nfunction scatterplot(ys::Array{Float64, 2})\n\tdfs = DataFrame[]\n\tfor r = 1:size(y, 1)\n\t\tpush!(dfs, DataFrame(x = collect(1:length(y)), y = y[r,:]))\n\tend\n\tscatterplot(dfs)\nend\n\nfunction scatterplot(xs::Array{Float64, 2}, ys::Array{Float64, 2})\n\tdfs = DataFrame[]\n\tfor r = 1:size(y, 1)\n\t\tpush!(dfs, DataFrame(x = x[r,:], y = y[r,:]))\n\tend\n\tscatterplot(dfs)\nend\n\nfunction scatterplot(xs::Array{Float64, 2}, ys::Array{Float64, 2}, yerrs::Array{Float64, 2})\n\tdfs = DataFrame[]\n\tfor r = 1:size(ys, 1)\n\t\tpush!(dfs, DataFrame(x = vec(xs[r,:]), y = vec(ys[r,:]), yerr = vec(yerrs[r,:])))\n\tend\n\tscatterplot(dfs)\nend\n","avg_line_length":21.4615384615,"max_line_length":92,"alphanum_fraction":0.6845878136} {"size":12584,"ext":"jl","lang":"Julia","max_stars_count":31.0,"content":"VERSION < v\"0.7.0-beta2.199\" && __precompile__()\n\n\"\"\"\nThe `ChangePrecision` module exports a macro `@changeprecision T expression`\nthat changes the \"default\" floating-point precision in a given `expression`\nto a new floating-point type `T`.\n\"\"\"\nmodule ChangePrecision\n\nimport Random, Statistics, LinearAlgebra\nusing Random: AbstractRNG\n\n## Note: code in this module must be very careful with math functions,\n# because we've defined module-specific versions of very basic\n# functions like + and *. Call Base.:+ etcetera if needed.\n\nexport @changeprecision\n\n############################################################################\n# The @changeprecision(T, expr) macro, below, takes calls to\n# functions f that default to producing Float64 (e.g. from integer args)\n# and converts them to calls to ChangePrecision.f(T, args...). Then\n# we implement our f(T, args...) to default to T instead. 
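# For example, under `@changeprecision Float32 ...` a call such as `rand(3)` is
# rewritten to `ChangePrecision.rand(Float32, 3)` and a literal `1/3` to
# `ChangePrecision./(Float32, 1, 3)`, both of which now give Float32 results.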
The following\n# are a list of function calls to transform in this way.\n\nconst randfuncs = (:rand, :randn, :randexp) # random-number generators\nconst matfuncs = (:ones, :zeros) # functions to construct arrays\nconst complexfuncs = (:abs, :angle) # functions that give Float64 for Complex{Int}\nconst binaryfuncs = (:*, :+, :-, :^) # binary functions on irrationals that make Float64\n\n# math functions that convert integer-like arguments to floating-point results\n# (from https:\/\/docs.julialang.org\/en\/release-0.6\/manual\/mathematical-operations\/, up to date as of 0.6)\nconst intfuncs = (:\/, :\\, :inv, :float,\n # powers logs and roots\n :\u221a,:\u221b,:sqrt,:cbrt,:hypot,:exp,:exp2,:exp10,:expm1,:log,:log2,:log10,:log1p,:cis,\n # trig\n :sin, :cos, :tan, :cot, :sec, :csc,\n :sinh, :cosh, :tanh, :coth, :sech, :csch,\n :asin, :acos, :atan, :acot, :asec, :acsc,\n :asinh, :acosh, :atanh, :acoth, :asech, :acsch,\n :sinc, :cosc, :atan2,\n :cospi, :sinpi,\n # trig in degrees\n :deg2rad,:rad2deg,\n :sind, :cosd, :tand, :cotd, :secd, :cscd,\n :asind, :acosd, :atand, :acotd, :asecd, :acscd,\n )\n\n\n# functions that convert integer arrays to floating-point results\nconst statfuncs = (:mean, :std, :stdm, :var, :varm, :median, :cov, :cor)\nconst linalgfuncs = (:opnorm, :norm, :normalize,\n :factorize, :cholesky, :bunchkaufman, :ldlt, :lu, :qr, :lq,\n :eigen, :eigvals, :eigfact, :eigmax, :eigmin, :eigvecs,\n :hessenberg, :schur, :svd, :svdvals,\n :cond, :condskeel, :det, :logdet, :logabsdet,\n :pinv, :nullspace, :lyap, :sylvester)\n\n# functions to change to ChangePrecision.func(T, ...) calls:\nconst changefuncs = Set([randfuncs..., matfuncs...,\n intfuncs..., complexfuncs...,\n statfuncs..., linalgfuncs...,\n binaryfuncs..., :include])\n\n############################################################################\n\nchangeprecision(T, x) = x\nchangeprecision(T::Type, x::Float64) = parse(T, string(x)) # change float literals\nfunction changeprecision(T, x::Symbol)\n if x \u2208 (:Inf, :NaN)\n return :(convert($T, $x))\n else\n return x\n end\nend\nfunction changeprecision(T, x::Float64)\n if T === :Float16\n return Float16(x)\n elseif T === :Float32\n return Float32(x)\n elseif T === :Float64\n return x\n else\n return :(parse($T, $(string(x))))\n end\nend\nfunction changeprecision(T, ex::Expr)\n if Meta.isexpr(ex, :call, 3) && ex.args[1] == :^ && ex.args[3] isa Int\n # mimic Julia 0.6\/0.7's lowering to literal_pow\n return Expr(:call, ChangePrecision.literal_pow, T, :^, changeprecision(T, ex.args[2]), Val{ex.args[3]}())\n elseif Meta.isexpr(ex, :call, 2) && ex.args[1] == :include\n return :($include($T, @__MODULE__, $(ex.args[2])))\n elseif Meta.isexpr(ex, :call) && ex.args[1] in changefuncs\n return Expr(:call, Core.eval(ChangePrecision, ex.args[1]), T, changeprecision.(T, ex.args[2:end])...)\n elseif Meta.isexpr(ex, :., 2) && ex.args[1] in changefuncs && Meta.isexpr(ex.args[2], :tuple)\n return Expr(:., Core.eval(ChangePrecision, ex.args[1]), Expr(:tuple, T, changeprecision.(T, ex.args[2].args)...))\n elseif Meta.isexpr(ex, :macrocall) && ex.args[1] == Symbol(\"@__dot__\")\n return changeprecision(T, Base.Broadcast.__dot__(ex.args[end]))\n else\n return Expr(ex.head, changeprecision.(T, ex.args)...)\n end\nend\n\n# calls to include(f) are changed to include(T, mod, f) so that\n# @changeprecision can apply recursively to included files.\nfunction include(T, mod, filename::AbstractString)\n # use the undocumented parse_input_line function so that we preserve\n # the filename and line-number 
information.\n s = string(\"begin; \", read(filename, String), \"\\nend\\n\")\n expr = Base.parse_input_line(s, filename=filename)\n Core.eval(mod, changeprecision(T, expr))\nend\n\n\"\"\"\n @changeprecision T expression\n\nChange the \"default\" precision in the given `expression` to the floating-point\ntype `T`.\n\nThis changes floating-point literals, integer expressions like `1\/3`,\nrandom-number functions like `rand`, and matrix constructors like `ones`\nto default to the new type `T`.\n\nFor example,\n```\n@changeprecision Float32 begin\n x = 7.3\n y = 1\/3\n z = rand() .+ ones(3,4)\nend\n```\nuses `Float32` precision for all of the expressions in `begin ... end`.\n\"\"\"\nmacro changeprecision(T, expr)\n esc(changeprecision(T, expr))\nend\n\n############################################################################\n\n# integer-like types that get converted to Float64 by various functions\nconst HWInt = Union{Bool,Int8,Int16,Int32,Int64,Int128,UInt8,UInt16,UInt32,UInt64,UInt128}\nconst RatLike = Union{Rational{<:HWInt}, Complex{<:Rational{<:HWInt}}}\nconst IntLike = Union{HWInt, Complex{<:HWInt}}\nconst IntRatLike = Union{IntLike,RatLike}\nconst Promotable = Union{IntLike, RatLike, Irrational}\nconst PromotableNoRat = Union{IntLike, Irrational}\n\n@inline tofloat(T, x) = T(x)\n@inline tofloat(::Type{T}, x::Complex) where {T<:Real} = Complex{T}(x)\n@inline tofloat(T, x::AbstractArray) = copyto!(similar(x, T), x)\n@inline tofloat(::Type{T}, x::AbstractArray{<:Complex}) where {T<:Real} = copyto!(similar(x, Complex{T}), x)\n\n###########################################################################\n# ChangePrecision.f(T, args...) versions of Base.f(args...) functions.\n\n# define our own versions of rand etc. that override the default type,\n# which which still respect a type argument if it is explicitly provided\nfor f in randfuncs\n @eval begin\n $f(T) = Random.$f(T)\n $f(T, dims::Integer...) = Random.$f(T, dims...)\n $f(T, dims::Tuple{<:Integer}) = Random.$f(T, dims)\n $f(T, rng::AbstractRNG, dims::Integer...) = Random.$f(rng, T, dims...)\n $f(T, rng::AbstractRNG, dims::Tuple{<:Integer}) = Random.$f(rng, T, dims)\n $f(T, args...) = Random.$f(args...)\n end\nend\n\n# similarly for array constructors like ones\nfor f in matfuncs\n @eval begin\n $f(T) = Base.$f(T)\n $f(T, dims::Integer...) = Base.$f(T, dims...)\n $f(T, dims::Tuple{<:Integer}) = Base.$f(T, dims)\n $f(T, args...) = Base.$f(args...)\n end\nend\n\n# we want to change expressions like 1\/2 to produce the new floating-point type\nfor f in intfuncs\n @eval begin\n $f(T, n::Promotable) = Base.$f(tofloat(T, n))\n $f(T, m::Promotable, n::Promotable) = Base.$f(tofloat(T, m), tofloat(T, n))\n $f(T, args...) = Base.$f(args...)\n end\nend\n\n# exception to intfuncs above: division on rationals produces an exact rational\ninv(T, x::RatLike) = Base.inv(x)\n\/(T, y::IntLike, x::RatLike) = Base.:\/(y, x)\n\\(T, x::RatLike, y::IntLike) = Base.:\\(x, y)\n\/(T, y::RatLike, x::IntLike) = Base.:\/(y, x)\n\\(T, x::IntLike, y::RatLike) = Base.:\\(x, y)\n\/(T, y::RatLike, x::RatLike) = Base.:\/(y, x)\n\\(T, x::RatLike, y::RatLike) = Base.:\\(x, y)\n\nfor f in complexfuncs\n @eval begin\n $f(T, z::Union{Complex{<:HWInt},Complex{<:Rational{<:HWInt}}}) = Base.$f(tofloat(T, z))\n $f(T, args...) 
= Base.$f(args...)\n end\nend\n\nfor f in binaryfuncs\n @eval begin\n $f(T, x::Irrational, y::Promotable) = Base.$f(tofloat(T, x), tofloat(T, y))\n $f(T, x::Promotable, y::Irrational) = Base.$f(tofloat(T, x), tofloat(T, y))\n $f(T, x::Irrational, y::Irrational) = Base.$f(tofloat(T, x), tofloat(T, y))\n $f(T, args...) = Base.$f(args...)\n end\nend\n-(T::Type, x::Irrational) = Base.:-(tofloat(T, x))\nfor f in (:+, :*) # these functions can accept 3+ arguments\n # FIXME: these methods may be slow compared to the built-in + or *\n # because they do less inlining?\n @eval begin\n @inline $f(T, x::Promotable, y, z, args...) = $f(T, x, $f(T, y, z, args...))\n @inline $f(T, x::IntRatLike, y::IntRatLike, z::IntRatLike, args::IntRatLike...) = Base.$f(x, y, z, args...)\n end\nend\n\n^(T, x::Union{AbstractMatrix{<:Promotable},Promotable}, y::Union{RatLike,Complex{<:HWInt}}) = Base.:^(tofloat(T, x), y)\n\n# e^x is handled specially\nconst esym = VERSION < v\"0.7.0-DEV.1592\" ? :e : :\u212f # changed in JuliaLang\/julia#23427\n^(T, x::Irrational{esym}, y::Promotable) = Base.exp(tofloat(T, y))\nliteral_pow(T, op, x::Irrational{esym}, ::Val{n}) where {n} = Base.exp(tofloat(T, n))\n\n# literal integer powers are specially handled in Julia\nif VERSION < v\"0.7.0-DEV.843\" # JuliaLang\/julia#22475\n literal_pow(T, op, x::Irrational, ::Val{n}) where {n} = Base.literal_pow(op, tofloat(T, x), Val{n})\n @inline literal_pow(T, op, x, ::Val{n}) where {n} = Base.literal_pow(op, x, Val{n})\nelse\n literal_pow(T, op, x::Irrational, p) = Base.literal_pow(op, tofloat(T, x), p)\n @inline literal_pow(T, op, x, p) = Base.literal_pow(op, x, p)\nend\n\nfor f in (statfuncs...,linalgfuncs...)\n m = f \u2208 statfuncs ? :Statistics : :LinearAlgebra\n # for functions like factorize, if we are converting the matrix to floating-point\n # anyway then we might as well call factorize! instead to overwrite our temp array:\n if f \u2208 (:factorize, :cholesky, :bunchkaufman, :ldlt, :lu, :qr, :lq, :eigen, :svd, :eigvals!, :svdvals!, :median)\n f! = Symbol(f, :!)\n @eval begin\n $f(T, x::AbstractArray{<:Promotable}, args...; kws...) = $m.$f!(tofloat(T, x), args...; kws...)\n $f(T, x::AbstractArray{<:Promotable}, y::AbstractArray{<:Promotable}, args...; kws...) = $m.$f!(tofloat(T, x), tofloat(T, y), args...; kws...)\n end\n else\n @eval begin\n $f(T, x::AbstractArray{<:Promotable}, args...; kws...) = $m.$f(tofloat(T, x), args...; kws...)\n $f(T, x::AbstractArray{<:Promotable}, y::AbstractArray{<:Promotable}, args...; kws...) = $m.$f(tofloat(T, x), tofloat(T, y), args...; kws...)\n end\n end\n @eval begin\n $f(T, x::AbstractArray{<:Promotable}, y::AbstractArray, args...; kws...) = $m.$f(x, y, args...; kws...)\n $f(T, args...; kws...) = $m.$f(args...; kws...)\n end\nend\nfor f in (:varm, :stdm) # look at type of second (scalar) argument\n @eval begin\n $f(T, x::AbstractArray{<:Promotable}, m::Union{AbstractFloat,Complex{<:AbstractFloat}}, args...; kws...) = Statistics.$f(x, m, args...; kws...)\n $f(T, x::AbstractArray{<:PromotableNoRat}, m::PromotableNoRat, args...; kws...) 
= Statistics.$f(tofloat(T, x), tofloat(T, m), args...; kws...)\n end\nend\ninv(T, x::AbstractArray{<:PromotableNoRat}) = Base.inv(tofloat(T, x))\n\/(T, x::AbstractArray{<:Promotable}, y::Union{PromotableNoRat,AbstractArray{<:PromotableNoRat}}) = Base.:\/(tofloat(T, x), tofloat(T, y))\n\\(T, y::Union{PromotableNoRat,AbstractArray{<:PromotableNoRat}}, x::AbstractArray{<:Promotable}) = Base.:\\(tofloat(T, y), tofloat(T, x))\n\n# more array functions that are exact for rationals: don't convert\nfor f in (:mean, :median, :var, :std, :cor, :cov, :ldlt, :lu)\n m = f \u2208 statfuncs ? :Statistics : :LinearAlgebra\n @eval begin\n $f(T, x::AbstractArray{<:RatLike}, y::AbstractArray{<:Promotable}, args...; kws...) = $m.$f(tofloat(T, x), tofloat(T, y), args...; kws...)\n $f(T, x::AbstractArray{<:RatLike}, y::AbstractArray{<:RatLike}, args...; kws...) = $m.$f(x, y, args...; kws...)\n $f(T, x::AbstractArray{<:RatLike}, args...; kws...) = $m.$f(x, args...; kws...)\n end\nend\n\n############################################################################\n\nend # module\n","avg_line_length":44.6241134752,"max_line_length":154,"alphanum_fraction":0.5895581691} {"size":20830,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"\"\"\"\n```\neqcond(m::Model1010_1spread)\n```\n\nExpresses the equilibrium conditions in canonical form using \u03930, \u03931, C, \u03a8, and \u03a0 matrices.\nUsing the mappings of states\/equations to integers defined in m1010.jl, coefficients are\nspecified in their proper positions.\n\n### Outputs\n\n* `\u03930` (`n_states` x `n_states`) holds coefficients of current time states.\n* `\u03931` (`n_states` x `n_states`) holds coefficients of lagged states.\n* `C` (`n_states` x `1`) is a vector of constants\n* `\u03a8` (`n_states` x `n_shocks_exogenous`) holds coefficients of iid shocks.\n* `\u03a0` (`n_states` x `n_states_expectational`) holds coefficients of expectational states.\n\"\"\"\nfunction eqcond(m::Model1010_1spread)\n endo = m.endogenous_states\n exo = m.exogenous_shocks\n ex = m.expected_shocks\n eq = m.equilibrium_conditions\n\n \u03930 = zeros(n_states(m), n_states(m))\n \u03931 = zeros(n_states(m), n_states(m))\n C = zeros(n_states(m))\n \u03a8 = zeros(n_states(m), n_shocks_exogenous(m))\n \u03a0 = zeros(n_states(m), n_shocks_expectational(m))\n\n ### ENDOGENOUS STATES ###\n\n ### 1. 
Consumption Euler Equation\n\n # Sticky prices and wages\n \u03930[eq[:eq_euler], endo[:c_t]] = 1.\n \u03930[eq[:eq_euler], endo[:R_t]] = (1 - m[:h]*exp(-m[:z_star]))\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03930[eq[:eq_euler], endo[:b_t]] = -1.\n \u03930[eq[:eq_euler], endo[:E\u03c0_t]] = -(1 - m[:h]*exp(-m[:z_star]))\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03930[eq[:eq_euler], endo[:z_t]] = (m[:h]*exp(-m[:z_star]))\/(1 + m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_euler], endo[:Ec_t]] = -1\/(1 + m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_euler], endo[:Ez_t]] = -1\/(1 + m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_euler], endo[:L_t]] = -(m[:\u03c3_c] - 1)*m[:wl_c]\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03930[eq[:eq_euler], endo[:EL_t]] = (m[:\u03c3_c] - 1)*m[:wl_c]\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03931[eq[:eq_euler], endo[:c_t]] = (m[:h]*exp(-m[:z_star]))\/(1 + m[:h]*exp(-m[:z_star]))\n\n # Flexible prices and wages\n \u03930[eq[:eq_euler_f], endo[:c_f_t]] = 1.\n \u03930[eq[:eq_euler_f], endo[:r_f_t]] = (1 - m[:h]*exp(-m[:z_star]))\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03930[eq[:eq_euler_f], endo[:b_t]] = -1.\n \u03930[eq[:eq_euler_f], endo[:z_t]] = (m[:h]*exp(-m[:z_star]))\/(1 + m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_euler_f], endo[:Ec_f_t]] = -1\/(1 + m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_euler_f], endo[:Ez_t]] = -1\/(1 + m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_euler_f], endo[:L_f_t]] = -(m[:\u03c3_c] - 1)*m[:wl_c]\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03930[eq[:eq_euler_f], endo[:EL_f_t]] = (m[:\u03c3_c] - 1)*m[:wl_c]\/(m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\n \u03931[eq[:eq_euler_f], endo[:c_f_t]] = (m[:h]*exp(-m[:z_star]))\/(1 + m[:h]*exp(-m[:z_star]))\n\n ### 2. Investment Euler Equation\n\n # Sticky prices and wages\n \u03930[eq[:eq_inv], endo[:qk_t]] = -1\/(m[:S\u2032\u2032]*exp(2.0*m[:z_star])*(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])))\n \u03930[eq[:eq_inv], endo[:i_t]] = 1.\n \u03930[eq[:eq_inv], endo[:z_t]] = 1\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03931[eq[:eq_inv], endo[:i_t]] = 1\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_inv], endo[:Ei_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_inv], endo[:Ez_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_inv], endo[:\u03bc_t]] = -1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_inv_f], endo[:qk_f_t]] = -1\/(m[:S\u2032\u2032]*exp(2*m[:z_star])*(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])))\n \u03930[eq[:eq_inv_f], endo[:i_f_t]] = 1.\n \u03930[eq[:eq_inv_f], endo[:z_t]] = 1\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03931[eq[:eq_inv_f], endo[:i_f_t]] = 1\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_inv_f], endo[:Ei_f_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_inv_f], endo[:Ez_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_inv_f], endo[:\u03bc_t]] = -1.\n\n ### 3. 
Financial Friction Block\n\n # Return to capital\n # Sticky prices and wages\n \u03930[eq[:eq_capval], endo[:Rktil_t]] = 1.\n \u03930[eq[:eq_capval], endo[:\u03c0_t]] = -1.\n \u03930[eq[:eq_capval], endo[:rk_t]] = -m[:r_k_star]\/(1 + m[:r_k_star] - m[:\u03b4])\n \u03930[eq[:eq_capval], endo[:qk_t]] = -(1 - m[:\u03b4])\/(1 + m[:r_k_star] - m[:\u03b4])\n \u03931[eq[:eq_capval], endo[:qk_t]] = -1.\n\n # Spreads\n # Sticky prices and wages\n \u03930[eq[:eq_spread], endo[:ERtil_k_t]] = 1.\n \u03930[eq[:eq_spread], endo[:R_t]] = -1.\n \u03930[eq[:eq_spread], endo[:b_t]] = (m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\/(1 - m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_spread], endo[:qk_t]] = -m[:\u03b6_spb]\n \u03930[eq[:eq_spread], endo[:kbar_t]] = -m[:\u03b6_spb]\n \u03930[eq[:eq_spread], endo[:n_t]] = m[:\u03b6_spb]\n \u03930[eq[:eq_spread], endo[:\u03c3_\u03c9_t]] = -1.\n \u03930[eq[:eq_spread], endo[:\u03bc_e_t]] = -1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_spread_f], endo[:ERktil_f_t]] = 1.\n \u03930[eq[:eq_spread_f], endo[:r_f_t]] = -1.\n \u03930[eq[:eq_spread_f], endo[:b_t]] = (m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\/(1 - m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_spread_f], endo[:qk_f_t]] = -m[:\u03b6_spb]\n \u03930[eq[:eq_spread_f], endo[:kbar_f_t]] = -m[:\u03b6_spb]\n \u03930[eq[:eq_spread_f], endo[:n_f_t]] = m[:\u03b6_spb]\n \u03930[eq[:eq_spread_f], endo[:\u03c3_\u03c9_t]] = -1.\n \u03930[eq[:eq_spread_f], endo[:\u03bc_e_t]] = -1.\n\n # n evol\n # Sticky prices and wages\n \u03930[eq[:eq_nevol], endo[:n_t]] = 1.\n \u03930[eq[:eq_nevol], endo[:\u03b3_t]] = -1.\n \u03930[eq[:eq_nevol], endo[:z_t]] = m[:\u03b3_star]*m[:vstar]\/m[:nstar]\n \u03930[eq[:eq_nevol], endo[:Rktil_t]] = -m[:\u03b6_nRk]\n \u03930[eq[:eq_nevol], endo[:\u03c0_t]] = (m[:\u03b6_nRk] - m[:\u03b6_nR])\n \u03931[eq[:eq_nevol], endo[:\u03c3_\u03c9_t]] = -m[:\u03b6_n\u03c3_\u03c9]\/m[:\u03b6_sp\u03c3_\u03c9]\n \u03931[eq[:eq_nevol], endo[:\u03bc_e_t]] = -m[:\u03b6_n\u03bc_e]\/m[:\u03b6_sp\u03bc_e]\n \u03931[eq[:eq_nevol], endo[:qk_t]] = m[:\u03b6_nqk]\n \u03931[eq[:eq_nevol], endo[:kbar_t]] = m[:\u03b6_nqk]\n \u03931[eq[:eq_nevol], endo[:n_t]] = m[:\u03b6_nn]\n \u03931[eq[:eq_nevol], endo[:R_t]] = -m[:\u03b6_nR]\n \u03931[eq[:eq_nevol], endo[:b_t]] = m[:\u03b6_nR]*((m[:\u03c3_c]*(1.0+m[:h]*exp(-m[:z_star])))\/(1.0-m[:h]*exp(-m[:z_star])))\n\n # Flexible prices and wages\n \u03930[eq[:eq_nevol_f], endo[:n_f_t]] = 1.\n \u03930[eq[:eq_nevol_f], endo[:z_t]] = m[:\u03b3_star]*m[:vstar]\/m[:nstar]\n \u03930[eq[:eq_nevol_f], endo[:rktil_f_t]] = -m[:\u03b6_nRk]\n \u03931[eq[:eq_nevol_f], endo[:\u03c3_\u03c9_t]] = -m[:\u03b6_n\u03c3_\u03c9]\/m[:\u03b6_sp\u03c3_\u03c9]\n \u03931[eq[:eq_nevol_f], endo[:\u03bc_e_t]] = -m[:\u03b6_n\u03bc_e]\/m[:\u03b6_sp\u03bc_e]\n \u03931[eq[:eq_nevol_f], endo[:qk_f_t]] = m[:\u03b6_nqk]\n \u03931[eq[:eq_nevol_f], endo[:kbar_f_t]] = m[:\u03b6_nqk]\n \u03931[eq[:eq_nevol_f], endo[:n_f_t]] = m[:\u03b6_nn]\n \u03931[eq[:eq_nevol_f], endo[:r_f_t]] = -m[:\u03b6_nR]\n \u03931[eq[:eq_nevol_f], endo[:b_t]] = m[:\u03b6_nR]*((m[:\u03c3_c]*(1.0+m[:h]*exp(-m[:z_star])))\/(1.0-m[:h]*exp(-m[:z_star])))\n\n # Flexible prices and wages - ASSUME NO FINANCIAL FRICTIONS\n \u03930[eq[:eq_capval_f], endo[:rktil_f_t]] = 1.\n \u03930[eq[:eq_capval_f], endo[:rk_f_t]] = -m[:r_k_star]\/(m[:r_k_star]+1-m[:\u03b4])\n \u03930[eq[:eq_capval_f], endo[:qk_f_t]] = -(1-m[:\u03b4])\/(m[:r_k_star]+1-m[:\u03b4])\n \u03931[eq[:eq_capval_f], endo[:qk_f_t]] = -1.\n\n ### 4. 
Aggregate Production Function\n\n # Sticky prices and wages\n \u03930[eq[:eq_output], endo[:y_t]] = 1.\n \u03930[eq[:eq_output], endo[:k_t]] = -m[:\u03a6]*m[:\u03b1]\n \u03930[eq[:eq_output], endo[:L_t]] = -m[:\u03a6]*(1 - m[:\u03b1])\n\n # Flexible prices and wages\n \u03930[eq[:eq_output_f], endo[:y_f_t]] = 1.\n \u03930[eq[:eq_output_f], endo[:k_f_t]] = -m[:\u03a6]*m[:\u03b1]\n \u03930[eq[:eq_output_f], endo[:L_f_t]] = -m[:\u03a6]*(1 - m[:\u03b1])\n\n ### 5. Capital Utilization\n\n # Sticky prices and wages\n \u03930[eq[:eq_caputl], endo[:k_t]] = 1.\n \u03931[eq[:eq_caputl], endo[:kbar_t]] = 1.\n \u03930[eq[:eq_caputl], endo[:z_t]] = 1.\n \u03930[eq[:eq_caputl], endo[:u_t]] = -1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_caputl_f], endo[:k_f_t]] = 1.\n \u03931[eq[:eq_caputl_f], endo[:kbar_f_t]] = 1.\n \u03930[eq[:eq_caputl_f], endo[:z_t]] = 1.\n \u03930[eq[:eq_caputl_f], endo[:u_f_t]] = -1.\n\n ### 6. Rental Rate of Capital\n\n # Sticky prices and wages\n \u03930[eq[:eq_capsrv], endo[:u_t]] = 1.\n \u03930[eq[:eq_capsrv], endo[:rk_t]] = -(1 - m[:ppsi])\/m[:ppsi]\n\n # Flexible prices and wages\n \u03930[eq[:eq_capsrv_f], endo[:u_f_t]] = 1.\n \u03930[eq[:eq_capsrv_f], endo[:rk_f_t]] = -(1 - m[:ppsi])\/m[:ppsi]\n\n ### 7. Evolution of Capital\n\n # Sticky prices and wages\n \u03930[eq[:eq_capev], endo[:kbar_t]] = 1.\n \u03931[eq[:eq_capev], endo[:kbar_t]] = 1 - m[:istar]\/m[:kbarstar]\n \u03930[eq[:eq_capev], endo[:z_t]] = 1 - m[:istar]\/m[:kbarstar]\n \u03930[eq[:eq_capev], endo[:i_t]] = -m[:istar]\/m[:kbarstar]\n \u03930[eq[:eq_capev], endo[:\u03bc_t]] = -m[:istar]*m[:S\u2032\u2032]*exp(2*m[:z_star])*(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\/m[:kbarstar]\n\n # Flexible prices and wages\n \u03930[eq[:eq_capev_f], endo[:kbar_f_t]] = 1.\n \u03931[eq[:eq_capev_f], endo[:kbar_f_t]] = 1 - m[:istar]\/m[:kbarstar]\n \u03930[eq[:eq_capev_f], endo[:z_t]] = 1 - m[:istar]\/m[:kbarstar]\n \u03930[eq[:eq_capev_f], endo[:i_f_t]] = -m[:istar]\/m[:kbarstar]\n \u03930[eq[:eq_capev_f], endo[:\u03bc_t]] = -m[:istar]*m[:S\u2032\u2032]*exp(2*m[:z_star])*(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\/m[:kbarstar]\n\n ### 8. Price Markup\n\n # Sticky prices and wages\n \u03930[eq[:eq_mkupp], endo[:mc_t]] = 1.\n \u03930[eq[:eq_mkupp], endo[:w_t]] = -1.\n \u03930[eq[:eq_mkupp], endo[:L_t]] = -m[:\u03b1]\n \u03930[eq[:eq_mkupp], endo[:k_t]] = m[:\u03b1]\n\n # Flexible prices and wages\n \u03930[eq[:eq_mkupp_f], endo[:w_f_t]] = 1.\n \u03930[eq[:eq_mkupp_f], endo[:L_f_t]] = m[:\u03b1]\n \u03930[eq[:eq_mkupp_f], endo[:k_f_t]] = -m[:\u03b1]\n\n ### 9. Phillips Curve\n\n # Sticky prices and wages\n \u03930[eq[:eq_phlps], endo[:\u03c0_t]] = 1.\n \u03930[eq[:eq_phlps], endo[:mc_t]] = -((1 - m[:\u03b6_p]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))*\n (1 - m[:\u03b6_p]))\/(m[:\u03b6_p]*((m[:\u03a6]- 1)*m[:\u03f5_p] + 1))\/(1 + m[:\u03b9_p]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03931[eq[:eq_phlps], endo[:\u03c0_t]] = m[:\u03b9_p]\/(1 + m[:\u03b9_p]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_phlps], endo[:E\u03c0_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b9_p]*m[:\u03b2]*\n exp((1 - m[:\u03c3_c])*m[:z_star]))\n\n # Comment out for counterfactual with no price mark up shock\n \u03930[eq[:eq_phlps], endo[:\u03bb_f_t]] = -(1 + m[:\u03b9_p]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\/\n (1 + m[:\u03b9_p]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n\n # Flexible prices and wages not necessary\n\n ### 10. 
Rental Rate of Capital\n\n # Sticky prices and wages\n \u03930[eq[:eq_caprnt], endo[:rk_t]] = 1.\n \u03930[eq[:eq_caprnt], endo[:k_t]] = 1.\n \u03930[eq[:eq_caprnt], endo[:L_t]] = -1.\n \u03930[eq[:eq_caprnt], endo[:w_t]] = -1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_caprnt_f], endo[:rk_f_t]] = 1.\n \u03930[eq[:eq_caprnt_f], endo[:k_f_t]] = 1.\n \u03930[eq[:eq_caprnt_f], endo[:L_f_t]] = -1.\n \u03930[eq[:eq_caprnt_f], endo[:w_f_t]] = -1.\n\n ### 11. Marginal Substitution\n\n # Sticky prices and wages\n \u03930[eq[:eq_msub], endo[:\u03bc_\u03c9_t]] = 1.\n \u03930[eq[:eq_msub], endo[:L_t]] = m[:\u03bd_l]\n \u03930[eq[:eq_msub], endo[:c_t]] = 1\/(1 - m[:h]*exp(-m[:z_star]))\n \u03931[eq[:eq_msub], endo[:c_t]] = m[:h]*exp(-m[:z_star])\/(1 - m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_msub], endo[:z_t]] = m[:h]*exp(-m[:z_star]) \/(1 - m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_msub], endo[:w_t]] = -1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_msub_f], endo[:w_f_t]] = -1.\n \u03930[eq[:eq_msub_f], endo[:L_f_t]] = m[:\u03bd_l]\n \u03930[eq[:eq_msub_f], endo[:c_f_t]] = 1\/(1 - m[:h]*exp(-m[:z_star]))\n \u03931[eq[:eq_msub_f], endo[:c_f_t]] = m[:h]*exp(-m[:z_star])\/(1 - m[:h]*exp(-m[:z_star]))\n \u03930[eq[:eq_msub_f], endo[:z_t]] = m[:h]*exp(-m[:z_star])\/(1 - m[:h]*exp(-m[:z_star]))\n\n ### 12. Evolution of Wages\n\n # Sticky prices and wages\n \u03930[eq[:eq_wage], endo[:w_t]] = 1\n \u03930[eq[:eq_wage], endo[:\u03bc_\u03c9_t]] = (1 - m[:\u03b6_w]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))*\n (1 - m[:\u03b6_w])\/(m[:\u03b6_w]*((m[:\u03bb_w] - 1)*m[:\u03f5_w] + 1))\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_wage], endo[:\u03c0_t]] = (1 + m[:\u03b9_w]*m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03931[eq[:eq_wage], endo[:w_t]] = 1\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_wage], endo[:z_t]] = 1\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03931[eq[:eq_wage], endo[:\u03c0_t]] = m[:\u03b9_w]\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_wage], endo[:Ew_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_wage], endo[:Ez_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_wage], endo[:E\u03c0_t]] = -m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star])\/(1 + m[:\u03b2]*exp((1 - m[:\u03c3_c])*m[:z_star]))\n \u03930[eq[:eq_wage], endo[:\u03bb_w_t]] = -1.\n\n # Flexible prices and wages not necessary\n\n ### 13. Monetary Policy Rule\n\n # Sticky prices and wages\n \u03930[eq[:eq_mp], endo[:R_t]] = 1.\n \u03931[eq[:eq_mp], endo[:R_t]] = m[:\u03c1]\n \u03930[eq[:eq_mp], endo[:\u03c0_t]] = -(1 - m[:\u03c1])*m[:\u03c81]\n \u03930[eq[:eq_mp], endo[:\u03c0_star_t]] = (1 - m[:\u03c1])*m[:\u03c81]\n \u03930[eq[:eq_mp], endo[:y_t]] = -(1 - m[:\u03c1])*m[:\u03c82] - m[:\u03c83]\n \u03930[eq[:eq_mp], endo[:y_f_t]] = (1 - m[:\u03c1])*m[:\u03c82] + m[:\u03c83]\n \u03931[eq[:eq_mp], endo[:y_t]] = -m[:\u03c83]\n \u03931[eq[:eq_mp], endo[:y_f_t]] = m[:\u03c83]\n \u03930[eq[:eq_mp], endo[:rm_t]] = -1.\n\n # Flexible prices and wages not necessary\n\n ### 14. 
Resource Constraint\n\n # Sticky prices and wages\n \u03930[eq[:eq_res], endo[:y_t]] = 1.\n \u03930[eq[:eq_res], endo[:g_t]] = -m[:g_star]\n \u03930[eq[:eq_res], endo[:c_t]] = -m[:cstar]\/m[:ystar]\n \u03930[eq[:eq_res], endo[:i_t]] = -m[:istar]\/m[:ystar]\n \u03930[eq[:eq_res], endo[:u_t]] = -m[:r_k_star]*m[:kstar]\/m[:ystar]\n\n # Flexible prices and wages\n \u03930[eq[:eq_res_f], endo[:y_f_t]] = 1.\n \u03930[eq[:eq_res_f], endo[:g_t]] = -m[:g_star]\n \u03930[eq[:eq_res_f], endo[:c_f_t]] = -m[:cstar]\/m[:ystar]\n \u03930[eq[:eq_res_f], endo[:i_f_t]] = -m[:istar]\/m[:ystar]\n \u03930[eq[:eq_res_f], endo[:u_f_t]] = -m[:r_k_star]*m[:kstar]\/m[:ystar]\n\n ### 15. Extra States\n # These aren't strictly necessary, but they track lags or simplify the equations\n\n # \u03c0_t1\n \u03930[eq[:eq_\u03c01], endo[:\u03c0_t1]] = 1.\n \u03931[eq[:eq_\u03c01], endo[:\u03c0_t]] = 1.\n\n # \u03c0_t2\n \u03930[eq[:eq_\u03c02], endo[:\u03c0_t2]] = 1.\n \u03931[eq[:eq_\u03c02], endo[:\u03c0_t1]] = 1.\n\n # \u03c0_a\n \u03930[eq[:eq_\u03c0_a], endo[:\u03c0_a_t]] = 1.\n \u03930[eq[:eq_\u03c0_a], endo[:\u03c0_t]] = -1.\n \u03930[eq[:eq_\u03c0_a], endo[:\u03c0_t1]] = -1.\n \u03930[eq[:eq_\u03c0_a], endo[:\u03c0_t2]] = -1.\n \u03931[eq[:eq_\u03c0_a], endo[:\u03c0_t2]] = 1.\n\n # Rt1\n \u03930[eq[:eq_Rt1], endo[:R_t1]] = 1.\n \u03931[eq[:eq_Rt1], endo[:R_t]] = 1.\n\n # Ez_t\n \u03930[eq[:eq_Ez], endo[:Ez_t]] = 1.\n \u03930[eq[:eq_Ez], endo[:ztil_t]] = -(m[:\u03c1_z]-1)\/(1-m[:\u03b1])\n \u03930[eq[:eq_Ez], endo[:zp_t]] = -m[:\u03c1_z_p]\n\n # Nominal Deposit rate\n \u03930[eq[:eq_dep], endo[:Rd_t]] = 1.\n \u03930[eq[:eq_dep], endo[:R_t]] = -1.\n \u03930[eq[:eq_dep], endo[:b_t]] = (m[:\u03c3_c]*(1 + m[:h]*exp(-m[:z_star])))\/(1 - m[:h]*exp(-m[:z_star]))\n\n\n ### EXOGENOUS SHOCKS ###\n\n # Neutral technology\n \u03930[eq[:eq_z], endo[:z_t]] = 1.\n \u03931[eq[:eq_z], endo[:ztil_t]] = (m[:\u03c1_z] - 1)\/(1 - m[:\u03b1])\n \u03930[eq[:eq_z], endo[:zp_t]] = -1.\n \u03a8[eq[:eq_z], exo[:z_sh]] = 1\/(1 - m[:\u03b1])\n\n \u03930[eq[:eq_ztil], endo[:ztil_t]] = 1.\n \u03931[eq[:eq_ztil], endo[:ztil_t]] = m[:\u03c1_z]\n \u03a8[eq[:eq_ztil], exo[:z_sh]] = 1.\n\n # Long-run changes to productivity\n \u03930[eq[:eq_zp], endo[:zp_t]] = 1.\n \u03931[eq[:eq_zp], endo[:zp_t]] = m[:\u03c1_z_p]\n \u03a8[eq[:eq_zp], exo[:zp_sh]] = 1.\n\n # Government spending\n \u03930[eq[:eq_g], endo[:g_t]] = 1.\n \u03931[eq[:eq_g], endo[:g_t]] = m[:\u03c1_g]\n \u03a8[eq[:eq_g], exo[:g_sh]] = 1.\n \u03a8[eq[:eq_g], exo[:z_sh]] = m[:\u03b7_gz]\n\n # Asset shock\n \u03930[eq[:eq_b_til], endo[:b_til_t]] = 1. # transient component\n \u03931[eq[:eq_b_til], endo[:b_til_t]] = m[:\u03c1_b_til]\n \u03a8[eq[:eq_b_til], exo[:b_til_sh]] = 1.\n\n \u03930[eq[:eq_b_p], endo[:b_p_t]] = 1. 
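    # (For orientation, assuming the usual Sims-style canonical form\n    #     \u03930*s_t = \u03931*s_{t-1} + C + \u03a8*\u03b5_t + \u03a0*\u03b7_t\n    # for the matrices listed in the docstring: each shock block here is an AR(1); e.g. the\n    # transient asset-shock lines above read as\n    #     b_til_t = \u03c1_b_til*b_til_{t-1} + b_til_sh_t\n    # with the \u03a8 entry giving the unit loading on the innovation.)\n    #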
# permanent component\n \u03931[eq[:eq_b_p], endo[:b_p_t]] = m[:\u03c1_b_p]\n \u03a8[eq[:eq_b_p], exo[:b_p_sh]] = 1.\n\n \u03930[eq[:eq_b], endo[:b_t]] = 1.\n \u03930[eq[:eq_b], endo[:b_til_t]] = -1.\n \u03930[eq[:eq_b], endo[:b_p_t]] = -1.\n\n # Investment-specific technology\n \u03930[eq[:eq_\u03bc], endo[:\u03bc_t]] = 1.\n \u03931[eq[:eq_\u03bc], endo[:\u03bc_t]] = m[:\u03c1_\u03bc]\n \u03a8[eq[:eq_\u03bc], exo[:\u03bc_sh]] = 1.\n\n # Price mark-up shock\n \u03930[eq[:eq_\u03bb_f], endo[:\u03bb_f_t]] = 1.\n \u03931[eq[:eq_\u03bb_f], endo[:\u03bb_f_t]] = m[:\u03c1_\u03bb_f]\n \u03931[eq[:eq_\u03bb_f], endo[:\u03bb_f_t1]] = -m[:\u03b7_\u03bb_f]\n \u03a8[eq[:eq_\u03bb_f], exo[:\u03bb_f_sh]] = 1.\n\n \u03930[eq[:eq_\u03bb_f1], endo[:\u03bb_f_t1]] = 1.\n \u03a8[eq[:eq_\u03bb_f1], exo[:\u03bb_f_sh]] = 1.\n\n # Wage mark-up shock\n \u03930[eq[:eq_\u03bb_w], endo[:\u03bb_w_t]] = 1.\n \u03931[eq[:eq_\u03bb_w], endo[:\u03bb_w_t]] = m[:\u03c1_\u03bb_w]\n \u03931[eq[:eq_\u03bb_w], endo[:\u03bb_w_t1]] = -m[:\u03b7_\u03bb_w]\n \u03a8[eq[:eq_\u03bb_w], exo[:\u03bb_w_sh]] = 1.\n\n \u03930[eq[:eq_\u03bb_w1], endo[:\u03bb_w_t1]] = 1.\n \u03a8[eq[:eq_\u03bb_w1], exo[:\u03bb_w_sh]] = 1.\n\n # Monetary policy shock\n \u03930[eq[:eq_rm], endo[:rm_t]] = 1.\n \u03931[eq[:eq_rm], endo[:rm_t]] = m[:\u03c1_rm]\n \u03a8[eq[:eq_rm], exo[:rm_sh]] = 1.\n\n ### Financial frictions\n\n # Standard deviation of capital shock to entrepreneurs\n \u03930[eq[:eq_\u03c3_\u03c9], endo[:\u03c3_\u03c9_t]] = 1.\n \u03931[eq[:eq_\u03c3_\u03c9], endo[:\u03c3_\u03c9_t]] = m[:\u03c1_\u03c3_w]\n \u03a8[eq[:eq_\u03c3_\u03c9], exo[:\u03c3_\u03c9_sh]] = 1.\n\n # Exogenous bankruptcy costs\n \u03930[eq[:eq_\u03bc_e], endo[:\u03bc_e_t]] = 1.\n \u03931[eq[:eq_\u03bc_e], endo[:\u03bc_e_t]] = m[:\u03c1_\u03bc_e]\n \u03a8[eq[:eq_\u03bc_e], exo[:\u03bc_e_sh]] = 1.\n\n # Fraction of entrepreneurs surviving period t\n \u03930[eq[:eq_\u03b3], endo[:\u03b3_t]] = 1.\n \u03931[eq[:eq_\u03b3], endo[:\u03b3_t]] = m[:\u03c1_\u03b3]\n \u03a8[eq[:eq_\u03b3], exo[:\u03b3_sh]] = 1.\n\n # Long-term inflation expectations\n \u03930[eq[:eq_\u03c0_star], endo[:\u03c0_star_t]] = 1.\n \u03931[eq[:eq_\u03c0_star], endo[:\u03c0_star_t]] = m[:\u03c1_\u03c0_star]\n \u03a8[eq[:eq_\u03c0_star], exo[:\u03c0_star_sh]] = 1.\n\n # Anticipated policy shocks\n if n_anticipated_shocks(m) > 0\n\n # This section adds the anticipated shocks. There is one state for all the\n # anticipated shocks that will hit in a given period (i.e. 
rm_tl2 holds those that\n # will hit in two periods), and the equations are set up so that rm_tl2 last period\n # will feed into rm_tl1 this period (and so on for other numbers), and last period's\n # rm_tl1 will feed into the rm_t process (and affect the Taylor Rule this period).\n\n \u03931[eq[:eq_rm], endo[:rm_tl1]] = 1.\n \u03930[eq[:eq_rml1], endo[:rm_tl1]] = 1.\n \u03a8[eq[:eq_rml1], exo[:rm_shl1]] = 1.\n\n if n_anticipated_shocks(m) > 1\n for i = 2:n_anticipated_shocks(m)\n \u03931[eq[Symbol(\"eq_rml$(i-1)\")], endo[Symbol(\"rm_tl$i\")]] = 1.\n \u03930[eq[Symbol(\"eq_rml$i\")], endo[Symbol(\"rm_tl$i\")]] = 1.\n \u03a8[eq[Symbol(\"eq_rml$i\")], exo[Symbol(\"rm_shl$i\")]] = 1.\n end\n end\n end\n\n ### EXPECTATION ERRORS ###\n\n ### E(c)\n\n # Sticky prices and wages\n \u03930[eq[:eq_Ec], endo[:c_t]] = 1.\n \u03931[eq[:eq_Ec], endo[:Ec_t]] = 1.\n \u03a0[eq[:eq_Ec], ex[:Ec_sh]] = 1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_Ec_f], endo[:c_f_t]] = 1.\n \u03931[eq[:eq_Ec_f], endo[:Ec_f_t]] = 1.\n \u03a0[eq[:eq_Ec_f], ex[:Ec_f_sh]] = 1.\n\n ### E(q)\n\n # Sticky prices and wages\n \u03930[eq[:eq_Eqk], endo[:qk_t]] = 1.\n \u03931[eq[:eq_Eqk], endo[:Eqk_t]] = 1.\n \u03a0[eq[:eq_Eqk], ex[:Eqk_sh]] = 1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_Eqk_f], endo[:qk_f_t]] = 1.\n \u03931[eq[:eq_Eqk_f], endo[:Eqk_f_t]] = 1.\n \u03a0[eq[:eq_Eqk_f], ex[:Eqk_f_sh]] = 1.\n\n ### E(i)\n\n # Sticky prices and wages\n \u03930[eq[:eq_Ei], endo[:i_t]] = 1.\n \u03931[eq[:eq_Ei], endo[:Ei_t]] = 1.\n \u03a0[eq[:eq_Ei], ex[:Ei_sh]] = 1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_Ei_f], endo[:i_f_t]] = 1.\n \u03931[eq[:eq_Ei_f], endo[:Ei_f_t]] = 1.\n \u03a0[eq[:eq_Ei_f], ex[:Ei_f_sh]] = 1.\n\n ### E(\u03c0)\n\n # Sticky prices and wages\n \u03930[eq[:eq_E\u03c0], endo[:\u03c0_t]] = 1.\n \u03931[eq[:eq_E\u03c0], endo[:E\u03c0_t]] = 1.\n \u03a0[eq[:eq_E\u03c0], ex[:E\u03c0_sh]] = 1.\n\n ### E(l)\n\n # Sticky prices and wages\n \u03930[eq[:eq_EL], endo[:L_t]] = 1.\n \u03931[eq[:eq_EL], endo[:EL_t]] = 1.\n \u03a0[eq[:eq_EL], ex[:EL_sh]] = 1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_EL_f], endo[:L_f_t]] = 1.\n \u03931[eq[:eq_EL_f], endo[:EL_f_t]] = 1.\n \u03a0[eq[:eq_EL_f], ex[:EL_f_sh]] = 1.\n\n ### E(rk)\n\n # Sticky prices and wages\n \u03930[eq[:eq_Erk], endo[:rk_t]] = 1.\n \u03931[eq[:eq_Erk], endo[:Erk_t]] = 1.\n \u03a0[eq[:eq_Erk], ex[:Erk_sh]] = 1.\n\n # Flexible prices and wages\n \u03930[eq[:eq_Erktil_f], endo[:rktil_f_t]] = 1.\n \u03931[eq[:eq_Erktil_f], endo[:ERktil_f_t]] = 1.\n \u03a0[eq[:eq_Erktil_f], ex[:Erktil_f_sh]] = 1.\n\n ### E(w)\n\n # Sticky prices and wages\n \u03930[eq[:eq_Ew], endo[:w_t]] = 1.\n \u03931[eq[:eq_Ew], endo[:Ew_t]] = 1.\n \u03a0[eq[:eq_Ew], ex[:Ew_sh]] = 1.\n\n ### E(Rktil)\n\n # Sticky prices and wages\n \u03930[eq[:eq_ERktil], endo[:Rktil_t]] = 1.\n \u03931[eq[:eq_ERktil], endo[:ERtil_k_t]] = 1.\n \u03a0[eq[:eq_ERktil], ex[:ERktil_sh]] = 1.\n\n return \u03930, \u03931, C, \u03a8, \u03a0\nend\n","avg_line_length":39.5256166983,"max_line_length":134,"alphanum_fraction":0.5289486318} {"size":188,"ext":"jl","lang":"Julia","max_stars_count":5.0,"content":"const GITHUB_ORGANIZATION = \"UnofficialJuliaMirror\"\nconst GITHUB_BOT_USERNAME = \"UnofficialJuliaMirrorBot\"\nconst GITHUB_BOT_PERSONAL_ACCESS_TOKEN = ENV[\"GITHUB_BOT_PERSONAL_ACCESS_TOKEN\"]\n","avg_line_length":47.0,"max_line_length":80,"alphanum_fraction":0.8776595745} {"size":2600,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using BlockArrays\n\nstruct 
PartiallyImplementedBlockVector <: AbstractBlockArray{Float64,1} end\n\n@testset \"partially implemented block array\" begin\n # the error thrown before was incorrect\n A = PartiallyImplementedBlockVector()\n try\n getblock(A, 1)\n catch err\n @test err isa ErrorException && err.msg == \"getblock for PartiallyImplementedBlockVector is not implemented\"\n end\n try\n getblock!(zeros(5), A, Block(1))\n catch err\n @test err isa ErrorException && err.msg == \"getblock! for PartiallyImplementedBlockVector is not implemented\"\n end\n try\n BlockArrays.setblock!(A, zeros(5), Block(1))\n catch err\n @test err isa ErrorException && err.msg == \"setblock! for PartiallyImplementedBlockVector is not implemented\"\n end\n try\n BlockArrays.blocksize(A, 2)\n catch err\n @test err isa ErrorException && err.msg == \"blocksizes for PartiallyImplementedBlockVector is not implemented\"\n end\nend\n\n@testset \"Array block interface\" begin\n @test blocksizes(1) == BlockArrays.BlockSizes{0}()\n @test 1[Block()] == 1\n\n A = randn(5)\n @test blocksizes(A) == BlockArrays.BlockSizes([5])\n @test A[Block(1)] == A\n view(A,Block(1))[1] = 2\n @test A[1] == 2\n @test_throws BoundsError A[Block(2)]\n\n A = randn(5,5)\n @test A[Block(1,1)] == A\nend\n\n@testset \"Triangular\/Symmetric\/Hermitian block arrays\" begin\n A = PseudoBlockArray{ComplexF64}(undef, (1:4), (1:4))\n A .= randn.() .+ randn.().*im\n\n @test UpperTriangular(A)[Block(2,2)] == UpperTriangular(A[2:3,2:3])\n @test UpperTriangular(A)[Block(2,3)] == A[2:3,4:6]\n @test UpperTriangular(A)[Block(3,2)] == zeros(3,2)\n @test Symmetric(A)[Block(2,2)] == Symmetric(A[2:3,2:3])\n @test Symmetric(A)[Block(2,3)] == A[2:3,4:6]\n @test Symmetric(A)[Block(3,2)] == transpose(A[2:3,4:6])\n @test Hermitian(A)[Block(2,2)] == Hermitian(A[2:3,2:3])\n @test Hermitian(A)[Block(2,3)] == A[2:3,4:6]\n @test Hermitian(A)[Block(3,2)] == A[2:3,4:6]'\nend\n\n@testset \"Adjoint\/Transpose block arrays\" begin\n A = PseudoBlockArray{ComplexF64}(undef, (1:4), (2:5))\n A .= randn.() .+ randn.().*im\n\n @test blocksizes(A') == BlockArrays.BlockSizes(2:5, 1:4)\n @test blocksizes(Transpose(A)) == BlockArrays.BlockSizes(2:5, 1:4)\n\n @test A'[Block(2,2)] == A[Block(2,2)]' == A[2:3,3:5]'\n @test transpose(A)[Block(2,2)] == transpose(A[2:3,3:5])\n @test A'[Block(2,3)] == A[Block(3,2)]'\n @test transpose(A)[Block(2,3)] == transpose(A[Block(3,2)])\n\n @test BlockArray(A') == A'\n @test BlockArray(transpose(A)) == transpose(A)\nend\n","avg_line_length":34.6666666667,"max_line_length":118,"alphanum_fraction":0.6311538462} {"size":27368,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"# This file is a part of Julia. 
License is MIT: https:\/\/julialang.org\/license\n\n## integer arithmetic ##\n\n# The tuples and types that do not include 128 bit sizes are necessary to handle\n# certain issues on 32-bit machines, and also to simplify promotion rules, as\n# they are also used elsewhere where Int128\/UInt128 support is separated out,\n# such as in hashing2.jl\n\nconst BitSigned32_types = (Int8, Int16, Int32)\nconst BitUnsigned32_types = (UInt8, UInt16, UInt32)\nconst BitInteger32_types = (BitSigned32_types..., BitUnsigned32_types...)\n\nconst BitSigned64_types = (BitSigned32_types..., Int64)\nconst BitUnsigned64_types = (BitUnsigned32_types..., UInt64)\nconst BitInteger64_types = (BitSigned64_types..., BitUnsigned64_types...)\n\nconst BitSigned_types = (BitSigned64_types..., Int128)\nconst BitUnsigned_types = (BitUnsigned64_types..., UInt128)\nconst BitInteger_types = (BitSigned_types..., BitUnsigned_types...)\n\nconst BitSignedSmall_types = Int === Int64 ? ( Int8, Int16, Int32) : ( Int8, Int16)\nconst BitUnsignedSmall_types = Int === Int64 ? (UInt8, UInt16, UInt32) : (UInt8, UInt16)\nconst BitIntegerSmall_types = (BitSignedSmall_types..., BitUnsignedSmall_types...)\n\nconst BitSigned32 = Union{BitSigned32_types...}\nconst BitUnsigned32 = Union{BitUnsigned32_types...}\nconst BitInteger32 = Union{BitInteger32_types...}\n\nconst BitSigned64 = Union{BitSigned64_types...}\nconst BitUnsigned64 = Union{BitUnsigned64_types...}\nconst BitInteger64 = Union{BitInteger64_types...}\n\nconst BitSigned = Union{BitSigned_types...}\nconst BitUnsigned = Union{BitUnsigned_types...}\nconst BitInteger = Union{BitInteger_types...}\n\nconst BitSignedSmall = Union{BitSignedSmall_types...}\nconst BitUnsignedSmall = Union{BitUnsignedSmall_types...}\nconst BitIntegerSmall = Union{BitIntegerSmall_types...}\n\nconst BitSigned64T = Union{Type{Int8}, Type{Int16}, Type{Int32}, Type{Int64}}\nconst BitUnsigned64T = Union{Type{UInt8}, Type{UInt16}, Type{UInt32}, Type{UInt64}}\n\nconst BitIntegerType = Union{map(T->Type{T}, BitInteger_types)...}\n\n# >> this use of `unsigned` is defined somewhere else << the docstring should migrate there\n\"\"\"\n unsigned(T::Integer)\n\nConvert an integer bitstype to the unsigned type of the same size.\n# Examples\n```jldoctest\njulia> unsigned(Int16)\nUInt16\njulia> unsigned(UInt64)\nUInt64\n```\n\"\"\" unsigned\n\n\"\"\"\n signed(T::Integer)\n\nConvert an integer bitstype to the signed type of the same size.\n# Examples\n```jldoctest\njulia> signed(UInt16)\nInt16\njulia> signed(UInt64)\nInt64\n```\n\"\"\"\nsigned(::Type{UInt8}) = Int8\nsigned(::Type{UInt16}) = Int16\nsigned(::Type{UInt32}) = Int32\nsigned(::Type{UInt64}) = Int64\nsigned(::Type{UInt128}) = Int128\nsigned(::Type{T}) where {T<:Signed} = T\n\n## integer comparisons ##\n\n(<)(x::T, y::T) where {T<:BitSigned} = slt_int(x, y)\n\n(-)(x::BitInteger) = neg_int(x)\n(-)(x::T, y::T) where {T<:BitInteger} = sub_int(x, y)\n(+)(x::T, y::T) where {T<:BitInteger} = add_int(x, y)\n(*)(x::T, y::T) where {T<:BitInteger} = mul_int(x, y)\n\ninv(x::Integer) = float(one(x)) \/ float(x)\n(\/)(x::T, y::T) where {T<:Integer} = float(x) \/ float(y)\n# skip promotion for system integer types\n(\/)(x::BitInteger, y::BitInteger) = float(x) \/ float(y)\n\n\"\"\"\n isodd(x::Integer) -> Bool\n\nReturn `true` if `x` is odd (that is, not divisible by 2), and `false` otherwise.\n\n# Examples\n```jldoctest\njulia> isodd(9)\ntrue\n\njulia> isodd(10)\nfalse\n```\n\"\"\"\nisodd(n::Integer) = rem(n, 2) != 0\n\n\"\"\"\n iseven(x::Integer) -> Bool\n\nReturn `true` is `x` is even 
(that is, divisible by 2), and `false` otherwise.\n\n# Examples\n```jldoctest\njulia> iseven(9)\nfalse\n\njulia> iseven(10)\ntrue\n```\n\"\"\"\niseven(n::Integer) = !isodd(n)\n\nsignbit(x::Integer) = x < 0\nsignbit(x::Unsigned) = false\n\nflipsign(x::T, y::T) where {T<:BitSigned} = flipsign_int(x, y)\nflipsign(x::BitSigned, y::BitSigned) = flipsign_int(promote(x, y)...) % typeof(x)\n\nflipsign(x::Signed, y::Float16) = flipsign(x, bitcast(Int16, y))\nflipsign(x::Signed, y::Float32) = flipsign(x, bitcast(Int32, y))\nflipsign(x::Signed, y::Float64) = flipsign(x, bitcast(Int64, y))\nflipsign(x::Signed, y::Real) = flipsign(x, -oftype(x, signbit(y)))\n\ncopysign(x::Signed, y::Signed) = flipsign(x, x \u22bb y)\ncopysign(x::Signed, y::Float16) = copysign(x, bitcast(Int16, y))\ncopysign(x::Signed, y::Float32) = copysign(x, bitcast(Int32, y))\ncopysign(x::Signed, y::Float64) = copysign(x, bitcast(Int64, y))\ncopysign(x::Signed, y::Real) = copysign(x, -oftype(x, signbit(y)))\n\n\"\"\"\n abs(x)\n\nThe absolute value of `x`.\n\nWhen `abs` is applied to signed integers, overflow may occur,\nresulting in the return of a negative value. This overflow occurs only\nwhen `abs` is applied to the minimum representable value of a signed\ninteger. That is, when `x == typemin(typeof(x))`, `abs(x) == x < 0`,\nnot `-x` as might be expected.\n\n# Examples\n```jldoctest\njulia> abs(-3)\n3\n\njulia> abs(1 + im)\n1.4142135623730951\n\njulia> abs(typemin(Int64))\n-9223372036854775808\n```\n\"\"\"\nfunction abs end\n\nabs(x::Unsigned) = x\nabs(x::Signed) = flipsign(x,x)\n\n~(n::Integer) = -n-1\n\n\"\"\"\n unsigned(x)\n\nConvert a number to an unsigned integer. If the argument is signed, it is reinterpreted as\nunsigned without checking for negative values.\n# Examples\n```jldoctest\njulia> unsigned(-2)\n0xfffffffffffffffe\njulia> unsigned(2)\n0x0000000000000002\njulia> signed(unsigned(-2))\n-2\n```\n\"\"\"\nunsigned(x) = x % typeof(convert(Unsigned, zero(x)))\nunsigned(x::BitSigned) = reinterpret(typeof(convert(Unsigned, zero(x))), x)\n\n\"\"\"\n signed(x)\n\nConvert a number to a signed integer. If the argument is unsigned, it is reinterpreted as\nsigned without checking for overflow.\n\"\"\"\nsigned(x) = x % typeof(convert(Signed, zero(x)))\nsigned(x::BitUnsigned) = reinterpret(typeof(convert(Signed, zero(x))), x)\n\ndiv(x::BitSigned, y::Unsigned) = flipsign(signed(div(unsigned(abs(x)), y)), x)\ndiv(x::Unsigned, y::BitSigned) = unsigned(flipsign(signed(div(x, unsigned(abs(y)))), y))\n\nrem(x::BitSigned, y::Unsigned) = flipsign(signed(rem(unsigned(abs(x)), y)), x)\nrem(x::Unsigned, y::BitSigned) = rem(x, unsigned(abs(y)))\n\nfunction divrem(x::BitSigned, y::Unsigned)\n q, r = divrem(unsigned(abs(x)), y)\n flipsign(signed(q), x), flipsign(signed(r), x)\nend\n\nfunction divrem(x::Unsigned, y::BitSigned)\n q, r = divrem(x, unsigned(abs(y)))\n unsigned(flipsign(signed(q), y)), r\nend\n\n\n\"\"\"\n mod(x, y)\n rem(x, y, RoundDown)\n\nThe reduction of `x` modulo `y`, or equivalently, the remainder of `x` after floored\ndivision by `y`, i.e. `x - y*fld(x,y)` if computed without intermediate rounding.\n\nThe result will have the same sign as `y`, and magnitude less than `abs(y)` (with some\nexceptions, see note below).\n\n!!! note\n\n When used with floating point values, the exact result may not be representable by the\n type, and so rounding error may occur. 
In particular, if the exact result is very\n close to `y`, then it may be rounded to `y`.\n\n```jldoctest\njulia> mod(8, 3)\n2\n\njulia> mod(9, 3)\n0\n\njulia> mod(8.9, 3)\n2.9000000000000004\n\njulia> mod(eps(), 3)\n2.220446049250313e-16\n\njulia> mod(-eps(), 3)\n3.0\n```\n\"\"\"\nfunction mod(x::T, y::T) where T<:Integer\n y == -1 && return T(0) # avoid potential overflow in fld\n return x - fld(x, y) * y\nend\nmod(x::BitSigned, y::Unsigned) = rem(y + unsigned(rem(x, y)), y)\nmod(x::Unsigned, y::Signed) = rem(y + signed(rem(x, y)), y)\nmod(x::T, y::T) where {T<:Unsigned} = rem(x, y)\n\n# Don't promote integers for div\/rem\/mod since there is no danger of overflow,\n# while there is a substantial performance penalty to 64-bit promotion.\ndiv(x::T, y::T) where {T<:BitSigned64} = checked_sdiv_int(x, y)\nrem(x::T, y::T) where {T<:BitSigned64} = checked_srem_int(x, y)\ndiv(x::T, y::T) where {T<:BitUnsigned64} = checked_udiv_int(x, y)\nrem(x::T, y::T) where {T<:BitUnsigned64} = checked_urem_int(x, y)\n\n## integer bitwise operations ##\n\n\"\"\"\n ~(x)\n\nBitwise not.\n\n# Examples\n```jldoctest\njulia> ~4\n-5\n\njulia> ~10\n-11\n\njulia> ~true\nfalse\n```\n\"\"\"\n(~)(x::BitInteger) = not_int(x)\n\n\"\"\"\n x & y\n\nBitwise and. Implements [three-valued logic](https:\/\/en.wikipedia.org\/wiki\/Three-valued_logic),\nreturning [`missing`](@ref) if one operand is `missing` and the other is `true`.\n\n# Examples\n```jldoctest\njulia> 4 & 10\n0\n\njulia> 4 & 12\n4\n\njulia> true & missing\nmissing\n\njulia> false & missing\nfalse\n```\n\"\"\"\n(&)(x::T, y::T) where {T<:BitInteger} = and_int(x, y)\n\n\"\"\"\n x | y\n\nBitwise or. Implements [three-valued logic](https:\/\/en.wikipedia.org\/wiki\/Three-valued_logic),\nreturning [`missing`](@ref) if one operand is `missing` and the other is `false`.\n\n# Examples\n```jldoctest\njulia> 4 | 10\n14\n\njulia> 4 | 1\n5\n\njulia> true | missing\ntrue\n\njulia> false | missing\nmissing\n```\n\"\"\"\n(|)(x::T, y::T) where {T<:BitInteger} = or_int(x, y)\nxor(x::T, y::T) where {T<:BitInteger} = xor_int(x, y)\n\n\"\"\"\n bswap(n)\n\nReverse the byte order of `n`.\n\n(See also [`ntoh`](@ref) and [`hton`](@ref) to convert between the current native byte order and big-endian order.)\n\n# Examples\n```jldoctest\njulia> a = bswap(0x10203040)\n0x40302010\n\njulia> bswap(a)\n0x10203040\n\njulia> string(1, base = 2)\n\"1\"\n\njulia> string(bswap(1), base = 2)\n\"100000000000000000000000000000000000000000000000000000000\"\n```\n\"\"\"\nbswap(x::Union{Int8, UInt8}) = x\nbswap(x::Union{Int16, UInt16, Int32, UInt32, Int64, UInt64, Int128, UInt128}) =\n bswap_int(x)\n\n\"\"\"\n count_ones(x::Integer) -> Integer\n\nNumber of ones in the binary representation of `x`.\n\n# Examples\n```jldoctest\njulia> count_ones(7)\n3\n```\n\"\"\"\ncount_ones(x::BitInteger) = ctpop_int(x) % Int\n\n\"\"\"\n leading_zeros(x::Integer) -> Integer\n\nNumber of zeros leading the binary representation of `x`.\n\n# Examples\n```jldoctest\njulia> leading_zeros(Int32(1))\n31\n```\n\"\"\"\nleading_zeros(x::BitInteger) = ctlz_int(x) % Int\n\n\"\"\"\n trailing_zeros(x::Integer) -> Integer\n\nNumber of zeros trailing the binary representation of `x`.\n\n# Examples\n```jldoctest\njulia> trailing_zeros(2)\n1\n```\n\"\"\"\ntrailing_zeros(x::BitInteger) = cttz_int(x) % Int\n\n\"\"\"\n count_zeros(x::Integer) -> Integer\n\nNumber of zeros in the binary representation of `x`.\n\n# Examples\n```jldoctest\njulia> count_zeros(Int32(2 ^ 16 - 1))\n16\n```\n\"\"\"\ncount_zeros(x::Integer) = count_ones(~x)\n\n\"\"\"\n 
leading_ones(x::Integer) -> Integer\n\nNumber of ones leading the binary representation of `x`.\n\n# Examples\n```jldoctest\njulia> leading_ones(UInt32(2 ^ 32 - 2))\n31\n```\n\"\"\"\nleading_ones(x::Integer) = leading_zeros(~x)\n\n\"\"\"\n trailing_ones(x::Integer) -> Integer\n\nNumber of ones trailing the binary representation of `x`.\n\n# Examples\n```jldoctest\njulia> trailing_ones(3)\n2\n```\n\"\"\"\ntrailing_ones(x::Integer) = trailing_zeros(~x)\n\n## integer comparisons ##\n\n(< )(x::T, y::T) where {T<:BitUnsigned} = ult_int(x, y)\n(<=)(x::T, y::T) where {T<:BitSigned} = sle_int(x, y)\n(<=)(x::T, y::T) where {T<:BitUnsigned} = ule_int(x, y)\n\n==(x::BitSigned, y::BitUnsigned) = (x >= 0) & (unsigned(x) == y)\n==(x::BitUnsigned, y::BitSigned ) = (y >= 0) & (x == unsigned(y))\n<( x::BitSigned, y::BitUnsigned) = (x < 0) | (unsigned(x) < y)\n<( x::BitUnsigned, y::BitSigned ) = (y >= 0) & (x < unsigned(y))\n<=(x::BitSigned, y::BitUnsigned) = (x < 0) | (unsigned(x) <= y)\n<=(x::BitUnsigned, y::BitSigned ) = (y >= 0) & (x <= unsigned(y))\n\n## integer shifts ##\n\n# unsigned shift counts always shift in the same direction\n>>(x::BitSigned, y::BitUnsigned) = ashr_int(x, y)\n>>(x::BitUnsigned, y::BitUnsigned) = lshr_int(x, y)\n<<(x::BitInteger, y::BitUnsigned) = shl_int(x, y)\n>>>(x::BitInteger, y::BitUnsigned) = lshr_int(x, y)\n# signed shift counts can shift in either direction\n# note: this early during bootstrap, `>=` is not yet available\n# note: we only define Int shift counts here; the generic case is handled later\n>>(x::BitInteger, y::Int) =\n ifelse(0 <= y, x >> unsigned(y), x << unsigned(-y))\n<<(x::BitInteger, y::Int) =\n ifelse(0 <= y, x << unsigned(y), x >> unsigned(-y))\n>>>(x::BitInteger, y::Int) =\n ifelse(0 <= y, x >>> unsigned(y), x << unsigned(-y))\n\nfor to in BitInteger_types, from in (BitInteger_types..., Bool)\n if !(to === from)\n if to.size < from.size\n @eval rem(x::($from), ::Type{$to}) = trunc_int($to, x)\n elseif from === Bool\n @eval rem(x::($from), ::Type{$to}) = convert($to, x)\n elseif from.size < to.size\n if from <: Signed\n @eval rem(x::($from), ::Type{$to}) = sext_int($to, x)\n else\n @eval rem(x::($from), ::Type{$to}) = convert($to, x)\n end\n else\n @eval rem(x::($from), ::Type{$to}) = bitcast($to, x)\n end\n end\nend\n\n## integer bitwise rotations ##\n\n\"\"\"\n bitrotate(x::Base.BitInteger, k::Integer)\n\n`bitrotate(x, k)` implements bitwise rotation.\nIt returns the value of `x` with its bits rotated left `k` times.\nA negative value of `k` will rotate to the right instead.\n\n!!! 
compat \"Julia 1.5\"\n This function requires Julia 1.5 or later.\n\n```jldoctest\njulia> bitrotate(UInt8(114), 2)\n0xc9\n\njulia> bitstring(bitrotate(0b01110010, 2))\n\"11001001\"\n\njulia> bitstring(bitrotate(0b01110010, -2))\n\"10011100\"\n\njulia> bitstring(bitrotate(0b01110010, 8))\n\"01110010\"\n```\n\"\"\"\nbitrotate(x::T, k::Integer) where {T <: BitInteger} =\n (x << ((sizeof(T) << 3 - 1) & k)) | (x >>> ((sizeof(T) << 3 - 1) & -k))\n\n# @doc isn't available when running in Core at this point.\n# Tuple syntax for documentation two function signatures at the same time\n# doesn't work either at this point.\nif nameof(@__MODULE__) === :Base\n for fname in (:mod, :rem)\n @eval @doc \"\"\"\n rem(x::Integer, T::Type{<:Integer}) -> T\n mod(x::Integer, T::Type{<:Integer}) -> T\n %(x::Integer, T::Type{<:Integer}) -> T\n\n Find `y::T` such that `x` \u2261 `y` (mod n), where n is the number of integers representable\n in `T`, and `y` is an integer in `[typemin(T),typemax(T)]`.\n If `T` can represent any integer (e.g. `T == BigInt`), then this operation corresponds to\n a conversion to `T`.\n\n # Examples\n ```jldoctest\n julia> 129 % Int8\n -127\n ```\n \"\"\" $fname(x::Integer, T::Type{<:Integer})\n end\nend\n\nrem(x::T, ::Type{T}) where {T<:Integer} = x\nrem(x::Signed, ::Type{Unsigned}) = x % unsigned(typeof(x))\nrem(x::Unsigned, ::Type{Signed}) = x % signed(typeof(x))\nrem(x::Integer, T::Type{<:Integer}) = convert(T, x) # `x % T` falls back to `convert`\nrem(x::Integer, ::Type{Bool}) = ((x & 1) != 0)\nmod(x::Integer, ::Type{T}) where {T<:Integer} = rem(x, T)\n\nunsafe_trunc(::Type{T}, x::Integer) where {T<:Integer} = rem(x, T)\n\n\"\"\"\n trunc([T,] x)\n trunc(x; digits::Integer= [, base = 10])\n trunc(x; sigdigits::Integer= [, base = 10])\n\n`trunc(x)` returns the nearest integral value of the same type as `x` whose absolute value\nis less than or equal to `x`.\n\n`trunc(T, x)` converts the result to type `T`, throwing an `InexactError` if the value is\nnot representable.\n\n`digits`, `sigdigits` and `base` work as for [`round`](@ref).\n\"\"\"\nfunction trunc end\n\n\"\"\"\n floor([T,] x)\n floor(x; digits::Integer= [, base = 10])\n floor(x; sigdigits::Integer= [, base = 10])\n\n`floor(x)` returns the nearest integral value of the same type as `x` that is less than or\nequal to `x`.\n\n`floor(T, x)` converts the result to type `T`, throwing an `InexactError` if the value is\nnot representable.\n\n`digits`, `sigdigits` and `base` work as for [`round`](@ref).\n\"\"\"\nfunction floor end\n\n\"\"\"\n ceil([T,] x)\n ceil(x; digits::Integer= [, base = 10])\n ceil(x; sigdigits::Integer= [, base = 10])\n\n`ceil(x)` returns the nearest integral value of the same type as `x` that is greater than or\nequal to `x`.\n\n`ceil(T, x)` converts the result to type `T`, throwing an `InexactError` if the value is not\nrepresentable.\n\n`digits`, `sigdigits` and `base` work as for [`round`](@ref).\n\"\"\"\nfunction ceil end\n\nround(::Type{T}, x::Integer) where {T<:Integer} = convert(T, x)\ntrunc(::Type{T}, x::Integer) where {T<:Integer} = convert(T, x)\nfloor(::Type{T}, x::Integer) where {T<:Integer} = convert(T, x)\n ceil(::Type{T}, x::Integer) where {T<:Integer} = convert(T, x)\n\n## integer construction ##\n\n\"\"\"\n @int128_str str\n @int128_str(str)\n\n`@int128_str` parses a string into a Int128\nThrows an `ArgumentError` if the string is not a valid integer\n\"\"\"\nmacro int128_str(s)\n return parse(Int128, s)\nend\n\n\"\"\"\n @uint128_str str\n @uint128_str(str)\n\n`@uint128_str` parses a string into 
a UInt128\nThrows an `ArgumentError` if the string is not a valid integer\n\"\"\"\nmacro uint128_str(s)\n return parse(UInt128, s)\nend\n\n\"\"\"\n @big_str str\n @big_str(str)\n\nParse a string into a [`BigInt`](@ref) or [`BigFloat`](@ref),\nand throw an `ArgumentError` if the string is not a valid number.\nFor integers `_` is allowed in the string as a separator.\n\n# Examples\n```jldoctest\njulia> big\"123_456\"\n123456\n\njulia> big\"7891.5\"\n7891.5\n```\n\"\"\"\nmacro big_str(s)\n if '_' in s\n # remove _ in s[2:end-1]\n bf = IOBuffer(maxsize=lastindex(s))\n print(bf, s[1])\n for c in SubString(s, 2, lastindex(s)-1)\n c != '_' && print(bf, c)\n end\n print(bf, s[end])\n seekstart(bf)\n n = tryparse(BigInt, String(take!(bf)))\n n === nothing || return n\n else\n n = tryparse(BigInt, s)\n n === nothing || return n\n n = tryparse(BigFloat, s)\n n === nothing || return n\n end\n message = \"invalid number format $s for BigInt or BigFloat\"\n return :(throw(ArgumentError($message)))\nend\n\n## integer promotions ##\n\n# with different sizes, promote to larger type\npromote_rule(::Type{Int16}, ::Union{Type{Int8}, Type{UInt8}}) = Int16\npromote_rule(::Type{Int32}, ::Union{Type{Int16}, Type{Int8}, Type{UInt16}, Type{UInt8}}) = Int32\npromote_rule(::Type{Int64}, ::Union{Type{Int16}, Type{Int32}, Type{Int8}, Type{UInt16}, Type{UInt32}, Type{UInt8}}) = Int64\npromote_rule(::Type{Int128}, ::Union{Type{Int16}, Type{Int32}, Type{Int64}, Type{Int8}, Type{UInt16}, Type{UInt32}, Type{UInt64}, Type{UInt8}}) = Int128\npromote_rule(::Type{UInt16}, ::Union{Type{Int8}, Type{UInt8}}) = UInt16\npromote_rule(::Type{UInt32}, ::Union{Type{Int16}, Type{Int8}, Type{UInt16}, Type{UInt8}}) = UInt32\npromote_rule(::Type{UInt64}, ::Union{Type{Int16}, Type{Int32}, Type{Int8}, Type{UInt16}, Type{UInt32}, Type{UInt8}}) = UInt64\npromote_rule(::Type{UInt128}, ::Union{Type{Int16}, Type{Int32}, Type{Int64}, Type{Int8}, Type{UInt16}, Type{UInt32}, Type{UInt64}, Type{UInt8}}) = UInt128\n# with mixed signedness and same size, Unsigned wins\npromote_rule(::Type{UInt8}, ::Type{Int8} ) = UInt8\npromote_rule(::Type{UInt16}, ::Type{Int16} ) = UInt16\npromote_rule(::Type{UInt32}, ::Type{Int32} ) = UInt32\npromote_rule(::Type{UInt64}, ::Type{Int64} ) = UInt64\npromote_rule(::Type{UInt128}, ::Type{Int128}) = UInt128\n\n## traits ##\n\n\"\"\"\n typemin(T)\n\nThe lowest value representable by the given (real) numeric DataType `T`.\n\n# Examples\n```jldoctest\njulia> typemin(Float16)\n-Inf16\n\njulia> typemin(Float32)\n-Inf32\n```\n\"\"\"\nfunction typemin end\n\n\"\"\"\n typemax(T)\n\nThe highest value representable by the given (real) numeric `DataType`.\n\n# Examples\n```jldoctest\njulia> typemax(Int8)\n127\n\njulia> typemax(UInt32)\n0xffffffff\n```\n\"\"\"\nfunction typemax end\n\ntypemin(::Type{Int8 }) = Int8(-128)\ntypemax(::Type{Int8 }) = Int8(127)\ntypemin(::Type{UInt8 }) = UInt8(0)\ntypemax(::Type{UInt8 }) = UInt8(255)\ntypemin(::Type{Int16 }) = Int16(-32768)\ntypemax(::Type{Int16 }) = Int16(32767)\ntypemin(::Type{UInt16}) = UInt16(0)\ntypemax(::Type{UInt16}) = UInt16(65535)\ntypemin(::Type{Int32 }) = Int32(-2147483648)\ntypemax(::Type{Int32 }) = Int32(2147483647)\ntypemin(::Type{UInt32}) = UInt32(0)\ntypemax(::Type{UInt32}) = UInt32(4294967295)\ntypemin(::Type{Int64 }) = -9223372036854775808\ntypemax(::Type{Int64 }) = 9223372036854775807\ntypemin(::Type{UInt64}) = UInt64(0)\ntypemax(::Type{UInt64}) = 0xffffffffffffffff\n@eval typemin(::Type{UInt128}) = $(convert(UInt128, 0))\n@eval typemax(::Type{UInt128}) = 
$(bitcast(UInt128, convert(Int128, -1)))\n@eval typemin(::Type{Int128} ) = $(convert(Int128, 1) << 127)\n@eval typemax(::Type{Int128} ) = $(bitcast(Int128, typemax(UInt128) >> 1))\n\n\nwiden(::Type{Int8}) = Int16\nwiden(::Type{Int16}) = Int32\nwiden(::Type{Int32}) = Int64\nwiden(::Type{Int64}) = Int128\nwiden(::Type{UInt8}) = UInt16\nwiden(::Type{UInt16}) = UInt32\nwiden(::Type{UInt32}) = UInt64\nwiden(::Type{UInt64}) = UInt128\n\n# a few special cases,\n# Int64*UInt64 => Int128\n# |x|<=2^(k-1), |y|<=2^k-1 => |x*y|<=2^(2k-1)-1\nwidemul(x::Signed,y::Unsigned) = widen(x) * signed(widen(y))\nwidemul(x::Unsigned,y::Signed) = signed(widen(x)) * widen(y)\n# multplication by Bool doesn't require widening\nwidemul(x::Bool,y::Bool) = x * y\nwidemul(x::Bool,y::Number) = x * y\nwidemul(x::Number,y::Bool) = x * y\n\n\n## wide multiplication, Int128 multiply and divide ##\n\nif Core.sizeof(Int) == 4\n function widemul(u::Int64, v::Int64)\n local u0::UInt64, v0::UInt64, w0::UInt64\n local u1::Int64, v1::Int64, w1::UInt64, w2::Int64, t::UInt64\n\n u0 = u & 0xffffffff; u1 = u >> 32\n v0 = v & 0xffffffff; v1 = v >> 32\n w0 = u0 * v0\n t = reinterpret(UInt64, u1) * v0 + (w0 >>> 32)\n w2 = reinterpret(Int64, t) >> 32\n w1 = u0 * reinterpret(UInt64, v1) + (t & 0xffffffff)\n hi = u1 * v1 + w2 + (reinterpret(Int64, w1) >> 32)\n lo = w0 & 0xffffffff + (w1 << 32)\n return Int128(hi) << 64 + Int128(lo)\n end\n\n function widemul(u::UInt64, v::UInt64)\n local u0::UInt64, v0::UInt64, w0::UInt64\n local u1::UInt64, v1::UInt64, w1::UInt64, w2::UInt64, t::UInt64\n\n u0 = u & 0xffffffff; u1 = u >>> 32\n v0 = v & 0xffffffff; v1 = v >>> 32\n w0 = u0 * v0\n t = u1 * v0 + (w0 >>> 32)\n w2 = t >>> 32\n w1 = u0 * v1 + (t & 0xffffffff)\n hi = u1 * v1 + w2 + (w1 >>> 32)\n lo = w0 & 0xffffffff + (w1 << 32)\n return UInt128(hi) << 64 + UInt128(lo)\n end\n\n function *(u::Int128, v::Int128)\n u0 = u % UInt64; u1 = Int64(u >> 64)\n v0 = v % UInt64; v1 = Int64(v >> 64)\n lolo = widemul(u0, v0)\n lohi = widemul(reinterpret(Int64, u0), v1)\n hilo = widemul(u1, reinterpret(Int64, v0))\n t = reinterpret(UInt128, hilo) + (lolo >>> 64)\n w1 = reinterpret(UInt128, lohi) + (t & 0xffffffffffffffff)\n return Int128(lolo & 0xffffffffffffffff) + reinterpret(Int128, w1) << 64\n end\n\n function *(u::UInt128, v::UInt128)\n u0 = u % UInt64; u1 = UInt64(u>>>64)\n v0 = v % UInt64; v1 = UInt64(v>>>64)\n lolo = widemul(u0, v0)\n lohi = widemul(u0, v1)\n hilo = widemul(u1, v0)\n t = hilo + (lolo >>> 64)\n w1 = lohi + (t & 0xffffffffffffffff)\n return (lolo & 0xffffffffffffffff) + UInt128(w1) << 64\n end\n\n function _setbit(x::UInt128, i)\n # faster version of `return x | (UInt128(1) << i)`\n j = i >> 5\n y = UInt128(one(UInt32) << (i & 0x1f))\n if j == 0\n return x | y\n elseif j == 1\n return x | (y << 32)\n elseif j == 2\n return x | (y << 64)\n elseif j == 3\n return x | (y << 96)\n end\n return x\n end\n\n function divrem(x::UInt128, y::UInt128)\n iszero(y) && throw(DivideError())\n if (x >> 64) % UInt64 == 0\n if (y >> 64) % UInt64 == 0\n # fast path: upper 64 bits are zero, so we can fallback to UInt64 division\n q64, x64 = divrem(x % UInt64, y % UInt64)\n return UInt128(q64), UInt128(x64)\n else\n # this implies y>x, so\n return zero(UInt128), x\n end\n end\n n = leading_zeros(y) - leading_zeros(x)\n q = zero(UInt128)\n ys = y << n\n while n >= 0\n # ys == y * 2^n\n if ys <= x\n x -= ys\n q = _setbit(q, n)\n if (x >> 64) % UInt64 == 0\n # exit early, similar to above fast path\n if (y >> 64) % UInt64 == 0\n q64, x64 = divrem(x % UInt64, y % 
UInt64)\n q |= q64\n x = UInt128(x64)\n end\n return q, x\n end\n end\n ys >>>= 1\n n -= 1\n end\n return q, x\n end\n\n function div(x::Int128, y::Int128)\n (x == typemin(Int128)) & (y == -1) && throw(DivideError())\n return Int128(div(BigInt(x), BigInt(y)))::Int128\n end\n div(x::UInt128, y::UInt128) = divrem(x, y)[1]\n\n function rem(x::Int128, y::Int128)\n return Int128(rem(BigInt(x), BigInt(y)))::Int128\n end\n\n function rem(x::UInt128, y::UInt128)\n iszero(y) && throw(DivideError())\n if (x >> 64) % UInt64 == 0\n if (y >> 64) % UInt64 == 0\n # fast path: upper 64 bits are zero, so we can fallback to UInt64 division\n return UInt128(rem(x % UInt64, y % UInt64))\n else\n # this implies y>x, so\n return x\n end\n end\n n = leading_zeros(y) - leading_zeros(x)\n ys = y << n\n while n >= 0\n # ys == y * 2^n\n if ys <= x\n x -= ys\n if (x >> 64) % UInt64 == 0\n # exit early, similar to above fast path\n if (y >> 64) % UInt64 == 0\n x = UInt128(rem(x % UInt64, y % UInt64))\n end\n return x\n end\n end\n ys >>>= 1\n n -= 1\n end\n return x\n end\n\n function mod(x::Int128, y::Int128)\n return Int128(mod(BigInt(x), BigInt(y)))::Int128\n end\nelse\n *(x::T, y::T) where {T<:Union{Int128,UInt128}} = mul_int(x, y)\n\n div(x::Int128, y::Int128) = checked_sdiv_int(x, y)\n div(x::UInt128, y::UInt128) = checked_udiv_int(x, y)\n\n rem(x::Int128, y::Int128) = checked_srem_int(x, y)\n rem(x::UInt128, y::UInt128) = checked_urem_int(x, y)\nend\n\n# issue #15489: since integer ops are unchecked, they shouldn't check promotion\nfor op in (:+, :-, :*, :&, :|, :xor)\n @eval function $op(a::Integer, b::Integer)\n T = promote_typeof(a, b)\n aT, bT = a % T, b % T\n not_sametype((a, b), (aT, bT))\n return $op(aT, bT)\n end\nend\n\nconst _mask1_uint128 = (UInt128(0x5555555555555555) << 64) | UInt128(0x5555555555555555)\nconst _mask2_uint128 = (UInt128(0x3333333333333333) << 64) | UInt128(0x3333333333333333)\nconst _mask4_uint128 = (UInt128(0x0f0f0f0f0f0f0f0f) << 64) | UInt128(0x0f0f0f0f0f0f0f0f)\n\n\"\"\"\n bitreverse(x)\n\nReverse the order of bits in integer `x`. `x` must have a fixed bit width,\ne.g. be an `Int16` or `Int32`.\n\n!!! 
compat \"Julia 1.5\"\n This function requires Julia 1.5 or later.\n\n# Examples\n```jldoctest\njulia> bitreverse(0x8080808080808080)\n0x0101010101010101\n\njulia> reverse(bitstring(0xa06e)) == bitstring(bitreverse(0xa06e))\ntrue\n```\n\"\"\"\nfunction bitreverse(x::BitInteger)\n # TODO: consider using llvm.bitreverse intrinsic\n z = unsigned(x)\n mask1 = _mask1_uint128 % typeof(z)\n mask2 = _mask2_uint128 % typeof(z)\n mask4 = _mask4_uint128 % typeof(z)\n z = ((z & mask1) << 1) | ((z >> 1) & mask1)\n z = ((z & mask2) << 2) | ((z >> 2) & mask2)\n z = ((z & mask4) << 4) | ((z >> 4) & mask4)\n return bswap(z) % typeof(x)\nend\n","avg_line_length":28.5678496868,"max_line_length":154,"alphanum_fraction":0.6139652148} {"size":396,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"#if lowercase(get(ENV, \"CI\", \"false\")) == \"true\"\n# include(\"install_pycall.jl\")\n#end\n#using Pkg\n#using PyCall\n#Pkg.build(\"PyCall\")\n#println(PyCall.libpython)\nusing Comrade\nusing FFTW\nusing Statistics\nusing Test\nusing Plots\n\n#include(\"test_util.jl\")\n\n@testset \"Comrade.jl\" begin\n include(\"observation.jl\")\n include(\"distributions.jl\")\n include(\"models.jl\")\n include(\"bayes.jl\")\nend\n","avg_line_length":18.0,"max_line_length":48,"alphanum_fraction":0.6994949495} {"size":3263,"ext":"jl","lang":"Julia","max_stars_count":8.0,"content":"\"\"\"\ntypes : stores types \/ objects used.\n\tMostly just bags of parameters for different models and different sequences with various properties we want to maintain.\n\"\"\"\n\nmodule types\n\nmutable struct GremModel #MRF model.\n\tw1::Array{Float32, 2}\n\tw2::Array{Float32, 2}\n\tnNodes::Int\n\tbatch_size::Int\nend\n\nmutable struct MRF_nuc #from simulated_annealing_dna_clean.\n\tnucs::String #no reason not to use letters\n\tskip_sam::Array{Int64, 1}\n\tskip_trn::Array{Int64, 1}\n\t#You should guarantee gaps are in sorted order\n\tgap_pos::Array{Tuple{Int64, Int64}, 1}\nend\n\nmutable struct Chromosome #another type\n\tpath::Array{Tuple{Int64, Int64, Int64}}\n\tfull_sequence::AbstractString\n\tdeg_nuc::MRF_nuc\n\tdeg_map::Dict{Int64, Int64}\n\tdeg_trns::Int64\n\tdeg_trne::Int64\n\tdeg_d::AbstractString #this is direction, +\/-\n\tdeg_skip::Array{Int64}\n\tdeg_insert::Array{Tuple{Int64, Int64}}\n\n\tmark_nuc::MRF_nuc\n\tmark_map::Dict{Int64, Int64}\n\tmark_trns::Int64\n\tmark_trne::Int64\n\tmark_d::AbstractString\n\tmark_skip::Array{Int64}\n\tmark_insert::Array{Tuple{Int64, Int64}}\n\n\tdeg_prob::Float32\n\tmark_prob::Float32\nend\n\nmutable struct SampleNucs\n\tfinal_seq::AbstractString\n\tdeg_trns::Int64\n\tdeg_trne::Int64\n\tmark_trns::Int64\n\tmark_trne::Int64\n\tpath #An array of tuples.\nend\n\nmutable struct ExtendedSampleNucs\n\tfinal_seq::AbstractString\n\tdeg_trns::Int64\n\tdeg_trne::Int64\n\tdeg_d::AbstractString\n\tmark_trns::Int64\n\tmark_trne::Int64\n\tmark_d::AbstractString\n\tpath #An array of tuples.\n\tdeg_skip\n\tdeg_insert\n\tmark_skip\n\tmark_insert\nend\n\nmutable struct ExChrome #extended chromosome.\n\tpath::Array{Tuple{Int64, Int64, Int64}}\n\tfull_sequence::AbstractString\n\tdeg_nuc::Any\n\tdeg_map::Dict{Int64, Int64}\n\tdeg_trns::Int64\n\tdeg_trne::Int64\n\tdeg_d::AbstractString\n\tdeg_skip::Array{Int64}\n\tdeg_insert::Array{Tuple{Int64, Int64}}\n\n\tmark_nuc::Any\n\tmark_map::Dict{Int64, Int64}\n\tmark_trns::Int64\n\tmark_trne::Int64\n\tmark_d::AbstractString\n\tmark_skip::Array{Int64}\n\tmark_insert::Array{Tuple{Int64, 
Int64}}\n\n\tdeg_prob::Float32\n\tdeg_base_E::Float32\n\tdeg_vec\n\tdeg_seq::AbstractString\n\tdeg_ull::Float32\n\tdeg_pv_w1::Float32\n\tdeg_pv_w2::Array{Float32, 2}\n\n\tmark_prob::Float32\n\tmark_base_E::Float32\n\tmark_vec\n\tmark_seq::AbstractString\n\tmark_ull::Float32\n\tmark_pv_w1::Float32\n\tmark_pv_w2::Array{Float32, 2}\n\n\tfirst_weight::Float32\nend\n\nmutable struct SaveChrome #extended chromosome in saveable form (discard matrices which are easily re-calculable)\n\tpath::Array{Tuple{Int64, Int64, Int64}}\n\tfull_sequence::AbstractString\n\tdeg_nuc::Any #MRF_nuc\n\tdeg_map::Dict{Int64, Int64}\n\tdeg_trns::Int64\n\tdeg_trne::Int64\n\tdeg_d::AbstractString\n\tdeg_skip::Array{Int64}\n\tdeg_insert::Array{Tuple{Int64, Int64}}\n\n\tmark_nuc::Any #MRF_nuc\n\tmark_map::Dict{Int64, Int64}\n\tmark_trns::Int64\n\tmark_trne::Int64\n\tmark_d::AbstractString\n\tmark_skip::Array{Int64}\n\tmark_insert::Array{Tuple{Int64, Int64}}\n\n\tdeg_prob::Float32\n\tdeg_base_E::Float32\n\t#deg_vec\n\tdeg_seq::AbstractString\n\t#deg_ull::Float32\n\t#deg_pv_w1::Float32\n\t#deg_pv_w2::Array{Float32, 2}\n\n\tmark_prob::Float32\n\tmark_base_E::Float32\n\t#mark_vec\n\tmark_seq::AbstractString\n\t#mark_ull::Float32\n\t#mark_pv_w1::Float32\n\t#mark_pv_w2::Array{Float32, 2}\n\n\tfirst_weight::Float32\nend\n\nmutable struct wholeHMM\n\tstate_probs::Array{Float64, 2}\n\tinsert_probs::Array{Float64, 2}\n\tmatch_probs::Array{Float64, 2}\nend\n\nend\n","avg_line_length":21.3267973856,"max_line_length":121,"alphanum_fraction":0.7741342323} {"size":926,"ext":"jl","lang":"Julia","max_stars_count":170.0,"content":"# Simple kernel for matrix multiplication\n@kernel function matmul_kernel!(a, b, c)\n i, j = @index(Global, NTuple)\n\n # creating a temporary sum variable for matrix multiplication\n tmp_sum = zero(eltype(c))\n for k = 1:size(a)[2]\n @inbounds tmp_sum += a[i,k] * b[k, j]\n end\n\n c[i,j] = tmp_sum\nend\n\n\nfunction matmul_testsuite(backend, ArrayT)\n\n matmul = matmul_kernel!(backend(), (32, 32))\n a = ArrayT(rand(128, 256))\n b = ArrayT(rand(256, 128))\n c = ArrayT(zeros(128, 128))\n wait(matmul(a, b, c, ndrange=size(c)))\n\n @test c \u2248 a*b\n\n dmatmul = Enzyme.autodiff(matmul)\n da = similar(a)\n da .= 0\n db = similar(b)\n db .= 0\n dc = similar(c)\n dc .= 1\n c .= 0\n\n compare_dc = copy(dc)\n wait(dmatmul(\n Duplicated(a, da),\n Duplicated(b, db),\n Duplicated(c, dc), ndrange=size(c)))\n\n @test da \u2248 compare_dc * b'\n @test db \u2248 a' * compare_dc\nend","avg_line_length":22.0476190476,"max_line_length":65,"alphanum_fraction":0.5788336933} {"size":365,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"\ninclude(\"BLTandPantelides\/BLTandPantelidesUtilities.jl\")\ninclude(\"BLTandPantelides\/BLTandPantelides.jl\")\n\ninclude(\"Utilities.jl\")\n\ninclude(\"ExactlyRemoveSingularities.jl\")\ninclude(\"StateSelection.jl\")\n\ninclude(\"DAEquations\/SymbolicTransform.jl\")\ninclude(\"DAEquations\/BasicStructuralTransform.jl\")\n\ninclude(\"StructuralTransform.jl\")\n\ninclude(\"ModelElaboration.jl\")\n","avg_line_length":22.8125,"max_line_length":56,"alphanum_fraction":0.8246575342} {"size":5517,"ext":"jl","lang":"Julia","max_stars_count":null,"content":"using Test\nusing Dates\nusing Printf\nusing NCDatasets\n\nsz = (4,5)\nfilename = tempname()\n#filename = \"\/tmp\/test-6.nc\"\n#if isfile(filename)\n# rm(filename)\n#end\n\n# The mode \"c\" stands for creating a new file (clobber)\nNCDatasets.NCDataset(filename,\"c\") do ds\n\n # define the dimension \"lon\" and \"lat\"\n ds.dim[\"lon\"] = 
sz[1]\n ds.dim[\"lat\"] = sz[2]\n\n v = NCDatasets.defVar(ds,\"small\",Float64,(\"lon\",\"lat\"))\n# @test_throws Union{NCDatasets.NetCDFError,DimensionMismatch} v[:] = zeros(sz[1]+1,sz[2])\n @test_throws NCDatasets.NetCDFError v[1:sz[1],1:sz[2]] = zeros(sz[1]+1,sz[2])\n @test_throws NCDatasets.NetCDFError v[sz[1]+1,1] = 1\n @test_throws NCDatasets.NetCDFError v[-1,1] = 1\n\n # variables\n for T in [UInt8,Int8,UInt16,Int16,UInt32,Int32,UInt64,Int64,Float32,Float64,\n Char,String]\n #for T in [String]\n local data\n data, scalar_data =\n if T == String\n [Char(i+60) * Char(j+60) for i = 1:sz[1], j = 1:sz[2]], \"abcde\"\n else\n [T(i+2*j) for i = 1:sz[1], j = 1:sz[2]], T(100)\n end\n\n v = NCDatasets.defVar(ds,\"var-$T\",T,(\"lon\",\"lat\"))\n v[:,:] = data\n @test v[:,:] == data[:,:]\n\n # issue #33\n @test Array(v) == data\n\n @test v[2,:] == data[2,:]\n\n @test v[:,3] == data[:,3]\n\n @test v[2,3] == data[2,3]\n\n # ignore extra index\n @test v[2,3,1,1] == data[2,3,1,1]\n\n # ignore extra index\n @test v[2:3,3,1,1] == data[2:3,3,1,1]\n\n @test v[[1,2,3],:] == data[[1,2,3],:]\n\n # cartesian indices\n ci = CartesianIndex(1,1):CartesianIndex(3,2)\n @test v.var[ci] == data[ci]\n\n # write scalar\n v.var[1,1] = scalar_data\n v.var[:,:] .= scalar_data\n @test all(v.var[:,:][:] .== scalar_data)\n\n # stridded write and read\n v[1:2:end,1:2:end] = data[1:2:end,1:2:end]\n @test all(v[1:2:end,1:2:end] .== data[1:2:end,1:2:end])\n end\nend\n\n# quick interface\nNCDataset(filename,\"c\") do ds\n data = Int32[i+3*j for i = 1:sz[1], j = 1:sz[2]]\n defVar(ds,\"temp\",data,(\"lon\",\"lat\"), attrib = [\n \"units\" => \"degree_Celsius\",\n \"long_name\" => \"Temperature\"\n ])\n @test ds[\"temp\"][:] == data\n @test eltype(ds[\"temp\"].var) == Int32\n @test ds.dim[\"lon\"] == sz[1]\n @test ds.dim[\"lat\"] == sz[2]\n\n # load in-place\n data2 = similar(data)\n NCDatasets.load!(ds[\"temp\"].var,data2,:,:)\n @test data2 == data\n\n data2 = zeros(eltype(data),sz[1],2)\n NCDatasets.load!(ds[\"temp\"].var,data2,:,1:2)\n @test data2 == data[:,1:2]\n\n data2 = zeros(eltype(data),sz[1],1)\n NCDatasets.load!(ds[\"temp\"].var,data2,:,1)\n @test data2[:] == data[:,1]\n\n # test Union{Missing,T}\n defVar(ds,\"foo\",[missing,1.,2.],(\"dim\",), fillvalue = -9999.)\n @test fillvalue(ds[\"foo\"]) == -9999.\n @test isequal(ds[\"foo\"][:], [missing,1.,2.])\n\n # test Union{Missing,T} and default fill value (issue #38)\n defVar(ds,\"foo_default_fill_value\",[missing,1.,2.],(\"dim\",))\n @test fillvalue(ds[\"foo_default_fill_value\"]) == fillvalue(Float64)\n @test isequal(ds[\"foo_default_fill_value\"][:], [missing,1.,2.])\n\n\n for DT in [DateTime,\n DateTimeStandard,\n DateTimeJulian,\n DateTimeProlepticGregorian,\n DateTimeAllLeap,\n DateTimeNoLeap,\n DateTime360Day\n ]\n\n # test DateTime et al., array\n data_dt = [DT(2000,1,1),DT(2000,1,2),DT(2000,1,3)]\n defVar(ds,\"foo_$(DT)\",data_dt,(\"dim\",))\n data_dt2 = ds[\"foo_$(DT)\"][:]\n @test isequal(convert.(DT,data_dt2), data_dt)\n\n # test DateTime et al. 
with missing array\n data_dt = [missing,DT(2000,1,2),DT(2000,1,3)]\n defVar(ds,\"foo_$(DT)_with_fill_value\",data_dt,(\"dim\",))\n\n data_dt2 = ds[\"foo_$(DT)_with_fill_value\"][:]\n @test ismissing(data_dt2[1])\n\n @test isequal(convert.(DT,data_dt2[2:end]), data_dt[2:end])\n end\n\n defVar(ds,\"scalar\",123.)\n @test ds[\"scalar\"][:] == 123.\n\n # test indexing with symbols #101\n @test ds[:scalar][:] == 123.\nend\nrm(filename)\n\n# check bounds error\nfilename = tempname()\nNCDataset(filename,\"c\") do ds\n defVar(ds,\"temp\",randn(10,11),(\"lon\",\"lat\"))\n @test_throws NCDatasets.NetCDFError defVar(ds,\"salt\",randn(10,12),(\"lon\",\"lat\"))\nend\nrm(filename)\n\n# check error for unknown variable\nfilename = tempname()\nNCDataset(filename,\"c\") do ds\n @test_throws NCDatasets.NetCDFError ds[\"does_not_exist\"]\nend\nrm(filename)\n\n\n# issue 23\n# return type using CartesianIndex\n\nfilename = tempname()\nds = NCDataset(filename, \"c\");\nds.dim[\"lon\"] = 5;\nds.dim[\"lat\"] = 10;\nds.dim[\"time\"] = Inf;\n\nncvar = defVar(ds, \"var\", Int64, (\"lon\", \"lat\", \"time\"));\n\nnt = 25;\ndata = reshape(1:5*10*nt, 5, 10, nt);\nncvar[:,:,1:nt] = data;\nclose(ds);\n\nds = NCDataset(filename);\nstart = 1;\nall(data[CartesianIndex(1, 1), start:end] .== ds[\"var\"][CartesianIndex(1, 1), start:end])\ndata11 = ds[\"var\"][CartesianIndex(1, 1), start:end]\nclose(ds)\n\n@test typeof(data11[1]) == Int64\nrm(filename)\n\n# issue #36\n\nx, y = collect(1:10), collect(10:18)\n\nfilename = tempname()\nNCDataset(filename, \"c\") do ds\n defDim(ds, \"x\", length(x))\n defVar(ds, \"x\", x, (\"x\",))\n defDim(ds, \"y\", length(y))\n defVar(ds, \"y\", y, (\"y\",))\nend\n\nrm(filename)\n\n# issue 155\n\nfilename = tempname()\nx = 1.:0.1:10.\nds = NCDataset(filename,\"c\");\ndefDim(ds, \"x\", length(x))\nncv = defVar(ds, \"x\", Float64, (\"x\",))\nncv[:] = x\nclose(ds)\nrm(filename)\n","avg_line_length":26.5240384615,"max_line_length":93,"alphanum_fraction":0.5653434838} {"size":540,"ext":"jl","lang":"Julia","max_stars_count":1.0,"content":"kon = rand()\nkoff = rand()\n\nc1 = chem2diff(\"\"\"\n $(kon),$(koff): 2 A -> B\n \"\"\")\nc2 = chem2diff(\"\"\"\n $(kon),$(koff): 2A -> B\n \"\"\")\n\nc3 = chem2diff(\"\"\"\n $(kon),$(koff):A +A -> B\n \"\"\")\n\nn = 2\nz = 1.0*ones(n)\ndz = zeros(n)\n\ndz[1] = 2*(-kon*z[1]^2 + koff*z[2])\ndz[2] = kon*z[1]^2 - koff*z[2]\n\n#This works only because of the fixed usage of `y`,`dy`\ny = ones(n) \ndy = zeros(n)\n\neval(c1)\n@test all(dy .\u2248 dz)\n\neval(c2)\n@test all(dy .\u2248 dz)\n\neval(c3)\n@test all(dy .\u2248 dz)\n\n","avg_line_length":15.4285714286,"max_line_length":55,"alphanum_fraction":0.4296296296}