# Author: Ritchie Lee, ritchie.lee@sv.cmu.edu # Date: 12/15/2014 # Correlated Encounter Model for Cooperative Aircraft in the National Airspace # exposed as DBN. Samples are generated at runtime at each step. module CorrAEMDBNImpl export AddObserver, getInitialState, initialize, update, get, CorrAEMDBN import Compat.ASCIIString using AbstractEncounterDBNImpl using AbstractEncounterDBNInterfaces using CommonInterfaces using Util using Encounter using CorrAEMImpl using RLESUtils, Observers import CommonInterfaces.addObserver import CommonInterfaces.initialize import CommonInterfaces.update import AbstractEncounterDBNInterfaces.get import AbstractEncounterDBNInterfaces.getInitialState import Base.convert include(Pkg.dir("SISLES/src/Encounter/CorrAEMImpl/corr_aem_sample.jl")) type CorrAEMDBN <: AbstractEncounterDBN number_of_aircraft::Int64 encounter_file::ASCIIString initial_sample_file::ASCIIString transition_sample_file::ASCIIString encounter_number::Int64 command_method::Symbol #:ENC = read from encounter file, :DBN = generate from DBN on-the-fly aem::CorrAEM dirichlet_transition #initial states init_aem_dstate::Vector{Int64} #discrete current state init_aem_dyn_cstate::Vector{Float64} #continuous of current dynamic variables #current states t::Int64 aem_dstate::Vector{Int64} #discrete current state aem_dyn_cstate::Vector{Float64} #continuous of current dynamic variables #caching and reuse dynamic_variables0::Vector{Int64} dynamic_variables1::Vector{Int64} parents_cache::Dict{Int64, Vector{Bool}} weights_cache::Dict{Tuple{Int64, Int64}, Vector{Float64}} cumweights_cache::Dict{Tuple{Int64, Int64}, Vector{Float64}} #pre-allocated output to avoid repeating reallocations output_commands::Vector{CorrAEMCommand} logProb::Float64 #log probability of output function CorrAEMDBN(number_of_aircraft::Int, encounter_file::AbstractString, initial_sample_file::AbstractString, transition_sample_file::AbstractString, encounter_number::Int, encounter_seed::UInt64, command_method::Symbol) dbn = new() @assert number_of_aircraft == 2 #need to revisit the code if this is not true dbn.number_of_aircraft = number_of_aircraft dbn.encounter_file = encounter_file dbn.initial_sample_file = initial_sample_file dbn.transition_sample_file = transition_sample_file dbn.encounter_number = encounter_number dbn.command_method = command_method dbn.aem = CorrAEM(encounter_file, initial_sample_file, transition_sample_file) dbn.t = 0 srand(encounter_seed) #There's a rand inside generateEncounter generateEncounter(dbn.aem, sample_number=encounter_number) #To optimize: This allocates a lot of memory #compute initial states of variables p = dbn.aem.parameters dbn.dynamic_variables0 = p.temporal_map[:,1] dbn.dynamic_variables1 = p.temporal_map[:,2] aem_initial_unconverted = unconvertUnitsAemState(dbn.aem.initial) aem_initial_dstate = Int64[ val2ind(p.boundaries[i], p.r_transition[i], val) for (i, val) in enumerate(aem_initial_unconverted)] dbn.init_aem_dstate = [aem_initial_dstate; aem_initial_dstate[dbn.dynamic_variables0]] #bins, [11:14] are updated with time, append space for t+1 variables dbn.init_aem_dyn_cstate = dbn.aem.initial[dbn.dynamic_variables0] #continuous variables. 
dbn.dirichlet_transition = bn_dirichlet_prior(p.N_transition) dbn.aem_dstate = deepcopy(dbn.init_aem_dstate) dbn.aem_dyn_cstate = deepcopy(dbn.init_aem_dyn_cstate) #precompute and cache these quantities dbn.parents_cache = Dict{Int64,Vector{Bool}}() dbn.weights_cache = Dict{Tuple{Int64,Int64}, Vector{Float64}}() dbn.cumweights_cache = Dict{Tuple{Int64,Int64}, Vector{Float64}}() for i = 1:length(p.N_transition) dbn.parents_cache[i] = p.G_transition[:, i] for j = 1:1:size(dbn.dirichlet_transition[i], 2) dbn.weights_cache[(i, j)] = p.N_transition[i][:, j] + dbn.dirichlet_transition[i][:, j] dbn.weights_cache[(i, j)] /= sum(dbn.weights_cache[(i, j)]) dbn.cumweights_cache[(i, j)] = cumsum(dbn.weights_cache[(i, j)]) end end dbn.output_commands = CorrAEMCommand[ CorrAEMCommand(0.0, 0.0, 0.0, 0.0) for i = 1:number_of_aircraft ] dbn.logProb = 0.0 return dbn end end addObserver(dbn::CorrAEMDBN, f::Function) = add_observer(aem.observer, f) addObserver(dbn::CorrAEMDBN, tag::AbstractString, f::Function) = add_observer(aem.observer, tag, f) function initialize(dbn::CorrAEMDBN) #reset to initial state copy!(dbn.aem_dstate, dbn.init_aem_dstate) copy!(dbn.aem_dyn_cstate, dbn.init_aem_dyn_cstate) dbn.t = 0 #reset aem indices initialize(dbn.aem) end function getInitialState(dbn::CorrAEMDBN, index::Int) return Encounter.getInitialState(dbn.aem, index) end function update(dbn::CorrAEMDBN) if dbn.command_method == :DBN logProb = step_dbn(dbn) elseif dbn.command_method == :ENC logProb = step_enc(dbn) else error("CorrAEMDBNImpl::Step: No such command method") end dbn.t += 1 return logProb end function step_dbn(dbn::CorrAEMDBN) p = dbn.aem.parameters aem_dstate = dbn.aem_dstate #entire state, discrete bins aem_dyn_cstate = dbn.aem_dyn_cstate #dynamic states, continuous logProb = 0.0 for (o,i) in enumerate(dbn.dynamic_variables1) if !isempty(find(dbn.parents_cache[i])) dims = tuple(p.r_transition[dbn.parents_cache[i]]...) j = sub2ind(dims, aem_dstate[dbn.parents_cache[i]]...) aem_dstate[i] = select_random_cumweights(dbn.cumweights_cache[(i,j)]) logProb += log(dbn.weights_cache[(i,j)][aem_dstate[i]]) #Resampling and dediscretizing process i_t = dbn.dynamic_variables0[o] if (aem_dstate[i] != aem_dstate[i_t]) || #compare to state at last time step, #Different bin, do resample (aem_dstate[i] == aem_dstate[i_t] && rand() < p.resample_rates[i_t]) #Same bin but meets resample rate aem_dyn_cstate[o],_ = dediscretize(aem_dstate[i],p.boundaries[i_t],p.zero_bins[i_t]) if in(i,[17,18]) #these need unit conversion aem_dyn_cstate[o] /= 60 #convert units end end #Else same bin and does not meet rate, just set equal to previous (no update) end end # copy over x(t+1) to x(t) aem_dstate[dbn.dynamic_variables0] = aem_dstate[dbn.dynamic_variables1] #Just a reminder, this will break if number_of_aircraft != 2 @assert dbn.number_of_aircraft == 2 dbn.output_commands[1].t = dbn.t dbn.output_commands[1].v_d = getInitialSample(dbn.aem, :v1d) dbn.output_commands[1].h_d = dbn.aem_dyn_cstate[1] dbn.output_commands[1].psi_d = dbn.aem_dyn_cstate[3] dbn.output_commands[2].t = dbn.t dbn.output_commands[2].v_d = getInitialSample(dbn.aem, :v2d) dbn.output_commands[2].h_d = dbn.aem_dyn_cstate[2] dbn.output_commands[2].psi_d = dbn.aem_dyn_cstate[4] return dbn.logProb = logProb end #TODO: remove hardcoding convert_units(x::AbstractFloat, i::Int) = in(i, [17,18]) ? 
x / 60 : x convert(::Type{Vector{Float64}}, command_1::CorrAEMCommand, command_2::CorrAEMCommand) = [ command_1.h_d, command_2.h_d, command_1.psi_d, command_2.psi_d ] function step_enc(dbn::CorrAEMDBN) aem = dbn.aem p = aem.parameters for i = 1:dbn.number_of_aircraft cmd = Encounter.update(aem, i) if cmd != nothing dbn.output_commands[i] = cmd end end #Just a reminder, this will break if number_of_aircraft != 2 #@assert dbn.number_of_aircraft == 2 #= FIXME: skip probability calc for now... #prepare t+1 from encounter commands aem_dyn_cstate = convert(Vector{Float64}, dbn.output_commands[1], dbn.output_commands[2]) aem_dstate = dbn.aem_dstate #only copies pointer #load into the (t+1) slots #boundaries are specified at t aem_dyn_cstate_unconverted = unconvertUnitsDynVars(aem_dyn_cstate) #need to unconvert units aem_dstate[dbn.dynamic_variables1] = Int64[ val2ind(p.boundaries[i], p.r_transition[i], aem_dyn_cstate_unconverted[o]) for (o, i) in enumerate(dbn.dynamic_variables0) ] # compute the probability of this transition logProb = 0.0 for (o, i) in enumerate(dbn.dynamic_variables1) j = 1 parents = dbn.parents_cache[i] if !isempty(find(parents)) dims = tuple(p.r_transition[parents]...) indices = aem_dstate[parents] j = sub2ind(dims, indices...) end weights = dbn.weights_cache[(i, j)] logProb += log(weights[aem_dstate[i]]) #probability from continuous sampling process #two components: resample prob, dediscretize prob i_prev = dbn.dynamic_variables0[o] if aem_dstate[i] != aem_dstate[i_prev] #not the same bin, resample wp 1 logProb += dediscretize_prob(aem_dyn_cstate[o], aem_dstate[i], p.boundaries[i_prev], p.zero_bins[i_prev]) elseif isapprox(aem_dyn_cstate[o], dbn.aem_dyn_cstate[o], atol=0.0001) #same bin same value, did not resample logProb += log(1.0 - p.resample_rates[i_prev]) else #Same bin different value, got resampled logProb += log(p.resample_rates[i_prev]) logProb += dediscretize_prob(aem_dyn_cstate[o], aem_dstate[i], p.boundaries[i_prev], p.zero_bins[i_prev]) end end # copy over x(t+1) to x(t) aem_dstate[dbn.dynamic_variables0] = aem_dstate[dbn.dynamic_variables1] #push to sim dbn.aem_dstate = aem_dstate dbn.aem_dyn_cstate = aem_dyn_cstate #return dbn.logProb = logProb =# dbn.logProb = 0.0 #for now... 
FIXME end function get(dbn::CorrAEMDBN, aircraft_number::Int) return dbn.output_commands[aircraft_number] end function val2ind(boundariesi, ri, value) if !isempty(boundariesi) index = findfirst(x -> (x > value), boundariesi) - 1 if index == -1 index = ri end else index = value end return index end function dediscretize(dval::Int64, boundaries::Vector{Float64}, zero_bin::Int64) val_min = boundaries[dval] val_max = boundaries[dval + 1] if dval == zero_bin val = 0.0 prob = 1.0 elseif val_max == val_min val = val_min prob = 1.0 else val = val_min + rand() * (val_max - val_min) prob = 1.0 / (val_max - val_min) #this is a density so it won't be normalized to [0,1] end return (val, prob) end function dediscretize_prob(val::Float64, dval::Int64, boundaries::Vector{Float64}, zero_bin::Int64) val_min = boundaries[dval] val_max = boundaries[dval + 1] if dval == zero_bin @assert val == 0.0 prob = 1.0 elseif val_max == val_min @assert val == val_min prob = 1.0 else @assert val_min <= val <= val_max prob = 1.0 / (val_max - val_min) #this is a density so it won't be normalized to [0,1] end return prob end function unconvertUnitsDynVars(v) return [v[1:2] * 60.0, v[3:end]] end function unconvertUnitsAemState(state_) #from CorrAEM state = deepcopy(state_) state[7] /= 1.68780 state[8] /= 1.68780 state[9] /= 1.68780 state[10] /= 1.68780 state[11] *= 60 state[12] *= 60 state[15] /= 6076.12 return state end function select_random_cumweights(cweights::Vector{Float64}) #select randomly according to cumulative weights vector r = cweights[end] * rand() return findfirst(x -> (x >= r), cweights) end end #module
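# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the bin-sampling helpers defined above behave. Assumes the module loaded
# successfully; the weights and boundaries below are made-up example values.
#
# cweights = cumsum([0.2, 0.5, 0.3])                      # cumulative weights over 3 bins
# bin = CorrAEMDBNImpl.select_random_cumweights(cweights) # random bin index in 1:3
# boundaries = [-10.0, -2.0, 2.0, 10.0]                   # bin edges; bin 2 is the zero bin
# val, dens = CorrAEMDBNImpl.dediscretize(bin, boundaries, 2)
# # -> val == 0.0 when bin == 2; otherwise a uniform draw inside the bin with density 1/width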
function isprerelease(version::VersionNumber)::Bool
    prerelease = version.prerelease
    isempty(prerelease) && return false
    for x in prerelease
        if !isempty(strip(x))
            return true
        end
    end
    return false
end

function _calculate_increment(before::VersionNumber, after::VersionNumber)::VersionNumber
    before_first3 = VersionNumber(before.major, before.minor, before.patch)
    after_first3 = VersionNumber(after.major, after.minor, after.patch)
    # @debug("before: ", before)
    # @debug("before_first3: ", before_first3)
    # @debug("after:", after)
    # @debug("after_first3:", after_first3)
    always_assert(after > before, "after > before")
    always_assert(after_first3 >= before_first3, "after_first3 >= before_first3")
    if before.major == after.major
        if before.minor == after.minor
            always_assert(after.patch >= before.patch, "after.patch >= before.patch")
            return VersionNumber(0, 0, after.patch - before.patch)
        else
            always_assert(after.minor >= before.minor, "after.minor >= before.minor")
            return VersionNumber(0, after.minor - before.minor, after.patch - 0)
        end
    else
        always_assert(after.major >= before.major, "after.major >= before.major")
        return VersionNumber(after.major - before.major, after.minor - 0, after.patch - 0)
    end
end

function check_version_increment(master_version::VersionNumber, head_version::VersionNumber;
                                 allow_skipping_versions::Bool,
                                 gh_set_output::Bool = get(ENV, "GITHUB_ACTIONS", "") == "true",
                                 gh_set_output_io::IO = stdout)::Nothing
    always_assert(head_version > master_version, "head_version > master_version")
    _gh_set_output_println(gh_set_output, gh_set_output_io, "compare_versions", "success")
    @info("Version number has increased")
    increment = _calculate_increment(master_version, head_version)
    if increment in (v"0.0.0", v"0.0.1", v"0.1.0", v"1.0.0")
        @info("Increment is good", master_version, head_version, increment)
    else
        if allow_skipping_versions
            @warn("Increment is bad, but `allow_skipping_versions` is true, so we will allow it",
                  master_version, head_version, increment, allow_skipping_versions)
        else
            @error("Increment is bad", master_version, head_version, increment, allow_skipping_versions)
            error("Bad increment")
        end
    end
    return nothing
end

function compare_versions(master_version::VersionNumber, head_version::VersionNumber;
                          allow_unchanged_prerelease::Bool,
                          allow_skipping_versions::Bool,
                          gh_set_output::Bool = get(ENV, "GITHUB_ACTIONS", "") == "true",
                          gh_set_output_io::IO = stdout)::Nothing
    if head_version > master_version
        check_version_increment(master_version, head_version;
                                allow_skipping_versions = allow_skipping_versions,
                                gh_set_output = gh_set_output,
                                gh_set_output_io = gh_set_output_io)
    elseif head_version == master_version
        if isprerelease(head_version) && isprerelease(master_version) && allow_unchanged_prerelease
            _gh_set_output_println(gh_set_output, gh_set_output_io, "compare_versions", "success")
            @info("Version number did not change, but it is a prerelease so this is allowed")
        else
            _gh_set_output_println(gh_set_output, gh_set_output_io, "compare_versions", "failure")
            throw(ErrorException("Version number is unchanged, which is not allowed"))
        end
    else
        _gh_set_output_println(gh_set_output, gh_set_output_io, "compare_versions", "failure")
        throw(ErrorException("Version number decreased, which is not allowed"))
    end
    return nothing
end
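# --- Hedged usage sketch (added; assumes `always_assert` and `_gh_set_output_println`
# are defined elsewhere in this package, since they are not part of this file) ---
# _calculate_increment(v"1.2.3", v"1.2.4")  == v"0.0.1"   # patch bump
# _calculate_increment(v"1.2.3", v"1.3.0")  == v"0.1.0"   # minor bump (patch resets)
# _calculate_increment(v"1.2.3", v"2.0.0")  == v"1.0.0"   # major bump (minor/patch reset)
# compare_versions(v"1.2.3", v"1.4.0";
#                  allow_unchanged_prerelease=false,
#                  allow_skipping_versions=false)          # errors: increment v"0.2.0" skips v"1.3.0"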
## Operators
# TODO: REMOVE!

for op in (:Derivative, :Integral)
    @eval begin
        function ($op)(d::AbstractVector{T}) where T<:IntervalOrSegment
            n = length(d)
            R = zeros(Operator{mapreduce(eltype, promote_type, d)}, n, n)
            for k = 1:n
                R[k,k] = $op(d[k])
            end
            R
        end
    end
end

function Evaluation(d::AbstractVector{T}, x...) where T<:IntervalOrSegment
    n = length(d)
    R = zeros(Operator{mapreduce(eltype, promote_type, d)}, n, n)
    for k = 1:n
        R[k,k] = Evaluation(d[k], x...)
    end
    R
end

## Construction

function diagm_container(kv::Pair{<:Integer,<:AbstractVector{O}}...) where O<:Operator
    T = mapreduce(x -> mapreduce(eltype, promote_type, x.second), promote_type, kv)
    n = mapreduce(x -> length(x.second) + abs(x.first), max, kv)
    zeros(Operator{T}, n, n)
end

## TODO: unify with other blockdiag
function blockdiag(d1::AbstractVector{T}, d2::AbstractVector{T}) where T<:Operator
    if isempty(d1) && isempty(d2)
        error("Empty blockdiag")
    end
    if isempty(d1)
        TT = mapreduce(eltype, promote_type, d2)
    elseif isempty(d2)
        TT = mapreduce(eltype, promote_type, d1)
    else
        TT = promote_type(mapreduce(eltype, promote_type, d1),
                          mapreduce(eltype, promote_type, d2))
    end
    D = zeros(Operator{TT}, length(d1)+length(d2), 2)
    D[1:length(d1), 1] = d1
    D[length(d1)+1:end, 2] = d2
    D
end

blockdiag(a::Operator, b::Operator) =
    blockdiag(Operator{promote_type(eltype(a),eltype(b))}[a],
              Operator{promote_type(eltype(a),eltype(b))}[b])

## broadcast

broadcast(::typeof(*), A::AbstractArray{N}, D::Operator) where {N<:Number} =
    Operator{promote_type(N,eltype(D))}[A[k,j]*D for k=1:size(A,1), j=1:size(A,2)]

broadcast(::typeof(*), D::Operator, A::AbstractArray{N}) where {N<:Number} = A.*D
using Test
using SpinMonteCarlo

const SEED = 137
const SEED2 = 19937
const MCS = 10000
const Therm = MCS
const alpha = 0.001

@testset begin
    filenames = [
        "classical.jl",
        "quantum.jl",
        "checkpoint.jl",
    ]
    for filename in filenames
        t = @elapsed include(filename)
        println("$(filename): $t sec")
    end
end
""" ListBasedNonProjective() Transition system for list-based non-projective dependency parsing. Described in Nivre 2008, "Algorithms for Deterministic Incremental Dependency Parsing." """ struct ListBasedNonProjective <: AbstractTransitionSystem end initconfig(s::ListBasedNonProjective, graph::DependencyTree) = ListBasedNonProjectiveConfig(graph) initconfig(s::ListBasedNonProjective, deptype, words) = ListBasedNonProjectiveConfig{deptype}(words) projective_only(::ListBasedNonProjective) = false transition_space(::ListBasedNonProjective, labels=[]) = isempty(labels) ? [LeftArc(), RightArc(), NoArc(), Shift()] : [LeftArc.(labels)..., RightArc.(labels)..., NoArc(), Shift()] struct ListBasedNonProjectiveConfig{T} <: AbstractParserConfiguration{T} λ1::Vector{Int} # right-headed λ2::Vector{Int} # left-headed β::Vector{Int} A::Vector{T} end function ListBasedNonProjectiveConfig{T}(words::Vector{String}) where {T} λ1 = [0] λ2 = Int[] β = 1:length(words) A = [unk(T, id, w) for (id,w) in enumerate(words)] ListBasedNonProjectiveConfig{T}(λ1, λ2, β, A) end function ListBasedNonProjectiveConfig{T}(gold::DependencyTree) where {T} λ1 = [0] λ2 = Int[] β = 1:length(gold) A = [dep(token, head=-1) for token in gold] ListBasedNonProjectiveConfig{T}(λ1, λ2, β, A) end ListBasedNonProjectiveConfig(gold::DependencyTree) = ListBasedNonProjectiveConfig{eltype(gold)}(gold) buffer(cfg::ListBasedNonProjectiveConfig) = cfg.β token(cfg::ListBasedNonProjectiveConfig, i) = iszero(i) ? root(deptype(cfg)) : i == -1 ? noval(deptype(cfg)) : cfg.A[i] tokens(cfg::ListBasedNonProjectiveConfig) = cfg.A tokens(cfg::ListBasedNonProjectiveConfig, is) = [token(cfg, i) for i in is] function leftarc(cfg::ListBasedNonProjectiveConfig, args...; kwargs...) λ1, i = cfg.λ1[1:end-1], cfg.λ1[end] j, β = cfg.β[1], cfg.β[2:end] A = copy(cfg.A) i != 0 && (A[i] = dep(A[i], args...; head=j, kwargs...)) ListBasedNonProjectiveConfig(λ1, [i ; cfg.λ2], [j ; β], A) end function rightarc(cfg::ListBasedNonProjectiveConfig, args...; kwargs...) λ1, i = cfg.λ1[1:end-1], cfg.λ1[end] j, β = cfg.β[1], cfg.β[2:end] A = copy(cfg.A) A[j] = dep(A[j], args...; head=i, kwargs...) ListBasedNonProjectiveConfig(λ1, [i ; cfg.λ2], [j ; β], A) end function noarc(cfg::ListBasedNonProjectiveConfig) λ1, i = cfg.λ1[1:end-1], cfg.λ1[end] λ2, β, A = cfg.λ2, cfg.β, cfg.A ListBasedNonProjectiveConfig(λ1, [i ; λ2], β, A) end function shift(cfg::ListBasedNonProjectiveConfig) λ1, λ2 = cfg.λ1, cfg.λ2 i, β = cfg.β[1], cfg.β[2:end] ListBasedNonProjectiveConfig([λ1 ; λ2 ; i], Int[], β, cfg.A) end function isfinal(cfg::ListBasedNonProjectiveConfig) return all(a -> head(a) != -1, tokens(cfg)) && length(cfg.λ1) == length(cfg.A) + 1 && length(cfg.λ2) == 0 && length(cfg.β) == 0 end """ static_oracle(::ListBasedNonProjectiveConfig, tree) Return a training oracle function which returns gold transition operations from a parser configuration with reference to `graph`. """ function static_oracle(cfg::ListBasedNonProjectiveConfig, tree, arc=untyped) l = i -> arc(tree[i]) if length(cfg.λ1) >= 1 && length(cfg.β) >= 1 i, λ1 = cfg.λ1[end], cfg.λ1[1:end-1] j, β = cfg.β[1], cfg.β[2:end] if !iszero(i) && head(tree, i) == j return LeftArc(l(i)...) elseif head(tree, j) == i return RightArc(l(j)...) end j_deps = dependents(tree, j) if (!(any(x -> x < j, j_deps) && j_deps[1] < i)) && !(head(tree, j) < i) return Shift() end end if length(cfg.λ1) == 0 return Shift() end return NoArc() end # todo? 
possible_transitions(cfg::ListBasedNonProjectiveConfig, graph::DependencyTree, arc=untyped) =
    TransitionOperator[static_oracle(cfg, graph, arc)]

==(cfg1::ListBasedNonProjectiveConfig, cfg2::ListBasedNonProjectiveConfig) =
    cfg1.λ1 == cfg2.λ1 && cfg1.λ2 == cfg2.λ2 && cfg1.β == cfg2.β && cfg1.A == cfg2.A

function Base.show(io::IO, c::ListBasedNonProjectiveConfig)
    λ1 = join(c.λ1, ",")
    λ2 = join(c.λ2, ",")
    β = join(c.β, ",")
    print(io, "ListBasedNonProjectiveConfig([$λ1],[$λ2],[$β])\n$(join([join([id(t),form(t),head(t)],'\t') for t in tokens(c)],'\n'))")
end
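# --- Hedged usage sketch (added; `gold_tree` is a placeholder DependencyTree, and the
# assumption that transition operators such as LeftArc()/Shift() are callable on
# configurations follows this package's conventions) ---
# cfg = initconfig(ListBasedNonProjective(), gold_tree)
# while !isfinal(cfg)
#     t = static_oracle(cfg, gold_tree)  # gold transition for this configuration
#     cfg = t(cfg)                       # apply it to get the next configuration
# end
# @assert all(tok -> head(tok) != -1, tokens(cfg))   # every token has received a head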
# Basic test script - generates a csv file (output/uam_traj.csv) containing one UAM trajectory

# Include the files
include("UAMTrajectoryGenerator.jl")

# Set the random seed
using Random  # assumed needed for Random.seed!; harmless if already loaded by the include above
Random.seed!(1)

# Generate the trajectory file
generate_trajectory_file("output/uam_traj.csv")
using FunctionalCollections: PersistentSet, PersistentHashMap, dissoc, assoc, conj, disj
using Gen

#########################################
# compiling a trace into a factor graph #
#########################################

struct Latent{T,U}
    domain::Vector{T}
    parent_addrs::Vector{U}
end

struct Observation{U}
    parent_addrs::Vector{U} # only include addrs that are selected
end

parent_addrs(info::Union{Latent,Observation}) = info.parent_addrs

function get_domain_to_idx(domain::Vector{T}) where {T}
    domain_to_idx = Dict{T,Int}()
    for (i, value) in enumerate(domain)
        domain_to_idx[value] = i
    end
    return domain_to_idx
end

function cartesian_product(value_lists)
    tuples = Vector{Tuple}()
    for value in value_lists[1]
        if length(value_lists) > 1
            append!(tuples, [(value, rest...) for rest in cartesian_product(value_lists[2:end])])
        else
            append!(tuples, [(value,)])
        end
    end
    return tuples
end

# addr could be latent or observed...
# latent_addrs is the set of latent variables that are involved, which may or
# may not include the actual addr itself..
function create_factor(
        trace, addr,
        latents::Dict{Any,Latent},
        observations::Dict{Any,Observation},
        all_latent_addrs::Vector{Any})
    N = length(all_latent_addrs)
    in_factor = Vector{Bool}(undef, N)
    if haskey(latents, addr)
        for (i, a) in enumerate(all_latent_addrs)
            in_factor[i] = (a == addr || a in parent_addrs(latents[addr]))
        end
        num_vars = length(parent_addrs(latents[addr])) + 1
    elseif haskey(observations, addr)
        for (i, a) in enumerate(all_latent_addrs)
            in_factor[i] = (a in parent_addrs(observations[addr]))
        end
        num_vars = length(parent_addrs(observations[addr]))
    end
    dims = map(i -> in_factor[i] ? length(latents[all_latent_addrs[i]].domain) : 1, 1:N)
    log_factor = Array{Float64,N}(undef, dims...)
    view_inds = map(i -> in_factor[i] ? Colon() : 1, 1:N)
    log_factor_view = view(log_factor, view_inds...)
    var_addrs = Vector{Any}(undef, num_vars)
    value_idx_lists = Vector{Any}(undef, num_vars)
    j = 1
    for i in 1:N
        if in_factor[i]
            a = all_latent_addrs[i]
            var_addrs[j] = a
            value_idx_lists[j] = collect(1:length(latents[a].domain))
            j += 1
        end
    end
    @assert j == num_vars + 1

    # populate factor with values by probing trace with update
    # the key idea is that this scales exponentially in maximum number of
    # parents of a variable, not the total number of variables
    cprod = cartesian_product(value_idx_lists)
    for value_idx_tuple in cprod
        choices = choicemap()
        for (a, value_idx) in zip(var_addrs, value_idx_tuple)
            choices[a] = latents[a].domain[value_idx]
        end
        (tmp_trace, _, _, _) = update(trace, get_args(trace), map((_) -> NoChange(), get_args(trace)), choices)
        # NOTE: technically, the generative function of trace can use any
        # internal proposal, not only forward sampling, but this code is only
        # correct if the internal proposal uses forward sampling. enhancing the
        # trace interface with some more methods that specifically assume a
        # dependency graph would resolve this
        weight = project(tmp_trace, select(addr))
        log_factor_view[value_idx_tuple...] = weight
    end
    log_factor = log_factor .- logsumexp(log_factor[:])
    return (log_factor, var_addrs)
end

function compile_trace_to_factor_graph(
        trace, latents::Dict{Any,Latent}, observations::Dict{Any,Observation})

    # choose order of addresses (note, this is NOT the elimination order)
    # TODO does the order in which the addresses are indexed matter for e.g. cache performance? maybe?
    all_latent_addrs = collect(keys(latents))
    latent_addr_to_idx = Dict{Any,Int}()
    for (idx, addr) in enumerate(all_latent_addrs)
        latent_addr_to_idx[addr] = idx
    end

    # construct factor nodes, one for each latent and downstream variable
    N = length(all_latent_addrs)
    addr_to_factor_node = Dict{Any,FactorNode{N}}()
    factor_id = 1
    for addr in Iterators.flatten((keys(latents), keys(observations)))
        (log_factor, var_addrs) = create_factor(trace, addr, latents, observations, all_latent_addrs)
        addr_to_factor_node[addr] = FactorNode{N}(
            factor_id, Int[latent_addr_to_idx[a] for a in var_addrs], log_factor)
        factor_id += 1
    end
    num_factors = factor_id - 1

    # for each latent address, the set of addresses for factors that it is involved in
    children_and_self = [Set{Any}([all_latent_addrs[i]]) for i in 1:N]
    for (addr, addr_info) in Iterators.flatten((latents, observations))
        for parent_addr in parent_addrs(addr_info)
            push!(children_and_self[latent_addr_to_idx[parent_addr]], addr)
        end
    end

    # construct factor graph
    var_nodes = PersistentHashMap{Int,VarNode}()
    for (addr, addr_info) in latents
        i = latent_addr_to_idx[addr]
        factor_nodes = PersistentSet{FactorNode{N}}(
            [addr_to_factor_node[addr] for addr in children_and_self[i]])
        var_node = VarNode(addr, factor_nodes, addr_info.domain, get_domain_to_idx(addr_info.domain))
        var_nodes = assoc(var_nodes, latent_addr_to_idx[addr], var_node)
    end
    return FactorGraph{N}(num_factors, var_nodes, latent_addr_to_idx)
end
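# --- Hedged usage sketch (added): compiling a tiny two-variable model into a factor graph.
# Assumes Gen is loaded (as above) and that the FactorNode/VarNode/FactorGraph types used
# by compile_trace_to_factor_graph are defined elsewhere in this code base. The model and
# addresses below are illustrative only.
#
# @gen function sketch_model()
#     b ~ bernoulli(0.1)
#     a ~ bernoulli(b ? 0.9 : 0.05)
# end
#
# trace, _ = generate(sketch_model, (), choicemap((:a, true)))   # observe a = true
# latents = Dict{Any,Latent}(:b => Latent([false, true], []))    # b has no parents
# observations = Dict{Any,Observation}(:a => Observation([:b]))  # a depends on b
# fg = compile_trace_to_factor_graph(trace, latents, observations)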
#MAIN ENTRY POINT #final columns for the decompressed file #merges crsp and compustat # note refresh merge will be automatically run if any of the other two are run function loadccm(; datapath::String=DATA_PATH, ccmname::String = CCM_NAME, incsvstream::Function = IN_CSV_STREAM, csvextension::String = CSV_EXTENSION) local ccm::DataFrame #makes a serialization object so we don't have to keep parsing the CSV ccm = incsvstream("$datapath\\$ccmname.$csvextension") |> CSV.File |> DataFrame return ccm end function mergecompccm!(comp::DataFrame; testoutput::Bool = TEST_OUTPUT, months2stale::Int = MONTHS_2_STALE_LONG) #first local ccm::DataFrame = loadccm() ccm = preprocessccm!(ccm) testoutput && CSV.write("output\\ccm.csv", ccm) select!(ccm, Not([:cusip])) #println("Comp rows before merge: $(size(comp, 1))") comp = innerjoin(comp, ccm, on=[:gvkey]) comp.keep = trues(size(comp,1)) comp.keep = (r::DataFrameRow-> r.adate ∈ r.linkeffdate:Day(1):r.linkenddate).(eachrow(comp)) comp = comp[comp.keep,:] select!(comp, Not(:keep)) println("Comp rows after merge, post-filter: $(size(comp, 1))") #Note: we still will have overlapping lineffdates and linkenddates, which will be corrected #with the below method comp = reconcilecompccmintervals!(comp) # lagwithin!(comp, :fdate, :gvkey, :fyear) sort!(comp, [:lpermno, :fdate]) issorted(comp, [:lpermno, :adate]) || error("I thought comp was sorted by [:lpermno, :adate]") period2stale::Month = Month(months2stale) lagwithin2!(comp, [:fdate, :fyrmonth], :lpermno, date=:fdate, maxnotstale = period2stale) lagwithin2!(comp, [:Lfdate, :Lfyrmonth], :lpermno, date=:fdate, maxnotstale = period2stale) for r ∈ eachrow(comp) ismissing(r.Lfdate) && continue (r.Lfdate < r.fdate - period2stale) && error("failed date check $r") end @info "past date validatation check" #= NOTE: we accept dup fyears due to changing fiscal years function testfordups(df::AbstractDataFrame, groupfields::Vector{Symbol}) for sdf ∈ groupby(df, groupfields) if size(sdf,1) > 1 println(sdf[!, [:gvkey, :lpermno, :fdate, :Lfdate, :adate, :Nadate, :linkeffdate, :linkenddate, :begindate, :enddate, :fyear]]) error("duplicate fyear found") end end @info "passed dedup fyear test" end testfordups(comp, [:lpermno, :fyear])=# return comp end #formats the ccm table in a reasonable way function preprocessccm!(ccm::DataFrame; retainedcolsccm::Vector{Symbol} = RETAINED_COLS_CCM, lastdatestring::String = LAST_DATE_STRING, testoutput::Bool = TEST_OUTPUT) #names ot lower case cleannames::Vector{String} = (s::String->lowercase(s)).(names(ccm)) rename!(ccm, ((oldn, newn)->oldn=>newn).(names(ccm), cleannames)) #fix the end date ccm.linkenddt .= (s::MString-> coalesce(s,"")=="E" ? lastdatestring : s).(ccm.linkenddt) #below didn't work for some reason #filter!(r::DataFrameRow->((!ismissing(r.gvkey)) && (!ismissing(r.lpermno))), ccm) ccm = ccm[(r::DataFrameRow->( (!ismissing(r.gvkey)) && (!ismissing(r.lpermno)))).(eachrow(ccm)),:] ccm.linkprim = (s::String->length(s)==1 ? 
s[1] : error("invalid linkprim in ccm")).(ccm.linkprim) filter!(r::DataFrameRow->r.linkprim =='P' || r.linkprim =='C', ccm) #parse the dates ccm.linkeffdate = (s->parseccm(Date, s)).(ccm.linkdt) ccm.linkenddate = (s->parseccm(Date, s)).(ccm.linkenddt) #dedup if possible ccm.tokeep = trues(size(ccm,1)) ccm.dateranges = ((linkeffdate::Date,linkenddate::Date)-> linkeffdate:Day(1):linkenddate).(ccm.linkeffdate,ccm.linkenddate) gccm::GroupedDataFrame = groupby(ccm, [:gvkey, :lpermno]) @mpar for i ∈ 1:length(gccm) sccm::SubDataFrame = gccm[i] Nsccm::Int = size(sccm,1) #construct the union of all of the date sets if Nsccm > 1 local periodrange::StepRange{Date,Day} = minimum( sccm.linkeffdate):Day(1):maximum(sccm.linkenddate) local dateunionlength::Int = length(unique([sccm.dateranges...;])) if dateunionlength == length(periodrange) sccm.linkeffdate[1] = minimum(periodrange) sccm.linkenddate[1] = maximum(periodrange) sccm.tokeep[2:end] .= false end end end println("ccm size pre-dedup $(size(ccm))") ccm = ccm[ccm.tokeep,:] println("ccm size post-dedup $(size(ccm))") if testoutput testoutput && CSV.write("output\\ccm_preproc.csv", ccm) end #rename!(ccm, :indfmt=>:indfmtold) #ccm.indfmt = (s::MString->parsecomp(Symbol, s)).(ccm.indfmtold) #only need some of the fields ccm = select!(ccm, retainedcolsccm) return ccm end #this function reconciles the link intervals with the announcement intervals #NOTE: A weird situation can arise if gvkey changes. The the linkenddate finishes too soon, #we end up with a gap . Considering the laternatives, it seems like the lesser evil #to extending the record beyond the linkenddate function reconcilecompccmintervals!(comp::DataFrame) #subtract one day since the effective interval is inclusive of the #first date while the begin-end date is exclusve of the first date comp.begindate .= (max).(comp.begindate, comp.linkeffdate .- Day(1)) comp.enddate .= (min).(comp.enddate, comp.linkenddate) #this gets rid of one day records #(this assumes the previous effective record is the current record) #not many of these ~140 #println("records before interval reconciliation: $(size(comp,1))") #println(comp[comp.begindate .≥ comp.enddate, # [:gvkey, :adateq, :begindate, :enddate, :linkeffdate, :linkenddate]]) comp = comp[comp.begindate .< comp.enddate, :] println("records after interval reconciliation: $(size(comp,1))") return comp end function parseccm(::Type{T}, s::String; #=parsedmissings = CCM_PARSED_MISSINGS=#) where T #if it doesn't parse to the right type, set to missing v::Union{T,Missing} = something(tryparse(T, s), missing) #check against the list of missing codes #(!ismissing(v)) && (v ∈ parsedmissings) && (v=missing) return v end #the date case parseccm(::Type{Date}, s::String; ccmdateformat::DateFormat = CCM_DATE_FORMAT) = Dates.Date(s, ccmdateformat) #helper methods and special cases parseccm(::Type{<:Any}, v::Missing) = missing parseccm(::Type{Date}, i::Int) = parseccm(Date, "$i") parseccm(::Type{Symbol}, s::String) = Symbol(s)
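# --- Hedged usage sketch (added): how parseccm behaves on the kinds of strings found in
# the CCM extract. CCM_DATE_FORMAT is defined elsewhere; passing `ccmdateformat`
# explicitly here avoids that dependency, and the format string is illustrative only.
#
# parseccm(Int, "20200131")                                       # -> 20200131
# parseccm(Float64, "n/a")                                        # -> missing (fails to parse)
# parseccm(Date, "20200131"; ccmdateformat=dateformat"yyyymmdd")  # -> Date(2020, 1, 31)
# parseccm(Date, missing)                                         # -> missing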
module ComradeSoss

# Turn off precompilation because of GG bug https://github.com/cscherrer/Soss.jl/issues/267
__precompile__(false)

using HypercubeTransform
using Reexport
@reexport using Soss
@reexport using Comrade

import Distributions
const Dists = Distributions

using MeasureTheory
using NamedTupleTools
using NestedTuples
using MacroTools
using PyCall
using Random
using Requires
using ParameterHandling
using StatsBase: median
using StatsBase
using StructArrays
using TupleVectors

# This is a hack for MeasureTheory since it wants the type to output
Base.rand(rng::AbstractRNG, ::Type{Float64}, d::Dists.ContinuousDistribution) = rand(rng, d)

const rad2μas = 180.0/π*3600*1e6

# These hold the pointers to the PyObjects that will store dynesty and ehtim
# and are loaded at initialization
const dynesty = PyNULL()

export ObsChar, scandata, create_joint, DynestyStatic,
       sample, optimize, threaded_optimize, chi2, ehtim

#include("ehtim.jl")
include("utility.jl")
include("hypercube.jl")
include("inference.jl")
include("dists.jl")
include("models.jl")
#include("grads.jl")
#include("nlopt.jl")
#include("hmc.jl")

function __init__()
    copy!(dynesty, pyimport("dynesty"))
    @require UltraNest="6822f173-b0be-4018-9ee2-28bf56348d09" include("ultranest.jl")
    @require NestedSamplers="41ceaf6f-1696-4a54-9b49-2e7a9ec3782e" include("nested.jl")
    @require BlackBoxOptim="a134a8b2-14d6-55f6-9291-3336d3ab0209" include("bboptim.jl")
    @require Metaheuristics="bcdb8e00-2c21-11e9-3065-2b553b22f898" include("metaheuristics.jl")
end

end # end ComradeSoss
#############################################################################
# Joulia
# A Large-Scale Spatial Power System Model for Julia
# See https://github.com/JuliaEnergy/Joulia.jl
#############################################################################

# This file contains functions and structs for nodes

mutable struct Nodes
    id
    load
    exchange

    function Nodes(nodes_df::DataFrame, load_df::DataFrame, exchange_df::DataFrame)
        N = Symbol.(nodes_df[1])

        load_dict = df_to_dict(load_df)
        insert_default!(load_dict, fill(0.0, 8760), N)

        exchange_dict = df_to_dict(exchange_df)
        insert_default!(exchange_dict, fill(0.0, 8760), N)

        return new(N, load_dict, exchange_dict)
    end
end
@with_kw type Body2D <: Body
    name::String = "None"
    parent::Body = ground2D()
    connection::connection2D = free2D("None")
    mass::SymFloat = SymFloat(symbols("m_None", nonnegative=true, real=true))
    inertia::SymFloat = SymFloat(symbols("I_None", nonnegative=true, real=true))
end

function ==(b1::Body2D, b2::Body2D)
    f = fieldnames(b1)
    for i in f
        if getfield(b1, i) != getfield(b2, i)
            return false
        end
    end
    return true
end
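# --- Hedged usage sketch (added; assumes ground2D, free2D, and SymFloat from the rest of
# this package). Since == above compares field-by-field, bodies differing only in name
# are unequal:
# b1 = Body2D(name = "link1")
# b2 = Body2D(name = "link2")
# b1 == b1   # true
# b1 == b2   # false (name differs)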
""" child = replace(parent, query => replacement) Generates a `child` crystal structure by (i) searches the `parent` crystal structure for subgraphs that match the `query` then (ii) replaces the substructures of the `parent` matching the `query` fragment with the `replacement` fragment. Equivalent to calling `substructure_replace(query ∈ parent, replacement)`. Accepts the same keyword arguments as [`substructure_replace`](@ref). """ replace(p::Crystal, pair::Pair; kwargs...) = substructure_replace(pair[1] ∈ p, pair[2]; kwargs...) """ alignment = Alignment(rot::Matrix{Float64}, shift_1::Vector{Float64}, shift_2::Vector{Float64}, err::Float64) Data structure for tracking alignment in substructure find/replace operations. """ struct Alignment rot::Matrix{Float64} # before rotation shift_1::Vector{Float64} # after rotation shift_2::Vector{Float64} # error err::Float64 end struct Installation aligned_replacement::Crystal q2p::Dict{Int, Int} r2p::Dict{Int, Int} end function get_r2p_alignment(replacement::Crystal, parent::Crystal, r2p::Dict{Int, Int}, q2p::Dict{Int, Int}) center = (X::Matrix{Float64}) -> sum(X, dims=2)[:] / size(X, 2) # when both centered to origin @assert replacement.atoms.n ≥ 3 && parent.atoms.n ≥ 3 "Parent and replacement must each be at least 3 atoms for SVD alignment." ### # compute centered Cartesian coords of the atoms of # replacement fragment involved in alignment ### atoms_r = Cart(replacement.atoms[[r for (r, p) in r2p]], replacement.box) X_r = atoms_r.coords.x x_r_center = center(X_r) X_r = X_r .- x_r_center ### # compute centered Cartesian coords of the atoms of # parent involved in alignment ### # handle fragments cut across the PB using the parent subset isomorphic to query parent_substructure = deepcopy(parent[[p for (q, p) in q2p]]) conglomerate!(parent_substructure) # must do this for when replacement fragment is disconnected # prepare parent substructure having correspondence with replacement p2ps = Dict([p => i for (i, p) in enumerate([p for (q, p) in q2p])]) # parent to parent subset map parent_substructure_to_align_to = parent_substructure[[p2ps[p] for p in [p for (r, p) in r2p]]] atoms_p = Cart(parent_substructure_to_align_to.atoms, parent.box) X_p = atoms_p.coords.x x_p_center = center(X_p) X_p = X_p .- x_p_center # solve the orthogonal procrustes probelm via SVD F = svd(X_r * X_p') # optimal rotation matrix rot = F.V * F.U' err = norm(rot * X_r - X_p) return Alignment(rot, - x_r_center, x_p_center, err) end function conglomerate!(parent_substructure::Crystal) # snip the cross-PB bonds bonds = deepcopy(parent_substructure.bonds) if length(connected_components(bonds)) > 1 @warn "# connected components in parent substructure > 1. assuming the substructure does not cross the periodic boundary..." return end drop_cross_pb_bonds!(bonds) # find connected components of bonding graph without cross-PB bonds # these are the components split across the boundary conn_comps = connected_components(bonds) # if substructure is entireline in the unit cell, it's already conglomerated :) if length(conn_comps) == 1 return end # we wish to shift all connected components to a reference component, # defined to be the largest component for speed. conn_comps_shifted = [false for c = 1:length(conn_comps)] ref_comp_id = argmax(length.(conn_comps)) conn_comps_shifted[ref_comp_id] = true # consider it shifted. # has atom p been shifted? 
function shifted_atom(p::Int) # loop over all connected components that have been shifted for conn_comp in conn_comps[conn_comps_shifted] # if parent substructure atom in this, yes! if p in conn_comp return true end end # reached this far, atom p is not in component that has been shifted. return false end # to which component does atom p belong? function find_component(p::Int) for c = 1:length(conn_comps) if p in conn_comps[c] return c end end end # until all components have been shifted to the reference component... while ! all(conn_comps_shifted) # loop over cross-PB edges in the parent substructure for ed in edges(parent_substructure.bonds) if get_prop(parent_substructure.bonds, ed, :cross_boundary) # if one edge belongs to unshifted component and another belogs to any component that has been shifted... if shifted_atom(ed.src) && ! shifted_atom(ed.dst) p_ref, p = ed.src, ed.dst elseif shifted_atom(ed.dst) && ! shifted_atom(ed.src) p_ref, p = ed.dst, ed.src else continue # both are shifted or both are unshifted. ignore this cross-PB edge end # here's the unshifted component we will shift next, to be next to the shifted components. comp_id = find_component(p) # find displacement vector for this cross-PB edge. dx = parent_substructure.atoms.coords.xf[:, p_ref] - parent_substructure.atoms.coords.xf[:, p] # get distance to nearest image n_dx = copy(dx) nearest_image!(n_dx) # shift all atoms in this component by this vector. for atom_idx in conn_comps[comp_id] parent_substructure.atoms.coords.xf[:, atom_idx] .+= dx - n_dx end # mark that we've shifted this component. conn_comps_shifted[comp_id] = true end end end return end function aligned_replacement(replacement::Crystal, parent::Crystal, r2p_alignment::Alignment) # put replacement into cartesian space atoms_r = Cart(replacement.atoms, replacement.box) # rotate replacement to align with parent_subset atoms_r.coords.x[:, :] = r2p_alignment.rot * (atoms_r.coords.x .+ r2p_alignment.shift_1) .+ r2p_alignment.shift_2 # cast atoms back to Frac return Crystal(replacement.name, parent.box, Frac(atoms_r, parent.box), Charges{Frac}(0), replacement.bonds, replacement.symmetry) end function effect_replacements(search::Search, replacement::Crystal, configs::Vector{Tuple{Int, Int}}, name::String)::Crystal nb_not_masked = sum(.! 
occursin.(rc[:r_tag], String.(search.query.atoms.species))) if replacement.atoms.n > 0 q_unmasked_in_r = substructure_search(search.query[1:nb_not_masked], replacement) q2r = Dict([q => q_unmasked_in_r.isomorphisms[1][1][q] for q in 1:nb_not_masked]) else q2r = Dict{Int, Int}() end installations = [optimal_replacement(search, replacement, q2r, loc_id, [ori_id]) for (loc_id, ori_id) in configs] child = install_replacements(search.parent, installations, name) # handle `missing` values in edge :cross_boundary attribute for edge in edges(child.bonds) # loop over edges # check if cross-boundary info is missing if ismissing(get_prop(child.bonds, edge, :cross_boundary)) # check if bond crosses boundary distance_e = get_prop(child.bonds, edge, :distance) # distance in edge property dxa = Cart(Frac(child.atoms.coords.xf[:, src(edge)] - child.atoms.coords.xf[:, dst(edge)]), child.box) # Cartesian displacement distance_a = norm(dxa.x) # current euclidean distance by atom coords set_prop!(child.bonds, edge, :cross_boundary, !isapprox(distance_e, distance_a, atol=0.1)) end end return child end function install_replacements(parent::Crystal, replacements::Vector{Installation}, name::String)::Crystal # create child w/o symmetry rules for sake of crystal addition child = Crystal(name, parent.box, parent.atoms, parent.charges, parent.bonds, Xtals.SymmetryInfo()) obsolete_atoms = Int[] # to delete at the end # loop over replacements to install for installation in replacements replacement, q2p, r2p = installation.aligned_replacement, installation.q2p, installation.r2p #add into parent if replacement.atoms.n > 0 child = +(child, replacement, check_overlap=false) end # reconstruct bonds for (r, p) in r2p # p is in parent_subst p_nbrs = neighbors(parent.bonds, p) for p_nbr in p_nbrs if ! (p_nbr in values(q2p)) # p_nbr not in parent_subst # need bond nbr => r in child, where r is in replacement e = (p_nbr, child.atoms.n - replacement.atoms.n + r) # create edge add_edge!(child.bonds, e) # copy edge attributes from parent (:cross_boundary will need to be reassessed later) set_props!(child.bonds, e[1], e[2], props(parent.bonds, p, p_nbr)) set_prop!(child.bonds, e[1], e[2], :cross_boundary, missing) end end end # accumulate atoms to delete obsolete_atoms = vcat(obsolete_atoms, values(q2p)...) end # delete obsolete atoms obsolete_atoms = unique(obsolete_atoms) keep_atoms = [p for p = 1:child.atoms.n if ! (p in obsolete_atoms)] child = child[keep_atoms] # restore symmetry rules child = Crystal(name, child.box, child.atoms, child.charges, child.bonds, parent.symmetry) # return result return child end function optimal_replacement(search::Search, replacement::Crystal, q2r::Dict{Int,Int}, loc_id::Int, ori_ids::Vector{Int}) # unpack search arg isomorphisms, parent = search.isomorphisms, search.parent if q2r == Dict{Int, Int}() # "replace-with-nothing" operation q2p = isomorphisms[loc_id][1] r2p = Dict([0 => p for p in values(q2p)]) return Installation(replacement, q2p, r2p) end if ori_ids == [0] ori_ids = [1:nb_ori_at_loc(search)[loc_id]...] 
end # loop over ori_ids to find best r2p_alignment r2p_alignment = Alignment(zeros(1,1), [0.], [0.], Inf) best_ori = 0 best_r2p = Dict{Int, Int}() for ori_id in ori_ids # find r2p isom q2p = isomorphisms[loc_id][ori_id] r2p = Dict([r => q2p[q] for (q, r) in q2r]) # calculate alignment test_alignment = get_r2p_alignment(replacement, parent, r2p, q2p) # keep best alignment and generating ori_id if test_alignment.err < r2p_alignment.err r2p_alignment = test_alignment best_ori = ori_id best_r2p = r2p end end opt_aligned_replacement = aligned_replacement(replacement, parent, r2p_alignment) # return the replacement modified according to r2p_alignment @assert ne(opt_aligned_replacement.bonds) == ne(replacement.bonds) return Installation(opt_aligned_replacement, isomorphisms[loc_id][best_ori], best_r2p) end @doc raw""" child = substructure_replace(search, replacement; random=false, nb_loc=0, loc=Int[], ori=Int[], name="new_xtal", verbose=false, remove_duplicates=false, periodic_boundaries=true) Replace the substructures of `search.parent` matching the `search.query` fragment with the `replacement` fragment, at locations and orientations specified by the keyword arguments `random`, `nb_loc`, `loc`, and `ori`. Default behavior is to effect replacements at all "hit" locations in the parent structure and, at each location, choose the orientation giving the optimal (lowest error) spatial aligment. Returns a new `Crystal` with the specified modifications (returns `search.parent` if no replacements are made). # Arguments - `search::Search` the `Search` for a substructure moiety in the parent crystal - `replacement::Crystal` the moiety to use for replacement of the searched substructure - `random::Bool` set `true` to select random replacement orientations - `nb_loc::Int` assign a value to select random replacement at `nb_loc` random locations - `loc::Array{Int}` assign value(s) to select specific locations for replacement. If `ori` is not specified, replacement orientation is random. - `ori::Array{Int}` assign value(s) when `loc` is assigned to specify exact configurations for replacement. `0` values mean the configuration at that location should be selected for optimal alignment with the parent. - `name::String` assign to give the generated `Crystal` a name ("new_xtal" by default) - `verbose::Bool` set `true` to print console messages about the replacement(s) being performed - `remove_duplicates::Bool` set `true` to automatically combine overlapping atoms of the same species in generated structures. - `reinfer_bonds::Bool` set `true` to re-infer bonds after producing a structure - `periodic_boundaries::Bool` set `false` to disable periodic boundary conditions when checking for atom duplication or re-inferring bonds """ function substructure_replace(search::Search, replacement::Crystal; random::Bool=false, nb_loc::Int=0, loc::Array{Int}=Int[], ori::Array{Int}=Int[], name::String="new_xtal", verbose::Bool=false, remove_duplicates::Bool=false, periodic_boundaries::Bool=true, reinfer_bonds::Bool=false, wrap::Bool=true)::Crystal # replacement at all locations (default) if nb_loc == 0 && loc == Int[] && ori == Int[] nb_loc = nb_locations(search) loc = [1:nb_loc...] 
if random ori = [rand(1:nb_ori_at_loc(search)[i]) for i in loc] if verbose @info "Replacing" q_in_p=search r=replacement.name mode="random ori @ all loc" end else ori = zeros(Int, nb_loc) if verbose @info "Replacing" q_in_p=search r=replacement.name mode="optimal ori @ all loc" end end # replacement at nb_loc random locations elseif nb_loc > 0 && ori == Int[] && loc == Int[] loc = sample([1:nb_locations(search)...], nb_loc, replace=false) if random ori = [rand(1:nb_ori_at_loc(search)[i]) for i in loc] if verbose @info "Replacing" q_in_p=search r=replacement.name mode="random ori @ $nb_loc loc" end else ori = zeros(Int, nb_loc) if verbose @info "Replacing" q_in_p=search r=replacement.name mode="optimal ori @ $nb_loc loc" end end # specific replacements elseif ori ≠ Int[] && loc ≠ Int[] @assert length(loc) == length(ori) "one orientation per location" nb_loc = length(ori) if verbose @info "Replacing" q_in_p=search r=replacement.name mode="loc: $loc\tori: $ori" end # replacement at specific locations elseif loc ≠ Int[] nb_loc = length(loc) if random ori = [rand(1:nb_ori_at_loc(search)[i]) for i in loc] if verbose @info "Replacing" q_in_p=search r=replacement.name mode="random ori @ loc: $loc" end else ori = zeros(Int, nb_loc) if verbose @info "Replacing" q_in_p=search r=replacement.name mode="optimal ori @ loc: $loc" end end end # remove charges from parent if search.parent.charges.n > 0 @warn "Dropping charges from parent." p = search.parent search = Search( Crystal(p.name, p.box, p.atoms, Charges{Frac}(0), p.bonds, p.symmetry), search.query, search.isomorphisms ) end # generate configuration tuples (location, orientation) configs = Tuple{Int,Int}[(loc[i], ori[i]) for i in 1:nb_loc] # process replacements child = effect_replacements(search, replacement, configs, name) if remove_duplicates child = Crystal(child.name, child.box, Xtals.remove_duplicates(child.atoms, child.box, periodic_boundaries), Xtals.remove_duplicates(child.charges, child.box, periodic_boundaries) ) end if wrap # throw error if installed replacement fragment spans the unit cell if any(abs.(child.atoms.coords.xf) .> 2.0) error("installed replacement fragment too large for the unit cell; replicate the parent and try again.") end # wrap coordinates wrap!(child.atoms.coords) # check :cross_boundary edge attributes for edge in edges(child.bonds) # loop over edges distance_e = get_prop(child.bonds, edge, :distance) # distance in edge property dxa = Cart(Frac(child.atoms.coords.xf[:, src(edge)] - child.atoms.coords.xf[:, dst(edge)]), child.box) # Cartesian displacement distance_a = norm(dxa.x) # current euclidean distance by atom coords set_prop!(child.bonds, edge, :cross_boundary, !isapprox(distance_e, distance_a, atol=0.1)) end end if reinfer_bonds remove_bonds!(child) infer_bonds!(child, periodic_boundaries) end return child end substructure_replace(search::Search, replacement::Nothing; kwargs...) = substructure_replace(search, moiety(nothing); kwargs...)
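# --- Hedged usage sketch (added; file names are placeholders and the `query ∈ parent`
# search form follows the `replace` docstring at the top of this file):
# parent = Crystal("IRMOF-1.cif")
# infer_bonds!(parent, true)
# query = moiety("p-phenylene.xyz")
# rep = moiety("acetylamido-p-phenylene.xyz")
# search = query ∈ parent                                    # substructure search
# child = substructure_replace(search, rep; nb_loc=2, name="functionalized_xtal")
# # equivalently, using the one-shot form defined above:
# child = replace(parent, query => rep; nb_loc=2)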
# AUTO GENERATED FILE - DO NOT EDIT

export feffery_antdalert

"""
    feffery_antdalert(;kwargs...)

An AntdAlert component.

Keyword arguments:
- `id` (String; optional)
- `className` (String; optional)
- `closable` (Bool; optional)
- `description` (String; optional)
- `loading_state` (optional): loading_state has the following type: lists containing elements 'is_loading', 'prop_name', 'component_name'. Those elements have the following types:
  - `is_loading` (Bool; optional): Determines if the component is loading or not
  - `prop_name` (String; optional): Holds which property is loading
  - `component_name` (String; optional): Holds the name of the component that is loading
- `message` (String | Array of Strings; optional)
- `messageRenderMode` (a value equal to: 'default', 'loop-text', 'marquee'; optional)
- `showIcon` (Bool; optional)
- `style` (Dict; optional)
- `type` (a value equal to: 'success', 'info', 'warning', 'error'; optional)
"""
function feffery_antdalert(; kwargs...)
    available_props = Symbol[:id, :className, :closable, :description, :loading_state, :message, :messageRenderMode, :showIcon, :style, :type]
    wild_props = Symbol[]
    return Component("feffery_antdalert", "AntdAlert", "feffery_antd_components", available_props, wild_props; kwargs...)
end
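# --- Hedged usage sketch (added; assumes Dash.jl is available and that this generated
# wrapper is loaded under the name used above):
# using Dash
# app = dash()
# app.layout = html_div() do
#     feffery_antdalert(message = "Saved", type = "success", showIcon = true)
# end
# run_server(app, "0.0.0.0", 8050)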
module ATI import GetC.@getCFun typealias GLenum Cuint typealias GLboolean Cuchar typealias GLbitfield Cuint typealias GLvoid Void typealias GLbyte Cuchar typealias GLshort Cshort typealias GLint Cint typealias GLubyte Cuchar typealias GLushort Cushort typealias GLuint Cuint typealias GLsizei Csize_t typealias GLfloat Cfloat typealias GLclampf Cfloat typealias GLdouble Cdouble typealias GLclampd Cdouble typealias GLchar Cchar typealias GLint64 Clonglong typealias GLuint64 Culonglong typealias GLhalf Cushort typealias GLhalfARB Cushort typealias GLhalfNV Cushort typealias GLsync Ptr{Void} typealias Pointer Ptr{Void} typealias GLsizeiptr Cint typealias GLintptr Cptrdiff_t const DRAW_BUFFER0_ATI = 0x8825 export DRAW_BUFFER0_ATI const DRAW_BUFFER10_ATI = 0x882F export DRAW_BUFFER10_ATI const DRAW_BUFFER11_ATI = 0x8830 export DRAW_BUFFER11_ATI const DRAW_BUFFER12_ATI = 0x8831 export DRAW_BUFFER12_ATI const DRAW_BUFFER13_ATI = 0x8832 export DRAW_BUFFER13_ATI const DRAW_BUFFER14_ATI = 0x8833 export DRAW_BUFFER14_ATI const DRAW_BUFFER15_ATI = 0x8834 export DRAW_BUFFER15_ATI const DRAW_BUFFER1_ATI = 0x8826 export DRAW_BUFFER1_ATI const DRAW_BUFFER2_ATI = 0x8827 export DRAW_BUFFER2_ATI const DRAW_BUFFER3_ATI = 0x8828 export DRAW_BUFFER3_ATI const DRAW_BUFFER4_ATI = 0x8829 export DRAW_BUFFER4_ATI const DRAW_BUFFER5_ATI = 0x882A export DRAW_BUFFER5_ATI const DRAW_BUFFER6_ATI = 0x882B export DRAW_BUFFER6_ATI const DRAW_BUFFER7_ATI = 0x882C export DRAW_BUFFER7_ATI const DRAW_BUFFER8_ATI = 0x882D export DRAW_BUFFER8_ATI const DRAW_BUFFER9_ATI = 0x882E export DRAW_BUFFER9_ATI const MAX_DRAW_BUFFERS_ATI = 0x8824 export MAX_DRAW_BUFFERS_ATI const ELEMENT_ARRAY_ATI = 0x8768 export ELEMENT_ARRAY_ATI const ELEMENT_ARRAY_POINTER_ATI = 0x876A export ELEMENT_ARRAY_POINTER_ATI const ELEMENT_ARRAY_TYPE_ATI = 0x8769 export ELEMENT_ARRAY_TYPE_ATI const BUMP_ENVMAP_ATI = 0x877B export BUMP_ENVMAP_ATI const BUMP_NUM_TEX_UNITS_ATI = 0x8777 export BUMP_NUM_TEX_UNITS_ATI const BUMP_ROT_MATRIX_ATI = 0x8775 export BUMP_ROT_MATRIX_ATI const BUMP_ROT_MATRIX_SIZE_ATI = 0x8776 export BUMP_ROT_MATRIX_SIZE_ATI const BUMP_TARGET_ATI = 0x877C export BUMP_TARGET_ATI const BUMP_TEX_UNITS_ATI = 0x8778 export BUMP_TEX_UNITS_ATI const DU8DV8_ATI = 0x877A export DU8DV8_ATI const DUDV_ATI = 0x8779 export DUDV_ATI const X2X_BIT_ATI = 0x00000001 export X2X_BIT_ATI const X4X_BIT_ATI = 0x00000002 export X4X_BIT_ATI const X8X_BIT_ATI = 0x00000004 export X8X_BIT_ATI const ADD_ATI = 0x8963 export ADD_ATI const BIAS_BIT_ATI = 0x00000008 export BIAS_BIT_ATI const BLUE_BIT_ATI = 0x00000004 export BLUE_BIT_ATI const CND0_ATI = 0x896B export CND0_ATI const CND_ATI = 0x896A export CND_ATI const COLOR_ALPHA_PAIRING_ATI = 0x8975 export COLOR_ALPHA_PAIRING_ATI const COMP_BIT_ATI = 0x00000002 export COMP_BIT_ATI const CON_0_ATI = 0x8941 export CON_0_ATI const CON_10_ATI = 0x894B export CON_10_ATI const CON_11_ATI = 0x894C export CON_11_ATI const CON_12_ATI = 0x894D export CON_12_ATI const CON_13_ATI = 0x894E export CON_13_ATI const CON_14_ATI = 0x894F export CON_14_ATI const CON_15_ATI = 0x8950 export CON_15_ATI const CON_16_ATI = 0x8951 export CON_16_ATI const CON_17_ATI = 0x8952 export CON_17_ATI const CON_18_ATI = 0x8953 export CON_18_ATI const CON_19_ATI = 0x8954 export CON_19_ATI const CON_1_ATI = 0x8942 export CON_1_ATI const CON_20_ATI = 0x8955 export CON_20_ATI const CON_21_ATI = 0x8956 export CON_21_ATI const CON_22_ATI = 0x8957 export CON_22_ATI const CON_23_ATI = 0x8958 export CON_23_ATI const CON_24_ATI = 0x8959 export CON_24_ATI const 
CON_25_ATI = 0x895A export CON_25_ATI const CON_26_ATI = 0x895B export CON_26_ATI const CON_27_ATI = 0x895C export CON_27_ATI const CON_28_ATI = 0x895D export CON_28_ATI const CON_29_ATI = 0x895E export CON_29_ATI const CON_2_ATI = 0x8943 export CON_2_ATI const CON_30_ATI = 0x895F export CON_30_ATI const CON_31_ATI = 0x8960 export CON_31_ATI const CON_3_ATI = 0x8944 export CON_3_ATI const CON_4_ATI = 0x8945 export CON_4_ATI const CON_5_ATI = 0x8946 export CON_5_ATI const CON_6_ATI = 0x8947 export CON_6_ATI const CON_7_ATI = 0x8948 export CON_7_ATI const CON_8_ATI = 0x8949 export CON_8_ATI const CON_9_ATI = 0x894A export CON_9_ATI const DOT2_ADD_ATI = 0x896C export DOT2_ADD_ATI const DOT3_ATI = 0x8966 export DOT3_ATI const DOT4_ATI = 0x8967 export DOT4_ATI const EIGHTH_BIT_ATI = 0x00000020 export EIGHTH_BIT_ATI const FRAGMENT_SHADER_ATI = 0x8920 export FRAGMENT_SHADER_ATI const GREEN_BIT_ATI = 0x00000002 export GREEN_BIT_ATI const HALF_BIT_ATI = 0x00000008 export HALF_BIT_ATI const LERP_ATI = 0x8969 export LERP_ATI const MAD_ATI = 0x8968 export MAD_ATI const MOV_ATI = 0x8961 export MOV_ATI const MUL_ATI = 0x8964 export MUL_ATI const NEGATE_BIT_ATI = 0x00000004 export NEGATE_BIT_ATI const NUM_FRAGMENT_CONSTANTS_ATI = 0x896F export NUM_FRAGMENT_CONSTANTS_ATI const NUM_FRAGMENT_REGISTERS_ATI = 0x896E export NUM_FRAGMENT_REGISTERS_ATI const NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI = 0x8973 export NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI const NUM_INSTRUCTIONS_PER_PASS_ATI = 0x8971 export NUM_INSTRUCTIONS_PER_PASS_ATI const NUM_INSTRUCTIONS_TOTAL_ATI = 0x8972 export NUM_INSTRUCTIONS_TOTAL_ATI const NUM_LOOPBACK_COMPONENTS_ATI = 0x8974 export NUM_LOOPBACK_COMPONENTS_ATI const NUM_PASSES_ATI = 0x8970 export NUM_PASSES_ATI const QUARTER_BIT_ATI = 0x00000010 export QUARTER_BIT_ATI const RED_BIT_ATI = 0x00000001 export RED_BIT_ATI const REG_0_ATI = 0x8921 export REG_0_ATI const REG_10_ATI = 0x892B export REG_10_ATI const REG_11_ATI = 0x892C export REG_11_ATI const REG_12_ATI = 0x892D export REG_12_ATI const REG_13_ATI = 0x892E export REG_13_ATI const REG_14_ATI = 0x892F export REG_14_ATI const REG_15_ATI = 0x8930 export REG_15_ATI const REG_16_ATI = 0x8931 export REG_16_ATI const REG_17_ATI = 0x8932 export REG_17_ATI const REG_18_ATI = 0x8933 export REG_18_ATI const REG_19_ATI = 0x8934 export REG_19_ATI const REG_1_ATI = 0x8922 export REG_1_ATI const REG_20_ATI = 0x8935 export REG_20_ATI const REG_21_ATI = 0x8936 export REG_21_ATI const REG_22_ATI = 0x8937 export REG_22_ATI const REG_23_ATI = 0x8938 export REG_23_ATI const REG_24_ATI = 0x8939 export REG_24_ATI const REG_25_ATI = 0x893A export REG_25_ATI const REG_26_ATI = 0x893B export REG_26_ATI const REG_27_ATI = 0x893C export REG_27_ATI const REG_28_ATI = 0x893D export REG_28_ATI const REG_29_ATI = 0x893E export REG_29_ATI const REG_2_ATI = 0x8923 export REG_2_ATI const REG_30_ATI = 0x893F export REG_30_ATI const REG_31_ATI = 0x8940 export REG_31_ATI const REG_3_ATI = 0x8924 export REG_3_ATI const REG_4_ATI = 0x8925 export REG_4_ATI const REG_5_ATI = 0x8926 export REG_5_ATI const REG_6_ATI = 0x8927 export REG_6_ATI const REG_7_ATI = 0x8928 export REG_7_ATI const REG_8_ATI = 0x8929 export REG_8_ATI const REG_9_ATI = 0x892A export REG_9_ATI const SATURATE_BIT_ATI = 0x00000040 export SATURATE_BIT_ATI const SECONDARY_INTERPOLATOR_ATI = 0x896D export SECONDARY_INTERPOLATOR_ATI const SUB_ATI = 0x8965 export SUB_ATI const SWIZZLE_STQ_ATI = 0x8977 export SWIZZLE_STQ_ATI const SWIZZLE_STQ_DQ_ATI = 0x8979 export SWIZZLE_STQ_DQ_ATI const SWIZZLE_STRQ_ATI = 0x897A 
export SWIZZLE_STRQ_ATI const SWIZZLE_STRQ_DQ_ATI = 0x897B export SWIZZLE_STRQ_DQ_ATI const SWIZZLE_STR_ATI = 0x8976 export SWIZZLE_STR_ATI const SWIZZLE_STR_DR_ATI = 0x8978 export SWIZZLE_STR_DR_ATI const RENDERBUFFER_FREE_MEMORY_ATI = 0x87FD export RENDERBUFFER_FREE_MEMORY_ATI const TEXTURE_FREE_MEMORY_ATI = 0x87FC export TEXTURE_FREE_MEMORY_ATI const VBO_FREE_MEMORY_ATI = 0x87FB export VBO_FREE_MEMORY_ATI const COLOR_CLEAR_UNCLAMPED_VALUE_ATI = 0x8835 export COLOR_CLEAR_UNCLAMPED_VALUE_ATI const RGBA_FLOAT_MODE_ATI = 0x8820 export RGBA_FLOAT_MODE_ATI const MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI = 0x87F1 export MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI const PN_TRIANGLES_ATI = 0x87F0 export PN_TRIANGLES_ATI const PN_TRIANGLES_NORMAL_MODE_ATI = 0x87F3 export PN_TRIANGLES_NORMAL_MODE_ATI const PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI = 0x87F7 export PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI const PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI = 0x87F8 export PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI const PN_TRIANGLES_POINT_MODE_ATI = 0x87F2 export PN_TRIANGLES_POINT_MODE_ATI const PN_TRIANGLES_POINT_MODE_CUBIC_ATI = 0x87F6 export PN_TRIANGLES_POINT_MODE_CUBIC_ATI const PN_TRIANGLES_POINT_MODE_LINEAR_ATI = 0x87F5 export PN_TRIANGLES_POINT_MODE_LINEAR_ATI const PN_TRIANGLES_TESSELATION_LEVEL_ATI = 0x87F4 export PN_TRIANGLES_TESSELATION_LEVEL_ATI const STENCIL_BACK_FAIL_ATI = 0x8801 export STENCIL_BACK_FAIL_ATI const STENCIL_BACK_FUNC_ATI = 0x8800 export STENCIL_BACK_FUNC_ATI const STENCIL_BACK_PASS_DEPTH_FAIL_ATI = 0x8802 export STENCIL_BACK_PASS_DEPTH_FAIL_ATI const STENCIL_BACK_PASS_DEPTH_PASS_ATI = 0x8803 export STENCIL_BACK_PASS_DEPTH_PASS_ATI const TEXT_FRAGMENT_SHADER_ATI = 0x8200 export TEXT_FRAGMENT_SHADER_ATI const MODULATE_ADD_ATI = 0x8744 export MODULATE_ADD_ATI const MODULATE_SIGNED_ADD_ATI = 0x8745 export MODULATE_SIGNED_ADD_ATI const MODULATE_SUBTRACT_ATI = 0x8746 export MODULATE_SUBTRACT_ATI const ALPHA_FLOAT16_ATI = 0x881C export ALPHA_FLOAT16_ATI const ALPHA_FLOAT32_ATI = 0x8816 export ALPHA_FLOAT32_ATI const INTENSITY_FLOAT16_ATI = 0x881D export INTENSITY_FLOAT16_ATI const INTENSITY_FLOAT32_ATI = 0x8817 export INTENSITY_FLOAT32_ATI const LUMINANCE_ALPHA_FLOAT16_ATI = 0x881F export LUMINANCE_ALPHA_FLOAT16_ATI const LUMINANCE_ALPHA_FLOAT32_ATI = 0x8819 export LUMINANCE_ALPHA_FLOAT32_ATI const LUMINANCE_FLOAT16_ATI = 0x881E export LUMINANCE_FLOAT16_ATI const LUMINANCE_FLOAT32_ATI = 0x8818 export LUMINANCE_FLOAT32_ATI const RGBA_FLOAT16_ATI = 0x881A export RGBA_FLOAT16_ATI const RGBA_FLOAT32_ATI = 0x8814 export RGBA_FLOAT32_ATI const RGB_FLOAT16_ATI = 0x881B export RGB_FLOAT16_ATI const RGB_FLOAT32_ATI = 0x8815 export RGB_FLOAT32_ATI const MIRROR_CLAMP_ATI = 0x8742 export MIRROR_CLAMP_ATI const MIRROR_CLAMP_TO_EDGE_ATI = 0x8743 export MIRROR_CLAMP_TO_EDGE_ATI const ARRAY_OBJECT_BUFFER_ATI = 0x8766 export ARRAY_OBJECT_BUFFER_ATI const ARRAY_OBJECT_OFFSET_ATI = 0x8767 export ARRAY_OBJECT_OFFSET_ATI const DISCARD_ATI = 0x8763 export DISCARD_ATI const DYNAMIC_ATI = 0x8761 export DYNAMIC_ATI const OBJECT_BUFFER_SIZE_ATI = 0x8764 export OBJECT_BUFFER_SIZE_ATI const OBJECT_BUFFER_USAGE_ATI = 0x8765 export OBJECT_BUFFER_USAGE_ATI const PRESERVE_ATI = 0x8762 export PRESERVE_ATI const STATIC_ATI = 0x8760 export STATIC_ATI const MAX_VERTEX_STREAMS_ATI = 0x876B export MAX_VERTEX_STREAMS_ATI const VERTEX_SOURCE_ATI = 0x8774 export VERTEX_SOURCE_ATI const VERTEX_STREAM0_ATI = 0x876C export VERTEX_STREAM0_ATI const VERTEX_STREAM1_ATI = 0x876D export VERTEX_STREAM1_ATI const VERTEX_STREAM2_ATI = 0x876E export 
VERTEX_STREAM2_ATI const VERTEX_STREAM3_ATI = 0x876F export VERTEX_STREAM3_ATI const VERTEX_STREAM4_ATI = 0x8770 export VERTEX_STREAM4_ATI const VERTEX_STREAM5_ATI = 0x8771 export VERTEX_STREAM5_ATI const VERTEX_STREAM6_ATI = 0x8772 export VERTEX_STREAM6_ATI const VERTEX_STREAM7_ATI = 0x8773 export VERTEX_STREAM7_ATI @getCFun "libGL" glDrawBuffersATI glDrawBuffersATI(n::GLsizei, bufs::Ptr{GLenum})::Void export glDrawBuffersATI @getCFun "libGL" glElementPointerATI glElementPointerATI(type_::GLenum, pointer::Ptr{Void})::Void export glElementPointerATI @getCFun "libGL" glDrawElementArrayATI glDrawElementArrayATI(mode::GLenum, count::GLsizei)::Void export glDrawElementArrayATI @getCFun "libGL" glDrawRangeElementArrayATI glDrawRangeElementArrayATI(mode::GLenum, start::GLuint, END::GLuint, count::GLsizei)::Void export glDrawRangeElementArrayATI @getCFun "libGL" glTexBumpParameterivATI glTexBumpParameterivATI(pname::GLenum, param::Ptr{GLint})::Void export glTexBumpParameterivATI @getCFun "libGL" glTexBumpParameterfvATI glTexBumpParameterfvATI(pname::GLenum, param::Ptr{GLfloat})::Void export glTexBumpParameterfvATI @getCFun "libGL" glGetTexBumpParameterivATI glGetTexBumpParameterivATI(pname::GLenum, param::Ptr{GLint})::Void export glGetTexBumpParameterivATI @getCFun "libGL" glGetTexBumpParameterfvATI glGetTexBumpParameterfvATI(pname::GLenum, param::Ptr{GLfloat})::Void export glGetTexBumpParameterfvATI @getCFun "libGL" glGenFragmentShadersATI glGenFragmentShadersATI(range_::GLuint)::Cuint export glGenFragmentShadersATI @getCFun "libGL" glBindFragmentShaderATI glBindFragmentShaderATI(id::GLuint)::Void export glBindFragmentShaderATI @getCFun "libGL" glDeleteFragmentShaderATI glDeleteFragmentShaderATI(id::GLuint)::Void export glDeleteFragmentShaderATI @getCFun "libGL" glBeginFragmentShaderATI glBeginFragmentShaderATI()::Void export glBeginFragmentShaderATI @getCFun "libGL" glEndFragmentShaderATI glEndFragmentShaderATI()::Void export glEndFragmentShaderATI @getCFun "libGL" glPassTexCoordATI glPassTexCoordATI(dst::GLuint, coord::GLuint, swizzle::GLenum)::Void export glPassTexCoordATI @getCFun "libGL" glSampleMapATI glSampleMapATI(dst::GLuint, interp::GLuint, swizzle::GLenum)::Void export glSampleMapATI @getCFun "libGL" glColorFragmentOp1ATI glColorFragmentOp1ATI(op::GLenum, dst::GLuint, dstMask::GLuint, dstMod::GLuint, arg1::GLuint, arg1Rep::GLuint, arg1Mod::GLuint)::Void export glColorFragmentOp1ATI @getCFun "libGL" glColorFragmentOp2ATI glColorFragmentOp2ATI(op::GLenum, dst::GLuint, dstMask::GLuint, dstMod::GLuint, arg1::GLuint, arg1Rep::GLuint, arg1Mod::GLuint, arg2::GLuint, arg2Rep::GLuint, arg2Mod::GLuint)::Void export glColorFragmentOp2ATI @getCFun "libGL" glColorFragmentOp3ATI glColorFragmentOp3ATI(op::GLenum, dst::GLuint, dstMask::GLuint, dstMod::GLuint, arg1::GLuint, arg1Rep::GLuint, arg1Mod::GLuint, arg2::GLuint, arg2Rep::GLuint, arg2Mod::GLuint, arg3::GLuint, arg3Rep::GLuint, arg3Mod::GLuint)::Void export glColorFragmentOp3ATI @getCFun "libGL" glAlphaFragmentOp1ATI glAlphaFragmentOp1ATI(op::GLenum, dst::GLuint, dstMod::GLuint, arg1::GLuint, arg1Rep::GLuint, arg1Mod::GLuint)::Void export glAlphaFragmentOp1ATI @getCFun "libGL" glAlphaFragmentOp2ATI glAlphaFragmentOp2ATI(op::GLenum, dst::GLuint, dstMod::GLuint, arg1::GLuint, arg1Rep::GLuint, arg1Mod::GLuint, arg2::GLuint, arg2Rep::GLuint, arg2Mod::GLuint)::Void export glAlphaFragmentOp2ATI @getCFun "libGL" glAlphaFragmentOp3ATI glAlphaFragmentOp3ATI(op::GLenum, dst::GLuint, dstMod::GLuint, arg1::GLuint, arg1Rep::GLuint, arg1Mod::GLuint, 
arg2::GLuint, arg2Rep::GLuint, arg2Mod::GLuint, arg3::GLuint, arg3Rep::GLuint, arg3Mod::GLuint)::Void export glAlphaFragmentOp3ATI @getCFun "libGL" glSetFragmentShaderConstantATI glSetFragmentShaderConstantATI(dst::GLuint, value::Ptr{GLfloat})::Void export glSetFragmentShaderConstantATI @getCFun "libGL" glMapObjectBufferATI glMapObjectBufferATI(buffer::GLuint)::Ptr{Void} export glMapObjectBufferATI @getCFun "libGL" glUnmapObjectBufferATI glUnmapObjectBufferATI(buffer::GLuint)::Void export glUnmapObjectBufferATI @getCFun "libGL" glPNTrianglesiATI glPNTrianglesiATI(pname::GLenum, param::GLint)::Void export glPNTrianglesiATI @getCFun "libGL" glPNTrianglesfATI glPNTrianglesfATI(pname::GLenum, param::GLfloat)::Void export glPNTrianglesfATI @getCFun "libGL" glStencilOpSeparateATI glStencilOpSeparateATI(face::GLenum, sfail::GLenum, dpfail::GLenum, dppass::GLenum)::Void export glStencilOpSeparateATI @getCFun "libGL" glStencilFuncSeparateATI glStencilFuncSeparateATI(frontfunc::GLenum, backfunc::GLenum, ref::GLint, mask::GLuint)::Void export glStencilFuncSeparateATI @getCFun "libGL" glNewObjectBufferATI glNewObjectBufferATI(size::GLsizei, pointer::Ptr{Void}, usage::GLenum)::Cuint export glNewObjectBufferATI @getCFun "libGL" glIsObjectBufferATI glIsObjectBufferATI(buffer::GLuint)::Bool export glIsObjectBufferATI @getCFun "libGL" glUpdateObjectBufferATI glUpdateObjectBufferATI(buffer::GLuint, offset::GLuint, size::GLsizei, pointer::Ptr{Void}, preserve::GLenum)::Void export glUpdateObjectBufferATI @getCFun "libGL" glGetObjectBufferfvATI glGetObjectBufferfvATI(buffer::GLuint, pname::GLenum, params::Ptr{GLfloat})::Void export glGetObjectBufferfvATI @getCFun "libGL" glGetObjectBufferivATI glGetObjectBufferivATI(buffer::GLuint, pname::GLenum, params::Ptr{GLint})::Void export glGetObjectBufferivATI @getCFun "libGL" glFreeObjectBufferATI glFreeObjectBufferATI(buffer::GLuint)::Void export glFreeObjectBufferATI @getCFun "libGL" glArrayObjectATI glArrayObjectATI(array::GLenum, size::GLint, type_::GLenum, stride::GLsizei, buffer::GLuint, offset::GLuint)::Void export glArrayObjectATI @getCFun "libGL" glGetArrayObjectfvATI glGetArrayObjectfvATI(array::GLenum, pname::GLenum, params::Ptr{GLfloat})::Void export glGetArrayObjectfvATI @getCFun "libGL" glGetArrayObjectivATI glGetArrayObjectivATI(array::GLenum, pname::GLenum, params::Ptr{GLint})::Void export glGetArrayObjectivATI @getCFun "libGL" glVariantArrayObjectATI glVariantArrayObjectATI(id::GLuint, type_::GLenum, stride::GLsizei, buffer::GLuint, offset::GLuint)::Void export glVariantArrayObjectATI @getCFun "libGL" glGetVariantArrayObjectfvATI glGetVariantArrayObjectfvATI(id::GLuint, pname::GLenum, params::Ptr{GLfloat})::Void export glGetVariantArrayObjectfvATI @getCFun "libGL" glGetVariantArrayObjectivATI glGetVariantArrayObjectivATI(id::GLuint, pname::GLenum, params::Ptr{GLint})::Void export glGetVariantArrayObjectivATI @getCFun "libGL" glVertexAttribArrayObjectATI glVertexAttribArrayObjectATI(index::GLuint, size::GLint, type_::GLenum, normalized::GLboolean, stride::GLsizei, buffer::GLuint, offset::GLuint)::Void export glVertexAttribArrayObjectATI @getCFun "libGL" glGetVertexAttribArrayObjectfvATI glGetVertexAttribArrayObjectfvATI(index::GLuint, pname::GLenum, params::Ptr{GLfloat})::Void export glGetVertexAttribArrayObjectfvATI @getCFun "libGL" glGetVertexAttribArrayObjectivATI glGetVertexAttribArrayObjectivATI(index::GLuint, pname::GLenum, params::Ptr{GLint})::Void export glGetVertexAttribArrayObjectivATI @getCFun "libGL" glVertexStream1sATI 
glVertexStream1sATI(stream::GLenum, x::GLshort)::Void export glVertexStream1sATI @getCFun "libGL" glVertexStream1svATI glVertexStream1svATI(stream::GLenum, coords::Ptr{GLshort})::Void export glVertexStream1svATI @getCFun "libGL" glVertexStream1iATI glVertexStream1iATI(stream::GLenum, x::GLint)::Void export glVertexStream1iATI @getCFun "libGL" glVertexStream1ivATI glVertexStream1ivATI(stream::GLenum, coords::Ptr{GLint})::Void export glVertexStream1ivATI @getCFun "libGL" glVertexStream1fATI glVertexStream1fATI(stream::GLenum, x::GLfloat)::Void export glVertexStream1fATI @getCFun "libGL" glVertexStream1fvATI glVertexStream1fvATI(stream::GLenum, coords::Ptr{GLfloat})::Void export glVertexStream1fvATI @getCFun "libGL" glVertexStream1dATI glVertexStream1dATI(stream::GLenum, x::GLdouble)::Void export glVertexStream1dATI @getCFun "libGL" glVertexStream1dvATI glVertexStream1dvATI(stream::GLenum, coords::Ptr{GLdouble})::Void export glVertexStream1dvATI @getCFun "libGL" glVertexStream2sATI glVertexStream2sATI(stream::GLenum, x::GLshort, y::GLshort)::Void export glVertexStream2sATI @getCFun "libGL" glVertexStream2svATI glVertexStream2svATI(stream::GLenum, coords::Ptr{GLshort})::Void export glVertexStream2svATI @getCFun "libGL" glVertexStream2iATI glVertexStream2iATI(stream::GLenum, x::GLint, y::GLint)::Void export glVertexStream2iATI @getCFun "libGL" glVertexStream2ivATI glVertexStream2ivATI(stream::GLenum, coords::Ptr{GLint})::Void export glVertexStream2ivATI @getCFun "libGL" glVertexStream2fATI glVertexStream2fATI(stream::GLenum, x::GLfloat, y::GLfloat)::Void export glVertexStream2fATI @getCFun "libGL" glVertexStream2fvATI glVertexStream2fvATI(stream::GLenum, coords::Ptr{GLfloat})::Void export glVertexStream2fvATI @getCFun "libGL" glVertexStream2dATI glVertexStream2dATI(stream::GLenum, x::GLdouble, y::GLdouble)::Void export glVertexStream2dATI @getCFun "libGL" glVertexStream2dvATI glVertexStream2dvATI(stream::GLenum, coords::Ptr{GLdouble})::Void export glVertexStream2dvATI @getCFun "libGL" glVertexStream3sATI glVertexStream3sATI(stream::GLenum, x::GLshort, y::GLshort, z::GLshort)::Void export glVertexStream3sATI @getCFun "libGL" glVertexStream3svATI glVertexStream3svATI(stream::GLenum, coords::Ptr{GLshort})::Void export glVertexStream3svATI @getCFun "libGL" glVertexStream3iATI glVertexStream3iATI(stream::GLenum, x::GLint, y::GLint, z::GLint)::Void export glVertexStream3iATI @getCFun "libGL" glVertexStream3ivATI glVertexStream3ivATI(stream::GLenum, coords::Ptr{GLint})::Void export glVertexStream3ivATI @getCFun "libGL" glVertexStream3fATI glVertexStream3fATI(stream::GLenum, x::GLfloat, y::GLfloat, z::GLfloat)::Void export glVertexStream3fATI @getCFun "libGL" glVertexStream3fvATI glVertexStream3fvATI(stream::GLenum, coords::Ptr{GLfloat})::Void export glVertexStream3fvATI @getCFun "libGL" glVertexStream3dATI glVertexStream3dATI(stream::GLenum, x::GLdouble, y::GLdouble, z::GLdouble)::Void export glVertexStream3dATI @getCFun "libGL" glVertexStream3dvATI glVertexStream3dvATI(stream::GLenum, coords::Ptr{GLdouble})::Void export glVertexStream3dvATI @getCFun "libGL" glVertexStream4sATI glVertexStream4sATI(stream::GLenum, x::GLshort, y::GLshort, z::GLshort, w::GLshort)::Void export glVertexStream4sATI @getCFun "libGL" glVertexStream4svATI glVertexStream4svATI(stream::GLenum, coords::Ptr{GLshort})::Void export glVertexStream4svATI @getCFun "libGL" glVertexStream4iATI glVertexStream4iATI(stream::GLenum, x::GLint, y::GLint, z::GLint, w::GLint)::Void export glVertexStream4iATI @getCFun "libGL" glVertexStream4ivATI 
glVertexStream4ivATI(stream::GLenum, coords::Ptr{GLint})::Void export glVertexStream4ivATI @getCFun "libGL" glVertexStream4fATI glVertexStream4fATI(stream::GLenum, x::GLfloat, y::GLfloat, z::GLfloat, w::GLfloat)::Void export glVertexStream4fATI @getCFun "libGL" glVertexStream4fvATI glVertexStream4fvATI(stream::GLenum, coords::Ptr{GLfloat})::Void export glVertexStream4fvATI @getCFun "libGL" glVertexStream4dATI glVertexStream4dATI(stream::GLenum, x::GLdouble, y::GLdouble, z::GLdouble, w::GLdouble)::Void export glVertexStream4dATI @getCFun "libGL" glVertexStream4dvATI glVertexStream4dvATI(stream::GLenum, coords::Ptr{GLdouble})::Void export glVertexStream4dvATI @getCFun "libGL" glNormalStream3bATI glNormalStream3bATI(stream::GLenum, nx::GLbyte, ny::GLbyte, nz::GLbyte)::Void export glNormalStream3bATI @getCFun "libGL" glNormalStream3bvATI glNormalStream3bvATI(stream::GLenum, coords::Ptr{GLbyte})::Void export glNormalStream3bvATI @getCFun "libGL" glNormalStream3sATI glNormalStream3sATI(stream::GLenum, nx::GLshort, ny::GLshort, nz::GLshort)::Void export glNormalStream3sATI @getCFun "libGL" glNormalStream3svATI glNormalStream3svATI(stream::GLenum, coords::Ptr{GLshort})::Void export glNormalStream3svATI @getCFun "libGL" glNormalStream3iATI glNormalStream3iATI(stream::GLenum, nx::GLint, ny::GLint, nz::GLint)::Void export glNormalStream3iATI @getCFun "libGL" glNormalStream3ivATI glNormalStream3ivATI(stream::GLenum, coords::Ptr{GLint})::Void export glNormalStream3ivATI @getCFun "libGL" glNormalStream3fATI glNormalStream3fATI(stream::GLenum, nx::GLfloat, ny::GLfloat, nz::GLfloat)::Void export glNormalStream3fATI @getCFun "libGL" glNormalStream3fvATI glNormalStream3fvATI(stream::GLenum, coords::Ptr{GLfloat})::Void export glNormalStream3fvATI @getCFun "libGL" glNormalStream3dATI glNormalStream3dATI(stream::GLenum, nx::GLdouble, ny::GLdouble, nz::GLdouble)::Void export glNormalStream3dATI @getCFun "libGL" glNormalStream3dvATI glNormalStream3dvATI(stream::GLenum, coords::Ptr{GLdouble})::Void export glNormalStream3dvATI @getCFun "libGL" glClientActiveVertexStreamATI glClientActiveVertexStreamATI(stream::GLenum)::Void export glClientActiveVertexStreamATI @getCFun "libGL" glVertexBlendEnviATI glVertexBlendEnviATI(pname::GLenum, param::GLint)::Void export glVertexBlendEnviATI @getCFun "libGL" glVertexBlendEnvfATI glVertexBlendEnvfATI(pname::GLenum, param::GLfloat)::Void export glVertexBlendEnvfATI end
using AWSCore using AWSS3 using AWSLambda JL_VERSION_BASE="0.6" JL_VERSION_PATCH="4" JL_VERSION="$JL_VERSION_BASE.$JL_VERSION_PATCH" image_name = "octech/$(replace(basename(pwd()), "_", "")):$JL_VERSION" lambda_name = basename(pwd()) source_bucket = "octech.com.au.ap-southeast-2.awslambda.jl.deploy" base_zip = "$(lambda_name)_$(VERSION)_$(AWSLambda.aws_lamabda_jl_version).zip" if length(ARGS) == 0 || ARGS[1] == "build" cp("../../src/AWSLambda.jl", "AWSLambda.jl"; remove_destination=true) run(`docker build --build-arg JL_VERSION_BASE=$JL_VERSION_BASE --build-arg JL_VERSION_PATCH=$JL_VERSION_PATCH -t $image_name .`) end if length(ARGS) > 0 && ARGS[1] == "shell" run(`docker run --rm -it -v $(pwd()):/var/host $image_name bash`) end if length(ARGS) > 0 && ARGS[1] == "zip" rm(base_zip; force=true) cmd = `zip --symlinks -r -9 /var/host/$base_zip .` run(`docker run --rm -it -v $(pwd()):/var/host $image_name $cmd`) end if length(ARGS) > 0 && ARGS[1] == "deploy" AWSCore.Services.s3("PUT", "/$source_bucket/$base_zip", headers=Dict("x-amz-acl" => "public-read"), Body=read(base_zip)) end if length(ARGS) > 0 && ARGS[1] == "deploy_regions" lambda_regions = ["us-east-1", "us-east-2", "us-west-1", "us-west-2", "ap-northeast-2", "ap-south-1", "ap-southeast-1", "ap-northeast-1", "eu-central-1", "eu-west-1", "eu-west-2"] @sync for r in lambda_regions raws = merge(default_aws_config(), Dict(:region => r)) bucket = "octech.com.au.$r.awslambda.jl.deploy" @async begin s3_create_bucket(raws, bucket) AWSS3.s3(default_aws_config(), "PUT", bucket; path = base_zip, headers = Dict( "x-amz-copy-source" => "$source_bucket/$base_zip", "x-amz-acl" => "public-read")) end end end
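# Invocation notes (inferred from the ARGS handling above; the script file name
# "build.jl" is an assumption, substitute the actual name):
#
#   julia build.jl                  # no args defaults to "build"
#   julia build.jl build            # build the Docker image for this lambda
#   julia build.jl shell            # open a shell inside the image, with the cwd mounted
#   julia build.jl zip              # produce $base_zip from inside the container
#   julia build.jl deploy           # upload $base_zip to $source_bucket with a public-read ACL
#   julia build.jl deploy_regions   # copy the uploaded zip into one bucket per Lambda region
#
# A docker install on PATH is assumed throughout, e.g.
# success(`docker --version`) || error("docker not found")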
using BetaVQE using Yao, Yao.EasyBuild, Yao.BitBasis using Test, Random, StatsBase using Zygote using BetaVQE.VAN @testset "VAN" begin include("VAN/VAN.jl") end @testset "sample" begin Random.seed!(3) nbits = 4 nhiddens = [10, 20] nsamples = 1000 β = 1.0 h = heisenberg(nbits) c = dispatch!(variational_circuit(nbits), :random) model = AutoRegressiveModel(nbits, nhiddens) configs = bitarray(collect(0:(1<<nbits-1)), nbits) logp = get_logp(model, configs) @test isapprox(sum(exp.(logp)), 1.0, rtol=1e-2) f = sum(exp.(logp) .* free_energy_local(β, h, model, c, configs)) samples = gen_samples(model, nsamples) f_sample = free_energy(β, h, model, c, samples) @test isapprox(f, f_sample, rtol=1e-1) end @testset "circuit diff" begin Random.seed!(3) nbits = 4 nhiddens = [10] nsamples = 2000 h = heisenberg(nbits) c = dispatch!(variational_circuit(nbits), :random) model = AutoRegressiveModel(nbits, nhiddens) samples = gen_samples(model, nsamples) # check the gradient of circuit parameters params = parameters(c) ϵ = 1e-5 for k in 1:length(params) params[k] -= ϵ dispatch!(c, params) l1 = free_energy(2.0, h, model, c, samples) params[k] += 2ϵ dispatch!(c, params) l2 = free_energy(2.0, h, model, c, samples) params[k] -= ϵ dispatch!(c, params) g = (l2-l1)/2ϵ g2 = gradient(c->free_energy(2.0, h, model, c, samples), c)[1] @test isapprox(g, g2[k], rtol=1e-2) end end @testset "network diff" begin Random.seed!(7) nbits = 2 nhiddens = [10] nsamples = 1000 h = heisenberg(nbits) c = dispatch!(variational_circuit(nbits), :random) model = AutoRegressiveModel(nbits, nhiddens) ϵ = 1e-5 # check the gradient of model configs = bitarray(collect(0:(1<<nbits-1)), nbits) function f(model) logp = get_logp(model, configs) sum(exp.(logp) .* free_energy_local(2.0, h, model, c, configs)) end m, n = 1, 1 params = model_parameters(model) params[m][n] -= ϵ model_dispatch!(model, params) l1 = f(model) params[m][n] += 2ϵ model_dispatch!(model, params) l2 = f(model) g = (l2-l1)/2ϵ #tune it back params[m][n] -= ϵ model_dispatch!(model, params) samples = gen_samples(model, nsamples) g2 = gradient(model->free_energy(2.0, h, model, c, samples), model)[1] @test isapprox(g, g2.W[1][n], rtol=1e-1) @show g, g2.W[1][n] end @testset "tns circuit diff" begin Random.seed!(3) nx = 2 ny = 2 nbits = nx*ny depth = 3 nhiddens = [10] nsamples = 2000 h = heisenberg(nbits) c = tns_circuit(nbits, depth, EasyBuild.pair_square(nx, ny; periodic=false); entangler=(n,i,j)->put(n,(i,j)=>general_U4(rand(15)*2π))) model = AutoRegressiveModel(nbits, nhiddens) samples = gen_samples(model, nsamples) # check the gradient of circuit parameters params = parameters(c) ϵ = 1e-5 for k in 1:length(params) params[k] -= ϵ dispatch!(c, params) l1 = free_energy(2.0, h, model, c, samples) params[k] += 2ϵ dispatch!(c, params) l2 = free_energy(2.0, h, model, c, samples) params[k] -= ϵ dispatch!(c, params) g = (l2-l1)/2ϵ g2 = gradient(c->free_energy(2.0, h, model, c, samples), c)[1] @test isapprox(g, g2[k], atol=1e-2) end end @testset "train" begin nbits = 4 h = hamiltonian(TFIM(nbits, 1; Γ=0.0, periodic=false)) network = PSAModel(nbits) circuit = tns_circuit(nbits, 2, EasyBuild.pair_square(nbits, 1; periodic=false); entangler=(n,i,j)->put(n,(i,j)=>general_U4())) network_params, circuit_params = train(1.0, h, network, circuit; nbatch=1000, niter=100) samples = gen_samples(network, 1000) @test free_energy(1.0, h, network, circuit, samples) <= -3.2 end
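# The "circuit diff" and "tns circuit diff" testsets above repeat the same central
# finite-difference check. A small helper capturing that pattern (a sketch, not part
# of the test suite; `parameters`, `dispatch!` and the `loss` closure are exactly the
# pieces used above):
function central_diff_circuit_grad(loss, c, k; ϵ=1e-5)
    params = parameters(c)
    params[k] -= ϵ
    dispatch!(c, params)
    l1 = loss(c)
    params[k] += 2ϵ
    dispatch!(c, params)
    l2 = loss(c)
    params[k] -= ϵ              # restore the original parameter value
    dispatch!(c, params)
    return (l2 - l1) / 2ϵ
end
# e.g. central_diff_circuit_grad(c -> free_energy(2.0, h, model, c, samples), c, k)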
# constants
const a = 6.378137e6       # semi-major axis of the earth (GRS80), in metres
const F = 298.257222101    # inverse flattening 1/f (GRS80)
const n = 1/(2F-1)         # third flattening n = f/(2-f)
const m₀ = 0.9999          # central meridian scale factor
const A₀ = 1 + n^2/4 + n^4/64
const A̅ = A₀/(1+n)*m₀*a    # scaled rectifying radius used in the projection formulas

## latitude, longitude of origin for the plane rectangular coordinate zones in Japan
const Origin_LatLon_Japan = [
    33.0 129.5     ; # 1
    33.0 131.0     ; # 2
    36.0 132.0+1/6 ; # 3
    33.0 133.5     ; # 4
    36.0 134.0+1/3 ; # 5
    36.0 136.0     ; # 6
    36.0 137.0+1/6 ; # 7
    36.0 138.5     ; # 8
    36.0 139.0+5/6 ; # 9
    40.0 140.0+5/6 ; # 10
    44.0 140.25    ; # 11
    44.0 142.25    ; # 12
    44.0 144.25    ; # 13
    26.0 142.0     ; # 14
    26.0 127.5     ; # 15
    26.0 124.0     ; # 16
    26.0 131.0     ; # 17
    20.0 136.0     ; # 18
    26.0 154.0     ; # 19
]
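# Convenience lookup (a sketch, not in the original constants file): return the
# origin of a Japanese plane rectangular coordinate zone as (latitude, longitude)
# in degrees, matching the row layout of the table above.
function zone_origin(zone::Integer)
    1 <= zone <= size(Origin_LatLon_Japan, 1) || throw(ArgumentError("zone must be between 1 and 19"))
    return Origin_LatLon_Japan[zone, 1], Origin_LatLon_Japan[zone, 2]
end
# zone_origin(9)  # -> (36.0, 139.8333...)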
using JuMP, EAGO m = Model() EAGO.register_eago_operators!(m) @variable(m, -1 <= x[i=1:4] <= 1) @variable(m, -32.35079088597315 <= q <= 33.04707617725232) add_NL_constraint(m, :(log(1 + exp(0.7516814527569986 + 0.07211062597860263*log(1 + exp(0.800956367833014 + 0.1575422515944389*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.30181017969165413*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.5024447348308962*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + -0.7463877606567912*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.28099609060437203*log(1 + exp(-0.8241959750660866 + 0.48752067922981546*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + -0.8942226763869914*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + -0.1882956347370066*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.0613423792438379*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + 0.19401527858092393*log(1 + exp(-0.014178711859067938 + -0.6496362145686132*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5744456236018198*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.06990002879133872*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.11738131892224501*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.43991506890783594*log(1 + exp(-0.38559964579408224 + -0.6882459841770032*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5869261088566509*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.7286086900917508*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.9385542676650069*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))))) + log(1 + exp(0.5442086655639207 + -0.859033717055464*log(1 + exp(0.800956367833014 + 0.1575422515944389*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.30181017969165413*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + 
-0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.5024447348308962*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + -0.7463877606567912*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.6994107397450926*log(1 + exp(-0.8241959750660866 + 0.48752067922981546*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + -0.8942226763869914*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + -0.1882956347370066*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.0613423792438379*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.21447765318835765*log(1 + exp(-0.014178711859067938 + -0.6496362145686132*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5744456236018198*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.06990002879133872*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.11738131892224501*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + 0.9247182813449877*log(1 + exp(-0.38559964579408224 + -0.6882459841770032*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5869261088566509*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.7286086900917508*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.9385542676650069*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))))) + log(1 + exp(-0.33457802196328057 + 0.8511359109817849*log(1 + exp(0.800956367833014 + 0.1575422515944389*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.30181017969165413*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.5024447348308962*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + -0.7463877606567912*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + 0.9056512987027685*log(1 + exp(-0.8241959750660866 + 0.48752067922981546*log(1 + 
exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + -0.8942226763869914*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + -0.1882956347370066*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.0613423792438379*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.901983268277478*log(1 + exp(-0.014178711859067938 + -0.6496362145686132*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5744456236018198*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.06990002879133872*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.11738131892224501*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.02854306019224717*log(1 + exp(-0.38559964579408224 + -0.6882459841770032*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5869261088566509*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.7286086900917508*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.9385542676650069*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))))) + log(1 + exp(0.85223750045835 + -0.7468516096711739*log(1 + exp(0.800956367833014 + 0.1575422515944389*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.30181017969165413*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.5024447348308962*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + -0.7463877606567912*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.1269236474608202*log(1 + exp(-0.8241959750660866 + 0.48752067922981546*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + -0.8942226763869914*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + -0.1882956347370066*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.0613423792438379*log(1 + 
exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.5407492362268407*log(1 + exp(-0.014178711859067938 + -0.6496362145686132*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5744456236018198*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.06990002879133872*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.11738131892224501*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))) + -0.8670343803826994*log(1 + exp(-0.38559964579408224 + -0.6882459841770032*log(1 + exp(-0.7991039641167363 + 0.28486813994855353*$(x[1]) + 0.682420711072186*$(x[2]) + 0.05952972537514656*$(x[3]) + 0.998655851604032*$(x[4]))) + 0.5869261088566509*log(1 + exp(-0.9827986828893067 + 0.8570099551669048*$(x[1]) + -0.029287184975956393*$(x[2]) + 0.28818320654817864*$(x[3]) + -0.9376552616355038*$(x[4]))) + 0.7286086900917508*log(1 + exp(0.33306588949676286 + 0.667638331016386*$(x[1]) + 0.7691056200843742*$(x[2]) + 0.5908191290581821*$(x[3]) + 0.06769592108634237*$(x[4]))) + 0.9385542676650069*log(1 + exp(-0.7350489056562997 + 0.545921427040915*$(x[1]) + -0.3116746162014481*$(x[2]) + 0.844944868364411*$(x[3]) + -0.9086564944285445*$(x[4]))))))) - $q <= 0.0)) @objective(m, Min, q) return m
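# Sketch of how the model returned above might be solved (commented out because the
# `return m` suggests this file is included inside a model-building function).
# Assumptions: EAGO is attached as the global optimizer and the JuMP version matches
# the `add_NL_constraint` API used above.
#
#   m = build_model()                  # hypothetical wrapper around this file
#   set_optimizer(m, EAGO.Optimizer)
#   optimize!(m)
#   @show termination_status(m) objective_value(m)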
# Networks found by Bose-Nelson algorithm, except for N=16 # http://pages.ripco.net/~jgamble/nw.html # http://www.cs.brandeis.edu/~hugues/sorting_networks.html # https://github.com/JeffreySarnoff/SortingNetworks.jl sorting_network_parameters = Dict( 2 => (((1,2),),), 4 => (((1,2), (3,4)), ((1,3), (2,4)), ((2,3),)), 8 => ( ((1, 2), (3, 4), (5, 6), (7, 8)), ((1, 3), (2, 4), (5, 7), (6, 8)), ((2, 3), (6, 7), (1, 5), (4, 8)), ((2, 6), (3, 7)), ((2, 5), (4, 7)), ((3, 5), (4, 6)), ((4, 5),) ), 16 => ( ((4, 11), (12, 15), (5, 14), (3, 13), (1, 7), (9, 10), (2,8), (6, 16)), ((1, 2), (3, 5), (7, 8), (13, 14), (4, 6), (9, 12), (11, 16), (10, 15)), ((1, 4), (7, 11), (14, 15), (2, 6), (8, 16), (3, 9), (10, 13), (5, 12)), ((1, 3), (8, 14), (15, 16), (2, 5), (6, 12), (4, 9), (11, 13), (7, 10)), ((2, 3), (4, 7), (8, 9), (12, 14), (6, 10), (13, 15), (5, 11)), ((3, 7), (12, 13), (2, 4), (6, 8), (9, 10), (14, 15)), ((3, 4), (5, 7), (11, 12), (13, 14)), ((9, 11), (5, 6), (7, 8), (10, 12)), ((4, 5), (6, 7), (8, 9), (10, 11), (12, 13)), ((7, 8), (9, 10)) ), 32 => ( ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16), (17, 18), (19, 20), (21, 22), (23, 24), (25, 26), (27, 28), (29, 30), (31, 32)), ((1, 3), (2, 4), (5, 7), (6, 8), (9, 11), (10, 12), (13, 15), (14, 16), (17, 19), (18, 20), (21, 23), (22, 24), (25, 27), (26, 28), (29, 31), (30, 32)), ((2, 3), (6, 7), (1, 5), (4, 8), (10, 11), (14, 15), (9, 13), (12, 16), (18, 19), (22, 23), (17, 21), (20, 24), (26, 27), (30, 31), (25, 29), (28, 32)), ((2, 6), (3, 7), (10, 14), (11, 15), (1, 9), (8, 16), (18, 22), (19, 23), (26, 30), (27, 31), (17, 25), (24, 32)), ((2, 5), (4, 7), (10, 13), (12, 15), (18, 21), (20, 23), (26, 29), (28, 31), (1, 17), (16, 32)), ((3, 5), (4, 6), (11, 13), (12, 14), (2, 10), (7, 15), (19, 21), (20, 22), (27, 29), (28, 30), (18, 26), (23, 31)), ((4, 5), (12, 13), (2, 9), (3, 11), (6, 14), (8, 15), (20, 21), (28, 29), (18, 25), (19, 27), (22, 30), (24, 31)), ((4, 12), (3, 9), (5, 13), (8, 14), (20, 28), (19, 25), (21, 29), (24, 30), (2, 18), (15, 31)), ((4, 11), (6, 13), (20, 27), (22, 29), (2, 17), (3, 19), (14, 30), (16, 31)), ((4, 10), (7, 13), (20, 26), (23, 29), (3, 17), (16, 30)), ((4, 9), (8, 13), (6, 10), (7, 11), (20, 25), (24, 29), (22, 26), (23, 27)), ((5, 9), (8, 12), (21, 25), (24, 28), (4, 20), (13, 29)), ((6, 9), (8, 11), (22, 25), (24, 27), (4, 19), (5, 21), (12, 28), (14, 29)), ((7, 9), (8, 10), (23, 25), (24, 26), (4, 18), (6, 22), (11, 27), (15, 29)), ((8, 9), (24, 25), (4, 17), (6, 21), (7, 23), (10, 26), (12, 27), (16, 29)), ((8, 24), (7, 21), (5, 17), (6, 18), (9, 25), (12, 26), (15, 27), (16, 28)), ((8, 23), (6, 17), (7, 19), (10, 25), (14, 26), (16, 27)), ((8, 22), (7, 17), (11, 25), (16, 26)), ((8, 21), (12, 25)), ((8, 20), (13, 25)), ((8, 19), (14, 25), (12, 20), (13, 21)), ((8, 18), (15, 25), (11, 19), (14, 22)), ((8, 17), (16, 25), (10, 18), (12, 19), (14, 21), (15, 23)), ((9, 17), (12, 18), (16, 24), (15, 21)), ((10, 17), (16, 23), (14, 18), (15, 19)), ((11, 17), (16, 22)), ((12, 17), (16, 21)), ((13, 17), (16, 20)), ((14, 17), (16, 19)), ((15, 17), (16, 18)), ((16, 17),) ) )
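# Sketch (not part of the original table): apply one of the networks above to sort a
# vector in place. Each entry of the Dict maps an input size to a tuple of layers,
# and each layer is a tuple of independent compare-exchange pairs of 1-based indices.
function apply_sorting_network!(v::AbstractVector, network)
    for layer in network, (i, j) in layer
        if v[i] > v[j]
            v[i], v[j] = v[j], v[i]     # compare-exchange
        end
    end
    return v
end
# apply_sorting_network!(rand(16), sorting_network_parameters[16])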
struct Framework plugins Framework(plugins, hooks=[]; opts...) = new(PluginStack(plugins, hooks; opts...)) end struct EmptyPlugin{Id} <: Plugin EmptyPlugin{Id}(;opts...) where Id = new{Id}() end Plugins.symbol(::EmptyPlugin) = :empty for i = 1:1000 Plugins.register(EmptyPlugin{i}) end mutable struct CounterPlugin{Id} <: Plugin hook1count::Int hook2count::Int hook3count::Int CounterPlugin{Id}() where Id = new{Id}(0, 0, 0) end Plugins.symbol(::CounterPlugin) = :counter for i = 1:100 Plugins.register(CounterPlugin{i}) end @inline hook1(plugin::CounterPlugin, framework) = begin plugin.hook1count += 1 return false end hook2_handler(plugin::CounterPlugin, framework) = begin plugin.hook2count += 1 return false end @inline hook3(plugin::CounterPlugin, framework, p1, p2) = begin plugin.hook3count += p2 return false end chain_of_empties(length=20, startat=0) = [EmptyPlugin{i + startat} for i = 1:length] callmanytimes(framework, hook, times=1e5) = for i=1:times hook(framework) end mutable struct FrameworkTestPlugin <: Plugin calledwithframework FrameworkTestPlugin() = new("Never called") end Plugins.register(FrameworkTestPlugin) hook1(plugin::FrameworkTestPlugin, framework) = plugin.calledwithframework = framework mutable struct EventTestPlugin <: Plugin calledwithframework calledwithevent EventTestPlugin() = new("Never called", "Never called") end Plugins.register(EventTestPlugin) event_handler(plugin::EventTestPlugin, framework, event) = begin plugin.calledwithframework = framework plugin.calledwithevent = event end struct ConfigurablePlugin <: Plugin config::String ConfigurablePlugin(;config = "default") = new(config) end Plugins.register(ConfigurablePlugin) checkconfig_handler(plugin::ConfigurablePlugin, framework, event) = begin if event.config !== plugin.config throw("Not the same!") end return plugin.config end struct PropagationStopperPlugin <: Plugin end Plugins.register(PropagationStopperPlugin) propagationtest(plugin::PropagationStopperPlugin, framework, data) = data == 42 propagationtest_nodata(plugin::PropagationStopperPlugin, framework) = true struct PropagationCheckerPlugin <: Plugin end Plugins.register(PropagationCheckerPlugin) propagationtest(plugin::PropagationCheckerPlugin, framework, data) = data === 32 || throw("Not 32!") propagationtest_nodata(plugin::PropagationCheckerPlugin, framework) = throw("Not stopped!") mutable struct DynamicPlugin <: Plugin lastdata end Plugins.register(DynamicPlugin) dynamismtest(plugin::DynamicPlugin, framework, data) = plugin.lastdata = data # Hook cache test: mutable struct SharedState plugins::PluginStack shared_counter::Int end struct App{TCache} state::SharedState hooks::TCache function App(plugins, hooklist) state = SharedState(PluginStack(plugins, hooklist), 0) cache = hook_cache(state.plugins) return new{typeof(cache)}(state, cache) end end const OP_CYCLES = 1e7 function op(app::App) counters = [counter for counter in app.state.plugins if counter isa CounterPlugin] @info "op: A sample operation on the app, involving hook1() calls in a semi-realistic setting." @info "op: $(length(counters)) CounterPlugins found, $(length(app.state.plugins)) plugins in total, each CounterPlugin incrementing a private counter." start_ts = time_ns() for i in 1:OP_CYCLES app.hooks.hook3(app, i, 1) end end_ts = time_ns() for i = 1:length(counters) @test counters[i].hook3count == OP_CYCLES end time_diff = end_ts - start_ts avg_calltime = time_diff / OP_CYCLES @info "op: $OP_CYCLES hook1() calls took $(time_diff / 1e9) secs. 
That is $avg_calltime nanosecs per call on average, or $(avg_calltime / length(counters)) ns per in-plugin counter increment." end mutable struct LifeCycleTestPlugin <: Plugin setupcalledwith shutdowncalledwith deferredinitcalledwith LifeCycleTestPlugin() = new() end Plugins.register(LifeCycleTestPlugin) Plugins.setup!(plugin::LifeCycleTestPlugin, framework) = plugin.setupcalledwith = framework Plugins.shutdown!(plugin::LifeCycleTestPlugin, framework) = begin plugin.shutdowncalledwith = framework if framework === 42 error("shutdown called with 42") end end deferred_init(plugin::Plugin, ::Any) = true deferred_init(plugin::LifeCycleTestPlugin, data) = plugin.deferredinitcalledwith = data @testset "Plugins.jl basics" begin @testset "Plugin chain" begin a1 = Framework([CounterPlugin{1}, EmptyPlugin]) @show innerplugin = a1.plugins[:empty] @show counter = a1.plugins[:counter] a1_hook1s = hooklist(a1.plugins, hook1) @test length(a1_hook1s) === 1 callmanytimes(a1, a1_hook1s) @info "$(length(a1.plugins))-length chain, $(length(a1_hook1s)) counter (1e5 cycles):" @time callmanytimes(a1, a1_hook1s) hooklist(a1.plugins, hook2_handler)(a1) @test counter.hook1count == 2e5 @test counter.hook2count == 1 end @testset "Same plugin twice" begin a2 = Framework([CounterPlugin{1}, CounterPlugin{2}]) innercounter = a2.plugins[2] outercounter = a2.plugins[1] a2_hook1s = hooklist(a2.plugins, hook1) @test length(a2_hook1s) === 2 callmanytimes(a2, a2_hook1s) @info "$(length(a2.plugins))-length chain, $(length(a2_hook1s)) counters (1e5 cycles):" @time callmanytimes(a2, a2_hook1s) hooklist(a2.plugins, hook2_handler)(a2) @test innercounter.hook1count == 2e5 @test innercounter.hook2count == 1 @test outercounter.hook1count == 2e5 @test outercounter.hook2count == 1 end @testset "Chain of empty Plugins to skip" begin chainedapp = Framework(vcat([CounterPlugin{1}], chain_of_empties(20), [CounterPlugin{2}], chain_of_empties(20, 21))) innerplugin = chainedapp.plugins[22] outerplugin = chainedapp.plugins[1] chainedapp_hook1s = hooklist(chainedapp.plugins, hook1) callmanytimes(chainedapp, chainedapp_hook1s) @info "$(length(chainedapp.plugins))-length chain, $(length(chainedapp_hook1s)) counters (1e5 cycles):" @time callmanytimes(chainedapp, chainedapp_hook1s) @test outerplugin.hook1count == 2e5 @test outerplugin.hook2count == 0 @test innerplugin.hook1count == 2e5 @test innerplugin.hook2count == 0 end @testset "Unhandled hook returns false" begin app = Framework([EmptyPlugin{1}]) @test hooklist(app.plugins, hook1)() == false @test call_optional(hooklist(app.plugins, hook1)) == nothing end @testset "Framework goes through" begin frameworktestapp = Framework([EmptyPlugin{1}, FrameworkTestPlugin]) hooklist(frameworktestapp.plugins, hook1)(frameworktestapp) @test frameworktestapp.plugins[2].calledwithframework === frameworktestapp end @testset "Event object" begin eventtestapp = Framework([EmptyPlugin{1}, EventTestPlugin]) event = (name="test event", data=42) hooklist(eventtestapp.plugins, event_handler)(eventtestapp, event) @test eventtestapp.plugins[2].calledwithframework === eventtestapp @test eventtestapp.plugins[2].calledwithevent === event end @testset "Multiple apps with same chain, differently configured" begin app2config = "app2config" app1 = Framework([EmptyPlugin{1}, ConfigurablePlugin]) app2 = Framework([EmptyPlugin{2}, ConfigurablePlugin]; config=app2config) event1 = (config ="default",) event2 = (config = app2config,) hooklist(app1.plugins, checkconfig_handler)(app1, event1) @test_throws String hooklist(app1.plugins, 
checkconfig_handler)(app1, event2) @test call_optional(hooklist(app1.plugins, checkconfig_handler), app1, event1) == event1.config hooklist(app2.plugins, checkconfig_handler)(app2, event2) @test_throws String hooklist(app2.plugins, checkconfig_handler)(app2, event1) @test call_optional(hooklist(app2.plugins, checkconfig_handler), app2, event2) == event2.config end @testset "Stopping Propagation" begin spapp = Framework([EmptyPlugin{1}, PropagationStopperPlugin, EmptyPlugin{2}, PropagationCheckerPlugin]) hooklist(spapp.plugins, propagationtest)(spapp, 42) === true # It is stopped so the checker does not throw hooklist(spapp.plugins, propagationtest)(spapp, 32) === false # Not stopped but accepted by the checker @test_throws String hooklist(spapp.plugins, propagationtest)(spapp, 41) @test hooklist(spapp.plugins, propagationtest_nodata)(spapp) === true end @testset "HookList iteration" begin iapp = Framework([EmptyPlugin{1}, CounterPlugin{2}, EmptyPlugin{2}, CounterPlugin{1}, EmptyPlugin{3}]) c1 = iapp.plugins[4] c2 = iapp.plugins[2] hookers = [c2, c1] @test length(hooklist(iapp.plugins, hook1)) === 2 i = 1 for hook in hooklist(iapp.plugins, hook1) @test hookers[i] === hook.plugin i += 1 end end @testset "Accessing plugins directly" begin app = Framework([EmptyPlugin{1}, CounterPlugin{1}]) empty = app.plugins[1] counter = app.plugins[2] @test get(app.plugins, :empty) === empty @test get(app.plugins, :counter) === counter @test app.plugins[:empty] === empty @test length(app.plugins) == 2 end @testset "Hook cache" begin counters = [CounterPlugin{i} for i=2:40] empties = [EmptyPlugin{i} for i=1:100] pluginarr = [CounterPlugin{1}, empties..., counters...] @info "Measuring time to first hook call with $(length(pluginarr)) uniquely typed plugins, $(length(counters) + 1) implementig the hook." 
@time begin simpleapp = SharedState(PluginStack(pluginarr, [hook1]), 0) simpleapp_hooks = hooks(simpleapp) simpleapp_hooks.hook1(simpleapp) @test simpleapp.plugins[1].hook1count == 1 end end @testset "Hook cache as type parameter" begin counters = [CounterPlugin{i} for i=2:2] empties = [EmptyPlugin{i} for i=1:100] app = App([CounterPlugin{1}, empties..., counters...], [hook3]) op(app) end @testset "Lifecycle Hooks" begin app = Framework([EmptyPlugin{1}, LifeCycleTestPlugin]) plugin = app.plugins[2] @test setup!(app.plugins, app).allok == true @test plugin.setupcalledwith === app # Create a non-standard lifecycle hook lifecycle_hook = Plugins.create_lifecyclehook(deferred_init) @test string(lifecycle_hook) == "deferred_init" @test lifecycle_hook(app.plugins, "42").allok === true @test plugin.deferredinitcalledwith === "42" @test Plugins.shutdown!(app.plugins, app).allok === true @test plugin.shutdowncalledwith === app notallok = Plugins.shutdown!(app.plugins, 42) @test notallok.allok === false @test (notallok.results[2] isa Tuple) === true @test (notallok.results[2][1] isa Exception) === true @test (stacktrace(notallok.results[2][2]) isa AbstractVector{StackTraces.StackFrame}) === true @test plugin.shutdowncalledwith === 42 end @testset "Modifying plugins" begin c2 = CounterPlugin{2}() app = Framework([EmptyPlugin, CounterPlugin{1}], [hook1]) c1 = app.plugins[2] cache = hooks(app) cache.hook1(app) push!(app.plugins, c2) cache = hooks(app) cache.hook1(app) @test c2.hook1count == 1 @test c1.hook1count == 2 @test pop!(app.plugins) === c2 cache = hooks(app) cache.hook1(app) @test c2.hook1count == 1 @test c1.hook1count == 3 @test popfirst!(app.plugins) isa EmptyPlugin cache = hooks(app) cache.hook1(app) @test c2.hook1count == 1 @test c1.hook1count == 4 pushfirst!(app.plugins, c2) cache = hooks(app) cache.hook1(app) @test c2.hook1count == 2 @test c1.hook1count == 5 @test app.plugins[1] === c2 pop!(app.plugins) @test length(app.plugins) == 1 @test isempty(app.plugins) == false pop!(app.plugins) @test length(app.plugins) == 0 @test isempty(app.plugins) == true @test_throws ArgumentError pop!(app.plugins) app = Framework([EmptyPlugin{1}, CounterPlugin{3}], [hook1]) c3 = app.plugins[:counter] @test isempty(app.plugins) == false cache = hooks(app) cache.hook1(app) @test c3.hook1count == 1 empty!(app.plugins) @test isempty(app.plugins) == true cache = hooks(app) cache.hook1(app) @test c3.hook1count == 1 c5 = CounterPlugin{5}() app = Framework([EmptyPlugin{1}, CounterPlugin{4}], [hook1]) c4 = app.plugins[:counter] @test isempty(app.plugins) == false cache = hooks(app) cache.hook1(app) @test c4.hook1count == 1 @test c5.hook1count == 0 app.plugins[2] = c5 cache = hooks(app) cache.hook1(app) @test c4.hook1count == 1 @test c5.hook1count == 1 end end
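# Minimal end-to-end sketch mirroring the patterns exercised above (illustration only,
# kept commented out so it does not register extra plugin types into the test run;
# `GreeterPlugin` and `greet_hook` are hypothetical names):
#
#   struct GreeterPlugin <: Plugin end
#   Plugins.register(GreeterPlugin)
#   Plugins.symbol(::GreeterPlugin) = :greeter
#   greet_hook(plugin::GreeterPlugin, framework) = (println("hello from the plugin stack"); false)
#
#   app = Framework([GreeterPlugin], [greet_hook])
#   hooks(app).greet_hook(app)          # dispatches through the cached hook list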
""" function function_value(f::Function, fs::ScalarFunctionSpace{dim,T}, cell::Int,q_point::Int) where {dim,T} Evaluate function f(x): x ∈ ℝⁿ ↦ ℝ with x given by cell number `cell` and quadrature point with index `qpoint` using fs FunctionSpace data """ function function_value(f::Function, fs::ScalarFunctionSpace{dim,T}, cell::Int,q_point::Int) where {dim,T} coords = get_cell_coordinates(cell, fs.mesh) return f(spatial_coordinate(fs, q_point, coords)) end """ function spatial_coordinate(fs::ScalarFunctionSpace{dim}, q_point::Int, x::AbstractVector{Vec{dim,T}}) Map coordinates of quadrature point `q_point` of Scalar Function Space `fs` into domain with vertices `x` """ function spatial_coordinate(fs::ScalarFunctionSpace{dim}, q_point::Int, x::AbstractVector{Vec{dim,T}}) where {dim,T} n_base_funcs = getngeobasefunctions(fs) @assert length(x) == n_base_funcs vec = zero(Vec{dim,T}) @inbounds for i in 1:n_base_funcs vec += geometric_value(fs, q_point, i) * x[i] end return vec end # Trial Functions struct TrialFunction{dim,T,shape,N1} fs::DiscreteFunctionSpace m_values::Array{T,N1} components::Int end @inline getnbasefunctions(u::TrialFunction) = getnbasefunctions(u.fs) @inline getfunctionspace(u::TrialFunction) = u.fs @inline getnlocaldofs(u::TrialFunction) = getnlocaldofs(getfunctionspace(u)) @inline getncomponents(u::TrialFunction) = u.components function TrialFunction(fs::ScalarTraceFunctionSpace{dim,T}) where {dim,T} mesh = getmesh(fs) m_values = fill(zero(T) * T(NaN), getncells(mesh), getnbasefunctions(fs), n_faces_per_cell(mesh)) return TrialFunction{dim,T,getshape(getfiniteelement(fs)),3}(fs, m_values, 1) end function TrialFunction(fs::ScalarFunctionSpace{dim,T}) where {dim,T} mesh = getmesh(fs) m_values = fill(zero(T) * T(NaN), getncells(mesh), getnbasefunctions(fs)) return TrialFunction{dim,T,getshape(getfiniteelement(fs)),2}(fs, m_values, 1) end function TrialFunction(fs::VectorFunctionSpace{dim,T}) where {dim,T} mesh = getmesh(fs) m_values = fill(zero(T) * T(NaN), getncells(mesh), getnbasefunctions(fs)) return TrialFunction{dim,T,getshape(getfiniteelement(fs)),2}(fs, m_values, dim) end function TrialFunction(fs::DiscreteFunctionSpace{dim}, components::Int, m_values::Array{T,N}) where {dim,T,N} mesh = getmesh(fs) @assert size(m_values,1) == getncells(mesh) @assert size(m_values,2) == getnbasefunctions(fs) return TrialFunction{dim,T,getshape(getfiniteelement(fs)),N}(fs, m_values, components) end reference_coordinate(fs::ScalarFunctionSpace{dim,T}, x_ref::Vec{dim,T}, x::Vec{dim,T}) where {dim,T} = fs.Jinv[]⋅(x-x_ref) """ function value(u_h::TrialFunction{dim,T}, cell::Int, x::Vec{dim,T}) get trial function value on cell `cell` at point `x` """ function value(u_h::TrialFunction{dim,T}, node::Int, cell::Int) where {dim,T} u = zero(T) mesh = getmesh(u_h.fs) x = mesh.nodes[node].x ξ = reference_coordinate(u_h.fs, mesh.nodes[mesh.cells[cell].nodes[1]].x, x) for i in 1:getnbasefunctions(u_h.fs) u += u_h.m_values[cell, i]*value(getfiniteelement(u_h.fs), i, ξ) end return u end function nodal_avg(u_h::TrialFunction{dim,T}) where {dim,T} mesh = getmesh(u_h.fs) nodalu_h = Vector{Float64}(undef, length(mesh.nodes)) share_count = zeros(Int,length(mesh.nodes)) fill!(nodalu_h,0) @inbounds for (cell_idx, cell) in enumerate(CellIterator(mesh)) reinit!(u_h, cell) for node in getnodes(cell) u = value(u_h, node, cell) nodalu_h[node] += u share_count[node] += 1 end end return nodalu_h./share_count end function errornorm(u_h::TrialFunction{dim,T}, u_ex::Function, norm_type::String="L2") where {dim,T} mesh = 
getmesh(u_h.fs) Etu_h = zero(T) if norm_type == "L2" n_basefuncs_s = getnbasefunctions(u_h) @inbounds for (cell_idx, cell) in enumerate(CellIterator(mesh)) reinit!(u_h, cell) Elu_h = zero(T) for q_point in 1:getnquadpoints(u_h.fs) dΩ = getdetJdV(u_h.fs, q_point) u = zero(T) for i in 1:getnbasefunctions(u_h.fs) u += u_h.m_values[cell.current_cellid[], i]*shape_value(u_h.fs, q_point, i) end # Integral (u_h - u_ex) dΩ Elu_h += (u-u_ex(spatial_coordinate(u_h.fs, q_point, cell.coords)))^2*dΩ end Etu_h += Elu_h end else throw("Norm $norm_type not available") end return Etu_h end
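# Usage note (a sketch; `u_h` is assumed to hold the discrete solution and `u_exact`
# the analytic one): `errornorm` above accumulates the integral of (u_h - u_ex)^2 dΩ
# over all cells without taking a square root, so the L2 error itself is
#
#   l2_error = sqrt(errornorm(u_h, u_exact, "L2"))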
############################################################################### # # fmpz.jl : BigInts # ############################################################################### # Copyright (c) 2009-2014: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, # and other contributors: # # https://github.com/JuliaLang/julia/contributors # # Copyright (C) 2014, 2015 William Hart # Copyright (C) 2015, Claus Fieker # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. export fmpz, FlintZZ, FlintIntegerRing, parent, show, convert, hash, fac, bell, binom, isprime, fdiv, cdiv, tdiv, div, rem, mod, gcd, xgcd, lcm, invmod, powmod, abs, divrem, isqrt, popcount, prevpow2, nextpow2, ndigits, dec, bin, oct, hex, base, one, zero, divexact, fits, sign, nbits, deepcopy, tdivpow2, fdivpow2, cdivpow2, flog, clog, cmpabs, clrbit!, setbit!, combit!, crt, divisible, divisor_lenstra, fdivrem, tdivrem, fmodpow2, gcdinv, isprobabprime, issquare, jacobi, remove, root, size, isqrtrem, sqrtmod, trailing_zeros, sigma, eulerphi, fib, moebiusmu, primorial, risingfac, numpart, canonical_unit, needs_parentheses, isnegative, show_minus_one, parseint, addeq!, mul!, isunit, isequal, num, den, iszero, rand ############################################################################### # # Data type and parent methods # ############################################################################### parent_type(::Type{fmpz}) = FlintIntegerRing doc""" parent(a::fmpz) > Returns the unique Flint integer parent object `FlintZZ`. """ parent(a::fmpz) = FlintZZ elem_type(::Type{FlintIntegerRing}) = fmpz doc""" base_ring(a::FlintIntegerRing) > Returns `Union{}` as this ring is not dependent on another ring. """ base_ring(a::FlintIntegerRing) = Union{} doc""" base_ring(a::fmpz) > Returns `Union{}` as the parent ring is not dependent on another ring. 
""" base_ring(a::fmpz) = Union{} isdomain_type(::Type{fmpz}) = true ################################################################################ # # Hashing # ################################################################################ # Similar to hash for BigInt found in julia/base function _fmpz_is_small(a::fmpz) return __fmpz_is_small(a.d) end function _fmpz_limbs(a::fmpz) return __fmpz_limbs(a.d) end function hash_integer(a::fmpz, h::UInt) return _hash_integer(a.d, h) end function hash(a::fmpz, h::UInt) return hash_integer(a, h) end function __fmpz_is_small(a::Int) return (unsigned(a) >> (Sys.WORD_SIZE - 2) != 1) end function __fmpz_limbs(a::Int) if __fmpz_is_small(a) return 0 end b = unsafe_load(convert(Ref{Cint}, unsigned(a)<<2), 2) return b end function _hash_integer(a::Int, h::UInt) s = __fmpz_limbs(a) s == 0 && return Base.hash_integer(a, h) # get the pointer after the first two Cint d = convert(Ptr{Ref{UInt}}, unsigned(a) << 2) + 2*sizeof(Cint) p = unsafe_load(d) b = unsafe_load(p) h = xor(Base.hash_uint(xor(ifelse(s < 0, -b, b), h)), h) for k = 2:abs(s) h = xor(Base.hash_uint(xor(unsafe_load(p, k), h)), h) end return h end ############################################################################### # # Basic manipulation # ############################################################################### function deepcopy_internal(a::fmpz, dict::ObjectIdDict) z = fmpz() ccall((:fmpz_set, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, a) return z end doc""" one(R::FlintIntegerRing) > Return the integer $1$. """ one(R::FlintIntegerRing) = fmpz(1) doc""" zero(R::FlintIntegerRing) > Return the integer $1$. """ zero(R::FlintIntegerRing) = fmpz(0) doc""" sign(a::fmpz) > Returns the sign of $a$, i.e. $+1$, $0$ or $-1$. """ sign(a::fmpz) = Int(ccall((:fmpz_sgn, :libflint), Cint, (Ref{fmpz},), a)) doc""" fits(::Type{Int}, a::fmpz) > Returns `true` if the given integer fits into an `Int`, otherwise returns > `false`. """ fits(::Type{Int}, a::fmpz) = ccall((:fmpz_fits_si, :libflint), Bool, (Ref{fmpz},), a) doc""" fits(::Type{UInt}, a::fmpz) > Returns `true` if the given integer fits into a `UInt`, otherwise returns > `false`. """ fits(::Type{UInt}, a::fmpz) = sign(a) < 0 ? false : ccall((:fmpz_abs_fits_ui, :libflint), Bool, (Ref{fmpz},), a) doc""" size(a::fmpz) > Returns the number of limbs required to store the absolute value of $a$. """ size(a::fmpz) = Int(ccall((:fmpz_size, :libflint), Cint, (Ref{fmpz},), a)) doc""" isunit(a::fmpz) > Return `true` if the given integer is a unit, i.e. $\pm 1$, otherwise return > `false`. """ isunit(a::fmpz) = ccall((:fmpz_is_pm1, :libflint), Bool, (Ref{fmpz},), a) doc""" iszero(a::fmpz) > Return `true` if the given integer is zero, otherwise return `false`. """ iszero(a::fmpz) = ccall((:fmpz_is_zero, :libflint), Bool, (Ref{fmpz},), a) doc""" isone(a::fmpz) > Return `true` if the given integer is one, otherwise return `false`. """ isone(a::fmpz) = ccall((:fmpz_is_one, :libflint), Bool, (Ref{fmpz},), a) doc""" denominator(a::fmpz) > Returns the denominator of $a$ thought of as a rational. Always returns $1$. """ function denominator(a::fmpz) return fmpz(1) end doc""" numerator(a::fmpz) > Returns the numerator of $a$ thought of as a rational. Always returns $a$. 
""" function numerator(a::fmpz) return a end ############################################################################### # # AbstractString I/O # ############################################################################### string(x::fmpz) = dec(x) show(io::IO, x::fmpz) = print(io, string(x)) show(io::IO, a::FlintIntegerRing) = print(io, "Integer Ring") needs_parentheses(x::fmpz) = false isnegative(x::fmpz) = x < 0 show_minus_one(::Type{fmpz}) = false ############################################################################### # # Canonicalisation # ############################################################################### canonical_unit(x::fmpz) = x < 0 ? fmpz(-1) : fmpz(1) ############################################################################### # # Unary operators and functions, e.g. -fmpz(12), ~fmpz(12) # ############################################################################### function -(x::fmpz) z = fmpz() ccall((:__fmpz_neg, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, x) return z end function ~(x::fmpz) z = fmpz() ccall((:fmpz_complement, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, x) return z end function abs(x::fmpz) z = fmpz() ccall((:fmpz_abs, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, x) return z end ############################################################################### # # Binary operators and functions # ############################################################################### # Metaprogram to define functions +, -, *, gcd, lcm, # &, |, $ for (fJ, fC) in ((:+, :add), (:-,:sub), (:*, :mul), (:&, :and), (:|, :or), (:$, :xor)) @eval begin function ($fJ)(x::fmpz, y::fmpz) z = fmpz() ccall(($(string(:fmpz_, fC)), :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end end end # Metaprogram to define functions fdiv, cdiv, tdiv, div, mod for (fJ, fC) in ((:fdiv, :fdiv_q), (:cdiv, :cdiv_q), (:tdiv, :tdiv_q), (:div, :tdiv_q)) @eval begin function ($fJ)(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) z = fmpz() ccall(($(string(:fmpz_, fC)), :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end end end function divexact(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) z = fmpz() ccall((:fmpz_divexact, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) z end function rem(x::fmpz, c::fmpz) iszero(c) && throw(DivideError()) q = fmpz() r = fmpz() ccall((:fmpz_tdiv_qr, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), q, r, x, abs(c)) return r end ############################################################################### # # Ad hoc binary operators # ############################################################################### function +(x::fmpz, c::Int) z = fmpz() if c >= 0 ccall((:fmpz_add_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) else ccall((:fmpz_sub_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, -c) end return z end +(c::Int, x::fmpz) = x + c function -(x::fmpz, c::Int) z = fmpz() if c >= 0 ccall((:fmpz_sub_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) else ccall((:fmpz_add_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, -c) end return z end function -(c::Int, x::fmpz) z = fmpz() if c >= 0 ccall((:fmpz_sub_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) else ccall((:fmpz_add_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, -c) end ccall((:__fmpz_neg, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, z) return z end function *(x::fmpz, c::Int) z = fmpz() ccall((:fmpz_mul_si, :libflint), Void, 
(Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end *(c::Int, x::fmpz) = x * c +(a::fmpz, b::Integer) = a + fmpz(b) +(a::Integer, b::fmpz) = fmpz(a) + b -(a::fmpz, b::Integer) = a - fmpz(b) -(a::Integer, b::fmpz) = fmpz(a) - b *(a::fmpz, b::Integer) = a*fmpz(b) *(a::Integer, b::fmpz) = fmpz(a)*b ############################################################################### # # Ad hoc exact division # ############################################################################### function divexact(x::fmpz, y::Int) y == 0 && throw(DivideError()) z = fmpz() ccall((:fmpz_divexact_si, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, y) z end divexact(x::fmpz, y::Integer) = divexact(x, fmpz(y)) divexact(x::Integer, y::fmpz) = divexact(fmpz(x), y) ############################################################################### # # Ad hoc division # ############################################################################### function rem(x::fmpz, c::Int) c == 0 && throw(DivideError()) r = ccall((:fmpz_tdiv_ui, :libflint), Int, (Ref{fmpz}, Int), x, abs(c)) return sign(x) > 0 ? r : -r end function tdivpow2(x::fmpz, c::Int) c < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_tdiv_q_2exp, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function fdivpow2(x::fmpz, c::Int) c < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_fdiv_q_2exp, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function fmodpow2(x::fmpz, c::Int) c < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_fdiv_r_2exp, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function cdivpow2(x::fmpz, c::Int) c < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_cdiv_q_2exp, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function div(x::fmpz, c::Int) c == 0 && throw(DivideError()) z = fmpz() ccall((:fmpz_tdiv_q_si, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function tdiv(x::fmpz, c::Int) c == 0 && throw(DivideError()) z = fmpz() ccall((:fmpz_tdiv_q_si, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function fdiv(x::fmpz, c::Int) c == 0 && throw(DivideError()) z = fmpz() ccall((:fmpz_fdiv_q_si, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end function cdiv(x::fmpz, c::Int) c == 0 && throw(DivideError()) z = fmpz() ccall((:fmpz_cdiv_q_si, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end ############################################################################### # # Division with remainder # ############################################################################### function divrem(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) z1 = fmpz() z2 = fmpz() ccall((:fmpz_tdiv_qr, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z1, z2, x, y) z1, z2 end function tdivrem(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) z1 = fmpz() z2 = fmpz() ccall((:fmpz_tdiv_qr, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z1, z2, x, y) z1, z2 end function fdivrem(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) z1 = fmpz() z2 = fmpz() ccall((:fmpz_fdiv_qr, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z1, z2, x, y) z1, z2 end ############################################################################### # # Powering # ############################################################################### function ^(x::fmpz, y::Int) if y < 0; throw(DomainError()); end if isone(x); return x; end if x == -1; return isodd(y) ? 
x : -x; end if y > typemax(UInt); throw(DomainError()); end if y == 0; return one(FlintZZ); end if y == 1; return x; end z = fmpz() ccall((:fmpz_pow_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, UInt), z, x, UInt(y)) return z end ############################################################################### # # Comparison # ############################################################################### function cmp(x::fmpz, y::fmpz) Int(ccall((:fmpz_cmp, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}), x, y)) end ==(x::fmpz, y::fmpz) = cmp(x,y) == 0 <=(x::fmpz, y::fmpz) = cmp(x,y) <= 0 >=(x::fmpz, y::fmpz) = cmp(x,y) >= 0 <(x::fmpz, y::fmpz) = cmp(x,y) < 0 >(x::fmpz, y::fmpz) = cmp(x,y) > 0 function cmpabs(x::fmpz, y::fmpz) Int(ccall((:fmpz_cmpabs, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}), x, y)) end isless(x::fmpz, y::fmpz) = x < y ############################################################################### # # Ad hoc comparison # ############################################################################### function cmp(x::fmpz, y::Int) Int(ccall((:fmpz_cmp_si, :libflint), Cint, (Ref{fmpz}, Int), x, y)) end ==(x::fmpz, y::Int) = cmp(x,y) == 0 <=(x::fmpz, y::Int) = cmp(x,y) <= 0 >=(x::fmpz, y::Int) = cmp(x,y) >= 0 <(x::fmpz, y::Int) = cmp(x,y) < 0 >(x::fmpz, y::Int) = cmp(x,y) > 0 ==(x::Int, y::fmpz) = cmp(y,x) == 0 <=(x::Int, y::fmpz) = cmp(y,x) >= 0 >=(x::Int, y::fmpz) = cmp(y,x) <= 0 <(x::Int, y::fmpz) = cmp(y,x) > 0 >(x::Int, y::fmpz) = cmp(y,x) < 0 function cmp(x::fmpz, y::UInt) Int(ccall((:fmpz_cmp_ui, :libflint), Cint, (Ref{fmpz}, UInt), x, y)) end ==(x::fmpz, y::UInt) = cmp(x,y) == 0 <=(x::fmpz, y::UInt) = cmp(x,y) <= 0 >=(x::fmpz, y::UInt) = cmp(x,y) >= 0 <(x::fmpz, y::UInt) = cmp(x,y) < 0 >(x::fmpz, y::UInt) = cmp(x,y) > 0 ==(x::UInt, y::fmpz) = cmp(y,x) == 0 <=(x::UInt, y::fmpz) = cmp(y,x) >= 0 >=(x::UInt, y::fmpz) = cmp(y,x) <= 0 <(x::UInt, y::fmpz) = cmp(y,x) > 0 >(x::UInt, y::fmpz) = cmp(y,x) < 0 ############################################################################### # # Shifting # ############################################################################### doc""" <<(x::fmpz, c::Int) > Return $2^cx$ where $c \geq 0$. """ function <<(x::fmpz, c::Int) c < 0 && throw(DomainError()) c == 0 && return x z = fmpz() ccall((:fmpz_mul_2exp, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end doc""" >>(x::fmpz, c::Int) > Return $x/2^c$, discarding any remainder, where $c \geq 0$. """ function >>(x::fmpz, c::Int) c < 0 && throw(DomainError()) c == 0 && return x z = fmpz() ccall((:fmpz_fdiv_q_2exp, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, c) return z end ############################################################################### # # Modular arithmetic # ############################################################################### doc""" mod(x::fmpz, y::fmpz) > Return the remainder after division of $x$ by $y$. The remainder will be the > least nonnegative remainder. """ function mod(x::fmpz, y::fmpz) z = fmpz() ccall((:fmpz_mod, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end doc""" mod(x::fmpz, y::Int) > Return the remainder after division of $x$ by $y$. The remainder will be the > least nonnegative remainder. """ function mod(x::fmpz, c::Int) c == 0 && throw(DivideError()) if c > 0 return ccall((:fmpz_fdiv_ui, :libflint), Int, (Ref{fmpz}, Int), x, c) else r = ccall((:fmpz_fdiv_ui, :libflint), Int, (Ref{fmpz}, Int), x, -c) return r == 0 ? 
0 : r + c end end doc""" powmod(x::fmpz, p::fmpz, m::fmpz) > Return $x^p (\mod m)$. The remainder will be in the range $[0, m)$ """ function powmod(x::fmpz, p::fmpz, m::fmpz) m <= 0 && throw(DomainError()) if p < 0 x = invmod(x, m) p = -p end r = fmpz() ccall((:fmpz_powm, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), r, x, p, m) return r end doc""" powmod(x::fmpz, p::Int, m::fmpz) > Return $x^p (\mod m)$. The remainder will be in the range $[0, m)$ """ function powmod(x::fmpz, p::Int, m::fmpz) m <= 0 && throw(DomainError()) if p < 0 x = invmod(x, m) p = -p end r = fmpz() ccall((:fmpz_powm_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int, Ref{fmpz}), r, x, p, m) return r end doc""" invmod(x::fmpz, m::fmpz) > Return $x^{-1} (\mod m)$. The remainder will be in the range $[0, m)$ """ function invmod(x::fmpz, m::fmpz) m <= 0 && throw(DomainError()) z = fmpz() if isone(m) return fmpz(0) end if ccall((:fmpz_invmod, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, m) == 0 error("Impossible inverse in invmod") end return z end doc""" sqrtmod(x::fmpz, m::fmpz) > Return a square root of $x (\mod m)$ if one exists. The remainder will be in > the range $[0, m)$. We require that $m$ is prime, otherwise the algorithm may > not terminate. """ function sqrtmod(x::fmpz, m::fmpz) m <= 0 && throw(DomainError()) z = fmpz() if (ccall((:fmpz_sqrtmod, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, m) == 0) error("no square root exists") end return z end doc""" crt(r1::fmpz, m1::fmpz, r2::fmpz, m2::fmpz, signed=false) > Find $r$ such that $r \equiv r_1 (\mod m_1)$ and $r \equiv r_2 (\mod m_2)$. > If `signed = true`, $r$ will be in the range $-m_1m_2/2 < r \leq m_1m_2/2$. > If `signed = false` the value will be in the range $0 \leq r < m_1m_2$. """ function crt(r1::fmpz, m1::fmpz, r2::fmpz, m2::fmpz, signed=false) z = fmpz() ccall((:fmpz_CRT, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Cint), z, r1, m1, r2, m2, signed) return z end doc""" crt(r1::fmpz, m1::fmpz, r2::Int, m2::Int, signed=false) > Find $r$ such that $r \equiv r_1 (\mod m_1)$ and $r \equiv r_2 (\mod m_2)$. > If `signed = true`, $r$ will be in the range $-m_1m_2/2 < r \leq m_1m_2/2$. > If `signed = false` the value will be in the range $0 \leq r < m_1m_2$. """ function crt(r1::fmpz, m1::fmpz, r2::Int, m2::Int, signed=false) z = fmpz() r2 < 0 && throw(DomainError()) m2 < 0 && throw(DomainError()) ccall((:fmpz_CRT_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Int, Int, Cint), z, r1, m1, r2, m2, signed) return z end ############################################################################### # # Integer logarithm # ############################################################################### doc""" flog(x::fmpz, c::fmpz) > Return the floor of the logarithm of $x$ to base $c$. """ function flog(x::fmpz, c::fmpz) c <= 0 && throw(DomainError()) x <= 0 && throw(DomainError()) return ccall((:fmpz_flog, :libflint), Int, (Ref{fmpz}, Ref{fmpz}), x, c) end doc""" clog(x::fmpz, c::fmpz) > Return the ceiling of the logarithm of $x$ to base $c$. """ function clog(x::fmpz, c::fmpz) c <= 0 && throw(DomainError()) x <= 0 && throw(DomainError()) return ccall((:fmpz_clog, :libflint), Int, (Ref{fmpz}, Ref{fmpz}), x, c) end doc""" flog(x::fmpz, c::Int) > Return the floor of the logarithm of $x$ to base $c$. 
""" function flog(x::fmpz, c::Int) c <= 0 && throw(DomainError()) return ccall((:fmpz_flog_ui, :libflint), Int, (Ref{fmpz}, Int), x, c) end doc""" clog(x::fmpz, c::Int) > Return the ceiling of the logarithm of $x$ to base $c$. """ function clog(x::fmpz, c::Int) c <= 0 && throw(DomainError()) return ccall((:fmpz_clog_ui, :libflint), Int, (Ref{fmpz}, Int), x, c) end ############################################################################### # # GCD and LCM # ############################################################################### doc""" gcd(x::fmpz, y::fmpz) > Return the greatest common divisor of $x$ and $y$. The returned result will > always be nonnegative and will be zero iff $x$ and $y$ are zero. """ function gcd(x::fmpz, y::fmpz) z = fmpz() ccall((:fmpz_gcd, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end doc""" gcd(x::Array{fmpz, 1}) > Return the greatest common divisor of the elements of $x$. The returned > result will always be nonnegative and will be zero iff all elements of $x$ > are zero. """ function gcd(x::Array{fmpz, 1}) if length(x) == 0 error("Array must not be empty") elseif length(x) == 1 return x[1] end z = fmpz() ccall((:fmpz_gcd, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x[1], x[2]) for i in 3:length(x) ccall((:fmpz_gcd, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, z, x[i]) if isone(z) return z end end return z end doc""" lcm(x::fmpz, y::fmpz) > Return the least common multiple of $x$ and $y$. The returned result will > always be nonnegative and will be zero iff $x$ and $y$ are zero. """ function lcm(x::fmpz, y::fmpz) z = fmpz() ccall((:fmpz_lcm, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end doc""" lcm(x::Array{fmpz, 1}) > Return the least common multiple of the elements of $x$. The returned result > will always be nonnegative and will be zero iff the elements of $x$ are zero. """ function lcm(x::Array{fmpz, 1}) if length(x) == 0 error("Array must not be empty") elseif length(x) == 1 return x[1] end z = fmpz() ccall((:fmpz_lcm, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x[1], x[2]) for i in 3:length(x) ccall((:fmpz_lcm, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, z, x[i]) end return z end ############################################################################### # # Extended GCD # ############################################################################### doc""" gcdx(a::fmpz, b::fmpz) > Return a tuple $g, s, t$ such that $g$ is the greatest common divisor of $a$ > and $b$ and integers $s$ and $t$ such that $g = as + bt$. """ function gcdx(a::fmpz, b::fmpz) if b == 0 # shortcut this to ensure consistent results with gcdx(a,b) return a < 0 ? (-a, -one(FlintZZ), zero(FlintZZ)) : (a, one(FlintZZ), zero(FlintZZ)) end g = fmpz() s = fmpz() t = fmpz() ccall((:fmpz_xgcd, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), g, s, t, a, b) g, s, t end doc""" gcdinv(a::fmpz, b::fmpz) > Return a tuple $g, s$ where $g$ is the greatest common divisor of $a$ and > $b$ and where $s$ is the inverse of $a$ modulo $b$ if $g = 1$. This function > can be used to detect impossible inverses, i.e. where $a$ and $b$ are not > coprime, and to yield the common factor of $a$ and $b$ if they are not > coprime. We require $b \geq a \geq 0$. 
""" function gcdinv(a::fmpz, b::fmpz) a < 0 && throw(DomainError()) b < a && throw(DomainError()) g = fmpz() s = fmpz() ccall((:fmpz_gcdinv, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), g, s, a, b) return g, s end ############################################################################### # # Roots # ############################################################################### doc""" isqrt(x::fmpz) > Return the floor of the square root of $x$. """ function isqrt(x::fmpz) x < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_sqrt, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, x) return z end doc""" isqrtrem(x::fmpz) > Return a tuple $s, r$ consisting of the floor $s$ of the square root of $x$ > and the remainder $r$, i.e. such that $x = s^2 + r$. We require $x \geq 0$. """ function isqrtrem(x::fmpz) x < 0 && throw(DomainError()) s = fmpz() r = fmpz() ccall((:fmpz_sqrtrem, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), s, r, x) return s, r end doc""" root(x::fmpz, n::Int) > Return the floor of the $n$-the root of $x$. We require $n > 0$ and that > $x \geq 0$ if $n$ is even. """ function root(x::fmpz, n::Int) x < 0 && iseven(n) && throw(DomainError()) n <= 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_root, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, n) return z end ############################################################################### # # Factorization # ############################################################################### function _factor(a::fmpz) # This is a hack around https://github.com/JuliaLang/julia/issues/19963 # Remove this once julia 6.0 is required if a == 1 || a == -1 return Dict{fmpz, Int}(), a end F = fmpz_factor() ccall((:fmpz_factor, :libflint), Void, (Ref{fmpz_factor}, Ref{fmpz}), F, a) res = Dict{fmpz, Int}() for i in 1:F.num z = fmpz() ccall((:fmpz_factor_get_fmpz, :libflint), Void, (Ref{fmpz}, Ref{fmpz_factor}, Int), z, F, i - 1) res[z] = unsafe_load(F.exp, i) end return res, canonical_unit(a) end function factor(a::fmpz) fac, z = _factor(a) return Fac(z, fac) end ############################################################################### # # Number theoretic/combinatorial # ############################################################################### doc""" divisible(x::fmpz, y::fmpz) > Return `true` if $x$ is divisible by $y$, otherwise return `false`. We > require $x \neq 0$. """ function divisible(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) Bool(ccall((:fmpz_divisible, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}), x, y)) end doc""" divisible(x::fmpz, y::Int) > Return `true` if $x$ is divisible by $y$, otherwise return `false`. We > require $x \neq 0$. """ function divisible(x::fmpz, y::Int) y == 0 && throw(DivideError()) Bool(ccall((:fmpz_divisible_si, :libflint), Cint, (Ref{fmpz}, Int), x, y)) end doc""" issquare(x::fmpz) > Return `true` if $x$ is a square, otherwise return `false`. """ issquare(x::fmpz) = Bool(ccall((:fmpz_is_square, :libflint), Cint, (Ref{fmpz},), x)) doc""" is_prime(x::UInt) > Return `true` if $x$ is a prime number, otherwise return `false`. """ is_prime(x::UInt) = Bool(ccall((:n_is_prime, :libflint), Cint, (UInt,), x)) doc""" isprime(x::fmpz) > Return `true` if $x$ is a prime number, otherwise return `false`. """ # flint's fmpz_is_prime doesn't work yet isprime(x::fmpz) = Bool(ccall((:fmpz_is_probabprime, :libflint), Cint, (Ref{fmpz},), x)) doc""" isprobabprime(x::fmpz) > Return `true` if $x$ is a very probably a prime number, otherwise return > `false`. 
No counterexamples are known to this test, but it is conjectured > that infinitely many exist. """ isprobabprime(x::fmpz) = Bool(ccall((:fmpz_is_probabprime, :libflint), Cint, (Ref{fmpz},), x)) doc""" remove(x::fmpz, y::fmpz) > Return the tuple $n, z$ such that $x = y^nz$ where $y$ and $z$ are coprime. """ function remove(x::fmpz, y::fmpz) iszero(y) && throw(DivideError()) z = fmpz() num = ccall((:fmpz_remove, :libflint), Int, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return num, z end remove(x::fmpz, y::Integer) = remove(x, fmpz(y)) remove(x::Integer, y::fmpz) = remove(fmpz(x), y) remove(x::Integer, y::Integer) = remove(fmpz(x), fmpz(y)) doc""" valuation(x::fmpz, y::fmpz) > Return the largest $n$ such that $y^n$ divides $x$. """ function valuation(x::fmpz, y::fmpz) n, _ = remove(x, y) return n end valuation(x::fmpz, y::Integer) = valuation(x, fmpz(y)) valuation(x::Integer, y::fmpz) = valuation(fmpz(x), y) valuation(x::Integer, y::Integer) = valuation(fmpz(x), fmpz(y)) doc""" divisor_lenstra(n::fmpz, r::fmpz, m::fmpz) > If $n$ has a factor which lies in the residue class $r (\mod m)$ for > $0 < r < m < n$, this function returns such a factor. Otherwise it returns > $0$. This is only efficient if $m$ is at least the cube root of $n$. We > require gcd$(r, m) = 1$ and this condition is not checked. """ function divisor_lenstra(n::fmpz, r::fmpz, m::fmpz) r <= 0 && throw(DomainError()) m <= r && throw(DomainError()) n <= m && throw(DomainError()) z = fmpz() if !Bool(ccall((:fmpz_divisor_in_residue_class_lenstra, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, n, r, m)) z = 0 end return z end doc""" fac(x::Int) > Return the factorial of $x$, i.e. $x! = 1.2.3\ldots x$. We require > $x \geq 0$. """ function fac(x::Int) x < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_fac_ui, :libflint), Void, (Ref{fmpz}, UInt), z, x) return z end doc""" risingfac(x::fmpz, y::Int) > Return the rising factorial of $x$, i.e. $x(x + 1)(x + 2)\ldots (x + n - 1)$. > If $n < 0$ we throw a `DomainError()`. """ function risingfac(x::fmpz, y::Int) y < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_rfac_ui, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, UInt), z, x, y) return z end doc""" risingfac(x::Int, y::Int) > Return the rising factorial of $x$, i.e. $x(x + 1)(x + 2)\ldots (x + n - 1)$. > If $n < 0$ we throw a `DomainError()`. """ function risingfac(x::Int, y::Int) y < 0 && throw(DomainError()) z = fmpz() if x < 0 if y <= -x # we don't pass zero z = isodd(y) ? -risingfac(-x - y + 1, y) : risingfac(-x - y + 1, y) end else ccall((:fmpz_rfac_uiui, :libflint), Void, (Ref{fmpz}, UInt, UInt), z, x, y) end return z end doc""" primorial(x::Int) > Return the primorial of $n$, i.e. the product of all primes less than or > equal to $n$. If $n < 0$ we throw a `DomainError()`. """ function primorial(x::Int) x < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_primorial, :libflint), Void, (Ref{fmpz}, UInt), z, x) return z end doc""" fib(x::Int) > Return the $n$-th Fibonacci number $F_n$. We define $F_1 = 1$, $F_2 = 1$ and > $F_{i + 1} = F_i + F_{i - 1}$ for all $i > 2$. We require $n \geq 0$. For > convenience, we define $F_0 = 0$. """ function fib(x::Int) x < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_fib_ui, :libflint), Void, (Ref{fmpz}, UInt), z, x) return z end doc""" bell(x::Int) > Return the Bell number $B_n$. 
""" function bell(x::Int) x < 0 && throw(DomainError()) z = fmpz() ccall((:arith_bell_number, :libflint), Void, (Ref{fmpz}, UInt), z, x) return z end doc""" binom(n::Int, k::Int) > Return the binomial coefficient $\frac{n!}{(n - k)!k!}$. If $n, k < 0$ or > $k > n$ we return $0$. """ function binom(n::Int, k::Int) n < 0 && return fmpz(0) k < 0 && return fmpz(0) z = fmpz() ccall((:fmpz_bin_uiui, :libflint), Void, (Ref{fmpz}, UInt, UInt), z, n, k) return z end doc""" moebiusmu(x::fmpz) > Returns the Moebius mu function of $x$ as an \code{Int}. The value > returned is either $-1$, $0$ or $1$. If $x < 0$ we throw a `DomainError()`. """ function moebiusmu(x::fmpz) x < 0 && throw(DomainError()) return Int(ccall((:fmpz_moebius_mu, :libflint), Cint, (Ref{fmpz},), x)) end doc""" jacobi(x::fmpz, y::fmpz) > Return the value of the Jacobi symbol $\left(\frac{x}{y}\right)$. If > $y \leq x$ or $x < 0$, we throw a `DomainError()`. """ function jacobi(x::fmpz, y::fmpz) y <= x && throw(DomainError()) x < 0 && throw(DomainError()) return Int(ccall((:fmpz_jacobi, :libflint), Cint, (Ref{fmpz}, Ref{fmpz}), x, y)) end doc""" sigma(x::fmpz, y::Int) > Return the value of the sigma function, i.e. $\sum_{0 < d \;| x} d^y$. If > $y < 0$ we throw a `DomainError()`. """ function sigma(x::fmpz, y::Int) y < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_divisor_sigma, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, y) return z end doc""" eulerphi(x::fmpz) > Return the value of the Euler phi function at $x$, i.e. the number of > positive integers less than $x$ that are coprime with $x$. """ function eulerphi(x::fmpz) x < 0 && throw(DomainError()) z = fmpz() ccall((:fmpz_euler_phi, :libflint), Void, (Ref{fmpz}, Ref{fmpz}), z, x) return z end doc""" numpart(x::Int) > Return the number of partitions of $x$. This function is not available on > Windows 64. """ function numpart(x::Int) if (is_windows() ? true : false) && Int == Int64 error("not yet supported on win64") end x < 0 && throw(DomainError()) z = fmpz() ccall((:partitions_fmpz_ui, :libarb), Void, (Ref{fmpz}, UInt), z, x) return z end doc""" numpart(x::fmpz) > Return the number of partitions of $x$. This function is not available on > Windows 64. """ function numpart(x::fmpz) if (is_windows() ? true : false) && Int == Int64 error("not yet supported on win64") end x < 0 && throw(DomainError()) z = fmpz() ccall((:partitions_fmpz_fmpz, :libarb), Void, (Ref{fmpz}, Ref{fmpz}, Int), z, x, 0) return z end ############################################################################### # # Number bases/digits # ############################################################################### doc""" bin(n::fmpz) > Return $n$ as a binary string. """ bin(n::fmpz) = base(n, 2) doc""" oct(n::fmpz) > Return $n$ as a octal string. """ oct(n::fmpz) = base(n, 8) doc""" dec(n::fmpz) > Return $n$ as a decimal string. """ dec(n::fmpz) = base(n, 10) doc""" hex(n::fmpz) = base(n, 16) > Return $n$ as a hexadecimal string. """ hex(n::fmpz) = base(n, 16) doc""" base(n::fmpz, b::Integer) > Return $n$ as a string in base $b$. We require $2 \leq b \leq 62$. 
""" function base(n::fmpz, b::Integer) 2 <= b <= 62 || error("invalid base: $b") p = ccall((:fmpz_get_str,:libflint), Ref{UInt8}, (Ref{UInt8}, Cint, Ref{fmpz}), C_NULL, b, n) s = unsafe_string(p) ccall((:flint_free, :libflint), Void, (Ref{UInt8},), p) return s end function ndigits_internal(x::fmpz, b::Integer = 10) # fmpz_sizeinbase might return an answer 1 too big n = Int(ccall((:fmpz_sizeinbase, :libflint), UInt, (Ref{fmpz}, Int32), x, b)) abs(x) < fmpz(b)^(n - 1) ? n - 1 : n end doc""" ndigits(x::fmpz, b::Integer = 10) > Return the number of digits of $x$ in the base $b$ (default is $b = 10$). """ ndigits(x::fmpz, b::Integer = 10) = iszero(x) ? 1 : ndigits_internal(x, b) doc""" nbits(x::fmpz) > Return the number of binary bits of $x$. We return zero if $x = 0$. """ nbits(x::fmpz) = iszero(x) ? 0 : Int(ccall((:fmpz_sizeinbase, :libflint), UInt, (Ref{fmpz}, Int32), x, 2)) # docu states: always correct #if base is power of 2 ############################################################################### # # Bit fiddling # ############################################################################### doc""" popcount(x::fmpz) > Return the number of ones in the binary representation of $x$. """ popcount(x::fmpz) = Int(ccall((:fmpz_popcnt, :libflint), UInt, (Ref{fmpz},), x)) doc""" prevpow2(x::fmpz) > Return the previous power of $2$ up to including $x$. """ prevpow2(x::fmpz) = x < 0 ? -prevpow2(-x) : (x <= 2 ? x : one(FlintZZ) << (ndigits(x, 2) - 1)) doc""" nextpow2(x::fmpz) > Return the next power of $2$ that is at least $x$. """ nextpow2(x::fmpz) = x < 0 ? -nextpow2(-x) : (x <= 2 ? x : one(FlintZZ) << ndigits(x - 1, 2)) doc""" trailing_zeros(x::fmpz) > Count the trailing zeros in the binary representation of $x$. """ trailing_zeros(x::fmpz) = ccall((:fmpz_val2, :libflint), Int, (Ref{fmpz},), x) ############################################################################### # # Bitwise operations (unsafe) # ############################################################################### doc""" clrbit!(x::fmpz, c::Int) > Clear bit $c$ of $x$, where the least significant bit is the $0$-th bit. Note > that this function modifies its input in-place. """ function clrbit!(x::fmpz, c::Int) c < 0 && throw(DomainError()) ccall((:fmpz_clrbit, :libflint), Void, (Ref{fmpz}, Int), x, c) end doc""" setbit!(x::fmpz, c::Int) > Set bit $c$ of $x$, where the least significant bit is the $0$-th bit. Note > that this function modifies its input in-place. """ function setbit!(x::fmpz, c::Int) c < 0 && throw(DomainError()) ccall((:fmpz_setbit, :libflint), Void, (Ref{fmpz}, Int), x, c) end doc""" combit!(x::fmpz, c::Int) > Complement bit $c$ of $x$, where the least significant bit is the $0$-th bit. > Note that this function modifies its input in-place. 
""" function combit!(x::fmpz, c::Int) c < 0 && throw(DomainError()) ccall((:fmpz_combit, :libflint), Void, (Ref{fmpz}, Int), x, c) end ############################################################################### # # Unsafe operators # ############################################################################### function mul!(z::fmpz, x::fmpz, y::fmpz) ccall((:fmpz_mul, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end function addmul!(z::fmpz, x::fmpz, y::fmpz, c::fmpz) ccall((:fmpz_addmul, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end function addeq!(z::fmpz, x::fmpz) ccall((:fmpz_add, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, z, x) return z end function add!(z::fmpz, x::fmpz, y::fmpz) ccall((:fmpz_add, :libflint), Void, (Ref{fmpz}, Ref{fmpz}, Ref{fmpz}), z, x, y) return z end function zero!(z::fmpz) ccall((:fmpz_zero, :libflint), Void, (Ref{fmpz},), z) return z end ############################################################################### # # Parent object overloads # ############################################################################### (::FlintIntegerRing)() = fmpz() (::FlintIntegerRing)(a::Integer) = fmpz(a) (::FlintIntegerRing)(a::AbstractString) = fmpz(a) (::FlintIntegerRing)(a::fmpz) = a (::FlintIntegerRing)(a::Float64) = fmpz(a) (::FlintIntegerRing)(a::Float32) = fmpz(Float64(a)) (::FlintIntegerRing)(a::Float16) = fmpz(Float64(a)) (::FlintIntegerRing)(a::BigFloat) = fmpz(BigInt(a)) ############################################################################### # # String parser # ############################################################################### function parse(::Type{fmpz}, s::String, base::Int = 10) s = string(s) sgn = s[1] == '-' ? -1 : 1 i = 1 + (sgn == -1) z = fmpz() err = ccall((:fmpz_set_str, :libflint), Int32, (Ref{fmpz}, Ref{UInt8}, Int32), z, string(SubString(s, i)), base) err == 0 || error("Invalid big integer: $(repr(s))") return sgn < 0 ? 
-z : z end ############################################################################### # # Random generation # ############################################################################### function rand(R::FlintIntegerRing, n::UnitRange{Int}) return R(rand(n)) end ############################################################################### # # Constructors # ############################################################################### fmpz(s::AbstractString) = parse(fmpz, s) fmpz(z::Integer) = fmpz(BigInt(z)) fmpz(z::Float16) = fmpz(Float64(z)) fmpz(z::Float32) = fmpz(Float64(z)) fmpz(z::BigFloat) = fmpz(BigInt(z)) ############################################################################### # # Conversions and promotions # ############################################################################### convert(::Type{fmpz}, a::Integer) = fmpz(a) function convert(::Type{BigInt}, a::fmpz) r = BigInt() ccall((:fmpz_get_mpz, :libflint), Void, (Ref{BigInt}, Ref{fmpz}), r, a) return r end function convert(::Type{Int}, a::fmpz) (a > typemax(Int) || a < typemin(Int)) && throw(InexactError()) return ccall((:fmpz_get_si, :libflint), Int, (Ref{fmpz},), a) end function convert(::Type{UInt}, a::fmpz) (a > typemax(UInt) || a < 0) && throw(InexactError()) return ccall((:fmpz_get_ui, :libflint), UInt, (Ref{fmpz}, ), a) end function convert(::Type{Float64}, n::fmpz) # rounds to zero ccall((:fmpz_get_d, :libflint), Float64, (Ref{fmpz},), n) end convert(::Type{Float32}, n::fmpz) = Float32(Float64(n)) convert(::Type{Float16}, n::fmpz) = Float16(Float64(n)) convert(::Type{BigFloat}, n::fmpz) = BigFloat(BigInt(n)) Base.promote_rule(::Type{fmpz}, ::Type{T}) where {T <: Integer} = fmpz promote_rule(::Type{fmpz}, ::Type{T}) where {T <: Integer} = fmpz
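# A minimal usage sketch of the wrappers above (not part of the original file; it
# assumes the `fmpz`/`FlintIntegerRing` types and the FLINT bindings from the rest
# of the package are loaded):
#
#   a = fmpz(12); b = fmpz(18)
#   g, s, t = gcdx(a, b)                     # g == 6 with 12*s + 18*t == 6
#   powmod(fmpz(2), fmpz(10), fmpz(1000))    # 2^10 mod 1000 == fmpz(24)
#   factor(fmpz(600))                        # 2^3 * 3 * 5^2, returned as a Fac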
module PrecessionNutation using AstronomicalTime using ERFA using StaticArrays using MuladdMacro import EarthOrientation: precession_nutation00 include("helper.jl") include("tables.jl") export precession_nutation_erfa, precession_nutation_06 function precession_nutation_erfa(ep::TTEpoch) x, y = ERFA.xy06(julian1(ep), julian2(ep)) s = ERFA.s06(julian1(ep), julian2(ep), x, y) x, y, s end ### IAU 2006 precession and IAU 2000A_R06 nutation function precession_nutation_IAU2006_IAU2000A_R06(ep_tt::TTEpoch; revision = 0) tt_jc = jc(ep_tt) if revision === 0 # P03 X = @evalpoly(tt_jc, -16_617.0, +2_004_191_898.00, -429_782.90, -198_618.34, +7.578, +5.928_5) # μas Y = @evalpoly(tt_jc, -6_951.0, -25_896.00, -22_407_274.70, +1_900.59, +1_112.526, +0.135_8) # μas s = @evalpoly(tt_jc, +94.0, +3_808.65, -122.68, -72_574.11, +27.980, +15.620_0) # μas elseif revision === 1 # P03_rev1 X = @evalpoly(tt_jc, -16_617.0, +2_004_191_804.00, -429_755.80, -198_618.29, +7.575, +5.928_5) # μas Y = @evalpoly(tt_jc, -6_951.0, -24_867.00, -22_407_272.70, +1_900.26, +1_112.525, +0.135_8) # μas # TODO: confirm values for s s = @evalpoly(tt_jc, +94.0, +3_808.65, -122.68, -72_574.11, +27.980, +15.620_0) # μas elseif revision === 2 # P03_rev2 X = @evalpoly(tt_jc, -16_617.0, +2_004_192_130.00, -429_775.20, -198_618.39, +7.576, +5.928_5) # μas Y = @evalpoly(tt_jc, -6_951.0, -25_817.00, -22_407_280.10, +1_900.46, +1_112.526, +0.135_8) # μas # TODO: confirm values for s s = @evalpoly(tt_jc, +94.0, +3_808.65, -122.68, -72_574.11, +27.980, +15.620_0) # μas else error("Revision $revision not implemented") end fund_args = fundamental_arguments(tt_jc) X_harm_coeff = zeros(MVector{5, Float64}) Y_harm_coeff = zeros(MVector{5, Float64}) s_harm_coeff = zeros(MVector{5, Float64}) @inbounds for frequency in frequencies arg = 0.0 @inbounds @simd for k in frequency.coefficients_idx @muladd arg += frequency.coefficients[k] * fund_args[k] end si = sin(arg) co = cos(arg) @inbounds for component in frequency.components @muladd ampl = component.s * si + component.c * co if component.variable == 1 X_harm_coeff[component.poweridx] += ampl elseif component.variable == 2 Y_harm_coeff[component.poweridx] += ampl else s_harm_coeff[component.poweridx] += ampl end end end X += @evalpoly(tt_jc, X_harm_coeff[1], X_harm_coeff[2], X_harm_coeff[3], X_harm_coeff[4], X_harm_coeff[5]) Y += @evalpoly(tt_jc, Y_harm_coeff[1], Y_harm_coeff[2], Y_harm_coeff[3], Y_harm_coeff[4], Y_harm_coeff[5]) s += @evalpoly(tt_jc, s_harm_coeff[1], s_harm_coeff[2], s_harm_coeff[3], s_harm_coeff[4], s_harm_coeff[5]) X = μas_to_rad(X) Y = μas_to_rad(Y) s = μas_to_rad(s) X, Y, s - X * Y / 2 end const precession_nutation_06 = precession_nutation_IAU2006_IAU2000A_R06 end # module
# MersenneTwister lives in the Random standard library (Julia >= 0.7).
using Random: MersenneTwister

struct DeepTreeEnv
    depth::Int64
    adj_list::Dict{Int64, Vector{Int64}}
end

function DeepTreeEnv(depth::Int64, model::String, rng::MersenneTwister)
    @assert depth > 3
    num_nodes = compute_num_nodes(depth)
    list_of_nodes = collect(1:num_nodes)
    # NOTE: the adjacency-list construction below is not implemented yet; each branch
    # should build `adj_list` for its model type and return `DeepTreeEnv(depth, adj_list)`.
    if model == "true"
        # TODO: build adj_list for the "true" model
    elseif model == "optimistic"
        # TODO: build adj_list for the "optimistic" model
    else
        # TODO: handle unknown model strings
    end
end

# Number of nodes in a full binary tree of the given depth: 2^0 + 2^1 + ... + 2^depth.
function compute_num_nodes(depth::Int64)
    sum([2^i for i in 0:depth])
end
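# Worked example (not in the original file): a full binary tree of depth 4 has
# 2^0 + 2^1 + 2^2 + 2^3 + 2^4 = 31 nodes, which is the smallest case the
# `depth > 3` assertion above admits.
@assert compute_num_nodes(4) == 31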
function PlotStairs(label_id, values::AbstractArray{T}, count::Integer, xscale::Real = 1.0, x0::Real = 0.0, offset::Integer = 0, stride::Integer = sizeof(T)) where {T<:ImPlotData} LibCImPlot.PlotStairs(label_id, values, count, xscale, x0, offset, stride) end function PlotStairs(label_id, values::AbstractArray{T}, count::Integer, xscale::Real = 1.0, x0::Real = 0.0, offset::Integer = 0, stride::Integer = sizeof(Float64)) where {T<:Real} LibCImPlot.PlotStairs(label_id, Float64.(values), count, xscale, x0, offset, stride) end function PlotStairs(label_id, x::AbstractArray{T}, y::AbstractArray{T}, count::Integer, offset::Integer = 0, stride::Integer = sizeof(T)) where {T<:ImPlotData} LibCImPlot.PlotStairs(label_id, x, y, count, offset, stride) end function PlotStairs(label_id, x::AbstractArray{T}, y::AbstractArray{T}, count::Integer, offset::Integer = 0, stride::Integer = sizeof(Float64)) where {T<:Real} LibCImPlot.PlotStairs(label_id, Float64.(x), Float64.(y), count, offset, stride) end function PlotStairs(y::AbstractArray{T}; label_id::String="", count::Integer=length(y), xscale::Real = 1.0, x0::Real = 0.0, offset::Integer=0, stride::Integer=1 ) where {T <: ImPlotData} LibCImPlot.PlotStairs(label_id, y, count, xscale, x0, offset, stride * sizeof(T)) end function PlotStairs(x::AbstractArray{T}, y::AbstractArray{T}; count::Integer = min(length(x), length(y)), offset::Integer = 0, stride::Integer = 1, label_id::String = "" ) where {T <: ImPlotData} LibCImPlot.PlotStairs(label_id, x, y, count, offset, stride * sizeof(T)) end
using LinearAlgebra using Distances # unittest performance # versus python hsic_varioustests_npy.py unittest: # 20sec elapsed / 52sec user (i.e. multithreaded) # using distmat1: 10.3sec # using Distances.jl pairwise 13.2sec # using distmatvec: 10.7sec # performance discussion see https://discourse.julialang.org/t/sum-of-hadamard-products/3531/5 # for comparison, do the non-vectorized calculation # points are in columns, dimensions(variables) across rows # note transposed from previous convention! function distmatslow(X,Y) m = size(X,2) @assert m==size(Y,2) #"size mismatch" D = Array{Float32}(undef,m,m) for ix = 1:m @inbounds for iy = 1:m x = X[:,ix] y = Y[:,iy] d = norm(x-y) D[ix,iy] = d*d end end return D end function distmat(X,Y) m = size(X,2) @assert m==size(Y,2) #"size mismatch" D = Array{Float32}(undef,m,m) for ix = 1:m @inbounds for iy = ix:m x = X[:,ix] y = Y[:,iy] d = x-y d2 = d'*d D[ix,iy] = d2 D[iy,ix] = d2 end end return D end # function distmatvec(X,Y) """ points are in columns, dimensions(variables) down rows """ m = size(X,2) @assert m==size(Y,2) #"size mismatch" XY = sum(X .* Y,dims=1) #XY = XY.reshape(m,1) #R1 = n_.tile(XY,(1,Y.shape[0])) R1 = repeat(XY',1,m) # outer=(1,m) not suported by autograd R2 = repeat(XY,m,1) #xy_ = X * Y' D = R1 + R2 - 2.f0 * X' * Y D end # python-ish version requires broadcasting different shapes - "outer sum" function distmat1(X,Y) A = sum(X .* X,dims=1)' B = sum(Y .* Y,dims=1) C = X' * Y A .+ B .- 2.f0 * C end function eye(n) Matrix{Float32}(I,n,n) end function hsic(X,Y,sigma) #= # 1/m^2 Tr Kx H Ky H X,Y have data in COLUMNS, each row is a dimension # NB is transposed from python =# #println("X=\n", X'[1:2,:]) #Yt = Y;; println("Y=\n", Yt[1:2:]) m = size(X,2) println("hsic between ",m,"points") H = eye(m) - (1.f0 / m) * ones(Float32,m,m) #Dxx = distmatvec(X,X) #Dyy = distmatvec(Y,Y) Dxx = distmat1(X,X) Dyy = distmat1(Y,Y) #Dxx = pairwise(SqEuclidean(),X,X,dims=2) #Dyy = pairwise(SqEuclidean(),Y,Y,dims=2) sigma2 = 2.f0 * sigma*sigma Kx = exp.( -Dxx / sigma2 ) Ky = exp.( -Dyy / sigma2 ) Kxc = Kx * H Kyc = Ky * H thehsic = (1.f0 / (m*m)) * sum(Kxc' .* Kyc) return thehsic # type float32 end # Pkg.add("NPZ") using NPZ # aug19: this matches the output of the unittest in hsic_varioustests_npy function unittest() X = Float32[0.1 0.2 0.3; 5 4 3] Y = Float32[1 2 3; 2 2 2] X = transpose(X) Y = transpose(Y) println(distmatslow(X,X)) println(distmat(X,X)) println(distmatslow(Y,Y)) println(distmat(Y,Y)) println("hsic(X,Y,0.5)=",hsic(X,Y,0.5f0)) # larger test data = npzread("/tmp/_data.npz") # todo convert arrays to float32 X = data["arr_0"] Y = data["arr_1"] X = convert(Array{Float32,2},X') Y = convert(Array{Float32,2},Y') println("X=\n", X'[1:2,:]) println("Y=\n", Y'[1:2,:]) println("\nindependent hsic(X,Y,1)=",hsic(X,Y,1.f0)) println("\nidentical hsic(X,X,1)=",hsic(X,copy(X),1.f0)) Y2 = X .* X println("Y2=",Y2'[1:2,:]) println("\nnonlinear hsic(X,Y*Y,1)=",hsic(X,Y2,1.f0)) end
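# A quick self-contained check of the estimator above (made-up data, not part of the
# original unittest): a strongly dependent pair of variables should score a larger
# HSIC value than an independent pair.
let
    X = rand(Float32, 3, 200)          # 3 dimensions, 200 points in columns
    Ydep = X .^ 2                      # nonlinear function of X
    Yind = rand(Float32, 3, 200)       # independent of X
    println("dependent   hsic = ", hsic(X, Ydep, 1.0f0))
    println("independent hsic = ", hsic(X, Yind, 1.0f0))
end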
using KernelAbstractions.Extras.LoopInfo: @unroll ##### ##### Periodic boundary conditions ##### @kernel function fill_periodic_west_and_east_halo!(c, H::Int, N) j, k = @index(Global, NTuple) @unroll for i = 1:H @inbounds begin c[i, j, k] = c[N+i, j, k] # west c[N+H+i, j, k] = c[H+i, j, k] # east end end end @kernel function fill_periodic_south_and_north_halo!(c, H::Int, N) i, k = @index(Global, NTuple) @unroll for j = 1:H @inbounds begin c[i, j, k] = c[i, N+j, k] # south c[i, N+H+j, k] = c[i, H+j, k] # north end end end @kernel function fill_periodic_bottom_and_top_halo!(c, H::Int, N) i, j = @index(Global, NTuple) @unroll for k = 1:H @inbounds begin c[i, j, k] = c[i, j, N+k] # top c[i, j, N+H+k] = c[i, j, H+k] # bottom end end end function fill_west_and_east_halo!(c, ::PBC, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) yz_size = size(c_parent)[[2, 3]] event = launch!(arch, grid, yz_size, fill_periodic_west_and_east_halo!, c_parent, grid.Hx, grid.Nx; dependencies=dep, kw...) return event end function fill_south_and_north_halo!(c, ::PBC, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) xz_size = size(c_parent)[[1, 3]] event = launch!(arch, grid, xz_size, fill_periodic_south_and_north_halo!, c_parent, grid.Hy, grid.Ny; dependencies=dep, kw...) return event end function fill_bottom_and_top_halo!(c, ::PBC, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) xy_size = size(c_parent)[[1, 2]] event = launch!(arch, grid, xy_size, fill_periodic_bottom_and_top_halo!, c_parent, grid.Hz, grid.Nz; dependencies=dep, kw...) return event end #### #### Implement single periodic directions (for Distributed memory architectures) #### @kernel function fill_periodic_west_halo!(c, H::Int, N) j, k = @index(Global, NTuple) @unroll for i = 1:H @inbounds begin c[i, j, k] = c[N+i, j, k] # west end end end @kernel function fill_periodic_east_halo!(c, H::Int, N) j, k = @index(Global, NTuple) @unroll for i = 1:H @inbounds begin c[N+H+i, j, k] = c[H+i, j, k] # east end end end @kernel function fill_periodic_south_halo!(c, H::Int, N) i, k = @index(Global, NTuple) @unroll for j = 1:H @inbounds begin c[i, j, k] = c[i, N+j, k] # south end end end @kernel function fill_periodic_north_halo!(c, H::Int, N) i, k = @index(Global, NTuple) @unroll for j = 1:H @inbounds begin c[i, N+H+j, k] = c[i, H+j, k] # north end end end @kernel function fill_periodic_bottom_halo!(c, H::Int, N) i, j = @index(Global, NTuple) @unroll for k = 1:H @inbounds begin c[i, j, k] = c[i, j, N+k] # bottom end end end @kernel function fill_periodic_top_halo!(c, H::Int, N) i, j = @index(Global, NTuple) @unroll for k = 1:H @inbounds begin c[i, j, N+H+k] = c[i, j, H+k] # top end end end function fill_west_halo!(c, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) yz_size = size(c_parent)[[2, 3]] event = launch!(arch, grid, yz_size, fill_periodic_west_halo!, c_parent, grid.Hx, grid.Nx; dependencies=dep, kw...) return event end function fill_east_halo!(c, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) yz_size = size(c_parent)[[2, 3]] event = launch!(arch, grid, yz_size, fill_periodic_east_halo!, c_parent, grid.Hx, grid.Nx; dependencies=dep, kw...) return event end function fill_south_halo!(c, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) xz_size = size(c_parent)[[1, 3]] event = launch!(arch, grid, xz_size, fill_periodic_south_halo!, c_parent, grid.Hy, grid.Ny; dependencies=dep, kw...) return event end function fill_north_halo!(c, ::PBC, arch, dep, grid, args...; kw...) 
c_parent = parent(c) xz_size = size(c_parent)[[1, 3]] event = launch!(arch, grid, xz_size, fill_periodic_north_halo!, c_parent, grid.Hy, grid.Ny; dependencies=dep, kw...) return event end function fill_bottom_halo!(c, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) xy_size = size(c_parent)[[1, 2]] event = launch!(arch, grid, xy_size, fill_periodic_bottom_halo!, c_parent, grid.Hz, grid.Nz; dependencies=dep, kw...) return event end function fill_top_halo!(c, ::PBC, arch, dep, grid, args...; kw...) c_parent = parent(c) xy_size = size(c_parent)[[1, 2]] event = launch!(arch, grid, xy_size, fill_periodic_top_halo!, c_parent, grid.Hz, grid.Nz; dependencies=dep, kw...) return event end
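# Illustrative serial sketch (not part of the kernels above): the same periodic
# west/east copy pattern on a plain 3D array with halo width H and interior size N
# in x, handy for checking the index arithmetic without KernelAbstractions.
function fill_periodic_west_and_east_halo_serial!(c, H, N)
    for k in axes(c, 3), j in axes(c, 2), i in 1:H
        c[i, j, k]     = c[N+i, j, k]   # west halo copies from the east end of the interior
        c[N+H+i, j, k] = c[H+i, j, k]   # east halo copies from the west end of the interior
    end
    return c
end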
using Printf import Flux: σ using ModelingToolkit using GalacticOptim using Optim using DiffEqFlux using NeuralPDE using Quadrature, Cubature, Cuba using Plots @parameters t,x @variables c(..) @derivatives Dt'~t @derivatives Dxx''~x @derivatives Dx'~x # Parameters v = 1 R = 0 D = 0.1 # diffusion t_max = 2.0 x_min = -1.0 x_max = 1.0 # Equations, initial and boundary conditions eqs = [ Dt(c(t, x)) ~ D * Dxx(c(t,x)) - Dx(c(t,x)) ] bcs = [ c(0, x) ~ cos(π*x) + 1.0, c(t, x_min) ~ c(t, x_max) ] # Space and time domains domains = [t ∈ IntervalDomain(0.0,t_max), x ∈ IntervalDomain(x_min,x_max) ] # Discretization nx = 32 dx = (x_max-x_min) / (nx - 1) dt = 0.01 # Neural network dim = length(domains) output = length(eqs) hidden = 8 chain = FastChain( FastDense(dim, hidden, σ), FastDense(hidden, hidden, σ), FastDense(hidden, 1)) strategy = GridTraining(dx=[dt,dx]) discretization = PhysicsInformedNN(chain, strategy=strategy) pde_system = PDESystem(eqs, bcs, domains, [t,x], [c]) prob = discretize(pde_system,discretization) cb = function (p,l) println("Current loss is: $l") return false end res = GalacticOptim.solve(prob,Optim.BFGS();cb=cb,maxiters=1200) # Plots phi = discretization.phi initθ = discretization.initθ acum = [0;accumulate(+, length.(initθ))] sep = [acum[i]+1 : acum[i+1] for i in 1:length(acum)-1] minimizers = [res.minimizer[s] for s in sep] ts,xs = [domain.domain.lower:dx:domain.domain.upper for domain in domains] anim = @animate for (i, t) in enumerate(0:dt:t_max) @info "Animating frame $i..." c_predict = reshape([phi([t, x], res.minimizer)[1] for x in xs], length(xs)) title = @sprintf("Advection-diffusion t = %.3f", t) plot(xs, c_predict, label="", title=title , ylims=(0., 2)) end gif(anim, "advection_diffusion_pinn.gif", fps=15) c_predict = reshape([ phi([0, x], res.minimizer)[1] for x in xs], length(xs)) plot(xs,c_predict) # Plot correct solution using JLD2 file = jldopen("advection_diffusion/simulation/cosine_advection_diffusion.jld2") iterations = parse.(Int, keys(file["timeseries/t"])) anim = @animate for (i, iter) in enumerate(iterations) @info "Animating frame $i..." Hx = file["grid/Hx"] x = file["grid/xC"][1+Hx:end-Hx] t = file["timeseries/t/$iter"] c = file["timeseries/c/$iter"][:] title = @sprintf("Advection-diffusion t = %.3f", t) p = plot(x, c .+ 1, linewidth=2, title=title, label="Oceananigans", xlabel="x", ylabel="Tracer", xlims=(-1, 1), ylims=(0, 2)) c_predict = reshape([phi([t, x], res.minimizer)[1] for x in xs], length(xs)) plot!(p, x, c_predict, linewidth=2, label="Neural PDE") end gif(anim, "advection_diffusion_comparison.gif", fps=15)
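# Reference check (not in the original script): with v = 1, D = 0.1 and these periodic
# boundary conditions the problem has the exact solution
#   c(t, x) = exp(-D * π^2 * t) * cos(π * (x - v*t)) + 1,
# so the learned solution can be compared against it directly.
c_exact(t, x) = exp(-D * π^2 * t) * cos(π * (x - v * t)) + 1.0
c_check = reshape([phi([0.5, x], res.minimizer)[1] for x in xs], length(xs))
plot(xs, c_check, label="Neural PDE", title="t = 0.5", ylims=(0, 2))
plot!(xs, c_exact.(0.5, xs), label="Exact", linestyle=:dash)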
module AutoARIMA using LinearAlgebra using Optim using Polynomials using RecipesBase using StaticArrays using Statistics using HypothesisTests export seriesA,seriesB,seriesB2,seriesC,seriesD,seriesE,seriesF,seriesG,dowj,wine,lake export autocovariance,autocovariance_matrix,autocorrelation,autocorrelation_matrix,partial_autocorrelation export isinversible, isstationary export innovations, levinson_durbin, least_squares, yule_walker, hannan_rissanen export boxcox, inv_boxcox, guerrero, difference, integrate export simulate, forecast, fit, toarma, k, residuals export aic, aicc, bic, mse, rmse, mae, mape export correlogram,partial_correlogram export AbstractModel, AbstractParams export ARParams, MAParams, ARMAParams, ARIMAParams, MSARIMAParams export ARModel, MAModel, ARMAModel export MA∞, AR∞ export boxjenkins include("datasets.jl") include("stats.jl") include("transforms.jl") include("abstract.jl") include("criteria.jl") include("simulate.jl") include("recipes.jl") include("ls.jl") include("arma.jl") include("ar.jl") include("ma.jl") include("arima.jl") include("sarima.jl") include("auto.jl") end
module GLM_ import MLJBase export OLSRegressor #, OLS, LinearRegression import ..GLM # strange syntax for lazy-loading const OLSFitResult = GLM.LinearModel OLSFitResult(coefs::Vector, b=nothing) = OLSFitResult(coefs, b) #### #### OLSRegressor #### mutable struct OLSRegressor <: MLJBase.Deterministic{OLSFitResult} fit_intercept::Bool # allowrankdeficient::Bool end function OLSRegressor(;fit_intercept=true) return OLSRegressor(fit_intercept) end # synonyms # const OLS = OLSRegressor # const LinearRegression = OLSRegressor #### #### fit/predict OLSRegressor #### function MLJBase.fit(model::OLSRegressor, verbosity::Int, X, y::Vector) Xmatrix = MLJBase.matrix(X) features = MLJBase.schema(X).names if model.fit_intercept fitresult = GLM.lm(hcat(Xmatrix, ones(eltype(Xmatrix), size(Xmatrix, 1), 1)), y) else fitresult = GLM.lm(Xmatrix, y) end coefs = GLM.coef(fitresult) ## TODO: add feature importance curve to report using `features` report = Dict(:coef => coefs[1:end-Int(model.fit_intercept)] , :intercept => ifelse(model.fit_intercept, coefs[end], nothing) , :deviance => GLM.deviance(fitresult) , :dof_residual => GLM.dof_residual(fitresult) , :stderror => GLM.stderror(fitresult) , :vcov => GLM.vcov(fitresult)) cache = nothing return fitresult, cache, report end function MLJBase.predict(model::OLSRegressor, fitresult::OLSFitResult, Xnew) Xmatrix = MLJBase.matrix(Xnew) model.fit_intercept && (Xmatrix = hcat(Xmatrix, ones(eltype(Xmatrix), size(Xmatrix, 1), 1))) return GLM.predict(fitresult, Xmatrix) end # metadata: MLJBase.load_path(::Type{<:OLSRegressor}) = "MLJModels.GLM_.OLSRegressor" MLJBase.package_name(::Type{<:OLSRegressor}) = "GLM" MLJBase.package_uuid(::Type{<:OLSRegressor}) = "38e38edf-8417-5370-95a0-9cbb8c7f171a" MLJBase.package_url(::Type{<:OLSRegressor}) = "https://github.com/JuliaStats/GLM.jl" MLJBase.is_pure_julia(::Type{<:OLSRegressor}) = :yes MLJBase.input_kinds(::Type{<:OLSRegressor}) = [:continuous, ] MLJBase.output_kind(::Type{<:OLSRegressor}) = :continuous MLJBase.output_quantity(::Type{<:OLSRegressor}) = :univariate end # module
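# Hypothetical usage sketch (not from the package; assumes an MLJBase version whose
# `matrix` and `schema` accept a NamedTuple of vectors as a column table; the data
# is made up):
#
#   X = (x1 = rand(100), x2 = rand(100))
#   y = 2 .* X.x1 .- X.x2 .+ 0.1 .* randn(100)
#   ols = OLSRegressor(fit_intercept = true)
#   fitresult, cache, report = MLJBase.fit(ols, 1, X, y)
#   yhat = MLJBase.predict(ols, fitresult, X)
#   report[:coef]        # fitted slopes, approximately [2.0, -1.0]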
# `Experiment`, `run_condition!` and `conjunctive_set` are assumed to be provided by
# the surrounding visual-search model; `DataFrame`/`groupby`/`combine` and `mean`
# require the imports below.
using DataFrames, Statistics

function run_simulation(set_sizes; fun = conjunctive_set, kwargs...)
    results = DataFrame[]
    for n in set_sizes
        # simulate 10,000 trials for the current set size
        experiment = Experiment(set_size = n, populate_visicon = fun, n_trials = 10^4)
        run_condition!(experiment; kwargs...)
        df = DataFrame(experiment.data)
        hit_rate = mean(df.target_present .== df.response)
        # mean response time per (target_present, response) cell
        g = groupby(df, [:target_present, :response])
        temp = combine(g, :rt => mean)
        temp[!, :distractors] .= n
        temp[!, :hit_rate] .= hit_rate
        push!(results, temp)
    end
    return vcat(results...)
end
export Apply """ Apply{J, O} <: SFunc{Tuple{SFunc{J, O}, J}, O} Apply represents an sfunc that takes two groups of arguments. The first group is a single argument, which is an sfunc to apply to the second group of arguments. # Additional supported operators - `support` - `support_quality` - `sample` - `logcpdf` - `compute_pi` - `send_lambda` # Type parameters - `J`: the input type of the *sfunc* that may be applied; that *sfunc* is the input type of the `Apply` - `O`: the output type of the *sfunc* that may be applied, which is also the output type of the `Apply` """ struct Apply{J <: Tuple, O} <: SFunc{Tuple{SFunc{J, O}, J}, O} end @impl begin struct ApplySupport end function support(::Apply{J,O}, parranges::NTuple{N,Vector}, size::Integer, curr::Vector{<:O}) where {J<:Tuple,O,N} result = Vector{O}() for sf in parranges[1] append!(result, support(sf, (parranges[2],), size, curr)) end return unique(result) end end @impl begin struct ApplySupportQuality end function support_quality(::Apply{J,O}, parranges) where {J,O} q = support_quality_rank(:CompleteSupport) for sf in parranges[1] imp = get_imp(MultiInterface.get_policy(), Support, sf, parranges[2], 0, O[]) q = min(q, support_quality_rank(support_quality(imp, sf, parranges[2]))) end return support_quality_from_rank(q) end end @impl begin struct ApplySample end function sample(::Apply{J,O}, input::Tuple{SFunc{J,O}, J})::O where {J<:Tuple,O} return sample(input[1], input[2]) end end @impl begin struct ApplyLogcpdf end function logcpdf(::Apply{J,O}, i::Tuple{SFunc{J,O}, J}, o::O)::AbstractFloat where {J<:Tuple,O} return logcpdf(i[1], i[2], o) end end # WARNING: THIS LOGIC DOES NOT WORK WITH MORE THAN ONE PARENT @impl begin struct ApplyComputePi end function compute_pi(::Apply{J,O}, range::__OptVec{<:O}, parranges::NTuple{N,Vector}, incoming_pis::Tuple)::Dist{<:O} where {N,J<:Tuple,O} sfrange = parranges[1] argsrange = parranges[2] sfpi = incoming_pis[1] argspi = incoming_pis[2] result = zeros(Float64, length(range)) for sf in sfrange p1 = cpdf(sfpi, (), sf) p2 = compute_pi(sf, range, (argsrange,), (argspi,)) p3 = [p1 * cpdf(p2, (), x) for x in range] result .+= p3 end return Cat(range, result) end end # WARNING: THIS LOGIC DOES NOT WORK WITH MORE THAN ONE PARENT @impl begin struct ApplySendLambda end function send_lambda(::Apply{J,O}, lambda::Score{<:O}, range::__OptVec{<:O}, parranges::NTuple{N,Vector}, incoming_pis::Tuple, parent_idx::Integer)::Score where {N,J<:Tuple,O} @assert parent_idx == 1 || parent_idx == 2 sfrange = parranges[1] argsrange = parranges[2] sfpi = incoming_pis[1] argspi = incoming_pis[2] if parent_idx == 2 # For each x, we must sum over the sfunc argument compute P(y|x) for each possible sfunc result = Vector{Float64}() for args in argsrange resultpieces = Vector{Float64}() for sf in sfrange sp = logcpdf(sfpi, (), sf) for y in range a = isa(args, Tuple) ? args : tuple(args) push!(resultpieces, sp + logcpdf(sf, a, y) + get_log_score(lambda, y)) end end push!(result, logsumexp(resultpieces)) end return LogScore(argsrange, result) else # parent_idx == 1 # This is simpler; we must sum over the arguments, which is achieved by the embedded compute_pi result = Vector{Float64}() for sf in sfrange resultpieces = Vector{Float64}() ypi = compute_pi(sf, range, (argsrange,), (argspi,)) for y in range push!(resultpieces, logcpdf(ypi, (), y) + get_log_score(lambda, y)) end push!(result, logsumexp(resultpieces)) end return LogScore(sfrange, result) end end end
module MatrixEquations const BlasFloat = Union{Float64,Float32,ComplexF64,ComplexF32} const BlasReal = Union{Float64,Float32} const BlasComplex = Union{ComplexF64,ComplexF32} using LinearAlgebra using LinearAlgebra: require_one_based_indexing import LinearAlgebra: mul! using LinearMaps include("lapackutil.jl") using .LapackUtil: tgsyl!, lanv2, ladiv, lag2, lacn2!, safemin, smlnum export MatrixEquationsMaps export utqu, utqu!, qrupdate!, rqupdate!, isschur, triu2vec, vec2triu, utnormalize! export lanv2, ladiv, lag2, lacn2! export lyapc, lyapd, lyapcs!, lyapds! export plyapc, plyaps, plyapcs!, plyapd, plyapds! export arec, ared, garec, gared export sylvc, sylvd, gsylv, sylvcs!, sylvds!, gsylvs! export sylvsys, dsylvsys, sylvsyss!, dsylvsyss!, tgsyl! export sylvckr, sylvdkr, gsylvkr, sylvsyskr, dsylvsyskr export opnorm1, opnorm1est, oprcondest, opsepest export lyapop, invlyapop, sylvop, invsylvop, sylvsysop, invsylvsysop, trmatop include("meutil.jl") include("sylvester.jl") include("lyapunov.jl") include("riccati.jl") include("sylvkr.jl") include("plyapunov.jl") include("meoperators.jl") include("condest.jl") end
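# Hypothetical usage sketch (not part of the module; the matrices are made up and the
# solver contract is assumed from the exported names above): solve the continuous
# Lyapunov equation A*X + X*A' + C = 0.
#
#   using MatrixEquations, LinearAlgebra
#   A = [-2.0 1.0; 0.0 -3.0]
#   C = [1.0 0.0; 0.0 1.0]
#   X = lyapc(A, C)
#   norm(A*X + X*A' + C)   # approximately 0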
# DTU02435 Decision Making under Uncertainty # Assignment 3, Task 2 # Out-of-Sample Test of the Plan # Edward J. Xu # May 5th, 2019 push!(LOAD_PATH, "$(homedir())/Desktop/StochasticAirplaneRent") cd("$(homedir())/Desktop/StochasticAirplaneRent") using JuMP # using GLPKMathProgInterface # using Gurobi using CPLEX using CSV using DataFrames function processData() datf_cost = CSV.read("Data/OperationCosts.csv", header = true, delim = ',') datf_cap = CSV.read("Data/PlaneCapacity.csv", header = ["p", "cap"], delim = ',') datf_route = CSV.read("Data/DemandsRoute.csv", header = true, delim = ',') datf_costAdmin = CSV.read("Data/CostAdquisition.csv", header = ["p", "cost"], delim = ',') datf_demand = CSV.read("Data/OutofSample.csv", header = true, delim = ',') # Calculate the demand matrix mat4_demand = zeros(4, 10, 12, 500) for i = 1: 4 for j = 1: 10 for w = 1: 500 for t = 1: 12 mat4_demand[i, j, t, w] = datf_demand[t, w+1] * datf_route[i, j+1] end end end end return (datf_cost, datf_cap.cap, datf_costAdmin.cost, mat4_demand) end function optim(vec_y, datf_cost, vec_cap, vec_costAdmin, mat_demand) model = Model(solver = CplexSolver(CPX_PARAM_SCRIND = 0)) # model = Model(solver = GurobiSolver(Presolve=0)) # model = Model(solver = GLPKSolverMIP()) @variable(model, mat3_z[1:4, 1:10, 1:10] >= 0, Int) @variable(model, mat_m[1:4, 1:10] >= 0) @objective(model, Min, sum(datf_cost.cost[.&((datf_cost[:i] .== i), (datf_cost[:j] .== j), (datf_cost[:p] .== p))][1] * mat3_z[i, j, p] for p = 1:10, i = 1:4, j = 1:10) + sum(mat_m) * 1000) @constraint(model, [i = 1:4, j = 1:10], mat_demand[i, j] - sum(mat3_z[i, j, p] * vec_cap[p] for p = 1:10) <= mat_m[i, j]) @constraint(model, [p = 1:10], sum(mat3_z[i, j, p] for i = 1:4, j = 1:10) <= vec_y[p]) solve(model) result_obj = getobjectivevalue(model) return result_obj end function testOutOfSample(vec_y, datf_cost, vec_cap, vec_costAdmin, mat4_demand) mat_result_obj = zeros(500, 12) vec_result_obj = zeros(500) ## 1, Begin Optimization for w = 1:500 for t = 1:12 mat_result_obj[w, t] = optim(vec_y, datf_cost, vec_cap, vec_costAdmin, mat4_demand[:, :, t, w]) end vec_result_obj[w] = sum(mat_result_obj[w, tt] for tt = 1:12) + sum(vec_y[p] * vec_costAdmin[p] for p = 1: 10) println("Result: obj = $(vec_result_obj[w]), when w = $(w).") end return vec_result_obj end function main() (datf_cost, vec_cap, vec_costAdmin, mat4_demand) = processData() println("################################################################################\n", "############### 1/4, Out-of-Sample Test of Deterministic Program ###############\n", "################################################################################") vec_y_1 = [0.0, 1.0, 2.0, 2.0, 0.0, 12.0, 5.0, 4.0, 8.0, 8.0] timeStart = time() vec_result_obj_1 = testOutOfSample(vec_y_1, datf_cost, vec_cap, vec_costAdmin, mat4_demand) println("Elapsed time: $(time()-timeStart) seconds.") CSV.write("result/out-of-sample_1.csv", DataFrame(vec_result_obj_1'), writeheader = false, delim = ',') println("################################################################################\n", "################# 2/4, Out-of-Sample Test of Stochastic Program ################\n", "################################################################################") vec_y_2 = [0.0, 7.0, 2.0, 3.0, 0.0, 12.0, 4.0, 4.0, 8.0, 8.0] timeStart = time() vec_result_obj_2 = testOutOfSample(vec_y_2, datf_cost, vec_cap, vec_costAdmin, mat4_demand) println("Elapsed time: $(time()-timeStart) seconds.") CSV.write("result/out-of-sample_2.csv", DataFrame(vec_result_obj_2'), 
writeheader = false, delim = ',') println("################################################################################\n", "################## 3/4, Out-of-Sample Test of Robust Program ###################\n", "################################################################################") vec_y_3 = [0.0, 3.0, 1.0, 2.0, 0.0, 12.0, 2.0, 8.0, 8.0, 8.0] timeStart = time() vec_result_obj_3 = testOutOfSample(vec_y_3, datf_cost, vec_cap, vec_costAdmin, mat4_demand) println("Elapsed time: $(time()-timeStart) seconds.") CSV.write("result/out-of-sample_3.csv", DataFrame(vec_result_obj_3'), writeheader = false, delim = ',') println("################################################################################\n", "################################### 4/4, End ###################################\n", "################################################################################") end main()
#Run all the tests together require("Ito/test/calendars.jl") require("Ito/test/statistics.jl") require("Ito/test/integration.jl") require("Ito/test/term_structure.jl") require("Ito/test/currencies.jl")
module FunctionDependencies const dependency_table = Dict{Symbol,Symbol}( :√ => :sqrt, :adjoint => :conj, :< => :isless, :> => :<, :isgreater => :isless, :ismore => :>, :≥ => :(>=), :≤ => :(<=), :min => :isless, :max => :isless, :cmp => :isless, :isequal => :(==), :(!=) => :(==), :≠ => :(!=), :(!==) => :(===), :≢ => :(!==), :≡ => :(===), :⊻ => :xor, :⊼ => :nand, :⊽ => :nor, :% => :rem, :÷ => :div, :mod1 => :mod, :∈ => :in, ) maybe_add_dependent_funccall!(funccalls::Set{Symbol}, ::Nothing) = funccalls function maybe_add_dependent_funccall!(funccalls::Set{Symbol}, call) push!(funccalls, call) maybe_add_dependent_funccall!(funccalls, get(dependency_table, call, nothing)) funccalls end function maybe_add_dependent_funccalls!(funccalls::Set{Symbol}) calls_to_add = intersect(keys(dependency_table), funccalls) for call in calls_to_add maybe_add_dependent_funccall!(funccalls, dependency_table[call]) end funccalls end end
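# A minimal usage sketch (not part of the original file): it assumes the
# `FunctionDependencies` module above has been evaluated in the current session
# and shows how transitive dependencies get pulled into the set.
funccalls = Set([:min, :%])
FunctionDependencies.maybe_add_dependent_funccalls!(funccalls)
# `:min` pulls in `:isless` and `:%` pulls in `:rem`, so now
# funccalls == Set([:min, :isless, :%, :rem])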
# A script for loading unregistered dependencies before CI action using Pkg pkg"dev https://github.com/kylejbrown17/GraphUtils.jl.git"
module transitions

using conv_code
using encoder
using utils
using code_structs

export transition_list

#=
This function must return an Array{Transition}, where Transition.in_bit is an
input bit, Transition.output is the output generated by that input bit, and
Transition.next_state is the next state (in binary notation).
The 'origin_state' corresponding to the state with:
- all memories equal to zero is 1
- all memories equal to one is 2^(code.quant_mem)
=#
function transition_list(code::ConvolutionalCode, origin_state::Int64)
    if haskey(transitions_cache, code)
        return transitions_cache[code][origin_state]
    end
    transition_list = Array{Array{Transition, 1}, 1}(undef, 2^code.quant_mem)
    for i=0:2^(code.quant_mem) - 1
        transition_list[i+1] = []
        binary = dec2bin(i, code.quant_mem+1)
        binary[1] = 0
        push!(transition_list[i+1], Transition(0, current_out(code, binary), i >> 1 + 1))
        binary[1] = 1
        push!(transition_list[i+1], Transition(1, current_out(code, binary), 2^(code.quant_mem-1) + (i >> 1) + 1 ))
    end
    transitions_cache[code] = transition_list
    return transitions_cache[code][origin_state]
end

transitions_cache = Dict()

end
include("../gcc_common.jl") build_and_upload_gcc(v"4.8.5")
""" refine_triangulation_images(triang_vertices, triang_simplex_indices, splitting_rules, image_vertices, split_indices = Int[]) Refine a triangulation (composed of simplices), also updating the images of the simplices in the triangulation. - `triang_vertices::Array{Float64, 2} is a (n_triangulation_vertices x embeddingdim) sized array where each row is a vertex of the triangulation. `triang_simplex_indices::Array{Int, 2}` is an array of size (n_trinagulation_simplices x (embeddingdim+1)). Each row of simplex_indices contains the indices of the vertices (rows of the vertices array) furnishing the corresponding simplex. `image_vertices::Array{Float64, 2} is a (n_triangulation_vertices x embeddingdim) sized array where each row is a vertex of the triangulation. `split_indices::Array{Int, 1}` are the row numbers of simplex_indices indicating which simplices should be split. """ function refine_triangulation_images(triang_vertices, triang_simplex_indices, image_vertices, split_indices, k) if length(split_indices) == 0 return triang_vertices, triang_simplex_indices, image_vertices end # The number of simplices to split n_split_simplices = length(split_indices) # The dimension of the space E = size(triang_vertices, 2) # Rules for forming the strictly new vertices of the subtriangulation splitting_rules = simplicial_subdivision(k, E) rules = splitting_rules[1] # Array where each row represents one of the new simplices in the splitted simplex subtriangulation = splitting_rules[2] # How many new vertices are created each split? n_newvertices_eachsplit = size(rules, 1) # We need an array that can accomodate all of them. Each row in this # array will be a new vertex. Stacks of n_newvertices_eachsplit * E arrays. # We have as many as we have simplices to split. new_vertices = zeros(n_newvertices_eachsplit * n_split_simplices, E) new_imagevertices = zeros(n_newvertices_eachsplit * n_split_simplices, E) # Fill the array by looping over all simplices that we want to split for i = 1:n_split_simplices # Figure out what the row indices corresponding to the ith simplex # must be. Marks the beginning of each of the simplex stacks in new_vertices ind = n_newvertices_eachsplit * (i -1) # Index of the simplex we need to split simplex_idx = split_indices[i] # Get the vertices of the simplex currently being splitted. Each of the # n_newvertices_eachsplit new vertices will be a linear combination # of these vertices. Each row is a vertex. vertices = triang_vertices[triang_simplex_indices[simplex_idx, :], :] imagevertices = image_vertices[triang_simplex_indices[simplex_idx, :], :] # Generate the strictly new vertices for each sub for j = 1:n_newvertices_eachsplit # The index for a particular new vertex of the ith new simplex ind_newvertex = ind + j # Compute the jth vertex of the subtriangulation of the ith simplex # Go the jth new subsimplex of the ith splitted simplex. 
The entries of this # vector jth_subsimplex = rules[j, :] # Pick the corresponding original vertices with indices contained in rules[j, :] original_vertices = vertices[rules[j, :], :] original_vertices_image = imagevertices[rules[j, :], :] new_vertices[ind_newvertex, :] = sum(original_vertices, dims=1) ./ k new_imagevertices[ind_newvertex, :] = sum(original_vertices_image, dims=1) ./ k end end # Find the unique new vertices new_vertices_noreps = unique(new_vertices, 1) new_imagevertices_noreps = Array{Float64}(size(new_vertices_noreps, 1), E) Ind = Vector{Int}(size(new_vertices, 1)) for i = 1:size(new_vertices_noreps, 1) count = 0 for j = 1:size(new_vertices, 1) # run over the possibly repeated vertices if new_vertices_noreps[i, :] == new_vertices[j, :] Ind[j] = i count = count + 1 if count == 1 new_imagevertices_noreps[i, :] = new_imagevertices[j, :] end end end end # TEST: assert all zeros. #@show unique(new_imagevertices, 1) - new_imagevertices_noreps # Combine old and newly introduced vertices num_vertices_beforesplit = size(triang_vertices, 1) triang_vertices = vcat(triang_vertices, new_vertices_noreps) image_vertices = vcat(image_vertices, new_imagevertices_noreps) # Update the Ind array, so that we start at the new vertices Ind = Ind + num_vertices_beforesplit num_simplices_each_split = size(subtriangulation, 1) # The subsimplices formed by the splitting. Each row contains E + 1 indices referencing # the vertices furnishing that particular subsimplex (now found in the updated # triang_vertices array). newtriangulation = Array{Float64}(num_simplices_each_split * n_split_simplices, E + 1) # For each simplex that we need to split, for i = 1:n_split_simplices # The beginning of the stack we need to fill. index = num_simplices_each_split * (i - 1) # Figure out what the row indices corresponding to the ith simplex # must be. Marks the beginning of each of the simplex stacks in new_vertices ind = n_newvertices_eachsplit * (i - 1) # Index of the simplex we need to split simplex_idx = split_indices[i] # Pick the indices of the original vertices. Should be a column vector. inds_original_vertices = triang_simplex_indices[simplex_idx, :] # Indices of the new vertices. Should be a column vector inds_new_vertices = Ind[(ind + 1):(ind + n_newvertices_eachsplit)] inds_all_vertices = vcat(inds_original_vertices, inds_new_vertices) # Go through each of the subsimplices formed by splitting that simplex for j = 1:num_simplices_each_split newtriangulation[index + j, :] = inds_all_vertices[subtriangulation[j, :]] end end # Indices of the simplices that are not split untouched_indices = complementary(split_indices, size(triang_simplex_indices, 1)) triang_simplex_indices = round.(Int, vcat(triang_simplex_indices[untouched_indices, :], newtriangulation)) return triang_vertices, triang_simplex_indices, image_vertices, untouched_indices end
# Simulate a classic beamformer with a circular array # before running: # include("../src/DirectionFinding.jl") # using <favorite plotting program> using DirectionFinding begin fc = 1e9 λ = 299.79e6/fc N₀ = 0.01 a1 = pi/8 ca = circulararray(11, 2λ, σ²=0.0001) s1 = SignalSource(a1, halfsine_train(1e6)) sim = Simulation(ca, fc, N₀, s1) doa = cbf(sim, 2.00013e9, 100) println("Source Angle: $a1") println("Found: $(doa > 0 ? doa : doa+2π)") end
"""
Module for the download and processing of atmospheric data gathered by measuring stations located in Friuli Venezia Giulia, Italy.
"""
module GroundDataFVG

#=
Access to the Friuli resources:
    https://www.dati.friuliveneziagiulia.it/resource/qp5k-6pvm.csv

Air quality:
    PM10:               qp5k-6pvm
    PM2.5:              d63p-pqpr
    Ozone:              7vnx-28uy
    Carbon monoxide:    t274-vki6
    Sulfur dioxide:     2zdv-x7g2
    Nitrogen dioxide:   ke9b-p6z2
    Pollen:             svph-8w2g

Weather:
    Sensor list:        498i-2j88
    Forecasts and data: j654-ykm6 (probably not working, the link is external and has no download)
    Station data:       4wxn-35av (probably not working, the link is external and has no download)

Link:
    Station data: https://dev.meteo.fvg.it/xml/stazioni/GRA.xml
=#

using CombinedParsers
using CombinedParsers.Regexp
using CSV
using DataFrames
using Dates
using HTTP
using JSONTables

export getData, getRegionAttributes, getRegionIds, getRegionStationsInfo

@syntax meteo_data = Repeat(
    "<", re"[^ >]+",
    Optional( re" [^ =]+=\"[^ \"]+\"" ),
    Optional( re" [^ =]+=\"[^\"]+\"" ),
    ">",
    Either( Numeric(Int64), Numeric(Float64), re"[^>]+" ),
    "</", re"[^>]+", ">"
)

"""
    getRegionAttributes( [ type::Symbol=:METEO ] )

Obtain the names of the columns of the region's dataframe required by `GroundData.createMap`'s `attributes` parameter to create `GroundData.standardize`'s `map` parameter.
"""
function getRegionAttributes( type::Symbol=:METEO )
    return type == :METEO ? [ :param, :unit, :value, nothing, :observation_time, :longitude, :latitude, :station_altitude, nothing, nothing, :rmh ] :
        type == :AIRQUALITY ? [ :parametro, :unita_misura, :value, nothing, :data_misura, :longitudine, :latitudine, nothing, :dati_insuff, nothing ] :
        throw(DomainError(type, "`type` must be either `:METEO` OR `:AIRQUALITY`."))
end

"""
    getRegionIds( [ type::Symbol=:METEO ] )

Obtain the names of the columns of the dataframe required for `GroundData.standardize`'s `bridge` parameter.
"""
function getRegionIds( type::Symbol=:METEO )
    return type == :METEO ? :nome :
        type == :AIRQUALITY ? nothing :
        throw(DomainError(type, "`type` must be either `:METEO` OR `:AIRQUALITY`."))
end

"""
    getRegionStationsInfo( [ type::Symbol=:METEO ] )

Obtain the names of the columns of the region's stations dataframe required by `GroundData.createMap`'s `attributes` parameter to be used in `GroundData.generateUuidsTable`.
"""
function getRegionStationsInfo( type::Symbol=:METEO )
    return type == :METEO ? [ nothing, :nome, :longitude, :latitude ] :
        type == :AIRQUALITY ? [ nothing, :ubicazione, :longitudine, :latitudine ] :
        throw(DomainError(type, "`type` must be either `:METEO` OR `:AIRQUALITY`."))
end

"""
    getMeteoStationsData()

Obtain a `DataFrame` describing the meteo stations in Friuli Venezia Giulia, Italy.
"""
function getMeteoStationsData()
    return CSV.read(split(@__DIR__, "src")[1]*"resources\\Ground stations data\\stazioni_meteoclimatiche-FVG.csv", DataFrame)
end

# prec_type
#   0: none; 1: rain; 2: rain and snow; 3: snow
# cloudiness
#   0: n/a; 1: clear; 2: partly cloudy; 3: variable; 4: cloudy; 5: overcast

"""
    getMeteoData()

Obtain the data of the meteorological stations in Friuli Venezia Giulia as a `DataFrame`.
""" function getMeteoData() resources = [ "ARI", "BAR", "BGG", "BIC", "BOA", "BOR", "BRU", "CAP", "CDP", "CER", "CHI", "CIV", "CMT", "COD", "COR", "ENE", "FAG", "FOS", "FSP", "GEM", "GRA", "GRG", "GRM", "LAU", "LIG", "LSR", "MAT", "MGG", "MNF", "MUS", "PAL", "PDA", "PIA", "213200", "POR", "PRD", "RIV", "SAN", "SGO", "SPN", "TAL", "TAR", "TOL", "TRI", "UDI", "VIV", "ZON" ] # prec_type = [ "nulla", "pioggia", "pioggia e neve", "neve" ] # cloudiness = [ "n.d.", "sereno", "poco nuvoloso", "variabile", "nuvoloso", "coperto" ] data_str = String[] for res in resources try page = HTTP.get("https://dev.meteo.fvg.it/xml/stazioni/$res.xml") push!( data_str, String(page.body) ) catch e if !isa( e, HTTP.ExceptionRequest.StatusError ) throw(e) end end end data_split = @. replace( replace( getindex( split( data_str, r"</?meteo_data>" ), 2 ), r"\n *" => "" ), r"<!--[^-]+-->" => "" ) data_parse = meteo_data.(data_split) vect = Dict[] for data in data_parse # Attributes of a single station, they are shared between all the parameters measured by the station others = [ Symbol( String( attribute[2] ) ) => attribute[6] isa Number ? attribute[6] : String( attribute[6] ) for attribute in data[1:6] ] for attribute in data[7:end] # Parameters measured by each station dict = push!( Dict( :param => !ismissing( attribute[4] ) ? String( attribute[4][5] ) : String( attribute[2] ), :value => attribute[6] isa Number ? attribute[6] : String( attribute[6] ), :unit => !ismissing(attribute[3]) ? String( attribute[3][5] ) : missing ), others... ) push!( vect, dict ) end end df = select!( DataFrame(vect), Not(2, 7) ) rel_heights = [ length( split( param, " a " ) ) == 2 ? split( param, " a " )[2] : "0m" for param in df[:, :param] ] transform!( df, [:station_name] => ByRow( x -> x = uppercase(x) ) => :nome, [:observation_time] => ByRow( x -> DateTime( x[1:14], "dd/mm/yyyy H.M" ) ) => :observation_time ) insertcols!( df, :rmh => rel_heights ) return df end """ getAQData() Obtain data on airquality gathered form measuring stations in Friuli Venezia Giulia. """ function getAQData() codes = [ "qp5k-6pvm" , "d63p-pqpr", "7vnx-28uy", "t274-vki6", "2zdv-x7g2", "ke9b-p6z2" ] params = [ "PM10", "PM2.5", "Ozono", "Monossido di carbonio", "Biossido di zolfo", "Biossido di Azoto" ] val_types = [ "media_giornaliera", "media_giornaliera", "media_oraria_max", "media_mobile_8h_max", "media_giornaliera", "media_oraria_max" ] data = [ DataFrame( CSV.File( HTTP.get( "https://www.dati.friuliveneziagiulia.it/resource/$code.csv" ).body ) ) for code in codes ] for (i, (df, param)) in enumerate(zip(data, params)) rename!( df, val_types[i] => "value" ) insertcols!(df, :type => val_types[i] ) !in( "parametro", names(df) ) && insertcols!( df, :parametro => param ) end dataframe = reduce( (x, y) -> vcat( x, y, cols=:intersect ), data ) return dataframe end """ getData(; <keyword arguments> ) Obtain data of category `type` and source of category `kind`. # Arguments - `type::Symbol=:METEO`: defines the type of data to be downloaded, it must either be `:METEO` or `:AIRQUALITY`. - `kind::Symbol=:STATIONS`: defines if the data to be downloaded has to regard information on the stations or their actual measurements, it must either be `:STATIONS` or `:SENSORS`. 
""" function getData(; type::Symbol=:METEO, kind::Symbol=:STATIONS ) if type == :METEO if kind == :STATIONS return getMeteoStationsData() elseif kind == :SENSORS return getMeteoData() else throw(DomainError(kind, "`kind` must either be `:STATIONS` or `:SENSORS`.")) end elseif type == :AIRQUALITY return getAQData() else throw(DomainError(type, "`type` must either be `:METEO` or `:AIRQUALITY`.")) end end # ressta = getData( type=:METEO, source=:STATIONS ) # ressen = getData( type=:METEO, source=:SENSORS ) # ressen = getData( type=:AIRQUALITY ) end # module
""" `densityplot(x, y; nargs...)` → `Plot` Description ============ Draws a density plot for the given points. It uses the first parameter `x` (which should be a vector or range) to denote the horizontal position of each point, and the second parameter `y` (which should also be a vector or range) as their vertical position. This means that the two vectors have to have the same length. Usage ====== densityplot(x, y; title = "", name = "", width = 40, height = 15, border = :solid, xlim = [0, 0], ylim = [0, 0], margin = 3, padding = 1, color = :white, labels = true) Arguments ========== - **`x`** : The horizontal dimension for each point. - **`y`** : The vertical dimension for each point. - **`title`** : Text to display on the top of the plot. - **`name`** : Annotation of the current drawing to displayed on the right - **`width`** : Number of characters per row that should be used for plotting. - **`height`** : Number of rows that should be used for plotting. Not applicable to `barplot`. - **`border`** : The style of the bounding box of the plot. Supports `:solid`, `:bold`, `:dashed`, `:dotted`, `:ascii`, and `:none`. - **`xlim`** : Plotting range for the x coordinate. `[0, 0]` stands for automatic. - **`ylim`** : Plotting range for the y coordinate. `[0, 0]` stands for automatic. - **`margin`** : Number of empty characters to the left of the whole plot. - **`padding`** : Space of the left and right of the plot between the labels and the canvas. - **`color`** : Color of the drawing. Can be any of `:blue`, `:red`, `:yellow` - **`labels`** : Can be used to hide the labels by setting `labels=false`. Returns ======== A plot object of type `Plot{DensityCanvas}` Author(s) ========== - Christof Stocker (Github: https://github.com/Evizero) Examples ========= julia> densityplot(randn(1000), randn(1000), title = "Density Plot") Density Plot ┌────────────────────────────────────────┐ 2.9 │ ░ │ │ ░ ░ ░ │ │ ░░░ ░ ░ ░ ░ │ │ ░░░ ░░▒░▒░░░ ▒ ░ │ │ ░░ ░░ ░▒░░▒▒▒▒░░░░░░░ │ │ ░░ ░▒░░░░░▓▒▒░▒░▒░▓░▒░░░ │ │ ░ ░░░░▓▓▒▓▒▒█▒▓▒▒▓▒▒░▒ ░ ░ │ │ ░ ▒▒▒▒▓▓▒▓▓▓▓▒█▒▒░░░░░░ ░ │ │ ░ ░░░░░░░▒▒▓░░▓▒█▒▒▓▓▒▒ ░ ░ │ │ ░ ░░▒▒░░▒░░▒░░░░ ░░ ░ ░ │ │ ░ ░ ░ ░░░░░░ ░░ ░░ ░ │ │ ░ ░ ░ ░ ░ ░ ░░░ │ │ ░░ ░ │ │ │ -3.3 │ │ └────────────────────────────────────────┘ -3.4 2.9 See also ========= `Plot`, `scatterplot`, `DensityCanvas` """ function densityplot( x::AbstractVector, y::AbstractVector; color::Symbol = :white, kw...) X = convert(Vector{Float64}, x) Y = convert(Vector{Float64}, y) new_plot = Plot(X, Y, DensityCanvas; grid = false, kw...) points!(new_plot, X, Y, color) end function densityplot!( plot::Plot{<:Canvas}, x::AbstractVector, y::AbstractVector; color::Symbol = :white, kw...) X = convert(Vector{Float64}, x) Y = convert(Vector{Float64}, y) points!(plot, X, Y, color) end
export iauCpv """ Copy a position/velocity vector. This function is part of the International Astronomical Union's SOFA (Standards Of Fundamental Astronomy) software collection. Status: vector/matrix support function. Given: pv double[2][3] position/velocity vector to be copied Returned: c double[2][3] copy Called: iauCp copy p-vector This revision: 2013 June 18 SOFA release 2018-01-30 Copyright (C) 2018 IAU SOFA Board. See notes at end. """ # void iauCpv(double pv[2][3], double c[2][3]) function iauCpv(pv::AbstractMatrix{<:Real}) # Allocate return value c = zeros(Float64, 3, 2) ccall((:iauCpv, libsofa_c), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}), convert(Array{Float64, 2}, pv'), c) return SMatrix{2,3}(c') end
using MPI, SeisNoise, CUDA, Serialization, Dates function corr2serial(F::FFTData,arr::Array{FFTData,1},maxlag::Real,CORRDIR::String) N = length(arr) for ii = 1:N C = correlate(F,arr[ii],maxlag) stack!(C,allstack=true) outpath = joinpath(CORRDIR,F.name,C.name) serialize(outpath,cpu(C)) end return nothing end function all2all(arr1::Array{FFTData,1},arr2::Array{FFTData,1},maxlag::Real,CORRDIR::String) N = length(arr1) for ii = 1:N corr2serial(arr1[ii],arr2,maxlag,CORRDIR) end return nothing end function one2all(arr::Array{FFTData,1},maxlag::Real,CORRDIR::String) N = length(arr) for ii = 1:N-1 corr2serial(arr[ii],arr[ii+1:end],maxlag,CORRDIR) end return nothing end function freemem!(arr::Array{FFTData,1}) N = length(arr) for ii = 1:N CUDA.unsafe_free!(arr[ii].fft) end return nothing end function loadFFT(files::Array{String,1}) N = length(files) arr = Array{FFTData}(undef,N) for ii = 1:N arr[ii] = deserialize(files[ii]) end return arr end function splitFFT(files::AbstractArray,Nper::Int) Nfiles = length(files) splits = ceil(Int,Nfiles/Nper) out = [] for ii = 1:splits startind = (ii-1)*Nper + 1 endind = min(startind + Nper - 1,Nfiles) push!(out,files[startind:endind]) end return out end function splitCC(splits::AbstractArray,maxlag::Real,CORRDIR::String) # load first split on the GPU arr1 = loadFFT(splits[1]) .|> gpu N = length(splits) for ii = 1:N # xcorr all in first array println("Correlating split $ii $(now())") data = Channel{AbstractArray}(1) @sync begin @async one2all(arr1,maxlag,CORRDIR) @async put!(data,loadFFT(splits[N])) end # load other arrays arr2 = take!(data) .|> gpu for jj = N:-1:ii+1 println("Correlating split $jj $(now())") data = Channel{AbstractArray}(1) # do compute & I/O at same time @sync begin @async all2all(arr1,arr2,maxlag,CORRDIR) @async begin loadind = jj > ii + 1 ? jj - 1 : jj put!(data,loadFFT(splits[loadind])) end end # transfer CPU arr to gpu freemem!(arr2) arr2 = take!(data) .|> gpu end # transfer last arr2 to arr1 on GPU freemem!(arr1) arr1 = deepcopy(arr2) freemem!(arr2) end return nothing end
""" Created on Fri Jun 19 2020 @author: Yoann Pradat Test clustering functions. """ using Distances using Test using VariantsNMF @testset "build_conn" begin labels = [1, 0, 1, 2, 1] conn_true = [ [1 0 1 0 1] [0 1 0 0 0] [1 0 1 0 1] [0 0 0 1 0] [1 0 1 0 1] ] conn = VariantsNMF.build_conn(labels) conn = convert(Matrix{Int64}, conn) @test conn == conn_true end @testset "kmeans_init" begin p_centroids = 96 n_centroids = 16 n_repeats = 400 # 1. test on synthetic data where centroids in each repetition are identical # each cluster is then reduced to one point and the clustering is perfect centroids_true = rand(p_centroids, n_centroids) labels_true = repeat(collect(1:n_centroids), n_repeats) X = repeat(centroids_true, 1, n_repeats) # 1.1 stop when relative change in cost remains below 5e-2 over 10 iter centroids_ref, labels_ref, cost_ref = VariantsNMF.kmeans_init( X = X, init = collect(1:n_centroids), max_iter = 100, dist_func = CosineDist(), stopping_crit = :cost, stopping_tol = 5e-2, stopping_iter = 10, verbose = false ) @test centroids_ref ≈ centroids_true atol = 1e-9 @test cost_ref ≈ 0 atol = 1e-9 @test labels_ref == labels_true # 1.2 stop when cosine distance between new centroids and ref centroids remains below 5e-2 for 10 iter centroids_ref, labels_ref, cost_ref = VariantsNMF.kmeans_init( X = X, init = collect(1:n_centroids), max_iter = 100, dist_func = CosineDist(), stopping_crit = :dist, stopping_tol = 5e-2, stopping_iter = 10, verbose = false ) @test centroids_ref ≈ centroids_true atol = 1e-9 @test cost_ref ≈ 0 atol = 1e-9 @test labels_ref == labels_true end
# # Copyright (c) 2021 Tobias Thummerer, Lars Mikelsons # Licensed under the MIT license. See LICENSE file in the project root for details. # # ToDo: This is not a good test... md = fmi2CreateModelDescription() fmi2ModelDescriptionAddRealStateAndDerivative(md, "mass.s") fmi2ModelDescriptionAddRealStateAndDerivative(md, "mass.v") fmi2ModelDescriptionAddRealOutput(md, "mass.f")
# region is an iterable subset of dimensions # spec. an integer, range, tuple, or array # inplace complex function plan_fft!(X::CuArray{T,N}, region) where {T<:cufftComplexes,N} K = CUFFT_FORWARD inplace = true xtype = (T == cufftComplex) ? CUFFT_C2C : CUFFT_Z2Z pp = _mkplan(xtype, size(X), region) cCuFFTPlan{T,K,inplace,N}(pp, X, size(X), region, xtype) end function plan_bfft!(X::CuArray{T,N}, region) where {T<:cufftComplexes,N} K = CUFFT_INVERSE inplace = true xtype = (T == cufftComplex) ? CUFFT_C2C : CUFFT_Z2Z pp = _mkplan(xtype, size(X), region) cCuFFTPlan{T,K,inplace,N}(pp, X, size(X), region, xtype) end # out-of-place complex function plan_fft(X::CuArray{T,N}, region) where {T<:cufftComplexes,N} K = CUFFT_FORWARD xtype = (T == cufftComplex) ? CUFFT_C2C : CUFFT_Z2Z inplace = false pp = _mkplan(xtype, size(X), region) cCuFFTPlan{T,K,inplace,N}(pp, X, size(X), region, xtype) end function plan_bfft(X::CuArray{T,N}, region) where {T<:cufftComplexes,N} K = CUFFT_INVERSE inplace = false xtype = (T == cufftComplex) ? CUFFT_C2C : CUFFT_Z2Z pp = _mkplan(xtype, size(X), region) cCuFFTPlan{T,K,inplace,N}(pp, X, size(X), region, xtype) end # out-of-place real-to-complex function plan_rfft(X::CuArray{T,N}, region) where {T<:cufftReals,N} K = CUFFT_FORWARD inplace = false xtype = (T == cufftReal) ? CUFFT_R2C : CUFFT_D2Z pp = _mkplan(xtype, size(X), region) ydims = collect(size(X)) ydims[region[1]] = div(ydims[region[1]],2)+1 rCuFFTPlan{T,K,inplace,N}(pp, X, (ydims...,), region, xtype) end function plan_brfft(X::CuArray{T,N}, d::Integer, region::Any) where {T<:cufftComplexes,N} K = CUFFT_INVERSE inplace = false xtype = (T == cufftComplex) ? CUFFT_C2R : CUFFT_Z2D ydims = collect(size(X)) ydims[region[1]] = d pp = _mkplan(xtype, (ydims...,), region) rCuFFTPlan{T,K,inplace,N}(pp, X, (ydims...,), region, xtype) end # FIXME: plan_inv methods allocate needlessly (to provide type parameters) # Perhaps use FakeArray types to avoid this. function plan_inv(p::cCuFFTPlan{T,CUFFT_FORWARD,inplace,N}) where {T,N,inplace} X = CuArray{T}(undef, p.sz) pp = _mkplan(p.xtype, p.sz, p.region) ScaledPlan(cCuFFTPlan{T,CUFFT_INVERSE,inplace,N}(pp, X, p.sz, p.region, p.xtype), normalization(X, p.region)) end function plan_inv(p::cCuFFTPlan{T,CUFFT_INVERSE,inplace,N}) where {T,N,inplace} X = CuArray{T}(undef, p.sz) pp = _mkplan(p.xtype, p.sz, p.region) ScaledPlan(cCuFFTPlan{T,CUFFT_FORWARD,inplace,N}(pp, X, p.sz, p.region, p.xtype), normalization(X, p.region)) end function plan_inv(p::rCuFFTPlan{T,CUFFT_INVERSE,inplace,N} ) where {T<:cufftComplexes,N,inplace} X = CuArray{real(T)}(undef, p.osz) Y = CuArray{T}(undef, p.sz) xtype = p.xtype == CUFFT_C2R ? CUFFT_R2C : CUFFT_D2Z pp = _mkplan(xtype, p.osz, p.region) ScaledPlan(rCuFFTPlan{real(T),CUFFT_FORWARD,inplace,N}(pp, X, p.sz, p.region, xtype), normalization(X, p.region)) end function plan_inv(p::rCuFFTPlan{T,CUFFT_FORWARD,inplace,N} ) where {T<:cufftReals,N,inplace} X = CuArray{complex(T)}(undef, p.osz) Y = CuArray{T}(undef, p.sz) xtype = p.xtype == CUFFT_R2C ? 
CUFFT_C2R : CUFFT_Z2D pp = _mkplan(xtype, p.sz, p.region) ScaledPlan(rCuFFTPlan{complex(T),CUFFT_INVERSE,inplace,N}(pp, X, p.sz, p.region, xtype), normalization(Y, p.region)) end # The rest of the standard API size(p::CuFFTPlan) = p.sz function mul!(y::CuArray{Ty}, p::CuFFTPlan{T,K,false}, x::CuArray{T} ) where {Ty,T,K} assert_applicable(p,x,y) unsafe_execute!(p,x,y) return y end function *(p::cCuFFTPlan{T,K,true,N}, x::CuArray{T,N}) where {T,K,N} assert_applicable(p,x) unsafe_execute!(p,x) x end function *(p::rCuFFTPlan{T,CUFFT_FORWARD,false,N}, x::CuArray{T,N} ) where {T<:cufftReals,N} @assert p.xtype ∈ [CUFFT_R2C,CUFFT_D2Z] y = CuArray{complex(T),N}(undef, p.osz) mul!(y,p,x) y end function *(p::rCuFFTPlan{T,CUFFT_INVERSE,false,N}, x::CuArray{T,N} ) where {T<:cufftComplexes,N} @assert p.xtype ∈ [CUFFT_C2R,CUFFT_Z2D] y = CuArray{real(T),N}(undef, p.osz) mul!(y,p,x) y end function *(p::cCuFFTPlan{T,K,false,N}, x::CuArray{T,N}) where {T,K,N} y = CuArray{T,N}(undef, p.osz) mul!(y,p,x) y end
module BaselineFunctions

import Statistics

export calculateBaseline

function calculateBaseline(massAxis, avgSpectrum; baselinePointWidth = 0.3, threshold=0.5)
    baselinePoints = ceil(massAxis[1])+0.6:baselinePointWidth:floor(massAxis[end]-1)-0.4
    baselineValues = similar(baselinePoints)
    baselineNoise = similar(baselinePoints)
    for i=1:length(baselinePoints)
        startIdx = searchsortedfirst(massAxis,baselinePoints[i]-baselinePointWidth)
        endIdx = searchsortedfirst(massAxis,baselinePoints[i]+baselinePointWidth)
        subSet = view(avgSpectrum, startIdx:endIdx)
        # the 20% quantile of the local window is used as the cutoff for baseline samples
        threshold = Statistics.quantile(subSet,0.2)
        baselineSamples = Array{Float64,1}()
        #fill!(baselineSamples,0.0)
        for j=1:length(subSet)
            if subSet[j] <= threshold
                push!(baselineSamples,subSet[j])
            end
        end
        baselineValues[i] = Statistics.mean(baselineSamples)
        if length(baselineSamples) > 3
            baselineNoise[i] = Statistics.std(baselineSamples)
        else
            baselineNoise[i] = baselineValues[i]
        end
        if (baselineValues[i] > 10000 || baselineValues[i] == 0)
            println("Strange Baseline at $(baselinePoints[i]):\n$baselineSamples")
        end
    end
    return baselinePoints, baselineValues, baselineNoise
end

end
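# A minimal, self-contained usage sketch for the module above (synthetic data,
# not part of the original file):
using .BaselineFunctions
massAxis = collect(100.0:0.001:200.0)               # sorted mass axis
avgSpectrum = 10 .+ abs.(randn(length(massAxis)))   # noisy, baseline-only spectrum
pts, vals, noise = calculateBaseline(massAxis, avgSpectrum)
# `pts` are the baseline support points, `vals` the estimated baseline level
# and `noise` its standard deviation at each point.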
#creates a logarithmic range function log_range(start, stop, points) r = LinRange(log(start), log(stop), points) r = exp.(r) return r end export log_range
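# Quick check of the helper above: with endpoints 1e-3 and 1e3 and 7 points the
# spacing in log-space is exactly ln(10), so the result is the powers of ten.
log_range(1e-3, 1e3, 7)  # ≈ [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]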
function dijkstra(net::Network{T}, snk::U) where {T<:Integer, U<:Integer} nnodes = numnodes(net) costs = fill(typemax(U), nnodes) childvec = zeros(T, nnodes) P = PriorityQueue{T,U}() P[snk] = costs[snk] = 0 childvec[snk] = snk while !isempty(P) v = dequeue!(P) for u in inneighbors(net, v) lidx = idx(net, u, v) alt = costs[v] + net.links[lidx].l if (costs[u] > alt) P[u] = costs[u] = alt childvec[u] = v end end end childvec end function initchoices() # choice initialization DTC = zeros(T, size(trips)...); DTC[1,..] .= 1. #DTC[1:Tm,..] .= rand(size(DTC[1:Tm,..])...) #DTC ./= sum(DTC, dims=1) pathvecs = Dict(k => dijkstra(net, k) for k in snks); SR = Dict(d => Dict(i => zeros(T, nsinks, nclasses) for i in 1:2) for d in divs); for d in divs for (kno,k) in enumerate(snks) for c in 1:nclasses if (outneighbors(net, d)[1] == pathvecs[k][d]) SR[d][1][:,kno,c] .= 1. elseif (outneighbors(net, d)[2] == pathvecs[k][d]) SR[d][2][:,kno,c] .= 1. else error() end end end end return (DTC, SR) end
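# The `dijkstra` above relies on the package's own `Network` type. The sketch
# below re-creates the same "reverse Dijkstra towards a sink" pattern on a plain
# cost matrix, using the DataStructures.PriorityQueue the code above also assumes;
# all names here are illustrative only, not part of the original file.
using DataStructures

function dijkstra_to_sink(cost::Matrix{Float64}, snk::Int)
    n = size(cost, 1)
    dist = fill(Inf, n)
    child = zeros(Int, n)                 # next hop towards the sink
    P = PriorityQueue{Int,Float64}()
    P[snk] = dist[snk] = 0.0
    child[snk] = snk
    while !isempty(P)
        v = dequeue!(P)
        for u in 1:n
            isfinite(cost[u, v]) || continue      # no link u -> v
            alt = dist[v] + cost[u, v]
            if dist[u] > alt
                P[u] = dist[u] = alt
                child[u] = v
            end
        end
    end
    child
end

# 1 -> 2 -> 3 with an expensive direct 1 -> 3 link; sink is node 3.
C = [Inf 1.0 10.0; Inf Inf 1.0; Inf Inf Inf]
dijkstra_to_sink(C, 3)   # == [2, 3, 3]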
using Documenter, VoronoiFVM, Literate # # Replace SOURCE_URL marker with github url of source # function replace_source_url(input,source_url) lines_in = collect(eachline(IOBuffer(input))) lines_out=IOBuffer() for line in lines_in println(lines_out,replace(line,"SOURCE_URL" => source_url)) end return String(take!(lines_out)) end function make_all() # # Generate Markdown pages from examples # example_jl_dir = joinpath(@__DIR__,"..","examples") example_md_dir = joinpath(@__DIR__,"src","examples") for example_source in readdir(example_jl_dir) base,ext=splitext(example_source) if ext==".jl" source_url="https://github.com/j-fu/VoronoiFVM.jl/raw/master/examples/"*example_source preprocess(buffer)=replace_source_url(buffer,source_url) Literate.markdown(joinpath(@__DIR__,"..","examples",example_source), example_md_dir, documenter=false, info=false, preprocess=preprocess) end end generated_examples=vcat(["runexamples.md"],joinpath.("examples",readdir(example_md_dir))) makedocs( sitename="VoronoiFVM.jl", modules = [VoronoiFVM], clean = false, doctest = true, authors = "J. Fuhrmann", repo="https://github.com/j-fu/VoronoiFVM.jl", pages=[ "Home"=>"index.md", "changes.md", "method.md", "API Documentation" => [ "physics.md", "system.md", "solutions.md", "solver.md", "post.md", "misc.md", "allindex.md", ], "Examples" => generated_examples ] ) rm(example_md_dir,recursive=true) if !isinteractive() deploydocs(repo = "github.com/j-fu/VoronoiFVM.jl.git") end end make_all()
lattice = load_lattice(joinpath(@__DIR__, "./Mn3Si.in")) atoms = load_atoms( joinpath(@__DIR__, "./Mn3Si.in")) atoms = attach_pseudos(atoms, Mn="hgh/pbe/mn-q15.hgh", Si="hgh/pbe/si-q4.hgh") # AFM ordering, configuration 5(c) in Jiang and Yang J. Alloys Compounds (2021) @assert atoms[1][1].symbol == :Mn @assert atoms[2][1].symbol == :Si magnetic_moments = [atoms[1][1] => [5.0, -5.0, 5.0], atoms[2][1] => [0.0]] cases = standard_cases() config[@__DIR__] = ( directory = @__DIR__, lattice = lattice, atoms = atoms, magnetic_moments = magnetic_moments, # kwargs_model = (smearing=Smearing.Gaussian(), temperature=0.01), Ecut = 45, kgrid = [13, 13, 13], supersampling = 2.0, potential = nothing, kwargs_scf = (; ), mixing = KerkerMixing(), cases = cases, )
# comparison functions for HTML Nodes and Documents

# TODO right now hashing and equality completely ignore
# parents. I think this is *probably* appropriate but it deserves
# some more thought. There's an argument that two HTMLElements with
# the same contents and children but different parent pointers are not
# really equal. Perhaps an auxiliary equality function could be provided
# for this purpose?

# equality

import Base: ==, isequal, hash

isequal(x::HTMLDocument, y::HTMLDocument) =
    isequal(x.doctype,y.doctype) && isequal(x.root,y.root)
isequal(x::HTMLText,y::HTMLText) = isequal(x.text, y.text)
isequal(x::HTMLElement, y::HTMLElement) =
    isequal(x.attributes,y.attributes) && isequal(x.children,y.children)

==(x::HTMLDocument, y::HTMLDocument) = ==(x.doctype,y.doctype) && ==(x.root,y.root)
==(x::HTMLText,y::HTMLText) = ==(x.text, y.text)
==(x::HTMLElement, y::HTMLElement) = ==(x.attributes,y.attributes) && ==(x.children,y.children)

# hashing

function hash(doc::HTMLDocument)
    hash(hash(HTMLDocument),hash(hash(doc.doctype), hash(doc.root)))
end

function hash(elem::HTMLElement{T}) where {T}
    h = hash(HTMLElement)
    h = hash(h,hash(T))
    h = hash(h,hash(attrs(elem)))
    for child in children(elem)
        h = hash(h,hash(child))
    end
    return h
end

hash(t::HTMLText) = hash(hash(HTMLText),hash(t.text))
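# A small usage sketch, assuming this file is part of Gumbo.jl so that
# `parsehtml` and the HTML node types are available (not part of the original file):
using Gumbo
a = parsehtml("<html><body><p class=\"x\">hi</p></body></html>")
b = parsehtml("<html><body><p class=\"x\">hi</p></body></html>")
a == b                          # true: doctype, attributes and children all match
hash(a.root) == hash(b.root)    # hashing is consistent with equality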
using CellListMap using StaticArrays using NearestNeighbors using Test function nl_NN(x,y,r) balltree = BallTree(x) return inrange(balltree,y,r,true) end function compare_nb_lists(list_CL,list_NN;self=false) if self for (i,list_original) in pairs(list_NN) list = filter(!isequal(i),list_original) cl = filter(tup -> (tup[1] == i || tup[2] == i), list_CL) if length(cl) != length(list) @show i @show length(list), list @show length(cl), cl return false end for j in list length(findall(tup -> (tup[1] == j || tup[2] == j), cl)) == 1 || return false end end else for (i,list) in pairs(list_NN) cl = filter(tup -> tup[2] == i, list_CL) if length(cl) != length(list) @show i @show length(list), list @show length(cl), cl return false end for j in list length(findall(tup -> tup[1] == j, cl)) == 1 || return false end end end return true end @testset "neighborlist" begin r = 0.1 for N in [2,3] # # Using vectors as input # # With y smaller than x x = [ rand(SVector{N,Float64}) for _ in 1:1000 ] y = [ rand(SVector{N,Float64}) for _ in 1:500 ] nb = nl_NN(x,x,r) cl = CellListMap.neighborlist(x,r) @test compare_nb_lists(cl,nb,self=true) nb = nl_NN(x,y,r) cl = CellListMap.neighborlist(x,y,r,autoswap=false) @test compare_nb_lists(cl,nb,self=false) cl = CellListMap.neighborlist(x,y,r,autoswap=true) @test compare_nb_lists(cl,nb,self=false) # with x smaller than y x = [ rand(SVector{N,Float64}) for _ in 1:500 ] y = [ rand(SVector{N,Float64}) for _ in 1:1000 ] nb = nl_NN(x,y,r) cl = CellListMap.neighborlist(x,y,r,autoswap=false) @test compare_nb_lists(cl,nb,self=false) cl = CellListMap.neighborlist(x,y,r,autoswap=true) @test compare_nb_lists(cl,nb,self=false) # Using matrices as input x = rand(N,1000) y = rand(N,500) nb = nl_NN(x,x,r) cl = CellListMap.neighborlist(x,r) @test compare_nb_lists(cl,nb,self=true) nb = nl_NN(x,y,r) cl = CellListMap.neighborlist(x,y,r,autoswap=false) @test compare_nb_lists(cl,nb,self=false) cl = CellListMap.neighborlist(x,y,r,autoswap=true) @test compare_nb_lists(cl,nb,self=false) # with x smaller than y x = rand(N,500) y = rand(N,1000) nb = nl_NN(x,y,r) cl = CellListMap.neighborlist(x,y,r,autoswap=false) @test compare_nb_lists(cl,nb,self=false) cl = CellListMap.neighborlist(x,y,r,autoswap=true) @test compare_nb_lists(cl,nb,self=false) # Check random coordinates to test the limits more thoroughly check_random_NN = true for i in 1:500 x = rand(SVector{N,Float64},100); y = rand(SVector{N,Float64},50); nb = nl_NN(x,y,r); cl = CellListMap.neighborlist(x,y,r,autoswap=false); check_random_NN = compare_nb_lists(cl,nb,self=false) end @test check_random_NN # with different types x = rand(Float32,N,500) y = rand(Float32,N,1000) nb = nl_NN(x,y,r) cl = CellListMap.neighborlist(x,y,r,autoswap=false) @test compare_nb_lists(cl,nb,self=false) cl = CellListMap.neighborlist(x,y,r,autoswap=true) @test compare_nb_lists(cl,nb,self=false) end end
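# A minimal stand-alone illustration of the API exercised by the tests above
# (assumes CellListMap.jl and StaticArrays.jl are installed):
using CellListMap, StaticArrays
x = rand(SVector{3,Float64}, 1_000)
nb = CellListMap.neighborlist(x, 0.05)
# Each entry is a tuple (i, j, d) with the indices of a pair closer than the
# cutoff and their distance d, which is exactly the format the
# `compare_nb_lists` helper above filters on.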
# Use baremodule to shave off a few KB from the serialized `.ji` file baremodule ASL_jll using Base using Base: UUID import JLLWrappers JLLWrappers.@generate_main_file_header("ASL") JLLWrappers.@generate_main_file("ASL", UUID("ae81ac8f-d209-56e5-92de-9978fef736f9")) end # module ASL_jll
# ---
# title: 581. Shortest Unsorted Continuous Subarray
# id: problem581
# author: Indigo
# date: 2021-06-26
# difficulty: Medium
# categories: Array
# link: <https://leetcode.com/problems/shortest-unsorted-continuous-subarray/description/>
# hidden: true
# ---
#
# Given an integer array `nums`, you need to find one **continuous subarray**
# that if you only sort this subarray in ascending order, then the whole array
# will be sorted in ascending order.
#
# Return _the shortest such subarray and output its length_.
#
# **Example 1:**
#
#     Input: nums = [2,6,4,8,10,9,15]
#     Output: 5
#     Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
#
# **Example 2:**
#
#     Input: nums = [1,2,3,4]
#     Output: 0
#
# **Example 3:**
#
#     Input: nums = [1]
#     Output: 0
#
# **Constraints:**
#
#   * `1 <= nums.length <= 10^4`
#   * `-10^5 <= nums[i] <= 10^5`
#
## @lc code=start
using LeetCode

function find_unsorted_subarray(nums::Vector{Int})
    stk = Int[]
    l, r = length(nums) + 1, 1
    for i in eachindex(nums)
        while !isempty(stk) && nums[stk[end]] > nums[i]
            l = min(l, pop!(stk))
        end
        push!(stk, i)
    end
    empty!(stk)
    for i in length(nums):-1:1
        while !isempty(stk) && nums[stk[end]] < nums[i]
            r = max(r, pop!(stk))
        end
        push!(stk, i)
    end
    return r - l > 0 ? r - l + 1 : 0
end
## @lc code=end
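# Quick sanity checks against the examples in the problem statement above
# (run after the function definition; not part of the original file):
@assert find_unsorted_subarray([2, 6, 4, 8, 10, 9, 15]) == 5
@assert find_unsorted_subarray([1, 2, 3, 4]) == 0
@assert find_unsorted_subarray([1]) == 0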
struct Bundle{S<:System,P,F} produce::Produce{P} recursive::Bool filter::F end Bundle(p::Produce{P,V}, ops::AbstractString) where {P,V} = begin recursive = false filter = nothing index = 0 for op in split(ops, "/") if op == "*" # collecting children only at the current level recursive = false elseif op == "**" # collecting all children recursively recursive = true else #TODO: support generic indexing function? filter = op end end S = eltype(p) F = typeof(filter) Bundle{S,P,F}(p, recursive, filter) end Base.collect(b::Bundle{S}) where {S<:System} = begin p = getfield(b, :produce) v = collect(p) if getfield(b, :recursive) l = S[] #TODO: possibly reduce overhead by reusing calculated values in child nodes g(V::Vector{<:System}) = for s in V; g(s) end g(s::System) = (push!(l, s); g(value(getfield(s, p.name)))) g(::Nothing) = nothing g(v) else l = copy(v) end f = getfield(b, :filter) if !isnothing(f) filter!(s -> value(s[f]), l) end l end Base.getindex(s::Produce, ops::AbstractString) = Bundle(s, ops) struct Bunch{V} it::Base.Generator end Base.iterate(b::Bunch, i...) = iterate(getfield(b, :it), i...) Base.length(b::Bunch) = length(getfield(b, :it)) Base.eltype(::Type{<:Bunch{<:State{V}}}) where V = V Base.getproperty(b::Bundle{S}, p::Symbol) where {S<:System} = (value(getfield(x, p)) for x in value(b)) |> Bunch{vartype(S, p)} Base.getproperty(b::Bunch{S}, p::Symbol) where {S<:System} = (value(getfield(x, p)) for x in b) |> Bunch{vartype(S, p)} Base.getindex(b::Bundle, i::AbstractString) = getproperty(b, Symbol(i)) Base.getindex(b::Bunch, i::AbstractString) = getproperty(b, Symbol(i)) value(b::Bundle) = collect(b) #TODO: also make final value() based on generator, but then need sum(x; init=0) in Julia 1.6 for empty generator #value(b::Bunch) = (value(v) for v in b) value(b::Bunch) = collect(b) Base.getindex(b::Bundle) = value(b) Base.getindex(b::Bunch) = value(b) Base.adjoint(b::Bundle) = value(b) Base.adjoint(b::Bunch) = value(b)
""" nonlinear_gradient_correction(x) nonlinear_gradient_correction!(x, val) Returns `Bool` stating if the image saved has been corrected for gradient nonlinearities by the scanner sequence. Default is `false`. """ @defprop NonlinearGradientCorrection{:nonlinear_gradient_correction}::Bool """ pulse_sequence(x) pulse_sequence!(x, val) General description of the pulse sequence used for the scan (i.e. MPRAGE, Gradient Echo EPI, Spin Echo EPI, Multiband gradient echo EPI). """ @defprop PulseSequence{:pulse_sequence}::String """ pulse_sequence_details(x) pulse_sequence_details!(x, val) Information beyond pulse sequence type that identifies the specific pulse sequence used (i.e. "Standard Siemens Sequence distributed with the VB17 software," "Siemens WIP ### version #.##," or "Sequence written by X using a version compiled on MM/DD/YYYY"). """ @defprop PulseSequenceDetails{:pulse_sequence_details}::String """ pulse_seqtype(x) pulse_seqtype!(x, val) A general description of the pulse sequence used for the scan (i.e. MPRAGE, Gradient Echo EPI, Spin Echo EPI, Multiband gradient echo EPI). """ @defprop PulseSequenceType{:pulse_seqtype}::String """ scanning_sequence(x) scanning_sequence!(x, val) Description of the type of sequence data acquired. """ @defprop ScanningSequence{:scanning_sequence}::String """ sequence_name(x) sequence_name!(x, val) Manufacturer’s designation of the sequence name. """ @defprop SequenceName{:sequence_name}::String """ sequence_variant(x) sequence_variant!(x, val) Variant of the `scanning_sequence` property. """ @defprop SequenceVariant{:sequence_variant}::String """ SequenceMetadata Metadata structure for general MRI sequence information. ## Supported Properties $(description_list(nonlinear_gradient_correction, pulse_sequence, pulse_sequence_details, pulse_sequence_type, scanning_sequence, sequence_name, sequence_variant)) """ struct SequenceMetadata nonlinear_gradient_correction::Bool pulse_sequence::String pulse_sequence_details::String pulse_sequence_type::String scanning_sequence::String sequence_name::String sequence_variant::String end @properties SequenceMetadata begin nonlinear_gradient_correction(self) => :nonlinear_gradient_correction pulse_sequence(self) => :pulse_sequence pulse_sequence_details(self) => :pulse_sequence_details pulse_seqtype(self) => :pulse_sequence_type scanning_sequence(self) => :scanning_sequence sequence_name(self) => :sequence_name sequence_variant(self) => :sequence_variant end
struct CartesianFDMContext{N,P,R,T,D,S,M} top::P n::NTuple{N,Int} ρ::R τ::NTuple{2,NTuple{N,T}} δ::NTuple{2,NTuple{N,D}} σ::NTuple{2,NTuple{N,S}} μ::Tuple{M,NTuple{N,M}} end function cartesianfdmcontext(top, n) ρ = spdiagm(0 => ones(Bool, prod(n))) τ = backwardshiftmatrices(top, n), forwardshiftmatrices(top, n) δ = Ref(ρ) .- τ[1], τ[2] .- Ref(ρ) σ = Ref(ρ) .+ τ[1], τ[2] .+ Ref(ρ) μ = maskmatrices(top, n) CartesianFDMContext(top, n, ρ, τ, δ, σ, μ) end """ """ _kron(opn) = opn _kron(opn...) = kron(reverse(opn)...) _kron(opn::NTuple{1}, eye::NTuple{1}) = opn _kron(opn::NTuple{2}, eye::NTuple{2}) = kron(eye[2], opn[1]), kron(opn[2], eye[1]) _kron(opn::NTuple{3}, eye::NTuple{3}) = kron(eye[3], eye[2], opn[1]), kron(eye[3], opn[2], eye[1]), kron(opn[3], eye[2], eye[1]) """ """ function forwardshiftmatrices(top, n) opn = _forwardshift.(top, n) eye = I.(n) _kron(opn, eye) end _forwardshift(::Topology, n::Int) = spdiagm(1 => ones(Bool, n-1)) _forwardshift(::Periodic, n::Int) = spdiagm(1-n => ones(Bool, 1), 1 => ones(Bool, n-1)) """ """ function backwardshiftmatrices(top, n) opn = _backwardshift.(top, n) eye = I.(n) _kron(opn, eye) end _backwardshift(::Topology, n::Int) = spdiagm(-1 => ones(Bool, n-1)) _backwardshift(::Periodic, n::Int) = spdiagm(-1 => ones(Bool, n-1), n-1 => ones(Bool, 1)) """ """ function maskmatrices(top, n) opn = _mask.(top, n) μ⁰ = _kron(opn...) opn = _normal.(top, n) eye = _tangent.(top, n) μ¹ = _kron(opn, eye) μ⁰, μ¹ end _mask(::Topology, n::Int) = spdiagm(0 => [i ≠ n for i in 1:n]) _mask(::Periodic, n::Int) = spdiagm(0 => ones(Bool, n)) _normal(::Topology, n::Int) = spdiagm(0 => [i ≠ 1 && i ≠ n for i in 1:n]) _normal(::Periodic, n::Int) = spdiagm(0 => ones(Bool, n)) _tangent(::Topology, n::Int) = spdiagm(0 => [i ≠ n for i in 1:n]) _tangent(::Periodic, n::Int) = spdiagm(0 => ones(Bool, n))
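# Standalone illustration (not from the original file) of the periodic
# forward-shift matrix built by `_forwardshift(::Periodic, n)` above, using
# SparseArrays directly:
using SparseArrays
n = 4
S = spdiagm(1 - n => ones(Bool, 1), 1 => ones(Bool, n - 1))
S * [10, 20, 30, 40]   # == [20, 30, 40, 10]: each entry is shifted forward with wrap-around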
## utility methods
using PyCall

XXX(msg) = println(msg)
XXX() = XXX("define me")

# Return the (name, value) pairs of all attributes of a Python object,
# as given by Python's `inspect.getmembers`.
function members(object::PyObject)
    convert(Vector{Tuple{String,PyObject}},
            pycall(pyimport("inspect").getmembers, PyObject, object))
end

has_member(object::PyObject, key) = key in pluck(members(object), 1)

pluck(xs, key) = [ x[key] for x in xs ]
f = :(f(x, y = 3; a, b = 5) = 50) doc_f = :(Core.@doc "I document function f" f) f2 = :(function f(x) println(2) return 5 end) s = :(mutable struct HandleType handle::Ptr{Nothing} end) doc_s = :(Core.@doc "I document structure s" s) c = :(const sym = val) e = :(@enum myenum a b c d) e2 = :(@cenum myotherenum::Int begin a = 1 b = 2 c = 3 d = 4 end) @testset "Expressions" begin include("exprs/utils.jl") include("exprs/deconstruct.jl") include("exprs/reconstruct.jl") @test is_broadcast(:(f.(x, y))) @test !is_broadcast(:(f(x, y))) @test !is_broadcast(:(x.y)) @test broadcast_ex(:(f(x, y)), true) == :(f.(x, y)) @test broadcast_ex(:(f(x, y)), false) == :(f(x, y)) @test prettify(concat_exs(:(x = 3), :(y = 4))) == prettify(:(x = 3; y = 4)) @test prettify(concat_exs(:( begin x = 3 end ), :(y = 4))) == prettify(:(x = 3; y = 4)) end
@testset "PopulationGrouping" begin @testset "ExampleGrouping" begin num_groups = 100 num_individuals = 100 rng = MersenneTwister(13) group_ids = rand(rng, 1:num_groups, num_individuals) @test minimum(group_ids) > 0 @test maximum(group_ids) <= num_groups grouping = MocosSim.PopulationGrouping(group_ids, num_groups) @test MocosSim.numgroups(grouping) == num_groups @testset "Every group has the right people" begin for group_id in 1:num_groups group = MocosSim.getgroup(grouping, group_id) for person_id in group @test group_ids[person_id] == group_id end end end @testset "Every person is in the right group" begin for person_id in 1:num_individuals group_id = group_ids[person_id] group = MocosSim.getgroup(grouping, group_id) @test person_id in group end end @testset "Group sizes" begin sizes = MocosSim.groupsizes(grouping) @test length(sizes) == num_groups for i in 1:MocosSim.numgroups(grouping) @test length(MocosSim.getgroup(grouping, i)) == sizes[i] end end end end
# This file is a part of BAT.jl, licensed under the MIT License (MIT).

#ToDo: Move functionality of reshape_variate and reshape_variates to package ValueShapes.

reshape_variate(shape::Missing, v::Any) = v

function reshape_variate(shape::AbstractValueShape, v::Any)
    v_shape = valshape(v)
    if !(v_shape <= shape)
        throw(ArgumentError("Shape of variate doesn't match target variate shape, with variate of type $(typeof(v)) and expected shape $(shape)"))
    end
    v
end

function reshape_variate(shape::ArrayShape{<:Real,1}, v::Any)
    unshaped_v = unshaped(v)::AbstractVector{<:Real}
    reshape_variate(shape, unshaped_v)
end

function reshape_variate(shape::AbstractValueShape, v::AbstractVector{<:Real})
    _reshape_realvec(shape, v)
end

function reshape_variate(shape::ArrayShape{<:Real,1}, v::AbstractVector{<:Real})
    _reshape_realvec(shape, v)
end

function _reshape_realvec(shape::AbstractValueShape, v::AbstractVector{<:Real})
    ndof = length(eachindex(v))
    ndof_expected = totalndof(shape)
    if ndof != ndof_expected
        throw(ArgumentError("Invalid length ($ndof) of variate, target shape $(shape) has $ndof_expected degrees of freedom"))
    end
    shape(v)
end

reshape_variates(shape::Missing, vs::AbstractVector{<:Any}) = vs

function reshape_variates(shape::AbstractValueShape, vs::AbstractVector{<:Any})
    v_elshape = elshape(vs)
    if !(v_elshape <= shape)
        throw(ArgumentError("Shape of variates doesn't match target variate shape, with variates of type $(eltype(vs)) and expected element shape $(shape)"))
    end
    vs
end

function reshape_variates(shape::ArrayShape{<:Real,1}, vs::AbstractVector{<:Any})
    unshaped_vs = unshaped.(vs)::AbstractVector{<:AbstractVector{<:Real}}
    reshape_variates(shape, unshaped_vs)
end

function reshape_variates(shape::AbstractValueShape, vs::AbstractVector{<:AbstractVector{<:Real}})
    _reshape_realvecs(shape, vs)
end

function reshape_variates(shape::ArrayShape{<:Real,1}, vs::AbstractVector{<:AbstractVector{<:Real}})
    _reshape_realvecs(shape, vs)
end

function _reshape_realvecs(shape::AbstractValueShape, vs::AbstractVector{<:AbstractVector{<:Real}})
    ndof = first(innersize(vs))
    ndof_expected = totalndof(shape)
    if ndof != ndof_expected
        throw(ArgumentError("Invalid length ($ndof) of variates, target shape $(shape) has $ndof_expected degrees of freedom"))
    end
    shape.(vs)
end
# This file is a part of Julia. License is MIT: https://julialang.org/license rst(x) = sprint(rst, x) function rst(io::IO, content::Vector) isempty(content) && return for md in content[1:end-1] rst(io, md) println(io) end rst(io, content[end]) end rst(io::IO, md::MD) = rst(io, md.content) function rst(io::IO, header::Header{l}) where l s = rstinline(header.text) println(io, s) println(io, string("*=-~:.^"[l])^length(s)) println(io) end function rst(io::IO, code::Code) if code.language == "jldoctest" println(io, ".. doctest::\n") elseif code.language != "rst" println(io, ".. code-block:: julia\n") end for l in lines(code.code) println(io, " ", l) end end function rst(io::IO, p::Paragraph) rstinline(io, p.content) println(io) end function rst(io::IO, list::List) for (i, item) in enumerate(list.items) bullet = isordered(list) ? "$(i + list.ordered - 1). " : "* " print(io, bullet) lines = split(rstrip(sprint(rst, item)), '\n') for (n, line) in enumerate(lines) print(io, (n == 1 || isempty(line)) ? "" : " "^length(bullet), line) n < length(lines) && println(io) end println(io) end end function rst(io::IO, q::BlockQuote) s = sprint(rst, q.content) for line in split(rstrip(s), "\n") println(io, " ", line) end println(io) end function rst(io::IO, f::Footnote) print(io, ".. [", f.id, "]") s = sprint(rst, f.text) lines = split(rstrip(s), "\n") # Single line footnotes are printed on the same line as their label # rather than taking up an additional line. if length(lines) == 1 println(io, " ", lines[1]) else println(io) for line in lines println(io, isempty(line) ? "" : " ", rstrip(line)) end println(io) end end function rst(io::IO, md::Admonition) s = sprint(rst, md.content) title = md.title == ucfirst(md.category) ? "" : md.title println(io, ".. ", md.category, "::", isempty(title) ? "" : " $title") for line in split(rstrip(s), "\n") println(io, isempty(line) ? "" : " ", line) end println(io) end function rst(io::IO, md::HorizontalRule) println(io, "–" ^ 5) end function rst(io::IO, l::LaTeX) println(io, ".. math::\n") for line in lines(l.formula) println(io, " ", line) end end rst(io::IO, md) = show(io, "text/rst", md) # Inline elements rstinline(x) = sprint(rstinline, x) function rstinline(io::IO, md...) wasCode = false for el in md wasCode && isa(el, AbstractString) && !Base.startswith(el, " ") && print(io, "\\ ") wasCode = (isa(el, Code) || isa(el, LaTeX) || isa(el, Link)) && (wasCode = true) rstinline(io, el) end end rstinline(io::IO, md::Vector) = !isempty(md) && rstinline(io, md...) # rstinline(io::IO, md::Image) = rstinline(io, ".. image:: ", md.url) function rstinline(io::IO, md::Link) if ismatch(r":(func|obj|ref|exc|class|const|data):`\.*", md.url) rstinline(io, md.url) else rstinline(io, "`", md.text, " <", md.url, ">`_") end end rstinline(io::IO, f::Footnote) = print(io, "[", f.id, "]_") rstescape(s) = replace(s, "\\", "\\\\") rstinline(io::IO, s::AbstractString) = print(io, rstescape(s)) rstinline(io::IO, md::Bold) = rstinline(io, "**", md.text, "**") rstinline(io::IO, md::Italic) = rstinline(io, "*", md.text, "*") rstinline(io::IO, md::Code) = print(io, "``", md.code, "``") rstinline(io::IO, br::LineBreak) = println(io) rstinline(io::IO, l::LaTeX) = print(io, ":math:`", l.formula, "`") rstinline(io::IO, x) = show(io, MIME"text/rst"(), x) # show Base.show(io::IO, ::MIME"text/rst", md::MD) = rst(io, md)
using Meganet, BenchmarkTools history = Pkg.dir("Meganet")*"//benchmarks//micro//bm_doubleSymLayer.jld" TYPE = Float64 npixel = 500 nex = 1000 nchannel = 3 K = getDenseKernel(TYPE,[4000,2000]) Bin = randn(TYPE,nFeatOut(K),4) Bout = randn(TYPE,nFeatIn(K),3) nLayer = getTVNormLayer(TYPE,[nex,4]) L = getDoubleSymLayer(TYPE,K,nLayer,Bin,Bout) theta = initTheta(L) Y = randn(TYPE,nFeatIn(L),nex) # Warmup Yout2,Yout2,tmp2 = apply(L,theta,Y,true) @code_warntype apply(L,theta,Y,true) trial = @benchmark apply(L,theta,Y,true); Meganet.updatehistory!(history, trial) hist = JLD.load(history, "hist") judge(hist)
export iscommutative, trred_matrix, any_order, pmaximal_overorder, phereditary_overorder, ismaximal elem_type(::AlgAssRelOrd{S, T, U}) where {S, T, U} = AlgAssRelOrdElem{S, T, U} elem_type(::Type{AlgAssRelOrd{S, T, U}}) where {S, T, U} = AlgAssRelOrdElem{S, T, U} ideal_type(::AlgAssRelOrd{S, T, U}) where {S, T, U} = AlgAssRelOrdIdl{S, T, U} ideal_type(::Type{AlgAssRelOrd{S, T, U}}) where {S, T, U} = AlgAssRelOrdIdl{S, T, U} @doc Markdown.doc""" algebra(O::AlgAssRelOrd) -> AbsAlgAss Returns the algebra which contains $O$. """ algebra(O::AlgAssRelOrd) = O.algebra _algebra(O::AlgAssRelOrd) = algebra(O) @doc Markdown.doc""" base_ring(O::AlgAssRelOrd) -> Union{NfAbsOrd, NfRelOrd} Returns an order $R$ in the base ring of the algebra of $O$, such that $O$ is an $R$-order. """ base_ring(O::AlgAssRelOrd) = order(basis_pmatrix(O, copy = false).coeffs[1]) @doc Markdown.doc""" iscommutative(O::AlgAssRelOrd) -> Bool Returns `true` if $O$ is a commutative ring and `false` otherwise. """ iscommutative(O::AlgAssRelOrd) = iscommutative(algebra(O)) ################################################################################ # # Construction # ################################################################################ @doc Markdown.doc""" Order(A::AbsAlgAss{<: NumFieldElem}, M::Generic.Mat{<: NumFieldElem}) -> AlgAssRelOrd Returns the order of $A$ with basis matrix $M$. """ function Order(A::AbsAlgAss{S}, M::Generic.Mat{S}) where S <: NumFieldElem return AlgAssRelOrd{S, fractional_ideal_type(order_type(base_ring(A))), typeof(A)}(A, deepcopy(M)) end @doc Markdown.doc""" Order(A::AbsAlgAss{<: NumFieldElem}, M::PMat{<: NumFieldElem, T}) -> AlgAssRelOrd Returns the order of $A$ with basis pseudo-matrix $M$. """ function Order(A::AbsAlgAss{S}, M::PMat{S, T}) where { S <: NumFieldElem, T } return AlgAssRelOrd{S, T, typeof(A)}(A, deepcopy(M)) end @doc Markdown.doc""" Order(A::AbsAlgAss{<: NumFieldElem}, B::Vector{<: AbsAlgAssElem{ <: NumFieldElem}}) -> AlgAssRelOrd Returns the order of $A$ with basis $B$. 
""" function Order(A::AbsAlgAss{S}, B::Vector{ <: AbsAlgAssElem{S} }) where { S <: NumFieldElem } @assert length(B) == dim(A) M = zero_matrix(base_ring(A), dim(A), dim(A)) for i = 1:dim(A) elem_to_mat_row!(M, i, B[i]) end return Order(A, M) end ################################################################################ # # "Assure" functions for fields # ################################################################################ function assure_has_basis_pmatrix(O::AlgAssRelOrd{S, T, U}) where {S, T, U} if isdefined(O, :basis_pmatrix) return nothing end if !isdefined(O, :pseudo_basis) error("No pseudo_basis and no basis_pmatrix defined.") end pb = pseudo_basis(O, copy = false) A = algebra(O) M = zero_matrix(base_ring(A), degree(O), degree(O)) C = Vector{T}() for i = 1:degree(O) elem_to_mat_row!(M, i, pb[i][1]) push!(C, deepcopy(pb[i][2])) end O.basis_pmatrix = PseudoMatrix(M, C) return nothing end function assure_has_pseudo_basis(O::AlgAssRelOrd{S, T, U}) where {S, T, U} if isdefined(O, :pseudo_basis) return nothing end if !isdefined(O, :basis_pmatrix) error("No pseudo_basis and no basis_pmatrix defined.") end P = basis_pmatrix(O, copy = false) A = algebra(O) pseudo_basis = Vector{Tuple{elem_type(A), T}}() for i = 1:degree(O) a = elem_from_mat_row(A, P.matrix, i) push!(pseudo_basis, (a, deepcopy(P.coeffs[i]))) end O.pseudo_basis = pseudo_basis return nothing end function assure_has_basis_matrix(O::AlgAssRelOrd) if isdefined(O, :basis_matrix) return nothing end O.basis_matrix = basis_pmatrix(O).matrix return nothing end function assure_has_basis_mat_inv(O::AlgAssRelOrd) if isdefined(O, :basis_mat_inv) return nothing end O.basis_mat_inv = inv(basis_matrix(O, copy = false)) return nothing end function assure_has_inv_coeff_ideals(O::AlgAssRelOrd) if isdefined(O, :inv_coeff_ideals) return nothing end pb = pseudo_basis(O, copy = false) O.inv_coeff_ideals = [ inv(pb[i][2]) for i in 1:degree(O) ] return nothing end ################################################################################ # # Pseudo basis / basis pseudo-matrix # ################################################################################ @doc Markdown.doc""" pseudo_basis(O::AlgAssRelOrd; copy::Bool = true) Returns the pseudo basis of $O$, i. e. a vector $v$ of pairs $(e_i, a_i)$ such that $O = \bigoplus_i a_i e_i$, where $e_i$ is an element of `algebra(O)` and $a_i$ a fractional ideal of `base_ring(O)`. """ function pseudo_basis(O::AlgAssRelOrd{S, T, U}; copy::Bool = true) where {S, T, U} assure_has_pseudo_basis(O) if copy return deepcopy(O.pseudo_basis)::Vector{Tuple{elem_type(U), T}} else return O.pseudo_basis::Vector{Tuple{elem_type(U), T}} end end @doc Markdown.doc""" basis_pmatrix(O::AlgAssRelOrd; copy::Bool = true) -> PMat Returns the basis pseudo-matrix of $O$. 
""" function basis_pmatrix(O::AlgAssRelOrd; copy::Bool = true) assure_has_basis_pmatrix(O) if copy return deepcopy(O.basis_pmatrix) else return O.basis_pmatrix end end function inv_coeff_ideals(O::AlgAssRelOrd; copy::Bool = true) assure_has_inv_coeff_ideals(O) if copy return deepcopy(O.inv_coeff_ideals) else return O.inv_coeff_ideals end end # Returns a basis of O as Z-module function absolute_basis(O::AlgAssRelOrd) pb = pseudo_basis(O, copy = false) res = Vector{elem_type(O)}() for i = 1:degree(O) for b in absolute_basis(pb[i][2]) push!(res, O(b*pb[i][1])) end end return res end ################################################################################ # # (Inverse) basis matrix # ################################################################################ @doc Markdown.doc""" basis_matrix(O::AlgAssRelOrd; copy::Bool = true) -> MatElem Returns the basis matrix of $O$, that is the basis pseudo-matrix of $O$ without the coefficient ideals. """ function basis_matrix(O::AlgAssRelOrd; copy::Bool = true) assure_has_basis_matrix(O) if copy return deepcopy(O.basis_matrix) else return O.basis_matrix end end @doc Markdown.doc""" basis_mat_inv(O::AlgAssRelOrd; copy::Bool = true) -> MatElem Returns the inverse of the basis matrix of $O$. """ function basis_mat_inv(O::AlgAssRelOrd; copy::Bool = true) assure_has_basis_mat_inv(O) if copy return deepcopy(O.basis_mat_inv) else return O.basis_mat_inv end end ################################################################################ # # Degree # ################################################################################ @doc Markdown.doc""" degree(O::AlgAssRelOrd) -> Int Returns the dimension of the algebra containing $O$. """ function degree(O::AlgAssRelOrd) return dim(algebra(O)) end ################################################################################ # # Inclusion of algebra elements # ################################################################################ function _check_elem_in_order(a::AbsAlgAssElem{S}, O::AlgAssRelOrd{S, T, V}, short::Type{Val{U}} = Val{false}) where {S, T, U, V} t = zero_matrix(base_ring(algebra(O)), 1, degree(O)) elem_to_mat_row!(t, 1, a) t = t*basis_mat_inv(O, copy = false) b_pmat = basis_pmatrix(O, copy = false) if short == Val{true} for i = 1:degree(O) if !(t[1, i] in b_pmat.coeffs[i]) return false end end return true else for i = 1:degree(O) if !(t[1, i] in b_pmat.coeffs[i]) return false, Vector{S}() end end v = Vector{S}(undef, degree(O)) for i = 1:degree(O) v[i] = deepcopy(t[1, i]) end return true, v end end @doc Markdown.doc""" in(a::AbsAlgAssElem, O::AlgAssRelOrd) -> Bool Returns `true` if the algebra element $a$ is in $O$ and `false` otherwise. """ function in(a::AbsAlgAssElem{S}, O::AlgAssRelOrd{S, T, U}) where {S, T, U} return _check_elem_in_order(a, O, Val{true}) end ################################################################################ # # Denominator in an order # ################################################################################ @doc Markdown.doc""" denominator(a::AbsAlgAssElem, O::AlgAssRelOrd) -> fmpz Returns $d\in \mathbb Z$ such that $d \cdot a \in O$. 
""" function denominator(a::AbsAlgAssElem, O::AlgAssRelOrd) t = zero_matrix(base_ring(algebra(O)), 1, degree(O)) elem_to_mat_row!(t, 1, a) t = t*basis_mat_inv(O, copy = false) d = fmpz(1) inv_coeff = inv_coeff_ideals(O, copy = false) for i = 1:degree(O) tt = inv_coeff[i]*t[1, i] tt = simplify(tt) d = lcm(d, denominator(tt)) end return d end ################################################################################ # # Random elements # ################################################################################ @doc Markdown.doc""" rand(O::AlgAssRelOrd, B::Int) -> AlgAssRelOrdElem Returns a random element of $O$ whose coefficient size is controlled by $B$. """ function rand(O::AlgAssRelOrd, B::Int) pb = pseudo_basis(O, copy = false) z = algebra(O)() for i = 1:degree(O) t = rand(pb[i][2], B) z += t*pb[i][1] end return O(z) end ################################################################################ # # Print # ################################################################################ function show(io::IO, O::AlgAssRelOrd) compact = get(io, :compact, false) if compact print(io, "Order of ") show(IOContext(io, :compact => true), algebra(O)) else print(io, "Order of ") println(io, algebra(O)) print(io, "with pseudo-basis ") pb = pseudo_basis(O, copy = false) for i = 1:degree(O) print(io, "\n(") show(IOContext(io, :compact => true), pb[i][1]) print(io, ", ") show(IOContext(io, :compact => true), pb[i][2]) print(io, ")") end end end ################################################################################ # # Equality # ################################################################################ @doc Markdown.doc""" ==(R::AlgAssRelOrd, S::AlgAssRelOrd) -> Bool Returns `true` if $R$ and $S$ are equal and `false` otherwise. """ function ==(R::AlgAssRelOrd, S::AlgAssRelOrd) algebra(R) != algebra(S) && return false return basis_pmatrix(R, copy = false) == basis_pmatrix(S, copy = false) end ################################################################################ # # Discriminant and Reduced Trace Matrix # ################################################################################ @doc Markdown.doc""" trred_matrix(O::AlgssRelOrd) -> MatElem Returns the reduced trace matrix $M$ of $O$, i. e. `M[i, j] = trred(b[i]*b[j])`, where $b$ is a basis of $O$. """ function trred_matrix(O::AlgAssRelOrd) if isdefined(O, :trred_matrix) return deepcopy(O.trred_matrix) end A = algebra(O) b = pseudo_basis(O, copy = false) d = dim(A) M = zero_matrix(base_ring(A), d, d) for i = 1:d t = trred(b[i][1]*b[i][1]) M[i, i] = t for j = i + 1:d t = trred(b[i][1]*b[j][1]) M[i, j] = t M[j, i] = t end end O.trred_matrix = M return deepcopy(M) end @doc Markdown.doc""" discriminant(O::AlgssRelOrd) Returns the discriminant of $O$. 
""" function discriminant(O::AlgAssRelOrd{S, T, U}) where {S, T, U} if isdefined(O, :disc) return O.disc::ideal_type(order_type(parent_type(S))) end d = det(trred_matrix(O)) pb = pseudo_basis(O, copy = false) a = pb[1][2]^2 for i = 2:degree(O) a *= pb[i][2]^2 end disc = d*a simplify(disc) O.disc = numerator(disc) return deepcopy(O.disc)::ideal_type(order_type(parent_type(S))) end ################################################################################ # # Maximal Order # ################################################################################ function ismaximal_order_known(A::AbsAlgAss{T}) where { T <: NumFieldElem } return isdefined(A, :maximal_order) end @doc Markdown.doc""" maximal_order(A::AbsAlgAss{ <: NumFieldElem }) -> AlgAssRelOrd Returns a maximal $R$-order of $A$ where $R$ is the maximal order of the base ring of $A$. """ function maximal_order(A::AbsAlgAss{T}) where { T <: NumFieldElem } if isdefined(A, :maximal_order) return A.maximal_order::order_type(A) end # So far ..._absolute is usually faster for linear, quadratic and cubic base fields, # but of course there are exceptions. # Feel free to adjust this if-condition. if base_field(base_ring(A)) == FlintQQ && degree(base_ring(A)) <= 3 O = maximal_order_via_absolute(A) else O = maximal_order_via_relative(A) end A.maximal_order = O return O::order_type(A) end function maximal_order_via_absolute(A::AbsAlgAss{T}) where { T <: NumFieldElem } B, BtoA = AlgAss(A) C, CtoB = restrict_scalars(B, FlintQQ) OC = maximal_order(C) M = zero_matrix(base_ring(A), degree(OC), dim(A)) for i = 1:degree(OC) elem_to_mat_row!(M, i, BtoA(CtoB(elem_in_algebra(basis(OC, copy = false)[i], copy = false)))) end PM = sub(pseudo_hnf(PseudoMatrix(M), :lowerleft, true), (degree(OC) - dim(A) + 1):degree(OC), 1:dim(A)) O = Order(A, PM) O.ismaximal = 1 return O end function maximal_order_via_relative(A::AbsAlgAss{T}) where { T <: NumFieldElem } O = any_order(A) return maximal_order(O) end @doc Markdown.doc""" maximal_order(O::AlgAssRelOrd) -> AlgAssRelOrd Returns a maximal order of the algebra of $O$ containing itself. """ function maximal_order(O::AlgAssRelOrd{S, T, U}) where {S, T, U} A = algebra(O) if isdefined(A, :maximal_order) # Check whether O \subseteq OO OO = A.maximal_order::AlgAssRelOrd{S, T, U} if _spans_subset_of_pseudohnf(basis_pmatrix(O, copy = false), basis_pmatrix(OO, copy = false), :lowerleft) return OO end end d = discriminant(O) fac = factor(d) OO = O for (p, e) in fac if e == 1 continue end OO += pmaximal_overorder(O, p) end OO.ismaximal = 1 if !isdefined(A, :maximal_order) A.maximal_order = OO end return OO::AlgAssRelOrd{S, T, U} end @doc Markdown.doc""" any_order(A::AbsAlgAss{ <: NumFieldElem }) -> AlgAssRelOrd Returns any $R$-order of $A$ where $R$ is the maximal order of the base ring of $A$. """ function any_order(A::AbsAlgAss{T}) where { T <: NumFieldElem } K = base_ring(A) return any_order(A, maximal_order(K)) end @doc Markdown.doc""" any_order(A::AbsAlgAss{ <: NumFieldElem}, R::Union{ NfAbsOrd, NfRelOrd }) -> AlgAssRelOrd Returns any $R$-order of $A$. 
""" function any_order(A::AbsAlgAss{T}, R::Union{ NfAbsOrd, NfRelOrd }) where { T <: NumFieldElem } K = base_ring(A) d = _denominator_of_mult_table(A, R) M = vcat(zero_matrix(K, 1, dim(A)), d*identity_matrix(K, dim(A))) oneA = one(A) for i = 1:dim(A) M[1, i] = deepcopy(coefficients(oneA, copy = false)[i]) end PM = PseudoMatrix(M) PM = pseudo_hnf(PM, :lowerleft, true) O = Order(A, sub(PM, 2:dim(A) + 1, 1:dim(A))) return O end function _denominator_of_mult_table(A::AbsAlgAss{T}, R::Union{ NfAbsOrd, NfRelOrd }) where { T <: NumFieldElem } l = denominator(multiplication_table(A, copy = false)[1, 1, 1], R) for i = 1:dim(A) for j = 1:dim(A) for k = 1:dim(A) l = lcm(l, denominator(multiplication_table(A, copy = false)[i, j, k], R)) end end end return l end _denominator_of_mult_table(A::AlgGrp{T}, R::Union{ NfAbsOrd, NfRelOrd }) where { T <: NumFieldElem } = fmpz(1) # TODO: This is type unstable # Requires that O is maximal and A = K^(n\times n) for a number field K. # Computes a maximal order of type # ( O ... O a^-1 ) # ( : : : ) # ( O ... O a^-1 ) # ( a ... a O ) # for an ideal a of O. # See Bley, Johnston "Computing generators of free modules over orders in group # algebras", Prop. 5.1. function _simple_maximal_order(O::AlgAssRelOrd, make_free::Bool = true, with_trafo::Type{Val{T}} = Val{false}) where T A = algebra(O) @assert A isa AlgMat n = degree(A) K = coefficient_ring(A) # Build a matrix with the first columns of basis elements of O M = zero_matrix(K, dim(A), n) for i = 1:dim(A) b = matrix(pseudo_basis(O, copy = false)[i][1], copy = false) for j = 1:n M[i, j] = deepcopy(b[j, 1]) end end PM = PseudoMatrix(M, [pseudo_basis(O, copy = false)[i][2] for i in 1:dim(A)]) PM = pseudo_hnf(PM, :upperright) M = sub(PM.matrix, 1:n, 1:n) PM = PseudoMatrix(M, PM.coeffs[1:n]) U = similar(PM.matrix, 0, 0) steinitz_form!(PM, U, false) a = PM.coeffs[end] a = simplify(a) fl = false if make_free fl, beta = isprincipal(a) end if fl mul_row!(PM.matrix, nrows(PM.matrix), beta) a = K(1) * base_ring(PM) else d = denominator(a) if !isone(d) mul_row!(PM.matrix, nrows(PM.matrix), K(1//d)) end a = a * d end M = transpose(PM.matrix) iM = inv(M) N = zero_matrix(K, dim(A), dim(A)) for i = 1:dim(A) elem_to_mat_row!(N, i, iM*pseudo_basis(O, copy = false)[i][1]*M) end PN = PseudoMatrix(N, deepcopy(basis_pmatrix(O, copy = false).coeffs)) PN = pseudo_hnf(PN, :lowerleft) niceorder = Order(A, PN) niceorder.isnice = true niceorder.nice_order_ideal = a if with_trafo == Val{true} return niceorder, A(iM) else return niceorder end end @doc Markdown.doc""" nice_order(O::AlgAssRelOrd) -> AlgAssRelOrd, AlgElem Given a maximal order $O$ in a full matrix algebra over a number field, return a nice maximal order $R$ and element $a$ such that $a O a^-1 = R$. """ function nice_order(O::AlgAssRelOrd{S, T, U}; cached::Bool = true) where {S, T, U} if cached && isdefined(O, :nice_order) return O.nice_order::Tuple{typeof(O), elem_type(U)} else sO, A = _simple_maximal_order(O, true, Val{true}) if cached O.nice_order = sO, A end return sO::typeof(O), A::elem_type(U) end end function nice_order_ideal(O::AlgAssRelOrd) !O.isnice && error(throw("Order must be nice")) return O.nice_order_ideal end @doc Markdown.doc""" ismaximal(O::AlgAssRelOrd) -> Bool Returns `true` if $O$ is a maximal order and `false` otherwise. 
""" function ismaximal(O::AlgAssRelOrd) if O.ismaximal == 1 return true end if O.ismaximal == 2 return false end A = algebra(O) d = discriminant(O) if isdefined(A, :maximal_order) if d == discriminant(maximal_order(A)) O.ismaximal = 1 return true else O.ismaximal = 2 return false end end fac = factor(d) for (p, e) in fac if e == 1 continue end d2 = discriminant(pmaximal_overorder(O, p)) if d != d2 O.ismaximal = 2 return false end end O.ismaximal = 1 return true end ismaximal_known(O::AlgAssRelOrd) = O.ismaximal != 0 ################################################################################ # # p-hereditary / p-maximal overorders # ################################################################################ # See Friedrichs: "Berechnung von Maximalordnungen über Dedekindringen", Algorithmus 4.12 @doc Markdown.doc""" phereditary_overorder(O::AlgAssRelOrd, p::Union{ NfAbsOrdIdl, NfRelOrdIdl }) -> AlgAssRelOrd Returns an order $O'$ containing $O$ such that the localization $O'_p$ is hereditary where $p$ is a prime ideal of the base ring of $O$. """ function phereditary_overorder(O::AlgAssRelOrd, p::Union{ NfAbsOrdIdl, NfRelOrdIdl }; return_pradical::Type{Val{T}} = Val{false}) where T d = discriminant(O) prad = pradical(O, p) OO = left_order(prad) dd = discriminant(OO) while d != dd d = dd prad = pradical(OO, p) OO = left_order(prad) dd = discriminant(OO) if valuation(dd, p) < 2 break end end if return_pradical == Val{true} return OO, prad else return OO end end # See Friedrichs: "Berechnung von Maximalordnungen über Dedekindringen", Algorithmus 3.16 function _pmaximal_overorder(O::AlgAssRelOrd, p::Union{ NfAbsOrdIdl, NfRelOrdIdl }) return _pmaximal_overorder(O, pradical(O, p), p) end function _pmaximal_overorder(O::AlgAssRelOrd, prad::AlgAssRelOrdIdl, p::Union{ NfAbsOrdIdl, NfRelOrdIdl }; strict_containment::Bool = false) d = discriminant(O) primes = _prime_ideals_over(O, prad, p, strict_containment = strict_containment) for P in primes OO = left_order(P) dd = discriminant(OO) if valuation(dd, p) < 2 return OO end if d != dd return _pmaximal_overorder(OO, p) end end return O end @doc Markdown.doc""" pmaximal_overorder(O::AlgAssRelOrd, p::Union{ NfAbsOrdIdl, NfRelOrdIdl }) -> AlgAssRelOrd Returns an order $O'$ containing $O$ such that the index $(O'':O')$ of any maximal order $O''$ containing $O$ is not divisible by $p$ where $p$ is a prime ideal of the base ring of $O$. """ function pmaximal_overorder(O::AlgAssRelOrd, p::Union{ NfAbsOrdIdl, NfRelOrdIdl }) O, prad = phereditary_overorder(O, p, return_pradical = Val{true}) return _pmaximal_overorder(O, prad, p, strict_containment = true) end ################################################################################ # # Addition # ################################################################################ function +(a::AlgAssRelOrd{S, T, U}, b::AlgAssRelOrd{S, T, U}) where { S, T, U} @assert algebra(a) === algebra(b) aB = basis_pmatrix(a, copy = false) bB = basis_pmatrix(b, copy = false) d = degree(a) PM = sub(pseudo_hnf(vcat(aB, bB), :lowerleft, true), d + 1:2*d, 1:d) return Order(algebra(a), PM) end ################################################################################ # # Units of quotients # ################################################################################ # Computes a generating system of U in O, where U is a set of representatives of # the image of the projection map \pi:O^\times -> (O/g*O)^\times. # Assumes that O is a maximal order in Mat_{n\times n}(K). 
# See Bley, Johnson: "Computing generators of free modules over orders in # group algebras", section 6. function enum_units(O::AlgAssRelOrd, g::NfAbsOrdIdl) A = algebra(O) @assert A isa AlgMat @assert degree(A)^2 == dim(A) n = degree(A) K = base_ring(A) OK = base_ring(O) L = _simple_maximal_order(O) a = deepcopy(basis_pmatrix(L, copy = false).coeffs[end - 1]) ai = deepcopy(basis_pmatrix(L, copy = false).coeffs[n]) gensOKg = Vector{elem_type(K)}() for b in basis(OK) bmod = mod(b, g) if iszero(bmod) continue end push!(gensOKg, elem_in_nf(bmod)) end if isone(a) gensinvag = gensOKg else gensinvag = Vector{elem_type(K)}() aig = ai*g for b in basis(ai) bmod = mod(b, aig) if iszero(bmod) continue end push!(gensinvag, bmod) end end if isone(a) gensag = gensOKg else gensag = Vector{elem_type(K)}() ag = a*g for b in basis(a) bmod = mod(b, ag) if iszero(bmod) continue end push!(gensag, bmod) end end result = Vector{elem_type(L)}() n1 = n - 1 # n \nmid i, j or n \mid i, j for i = 1:n1 for j = 1:n1 if j == i continue end for x in gensOKg E = identity_matrix(K, n) E[i, j] = deepcopy(x) push!(result, L(A(E))) end end end # n \nmid i and n \mid j for i = 1:n1 for x in gensag E = identity_matrix(K, n) E[i, n] = deepcopy(x) push!(result, L(A(E))) end end # n \mid i and n \nmid j for j = 1:n1 for x in gensinvag E = identity_matrix(K, n) E[n, j] = deepcopy(x) push!(result, L(A(E))) end end U, mU = unit_group(OK) for i = 1:ngens(U) x = elem_in_nf(mU(U[i])) E = identity_matrix(K, n) E[1, 1] = x push!(result, L(A(E))) end return result end ################################################################################ # # Ramifying primes # ################################################################################ # Returns a vector of tuples (p, m, k) where p is prime ideal at which the # algebra ramifies, m its local index (Schur index) and k its local capacity. # See Reiner: "Maximal order" Theorem 32.1 function ramified_prime_ideals(O::AlgAssRelOrd) A = algebra(O) @assert issimple(A) n2 = dim(A) n = isqrt(n2) @assert n^2 == n2 d = discriminant(O) facd = factor(d) result = Vector{Tuple{ideal_type(base_ring(O)), Int, Int}}() for (p, e) in facd k = divexact(n2 - e, n) m = divexact(n, k) push!(result, (p, m, k)) @assert m*k == n end return result end ################################################################################ # # "All" maximal orders # ################################################################################ # Returns a vector containing a system of representatives of the maximal orders # of A with respect to conjugation, that is, any maximal order of A is conjugated # to one of them and no two returned orders are conjugated. # Only works for algebras fulfilling the Eichler condition. representatives_of_maximal_orders(A::AlgAss{nf_elem}) = representatives_of_maximal_orders(maximal_order(A)) function representatives_of_maximal_orders(O::AlgAssRelOrd) A = algebra(O) @assert issimple(A) @assert iseichler(A) @assert ismaximal(O) K = base_ring(A) OK = base_ring(O) n2 = dim(A) n = isqrt(n2) @assert n^2 == n2 inf_plc = ramified_infinite_places(A) R, mR = ray_class_group(OK(1)*OK, inf_plc) S, mS = snf(R) if order(S) == 1 return [ O ] end ram_primes = ramified_prime_ideals(O) U = Vector{elem_type(S)}() for i = 1:ngens(S) push!(U, n*S[i]) end for (p, m, k) in ram_primes push!(U, k*(mS\(mR\p))) end SU, mSU = quo(S, U) if order(SU) == 1 return [ O ] end # Each element of SU corresponds now to a maximal order. # We have to find a prime ideal in each of these classes. 
reps_found = Set{elem_type(SU)}() primes = Vector{ideal_type(OK)}() push!(reps_found, id(SU)) P = PrimeIdealsSet(OK, 1, -1, indexdivisors = false, ramified = false) for p in P g = mSU(mS\(mR\p)) if g in reps_found continue end push!(reps_found, g) push!(primes, p) if length(reps_found) == order(SU) break end end # For each of the prime ideals compute a maximal ideal with left order O. # Then the right orders of these form a system of representatives. max_ideals = Vector{ideal_type(O)}() for i = 1:length(primes) push!(max_ideals, maximal_integral_ideal(O, primes[i], :left)) end result = Vector{typeof(O)}() push!(result, O) for i = 1:length(max_ideals) push!(result, right_order(max_ideals[i])) end return result end
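# A minimal, self-contained sketch (not Hecke code) of the membership test used by
# `_check_elem_in_order` above: express the element's coordinate row in the order's
# basis via the inverse basis matrix, then check each coordinate against the
# corresponding coefficient ideal.  Here the base field is QQ, every coefficient
# "ideal" is replaced by ZZ, and `toy_basis_matrix` / `toy_in_order` are
# illustrative names introduced only for this sketch.
using LinearAlgebra

# Basis matrix of a toy "order" inside QQ^2 (rows are the basis vectors).
const toy_basis_matrix = Rational{BigInt}[1 0; 1//2 1//2]

function toy_in_order(a::Vector{Rational{BigInt}})
    t = permutedims(a) * inv(toy_basis_matrix)   # coordinates w.r.t. the order basis
    return all(isinteger, t)                     # "coefficient ideal" check, here just ZZ
end

toy_in_order(Rational{BigInt}[1//2, 1//2])  # true:  equals the second basis vector
toy_in_order(Rational{BigInt}[1//3, 0])     # false: coordinate 1//3 is not integral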
# # Approximate Inference using ESS # Loading the necessary packages and setting seed. using AbstractGPs, Plots, Random Random.seed!(1234); # Loading toy regression # [dataset](https://github.com/GPflow/docs/blob/master/doc/source/notebooks/basics/data/regression_1D.csv) # taken from GPFlow examples. x = [0.8658165855998895, 0.6661700880180962, 0.8049218148148531, 0.7714303440386239, 0.14790478354654835, 0.8666105548197428, 0.007044577166530286, 0.026331737288148638, 0.17188596617099916, 0.8897812990554013, 0.24323574561119998, 0.028590102134105955]; y = [1.5255314337144372, 3.6434202968230003, 3.010885733911661, 3.774442382979625, 3.3687639483798324, 1.5506452040608503, 3.790447985799683, 3.8689707574953, 3.4933565751758713, 1.4284538820635841, 3.8715350915692364, 3.7045949061144983]; scatter(x, y, xlabel="x", ylabel="y") # Split the observations into train and test set. (x_train, y_train) = (x[begin:8], y[begin:8]); (x_test, y_test) = (x[9:end], y[9:end]); # Instantiating the kernel. k = Matern52Kernel() # Instantiating a Gaussian Process with the given kernel `k`. f = GP(k) # Instantiating a `FiniteGP`, a finite dimentional projection at the inputs of the dataset # observed under Gaussian Noise with $\sigma = 0.001$ . fx = f(x_train, 0.001) # Data's log-likelihood w.r.t prior `GP`. logpdf(fx, y_train) # Calculating the exact posterior over `f` given `y`. The GP's kernel currently has some # arbitrary fixed parameters. p_fx = posterior(fx, y_train) # Data's log-likelihood under the posterior `GP`. We see that it drastically increases. logpdf(p_fx(x_test), y_test) # Plot the posterior `p_fx` along with the observations. plt = scatter(x_train, y_train; label = "Train data") scatter!(plt, x_test, y_test; label = "Test data") plot!(plt, p_fx, 0:0.001:1; label="Posterior") # # Elliptical Slice Sampler # Previously we computed the log likelihood of the untuned kernel parameters # of the GP, $-1.285$. We now also perform approximate inference over said # kernel parameters using the # [Elliptical Slice Sampling](http://proceedings.mlr.press/v9/murray10a/murray10a.pdf) # provided by # [EllipticalSliceSampling.jl](https://github.com/TuringLang/EllipticalSliceSampling.jl/). # We start of by loading necessary packages. using EllipticalSliceSampling, Distributions # We define a function which returns log-probability of the data under the # GP / log-likelihood of the parameters of the GP. function logp(params; x=x_train, y=y_train) kernel = ScaledKernel( transform( Matern52Kernel(), ScaleTransform(exp(params[1])) ), exp(params[2]) ) f = GP(kernel) fx = f(x, 0.1) return logpdf(fx, y) end # We define a Gaussian prior over the joint distribution on kernel parameters # space. Since we have only two parameters, we define a multi-variate Gaussian # of dimension two. prior = MvNormal(2, 1) # Sanity check for the defined `logp` function and `prior` distribution. logp(rand(prior)) # Generate 2,000 samples using `ESS_mcmc` provided by `EllipticalSliceSampling.jl`. samples = sample(ESSModel(prior, logp), ESS(), 2_000; progress=false) samples_mat = reduce(hcat, samples)'; # Mean of samples of both the parameters. mean_params = mean(samples_mat, dims=1) # Plot a histogram of the samples for the two parameters. The vertical line in each # graph indicates the mean of the samples. plt = histogram(samples_mat; layout=2, labels="Param") vline!(plt, mean_params; layout=2, label="Mean") # Average log-marginal-probability of data with posterior kernel parameter samples # sampled using ESS. 
# We can observe that there is significant improvement over
# exact posterior with default kernel parameters.
mean(logp(param; x=x_test, y=y_test) for param in samples)

# Plot sampled functions from the posterior with tuned parameters.
plt = scatter(x_train, y_train; label="Train data")
scatter!(plt, x_test, y_test; label="Test data")
for params in @view(samples[(end - 100):end, :])
    opt_kernel = ScaledKernel(
        transform(
            Matern52Kernel(),
            ScaleTransform(exp(params[1]))
        ),
        exp(params[2])
    )
    f = GP(opt_kernel)
    p_fx = posterior(f(x_train, 0.1), y_train)
    sampleplot!(plt, p_fx(collect(0:0.02:1)), 1)
end
plt
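# As a rough sanity check (not part of the original example), one can also build a
# single kernel from the posterior-mean parameters computed above and compare its
# test log-likelihood with the per-sample average; `mean_kernel` and `p_fx_mean`
# are illustrative names introduced here.
mean_kernel = ScaledKernel(
    transform(
        Matern52Kernel(),
        ScaleTransform(exp(mean_params[1]))
    ),
    exp(mean_params[2])
)
p_fx_mean = posterior(GP(mean_kernel)(x_train, 0.1), y_train)
logpdf(p_fx_mean(x_test), y_test)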
using Documenter, QuadraticTools

makedocs(sitename="QuadraticTools Docs")
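# A slightly fuller build script is sketched below; the `pages` layout and the
# deploy repository URL are placeholders, not taken from the package itself.
using Documenter, QuadraticTools

makedocs(
    sitename = "QuadraticTools Docs",
    modules  = [QuadraticTools],
    pages    = [
        "Home" => "index.md",
    ],
)

# Optional: publish to GitHub Pages from CI (repository path is hypothetical).
deploydocs(repo = "github.com/USER/QuadraticTools.jl.git")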
__precompile__() module JAMSDWriter import MathProgBase const MPB = MathProgBase #importall MathProgBase.SolverInterface using Compat using Compat.LinearAlgebra, Compat.SparseArrays if VERSION >= v"0.7" # quickfix for Nullable using Nullables end export JAMSDSolver, getsolvername, getsolveresult, getsolveresultnum, getsolvemessage, getsolveexitcode, LinearQuadraticModel # Load in `deps.jl`, complaining if it does not exist const depsjl_path = joinpath(dirname(@__FILE__), "..", "deps", "deps.jl") if !isfile(depsjl_path) error("JAMSDWriter not installed properly, run Pkg.build(\"JAMSDWriter\"), restart Julia and try again") end include(depsjl_path) const CONFIG = Dict( :debug => false, :export_gms => false, :solver_log => false ) const solverdata_dir = joinpath(dirname(@__DIR__), ".solverdata") include("jamsd_linearity.jl") include("jamsd_params.jl") include("jamsd_convert.jl") include("jamsd_fun.jl") const solver_stat = [ :Optimal, :Iteration, :Resource, :Solver, :EvalError, :Capability, :License, :User, :SetupErr, :SolverErr, :InternalErr, :Skipped, :SystemErr ] const model_stat = [ :OptimalGlobal, :OptimalLocal, :Unbounded, :InfeasibleGlobal, :InfeasibleLocal, :InfeasibleIntermed, :Feasible, :Integer, :NonIntegerIntermed, :IntegerInfeasible, :LicenseError, :ErrorUnknown, :ErrorNoSolution, :NoSolutionReturned, :SolvedUnique, :Solved, :SolvedSingular, :UnboundedNoSolution, :InfeasibleNoSolution ] mutable struct JAMSDSolver <: MPB.AbstractMathProgSolver solver_name::String options::Dict{String,Any} emp::Nullable{Function} end # TODO(xhub) write a better struct/enum here @enum MODEL_TYPE qcp=7 nlp=2 miqcp=6 minlp=5 emp=9 "change the debug state" setdebug(b::Bool) = global CONFIG[:debug] = b "export the problem to a GAMS Model file (.gms)" setexport(b::Bool) = global CONFIG[:export_gms] = b "printout the log from the solver" setsolverlog(b::Bool) = global CONFIG[:solver_log] = b """ Create a JAMSDSolver Solver for MPB. 
The optional arguments are: # Optional Arguments - `solver_name::String=""`: solver used for this problem - `options::Dict{String,Any}=Dict{String,Any}()`: the JAMSD options """ function JAMSDSolver(solver_name::String="", options::Dict{String,Any}=Dict{String,Any}()) JAMSDSolver(solver_name, options, Nullable{Function}()) end getsolvername(s::JAMSDSolver) = basename(s.solver_name) mutable struct JAMSDMathProgModel <: MPB.AbstractMathProgModel options::Dict{String, Any} solver_name::String x_l::Vector{Float64} x_u::Vector{Float64} g_l::Vector{Float64} g_u::Vector{Float64} nvar::Int ncon::Int obj constrs::Vector{Any} lin_constrs::Vector{Dict{Int, Float64}} lin_obj::Dict{Int, Float64} r_codes::Vector{Int} j_counts::Vector{Int} vartypes::Vector{Symbol} varlinearities_con::Vector{Symbol} varlinearities_obj::Vector{Symbol} conlinearities::Vector{Symbol} objlinearity::Symbol v_index_map::Dict{Int, Int} v_index_map_rev::Dict{Int, Int} # was: # c_index_map::Dict{Int, Int} # c_index_map_rev::Dict{Int, Int} nonquad_idx::Dict{Int, Int} quad_idx::Dict{Int, Int} sense::Symbol x_0::Vector{Float64} objval::Float64 solution::Vector{Float64} status::Symbol solve_exitcode::Int solve_result_num::Int solve_result::String model_result_num::Int model_result::String solve_message::String solve_time::Float64 model_type::MODEL_TYPE quad_equs::Vector{Any} # quad_obj::Tuple{Vector{Int}, Vector{Int}, Vector{Float64}} quad_obj::Tuple offset::Int emp::Nullable{Function} d::Nullable{MPB.AbstractNLPEvaluator} jamsd_ctx::Ptr{context} jamsd_ctx_dest::Ptr{context} jamsd_options::Ptr{jamsd_options} gams_dir::String function JAMSDMathProgModel(solver_name::String, options::Dict{String,Any}, model_type::MODEL_TYPE, emp) o = new(options, solver_name, zeros(0), zeros(0), zeros(0), zeros(0), 0, 0, :(0), [], Dict{Int, Float64}[], Dict{Int, Float64}(), Int[], Int[], Symbol[], Symbol[], Symbol[], Symbol[], :Lin, Dict{Int, Int}(), Dict{Int, Int}(), Dict{Int, Int}(), Dict{Int, Int}(), :Min, zeros(0), NaN, zeros(0), :NotSolved, -1, -1, "?", -1, "?", "", NaN, model_type, Vector{Any}(), (), 0, emp, Nullable{MPB.AbstractNLPEvaluator}(), Ptr{context}(C_NULL), Ptr{context}(C_NULL), Ptr{jamsd_options}(C_NULL), "") @compat finalizer(jamsd_cleanup, o) o end end struct JAMSDLinearQuadraticModel <: MPB.AbstractLinearQuadraticModel inner::JAMSDMathProgModel end struct JAMSDNonlinearModel <: MPB.AbstractNonlinearModel inner::JAMSDMathProgModel end struct JAMSDConicModel <: MPB.AbstractNonlinearModel inner::JAMSDMathProgModel end include("jamsd_write.jl") MPB.NonlinearModel(s::JAMSDSolver) = JAMSDNonlinearModel( JAMSDMathProgModel(s.solver_name, s.options, nlp, s.emp) ) MPB.LinearQuadraticModel(s::JAMSDSolver) = JAMSDLinearQuadraticModel( JAMSDMathProgModel(s.solver_name, s.options, qcp, s.emp) ) function MPB.ConicModel(s::JAMSDSolver) error("ConicModel is not yet supported") end function MPB.loadproblem!(outer::JAMSDNonlinearModel, nvar::Integer, ncon::Integer, x_l, x_u, g_l, g_u, sense::Symbol, d::MPB.AbstractNLPEvaluator) m = outer.inner m.nvar, m.ncon = nvar, ncon loadcommon!(m, x_l, x_u, g_l, g_u, sense) m.d = d MPB.initialize(m.d.value, [:ExprGraph]) # Process constraints m.constrs = map(1:m.ncon) do i c = MPB.constr_expr(m.d.value, i) # Remove relations and bounds from constraint expressions if length(c.args) == 3 if VERSION < v"0.5-" expected_head = :comparison expr_index = 1 rel_index = 2 else expected_head = :call expr_index = 2 rel_index = 1 end @assert c.head == expected_head # Single relation constraint: expr rel bound rel = 
c.args[rel_index] m.r_codes[i] = relation_to_jamsd[rel] if rel == [:<=, :(==)] m.g_u[i] = c.args[3] end if rel in [:>=, :(==)] m.g_l[i] = c.args[3] end c = c.args[expr_index] else # Double relation constraint: bound <= expr <= bound @assert c.head == :comparison m.r_codes[i] = relation_to_jamsd[:multiple] m.g_u[i] = c.args[5] m.g_l[i] = c.args[1] c = c.args[3] end # Convert non-linear expression to non-linear, linear and constant c, constant, m.conlinearities[i] = process_expression!( c, m.lin_constrs[i], m.varlinearities_con) # Update bounds on constraint m.g_l[i] -= constant m.g_u[i] -= constant # Update jacobian counts using the linear constraint variables for j in keys(m.lin_constrs[i]) m.j_counts[j] += 1 end c end # Process objective m.obj = MPB.obj_expr(m.d.value) if length(m.obj.args) < 2 m.obj = 0 else # Convert non-linear expression to non-linear, linear and constant m.obj, constant, m.objlinearity = process_expression!( m.obj, m.lin_obj, m.varlinearities_obj) # Add constant back into non-linear expression if constant != 0 m.obj = add_constant(m.obj, constant) end end m end function MPB.loadproblem!(outer::JAMSDLinearQuadraticModel, A::AbstractMatrix, x_l, x_u, c, g_l, g_u, sense) m = outer.inner m.ncon, m.nvar = size(A) loadcommon!(m, x_l, x_u, g_l, g_u, sense) # Load A into the linear constraints @assert (m.ncon, m.nvar) == size(A) load_A!(m, A) m.constrs = zeros(m.ncon) # Dummy constraint expression trees # Load c for (index, val) in enumerate(c) m.lin_obj[index] = val end # TODO(xhub) see if we can get rid of that m.obj = 0 # Dummy objective expression tree # Process variables bounds for j = 1:m.ncon lower = m.g_l[j] upper = m.g_u[j] if lower == -Inf if upper == Inf error("Neither lower nor upper bound on constraint $j") else # <= m.r_codes[j] = 2 end else if lower == upper # == m.r_codes[j] = 0 elseif upper == Inf # >= m.r_codes[j] = 1 else # lb <= expr <= ub m.r_codes[j] = -1 end end end m end function load_A!(m::JAMSDMathProgModel, A::SparseMatrixCSC{Float64}) for var = 1:A.n, k = A.colptr[var] : (A.colptr[var + 1] - 1) m.lin_constrs[A.rowval[k]][var] = A.nzval[k] m.j_counts[var] += 1 end end function load_A!(m::JAMSDMathProgModel, A::Matrix{Float64}) for con = 1:m.ncon, var = 1:m.nvar val = A[con, var] if val != 0 m.lin_constrs[A.rowval[k]][var] = A.nzval[k] m.j_counts[var] += 1 end end end function loadcommon!(m::JAMSDMathProgModel, x_l, x_u, g_l, g_u, sense) m.x_l, m.x_u = x_l, x_u m.g_l, m.g_u = g_l, g_u setsense!(m, sense) m.lin_constrs = [Dict{Int, Float64}() for _ in 1:m.ncon] m.j_counts = zeros(Int, m.nvar) m.r_codes = Vector{Int}(undef, m.ncon) m.varlinearities_con = fill(:Lin, m.nvar) m.varlinearities_obj = fill(:Lin, m.nvar) m.conlinearities = fill(:Lin, m.ncon) m.objlinearity = :Lin m.vartypes = fill(:Cont, m.nvar) m.x_0 = zeros(m.nvar) end getvartype(m::JAMSDMathProgModel) = copy(m.vartypes) function setvartype!(m::JAMSDMathProgModel, cat::Vector{Symbol}) @assert all(x-> (x in [:Cont,:Bin,:Int,:external]), cat) m.vartypes = copy(cat) end getsense(m::JAMSDMathProgModel) = m.sense function setsense!(m::JAMSDMathProgModel, sense::Symbol) @assert sense == :Min || sense == :Max m.sense = sense end setwarmstart!(m::JAMSDMathProgModel, v::Vector{Float64}) = m.x_0 = v function MPB.addquadconstr!(m::JAMSDLinearQuadraticModel, linearidx, linearval, quadrowidx, quadcolidx, quadval, sense, rhs) # we have to do a little translation of the sense here ... 
push!(m.inner.quad_equs, tuple(linearidx,linearval,quadrowidx,quadcolidx,quadval, quad_relation_sense[sense], rhs)) end function MPB.setquadobj!(m::JAMSDLinearQuadraticModel, rowidx, colidx, quadval) m.inner.quad_obj = tuple(rowidx, colidx, quadval) end function optimize!(m::JAMSDMathProgModel) m.status = :NotSolved m.solve_exitcode = -1 m.solve_result_num = -1 m.solve_result = "?" m.model_result_num = -1 m.model_result = "?" m.solve_message = "" # There is no non-linear binary type, only non-linear discrete, so make # sure binary vars have bounds in [0, 1] for i in 1:m.nvar if m.vartypes[i] == :Bin if m.x_l[i] < 0 m.x_l[i] = 0 end if m.x_u[i] > 1 m.x_u[i] = 1 end end end m.jamsd_options = jamsd_options_set(m.options) if m.emp.hasvalue return m.emp.value() end make_var_index!(m) make_con_index!(m) # Run solver and save exitcode t = time() m.jamsd_ctx = create_jamsd_ctx(m) jamsd_set_modeltype(m) # Solve via gams for now m.jamsd_ctx_dest, m.gams_dir = jamsd_setup_gams() m.solve_exitcode = jamsd_solve(m.jamsd_ctx, m.jamsd_ctx_dest, m.solver_name) # ccall((:print_model, libjamsd), Cint, (Ptr{context},), m.jamsd_ctx) m.solve_time = time() - t if m.solve_exitcode == 0 report_results(m) else println("JAMSD: solver failed with status $(m.solve_exitcode)") m.status = :Error m.solution = fill(NaN, m.nvar) m.solve_result = "failure" m.solve_result_num = 999 end end function getconstrduals(m::JAMSDMathProgModel) ctx = m.jamsd_ctx x = fill(NaN, numconstr(m)) if has_objective(m) offset = m.offset else offset = m.offset - 1 end for idx in 1:m.ncon eidx = m.nonquad_idx[idx] + offset x[idx] = ctx_getmultiplierval(ctx, eidx) end return x end function getquadconstrduals(quadm::JAMSDLinearQuadraticModel) m = quadm.inner ctx = m.jamsd_ctx x = fill(NaN, numconstr(m)) if has_objective(m) offset = m.offset else offset = m.offset - 1 end for (idx, equ) in enumerate(m.quad_equs) eidx = m.quad_idx[idx] + offset x[idx] = ctx_getmultiplierval(ctx, eidx) end return x end function getreducedcosts(m::JAMSDMathProgModel) ctx = m.jamsd_ctx x = fill(NaN, numvar(m)) for idx in 1:numvar(m) x[idx] = ctx_getvarmult(ctx, idx-1) end return x end MPB.getreducedcosts(nlpm::JAMSDNonlinearModel) = getreducedcosts(nlpm.inner) MPB.getreducedcosts(quadm::JAMSDLinearQuadraticModel) = getreducedcosts(quadm.inner) MPB.getconstrduals(nlpm::JAMSDNonlinearModel) = getconstrduals(nlpm.inner) MPB.getconstrduals(quadm::JAMSDLinearQuadraticModel) = getconstrduals(quadm.inner) function process_expression!(nonlin_expr::Expr, lin_expr::Dict{Int, Float64}, varlinearities::Vector{Symbol}) # Get list of all variables in the expression extract_variables!(lin_expr, nonlin_expr) # Extract linear and constant terms from non-linear expression tree = LinearityExpr(nonlin_expr) tree = pull_up_constants(tree) _, tree, constant = prune_linear_terms!(tree, lin_expr) # Make sure all terms remaining in the tree are .nl-compatible nonlin_expr = convert_formula(tree) # Track which variables appear nonlinearly nonlin_vars = Dict{Int, Float64}() extract_variables!(nonlin_vars, nonlin_expr) for j in keys(nonlin_vars) varlinearities[j] = :Nonlin end # Remove variables at coeff 0 that aren't also in the nonlinear tree for (j, coeff) in lin_expr if coeff == 0 && !(j in keys(nonlin_vars)) delete!(lin_expr, j) end end # Mark constraint as nonlinear if anything is left in the tree linearity = nonlin_expr != 0 ? 
:Nonlin : :Lin return nonlin_expr, constant, linearity end function process_expression!(nonlin_expr::Real, lin_expr, varlinearities) # Special case where body of constraint is constant # Return empty nonlinear and linear parts, and use the body as the constant 0, nonlin_expr, :Lin end status(m::JAMSDMathProgModel) = m.status getsolution(m::JAMSDMathProgModel) = copy(m.solution) getobjval(m::JAMSDMathProgModel) = m.objval numvar(m::JAMSDMathProgModel) = m.nvar numconstr(m::JAMSDMathProgModel) = m.ncon + length(m.quad_equs) getsolvetime(m::JAMSDMathProgModel) = m.solve_time # Access to solve results get_solve_result(m::JAMSDMathProgModel) = m.solve_result get_solve_result_num(m::JAMSDMathProgModel) = m.solve_result_num get_model_result(m::JAMSDMathProgModel) = m.model_result get_model_result_num(m::JAMSDMathProgModel) = m.model_result_num get_solve_message(m::JAMSDMathProgModel) = m.solve_message get_solve_exitcode(m::JAMSDMathProgModel) = m.solve_exitcode # We need to track linear coeffs of all variables present in the expression tree extract_variables!(lin_constr::Dict{Int, Float64}, c) = c extract_variables!(lin_constr::Dict{Int, Float64}, c::LinearityExpr) = extract_variables!(lin_constr, c.c) function extract_variables!(lin_constr::Dict{Int, Float64}, c::Expr) if c.head == :ref CONFIG[:debug] && println("DEBUG: extract_variables :: variable case: $c") if c.args[1] == :x @assert isa(c.args[2], Int) lin_constr[c.args[2]] = 0 else error("Unrecognized reference expression $c") end else map(arg -> extract_variables!(lin_constr, arg), c.args) end end add_constant(c, constant::Real) = c + constant add_constant(c::Expr, constant::Real) = Expr(:call, :+, c, constant) function make_var_index!(m::JAMSDMathProgModel) m.v_index_map = Dict(zip(1:m.nvar, 0:(m.nvar-1))) m.v_index_map_rev = Dict(zip(0:(m.nvar-1), 1:m.nvar)) end #function make_var_index!(m::JAMSDMathProgModel) # nonlin_cont = Int[] # nonlin_int = Int[] # lin_cont = Int[] # lin_int = Int[] # lin_bin = Int[] # # # TODO(xhub) we do that multiple times in the EMP context # for i in 1:length(m.vartypes) # if m.varlinearities_obj[i] == :Nonlin || # m.varlinearities_con[i] == :Nonlin # if m.vartypes[i] == :Cont || m.vartypes[i] == :external # push!(nonlin_cont, i) # else # push!(nonlin_int, i) # end # else # if m.vartypes[i] == :Cont || m.vartypes[i] == :external # push!(lin_cont, i) # elseif m.vartypes[i] == :Int # push!(lin_int, i) # else # push!(lin_bin, i) # end # end # end # # # Index variables in required order # for var_list in (nonlin_cont, nonlin_int, lin_cont, lin_bin, lin_int) # add_to_index_maps!(m.v_index_map, m.v_index_map_rev, var_list, 0) # end # CONFIG[:debug] && println("DEBUG: $(m.v_index_map)") #end function make_con_index!(m::JAMSDMathProgModel) nonlin_cons = Int[] lin_cons = Int[] for i in 1:m.ncon if m.conlinearities[i] == :Nonlin push!(nonlin_cons, i) else push!(lin_cons, i) end end for con_list in (nonlin_cons, lin_cons) add_to_index_maps!(m.nonquad_idx, con_list, 1) end if length(m.quad_idx) == 0 m.quad_idx = Dict(enumerate(Vector{Int}(Compat.range(1+m.ncon, length=length(m.quad_equs))))) end CONFIG[:debug] && println("DEBUG: make_con_index: nonquad_idx: $(m.nonquad_idx)\n quad_idx: $(m.quad_idx)") end function add_to_index_maps!(forward_map::Dict{Int, Int}, backward_map::Dict{Int, Int}, inds::Array{Int}, offset::Int) for i in inds index = length(forward_map) + offset forward_map[i] = index backward_map[index] = i end end function add_to_index_maps!(forward_map::Dict{Int, Int}, inds::Array{Int}, offset::Int) for i in 
inds index = length(forward_map) + offset forward_map[i] = index end end function report_results_common(m::JAMSDMathProgModel) x = fill(NaN, m.nvar) m.objval = NaN for index in 0:(m.nvar - 1) i = m.v_index_map_rev[index] x[i] = ctx_getvarval(m.jamsd_ctx, index) end m.solution = x ########################################################################### # Convert solve_result # # GAMS return two information: # - the solve status # - the model status # # - :Optimal # - :Infeasible # - :Unbounded # - :UserLimit (iteration limit or timeout) # - :Error (and maybe others) ########################################################################### tmpCint = Ref{Cint}(0) res = ccall((:ctx_getsolvestat, libjamsd), Cint, (Ptr{context}, Ref{Cint}), m.jamsd_ctx_dest, tmpCint) res != 0 && error("return code $res from JAMSD") m.solve_result_num = tmpCint.x m.solve_result = unsafe_string(ccall((:ctx_getsolvestattxt, libjamsd), Cstring, (Ptr{context}, Cint), m.jamsd_ctx_dest, m.solve_result_num)) res = ccall((:ctx_getmodelstat, libjamsd), Cint, (Ptr{context}, Ref{Cint}), m.jamsd_ctx_dest, tmpCint) res != 0 && error("return code $res from JAMSD") m.model_result_num = tmpCint.x m.model_result = unsafe_string(ccall((:ctx_getmodelstattxt, libjamsd), Cstring, (Ptr{context}, Cint), m.jamsd_ctx_dest, m.model_result_num)) # GAMS already uses an 1-indices solver_code = solver_stat[m.solve_result_num] model_code = model_stat[m.model_result_num] CONFIG[:debug] && println("solver stat $(m.solve_result) ($(m.solve_result_num)); model stat $(m.model_result) ($(m.model_result_num))") if solver_code == :Optimal if model_code == :OptimalGlobal || model_code == :OptimalLocal || model_code == :Integer m.status = :Optimal elseif model_code == :Unbounded || model_code == :UnboundedNoSolution m.status = :Unbounded elseif model_code == :InfeasibleGlobal || model_code == :InfeasibleLocal || model_code == :InfeasibleIntermed || model_code == :InfeasibleNoSolution m.status = :Infeasible elseif model_code == :Feasible # TODO investigate that. Baron is weird m.status = :Optimal elseif model_code == :NoSolutionReturned gams_solver = ctx_get_solvername(m.jamsd_ctx_dest) m.status = :Optimal # if gams_solver == "jams" || gams_solver == "JAMS" # This is fine, we have a kludge in the code # else # println("JAMSD: Solve successed, but no solution was returned by solver $(gams_solver)!") # m.status = :Error # end else println("JAMSD: unhandle case: solver stat $(m.solve_result); model stat $(m.model_result)") m.status = :Error end elseif solver_code == :Iteration || solver_code == :Resource m.status == :UserLimit elseif solver_code == :License || model_code == :LicenseError println("JAMSD: License error. Check that you have a valid license") m.status == :Error elseif solver_code == :Capability if m.solver_name == "" sname = default else sname = m.solver_name end println("JAMSD: solver $(name) cannot solve the specific problem") m.status = :Error else println("JAMSD: solver stat is $(m.solve_result) and model stat is $(m.model_result)") m.status = :Error end CONFIG[:debug] && println("status is $(m.status)") end function report_results(m::JAMSDMathProgModel) # TODO(Xhub) fix this hack res = ccall((:model_eval_eqns, libjamsd), Cint, (Ptr{context}, Ptr{context}), m.jamsd_ctx, m.jamsd_ctx_dest) res != 0 && error("JAMSD: error code $res") # Next, read for the variable values report_results_common(m) # TODO(xhub) this should not be necessary if m.solve_exitcode == 0 if m.objlinearity == :Nonlin # Try to use NLPEvaluator if we can. 
# Can fail due to unsupported functions so fallback to eval try m.objval = eval_f(m.d.value, m.solution) catch CONFIG[:debug] && println("Error: could not evaluate the objective function") end end # Calculate objective value from nonlinear and linear parts obj_nonlin = eval(substitute_vars!(deepcopy(m.obj), m.solution)) obj_lin = evaluate_linear(m.lin_obj, m.solution) if (length(m.quad_obj) == 3) ridx, cidx, vals = m.quad_obj obj_quad = evaluate_quad(ridx, cidx, vals, m.solution) else obj_quad = 0. end m.objval = obj_nonlin + obj_lin + obj_quad end end substitute_vars!(c, x::Array{Float64}) = c function substitute_vars!(c::Expr, x::Array{Float64}) if c.head == :ref if c.args[1] == :x index = c.args[2] @assert isa(index, Int) c = x[index] else error("Unrecognized reference expression $c") end else if c.head == :call # Convert .nl unary minus (:neg) back to :- if c.args[1] == :neg c.args[1] = :- # Convert .nl :sum back to :+ elseif c.args[1] == :sum c.args[1] = :+ end end map!(arg -> substitute_vars!(arg, x), c.args, c.args) end c end function evaluate_linear(linear_coeffs::Dict{Int, Float64}, x::Array{Float64}) total = 0.0 for (i, coeff) in linear_coeffs total += coeff * x[i] end total end function evaluate_quad(rowidx, colidx, qvals, x::Array{Float64}) n = length(x) mat = sparse(rowidx, colidx, qvals, n, n) # This is soooo ugly --xhub Q = (mat + mat') - Diagonal(diag(mat)) total = .5*x'*Q*x return total end # Wrapper functions for f in [:getvartype,:getsense,:optimize!,:status,:getsolution,:getobjval,:numvar,:numconstr,:getsolvetime] @eval MPB.$f(m::JAMSDNonlinearModel) = $f(m.inner) @eval MPB.$f(m::JAMSDLinearQuadraticModel) = $f(m.inner) end for f in [:get_solve_result,:get_solve_result_num,:get_solve_message,:get_solve_exitcode] @eval $f(m::JAMSDNonlinearModel) = $f(m.inner) @eval $f(m::JAMSDLinearQuadraticModel) = $f(m.inner) end for f in [:setvartype!,:setsense!,:setwarmstart!] @eval MPB.$f(m::JAMSDNonlinearModel, x) = $f(m.inner, x) @eval MPB.$f(m::JAMSDLinearQuadraticModel, x) = $f(m.inner, x) end # Utility method for deleting any leftover debug files function clean_solverdata() for file in readdir(solverdata_dir) ext = splitext(file)[2] (ext == ".nl" || ext == ".sol") && rm(joinpath(solverdata_dir, file)) end end include("jamsd_mathprgm.jl") include("jamsd_ovf.jl") include("jamsd_solve.jl") function jamsd_cleanup(o::JAMSDMathProgModel) ctx_dealloc(o.jamsd_ctx) ctx_dealloc(o.jamsd_ctx_dest) jamsd_options_dealloc(o.jamsd_options) if (!isempty(o.gams_dir)) try rm(o.gams_dir, recursive=true, force=true) catch iswin && run(`cmd /C RMDIR /s /q $(o.gams_dir)`) end end end function jamsd_options_set(opt::Dict{String,Any}) jopt = jamsd_options_alloc() for (k,v) in opt jamsd_option_set(jopt, k, v) end return jopt end function jamsd_setemp!(m::JAMSDSolver, emp) m.emp = emp end end
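# Sketch of how the MathProgBase interface defined above is typically driven
# (assuming JAMSDWriter and a GAMS-backed subsolver are installed; the solver
# name "conopt" is only an example).  It loads a small LP
#     min  x + y   s.t.  x + y >= 1,  0 <= x, y <= 10
# through `loadproblem!` and queries the usual MPB accessors.
using JAMSDWriter
using SparseArrays
import MathProgBase
const MPB = MathProgBase

solver = JAMSDSolver("conopt")        # an empty string picks the default solver
m = MPB.LinearQuadraticModel(solver)

A   = sparse([1.0 1.0])               # one constraint, two variables
x_l = [0.0, 0.0];  x_u = [10.0, 10.0]
g_l = [1.0];       g_u = [Inf]
c   = [1.0, 1.0]

MPB.loadproblem!(m, A, x_l, x_u, c, g_l, g_u, :Min)
MPB.optimize!(m)
MPB.status(m), MPB.getobjval(m), MPB.getsolution(m)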
@testset "compute_absorbing_boundary_condition_matrix" begin mmesh = Mesh(10,10,0.1) bdedge = bcedge(mmesh) N = size(bdedge, 1) ρ = ones(4N) cs = ones(4N) cp = ones(4N) A = compute_absorbing_boundary_condition_matrix(ρ, cs, cp, bdedge, mmesh) @test false end
const centuries = ("I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII", "XIII", "XIV", "XV", "XVI","XVII", "XVIII", "XIX", "XX", "XXI") const year_top =2100 const ap = ("AM","PM") unix_time()::Int= rand(0: Dates.datetime2unix(Dates.DateTime(Dates.now()))) iso8601()::DateTime = Dates.DateTime(rand(1970:year_top),rand(01:12),1,rand(01:23),rand(00:59),rand(00:59)) + Dates.Day(rand(01:31)) date_time(pattern::String="Y-m-d HH:MM:SS")::String = Dates.format(iso8601(), pattern) date(pattern::String="Y-m-d")::String = Dates.format(iso8601(), pattern) time(pattern::String="H:M:S")::String = Dates.format(Dates.DateTime(1,1,1,rand(01:23),rand(00:59),rand(00:59)), pattern) function date_time_between(start_date::String="-30y", finish_date::String="now", pattern::String="Y-m-d HH:MM:SS")::String init_date::DateTime = Dates.DateTime(Dates.now()) end_date::DateTime = Dates.DateTime(Dates.now()) if start_date != "now" init_date = init_date + Dates.Year(parse(Int,start_date[1:end-1])) end if finish_date != "now" end_date = end_date + Dates.Year(parse(Int,start_date[1:end-1])) end return Dates.format(rand(collect(init_date:Day(1):end_date)), pattern) end function random_datetime(bf_now::Bool,af_now::Bool,time_start::DateTime,now_time::DateTime,time_finish::DateTime)::DateTime init_date::DateTime = now_time end_date::DateTime = now_time if bf_now init_date = time_start end if af_now end_date = time_finish end return rand(collect(time_start:Day(1):now_time)) end function date_time_this_century(;before_now::Bool=true, after_now::Bool=false, pattern::String="Y-m-d HH:MM:SS")::String r::DateTime = Dates.DateTime(Dates.now()) this_century_start::DateTime = Dates.DateTime((Int(Dates.year(r)) - (Dates.year(r) % 100)), 1, 1) this_century_finish::DateTime = (Dates.DateTime(Dates.year(this_century_start) + 100, 1, 1))-Dates.Day(1) return Dates.format(random_datetime(before_now,after_now,this_century_start,r,this_century_finish), pattern) end function date_time_this_decade(;before_now::Bool=true, after_now::Bool=false, pattern::String="Y-m-d HH:MM:SS")::String r::DateTime = Dates.DateTime(Dates.now()) this_decade_start::DateTime = Dates.DateTime((Int(Dates.year(r)) - (Dates.year(r) % 10)), 1, 1) this_decade_finish::DateTime = (Dates.DateTime(Dates.year(this_decade_start) + 10, 1, 1))-Dates.Day(1) return Dates.format(random_datetime(before_now,after_now,this_decade_start,r,this_decade_finish), pattern) end function date_time_this_year(;before_now::Bool=true, after_now::Bool=false, pattern::String="Y-m-d HH:MM:SS")::String r::DateTime = Dates.DateTime(Dates.now()) this_year_start::DateTime = Dates.DateTime(Dates.year(r), 1, 1) this_year_finish::DateTime = this_year_start+Dates.Year(1)-Dates.Day(1) return Dates.format(random_datetime(before_now,after_now,this_year_start,r,this_year_finish), pattern) end function date_time_this_month(;before_now::Bool=true, after_now::Bool=false, pattern::String="Y-m-d HH:MM:SS")::String r::DateTime = Dates.DateTime(Dates.now()) this_month_start::DateTime = r-Dates.Day(r)+Dates.Day(1) this_month_finish::DateTime = this_month_start+Dates.Month(1)-Dates.Day(1) return Dates.format(random_datetime(before_now,after_now,this_month_start,r,this_month_finish), pattern) end am_pm()::String = rand(ap) day_of_month()::Int = rand(1:31) day_of_week()::String = Dates.dayname(Date(2015,07,rand(12:18))) months()::Int =rand(1:12) month_name()::String = Dates.monthname(Date(2015,rand(1:12),15)) year()::Int =rand(1920:year_top) century()::String = rand(centuries) function 
timezone()::String executor(data["faker"]["address"]["time_zone"]) end
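# The date/time helpers above all reduce to one operation: draw a random DateTime
# between a lower and an upper bound.  A self-contained sketch of that step using
# only the Dates standard library (the name `rand_datetime_between` is introduced
# here and is not part of the API above):
using Dates

function rand_datetime_between(lo::DateTime, hi::DateTime)::DateTime
    lo <= hi || throw(ArgumentError("lower bound must not exceed upper bound"))
    # sample at millisecond resolution instead of materialising a Day(1) range
    return lo + Millisecond(rand(0:Dates.value(hi - lo)))
end

rand_datetime_between(DateTime(1990, 1, 1), now())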
import Pkg; Pkg.activate(".")

Pkg.add("Test")
Pkg.add("Zygote")
Pkg.add("ForwardDiff")
Pkg.add("Flux")
Pkg.add("Rotations")
# Pkg.add("JLD2")
# Pkg.add("Documenter")
# Pkg.add("Example")
Pkg.add("DSP")
# Pkg.add("Makie")
Pkg.add("GLMakie")
# Pkg.add("CairoMakie")
# Pkg.add("FFTW")
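# Equivalent one-shot form for the active (uncommented) packages above; Pkg.add
# also accepts a vector of package names, which keeps the setup script shorter.
import Pkg
Pkg.activate(".")
Pkg.add(["Test", "Zygote", "ForwardDiff", "Flux", "Rotations", "DSP", "GLMakie"])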
using Distances
# kernelweights.jl: kernel weights for nonparametrics
function kernelweights(x, xeval, bandwidth, prewhiten=true, kernel="gaussian", neighbors=1)
    if prewhiten
        v = cov(x)
        if isposdef(v)
            P = inv(chol(v))
        else
            # fall back to scaling by the inverse standard deviations
            P = Diagonal(1.0 ./ sqrt.(diag(v)))
        end
        x *= P
        xeval *= P
    end
    neval = size(xeval, 1)
    n = size(x, 1)
    weights = zeros(n, neval)
    # Gaussian product kernel, add others if desired
    if kernel == "gaussian"
        for i = 1:neval
            z = (x .- xeval[[i], :]) / bandwidth
            weights[:, i] = exp.(-0.5 * sum(z .* z, 2))
        end
    end
    if kernel == "knngaussian"
        distances = pairwise(Euclidean(), x', xeval') # get all distances
        @inbounds for i = 1:neval
            di = view(distances, :, [i])
            ind = sortperm(di) # indices of k nearest neighbors
            selected = vec(ind[1:neighbors, :])
            z = (x[selected, :] .- xeval[[i], :]) / bandwidth
            weights[selected, i] = exp.(-0.5 * sum(z .* z, 2))
        end
    end
    weights ./= sum(weights, 1)
    return weights
end
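# A compact restatement of the Gaussian branch in current Julia syntax (the `dims`
# keyword instead of the positional form used above); `gaussian_weights` is just an
# illustrative name for this sketch, not part of the file above.
function gaussian_weights(x::AbstractMatrix, xeval::AbstractMatrix, bandwidth::Real)
    n, neval = size(x, 1), size(xeval, 1)
    weights = zeros(n, neval)
    for i in 1:neval
        z = (x .- xeval[i:i, :]) ./ bandwidth
        weights[:, i] = exp.(-0.5 .* vec(sum(z .* z; dims=2)))
    end
    return weights ./ sum(weights; dims=1)   # each column sums to one
end

w = gaussian_weights(randn(100, 2), randn(5, 2), 0.5)
sum(w; dims=1)   # ≈ ones(1, 5)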
using OrderedCollections
using Printf  # needed for the @printf calls in show below

"""
A simple struct to contain a vector of energies, and utilities to print them
in a nice format.
"""
struct Energies{T <: Number}
    # energies["TermName"]
    # parametrization on T acts as a nice check that all terms return correct type
    energies::OrderedDict{String, T}
end

function Base.show(io::IO, energies::Energies)
    print(io, "Energies(total = $(energies.total))")
end
function Base.show(io::IO, ::MIME"text/plain", energies::Energies)
    println(io, "Energy breakdown (in Ha):")
    for (name, value) in energies.energies
        @printf io " %-20s%-10.7f\n" string(name) value
    end
    @printf io "\n %-20s%-15.12f" "total" energies.total
end
Base.getindex(energies::Energies, i) = energies.energies[i]
Base.values(energies::Energies) = values(energies.energies)
Base.keys(energies::Energies) = keys(energies.energies)
Base.pairs(energies::Energies) = pairs(energies.energies)
Base.iterate(energies::Energies) = iterate(energies.energies)
Base.iterate(energies::Energies, state) = iterate(energies.energies, state)
Base.haskey(energies::Energies, key) = haskey(energies.energies, key)

function Energies(term_types::Vector, energies::Vector{T}) where {T}
    # nameof is there to get rid of parametric types
    Energies{T}(OrderedDict([string(nameof(typeof(term))) => energies[i]
                             for (i, term) in enumerate(term_types)]...))
end

function Base.propertynames(energies::Energies, private::Bool=false)
    ret = collect(keys(energies))
    push!(ret, "total")
    private && push!(ret, "energies")
    ret
end
function Base.getproperty(energies::Energies, x::Symbol)
    x == :total && return sum(values(energies))
    x == :energies && return getfield(energies, x)
    energies.energies[string(x)]
end
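# Small usage sketch for the container above; the dummy term types `Kinetic` and
# `Hartree` are defined only for this example and are not part of the file.
struct Kinetic end
struct Hartree end

E = Energies([Kinetic(), Hartree()], [0.5, -0.25])

E["Kinetic"]      # 0.5, lookup by term name
E.Hartree         # -0.25, property-style access via getproperty
E.total           # 0.25, sum of all terms
collect(pairs(E)) # name => value pairs in insertion order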
# This file is a part of Julia. License is MIT: https://julialang.org/license module TestAdjointTranspose using Test, LinearAlgebra, SparseArrays @testset "Adjoint and Transpose inner constructor basics" begin intvec, intmat = [1, 2], [1 2; 3 4] # Adjoint/Transpose eltype must match the type of the Adjoint/Transpose of the input eltype @test_throws ErrorException Adjoint{Float64,Vector{Int}}(intvec) @test_throws ErrorException Adjoint{Float64,Matrix{Int}}(intmat) @test_throws ErrorException Transpose{Float64,Vector{Int}}(intvec) @test_throws ErrorException Transpose{Float64,Matrix{Int}}(intmat) # Adjoint/Transpose wrapped array type must match the input array type @test_throws MethodError Adjoint{Int,Vector{Float64}}(intvec) @test_throws MethodError Adjoint{Int,Matrix{Float64}}(intmat) @test_throws MethodError Transpose{Int,Vector{Float64}}(intvec) @test_throws MethodError Transpose{Int,Matrix{Float64}}(intmat) # Adjoint/Transpose inner constructor basic functionality, concrete scalar eltype @test (Adjoint{Int,Vector{Int}}(intvec)::Adjoint{Int,Vector{Int}}).parent === intvec @test (Adjoint{Int,Matrix{Int}}(intmat)::Adjoint{Int,Matrix{Int}}).parent === intmat @test (Transpose{Int,Vector{Int}}(intvec)::Transpose{Int,Vector{Int}}).parent === intvec @test (Transpose{Int,Matrix{Int}}(intmat)::Transpose{Int,Matrix{Int}}).parent === intmat # Adjoint/Transpose inner constructor basic functionality, abstract scalar eltype anyvec, anymat = Any[1, 2], Any[1 2; 3 4] @test (Adjoint{Any,Vector{Any}}(anyvec)::Adjoint{Any,Vector{Any}}).parent === anyvec @test (Adjoint{Any,Matrix{Any}}(anymat)::Adjoint{Any,Matrix{Any}}).parent === anymat @test (Transpose{Any,Vector{Any}}(anyvec)::Transpose{Any,Vector{Any}}).parent === anyvec @test (Transpose{Any,Matrix{Any}}(anymat)::Transpose{Any,Matrix{Any}}).parent === anymat # Adjoint/Transpose inner constructor basic functionality, concrete array eltype intvecvec = [[1, 2], [3, 4]] intmatmat = [[[1 2]] [[3 4]] [[5 6]]; [[7 8]] [[9 10]] [[11 12]]] @test (X = Adjoint{Adjoint{Int,Vector{Int}},Vector{Vector{Int}}}(intvecvec); isa(X, Adjoint{Adjoint{Int,Vector{Int}},Vector{Vector{Int}}}) && X.parent === intvecvec) @test (X = Adjoint{Adjoint{Int,Matrix{Int}},Matrix{Matrix{Int}}}(intmatmat); isa(X, Adjoint{Adjoint{Int,Matrix{Int}},Matrix{Matrix{Int}}}) && X.parent === intmatmat) @test (X = Transpose{Transpose{Int,Vector{Int}},Vector{Vector{Int}}}(intvecvec); isa(X, Transpose{Transpose{Int,Vector{Int}},Vector{Vector{Int}}}) && X.parent === intvecvec) @test (X = Transpose{Transpose{Int,Matrix{Int}},Matrix{Matrix{Int}}}(intmatmat); isa(X, Transpose{Transpose{Int,Matrix{Int}},Matrix{Matrix{Int}}}) && X.parent === intmatmat) end @testset "Adjoint and Transpose outer constructor basics" begin intvec, intmat = [1, 2], [1 2; 3 4] # the wrapped array's eltype strictly determines the Adjoint/Transpose eltype # so Adjoint{T}/Transpose{T} constructors are somewhat unnecessary and error-prone # so ascertain that such calls throw whether or not T and the input eltype are compatible @test_throws MethodError Adjoint{Int}(intvec) @test_throws MethodError Adjoint{Int}(intmat) @test_throws MethodError Adjoint{Float64}(intvec) @test_throws MethodError Adjoint{Float64}(intmat) @test_throws MethodError Transpose{Int}(intvec) @test_throws MethodError Transpose{Int}(intmat) @test_throws MethodError Transpose{Float64}(intvec) @test_throws MethodError Transpose{Float64}(intmat) # Adjoint/Transpose outer constructor basic functionality, concrete scalar eltype @test 
(Adjoint(intvec)::Adjoint{Int,Vector{Int}}).parent === intvec @test (Adjoint(intmat)::Adjoint{Int,Matrix{Int}}).parent === intmat @test (Transpose(intvec)::Transpose{Int,Vector{Int}}).parent === intvec @test (Transpose(intmat)::Transpose{Int,Matrix{Int}}).parent === intmat # the tests for the inner constructors exercise abstract scalar and concrete array eltype, forgoing here end @testset "Adjoint and Transpose add additional layers to already-wrapped objects" begin intvec, intmat = [1, 2], [1 2; 3 4] @test (A = Adjoint(Adjoint(intvec))::Adjoint{Int,Adjoint{Int,Vector{Int}}}; A.parent.parent === intvec) @test (A = Adjoint(Adjoint(intmat))::Adjoint{Int,Adjoint{Int,Matrix{Int}}}; A.parent.parent === intmat) @test (A = Transpose(Transpose(intvec))::Transpose{Int,Transpose{Int,Vector{Int}}}; A.parent.parent === intvec) @test (A = Transpose(Transpose(intmat))::Transpose{Int,Transpose{Int,Matrix{Int}}}; A.parent.parent === intmat) end @testset "Adjoint and Transpose basic AbstractArray functionality" begin # vectors and matrices with real scalar eltype, and their adjoints/transposes intvec, intmat = [1, 2], [1 2 3; 4 5 6] tintvec, tintmat = [1 2], [1 4; 2 5; 3 6] @testset "length methods" begin @test length(Adjoint(intvec)) == length(intvec) @test length(Adjoint(intmat)) == length(intmat) @test length(Transpose(intvec)) == length(intvec) @test length(Transpose(intmat)) == length(intmat) end @testset "size methods" begin @test size(Adjoint(intvec)) == (1, length(intvec)) @test size(Adjoint(intmat)) == reverse(size(intmat)) @test size(Transpose(intvec)) == (1, length(intvec)) @test size(Transpose(intmat)) == reverse(size(intmat)) end @testset "indices methods" begin @test axes(Adjoint(intvec)) == (Base.OneTo(1), Base.OneTo(length(intvec))) @test axes(Adjoint(intmat)) == reverse(axes(intmat)) @test axes(Transpose(intvec)) == (Base.OneTo(1), Base.OneTo(length(intvec))) @test axes(Transpose(intmat)) == reverse(axes(intmat)) end @testset "IndexStyle methods" begin @test IndexStyle(Adjoint(intvec)) == IndexLinear() @test IndexStyle(Adjoint(intmat)) == IndexCartesian() @test IndexStyle(Transpose(intvec)) == IndexLinear() @test IndexStyle(Transpose(intmat)) == IndexCartesian() end # vectors and matrices with complex scalar eltype, and their adjoints/transposes complexintvec, complexintmat = [1im, 2im], [1im 2im 3im; 4im 5im 6im] tcomplexintvec, tcomplexintmat = [1im 2im], [1im 4im; 2im 5im; 3im 6im] acomplexintvec, acomplexintmat = conj.(tcomplexintvec), conj.(tcomplexintmat) # vectors and matrices with real-vector and real-matrix eltype, and their adjoints/transposes intvecvec = [[1, 2], [3, 4]] tintvecvec = [[[1 2]] [[3 4]]] intmatmat = [[[1 2]] [[3 4]] [[ 5 6]]; [[7 8]] [[9 10]] [[11 12]]] tintmatmat = [[hcat([1, 2])] [hcat([7, 8])]; [hcat([3, 4])] [hcat([9, 10])]; [hcat([5, 6])] [hcat([11, 12])]] # vectors and matrices with complex-vector and complex-matrix eltype, and their adjoints/transposes complexintvecvec, complexintmatmat = im .* (intvecvec, intmatmat) tcomplexintvecvec, tcomplexintmatmat = im .* (tintvecvec, tintmatmat) acomplexintvecvec, acomplexintmatmat = conj.(tcomplexintvecvec), conj.(tcomplexintmatmat) @testset "getindex methods, elementary" begin # implicitly test elementary definitions, for arrays with concrete real scalar eltype @test Adjoint(intvec) == tintvec @test Adjoint(intmat) == tintmat @test Transpose(intvec) == tintvec @test Transpose(intmat) == tintmat # implicitly test elementary definitions, for arrays with concrete complex scalar eltype @test Adjoint(complexintvec) == 
acomplexintvec @test Adjoint(complexintmat) == acomplexintmat @test Transpose(complexintvec) == tcomplexintvec @test Transpose(complexintmat) == tcomplexintmat # implicitly test elementary definitions, for arrays with concrete real-array eltype @test Adjoint(intvecvec) == tintvecvec @test Adjoint(intmatmat) == tintmatmat @test Transpose(intvecvec) == tintvecvec @test Transpose(intmatmat) == tintmatmat # implicitly test elementary definitions, for arrays with concrete complex-array type @test Adjoint(complexintvecvec) == acomplexintvecvec @test Adjoint(complexintmatmat) == acomplexintmatmat @test Transpose(complexintvecvec) == tcomplexintvecvec @test Transpose(complexintmatmat) == tcomplexintmatmat end @testset "getindex(::AdjOrTransVec, ::Colon, ::AbstractArray{Int}) methods that preserve wrapper type" begin # for arrays with concrete scalar eltype @test Adjoint(intvec)[:, [1, 2]] == Adjoint(intvec) @test Transpose(intvec)[:, [1, 2]] == Transpose(intvec) @test Adjoint(complexintvec)[:, [1, 2]] == Adjoint(complexintvec) @test Transpose(complexintvec)[:, [1, 2]] == Transpose(complexintvec) # for arrays with concrete array eltype @test Adjoint(intvecvec)[:, [1, 2]] == Adjoint(intvecvec) @test Transpose(intvecvec)[:, [1, 2]] == Transpose(intvecvec) @test Adjoint(complexintvecvec)[:, [1, 2]] == Adjoint(complexintvecvec) @test Transpose(complexintvecvec)[:, [1, 2]] == Transpose(complexintvecvec) end @testset "getindex(::AdjOrTransVec, ::Colon, ::Colon) methods that preserve wrapper type" begin # for arrays with concrete scalar eltype @test Adjoint(intvec)[:, :] == Adjoint(intvec) @test Transpose(intvec)[:, :] == Transpose(intvec) @test Adjoint(complexintvec)[:, :] == Adjoint(complexintvec) @test Transpose(complexintvec)[:, :] == Transpose(complexintvec) # for arrays with concrete array elype @test Adjoint(intvecvec)[:, :] == Adjoint(intvecvec) @test Transpose(intvecvec)[:, :] == Transpose(intvecvec) @test Adjoint(complexintvecvec)[:, :] == Adjoint(complexintvecvec) @test Transpose(complexintvecvec)[:, :] == Transpose(complexintvecvec) end @testset "getindex(::AdjOrTransVec, ::Colon, ::Int) should preserve wrapper type on result entries" begin # for arrays with concrete scalar eltype @test Adjoint(intvec)[:, 2] == intvec[2:2] @test Transpose(intvec)[:, 2] == intvec[2:2] @test Adjoint(complexintvec)[:, 2] == conj.(complexintvec[2:2]) @test Transpose(complexintvec)[:, 2] == complexintvec[2:2] # for arrays with concrete array eltype @test Adjoint(intvecvec)[:, 2] == Adjoint.(intvecvec[2:2]) @test Transpose(intvecvec)[:, 2] == Transpose.(intvecvec[2:2]) @test Adjoint(complexintvecvec)[:, 2] == Adjoint.(complexintvecvec[2:2]) @test Transpose(complexintvecvec)[:, 2] == Transpose.(complexintvecvec[2:2]) end @testset "setindex! 
methods" begin # for vectors with real scalar eltype @test (wv = Adjoint(copy(intvec)); wv === setindex!(wv, 3, 2) && wv == setindex!(copy(tintvec), 3, 1, 2) ) @test (wv = Transpose(copy(intvec)); wv === setindex!(wv, 4, 2) && wv == setindex!(copy(tintvec), 4, 1, 2) ) # for matrices with real scalar eltype @test (wA = Adjoint(copy(intmat)); wA === setindex!(wA, 7, 3, 1) && wA == setindex!(copy(tintmat), 7, 3, 1) ) @test (wA = Transpose(copy(intmat)); wA === setindex!(wA, 7, 3, 1) && wA == setindex!(copy(tintmat), 7, 3, 1) ) # for vectors with complex scalar eltype @test (wz = Adjoint(copy(complexintvec)); wz === setindex!(wz, 3im, 2) && wz == setindex!(copy(acomplexintvec), 3im, 1, 2) ) @test (wz = Transpose(copy(complexintvec)); wz === setindex!(wz, 4im, 2) && wz == setindex!(copy(tcomplexintvec), 4im, 1, 2) ) # for matrices with complex scalar eltype @test (wZ = Adjoint(copy(complexintmat)); wZ === setindex!(wZ, 7im, 3, 1) && wZ == setindex!(copy(acomplexintmat), 7im, 3, 1) ) @test (wZ = Transpose(copy(complexintmat)); wZ === setindex!(wZ, 7im, 3, 1) && wZ == setindex!(copy(tcomplexintmat), 7im, 3, 1) ) # for vectors with concrete real-vector eltype @test (wv = Adjoint(copy(intvecvec)); wv === setindex!(wv, Adjoint([5, 6]), 2) && wv == setindex!(copy(tintvecvec), [5 6], 2)) @test (wv = Transpose(copy(intvecvec)); wv === setindex!(wv, Transpose([5, 6]), 2) && wv == setindex!(copy(tintvecvec), [5 6], 2)) # for matrices with concrete real-matrix eltype @test (wA = Adjoint(copy(intmatmat)); wA === setindex!(wA, Adjoint([13 14]), 3, 1) && wA == setindex!(copy(tintmatmat), hcat([13, 14]), 3, 1)) @test (wA = Transpose(copy(intmatmat)); wA === setindex!(wA, Transpose([13 14]), 3, 1) && wA == setindex!(copy(tintmatmat), hcat([13, 14]), 3, 1)) # for vectors with concrete complex-vector eltype @test (wz = Adjoint(copy(complexintvecvec)); wz === setindex!(wz, Adjoint([5im, 6im]), 2) && wz == setindex!(copy(acomplexintvecvec), [-5im -6im], 2)) @test (wz = Transpose(copy(complexintvecvec)); wz === setindex!(wz, Transpose([5im, 6im]), 2) && wz == setindex!(copy(tcomplexintvecvec), [5im 6im], 2)) # for matrices with concrete complex-matrix eltype @test (wZ = Adjoint(copy(complexintmatmat)); wZ === setindex!(wZ, Adjoint([13im 14im]), 3, 1) && wZ == setindex!(copy(acomplexintmatmat), hcat([-13im, -14im]), 3, 1)) @test (wZ = Transpose(copy(complexintmatmat)); wZ === setindex!(wZ, Transpose([13im 14im]), 3, 1) && wZ == setindex!(copy(tcomplexintmatmat), hcat([13im, 14im]), 3, 1)) end end @testset "Adjoint and Transpose convert methods that convert underlying storage" begin intvec, intmat = [1, 2], [1 2 3; 4 5 6] @test convert(Adjoint{Float64,Vector{Float64}}, Adjoint(intvec))::Adjoint{Float64,Vector{Float64}} == Adjoint(intvec) @test convert(Adjoint{Float64,Matrix{Float64}}, Adjoint(intmat))::Adjoint{Float64,Matrix{Float64}} == Adjoint(intmat) @test convert(Transpose{Float64,Vector{Float64}}, Transpose(intvec))::Transpose{Float64,Vector{Float64}} == Transpose(intvec) @test convert(Transpose{Float64,Matrix{Float64}}, Transpose(intmat))::Transpose{Float64,Matrix{Float64}} == Transpose(intmat) end @testset "Adjoint and Transpose similar methods" begin intvec, intmat = [1, 2], [1 2 3; 4 5 6] # similar with no additional specifications, vector (rewrapping) semantics @test size(similar(Adjoint(intvec))::Adjoint{Int,Vector{Int}}) == size(Adjoint(intvec)) @test size(similar(Transpose(intvec))::Transpose{Int,Vector{Int}}) == size(Transpose(intvec)) # similar with no additional specifications, matrix (no-rewrapping) 
semantics @test size(similar(Adjoint(intmat))::Matrix{Int}) == size(Adjoint(intmat)) @test size(similar(Transpose(intmat))::Matrix{Int}) == size(Transpose(intmat)) # similar with element type specification, vector (rewrapping) semantics @test size(similar(Adjoint(intvec), Float64)::Adjoint{Float64,Vector{Float64}}) == size(Adjoint(intvec)) @test size(similar(Transpose(intvec), Float64)::Transpose{Float64,Vector{Float64}}) == size(Transpose(intvec)) # similar with element type specification, matrix (no-rewrapping) semantics @test size(similar(Adjoint(intmat), Float64)::Matrix{Float64}) == size(Adjoint(intmat)) @test size(similar(Transpose(intmat), Float64)::Matrix{Float64}) == size(Transpose(intmat)) # similar with element type and arbitrary dims specifications shape = (2, 2, 2) @test size(similar(Adjoint(intvec), Float64, shape)::Array{Float64,3}) == shape @test size(similar(Adjoint(intmat), Float64, shape)::Array{Float64,3}) == shape @test size(similar(Transpose(intvec), Float64, shape)::Array{Float64,3}) == shape @test size(similar(Transpose(intmat), Float64, shape)::Array{Float64,3}) == shape end @testset "Adjoint and Transpose parent methods" begin intvec, intmat = [1, 2], [1 2 3; 4 5 6] @test parent(Adjoint(intvec)) === intvec @test parent(Adjoint(intmat)) === intmat @test parent(Transpose(intvec)) === intvec @test parent(Transpose(intmat)) === intmat end @testset "Adjoint and Transpose vector vec methods" begin intvec = [1, 2] @test vec(Adjoint(intvec)) == intvec @test vec(Transpose(intvec)) === intvec cvec = [1 + 1im] @test vec(cvec')[1] == cvec[1]' end @testset "horizontal concatenation of Adjoint/Transpose-wrapped vectors and Numbers" begin # horizontal concatenation of Adjoint/Transpose-wrapped vectors and Numbers # should preserve the Adjoint/Transpose-wrapper to preserve semantics downstream vec, tvec, avec = [1im, 2im], [1im 2im], [-1im -2im] vecvec = [[1im, 2im], [3im, 4im]] tvecvec = [[[1im 2im]] [[3im 4im]]] avecvec = [[[-1im -2im]] [[-3im -4im]]] # for arrays with concrete scalar eltype @test hcat(Adjoint(vec), Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == hcat(avec, avec) @test hcat(Adjoint(vec), 1, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == hcat(avec, 1, avec) @test hcat(Transpose(vec), Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == hcat(tvec, tvec) @test hcat(Transpose(vec), 1, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == hcat(tvec, 1, tvec) # for arrays with concrete array eltype @test hcat(Adjoint(vecvec), Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == hcat(avecvec, avecvec) @test hcat(Transpose(vecvec), Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == hcat(tvecvec, tvecvec) end @testset "map/broadcast over Adjoint/Transpose-wrapped vectors and Numbers" begin # map and broadcast over Adjoint/Transpose-wrapped vectors and Numbers # should preserve the Adjoint/Transpose-wrapper to preserve semantics downstream vec, tvec, avec = [1im, 2im], [1im 2im], [-1im -2im] vecvec = [[1im, 2im], [3im, 4im]] tvecvec = [[[1im 2im]] [[3im 4im]]] avecvec = [[[-1im -2im]] [[-3im -4im]]] # unary map over wrapped vectors with concrete scalar eltype @test map(-, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == -avec @test map(-, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == -tvec # unary map over wrapped vectors with concrete array eltype @test map(-, 
Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -avecvec @test map(-, Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -tvecvec # binary map over wrapped vectors with concrete scalar eltype @test map(+, Adjoint(vec), Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec @test map(+, Transpose(vec), Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec # binary map over wrapped vectors with concrete array eltype @test map(+, Adjoint(vecvec), Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == avecvec + avecvec @test map(+, Transpose(vecvec), Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == tvecvec + tvecvec # unary broadcast over wrapped vectors with concrete scalar eltype @test broadcast(-, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == -avec @test broadcast(-, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == -tvec # unary broadcast over wrapped vectors with concrete array eltype @test broadcast(-, Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -avecvec @test broadcast(-, Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -tvecvec # binary broadcast over wrapped vectors with concrete scalar eltype @test broadcast(+, Adjoint(vec), Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec @test broadcast(+, Transpose(vec), Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec # binary broadcast over wrapped vectors with concrete array eltype @test broadcast(+, Adjoint(vecvec), Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == avecvec + avecvec @test broadcast(+, Transpose(vecvec), Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == tvecvec + tvecvec # trinary broadcast over wrapped vectors with concrete scalar eltype and numbers @test broadcast(+, Adjoint(vec), 1, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec .+ 1 @test broadcast(+, Transpose(vec), 1, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec .+ 1 @test broadcast(+, Adjoint(vec), 1im, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec .+ 1im @test broadcast(+, Transpose(vec), 1im, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec .+ 1im # ascertain inference friendliness, ref. 
https://github.com/JuliaLang/julia/pull/25083#issuecomment-353031641 sparsevec = SparseVector([1.0, 2.0, 3.0]) @test map(-, Adjoint(sparsevec), Adjoint(sparsevec)) isa Adjoint{Float64,SparseVector{Float64,Int}} @test map(-, Transpose(sparsevec), Transpose(sparsevec)) isa Transpose{Float64,SparseVector{Float64,Int}} @test broadcast(-, Adjoint(sparsevec), Adjoint(sparsevec)) isa Adjoint{Float64,SparseVector{Float64,Int}} @test broadcast(-, Transpose(sparsevec), Transpose(sparsevec)) isa Transpose{Float64,SparseVector{Float64,Int}} @test broadcast(+, Adjoint(sparsevec), 1.0, Adjoint(sparsevec)) isa Adjoint{Float64,SparseVector{Float64,Int}} @test broadcast(+, Transpose(sparsevec), 1.0, Transpose(sparsevec)) isa Transpose{Float64,SparseVector{Float64,Int}} end @testset "Adjoint/Transpose-wrapped vector multiplication" begin realvec, realmat = [1, 2, 3], [1 2 3; 4 5 6; 7 8 9] complexvec, complexmat = [1im, 2, -3im], [1im 2 3; 4 5 -6im; 7im 8 9] # Adjoint/Transpose-vector * vector @test Adjoint(realvec) * realvec == dot(realvec, realvec) @test Transpose(realvec) * realvec == dot(realvec, realvec) @test Adjoint(complexvec) * complexvec == dot(complexvec, complexvec) @test Transpose(complexvec) * complexvec == dot(conj(complexvec), complexvec) # vector * Adjoint/Transpose-vector @test realvec * Adjoint(realvec) == broadcast(*, realvec, reshape(realvec, (1, 3))) @test realvec * Transpose(realvec) == broadcast(*, realvec, reshape(realvec, (1, 3))) @test complexvec * Adjoint(complexvec) == broadcast(*, complexvec, reshape(conj(complexvec), (1, 3))) @test complexvec * Transpose(complexvec) == broadcast(*, complexvec, reshape(complexvec, (1, 3))) # Adjoint/Transpose-vector * matrix @test (Adjoint(realvec) * realmat)::Adjoint{Int,Vector{Int}} == reshape(copy(Adjoint(realmat)) * realvec, (1, 3)) @test (Transpose(realvec) * realmat)::Transpose{Int,Vector{Int}} == reshape(copy(Transpose(realmat)) * realvec, (1, 3)) @test (Adjoint(complexvec) * complexmat)::Adjoint{Complex{Int},Vector{Complex{Int}}} == reshape(conj(copy(Adjoint(complexmat)) * complexvec), (1, 3)) @test (Transpose(complexvec) * complexmat)::Transpose{Complex{Int},Vector{Complex{Int}}} == reshape(copy(Transpose(complexmat)) * complexvec, (1, 3)) # Adjoint/Transpose-vector * Adjoint/Transpose-matrix @test (Adjoint(realvec) * Adjoint(realmat))::Adjoint{Int,Vector{Int}} == reshape(realmat * realvec, (1, 3)) @test (Transpose(realvec) * Transpose(realmat))::Transpose{Int,Vector{Int}} == reshape(realmat * realvec, (1, 3)) @test (Adjoint(complexvec) * Adjoint(complexmat))::Adjoint{Complex{Int},Vector{Complex{Int}}} == reshape(conj(complexmat * complexvec), (1, 3)) @test (Transpose(complexvec) * Transpose(complexmat))::Transpose{Complex{Int},Vector{Complex{Int}}} == reshape(complexmat * complexvec, (1, 3)) end @testset "Adjoint/Transpose-wrapped vector pseudoinversion" begin realvec, complexvec = [1, 2, 3, 4], [1im, 2, 3im, 4] rowrealvec, rowcomplexvec = reshape(realvec, (1, 4)), reshape(complexvec, (1, 4)) # pinv(Adjoint/Transpose-vector) should match matrix equivalents # TODO tighten type asserts once pinv yields Transpose/Adjoint @test pinv(Adjoint(realvec))::Vector{Float64} ≈ pinv(rowrealvec) @test pinv(Transpose(realvec))::Vector{Float64} ≈ pinv(rowrealvec) @test pinv(Adjoint(complexvec))::Vector{Complex{Float64}} ≈ pinv(conj(rowcomplexvec)) @test pinv(Transpose(complexvec))::Vector{Complex{Float64}} ≈ pinv(rowcomplexvec) end @testset "Adjoint/Transpose-wrapped vector left-division" begin realvec, complexvec = [1., 2., 3., 4.,], [1.0im, 2., 
3.0im, 4.] rowrealvec, rowcomplexvec = reshape(realvec, (1, 4)), reshape(complexvec, (1, 4)) # \(Adjoint/Transpose-vector, Adjoint/Transpose-vector) should mat matrix equivalents @test Adjoint(realvec)\Adjoint(realvec) ≈ rowrealvec\rowrealvec @test Transpose(realvec)\Transpose(realvec) ≈ rowrealvec\rowrealvec @test Adjoint(complexvec)\Adjoint(complexvec) ≈ conj(rowcomplexvec)\conj(rowcomplexvec) @test Transpose(complexvec)\Transpose(complexvec) ≈ rowcomplexvec\rowcomplexvec end @testset "Adjoint/Transpose-wrapped vector right-division" begin realvec, realmat = [1, 2, 3], [1 0 0; 0 2 0; 0 0 3] complexvec, complexmat = [1im, 2, -3im], [2im 0 0; 0 3 0; 0 0 -5im] rowrealvec, rowcomplexvec = reshape(realvec, (1, 3)), reshape(complexvec, (1, 3)) # /(Adjoint/Transpose-vector, matrix) @test (Adjoint(realvec) / realmat)::Adjoint ≈ rowrealvec / realmat @test (Adjoint(complexvec) / complexmat)::Adjoint ≈ conj(rowcomplexvec) / complexmat @test (Transpose(realvec) / realmat)::Transpose ≈ rowrealvec / realmat @test (Transpose(complexvec) / complexmat)::Transpose ≈ rowcomplexvec / complexmat # /(Adjoint/Transpose-vector, Adjoint matrix) @test (Adjoint(realvec) / Adjoint(realmat))::Adjoint ≈ rowrealvec / copy(Adjoint(realmat)) @test (Adjoint(complexvec) / Adjoint(complexmat))::Adjoint ≈ conj(rowcomplexvec) / copy(Adjoint(complexmat)) @test (Transpose(realvec) / Adjoint(realmat))::Transpose ≈ rowrealvec / copy(Adjoint(realmat)) @test (Transpose(complexvec) / Adjoint(complexmat))::Transpose ≈ rowcomplexvec / copy(Adjoint(complexmat)) # /(Adjoint/Transpose-vector, Transpose matrix) @test (Adjoint(realvec) / Transpose(realmat))::Adjoint ≈ rowrealvec / copy(Transpose(realmat)) @test (Adjoint(complexvec) / Transpose(complexmat))::Adjoint ≈ conj(rowcomplexvec) / copy(Transpose(complexmat)) @test (Transpose(realvec) / Transpose(realmat))::Transpose ≈ rowrealvec / copy(Transpose(realmat)) @test (Transpose(complexvec) / Transpose(complexmat))::Transpose ≈ rowcomplexvec / copy(Transpose(complexmat)) end @testset "norm and opnorm of Adjoint/Transpose-wrapped vectors" begin # definitions are in base/linalg/generic.jl realvec, complexvec = [3, -4], [3im, -4im] # one norm result should be sum(abs.(realvec)) == 7 # two norm result should be sqrt(sum(abs.(realvec))) == 5 # inf norm result should be maximum(abs.(realvec)) == 4 for v in (realvec, complexvec) @test norm(Adjoint(v)) ≈ 5 @test norm(Adjoint(v), 1) ≈ 7 @test norm(Adjoint(v), Inf) ≈ 4 @test norm(Transpose(v)) ≈ 5 @test norm(Transpose(v), 1) ≈ 7 @test norm(Transpose(v), Inf) ≈ 4 end # one opnorm result should be maximum(abs.(realvec)) == 4 # two opnorm result should be sqrt(sum(abs.(realvec))) == 5 # inf opnorm result should be sum(abs.(realvec)) == 7 for v in (realvec, complexvec) @test opnorm(Adjoint(v)) ≈ 5 @test opnorm(Adjoint(v), 1) ≈ 4 @test opnorm(Adjoint(v), Inf) ≈ 7 @test opnorm(Transpose(v)) ≈ 5 @test opnorm(Transpose(v), 1) ≈ 4 @test opnorm(Transpose(v), Inf) ≈ 7 end end @testset "adjoint and transpose of Numbers" begin @test adjoint(1) == 1 @test adjoint(1.0) == 1.0 @test adjoint(1im) == -1im @test adjoint(1.0im) == -1.0im @test transpose(1) == 1 @test transpose(1.0) == 1.0 @test transpose(1im) == 1im @test transpose(1.0im) == 1.0im end @testset "adjoint!(a, b) return a" begin a = fill(1.0+im, 5) b = fill(1.0+im, 1, 5) @test adjoint!(a, b) === a @test adjoint!(b, a) === b end @testset "aliasing with adjoint and transpose" begin A = collect(reshape(1:25, 5, 5)) .+ rand.().*im B = copy(A) B .= B' @test B == A' B = copy(A) B .= transpose(B) @test B == 
transpose(A) B = copy(A) B .= B .* B' @test B == A .* A' end @testset "test show methods for $t of Factorizations" for t in (Adjoint, Transpose) A = randn(4, 4) F = lu(A) Fop = t(F) @test "LinearAlgebra."*sprint(show, Fop) == "$t of "*sprint(show, parent(Fop)) @test "LinearAlgebra."*sprint((io, t) -> show(io, MIME"text/plain"(), t), Fop) == "$t of "*sprint((io, t) -> show(io, MIME"text/plain"(), t), parent(Fop)) end const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) using .Main.OffsetArrays @testset "offset axes" begin s = Base.Slice(-3:3)' @test axes(s) === (Base.OneTo(1), Base.IdentityUnitRange(-3:3)) @test collect(LinearIndices(s)) == reshape(1:7, 1, 7) @test collect(CartesianIndices(s)) == reshape([CartesianIndex(1,i) for i = -3:3], 1, 7) @test s[1] == -3 @test s[7] == 3 @test s[4] == 0 @test_throws BoundsError s[0] @test_throws BoundsError s[8] @test s[1,-3] == -3 @test s[1, 3] == 3 @test s[1, 0] == 0 @test_throws BoundsError s[1,-4] @test_throws BoundsError s[1, 4] end end # module TestAdjointTranspose
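# Aside (not part of the test file above; a minimal sketch of the lazy wrapper
# semantics those tests exercise): Adjoint/Transpose wrap the parent array rather
# than copying it, and Adjoint conjugates complex entries on access.
using LinearAlgebra
v = [1 + 2im, 3 - 1im]
av = Adjoint(v)                 # lazy 1×2 row wrapper
@assert av.parent === v         # no copy is made
@assert av == adjoint(v) == v'  # the outer constructor matches adjoint/'
@assert av[1, 2] == conj(v[2])  # conjugation happens on indexing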
function nsieve(limit::Int) primes = trues(limit) primes[1] = false p = 2 while !isnothing(p) && p <= isqrt(limit) primes[p*p:p:limit] .= false p = findnext(primes, p + 1) end println("Primes up to ", lpad(limit, 8), lpad(count(primes), 9)) end function main() n = isempty(ARGS) ? 4 : parse(Int, ARGS[1]) for i in n:-1:n-2 nsieve(10000 << i) end end function real_main() isinteractive() || main() end if abspath(PROGRAM_FILE) == @__FILE__ real_main() end
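# Usage sketch (an aside, not part of the script above): calling the sieve directly
# for a small limit illustrates the padded output format used by main();
# there are 25 primes up to 100.
# nsieve(100)   # prints: Primes up to      100       25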
export zoo_jlc, zoo_jlc_file, JlcFormat, JlcVtreeFormat using DirectedAcyclicGraphs: label_nodes struct JlcFormat <: FileFormat end const JlcVtreeFormat = Tuple{JlcFormat,VtreeFormat} Tuple{JlcFormat,VtreeFormat}() = (JlcFormat(),VtreeFormat()) ############################################## # Read JLC (Juice Logic Circuit) ############################################## zoo_jlc_file(name) = artifact"circuit_model_zoo" * zoo_version * "/jlcs/$name" """ zoo_jlc(name) Loads JLC file with given name from model zoo. See https://github.com/UCLA-StarAI/Circuit-Model-Zoo. """ zoo_jlc(name) = read(zoo_jlc_file(name), LogicCircuit, JlcFormat()) const jlc_grammar = raw""" start: header (_NL node)+ _NL? header : "jlc" _WS INT node : "F" _WS INT -> false_node | "T" _WS INT -> true_node | "L" _WS INT _WS INT _WS SIGNED_INT -> literal_node | "O" _WS INT _WS INT _WS INT _WS child_nodes -> or_node | "A" _WS INT _WS INT _WS INT _WS child_nodes -> and_node child_nodes : INT (_WS INT)* %import common.INT %import common.SIGNED_INT %import common.WS_INLINE -> _WS %import common.NEWLINE -> _NL """ * dimacs_comments const jlc_parser = Lark(jlc_grammar) abstract type JlcParse <: JuiceTransformer end @inline_rule header(t::JlcParse, x) = Base.parse(Int,x) @rule start(t::JlcParse, x) = begin @assert num_nodes(x[end]) == x[1] x[end] end @rule child_nodes(t::JlcParse, x) = map(id -> t.nodes[id], x) # parse unstructured struct PlainJlcParse <: JlcParse nodes::Dict{String,PlainLogicCircuit} PlainJlcParse() = new(Dict{String,PlainLogicCircuit}()) end @rule literal_node(t::PlainJlcParse, x) = t.nodes[x[1]] = PlainLiteralNode(Base.parse(Lit,x[3])) @rule false_node(t::PlainJlcParse, x) = t.nodes[x[1]] = PlainConstantNode(false) @rule true_node(t::PlainJlcParse, x) = t.nodes[x[1]] = PlainConstantNode(true) @rule or_node(t::PlainJlcParse,x) = begin @assert length(x[4]) == Base.parse(Int,x[3]) t.nodes[x[1]] = Plain⋁Node(x[4]) end @rule and_node(t::PlainJlcParse,x) = begin @assert length(x[4]) == Base.parse(Int,x[3]) t.nodes[x[1]] = Plain⋀Node(x[4]) end function Base.parse(::Type{PlainLogicCircuit}, str, ::JlcFormat) ast = Lerche.parse(jlc_parser, str) Lerche.transform(PlainJlcParse(), ast) end Base.read(io::IO, ::Type{PlainLogicCircuit}, ::JlcFormat) = parse(PlainLogicCircuit, read(io, String), JlcFormat()) # parse structured struct StructJlcParse <: JlcParse id2vtree::Dict{String,<:Vtree} nodes::Dict{String,StructLogicCircuit} StructJlcParse(id2vtree) = new(id2vtree,Dict{String,StructLogicCircuit}()) end @rule literal_node(t::StructJlcParse, x) = begin lit = Base.parse(Lit,x[3]) vtree = t.id2vtree[x[2]] t.nodes[x[1]] = PlainStructLiteralNode(lit, vtree) end @rule false_node(t::StructJlcParse, x) = t.nodes[x[1]] = PlainStructConstantNode(false) @rule true_node(t::StructJlcParse, x) = t.nodes[x[1]] = PlainStructConstantNode(true) @rule or_node(t::StructJlcParse,x) = begin @assert length(x[4]) == Base.parse(Int,x[3]) vtree = t.id2vtree[x[2]] t.nodes[x[1]] = PlainStruct⋁Node(x[4], vtree) end @rule and_node(t::StructJlcParse,x) = begin @assert length(x[4]) == Base.parse(Int,x[3]) == 2 vtree = t.id2vtree[x[2]] t.nodes[x[1]] = PlainStruct⋀Node(x[4]..., vtree) end function Base.parse(::Type{PlainStructLogicCircuit}, str::AbstractString, ::JlcFormat, id2vtree) ast = Lerche.parse(jlc_parser, str) Lerche.transform(StructJlcParse(id2vtree), ast) end function Base.parse(::Type{PlainStructLogicCircuit}, strings, format::JlcVtreeFormat) id2vtree = parse(Dict{String,Vtree}, strings[2], format[2]) parse(PlainStructLogicCircuit, strings[1], 
format[1], id2vtree) end Base.read(io::IO, ::Type{PlainStructLogicCircuit}, ::JlcFormat, id2vtree) = parse(PlainStructLogicCircuit, read(io, String), JlcFormat(), id2vtree) function Base.read(ios::Tuple{IO,IO}, ::Type{PlainStructLogicCircuit}, ::JlcVtreeFormat) circuit_str = read(ios[1], String) vtree_str = read(ios[2], String) parse(PlainStructLogicCircuit, (circuit_str,vtree_str), JlcVtreeFormat()) end # Note: no parse as SDD since we are not sure whether # the original file satisfied the extra SDD properties ############################################## # Write JLCs ############################################## const JLC_FORMAT = """c this file was saved by LogicCircuits.jl c ids of jlc nodes start at 0 c jlc nodes appear bottom-up, children before parents c c file syntax: c jlc count-of-jlc-nodes c F id-of-false-jlc-node c T id-of-true-jlc-node c L id-of-literal-jlc-node id-of-vtree literal c O id-of-or-jlc-node id-of-vtree number-of-children {child-id}+ c A id-of-and-jlc-node id-of-vtree number-of-children {child-id}+ c""" function Base.write(io::IO, circuit::LogicCircuit, ::JlcFormat, vtreeid::Function = (x -> 0)) labeling = label_nodes(circuit) map!(x -> x-1, values(labeling)) # vtree nodes are 0-based indexed println(io, JLC_FORMAT) println(io, "jlc $(num_nodes(circuit))") foreach(circuit) do n if isliteralgate(n) println(io, "L $(labeling[n]) $(vtreeid(n)) $(literal(n))") elseif isconstantgate(n) sign = isfalse(n) ? "F" : "T" println(io, "$sign $(labeling[n])") else t = is⋀gate(n) ? "A" : "O" print(io, "$t $(labeling[n]) $(vtreeid(n)) $(num_children(n))") for child in children(n) print(io, " $(labeling[child])") end println(io) end end nothing end function Base.write(ios::Tuple{IO,IO}, circuit::StructLogicCircuit, format::JlcVtreeFormat) vtree2id = write(ios[2], vtree(circuit), format[2]) write(ios[1], circuit, format[1], n -> vtree2id[vtree(n)]) end
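# Aside (not part of the file above): a minimal hand-written example of the JLC
# syntax documented in JLC_FORMAT and jlc_grammar. Ids are 0-based, children appear
# before parents, and the vtree id is 0 throughout since the circuit is unstructured;
# the circuit encodes (1 ∧ ¬2) ∨ (¬1 ∧ 2).
example_jlc = """
c a small example logic circuit
jlc 7
L 0 0 1
L 1 0 -2
L 2 0 -1
L 3 0 2
A 4 0 2 0 1
A 5 0 2 2 3
O 6 0 2 4 5
"""
example_circuit = parse(PlainLogicCircuit, example_jlc, JlcFormat())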
# This file is a part of JuliaFEM. # License is MIT: see https://github.com/JuliaFEM/FEMCoupling.jl/blob/master/LICENSE # # Cylinder in torsion using distributed coupling # Distributed coupling can be used to distribute point moments as point forces # to nodes which dont have rotational degrees of freedom. # The model is a 3d cylinder, shown in picture. # ![](example_dcoupling_cylindrical_beam/example_dcoupling_cylindrical_beam.png) # Distributed coupling is used to distribute a torque load to the end of the # cylinder. The other end is fixed. Node sets for coupling nodes, reference # node and fixed face were made in the ABAQUS input file. Reference node is # located in the centrum of the coupling nodes. using JuliaFEM using JuliaFEM: add_elements!, Problem using JuliaFEM.Preprocess using JuliaFEM.Abaqus: create_surface_elements using FEMBase using FEMCoupling using FEMCoupling: add_reference_node!, add_coupling_nodes! # reading mesh from ABAQUS input file datadir = Pkg.dir("FEMCoupling", "examples", "example_dcoupling_cylindrical_beam") mesh = abaqus_read_mesh(joinpath(datadir, "example_dcoupling_cylindrical_beam.inp")) println("Number of nodes in a model: ", length(mesh.nodes)) # # Elements # Creating elements for the body. Mesh and element types are defined in # the ABAQUS input file. The cylinder body is named "Body1" in the input file. cylinder_body = create_elements(mesh,"Body1") # Updating values for the elements. update!(cylinder_body, "youngs modulus", 210e3) update!(cylinder_body, "poissons ratio", 0.3) # Creating an elasticity problem and adding the elements to it. cylinder_problem = Problem(Elasticity,"cylinder_problem",3) add_elements!(cylinder_problem, cylinder_body) # # Boundary conditions # Creating Poi1-type elements as boundary condition elements to nodes of the # node set Fixed_face_set. bc_elements = [Element(Poi1, [j]) for j in mesh.node_sets[:Fixed_face_set]] # Updating geometry for the bc elements update!(bc_elements, "geometry", mesh.nodes) # Fixing all displacements for the bc elements. for i=1:3 update!(bc_elements, "displacement $i", 0.0) end # Creating a bc problem and adding the bc elements to it. bc = Problem(Dirichlet, "fixed", 3, "displacement") add_elements!(bc, bc_elements) # # Distributed coupling # Creating Poi1 elements to nodes in coupling nodes set. coupling_nodes = [Element(Poi1, [j]) for j in mesh.node_sets[:Coupling_nodes_set]] # Updating geometry for the coupling nodes. update!(coupling_nodes, "geometry", mesh.nodes) # Creating Poi1 element for the reference node. reference_node_id = collect(mesh.node_sets[:ref_node_set]) reference_node = Element(Poi1, reference_node_id) # Updating geometry and applying a point moment for the reference node. update!(reference_node, "geometry", mesh.nodes) update!(reference_node, "point moment 3", 1500.0) # Creating a coupling problem and adding coupling nodes and reference nodes to # it. coupling = Problem(Coupling, "cylind", 3, "displacement") add_coupling_nodes!(coupling, coupling_nodes) add_reference_node!(coupling, reference_node) # # Analysis # Creating a step and running the analysis. The cylinder_problem contains # information about the body, its elements and their values. The bc contains # information about boundary conditions and coupling contains information # about distributed coupling. step = Analysis(Nonlinear) add_problems!(step, [cylinder_problem, bc, coupling]) run!(step) # # Results # Comparing calculated results with ABAQUS results. 
The node set
# circlenodes_set contains nodes which lie on the outer radius of the face.
# These circle nodes should have the maximum displacement magnitude (norm(u)).
node_on_circle = first(mesh.node_sets[:circlenodes_set])

# Extracting the displacement at time 0.0 into the variable u.
time = 0.0
u = cylinder_problem("displacement", time)[node_on_circle]
u_mag = norm(u)

# Comparing the computed magnitude against the ABAQUS reference value in a testset.
using FEMBase.Test
@testset "displacement magnitude" begin
    u_mag_expected = 6.306e-4
    @test isapprox(u_mag, u_mag_expected, rtol=1e-3)
end

# Printing node ids.
println("reference node id = $(reference_node_id[1])")
println("node on circle id = $node_on_circle")
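# Aside (a hedged sketch, not part of the original example): the text above states
# that the reference node lies at the center of the coupling nodes. Assuming
# mesh.nodes maps node ids to coordinate vectors, that could be sanity-checked with:
# using Statistics
# center = mean(mesh.nodes[j] for j in mesh.node_sets[:Coupling_nodes_set])
# println("coupling-node centroid: ", center)
# println("reference node:         ", mesh.nodes[reference_node_id[1]])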
using WinReg using Base.Test @test querykey(WinReg.HKEY_LOCAL_MACHINE,"System\\CurrentControlSet\\Control\\Session Manager\\Environment","OS") == "Windows_NT"
using Gadfly plot(layer(x=1:10, y=1:10, Geom.point, Theme(default_color=color("orange"))), layer(x=collect(1:10) .+ 0.1, y=collect(1:10) .+ 0.1, Geom.point, Theme(default_color=color("blue")), order=1))
# default reset reset!(π::AbstractPolicy) = π mutable struct RandomPolicy <: AbstractPolicy end action(policy::RandomPolicy, r, s′, A′) = rand(A′) # include("online_gae.jl") # include("actor_critic.jl")
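# Usage sketch (an aside, not part of the file above): RandomPolicy ignores the
# reward r and next state s′ and samples uniformly from the action set A′;
# the action set 1:4 below is an arbitrary example.
# π = RandomPolicy()
# action(π, 0.0, nothing, 1:4)   # a uniformly random element of 1:4
# reset!(π) === π                # the default reset! is a no-op returning the policy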
# StrICU.jl - Wrapper for ICU (International Components for Unicode) library # Some content of the documentation strings was derived from the ICU header files. # (Those portions copyright (C) 1996-2015, International Business Machines Corporation and others) """ StrICU (International Components for Unicode) Wrapper """ module StrICU using ModuleInterfaceTools @api extend! StrBase @static if !V6_COMPAT const is_windows = Sys.iswindows finalizer(o, f::Function) = Base.finalizer(f, o) end import Base: parse, get, close export ICU const ICU = StrICU const cvt_utf8 = utf8 const cvt_utf16 = utf16 export cvt_utf8, cvt_utf16 const ByteStr = Union{ASCIIStr, UTF8Str, String} const WordStringCSE = Union{UCS2CSE, _UCS2CSE, UTF16CSE} const WordStrings = Str{<:WordStringCSE} export set_locale! include("../deps/deps.jl") include("../deps/versions.jl") @static if is_windows() # make sure versions match v1 = last(matchall(r"\d{2}", iculib)) v2 = last(matchall(r"\d{2}", iculibi18n)) v1 == v2 || error("ICU library version mismatch $v1 != $v2 -- please correct $(realpath("../deps/deps.jl"))") end function __init__() set_locale!("") end global version global suffix dliculib = Libdl.dlopen(iculib) for (suf,ver) in [("",0); [("_$i",i) for i in versions]; [("_$(string(i)[1])_$(string(i)[2])",i) for i in versions]] if Libdl.dlsym_e(dliculib, "u_strToUpper"*suf) != C_NULL @eval const version = $ver @eval const suffix = $suf break end end _libicu(s, lib, p) = ( Symbol(string(p, s, suffix)), lib ) const UBool = Int8 const UChar = UInt16 const UErrorCode = Int32 const U_PARSE_CONTEXT_LEN = 16 const U_PARSE_TUPLE = ntuple((i)->0x20, U_PARSE_CONTEXT_LEN) struct UParseError line::UErrorCode # The line on which the error occured offset::UErrorCode # The character offset to the error preContext::NTuple{U_PARSE_CONTEXT_LEN, UInt8} # Textual context before the error postContext::NTuple{U_PARSE_CONTEXT_LEN, UInt8} # The error itself and/or textual context after the error end UParseError() = UParseError(0, 0, U_PARSE_TUPLE, U_PARSE_TUPLE) FAILURE(x::Integer) = x > 0 SUCCESS(x::Integer) = x <= 0 U_BUFFER_OVERFLOW_ERROR = 15 const locale = ASCIIStr[""] include("utext.jl") include("ustring.jl") include("ubrk.jl") include("ucnv.jl") include("ucol.jl") include("ucsdet.jl") include("udat.jl") include("ucal.jl") include("ucasemap.jl") @api freeze end # module StrICU
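# Aside (not part of the module above): after the version probe, suffix holds the
# decoration of the installed ICU symbols, so _libicu simply splices it into a
# (symbol, library) pair. Illustrative values only:
# _libicu("ToUpper", iculib, "u_str")  # == (Symbol("u_strToUpper" * suffix), iculib)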
using ITensors, Printf function main() i = Index(100,"i"); A = randomITensor(i); B = randomITensor(i); # Dry run (JIT) time_dry = @elapsed begin R = A*B end reset!(timer) # End dry run time = @elapsed begin for n =1:10 R = A*B end end @printf "time = %.12f\n" time printTimes(timer) end; main()
import Gadfly Gadfly.set_default_plot_size(20Gadfly.cm, 12Gadfly.cm) # make energy conservation and velocity distribution plots pe = readdlm("pe.dat") vir = readdlm("v.dat") function mdplot(values::Array, title::String, name::String, N::Integer) p = Gadfly.plot( x=[10:10:N], y=values, Gadfly.Geom.line, Gadfly.Guide.xlabel("Simulation Step"), Gadfly.Guide.ylabel(name), Gadfly.Guide.title("$title vs. Simulation Step for Liquid Argon") ) # save as svg and png Gadfly.draw(Gadfly.SVG("$name.svg", 20Gadfly.cm, 12Gadfly.cm), p) Gadfly.draw(Gadfly.PNG("$name.png", 12Gadfly.cm, 8Gadfly.cm), p) return p end # m = 1, k = 1 in our natural units function maxboltzbuilder(T::Real) return v -> √(2/π*T^3) * v.^2 .* exp(-v.^2 / (2T)) end function mdplotspeeds(speeds::Array, name::String, T::Real) xs = [0:0.01:1] p = Gadfly.plot( Gadfly.layer( x = speeds, Gadfly.Geom.histogram(bincount=100, density=true), color = ["Measured speed histogram for T = $T"] ), ### Gadfly.layer( ### x = xs, ### y = maxboltzbuilder(T)(xs), ### Gadfly.Geom.line, ### color = ["Predicted by Maxwell-Boltzmann"] ### ), ### color = ["Legend"], Gadfly.Guide.title("Temperature Distribution for T=$T vs. Boltzmann Distribution"), Gadfly.Guide.xlabel("Velocity"), Gadfly.Guide.ylabel("Probability"), Gadfly.Guide.xticks(ticks=[0:0.1:1]) ) # save as svg and png Gadfly.draw(Gadfly.SVG("$name.svg", 20Gadfly.cm, 12Gadfly.cm), p) Gadfly.draw(Gadfly.PNG("$name.png", 12Gadfly.cm, 8Gadfly.cm), p) return p end pe_plots = Gadfly.Plot[] push!(pe_plots, mdplot(pe, "Potential Energies (T = 1.069)", "temp-1.069", 20000)) e_plots = Gadfly.Plot[] push!(e_plots, mdplot(e_1069, "Total Energies (T = 1.069)", "temp-1.069", 20000))
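# Aside (not part of the script above): with m = k_B = 1 as stated, the normalized
# Maxwell-Boltzmann speed density is f(v) = sqrt(2/(π*T^3)) * v^2 * exp(-v^2/(2T)),
# which is what the commented-out "Predicted by Maxwell-Boltzmann" layer is meant to
# plot. A minimal builder (maxboltz_density is a new, illustrative name):
maxboltz_density(T::Real) = v -> sqrt(2 / (π * T^3)) .* v.^2 .* exp.(-v.^2 ./ (2T))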
using SafeTestsets @safetestset "Updating Prior" begin using Test, Distributions, AdaptiveDesignOptimization α = 5 β = 5 prior = [Beta(α, β)] loglike(θ, d, data) = logpdf(Bernoulli(θ), data) model = Model(;prior, loglike) parm_list = ( θ = range(0, 1, length=200), ) design_list = (d = [1,2],) data_list = (choice=[true, false],) randomizer = Optimizer(;design_list, parm_list, data_list, model, approach=Randomize) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β)) atol = 5e-3 update!(randomizer, false) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β+1)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β+1)) atol = 5e-3 update!(randomizer, false) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β+2)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β+2)) atol = 5e-3 update!(randomizer, true) @test mean_post(randomizer)[1] ≈ mean(Beta(α+1,β+2)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α+1,β+2)) atol = 5e-3 end @safetestset "Updating Default Prior" begin using Test, Distributions, AdaptiveDesignOptimization α = 1 β = 1 loglike(θ, d, data) = logpdf(Bernoulli(θ), data) model = Model(; loglike) parm_list = ( θ = range(0, 1, length=200), ) design_list = (d = [1,2],) data_list = (choice=[true, false],) randomizer = Optimizer(;design_list, parm_list, data_list, model, approach=Randomize) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β)) atol = 5e-3 update!(randomizer, false) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β+1)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β+1)) atol = 5e-3 update!(randomizer, false) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β+2)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β+2)) atol = 5e-3 update!(randomizer, true) @test mean_post(randomizer)[1] ≈ mean(Beta(α+1,β+2)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α+1,β+2)) atol = 5e-3 end @safetestset "Updating Prior Dynamic Model" begin using Test, Distributions, AdaptiveDesignOptimization struct State1 end α = 5 β = 5 prior = [Beta(α, β)] loglike(θ, d, data, args...) = logpdf(Bernoulli(θ), data) model = Model(;prior, loglike) parm_list = ( θ = range(0, 1, length=200), ) design_list = (d = [1,2],) data_list = (choice=[true, false],) update_state!(args...) = nothing randomizer = Optimizer(;design_list, parm_list, data_list, model, update_state!, state_type=State1, model_type=Dynamic) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β)) atol = 5e-3 update!(randomizer, false) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β+1)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β+1)) atol = 5e-3 update!(randomizer, false) @test mean_post(randomizer)[1] ≈ mean(Beta(α,β+2)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α,β+2)) atol = 5e-3 update!(randomizer, true) @test mean_post(randomizer)[1] ≈ mean(Beta(α+1,β+2)) atol = 5e-3 @test std_post(randomizer)[1] ≈ std(Beta(α+1,β+2)) atol = 5e-3 end @safetestset "Dimension Check" begin using Test, Distributions, AdaptiveDesignOptimization loglike(μ, σ, data, args...) 
= logpdf(Normal(μ, σ), data) model = Model(; loglike) parm_list = ( μ = range(0, 10, length=10), σ = range(1, 5, length=10), ) design_list = ( d = range(1, 5, length=10), z = range(1, 5, length=10), v = range(1, 5, length=10) ) data_list = ( choice=[true, false], v = 1:3 ) optimizer = Optimizer(;design_list, parm_list, data_list, model) @test optimizer.parm_grid |> size == (10,10) @test optimizer.design_grid |> size == (10,10,10) @test optimizer.data_grid |> size == (2,3) @test optimizer.log_like |> size == (100,1000,6) @test optimizer.log_post |> size == (100,) @test optimizer.priors |> size == (10,10) @test optimizer.mutual_info |> size == (1000,) @test optimizer.marg_log_like |> size == (1,1000,6) @test optimizer.marg_entropy |> size == (1000,) @test optimizer.cond_entropy |> size == (1000,) end
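# Aside (not part of the test file above): the expected posteriors in these tests
# follow from Beta-Bernoulli conjugacy: starting from Beta(α, β), every observed
# success increments α and every failure increments β. A standalone check:
using Distributions
α, β = 5, 5
data = [false, false, true]                             # two failures, then one success
posterior = Beta(α + count(data), β + count(!, data))   # Beta(6, 7)
mean(posterior)                                         # == 6 / 13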
# This file is a part of GERDAMetadata.jl, licensed under the MIT License (MIT). import Test Test.@testset "Package GERDAMetadata" begin include("test_filekey.jl") end # testset
-'-'-'-'.(-LEECHING'('STOP('-SMELL-('0')'SMELL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SIGHT-('0')'SIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FEEL-('0')'FEEL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EMOTIONS-('0')'EMOTIONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EMPATHY-('0')'EMPATHY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GRATITUDE-('0')'GRATITUDE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEART-('0')'HEART-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VALOR-('0')'VALOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COURAGE-('0')'COURAGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MIND-('0')'MIND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TASTE-('0')'TASTE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EAT-('0')'EAT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEAR-('0')'HEAR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SOUND-('0')'SOUND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SOUNDS-('0')'SOUNDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VIBE-('0')'VIBE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VIBES-('0')'VIBES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VIBRATION-('0')'VIBRATION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LISTEN-('0')'LISTEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-READ-('0')'READ-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WRITE-('0')'WRITE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPEAK-('0')'SPEAK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VOICE-('0')'VOICE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PITCH-('0')'PITCH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TALK-('0')'TALK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAY-('0')'SAY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHOUT-('0')'SHOUT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VERBAL-('0')'VERBAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VERBALIZE-('0')'VERBALIZE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-UNDERSTAND-('0')'UNDERSTAND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-UNDERSTANDING-('0')'UNDERSTANDING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FRIEND-('0')'FRIEND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KINDNESS-('0')'KINDNESS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FRIENDS-('0')'FRIENDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FRIENDSHIP-('0')'FRIENDSHIP)'.STOP').LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PARTNER-('0')'PARTNER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LOVE-('0')'LOVE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PARTNERS-('0')'PARTNERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PARTNERSHIP-('0')'PARTNERSHIP-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEAM-('0')'TEAM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEAMS-('0')'TEAMS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEAMMATE-('0')'TEAMMATE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KNOWLEDGE-('0')'KNOWLEDGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WISDOM-('0')'WISDOM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BUDDHA-('0')'BUDDHA-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-NIRVANA-('0')'NIRVANA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FREEDOM-('0')'FREEDOM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHAPE-('0')'SHAPE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CREATE-('0')'CREATE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CHANGE-('0')'CHANGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GENDER-('0')'GENDER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BODY-('0')'BODY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EYE-('0')'EYE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EYES-('0')'EYES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SEE-('0')'SEE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VISION-('0')'VISION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SIGHT-('0')'SIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HAIR-('0')'HAIR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MOUTH-('0')'MOUTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TONGUE-('0')'TONGUE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEETH-('0')'TEETH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NOSE-('0')'NOSE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EARS-('0')'EARS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HANDS-('0')'HANDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARMS-('0')'ARMS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FEET-('0')'FEET-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEGS-('0')'LEGS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANATOMY-('0')'ANATOMY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STAND-('0'')STAND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STILL-('0'')STILL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MOVE-('0')'MOVE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MOVEMENT-('0')'MOVEMENT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WALK-('0')'WALK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RUN-('0')'RUN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPRINT-('0')'SPRINT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SWIM-('0')'SWIM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPRING-('0')'SPRING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HARU-('0')'HARU-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARY-('0')'ARY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ICAL-('0')'ICAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WINTER-('0')'WINTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ICE-('0')'ICE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FROZEN-('0')'FROZEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SUMMER-('0')'SUMMER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEAT-('0')'HEAT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FIRE-('0')'FIRE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WOOD-('0')'WOOD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TREES-('0')'TREES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PLANTS-('0')'PLANTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AUTUMN-('0')'AUTUMN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-IRON-('0')'IRON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FALL-('0')'FALL-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-FAE-('0')'FAE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FATE-('0')'FATE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FATES-('0')'FATES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FATED-('0')'FATED-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DESTINY-('0')'DESTINY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CONVERSATION-('0')'CONVERSATION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COMPREHEND-('0')'COMPREHEND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SADNESS-('0')'SADNESS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LONELINESS-('0')'LONELINESS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALONE-('0')'ALONE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PAIN-('0')'PAIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COMPANION-('0')'COMPANION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GIFT-('0')'GIFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PLEASURE-('0')'PLEASURE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEAL-('0')'HEAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEALING-('0')'HEALING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEALTH-('0')'HEALTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WEALTH-('0')'WEALTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CURRENCY-('0')'CURRENCY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MONEY-('0')'MONEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-POWER-('0')'POWER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-POWERS-('0')'POWERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ABILITIES-('0')'ABILITIES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ABILITY-('0')'ABILITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DAMAGE-('0')'DAMAGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KILL-('0')'KILL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SKY-('0')'SKY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GROUND-('0')'GROUND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SEED-('0')'SEED-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SEEDS-('0')'SEEDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ODIN-('0')'ODIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COLORS-('0')'COLORS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COLOURS-('0')'COLOURS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WHITE-('0')'WHITE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RED-('0')'RED-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ORANGE-('0')'ORANGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-YELLOW-('0')'YELLOW-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GREEN-('0')'GREEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BLUE-('0')'BLUE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-INDIGO-('0')'INDIGO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VIOLET-('0')'VIOLET-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CYAN-('0')'CYAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MAGENTA-('0')'MAGENTA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MUSIC-('0')'MUSIC-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MAGIC-('0')'MAGIC-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MAGICAL-('0')'MAGICAL-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-JULIA-('0')'JULIA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CODE-('0')'CODE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GITHUB-('0')'GITHUB-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CABLE-('0')'CABLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BITCOIN-('0')'BITCOIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CRYPTOCURRENCY-('0')'CRYPTOCURRENCY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MONEY-('0')'MONEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DISNEY-('0')'DISNEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SONY-('0')'SONY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAMSUNG-('0')'SAMSUNG-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LG-('0')'LG-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MARVEL-('0')'MARVEL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WOW-('0')'WOW-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WORLD-('0')'WORLD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PLANET-('0')'PLANET-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WARCRAFT-('0')'WARCRAFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LOVECRAFT-('0')'LOVECRAFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PEACECRAFT-('0')'PEACECRAFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WHEEL-('0')'WHEEL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OF-('0')'OF-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DRAGON-('0')'DRAGON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-REBORN-('0')'REBORN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EGG-('0')'EGG-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WITCHER-('0')'WITCHER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ELEVEN-('0')'ELEVEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NOVEMBER-('0')'NOVEMBER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TWELVE-('0')'TWELVE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DECEMBER-('0')'DECEMBER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-THIRTEEN-('0')'THIRTEEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALUMINUM-('0')'ALUMINUM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TITAN-('0')'TITAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TICKTOCK-('0')'TICKTOCK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TICK-('0')'TICK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TOCK-('0')'TOCK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TOCKTICK-('0')'TOCKTICK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-INSTAGRAM-('0')'INSTAGRAM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TWITTER-('0')'TWITTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WAZE-('0')'WAZE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CANDY-('0')'CANDY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CRUSH-('0')'CRUSH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEGEND-('0')'LEGEND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEGENDS-('0')'LEGENDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MYTH-('0')'MYTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OLYMPUS-('0')'OLYMPUS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-YGGDRASIL-('0')'YGGDRASIL-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-INSECTS-('0')'INSECTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPIDERS-('0')'SPIDERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANTS-('0')'ANTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AUNTS-('0')'AUNTS-')STOP')LEECHING)'.-'-'-'-' -('-LEECHING'('STOP('-UNCLES-('0')'UNCLES-')STOP')LEECHING-)- -'-'-'-'.(-LEECHING'('STOP('-COUSINS-('0')'COUSINS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SIBLINGS-('0')'SIBLINGS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BROTHERS-('0')'BROTHERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SISTERS-('0')'SISTERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OTHER-('0')'OTHER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OTHERS-('0')'OTHERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLOCK-('0')'CLOCK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AC-('0')'AC-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WATCH-('0')'WATCH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BUTTON-('0')'BUTTON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DC-('0')'DC-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CD-('0')'CD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PARAMOUNT-('0')'PARAMOUNT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MOUNT-('0')'MOUNT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PICTURES-('0')'PICTURES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DOGS-('0')'DOGS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DOG-('0')'DOG-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CAT-('0')'CAT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CATS-('0')'CATS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANIMALS-('0')'ANIMALS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BLIZZARD-('0')'BLIZZARD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VALVE-('0')'VALVE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SEATTLE-('0')'SEATTLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WASHINGTON-('0')'WASHINGTON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CHICAGO-('0')'CHICAGO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ILLINOIS-('0')'ILLINOIS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALASKA-('0')'ALASKA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MONTANA-('0')'MONTANA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARIZONA-('0')'ARIZONA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEXAS-('0')'TEXAS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FLORIDA-('0')'TEXAS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GEORGIA-('0')'GEORGIA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JAPAN-('0')'JAPAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EUROPE-('0')'EUROPE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AFRICA-('0')'AFRICA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BRAZIL-('0')'BRAZIL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MEXICO-('0')'MEXICO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CALIFORNIA-('0')'CALIFORNIA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OREGON-('0')'OREGON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CANADA-('0')'CANADA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NEWYORK-('0')'NEWYORK-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-LASVEGAS-('0')'LASVEGAS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NEVADA-('0')'NEVADA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LAS-('0')'LAS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VEGAS-('0')'VEGAS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EVERETT-('0')'EVERETT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TACOMA-('0')'TACOMA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EDMONDS-('0')'EDMONDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHORELINE-('0')'SHORELINE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-QFC-('0')'QFC-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KROGER-('0')'KROGER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GROCERY-('0')'GROCERY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALBERTSONS-('0')'ALBERTSONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAFEWAY-('0')'SAFEWAY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHOREWOOD-('0')'SHOREWOOD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EINSTEIN-('0')'EINSTEIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EINSTIEN-('0')'EINSTIEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RICH-('0')'RICH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ROGER-('0')'ROGER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JARED-('0')'JARED-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PANTERA-('0')'PANTERA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NEVERMORE-('0')'NEVERMORE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-METAL-('0')'METAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-UPS-('0')'UPS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STORE-('0'')STORE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STORES-('0'')STORES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DONALD-('0')'DONALD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PETERSON-('0')'PETERSON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OCEAN-('0')'OCEAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHORES-('0')'SHORES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CONCE-('-0-')-CONCEPTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CONCEP-('-0-')-CONCEPTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OTHE-('-0-')-OTHERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ST-('-0-')-STOP-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HOPE-('0')'HOPE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANIME-('0')'ANIME-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DEMONSLAYER-('0')'DEMONSLAYER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NARUTO-('0')'NARUTO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BORUTO-('0')'BORUTO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BLEACH-('0')'BLEACH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ONEPIECE-('0')'ONEPIECE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DRAGONBALL-('0')'DRAGONBALL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DRAGONBALLZ-('0')'DRAGONBALLZ-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DRAGONBALLSUPER-('0')'DRAGONBALLSUPER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SUPERSAIYAN-('0')'SUPERSAIYAN-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-MYHEROACADEMIA-('0')'MYHEROACADEMIA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CODE016-('0')'CODE016-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CODE002-('0')'CODE002-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WINGZERO-('0')'WINGZERO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ENDLESS-('0')'ENDLESS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WALTZ-('0')'WALTZ-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TWIN-('0')'TWIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BUSTER-('0')'BUSTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RIFLE-('0')'RIFLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RIMURU-('0')'RIMURU-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEMPEST-('0')'TEMPEST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VELDORA-('0')'VELDORA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MANGA-('0')'MANGA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CARTOON-('0')'CARTOON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CARTOONS-('0')'CARTOONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AVATAR-('0')'AVATAR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COMICS-('0')'COMICS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VIDEOGAMES-('0')'VIDEOGAMES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PLAYSTATION-('0')'PLAYSTATION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-XBOX-('0')'XBOX-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-XBOX360-('0')'XBOX360-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SNES-('0')'SNES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GENESIS-('0')'GENESIS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FELICITY-('0')'FELICITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SMOKE-('0')'SMOKE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CANNABIS-('0')'CANNABIS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TOBACCO-('0')'TOBACCO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SCREEN-('0')'SCREEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VAPOR-('0')'VAPOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STEAM-('0'')STEAM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EPIC-('0')'EPIC-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DISCORD-('0')'DISCORD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SKYPE-('0')'SKYPE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ZOOM-('0')'ZOOM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ZOOMWAY-('0')'ZOOMWAY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STREAM-('0'')STREAM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STREAMING-('0'')STREAMING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TWITCH-('0')'TWITCH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CHINA-('0')'CHINA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RUSSIA-('0')'RUSSIA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AMERICA-('0')'AMERICA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-UNIVERSE-('0')'UNIVERSE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MULTIVERSE-('0')'MULTIVERSE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GOOGLE-('0')'GOOGLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GMAIL-('0')'GMAIL-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-YOUTUBE-('0')'YOUTUBE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPACEX-('0')'SPACEX-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DOORS-('0')'DOORS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-APPLE-('0')'APPLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANDROID-('0')'ANDROID-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MICROSOFT-('0')'MICROSOFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SOFT-('0')'SOFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HARD-('0')'HARD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MICRO-('0')'MICRO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MACRO-('0')'MACRO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MINI-('0')'MINI-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GIANT-('0')'GIANT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BIG-('0')'BIG-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SMALL-('0')'SMALL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SIZE-('0')'SIZE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WEIGHT-('0')'WEIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEIGHT-('0')'HEIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-REACH-('0')'REACH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ENDURANCE-('0')'ENDURANCE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STAMINA-('0'')STAMINA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WINDOWS-('0')'WINDOWS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WINDOW-('0')'WINDOW-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WIN-('0')'WIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NIW-('0')'NIW-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DOW-('0')'DOW-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WOD-('0')'WOD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MONITORS-('0')'MONITORS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DIRECTIONS-('0')'DIRECTIONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FORWARDS-('0')'FORWARDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BACKWARDS-('0')'BACKWARDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEFT-('0')'LEFT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RIGHT-('0')'RIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DIAGONAL-('0')'DIAGONAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-UP-('0')'UP''-.TOP').EECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DOWN-('0')'DOWN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DIRECTION-('0')'DIRECTION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NORTH-('0')'NORTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EOBARD-('0')'EOBARD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-THAWNE-('0')'THAWNE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EAST-('0')'EAST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPEED-('0')'SPEED-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VELOCITY-('0')'VELOCITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPEEDFORCE-('0')'SPEEDFORCE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BARRY-('0')'BARRY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALLEN-('0')'ALLEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FLASH-('0')'FLASH-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-IRIS-('0')'IRIS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JOE-('0')'JOE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CECILE-('0')'CECILE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WALLY-('0')'WALLY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WALLACE-('0')'WALLACE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WEST-('0')'WEST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLARK-('0')'CLARK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KENT-('0')'KENT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SUPERMAN-('0')'SUPERMAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SOUTH-('0')'SOUTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LILLY-('0')'LILLY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PETER-('0')'PETER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RUTH-('0')'RUTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ZACHARY-('0')'ZACHARY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SKYE-('0')'SKYE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HOTARU-('0')'HOTARU-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NOLAN-('0')'NOLAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HOTARU-('0')'PATRICK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEAHY-('0')'LEAHY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEFAY-('0')'LEFAY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TODD-('0')'TODD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MARTIN-('0')'MARTIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JOSEPH-('0')'JOSEPH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BUNES-('0')'BUNES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PRIEST-('0')'PRIEST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MIST-('0')'MIST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RAIN-('0')'RAIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STORM-('0'')STORM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KITE-('0')'KITE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STRING-('0'')STRING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KEY-('0')'KEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ELI-('0')'ELI-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BRIDGETT-('0')'BRIDGETT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CRAYTON-('0')'CRAYTON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLAYTON-('0')'CLAYTON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLAY-('0')'CLAY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TOE-('0')'TOE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KNEE-('0')'KNEE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DYLAN-('0')'DYLAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANGIE-('0')'ANGIE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEECH-('0')'LEECH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEILA-('0')'LEILA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CJ-('0')'CJ-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CASEY-('0')'CASEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CORRINE-('0')'CORRINE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BEN-('0')'BEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BENJAMIN-('0')'BENJAMIN-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-JOEY-('0')'JOEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JIM-('0')'JIM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COLLETTE-('0')'COLLETTE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STEPHEN-('0'')STEPHEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JOHNSON-('0')'JOHNSON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TONY-('0')'TONY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SARA-('0')'SARA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SARAH-('0')'SARAH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ASHLEY-('0')'ASHLEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DIANA-('0')'DIANA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HUMAN-('0')'HUMAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PERSON-('0')'PERSON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PEOPLE-('0')'PEOPLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HUMANS-('0')'HUMANS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ERIN-('0')'ERIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HYBRID-('0')'HYBRID-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HYBRIDS-('0')'HYBRIDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JOEL-('0')'JOEL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GOD-('0')'GOD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GODS-('0')'GODS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAIYANS-('0')'SAIYANS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GOKU-('0')'GOKU-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GOHAN-('0')'GOHAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EARTH-('0')'EARTH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SON-('0')'SON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NOS-('0')'NOS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ANOS-('0')'ANOS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VOID-('0')'VOID-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DIOV-('0')'DIOV-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ABYSS-('0')'ABYSS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SSYBA-('0')'SSYBA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-VOLDIGOAD-('0')'VOLDIGOAD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DEMONKING-('0')'DEMONKING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SUN-('0')'SUN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STAR-('0'')STAR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STARS-('0'')STARS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STARSONG-('0'')STARSONG-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RIVER-('0')'RIVER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FOREST-('0')'FOREST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LAKE-('0')'LAKE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-POND-('0')'POND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OCEAN-('0')'OCEAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SEA-('0')'SEA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MEADOW-('0')'MEADOW-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PARK-('0')'PARK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CITY-('0')'CITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TOWN-('0')'TOWN-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-TAVERN-('0')'TAVERN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SUBURB-('0')'SUBURB-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BUY-('0')'BUY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HOUSE-('0')'HOUSE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OWN-('0')'OWN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CASTLE-('0')'CASTLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MIRROR-('0')'MIRROR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ROLLING-('0')'ROLLING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STONES-('0'')STONES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TONES-('0')'TONES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STONE-('0'')STONE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GOLD-('0')'GOLD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SILVER-('0')'SILVER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TIN-('0')'TIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LEAD-('0')'LEAD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KRYPTON-('0')'KRYPTON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KRYPTONITE-('0')'KRYPTONITE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SILICON-('0')'SILICON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CALCULATOR-('0')'CALCULATOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PROCESSOR-('0')'PROCESSOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PROCESS-('0')'PROCESS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PRO-('0')'PRO-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PROFESSIONAL-('0')'PROFESSIONAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DUST-('0')'DUST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAND-('0')'SAND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BLACK-('0')'BLACK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STONE-('0'')STONE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MOON-('0')'MOON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPACE-('0')'SPACE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COPPER-('0')'COPPER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HIGGS-('0')'HIGGS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAND-('0')'SAND-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ASH-('0')'ASH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TIME-('0')'TIME-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FIELD-('0')'FIELD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PAST-('0')'PAST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PRESENT-('0')'PRESENT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FUTURE-('0')'FUTURE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CALENDAR-('0')'CALENDAR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SWORD-('0')'SWORD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KEN-('0')'KEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KENSHIN-('0')'KENSHIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KEN-('0')'KEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SWORDS-('0')'SWORDS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KATANA-('0')'KATANA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TSURUGI-('0')'TSURUGI-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-TATARA-('0')'TATARA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FURNACE-('0')'FURNACE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AMAKUNI-('0')'AMAKUNI-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AMAKURA-('0')'AMAKURA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-YASUTSUNA-('0')'YASUTSUNA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAMURAI-('0')'SAMURAI-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NINJA-('0')'NINJA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SATETSU-('0')'SATETSU-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FORGE-('0')'FORGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FOLD-('0')'FOLD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HAMMER-('0')'HAMMER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TAMAHAGANE-('0')'TAMAHAGANE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STEEL-('0'')STEEL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PEN-('0')'PEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PENS-('0')'PENS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PENCIL-('0')'PENCIL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CRAYON-('0')'CRAYON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CRAYONS-('0')'CRAYONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EMPORER-('0')'EMPORER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GUARD-('0')'GUARD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARMOR-('0')'ARMOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARMOUR-('0')'ARMUOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DEFENCE-('0')'DEFENCE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OFFENCE-('0')'OFFENCE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WEAPON-('0')'WEAPON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WEAPONS-('0')'WEAPONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHIELD-('0')'SHIELD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TARDIS-('0')'TARDIS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COMPUTER-('0')'COMPUTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COMPUTERS-('0')'COMPUTERS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TECHNOLOGY-('0')'TECHNOLOGY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ELECTRICITY-('0')'ELECTRICITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WEATHER-('0')'WEATHER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LANGUAGE-('0')'LANGUAGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CHINESE-('0')'CHINESE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JAPANESE-('0')'JAPANESE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KOREAN-('0')'KOREAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FRENCH-('0')'FRENCH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SPANISH-('0')'SPANISH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GERMAN-('0')'GERMAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RUSSIAN-('0')'RUSSIAN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ENGLISH-('0')'ENGLISH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TEXT-('0')'TEXT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WRITTEN-('0')'WRITTEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALPHABET-('0')'ALPHABET-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-BINARY-('0')'BINARY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HEXIDECIMAL-('0')'HEXIDECIMAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KI-('0')'KI-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KEY-('0')'KEY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AURA-('0')'AURA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SELF-('0')'SELF-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-LIGHT-('0')'LIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MATCH-('0')'MATCH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ELLIE-('0')'ELLIE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLARA-('0')'CLARA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DOCTOR-('0')'DOCTOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HARRISON-('0')'HARRISON-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TESS-('0')'TESS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TESSA-('0')'TESSA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-JESSIE-('0')'JESSIE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-QUICK-('0')'QUICK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WELLS-('0')'WELLS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MOLLY-('0')'MOLLY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CODEN-('0')'CODEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STARK-('0'')STARK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STORY-('0'')STORY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STORIES-('0'')STORIES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NAME-('0')'NAME-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-NAMES-('0')'NAMES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TITLE-('0')'TITLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TITLES-('0')'TITLES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLASS-('0')'CLASS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLASSES-('0')'CLASS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FUN-('0')'FUN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GAME-('0')'GAME-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GAMES-('0')'GAMES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ADVENTURE-('0')'ADVENTURE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GUILD-('0')'GUILD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PARTY-('0')'PARTY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALCHEMIST-('0')'ALCHEMIST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ALCHEMY-('0')'ALCHEMY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-TRANSMUTATION-('0')'TRANSMUTATION-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CIRCLE-('0')'CIRCLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BLACKSMITH-('0')'BLACKSMITH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DEMONHUNTER-('0')'DEMONHUNTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HUNTER-('0')'HUNTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MONK-('0')'MONK-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MARTIAL-('0')'MARTIAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ART-('0')'ART-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARTS-('0')'ARTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ARTHUR-('0')'ARTHUR-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-BEGINNING-('0')'BEGINNING-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AFTER-('0')'AFTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-END-('0')'END-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-GREY-('0')'GREY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MANA-('0')'MANA-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-AETHER-('0')'AETHER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EXIT-('0')'EXIT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WARRIOR-('0')'WARRIOR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-KNIGHT-('0')'KNIGHT-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PALADIN-('0')'PALADIN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-PRIEST-('0')'PRIEST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WIZARD-('0')'WIZARD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-INTEGRITY-('0')'INTEGRITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SAGE-('0')'SAGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SCHOLAR-('0')'SCHOLAR-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SORCERER-('0')'SORCERER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SOULLINKER-('0')'SOULLINKER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HOME-('0')'HOME-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BELIEF-('0')'BELIEF-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-POSSIBLE-('0')'POSSIBLE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ELF-('0')'ELF-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BELIEVE-('0')'BELIEVE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-THEM-('0')'THEM-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-THEMSELF-('0')'THEMSELF-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-THEMSELVES-('0')'THEMSELVES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ELVES-('0')'ELVES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MALE-('0')'MALE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FEMALE-('0')'FEMALE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CLOTHES-('0')'CLOTHES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SHOES-('0')'SHOES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-INVENTORY-('0')'INVENTORY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STORAGE-('0'')STORAGE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-OPTIONS-('0')'OPTIONS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-CHOICE-('0')'CHOICE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-REAL-('0')'REAL-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-REALITY-('0')'REALITY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EXISTENCE-('0')'EXISTENCE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EXISTS-('0')'EXISTS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-EXIST-('0')'EXIST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HARRY-('0')'HARRY-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-MAGGIE-('0')'MAGGIE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-BLACKSTONE-('0')'BLACKSTONE-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-COPPERFIELD-('0')'COPPERFIELD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DRESDEN-('0')'DRESDEN-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-RIN-('0')'RIN-')STOP')LEECHING)'.-'-'-'-' 
-'-'-'-'.(-LEECHING'('STOP('-NOMI-('0')'NOMI-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-SANDASH-('0')'SANDASH-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-STARDUST-('0'')STARDUST-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HIGGSFIELD-('0')'HIGGSFIELD-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-DRESDENFILES-('0')'DRESDENFILES-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-WINGS-('0')'WINGS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-ENTER-('0')'ENTER-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-HIGGS-('0')'HIGGS-')STOP')LEECHING)'.-'-'-'-' -'-'-'-'.(-LEECHING'('STOP('-FILES-('0')'FILES-')STOP')LEECHING)'.-'-'-'-'
@testset "Maximization for quantum search" begin n = 5 g = complete_graph(n) qss_penalty = ceil(Int, log(nv(g))) @testset "Continuous mode" begin cqss = QWSearch(CTQW(g), [1], qss_penalty) cqss0 = QWSearch(CTQW(g), [1]) @test_throws AssertionError maximize_quantum_search(cqss, -1.) @test_throws AssertionError maximize_quantum_search(cqss, 1., -1.) @test runtime(maximize_quantum_search(cqss)) > 0 @test runtime(maximize_quantum_search(cqss, 1., 1.)) > 0 @test_warn r".*" maximize_quantum_search(cqss0) @test_nowarn maximize_quantum_search(cqss) @test isapprox(runtime(maximize_quantum_search(cqss, 1., 0.2)), 1., atol=0.001) end @testset "Discrete mode" begin dqss = QWSearch(Szegedy(g), [1], qss_penalty) dqss0 = QWSearch(Szegedy(g), [1]) @test_throws AssertionError maximize_quantum_search(dqss0, -1) @test_throws AssertionError maximize_quantum_search(dqss0, 1, :unknown_mode) @test runtime(maximize_quantum_search(dqss)) > 0 @test runtime(maximize_quantum_search(dqss, nv(g), :firstmaxprob)) > 0 @test runtime(maximize_quantum_search(dqss, nv(g), :firstmaxeff)) > 0 @test runtime(maximize_quantum_search(dqss, nv(g), :maxtimeeff)) > 0 @test runtime(maximize_quantum_search(dqss, nv(g), :maxeff)) > 0 @test runtime(maximize_quantum_search(dqss, nv(g), :maxtimeprob)) > 0 @test_warn r".*" maximize_quantum_search(dqss0) @test_nowarn maximize_quantum_search(dqss) end end
""" k_i = get_k_i(pb::Problem) Compute the degree of each constraint of `pb`. """ function get_ki(problem::Problem) println("\n=== get_ki(problem::Problem)") println("Compute the degree of each constraint of `pb`.") return Dict{String, Int}() end """ check_di_ki!(d_i, k_i) Enforce the condition d_i - k_i ≥ 0 on each constraint (if needed). """ function check_di_ki!(d_i, k_i) println("\n=== check_di_ki!(d_i, k_i)") println("Enforce the condition d_i - k_i ≥ 0 on each constraint (if needed).") end """ SparsityPattern Type for storing and working on sparsitty patterns. """ type SparsityPattern end """ sparsity_pattern = compute_sparsitypattern(problem, d_i, k_i) Compute the sparsity_pattern corresponding to the given partial orders. """ function compute_sparsitypattern(problem::Problem, d_i, k_i) println("\n=== compute_sparsitypattern(problem::Problem, d_i, k_i)") println("Compute the sparsity_pattern corresponding to the given partial orders.") sparsity_pattern = SparsityPattern() return sparsity_pattern::SparsityPattern end """ compute_chordalextension!(sparsity_pattern) Compute the chordal extension on the provided sparsity_pattern. """ function compute_chordalextension!(sparsity_pattern::SparsityPattern) println("\n=== compute_chordalextension!(sparsity_pattern::SparsityPattern)") println("Compute the chordal extension on the provided sparsity_pattern.") end """ maxcliques = compute_maxcliques(sparsity_pattern) Compute a `Array{Set{Variable}}` describing the maximum cliques on the provided sparsity_pattern. """ function compute_maxcliques(sparsity_pattern::SparsityPattern) println("\n=== compute_maxcliques(sparsity_pattern::SparsityPattern)") println("Compute a `Array{Set{Variable}}` describing the maximum cliques on the provided sparsity_pattern.") maxcliques = Array{Set{Variable}}() return maxcliques::Array{Set{Variable}} end """ varsbycstr = compute_varsbycstr(problem) Compute a `Dict{String, Set{Variable}}` providing the set of variables involved in each constraint. """ function compute_varsbycstr(problem::Problem) println("\n=== compute_varsbycstr(problem::Problem)") println("Compute a `Dict{String, Set{Variable}}` providing the set of variables involved in each constraint.") varsbycstr = Dict{String, Set{Variable}}() return varsbycstr::Dict{String, Set{Variable}} end """ cliquevarsbycstr = compute_varsbycstr(sparsity_pattern, max_cliques, varsbycstr) Compute a `Dict{String, Set{Variable}}` providing the set of variables involved in the SDP localizing matrix corresponding to each constraint. """ function compute_varsbycstr(sparsity_pattern, max_cliques, varsbycstr) println("\n=== compute_varsbycstr(sparsity_pattern, max_cliques, varsbycstr)") println("Compute a `Dict{String, Set{Variable}}` providing the set of variables involved in the SDP localizing matrix corresponding to each constraint.") cliquevarsbycstr = Dict{String, Set{Variable}}() return cliquevarsbycstr::Dict{String, Set{Variable}} end """ orderbyclique = compute_cliqueorders(sparsity_pattern, d_i, varsbycstr, max_cliques) Compute a `Array{Int}` providing the relaxation order corresponding to each clique. 
""" function compute_cliqueorders(sparsity_pattern, d_i, varsbycstr, max_cliques) println("\n=== compute_cliqueorders(sparsity_pattern, d_i, varsbycstr, max_cliques)") println("Compute a `Array{Int}` providing the relaxation order corresponding to each clique.") orderbyclique = Dict{Int, Int}() return orderbyclique::Dict{Int, Int} end """ B_i_dict = compute_Bibycstr(problem, max_cliques, cliquevarsbycstr, orderbyclique) Compute a the decomposition of each SDP constraint on the moment basis, yielding several matrices B_i,α,β for each constraint i. """ function compute_Bibycstr(problem, max_cliques, cliquevarsbycstr, orderbyclique) println("\n=== compute_Bibycstr(problem, max_cliques, cliquevarsbycstr, orderbyclique)") println("Compute a the decomposition of each SDP constraint on the moment basis, yielding several matrices B_i,α,β for each constraint i.") return end """ SDP_SOS = build_SDP_SOS(problem, d_i, max_cliques, B_i, cliquevarsbycstr, orderbyclique) Build the primal SDP corresponding to the dual SOS hierarchy for the provided problem. """ function build_SDP_SOS(problem, d_i, max_cliques, B_i, cliquevarsbycstr, orderbyclique) println("\n=== build_SDP_SOS(problem, d_i, max_cliques, B_i, cliquevarsbycstr, orderbyclique)") println("Build the primal SDP corresponding to the dual SOS hierarchy for the provided problem.") return end """ m = make_JuMPproblem(SDP_SOS) Convert the SDP_SOS problem into a JuMP problem """ function make_JuMPproblem(SDP_SOS) println("\n=== make_JuMPproblem(SDP_SOS)") println("Convert the SDP_SOS problem into a JuMP problem") return end
struct Translation{Dim,T} <: Transform
    vec::Point{Dim,T}
end

Translation(vals::T...) where {T} = Translation(Point{length(vals),T}(vals))
Translation(vec::AbstractVector) = Translation(vec...)

(t::Translation)(p::AbstractVector) = t.vec + p

Base.:∘(t1::Translation, t2::Translation) = Translation(t1.vec .+ t2.vec)

(≈)(x::Translation, y::Translation) = x.vec ≈ y.vec

inv(tr::Translation) = Translation(-tr.vec)

const Translated{O,Dim,T} = Transformed{O,Translation{Dim,T}}

Translated(obj::O, transf::Translation{Dim,T}) where {O,Dim,T} = Translated{O,Dim,T}(obj, transf)
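# A brief usage sketch of the translation transform. The coordinates are arbitrary,
# and the comparisons assume `Point` arithmetic and `isapprox` behave as in the
# surrounding geometry package.
t1 = Translation(1.0, 2.0)            # from coordinates
t2 = Translation([0.5, -1.0])         # from a vector, via the vararg constructor
t3 = t1 ∘ t2                          # composition adds the offsets: Translation(1.5, 1.0)

t3 ≈ Translation(1.5, 1.0)            # true
inv(t3) ≈ Translation(-1.5, -1.0)     # true: the inverse negates the offset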
################################################################################ # Pooling in channels ################################################################################ function max_channel_pooling_forward{T}(input::Array{T,3}, output::Array{T,3}, mask::Array{Csize_t,3}, layer) spatial_dim, channels, num = size(input) pooled_chann = size(output, 2) for n = 1:num for pc = 1:pooled_chann cstart = (pc-1)*layer.stride - layer.pad[1] + 1 cend = min(cstart + layer.kernel - 1, channels) cstart = max(1, cstart) for s = 1:spatial_dim @inbounds output[s,pc,n] = input[s,cstart,n] @inbounds mask[s,pc,n] = cstart end for c = cstart+1:cend for s = 1:spatial_dim @inbounds maxval = output[s,pc,n] @inbounds val = input[s,c,n] if val > maxval @inbounds output[s,pc,n] = val @inbounds mask[s,pc,n] = c end end end end end end function mean_channel_pooling_forward{T}(input::Array{T,3}, output::Array{T,3}, integral::Array{T}, layer) spatial_dim_T, channels, num = size(input) pooled_chann = size(output, 2) one = convert(T, 1) neg_one = convert(T, -1) scale = 1/convert(T, layer.kernel) spatial_dim = spatial_dim_T * sizeof(T) fea_dim = spatial_dim * channels output_fea_dim = spatial_dim * pooled_chann for n = 1:num input_ptr = pointer(input) + fea_dim*(n-1) output_ptr = pointer(output) + output_fea_dim*(n-1) integral_ptr = pointer(integral) # compute integral image BLAS.blascopy!(spatial_dim_T, input_ptr, 1, integral_ptr, 1) for c = 2:channels BLAS.blascopy!(spatial_dim_T, input_ptr + (c-1)*spatial_dim, 1, integral_ptr + (c-1)*spatial_dim, 1) BLAS.axpy!(spatial_dim_T, one, integral_ptr + (c-2)*spatial_dim, 1, integral_ptr + (c-1)*spatial_dim, 1) end for pc = 1:pooled_chann cstart = (pc-1)*layer.stride - layer.pad[1] + 1 cend = min(cstart + layer.kernel - 1, channels) cstart = max(1, cstart) output_ptr_pc = output_ptr + (pc-1)*spatial_dim BLAS.blascopy!(spatial_dim_T, integral_ptr + (cend-1)*spatial_dim, 1, output_ptr_pc, 1) if cstart > 1 BLAS.axpy!(spatial_dim_T, neg_one, integral_ptr + (cstart-2)*spatial_dim, 1, output_ptr_pc, 1) end BLAS.scal!(spatial_dim_T, scale, output_ptr_pc, 1) end end end function max_channel_pooling_backward{T}(input::Array{T,3}, output::Array{T,3}, mask::Array{Csize_t,3}, layer) spatial_dim, channels, num = size(input) pooled_chann = size(output, 2) fill!(input, 0) for n = 1:num for pc = 1:pooled_chann cstart = (pc-1)*layer.stride - layer.pad[1] + 1 cend = min(cstart + layer.kernel - 1, channels) cstart = max(1, cstart) for s = 1:spatial_dim @inbounds input[s,mask[s,pc,n],n] += output[s,pc,n] end end end end function mean_channel_pooling_backward{T}(input::Array{T,3}, output::Array{T,3}, layer) spatial_dim_T, channels, num = size(input) pooled_chann = size(output, 2) scale = 1/convert(T, layer.kernel) fill!(input, 0) spatial_dim = spatial_dim_T * sizeof(T) fea_dim = spatial_dim * channels output_fea_dim = spatial_dim * pooled_chann for n = 1:num input_ptr = pointer(input) + fea_dim*(n-1) output_ptr = pointer(output) + output_fea_dim*(n-1) for pc = 1:pooled_chann cstart = (pc-1)*layer.stride - layer.pad[1] + 1 cend = min(cstart + layer.kernel - 1, channels) cstart = max(1, cstart) output_ptr_pc = output_ptr + (pc-1)*spatial_dim for c = cstart:cend BLAS.axpy!(spatial_dim_T, scale, output_ptr_pc, 1, input_ptr + (c-1)*spatial_dim, 1) end end end end
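# Both forward passes slide a one-dimensional window over the channel dimension:
# for pooled channel `pc`, `cstart`/`cend` select the covered channel range, clipped
# to [1, channels]. A rough usage sketch in the same pre-0.7 style as the code above;
# the `MockPoolLayer` type and the ceil-based output-size formula are assumptions for
# illustration, not taken from the original layer definition.
immutable MockPoolLayer
    kernel::Int
    stride::Int
    pad::NTuple{2,Int}
end

layer = MockPoolLayer(3, 2, (0, 0))
spatial_dim, channels, num = 8, 7, 2
pooled_chann = round(Int, ceil((channels + layer.pad[1] + layer.pad[2] - layer.kernel) / layer.stride)) + 1

input  = rand(Float32, spatial_dim, channels, num)
output = zeros(Float32, spatial_dim, pooled_chann, num)
mask   = zeros(Csize_t, spatial_dim, pooled_chann, num)

max_channel_pooling_forward(input, output, mask, layer)   # fills `output` and the argmax `mask`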
# helper functions
function h2o_get(endpoint::AbstractString, args...; version = 3)
    url = "http://$(H2Oip):$(H2Oport)/$(version)/$(endpoint)"
    if !isempty(args)
        url = string(url, "?")
        for i in args
            url = string(url, i, "&")
        end
        url = url[1:end-1]
    end
    println("h2o_get: ", url, "\n")   # for debugging, remove later
    results = get(url)
    return results
end

function h2o_post(endpoint::AbstractString, args...; version = 3)
    url = "http://$(H2Oip):$(H2Oport)/$(version)/$(endpoint)"
    if !isempty(args)
        url = string(url, "?")
        for i in args
            url = string(url, i, "&")
        end
        url = url[1:end-1]
    end
    println("h2o_post: ", url, "\n")   # for debugging, remove later
    results = post(url)
    return results
end
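# Both helpers assemble the query string by appending each positional argument
# followed by '&' and trimming the trailing separator. A hypothetical call; the
# endpoint names and parameters below are illustrative only.
# builds http://$(H2Oip):$(H2Oport)/3/Frames?row_count=10
frames = h2o_get("Frames", "row_count=10")

# multiple arguments are joined with '&'
parsed = h2o_post("Parse", "source_frames=myfile.hex", "destination_frame=parsed"; version = 3)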
export Soln, SolvData, ExData ## # Types for Examples ## type ExData a::Function b::Function c::Function f::Function chi::Function g::Function phi::Function s::Function u::Function ux::Function ut::Function sp::Function γ::Function function ExData(a::Function, b::Function, c::Function, s::Function, u::Function, ux::Function, ut::Function, sp::Function, γf::Function) aux{T<:Real}(x::T, t::T)=a(x,t)*ux(x,t) auxx{T<:Real}(x::T, t::T)=(aux(x+gPDEDiffEst,t)-aux(x,t))/gPDEDiffEst f{T<:Real}(x::T, t::T)=-(auxx(x,t)+b(x,t)*ux(x,t)+c(x,t)*u(x,t)-ut(x,t)) g{T<:Real}(t::T)=aux(zero(T), t) phi{T<:Real}(x::T)=u(x, zero(T)) chi{T<:Real}(x::T, t::T)=a(x, t)*ux(x, t)+γf(x, t)*sp(t) return new(a,b,c,f,chi,g,phi,s,u,ux,ut,sp,γf) end end ## # Types for PDE Solver Routine ## @lintpragma("Info me PDEtypes: Consider removing dx information from SolnA.") type SolnA{T<:Real, T1<:Real} xgrid::Vector{T} tgrid::Vector{T} dx::Vector{T} # TODO: Consider removing dx information tau::T xbdy::Vector{T} soln::Matrix{T1} end SolnA(nSlice::Int, s::Function)=SolnA(linspace(0,1), linspace(0,1), linspace(0,1), 0.1, linspace(0,1), [one(Float) for x in linspace(0,1), t in linspace(0,1)]) typealias Soln SolnA type SolvData verbose::Bool a::Function b::Function c::Function f::Function rhs::Function g::Function phi::Function s::Function grid::SolnA SolvData(verbose::Bool, a::Function, b::Function, c::Function, f::Function, rhs::Function, g::Function, phi::Function, s::Function, nSlice::Int) = new(verbose, a, b, c, f, rhs, g, phi, s, SolnA(nSlice, s)) end SolvData(a::Function, b::Function, c::Function, f::Function, rhs::Function, g::Function, phi::Function, s::Function, nSlice::Int) = SolvData(gSetVerbose, a, b, c, f, rhs, g, phi, s, nSlice) function SolvData(exin::ExData, nSlice::Int) rhs{T<:Real}(t::T) = exin.chi(exin.s(t), t) - exin.γ(exin.s(t), t) * exin.sp(t) return SolvData(gSetVerbose, exin.a, exin.b, exin.c, exin.f, rhs, exin.g, exin.phi, exin.s, nSlice) end # include("PDESolvDataHelpers.jl")
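# `ExData` derives the source term `f`, the flux data `chi`, the boundary data `g`,
# and the initial data `phi` from the supplied coefficient and solution functions, so
# a manufactured-solution setup only has to provide consistent `u`, `ux`, `ut` and the
# moving boundary `s`, `sp`. A hedged sketch in the same pre-0.7 style; the particular
# functions are invented for illustration, and `gPDEDiffEst`, `gSetVerbose` and the
# other globals are assumed to be defined by the surrounding package.
ex_a{T<:Real}(x::T, t::T)  = one(T)        # constant diffusion coefficient
ex_b{T<:Real}(x::T, t::T)  = zero(T)
ex_c{T<:Real}(x::T, t::T)  = zero(T)
ex_u{T<:Real}(x::T, t::T)  = x * t         # manufactured solution
ex_ux{T<:Real}(x::T, t::T) = t
ex_ut{T<:Real}(x::T, t::T) = x
ex_s{T<:Real}(t::T)        = one(T) + t    # moving boundary s(t) = 1 + t
ex_sp{T<:Real}(t::T)       = one(T)
ex_γ{T<:Real}(x::T, t::T)  = one(T)

ex   = ExData(ex_a, ex_b, ex_c, ex_s, ex_u, ex_ux, ex_ut, ex_sp, ex_γ)
data = SolvData(ex, 50)                    # 50 slices; rhs/g/phi are taken from `ex`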
using ClassicalOrthogonalPolynomials, Test
import ClassicalOrthogonalPolynomials: OrthogonalPolynomialRatio, recurrencecoefficients

@testset "OrthogonalPolynomialRatio" begin
    P = Legendre()
    R = OrthogonalPolynomialRatio(P, 0.1)
    @test P[0.1, 1:10] ./ P[0.1, 2:11] ≈ R[1:10]

    R = OrthogonalPolynomialRatio(P, -1)
    @test R[1:10] ≈ fill(-1, 10)
end
# This file is a part of Julia. License is MIT: https://julialang.org/license # Operations with the file system (paths) ## export cd, chmod, chown, cp, cptree, mkdir, mkpath, mktemp, mktempdir, mv, pwd, rename, readlink, readdir, rm, samefile, sendfile, symlink, tempdir, tempname, touch, unlink, walkdir import .Base.RefValue # get and set current directory """ pwd() -> AbstractString Get the current working directory. # Examples ```julia-repl julia> pwd() "/home/JuliaUser" julia> cd("/home/JuliaUser/Projects/julia") julia> pwd() "/home/JuliaUser/Projects/julia" ``` """ function pwd() b = Vector{UInt8}(undef, 1024) len = RefValue{Csize_t}(length(b)) uv_error(:getcwd, ccall(:uv_cwd, Cint, (Ptr{UInt8}, Ptr{Csize_t}), b, len)) String(b[1:len[]]) end """ cd(dir::AbstractString=homedir()) Set the current working directory. # Examples ```julia-repl julia> cd("/home/JuliaUser/Projects/julia") julia> pwd() "/home/JuliaUser/Projects/julia" julia> cd() julia> pwd() "/home/JuliaUser" ``` """ function cd(dir::AbstractString) uv_error("chdir $dir", ccall(:uv_chdir, Cint, (Cstring,), dir)) end cd() = cd(homedir()) if Sys.iswindows() function cd(f::Function, dir::AbstractString) old = pwd() try cd(dir) f() finally cd(old) end end else function cd(f::Function, dir::AbstractString) fd = ccall(:open, Int32, (Cstring, Int32), :., 0) systemerror(:open, fd == -1) try cd(dir) f() finally systemerror(:fchdir, ccall(:fchdir, Int32, (Int32,), fd) != 0) systemerror(:close, ccall(:close, Int32, (Int32,), fd) != 0) end end end """ cd(f::Function, dir::AbstractString=homedir()) Temporarily change the current working directory to `dir`, apply function `f` and finally return to the original directory. # Examples ```julia-repl julia> pwd() "/home/JuliaUser" julia> cd(readdir, "/home/JuliaUser/Projects/julia") 34-element Array{String,1}: ".circleci" ".freebsdci.sh" ".git" ".gitattributes" ".github" ⋮ "test" "ui" "usr" "usr-staging" julia> pwd() "/home/JuliaUser" ``` """ cd(f::Function) = cd(f, homedir()) function checkmode(mode::Integer) if !(0 <= mode <= 511) throw(ArgumentError("Mode must be between 0 and 511 = 0o777")) end mode end """ mkdir(path::AbstractString; mode::Unsigned = 0o777) Make a new directory with name `path` and permissions `mode`. `mode` defaults to `0o777`, modified by the current file creation mask. This function never creates more than one directory. If the directory already exists, or some intermediate directories do not exist, this function throws an error. See [`mkpath`](@ref) for a function which creates all required intermediate directories. Return `path`. # Examples ```julia-repl julia> mkdir("testingdir") "testingdir" julia> cd("testingdir") julia> pwd() "/home/JuliaUser/testingdir" ``` """ function mkdir(path::AbstractString; mode::Integer = 0o777) @static if Sys.iswindows() ret = ccall(:_wmkdir, Int32, (Cwstring,), path) else ret = ccall(:mkdir, Int32, (Cstring, UInt32), path, checkmode(mode)) end systemerror(:mkdir, ret != 0; extrainfo=path) path end """ mkpath(path::AbstractString; mode::Unsigned = 0o777) Create all directories in the given `path`, with permissions `mode`. `mode` defaults to `0o777`, modified by the current file creation mask. Return `path`. 
# Examples ```julia-repl julia> mkdir("testingdir") "testingdir" julia> cd("testingdir") julia> pwd() "/home/JuliaUser/testingdir" julia> mkpath("my/test/dir") "my/test/dir" julia> readdir() 1-element Array{String,1}: "my" julia> cd("my") julia> readdir() 1-element Array{String,1}: "test" julia> readdir("test") 1-element Array{String,1}: "dir" ``` """ function mkpath(path::AbstractString; mode::Integer = 0o777) isdirpath(path) && (path = dirname(path)) dir = dirname(path) (path == dir || isdir(path)) && return path mkpath(dir, mode = checkmode(mode)) try mkdir(path, mode = mode) catch err # If there is a problem with making the directory, but the directory # does in fact exist, then ignore the error. Else re-throw it. if !isa(err, SystemError) || !isdir(path) rethrow() end end path end """ rm(path::AbstractString; force::Bool=false, recursive::Bool=false) Delete the file, link, or empty directory at the given path. If `force=true` is passed, a non-existing path is not treated as error. If `recursive=true` is passed and the path is a directory, then all contents are removed recursively. # Examples ```jldoctest julia> mkpath("my/test/dir"); julia> rm("my", recursive=true) julia> rm("this_file_does_not_exist", force=true) julia> rm("this_file_does_not_exist") ERROR: IOError: unlink: no such file or directory (ENOENT) Stacktrace: [...] ``` """ function rm(path::AbstractString; force::Bool=false, recursive::Bool=false) if islink(path) || !isdir(path) try @static if Sys.iswindows() # is writable on windows actually means "is deletable" if (filemode(path) & 0o222) == 0 chmod(path, 0o777) end end unlink(path) catch err if force && isa(err, IOError) && err.code==Base.UV_ENOENT return end rethrow() end else if recursive for p in readdir(path) rm(joinpath(path, p), force=force, recursive=true) end end @static if Sys.iswindows() ret = ccall(:_wrmdir, Int32, (Cwstring,), path) else ret = ccall(:rmdir, Int32, (Cstring,), path) end systemerror(:rmdir, ret != 0, extrainfo=path) end end # The following use Unix command line facilities function checkfor_mv_cp_cptree(src::AbstractString, dst::AbstractString, txt::AbstractString; force::Bool=false) if ispath(dst) if force # Check for issue when: (src == dst) or when one is a link to the other # https://github.com/JuliaLang/julia/pull/11172#issuecomment-100391076 if Base.samefile(src, dst) abs_src = islink(src) ? abspath(readlink(src)) : abspath(src) abs_dst = islink(dst) ? abspath(readlink(dst)) : abspath(dst) throw(ArgumentError(string("'src' and 'dst' refer to the same file/dir.", "This is not supported.\n ", "`src` refers to: $(abs_src)\n ", "`dst` refers to: $(abs_dst)\n"))) end rm(dst; recursive=true) else throw(ArgumentError(string("'$dst' exists. `force=true` ", "is required to remove '$dst' before $(txt)."))) end end end function cptree(src::AbstractString, dst::AbstractString; force::Bool=false, follow_symlinks::Bool=false) isdir(src) || throw(ArgumentError("'$src' is not a directory. Use `cp(src, dst)`")) checkfor_mv_cp_cptree(src, dst, "copying"; force=force) mkdir(dst) for name in readdir(src) srcname = joinpath(src, name) if !follow_symlinks && islink(srcname) symlink(readlink(srcname), joinpath(dst, name)) elseif isdir(srcname) cptree(srcname, joinpath(dst, name); force=force, follow_symlinks=follow_symlinks) else sendfile(srcname, joinpath(dst, name)) end end end """ cp(src::AbstractString, dst::AbstractString; force::Bool=false, follow_symlinks::Bool=false) Copy the file, link, or directory from `src` to `dst`. 
`force=true` will first remove an existing `dst`. If `follow_symlinks=false`, and `src` is a symbolic link, `dst` will be created as a symbolic link. If `follow_symlinks=true` and `src` is a symbolic link, `dst` will be a copy of the file or directory `src` refers to. Return `dst`. """ function cp(src::AbstractString, dst::AbstractString; force::Bool=false, follow_symlinks::Bool=false) checkfor_mv_cp_cptree(src, dst, "copying"; force=force) if !follow_symlinks && islink(src) symlink(readlink(src), dst) elseif isdir(src) cptree(src, dst; force=force, follow_symlinks=follow_symlinks) else sendfile(src, dst) end dst end """ mv(src::AbstractString, dst::AbstractString; force::Bool=false) Move the file, link, or directory from `src` to `dst`. `force=true` will first remove an existing `dst`. Return `dst`. # Examples ```jldoctest; filter = r"Stacktrace:(\\n \\[[0-9]+\\].*)*" julia> write("hello.txt", "world"); julia> mv("hello.txt", "goodbye.txt") "goodbye.txt" julia> "hello.txt" in readdir() false julia> readline("goodbye.txt") "world" julia> write("hello.txt", "world2"); julia> mv("hello.txt", "goodbye.txt") ERROR: ArgumentError: 'goodbye.txt' exists. `force=true` is required to remove 'goodbye.txt' before moving. Stacktrace: [1] #checkfor_mv_cp_cptree#10(::Bool, ::Function, ::String, ::String, ::String) at ./file.jl:293 [...] julia> mv("hello.txt", "goodbye.txt", force=true) "goodbye.txt" julia> rm("goodbye.txt"); ``` """ function mv(src::AbstractString, dst::AbstractString; force::Bool=false) checkfor_mv_cp_cptree(src, dst, "moving"; force=force) rename(src, dst) dst end """ touch(path::AbstractString) Update the last-modified timestamp on a file to the current time. Return `path`. # Examples ```julia-repl julia> write("my_little_file", 2); julia> mtime("my_little_file") 1.5273815391135583e9 julia> touch("my_little_file"); julia> mtime("my_little_file") 1.527381559163435e9 ``` We can see the [`mtime`](@ref) has been modified by `touch`. 
""" function touch(path::AbstractString) f = open(path, JL_O_WRONLY | JL_O_CREAT, 0o0666) try if Sys.isunix() ret = ccall(:futimes, Cint, (Cint, Ptr{Cvoid}), fd(f), C_NULL) systemerror(:futimes, ret != 0, extrainfo=path) else t = time() futime(f,t,t) end finally close(f) end path end if Sys.iswindows() function tempdir() temppath = Vector{UInt16}(undef, 32767) lentemppath = ccall(:GetTempPathW,stdcall,UInt32,(UInt32,Ptr{UInt16}),length(temppath),temppath) if lentemppath >= length(temppath) || lentemppath == 0 error("GetTempPath failed: $(Libc.FormatMessage())") end resize!(temppath,lentemppath) return transcode(String, temppath) end const temp_prefix = cwstring("jl_") function _win_tempname(temppath::AbstractString, uunique::UInt32) tempp = cwstring(temppath) tname = Vector{UInt16}(undef, 32767) uunique = ccall(:GetTempFileNameW,stdcall,UInt32,(Ptr{UInt16},Ptr{UInt16},UInt32,Ptr{UInt16}), tempp,temp_prefix,uunique,tname) lentname = something(findfirst(iszero,tname), 0)-1 if uunique == 0 || lentname <= 0 error("GetTempFileName failed: $(Libc.FormatMessage())") end resize!(tname,lentname) return transcode(String, tname) end function mktemp(parent=tempdir()) filename = _win_tempname(parent, UInt32(0)) return (filename, Base.open(filename, "r+")) end function mktempdir(parent=tempdir()) seed::UInt32 = Libc.rand(UInt32) while true if (seed & typemax(UInt16)) == 0 seed += 1 end filename = _win_tempname(parent, seed) ret = ccall(:_wmkdir, Int32, (Ptr{UInt16},), cwstring(filename)) if ret == 0 return filename end systemerror(:mktempdir, Libc.errno()!=Libc.EEXIST) seed += 1 end end function tempname() parent = tempdir() seed::UInt32 = rand(UInt32) while true if (seed & typemax(UInt16)) == 0 seed += 1 end filename = _win_tempname(parent, seed) if !ispath(filename) return filename end seed += 1 end end else # !windows # Obtain a temporary filename. function tempname() d = get(ENV, "TMPDIR", C_NULL) # tempnam ignores TMPDIR on darwin p = ccall(:tempnam, Cstring, (Cstring,Cstring), d, :julia) systemerror(:tempnam, p == C_NULL) s = unsafe_string(p) Libc.free(p) return s end # Obtain a temporary directory's path. tempdir() = dirname(tempname()) # Create and return the name of a temporary file along with an IOStream function mktemp(parent=tempdir()) b = joinpath(parent, "tmpXXXXXX") p = ccall(:mkstemp, Int32, (Cstring,), b) # modifies b systemerror(:mktemp, p == -1) return (b, fdio(p, true)) end # Create and return the name of a temporary directory function mktempdir(parent=tempdir()) b = joinpath(parent, "tmpXXXXXX") p = ccall(:mkdtemp, Cstring, (Cstring,), b) systemerror(:mktempdir, p == C_NULL) return unsafe_string(p) end end # os-test """ tempdir() Obtain the path of a temporary directory (possibly shared with other processes). """ tempdir() """ tempname() Generate a temporary file path. This function only returns a path; no file is created. The path is likely to be unique, but this cannot be guaranteed. !!! warning This can lead to race conditions if another process obtains the same file name and creates the file before you are able to. Using [`mktemp()`](@ref) is recommended instead. """ tempname() """ mktemp(parent=tempdir()) Return `(path, io)`, where `path` is the path of a new temporary file in `parent` and `io` is an open file object for this path. """ mktemp(parent) """ mktempdir(parent=tempdir()) Create a temporary directory in the `parent` directory and return its path. If `parent` does not exist, throw an error. 
""" mktempdir(parent) """ mktemp(f::Function, parent=tempdir()) Apply the function `f` to the result of [`mktemp(parent)`](@ref) and remove the temporary file upon completion. """ function mktemp(fn::Function, parent=tempdir()) (tmp_path, tmp_io) = mktemp(parent) try fn(tmp_path, tmp_io) finally # TODO: should we call GC.gc() first on error, to make it much more likely that `rm` succeeds? try close(tmp_io) rm(tmp_path) catch ex @error "mktemp cleanup" _group=:file exception=(ex, catch_backtrace()) end end end """ mktempdir(f::Function, parent=tempdir()) Apply the function `f` to the result of [`mktempdir(parent)`](@ref) and remove the temporary directory upon completion. """ function mktempdir(fn::Function, parent=tempdir()) tmpdir = mktempdir(parent) try fn(tmpdir) finally # TODO: should we call GC.gc() first on error, to make it much more likely that `rm` succeeds? try rm(tmpdir, recursive=true) catch ex @error "mktempdir cleanup" _group=:file exception=(ex, catch_backtrace()) end end end struct uv_dirent_t name::Ptr{UInt8} typ::Cint end """ readdir(dir::AbstractString=".") -> Vector{String} Return the files and directories in the directory `dir` (or the current working directory if not given). # Examples ```julia-repl julia> readdir("/home/JuliaUser/Projects/julia") 34-element Array{String,1}: ".circleci" ".freebsdci.sh" ".git" ".gitattributes" ".github" ⋮ "test" "ui" "usr" "usr-staging" ``` """ function readdir(path::AbstractString) # Allocate space for uv_fs_t struct uv_readdir_req = zeros(UInt8, ccall(:jl_sizeof_uv_fs_t, Int32, ())) # defined in sys.c, to call uv_fs_readdir, which sets errno on error. err = ccall(:uv_fs_scandir, Int32, (Ptr{Cvoid}, Ptr{UInt8}, Cstring, Cint, Ptr{Cvoid}), eventloop(), uv_readdir_req, path, 0, C_NULL) err < 0 && throw(SystemError("unable to read directory $path", -err)) #uv_error("unable to read directory $path", err) # iterate the listing into entries entries = String[] ent = Ref{uv_dirent_t}() while Base.UV_EOF != ccall(:uv_fs_scandir_next, Cint, (Ptr{Cvoid}, Ptr{uv_dirent_t}), uv_readdir_req, ent) push!(entries, unsafe_string(ent[].name)) end # Clean up the request string ccall(:jl_uv_fs_req_cleanup, Cvoid, (Ptr{UInt8},), uv_readdir_req) return entries end readdir() = readdir(".") """ walkdir(dir; topdown=true, follow_symlinks=false, onerror=throw) Return an iterator that walks the directory tree of a directory. The iterator returns a tuple containing `(rootpath, dirs, files)`. The directory tree can be traversed top-down or bottom-up. If `walkdir` encounters a [`SystemError`](@ref) it will rethrow the error by default. A custom error handling function can be provided through `onerror` keyword argument. `onerror` is called with a `SystemError` as argument. 
# Examples ```julia for (root, dirs, files) in walkdir(".") println("Directories in \$root") for dir in dirs println(joinpath(root, dir)) # path to directories end println("Files in \$root") for file in files println(joinpath(root, file)) # path to files end end ``` ```julia-repl julia> mkpath("my/test/dir"); julia> itr = walkdir("my"); julia> (root, dirs, files) = first(itr) ("my", ["test"], String[]) julia> (root, dirs, files) = first(itr) ("my/test", ["dir"], String[]) julia> (root, dirs, files) = first(itr) ("my/test/dir", String[], String[]) ``` """ function walkdir(root; topdown=true, follow_symlinks=false, onerror=throw) content = nothing try content = readdir(root) catch err isa(err, SystemError) || throw(err) onerror(err) # Need to return an empty closed channel to skip the current root folder chnl = Channel(0) close(chnl) return chnl end dirs = Vector{eltype(content)}() files = Vector{eltype(content)}() for name in content if isdir(joinpath(root, name)) push!(dirs, name) else push!(files, name) end end function _it(chnl) if topdown put!(chnl, (root, dirs, files)) end for dir in dirs path = joinpath(root,dir) if follow_symlinks || !islink(path) for (root_l, dirs_l, files_l) in walkdir(path, topdown=topdown, follow_symlinks=follow_symlinks, onerror=onerror) put!(chnl, (root_l, dirs_l, files_l)) end end end if !topdown put!(chnl, (root, dirs, files)) end end return Channel(_it) end function unlink(p::AbstractString) err = ccall(:jl_fs_unlink, Int32, (Cstring,), p) uv_error("unlink", err) nothing end # For move command function rename(src::AbstractString, dst::AbstractString) err = ccall(:jl_fs_rename, Int32, (Cstring, Cstring), src, dst) # on error, default to cp && rm if err < 0 # force: is already done in the mv function cp(src, dst; force=false, follow_symlinks=false) rm(src; recursive=true) end nothing end function sendfile(src::AbstractString, dst::AbstractString) src_open = false dst_open = false local src_file, dst_file try src_file = open(src, JL_O_RDONLY) src_open = true dst_file = open(dst, JL_O_CREAT | JL_O_TRUNC | JL_O_WRONLY, filemode(src_file)) dst_open = true bytes = filesize(stat(src_file)) sendfile(dst_file, src_file, Int64(0), Int(bytes)) finally if src_open && isopen(src_file) close(src_file) end if dst_open && isopen(dst_file) close(dst_file) end end end if Sys.iswindows() const UV_FS_SYMLINK_JUNCTION = 0x0002 end """ symlink(target::AbstractString, link::AbstractString) Creates a symbolic link to `target` with the name `link`. !!! note This function raises an error under operating systems that do not support soft symbolic links, such as Windows XP. """ function symlink(p::AbstractString, np::AbstractString) @static if Sys.iswindows() if Sys.windows_version() < Sys.WINDOWS_VISTA_VER error("Windows XP does not support soft symlinks") end end flags = 0 @static if Sys.iswindows() if isdir(p) flags |= UV_FS_SYMLINK_JUNCTION p = abspath(p) end end err = ccall(:jl_fs_symlink, Int32, (Cstring, Cstring, Cint), p, np, flags) @static if Sys.iswindows() if err < 0 && !isdir(p) @warn "On Windows, creating file symlinks requires Administrator privileges" maxlog=1 _group=:file end end uv_error("symlink",err) end """ readlink(path::AbstractString) -> AbstractString Return the target location a symbolic link `path` points to. 
""" function readlink(path::AbstractString) req = Libc.malloc(_sizeof_uv_fs) try ret = ccall(:uv_fs_readlink, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), eventloop(), req, path, C_NULL) if ret < 0 ccall(:uv_fs_req_cleanup, Cvoid, (Ptr{Cvoid},), req) uv_error("readlink", ret) @assert false end tgt = unsafe_string(ccall(:jl_uv_fs_t_ptr, Ptr{Cchar}, (Ptr{Cvoid},), req)) ccall(:uv_fs_req_cleanup, Cvoid, (Ptr{Cvoid},), req) return tgt finally Libc.free(req) end end """ chmod(path::AbstractString, mode::Integer; recursive::Bool=false) Change the permissions mode of `path` to `mode`. Only integer `mode`s (e.g. `0o777`) are currently supported. If `recursive=true` and the path is a directory all permissions in that directory will be recursively changed. Return `path`. """ function chmod(path::AbstractString, mode::Integer; recursive::Bool=false) err = ccall(:jl_fs_chmod, Int32, (Cstring, Cint), path, mode) uv_error("chmod", err) if recursive && isdir(path) for p in readdir(path) if !islink(joinpath(path, p)) chmod(joinpath(path, p), mode, recursive=true) end end end path end """ chown(path::AbstractString, owner::Integer, group::Integer=-1) Change the owner and/or group of `path` to `owner` and/or `group`. If the value entered for `owner` or `group` is `-1` the corresponding ID will not change. Only integer `owner`s and `group`s are currently supported. Return `path`. """ function chown(path::AbstractString, owner::Integer, group::Integer=-1) err = ccall(:jl_fs_chown, Int32, (Cstring, Cint, Cint), path, owner, group) uv_error("chown",err) path end
function test_reshape_layer(backend::Backend, n_input, T, eps) println("-- Testing ReshapeLayer on $(typeof(backend)){$T}...") # make sure the prod of the first 3 dimensions are the same dims = [(3*2^isodd(i), 2*2^iseven(i), 4, abs(rand(Int))%5+3) for i = 1:n_input] input = [rand(T, dims[i]...) for i = 1:n_input] input_blob = Blob[make_blob(backend, x) for x in input] diff_blob = Blob[make_blob(backend, x) for x in input] println(" > Setup") layer = ReshapeLayer(bottoms=Array{Symbol}(n_input), tops=Array{Symbol}(n_input), shape=(1,1,prod(dims[1][1:end-1]))) state = setup(backend, layer, input_blob, diff_blob) println(" > Forward") forward(backend, state, input_blob) for i = 1:n_input got_output = zeros(T, size(input[i])) copy!(got_output, input_blob[i]) @test all(abs.(got_output - input[i]) .< eps) end println(" > Backward") top_diff = [rand(T, size(input[i])) for i = 1:n_input] for i = 1:n_input copy!(diff_blob[i], top_diff[i]) end backward(backend, state, input_blob, diff_blob) for i = 1:n_input got_grad = zeros(T, size(input[i])) copy!(got_grad, diff_blob[i]) @test all(abs.(got_grad - top_diff[i]) .< eps) end shutdown(backend, state) end function test_reshape_layer(backend::Backend) test_reshape_layer(backend, 3, Float64, 1e-10) test_reshape_layer(backend, 3, Float32, 1e-4) end if test_cpu test_reshape_layer(backend_cpu) end if test_gpu test_reshape_layer(backend_gpu) end
import Base.show "LMCLUS algorithm parameters" mutable struct Parameters "Minimum dimension of the cluster" min_dim::Int "Maximum dimension of the cluster" max_dim::Int "Nominal number of resulting clusters" number_of_clusters::Int "Terminate algorithm upon founding specified number of clusters" stop_after_cluster::Int "Force to search clusters in high subspaces" force_max_dim::Bool "Fixed number of bins for the distance histogram" hist_bin_size::Int "Minimum number of bins for the distance histogram" min_bin_num::Int "Minimum cluster size (or noise size) in order to prevent generation of small clusters" min_cluster_size::Int "Separation best bound value is used for evaluating a goodness of separation characterized by a discriminability and a depth between modes of a distance histogram." best_bound::Float64 "Sampling error bound determines a minimal number of samples required to correctly identify a linear manifold cluster." error_bound::Float64 "Maximum histogram bin size" max_bin_portion::Float64 "RNG seed" random_seed::Int64 "Sampling heuristic (1-3)" sampling_heuristic::Int "Sampling factor used in one of sampling heuristics" sampling_factor::Float64 "Enables a sampling for a distance histogram" histogram_sampling::Bool "Enables an alignment of a manifold cluster basis" basis_alignment::Bool "Enables a linear manifold cluster dimensionality detection" dim_adjustment::Bool "Ratio of manifold principal subspace variance" dim_adjustment_ratio::Float64 "Enables the minimum description length heuristic for a complexity validation of a generated cluster" mdl::Bool "MDL model precision encoding constant" mdl_model_precision::Int "MDL data precision encoding constant" mdl_data_precision::Int "Quantization error of a bin size calculation" mdl_quant_error::Float64 "Compression threshold value for discarding candidate clusters" mdl_compres_ratio::Float64 "MDL algorithm" mdl_algo::DataType "Enable creation of bounded linear manifold clusters" bounded_cluster::Bool "Separation threshold algorithm" sep_algo::Type{<:Thresholding} Parameters(max_dim) = new( 1, # min_dim max_dim, # max_dim 10, # number_of_clusters 1000, # stop_after_cluster false, # force_max_dim 0, # hist_bin_size 7, # min_bin_num 20, # min_cluster_size 1.0, # best_bound 0.0001, # error_bound 0.1, # max_bin_portion 0, # random_seed 3, # sampling_heuristic 0.01, # sampling_factor false, # histogram_sampling false, # basis_alignment false, # dim_adjustment 0.99, # dim_adjustment_ratio false, # mdl 32, # mdl_model_precision 16, # mdl_data_precision 0.001, # mdl_quant_error 1.05, # mdl_compres_ratio MDL.OptimalQuant, # mdl_algo false, # bounded_cluster Kittler # sep_algo ) end show(io::IO, p::Parameters) = print(io, """Linear Manifold Clustering parameters: Min dimension (min_dim): $(p.min_dim) Max dimension (max_dim): $(p.max_dim) Nominal number of clusters (number_of_clusters): $(p.number_of_clusters) Stop searching after number for clusters found (stop_after_cluster): $(p.stop_after_cluster) Force algorithm to search in higher dimensions (force_max_dim): $(p.force_max_dim) Minimum cluster size (min_cluster_size): $(p.min_cluster_size) Best bound (best_bound): $(p.best_bound) Error bound (error_bound): $(p.error_bound) Sample points for distance histogram (histogram_sampling): $(p.histogram_sampling) Histogram bins (hist_bin_size): $(p.hist_bin_size) Minimum number of histogram bins (min_bin_num): $(p.min_bin_num) Maximum histogram bin size (max_bin_portion): $(p.max_bin_portion) Sampling heuristic (sampling_heuristic): 
$(p.sampling_heuristic) Sampling factor (sampling_factor): $(p.sampling_factor) Random seed (random_seed): $(p.random_seed) (0 - random seed) Manifold cluster basis alignment (basis_alignment): $(p.basis_alignment) Manifold dimensionality adjustment (dim_adjustment): $(p.dim_adjustment) Ratio of manifold principal subspace variance (dim_adjustment_ratio): $(p.dim_adjustment_ratio) Use MDL heuristic (mdl): $(p.mdl) MDL model precision encoding (mdl_model_precision): $(p.mdl_model_precision) MDL data precision encoding (mdl_data_precision): $(p.mdl_data_precision) MDL quantizing error (mdl_quant_error): $(p.mdl_quant_error) MDL compression ratio threshold (mdl_compres_ratio): $(p.mdl_compres_ratio) MDL algorithm (mdl_algo): $(p.mdl_algo) Creation of bounded linear manifold clusters (bounded_cluster): $(p.bounded_cluster) Separation algorithm (sep_algo): $(p.sep_algo)""")
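# A short usage sketch: construct the defaults for a chosen maximum cluster dimension
# and override individual fields as needed. The particular values are illustrative only.
params = Parameters(5)                 # search for clusters of dimension 1..5
params.number_of_clusters = 3
params.best_bound         = 0.5
params.error_bound        = 1e-3
params.histogram_sampling = true
show(stdout, params)                   # prints the parameter summary defined above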
struct GroupedDataFrame <: AbstractFuture data::Future length::Future parent::DataFrame groupcols::Future groupkwargs::Future # GroupedDataFrame() = new(Future(), Future(), Future()) # GroupedDataFrame(gdf::GroupedDataFrame) = # new(Future(), Future(gdf.nrows), Future(gdf.offset)) end Banyan.convert(::Type{Future}, gdf::GroupedDataFrame) = gdf.data Banyan.isview(gdf::GroupedDataFrame) = true Banyan.sample_memory_usage(gdf::DataFrames.GroupedDataFrame) = total_memory_usage(gdf) - total_memory_usage(parent(gdf)) Base.length(gdf::GroupedDataFrame) = collect(gdf.length) Base.size(gdf::GroupedDataFrame) = Tuple(length(gdf)) Base.ndims(gdf::GroupedDataFrame) = 1 DataFrames.groupcols(gdf::GroupedDataFrame) = groupcols(sample(gdf)) DataFrames.valuecols(gdf::GroupedDataFrame) = valuecols(sample(gdf)) # NOTE: For now we don't allow grouped dataframes to be copied since we are # only supporting simple use-cases where you want to aggregate or transform # or filter your grouped dataframe. # NOTE: We don't need to implement any of the sample computation functions # (that we implement in `df.jl`) because a `GroupedDataFrame` will never be # assigned a `Grouped` PT. The sample computation functions are only used by # the `Grouped` PT constructor. And we never want to assign the `Grouped` PT # constructor to `GroupedDataFrame`s. `Blocked` will be sufficient. # GroupedDataFrame creation function DataFrames.groupby(df::DataFrame, cols; kwargs...)::GroupedDataFrame get(kwargs, :sort, true) || error("Groups cannot currently be ordered by how they originally appeared") gdf_data = Future() gdf_length = Future() cols = Future(cols) kwargs = Future(kwargs) gdf = GroupedDataFrame(Future(), gdf_length, df, cols, kwargs) # partition(df, Replicated()) # partition(gdf, Replicated()) # partition(gdf_length, Replicated()) groupingkeys = Symbol.(names(sample(df), collect(cols))) partitioned_using_modules("DataFrames") partitioned_using() do keep_sample_rate(gdf, df) end partitioned_with() do pt(df, Grouped(df, by=groupingkeys, scaled_by_same_as=gdf)) # TODO: Avoid circular dependency # TODO: Specify key for Blocked # TODO: Ensure that bangs in splitting functions in PF library are used # appropriately pt(gdf, Blocked(along=1) & ScaledBySame(as=df)) pt(gdf_length, Reducing(quote + end)) # TODO: See if we can `using Banyan` on the cluster and avoid this pt(df, gdf, gdf_length, cols, kwargs, Replicated()) end @partitioned df gdf gdf_length cols kwargs begin gdf = DataFrames.groupby(df, cols; kwargs...) gdf_length = DataFrames.length(gdf) end # allowedgroupingkeys = names(sample(df), compute(cols)) # allowedgroupingkeys = get(kwargs, :sort, false) ? allowedgroupingkeys[1:1] : allowedgroupingkeys # union!(sample(df, :allowedgroupingkeys), allowedgroupingkeys) # setsample(gdf, :allowedgroupingkeys, allowedgroupingkeys) # for key in allowedgroupingkeys # for balanced in [true, false] # partition(df, Grouped(;key=key, balanced=balanced)) # end # # Grouped computes keystatistics for key for df # setsample(gdf, :keystatistics, key, sample(df, :keystatistics, key)) # end # pt(gdf, Blocked(;dim=1), match=df, on=["balanced", "id"]) # ptartition(gdf_length, Reducing(;reducer=+)) # papt(df, gdf, gdf_length, cols, kwargs, Replicated()) # # TODO: Ensure splitting/merging functions work for Blocked on GroupedDataFrame # mutated(gdf) # mutated(gdf_length) # @partitioned df gdf gdf_length cols kwargs begin # gdf = groupby(df, cols; kwargs...) 
# gdf_length = length(gdf) # end # gdf # # TODO: approximate -> sample and evaluate -> compute # # w.r.t. keys and axes, there are several things you need to know: # # - reuse of columns # # Create Future for result # # gdf = GroupedDataFrame() # # gdf_len = gdf.whole_len # # df_len = df.whole_len # # for (gpt, max_ngroups) in Grouped(gdf, ) # # partition(gdf, Distributed(), parent=df) # # @partitioned df gdf begin end # # when merging a GroupedDataFrame which must be pseudogrouped, # # vcat the parents and the groupindices and modify the cat-ed parents # # to have a column for the parent index and the gorup iindex within that parent # # and then do a group-by on this # # for writing to disk, just be sure to put everything into a dataframe such that it # # can be read back and have a column that specifies how to group by gdf end # GroupedDataFrame column manipulation function DataFrames.select(gdf::GroupedDataFrame, args...; kwargs...) get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) get(kwargs, :keepkeys, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must keep the grouping columns")) gdf_parent = gdf.parent groupcols = gdf.groupcols groupkwargs = gdf.groupkwargs res = Future() args = Future(args) kwargs = Future(kwargs) groupingkeys = Symbol.(names(sample(gdf_parent), collect(groupcols))) partitioned_using_modules("DataFrames") partitioned_using() do keep_sample_keys(if get(collect(kwargs), :keepkeys, true) groupingkeys else [] end, res, gdf_parent, drifted=true) keep_sample_rate(res, gdf_parent) end partitioned_with() do pt(gdf_parent, Grouped(gdf_parent, by=groupingkeys, scaled_by_same_as=res), match=res) pt(gdf, Blocked(along=1) & ScaledBySame(as=res)) pt(res, ScaledBySame(as=gdf_parent)) pt(gdf_parent, gdf, res, groupcols, groupkwargs, args, kwargs, Replicated()) end # partition(gdf, Replicated()) # partition(gdf_parent, Replicated()) # partition(res, Replicated()) # # TODO: Share sampled names if performance is impacted by repeatedly getting names # # allowedgroupingkeys = names(sample(gdf_parent), compute(groupcols)) # # allowedgroupingkeys = get(collect(groupkwargs), :sort, false) ? 
allowedgroupingkeys[1:1] : allowedgroupingkeys # # union!(sample(gdf_parent, :allowedgroupingkeys), allowedgroupingkeys) # if get(collect(kwargs), :keepkeys, true) # union!(sample(res, :allowedgroupingkeys), sample(gdf, :allowedgroupingkeys)) # end # for key in sample(gdf_parent, :allowedgroupingkeys) # setsample(res, :keystatistics, key, sample(gdf_parent, :keystatistics, key)) # for balanced in [true, false] # partition(gdf_parent, Grouped(;key=key, balanced=balanced)) # if get(collect(kwargs), :keepkeys, true) # partition(res, Partitioned(), match=gdf_parent) # else # partition(res, Blocked(dim=1), match=gdf_parent, on=["balanced", "id"]) # end # end # end # partition(gdf, Blocked(;dim=1), match=gdf_parent, on=["balanced", "id"]) # partition(groupcols, Replicated()) # partition(groupkwargs, Replicated()) # partition(args, Replicated()) # partition(kwargs, Replicated()) # # if kwargs[:ungroup] # # else # # res = GroupedDataFrame(gdf) # # res_nrows = res.nrows # # partition(gdf, Pseudogrouped()) # # partition(args, Replicated()) # # partition(kwargs, Replicated()) # # @partitioned gdf res res_nrows args kwargs begin # # res = select(gdf, args..., kwargs...) # # res_nrows = length(gdf_nrows) # # end # # end # mutated(res) @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res begin if !(gdf isa DataFrames.GroupedDataFrame) || gdf.parent != gdf_parent gdf = DataFrames.groupby(gdf_parent, groupcols; groupkwargs...) end res = DataFrames.select(gdf, args...; kwargs...) end DataFrame(res, copy(gdf_parent.nrows)) end function DataFrames.transform(gdf::GroupedDataFrame, args...; kwargs...) get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) get(kwargs, :keepkeys, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must keep the grouping columns")) gdf_parent = gdf.parent groupcols = gdf.groupcols groupkwargs = gdf.groupkwargs res = Future() args = Future(args) kwargs = Future(kwargs) # TODO: Put groupingkeys in GroupedDataFrame groupingkeys = Symbol.(names(sample(gdf_parent), collect(groupcols))) partitioned_using_modules("DataFrames") partitioned_using() do keep_sample_keys( get(collect(kwargs), :keepkeys, true) ? groupingkeys : [], res, gdf_parent, drifted=true ) keep_sample_rate(res, gdf_parent) end # TODO: Maybe automatically infer sample properties (set with # `partitioned_using`) by looking at the actual annotations in # `partitioned_with` partitioned_with() do pt(gdf_parent, Grouped(gdf_parent, by=groupingkeys, scaled_by_same_as=res), match=res) pt(gdf, Blocked(along=1) & ScaledBySame(as=res)) pt(res, ScaledBySame(as=gdf_parent)) pt(gdf_parent, gdf, res, groupcols, groupkwargs, args, kwargs, Replicated()) end @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res begin if !(gdf isa DataFrames.GroupedDataFrame) || gdf.parent != gdf_parent gdf = DataFrames.groupby(gdf_parent, groupcols; groupkwargs...) end res = DataFrames.transform(gdf, args...; kwargs...) end DataFrame(res, copy(gdf_parent.nrows)) end function DataFrames.combine(gdf::GroupedDataFrame, args...; kwargs...) 
get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) get(kwargs, :keepkeys, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must keep the grouping columns")) gdf_parent = gdf.parent groupcols = gdf.groupcols groupkwargs = gdf.groupkwargs res_nrows = Future() res = DataFrame(Future(), res_nrows) args = Future(args) kwargs = Future(kwargs) # TODO: Put groupingkeys in GroupedDataFrame groupingkeys = Symbol.(names(sample(gdf_parent), collect(groupcols))) partitioned_using_modules("DataFrames") partitioned_using() do keep_sample_keys( get(collect(kwargs), :keepkeys, true) ? groupingkeys : [], res, gdf_parent, drifted=true ) keep_sample_rate(res, gdf_parent) end partitioned_with() do # TODO: If we want to support `keepkeys=false`, we need to make the # result be Blocked and `filtered_from` the input pts_for_filtering(gdf_parent, res, with=Grouped, by=groupingkeys) pt(gdf, Blocked(along=1) & ScaledBySame(as=gdf_parent)) pt(res_nrows, Reducing(quote + end)) # TODO: Change to + if possible # pt(gdf_parent, res, gdf, res_nrows, groupcols, groupkwargs, args, kwargs, Replicated()) pt(gdf_parent, res, gdf, res_nrows, groupcols, groupkwargs, args, kwargs, Replicated()) end @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res res_nrows begin if !(gdf isa DataFrames.GroupedDataFrame) || gdf.parent != gdf_parent gdf = DataFrames.groupby(gdf_parent, groupcols; groupkwargs...) end res = DataFrames.combine(gdf, args...; kwargs...) res_nrows = DataFrames.nrow(res) end res end function DataFrames.subset(gdf::GroupedDataFrame, args...; kwargs...) get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) get(kwargs, :keepkeys, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must keep the grouping columns")) gdf_parent = gdf.parent groupcols = gdf.groupcols groupkwargs = gdf.groupkwargs res_nrows = Future() res = DataFrame(Future(), res_nrows) args = Future(args) kwargs = Future(kwargs) # TODO: Put groupingkeys in GroupedDataFrame groupingkeys = Symbol.(names(sample(gdf_parent), collect(groupcols))) partitioned_using_modules("DataFrames") partitioned_using() do keep_sample_keys( get(collect(kwargs), :keepkeys, true) ? groupingkeys : [], res, gdf_parent, drifted=true ) keep_sample_rate(res, gdf_parent) end partitioned_with() do pts_for_filtering(gdf_parent, res, with=Grouped, by=groupingkeys) pt(gdf, Blocked(along=1) & ScaledBySame(as=gdf_parent)) pt(res_nrows, Reducing(quote (a, b) -> a .+ b end)) pt(gdf_parent, res, gdf, res_nrows, groupcols, groupkwargs, args, kwargs, Replicated()) end @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res res_nrows begin if !(gdf isa DataFrames.GroupedDataFrame) || gdf.parent != gdf_parent gdf = DataFrames.groupby(gdf_parent, groupcols; groupkwargs...) end res = DataFrames.subset(gdf, args...; kwargs...) 
res_nrows = DataFrames.nrow(res) end res end # function transform(gdf::GroupedDataFrame) # get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) # get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) # gdf_parent = gdf.parent # groupcols = gdf.groupcols # groupkwargs = gdf.groupkwargs # res = Future() # args = Future(args) # kwargs = Future(kwargs) # partition(gdf, Replicated()) # partition(gdf_parent, Replicated()) # partition(res, Replicated()) # if get(collect(kwargs), :keepkeys, true) # union!(sample(res, :allowedgroupingkeys), sample(gdf, :allowedgroupingkeys)) # end # for key in sample(gdf_parent, :allowedgroupingkeys) # setsample(res, :keystatistics, key, sample(gdf_parent, :keystatistics, key)) # for balanced in [true, false] # partition(gdf_parent, Grouped(;key=key, balanced=balanced)) # if get(collect(kwargs), :keepkeys, true) # partition(res, Partitioned(), match=gdf_parent) # else # partition(res, Blocked(dim=1), match=gdf_parent, on=["balanced", "id"]) # end # end # end # partition(gdf, Blocked(;dim=1), match=gdf_parent, on=["balanced", "id"]) # partition(groupcols, Replicated()) # partition(groupkwargs, Replicated()) # partition(args, Replicated()) # partition(kwargs, Replicated()) # mutated(res) # @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res begin # if gdf.parent != gdf_parent # gdf = groupby(gdf_parent, groupcols; groupkwargs...) # end # res = transform(gdf, args...; kwargs...) # end # res # end # function combine(gdf::GroupedDataFrame) # get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) # get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) # gdf_parent = gdf.parent # groupcols = gdf.groupcols # groupkwargs = gdf.groupkwargs # res = Future() # args = Future(args) # kwargs = Future(kwargs) # partition(gdf, Replicated()) # partition(gdf_parent, Replicated()) # partition(res, Replicated()) # if get(collect(kwargs), :keepkeys, true) # union!(sample(res, :allowedgroupingkeys), sample(gdf, :allowedgroupingkeys)) # end # for key in sample(gdf_parent, :allowedgroupingkeys) # for balanced in [true, false] # partition(gdf_parent, Grouped(;key=key, balanced=balanced)) # if get(collect(kwargs), :keepkeys, true) # partition(res, Grouped(key=key, balanced=false, id="*"), match=gdf_parent, on="divisions") # else # partition(res, Blocked(dim=1, balanced=false, id="*")) # end # end # end # partition(gdf, Blocked(;dim=1), match=gdf_parent, on=["balanced", "id"]) # partition(groupcols, Replicated()) # partition(groupkwargs, Replicated()) # partition(args, Replicated()) # partition(kwargs, Replicated()) # # TODO: Allow for putting multiple variables that share a PT in a call to partition # mutated(res) # @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res begin # if gdf.parent != gdf_parent # gdf = groupby(gdf_parent, groupcols; groupkwargs...) # end # res = combine(gdf, args...; kwargs...) 
# end # res # end # # TODO: Implement filter using some framework for having references by keeping # # track of the lineage of which code regions produced which # function subset(gdf::GroupedDataFrame) # get(kwargs, :ungroup, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes must produce dataframes")) # get(kwargs, :copycols, true) || throw(ArgumentError("Select/transform/combine/subset operations on grouped dataframes cannot return a view")) # gdf_parent = gdf.parent # groupcols = gdf.groupcols # groupkwargs = gdf.groupkwargs # res = Future() # args = Future(args) # kwargs = Future(kwargs) # partition(gdf, Replicated()) # partition(gdf_parent, Replicated()) # partition(res, Replicated()) # if get(collect(kwargs), :keepkeys, true) # union!(sample(res, :allowedgroupingkeys), sample(gdf, :allowedgroupingkeys)) # end # for key in sample(gdf_parent, :allowedgroupingkeys) # for balanced in [true, false] # partition(gdf_parent, Grouped(;key=key, balanced=balanced)) # if get(collect(kwargs), :keepkeys, true) # partition(res, Grouped(key=key, balanced=false, id="*"), match=gdf_parent, on="divisions") # else # partition(res, Blocked(dim=1, balanced=false, id="*")) # end # end # end # partition(gdf, Blocked(;dim=1), match=gdf_parent, on=["balanced", "id"]) # partition(groupcols, Replicated()) # partition(groupkwargs, Replicated()) # partition(args, Replicated()) # partition(kwargs, Replicated()) # mutated(res) # @partitioned gdf gdf_parent groupcols groupkwargs args kwargs res begin # if gdf.parent != gdf_parent # gdf = groupby(gdf_parent, groupcols; groupkwargs...) # end # res = subset(gdf, args...; kwargs...) # end # res # end
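# The annotated methods above mirror the DataFrames.jl grouped-frame API, so user code
# reads like its non-distributed counterpart. A hedged sketch: the column names are
# hypothetical, `df` is assumed to already be a Banyan `DataFrame`, and results would
# be gathered with the package's usual collection mechanism.
using Statistics: mean

gdf = groupby(df, :species)                                # sort defaults to true, as required here

counts = combine(gdf, nrow => :count)                      # per-group row counts
scaled = transform(gdf, :weight => mean => :mean_weight)   # adds the group mean to every row
light  = subset(gdf, :weight => w -> w .< mean(w))         # keep rows below their group mean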
# # Correctness Tests
#

if VERSION >= v"0.5.0-"
    using Base.Test
else
    using BaseTestNext
    const Test = BaseTestNext
end

using XMLconvert
using BioMedQuery.Processes
using BioMedQuery.Entrez
using BioMedQuery.Entrez.DB
using BioMedQuery.DBUtils
using BioMedQuery.UMLS
using MySQL
using SQLite
using DataStreams

my_tests = [("dbutils_sqlite.jl",   " Testing: DBUtils SQLite"),
            ("dbutils_mysql.jl",    " Testing: DBUtils MySQL"),
            ("entrez.jl",           " Testing: ENTREZ"),
            ("entrez_types.jl",     " Testing: Entrez Types"),
            ("ct.jl",               " Testing: CLINICAL TRIALS"),
            ("umls.jl",             " Testing: UMLS"),
            ("processes_mysql.jl",  " Testing: Processes MySQL"),
            ("processes_sqlite.jl", " Testing: Processes SQLite"),
            ("export_citations.jl", " Testing: Export Citations")]

println("Running tests:")

for (my_test, test_string) in my_tests
    println("-----------------------------------------")
    println("-----------------------------------------")
    println(test_string)
    println("-----------------------------------------")
    println("-----------------------------------------")
    include(my_test)
end