Dataset schema (one row per file):

  licenses      sequence of strings (1–3 entries)
  version       string (636 distinct values)
  tree_hash     string (40 chars)
  path          string (5–135 chars)
  type          string (2 distinct values: "code" or "docs")
  size          string (2–8 chars)
  text          string (25 chars – 67.1M)
  package_name  string (2–41 chars)
  repo          string (33–86 chars)
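Each row pairs one file from a registered Julia package with its package metadata. For illustration, the first row below expressed as a Julia NamedTuple (values copied verbatim from that row; the `text` field is truncated here for display):

row = (
    licenses     = ["Apache-2.0"],
    version      = "0.1.0",
    tree_hash    = "5dd50891df13013c7551fd92b1f37f4cdac9976b",
    path         = "examples/sst_loaded_from_torch.jl",
    type         = "code",
    size         = "3218",
    text         = "using BERT using Knet ...",
    package_name = "BERT",
    repo         = "https://github.com/OsmanMutlu/BERT.jl.git",
)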
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
examples/sst_loaded_from_torch.jl
code
3218
using BERT
using Knet
import Base: length, iterate
using Random
using CSV
using PyCall

VOCABFILE = "bert-base-uncased-vocab.txt"
NUM_CLASSES = 2

token2int = Dict()
open(VOCABFILE) do file
    for (i, line) in enumerate(readlines(file))
        token2int[line] = i
    end
end
int2token = Dict(value => key for (key, value) in token2int)
VOCABSIZE = length(token2int)

mutable struct ClassificationData2
    input_ids
    input_mask
    segment_ids
    labels
    batchsize
    ninstances
    shuffled
end

function ClassificationData2(input_file; batchsize=8, shuffled=true, seq_len=64)
    input_ids = []
    input_mask = []
    segment_ids = []
    labels = []
    f = open(input_file)
    tmp = split.(readlines(f), "\t")
    close(f)
    for i in 1:length(tmp)
        instance = eval.(Meta.parse.(tmp[i]))
        push!(input_ids, (instance[1] .+ 1)[1:seq_len])
        push!(input_mask, instance[2][1:seq_len])
        push!(segment_ids, (instance[3] .+ 1)[1:seq_len])
        push!(labels, (instance[4] + 1))
    end
    ninstances = length(input_ids)
    return ClassificationData2(input_ids, input_mask, segment_ids, labels, batchsize, ninstances, shuffled)
end

function length(d::ClassificationData2)
    q, r = divrem(d.ninstances, d.batchsize)  # renamed from `d` to avoid shadowing the argument
    return r == 0 ? q : q + 1
end

function iterate(d::ClassificationData2, state=ifelse(d.shuffled, randperm(d.ninstances), 1:d.ninstances))
    state === nothing && return nothing
    if length(state) > d.batchsize
        new_state = state[d.batchsize+1:end]
        input_ids = hcat(d.input_ids[state[1:d.batchsize]]...)
        input_mask = hcat(d.input_mask[state[1:d.batchsize]]...)
        segment_ids = hcat(d.segment_ids[state[1:d.batchsize]]...)
        labels = hcat(d.labels[state[1:d.batchsize]]...)
    else
        new_state = nothing
        input_ids = hcat(d.input_ids[state]...)
        input_mask = hcat(d.input_mask[state]...)
        segment_ids = hcat(d.segment_ids[state]...)
        labels = hcat(d.labels[state]...)
    end
    return ((input_ids, input_mask, segment_ids, labels), new_state)
end

# BertConfig fields: embedding size, vocab size, intermediate hidden size, max sequence length,
# sequence length, num of segments, num of attention heads, num of encoders in stack,
# batch size, matrix type, general dropout rate, attention dropout rate, activation function
config = BertConfig(768, 30522, 3072, 512, 64, 2, 12, 12, 8, KnetArray{Float32}, 0.1, 0.1, "gelu")
dtst = ClassificationData2("../project/sst-test.tsv", batchsize=config.batchsize, seq_len=config.seq_len)

model = BertClassification(config, NUM_CLASSES)
@pyimport torch
torch_model = torch.load("../project/model-64-32.pt")
model = load_from_torch_classification(model, config.num_encoder, config.atype, torch_model)

function accuracy2(model, dtst)
    true_count = 0
    all_count = 0
    for (x, attention_mask, segment_ids, y) in dtst
        probs = model(x, segment_ids, attention_mask=attention_mask)
        preds = map(x -> x[1], argmax(Array{Float32}(probs), dims=1))
        true_count += sum(y .== preds)
        all_count += length(y)
    end
    return true_count / all_count
end

result = accuracy2(model, dtst)
println("Test accuracy is: $result")
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
examples/train.jl
code
398
using BERT

config = BertConfig(128, 30022, 256, 512, 4, 2, 8, 2, 3, Array{Float32}, 0.1, 0.1, "relu")
model = BertPreTraining(config)

x = [213 234 7789; 712 9182 8912; 7812 12 432; 12389 1823 8483]  # 4x3: seq_len x batchsize
segment_ids = [1 1 1; 1 2 1; 1 2 1; 1 1 1]
mlm_labels = [-1 234 -1; -1 -1 8912; -1 -1 -1; 12389 -1 -1]
nsp_labels = [1, 2, 1]

loss = model(x, segment_ids, mlm_labels, nsp_labels)
println(loss)
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
src/BERT.jl
code
337
module BERT

export BertPreTraining, BertClassification, BertConfig,
       load_from_torch_base, load_from_torch_pretraining, load_from_torch_classification,
       BertAdam, bert_tokenize

using Knet, SpecialFunctions, LinearAlgebra

include("model.jl")
include("optimizer.jl")
include("preprocess.jl")

end # module
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
src/model.jl
code
19559
# import Base: *
# import Knet: getindex, setindex!

# Matmuls 2d and 3d arrays
# function *(a::AbstractArray{T,2}, b::AbstractArray{T,3}) where T<:Real
#     b_sizes = size(b)
#     a = a * reshape(b, b_sizes[1], :)
#     return reshape(a, :, b_sizes[2:end]...)
# end

# Matmuls 2d and 3d arrays for KnetArrays
# function *(a::KnetArray{T,2}, b::KnetArray{T,3}) where T<:Real
#     b_sizes = size(b)
#     a = a * reshape(b, b_sizes[1], :)
#     return reshape(a, :, b_sizes[2:end]...)
# end

# TODO: Since backprop doesn't work with this new import, we define matmul23 as a composite
# of primitives that AutoGrad can take derivatives of. A primitive derivative of this
# function would speed things up.
# function matmul23(a::KnetArray{T,2}, b::KnetArray{T,3}) where T<:Real
#     b_sizes = size(b)
#     a = a * reshape(b, b_sizes[1], :)
#     return reshape(a, :, b_sizes[2:end]...)
# end

gelu(x) = x .* 0.5 .* (1.0 .+ erf.(x ./ sqrt(2.0)))

# Multiplies a 2d matrix with a 3d tensor by flattening the tensor's trailing dims.
function matmul23(a, b)
    b_sizes = size(b)
    a = a * reshape(b, b_sizes[1], :)
    return reshape(a, :, b_sizes[2:end]...)
end

# Wrote these first, then realized we don't need them. Might come in handy later.
# function matmul23(a::AbstractArray{T,2}, b::AbstractArray{T,3}) where T<:Real
#     b_sizes = size(b)
#     a = a * reshape(b, b_sizes[1], :)
#     return reshape(a, :, b_sizes[2:end]...)
# end
# function matmul23(a::Param{KnetArray{T,2}}, b::KnetArray{T,3}) where T<:Real
#     matmul23(value(a), b)
# end
# function matmul23(a::Param{AbstractArray{T,2}}, b::AbstractArray{T,3}) where T<:Real
#     matmul23(value(a), b)
# end
# function matmul23(a::Param{KnetArray{T,2}}, b::AutoGrad.Result{KnetArray{T,3}}) where T<:Real
#     matmul23(value(a), value(b))
# end
# function matmul23(a::Param{AbstractArray{T,2}}, b::AutoGrad.Result{AbstractArray{T,3}}) where T<:Real
#     matmul23(value(a), value(b))
# end
# @primitive *(x1::KnetArray{T,2},x2::KnetArray{T,3}),dy

# Not using this anymore
# function getindex(A::KnetArray{Float32,3}, ::Colon, I::Real, ::Colon)
#     sizes = size(A)
#     A = reshape(A, :, sizes[3])
#     return A[(I-1)*sizes[1]+1:I*sizes[1],:]
#     # reshape(A, :, size(A,3))[(I-1)*size(A,1)+1:I*size(A,1),:]
# end

# Does not work
# function setindex!(A::KnetArray{Float32,3}, v, ::Colon, I::Real, ::Colon)
#     A = reshape(A, :, size(A,3))
#     # setindex!(A, v, (I-1)*size(A,1)+1:I*size(A,1), ::Colon)
#     A[(I-1)*size(A,1)+1:I*size(A,1),:] = v
# end

# std doesn't work!
std2(a, μ, ϵ) = sqrt.(Knet.mean(abs2.(a .- μ), dims=1) .+ ϵ)

# Legend
# V -> Vocab size, E -> Embedding size, S -> Sequence length, B -> Batch size
# H -> head_size, N -> num_heads

abstract type Layer end

# MAYBE TODO: sin-cos positionwise embeddings. This would reduce model size by max_seq_len * E

mutable struct Embedding <: Layer
    w
end

Embedding(vocabsize::Int, embed::Int; atype=Array{Float32}) = Embedding(param(embed, vocabsize, atype=atype))

function (e::Embedding)(x)
    e.w[:,x]
end

# If we need 0's as pads
#=
struct SegmentEmbedding <: Layer
    w
    atype
end

SegmentEmbedding(vocabsize::Int, embed::Int; atype=Array{Float32}) = SegmentEmbedding(param(embed, vocabsize, atype=atype), atype)

function (e::SegmentEmbedding)(x)
    x != 0 ? e.w[:,x] : e.atype(zeros(size(e.w,1)))
end
=#

mutable struct Linear <: Layer
    w
    b
end

Linear(input_size::Int, output_size::Int; atype=Array{Float32}) =
    Linear(param(output_size, input_size, atype=atype), param0(output_size, atype=atype))

function (l::Linear)(x)
    return l.w * x .+ l.b
end

mutable struct Linear3D <: Layer
    w
    b
end

Linear3D(input_size::Int, output_size::Int; atype=Array{Float32}) =
    Linear3D(param(output_size, input_size, atype=atype), param0(output_size, atype=atype))

function (l::Linear3D)(x)
    return matmul23(l.w, x) .+ l.b
end

# Absolutely no difference between Dense and Linear, except Dense has dropout and an activation function.
mutable struct Dense <: Layer
    linear
    pdrop
    func
end

function Dense(input_size::Int, output_size::Int; pdrop=0.0, func=identity, atype=Array{Float32}, threeD=false)
    if threeD
        return Dense(Linear3D(input_size, output_size, atype=atype), pdrop, func)
    else
        return Dense(Linear(input_size, output_size, atype=atype), pdrop, func)
    end
end

function (a::Dense)(x)
    return a.func.(dropout(a.linear(x), a.pdrop))
end

mutable struct LayerNormalization <: Layer
    γ
    β
    ϵ
end

LayerNormalization(hidden_size::Int; epsilon=1e-12, atype=Array{Float32}) =
    LayerNormalization(Param(atype(ones(hidden_size))), param0(hidden_size, atype=atype), epsilon)

function (n::LayerNormalization)(x)
    μ = Knet.mean(x, dims=1)
    x = (x .- μ) ./ std2(x, μ, n.ϵ)  # corrected=false
    return n.γ .* x .+ n.β
end

mutable struct EmbedLayer <: Layer
    wordpiece::Embedding
    positional::Embedding
    # segment::SegmentEmbedding
    segment::Embedding
    layer_norm::LayerNormalization
    seq_len::Int
    pdrop
end

function EmbedLayer(config)
    wordpiece = Embedding(config.vocab_size, config.embed_size, atype=config.atype)
    positional = Embedding(config.max_seq_len, config.embed_size, atype=config.atype)
    # segment = SegmentEmbedding(config.num_segment, config.embed_size, atype=config.atype)
    segment = Embedding(config.num_segment, config.embed_size, atype=config.atype)
    layer_norm = LayerNormalization(config.embed_size, atype=config.atype)
    return EmbedLayer(wordpiece, positional, segment, layer_norm, config.seq_len, config.pdrop)
end

# segment_ids are SxB, containing 1 or 2, or 0 in case of pads.
function (e::EmbedLayer)(x, segment_ids)
    x = e.wordpiece(x)
    positions = zeros(Int64, e.seq_len, size(x,3)) .+ collect(1:e.seq_len)  # size(x,3) is batchsize. Resulting matrix is SxB
    x = x .+ e.positional(positions)
    # x .+= reshape(hcat(e.segment.(segment_ids)...), (:, size(segment_ids,1), size(segment_ids,2)))
    x = x .+ e.segment(segment_ids)
    x = e.layer_norm(x)
    return dropout(x, e.pdrop)
end

function divide_to_heads(x, num_heads, head_size, seq_len)
    x = reshape(x, (head_size, num_heads, seq_len, :))
    x = permutedims(x, (1,3,2,4))
    return reshape(x, (head_size, seq_len, :))  # Reshape to 3D so bmm can handle it.
end

mutable struct SelfAttention <: Layer
    query::Linear3D  # N*H x E
    key::Linear3D
    value::Linear3D
    linear::Linear3D
    num_heads::Int
    seq_len::Int
    embed_size::Int
    head_size::Int
    head_size_sqrt::Int
    attention_pdrop
    pdrop
end

function SelfAttention(config)
    config.embed_size % config.num_heads != 0 && throw("Embed size should be divisible by number of heads!")
    head_size = Int(config.embed_size / config.num_heads)
    head_size_sqrt = Int(sqrt(head_size))
    head_size_sqrt * head_size_sqrt != head_size && throw("Square root of head size should be an integer!")
    query = Linear3D(config.embed_size, head_size*config.num_heads, atype=config.atype)  # H*N is always equal to E
    key = Linear3D(config.embed_size, head_size*config.num_heads, atype=config.atype)
    value = Linear3D(config.embed_size, head_size*config.num_heads, atype=config.atype)
    linear = Linear3D(config.embed_size, config.embed_size, atype=config.atype)
    return SelfAttention(query, key, value, linear, config.num_heads, config.seq_len,
                         config.embed_size, head_size, head_size_sqrt,
                         config.attention_pdrop, config.pdrop)
end

function (s::SelfAttention)(x, attention_mask)
    # We use a colon for the batch dimension, in case of batches smaller than batchsize.
    # x is ExSxB
    query = divide_to_heads(s.query(x), s.num_heads, s.head_size, s.seq_len)  # H x S x N*B
    key = divide_to_heads(s.key(x), s.num_heads, s.head_size, s.seq_len)
    value = divide_to_heads(s.value(x), s.num_heads, s.head_size, s.seq_len)

    # Scaled dot product attention
    query = bmm(permutedims(key, (2,1,3)), query)
    query = query ./ s.head_size_sqrt  # Scale down. head_size_sqrt is precomputed to avoid taking sqrt every forward pass.
    # Masking: first reshape to 4d, then add the mask, then reshape back to 3d.
    query = reshape(reshape(query, (s.seq_len, s.seq_len, s.num_heads, :)) .+ attention_mask, (s.seq_len, s.seq_len, :))
    query = Knet.softmax(query, dims=1)
    query = dropout(query, s.attention_pdrop)
    query = bmm(value, query)
    query = permutedims(reshape(query, (s.head_size, s.seq_len, s.num_heads, :)), (1,3,2,4))
    query = reshape(query, (s.embed_size, s.seq_len, :))  # Concat
    return dropout(s.linear(query), s.pdrop)  # Linear transformation at the end. In the pytorch version dropout is after layer_norm!
end

mutable struct FeedForward <: Layer
    dense::Dense
    linear::Linear3D
    pdrop
end

function FeedForward(config)
    dense = Dense(config.embed_size, config.ff_hidden_size,
                  func=eval(Meta.parse(config.func)), atype=config.atype, threeD=true)
    linear = Linear3D(config.ff_hidden_size, config.embed_size, atype=config.atype)
    return FeedForward(dense, linear, config.pdrop)
end

function (f::FeedForward)(x)
    x = f.dense(x)
    return dropout(f.linear(x), f.pdrop)
end

mutable struct Encoder <: Layer
    self_attention::SelfAttention
    layer_norm1::LayerNormalization
    feed_forward::FeedForward
    layer_norm2::LayerNormalization
end

function Encoder(config)
    return Encoder(SelfAttention(config),
                   LayerNormalization(config.embed_size, atype=config.atype),
                   FeedForward(config),
                   LayerNormalization(config.embed_size, atype=config.atype))
end

function (e::Encoder)(x, attention_mask)
    x = e.layer_norm1(x .+ e.self_attention(x, attention_mask))
    return e.layer_norm2(x .+ e.feed_forward(x))
end

mutable struct Bert <: Layer
    embed_layer::EmbedLayer
    encoder_stack
    atype
end

function Bert(config)
    embed_layer = EmbedLayer(config)
    encoder_stack = Encoder[]
    for _ in 1:config.num_encoder
        push!(encoder_stack, Encoder(config))
    end
    return Bert(embed_layer, encoder_stack, config.atype)
end

# x and segment_ids are SxB integers
function (b::Bert)(x, segment_ids; attention_mask=nothing)
    # Init attention_mask if it's not given
    attention_mask = attention_mask === nothing ? ones(size(x)) : attention_mask
    attention_mask = reshape(attention_mask, (size(attention_mask,1), 1, 1, size(attention_mask,2)))  # Make it 4d
    attention_mask = (1 .- attention_mask) .* -10000.0  # If the integer was 0, it is now masking.
    attention_mask = b.atype(attention_mask)
    x = b.embed_layer(x, segment_ids)
    for encoder in b.encoder_stack
        x = encoder(x, attention_mask)
    end
    return x
end

mutable struct Pooler <: Layer
    linear::Linear
end

Pooler(embed_size::Int; atype=Array{Float32}) = Pooler(Linear(embed_size, embed_size, atype=atype))

function (p::Pooler)(x)
    # TODO: Gave up on a getindex method for 3D matrices because I could not figure out
    # how to write setindex! for backprop.
    # x = reshape(x, :, size(x,3))
    # return tanh.(p.linear(x[:,1,:]))
    # Use only the CLS token. Returns ExB.
    return tanh.(p.linear(reshape(x, :, size(x,3))[1:size(x,1),:]))
end

mutable struct NSPHead <: Layer
    linear::Linear
end

NSPHead(embed_size::Int; atype=Array{Float32}) = NSPHead(Linear(embed_size, 2, atype=atype))

(n::NSPHead)(x) = n.linear(x)

mutable struct MLMHead <: Layer
    dense::Dense
    layer_norm::LayerNormalization
    linear::Linear3D
end

function MLMHead(config, embedding_matrix)
    dense = Dense(config.embed_size, config.embed_size,
                  func=eval(Meta.parse(config.func)), pdrop=0.0, atype=config.atype, threeD=true)
    layer_norm = LayerNormalization(config.embed_size, atype=config.atype)
    linear = Linear3D(config.embed_size, config.vocab_size, atype=config.atype)
    # TODO: Make this a shared weight
    # linear.w = permutedims(embedding_matrix, (2,1))
    return MLMHead(dense, layer_norm, linear)
end

function (m::MLMHead)(x)
    x = m.dense(x)
    x = m.layer_norm(x)
    return m.linear(x)
end

mutable struct BertPreTraining <: Layer
    bert::Bert
    pooler::Pooler
    nsp::NSPHead
    mlm::MLMHead
end

function BertPreTraining(config)
    bert = Bert(config)
    pooler = Pooler(config.embed_size, atype=config.atype)
    nsp = NSPHead(config.embed_size, atype=config.atype)
    mlm = MLMHead(config, bert.embed_layer.wordpiece.w)  # TODO: Don't forget about the embedding matrix
    return BertPreTraining(bert, pooler, nsp, mlm)
end

# We do not need a predictor, since this is only for pretraining
function (b::BertPreTraining)(x, segment_ids, mlm_labels, nsp_labels; attention_mask=nothing)
    # mlm_labels are SxB, so we just flatten them.
    x = b.bert(x, segment_ids, attention_mask=attention_mask)
    nsp_preds = b.nsp(b.pooler(x))  # 2xB
    mlm_preds = b.mlm(x)  # VxSxB
    mlm_preds = reshape(mlm_preds, size(mlm_preds, 1), :)  # VxS*B
    nsp_loss = nll(nsp_preds, nsp_labels)
    mlm_labels = reshape(mlm_labels, :)  # S*B
    mlm_loss = nll(mlm_preds[:, mlm_labels .!= -1], mlm_labels[mlm_labels .!= -1])
    return mlm_loss + nsp_loss
end

function (b::BertPreTraining)(dtrn)
    lvals = []
    for (x, attention_mask, segment_ids, mlm_labels, nsp_labels) in dtrn
        push!(lvals, b(x, segment_ids, mlm_labels, nsp_labels, attention_mask=attention_mask))
    end
    return Knet.mean(lvals)
end

mutable struct BertClassification <: Layer
    bert::Bert
    pooler::Pooler
    linear::Linear
    pdrop
end

function BertClassification(config, num_of_classes)
    bert = Bert(config)
    pooler = Pooler(config.embed_size, atype=config.atype)
    linear = Linear(config.embed_size, num_of_classes, atype=config.atype)
    return BertClassification(bert, pooler, linear, config.pdrop)
end

function (b::BertClassification)(x, segment_ids; attention_mask=nothing)
    x = b.bert(x, segment_ids, attention_mask=attention_mask)
    x = dropout(b.pooler(x), b.pdrop)  # ExB
    return b.linear(x)
end

function (b::BertClassification)(x, segment_ids, y; attention_mask=nothing)
    return nll(b(x, segment_ids, attention_mask=attention_mask), y)
end

function (b::BertClassification)(dtrn)
    lvals = []
    for (x, attention_mask, segment_ids, y) in dtrn
        push!(lvals, b(x, segment_ids, y, attention_mask=attention_mask))
    end
    return Knet.mean(lvals)
end

mutable struct BertConfig
    embed_size::Int
    vocab_size::Int
    ff_hidden_size::Int
    max_seq_len::Int
    seq_len::Int
    num_segment::Int
    num_heads::Int
    num_encoder::Int
    batchsize::Int
    atype
    pdrop
    attention_pdrop
    func
end

function load_from_torch_base(model, num_encoder, atype, torch_model)
    # Embed Layer
    model.bert.embed_layer.wordpiece.w = Param(atype(permutedims(torch_model["bert.embeddings.word_embeddings.weight"][:cpu]()[:numpy](), (2,1))))
    model.bert.embed_layer.positional.w = Param(atype(permutedims(torch_model["bert.embeddings.position_embeddings.weight"][:cpu]()[:numpy](), (2,1))))
    model.bert.embed_layer.segment.w = Param(atype(permutedims(torch_model["bert.embeddings.token_type_embeddings.weight"][:cpu]()[:numpy](), (2,1))))
    model.bert.embed_layer.layer_norm.γ = Param(atype(torch_model["bert.embeddings.LayerNorm.gamma"][:cpu]()[:numpy]()))
    model.bert.embed_layer.layer_norm.β = Param(atype(torch_model["bert.embeddings.LayerNorm.beta"][:cpu]()[:numpy]()))

    # Encoder Stack
    for i in 1:num_encoder
        model.bert.encoder_stack[i].self_attention.query.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.query.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.query.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.query.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.key.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.key.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.key.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.key.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.value.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.value.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.value.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.self.value.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.linear.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.dense.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].self_attention.linear.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.dense.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm1.γ = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.LayerNorm.gamma"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm1.β = Param(atype(torch_model["bert.encoder.layer.$(i-1).attention.output.LayerNorm.beta"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.dense.linear.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).intermediate.dense.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.dense.linear.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).intermediate.dense.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.linear.w = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.dense.weight"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].feed_forward.linear.b = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.dense.bias"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm2.γ = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.LayerNorm.gamma"][:cpu]()[:numpy]()))
        model.bert.encoder_stack[i].layer_norm2.β = Param(atype(torch_model["bert.encoder.layer.$(i-1).output.LayerNorm.beta"][:cpu]()[:numpy]()))
    end

    # Pooler
    model.pooler.linear.w = Param(atype(torch_model["bert.pooler.dense.weight"][:cpu]()[:numpy]()))
    model.pooler.linear.b = Param(atype(torch_model["bert.pooler.dense.bias"][:cpu]()[:numpy]()))

    return model
end

function load_from_torch_pretraining(model, num_encoder, atype, torch_model)
    model = load_from_torch_base(model, num_encoder, atype, torch_model)

    # NSP Head
    model.nsp.linear.w = Param(atype(torch_model["cls.seq_relationship.weight"][:cpu]()[:numpy]()))
    model.nsp.linear.b = Param(atype(torch_model["cls.seq_relationship.bias"][:cpu]()[:numpy]()))

    # MLM Head
    model.mlm.dense.linear.w = Param(atype(torch_model["cls.predictions.transform.dense.weight"][:cpu]()[:numpy]()))
    model.mlm.dense.linear.b = Param(atype(torch_model["cls.predictions.transform.dense.bias"][:cpu]()[:numpy]()))
    model.mlm.layer_norm.γ = Param(atype(torch_model["cls.predictions.transform.LayerNorm.gamma"][:cpu]()[:numpy]()))
    model.mlm.layer_norm.β = Param(atype(torch_model["cls.predictions.transform.LayerNorm.beta"][:cpu]()[:numpy]()))
    model.mlm.linear.w = Param(atype(torch_model["cls.predictions.decoder.weight"][:cpu]()[:numpy]()))
    model.mlm.linear.b = Param(atype(torch_model["cls.predictions.bias"][:cpu]()[:numpy]()))

    return model
end

function load_from_torch_classification(model, num_encoder, atype, torch_model)
    model = load_from_torch_base(model, num_encoder, atype, torch_model)
    model.linear.w = Param(atype(torch_model["classifier.weight"][:cpu]()[:numpy]()))
    model.linear.b = Param(atype(torch_model["classifier.bias"][:cpu]()[:numpy]()))
    return model
end
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
src/optimizer.jl
code
1751
import Knet: update!

warmup_cosine(x, warmup=0.002)   = x < warmup ? x/warmup : 0.5 * (1.0 + cos(π * x))
warmup_constant(x, warmup=0.002) = x < warmup ? x/warmup : 1.0
warmup_linear(x, warmup=0.002)   = x < warmup ? x/warmup : 1.0 - x

mutable struct BertAdam
    lr::AbstractFloat
    beta1::AbstractFloat
    beta2::AbstractFloat
    eps::AbstractFloat
    t::Int
    gclip::AbstractFloat
    fstm
    scndm
    w_decay_rate::AbstractFloat
    schedule
    warmup
    t_total
end

BertAdam(; lr=0.001, gclip=1.0, beta1=0.9, beta2=0.999, eps=1e-6, w_decay_rate=0.0,
           schedule="warmup_linear", warmup=-1, t_total=-1) =
    BertAdam(lr, beta1, beta2, eps, 0, gclip, nothing, nothing, w_decay_rate, schedule, warmup, t_total)

for T in (Array{Float32}, Array{Float64}, KnetArray{Float32}, KnetArray{Float64})
    @eval begin
        function update!(w::$T, g::$T, p::BertAdam)
            Knet.gclip!(g, p.gclip)
            if p.fstm === nothing
                p.fstm = zero(w)
                p.scndm = zero(w)
            end
            lmul!(p.beta1, p.fstm)
            axpy!(1 - p.beta1, g, p.fstm)
            lmul!(p.beta2, p.scndm)
            axpy!(1 - p.beta2, g .* g, p.scndm)
            # They don't do bias correction for some reason
            # fstm_corrected = p.fstm / (1 - p.beta1 ^ p.t)
            # scndm_corrected = p.scndm / (1 - p.beta2 ^ p.t)
            if p.t_total !== -1
                schedule_func = eval(Meta.parse(p.schedule))
                lr_scheduled = p.lr * schedule_func(p.t/p.t_total, p.warmup)
            else
                lr_scheduled = p.lr
            end
            if p.w_decay_rate > 0.0
                axpy!(-lr_scheduled, (p.fstm ./ (sqrt.(p.scndm) .+ p.eps)) .+ (p.w_decay_rate * w), w)
            else
                axpy!(-lr_scheduled, (p.fstm ./ (sqrt.(p.scndm) .+ p.eps)), w)
            end
            p.t += 1
        end
    end
end
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
src/preprocess.jl
code
1689
function wordpiece_tokenize(token, dict)
    # This is a longest-match-first algorithm.
    out_tokens = []
    start = 1
    while start <= length(token)
        finish = length(token)
        final_token = ""
        for i in finish:-1:start
            # String indexing can error for an unknown reason; might be because of unicode chars.
            tkn = try
                start == 1 ? token[start:i] : string("##", token[start:i])
            catch
                ""
            end
            if tkn in keys(dict)
                final_token = tkn
                finish = i
                break
            end
        end
        if final_token == ""  # if there is no match at all, assign the unk token
            return ["[UNK]"]
        end
        push!(out_tokens, final_token)
        start = finish + 1
    end
    return out_tokens
end

function process_punc(tokens)
    out_tokens = []
    for token in tokens
        out = []
        str = ""
        for (i, char) in enumerate(token)
            if ispunct(char)
                str != "" && push!(out, str)
                str = ""
                push!(out, string(char))
            else
                str = string(str, char)
            end
        end
        str != "" && push!(out, str)
        append!(out_tokens, out)
    end
    return out_tokens
end

function bert_tokenize(text, dict; lower_case=true)
    text = strip(text)
    text == "" && return []
    if lower_case
        text = lowercase(text)
    end
    tokens = split(text)
    tokens = process_punc(tokens)
    out_tokens = []
    for token in tokens
        append!(out_tokens, wordpiece_tokenize(token, dict))
    end
    return out_tokens
end
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "Apache-2.0" ]
0.1.0
5dd50891df13013c7551fd92b1f37f4cdac9976b
README.md
docs
197
# BERT-in-Knet

This repo is the final project for the Comp541 Deep Learning class at Koc University. It is a replication attempt of the original https://github.com/google-research/bert in Knet.
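A minimal usage sketch of the exported API (the vocabulary file name, input sentence, and config values below are illustrative, not fixed by the package):

```julia
using BERT

# Build a wordpiece vocabulary lookup and tokenize a sentence
# (assumes a local copy of the BERT base uncased vocab file)
token2int = Dict(line => i for (i, line) in enumerate(readlines("bert-base-uncased-vocab.txt")))
tokens = bert_tokenize("julia is fast!", token2int)

# BertConfig(embed, vocab, ff_hidden, max_seq_len, seq_len, segments, heads,
#            encoders, batchsize, atype, pdrop, attention_pdrop, activation);
# Array{Float32} keeps this on the CPU, KnetArray{Float32} targets the GPU
config = BertConfig(768, 30522, 3072, 512, 64, 2, 12, 12, 8, Array{Float32}, 0.1, 0.1, "gelu")
model = BertClassification(config, 2)  # two-class classifier head
```

See `examples/train.jl` and `examples/sst_loaded_from_torch.jl` for complete pretraining and classification scripts.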
BERT
https://github.com/OsmanMutlu/BERT.jl.git
[ "MIT" ]
0.1.6
ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e
src/OpenIDConnect.jl
code
9780
module OpenIDConnect

using HTTP
using JSON
using MbedTLS
using Base64
using Random
using JWTs

const DEFAULT_SCOPES = ["openid", "profile", "email"]
const DEFAULT_STATE_TIMEOUT_SECS = 60
const DEFAULT_SKEW_SECS = 2*60
const STATE_PURGE_TRIGGER = 1024
const DEFAULT_KEY_REFRESH_SECS = 60*60

export OIDCCtx, flow_request_authorization_code, flow_get_authorization_code,
       flow_get_token, flow_validate_id_token, flow_refresh_token

"""
Holds an OpenID Connect context that can be used in subsequent OpenID request flows.
The context holds request states and configuration options.
"""
struct OIDCCtx
    states::Dict{String,Float64}
    state_timeout_secs::Int
    allowed_skew_secs::Int
    openid_config::Dict{String,Any}
    http_tls_opts::Dict{Symbol,Any}
    validator::JWKSet
    key_refresh_secs::Int
    last_key_refresh::Float64
    client_id::String
    client_secret::String
    scopes::Vector{String}
    redirect_uri::String
    random_device::RandomDevice

    function OIDCCtx(issuer::String, redirect_uri::String, client_id::String, client_secret::String,
                     scopes::Vector{String}=DEFAULT_SCOPES;
                     verify::Union{Nothing,Bool}=nothing,
                     cacrt::Union{Nothing,String,MbedTLS.CRT}=nothing,
                     state_timeout_secs::Int=DEFAULT_STATE_TIMEOUT_SECS,
                     allowed_skew_secs::Int=DEFAULT_SKEW_SECS,
                     key_refresh_secs::Int=DEFAULT_KEY_REFRESH_SECS,
                     random_device::RandomDevice=RandomDevice())
        endswith(issuer, "/") || (issuer = issuer * "/")
        openid_config_url = issuer * ".well-known/openid-configuration"

        http_tls_opts = Dict{Symbol,Any}()
        http_tls_opts[:socket_type_tls] = MbedTLS.SSLContext
        if verify !== nothing
            http_tls_opts[:require_ssl_verification] = verify
        end
        if cacrt !== nothing
            if isa(cacrt, String)
                cacrt = isfile(cacrt) ? MbedTLS.crt_parse_file(cacrt) : MbedTLS.crt_parse(cacrt)
            end
            conf = MbedTLS.SSLConfig(verify === nothing || verify)
            MbedTLS.ca_chain!(conf, cacrt)
            http_tls_opts[:sslconfig] = conf
        end

        # fetch and store the openid config, along with the additional args for SSL
        openid_config = JSON.parse(String(HTTP.request("GET", openid_config_url; status_exception=true, http_tls_opts...).body))
        validator = JWKSet(openid_config["jwks_uri"])

        new(Dict{String,Float64}(), state_timeout_secs, allowed_skew_secs, openid_config,
            http_tls_opts, validator, key_refresh_secs, 0.0,
            client_id, client_secret, scopes, redirect_uri, random_device)
    end
end

authorization_endpoint(ctx::OIDCCtx) = ctx.openid_config["authorization_endpoint"]
token_endpoint(ctx::OIDCCtx) = ctx.openid_config["token_endpoint"]

function remember_state(ctx::OIDCCtx, state::String)
    ctx.states[state] = time()
    nothing
end

function validate_state(ctx::OIDCCtx, state::String)
    statestore = ctx.states
    if state in keys(statestore)
        t = statestore[state]
        delete!(statestore, state)
        if (time() - t) <= ctx.state_timeout_secs
            return true
        end
    end
    @info("encountered an unknown or expired state")
    if length(statestore) > STATE_PURGE_TRIGGER
        purge_states!(ctx)
    end
    false
end

# Drop expired states; keep states still within the timeout.
# (Renamed to purge_states! to match the call site above; the original predicate
# kept expired entries instead of removing them.)
function purge_states!(ctx::OIDCCtx)
    tnow = time()
    tmout = ctx.state_timeout_secs
    filter!(nv -> (tnow - nv[2]) <= tmout, ctx.states)
    nothing
end

"""
API calling error detected by this library
"""
struct APIError
    error::String
end

"""
Error returned from the OpenID server.
See section 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html
"""
struct AuthServerError
    error::String
    error_description::Union{Nothing,String}
    error_uri::Union{Nothing,String}
end

"""
Authentication request. Uses the authorization code flow.
Acceptable optional args as listed in section 3.1.2.1 of the specification
(https://openid.net/specs/openid-connect-core-1_0.html).

Returns a String with the redirect URL. The caller must perform the redirection.
"""
function flow_request_authorization_code(ctx::OIDCCtx; nonce=nothing, display=nothing, prompt=nothing,
                                         max_age=nothing, ui_locales=nothing, id_token_hint=nothing,
                                         login_hint=nothing, acr_values=nothing)
    @debug("oidc negotiation: initiating...")
    scopes = join(ctx.scopes, ' ')
    state = randstring(ctx.random_device, 10)
    remember_state(ctx, state)
    query = Dict("response_type" => "code",
                 "client_id" => ctx.client_id,
                 "redirect_uri" => ctx.redirect_uri,
                 "scope" => scopes,
                 "state" => state)
    (nonce === nothing)         || (query["nonce"] = String(nonce))
    (display === nothing)       || (query["display"] = String(display))
    (prompt === nothing)        || (query["prompt"] = String(prompt))
    (max_age === nothing)       || (query["max_age"] = String(max_age))
    (ui_locales === nothing)    || (query["ui_locales"] = String(ui_locales))
    (id_token_hint === nothing) || (query["id_token_hint"] = String(id_token_hint))
    (login_hint === nothing)    || (query["login_hint"] = String(login_hint))
    (acr_values === nothing)    || (query["acr_values"] = String(acr_values))
    uri = HTTP.URIs.URI(HTTP.URIs.URI(authorization_endpoint(ctx)); query=query)
    return string(uri)
end

"""
Given the params from the redirected response to the authentication request, extract the authorization code.
See sections 3.1.2.5 and 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html.

Returns the authorization code on success.
Returns an APIError or AuthServerError on failure.
"""
function flow_get_authorization_code(ctx::OIDCCtx, @nospecialize(query))
    state = get(query, "state", get(query, :state, nothing))
    if state === nothing
        return APIError("invalid request, no state found")
    end
    # validate_state returns a Bool; the original compared it to `nothing`,
    # which never triggered the invalid-state branch
    if !validate_state(ctx, String(state))
        return APIError("invalid or expired state")
    end
    code = get(query, "code", get(query, :code, nothing))
    if code !== nothing
        return String(code)
    end
    errcode = get(query, "error", nothing)
    if errcode !== nothing
        return AuthServerError(errcode, get(query, "error_description", nothing), get(query, "error_uri", nothing))
    end
    return APIError("invalid request, no code or error found")
end

function parse_token_response(tok_res)
    @info("oidc: received response from token endpoint")
    resp_str = String(tok_res.body)
    if tok_res.status == 200
        return JSON.parse(resp_str)
    end
    try
        err_resp = JSON.parse(resp_str)
        errcode = get(err_resp, "error", nothing)
        if errcode !== nothing
            return AuthServerError(errcode, get(err_resp, "error_description", nothing), get(err_resp, "error_uri", nothing))
        end
    catch
        return APIError("unknown response from server: " * resp_str)
    end
end

"""
Token Request. Given the authorization code obtained, invoke the token endpoint
and obtain an id_token, access_token, and refresh_token.
See section 3.1.3.1 of https://openid.net/specs/openid-connect-core-1_0.html.

Returns a JSON object containing tokens on success.
Returns an AuthServerError or APIError object on failure.
"""
function flow_get_token(ctx::OIDCCtx, code)
    data = Dict("grant_type" => "authorization_code",
                "code" => String(code),
                "redirect_uri" => ctx.redirect_uri,
                "client_id" => ctx.client_id,
                "client_secret" => ctx.client_secret)
    headers = Dict("Content-Type" => "application/x-www-form-urlencoded")
    tok_res = HTTP.request("POST", token_endpoint(ctx), headers, HTTP.URIs.escapeuri(data);
                           status_exception=false, ctx.http_tls_opts...)
    return parse_token_response(tok_res)
end

"""
Token Refresh. Given the refresh token obtained, invoke the token endpoint and obtain new tokens.
See section 12 of https://openid.net/specs/openid-connect-core-1_0.html.

Returns a JSON object containing tokens on success.
Returns an AuthServerError or APIError object on failure.
"""
function flow_refresh_token(ctx::OIDCCtx, refresh_token)
    data = Dict("grant_type" => "refresh_token",
                "refresh_token" => String(refresh_token),
                "client_id" => ctx.client_id,
                "client_secret" => ctx.client_secret)
    headers = Dict("Content-Type" => "application/x-www-form-urlencoded")
    tok_res = HTTP.request("POST", token_endpoint(ctx), headers, HTTP.URIs.escapeuri(data);
                           status_exception=false, ctx.http_tls_opts...)
    return parse_token_response(tok_res)
end

"""
Validate an OIDC token. Validates both the structure and the signature.
See section 3.1.3.7 of https://openid.net/specs/openid-connect-core-1_0.html
"""
flow_validate_id_token(ctx::OIDCCtx, id_token) = flow_validate_id_token(ctx, JWT(; jwt=String(id_token)))

function flow_validate_id_token(ctx::OIDCCtx, jwt::JWT)
    isvalid = false
    if issigned(jwt)
        try
            tokclaims = claims(jwt)
            issue_time = tokclaims["iat"] - ctx.allowed_skew_secs
            expiry_time = tokclaims["exp"] + ctx.allowed_skew_secs
            isvalid = issue_time <= round(Int, time()) <= expiry_time
        catch ex
            @info("invalid token format ($ex)")
        end
        if isvalid
            validator = ctx.validator
            if (time() - ctx.last_key_refresh) >= ctx.key_refresh_secs
                jstr = String(HTTP.get(ctx.validator.url; ctx.http_tls_opts...).body)
                keys = JSON.parse(jstr)["keys"]
                keysetdict = Dict{String,JWK}()
                refresh!(keys, keysetdict)
                validator.keys = keysetdict
            end
            isvalid = validate!(jwt, validator)
        end
    end
    return isvalid
end

end # module
OpenIDConnect
https://github.com/tanmaykm/OpenIDConnect.jl.git
[ "MIT" ]
0.1.6
ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e
test/runtests.jl
code
3577
using OpenIDConnect
using Test
using Random
using HTTP

function test_state_store()
    @testset "State store" begin
        ctx = OIDCCtx("https://accounts.google.com", "http://127.0.0.1:8888/auth/login",
                      "test_client_id", "test_client_secret"; state_timeout_secs=5)
        state = randstring(10)
        OpenIDConnect.remember_state(ctx, state)
        @test length(ctx.states) == 1
        @test OpenIDConnect.validate_state(ctx, state)
        sleep(10)
        @info("expecting an invalid state")
        @test !OpenIDConnect.validate_state(ctx, state)
        @test length(ctx.states) == 0
        nothing
    end
end

function test_oidc_flow()
    @testset "OIDC flow" begin
        ctx = OIDCCtx("https://accounts.google.com", "http://127.0.0.1:8888/auth/login",
                      "test_client_id", "test_client_secret"; state_timeout_secs=5)
        @test OpenIDConnect.authorization_endpoint(ctx) == "https://accounts.google.com/o/oauth2/v2/auth"
        @test OpenIDConnect.token_endpoint(ctx) == "https://oauth2.googleapis.com/token"

        # flow request authorization code
        uri_string = flow_request_authorization_code(ctx)
        uri = HTTP.URIs.URI(uri_string)
        @test uri.host == "accounts.google.com"
        query = HTTP.URIs.queryparams(uri)
        @test get(query, "client_id", "") == "test_client_id"
        @test get(query, "redirect_uri", "") == "http://127.0.0.1:8888/auth/login"
        @test get(query, "scope", "") == "openid profile email"
        @test get(query, "response_type", "") == "code"
        @test !isempty(get(query, "state", ""))

        uri_string = flow_request_authorization_code(ctx; nonce="test_nonce", display="test_display",
            prompt="test_prompt", max_age="12345", ui_locales="en", id_token_hint="test_id_tok_hint",
            login_hint="test_login_hint", acr_values="test_acr")
        uri = HTTP.URIs.URI(uri_string)
        @test uri.host == "accounts.google.com"
        query = HTTP.URIs.queryparams(uri)
        @test get(query, "client_id", "") == "test_client_id"
        @test get(query, "redirect_uri", "") == "http://127.0.0.1:8888/auth/login"
        @test get(query, "scope", "") == "openid profile email"
        @test get(query, "response_type", "") == "code"
        @test !isempty(get(query, "state", ""))
        @test get(query, "nonce", "") == "test_nonce"
        @test get(query, "display", "") == "test_display"
        @test get(query, "prompt", "") == "test_prompt"
        @test get(query, "max_age", "") == "12345"
        @test get(query, "ui_locales", "") == "en"
        @test get(query, "id_token_hint", "") == "test_id_tok_hint"
        @test get(query, "login_hint", "") == "test_login_hint"
        @test get(query, "acr_values", "") == "test_acr"

        # flow get authorization code
        @test isa(flow_get_authorization_code(ctx, Dict()), OpenIDConnect.APIError)
        @info("expecting an invalid state")
        @test isa(flow_get_authorization_code(ctx, Dict("state"=>"teststate")), OpenIDConnect.APIError)
        OpenIDConnect.remember_state(ctx, "teststate")
        @test isa(flow_get_authorization_code(ctx, Dict("state"=>"teststate")), OpenIDConnect.APIError)  # valid state, but no code or error
        OpenIDConnect.remember_state(ctx, "teststate")  # validate_state consumes the state, so remember it again
        @test isa(flow_get_authorization_code(ctx, Dict("state"=>"teststate", "error"=>"testerror")), OpenIDConnect.AuthServerError)
        OpenIDConnect.remember_state(ctx, "teststate")
        @test "testcode" == flow_get_authorization_code(ctx, Dict("state"=>"teststate", "code"=>"testcode"))
    end
end

@testset "OpenIDConnect" begin
    test_state_store()
    test_oidc_flow()
end
OpenIDConnect
https://github.com/tanmaykm/OpenIDConnect.jl.git
[ "MIT" ]
0.1.6
ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e
tools/oidc_standalone.jl
code
3736
using Mux
using HTTP
using JSON
using OpenIDConnect
using JWTs

headers(req) = req[:headers]
query(req) = parse_query(req[:query])

function parse_query(qstr)
    res = Dict{String,String}()
    for qsub in split(qstr, '&')
        nv = split(qsub, '=')
        res[nv[1]] = length(nv) > 1 ? nv[2] : ""
    end
    res
end

function pretty(j)
    iob = IOBuffer()
    JSON.print(iob, j, 4)
    String(take!(iob))
end

function login(oidcctx::OIDCCtx)
    openid_config = oidcctx.openid_config
    issuer = openid_config["issuer"]
    openid_config_url = issuer * ".well-known/openid-configuration"

    """
    <html><head>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/oidc-client/1.5.1/oidc-client.js"></script>
    <script>
    var settings = {
        issuer: '$issuer',
        authority: '$openid_config_url',
        metadata: {
            issuer: '$issuer',
            authorization_endpoint: '$(openid_config["authorization_endpoint"])',
            userinfo_endpoint: '$(openid_config["token_endpoint"])',
            jwks_uri: '$(openid_config["jwks_uri"])',
        },
        client_id: '$(oidcctx.client_id)',
        redirect_uri: 'http://127.0.0.1:8888/auth/login',
        response_type: 'code',
        scope: 'openid email profile offline_access'
    };
    var mgr = new Oidc.UserManager(settings);
    var user = mgr.signinRedirect();
    </script>
    </head><body></body></html>
    """
end

function show_token(oidcctx::OIDCCtx, authresp, authenticated)
    id_token = authresp["id_token"]
    jwt = JWT(; jwt=id_token)
    isvalid = flow_validate_id_token(oidcctx, string(jwt))
    token_claims = claims(jwt)
    jbox_auth = Dict("Authorization" => ("Bearer " * id_token))
    authenticated[] = true
    can_refresh = "refresh_token" in keys(authresp)
    refresh_link = can_refresh ?
        """<hr/><a href="/auth/refresh?refresh_token=$(authresp["refresh_token"])">Refresh</a>""" : ""

    """<html><body>
    OpenID Authentication:
    <pre>$(pretty(authresp))</pre><hr/>
    JWT Token:
    <pre>$(pretty(token_claims))</pre><hr/>
    Authentication Bearer Token:
    <pre>$(pretty(jbox_auth))</pre><hr/>
    Validation success: $isvalid
    $(refresh_link)
    </body></html>"""
end

function token(oidcctx::OIDCCtx, req, authenticated)
    resp = query(req)
    code = resp["code"]
    authresp = flow_get_token(oidcctx, code)
    show_token(oidcctx, authresp, authenticated)
end

function refresh(oidcctx::OIDCCtx, req, authenticated)
    resp = query(req)
    refresh_token = resp["refresh_token"]
    authresp = flow_refresh_token(oidcctx, refresh_token)
    show_token(oidcctx, authresp, authenticated)
end

function main()
    if length(ARGS) != 1
        println("Usage: julia oidc_standalone.jl <configuration_file>")
        exit(1)
    end

    config = open(ARGS[1]) do f
        JSON.parse(f)
    end
    oidcctx = OIDCCtx(String(config["issuer"]), "http://127.0.0.1:8888/auth/login",
                      String(config["client_id"]), String(config["client_secret"]),
                      ["openid", "email", "profile", "offline_access"])
    authenticated = Ref(false)

    @app test = (
        Mux.defaults,
        page("/", req -> login(oidcctx)),
        page("/auth/login", req -> token(oidcctx, req, authenticated)),
        page("/auth/refresh", req -> refresh(oidcctx, req, authenticated)),
        Mux.notfound())

    @info("Standalone OIDC test server starting on port 8888")
    serve(test, 8888)

    while config["do_refresh"] || !(authenticated[])
        sleep(10)
    end
    sleep(10)
end

main()
OpenIDConnect
https://github.com/tanmaykm/OpenIDConnect.jl.git
[ "MIT" ]
0.1.6
ac6f8ac979a738a894d1a0a5777d6b8e43f0e94e
README.md
docs
5233
# OpenIDConnect

[![Build Status](https://github.com/tanmaykm/OpenIDConnect.jl/workflows/CI/badge.svg)](https://github.com/tanmaykm/OpenIDConnect.jl/actions?query=workflow%3ACI+branch%3Amaster)
[![codecov.io](http://codecov.io/github/tanmaykm/OpenIDConnect.jl/coverage.svg?branch=master)](http://codecov.io/github/tanmaykm/OpenIDConnect.jl?branch=master)

[OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.html) is a simple identity layer on top of the OAuth 2.0 protocol. It enables Clients to verify the identity of the End-User based on the authentication performed by an Authorization Server, as well as to obtain basic profile information about the End-User in an interoperable and REST-like manner.

This is an implementation of OpenID Connect in Julia, with methods implementing the authorization code flow.

# OpenID Connect Context (OIDCCtx)

The OpenID Connect context holds all states for a single OpenID Connect client configuration.

```julia
function OIDCCtx(
    issuer::String,
    redirect_uri::String,
    client_id::String,
    client_secret::String,
    scopes::Vector{String}=DEFAULT_SCOPES;
    verify::Union{Nothing,Bool}=nothing,
    cacrt::Union{Nothing,String,MbedTLS.CRT}=nothing,
    state_timeout_secs::Int=DEFAULT_STATE_TIMEOUT_SECS,
    allowed_skew_secs::Int=DEFAULT_SKEW_SECS,
    key_refresh_secs::Int=DEFAULT_KEY_REFRESH_SECS,
    random_device::RandomDevice=RandomDevice()
)
```

Parameters:

- `issuer`: Issuer URL, pointing to the OpenID server
- `redirect_uri`: The app URI to which the OpenID server must redirect after authorization
- `client_id` and `client_secret`: Client ID and secret that this context represents
- `scopes`: The scopes to request during authorization (default: openid, profile, email)

Keyword Parameters:

- `verify`: whether to validate the server certificate
- `cacrt`: the CA certificate to use to check the server certificate
- `state_timeout_secs`: seconds for which to keep the state associated with an authorization request (default: 60 seconds); server responses beyond this are rejected as stale
- `allowed_skew_secs`: while validating tokens, seconds to allow for time skew between machines (default: 120 seconds)
- `key_refresh_secs`: interval at which to refresh the JWT signing keys (default: 1 hour)

# Error Structures

- `OpenIDConnect.APIError`: Error detected at the client side. Members:
    - `error`: error code or message (String)
- `OpenIDConnect.AuthServerError`: Error returned from the OpenID server (see section 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html). Members:
    - `error`: error code (String)
    - `error_description`: optional error description (String)
    - `error_uri`: optional error URI (String)

# Authorization Code Flow

## Authentication Request

### `flow_request_authorization_code`

Returns a String with the redirect URL. The caller must perform the redirection. Acceptable optional args are listed in section 3.1.2.1 of the specification (https://openid.net/specs/openid-connect-core-1_0.html).

```julia
function flow_request_authorization_code(
    ctx::OIDCCtx;
    nonce=nothing,
    display=nothing,
    prompt=nothing,
    max_age=nothing,
    ui_locales=nothing,
    id_token_hint=nothing,
    login_hint=nothing,
    acr_values=nothing
)
```

### `flow_get_authorization_code`

Given the params from the redirected response to the authentication request, extract the authorization code. See sections 3.1.2.5 and 3.1.2.6 of https://openid.net/specs/openid-connect-core-1_0.html. Returns the authorization code on success. Returns an APIError or AuthServerError on failure.

```julia
function flow_get_authorization_code(
    ctx::OIDCCtx,
    query    # name-value pair Dict with the query parameters received from the OpenID server redirect
)
```

## Token Requests

### `flow_get_token`

Token Request. Given the authorization code obtained, invoke the token endpoint and obtain an id_token, access_token, and refresh_token. See section 3.1.3.1 of https://openid.net/specs/openid-connect-core-1_0.html. Returns a JSON object containing tokens on success. Returns an AuthServerError or APIError object on failure.

```julia
function flow_get_token(
    ctx::OIDCCtx,
    code
)
```

### `flow_refresh_token`

Token Refresh. Given the refresh token obtained, invoke the token endpoint and obtain new tokens. See section 12 of https://openid.net/specs/openid-connect-core-1_0.html. Returns a JSON object containing tokens on success. Returns an AuthServerError or APIError object on failure.

```julia
function flow_refresh_token(
    ctx::OIDCCtx,
    refresh_token
)
```

## Token Validation

### `flow_validate_id_token`

Validate an OIDC token. Validates both the structure and the signature. See section 3.1.3.7 of https://openid.net/specs/openid-connect-core-1_0.html.

```julia
function flow_validate_id_token(
    ctx::OIDCCtx,
    id_token::Union{JWTs.JWT, String}
)
```

# Examples

An example application built using OpenIDConnect with Mux and HTTP is available as a [tool](tools/oidc_standalone.jl). Populate a configuration file following this [template](tools/settings.template) and start the standalone application. Point your browser to it to experience the complete flow.
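For a condensed view, here is a sketch of the full flow (the issuer and client credentials are placeholders, `query` stands for the parsed query parameters your web framework hands to your redirect handler, and error handling is elided):

```julia
using OpenIDConnect

ctx = OIDCCtx("https://accounts.google.com", "http://127.0.0.1:8888/auth/login",
              "my_client_id", "my_client_secret")

# 1. Redirect the user's browser to this URL (e.g. via an HTTP 302 from your app)
redirect_url = flow_request_authorization_code(ctx)

# 2. In the redirect handler, extract the authorization code from the query params
code = flow_get_authorization_code(ctx, query)

# 3. Exchange the code for tokens and validate the id_token structure and signature
tokens = flow_get_token(ctx, code)
flow_validate_id_token(ctx, tokens["id_token"]) || error("invalid id_token")
```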
OpenIDConnect
https://github.com/tanmaykm/OpenIDConnect.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmark.jl
code
514
using SciMLBenchmarks

target = ARGS[1]
if isdir(target)
    if !isfile(joinpath(target, "Project.toml"))
        error("Cannot benchmark folder $(target) without Project.toml!")
    end
    println("Benchmarking the $(target) folder")
    SciMLBenchmarks.weave_folder(target)
elseif isfile(target)
    folder = dirname(target)
    file = basename(target)
    println("Benchmarking $(folder)/$(file)")
    SciMLBenchmarks.weave_file(folder, file)
else
    error("Unable to find benchmarking target $(target)!")
end
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
.github/workflows/update.jl
code
2208
using Pkg
Pkg.add(["Git", "GitHub", "Dates"])
using Git, GitHub, Dates

gh_token = ARGS[1]
myauth = GitHub.authenticate(gh_token)
(@isdefined myauth) ? @info("Authentication token is found...") : @info("Couldn't find the authentication token")

const git = Git.git()
date = Dates.format(now(), "yyyy-mm-dd")
benchpath = joinpath(@__DIR__, "..", "..", "benchmarks")

# Get all the open PRs and their numbers
gh_prs = GitHub.pull_requests("SciML/SciMLBenchmarks.jl"; auth=myauth)
prs = Dict{String, Int64}()
for i in 1:length(gh_prs[1])
    prs[gh_prs[1][i].head.ref] = gh_prs[1][i].number
end

# Get all the branches from the repo
gh_branches = GitHub.branches("SciML/SciMLBenchmarks.jl"; auth=myauth)
branches = [gh_branches[1][i].name for i in 1:length(gh_branches[1])]
@info("PRs and branches", prs, branches)

for dir in readdir(benchpath)
    model_dir = joinpath(benchpath, dir)
    isdir(model_dir) || continue
    println("--- Inspecting $dir ---")
    cd(model_dir)
    Pkg.activate(".") do
        Pkg.update()
    end
    manpath = joinpath(benchpath, "Manifest.toml")
    # use the Git.jl-provided `$git` binary consistently (the original mixed it with a bare `git`)
    if length(readlines(`$git status . --porcelain`)) > 1
        if dir ∉ branches
            run(`$git checkout -b $(dir) master`)
            run(`$git add -A . :!$(manpath)`)
            run(`$git commit -m "Updated $(dir) on $(date)"`)
            run(`$git push --set-upstream origin $(dir)`)
        else
            run(`$git fetch origin`)
            run(`$git checkout $(dir)`)
            run(`$git pull -Xours`)
            run(`$git add -A . :!$(manpath)`)
            run(`$git commit -m "Updated $(dir) on $(date)"`)
            run(`$git push`)
        end
        if dir ∉ keys(prs)
            params = Dict(
                "title" => "Updated $(dir) for benchmarks",
                "head" => "$(dir)",
                "base" => "master"
            )
            @info("Creating a pull request from head: ", dir)
            GitHub.create_pull_request("SciML/SciMLBenchmarks.jl"; params=params, auth=myauth)
        else
            @info("Updating the pull request numbered: ", prs[dir])
            GitHub.update_pull_request("SciML/SciMLBenchmarks.jl", prs[dir]; auth=myauth)
        end
    end
end
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MethodOfLinesPDE/MOLxPDESystemLibrary.jl
code
8742
""" --- title: Burgers FDM Work-Precision Diagrams with Various MethodOfLines Methods author: Alex Jones --- This benchmark is for the MethodOfLines.jl package, which is an automatic PDE discretization package. It is concerned with comparing the performance of various discretization methods for the Burgers equation. """ using MethodOfLines, DomainSets, OrdinaryDiffEq, ModelingToolkit, DiffEqDevTools, LinearAlgebra, LinearSolve, Plots using PDESystemLibrary """ Next we define some functions to generate approproiate discretizations for the PDESystemLibrary systems. """ function center_uniform_grid(ex, ivs, N) map(ivs) do x xdomain = ex.domain[findfirst(d -> isequal(x, d.variables), ex.domain)] x => (supremum(xdomain.domain) - infimum(xdomain.domain)) / (floor(N^(1 / length(ivs))) - 1) end end function edge_uniform_grid(ex, ivs, N) map(ivs) do x xdomain = ex.domain[findfirst(d -> isequal(x, d.variables), ex.domain)] x => (supremum(xdomain.domain) - infimum(xdomain.domain)) / (floor(N^(1 / length(ivs)))) end end function center_chebygrid(ex, ivs, N) map(ivs) do x xdomain = ex.domain[findfirst(d -> isequal(x, d.variables), ex.domain)] x => chebyspace(xdomain, trunc(Int, N^(1 / length(ivs)))) end end function edge_chebygrid(ex, ivs, N) map(ivs) do x xdomain = ex.domain[findfirst(d -> isequal(x, d.variables), ex.domain)] x => chebyspace(xdomain, trunc(Int, N^(1 / length(ivs))) - 1) end end function uniformupwind1(ex, ivs, t, N) dxs = center_uniform_grid(ex, ivs, N) MOLFiniteDifference(dxs, t, advection_scheme=UpwindScheme()) end function uniformupwind2(ex, ivs, t, N) dxs = edge_uniform_grid(ex, ivs, N) MOLFiniteDifference(dxs, t, advection_scheme=UpwindScheme(), grid_align=edge_align) end function chebyupwind1(ex, ivs, t, N) dxs = center_chebygrid(ex, ivs, N) MOLFiniteDifference(dxs, t, advection_scheme=UpwindScheme()) end function chebyupwind2(ex, ivs, t, N) dxs = edge_chebygrid(ex, ivs, N) MOLFiniteDifference(dxs, t, advection_scheme=UpwindScheme(), grid_align=edge_align) end function discweno1(ex, ivs, t, N) dxs = center_uniform_grid(ex, ivs, N) MOLFiniteDifference(dxs, t, advection_scheme=WENOScheme()) end function discweno2(ex, ivs, t, N) dxs = edge_uniform_grid(ex, ivs, N) MOLFiniteDifference(dxs, t, advection_scheme=WENOScheme(), grid_align=edge_align) end """ This script tests all systems in PDESystemLibrary against different MethodOfLines.jl discretizations. 
""" N = 100 for ex in PSL.all_systems try if ex.analytic_func === nothing continue end ivs = filter(x -> !isequal(Symbol(x), :t), ex.ivs) if length(ivs) == 0 continue elseif length(ivs) == length(ex.ivs) # Skip nonlinear systems until I know the syntax for that continue # advection = false # discuu1 = uniformupwind1(ex, ivs, nothing, N) # discuu2 = uniformupwind2(ex, ivs, nothing, N) # discnu1 = chebyupwind1(ex, ivs, nothing, N) # discnu2 = chebyupwind2(ex, ivs, nothing, N) # discs = [discuu1, discuu2, discnu1, discnu2] # if "Advection" in ex.metadata # advection = true # discw1 = discweno1(ex, ivs, nothing, N) # discw2 = discweno2(ex, ivs, nothing, N) # push!(discs, discw1, discw2) # end # probs = map(discs) do disc # discretize(ex, disc, analytic = ex.analytic_func) # end # title = "Work Precision Diagram for $(ex.name), Tags: $(ex.metadata)" # println("Running $title") # if advection # dummy_appxsol = [nothing for i in 1:length(probs1)] # abstols = 1.0 ./ 10.0 .^ (5:8) # reltols = 1.0 ./ 10.0 .^ (1:4); # setups = [Dict(:alg => solver, :prob_choice => 1), # Dict(:alg => solver, :prob_choice => 2), # Dict(:alg => solver, :prob_choice => 3), # Dict(:alg => solver, :prob_choice => 4), # Dict(:alg => solver, :prob_choice => 5), # Dict(:alg => solver, :prob_choice => 6),] # names = ["Uniform Upwind, center_align", "Uniform Upwind, edge_align", # "Chebyshev Upwind, center_align", "Chebyshev Upwind, edge_align", # "Uniform WENO, center_align", "Uniform WENO, edge_align"]; # wp = WorkPrecisionSet(probs, abstols, reltols, setups; names=names, # save_everystep=false, appxsol = dummy_appxsol, maxiters=Int(1e5), # numruns=10, wrap=Val(false)) # plot(wp, title=title) # else # dummy_appxsol = [nothing for i in 1:length(probs)] # abstols = 1.0 ./ 10.0 .^ (5:8) # reltols = 1.0 ./ 10.0 .^ (1:4); # setups = [Dict(:alg => solver, :prob_choice => 1), # Dict(:alg => solver, :prob_choice => 2), # Dict(:alg => solver, :prob_choice => 3), # Dict(:alg => solver, :prob_choice => 4),] # names = ["Uniform Upwind, center_align", "Uniform Upwind, edge_align", # "Chebyshev Upwind, center_align", "Chebyshev Upwind, edge_align"]; # wp = WorkPrecisionSet(probs1, abstols, reltols, setups; names=names, # save_everystep=false, appxsol = dummy_appxsol, maxiters=Int(1e5), # numruns=10, wrap=Val(false)) # plot(wp, title=title) # end else @parameters t # Create discretizations advection = false discuu1 = uniformupwind1(ex, ivs, t, N) discuu2 = uniformupwind2(ex, ivs, t, N) discnu1 = chebyupwind1(ex, ivs, t, N) discnu2 = chebyupwind2(ex, ivs, t, N) discs = [discuu1, discuu2, discnu1, discnu2] if "Advection" in ex.metadata advection = true discw1 = discweno1(ex, ivs, t, N) discw2 = discweno2(ex, ivs, t, N) push!(discs, discw1, discw2) end # Create problems probs = map(discs) do disc discretize(ex, disc, analytic = ex.analytic_func) end title = "Work Precision Diagram for $(ex.name), Tags: $(ex.metadata)" println("Running $title") if advection dummy_appxsol = [nothing for i in 1:length(probs1)] abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg => solver, :prob_choice => 1), Dict(:alg => solver, :prob_choice => 2), Dict(:alg => solver, :prob_choice => 3), Dict(:alg => solver, :prob_choice => 4), Dict(:alg => solver, :prob_choice => 5), Dict(:alg => solver, :prob_choice => 6),] names = ["Uniform Upwind, center_align", "Uniform Upwind, edge_align", "Chebyshev Upwind, center_align", "Chebyshev Upwind, edge_align", "Uniform WENO, center_align", "Uniform WENO, edge_align"]; wp = WorkPrecisionSet(probs, 
abstols, reltols, setups; names=names, save_everystep=false, appxsol = dummy_appxsol, maxiters=Int(1e5), numruns=10, wrap=Val(false)) plot(wp, title=title) else dummy_appxsol = [nothing for i in 1:length(probs)] abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg => solver, :prob_choice => 1), Dict(:alg => solver, :prob_choice => 2), Dict(:alg => solver, :prob_choice => 3), Dict(:alg => solver, :prob_choice => 4),] names = ["Uniform, center_align", "Uniform, edge_align", "Chebyshev, center_align", "Chebyshev, edge_align"]; wp = WorkPrecisionSet(probs1, abstols, reltols, setups; names=names, save_everystep=false, appxsol = dummy_appxsol, maxiters=Int(1e5), numruns=10, wrap=Val(false)) plot(wp, title=title) end end catch e println("Failed on $(ex.name):") println(e) end end
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
docs/make.jl
code
544
using Documenter, SciMLBenchmarksOutput

dir = @__DIR__() * "/.."
@show dir
@show readdir(dir)

include("pages.jl")

makedocs(
    sitename="The SciML Benchmarks",
    authors="Chris Rackauckas",
    modules=[SciMLBenchmarksOutput],
    clean=true,
    doctest=false,
    format=Documenter.HTML(#analytics = "UA-90474609-3",
        assets=["assets/favicon.ico"],
        canonical="https://benchmarks.sciml.ai/stable/"),
    pages=pages
)

deploydocs(;
    repo="github.com/SciML/SciMLBenchmarksOutput",
    devbranch="main",
    branch="main"
)
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
docs/pages.jl
code
2937
# This file assumes `dir` is the directory for the package!
dir = @__DIR__() * "/.."

cp(joinpath(dir, "markdown"), joinpath(dir, "docs", "src"), force=true)
cp(joinpath(dir, "docs", "extrasrc", "assets"), joinpath(dir, "docs", "src", "assets"), force=true)
cp(joinpath(dir, "README.md"), joinpath(dir, "docs", "src", "index.md"), force=true)

benchmarksdir = joinpath(dir, "docs", "src")
@show readdir(benchmarksdir)

pages = Any["SciMLBenchmarks.jl: Benchmarks for Scientific Machine Learning (SciML), Equation Solvers, and AI for Science" => "index.md"]

for folder in readdir(benchmarksdir)
    newpages = Any[]
    if folder[end-2:end] != ".md" && folder != "Testing" && folder != "figures" && folder != "assets"
        for file in filter(x -> x[end-2:end] == ".md", readdir(joinpath(benchmarksdir, folder)))
            try
                filecontents = readlines(joinpath(benchmarksdir, folder, file))
                title = filecontents[3][9:end-1]
                # Cut out the first lines from the file to remove the Weave header stuff
                open(joinpath(benchmarksdir, folder, file), "w") do output
                    println(output, "# $title")
                    for line in Iterators.drop(filecontents, 4)
                        println(output, line)
                    end
                end
                push!(newpages, title => joinpath(folder, file))
            catch e
                @show folder, file, e
            end
        end
        push!(pages, folder => newpages)
    end
end

# The result is in alphabetical order; permute to the wanted order
permute!(pages, [1, 10, 13, 19, 4, 5, 9, 6, 11, 14, 20, 12, 18, 7, 17, 3, 8, 15, 16, 2])

names = [
    "SciMLBenchmarks.jl: Benchmarks for Scientific Machine Learning (SciML) and Equation Solvers",
    "Multi-Language Wrapper Benchmarks",
    "Non-Stiff Ordinary Differential Equations",
    "Stiff Ordinary Differential Equations",
    "Biological Differential Equations",
    "Differential-Algebraic Equations (DAEs)",
    "Method of Lines Partial Differential Equations (PDEs)",
    "Dynamical ODEs (Hamiltonian and Second Order)",
    "N-Body Problem Benchmarks",
    "Non-Stiff Stochastic Differential Equations",
    "Stiff Stochastic Differential Equations",
    "Non-Stiff Delay Differential Equations",
    "Stiff Delay Differential Equations",
    "Jump Process Equations (Gillespie Benchmarks)",
    "Parameter Estimation and Inverse Problem Benchmarks",
    "Bayesian Inference and Probabilistic Inverse Problem Benchmarks",
    "MethodOfLines.jl Partial Differential Equation (PDE) Formulations",
    "Physics-Informed Neural Network (Neural Network PDE Solver) Cost Function Benchmarks",
    "Physics-Informed Neural Network (Neural Network PDE Solver) Optimizer Benchmarks",
    "SDE Adaptivity Benchmarks"
]

for i in 1:length(pages)
    pages[i] = names[i] => pages[i][2]
end
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
src/SciMLBenchmarks.jl
code
3604
module SciMLBenchmarks using Weave, Pkg, IJulia, InteractiveUtils, Markdown repo_directory = joinpath(@__DIR__,"..") function weave_file(folder,file,build_list=(:script,:github)) target = joinpath(folder, file) @info("Weaving $(target)") if isfile(joinpath(folder, "Project.toml")) && build_list != (:notebook,) @info("Instantiating", folder) Pkg.activate(folder) Pkg.instantiate() Pkg.build() end args = Dict{Symbol,String}(:folder=>folder,:file=>file) if :script ∈ build_list println("Building Script") dir = joinpath(repo_directory,"script",basename(folder)) mkpath(dir) tangle(target; out_path=dir) end if :html ∈ build_list println("Building HTML") dir = joinpath(repo_directory,"html",basename(folder)) mkpath(dir) weave(target,doctype = "md2html",out_path=dir,args=args,fig_ext=".svg") end if :pdf ∈ build_list println("Building PDF") dir = joinpath(repo_directory,"pdf",basename(folder)) mkpath(dir) try weave(target,doctype="md2pdf",out_path=dir,args=args) catch ex @warn "PDF generation failed" exception=(ex, catch_backtrace()) end end if :github ∈ build_list println("Building Github Markdown") dir = joinpath(repo_directory,"markdown",basename(folder)) mkpath(dir) weave(target,doctype = "github",out_path=dir,args=args) end if :notebook ∈ build_list println("Building Notebook") dir = joinpath(repo_directory,"notebook",basename(folder)) mkpath(dir) Weave.convert_doc(target,joinpath(dir,file[1:end-4]*".ipynb")) end end function weave_all(build_list=(:script,:github)) for folder in readdir(joinpath(repo_directory,"benchmarks")) folder == "test.jmd" && continue weave_folder(joinpath(repo_directory,"benchmarks",folder),build_list) end end function weave_folder(folder, build_list=(:script,:github)) for file in readdir(folder) # Skip non-`.jmd` files if !endswith(file, ".jmd") continue end try weave_file(folder, file, build_list) catch e @show folder, file @error(e) end end end function bench_footer(folder=nothing, file=nothing) display(md""" ## Appendix These benchmarks are a part of the SciMLBenchmarks.jl repository, found at: <https://github.com/SciML/SciMLBenchmarks.jl>. For more information on high-performance scientific machine learning, check out the SciML Open Source Software Organization <https://sciml.ai>. """) if folder !== nothing && file !== nothing display(Markdown.parse(""" To locally run this benchmark, do the following commands: ``` using SciMLBenchmarks SciMLBenchmarks.weave_file("$folder","$file") ``` """)) end display(md"Computer Information:") vinfo = sprint(InteractiveUtils.versioninfo) display(Markdown.parse(""" ``` $(vinfo) ``` """)) display(md""" Package Information: """) proj = sprint(io -> Pkg.status(io=io)) mani = sprint(io -> Pkg.status(io=io, mode = Pkg.PKGMODE_MANIFEST)) md = """ ``` $(chomp(proj)) ``` And the full manifest: ``` $(chomp(mani)) ``` """ display(Markdown.parse(md)) end function open_notebooks() Base.eval(Main, Meta.parse("import IJulia")) weave_all((:notebook,)) path = joinpath(repo_directory,"notebook") IJulia.notebook(;dir=path) newpath = joinpath(pwd(),"generated_notebooks") mv(path, newpath) IJulia.notebook(;dir=newpath) end end # module SciMLBenchmarks
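A brief usage sketch of the entry points defined in this module (the folder and file names here are illustrative; any benchmark `.jmd` under `benchmarks/` works the same way):

```julia
using SciMLBenchmarks

# Weave one benchmark into a script and GitHub markdown (the default build list):
SciMLBenchmarks.weave_file(
    joinpath(SciMLBenchmarks.repo_directory, "benchmarks", "NonStiffODE"),
    "linear_wpd.jmd")

# Or weave every .jmd file in that folder:
SciMLBenchmarks.weave_folder(
    joinpath(SciMLBenchmarks.repo_directory, "benchmarks", "NonStiffODE"))
```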
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
test/runtests.jl
code
443
using SciMLBenchmarks, Test @testset "weave_file" begin benchmarks_dir = joinpath(dirname(@__DIR__), "benchmarks") SciMLBenchmarks.weave_file(joinpath(benchmarks_dir, "Testing"), "test.jmd") #@test isfile(joinpath(dirname(@__DIR__), "script", "Testing", "test.jl")) #@test isfile(joinpath(dirname(@__DIR__), "html", "Testing", "test.html")) @test isfile(joinpath(dirname(@__DIR__), "markdown", "Testing", "test.md")) end
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
README.md
docs
11742
# SciMLBenchmarks.jl: Benchmarks for Scientific Machine Learning (SciML) and Equation Solvers [![Join the chat at https://julialang.zulipchat.com #sciml-bridged](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged) [![Global Docs](https://img.shields.io/badge/docs-SciML-blue.svg)](https://docs.sciml.ai/SciMLBenchmarksOutput/stable/) [![Build status](https://badge.buildkite.com/2f4b5708bf098c75ce193f04b3f3c4047f993f0e363e314c61.svg)](https://buildkite.com/julialang/scimlbenchmarks-dot-jl) [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) SciMLBenchmarks.jl holds webpages, pdfs, and notebooks showing the benchmarks for the SciML Scientific Machine Learning Software ecosystem, including: - Benchmarks of equation solver implementations - Speed and robustness comparisons of methods for parameter estimation / inverse problems - Training universal differential equations (and subsets like neural ODEs) - Training of physics-informed neural networks (PINNs) - Surrogate comparisons, including radial basis functions, neural operators (DeepONets, Fourier Neural Operators), and more The SciMLBenchmarks suite is built to be a comprehensive open source benchmark from the ground up, covering the methods of computational science and scientific computing all the way to AI for science. ## Rules: Optimal, Fair, and Reproducible These benchmarks are meant to represent good optimized coding style. Benchmarks are preferred to be run on the provided open benchmarking hardware for full reproducibility (though in some cases, such as with language barriers, this can be difficult). Each benchmark is documented with the compute devices used along with package versions for necessary reproduction. These benchmarks attempt to measure performance in terms of work-precision efficiency, either timing solves at approximately matching errors or building work-precision diagrams for direct comparison of speed at given error tolerances (a minimal sketch of this workflow is shown below). **If any of the code from any of the languages can be improved, please open a pull request**. ## Results To view the results of the SciML Benchmarks, go to [benchmarks.sciml.ai](https://benchmarks.sciml.ai/stable/). By default, this will lead to the latest tagged version of the benchmarks. To see the in-development version of the benchmarks, go to [https://benchmarks.sciml.ai/dev/](https://benchmarks.sciml.ai/dev/). Static outputs in pdf, markdown, and html reside in [SciMLBenchmarksOutput](https://github.com/SciML/SciMLBenchmarksOutput).
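As a concrete illustration of the work-precision methodology described above, here is a minimal sketch. The problem, solvers, and tolerances are illustrative assumptions, not the official benchmark set:

```julia
# A small work-precision study, mirroring the pattern used throughout the suite.
using OrdinaryDiffEq, DiffEqDevTools, Plots

prob = ODEProblem((u, p, t) -> 1.01u, 0.5, (0.0, 1.0))
# A very-low-tolerance solve serves as the reference solution.
test_sol = TestSolution(solve(prob, Vern9(), abstol=1e-12, reltol=1e-12))

abstols = 1.0 ./ 10.0 .^ (3:6)
reltols = 1.0 ./ 10.0 .^ (3:6)
setups = [Dict(:alg => Tsit5()), Dict(:alg => Vern7())]

wp = WorkPrecisionSet(prob, abstols, reltols, setups;
                      appxsol = test_sol, numruns = 10)
plot(wp) # error vs. time, one curve per solver
```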
## Citing To cite the SciML Benchmarks, please cite the following: ```bib @article{rackauckas2019confederated, title={Confederated modular differential equation APIs for accelerated algorithm development and benchmarking}, author={Rackauckas, Christopher and Nie, Qing}, journal={Advances in Engineering Software}, volume={132}, pages={1--6}, year={2019}, publisher={Elsevier} } @article{DifferentialEquations.jl-2017, author = {Rackauckas, Christopher and Nie, Qing}, doi = {10.5334/jors.151}, journal = {The Journal of Open Research Software}, keywords = {Applied Mathematics}, note = {Exported from https://app.dimensions.ai on 2019/05/05}, number = {1}, pages = {}, title = {DifferentialEquations.jl – A Performant and Feature-Rich Ecosystem for Solving Differential Equations in Julia}, url = {https://app.dimensions.ai/details/publication/pub.1085583166 and http://openresearchsoftware.metajnl.com/articles/10.5334/jors.151/galley/245/download/}, volume = {5}, year = {2017} } ``` ## Current Summary The following is a quick summary of the benchmarks. These paint broad strokes over the set of tested equations and some specific examples may differ. ### Non-Stiff ODEs - OrdinaryDiffEq.jl's methods are the most efficient by a good amount - The `Vern` methods tend to do the best in every benchmark of this category - At lower tolerances, `Tsit5` does well consistently. - ARKODE and Hairer's `dopri5`/`dop853` perform very similarly, but are both far less efficient than the `Vern` methods. - The multistep methods, `CVODE_Adams` and `lsoda`, tend not to do very well. - The ODEInterface multistep method `ddeabm` does not do as well as the other multistep methods. - ODE.jl's methods are not able to consistently solve the problems. - Fixed time step methods are less efficient than the adaptive methods. ### Stiff ODEs - In this category, the best methods are much more problem dependent. - For smaller problems: - `Rosenbrock23`, `lsoda`, and `TRBDF2` tend to be the most efficient at high tolerances. - `Rodas4` and `Rodas5` tend to be the most efficient at low tolerances. - For larger problems (Filament PDE): - `QNDF` and `FBDF` do the best at all normal tolerances. - The ESDIRK methods like `TRBDF2` and `KenCarp4` can come close. - `radau` is always the most efficient when tolerances go to the low extreme (`1e-13`) - Fixed time step methods tend to diverge on every tested problem because the high stiffness results in divergence of the Newton solvers. - ARKODE is very inconsistent and requires a lot of tweaking in order to not diverge on many of the tested problems. When it doesn't diverge, the similar algorithms in OrdinaryDiffEq.jl (`KenCarp4`) are much more efficient in most cases. - ODE.jl and GeometricIntegrators.jl fail to converge on any of the tested problems. ### Dynamical ODEs - Higher order (generally order >=6) symplectic integrators are much more efficient than the lower order counterparts. - For high accuracy, using a symplectic integrator is not preferred. Their extra cost is not necessary since the other integrators avoid drift simply by having low enough error. - In this class, the `DPRKN` methods are by far the most efficient. The `Vern` methods do well considering they are not specific to the domain. ### Non-Stiff SDEs - For simple 1-dimensional SDEs at low accuracy, the `EM` and `RKMil` methods can do well. Beyond that, they are simply outclassed. - The `SRA` and `SRI` methods are both very similar within-class on the simple SDEs.
- `SRA3` is the most efficient when applicable and the tolerances are low. - Generally, only low accuracy is necessary to get to sampling error of the mean. - The adaptive method is very conservative with error estimates. ### Stiff SDEs - The high order adaptive methods (`SRIW1`) generally do well on stiff problems. - The "standard" low-order implicit methods, `ImplicitEM` and `ImplicitRK`, do not do well on all stiff problems. Some exceptions apply to well-behaved problems like the Stochastic Heat Equation. ### Non-Stiff DDEs - The efficiency ranking tends to match the ODE Tests, but the cutoff from low to high tolerance is lower. - `Tsit5` does well in a large class of problems here. - The `Vern` methods do well in low tolerance cases. ### Stiff DDEs - The Rosenbrock methods, specifically `Rodas5`, perform well. ### Parameter Estimation - Broadly, two different approaches have been used: Bayesian inference and optimisation algorithms. - In general it seems that the optimisation algorithms perform more accurately, but that can be attributed to the larger number of data points being used in the optimisation cases. The Bayesian approach tends to be the slower of the two, hence fewer data points are used; accuracy can increase if more suitable data is used. - Among the available optimisation algorithms, BBO from the BlackBoxOptim package and GN_CRS2_LM from the NLopt package perform best in the global case, while LD_SLSQP, LN_BOBYQA and LN_NELDERMEAD from the NLopt package perform best in the local case. - Another algorithm used is [QuadDIRECT](https://github.com/timholy/QuadDIRECT.jl); it gives very good results in the shorter problem case but doesn't do very well on the longer problems. - The choice of global versus local optimization makes a huge difference in the timings. BBO tends to find the correct solution for a global optimization setup. For local optimization, most methods in NLopt, like :LN_BOBYQA, solve the problem very fast but require a good initial condition. - The different backend options available for the Bayesian methods offer some tradeoffs between time, accuracy and control. Sufficiently high accuracy can be achieved with any of the backends by fine-tuning the stepsize, the constraints on the parameters, the tightness of the priors and the number of iterations. ## Interactive Notebooks To generate the interactive notebooks, first install the SciMLBenchmarks, instantiate the environment, and then run `SciMLBenchmarks.open_notebooks()`. This looks as follows: ```julia ]add SciMLBenchmarks#master ]activate SciMLBenchmarks ]instantiate using SciMLBenchmarks SciMLBenchmarks.open_notebooks() ``` The benchmarks will be generated at your `pwd()` in a folder called `generated_notebooks`. Note that when running the benchmarks, the packages are not automatically added. Thus you will need to add the packages manually or use the internal Project/Manifest tomls to instantiate the correct packages. This can be done by activating the folder of the benchmarks. For example, ```julia using Pkg Pkg.activate(joinpath(pkgdir(SciMLBenchmarks),"benchmarks","NonStiffODE")) Pkg.instantiate() ``` will add all of the packages required to run any benchmark in the `NonStiffODE` folder. ## Contributing All of the files are generated from the Weave.jl files in the `benchmarks` folder. The generation process runs automatically, and thus one does not necessarily need to test the Weave process locally.
Instead, simply open a PR that adds/updates a file in the "benchmarks" folder and the PR will generate the benchmark on demand. Its artifacts can then be inspected in the Buildkite as described below before merging. Note that it will use the Project.toml and Manifest.toml of the subfolder, so any changes to dependencies require that those be updated. ### Reporting Bugs and Issues Report any bugs or issues at [the SciMLBenchmarks repository](https://github.com/SciML/SciMLBenchmarks.jl). ### Inspecting Benchmark Results To see benchmark results before merging, click into the Buildkite build, click on Artifacts, and then investigate the generated results. ![](https://user-images.githubusercontent.com/1814174/118359358-02ddc980-b551-11eb-8a9b-24de947cefee.PNG) ### Manually Generating Files All of the files are generated from the Weave.jl files in the `benchmarks` folder. To run the generation process, do for example: ```julia ]activate SciMLBenchmarks # Get all of the packages using SciMLBenchmarks SciMLBenchmarks.weave_file(joinpath(pkgdir(SciMLBenchmarks),"benchmarks","NonStiffODE"),"linear_wpd.jmd") ``` To generate all of the files in a folder, for example, run: ```julia SciMLBenchmarks.weave_folder(joinpath(pkgdir(SciMLBenchmarks),"benchmarks","NonStiffODE")) ``` To generate all of the benchmark outputs, do: ```julia SciMLBenchmarks.weave_all() ``` Each of the benchmarks displays the computer characteristics at the bottom of the benchmark. Since performance-necessary computations are normally performed on compute clusters, the official benchmarks use a workstation with an AMD EPYC 7502 32-Core Processor @ 2.50GHz to match the performance characteristics of a standard node in a high performance computing (HPC) cluster or cloud computing setup.
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/AdaptiveSDE/AdaptiveEfficiencyTests.jmd
docs
6311
--- title: Adaptive Efficiency Tests author: Chris Rackauckas --- ```julia using Distributed addprocs(2) p1 = Vector{Any}(undef,3) p2 = Vector{Any}(undef,3) p3 = Vector{Any}(undef,3) @everywhere begin using StochasticDiffEq, SDEProblemLibrary, DiffEqNoiseProcess, Plots, ParallelDataTransfer import SDEProblemLibrary: prob_sde_additive, prob_sde_linear, prob_sde_wave end using StochasticDiffEq, SDEProblemLibrary, DiffEqNoiseProcess, Plots, ParallelDataTransfer import SDEProblemLibrary: prob_sde_additive, prob_sde_linear, prob_sde_wave probs = Matrix{SDEProblem}(undef,3,3) ## Problem 1 prob = prob_sde_linear probs[1,1] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM1))) probs[1,2] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM2))) probs[1,3] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM3))) ## Problem 2 prob = prob_sde_wave probs[2,1] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM1))) probs[2,2] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM2))) probs[2,3] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM3))) ## Problem 3 prob = prob_sde_additive probs[3,1] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM1))) probs[3,2] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM2))) probs[3,3] = SDEProblem(prob.f,prob.g,prob.u0,prob.tspan,prob.p,noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=:RSwM3))) fullMeans = Vector{Array}(undef,3) fullMedians = Vector{Array}(undef,3) fullElapsed = Vector{Array}(undef,3) fullTols = Vector{Array}(undef,3) offset = 0 Ns = [17,23, 17] ``` Timings are only valid if no workers die. Workers die if you run out of memory. 
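Given the warning above, here is a minimal sanity check (not part of the original benchmark) that one could run before and after the timing loops below:

```julia
using Distributed
# `addprocs(2)` above created two workers; if either died (typically from
# running out of memory), the timings collected here cannot be trusted.
@assert nworkers() == 2 "a worker died; the timings are invalid"
```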
```julia for k in 1:size(probs,1) global probs, Ns, fullMeans, fullMedians, fullElapsed, fullTols println("Problem $k") ## Setup N = Ns[k] msims = Vector{Any}(undef,N) elapsed = Array{Float64}(undef,N,3) medians = Array{Float64}(undef,N,3) means = Array{Float64}(undef,N,3) tols = Array{Float64}(undef,N,3) #Compile prob = probs[k,1] ParallelDataTransfer.sendto(workers(), prob=prob) monte_prob = EnsembleProblem(prob) solve(monte_prob,SRIW1(),dt=1/2^(4),adaptive=true,trajectories=1000,abstol=2.0^(-1),reltol=0) println("RSwM1") for i=1+offset:N+offset tols[i-offset,1] = 2.0^(-i-1) msims[i-offset] = DiffEqBase.calculate_monte_errors(solve(monte_prob,SRIW1(), trajectories=1000,abstol=2.0^(-i-1), reltol=0,force_dtmin=true)) elapsed[i-offset,1] = msims[i-offset].elapsedTime medians[i-offset,1] = msims[i-offset].error_medians[:final] means[i-offset,1] = msims[i-offset].error_means[:final] end println("RSwM2") prob = probs[k,2] ParallelDataTransfer.sendto(workers(), prob=prob) monte_prob = EnsembleProblem(prob) solve(monte_prob,SRIW1(),dt=1/2^(4),adaptive=true,trajectories=1000,abstol=2.0^(-1),reltol=0) for i=1+offset:N+offset tols[i-offset,2] = 2.0^(-i-1) msims[i-offset] = DiffEqBase.calculate_monte_errors(solve(monte_prob,SRIW1(), trajectories=1000,abstol=2.0^(-i-1), reltol=0,force_dtmin=true)) elapsed[i-offset,2] = msims[i-offset].elapsedTime medians[i-offset,2] = msims[i-offset].error_medians[:final] means[i-offset,2] = msims[i-offset].error_means[:final] end println("RSwM3") prob = probs[k,3] ParallelDataTransfer.sendto(workers(), prob=prob) monte_prob = EnsembleProblem(prob) solve(monte_prob,SRIW1(),dt=1/2^(4),adaptive=true,trajectories=1000,abstol=2.0^(-1),reltol=0) for i=1+offset:N+offset tols[i-offset,3] = 2.0^(-i-1) msims[i-offset] = DiffEqBase.calculate_monte_errors(solve(monte_prob,SRIW1(), adaptive=true,trajectories=1000,abstol=2.0^(-i-1), reltol=0,force_dtmin=true)) elapsed[i-offset,3] = msims[i-offset].elapsedTime medians[i-offset,3] = msims[i-offset].error_medians[:final] means[i-offset,3] = msims[i-offset].error_means[:final] end fullMeans[k] = means fullMedians[k] =medians fullElapsed[k] = elapsed fullTols[k] = tols end ``` ```julia gr(fmt=:svg) lw=3 leg=String["RSwM1","RSwM2","RSwM3"] titleFontSize = 16 guideFontSize = 14 legendFontSize= 14 tickFontSize = 12 for k in 1:size(probs,1) global probs, Ns, fullMeans, fullMedians, fullElapsed, fullTols p1[k] = Plots.plot(fullTols[k],fullMeans[k],xscale=:log10,yscale=:log10, xguide="Absolute Tolerance",yguide="Mean Final Error",title="Example $k" ,linewidth=lw,grid=false,lab=leg,titlefont=font(titleFontSize),legendfont=font(legendFontSize),tickfont=font(tickFontSize),guidefont=font(guideFontSize)) p2[k] = Plots.plot(fullTols[k],fullMedians[k],xscale=:log10,yscale=:log10,xguide="Absolute Tolerance",yguide="Median Final Error",title="Example $k",linewidth=lw,grid=false,lab=leg,titlefont=font(titleFontSize),legendfont=font(legendFontSize),tickfont=font(tickFontSize),guidefont=font(guideFontSize)) p3[k] = Plots.plot(fullTols[k],fullElapsed[k],xscale=:log10,yscale=:log10,xguide="Absolute Tolerance",yguide="Elapsed Time",title="Example $k" ,linewidth=lw,grid=false,lab=leg,titlefont=font(titleFontSize),legendfont=font(legendFontSize),tickfont=font(tickFontSize),guidefont=font(guideFontSize)) end Plots.plot!(p1[1]) Plots.plot(p1[1],p1[2],p1[3],layout=(3,1),size=(1000,800)) ``` ```julia #savefig("meanvstol.png") #savefig("meanvstol.pdf") ``` ```julia plot(p3[1],p3[2],p3[3],layout=(3,1),size=(1000,800)) #savefig("timevstol.png") 
#savefig("timevstol.pdf") ``` ```julia plot(p1[1],p3[1],p1[2],p3[2],p1[3],p3[3],layout=(3,2),size=(1000,800)) ``` ```julia using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/AdaptiveSDE/qmaxDetermination.jmd
docs
2397
--- title: qmax Determination author: Chris Rackauckas --- ```julia qs = 1.0 .+ 2.0.^(-5:2) times = Array{Float64}(undef,length(qs),4) means = Array{Float64}(undef,length(qs),4) using StochasticDiffEq, SDEProblemLibrary, Random, Plots, ParallelDataTransfer, DiffEqMonteCarlo, Distributed Random.seed!(99) full_prob = SDEProblemLibrary.oval2ModelExample(largeFluctuations=true,useBigs=false) import SDEProblemLibrary: prob_sde_additivesystem, prob_sde_additive, prob_sde_2Dlinear, prob_sde_linear, prob_sde_wave prob = remake(full_prob,tspan=(0.0,1.0)) println("Solve once to compile.") sol = solve(prob,EM(),dt=1/2^(18)) Int(!isnan(sol.u[end][1])) println("Compilation complete.") num_runs = 10000 probs = Vector{SDEProblem}(undef,3) p1 = Vector{Any}(undef,3) p2 = Vector{Any}(undef,3) p3 = Vector{Any}(undef,3) ## Problem 1 probs[1] = prob_sde_linear ## Problem 2 probs[2] = prob_sde_wave ## Problem 3 probs[3] = prob_sde_additive println("Setup Complete") ## Timing Runs function runAdaptive(i,k) sol = solve(prob,SRIW1(),dt=1/2^(8),abstol=2.0^(-15),reltol=2.0^(-10), verbose=false,maxiters=Int(1e12),qmax=qs[k]) Int(any(isnan,sol[end]) || sol.t[end] != 1) end #Compile monte_prob = EnsembleProblem(probs[1]) test_mc = solve(monte_prob,SRIW1(),dt=1/2^(4),adaptive=true,trajectories=1000,abstol=2.0^(-1),reltol=0) DiffEqBase.calculate_monte_errors(test_mc); ``` ## qmax test on Oval2 Model ```julia for k in eachindex(qs) global times Random.seed!(99) adaptiveTime = @elapsed numFails = sum(map((i)->runAdaptive(i,k),1:num_runs)) println("k was $k. The number of Adaptive Fails is $numFails. Elapsed time was $adaptiveTime") times[k,4] = adaptiveTime end ``` ## qmax test on other problems ```julia for k in eachindex(probs) global probs, times, means, qs println("Problem $k") ## Setup prob = probs[k] monte_prob = EnsembleProblem(prob) # rebuild the ensemble for the current problem for i in eachindex(qs) msim = solve(monte_prob,SRIW1(),dt=1/2^(4),adaptive=true,trajectories=num_runs,abstol=2.0^(-13),reltol=0,qmax=qs[i]) test_msim = DiffEqBase.calculate_monte_errors(msim) times[i,k] = test_msim.elapsedTime means[i,k] = test_msim.error_means[:final] println("for k=$k and i=$i, we get that the error was $(means[i,k]) and it took $(times[i,k]) seconds") end end ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
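The file above fills `times` and `means` but stops before visualizing them; the following is a minimal sketch (an assumption about the intended presentation, not code from the original benchmark) of plotting elapsed time against the tested `qmax` values:

```julia
using Plots
# Columns 1-3 of `times` are the three library problems; column 4 is Oval2.
plot(qs, times, xguide="qmax", yguide="elapsed time (s)", yscale=:log10,
     lab=["prob_sde_linear" "prob_sde_wave" "prob_sde_additive" "Oval2"])
```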
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/BayesianInference/DiffEqBayesFitzHughNagumo.jmd
docs
3211
--- title: Fitzhugh-Nagumo Bayesian Parameter Estimation Benchmarks author: Vaibhav Dixit, Chris Rackauckas --- ```julia using DiffEqBayes, BenchmarkTools ``` ```julia using OrdinaryDiffEq, RecursiveArrayTools, Distributions, ParameterizedFunctions, StanSample, DynamicHMC using Plots, StaticArrays, Turing, LinearAlgebra ``` ```julia gr(fmt=:png) ``` ### Defining the problem. The [FitzHugh-Nagumo model](https://en.wikipedia.org/wiki/FitzHugh%E2%80%93Nagumo_model) is a simplified version of the [Hodgkin-Huxley model](https://en.wikipedia.org/wiki/Hodgkin%E2%80%93Huxley_model) and is used to describe an excitable system (e.g. a neuron). ```julia fitz = @ode_def FitzhughNagumo begin dv = v - 0.33*v^3 -w + l dw = τinv*(v + a - b*w) end a b τinv l ``` ```julia prob_ode_fitzhughnagumo = ODEProblem(fitz, [1.0,1.0], (0.0,10.0), [0.7,0.8,1/12.5,0.5]) sol = solve(prob_ode_fitzhughnagumo, Tsit5()) ``` ```julia sprob_ode_fitzhughnagumo = ODEProblem{false,SciMLBase.FullSpecialize}(fitz, SA[1.0,1.0], (0.0,10.0), SA[0.7,0.8,1/12.5,0.5]) sol = solve(sprob_ode_fitzhughnagumo, Tsit5()) ``` Data is generated by adding noise to the solution obtained above. ```julia t = collect(range(1,stop=10,length=10)) sig = 0.20 data = convert(Array, VectorOfArray([(sol(t[i]) + sig*randn(2)) for i in 1:length(t)])) ``` ### Plot of the data and the solution. ```julia scatter(t, data[1,:]) scatter!(t, data[2,:]) plot!(sol) ``` ### Priors for the parameters which will be passed for the Bayesian Inference ```julia priors = [truncated(Normal(1.0,0.5),0,1.5), truncated(Normal(1.0,0.5),0,1.5), truncated(Normal(0.0,0.5),0.0,0.5), truncated(Normal(0.5,0.5),0,1)] ``` ### Benchmarks #### Stan.jl backend ```julia @time bayesian_result_stan = stan_inference(prob_ode_fitzhughnagumo,t,data,priors; delta = 0.65, num_samples = 10_000, print_summary=false, vars=(DiffEqBayes.StanODEData(), InverseGamma(2, 3))) ``` ### Direct Turing.jl ```julia @model function fitlv(data, prob) # Prior distributions. σ ~ InverseGamma(2, 3) a ~ truncated(Normal(1.0,0.5),0,1.5) b ~ truncated(Normal(1.0,0.5),0,1.5) τinv ~ truncated(Normal(0.0,0.5),0.0,0.5) l ~ truncated(Normal(0.5,0.5),0,1) # Simulate the FitzHugh-Nagumo model. p = SA[a,b,τinv,l] _prob = remake(prob, p = p) predicted = solve(_prob, Tsit5(); saveat=t) # Observations. for i in 1:length(predicted) data[:, i] ~ MvNormal(predicted[i], σ^2 * I) end return nothing end model = fitlv(data, sprob_ode_fitzhughnagumo) @time chain = sample(model, NUTS(0.65), 10000; progress=false) ``` ```julia @time chain = sample(model, NUTS(0.65), 10000; progress=false) ``` #### Turing.jl backend ```julia @time bayesian_result_turing = turing_inference(prob_ode_fitzhughnagumo,Tsit5(),t,data,priors;num_samples = 10_000) ``` # Conclusion FitzHugh-Nagumo is a standard problem for parameter estimation studies. In the FitzHugh-Nagumo model the parameters to be estimated were `[0.7,0.8,0.08,0.5]`. `dynamichmc_inference` has issues with the model and hence was excluded from this benchmark. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/BayesianInference/DiffEqBayesLorenz.jmd
docs
4408
--- title: Lorenz Bayesian Parameter Estimation Benchmarks author: Vaibhav Dixit, Chris Rackauckas --- ## Parameter estimation of Lorenz Equation using DiffEqBayes.jl ```julia using DiffEqBayes using DiffEqCallbacks, StaticArrays using Distributions, StanSample, DynamicHMC, Turing using OrdinaryDiffEq, RecursiveArrayTools, ParameterizedFunctions, DiffEqCallbacks using Plots, LinearAlgebra ``` ```julia gr(fmt=:png) ``` #### Initializing the problem ```julia g1 = @ode_def LorenzExample begin dx = σ*(y-x) dy = x*(ρ-z) - y dz = x*y - β*z end σ ρ β ``` ```julia r0 = [1.0; 0.0; 0.0] tspan = (0.0, 30.0) p = [10.0,28.0,2.66] ``` ```julia prob = ODEProblem(g1, r0, tspan, p) sol = solve(prob,Tsit5()) ``` ```julia sr0 = SA[1.0; 0.0; 0.0] tspan = (0.0, 30.0) sp = SA[10.0,28.0,2.66] sprob = ODEProblem{false,SciMLBase.FullSpecialize}(g1, sr0, tspan, sp) sol = solve(sprob,Tsit5()) ``` #### Generating data for Bayesian estimation of the parameters by adding random noise to the solution obtained above with the `Tsit5` algorithm. ```julia t = collect(range(1, stop=30, length=30)) sig = 0.49 data = convert(Array, VectorOfArray([(sol(t[i]) + sig*randn(3)) for i in 1:length(t)])) ``` #### Plots of the generated data and the actual data. ```julia Plots.scatter(t, data[1,:],markersize=4,color=:purple) Plots.scatter!(t, data[2,:],markersize=4,color=:yellow) Plots.scatter!(t, data[3,:],markersize=4,color=:black) plot!(sol) ``` #### The Uncertainty Quantification plot is used to decide the tolerance for the differential equation. ```julia cb = AdaptiveProbIntsUncertainty(5) monte_prob = EnsembleProblem(prob) sim = solve(monte_prob,Tsit5(),trajectories=100,callback=cb,reltol=1e-5,abstol=1e-5) plot(sim,vars=(0,1),linealpha=0.4) ``` ```julia cb = AdaptiveProbIntsUncertainty(5) monte_prob = EnsembleProblem(prob) sim = solve(monte_prob,Tsit5(),trajectories=100,callback=cb,reltol=1e-6,abstol=1e-6) plot(sim,vars=(0,1),linealpha=0.4) ``` ```julia cb = AdaptiveProbIntsUncertainty(5) monte_prob = EnsembleProblem(prob) sim = solve(monte_prob,Tsit5(),trajectories=100,callback=cb,reltol=1e-8,abstol=1e-8) plot(sim,vars=(0,1),linealpha=0.4) ``` ```julia priors = [truncated(Normal(10,2),1,15),truncated(Normal(30,5),1,45),truncated(Normal(2.5,0.5),1,4)] ``` ## Using Stan.jl backend The Lorenz equation is a chaotic system and hence requires a very low tolerance to be estimated in a reasonable way; we use 1e-8, obtained from the uncertainty plots. Use of truncated priors is necessary to prevent Stan from stepping into negative and other improbable areas. ```julia @time bayesian_result_stan = stan_inference(prob,t,data,priors; delta = 0.65, reltol=1e-8,abstol=1e-8, vars=(DiffEqBayes.StanODEData(), InverseGamma(2, 3))) ``` ### Direct Turing.jl ```julia @model function fitlv(data, prob) # Prior distributions. α ~ InverseGamma(2, 3) σ ~ truncated(Normal(10, 2), 1, 15) ρ ~ truncated(Normal(30, 5), 1, 45) β ~ truncated(Normal(2.5, 0.5), 1, 4) # Simulate the Lorenz model. p = SA[σ, ρ, β] _prob = remake(prob, p = p) predicted = solve(_prob, Vern9(); saveat=t) # Observations.
for i in 1:length(predicted) data[:, i] ~ MvNormal(predicted[i], α^2 * I) end return nothing end model = fitlv(data, sprob) @time chain = sample(model, NUTS(0.65), 10000; progress=false) ``` ```julia @time chain = sample(model, NUTS(0.65), 10000; progress=false) ``` ### Using Turing.jl backend ```julia @time bayesian_result_turing = turing_inference(prob, Vern9(), t, data, priors; reltol=1e-8, abstol=1e-8, likelihood=(u, p, t, σ) -> MvNormal(u, Diagonal((σ) .^ 2 .* ones(length(u)))), likelihood_dist_priors=[InverseGamma(2, 3), InverseGamma(2, 3), InverseGamma(2, 3)]) ``` ### Using DynamicHMC.jl backend ```julia @time bayesian_result_dynamichmc = dynamichmc_inference(prob,Tsit5(),t,data,priors;solve_kwargs = (reltol=1e-8,abstol=1e-8,)) ``` ## Conclusion Due to the chaotic nature of the Lorenz equation, it is a very hard problem to estimate, as errors grow exponentially. Its uncertainty plot points to its chaotic behaviour and goes awry for different values of tolerance; we use 1e-8 as the tolerance, as it makes the uncertainty small enough to be trusted over the `(0,30)` time span. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/BayesianInference/DiffEqBayesLotkaVolterra.jmd
docs
3435
--- title: Lotka-Volterra Bayesian Parameter Estimation Benchmarks author: Vaibhav Dixit, Chris Rackauckas --- ## Parameter Estimation of Lotka-Volterra Equation using DiffEqBayes.jl ```julia using DiffEqBayes, StanSample, DynamicHMC, Turing ``` ```julia using Distributions, BenchmarkTools, StaticArrays using OrdinaryDiffEq, RecursiveArrayTools, ParameterizedFunctions using Plots, LinearAlgebra ``` ```julia gr(fmt=:png) ``` #### Initializing the problem ```julia f = @ode_def LotkaVolterraTest begin dx = a*x - b*x*y dy = -c*y + d*x*y end a b c d ``` ```julia u0 = [1.0,1.0] tspan = (0.0,10.0) p = [1.5,1.0,3.0,1.0] ``` ```julia prob = ODEProblem(f, u0, tspan, p) sol = solve(prob,Tsit5()) ``` ```julia su0 = SA[1.0,1.0] sp = SA[1.5,1.0,3.0,1.0] sprob = ODEProblem{false,SciMLBase.FullSpecialize}(f, su0, tspan, sp) sol = solve(sprob,Tsit5()) ``` #### We take the solution data obtained and add noise to it to obtain data for use in the Bayesian inference of the parameters ```julia t = collect(range(1,stop=10,length=10)) sig = 0.49 data = convert(Array, VectorOfArray([(sol(t[i]) + sig*randn(2)) for i in 1:length(t)])) ``` #### Plots of the actual data and generated data ```julia scatter(t, data[1,:], lab="#prey (data)") scatter!(t, data[2,:], lab="#predator (data)") plot!(sol) ``` ```julia priors = [truncated(Normal(1.5,0.5),0.5,2.5),truncated(Normal(1.2,0.5),0,2),truncated(Normal(3.0,0.5),1,4),truncated(Normal(1.0,0.5),0,2)] ``` ### Stan.jl backend The solution converges for tolerance values lower than 1e-3; lower tolerances lead to better accuracy in the result but are accompanied by longer warmup and sampling times. Truncated normal priors are used to prevent Stan from stepping into negative values. ```julia @btime bayesian_result_stan = stan_inference(prob,t,data,priors,num_samples=10_000,print_summary=false,delta = 0.65, vars = (DiffEqBayes.StanODEData(), InverseGamma(2, 3))) ``` ### Direct Turing.jl ```julia @model function fitlv(data, prob) # Prior distributions. σ ~ InverseGamma(2, 3) α ~ truncated(Normal(1.5, 0.5), 0.5, 2.5) β ~ truncated(Normal(1.2, 0.5), 0, 2) γ ~ truncated(Normal(3.0, 0.5), 1, 4) δ ~ truncated(Normal(1.0, 0.5), 0, 2) # Simulate Lotka-Volterra model. p = SA[α, β, γ, δ] _prob = remake(prob, p = p) predicted = solve(_prob, Tsit5(); saveat=t) # Observations. for i in 1:length(predicted) data[:, i] ~ MvNormal(predicted[i], σ^2 * I) end return nothing end model = fitlv(data, sprob) @time chain = sample(model, NUTS(0.65), 10000; progress=false) ``` ```julia @time chain = sample(model, NUTS(0.65), 10000; progress=false) ``` ### Turing.jl backend ```julia @btime bayesian_result_turing = turing_inference(prob, Tsit5(), t, data, priors, num_samples=10_000) ``` ### DynamicHMC.jl backend ```julia @btime bayesian_result_dynamichmc = dynamichmc_inference(prob,Tsit5(),t,data,priors,num_samples=10_000) ``` ## Conclusion The Lotka-Volterra equation is a "predator-prey" model: it models the populations of two species, in which one is the predator (wolf) and the other is the prey (rabbit). It depicts a cyclic behaviour, which is also seen in its Uncertainty Quantification plots. This behaviour makes it easy to estimate even at very high tolerance values (1e-3). ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Bio/BCR.jmd
docs
11413
--- title: BCR Work-Precision Diagrams author: Samuel Isaacson and Chris Rackauckas --- The following benchmark is of 1122 ODEs with 24388 terms that describe a stiff chemical reaction network modeling the BCR signaling network from [Barua et al.](https://doi.org/10.4049/jimmunol.1102003). We use [`ReactionNetworkImporters`](https://github.com/isaacsas/ReactionNetworkImporters.jl) to load the BioNetGen model files as a [Catalyst](https://github.com/SciML/Catalyst.jl) model, and then use [ModelingToolkit](https://github.com/SciML/ModelingToolkit.jl) to convert the Catalyst network model to ODEs. ```julia using DiffEqBase, OrdinaryDiffEq, Catalyst, ReactionNetworkImporters, Sundials, Plots, DiffEqDevTools, ODEInterface, ODEInterfaceDiffEq, LSODA, TimerOutputs, LinearAlgebra, ModelingToolkit, BenchmarkTools, LinearSolve gr() datadir = joinpath(dirname(pathof(ReactionNetworkImporters)),"../data/bcr") const to = TimerOutput() tf = 100000.0 # generate ModelingToolkit ODEs @timeit to "Parse Network" prnbng = loadrxnetwork(BNGNetwork(), joinpath(datadir, "bcr.net")) show(to) rn = prnbng.rn obs = [eq.lhs for eq in observed(rn)] @timeit to "Create ODESys" osys = convert(ODESystem, rn) show(to) tspan = (0.,tf) @timeit to "ODEProb No Jac" oprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]) show(to) oprob_sparse = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]; sparse=true); ``` ```julia @timeit to "ODEProb SparseJac" sparsejacprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[], jac=true, sparse=true) show(to) ``` ```julia @show numspecies(rn) # Number of ODEs @show numreactions(rn) # Apprx. number of terms in the ODE @show length(parameters(rn)); # Number of Parameters ``` ## Time ODE derivative function compilation As compiling the ODE derivative functions has in the past taken longer than running a simulation, we first force compilation by evaluating these functions one time. ```julia u = ModelingToolkit.varmap_to_vars(nothing, species(rn); defaults=ModelingToolkit.defaults(rn)) du = copy(u) p = ModelingToolkit.varmap_to_vars(nothing, parameters(rn); defaults=ModelingToolkit.defaults(rn)) @timeit to "ODE rhs Eval1" oprob.f(du,u,p,0.) @timeit to "ODE rhs Eval2" oprob.f(du,u,p,0.) sparsejacprob.f(du,u,p,0.) ``` We also time the ODE rhs function with BenchmarkTools as it is more accurate given how fast evaluating `f` is: ```julia @btime oprob.f($du,$u,$p,0.) ``` ```julia Js = similar(sparsejacprob.f.jac_prototype) @timeit to "SparseJac Eval1" sparsejacprob.f.jac(Js,u,p,0.) @timeit to "SparseJac Eval2" sparsejacprob.f.jac(Js,u,p,0.) show(to) ``` ## Picture of the solution ```julia sol = solve(oprob, CVODE_BDF(), saveat=tf/1000., reltol=1e-5, abstol=1e-5) plot(sol; idxs=obs, legend=false, fmt=:png) ``` ## Generate Test Solution ```julia @time sol = solve(oprob, CVODE_BDF(), abstol=1/10^12, reltol=1/10^12) test_sol = TestSolution(sol); ``` ## Setups #### Sets plotting defaults ```julia default(legendfontsize=7,framestyle=:box,gridalpha=0.3,gridlinewidth=2.5) ``` #### Declares a plotting helper function ```julia function plot_settings(wp) times = vcat(map(wp -> wp.times, wp.wps)...) errors = vcat(map(wp -> wp.errors, wp.wps)...) 
xlimit = 10 .^ (floor(log10(minimum(errors))), ceil(log10(maximum(errors)))) ylimit = 10 .^ (floor(log10(minimum(times))), ceil(log10(maximum(times)))) return xlimit,ylimit end ``` #### Declare pre-conditioners ```julia using IncompleteLU, LinearAlgebra const τ = 1e2 const τ2 = 1e2 jaccache = sparsejacprob.f.jac(oprob.u0,oprob.p,0.0) W = I - 1.0*jaccache prectmp = ilu(W, τ = τ) preccache = Ref(prectmp) function psetupilu(p, t, u, du, jok, jcurPtr, gamma) if !jok sparsejacprob.f.jac(jaccache,u,p,t) jcurPtr[] = true # W = I - gamma*J @. W = -gamma*jaccache idxs = diagind(W) @. @view(W[idxs]) = @view(W[idxs]) + 1 # Build preconditioner on W preccache[] = ilu(W, τ = τ) end end function precilu(z,r,p,t,y,fy,gamma,delta,lr) ldiv!(z,preccache[],r) end function incompletelu(W,du,u,p,t,newW,Plprev,Prprev,solverdata) if newW === nothing || newW Pl = ilu(convert(AbstractMatrix,W), τ = τ2) else Pl = Plprev end Pl,nothing end; ``` #### Sets tolerances ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (5:8); ``` ## Failures Before proceeding to the results, we note the notable omissions. CVODE with KLU diverges in the solution, and thus it is omitted from the results: ```julia solve(sparsejacprob,CVODE_BDF(linear_solver=:KLU), abstol=1e-8, reltol=1e-8); ``` ## Work-Precision Diagrams (CVODE and lsoda solvers) #### Declare solvers. ```julia setups = [ Dict(:alg=>lsoda(), :prob_choice => 1), Dict(:alg=>CVODE_BDF(), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:LapackDense), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), ]; ``` #### Plot Work-Precision Diagram. ```julia wp = WorkPrecisionSet([oprob,oprob_sparse,sparsejacprob],abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=[test_sol,test_sol,test_sol],maxiters=Int(1e6),numruns=1) names = ["lsoda" "CVODE_BDF" "CVODE_BDF (LapackDense)" "CVODE_BDF (GMRES)" "CVODE_BDF (GMRES, iLU)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Work-Precision Diagrams (various Julia solvers) #### Declare solvers (using default linear solver). ```julia setups = [ Dict(:alg=>TRBDF2(autodiff=false)), Dict(:alg=>QNDF(autodiff=false)), Dict(:alg=>FBDF(autodiff=false)), Dict(:alg=>KenCarp4(autodiff=false)) ]; ``` #### Plot Work-Precision Diagram (using default linear solver). ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e6),numruns=1) names = ["TRBDF2" "QNDF" "FBDF" "KenCarp4"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using GMRES linear solver). ```julia setups = [ Dict(:alg=>TRBDF2(linsolve=KrylovJL_GMRES(),autodiff=false)), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES(),autodiff=false)), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES(),autodiff=false)), Dict(:alg=>KenCarp4(linsolve=KrylovJL_GMRES(),autodiff=false)) ]; ``` #### Plot Work-Precision Diagram (using GMRES linear solver). ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e6),numruns=1) names = ["TRBDF2 (GMRES)" "QNDF (GMRES)" "FBDF (GMRES)" "KenCarp4 (GMRES)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using GMRES linear solver, with pre-conditioner).
```julia setups = [ Dict(:alg=>TRBDF2(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)), Dict(:alg=>KenCarp4(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)) ]; ``` #### Plot Work-Precision Diagram (using GMRES linear solver, with pre-conditioner). ```julia wp = WorkPrecisionSet(sparsejacprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e6),numruns=1) names = ["TRBDF2 (GMRES, iLU)" "QNDF (GMRES, iLU)" "FBDF (GMRES, iLU)" "KenCarp4 (GMRES, iLU)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using sparse jacobian) We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>TRBDF2(linsolve=KLUFactorization(),autodiff=false)), Dict(:alg=>QNDF(linsolve=KLUFactorization(),autodiff=false)), Dict(:alg=>FBDF(linsolve=KLUFactorization(),autodiff=false)), Dict(:alg=>KenCarp4(linsolve=KLUFactorization(),autodiff=false)) ]; ``` #### Plot Work-Precision Diagram (using sparse jacobian) Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(sparsejacprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e6),numruns=1) names = ["TRBDF2 (KLU, sparse jac)" "QNDF (KLU, sparse jac)" "FBDF (KLU, sparse jac)" "KenCarp4 (KLU, sparse jac)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Summary of results Finally, we compute a single diagram comparing the various solvers used. #### Declare solvers We designate the solvers we wish to compare. ```julia setups = [ Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true), :prob_choice => 3), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true), :prob_choice => 3), Dict(:alg=>QNDF(linsolve=KLUFactorization(),autodiff=false), :prob_choice => 3), Dict(:alg=>FBDF(linsolve=KLUFactorization(),autodiff=false), :prob_choice => 3), Dict(:alg=>KenCarp4(linsolve=KLUFactorization(),autodiff=false), :prob_choice => 3) ]; ``` #### Plot Work-Precision Diagram For these, we generate a work-precision diagram for the selection of solvers. 
```julia wp = WorkPrecisionSet([oprob,oprob_sparse,sparsejacprob],abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=[test_sol,test_sol,test_sol],maxiters=Int(1e9),numruns=200) names = ["CVODE_BDF (GMRES, iLU)" "QNDF (GMRES, iLU)" "FBDF (GMRES, iLU)" "QNDF (KLU, sparse jac)" "FBDF (KLU, sparse jac)" "KenCarp4 (KLU, sparse jac)"] colors = [:green :deepskyblue1 :dodgerblue2 :royalblue2 :slateblue3 :lightskyblue] markershapes = [:octagon :hexagon :rtriangle :pentagon :ltriangle :star5] xlimit,ylimit = plot_settings(wp) xlimit = xlimit .* [0.95,1/0.95]; ylimit = ylimit .* [0.95,1/0.95]; plot(wp;label=names,left_margin=10Plots.mm,right_margin=10Plots.mm,xlimit=xlimit,ylimit=ylimit,xticks=[1e-3,1e-2,1e-1,1e0,1e1,1e2,1e3],yticks=[1e0,1e1,1e2,1e3],color=colors,markershape=markershapes,legendfontsize=15,tickfontsize=15,guidefontsize=15, legend=:topright, lw=20, la=0.8, markersize=20,markerstrokealpha=1.0, markerstrokewidth=1.5, gridalpha=0.3, gridlinewidth=7.5,size=(1100,1000)) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Bio/Bidkhori2012.jmd
docs
76290
--- title: Bidkhori2012 Work-Precision Diagrams author: Utkarsh --- The following benchmark is of 109 ODEs and 188 parameters that describe a stiff SBML model, modelling EGFR signalling in normal and NSCLC cells [Bidkhori et al.](https://doi.org/10.1371/journal.pone.0048004). It is referenced from the [Biomodels Database](https://www.ebi.ac.uk/biomodels/curation/index), and the model was parsed into Julia and finally into ODEs using [ModelingToolkit](https://github.com/SciML/ModelingToolkit.jl). ```julia using DiffEqBase, OrdinaryDiffEq, Catalyst, ReactionNetworkImporters, Sundials, Plots, DiffEqDevTools, ODEInterface, ODEInterfaceDiffEq, LSODA, TimerOutputs, LinearAlgebra, ModelingToolkit, BenchmarkTools, JLD2 gr() par = load(joinpath(@__DIR__, "params_Bidkhori2012.jld2")) ``` # Defining the reaction system ```julia function sbml_model!(du, u, p, t) # assignmentRule: variable = mwa6994523_5d45_4000_af0c_3e94073bf183 u88 = u[80] + u[79] reaction_mwa67e40c1_693d_4214_adc8_b2f2b71cef12 = p["reaction_mwa67e40c1_693d_4214_adc8_b2f2b71cef12_mw575f7f49_3663_47f1_b492_5b92c1c4345d"] * u[1] * u[2] - p["reaction_mwa67e40c1_693d_4214_adc8_b2f2b71cef12_mw53c64fd3_9a1c_4947_a734_74a73554964c"] * u[3] reaction_mw877cd1e3_b48b_42e8_ab23_682dd893fd9d = p["reaction_mw877cd1e3_b48b_42e8_ab23_682dd893fd9d_mw8cfaf07f_dabe_45de_93cc_ef2c7fd31104"] * u[3] * u[3] - p["reaction_mw877cd1e3_b48b_42e8_ab23_682dd893fd9d_mwab52aceb_4b19_4317_b2da_97ccbb973dab"] * u[4] reaction_mw413c6d45_ab23_4d3e_87b3_a8ed4629b923 = p["reaction_mw413c6d45_ab23_4d3e_87b3_a8ed4629b923_mw6b97a1ec_2cba_4bce_96f7_ec1d0fa2d16c"] * u[4] reaction_mwf61e086d_0345_4d4c_b91d_0b105e543d04 = p["reaction_mwf61e086d_0345_4d4c_b91d_0b105e543d04_mwf1697f55_a3f4_4fb6_ae1d_f96f09ad1daa"] * u[5] * u[7] - p["reaction_mwf61e086d_0345_4d4c_b91d_0b105e543d04_mw880a5942_7549_4466_bd19_0e1768a3a533"] * u[8] reaction_mw91f49311_efdc_47c6_b8b8_a619e042d644 = p["reaction_mw91f49311_efdc_47c6_b8b8_a619e042d644_mw7e889122_d26c_4d09_bae4_d313b992dc8e"] * u[5] * u[9] - p["reaction_mw91f49311_efdc_47c6_b8b8_a619e042d644_mwff6f49f7_268a_4f08_8d36_3ad8449d7472"] * u[10] reaction_mw974c39f5_b82e_44b3_abec_7a724f46c526 = p["reaction_mw974c39f5_b82e_44b3_abec_7a724f46c526_mwe645e76e_bb00_4c22_b25e_a2e77a6aada2"] * u[8] reaction_mw9544e67b_b6d0_4941_b7e0_ecd4f400a335 = p["reaction_mw9544e67b_b6d0_4941_b7e0_ecd4f400a335_mwb0744746_88a2_488e_a483_266747a044c6"] * u[10] reaction_mw486c5261_3d03_4589_a1e9_978b62ad2dfe = p["reaction_mw486c5261_3d03_4589_a1e9_978b62ad2dfe_mw9e24066c_51a5_4c7a_af7c_4656155a4eb0"] * u[11] - p["reaction_mw486c5261_3d03_4589_a1e9_978b62ad2dfe_mwab1ef4d4_2acc_4fa2_b07c_fac51fb7bfaf"] * u[5] * u[12] reaction_mw2cf8a809_63d8_4717_91fc_070516e6f3db = p["reaction_mw2cf8a809_63d8_4717_91fc_070516e6f3db_mwc4824ff0_2b51_4d66_ad48_1145f670a6e1"] * u[12] * u[9] - p["reaction_mw2cf8a809_63d8_4717_91fc_070516e6f3db_mw0f1d282f_1c6b_455c_8254_3760632c6ecc"] * u[13] reaction_mweda6a945_fb5d_4d99_9958_11b2b2840308 = p["reaction_mweda6a945_fb5d_4d99_9958_11b2b2840308_mw0aa92e25_f9aa_461e_92b8_23b1b5b3ab92"] * u[13] reaction_mwd4bf58ea_70c9_43ea_a831_1fcde130ba28 = p["reaction_mwd4bf58ea_70c9_43ea_a831_1fcde130ba28_mw2a4ed8a2_fce4_44a4_adb9_edc24a06b4e1"] * u[12] reaction_mw4817365e_a33b_451f_bee1_de748377ede2 = p["reaction_mw4817365e_a33b_451f_bee1_de748377ede2_mwe879a9ac_4b8d_4c9a_a157_a3751761cf63"] * u[11] * u[14] - p["reaction_mw4817365e_a33b_451f_bee1_de748377ede2_mwa18578d7_236f_4939_baca_52259e38fe15"] * u[15] reaction_mw03998474_934b_4e4a_8c0c_ca359e402ac2
= p["reaction_mw03998474_934b_4e4a_8c0c_ca359e402ac2_mw289fed85_e6ee_43e6_a69f_77b5f487a452"] * u[15] * u[9] - p["reaction_mw03998474_934b_4e4a_8c0c_ca359e402ac2_mw8768b5c7_b227_4825_aa55_a525b0d915c2"] * u[16] reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6 = p["reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6_mwd12a67b3_6d98_40e9_a54b_282a577498eb"] * u[16] reaction_mwd9262331_e35a_4614_943a_89bcf8a492e3 = p["reaction_mwd9262331_e35a_4614_943a_89bcf8a492e3_mw6ac313e2_e8a9_42a9_b13a_27e55c1012a2"] * u[15] * u[17] - p["reaction_mwd9262331_e35a_4614_943a_89bcf8a492e3_mw93f832d7_eefb_43dd_853c_a0d7a76023cf"] * u[18] reaction_mwc5f121dc_d27d_4c3d_90f2_67d0adaf144a = p["reaction_mwc5f121dc_d27d_4c3d_90f2_67d0adaf144a_mwbb727dc5_30e8_45f4_9d15_3b34be5c1e93"] * u[14] * u[17] - p["reaction_mwc5f121dc_d27d_4c3d_90f2_67d0adaf144a_mw7ae1ee96_563e_4684_bc9a_8f4ef373620e"] * u[20] reaction_mw23a29b42_9813_4e46_b8ae_966e3215e6dc = p["reaction_mw23a29b42_9813_4e46_b8ae_966e3215e6dc_mwbc5340b6_06b7_4081_bd0c_e7a397f06a92"] * u[11] * u[20] - p["reaction_mw23a29b42_9813_4e46_b8ae_966e3215e6dc_mw0df80c0e_c32b_4f90_99bd_e8f90e4c8109"] * u[18] reaction_mw0e459167_515b_4c4d_8b67_bf0a5b3e9d61 = p["reaction_mw0e459167_515b_4c4d_8b67_bf0a5b3e9d61_mwc585e0e4_b7e7_4290_8a6d_10fcd9759a2d"] * u[5] * u[14] - p["reaction_mw0e459167_515b_4c4d_8b67_bf0a5b3e9d61_mwf44d37d0_fe7f_4e47_bf10_1e734fbc3391"] * u[21] reaction_mwc52e0f9b_1e0c_46ca_8d18_f05ef4a080cb = p["reaction_mwc52e0f9b_1e0c_46ca_8d18_f05ef4a080cb_mw3d564c3c_aa54_4c16_90be_662cfcbf8bc8"] * u[21] * u[9] - p["reaction_mwc52e0f9b_1e0c_46ca_8d18_f05ef4a080cb_mw371642bb_3836_4ded_93a5_68fa9b464896"] * u[22] reaction_mw4f89bf6c_8691_41a6_a1ac_13e6aa8c4b93 = p["reaction_mw4f89bf6c_8691_41a6_a1ac_13e6aa8c4b93_mw736e4a7b_4a25_4d32_b96b_b088e3bd41e7"] * u[22] reaction_mw35f71989_f89b_4440_b1a4_ebc7b4cc18b2 = p["reaction_mw35f71989_f89b_4440_b1a4_ebc7b4cc18b2_mw084cd67b_f328_48a7_8e16_1d6256c8c137"] * u[21] * u[17] - p["reaction_mw35f71989_f89b_4440_b1a4_ebc7b4cc18b2_mw43f177dc_f522_4dd1_b8e5_21b2b8fdfdba"] * u[23] reaction_mwd0d92dd4_81b7_4385_bfd7_5de82e193ecd = p["reaction_mwd0d92dd4_81b7_4385_bfd7_5de82e193ecd_mwfa6a58ab_0ca5_4c05_92b0_870593ac135d"] * u[5] * u[20] - p["reaction_mwd0d92dd4_81b7_4385_bfd7_5de82e193ecd_mwb9547c37_09b7_4258_95ab_8039d4088298"] * u[23] reaction_mwbb77e3d6_6065_4344_9361_e30c03514f4e = p["reaction_mwbb77e3d6_6065_4344_9361_e30c03514f4e_mw7e09242b_bd80_4af0_90c8_e0cddace89fe"] * u[18] * u[25] - p["reaction_mwbb77e3d6_6065_4344_9361_e30c03514f4e_mw2dfc8a19_1792_4e12_af38_8bfbda31a577"] * u[26] reaction_mw921ee820_1dbb_4b5f_866c_87da620d8f89 = p["reaction_mw921ee820_1dbb_4b5f_866c_87da620d8f89_mw553c0b3c_af7f_4309_8c61_0f1e2c32347c"] * u[27] reaction_mw0bcfad86_59b9_42ff_bcb7_fbb44845049d = p["reaction_mw0bcfad86_59b9_42ff_bcb7_fbb44845049d_mwfc146e94_8070_4727_8416_fb55829068cb"] * u[26] reaction_mwe9b50ac7_dac3_4eba_b1db_b3fd392d8fb7 = p["reaction_mwe9b50ac7_dac3_4eba_b1db_b3fd392d8fb7_mw26688d02_8ab9_4123_89c4_022b981cb72c"] * u[28] reaction_mw934c3638_603e_4ff0_a763_68f9405fa01f = p["reaction_mw934c3638_603e_4ff0_a763_68f9405fa01f_mw5639395a_a5cd_46dd_81b8_30fe72400a2e"] * u[23] * u[25] - p["reaction_mw934c3638_603e_4ff0_a763_68f9405fa01f_mw9cc637fe_d9ca_47d2_a4dc_66009d458094"] * u[28] reaction_mw3c617363_649b_4460_a694_36f7a3127a62 = p["reaction_mw3c617363_649b_4460_a694_36f7a3127a62_mw19173345_925d_427b_8658_add0978e5931"] * u[27] * u[29] - p["reaction_mw3c617363_649b_4460_a694_36f7a3127a62_mw9f6790d7_19ce_41d9_b4de_a1658c047501"] * 
u[30] reaction_mwf31259aa_32b7_4104_be70_045297b9a512 = p["reaction_mwf31259aa_32b7_4104_be70_045297b9a512_mw23e16d40_acbb_4658_a336_be5d0b0dd86a"] * u[30] reaction_mw0a51fbf0_409b_4b45_b4ac_0220af4c4e3c = p["reaction_mw0a51fbf0_409b_4b45_b4ac_0220af4c4e3c_mw10c97b8e_72aa_4f56_b3b9_c94baad7e213"] * u[5] * u[29] - p["reaction_mw0a51fbf0_409b_4b45_b4ac_0220af4c4e3c_mw0b6eb5f7_b133_4b3d_bf15_9fd6c2e9332d"] * u[31] reaction_mw33baddbd_a23f_45bb_b126_0ba60bbf6c53 = p["reaction_mw33baddbd_a23f_45bb_b126_0ba60bbf6c53_mwe483687f_b591_4c42_9abc_7ea9f47470bf"] * u[31] * u[27] - p["reaction_mw33baddbd_a23f_45bb_b126_0ba60bbf6c53_mwcf964aba_9db6_46c5_b687_beafc5d89169"] * u[32] reaction_mw652570eb_c9d3_499b_b877_61d360b10980 = p["reaction_mw652570eb_c9d3_499b_b877_61d360b10980_mwb881f20a_cf8a_493a_aa84_59ee90f26dd9"] * u[32] reaction_mwc5aae1f8_52e4_4bcd_b044_3768f90b7b19 = p["reaction_mwc5aae1f8_52e4_4bcd_b044_3768f90b7b19_mwb4c6ed27_c7ec_438f_bafd_4a09a9f356f1"] * u[31] * u[9] - p["reaction_mwc5aae1f8_52e4_4bcd_b044_3768f90b7b19_mwba77a9ba_078d_4ec6_a8b8_d7042a2cefe7"] * u[33] reaction_mw642ac312_2ee7_4e66_8f3e_e2da2bb6412a = p["reaction_mw642ac312_2ee7_4e66_8f3e_e2da2bb6412a_mwe1743f7b_ca2c_47d4_91d7_aed2748d98c5"] * u[33] reaction_mw584a64d0_560a_4297_9882_80cb4eff73f3 = p["reaction_mw584a64d0_560a_4297_9882_80cb4eff73f3_mw9f1dbbe6_8aa3_4180_bcea_04343649d7ba"] * u[34] * u[27] - p["reaction_mw584a64d0_560a_4297_9882_80cb4eff73f3_mwdf20ff60_f0b7_4c2a_b393_586ec1337e67"] * u[35] reaction_mw42c97708_4f85_45a8_9141_d0ae529409ca = p["reaction_mw42c97708_4f85_45a8_9141_d0ae529409ca_mw91f2ca92_9556_4fb8_ae12_0b72f3e3f261"] * u[35] reaction_mwaa65a34e_fabf_4d6d_ae0b_f1d08b068f33 = p["reaction_mwaa65a34e_fabf_4d6d_ae0b_f1d08b068f33_mw77c60377_28ae_4aad_b911_5768fc8b824f"] * u[36] * u[37] - p["reaction_mwaa65a34e_fabf_4d6d_ae0b_f1d08b068f33_mw2eed2db0_ba78_435b_b2c8_ee91efdba1b4"] * u[38] reaction_mw1bd186cf_4762_480a_b70d_d7a775462398 = p["reaction_mw1bd186cf_4762_480a_b70d_d7a775462398_mw7e974605_8d9c_4250_8f69_072aab1f24f7"] * u[38] reaction_mwf5573ddf_ad7f_478a_a784_557a9cddaaf2 = p["reaction_mwf5573ddf_ad7f_478a_a784_557a9cddaaf2_mw11cdaca9_941c_4a59_ba2a_3bfeafb65aeb"] * u[36] * u[39] - p["reaction_mwf5573ddf_ad7f_478a_a784_557a9cddaaf2_mw58c37b3e_91e7_445e_846e_77cd0b2320af"] * u[40] reaction_mwb49058ff_2997_4187_abe7_4dce4ccf6ff4 = p["reaction_mwb49058ff_2997_4187_abe7_4dce4ccf6ff4_mw432640ec_11b9_484d_ba26_415538ab9a10"] * u[40] reaction_mw8301b154_9463_4516_b4c5_c8f8b68691fe = p["reaction_mw8301b154_9463_4516_b4c5_c8f8b68691fe_mw11bb74b8_d908_46f0_ac4d_06e8dd1aa5ae"] * u[41] * u[42] - p["reaction_mw8301b154_9463_4516_b4c5_c8f8b68691fe_mwb44117f5_20b2_495e_adf3_3467cd119fd6"] * u[43] reaction_mwf95f743d_6108_49fe_8ffd_bdcc1a9f9a8d = p["reaction_mwf95f743d_6108_49fe_8ffd_bdcc1a9f9a8d_mwa4c71b8d_fb74_465b_b76e_cec4e4c95484"] * u[43] reaction_mw51d9d6b8_f0c0_4763_9d11_9be61b5cf5c9 = p["reaction_mw51d9d6b8_f0c0_4763_9d11_9be61b5cf5c9_mwc40b3165_cc16_4f78_86b5_e34f2731dcbb"] * u[41] * u[44] - p["reaction_mw51d9d6b8_f0c0_4763_9d11_9be61b5cf5c9_mw8bff2fe0_b582_4020_8f05_83f14451b1c0"] * u[45] reaction_mw6fd24d16_f57d_46c6_82f5_3f00759fa16b = p["reaction_mw6fd24d16_f57d_46c6_82f5_3f00759fa16b_mw3d07dc22_f821_49a5_9712_820ba9592353"] * u[45] reaction_mw9c208e18_c70d_4231_af0b_ad17cd0bba2d = p["reaction_mw9c208e18_c70d_4231_af0b_ad17cd0bba2d_mwa8f70790_9f44_4548_988e_49d13016d2f1"] * u[36] * u[47] - p["reaction_mw9c208e18_c70d_4231_af0b_ad17cd0bba2d_mwaad540b6_783e_4576_8862_ad522fd897db"] * u[48] 
reaction_mw87711dc1_43d7_40fc_b9e9_a24e2f92419d = p["reaction_mw87711dc1_43d7_40fc_b9e9_a24e2f92419d_mwfbc395b5_05b8_4e27_9696_c3ba52edaf74"] * u[48] reaction_mw4b445876_bdce_42d0_867b_fd3c74128a6b = p["reaction_mw4b445876_bdce_42d0_867b_fd3c74128a6b_mwc489f472_68ce_44e7_aad1_f8d2f6dda4ff"] * u[41] * u[49] - p["reaction_mw4b445876_bdce_42d0_867b_fd3c74128a6b_mw56f1bdc0_66fd_47c0_806a_beeaf123e2f2"] * u[50] reaction_mw40950d59_1012_4361_8418_73e25758e367 = p["reaction_mw40950d59_1012_4361_8418_73e25758e367_mwa17c895f_29d8_4977_a99f_cf9bf6216785"] * u[50] reaction_mwbfa79c95_487d_4c6f_b437_9e579451a419 = p["reaction_mwbfa79c95_487d_4c6f_b437_9e579451a419_mwafd23622_952d_44b3_a437_4aa12422add7"] * u[39] * u[49] - p["reaction_mwbfa79c95_487d_4c6f_b437_9e579451a419_mw9d9a7d08_b19a_44f1_a806_151597049345"] * u[51] reaction_mwa4b69c77_6226_46da_b78c_3e6027d0be41 = p["reaction_mwa4b69c77_6226_46da_b78c_3e6027d0be41_mwac85fd83_4e73_43f1_9c42_01773349d50f"] * u[51] reaction_mwf8bb22e2_5aa3_4c25_a022_a266b1856a48 = p["reaction_mwf8bb22e2_5aa3_4c25_a022_a266b1856a48_mwd23d026b_c5b7_4742_aab9_b9beb18ec9bc"] * u[46] * u[52] - p["reaction_mwf8bb22e2_5aa3_4c25_a022_a266b1856a48_mwf4c4d7a7_1498_4f6c_9d72_cd5cb012146c"] * u[54] reaction_mw61305f93_7b2d_4a2d_8d16_f7be026d8671 = p["reaction_mw61305f93_7b2d_4a2d_8d16_f7be026d8671_mwe3e5abe4_9f92_43eb_92e4_cea771f5bf14"] * u[54] reaction_mwcc31b497_6c50_446c_bbc2_6c5739507252 = p["reaction_mwcc31b497_6c50_446c_bbc2_6c5739507252_mwa617804d_95cc_4197_a39b_264a2c66b5a3"] * u[53] reaction_mw1d8c2435_bb85_4352_a25f_82033250579e = p["reaction_mw1d8c2435_bb85_4352_a25f_82033250579e_mw254868f8_c9fb_493c_bc1d_807cc83c18e6"] * u[44] * u[52] - p["reaction_mw1d8c2435_bb85_4352_a25f_82033250579e_mw78a41659_4abc_4614_9e83_38cbfe1c5262"] * u[53] reaction_mw8dec1159_1925_45d9_af25_3cb709a5017c = p["reaction_mw8dec1159_1925_45d9_af25_3cb709a5017c_mwbc2119ce_ade3_4e2a_a3bc_a29cd77adf72"] * u[46] * u[18] - p["reaction_mw8dec1159_1925_45d9_af25_3cb709a5017c_mw54b0e5e9_710f_438e_a8d3_749c594667bc"] * u[55] reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730 = p["reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730_mw1ddaf9f4_dcab_4dc2_a6fa_5ce85b9d7a3a"] * u[55] reaction_mwa5c135b4_77e2_4411_98e1_2000c39d4b30 = p["reaction_mwa5c135b4_77e2_4411_98e1_2000c39d4b30_mw60892818_7ef4_4f65_8003_9700a708c66c"] * u[46] * u[23] - p["reaction_mwa5c135b4_77e2_4411_98e1_2000c39d4b30_mw6843d346_6e9f_43d5_97f6_1059f164aa16"] * u[57] reaction_mw4685274a_2b55_429f_927f_3fd863592af6 = p["reaction_mw4685274a_2b55_429f_927f_3fd863592af6_mwdaa378da_64fe_4ea4_b79d_c25733837b9f"] * u[57] reaction_mw8e331e43_16b4_478d_880b_d5a3244540e4 = p["reaction_mw8e331e43_16b4_478d_880b_d5a3244540e4_mw3f5e2165_9bb6_4ac3_992e_50943dd2ea05"] * u[56] reaction_mw47dee769_daa0_4af4_978a_5ab17e504c2f = p["reaction_mw47dee769_daa0_4af4_978a_5ab17e504c2f_mwe49ede89_014e_40f2_acfd_0d1a0cd11fe7"] * u[58] reaction_mwbd8a133e_1b70_44e8_bef8_78b14141166b = p["reaction_mwbd8a133e_1b70_44e8_bef8_78b14141166b_mw90873203_7a5d_4fca_a789_5e989ff0c999"] * u[18] * u[6] - p["reaction_mwbd8a133e_1b70_44e8_bef8_78b14141166b_mw92d81b3b_fa59_4637_8540_8cb8482490d9"] * u[19] reaction_mw3a87ca5a_845d_4ac4_8806_e343cbbfc630 = p["reaction_mw3a87ca5a_845d_4ac4_8806_e343cbbfc630_mwcc2a950d_261b_4fd7_9c08_9f3c194ba09d"] * u[19] * u[60] - p["reaction_mw3a87ca5a_845d_4ac4_8806_e343cbbfc630_mw1351daea_68be_404a_b7b0_105920ff3371"] * u[59] reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657 = 
p["reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657_mwc6b3c76f_af7b_488c_8751_28f1d9ab90a1"] * u[59] reaction_mw6bee0112_92dc_4169_9109_2633772b3aa4 = p["reaction_mw6bee0112_92dc_4169_9109_2633772b3aa4_mwf9c81339_e73a_45b5_a714_0854b718d44f"] * u[23] * u[6] - p["reaction_mw6bee0112_92dc_4169_9109_2633772b3aa4_mw587125c7_6092_4627_9cdd_2415b77a8307"] * u[24] reaction_mwbac9e6ff_2df1_45eb_b3f4_4cae74c64014 = p["reaction_mwbac9e6ff_2df1_45eb_b3f4_4cae74c64014_mwa575cf96_3d57_4222_ac71_bd17006ef035"] * u[24] * u[60] - p["reaction_mwbac9e6ff_2df1_45eb_b3f4_4cae74c64014_mwf7658bc6_acb6_411e_ae2c_9d8de7738d5f"] * u[61] reaction_mweb93165f_cf03_48f1_b035_59d79e324314 = p["reaction_mweb93165f_cf03_48f1_b035_59d79e324314_mwa137184a_0eb0_4bcb_971c_8e19231b2c07"] * u[61] reaction_mw85e457d1_73f8_4236_bb61_a128d300003f = p["reaction_mw85e457d1_73f8_4236_bb61_a128d300003f_mwfa680314_051c_4b10_afc9_7e7fbee49e3f"] * u[5] * u[6] - p["reaction_mw85e457d1_73f8_4236_bb61_a128d300003f_mw97b9ab43_02ae_4e42_a524_6b781633a255"] * u[62] reaction_mw6b159c8f_eee0_4337_b711_2e230c9e2cf6 = p["reaction_mw6b159c8f_eee0_4337_b711_2e230c9e2cf6_mwcc0d3fcd_9b9e_4390_b588_e57b57d89d22"] * u[62] * u[60] - p["reaction_mw6b159c8f_eee0_4337_b711_2e230c9e2cf6_mw56f1be7e_e303_4a72_be17_5bd08e3eb1f2"] * u[63] reaction_mwc9b3b248_3290_452a_9b7c_8fdada3e6687 = p["reaction_mwc9b3b248_3290_452a_9b7c_8fdada3e6687_mw1decb177_5075_41f3_a348_ca13b8f4497e"] * u[63] reaction_mw77484632_4e33_468a_9937_24e9bfd0e17d = p["reaction_mw77484632_4e33_468a_9937_24e9bfd0e17d_mw001b8124_b461_482a_8c8e_30bffc6718f7"] * u[5] * u[64] - p["reaction_mw77484632_4e33_468a_9937_24e9bfd0e17d_mw40eca7d6_80b2_4926_9c2f_330422db0814"] * u[65] reaction_mw2c5858f3_0988_49b0_a94a_057853b84e91 = p["reaction_mw2c5858f3_0988_49b0_a94a_057853b84e91_mwf3d00ca5_89dc_4693_92ec_a47db8150144"] * u[65] - p["reaction_mw2c5858f3_0988_49b0_a94a_057853b84e91_mw91a84697_3231_4fa6_b6ff_d69ee86056dc"] * u[66] reaction_mwd3a36af9_3ccc_4bb1_9867_3b9823ba4ac8 = p["reaction_mwd3a36af9_3ccc_4bb1_9867_3b9823ba4ac8_mw901b5284_bdae_4040_b77d_10f1ec267f06"] * u[65] - p["reaction_mwd3a36af9_3ccc_4bb1_9867_3b9823ba4ac8_mw94cadd24_0432_4f89_a6fc_96cb0475c44e"] * u[5] * u[67] reaction_mw9f000f29_2512_4d4a_9dd9_e59aaf296d31 = p["reaction_mw9f000f29_2512_4d4a_9dd9_e59aaf296d31_mw688106ee_719d_4995_b1a0_faeefdb0af5a"] * u[68] * u[67] - p["reaction_mw9f000f29_2512_4d4a_9dd9_e59aaf296d31_mw85c8ff7d_8d7c_4403_8a58_4996a3e6ac28"] * u[69] reaction_mw837b5ad7_4a8c_4c55_94ff_0fdd63048044 = p["reaction_mw837b5ad7_4a8c_4c55_94ff_0fdd63048044_mw4f6f44d9_408e_49b2_bedf_d34b2448725e"] * u[69] reaction_mwd15926b3_069a_4b16_a6fc_c0c15083d621 = p["reaction_mwd15926b3_069a_4b16_a6fc_c0c15083d621_mwd3e2533f_8d57_407c_834d_e0dde30b7f4a"] * u[70] - p["reaction_mwd15926b3_069a_4b16_a6fc_c0c15083d621_mwbd416b7b_f9b6_4464_b9e8_be4ac001d13d"] * u[68] * u[64] reaction_mw3a5e0932_d50f_4fe6_b8cb_0ad649f305b0 = p["reaction_mw3a5e0932_d50f_4fe6_b8cb_0ad649f305b0_mw64664eb9_353a_4f1d_a8dc_e22bcb06e2c2"] * u[67] * u[71] - p["reaction_mw3a5e0932_d50f_4fe6_b8cb_0ad649f305b0_mw0573df9d_f365_40b7_83d4_3846a05aefdc"] * u[72] reaction_mw5dcc8719_3180_4bd0_8797_08e256131961 = p["reaction_mw5dcc8719_3180_4bd0_8797_08e256131961_mw134431c3_e8e5_4375_89a0_2c51a03d65dd"] * u[72] reaction_mw376b0685_ef73_4fcc_94af_2ada24cf8a8b = p["reaction_mw376b0685_ef73_4fcc_94af_2ada24cf8a8b_mw22510791_ef7e_4373_907c_9eecbc8adda7"] * u[74] * u[73] - p["reaction_mw376b0685_ef73_4fcc_94af_2ada24cf8a8b_mwf59d397b_cfee_4a84_9279_134cc951db8c"] * u[75] 
reaction_mwcc7cfa9c_4945_403a_938e_b237c371a5ef = p["reaction_mwcc7cfa9c_4945_403a_938e_b237c371a5ef_mwe2aded94_f2b5_4513_8670_71a86abf7968"] * u[75] * u[76] - p["reaction_mwcc7cfa9c_4945_403a_938e_b237c371a5ef_mw8d6eacb6_7184_4564_8cde_53e93add2146"] * u[77]
reaction_mw98da32e0_b061_40c5_9d32_40744134f3fa = p["reaction_mw98da32e0_b061_40c5_9d32_40744134f3fa_mw3c3648cb_6d56_4d9d_be47_129483778fd6"] * u[77]
reaction_mw31369230_1f14_45bd_be02_a44a275c6e31 = p["reaction_mw31369230_1f14_45bd_be02_a44a275c6e31_mw98405e53_330b_4a64_a700_a62bb3f21426"] * u[78] - p["reaction_mw31369230_1f14_45bd_be02_a44a275c6e31_mw11f8de84_6639_486d_bf17_8f7021f54b66"] * u[79] * u[76]
reaction_mw12311a84_3f8d_40c6_8b14_961a8a58d1b6 = p["reaction_mw12311a84_3f8d_40c6_8b14_961a8a58d1b6_mw65e1222f_39ad_4a29_ae76_04b7d591af38"] * u[79] - p["reaction_mw12311a84_3f8d_40c6_8b14_961a8a58d1b6_mw11e520e6_b1f1_4802_af71_92a2bd9cb644"] * u[80] * u[73]
reaction_mwf3d393e9_ae09_4eab_a39a_ed0eef0f54bc = p["reaction_mwf3d393e9_ae09_4eab_a39a_ed0eef0f54bc_mw6a4e035b_11a7_4155_9a78_cfba13631cb1"] * u[81]
reaction_mw2698f402_d00b_451e_8b22_93a322fe9a92 = p["reaction_mw2698f402_d00b_451e_8b22_93a322fe9a92_mw6eebbe41_cf28_46e8_930c_26f50e08d602"] * u[82] - p["reaction_mw2698f402_d00b_451e_8b22_93a322fe9a92_mw751c2663_d807_482f_991b_c8032cb6d996"] * u[74] * u[83]
reaction_mw028e8b3e_b531_4466_9c3a_e3fcf7fc9be9 = p["reaction_mw028e8b3e_b531_4466_9c3a_e3fcf7fc9be9_mwd2d0b340_bbdb_40bd_9eac_992a2a402b94"] * u[80] * u[83] - p["reaction_mw028e8b3e_b531_4466_9c3a_e3fcf7fc9be9_mwb1b46773_a218_4f99_a000_a98fbc1275d7"] * u[81]
reaction_mwc5e0c166_6a3a_4913_9ed1_dafe97bdb371 = p["reaction_mwc5e0c166_6a3a_4913_9ed1_dafe97bdb371_mw193f2553_1ab3_4b07_9b4b_201ee9e08c96"] * u[79] * u[83] - p["reaction_mwc5e0c166_6a3a_4913_9ed1_dafe97bdb371_mwb7292ff5_dd13_41aa_b9b8_2c0c75d35fb1"] * u[84]
reaction_mw94b3bae0_4da9_4358_a5ac_a46a5cbf621b = p["reaction_mw94b3bae0_4da9_4358_a5ac_a46a5cbf621b_mwf4069175_b898_4633_ac1e_20f44431c36a"] * u[84]
reaction_mw362ca1b3_224a_42fb_a14b_6ff467748a5e = p["reaction_mw362ca1b3_224a_42fb_a14b_6ff467748a5e_mw6d852e8c_c64a_4926_80c4_781a9c04b20e"] * u[85] - p["reaction_mw362ca1b3_224a_42fb_a14b_6ff467748a5e_mw4d614bfc_3e20_450e_8890_6326afd0a0d7"] * u[75] * u[83]
reaction_mw3994e898_7232_4b70_9c58_b3476e8655f5 = p["reaction_mw3994e898_7232_4b70_9c58_b3476e8655f5_mw3676a900_b098_4a74_a511_e15984ca0cd2"] * u[78] * u[83] - p["reaction_mw3994e898_7232_4b70_9c58_b3476e8655f5_mwf68a0726_94b5_4be1_933f_1ac48053601d"] * u[86]
reaction_mw75acd2d1_3fdf_4c3f_8d99_6d62f825d5e2 = p["reaction_mw75acd2d1_3fdf_4c3f_8d99_6d62f825d5e2_mwb4f0353c_d140_44cc_ab75_566fcc2909c5"] * u[86]
reaction_mw4a334f7d_9bce_4690_b623_a427ed66a174 = p["reaction_mw4a334f7d_9bce_4690_b623_a427ed66a174_mw6165953d_ce44_4b21_a18a_c401c04993f1"] * u[87] - p["reaction_mw4a334f7d_9bce_4690_b623_a427ed66a174_mw99a30aef_212a_4577_bcfd_8c5764057cca"] * u[77] * u[83]
reaction_mw950485f2_4463_4309_a4e4_cc81d16ffb7f = p["reaction_mw950485f2_4463_4309_a4e4_cc81d16ffb7f_mw94b0216f_3353_4b36_b9b7_fd34a0510b08"] * u[88] * u[36] / (p["reaction_mw950485f2_4463_4309_a4e4_cc81d16ffb7f_mw2034bbe7_27cc_410c_9870_1f8a5986dfa5"] + u[36])
reaction_mw62f71309_e066_47d2_9b99_01f78a51c218 = p["reaction_mw62f71309_e066_47d2_9b99_01f78a51c218_mw0cea56f3_1cdb_410e_a5a4_f3635ba5c94b"] * u[89]
reaction_mwe8647e48_f4a9_40f4_9b32_f89ded572e01 = p["reaction_mwe8647e48_f4a9_40f4_9b32_f89ded572e01_mw50a0e884_a88c_46a7_b985_788868bc1029"] * u[5] * u[90] -
p["reaction_mwe8647e48_f4a9_40f4_9b32_f89ded572e01_mw2c88e0e2_e9c3_4e4c_bb2e_b0cd1f6420f4"] * u[91] reaction_mw65b9e026_bc6c_4c94_8b37_8b9acdf50c8a = p["reaction_mw65b9e026_bc6c_4c94_8b37_8b9acdf50c8a_mw95e2190d_8e39_419b_ad26_7cc141f7b87b"] * u[91] reaction_mw1c9d29fa_bff4_4d2f_9d5f_f1791e4882a3 = p["reaction_mw1c9d29fa_bff4_4d2f_9d5f_f1791e4882a3_mw76d68ace_272d_4178_bba2_74dfdf260c70"] * u[5] * u[92] - p["reaction_mw1c9d29fa_bff4_4d2f_9d5f_f1791e4882a3_mwe37b936f_7781_4a01_b59b_96bd7db0c49e"] * u[93] reaction_mwad97bd5a_3dae_49d9_990b_2e6574740618 = p["reaction_mwad97bd5a_3dae_49d9_990b_2e6574740618_mwb6701ead_d3f2_4eb3_8b08_341cea49a4b2"] * u[92] * u[94] - p["reaction_mwad97bd5a_3dae_49d9_990b_2e6574740618_mwa5016035_3f9f_44fc_9f69_1d7a0155eb36"] * u[95] reaction_mwe9988e4a_083c_4f8e_b154_3e599c9307b0 = p["reaction_mwe9988e4a_083c_4f8e_b154_3e599c9307b0_mw26164d03_adda_4a21_b5ac_59e1d5a8d8ab"] * u[95] reaction_mwf8bacf1a_6c1a_49b6_b344_2d3bd404a735 = p["reaction_mwf8bacf1a_6c1a_49b6_b344_2d3bd404a735_mw9fe16c2b_7271_4e4f_b6de_c149721a3198"] * u[92] * u[92] - p["reaction_mwf8bacf1a_6c1a_49b6_b344_2d3bd404a735_mw74ea5b55_ead0_4b6f_8da0_fd1dcf7e231d"] * u[97] reaction_mwc9b945cf_3a14_4bd9_b253_7064498c75e2 = p["reaction_mwc9b945cf_3a14_4bd9_b253_7064498c75e2_mw8cbe6595_6f16_4704_afe2_0dd043a175fa"] * u[97] * u[94] - p["reaction_mwc9b945cf_3a14_4bd9_b253_7064498c75e2_mw21d22acd_ddd4_4794_9700_52201984f75b"] * u[96] reaction_mw75c6078f_fb76_4ca9_9fdd_e221e3ba57ad = p["reaction_mw75c6078f_fb76_4ca9_9fdd_e221e3ba57ad_mw81384973_14a0_4498_ab21_f70666d46d7f"] * u[96] reaction_mw177fa7b0_f0be_4c3e_8b47_2ac4e13159a2 = p["reaction_mw177fa7b0_f0be_4c3e_8b47_2ac4e13159a2_mw9f1a7f64_0b37_42df_9dd5_e1a44efdcbba"] * u[90] * u[92] - p["reaction_mw177fa7b0_f0be_4c3e_8b47_2ac4e13159a2_mw366e6f17_4081_4cdc_9fa5_0aeb354d692c"] * u[98] reaction_mwec4127b5_6bcf_4128_aff4_a6b3c470f690 = p["reaction_mwec4127b5_6bcf_4128_aff4_a6b3c470f690_mw1df2caba_8e41_4fe5_a1b5_7777eb98ed1c"] * u[97] reaction_mw5c806b00_59a1_491e_99a1_2c932b2d5d7a = p["reaction_mw5c806b00_59a1_491e_99a1_2c932b2d5d7a_mw5a798f7a_b4eb_4a27_b413_4ff3956b90e9"] * u[100] * u[100] - p["reaction_mw5c806b00_59a1_491e_99a1_2c932b2d5d7a_mw54178365_18c1_47e0_94ee_6b96582c52ef"] * u[99] reaction_mw26fdabae_323b_4a78_b134_4c2eb70ea6a7 = p["reaction_mw26fdabae_323b_4a78_b134_4c2eb70ea6a7_mw1ff4e75e_fce5_4a7a_907b_05df4981f80b"] * u[99] * u[101] - p["reaction_mw26fdabae_323b_4a78_b134_4c2eb70ea6a7_mw8b269d52_eda9_4dd1_8616_ebcf29c971fa"] * u[102] reaction_mw3b0c171c_6d60_41ca_8193_83cd5e6c188c = p["reaction_mw3b0c171c_6d60_41ca_8193_83cd5e6c188c_mw90b25c4b_ad1a_4ee5_ae20_c60451484516"] * u[102] reaction_mwc38a99c8_74cf_49f2_a16b_f6610ca1a0a7 = p["reaction_mwc38a99c8_74cf_49f2_a16b_f6610ca1a0a7_mwa0806e7a_a90d_4187_9c37_6d9ea569a447"] * u[104] * u[100] - p["reaction_mwc38a99c8_74cf_49f2_a16b_f6610ca1a0a7_mw95cb9071_56e2_447d_b7c7_59ac96baa623"] * u[103] reaction_mw45d92b79_0656_4795_87d0_7a465949ca43 = p["reaction_mw45d92b79_0656_4795_87d0_7a465949ca43_mwba545ecf_c7d4_4a6c_8c47_9e91f052d5a9"] * u[100] * u[101] - p["reaction_mw45d92b79_0656_4795_87d0_7a465949ca43_mw01c5ceef_57a1_4baa_b2cd_fd39e9588a10"] * u[105] reaction_mwb71945c2_03a8_4fad_a995_e1caeee98525 = p["reaction_mwb71945c2_03a8_4fad_a995_e1caeee98525_mw7aba6db3_c7ec_4192_bb5e_0ac4b466c1a5"] * u[105] reaction_mwd189238c_e8f9_40be_b4ea_18a42bba1b4f = p["reaction_mwd189238c_e8f9_40be_b4ea_18a42bba1b4f_mw31eb851a_c381_419d_b694_f158b7f5cfb6"] * u[104] reaction_mwcb637bf1_7618_4d8a_ab5c_399145ecf1df = 
p["reaction_mwcb637bf1_7618_4d8a_ab5c_399145ecf1df_mwe09b67b9_0d2a_4b82_91ef_5284216beb94"] * u[91] * u[6] - p["reaction_mwcb637bf1_7618_4d8a_ab5c_399145ecf1df_mw77a6c207_ff8c_463c_9b4e_8a7d96652b79"] * u[106] reaction_mw401dde7e_c0a1_4780_b6cc_8f98681c862e = p["reaction_mw401dde7e_c0a1_4780_b6cc_8f98681c862e_mw1df53838_48e5_4331_9084_3790409ad5ff"] * u[106] * u[60] - p["reaction_mw401dde7e_c0a1_4780_b6cc_8f98681c862e_mwe4573b2c_5f99_40d0_9f9e_c238caa5ccbe"] * u[107] reaction_mw0dd5a91d_d76c_494e_9dd6_57f2836aaa19 = p["reaction_mw0dd5a91d_d76c_494e_9dd6_57f2836aaa19_mw8ed5885f_774e_48a0_9338_fe8cdd512023"] * u[107] reaction_mwb205f533_4013_406b_8a4b_691ec3949555 = p["reaction_mwb205f533_4013_406b_8a4b_691ec3949555_mwa6ef5f75_f152_414d_811c_dd037d4b3ca1"] * u[65] * u[6] - p["reaction_mwb205f533_4013_406b_8a4b_691ec3949555_mwee51df1b_3f69_43f8_a1d5_5a8c5d0215f2"] * u[108] reaction_mw602726ea_89ee_41b8_bda6_e2811bb42c1d = p["reaction_mw602726ea_89ee_41b8_bda6_e2811bb42c1d_mw2e0b4751_7227_4815_bf6f_fa5e2370b1d3"] * u[108] * u[60] - p["reaction_mw602726ea_89ee_41b8_bda6_e2811bb42c1d_mwa8eec8e9_74b9_4afc_b6db_1116fe48e858"] * u[109] reaction_mwfab3a9ec_b094_44f0_bd59_12ac56ca1c99 = p["reaction_mwfab3a9ec_b094_44f0_bd59_12ac56ca1c99_mwc3426c7e_3452_4507_9189_4b83ab147bdd"] * u[109] reaction_mw4fceada8_6eb0_4230_a083_b2ab094d2961 = p["reaction_mw4fceada8_6eb0_4230_a083_b2ab094d2961_mw9cafad09_6002_46e1_8336_bb91c3716d70"] * u[73] # Species: id = mwe2fff28d_182c_4a1c_9882_f17774c0958a; name = EGF; affected by kineticLaw du[1] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwa67e40c1_693d_4214_adc8_b2f2b71cef12)) # Species: id = mw93907b2d_53db_4080_9e3f_3eb304441ab9; name = EGFR; affected by kineticLaw du[2] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwa67e40c1_693d_4214_adc8_b2f2b71cef12) + (1.0 * reaction_mw47dee769_daa0_4af4_978a_5ab17e504c2f)) # Species: id = mw7eacabf9_d68c_491a_aba2_ec0809a8ecc8; name = EGF-EGFR; affected by kineticLaw du[3] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwa67e40c1_693d_4214_adc8_b2f2b71cef12) + (-1.0 * reaction_mw877cd1e3_b48b_42e8_ab23_682dd893fd9d) + (-1.0 * reaction_mw877cd1e3_b48b_42e8_ab23_682dd893fd9d)) # Species: id = mwa8f2e7b2_0927_4ab4_a817_dddc43bb4fa3; name = EGF-EGFR2; affected by kineticLaw du[4] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw877cd1e3_b48b_42e8_ab23_682dd893fd9d) + (-1.0 * reaction_mw413c6d45_ab23_4d3e_87b3_a8ed4629b923) + (1.0 * reaction_mw9544e67b_b6d0_4941_b7e0_ecd4f400a335) + (1.0 * reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6) + (1.0 * reaction_mw4f89bf6c_8691_41a6_a1ac_13e6aa8c4b93) + (1.0 * reaction_mw642ac312_2ee7_4e66_8f3e_e2da2bb6412a)) # Species: id = mwbfcf6773_1915_432c_b1d2_1f246094cc74; name = pEGF-EGFR2; affected by kineticLaw du[5] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw413c6d45_ab23_4d3e_87b3_a8ed4629b923) + (-1.0 * reaction_mwf61e086d_0345_4d4c_b91d_0b105e543d04) + (-1.0 * reaction_mw91f49311_efdc_47c6_b8b8_a619e042d644) + (1.0 * reaction_mw486c5261_3d03_4589_a1e9_978b62ad2dfe) + (-1.0 * reaction_mw0e459167_515b_4c4d_8b67_bf0a5b3e9d61) + (-1.0 * reaction_mwd0d92dd4_81b7_4385_bfd7_5de82e193ecd) + (-1.0 * reaction_mw0a51fbf0_409b_4b45_b4ac_0220af4c4e3c) + (1.0 * reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730) + (1.0 * reaction_mw4685274a_2b55_429f_927f_3fd863592af6) + (-1.0 * 
reaction_mw85e457d1_73f8_4236_bb61_a128d300003f) + (-1.0 * reaction_mw77484632_4e33_468a_9937_24e9bfd0e17d) + (1.0 * reaction_mwd3a36af9_3ccc_4bb1_9867_3b9823ba4ac8) + (-1.0 * reaction_mwe8647e48_f4a9_40f4_9b32_f89ded572e01) + (1.0 * reaction_mw65b9e026_bc6c_4c94_8b37_8b9acdf50c8a) + (-1.0 * reaction_mw1c9d29fa_bff4_4d2f_9d5f_f1791e4882a3)) # Species: id = mw19122f7d_f92e_4dc0_922f_6b681db65b0b; name = cbl; affected by kineticLaw du[6] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwbd8a133e_1b70_44e8_bef8_78b14141166b) + (1.0 * reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657) + (-1.0 * reaction_mw6bee0112_92dc_4169_9109_2633772b3aa4) + (1.0 * reaction_mweb93165f_cf03_48f1_b035_59d79e324314) + (-1.0 * reaction_mw85e457d1_73f8_4236_bb61_a128d300003f) + (1.0 * reaction_mwc9b3b248_3290_452a_9b7c_8fdada3e6687) + (-1.0 * reaction_mwcb637bf1_7618_4d8a_ab5c_399145ecf1df) + (1.0 * reaction_mw0dd5a91d_d76c_494e_9dd6_57f2836aaa19) + (-1.0 * reaction_mwb205f533_4013_406b_8a4b_691ec3949555)) # Species: id = mw3c2e1b43_29ca_491a_93e9_c723a993d6fb; name = Shc; affected by kineticLaw du[7] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwf61e086d_0345_4d4c_b91d_0b105e543d04) + (1.0 * reaction_mweda6a945_fb5d_4d99_9958_11b2b2840308) + (1.0 * reaction_mwd4bf58ea_70c9_43ea_a831_1fcde130ba28)) # Species: id = mw5198d3c2_879c_4f0d_b4f8_cd40efe0b1cf; name = pEGF-EGFR2-Shc; affected by kineticLaw du[8] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwf61e086d_0345_4d4c_b91d_0b105e543d04) + (-1.0 * reaction_mw974c39f5_b82e_44b3_abec_7a724f46c526)) # Species: id = mwe57c3282_5935_405c_8c0b_7fadb7a5de17; name = SHP; affected by kineticLaw du[9] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw91f49311_efdc_47c6_b8b8_a619e042d644) + (1.0 * reaction_mw9544e67b_b6d0_4941_b7e0_ecd4f400a335) + (-1.0 * reaction_mw2cf8a809_63d8_4717_91fc_070516e6f3db) + (1.0 * reaction_mweda6a945_fb5d_4d99_9958_11b2b2840308) + (-1.0 * reaction_mw03998474_934b_4e4a_8c0c_ca359e402ac2) + (1.0 * reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6) + (-1.0 * reaction_mwc52e0f9b_1e0c_46ca_8d18_f05ef4a080cb) + (1.0 * reaction_mw4f89bf6c_8691_41a6_a1ac_13e6aa8c4b93) + (-1.0 * reaction_mwc5aae1f8_52e4_4bcd_b044_3768f90b7b19) + (1.0 * reaction_mw642ac312_2ee7_4e66_8f3e_e2da2bb6412a)) # Species: id = mw954e8fcb_ac0a_459d_8878_f19080208a17; name = pEGF-EGFR2-SHP2; affected by kineticLaw du[10] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw91f49311_efdc_47c6_b8b8_a619e042d644) + (-1.0 * reaction_mw9544e67b_b6d0_4941_b7e0_ecd4f400a335)) # Species: id = mwa98802cb_c977_4fe0_9e67_5000904c2c36; name = pEGF-EGFR2-pShc; affected by kineticLaw du[11] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw974c39f5_b82e_44b3_abec_7a724f46c526) + (-1.0 * reaction_mw486c5261_3d03_4589_a1e9_978b62ad2dfe) + (-1.0 * reaction_mw4817365e_a33b_451f_bee1_de748377ede2) + (-1.0 * reaction_mw23a29b42_9813_4e46_b8ae_966e3215e6dc)) # Species: id = mwa0349407_8187_48fc_9e94_5698ccc4e06d; name = pShc; affected by kineticLaw du[12] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw486c5261_3d03_4589_a1e9_978b62ad2dfe) + (-1.0 * reaction_mw2cf8a809_63d8_4717_91fc_070516e6f3db) + (-1.0 * reaction_mwd4bf58ea_70c9_43ea_a831_1fcde130ba28) + (1.0 * reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6) + (1.0 * 
reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730) + (1.0 * reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657)) # Species: id = mwf9999977_6f0e_4e35_9b73_75587f3448e9; name = pShc-SHP2; affected by kineticLaw du[13] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw2cf8a809_63d8_4717_91fc_070516e6f3db) + (-1.0 * reaction_mweda6a945_fb5d_4d99_9958_11b2b2840308)) # Species: id = mwf430a579_ecbf_48ba_80c2_06e455808f2a; name = Grb2; affected by kineticLaw du[14] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw4817365e_a33b_451f_bee1_de748377ede2) + (1.0 * reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6) + (-1.0 * reaction_mwc5f121dc_d27d_4c3d_90f2_67d0adaf144a) + (-1.0 * reaction_mw0e459167_515b_4c4d_8b67_bf0a5b3e9d61) + (1.0 * reaction_mw4f89bf6c_8691_41a6_a1ac_13e6aa8c4b93) + (1.0 * reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730) + (1.0 * reaction_mw4685274a_2b55_429f_927f_3fd863592af6)) # Species: id = mw504578d8_96c3_471f_8a7e_8c14e7535d3d; name = pEGF-EGFR2-pShc-Grb2; affected by kineticLaw du[15] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw4817365e_a33b_451f_bee1_de748377ede2) + (-1.0 * reaction_mw03998474_934b_4e4a_8c0c_ca359e402ac2) + (-1.0 * reaction_mwd9262331_e35a_4614_943a_89bcf8a492e3)) # Species: id = mw45ab688a_6467_4a3e_a779_2118fa84d69e; name = pEGF-EGFR2-pShc-Grb2-SHP2; affected by kineticLaw du[16] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw03998474_934b_4e4a_8c0c_ca359e402ac2) + (-1.0 * reaction_mw7bb43f0a_c87e_41ff_8a43_cdf45c8f05e6)) # Species: id = mw9dcaa655_a755_426e_a3fa_1ad7c3c45575; name = SOS; affected by kineticLaw du[17] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwd9262331_e35a_4614_943a_89bcf8a492e3) + (-1.0 * reaction_mwc5f121dc_d27d_4c3d_90f2_67d0adaf144a) + (-1.0 * reaction_mw35f71989_f89b_4440_b1a4_ebc7b4cc18b2) + (1.0 * reaction_mw8e331e43_16b4_478d_880b_d5a3244540e4)) # Species: id = mwfbda4e09_0cbb_49bc_ae69_f88b7a79ed21; name = pEGF-EGFR2-pShc-Grb2-SOS; affected by kineticLaw du[18] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwd9262331_e35a_4614_943a_89bcf8a492e3) + (1.0 * reaction_mw23a29b42_9813_4e46_b8ae_966e3215e6dc) + (-1.0 * reaction_mwbb77e3d6_6065_4344_9361_e30c03514f4e) + (1.0 * reaction_mw0bcfad86_59b9_42ff_bcb7_fbb44845049d) + (-1.0 * reaction_mw8dec1159_1925_45d9_af25_3cb709a5017c) + (-1.0 * reaction_mwbd8a133e_1b70_44e8_bef8_78b14141166b)) # Species: id = mwb1bc2058_e6d8_4680_9e6c_d27bb366cde0; name = pEGF-EGFR2-pShc-Grb2-SOS-cbl; affected by kineticLaw du[19] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwbd8a133e_1b70_44e8_bef8_78b14141166b) + (-1.0 * reaction_mw3a87ca5a_845d_4ac4_8806_e343cbbfc630)) # Species: id = mw1093b3af_1864_4ba3_a541_6009a9921282; name = Grb2-SOS; affected by kineticLaw du[20] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwc5f121dc_d27d_4c3d_90f2_67d0adaf144a) + (-1.0 * reaction_mw23a29b42_9813_4e46_b8ae_966e3215e6dc) + (-1.0 * reaction_mwd0d92dd4_81b7_4385_bfd7_5de82e193ecd) + (1.0 * reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657) + (1.0 * reaction_mweb93165f_cf03_48f1_b035_59d79e324314)) # Species: id = mwd9462e5b_a272_4b66_ab66_fde9266b1a43; name = pEGF-EGFR2-Grb2; affected by kineticLaw du[21] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * 
reaction_mw0e459167_515b_4c4d_8b67_bf0a5b3e9d61) + (-1.0 * reaction_mwc52e0f9b_1e0c_46ca_8d18_f05ef4a080cb) + (-1.0 * reaction_mw35f71989_f89b_4440_b1a4_ebc7b4cc18b2)) # Species: id = mw925b938a_fe73_4664_ba6f_e72e57780891; name = pEGF-EGFR2-Grb2-SHP2; affected by kineticLaw du[22] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwc52e0f9b_1e0c_46ca_8d18_f05ef4a080cb) + (-1.0 * reaction_mw4f89bf6c_8691_41a6_a1ac_13e6aa8c4b93)) # Species: id = mwf8cc7834_bf4f_4ccd_8235_d0890badf0f6; name = pEGF-EGFR2-Grb2-SOS; affected by kineticLaw du[23] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw35f71989_f89b_4440_b1a4_ebc7b4cc18b2) + (1.0 * reaction_mwd0d92dd4_81b7_4385_bfd7_5de82e193ecd) + (1.0 * reaction_mwe9b50ac7_dac3_4eba_b1db_b3fd392d8fb7) + (-1.0 * reaction_mw934c3638_603e_4ff0_a763_68f9405fa01f) + (-1.0 * reaction_mwa5c135b4_77e2_4411_98e1_2000c39d4b30) + (-1.0 * reaction_mw6bee0112_92dc_4169_9109_2633772b3aa4)) # Species: id = mw481cd12b_61ba_44e5_93bf_8b88c6c4a4e7; name = pEGF-EGFR2-Grb2-SOS-cbl; affected by kineticLaw du[24] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw6bee0112_92dc_4169_9109_2633772b3aa4) + (-1.0 * reaction_mwbac9e6ff_2df1_45eb_b3f4_4cae74c64014)) # Species: id = mw8f5a7b5c_ca4c_4a4c_85b1_e5d640c426bf; name = Ras-GDP; affected by kineticLaw du[25] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwbb77e3d6_6065_4344_9361_e30c03514f4e) + (1.0 * reaction_mw921ee820_1dbb_4b5f_866c_87da620d8f89) + (-1.0 * reaction_mw934c3638_603e_4ff0_a763_68f9405fa01f) + (1.0 * reaction_mwf31259aa_32b7_4104_be70_045297b9a512) + (1.0 * reaction_mw652570eb_c9d3_499b_b877_61d360b10980)) # Species: id = mwf40d6176_abfc_4a30_886f_83a19fcffc48; name = pEGF-EGFR2-pShc-Grb2-SOS-Ras-GDP; affected by kineticLaw du[26] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwbb77e3d6_6065_4344_9361_e30c03514f4e) + (-1.0 * reaction_mw0bcfad86_59b9_42ff_bcb7_fbb44845049d)) # Species: id = mwa54a9c38_c98b_45e5_8432_4119fb777e44; name = Ras-GTP; affected by kineticLaw du[27] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw921ee820_1dbb_4b5f_866c_87da620d8f89) + (1.0 * reaction_mw0bcfad86_59b9_42ff_bcb7_fbb44845049d) + (1.0 * reaction_mwe9b50ac7_dac3_4eba_b1db_b3fd392d8fb7) + (-1.0 * reaction_mw3c617363_649b_4460_a694_36f7a3127a62) + (-1.0 * reaction_mw33baddbd_a23f_45bb_b126_0ba60bbf6c53) + (-1.0 * reaction_mw584a64d0_560a_4297_9882_80cb4eff73f3) + (1.0 * reaction_mw42c97708_4f85_45a8_9141_d0ae529409ca)) # Species: id = mw28464aad_8013_4a23_ae09_a406954859a6; name = pEGF-EGFR2-Grb2-SOS-Ras-GDP; affected by kineticLaw du[28] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwe9b50ac7_dac3_4eba_b1db_b3fd392d8fb7) + (1.0 * reaction_mw934c3638_603e_4ff0_a763_68f9405fa01f)) # Species: id = mw7cff9a0e_094d_498e_bf7f_7b162c61d63a; name = Ras-GAP; affected by kineticLaw du[29] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw3c617363_649b_4460_a694_36f7a3127a62) + (1.0 * reaction_mwf31259aa_32b7_4104_be70_045297b9a512) + (-1.0 * reaction_mw0a51fbf0_409b_4b45_b4ac_0220af4c4e3c) + (1.0 * reaction_mw642ac312_2ee7_4e66_8f3e_e2da2bb6412a)) # Species: id = mwdf82303e_323f_4c51_a858_56a59233cd98; name = Ras-GTP-Ras-GAP; affected by kineticLaw du[30] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * 
reaction_mw3c617363_649b_4460_a694_36f7a3127a62) + (-1.0 * reaction_mwf31259aa_32b7_4104_be70_045297b9a512)) # Species: id = mwd39388fd_4f85_4d1c_b2a3_37857c595a2d; name = pEGF-EGFR2-Ras-GAP; affected by kineticLaw du[31] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw0a51fbf0_409b_4b45_b4ac_0220af4c4e3c) + (-1.0 * reaction_mw33baddbd_a23f_45bb_b126_0ba60bbf6c53) + (1.0 * reaction_mw652570eb_c9d3_499b_b877_61d360b10980) + (-1.0 * reaction_mwc5aae1f8_52e4_4bcd_b044_3768f90b7b19)) # Species: id = mwd7bf31ba_b05c_4c45_bb2f_6a2468a2a507; name = pEGF-EGFR2-Ras-GAP-Ras-GTP; affected by kineticLaw du[32] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw33baddbd_a23f_45bb_b126_0ba60bbf6c53) + (-1.0 * reaction_mw652570eb_c9d3_499b_b877_61d360b10980)) # Species: id = mwbf5cb039_b830_4282_aa22_a3dda6272ec1; name = pEGF-EGFR2-Ras-GAP-SHP2; affected by kineticLaw du[33] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwc5aae1f8_52e4_4bcd_b044_3768f90b7b19) + (-1.0 * reaction_mw642ac312_2ee7_4e66_8f3e_e2da2bb6412a)) # Species: id = mw66ac98c4_7e7b_4071_954d_43eb17584220; name = Raf1; affected by kineticLaw du[34] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw584a64d0_560a_4297_9882_80cb4eff73f3) + (1.0 * reaction_mw87711dc1_43d7_40fc_b9e9_a24e2f92419d)) # Species: id = mw83de7813_4941_45a6_a320_a551165bf22a; name = Raf1-Ras-GTP; affected by kineticLaw du[35] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw584a64d0_560a_4297_9882_80cb4eff73f3) + (-1.0 * reaction_mw42c97708_4f85_45a8_9141_d0ae529409ca)) # Species: id = mwaff92910_ed3d_40b9_a29c_e4866167e828; name = Raf1active; affected by kineticLaw du[36] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw42c97708_4f85_45a8_9141_d0ae529409ca) + (-1.0 * reaction_mwaa65a34e_fabf_4d6d_ae0b_f1d08b068f33) + (1.0 * reaction_mw1bd186cf_4762_480a_b70d_d7a775462398) + (-1.0 * reaction_mwf5573ddf_ad7f_478a_a784_557a9cddaaf2) + (1.0 * reaction_mwb49058ff_2997_4187_abe7_4dce4ccf6ff4) + (-1.0 * reaction_mw9c208e18_c70d_4231_af0b_ad17cd0bba2d) + (-1.0 * reaction_mw950485f2_4463_4309_a4e4_cc81d16ffb7f) + (1.0 * reaction_mw62f71309_e066_47d2_9b99_01f78a51c218)) # Species: id = mw0834731b_0477_4217_a53b_30cef851191b; name = MEK; affected by kineticLaw du[37] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwaa65a34e_fabf_4d6d_ae0b_f1d08b068f33) + (1.0 * reaction_mwa4b69c77_6226_46da_b78c_3e6027d0be41)) # Species: id = mw4628f984_eb87_4922_9760_4975095ce6eb; name = Raf1active-MEK; affected by kineticLaw du[38] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwaa65a34e_fabf_4d6d_ae0b_f1d08b068f33) + (-1.0 * reaction_mw1bd186cf_4762_480a_b70d_d7a775462398)) # Species: id = mw9b25f809_18a1_4c14_8f4b_cf18e6d93c28; name = pMEK; affected by kineticLaw du[39] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw1bd186cf_4762_480a_b70d_d7a775462398) + (-1.0 * reaction_mwf5573ddf_ad7f_478a_a784_557a9cddaaf2) + (1.0 * reaction_mw40950d59_1012_4361_8418_73e25758e367) + (-1.0 * reaction_mwbfa79c95_487d_4c6f_b437_9e579451a419)) # Species: id = mw12ba4000_d452_420c_be63_96d2848aca32; name = Raf1active-pMEK; affected by kineticLaw du[40] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * 
reaction_mwf5573ddf_ad7f_478a_a784_557a9cddaaf2) + (-1.0 * reaction_mwb49058ff_2997_4187_abe7_4dce4ccf6ff4)) # Species: id = mwf816df4c_4593_4d23_990f_0d7c15ddde5d; name = ppMEK; affected by kineticLaw du[41] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwb49058ff_2997_4187_abe7_4dce4ccf6ff4) + (-1.0 * reaction_mw8301b154_9463_4516_b4c5_c8f8b68691fe) + (1.0 * reaction_mwf95f743d_6108_49fe_8ffd_bdcc1a9f9a8d) + (-1.0 * reaction_mw51d9d6b8_f0c0_4763_9d11_9be61b5cf5c9) + (1.0 * reaction_mw6fd24d16_f57d_46c6_82f5_3f00759fa16b) + (-1.0 * reaction_mw4b445876_bdce_42d0_867b_fd3c74128a6b)) # Species: id = mw7e23b961_186b_47a0_a8b5_5e9957766792; name = ERK; affected by kineticLaw du[42] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw8301b154_9463_4516_b4c5_c8f8b68691fe) + (1.0 * reaction_mwcc31b497_6c50_446c_bbc2_6c5739507252)) # Species: id = mwcedf8ecd_67bd_4b91_aa04_d58782dec2a4; name = ppMEK-ERK; affected by kineticLaw du[43] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw8301b154_9463_4516_b4c5_c8f8b68691fe) + (-1.0 * reaction_mwf95f743d_6108_49fe_8ffd_bdcc1a9f9a8d)) # Species: id = mwcc894c94_0ddf_42cc_913e_cdcc4d471d94; name = pERK; affected by kineticLaw du[44] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwf95f743d_6108_49fe_8ffd_bdcc1a9f9a8d) + (-1.0 * reaction_mw51d9d6b8_f0c0_4763_9d11_9be61b5cf5c9) + (1.0 * reaction_mw61305f93_7b2d_4a2d_8d16_f7be026d8671) + (-1.0 * reaction_mw1d8c2435_bb85_4352_a25f_82033250579e)) # Species: id = mw6cb74b27_ffef_49bb_8ffb_622d552caa9e; name = ppMEK-pERK; affected by kineticLaw du[45] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw51d9d6b8_f0c0_4763_9d11_9be61b5cf5c9) + (-1.0 * reaction_mw6fd24d16_f57d_46c6_82f5_3f00759fa16b)) # Species: id = mwd784228d_0cb5_468a_ac70_02d8f04b3d9c; name = ppERK; affected by kineticLaw du[46] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw6fd24d16_f57d_46c6_82f5_3f00759fa16b) + (-1.0 * reaction_mwf8bb22e2_5aa3_4c25_a022_a266b1856a48) + (-1.0 * reaction_mw8dec1159_1925_45d9_af25_3cb709a5017c) + (1.0 * reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730) + (-1.0 * reaction_mwa5c135b4_77e2_4411_98e1_2000c39d4b30) + (1.0 * reaction_mw4685274a_2b55_429f_927f_3fd863592af6)) # Species: id = mwbaaeb210_4806_4076_9d60_219f4ed945b6; name = Pase; affected by kineticLaw du[47] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw9c208e18_c70d_4231_af0b_ad17cd0bba2d) + (1.0 * reaction_mw87711dc1_43d7_40fc_b9e9_a24e2f92419d)) # Species: id = mw19a33ad5_5ba4_46c7_84eb_c1287f02bcd5; name = Raf1active-Pase; affected by kineticLaw du[48] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw9c208e18_c70d_4231_af0b_ad17cd0bba2d) + (-1.0 * reaction_mw87711dc1_43d7_40fc_b9e9_a24e2f92419d)) # Species: id = mwf9e2a044_7774_400b_a74e_a111b4a21f30; name = Pase2; affected by kineticLaw du[49] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw4b445876_bdce_42d0_867b_fd3c74128a6b) + (1.0 * reaction_mw40950d59_1012_4361_8418_73e25758e367) + (-1.0 * reaction_mwbfa79c95_487d_4c6f_b437_9e579451a419) + (1.0 * reaction_mwa4b69c77_6226_46da_b78c_3e6027d0be41)) # Species: id = mwcb572fe2_c3ac_40e7_8141_da7d55fce18a; name = ppMEK-Pase2; affected by kineticLaw du[50] = (1 / 
(p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw4b445876_bdce_42d0_867b_fd3c74128a6b) + (-1.0 * reaction_mw40950d59_1012_4361_8418_73e25758e367)) # Species: id = mwa0acc0ac_5fac_4a42_a3be_e36db44994b0; name = pMEK-Pase2; affected by kineticLaw du[51] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwbfa79c95_487d_4c6f_b437_9e579451a419) + (-1.0 * reaction_mwa4b69c77_6226_46da_b78c_3e6027d0be41)) # Species: id = mwd087f76b_65dc_47f1_ba21_c43774457686; name = Pase3; affected by kineticLaw du[52] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwf8bb22e2_5aa3_4c25_a022_a266b1856a48) + (1.0 * reaction_mw61305f93_7b2d_4a2d_8d16_f7be026d8671) + (1.0 * reaction_mwcc31b497_6c50_446c_bbc2_6c5739507252) + (-1.0 * reaction_mw1d8c2435_bb85_4352_a25f_82033250579e)) # Species: id = mw35f5adaa_d1c0_433c_817d_76e317f4cb15; name = pERK-Pase3; affected by kineticLaw du[53] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwcc31b497_6c50_446c_bbc2_6c5739507252) + (1.0 * reaction_mw1d8c2435_bb85_4352_a25f_82033250579e)) # Species: id = mwa7e3103a_6394_472c_b0f4_8ed527f68604; name = ppERK-Pase3; affected by kineticLaw du[54] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwf8bb22e2_5aa3_4c25_a022_a266b1856a48) + (-1.0 * reaction_mw61305f93_7b2d_4a2d_8d16_f7be026d8671)) # Species: id = mw5babe3d5_a9af_4dfd_ac01_35474ef64af2; name = ppERK-pEGF-EGFR2-pShc-Grb2-SOS; affected by kineticLaw du[55] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw8dec1159_1925_45d9_af25_3cb709a5017c) + (-1.0 * reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730)) # Species: id = mw31ac308f_da36_4f73_830f_67f3e5b945d9; name = pSOS; affected by kineticLaw du[56] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwcf9f1b1d_e19a_4fa8_85ba_8f17e2cec730) + (1.0 * reaction_mw4685274a_2b55_429f_927f_3fd863592af6) + (-1.0 * reaction_mw8e331e43_16b4_478d_880b_d5a3244540e4)) # Species: id = mw31261227_9cd6_4059_a0bb_04dbf4888080; name = ppERK-pEGF-EGFR2-Grb2-SOS; affected by kineticLaw du[57] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwa5c135b4_77e2_4411_98e1_2000c39d4b30) + (-1.0 * reaction_mw4685274a_2b55_429f_927f_3fd863592af6)) # Species: id = mw0a0ca6ba_cb28_44c7_a0c0_1593cb720966; name = ProEGFR; affected by kineticLaw du[58] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw47dee769_daa0_4af4_978a_5ab17e504c2f)) # Species: id = mw06b8aada_c92a_48eb_8ee7_af3778cfe62f; name = pEGF-EGFR2-pShc-Grb2-SOS-cbl-EPn; affected by kineticLaw du[59] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw3a87ca5a_845d_4ac4_8806_e343cbbfc630) + (-1.0 * reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657)) # Species: id = mwb2366216_0b3c_4f28_8303_fec92c68dd57; name = EPn; affected by kineticLaw du[60] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw3a87ca5a_845d_4ac4_8806_e343cbbfc630) + (1.0 * reaction_mw363a5271_1f51_4d5e_87a7_42ea25cb5657) + (-1.0 * reaction_mwbac9e6ff_2df1_45eb_b3f4_4cae74c64014) + (1.0 * reaction_mweb93165f_cf03_48f1_b035_59d79e324314) + (-1.0 * reaction_mw6b159c8f_eee0_4337_b711_2e230c9e2cf6) + (1.0 * reaction_mwc9b3b248_3290_452a_9b7c_8fdada3e6687) + (-1.0 * reaction_mw401dde7e_c0a1_4780_b6cc_8f98681c862e) + (1.0 * 
reaction_mw0dd5a91d_d76c_494e_9dd6_57f2836aaa19) + (-1.0 * reaction_mw602726ea_89ee_41b8_bda6_e2811bb42c1d) + (1.0 * reaction_mwfab3a9ec_b094_44f0_bd59_12ac56ca1c99)) # Species: id = mw1d5948e7_5504_4224_9d71_227911b4f1ee; name = pEGF-EGFR2-Grb2-SOS-cbl-EPn; affected by kineticLaw du[61] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwbac9e6ff_2df1_45eb_b3f4_4cae74c64014) + (-1.0 * reaction_mweb93165f_cf03_48f1_b035_59d79e324314)) # Species: id = mwec1b368b_8f73_47eb_9636_9956389836eb; name = pEGF-EGFR2-cbl; affected by kineticLaw du[62] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw85e457d1_73f8_4236_bb61_a128d300003f) + (-1.0 * reaction_mw6b159c8f_eee0_4337_b711_2e230c9e2cf6)) # Species: id = mwa455ec7e_1a12_4659_95a2_a5695d09ca60; name = pEGF-EGFR2-cbl-EPn; affected by kineticLaw du[63] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw6b159c8f_eee0_4337_b711_2e230c9e2cf6) + (-1.0 * reaction_mwc9b3b248_3290_452a_9b7c_8fdada3e6687)) # Species: id = mw2ba1db9a_4483_44fa_a3a2_b4a5ea66898c; name = PI3K; affected by kineticLaw du[64] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw77484632_4e33_468a_9937_24e9bfd0e17d) + (1.0 * reaction_mwd15926b3_069a_4b16_a6fc_c0c15083d621) + (1.0 * reaction_mwfab3a9ec_b094_44f0_bd59_12ac56ca1c99)) # Species: id = mw0dc4e5eb_4366_4799_bebc_cfcffe5c06f5; name = pEGF-EGFR2-PI3K; affected by kineticLaw du[65] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw77484632_4e33_468a_9937_24e9bfd0e17d) + (-1.0 * reaction_mw2c5858f3_0988_49b0_a94a_057853b84e91) + (-1.0 * reaction_mwd3a36af9_3ccc_4bb1_9867_3b9823ba4ac8) + (-1.0 * reaction_mwb205f533_4013_406b_8a4b_691ec3949555)) # Species: id = mw1e591998_65c0_484e_8a3b_537a38d94de1; name = pEGF-EGFR2-pPI3K; affected by kineticLaw du[66] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw2c5858f3_0988_49b0_a94a_057853b84e91)) # Species: id = mw78e207c4_4faf_4b48_8e22_1ee666e9cc4c; name = pPI3K; affected by kineticLaw du[67] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwd3a36af9_3ccc_4bb1_9867_3b9823ba4ac8) + (-1.0 * reaction_mw9f000f29_2512_4d4a_9dd9_e59aaf296d31) + (-1.0 * reaction_mw3a5e0932_d50f_4fe6_b8cb_0ad649f305b0) + (1.0 * reaction_mw5dcc8719_3180_4bd0_8797_08e256131961)) # Species: id = mwfc4a9c3d_3ebb_4033_8b7d_f4d7613d2078; name = TP4; affected by kineticLaw du[68] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw9f000f29_2512_4d4a_9dd9_e59aaf296d31) + (1.0 * reaction_mwd15926b3_069a_4b16_a6fc_c0c15083d621)) # Species: id = mwbd6bb050_89bd_41df_8cea_d2e1fb77bafe; name = TP4-pPI3K; affected by kineticLaw du[69] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw9f000f29_2512_4d4a_9dd9_e59aaf296d31) + (-1.0 * reaction_mw837b5ad7_4a8c_4c55_94ff_0fdd63048044)) # Species: id = mw7033dfd6_53c5_433b_a132_f8cb34dea20f; name = TP4-PI3K; affected by kineticLaw du[70] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw837b5ad7_4a8c_4c55_94ff_0fdd63048044) + (-1.0 * reaction_mwd15926b3_069a_4b16_a6fc_c0c15083d621)) # Species: id = mwb561d9f3_a9ed_4bdb_8d40_87be5cc3237a; name = PIP2; affected by kineticLaw du[71] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw3a5e0932_d50f_4fe6_b8cb_0ad649f305b0) + (1.0 * 
reaction_mw4fceada8_6eb0_4230_a083_b2ab094d2961)) # Species: id = mw014cc419_b720_4b90_9192_2ec6e706c87d; name = pPI3K-PIP2; affected by kineticLaw du[72] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw3a5e0932_d50f_4fe6_b8cb_0ad649f305b0) + (-1.0 * reaction_mw5dcc8719_3180_4bd0_8797_08e256131961)) # Species: id = mwd7f41594_8377_4e2e_9528_45d5a82ffdb4; name = PIP3; affected by kineticLaw du[73] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw5dcc8719_3180_4bd0_8797_08e256131961) + (-1.0 * reaction_mw376b0685_ef73_4fcc_94af_2ada24cf8a8b) + (1.0 * reaction_mw12311a84_3f8d_40c6_8b14_961a8a58d1b6) + (-1.0 * reaction_mw4fceada8_6eb0_4230_a083_b2ab094d2961)) # Species: id = mwcef73e0e_d195_4077_ae71_723664ee1602; name = Akt; affected by kineticLaw du[74] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw376b0685_ef73_4fcc_94af_2ada24cf8a8b) + (1.0 * reaction_mw2698f402_d00b_451e_8b22_93a322fe9a92)) # Species: id = mw62bf5275_ce02_4e86_b3b6_3f87a335e1de; name = Aktm; affected by kineticLaw du[75] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw376b0685_ef73_4fcc_94af_2ada24cf8a8b) + (-1.0 * reaction_mwcc7cfa9c_4945_403a_938e_b237c371a5ef) + (1.0 * reaction_mw362ca1b3_224a_42fb_a14b_6ff467748a5e)) # Species: id = mw6e01967b_3e2a_433d_bec6_9f9cf3ba243c; name = PDK1; affected by kineticLaw du[76] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwcc7cfa9c_4945_403a_938e_b237c371a5ef) + (1.0 * reaction_mw31369230_1f14_45bd_be02_a44a275c6e31)) # Species: id = mw6353aa36_d4a4_4254_8a1f_1f7f571d4233; name = Aktm-PDK1; affected by kineticLaw du[77] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwcc7cfa9c_4945_403a_938e_b237c371a5ef) + (-1.0 * reaction_mw98da32e0_b061_40c5_9d32_40744134f3fa) + (1.0 * reaction_mw4a334f7d_9bce_4690_b623_a427ed66a174)) # Species: id = mwc1935afc_56b1_4a87_923c_ae6d82455d80; name = pAktm-PDK1; affected by kineticLaw du[78] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw98da32e0_b061_40c5_9d32_40744134f3fa) + (-1.0 * reaction_mw31369230_1f14_45bd_be02_a44a275c6e31) + (-1.0 * reaction_mw3994e898_7232_4b70_9c58_b3476e8655f5)) # Species: id = mw3d81860d_d786_4fcc_b8bb_64f1a2d7739d; name = pAktm; affected by kineticLaw du[79] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw31369230_1f14_45bd_be02_a44a275c6e31) + (-1.0 * reaction_mw12311a84_3f8d_40c6_8b14_961a8a58d1b6) + (-1.0 * reaction_mwc5e0c166_6a3a_4913_9ed1_dafe97bdb371)) # Species: id = mw16796ffe_4764_4a9f_942e_149f42c1cd28; name = pAkt; affected by kineticLaw du[80] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw12311a84_3f8d_40c6_8b14_961a8a58d1b6) + (-1.0 * reaction_mw028e8b3e_b531_4466_9c3a_e3fcf7fc9be9)) # Species: id = mwa6e82fc9_a0ce_461c_93c8_17f3c807c1a1; name = pAkt-Takt; affected by kineticLaw du[81] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwf3d393e9_ae09_4eab_a39a_ed0eef0f54bc) + (1.0 * reaction_mw028e8b3e_b531_4466_9c3a_e3fcf7fc9be9)) # Species: id = mw236a3250_4c96_4f6e_b94c_ab3d12852801; name = Akt-Takt; affected by kineticLaw du[82] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwf3d393e9_ae09_4eab_a39a_ed0eef0f54bc) + (-1.0 * reaction_mw2698f402_d00b_451e_8b22_93a322fe9a92)) # 
Species: id = mw11a8b702_b8ac_4513_b4aa_063e51089812; name = Takt; affected by kineticLaw du[83] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw2698f402_d00b_451e_8b22_93a322fe9a92) + (-1.0 * reaction_mw028e8b3e_b531_4466_9c3a_e3fcf7fc9be9) + (-1.0 * reaction_mwc5e0c166_6a3a_4913_9ed1_dafe97bdb371) + (1.0 * reaction_mw362ca1b3_224a_42fb_a14b_6ff467748a5e) + (-1.0 * reaction_mw3994e898_7232_4b70_9c58_b3476e8655f5) + (1.0 * reaction_mw4a334f7d_9bce_4690_b623_a427ed66a174)) # Species: id = mw1a0cb97a_b657_430b_963c_92217f643081; name = pAktm-Takt; affected by kineticLaw du[84] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwc5e0c166_6a3a_4913_9ed1_dafe97bdb371) + (-1.0 * reaction_mw94b3bae0_4da9_4358_a5ac_a46a5cbf621b)) # Species: id = mw9b937ca3_0d82_46d5_8f5a_0f9701002797; name = Aktm-Takt; affected by kineticLaw du[85] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw94b3bae0_4da9_4358_a5ac_a46a5cbf621b) + (-1.0 * reaction_mw362ca1b3_224a_42fb_a14b_6ff467748a5e)) # Species: id = mw57a44eb0_ace7_4294_905a_219e87d3c281; name = pAktm-PDK1-Takt; affected by kineticLaw du[86] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw3994e898_7232_4b70_9c58_b3476e8655f5) + (-1.0 * reaction_mw75acd2d1_3fdf_4c3f_8d99_6d62f825d5e2)) # Species: id = mwd746a5d5_5e65_4a4c_9f84_0e4a3cb7d2fc; name = Aktm-PDK1-Takt; affected by kineticLaw du[87] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw75acd2d1_3fdf_4c3f_8d99_6d62f825d5e2) + (-1.0 * reaction_mw4a334f7d_9bce_4690_b623_a427ed66a174)) # Species: id = mwa6994523_5d45_4000_af0c_3e94073bf183, name = pAkt_total, defined in a rule du[88] = u[88] # Species: id = mwdf92bdc0_f426_45b0_9ad0_876521f41312; name = pRaf1active; affected by kineticLaw du[89] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw950485f2_4463_4309_a4e4_cc81d16ffb7f) + (-1.0 * reaction_mw62f71309_e066_47d2_9b99_01f78a51c218)) # Species: id = mw13abe2a6_9905_40e5_8c23_3fc8834b572a; name = STAT3c; affected by kineticLaw du[90] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwe8647e48_f4a9_40f4_9b32_f89ded572e01) + (1.0 * reaction_mwe9988e4a_083c_4f8e_b154_3e599c9307b0) + (-1.0 * reaction_mw177fa7b0_f0be_4c3e_8b47_2ac4e13159a2) + (1.0 * reaction_mwd189238c_e8f9_40be_b4ea_18a42bba1b4f) + (1.0 * reaction_mw0dd5a91d_d76c_494e_9dd6_57f2836aaa19)) # Species: id = mw2fd710a6_7fe2_4484_bca6_59c187bade8b; name = pEGF-EGFR2-STAT3c; affected by kineticLaw du[91] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwe8647e48_f4a9_40f4_9b32_f89ded572e01) + (-1.0 * reaction_mw65b9e026_bc6c_4c94_8b37_8b9acdf50c8a) + (-1.0 * reaction_mwcb637bf1_7618_4d8a_ab5c_399145ecf1df)) # Species: id = mwb6a9aa2c_62e7_410f_9c33_dbe36dfcc4af; name = pSTAT3c; affected by kineticLaw du[92] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw65b9e026_bc6c_4c94_8b37_8b9acdf50c8a) + (-1.0 * reaction_mw1c9d29fa_bff4_4d2f_9d5f_f1791e4882a3) + (-1.0 * reaction_mwad97bd5a_3dae_49d9_990b_2e6574740618) + (-1.0 * reaction_mwf8bacf1a_6c1a_49b6_b344_2d3bd404a735) + (-1.0 * reaction_mwf8bacf1a_6c1a_49b6_b344_2d3bd404a735) + (-1.0 * reaction_mw177fa7b0_f0be_4c3e_8b47_2ac4e13159a2)) # Species: id = mw341082a0_8017_4cc7_9d00_b1211a196072; name = pEGF-EGFR2-pSTAT3c; affected by kineticLaw du[93] = (1 / 
(p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw1c9d29fa_bff4_4d2f_9d5f_f1791e4882a3)) # Species: id = mwcea1f1c1_2f85_4af1_98ea_ef14cf580c09; name = PP1; affected by kineticLaw du[94] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwad97bd5a_3dae_49d9_990b_2e6574740618) + (1.0 * reaction_mwe9988e4a_083c_4f8e_b154_3e599c9307b0) + (-1.0 * reaction_mwc9b945cf_3a14_4bd9_b253_7064498c75e2) + (1.0 * reaction_mw75c6078f_fb76_4ca9_9fdd_e221e3ba57ad)) # Species: id = mwdc34472c_a6f9_4002_951d_e0e8da64eb42; name = pSTAT3c-PP1; affected by kineticLaw du[95] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwad97bd5a_3dae_49d9_990b_2e6574740618) + (-1.0 * reaction_mwe9988e4a_083c_4f8e_b154_3e599c9307b0)) # Species: id = mw472d5cb9_120e_4f60_bbae_1ae2552837dd; name = pSTAT3c-pSTAT3c-PP1; affected by kineticLaw du[96] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwc9b945cf_3a14_4bd9_b253_7064498c75e2) + (-1.0 * reaction_mw75c6078f_fb76_4ca9_9fdd_e221e3ba57ad)) # Species: id = mw4f575c55_7dff_45d7_94ad_cda9621d5b63; name = pSTAT3c-pSTAT3c; affected by kineticLaw du[97] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwf8bacf1a_6c1a_49b6_b344_2d3bd404a735) + (-1.0 * reaction_mwc9b945cf_3a14_4bd9_b253_7064498c75e2) + (-1.0 * reaction_mwec4127b5_6bcf_4128_aff4_a6b3c470f690)) # Species: id = mwd2c465fb_eea7_499a_8ea4_f318a64cb9ee; name = STAT3c-pSTAT3c; affected by kineticLaw du[98] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw75c6078f_fb76_4ca9_9fdd_e221e3ba57ad) + (1.0 * reaction_mw177fa7b0_f0be_4c3e_8b47_2ac4e13159a2)) # Species: id = mw4110f531_7513_4786_8896_7c9d969ff558; name = pSTAT3n-pSTAT3n; affected by kineticLaw du[99] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwec4127b5_6bcf_4128_aff4_a6b3c470f690) + (1.0 * reaction_mw5c806b00_59a1_491e_99a1_2c932b2d5d7a) + (-1.0 * reaction_mw26fdabae_323b_4a78_b134_4c2eb70ea6a7)) # Species: id = mwe3fd7f65_b0d1_44d9_b6f3_d2f7d332f664; name = pSTAT3n; affected by kineticLaw du[100] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw5c806b00_59a1_491e_99a1_2c932b2d5d7a) + (-1.0 * reaction_mw5c806b00_59a1_491e_99a1_2c932b2d5d7a) + (-1.0 * reaction_mwc38a99c8_74cf_49f2_a16b_f6610ca1a0a7) + (-1.0 * reaction_mw45d92b79_0656_4795_87d0_7a465949ca43)) # Species: id = mw0e1be972_fded_4bff_a93d_091ec942485f; name = PP2; affected by kineticLaw du[101] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mw26fdabae_323b_4a78_b134_4c2eb70ea6a7) + (1.0 * reaction_mw3b0c171c_6d60_41ca_8193_83cd5e6c188c) + (-1.0 * reaction_mw45d92b79_0656_4795_87d0_7a465949ca43) + (1.0 * reaction_mwb71945c2_03a8_4fad_a995_e1caeee98525)) # Species: id = mw0facb8f2_95cf_4ddf_a959_b24ba64f320b; name = pSTAT3n-pSTAT3n-PP2; affected by kineticLaw du[102] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw26fdabae_323b_4a78_b134_4c2eb70ea6a7) + (-1.0 * reaction_mw3b0c171c_6d60_41ca_8193_83cd5e6c188c)) # Species: id = mw9686f53e_d343_45fd_b441_9c992219546a; name = STAT3n-pSTAT3n; affected by kineticLaw du[103] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw3b0c171c_6d60_41ca_8193_83cd5e6c188c) + (1.0 * reaction_mwc38a99c8_74cf_49f2_a16b_f6610ca1a0a7)) # Species: id = 
mw960bddeb_e567_46dd_b2f3_ed5e6a5c7972; name = STAT3n; affected by kineticLaw
du[104] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((-1.0 * reaction_mwc38a99c8_74cf_49f2_a16b_f6610ca1a0a7) + (1.0 * reaction_mwb71945c2_03a8_4fad_a995_e1caeee98525) + (-1.0 * reaction_mwd189238c_e8f9_40be_b4ea_18a42bba1b4f))
# Species: id = mw8c85ff7f_6368_4b11_a2ed_ce83481b55e6; name = pSTAT3n-PP2; affected by kineticLaw
du[105] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw45d92b79_0656_4795_87d0_7a465949ca43) + (-1.0 * reaction_mwb71945c2_03a8_4fad_a995_e1caeee98525))
# Species: id = mw548c81c2_c626_4df8_9177_a1a6fc3d4ce8; name = pEGF-EGFR2-STAT3c-cbl; affected by kineticLaw
du[106] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwcb637bf1_7618_4d8a_ab5c_399145ecf1df) + (-1.0 * reaction_mw401dde7e_c0a1_4780_b6cc_8f98681c862e))
# Species: id = mw142e6dc4_ec15_459d_a184_6b20be04f08d; name = pEGF-EGFR2-STAT3c-cbl-EPn; affected by kineticLaw
du[107] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw401dde7e_c0a1_4780_b6cc_8f98681c862e) + (-1.0 * reaction_mw0dd5a91d_d76c_494e_9dd6_57f2836aaa19))
# Species: id = mw2c47ae3f_06d9_40ec_a252_535db0ae5caa; name = pEGF-EGFR2-PI3K-cbl; affected by kineticLaw
du[108] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mwb205f533_4013_406b_8a4b_691ec3949555) + (-1.0 * reaction_mw602726ea_89ee_41b8_bda6_e2811bb42c1d))
# Species: id = mwd32d108b_49c2_4df2_9b67_d6c6b84f54b9; name = pEGF-EGFR2-PI3K-cbl-EPn; affected by kineticLaw
du[109] = (1 / (p["compartment_mw1637dd35_5f09_4a8d_bb7f_58717cdf1612"])) * ((1.0 * reaction_mw602726ea_89ee_41b8_bda6_e2811bb42c1d) + (-1.0 * reaction_mwfab3a9ec_b094_44f0_bd59_12ac56ca1c99))
end

u0 = zeros(109)
u0[1] = 0.0081967
u0[2] = 0.3
u0[3] = 0.0
u0[4] = 0.0
u0[5] = 0.0
u0[6] = 0.8
u0[7] = 1.0
u0[8] = 0.0
u0[9] = 0.1
u0[10] = 0.0
u0[11] = 0.0
u0[12] = 0.0
u0[13] = 0.0
u0[14] = 1.0
u0[15] = 0.0
u0[16] = 0.0
u0[17] = 0.3
u0[18] = 0.0
u0[19] = 0.0
u0[20] = 0.0
u0[21] = 0.0
u0[22] = 0.0
u0[23] = 0.0
u0[24] = 0.0
u0[25] = 0.15
u0[26] = 0.0
u0[27] = 0.0
u0[28] = 0.0
u0[29] = 0.1
u0[30] = 0.0
u0[31] = 0.0
u0[32] = 0.0
u0[33] = 0.0
u0[34] = 0.5
u0[35] = 0.0
u0[36] = 0.0
u0[37] = 0.68
u0[38] = 0.0
u0[39] = 0.0
u0[40] = 0.0
u0[41] = 0.0
u0[42] = 0.4
u0[43] = 0.0
u0[44] = 0.0
u0[45] = 0.0
u0[46] = 0.0
u0[47] = 0.5
u0[48] = 0.0
u0[49] = 0.02
u0[50] = 0.0
u0[51] = 0.0
u0[52] = 0.002
u0[53] = 0.0
u0[54] = 0.0
u0[55] = 0.0
u0[56] = 0.0
u0[57] = 0.0
u0[58] = 1.0
u0[59] = 0.0
u0[60] = 0.5
u0[61] = 0.0
u0[62] = 0.0
u0[63] = 0.0
u0[64] = 0.2
u0[65] = 0.0
u0[66] = 0.0
u0[67] = 0.0
u0[68] = 0.2
u0[69] = 0.0
u0[70] = 0.0
u0[71] = 0.5
u0[72] = 0.0
u0[73] = 0.0
u0[74] = 0.1
u0[75] = 0.0
u0[76] = 0.1
u0[77] = 0.0
u0[78] = 0.0
u0[79] = 0.0
u0[80] = 0.0
u0[81] = 0.0
u0[82] = 0.0
u0[83] = 0.1
u0[84] = 0.0
u0[85] = 0.0
u0[86] = 0.0
u0[87] = 0.0
u0[88] = 0.0
u0[89] = 0.0
u0[90] = 1.0
u0[91] = 0.0
u0[92] = 0.0
u0[93] = 0.0
u0[94] = 0.5
u0[95] = 0.0
u0[96] = 0.0
u0[97] = 0.0
u0[98] = 0.0
u0[99] = 0.0
u0[100] = 0.0
u0[101] = 0.6
u0[102] = 0.0
u0[103] = 0.0
u0[104] = 0.0
u0[105] = 0.0
u0[106] = 0.0
u0[107] = 0.0
u0[108] = 0.0
u0[109] = 0.0

tspan = (0.0,100.0)
prob = ODEProblem{true,SciMLBase.FullSpecialize}(sbml_model!, u0, tspan, par)
sys = modelingtoolkitize(prob)
sys = structural_simplify(sys)
const to = TimerOutput()
@timeit to "ODEProb No Jac" oprob = ODEProblem{true,SciMLBase.FullSpecialize}(sys, Float64[], tspan, Float64[])
@timeit to "ODEProb DenseJac" densejacprob = ODEProblem{true,SciMLBase.FullSpecialize}(sys, Float64[], tspan, Float64[], jac=true)
```

```julia
@timeit to "ODEProb SparseJac" sparsejacprob = ODEProblem{true,SciMLBase.FullSpecialize}(sys, Float64[], tspan, Float64[], jac=true, sparse=true)
show(to)
```

```julia
@show numspecies(sys) # Number of ODEs
@show numreactions(sys) # Apprx. number of terms in the ODE
@show length(parameters(sys)) # Number of Parameters
```

## Picture of the solution

```julia
sol = solve(oprob, CVODE_BDF())
plot(sol, legend=false, fmt=:png)
```

For these benchmarks we will be using the time-series error with these saving points since the final time point is not well-indicative of the solution behavior (capturing the oscillation is the key!).

## Generate Test Solution

```julia
@time sol = solve(oprob, CVODE_BDF(), abstol=1/10^14, reltol=1/10^14)
test_sol = TestSolution(sol)
```
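As a quick sanity check on this reference, the time-series error of a single solve can also be computed directly. The following is a minimal sketch, assuming `appxtrue` from DiffEqDevTools (the same machinery `WorkPrecisionSet` uses via `appxsol`) attaches error estimates to a trial solution:

```julia
# Sketch only: compare one moderate-tolerance solve against the
# high-accuracy reference `test_sol` generated above.
approx_sol = solve(oprob, CVODE_BDF(), abstol=1e-8, reltol=1e-8)
approx_sol = appxtrue(approx_sol, test_sol)  # attaches error estimates
approx_sol.errors[:l2]                       # discrete time-series (l2) error
```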
## Setups

```julia
abstols = 1.0 ./ 10.0 .^ (9:13)
reltols = 1.0 ./ 10.0 .^ (6:10)
setups = [
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>QNDF()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>radau()),
          Dict(:alg=>seulex()),
          Dict(:alg=>ImplicitEulerExtrapolation(min_order = 8, init_order = 9,threading = OrdinaryDiffEq.PolyesterThreads())),
          Dict(:alg=>ImplicitEulerExtrapolation(min_order = 8, init_order = 9,threading = false)),
          Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 7, init_order = 8,threading = OrdinaryDiffEq.PolyesterThreads())),
          Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 7, init_order = 8,threading = false)),
          Dict(:alg=>ImplicitHairerWannerExtrapolation(init_order = 5,threading = OrdinaryDiffEq.PolyesterThreads())),
          Dict(:alg=>ImplicitHairerWannerExtrapolation(init_order = 5, threading = false)),
          ]
solnames = ["CVODE_BDF","KenCarp4","Rodas4","QNDF","lsoda","radau","seulex","ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)", "ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"]
```

## Automatic Jacobian Solves

First we test using auto-generated Jacobians (finite difference)

```julia
wp = WorkPrecisionSet(oprob,abstols,reltols,setups; names = solnames,appxsol=test_sol,save_everystep=false,maxiters=Int(1e5),numruns=10)
plot(wp, title = "Implicit Methods: SBML Model",legend=:outertopleft,size = (1000,500), xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm)
```

## Analytical Jacobian

Now we test using the generated analytic Jacobian function.

```julia
setups = [
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>QNDF()),
          #Dict(:alg=>lsoda()),
          #Dict(:alg=>radau()),
          #Dict(:alg=>seulex()),
          Dict(:alg=>ImplicitEulerExtrapolation(min_order = 8, init_order = 9,threading = OrdinaryDiffEq.PolyesterThreads())),
          Dict(:alg=>ImplicitEulerExtrapolation(min_order = 8, init_order = 9,threading = false)),
          Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 7, init_order = 8,threading = OrdinaryDiffEq.PolyesterThreads())),
          Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 7, init_order = 8,threading = false)),
          Dict(:alg=>ImplicitHairerWannerExtrapolation(init_order = 5,threading = OrdinaryDiffEq.PolyesterThreads())),
          Dict(:alg=>ImplicitHairerWannerExtrapolation(init_order = 5, threading = false)),
          ]
solnames = ["CVODE_BDF","KenCarp4","Rodas4","QNDF","ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)", "ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"]
wp = WorkPrecisionSet(densejacprob,abstols,reltols,setups; names = solnames,appxsol=test_sol,save_everystep=false,maxiters=Int(1e5),numruns=10)
plot(wp, title = "Implicit Methods: SBML Model",legend=:outertopleft,size = (1000,500), xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm)
```

## Sparse Jacobian

Finally we test using the generated sparse analytic Jacobian function. Note that the extrapolation methods currently do not support sparse Jacobians.

```julia
setups = [
          #Dict(:alg=>CVODE_BDF()), #Fails!
          Dict(:alg=>KenCarp4(autodiff = false)),
          Dict(:alg=>Rodas4(autodiff = false)),
          Dict(:alg=>QNDF(autodiff = false)),
          #Dict(:alg=>lsoda()),
          #Dict(:alg=>radau()),
          #Dict(:alg=>seulex()),
          #Dict(:alg=>ImplicitEulerExtrapolation(autodiff = false,min_order = 8, init_order = 9,threading = OrdinaryDiffEq.PolyesterThreads())),
          #Dict(:alg=>ImplicitEulerExtrapolation(autodiff = false,min_order = 8, init_order = 9,threading = false)),
          #Dict(:alg=>ImplicitEulerBarycentricExtrapolation(autodiff = false,min_order = 7, init_order = 8,threading = OrdinaryDiffEq.PolyesterThreads())),
          #Dict(:alg=>ImplicitEulerBarycentricExtrapolation(autodiff = false,min_order = 7, init_order = 8,threading = false)),
          #Dict(:alg=>ImplicitHairerWannerExtrapolation(autodiff = false,init_order = 5,threading = OrdinaryDiffEq.PolyesterThreads())),
          #Dict(:alg=>ImplicitHairerWannerExtrapolation(autodiff = false,init_order = 5, threading = false)),
          ]
solnames = ["KenCarp4","Rodas4","QNDF",
            #"ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)",
            #"ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"
            ]
wp = WorkPrecisionSet(sparsejacprob,abstols,reltols,setups; names = solnames,appxsol=test_sol,save_everystep=false,maxiters=Int(1e5),numruns=10)
plot(wp, title = "Implicit Methods: SBML Model",legend=:outertopleft,size = (1000,500), xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm)
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Bio/egfr_net.jmd
docs
10474
--- title: Egfr_net Work-Precision Diagrams author: Torkel Loman --- The following benchmark is of 356 ODEs with 3749 terms that describe a chemical reaction network. This egfr_net model was used as a benchmark model in [Gupta et al.](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6013266/). It describes the epidermal growth factor receptor signalling system [Blinov et al.](https://pubmed.ncbi.nlm.nih.gov/16233948/). We use [`ReactionNetworkImporters`](https://github.com/isaacsas/ReactionNetworkImporters.jl) to load the BioNetGen model files as a [Catalyst](https://github.com/SciML/Catalyst.jl) model, and then use [ModelingToolkit](https://github.com/SciML/ModelingToolkit.jl) to convert the Catalyst network model to ODEs. ```julia using DiffEqBase, OrdinaryDiffEq, Catalyst, ReactionNetworkImporters, Sundials, Plots, DiffEqDevTools, ODEInterface, ODEInterfaceDiffEq, LSODA, TimerOutputs, LinearAlgebra, ModelingToolkit, BenchmarkTools, LinearSolve gr() const to = TimerOutput() tf = 10.0 # generate ModelingToolkit ODEs @timeit to "Parse Network" prnbng = loadrxnetwork(BNGNetwork(), joinpath(@__DIR__, "Models/egfr_net.net")) show(to) rn = prnbng.rn obs = [eq.lhs for eq in observed(rn)] @timeit to "Create ODESys" osys = convert(ODESystem, rn) show(to) tspan = (0.,tf) @timeit to "ODEProb No Jac" oprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]) show(to); ``` ```julia @timeit to "ODEProb SparseJac" sparsejacprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[], jac=true, sparse=true) show(to) ``` ```julia @show numspecies(rn) # Number of ODEs @show numreactions(rn) # Apprx. number of terms in the ODE @show length(parameters(rn)); # Number of Parameters ``` ## Time ODE derivative function compilation As compiling the ODE derivative functions has in the past taken longer than running a simulation, we first force compilation by evaluating these functions one time. ```julia u = ModelingToolkit.varmap_to_vars(nothing, species(rn); defaults=ModelingToolkit.defaults(rn)) du = copy(u) p = ModelingToolkit.varmap_to_vars(nothing, parameters(rn); defaults=ModelingToolkit.defaults(rn)) @timeit to "ODE rhs Eval1" oprob.f(du,u,p,0.) @timeit to "ODE rhs Eval2" oprob.f(du,u,p,0.) sparsejacprob.f(du,u,p,0.) ``` We also time the ODE rhs function with BenchmarkTools as it is more accurate given how fast evaluating `f` is: ```julia @btime oprob.f($du,$u,$p,0.) ``` ## Picture of the solution ```julia sol = solve(oprob, CVODE_BDF(), saveat=tf/1000., reltol=1e-5, abstol=1e-5) plot(sol; idxs=obs, legend=false, fmt=:png) ``` For these benchmarks we will be using the time-series error with these saving points. ## Generate Test Solution ```julia @time sol = solve(oprob, CVODE_BDF(), abstol=1/10^14, reltol=1/10^14) test_sol = TestSolution(sol); ``` ## Setups #### Sets plotting defaults ```julia default(legendfontsize=7,framestyle=:box,gridalpha=0.3,gridlinewidth=2.5) ``` #### Declares a plotting helper function ```julia function plot_settings(wp) times = vcat(map(wp -> wp.times, wp.wps)...) errors = vcat(map(wp -> wp.errors, wp.wps)...) xlimit = 10 .^ (floor(log10(minimum(errors))), ceil(log10(maximum(errors)))) ylimit = 10 .^ (floor(log10(minimum(times))), ceil(log10(maximum(times)))) return xlimit,ylimit end ``` #### Sets tolerances ```julia abstols = 1.0 ./ 10.0 .^ (6:10) reltols = 1.0 ./ 10.0 .^ (6:10); ``` ## Implicit Work-Precision Diagrams Benchmarks for implicit solvers. 
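All of the work-precision sets in this file are generated with `error_estimate=:l2`, i.e. a time-series error over the `saveat` points rather than the error at the final time point. As a rough conceptual sketch (an illustration of the idea only, not the exact formula `DiffEqDevTools` evaluates internally), the quantity being estimated is a root-mean-square deviation from the reference solution:

```julia
# Conceptual sketch of the :l2 time-series error; `l2_error` is an
# illustrative helper, and it assumes `sol` and `ref` were saved at the
# same time points.
using Statistics: mean
l2_error(sol, ref) = sqrt(mean(mean(abs2, u .- uref) for (u, uref) in zip(sol.u, ref.u)))
```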
#### Declare solvers (using default linear solver) We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>CVODE_BDF(linear_solver=:LapackDense)), Dict(:alg=>CVODE_Adams()), Dict(:alg=>TRBDF2()), Dict(:alg=>QNDF()), Dict(:alg=>FBDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas5P()) ]; ``` #### Plot Work-Precision Diagram (using default linear solver) Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=100) names = ["lsoda" "CVODE_BDF" "CVODE_BDF (Lapack Dense)" "CVODE_Adams" "TRBDF2" "QNDF" "FBDF" "KenCarp4" "Rosenbrock23" "Rodas4" "Rodas5P"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using GMRES linear solver) We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES)), Dict(:alg=>TRBDF2(linsolve=KrylovJL_GMRES())), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES())), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES())), Dict(:alg=>KenCarp4(linsolve=KrylovJL_GMRES())), Dict(:alg=>Rosenbrock23(linsolve=KrylovJL_GMRES())), Dict(:alg=>Rodas4(linsolve=KrylovJL_GMRES())), Dict(:alg=>Rodas5P(linsolve=KrylovJL_GMRES())) ]; ``` #### Plot Work-Precision Diagram (using GMRES linear solver) Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=100) names = ["lsoda" "CVODE_BDF (GMRES)" "TRBDF2 (GMRES)" "QNDF (GMRES)" "FBDF (GMRES)" "KenCarp4 (GMRES)" "Rosenbrock23 (GMRES)" "Rodas4 (GMRES)" "Rodas5P (GMRES)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using sparse jacobian) We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>CVODE_BDF(linear_solver=:KLU)), Dict(:alg=>TRBDF2(linsolve=KLUFactorization())), Dict(:alg=>QNDF(linsolve=KLUFactorization())), Dict(:alg=>FBDF(linsolve=KLUFactorization())), Dict(:alg=>KenCarp4(linsolve=KLUFactorization())), Dict(:alg=>Rosenbrock23(linsolve=KLUFactorization())), Dict(:alg=>Rodas4(linsolve=KLUFactorization())), Dict(:alg=>Rodas5P(linsolve=KLUFactorization())) ]; ``` #### Plot Work-Precision Diagram (using sparse jacobian) Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(sparsejacprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=100) names = ["CVODE_BDF (KLU, sparse jac)" "TRBDF2 (KLU, sparse jac)" "QNDF (KLU, sparse jac)" "FBDF (KLU, sparse jac)" "KenCarp4 (KLU, sparse jac)" "Rosenbrock23 (KLU, sparse jac)" "Rodas4 (KLU, sparse jac)" "Rodas5P (KLU, sparse jac)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Explicit Work-Precision Diagram Benchmarks for explicit solvers. #### Declare solvers We designate the solvers we wish to use; these also include lsoda and CVODE.
```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_Adams()), Dict(:alg=>Tsit5()), Dict(:alg=>BS5()), Dict(:alg=>VCABM()), Dict(:alg=>Vern6()), Dict(:alg=>Vern7()), Dict(:alg=>Vern8()), Dict(:alg=>Vern9()), Dict(:alg=>ROCK4()) ]; ``` #### Plot Work-Precision Diagram ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_Adams" "Tsit5" "BS5" "VCABM" "Vern6" "Vern7" "Vern8" "Vern9" "ROCK4"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Additional explicit solvers One additional explicit solver, `ROCK2`, performs noticeably worse as compared to the other ones. ```julia setups = [Dict(:alg=>ROCK2())]; wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["ROCK2"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Summary of results Finally, we compute a single diagram comparing the various solvers used. #### Declare solvers We designate the solvers we wish to compare. ```julia setups = [ Dict(:alg=>lsoda(), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES), :prob_choice => 1), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>QNDF(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg=>BS5(), :prob_choice => 1), Dict(:alg=>Vern6(), :prob_choice => 1), Dict(:alg=>ROCK4(), :prob_choice => 1) ]; ``` #### Plot Work-Precision Diagram For these, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet([oprob,sparsejacprob],abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=[test_sol,test_sol],maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_BDF (GMRES)" "QNDF (GMRES)" "QNDF (KLU)" "BS5" "Vern6" "ROCK4"] colors = [:seagreen1 :darkgreen :deepskyblue1 :deepskyblue4 :thistle2 :lightslateblue :purple4] markershapes = [:star4 :rect :hexagon :octagon :star8 :rtriangle :square] xlimit,ylimit = plot_settings(wp) xlimit = xlimit .* [0.95,1/0.95]; ylimit = ylimit .* [0.95,1/0.95]; plot(wp;label=names,left_margin=10Plots.mm,right_margin=10Plots.mm,xlimit=xlimit,ylimit=ylimit,xticks=[1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,1e-3,1e-2],yticks=[1e-2,1e-1],color=colors,markershape=markershapes,legendfontsize=15,tickfontsize=15,guidefontsize=15, legend=:topright, lw=20, la=0.8, markersize=20,markerstrokealpha=1.0, markerstrokewidth=1.5, gridalpha=0.3, gridlinewidth=7.5,size=(1100,1000)) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Bio/fceri_gamma2.jmd
docs
11811
--- title: Fceri_gamma2 Work-Precision Diagrams author: Torkel Loman --- The following benchmark is of 3744 ODEs with 58276 terms that describe a stiff chemical reaction network. This fceri_gamma2 model was used as a benchmark model in [Gupta et al.](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6013266/). It describes high-affinity human IgE receptor signalling [Faeder et al.](https://www.jimmunol.org/content/170/7/3769.long). We use [`ReactionNetworkImporters`](https://github.com/isaacsas/ReactionNetworkImporters.jl) to load the BioNetGen model files as a [Catalyst](https://github.com/SciML/Catalyst.jl) model, and then use [ModelingToolkit](https://github.com/SciML/ModelingToolkit.jl) to convert the Catalyst network model to ODEs. ```julia using DiffEqBase, OrdinaryDiffEq, Catalyst, ReactionNetworkImporters, Sundials, Plots, DiffEqDevTools, ODEInterface, ODEInterfaceDiffEq, LSODA, TimerOutputs, LinearAlgebra, ModelingToolkit, BenchmarkTools, LinearSolve gr() const to = TimerOutput() tf = 150.0 # generate ModelingToolkit ODEs @timeit to "Parse Network" prnbng = loadrxnetwork(BNGNetwork(), joinpath(@__DIR__, "Models/fceri_gamma2.net")) show(to) rn = prnbng.rn obs = [eq.lhs for eq in observed(rn)] @timeit to "Create ODESys" osys = convert(ODESystem, rn) show(to) tspan = (0.,tf) @timeit to "ODEProb SparseJac" sparsejacprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[], jac=true, sparse=true) show(to) @timeit to "ODEProb No Jac" oprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]) show(to) oprob_sparse = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]; sparse=true); ``` ```julia @show numspecies(rn) # Number of ODEs @show numreactions(rn) # Apprx. number of terms in the ODE @show length(parameters(rn)); # Number of Parameters ``` ## Time ODE derivative function compilation As compiling the ODE derivative functions has in the past taken longer than running a simulation, we first force compilation by evaluating these functions one time. ```julia u = ModelingToolkit.varmap_to_vars(nothing, species(rn); defaults=ModelingToolkit.defaults(rn)) du = copy(u) p = ModelingToolkit.varmap_to_vars(nothing, parameters(rn); defaults=ModelingToolkit.defaults(rn)) @timeit to "ODE rhs Eval1" sparsejacprob.f(du,u,p,0.) sparsejacprob.f(du,u,p,0.) ``` We also time the ODE rhs function with BenchmarkTools as it is more accurate given how fast evaluating `f` is: ```julia @btime sparsejacprob.f($du,$u,$p,0.) ``` ## Picture of the solution ```julia sol = solve(oprob, CVODE_BDF(), saveat=tf/1000., reltol=1e-5, abstol=1e-5) plot(sol; idxs=obs, legend=false, fmt=:png) ``` For these benchmarks we will be using the time-series error with these saving points. ## Generate Test Solution ```julia @time sol = solve(sparsejacprob, CVODE_BDF(linear_solver=:GMRES), reltol=1e-15, abstol=1e-15) test_sol = TestSolution(sol); ``` ## Setups #### Sets plotting defaults ```julia default(legendfontsize=7,framestyle=:box,gridalpha=0.3,gridlinewidth=2.5) ``` #### Declares a plotting helper function ```julia function plot_settings(wp) times = vcat(map(wp -> wp.times, wp.wps)...) errors = vcat(map(wp -> wp.errors, wp.wps)...)
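# The two lines below round the observed error and time ranges outward to
# the nearest powers of ten, so every diagram in this file gets consistently
# padded log-scale axis limits.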
xlimit = 10 .^ (floor(log10(minimum(errors))), ceil(log10(maximum(errors)))) ylimit = 10 .^ (floor(log10(minimum(times))), ceil(log10(maximum(times)))) return xlimit,ylimit end ``` #### Declare pre-conditioners ```julia using IncompleteLU, LinearAlgebra jaccache = sparsejacprob.f.jac(oprob.u0,oprob.p,0.0) W = I - 1.0*jaccache prectmp = ilu(W, τ = 50.0) preccache = Ref(prectmp) const τ1 = 5 function psetupilu(p, t, u, du, jok, jcurPtr, gamma) if !jok sparsejacprob.f.jac(jaccache,u,p,t) jcurPtr[] = true # W = I - gamma*J @. W = -gamma*jaccache idxs = diagind(W) @. @view(W[idxs]) = @view(W[idxs]) + 1 # Build preconditioner on W preccache[] = ilu(W, τ = τ1) end end function precilu(z,r,p,t,y,fy,gamma,delta,lr) ldiv!(z,preccache[],r) end const τ2 = 5 function incompletelu(W,du,u,p,t,newW,Plprev,Prprev,solverdata) if newW === nothing || newW Pl = ilu(convert(AbstractMatrix,W), τ = τ2) else Pl = Plprev end Pl,nothing end; ``` #### Sets tolerances ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (5:8); ``` ## Work-Precision Diagrams (CVODE and lsoda solvers) #### Declare solvers. ```julia setups = [ Dict(:alg=>lsoda(), :prob_choice => 1), Dict(:alg=>CVODE_BDF(), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:LapackDense), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:KLU), :prob_choice => 3) ]; ``` #### Plot Work-Precision Diagram. ```julia wp = WorkPrecisionSet([oprob,oprob_sparse,sparsejacprob],abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=[test_sol,test_sol,test_sol],maxiters=Int(1e9),numruns=10) names = ["lsoda" "CVODE_BDF" "CVODE_BDF (LapackDense)" "CVODE_BDF (GMRES)" "CVODE_BDF (GMRES, iLU)" "CVODE_BDF (KLU, sparse jac)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Work-Precision Diagrams (various Julia solvers) #### Declare solvers (using default linear solver). ```julia setups = [ Dict(:alg=>TRBDF2(autodiff=false)), Dict(:alg=>QNDF(autodiff=false)), Dict(:alg=>FBDF(autodiff=false)), Dict(:alg=>KenCarp4(autodiff=false)) ]; ``` #### Plot Work-Precision Diagram (using default linear solver). ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e12),dtmin=1e-18,numruns=10) names = ["TRBDF2" "QNDF" "FBDF" "KenCarp4"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using GMRES linear solver). ```julia setups = [ Dict(:alg=>TRBDF2(linsolve=KrylovJL_GMRES(),autodiff=false)), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES(),autodiff=false)), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES(),autodiff=false)), Dict(:alg=>KenCarp4(linsolve=KrylovJL_GMRES(),autodiff=false)) ]; ``` #### Plot Work-Precision Diagram (using GMRES linear solver). ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e12),dtmin=1e-18,numruns=10) names = ["TRBDF2 (GMRES)" "QNDF (GMRES)" "FBDF (GMRES)" "KenCarp4 (GMRES)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using GMRES linear solver, with pre-conditioner). 
```julia setups = [ Dict(:alg=>TRBDF2(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)), Dict(:alg=>KenCarp4(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true)) ]; ``` #### Plot Work-Precision Diagram (using GMRES linear solver, with pre-conditioner). ```julia wp = WorkPrecisionSet(sparsejacprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e12),dtmin=1e-18,numruns=10) names = ["TRBDF2 (GMRES, iLU)" "QNDF (GMRES, iLU)" "FBDF (GMRES, iLU)" "KenCarp4 (GMRES, iLU)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Declare solvers (using sparse jacobian) We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>TRBDF2(linsolve=KLUFactorization(),autodiff=false)), Dict(:alg=>QNDF(linsolve=KLUFactorization(),autodiff=false)), Dict(:alg=>FBDF(linsolve=KLUFactorization(),autodiff=false)), Dict(:alg=>KenCarp4(linsolve=KLUFactorization(),autodiff=false)) ]; ``` #### Plot Work-Precision Diagram (using sparse jacobian) Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(sparsejacprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e12),dtmin=1e-18,numruns=10) names = ["TRBDF2 (KLU, sparse jac)" "QNDF (KLU, sparse jac)" "FBDF (KLU, sparse jac)" "KenCarp4 (KLU, sparse jac)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Explicit Work-Precision Diagram Benchmarks for explicit solvers. #### Declare solvers We designate the solvers we wish to use; these also include CVODE. ```julia setups = [ Dict(:alg=>CVODE_Adams()), Dict(:alg=>Tsit5()), Dict(:alg=>BS5()), Dict(:alg=>VCABM()), Dict(:alg=>Vern6()), Dict(:alg=>Vern7()), Dict(:alg=>Vern8()), Dict(:alg=>Vern9()) ]; ``` #### Plot Work-Precision Diagram ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=10) names = ["CVODE_Adams" "Tsit5" "BS5" "VCABM" "Vern6" "Vern7" "Vern8" "Vern9"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Summary of results Finally, we compute a single diagram comparing the various solvers used. #### Declare solvers We designate the solvers we wish to compare. ```julia setups = [ Dict(:alg=>CVODE_BDF(linear_solver=:GMRES), :prob_choice => 1), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), Dict(:alg=>QNDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true), :prob_choice => 3), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES(),autodiff=false,precs=incompletelu,concrete_jac=true), :prob_choice => 3), Dict(:alg=>Tsit5()) ]; ``` #### Plot Work-Precision Diagram For these, we generate a work-precision diagram for the selection of solvers.
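Each `:prob_choice` above indexes into the vector of problems passed to `WorkPrecisionSet` below: `1` selects the plain `oprob`, `2` selects `oprob_sparse` (used by the iLU-preconditioned CVODE setup), and `3` selects `sparsejacprob` (used by the setups that consume the analytic sparse Jacobian).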
```julia wp = WorkPrecisionSet([oprob,oprob_sparse,sparsejacprob],abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=[test_sol,test_sol,test_sol],maxiters=Int(1e9),numruns=200) names = ["CVODE_BDF (GMRES)" "CVODE_BDF (GMRES, iLU)" "QNDF (GMRES, iLU)" "FBDF (GMRES, iLU)" "Tsit5"] colors = [:darkgreen :green :deepskyblue1 :dodgerblue2 :orchid2] markershapes = [:rect :octagon :hexagon :rtriangle :ltriangle] xlimit,ylimit = plot_settings(wp) xlimit = xlimit .* [0.95,1/0.95]; ylimit = ylimit .* [0.95,1/0.95]; plot(wp;label=names,left_margin=10Plots.mm,right_margin=10Plots.mm,xlimit=xlimit,ylimit=ylimit,xticks=[1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,1e-3,1e-2,1e-1],yticks=[1e-1,1e0,1e1,1e2],color=colors,markershape=markershapes,legendfontsize=15,tickfontsize=15,guidefontsize=15, legend=:topright, lw=20, la=0.8, markersize=20,markerstrokealpha=1.0, markerstrokewidth=1.5, gridalpha=0.3, gridlinewidth=7.5,size=(1100,1000)) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Bio/multisite2.jmd
docs
8424
--- title: Multisite2 Work-Precision Diagrams author: Torkel Loman --- The following benchmark is of 66 ODEs with 288 terms that describe a chemical reaction network. This multisite2 model was used as a benchmark model in [Gupta et al.](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6013266/). We use [`ReactionNetworkImporters`](https://github.com/isaacsas/ReactionNetworkImporters.jl) to load the BioNetGen model files as a [Catalyst](https://github.com/SciML/Catalyst.jl) model, and then use [ModelingToolkit](https://github.com/SciML/ModelingToolkit.jl) to convert the Catalyst network model to ODEs. ```julia using DiffEqBase, OrdinaryDiffEq, Catalyst, ReactionNetworkImporters, Sundials, Plots, DiffEqDevTools, ODEInterface, ODEInterfaceDiffEq, LSODA, TimerOutputs, LinearAlgebra, ModelingToolkit, BenchmarkTools, LinearSolve gr() const to = TimerOutput() tf = 2.0 # generate ModelingToolkit ODEs @timeit to "Parse Network" prnbng = loadrxnetwork(BNGNetwork(), joinpath(@__DIR__, "Models/multisite2.net")) show(to) rn = prnbng.rn obs = [eq.lhs for eq in observed(rn)] @timeit to "Create ODESys" osys = convert(ODESystem, rn) show(to) tspan = (0.,tf) @timeit to "ODEProb No Jac" oprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]) show(to); ``` ```julia @timeit to "ODEProb SparseJac" sparsejacprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[], jac=true, sparse=true) show(to) ``` ```julia @show numspecies(rn) # Number of ODEs @show numreactions(rn) # Apprx. number of terms in the ODE @show length(parameters(rn)); # Number of Parameters ``` ## Time ODE derivative function compilation As compiling the ODE derivative functions has in the past taken longer than running a simulation, we first force compilation by evaluating these functions one time. ```julia u = ModelingToolkit.varmap_to_vars(nothing, species(rn); defaults=ModelingToolkit.defaults(rn)) du = copy(u) p = ModelingToolkit.varmap_to_vars(nothing, parameters(rn); defaults=ModelingToolkit.defaults(rn)) @timeit to "ODE rhs Eval1" oprob.f(du,u,p,0.) @timeit to "ODE rhs Eval2" oprob.f(du,u,p,0.) sparsejacprob.f(du,u,p,0.) ``` We also time the ODE rhs function with BenchmarkTools as it is more accurate given how fast evaluating `f` is: ```julia @btime oprob.f($du,$u,$p,0.) ``` ## Picture of the solution ```julia sol = solve(oprob, CVODE_BDF(), saveat=tf/1000., reltol=1e-5, abstol=1e-5) plot(sol; idxs=obs, legend=false, fmt=:png) ``` For these benchmarks we will be using the time-series error with these saving points. ## Generate Test Solution ```julia @time sol = solve(oprob, CVODE_BDF(), reltol=1e-15, abstol=1e-15) test_sol = TestSolution(sol); ``` ## Setups #### Sets plotting defaults ```julia default(legendfontsize=7,framestyle=:box,gridalpha=0.3,gridlinewidth=2.5) ``` #### Declares a plotting helper function ```julia function plot_settings(wp) times = vcat(map(wp -> wp.times, wp.wps)...) errors = vcat(map(wp -> wp.errors, wp.wps)...) xlimit = 10 .^ (floor(log10(minimum(errors))), ceil(log10(maximum(errors)))) ylimit = 10 .^ (floor(log10(minimum(times))), ceil(log10(maximum(times)))) return xlimit,ylimit end ``` #### Sets tolerances ```julia abstols = 1.0 ./ 10.0 .^ (6:10) reltols = 1.0 ./ 10.0 .^ (6:10); ``` ## Work-Precision Diagram We start by trying lsoda and CVODE solvers. #### Declare solvers We designate the solvers (and options) we wish to use. 
```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>CVODE_BDF(linear_solver=:LapackDense)), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES)) ]; ``` #### Plot Work-Precision Diagram Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_BDF" "CVODE_BDF (LapackDense)" "CVODE_BDF (GMRES)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Implicit Work-Precision Diagram Next, we try a couple of implicit Julia solvers. #### Declare solvers We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>TRBDF2()), Dict(:alg=>QNDF()), Dict(:alg=>FBDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas5P()) ]; ``` #### Plot Work-Precision Diagram Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e12),dtmin=1e-18,numruns=200) names = ["TRBDF2" "QNDF" "FBDF" "KenCarp4" "Rosenbrock23" "Rodas4" "Rodas5P"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` The implicit methods performing poorly suggests that the problem is non-stiff. ## Explicit Work-Precision Diagram Benchmarks for explicit solvers. #### Declare solvers We designate the solvers we wish to use; these also include lsoda and CVODE. ```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_Adams()), Dict(:alg=>Tsit5()), Dict(:alg=>BS5()), Dict(:alg=>VCABM()), Dict(:alg=>Vern6()), Dict(:alg=>Vern7()), Dict(:alg=>Vern8()), Dict(:alg=>Vern9()), Dict(:alg=>ROCK4()) ]; ``` #### Plot Work-Precision Diagram ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_Adams" "Tsit5" "BS5" "VCABM" "Vern6" "Vern7" "Vern8" "Vern9" "ROCK4"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Additional explicit solvers One additional explicit solver, `ROCK2`, performs noticeably worse compared to the others. ```julia setups = [Dict(:alg=>ROCK2())]; wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["ROCK2"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Summary of results Finally, we compute a single diagram comparing the various solvers used. #### Declare solvers We designate the solvers we wish to compare. ```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES)), Dict(:alg=>QNDF()), Dict(:alg=>FBDF()), Dict(:alg=>Rodas5P()), Dict(:alg=>BS5()), Dict(:alg=>VCABM()), Dict(:alg=>Vern6()), Dict(:alg=>ROCK4()) ]; ``` #### Plot Work-Precision Diagram For these, we generate a work-precision diagram for the selection of solvers.
```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_BDF (GMRES)" "QNDF" "FBDF" "Rodas5P" "BS5" "VCABM" "Vern6" "ROCK4"] colors = [:seagreen1 :darkgreen :deepskyblue1 :dodgerblue2 :blue :thistle2 :lightsteelblue2 :lightslateblue :purple4] markershapes = [:star4 :rect :hexagon :rtriangle :heptagon :star8 :heptagon :rtriangle :square] xlimit,ylimit = plot_settings(wp) xlimit = xlimit .* [0.95,1/0.95]; ylimit = ylimit .* [0.95,1/0.95]; plot(wp;label=names,left_margin=10Plots.mm,right_margin=10Plots.mm,xlimit=xlimit,ylimit=ylimit,xticks=[1e-10,1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,1e-3],yticks=[1e-3,1e-2,1e-1],color=colors,markershape=markershapes,legendfontsize=15,tickfontsize=15,guidefontsize=15, legend=:topright, lw=20, la=0.8, markersize=20,markerstrokealpha=1.0, markerstrokewidth=1.5, gridalpha=0.3, gridlinewidth=7.5,size=(1100,1000)) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Bio/multistate.jmd
docs
8394
--- title: Multistate Work-Precision Diagrams author: Torkel Loman --- The following benchmark is of 9 ODEs with 18 terms that describe a chemical reaction network. This multistate model was used as a benchmark model in [Gupta et al.](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6013266/). We use [`ReactionNetworkImporters`](https://github.com/isaacsas/ReactionNetworkImporters.jl) to load the BioNetGen model files as a [Catalyst](https://github.com/SciML/Catalyst.jl) model, and then use [ModelingToolkit](https://github.com/SciML/ModelingToolkit.jl) to convert the Catalyst network model to ODEs. ```julia using DiffEqBase, OrdinaryDiffEq, Catalyst, ReactionNetworkImporters, Sundials, Plots, DiffEqDevTools, ODEInterface, ODEInterfaceDiffEq, LSODA, TimerOutputs, LinearAlgebra, ModelingToolkit, BenchmarkTools gr() const to = TimerOutput() tf = 20.0 # generate ModelingToolkit ODEs @timeit to "Parse Network" prnbng = loadrxnetwork(BNGNetwork(), joinpath(@__DIR__, "Models/multistate.net")) show(to) rn = prnbng.rn obs = [eq.lhs for eq in observed(rn)] @timeit to "Create ODESys" osys = convert(ODESystem, rn) show(to) tspan = (0.,tf) @timeit to "ODEProb No Jac" oprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[]) show(to); ``` ```julia @timeit to "ODEProb SparseJac" sparsejacprob = ODEProblem{true, SciMLBase.FullSpecialize}(osys, Float64[], tspan, Float64[], jac=true, sparse=true) show(to) ``` ```julia @show numspecies(rn) # Number of ODEs @show numreactions(rn) # Apprx. number of terms in the ODE @show length(parameters(rn)); # Number of Parameters ``` ## Time ODE derivative function compilation As compiling the ODE derivative functions has in the past taken longer than running a simulation, we first force compilation by evaluating these functions one time. ```julia u = ModelingToolkit.varmap_to_vars(nothing, species(rn); defaults=ModelingToolkit.defaults(rn)) du = copy(u) p = ModelingToolkit.varmap_to_vars(nothing, parameters(rn); defaults=ModelingToolkit.defaults(rn)) @timeit to "ODE rhs Eval1" oprob.f(du,u,p,0.) @timeit to "ODE rhs Eval2" oprob.f(du,u,p,0.) sparsejacprob.f(du,u,p,0.) ``` We also time the ODE rhs function with BenchmarkTools as it is more accurate given how fast evaluating `f` is: ```julia @btime oprob.f($du,$u,$p,0.) ``` ## Picture of the solution ```julia sol = solve(oprob, CVODE_BDF(), saveat=tf/1000., reltol=1e-5, abstol=1e-5) plot(sol; idxs=obs, legend=false, fmt=:png) ``` For these benchmarks we will be using the time-series error with these saving points. ## Generate Test Solution ```julia @time sol = solve(oprob, CVODE_BDF(), reltol=1e-15, abstol=1e-15) test_sol = TestSolution(sol); ``` ## Setups #### Sets plotting defaults ```julia default(legendfontsize=7,framestyle=:box,gridalpha=0.3,gridlinewidth=2.5) ``` #### Declares a plotting helper function ```julia function plot_settings(wp) times = vcat(map(wp -> wp.times, wp.wps)...) errors = vcat(map(wp -> wp.errors, wp.wps)...) xlimit = 10 .^ (floor(log10(minimum(errors))), ceil(log10(maximum(errors)))) ylimit = 10 .^ (floor(log10(minimum(times))), ceil(log10(maximum(times)))) return xlimit,ylimit end ``` #### Sets tolerances ```julia abstols = 1.0 ./ 10.0 .^ (6:10) reltols = 1.0 ./ 10.0 .^ (6:10); ``` ## Work-Precision Diagram We start by trying lsoda and CVODE solvers. #### Declare solvers We designate the solvers (and options) we wish to use. 
```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>CVODE_BDF(linear_solver=:LapackDense)), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES)) ]; ``` #### Plot Work-Precision Diagram Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_BDF" "CVODE_BDF (LapackDense)" "CVODE_BDF (GMRES)"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Implicit Work-Precision Diagram Next, we try a couple of implicit Julia solvers. #### Declare solvers We designate the solvers we wish to use. ```julia setups = [ Dict(:alg=>TRBDF2()), Dict(:alg=>QNDF()), Dict(:alg=>FBDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas5P()) ]; ``` #### Plot Work-Precision Diagram Finally, we generate a work-precision diagram for the selection of solvers. ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e12),dtmin=1e-18,numruns=200) names = ["TRBDF2" "QNDF" "FBDF" "KenCarp4" "Rosenbrock23" "Rodas4" "Rodas5P"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` The implicit methods performing poorly suggests that the problem is non-stiff. ## Explicit Work-Precision Diagram Benchmarks for explicit solvers. #### Declare solvers We designate the solvers we wish to use; these also include lsoda. ```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>Tsit5()), Dict(:alg=>BS5()), Dict(:alg=>VCABM()), Dict(:alg=>Vern6()), Dict(:alg=>Vern7()), Dict(:alg=>Vern8()), Dict(:alg=>Vern9()), Dict(:alg=>ROCK4()) ]; ``` #### Plot Work-Precision Diagram ```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "Tsit5" "BS5" "VCABM" "Vern6" "Vern7" "Vern8" "Vern9" "ROCK4"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` #### Additional explicit solvers Two additional explicit solvers, `CVODE_Adams` and `ROCK2`, perform noticeably worse compared to the others. ```julia setups = [Dict(:alg=>CVODE_Adams()), Dict(:alg=>ROCK2())]; wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["CVODE_Adams" "ROCK2"] xlimit,ylimit = plot_settings(wp) plot(wp;label=names,xlimit=xlimit,ylimit=ylimit) ``` ## Summary of results Finally, we compute a single diagram comparing the various solvers used. #### Declare solvers We designate the solvers we wish to compare. ```julia setups = [ Dict(:alg=>lsoda()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>QNDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rodas5P()), Dict(:alg=>Tsit5()), Dict(:alg=>BS5()), Dict(:alg=>VCABM()), Dict(:alg=>Vern7()) ]; ``` #### Plot Work-Precision Diagram For these, we generate a work-precision diagram for the selection of solvers.
```julia wp = WorkPrecisionSet(oprob,abstols,reltols,setups;error_estimate=:l2, saveat=tf/10000.,appxsol=test_sol,maxiters=Int(1e9),numruns=200) names = ["lsoda" "CVODE_BDF" "QNDF" "KenCarp4" "Rodas5P" "Tsit5" "BS5" "VCABM" "Vern7"] colors = [:seagreen1 :chartreuse1 :deepskyblue1 :lightskyblue :blue :orchid2 :thistle2 :lightsteelblue2 :mediumpurple1] markershapes = [:star4 :circle :hexagon :star5 :heptagon :ltriangle :star8 :heptagon :star6] xlimit,ylimit = plot_settings(wp) xlimit = xlimit .* [0.95,1/0.95]; ylimit = ylimit .* [0.95,1/0.95]; plot(wp;label=names,left_margin=10Plots.mm,right_margin=10Plots.mm,xlimit=xlimit,ylimit=ylimit,xticks=[1e-12,1e-11,1e-10,1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,1e-3,1e-2],yticks=[1e-3,1e-2],color=colors,markershape=markershapes,legendfontsize=15,tickfontsize=15,guidefontsize=15, legend=:topright, lw=20, la=0.8, markersize=20,markerstrokealpha=1.0, markerstrokewidth=1.5, gridalpha=0.3, gridlinewidth=7.5,size=(1100,1000)) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/DAE/ChemicalAkzoNobel.jmd
docs
6788
--- title: Chemical Akzo Nobel Differential-Algebraic Equation (DAE) Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, DiffEqDevTools, Sundials, ModelingToolkit, ODEInterfaceDiffEq, Plots, DASSL, DASKR using LinearAlgebra ModelingToolkit.@parameters begin k₁=18.7 k₂=0.58 k₃=0.09 k₄=0.42 kbig=34.4 kla=3.3 ks=115.83 po2=0.9 hen=737 end @variables begin t y₁(t) = 0.444 y₂(t) = 0.00123 y₃(t) = 0.0 y₄(t) = 0.007 y₅(t) = 1.0 y₆(t) = 115.83*0.444*0.007 # ks*y₁*y₄ end D = Differential(t) r₁ = k₁ * (y₁^4.)*sqrt(abs(y₂)) r₂ = k₂ * y₃ * y₄ r₃ = k₂/kbig * y₁ * y₅ r₄ = k₃*y₁*(y₄^2) r₅ = k₄*(y₆^2)*sqrt(abs(y₂)) fin = kla*(po2/hen-y₂) eqs = [ D(y₁) ~ -2. * r₁ + r₂ - r₃ - r₄ D(y₂) ~ -0.5 * r₁ - r₄ - 0.5*r₅ + fin D(y₃) ~ r₁ - r₂ + r₃ D(y₄) ~ -r₂ + r₃ - 2. * r₄ D(y₅) ~ r₂ - r₃ + r₅ 0. ~ ks * y₁ * y₄ - y₆ ] ModelingToolkit.@named sys = ModelingToolkit.ODESystem(eqs) tspan = (0.0, 180.0) mmprob = ODEProblem(sys, [], tspan) sol = solve(mmprob, Rodas4(),abstol=1/10^14,reltol=1/10^14) odaeprob = ODAEProblem(structural_simplify(sys),[],tspan) ode_ref_sol = solve(odaeprob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14); du = mmprob.f(mmprob.u0,mmprob.p,0.0) du0 = D.(states(sys)) .=> du daeprob = DAEProblem(sys,du0,[],tspan) ref_sol = solve(daeprob,IDA(),abstol=1/10^14,reltol=1/10^14); probs = [mmprob,daeprob,odaeprob] refs = [ref_sol,ref_sol,ode_ref_sol]; ``` ```julia plot(ode_ref_sol, vars = [y₁,y₂,y₃,y₄,y₅,y₆]) ``` ## High Tolerances ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (2:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>CVODE_BDF()), Dict(:prob_choice => 3, :alg=>TRBDF2()), Dict(:prob_choice => 3, :alg=>KenCarp4()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (3:5); setups = [Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 2, :alg=>DASSL.dassl()), Dict(:prob_choice => 2, :alg=>DASKR.daskr()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Timeseries Errors ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), # too slow Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = 
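# The error_estimate = :l2 option below replaces the default final-time
# error with the time-series (l2) error over the saved points.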
WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (2:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>CVODE_BDF()), Dict(:prob_choice => 3, :alg=>TRBDF2()), Dict(:prob_choice => 3, :alg=>KenCarp4()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. ```julia abstols = 1.0 ./ 10.0 .^ (7:12) reltols = 1.0 ./ 10.0 .^ (4:9) setups = [Dict(:prob_choice => 1, :alg=>Rodas5()), Dict(:prob_choice => 3, :alg=>Rodas5()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` DASKR fails at very low tolerances, so we pull the tolerances back for a comparison. ```julia abstols = 1.0 ./ 10.0 .^ (7:9) reltols = 1.0 ./ 10.0 .^ (4:6) setups = [Dict(:prob_choice => 1, :alg=>Rodas5()), Dict(:prob_choice => 3, :alg=>Rodas5()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 2, :alg=>DASKR.daskr()), ] gr() wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Conclusion ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/DAE/OregoDAE.jmd
docs
5482
--- title: OREGO Differential-Algebraic Equation (DAE) Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, DiffEqDevTools, Sundials, ModelingToolkit, ODEInterfaceDiffEq, Plots, DASSL, DASKR using LinearAlgebra @variables t y1(t)=1.0 y2(t)=2.0 y3(t)=3.0 @parameters p1=77.27 p2=8.375e-6 p3=0.161 D = Differential(t) eqs = [ D(y1) ~ p1*(y2+y1*(1-p2*y1-y2)) D(y2) ~ (y3-(1+y1)*y2)/p1 D(y3) ~ p3*(y1-y3) ] @named sys = ODESystem(eqs) simpsys = structural_simplify(sys) mmprob = ODEProblem(sys,[],(0.0,30.0)) daeprob = DAEProblem(sys,[D(y1)=>77.26935286375, D(y2)=>-0.012941633234114146, D(y3)=>-0.322],[],(0.0,30.0)) odaeprob = ODAEProblem(simpsys,[],(0.0,30.0)) ref_sol = solve(daeprob,IDA(),abstol=1/10^14,reltol=1/10^14); ode_ref_sol = solve(odaeprob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14); probs = [mmprob,daeprob,odaeprob] refs = [ref_sol,ref_sol,ode_ref_sol]; ``` ```julia plot(ref_sol) ``` ## High Tolerances ```julia abstols = 1.0 ./ 10.0 .^ (6:9) reltols = 1.0 ./ 10.0 .^ (2:5); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;print_names=true, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>CVODE_BDF()), Dict(:prob_choice => 3, :alg=>TRBDF2()), Dict(:prob_choice => 3, :alg=>KenCarp4()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (2:4); setups = [Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 2, :alg=>DASSL.dassl()), Dict(:prob_choice => 2, :alg=>DASKR.daskr()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Timeseries Errors ```julia abstols = 1.0 ./ 10.0 .^ (6:9) reltols = 1.0 ./ 10.0 .^ (2:5); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), # too slow Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] gr() wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:9) reltols = 1.0 ./ 10.0 .^ (2:5); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>CVODE_BDF()), Dict(:prob_choice => 3, :alg=>TRBDF2()), Dict(:prob_choice => 3, :alg=>KenCarp4()), ] wp = 
WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. ```julia abstols = 1.0 ./ 10.0 .^ (7:12) reltols = 1.0 ./ 10.0 .^ (4:9) setups = [Dict(:prob_choice => 1, :alg=>Rodas5()), Dict(:prob_choice => 3, :alg=>Rodas5()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] gr() wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Conclusion ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/DAE/ROBERDAE.jmd
docs
6178
--- title: ROBER Differential-Algebraic Equation (DAE) Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, DiffEqDevTools, Sundials, ModelingToolkit, ODEInterfaceDiffEq, Plots, DASSL, DASKR using LinearAlgebra @variables t y₁(t)=1.0 y₂(t)=0.0 y₃(t)=0.0 @parameters k₁=0.04 k₂=3e7 k₃=1e4 D = Differential(t) eqs = [ D(y₁) ~ -k₁*y₁ + k₃*y₂*y₃ D(y₂) ~ k₁*y₁ - k₃*y₂*y₃ - k₂*y₂^2 0 ~ y₁ + y₂ + y₃ - 1 ] @named sys = ODESystem(eqs) simpsys = structural_simplify(sys) mmprob = ODEProblem(sys,[],(0.0,1e5)) daeprob = DAEProblem(sys,[D(y₁)=>-0.04, D(y₂)=>0.04, D(y₃)=>0.0],[],(0.0,1e5)) odaeprob = ODAEProblem(simpsys,[],(0.0,1e5)) ref_sol = solve(daeprob,IDA(),abstol=1/10^14,reltol=1/10^14); ode_ref_sol = solve(odaeprob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14); probs = [mmprob,daeprob,odaeprob] refs = [ref_sol,ref_sol,ode_ref_sol]; ``` ```julia plot(ode_ref_sol, vars = [y₁,y₂,y₃]) ``` ## High Tolerances ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (2:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>CVODE_BDF()), Dict(:prob_choice => 3, :alg=>TRBDF2()), Dict(:prob_choice => 3, :alg=>KenCarp4()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (3:5); setups = [Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 2, :alg=>DASSL.dassl()), Dict(:prob_choice => 2, :alg=>DASKR.daskr()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Timeseries Errors ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), # too slow Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (6:8) reltols = 1.0 ./ 10.0 .^ (2:4); setups = [Dict(:prob_choice => 1, :alg=>Rosenbrock23()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 3, :alg=>Rosenbrock23()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>CVODE_BDF()), Dict(:prob_choice => 3, :alg=>TRBDF2()), Dict(:prob_choice => 3, 
:alg=>KenCarp4()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. ```julia abstols = 1.0 ./ 10.0 .^ (7:12) reltols = 1.0 ./ 10.0 .^ (4:9) setups = [Dict(:prob_choice => 1, :alg=>Rodas5()), Dict(:prob_choice => 3, :alg=>Rodas5()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>Rodas4()), #Dict(:prob_choice => 1, :alg=>FBDF()), Dict(:prob_choice => 1, :alg=>QNDF()), Dict(:prob_choice => 1, :alg=>rodas()), Dict(:prob_choice => 1, :alg=>radau()), Dict(:prob_choice => 1, :alg=>RadauIIA5()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate = :l2, save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` DASKR fails at very low tolerances, so we pull the tolerances back for a comparison. ```julia abstols = 1.0 ./ 10.0 .^ (7:9) reltols = 1.0 ./ 10.0 .^ (4:6) setups = [Dict(:prob_choice => 1, :alg=>Rodas5()), Dict(:prob_choice => 3, :alg=>Rodas5()), Dict(:prob_choice => 1, :alg=>Rodas4()), Dict(:prob_choice => 3, :alg=>Rodas4()), Dict(:prob_choice => 2, :alg=>DFBDF()), Dict(:prob_choice => 2, :alg=>IDA()), Dict(:prob_choice => 2, :alg=>DASKR.daskr()), ] gr() wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=refs,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Conclusion ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/DynamicalODE/Henon-Heiles_energy_conservation_benchmark.jmd
docs
7328
--- title: Hénon-Heiles Energy Conservation author: Sebastian Micluța-Câmpeanu, Chris Rackauckas --- In this notebook we will study the energy conservation properties of several high-order methods for the [Hénon-Heiles system](https://en.wikipedia.org/wiki/H%C3%A9non%E2%80%93Heiles_system). We will see how the energy error behaves at very tight tolerances and how different techniques, such as using symplectic solvers or manifold projections, benchmark against each other. The Hamiltonian for this system is given by: $$\mathcal{H}=\frac{1}{2}(p_1^2 + p_2^2) + \frac{1}{2}\left(q_1^2 + q_2^2 + 2q_1^2 q_2 - \frac{2}{3}q_2^3\right)$$ We will also compare the in place approach with the out of place approach by using `Array`s (for the in place version) and `StaticArrays` (for out of place versions). In order to separate these two, we will use `iip` for the in-place names and `oop` for out of place ones. ```julia using OrdinaryDiffEq, Plots, DiffEqCallbacks using SciMLBenchmarks using TaylorIntegration, LinearAlgebra, StaticArrays gr(fmt=:png) default(fmt=:png) T(p) = 1//2 * norm(p)^2 V(q) = 1//2 * (q[1]^2 + q[2]^2 + 2q[1]^2 * q[2]- 2//3 * q[2]^3) H(p,q, params) = T(p) + V(q) function iip_dq(dq,p,q,params,t) dq[1] = p[1] dq[2] = p[2] end function iip_dp(dp,p,q,params,t) dp[1] = -q[1] * (1 + 2q[2]) dp[2] = -q[2] - (q[1]^2 - q[2]^2) end const iip_q0 = [0.1, 0.] const iip_p0 = [0., 0.5] function oop_dq(p, q, params, t) p end function oop_dp(p, q, params, t) dp1 = -q[1] * (1 + 2q[2]) dp2 = -q[2] - (q[1]^2 - q[2]^2) @SVector [dp1, dp2] end const oop_q0 = @SVector [0.1, 0.] const oop_p0 = @SVector [0., 0.5] function hamilton(du,u,p,t) # note: the derivative views must come from du and the state views from u dq, q = @views du[3:4], u[3:4] dp, p = @views du[1:2], u[1:2] dp[1] = -q[1] * (1 + 2q[2]) dp[2] = -q[2] - (q[1]^2 - q[2]^2) dq .= p return nothing end function g(resid, u, p) resid[1] = H([u[1],u[2]], [u[3],u[4]], nothing) - E resid[2:4] .= 0 end const cb = ManifoldProjection(g, nlopts=Dict(:ftol=>1e-13)) const E = H(iip_p0, iip_q0, nothing) ``` For the comparison we will use the following function: ```julia energy_err(sol) = map(i->H([sol[1,i], sol[2,i]], [sol[3,i], sol[4,i]], nothing)-E, 1:length(sol.u)) abs_energy_err(sol) = [abs.(H([sol[1,j], sol[2,j]], [sol[3,j], sol[4,j]], nothing) - E) for j=1:length(sol.u)] function compare(mode=:inplace, all=true, plt=nothing; tmax=1e2) if mode == :inplace prob = DynamicalODEProblem(iip_dp, iip_dq, iip_p0, iip_q0, (0., tmax)) else prob = DynamicalODEProblem(oop_dp, oop_dq, oop_p0, oop_q0, (0., tmax)) end prob_linear = ODEProblem(hamilton, vcat(iip_p0, iip_q0), (0., tmax)) GC.gc() (mode == :inplace && all) && @time sol1 = solve(prob, Vern9(), callback=cb, abstol=1e-14, reltol=1e-14) GC.gc() @time sol2 = solve(prob, KahanLi8(), dt=1e-2, maxiters=1e10) GC.gc() @time sol3 = solve(prob, SofSpa10(), dt=1e-2, maxiters=1e8) GC.gc() @time sol4 = solve(prob, Vern9(), abstol=1e-14, reltol=1e-14) GC.gc() @time sol5 = solve(prob, DPRKN12(), abstol=1e-14, reltol=1e-14) GC.gc() (mode == :inplace && all) && @time sol6 = solve(prob_linear, TaylorMethod(50), abstol=1e-20) (mode == :inplace && all) && println("Vern9 + ManifoldProjection max energy error:\t"* "$(maximum(abs_energy_err(sol1)))\tin\t$(length(sol1.u))\tsteps.") println("KahanLi8 max energy error:\t\t\t$(maximum(abs_energy_err(sol2)))\tin\t$(length(sol2.u))\tsteps.") println("SofSpa10 max energy error:\t\t\t$(maximum(abs_energy_err(sol3)))\tin\t$(length(sol3.u))\tsteps.") println("Vern9 max energy error:\t\t\t\t$(maximum(abs_energy_err(sol4)))\tin\t$(length(sol4.u))\tsteps.")
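# DPRKN12 is a 12th-order Runge-Kutta-Nyström pair, i.e. it exploits the
# second-order structure of the equations of motion.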
println("DPRKN12 max energy error:\t\t\t$(maximum(abs_energy_err(sol5)))\tin\t$(length(sol5.u))\tsteps.") (mode == :inplace && all) && println("TaylorMethod max energy error:\t\t\t$(maximum(abs_energy_err(sol6)))"* "\tin\t$(length(sol6.u))\tsteps.") if plt === nothing plt = plot(xlabel="t", ylabel="Energy error") end (mode == :inplace && all) && plot!(sol1.t, energy_err(sol1), label="Vern9 + ManifoldProjection") plot!(sol2.t, energy_err(sol2), label="KahanLi8", ls=mode==:inplace ? :solid : :dash) plot!(sol3.t, energy_err(sol3), label="SofSpa10", ls=mode==:inplace ? :solid : :dash) plot!(sol4.t, energy_err(sol4), label="Vern9", ls=mode==:inplace ? :solid : :dash) plot!(sol5.t, energy_err(sol5), label="DPRKN12", ls=mode==:inplace ? :solid : :dash) (mode == :inplace && all) && plot!(sol6.t, energy_err(sol6), label="TaylorMethod") return plt end ``` The `mode` argument choses between the in place approach and the out of place one. The `all` parameter is used to compare only the integrators that support both the in place and the out of place versions (we reffer here only to the 6 high order methods chosen bellow). The `plt` argument can be used to overlay the results over a previous plot and the `tmax` keyword determines the simulation time. Note: 1. The `Vern9` method is used with `ODEProblem` because of performance issues with `ArrayPartition` indexing which manifest for `DynamicalODEProblem`. 2. The `NLsolve` call used by `ManifoldProjection` was modified to use `ftol=1e-13` in order to obtain a very low energy error. Here are the results of the comparisons between the in place methods: ```julia compare(tmax=1e2) ``` ```julia compare(tmax=1e3) ``` ```julia compare(tmax=1e4) ``` ```julia compare(tmax=5e4) ``` We can see that as the simulation time increases, the energy error increases. For this particular example the energy error for all the methods is comparable. For relatively short simulation times, if a highly accurate solution is required, the symplectic method is not recommended as its energy error fluctuations are larger than for other methods. An other thing to notice is the fact that the two versions of `Vern9` behave identically, as expected, untill the energy error set by `ftol` is reached. We will now compare the in place with the out of place versions. In the plots bellow we will use a dashed line for the out of place versions. ```julia function in_vs_out(;all=false, tmax=1e2) println("In place versions:") plt = compare(:inplace, all, tmax=tmax) println("\nOut of place versions:") plt = compare(:oop, false, plt; tmax=tmax) end ``` First, here is a summary of all the available methods for `tmax = 1e3`: ```julia in_vs_out(all=true, tmax=1e3) ``` Now we will compare the in place and the out of place versions, but only for the integrators that are compatible with `StaticArrays` ```julia in_vs_out(tmax=1e2) ``` ```julia in_vs_out(tmax=1e3) ``` ```julia in_vs_out(tmax=1e4) ``` ```julia in_vs_out(tmax=5e4) ``` As we see from the above comparisons, the `StaticArray` versions are significantly faster and use less memory. The speedup provided for the out of place version is more proeminent at larger values for `tmax`. We can see again that if the simulation time is increased, the energy error of the symplectic methods is less noticeable compared to the rest of the methods. The benchmarks were performed on a machine with ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/DynamicalODE/Quadrupole_boson_Hamiltonian_energy_conservation_benchmark.jmd
docs
7443
---
title: Quadrupole Boson Energy Conservation
author: Sebastian Micluța-Câmpeanu, Chris Rackauckas
---

In this notebook we will study the energy conservation properties of several high-order methods for a system with the following Hamiltonian:

$$\mathcal{H}\left(q_0,q_2,p_0,p_2\right) = \frac{A}{2} \left(p_0^2 + p_2^2 + q_0^2 + q_2^2\right) + \frac{B}{\sqrt{2}} q_0 \left(3q_2^2 - q_0^2\right) + \frac{D}{4} \left(q_0^2+q_2^2\right)^2$$

This Hamiltonian resembles the Hénon-Heiles one, but it has an additional fourth-order term. The aim of this benchmark is to see what happens with the energy error when highly accurate solutions are needed and how the results compare with the Hénon-Heiles case.
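In the code below the coordinates $(q_0, q_2)$ are stored as `q[1]` and `q[2]`; Hamilton's equations then give the derivatives implemented in `iip_dp` and `oop_dp`:

$$\dot q_i = A p_i, \quad \dot p_1 = -A q_1 - \frac{3B}{\sqrt{2}} \left(q_2^2 - q_1^2\right) - D q_1 \left(q_1^2 + q_2^2\right), \quad \dot p_2 = -q_2 \left(A + 3\sqrt{2}\, B q_1 + D \left(q_1^2 + q_2^2\right)\right)$$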
```julia
using OrdinaryDiffEq, Plots, DiffEqCallbacks, LinearAlgebra
using TaylorIntegration
using ParameterizedFunctions
using StaticArrays
gr()
default(fmt=:png)

T(p) = A / 2 * norm(p)^2
V(q) = A / 2 * (q[1]^2 + q[2]^2) + B / sqrt(2) * q[1] * (3 * q[2]^2 - q[1]^2) + D / 4 * (q[1]^2 + q[2]^2)^2
H(p, q, params) = T(p) + V(q)

const A, B, D = 1., 0.55, 0.4

function iip_dq(dq, p, q, params, t)
    dq[1] = A * p[1]
    dq[2] = A * p[2]
end

function iip_dp(dp, p, q, params, t)
    dp[1] = -A * q[1] - 3 * B / sqrt(2) * (q[2]^2 - q[1]^2) - D * q[1] * (q[1]^2 + q[2]^2)
    dp[2] = -q[2] * (A + 3 * sqrt(2) * B * q[1] + D * (q[1]^2 + q[2]^2))
end

const iip_q0 = [4.919080920016389, 2.836942666663649]
const iip_p0 = [0., 0.]
const iip_u0 = vcat(iip_p0, iip_q0)

function oop_dq(p, q, params, t)
    p
end

function oop_dp(p, q, params, t)
    dp1 = -A * q[1] - 3 * B / sqrt(2) * (q[2]^2 - q[1]^2) - D * q[1] * (q[1]^2 + q[2]^2)
    dp2 = -q[2] * (A + 3 * sqrt(2) * B * q[1] + D * (q[1]^2 + q[2]^2))
    @SVector [dp1, dp2]
end

const oop_q0 = @SVector [4.919080920016389, 2.836942666663649]
const oop_p0 = @SVector [0., 0.]
const oop_u0 = vcat(oop_p0, oop_q0)

function hamilton(z, params, t)
    SVector(
        -A * z[3] - 3 * B / sqrt(2) * (z[4]^2 - z[3]^2) - D * z[3] * (z[3]^2 + z[4]^2),
        -z[4] * (A + 3 * sqrt(2) * B * z[3] + D * (z[3]^2 + z[4]^2)),
        z[1],
        z[2]
    )
end

function g(resid, u, p)
    resid[1] = H([u[1], u[2]], [u[3], u[4]], nothing) - E
    resid[2:4] .= 0
end

const E = H(iip_p0, iip_q0, nothing)
const cb = ManifoldProjection(g, nlopts=Dict(:ftol => 1e-13));
```

For the comparison we will use the following function:

```julia
energy_err(sol) = map(i -> H([sol[1,i], sol[2,i]], [sol[3,i], sol[4,i]], nothing) - E, 1:length(sol.u))
abs_energy_err(sol) = [abs.(H([sol[1,j], sol[2,j]], [sol[3,j], sol[4,j]], nothing) - E) for j = 1:length(sol.u)]

function compare(mode=:inplace, all=true, plt=nothing; tmax=1e2)
    if mode == :inplace
        prob = DynamicalODEProblem(iip_dp, iip_dq, iip_p0, iip_q0, (0., tmax))
    else
        prob = DynamicalODEProblem(oop_dp, oop_dq, oop_p0, oop_q0, (0., tmax))
    end
    prob_linear = ODEProblem(hamilton, vcat(iip_p0, iip_q0), (0., tmax))

    GC.gc()
    (mode == :inplace && all) && @time sol1 = solve(prob, Vern9(), callback=cb, abstol=1e-14, reltol=1e-14)
    GC.gc()
    @time sol2 = solve(prob, KahanLi8(), dt=1e-2, maxiters=1e10)
    GC.gc()
    @time sol3 = solve(prob, SofSpa10(), dt=1e-2, maxiters=1e8)
    GC.gc()
    @time sol4 = solve(prob, Vern9(), abstol=1e-14, reltol=1e-14)
    GC.gc()
    @time sol5 = solve(prob, DPRKN12(), abstol=1e-14, reltol=1e-14)
    GC.gc()
    (mode == :inplace && all) && @time sol6 = solve(prob_linear, TaylorMethod(50), abstol=1e-20)

    (mode == :inplace && all) && println("Vern9 + ManifoldProjection max energy error:\t"*
        "$(maximum(abs_energy_err(sol1)))\tin\t$(length(sol1.u))\tsteps.")
    println("KahanLi8 max energy error:\t\t\t$(maximum(abs_energy_err(sol2)))\tin\t$(length(sol2.u))\tsteps.")
    println("SofSpa10 max energy error:\t\t\t$(maximum(abs_energy_err(sol3)))\tin\t$(length(sol3.u))\tsteps.")
    println("Vern9 max energy error:\t\t\t\t$(maximum(abs_energy_err(sol4)))\tin\t$(length(sol4.u))\tsteps.")
    println("DPRKN12 max energy error:\t\t\t$(maximum(abs_energy_err(sol5)))\tin\t$(length(sol5.u))\tsteps.")
    (mode == :inplace && all) && println("TaylorMethod max energy error:\t\t\t$(maximum(abs_energy_err(sol6)))"*
        "\tin\t$(length(sol6.u))\tsteps.")

    if plt === nothing
        plt = plot(xlabel="t", ylabel="Energy error")
    end
    (mode == :inplace && all) && plot!(sol1.t, energy_err(sol1), label="Vern9 + ManifoldProjection")
    plot!(sol2.t, energy_err(sol2), label="KahanLi8", ls=mode==:inplace ? :solid : :dash)
    plot!(sol3.t, energy_err(sol3), label="SofSpa10", ls=mode==:inplace ? :solid : :dash)
    plot!(sol4.t, energy_err(sol4), label="Vern9", ls=mode==:inplace ? :solid : :dash)
    plot!(sol5.t, energy_err(sol5), label="DPRKN12", ls=mode==:inplace ? :solid : :dash)
    (mode == :inplace && all) && plot!(sol6.t, energy_err(sol6), label="TaylorMethod")

    return plt
end
```

The `mode` argument chooses between the in-place approach and the out-of-place one. The `all` parameter is used to compare only the integrators that support both the in-place and the out-of-place versions (we refer here only to the six high-order methods chosen below). The `plt` argument can be used to overlay the results over a previous plot and the `tmax` keyword determines the simulation time.

Note:
1. The `Vern9` method is used with `ODEProblem` because of performance issues with `ArrayPartition` indexing which manifest for `DynamicalODEProblem`.
2. The `NLsolve` call used by `ManifoldProjection` was modified to use `ftol=1e-13` in order to obtain a very low energy error.
Here are the results of the comparisons between the in-place methods:

```julia
compare(tmax=1e2)
```

```julia
compare(tmax=1e3)
```

```julia
compare(tmax=1e4)
```

```julia
compare(tmax=2e4)
```

As we can see from the above plots, we can achieve a very low energy error for long simulation times by using manifold projection and very high-order Taylor methods. In comparison with the Hénon-Heiles system, we see that as the Hamiltonian got more complex, the energy error for the other integration methods increased significantly.

We will now compare the in-place with the out-of-place versions. In the plots below we will use a dashed line for the out-of-place versions.

```julia
function in_vs_out(; all=false, tmax=1e2)
    println("In place versions:")
    plt = compare(:inplace, all, tmax=tmax)
    println("\nOut of place versions:")
    plt = compare(:oop, false, plt; tmax=tmax)
end
```

First, here is a summary of all the available methods for `tmax = 1e3`:

```julia
in_vs_out(all=true, tmax=1e3)
```

Now we will compare the in-place and the out-of-place versions, but only for the integrators that are compatible with `StaticArrays`.

```julia
in_vs_out(tmax=1e2)
```

```julia
in_vs_out(tmax=1e3)
```

```julia
in_vs_out(tmax=1e4)
```

```julia
in_vs_out(tmax=2e4)
```

As we see from the above comparisons, the `StaticArray` versions are significantly faster and use less memory. The speedup provided for the out-of-place version is more prominent at larger values for `tmax`. We can see again that if the simulation time is increased, the energy error of the symplectic methods is less noticeable compared to the rest of the methods. In comparison with the Hénon-Heiles case, we see that the symplectic methods are more competitive with `DPRKN12`.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/DynamicalODE/single_pendulums.jmd
docs
5946
---
title: Single Pendulum Comparison
author: Gen Kuroki (黒木玄), Chris Rackauckas
---

# Solving single pendulums by DifferentialEquations.jl

In this notebook, we shall solve the single pendulum equation:

$$\ddot q = -\sin q,$$

where $q$ means the angle.

Hamiltonian:

$$H(q,p) = \frac{1}{2}p^2 - \cos q + 1.$$

Canonical equation:

$$\dot q = p, \quad \dot p = - \sin q.$$

Initial condition:

$$q(0) = 0, \quad p(0) = 2k.$$

Exact solution:

$$q(t) = 2\arcsin(k\,\mathrm{sn}(t,k)).$$

Maximum of $q(t)$:

$$\sin(q_{\max}/2) = k, \quad q_{\max} = \max\{q(t)\}.$$

Define $y(t)$ by

$$y(t) = \sin(q(t)/2) = k\,\mathrm{sn}(t,k), \quad y_{\max} = k.$$
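The relation $\sin(q_{\max}/2) = k$ follows from energy conservation: the initial energy is $H(0, 2k) = 2k^2$, and at the turning point $p = 0$, so

$$1 - \cos q_{\max} = 2\sin^2(q_{\max}/2) = 2k^2.$$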
```julia
# Single pendulums shall be solved numerically.
#
using OrdinaryDiffEq, Elliptic, Printf, DiffEqPhysics, Statistics

sol2q(sol) = [sol.u[i][j] for i in 1:length(sol.u), j in 1:length(sol.u[1])÷2]
sol2p(sol) = [sol.u[i][j] for i in 1:length(sol.u), j in length(sol.u[1])÷2+1:length(sol.u[1])]
sol2tqp(sol) = (sol.t, sol2q(sol), sol2p(sol))

# The exact solutions of single pendulums can be expressed by the Jacobian elliptic functions.
#
sn(u, k) = Jacobi.sn(u, k^2) # the Jacobian sn function

# Use PyPlot.
#
using PyPlot

colorlist = [
    "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
    "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf",
]
cc(k) = colorlist[mod1(k, length(colorlist))]

# plot the solution of a Hamiltonian problem
#
function plotsol(sol::ODESolution)
    local t, q, p
    t, q, p = sol2tqp(sol)
    local d = size(q)[2]
    for j in 1:d
        j_str = d > 1 ? "[$j]" : ""
        plot(t, q[:,j], color=cc(2j-1), label="q$(j_str)", lw=1)
        plot(t, p[:,j], color=cc(2j),   label="p$(j_str)", lw=1, ls="--")
    end
    grid(ls=":")
    xlabel("t")
    legend()
end

# plot the solution of a Hamiltonian problem on the 2D phase space
#
function plotsol2(sol::ODESolution)
    local t, q, p
    t, q, p = sol2tqp(sol)
    local d = size(q)[2]
    for j in 1:d
        j_str = d > 1 ? "[$j]" : ""
        plot(q[:,j], p[:,j], color=cc(j), label="(q$(j_str),p$(j_str))", lw=1)
    end
    grid(ls=":")
    xlabel("q")
    ylabel("p")
    legend()
end

# plot the energy of a Hamiltonian problem
#
function plotenergy(H, sol::ODESolution)
    local t, q, p
    t, q, p = sol2tqp(sol)
    local energy = [H(q[i,:], p[i,:], nothing) for i in 1:size(q)[1]]
    plot(t, energy, label="energy", color="red", lw=1)
    grid(ls=":")
    xlabel("t")
    legend()
    local stdenergy_str = @sprintf("%.3e", std(energy))
    title("std(energy) = $stdenergy_str", fontsize=10)
end

# plot the numerical and exact solutions of a single pendulum
#
# Warning: Assume q(0) = 0, p(0) = 2k. (for the sake of laziness)
#
function plotcomparison(k, sol::ODESolution)
    local t, q, p
    t, q, p = sol2tqp(sol)
    local y = sin.(q/2)
    local y_exact = k*sn.(t, k) # the exact solution

    plot(t, y,       label="numerical", lw=1)
    plot(t, y_exact, label="exact",     lw=1, ls="--")
    grid(ls=":")
    xlabel("t")
    ylabel("y = sin(q(t)/2)")
    legend()
    local error_str = @sprintf("%.3e", maximum(abs.(y - y_exact)))
    title("maximum(abs(numerical - exact)) = $error_str", fontsize=10)
end

# plot solution and energy
#
function plotsolenergy(H, integrator, Δt, sol::ODESolution)
    local integrator_str = replace("$integrator", r"^[^.]*\." => "")

    figure(figsize=(10,8))

    subplot2grid((21,20), ( 1, 0), rowspan=10, colspan=10)
    plotsol(sol)

    subplot2grid((21,20), ( 1,10), rowspan=10, colspan=10)
    plotsol2(sol)

    subplot2grid((21,20), (11, 0), rowspan=10, colspan=10)
    plotenergy(H, sol)

    suptitle("===== $integrator_str, Δt = $Δt =====")
end

# Solve a single pendulum
#
function singlependulum(k, integrator, Δt; t0 = 0.0, t1 = 100.0)
    local H(p, q, params) = p[1]^2/2 - cos(q[1]) + 1
    local q0 = [0.0]
    local p0 = [2k]
    local prob = HamiltonianProblem(H, p0, q0, (t0, t1))

    local integrator_str = replace("$integrator", r"^[^.]*\." => "")
    @printf("%-25s", "$integrator_str:")
    sol = solve(prob, integrator, dt=Δt)
    @time local sol = solve(prob, integrator, dt=Δt)

    sleep(0.1)
    figure(figsize=(10,8))

    subplot2grid((21,20), ( 1, 0), rowspan=10, colspan=10)
    plotsol(sol)

    subplot2grid((21,20), ( 1,10), rowspan=10, colspan=10)
    plotsol2(sol)

    subplot2grid((21,20), (11, 0), rowspan=10, colspan=10)
    plotenergy(H, sol)

    subplot2grid((21,20), (11,10), rowspan=10, colspan=10)
    plotcomparison(k, sol)

    suptitle("===== $integrator_str, Δt = $Δt =====")
end
```

## Tests

```julia
# Single pendulum
k = rand()
integrator = VelocityVerlet()
Δt = 0.1
singlependulum(k, integrator, Δt, t0=-20.0, t1=20.0)
```

```julia
# Two single pendulums
H(q, p, param) = sum(p.^2/2 .- cos.(q) .+ 1)
q0 = pi*rand(2)
p0 = zeros(2)
t0, t1 = -20.0, 20.0
prob = HamiltonianProblem(H, q0, p0, (t0, t1))

integrator = McAte4()
Δt = 0.1
sol = solve(prob, integrator, dt=Δt)
@time sol = solve(prob, integrator, dt=Δt)

sleep(0.1)
plotsolenergy(H, integrator, Δt, sol)
```

## Comparison of symplectic integrators

```julia
SymplecticIntegrators = [
    SymplecticEuler(),
    VelocityVerlet(),
    VerletLeapfrog(),
    PseudoVerletLeapfrog(),
    McAte2(),
    Ruth3(),
    McAte3(),
    CandyRoz4(),
    McAte4(),
    CalvoSanz4(),
    McAte42(),
    McAte5(),
    Yoshida6(),
    KahanLi6(),
    McAte8(),
    KahanLi8(),
    SofSpa10(),
]

k = 0.999
Δt = 0.1
for integrator in SymplecticIntegrators
    singlependulum(k, integrator, Δt)
end
```

```julia
k = 0.999
Δt = 0.01
for integrator in SymplecticIntegrators[1:4]
    singlependulum(k, integrator, Δt)
end
```

```julia
k = 0.999
Δt = 0.001
singlependulum(k, SymplecticEuler(), Δt)
```

```julia
k = 0.999
Δt = 0.0001
singlependulum(k, SymplecticEuler(), Δt)
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Jumps/Diffusion_CTRW.jmd
docs
2711
---
title: Diffusion Model
author: Samuel Isaacson, Chris Rackauckas
weave_options:
  fig_ext : ".png"
---

```julia
using Catalyst, JumpProcesses, JumpProblemLibrary, Plots, Statistics, DataFrames
```

# Model and example solutions

Here we implement a 1D continuous time random walk approximation of diffusion for $N$ lattice sites on $\left[0,1\right]$, with reflecting boundary conditions at $x=0$ and $x=1$. Note that our goal is to benchmark the non-spatial, well-mixed stochastic simulation algorithms (SSAs), so we do not also benchmark the spatial SSAs here.

```julia
N = 256
h = 1 / N
u0 = 10 * ones(Int64, N)
tf = .01
methods = (Direct(), FRM(), SortingDirect(), NRM(), DirectCR(), RSSA(), RSSACR(), Coevolve())
shortlabels = [string(leg)[15:end-2] for leg in methods]
jprob = JumpProblemLibrary.prob_jump_diffnetwork
rn = jprob.network(N)
prob = DiscreteProblem(rn, u0, (0.0, tf), [1 / (h*h)])
ploth = plot(reuse=false)
for (i,method) in enumerate(methods)
    println("Benchmarking method: ", method)
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    sol = solve(jump_prob, SSAStepper(); saveat=tf/1000.)
    plot!(ploth, sol.t, sol[Int(N//2),:], label=shortlabels[i])
end
plot!(ploth, title="Population at middle lattice site", xlabel="time")
```

# Benchmarking performance of the methods

```julia
function run_benchmark!(t, jump_prob, stepper)
    sol = solve(jump_prob, stepper)
    @inbounds for i in 1:length(t)
        t[i] = @elapsed (sol = solve(jump_prob, stepper))
    end
end
```

```julia
nsims = 50
benchmarks = Vector{Vector{Float64}}()
for method in methods
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    stepper = SSAStepper()
    t = Vector{Float64}(undef, nsims)
    run_benchmark!(t, jump_prob, stepper)
    push!(benchmarks, t)
end
```

```julia
medtimes = Vector{Float64}(undef, length(methods))
stdtimes = Vector{Float64}(undef, length(methods))
avgtimes = Vector{Float64}(undef, length(methods))
for i in 1:length(methods)
    medtimes[i] = median(benchmarks[i])
    avgtimes[i] = mean(benchmarks[i])
    stdtimes[i] = std(benchmarks[i])
end
df = DataFrame(names=shortlabels, medtimes=medtimes, relmedtimes=(medtimes/medtimes[1]),
               avgtimes=avgtimes, std=stdtimes, cv=stdtimes./avgtimes)
```

# Plotting

```julia
sa = [string(round(mt, digits=4), "s") for mt in df.medtimes]
bar(df.names, df.relmedtimes, legend=false)
scatter!(df.names, .05 .+ df.relmedtimes, markeralpha=0, series_annotations=sa)
ylabel!("median relative to Direct")
title!("256 Site 1D Diffusion CTRW")
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Jumps/Mendes_multistate_example.jmd
docs
3110
---
title: Mendes Multistate Model
author: Samuel Isaacson, Chris Rackauckas
weave_options:
  fig_ext : ".png"
---

Taken from Gupta and Mendes, *An Overview of Network-Based and -Free Approaches for Stochastic Simulation of Biochemical Systems*, Computation, 6 (9), 2018.

```julia
using Catalyst, JumpProcesses, JumpProblemLibrary, Plots, Statistics
fmt = :png
```

Our model is

```julia
jprob = JumpProblemLibrary.prob_jump_multistate
rn = jprob.network
reactions(rn)
```

# Plot solutions by each method

```julia
methods = (Direct(), FRM(), SortingDirect(), NRM(), DirectCR(), RSSA(), RSSACR(), Coevolve())
shortlabels = [string(leg)[15:end-2] for leg in methods]
tf = 10.0 * jprob.tstop
prob = DiscreteProblem(rn, jprob.u0, (0.0, tf), jprob.rates)
varlegs = ["A_P" "A_bound_P" "A_unbound_P" "RLA_P"]
@variables t S7(t) S8(t) S9(t)
varsyms = [
    [S7, S8, S9],
    [S9],
    [S7, S8],
    [S7]
]
varidxs = []
for vars in varsyms
    push!(varidxs, [findfirst(isequal(sym), rn.states) for sym in vars])
end
```

```julia
p = []
for (i,method) in enumerate(methods)
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    sol = solve(jump_prob, SSAStepper(), saveat=tf/1000.)
    solv = zeros(1001, 4)
    for (i,varidx) in enumerate(varidxs)
        solv[:,i] = sum(sol[varidx,:], dims=1)
    end
    push!(p, plot(sol.t, solv, title=shortlabels[i], legend=false, format=fmt))
end
push!(p, plot((1:4)', framestyle = :none, legend=:inside, labels=varlegs))
plot(p..., layout=(5,2), format=fmt)
```

# Benchmarking performance of the methods

```julia
function run_benchmark!(t, jump_prob, stepper)
    sol = solve(jump_prob, stepper)
    @inbounds for i in 1:length(t)
        t[i] = @elapsed (sol = solve(jump_prob, stepper))
    end
end
```

```julia
nsims = 100
benchmarks = Vector{Vector{Float64}}()
for method in methods
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    stepper = SSAStepper()
    time = Vector{Float64}(undef, nsims)
    run_benchmark!(time, jump_prob, stepper)
    push!(benchmarks, time)
end
```

```julia
medtimes = Vector{Float64}(undef, length(methods))
stdtimes = Vector{Float64}(undef, length(methods))
avgtimes = Vector{Float64}(undef, length(methods))
for i in 1:length(methods)
    medtimes[i] = median(benchmarks[i])
    avgtimes[i] = mean(benchmarks[i])
    stdtimes[i] = std(benchmarks[i])
end

using DataFrames
df = DataFrame(names=shortlabels, medtimes=medtimes, relmedtimes=(medtimes/medtimes[1]),
               avgtimes=avgtimes, std=stdtimes, cv=stdtimes./avgtimes)

sa = [text(string(round(mt, digits=3), "s"), :center, 12) for mt in df.medtimes]
bar(df.names, df.relmedtimes, legend=false, fmt=fmt)
scatter!(df.names, .05 .+ df.relmedtimes, markeralpha=0, series_annotations=sa, fmt=fmt)
ylabel!("median relative to Direct")
title!("Multistate Model")
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Jumps/MultivariateHawkes.jmd
docs
23208
---
title: Multivariate Hawkes Model
author: Guilherme Zagatti
weave_options:
  fig_ext : ".png"
---

```julia
using JumpProcesses, Graphs, Statistics, BenchmarkTools, Plots
using OrdinaryDiffEq: Tsit5
fmt = :png
width_px, height_px = default(:size);
```

# Model and example solutions

Given a graph with $V$ nodes, the multivariate Hawkes process is characterized by $V$ point processes such that the conditional intensity rate of node $i$ connected to a set of nodes $E_i$ in the graph is given by:

$$\lambda_i^\ast (t) = \lambda + \sum_{j \in E_i} \sum_{t_{n_j} < t} \alpha \exp \left[-\beta (t - t_{n_j}) \right]$$

This process is known as self-exciting, because the occurrence of an event at node $j$ at time $t_{n_j}$ will increase the conditional intensity of all the processes connected to it by $\alpha$. The excited intensity then decreases at a rate proportional to $\beta$.

The conditional intensity of this process has a recursive formulation which can significantly speed up the simulation. The recursive formulation for the univariate case is derived in Laub et al. [2]. We derive the compound case here. Let $t_{N_i} = \max \{ t_{n_j} < t \mid j \in E_i \}$ and

$$\begin{split}
\phi_i^\ast (t)
  &= \sum_{j \in E_i} \sum_{t_{n_j} < t} \alpha \exp \left[-\beta (t - t_{N_i} + t_{N_i} - t_{n_j}) \right] \\
  &= \exp \left[ -\beta (t - t_{N_i}) \right] \sum_{j \in E_i} \sum_{t_{n_j} \leq t_{N_i}} \alpha \exp \left[-\beta (t_{N_i} - t_{n_j}) \right] \\
  &= \exp \left[ -\beta (t - t_{N_i}) \right] \left( \alpha + \phi_i^\ast (t_{N_i}) \right)
\end{split}$$

Then the conditional intensity can be re-written in terms of $\phi_i^\ast (t_{N_i})$:

$$\lambda_i^\ast (t) = \lambda + \phi_i^\ast (t) = \lambda + \exp \left[ -\beta (t - t_{N_i}) \right] \left( \alpha + \phi_i^\ast (t_{N_i}) \right)$$

In Julia, we define a factory for the conditional intensity $\lambda_i$ which returns the brute-force or recursive versions of the intensity given node $i$ and network $g$.

```julia
function hawkes_rate(i::Int, g; use_recursion = false)

    @inline @inbounds function rate_recursion(u, p, t)
        λ, α, β, h, urate, ϕ = p
        urate[i] = λ + exp(-β*(t - h[i]))*ϕ[i]
        return urate[i]
    end

    @inline @inbounds function rate_brute(u, p, t)
        λ, α, β, h, urate = p
        x = zero(typeof(t))
        for j in g[i]
            for _t in reverse(h[j])
                ϕij = α * exp(-β * (t - _t))
                if ϕij ≈ 0
                    break
                end
                x += ϕij
            end
        end
        urate[i] = λ + x
        return urate[i]
    end

    if use_recursion
        return rate_recursion
    else
        return rate_brute
    end

end
```

Given the rate factory, we can create a jump factory which will create all the jumps in our model.

```julia
function hawkes_jump(i::Int, g; use_recursion = false)
    rate = hawkes_rate(i, g; use_recursion)
    urate = rate
    @inbounds rateinterval(u, p, t) = p[5][i] == p[1] ? typemax(t) : 1 / (2*p[5][i])
    @inbounds lrate(u, p, t) = p[1]
    @inbounds function affect_recursion!(integrator)
        λ, α, β, h, _, ϕ = integrator.p
        for j in g[i]
            ϕ[j] *= exp(-β*(integrator.t - h[j]))
            ϕ[j] += α
            h[j] = integrator.t
        end
        integrator.u[i] += 1
    end
    @inbounds function affect_brute!(integrator)
        push!(integrator.p[4][i], integrator.t)
        integrator.u[i] += 1
    end
    return VariableRateJump(
        rate,
        use_recursion ? affect_recursion! : affect_brute!;
        lrate,
        urate,
        rateinterval,
    )
end

function hawkes_jump(u, g; use_recursion = false)
    return [hawkes_jump(i, g; use_recursion) for i = 1:length(u)]
end
```

We can then create a factory for Multivariate Hawkes `JumpProblem`s. We can define two types of `JumpProblem`s depending on the aggregator.
The `Direct()` aggregator expects an `ODEProblem`, since it cannot handle the `SSAStepper` with `VariableRateJump`s.

```julia
function f!(du, u, p, t)
    du .= 0
    nothing
end

function hawkes_problem(
    p,
    agg;
    u = [0.0],
    tspan = (0.0, 50.0),
    save_positions = (false, true),
    g = [[1]],
    use_recursion = false,
)
    oprob = ODEProblem(f!, u, tspan, p)
    jumps = hawkes_jump(u, g; use_recursion)
    jprob = JumpProblem(oprob, agg, jumps...; save_positions = save_positions)
    return jprob
end
```

The `Coevolve()` aggregator knows how to handle the `SSAStepper`, so it accepts a `DiscreteProblem`.

```julia
function hawkes_problem(
    p,
    agg::Coevolve;
    u = [0.0],
    tspan = (0.0, 50.0),
    save_positions = (false, true),
    g = [[1]],
    use_recursion = false,
)
    dprob = DiscreteProblem(u, tspan, p)
    jumps = hawkes_jump(u, g; use_recursion)
    jprob = JumpProblem(dprob, agg, jumps...; dep_graph = g, save_positions = save_positions)
    return jprob
end
```

Let's solve the problems defined so far. We sample a random graph from the Erdős-Rényi model. This model assumes that the probability of an edge between two nodes is independent of other edges; we fix the edge probability at $0.2$. For illustration purposes, we fix $V = 10$.

```julia
V = 10
G = erdos_renyi(V, 0.2, seed = 9103)
g = [neighbors(G, i) for i = 1:nv(G)]
```

We fix the Hawkes parameters at $\lambda = 0.5, \alpha = 0.1, \beta = 2.0$, which ensures the process does not explode.

```julia
tspan = (0.0, 50.0)
u = [0.0 for i = 1:nv(G)]
p = (0.5, 0.1, 2.0)
```

Now, we instantiate the problems, find their solutions and plot the results.

```julia
algorithms = Tuple{Any, Any, Bool, String}[
    (Direct(), Tsit5(), false, "Direct (brute-force)"),
    (Coevolve(), SSAStepper(), false, "Coevolve (brute-force)"),
    (Direct(), Tsit5(), true, "Direct (recursive)"),
    (Coevolve(), SSAStepper(), true, "Coevolve (recursive)"),
]

let fig = []
    for (i, (algo, stepper, use_recursion, label)) in enumerate(algorithms)
        if use_recursion
            h = zeros(eltype(tspan), nv(G))
            urate = zeros(eltype(tspan), nv(G))
            ϕ = zeros(eltype(tspan), nv(G))
            # parameter order matches the `λ, α, β, h, urate, ϕ = p` unpacking above
            _p = (p[1], p[2], p[3], h, urate, ϕ)
        else
            h = [eltype(tspan)[] for _ = 1:nv(G)]
            urate = zeros(eltype(tspan), nv(G))
            _p = (p[1], p[2], p[3], h, urate)
        end
        jump_prob = hawkes_problem(_p, algo; u, tspan, g, use_recursion)
        sol = solve(jump_prob, stepper)
        push!(fig, plot(sol.t, sol[1:V, :]', title=label, legend=false, format=fmt))
    end
    fig = plot(fig..., layout=(2,2), format=fmt)
end
```

## Alternative libraries

We benchmark `JumpProcesses.jl` against `PiecewiseDeterministicMarkovProcesses.jl` and the Python `Tick` library.

In order to compare with `PiecewiseDeterministicMarkovProcesses.jl`, we need to reformulate our jump problem as a Piecewise Deterministic Markov Process (PDMP). In this setting, we need to describe how the conditional intensity changes with time, which we derive below:

$$\begin{split}
\frac{d \lambda_i^\ast (t)}{d t}
  &= -\beta \sum_{j \in E_i} \sum_{t_{n_j} < t} \alpha \exp \left[-\beta (t - t_{n_j}) \right] \\
  &= -\beta \left( \lambda_i^\ast (t) - \lambda \right)
\end{split}$$

```julia
function hawkes_drate(dxc, xc, xd, p, t)
    λ, α, β, _, _, g = p
    for i = 1:length(g)
        dxc[i] = -β * (xc[i] - λ)
    end
end
```

Next, we need to define the intensity rate and the jumps according to the library's specification.
```julia
function hawkes_rate(rate, xc, xd, p, t, issum::Bool)
    λ, α, β, _, _, g = p
    if issum
        return sum(@view(xc[1:length(g)]))
    end
    rate[1:length(g)] .= @view xc[1:length(g)]
    return 0.0
end

function hawkes_affect!(xc, xd, p, t, i::Int64)
    λ, α, β, _, _, g = p
    # an event at node i raises the intensity of every process connected to it
    for j in g[i]
        xc[j] += α
    end
end
```

Finally, we create a factory for the Multivariate Hawkes `PDMPCHV` problem.

```julia
import LinearAlgebra: I
using PiecewiseDeterministicMarkovProcesses
const PDMP = PiecewiseDeterministicMarkovProcesses

struct PDMPCHV end

function hawkes_problem(
    p,
    agg::PDMPCHV;
    u = [0.0],
    tspan = (0.0, 50.0),
    save_positions = (false, true),
    g = [[1]],
    use_recursion = true,
)
    xd0 = Array{Int}(u)
    xc0 = [p[1] for i = 1:length(u)]
    nu = one(eltype(xd0)) * I(length(xd0))
    jprob = PDMPProblem(hawkes_drate, hawkes_rate, hawkes_affect!, nu, xc0, xd0, p, tspan)
    return jprob
end

push!(algorithms, (PDMPCHV(), CHV(Tsit5()), true, "PDMPCHV"));
```

The Python `Tick` library can be accessed with `PyCall.jl`. We install the required Python dependencies with `Conda.jl` and define a factory for the Multivariate Hawkes `PyTick` problem.

```julia
const BENCHMARK_PYTHON = false
const REBUILD_PYCALL = false

struct PyTick end

if BENCHMARK_PYTHON
    if REBUILD_PYCALL
        using Pkg, Conda

        # rebuild PyCall to ensure it links to the python provided by Conda.jl
        ENV["PYTHON"] = ""
        Pkg.build("PyCall")

        # PyCall only works with Conda.ROOTENV
        # tick requires python=3.8
        Conda.add("python=3.8", Conda.ROOTENV)
        Conda.add("numpy", Conda.ROOTENV)
        Conda.pip_interop(true, Conda.ROOTENV)
        Conda.pip("install", "tick", Conda.ROOTENV)
    end

    using PyCall

    function hawkes_problem(
        p,
        agg::PyTick;
        u = [0.0],
        tspan = (0.0, 50.0),
        save_positions = (false, true),
        g = [[1]],
        use_recursion = true,
    )
        λ, α, β = p
        SimuHawkesSumExpKernels = pyimport("tick.hawkes")[:SimuHawkesSumExpKernels]
        jprob = SimuHawkesSumExpKernels(
            baseline = fill(λ, length(u)),
            adjacency = [i in j ? α / β : 0.0 for j in g, i = 1:length(u), u = 1:1],
            decays = [β],
            end_time = tspan[2],
            verbose = false,
            force_simulation = true,
        )
        return jprob
    end

    push!(algorithms, (PyTick(), nothing, true, "PyTick"));
end
```

Now, we instantiate the problems, find their solutions and plot the results.

```julia
let fig = []
    for (i, (algo, stepper, use_recursion, label)) in enumerate(algorithms[5:end])
        if typeof(algo) <: PyTick
            _p = (p[1], p[2], p[3])
            jump_prob = hawkes_problem(_p, algo; u, tspan, g, use_recursion)
            jump_prob.reset()
            jump_prob.simulate()
            t = tspan[1]:0.1:tspan[2]
            N = [[sum(jumps .< _t) for _t in t] for jumps in jump_prob.timestamps]
            push!(fig, plot(t, N, title=label, legend=false, format=fmt))
        elseif typeof(algo) <: PDMPCHV
            _p = (p[1], p[2], p[3], nothing, nothing, g)
            jump_prob = hawkes_problem(_p, algo; u, tspan, g, use_recursion)
            sol = solve(jump_prob, stepper)
            push!(fig, plot(sol.time, sol.xd[1:V, :]', title=label, legend=false, format=fmt))
        end
    end
    fig = plot(fig..., layout=(1,2), format=fmt, size=(width_px, height_px/2))
end
```

# Correctness: QQ-Plots

We check that the algorithms produce correct simulations by inspecting their QQ-plots. Point process theory says that transforming the simulated points using the compensator should produce points whose inter-arrival duration is distributed according to the exponential distribution (see Section 7.4 [1]).

The compensator of any point process is the integral of the conditional intensity, $\Lambda_i^\ast(t) = \int_0^t \lambda_i^\ast(u) \, du$. The compensator for the Multivariate Hawkes process is defined below.
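Since $\lambda_i^\ast(u) = \lambda + \sum_{j \in E_i} \sum_{t_{n_j} < u} \alpha e^{-\beta (u - t_{n_j})}$, the integral splits into a linear term plus a sum of elementary exponential integrals,

$$\Lambda_i^\ast(t) = \lambda t + \alpha \sum_{j \in E_i} \sum_{t_{n_j} < t} \int_{t_{n_j}}^{t} e^{-\beta (u - t_{n_j})} \, du,$$

and evaluating each integral gives the closed form: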
$$\Lambda_i^\ast(t) = \lambda t + \frac{\alpha}{\beta} \sum_{j \in E_i} \sum_{t_{n_j} < t} \left( 1 - \exp \left[-\beta (t - t_{n_j}) \right] \right)$$

```julia
function hawkes_Λ(i::Int, g, p)
    @inline @inbounds function Λ(t, h)
        λ, α, β = p
        x = λ * t
        for j in g[i]
            for _t in h[j]
                if _t >= t
                    break
                end
                x += (α / β) * (1 - exp(-β * (t - _t)))
            end
        end
        return x
    end
    return Λ
end

function hawkes_Λ(g, p)
    return [hawkes_Λ(i, g, p) for i = 1:length(g)]
end

Λ = hawkes_Λ(g, p)
```

We need a method for extracting the history from a simulation run. Below, we define such functions for each type of algorithm.

```julia
"""
Given an ODE solution `sol`, recover the timestamps at which events occurred. It
returns a vector with the history of each process in `sol`.

It assumes that `JumpProblem` was initialized with `save_positions` equal to
`(true, false)`, `(false, true)` or `(true, true)` such that the system's state
is saved before and/or after the jump occurs, and that `sol.u` is a
non-decreasing series that counts the total number of events observed as a
function of time.
"""
function histories(u, t)
    _u = permutedims(reduce(hcat, u))
    k = size(_u)[2]
    # computes a mask that shows when total counts change
    mask = cat(fill(0.0, 1, k), _u[2:end, :] .- _u[1:end-1, :], dims = 1) .≈ 1
    h = Vector{typeof(t)}(undef, k)
    @inbounds for i = 1:k
        h[i] = t[mask[:, i]]
    end
    return h
end

function histories(sol::S) where {S<:ODESolution}
    # get u and permute the dimensions to get a matrix n x k with n observations and k processes
    if typeof(sol.u[1]) <: ExtendedJumpArray
        u = map((u) -> u.u, sol.u)
    else
        u = sol.u
    end
    return histories(u, sol.t)
end

function histories(sol::S) where {S<:PDMP.PDMPResult}
    return histories(sol.xd.u, sol.time)
end

function histories(sols)
    map(histories, sols)
end
```

We also need to compute the quantiles of the empirical distribution given a history of events `hs`, the compensator `Λ` and the target quantiles `quant`.

```julia
import Distributions: Exponential

"""
Computes the empirical and expected quantiles given a history of events `hs`,
the compensator `Λ` and the target quantiles `quant`.

The history `hs` is a vector with the history of each process. Alternatively,
the function also takes a vector of histories containing the histories from
multiple runs.

The compensator `Λ` can either be a homogeneous compensator function that
applies equally to all the processes in `hs`. Alternatively, it accepts a
vector of compensators, one for each process.
"""
function qq(hs, Λ, quant = 0.01:0.01:0.99)
    _hs = apply_Λ(hs, Λ)
    T = typeof(hs[1][1][1])
    Δs = Vector{Vector{T}}(undef, length(hs[1]))
    for k = 1:length(Δs)
        _Δs = Vector{Vector{T}}(undef, length(hs))
        for i = 1:length(_Δs)
            _Δs[i] = _hs[i][k][2:end] .- _hs[i][k][1:end-1]
        end
        Δs[k] = reduce(vcat, _Δs)
    end
    empirical_quant = map((_Δs) -> quantile(_Δs, quant), Δs)
    expected_quant = quantile(Exponential(1.0), quant)
    return empirical_quant, expected_quant
end

"""
Compute the compensator `Λ` value for each timestamp recorded in history `hs`.

The history `hs` is a vector with the history of each process. Alternatively,
the function also takes a vector of histories containing the histories from
multiple runs.

The compensator `Λ` can either be a homogeneous compensator function that
applies equally to all the processes in `hs`. Alternatively, it accepts a
vector of compensators, one for each process.
"""
function apply_Λ(hs::V, Λ) where {V<:Vector{<:Number}}
    _hs = similar(hs)
    @inbounds for n = 1:length(hs)
        _hs[n] = Λ(hs[n], hs)
    end
    return _hs
end

function apply_Λ(k::Int, hs::V, Λ::A) where {V<:Vector{<:Vector{<:Number}},A<:Array}
    @inbounds hsk = hs[k]
    @inbounds Λk = Λ[k]
    _hs = similar(hsk)
    @inbounds for n = 1:length(hsk)
        _hs[n] = Λk(hsk[n], hs)
    end
    return _hs
end

function apply_Λ(hs::V, Λ) where {V<:Vector{<:Vector{<:Number}}}
    _hs = similar(hs)
    @inbounds for k = 1:length(_hs)
        _hs[k] = apply_Λ(hs[k], Λ)
    end
    return _hs
end

function apply_Λ(hs::V, Λ::A) where {V<:Vector{<:Vector{<:Number}},A<:Array}
    _hs = similar(hs)
    @inbounds for k = 1:length(_hs)
        _hs[k] = apply_Λ(k, hs, Λ)
    end
    return _hs
end

function apply_Λ(hs::V, Λ) where {V<:Vector{<:Vector{<:Vector{<:Number}}}}
    return map((_hs) -> apply_Λ(_hs, Λ), hs)
end
```

We can construct QQ-plots with a Plot recipe as follows.

```julia
@userplot QQPlot
@recipe function f(x::QQPlot)
    empirical_quant, expected_quant = x.args
    max_empirical_quant = maximum(maximum, empirical_quant)
    max_expected_quant = maximum(expected_quant)
    upperlim = ceil(maximum([max_empirical_quant, max_expected_quant]))
    @series begin
        seriestype := :line
        linecolor := :lightgray
        label --> ""
        (x) -> x
    end
    @series begin
        seriestype := :scatter
        aspect_ratio := :equal
        xlims := (0.0, upperlim)
        ylims := (0.0, upperlim)
        xaxis --> "Expected"
        yaxis --> "Empirical"
        markerstrokewidth --> 0
        markerstrokealpha --> 0
        markersize --> 1.5
        size --> (400, 500)
        label --> permutedims(["quantiles $i" for i = 1:length(empirical_quant)])
        expected_quant, empirical_quant
    end
end
```

Now, we simulate all of the algorithms we defined in the previous section $250$ times to produce their QQ-plots.

```julia
let fig = []
    for (i, (algo, stepper, use_recursion, label)) in enumerate(algorithms)
        if typeof(algo) <: PyTick
            _p = (p[1], p[2], p[3])
        elseif typeof(algo) <: PDMPCHV
            _p = (p[1], p[2], p[3], nothing, nothing, g)
        else
            if use_recursion
                h = zeros(eltype(tspan), nv(G))
                ϕ = zeros(eltype(tspan), nv(G))
                urate = zeros(eltype(tspan), nv(G))
                _p = (p[1], p[2], p[3], h, urate, ϕ)
            else
                h = [eltype(tspan)[] for _ = 1:nv(G)]
                urate = zeros(eltype(tspan), nv(G))
                _p = (p[1], p[2], p[3], h, urate)
            end
        end
        jump_prob = hawkes_problem(_p, algo; u, tspan, g, use_recursion)
        runs = Vector{Vector{Vector{Number}}}(undef, 250)
        for n = 1:length(runs)
            if typeof(algo) <: PyTick
                jump_prob.reset()
                jump_prob.simulate()
                runs[n] = jump_prob.timestamps
            else
                if ~(typeof(algo) <: PDMPCHV)
                    if use_recursion
                        h .= 0
                        ϕ .= 0
                    else
                        for _h in h
                            empty!(_h)
                        end
                    end
                    urate .= 0
                end
                runs[n] = histories(solve(jump_prob, stepper))
            end
        end
        qqs = qq(runs, Λ)
        push!(fig, qqplot(qqs..., legend = false, aspect_ratio = :equal, title=label, fmt=fmt))
    end
    fig = plot(fig..., layout = (3, 2), fmt=fmt, size=(width_px, 3*height_px/2))
end
```

# Benchmarking performance

In this section we benchmark all the algorithms introduced in the first section. We generate networks in the range from $1$ to $95$ nodes and simulate the Multivariate Hawkes process for $25$ units of time. We fix the Hawkes parameters at $\lambda = 0.5, \alpha = 0.1, \beta = 5.0$, which ensures the process does not explode. We simulate $50$ trajectories with a limit of ten seconds to complete execution for each configuration.
```julia
tspan = (0.0, 25.0)
p = (0.5, 0.1, 5.0)
Vs = append!([1], 5:5:95)
Gs = [erdos_renyi(V, 0.2, seed = 6221) for V in Vs]

bs = Vector{Vector{BenchmarkTools.Trial}}()

for (algo, stepper, use_recursion, label) in algorithms
    global _stepper = stepper
    push!(bs, Vector{BenchmarkTools.Trial}())
    _bs = bs[end]
    for (i, G) in enumerate(Gs)
        local g = [neighbors(G, i) for i = 1:nv(G)]
        local u = [0.0 for i = 1:nv(G)]
        if typeof(algo) <: PyTick
            _p = (p[1], p[2], p[3])
        elseif typeof(algo) <: PDMPCHV
            _p = (p[1], p[2], p[3], nothing, nothing, g)
        else
            if use_recursion
                global h = zeros(eltype(tspan), nv(G))
                global urate = zeros(eltype(tspan), nv(G))
                global ϕ = zeros(eltype(tspan), nv(G))
                _p = (p[1], p[2], p[3], h, urate, ϕ)
            else
                global h = [eltype(tspan)[] for _ = 1:nv(G)]
                global urate = zeros(eltype(tspan), nv(G))
                _p = (p[1], p[2], p[3], h, urate)
            end
        end
        global jump_prob = hawkes_problem(_p, algo; u, tspan, g, use_recursion)
        trial = try
            if typeof(algo) <: PyTick
                @benchmark(
                    jump_prob.simulate(),
                    setup = (jump_prob.reset()),
                    samples = 50,
                    evals = 1,
                    seconds = 10,
                )
            else
                if typeof(algo) <: PDMPCHV
                    @benchmark(
                        solve(jump_prob, _stepper),
                        setup = (),
                        samples = 50,
                        evals = 1,
                        seconds = 10,
                    )
                else
                    if use_recursion
                        @benchmark(
                            solve(jump_prob, _stepper),
                            setup = (h .= 0; urate .= 0; ϕ .= 0),
                            samples = 50,
                            evals = 1,
                            seconds = 10,
                        )
                    else
                        @benchmark(
                            solve(jump_prob, _stepper),
                            setup = ([empty!(_h) for _h in h]; urate .= 0),
                            samples = 50,
                            evals = 1,
                            seconds = 10,
                        )
                    end
                end
            end
        catch e
            BenchmarkTools.Trial(
                BenchmarkTools.Parameters(samples = 50, evals = 1, seconds = 10),
            )
        end
        push!(_bs, trial)
        if (nv(G) == 1 || nv(G) % 10 == 0)
            median_time =
                length(trial) > 0 ? "$(BenchmarkTools.prettytime(median(trial.times)))" :
                "nan"
            println("algo=$(label), V = $(nv(G)), length = $(length(trial.times)), median time = $median_time")
        end
    end
end
```

```julia
let fig = plot(
        yscale = :log10,
        xlabel = "V",
        ylabel = "Time (ns)",
        legend_position = :outertopright,
    )
    for (i, (algo, stepper, use_recursion, label)) in enumerate(algorithms)
        _bs, _Vs = [], []
        for (j, b) in enumerate(bs[i])
            if length(b) == 50
                push!(_bs, median(b.times))
                push!(_Vs, Vs[j])
            end
        end
        plot!(_Vs, _bs, label=label)
    end
    title!("Simulations, 50 samples: nodes × time")
end
```

# References

[1] D. J. Daley and D. Vere-Jones. An Introduction to the Theory of Point Processes: Volume I: Elementary Theory and Methods. Probability and Its Applications. Springer-Verlag, 2nd edition. doi:10.1007/b97277.

[2] Patrick J. Laub, Young Lee, and Thomas Taimre. The Elements of Hawkes Processes. Springer International Publishing. doi:10.1007/978-3-030-84639-8.
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Jumps/NegFeedback_GeneExpr.jmd
docs
2478
---
title: Negative Feedback Gene Expression Model
author: Samuel Isaacson, Chris Rackauckas
weave_options:
  fig_ext : ".png"
---

```julia
using JumpProcesses, Catalyst, JumpProblemLibrary, Plots, Statistics
import JumpProblemLibrary: prob_jump_dnarepressor
fmt = :png
```

Our model is

```julia
rn = prob_jump_dnarepressor.network
reactions(rn)
```

# Plot solutions by each method

```julia
methods = (Direct(), FRM(), SortingDirect(), NRM(), DirectCR(), RSSA(), RSSACR(), Coevolve())
shortlabels = [string(leg)[15:end-2] for leg in methods]
prob = prob_jump_dnarepressor.discrete_prob
tf = prob_jump_dnarepressor.tstop
ploth = plot(reuse=false)
for (i,method) in enumerate(methods)
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    sol = solve(jump_prob, SSAStepper(), saveat=tf/1000.)
    plot!(ploth, sol.t, sol[3,:], label=shortlabels[i], format=fmt)
end
plot(ploth, title="Protein level", xlabel="time", format=fmt)
```

# Benchmarking performance of the methods

```julia
function run_benchmark!(t, jump_prob, stepper)
    sol = solve(jump_prob, stepper)
    @inbounds for i in 1:length(t)
        t[i] = @elapsed (sol = solve(jump_prob, stepper))
    end
end
```

```julia
nsims = 2000
benchmarks = Vector{Vector{Float64}}()
for method in methods
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    stepper = SSAStepper()
    t = Vector{Float64}(undef, nsims)
    run_benchmark!(t, jump_prob, stepper)
    push!(benchmarks, t)
end
```

```julia
medtimes = Vector{Float64}(undef, length(methods))
stdtimes = Vector{Float64}(undef, length(methods))
avgtimes = Vector{Float64}(undef, length(methods))
for i in 1:length(methods)
    medtimes[i] = median(benchmarks[i])
    avgtimes[i] = mean(benchmarks[i])
    stdtimes[i] = std(benchmarks[i])
end
println(medtimes/medtimes[1])
```

```julia
using DataFrames

df = DataFrame(names=shortlabels, medtimes=medtimes, relmedtimes=(medtimes/medtimes[1]),
               avgtimes=avgtimes, std=stdtimes, cv=stdtimes./avgtimes)

sa = [text(string(round(mt, sigdigits=2), "s"), :center, 10) for mt in df.medtimes]
bar(df.names, df.relmedtimes, legend=false, fmt=fmt)
scatter!(df.names, .05 .+ df.relmedtimes, markeralpha=0, series_annotations=sa, fmt=fmt)
ylabel!("median relative to Direct")
title!("Negative Feedback Gene Expression Model")
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Jumps/NegFeedback_GeneExpr_Marchetti.jmd
docs
2853
---
title: Negative Feedback Marchetti Model
author: Samuel Isaacson, Chris Rackauckas
weave_options:
  fig_ext : ".png"
---

```julia
using OrdinaryDiffEq, Catalyst, JumpProcesses, JumpProblemLibrary, Plots, Statistics
fmt = :png
```

# Model and example solutions

Here we implement the gene expression model from appendix A.6 of Marchetti, Priami and Thanh, *Simulation Algorithms for Computational Systems Biology*, Springer (2017).

```julia
jprob = JumpProblemLibrary.prob_jump_dnadimer_repressor
rnpar = jprob.rates
u0 = jprob.u0
tf = jprob.tstop
rn = jprob.network
reactions(rn)
```

```julia
u0f = [1000., 0., 0., 0., 0.]
odeprob = ODEProblem(rn, u0f, (0., tf), rnpar)
solution = solve(odeprob, Tsit5())
plot(solution, format=fmt)
```

```julia
tf = 4000.
methods = (Direct(), FRM(), SortingDirect(), NRM(), DirectCR(), RSSA(), RSSACR(), Coevolve())
shortlabels = [string(leg)[15:end-2] for leg in methods]
prob = DiscreteProblem(rn, u0, (0.0, tf), rnpar)
ploth = plot(reuse=false)
p = []
for (i,method) in enumerate(methods)
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    sol = solve(jump_prob, SSAStepper(), saveat=tf/1000.)
    plot!(ploth, sol.t, sol[3,:], label=shortlabels[i], format=fmt)
    push!(p, plot(sol, title=shortlabels[i], format=fmt))
end
plot(ploth, title="Protein level", xlabel="time", format=fmt)
```

```julia
plot(p[end])
```

# Benchmarking performance of the methods

```julia
function run_benchmark!(t, jump_prob, stepper)
    sol = solve(jump_prob, stepper)
    @inbounds for i in 1:length(t)
        t[i] = @elapsed (sol = solve(jump_prob, stepper))
    end
end
```

```julia
nsims = 200
benchmarks = Vector{Vector{Float64}}()
for method in methods
    jump_prob = JumpProblem(rn, prob, method, save_positions=(false, false))
    stepper = SSAStepper()
    t = Vector{Float64}(undef, nsims)
    run_benchmark!(t, jump_prob, stepper)
    push!(benchmarks, t)
end
```

```julia
medtimes = Vector{Float64}(undef, length(methods))
stdtimes = Vector{Float64}(undef, length(methods))
avgtimes = Vector{Float64}(undef, length(methods))
for i in 1:length(methods)
    medtimes[i] = median(benchmarks[i])
    avgtimes[i] = mean(benchmarks[i])
    stdtimes[i] = std(benchmarks[i])
end

using DataFrames
df = DataFrame(names=shortlabels, medtimes=medtimes, relmedtimes=(medtimes/medtimes[1]),
               avgtimes=avgtimes, std=stdtimes, cv=stdtimes./avgtimes)

sa = [text(string(round(mt, digits=3), "s"), :center, 12) for mt in df.medtimes]
bar(df.names, df.relmedtimes, legend=false, fmt=fmt)
scatter!(df.names, .05 .+ df.relmedtimes, markeralpha=0, series_annotations=sa, fmt=fmt)
ylabel!("median relative to Direct")
title!("Marchetti Gene Expression Model")
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Jumps/Spatial_Signaling_Sanft.jmd
docs
8607
---
title: Spatial Signaling Model from Sanft and Othmer (2015)
author: Vasily Ilin and Samuel Isaacson
weave_options:
  fig_ext : ".png"
---

```julia
using Catalyst, JumpProcesses, BenchmarkTools, Plots, Random
```

# Model description and setup

Here we implement the model from [^1] (8 species and 12 reactions) for different mesh sizes, and benchmark the performance of JumpProcesses.jl's spatial stochastic simulation algorithms (SSAs). Below, the value `N` will denote the number of subvolumes along one dimension of a cubic grid, representing the reaction volume. In [^1] this value ranges from 20 to 60.

We first define some helper functions to convert concentration units into number units, as needed for spatial SSAs.

```julia
invmicromolar_to_cubicmicrometer(invconcen) = invconcen / (6.02214076e2)
micromolar_to_invcubicmicrometer(concen) = (6.02214076e2) * concen
```

For reference, $1\,\mu M$ corresponds to roughly $602.2$ molecules per $\mu m^3$, so the $12.3$ nM initial enzyme concentration used below amounts to about $1600$ copies of each enzyme in a $(6\,\mu m)^3$ domain.

Next we create a well-mixed model with the desired chemistry

```julia
rn = @reaction_network begin
    k₁, EA --> EA + A
    k₁, EB --> EB + B
    (ka,kd), EA + B <--> EAB
    (ka,kd), EAB + B <--> EAB₂
    (ka,kd), EB + A <--> EBA
    (ka,kd), EBA + A <--> EBA₂
    k₄, A --> ∅
    k₄, B --> ∅
end k₁ ka kd k₄
```

Let's next make a function to calculate the spatial transport rates, the mesh/graph that will represent our domain, and the initial condition. We use a cubic lattice of size `N` by `N` by `N` with reflecting boundary conditions.

```julia
# domain_len is the physical length of each side of the cubic domain
# units should be in μm (6.0 or 12.0 in Sanft)
# D is the diffusivity in units of (μm)^2 s⁻¹
function transport_model(rn, N; domain_len = 6.0, D = 1.0, rng = Random.default_rng())
    # topology
    h = domain_len / N
    dims = (N, N, N)
    num_nodes = prod(dims)

    # Cartesian grid with reflecting BC at boundaries
    grid = CartesianGrid(dims)

    # Cartesian grid hopping rate to neighbors
    hopping_rate = D / h^2

    # this indicates we have a uniform rate of D/h^2 along each edge at each site
    hopping_constants = hopping_rate * ones(numspecies(rn))

    # figure out the indices of species EA and EB
    @unpack EA, EB = rn
    EAidx = findfirst(isequal(EA), species(rn))
    EBidx = findfirst(isequal(EB), species(rn))

    # spatial initial condition
    # initial concentration of 12.3 nM = 12.3 * 1e-3 μM
    num_molecules = trunc(Int, micromolar_to_invcubicmicrometer(12.3*1e-3) * (domain_len^3))
    u0 = zeros(Int, 8, num_nodes)
    rand_EA = rand(rng, 1:num_nodes, num_molecules)
    rand_EB = rand(rng, 1:num_nodes, num_molecules)
    for i in 1:num_molecules
        u0[EAidx, rand_EA[i]] += 1
        u0[EBidx, rand_EB[i]] += 1
    end

    grid, hopping_constants, h, u0
end
```

Finally, let's make a function to setup the well-mixed model from the reaction model in a cube of side length `h`:

```julia
function wellmixed_model(rn, u0, end_time, h)
    kaval = invmicromolar_to_cubicmicrometer(46.2) / h^3
    setdefaults!(rn, [:k₁ => 150, :ka => kaval, :kd => 3.82, :k₄ => 6.0])

    # well-mixed initial condition corresponding to the spatial initial condition
    u0wm = sum(u0, dims = 2)
    dprobwm = DiscreteProblem(rn, u0wm, (0.0, end_time))
    jprobwm = JumpProblem(rn, dprobwm, Direct(), save_positions = (false,false))
    majumps = jprobwm.massaction_jump
    majumps, dprobwm, jprobwm, u0wm
end
```

# Model Solution

Let's look at one example to check our model seems reasonable. We'll plot the total number of molecules in the system to verify we get around 28,000 molecules, as reported in Sanft [^1], when using a domain length of 6 μm.
```julia
end_time = 3.0
grid, hopping_constants, h, u0 = transport_model(rn, 60)
majumps, dprobwm, jprobwm, u0wm = wellmixed_model(rn, u0, end_time, 6.0)
sol = solve(jprobwm, SSAStepper(); saveat = end_time/200)
Ntot = [sum(u) for u in sol.u]
plt = plot(sol.t, Ntot, label="Well-mixed", ylabel="Total Number of Molecules",
           xlabel="time")

# spatial model
majumps, dprobwm, jprobwm, u0wm = wellmixed_model(rn, u0, end_time, h)
dprob = DiscreteProblem(u0, (0.0, end_time), copy(dprobwm.p))
jprob = JumpProblem(dprob, DirectCRDirect(), majumps; hopping_constants,
                    spatial_system = grid, save_positions = (false, false))
spatial_sol = solve(jprob, SSAStepper(); saveat = end_time/200)
Ntot = [sum(vec(u)) for u in spatial_sol.u]
plot!(plt, spatial_sol.t, Ntot, label="Spatial",
      title="Steady-state number of molecules is $(Ntot[end])")
```

# Benchmarking performance of the methods

We can now run the solvers and record the performance with `BenchmarkTools`. Let's first create a `DiscreteCallback` to terminate simulations once we reach `10^8` events:

```julia
@Base.kwdef mutable struct EventCallback
    n::Int = 0
end

function (ecb::EventCallback)(u, t, integ)
    ecb.n += 1
    ecb.n == 10^8
end

function (ecb::EventCallback)(integ)
    # save the final state
    terminate!(integ)
    nothing
end
```

We next create a function to run and return our benchmarking results.

```julia
function benchmark_and_save!(bench_dict, end_times, Nv, algs, domain_len)
    @assert length(end_times) == length(Nv)

    # callback for terminating simulations
    ecb = EventCallback()
    cb = DiscreteCallback(ecb, ecb)

    for (end_time, N) in zip(end_times, Nv)
        names = ["$s"[1:end-2] for s in algs]

        grid, hopping_constants, h, u0 = transport_model(rn, N; domain_len)

        # we create a well-mixed model within a domain of the size of *one* voxel, h
        majumps, dprobwm, jprobwm, u0wm = wellmixed_model(rn, u0, end_time, h)

        # the spatial problem
        dprob = DiscreteProblem(u0, (0.0, end_time), copy(dprobwm.p))
        @show N

        # benchmarking and saving
        benchmarks = Vector{BenchmarkTools.Trial}(undef, length(algs))

        for (i, alg) in enumerate(algs)
            name = names[i]
            println("benchmarking $name")
            jp = JumpProblem(dprob, alg, majumps, hopping_constants=hopping_constants,
                             spatial_system = grid, save_positions=(false,false))
            b = @benchmarkable solve($jp, SSAStepper(); saveat = $(dprob.tspan[2]),
                                     callback) setup = (callback = deepcopy($cb)) samples = 10 seconds = 3600
            bench_dict[name, N] = run(b)
        end
    end
end
```

Finally, let's make a function to plot the benchmarking data.

```julia
function fetch_and_plot(bench_dict, domain_len)
    names = unique([key[1] for key in keys(bench_dict)])
    Nv = sort(unique([key[2] for key in keys(bench_dict)]))

    plt1 = plot()
    plt2 = plot()

    medtimes = [Float64[] for i in 1:length(names)]
    for (i,name) in enumerate(names)
        for N in Nv
            try
                push!(medtimes[i], median(bench_dict[name, N]).time/1e9)
            catch
                break
            end
        end
        len = length(medtimes[i])
        plot!(plt1, Nv[1:len], medtimes[i], marker = :hex, label = name, lw = 2)
        plot!(plt2, (Nv.^3)[1:len], medtimes[i], marker = :hex, label = name, lw = 2)
    end
    plot!(plt1, xlabel = "number of sites per edge", ylabel = "median time in seconds",
          xticks = Nv, legend = :bottomright)
    plot!(plt2, xlabel = "total number of sites", ylabel = "median time in seconds",
          xticks = (Nv.^3, string.(Nv.^3)), legend = :bottomright)
    plot(plt1, plt2; size = (1200,800), legendtitle = "SSAs",
         plot_title="3D RDME, domain length = $domain_len", left_margin=5Plots.mm)
end
```

We are now ready to run the benchmarks and plot the results.
We start with a domain length of `12` μm, analogous to Fig. 6 in [^1]:

```julia
bench_dict = Dict{Tuple{String, Int}, BenchmarkTools.Trial}()
algs = [NSM(), DirectCRDirect()]
Nv = [20, 30, 40, 50, 60, 90, 120, 240, 360]
end_times = 20000.0 * ones(length(Nv))
domain_len = 12.0
benchmark_and_save!(bench_dict, end_times, Nv, algs, domain_len)
```

```julia
plt = fetch_and_plot(bench_dict, domain_len)
```

We next consider a domain of length `6` μm, analogous to Fig. 7 in [^1].

```julia
bench_dict = Dict{Tuple{String, Int}, BenchmarkTools.Trial}()
domain_len = 6.0
benchmark_and_save!(bench_dict, end_times, Nv, algs, domain_len)
```

```julia
plt = fetch_and_plot(bench_dict, domain_len)
```

# References

[^1]: Sanft, Kevin R and Othmer, Hans G. *Constant-complexity stochastic simulation algorithm with optimal binning*. J. Chem. Phys., 143(7), 11 pp. (2015).

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder], WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/Filament.jmd
docs
20011
---
title: Filament Work-Precision Diagrams
author: dextorious, Chris Rackauckas
---

# Filament Benchmark

In this notebook we will benchmark a real-world biological model from a paper entitled [Magnetic dipole with a flexible tail as a self-propelling microdevice](https://doi.org/10.1103/PhysRevE.85.041502). This is a system of PDEs representing a Kirchhoff model of an elastic rod, where the equations of motion are given by the Rouse approximation with free boundary conditions.

## Model Implementation

First we will show the full model implementation. It is not necessary to understand the full model specification in order to understand the benchmark results, but it's all contained here for completeness. The model is highly optimized, with all internal vectors pre-cached, loops unrolled for efficiency (along with `@simd` annotations), a pre-defined Jacobian, matrix multiplications all in-place, etc. Thus this model is a good stand-in for other optimized PDE solving cases.

The model is thus defined as follows:

```julia
using OrdinaryDiffEq, ODEInterfaceDiffEq, Sundials, DiffEqDevTools, LSODA
using LinearAlgebra
using Plots
gr()
```

```julia
const T = Float64
abstract type AbstractFilamentCache end
abstract type AbstractMagneticForce end
abstract type AbstractInextensibilityCache end
abstract type AbstractSolver end
abstract type AbstractSolverCache end
```

```julia
struct FerromagneticContinuous <: AbstractMagneticForce
    ω :: T
    F :: Vector{T}
end

mutable struct FilamentCache{
        MagneticForce        <: AbstractMagneticForce,
        InextensibilityCache <: AbstractInextensibilityCache,
        SolverCache          <: AbstractSolverCache
    } <: AbstractFilamentCache
    N  :: Int
    μ  :: T
    Cm :: T
    x  :: SubArray{T,1,Vector{T},Tuple{StepRange{Int,Int}},true}
    y  :: SubArray{T,1,Vector{T},Tuple{StepRange{Int,Int}},true}
    z  :: SubArray{T,1,Vector{T},Tuple{StepRange{Int,Int}},true}
    A  :: Matrix{T}
    P  :: InextensibilityCache
    F  :: MagneticForce
    Sc :: SolverCache
end
```

```julia
struct NoHydroProjectionCache <: AbstractInextensibilityCache
    J         :: Matrix{T}
    P         :: Matrix{T}
    J_JT      :: Matrix{T}
    J_JT_LDLT :: LinearAlgebra.LDLt{T, SymTridiagonal{T}}
    P0        :: Matrix{T}

    NoHydroProjectionCache(N::Int) = new(
        zeros(N, 3*(N+1)),          # J
        zeros(3*(N+1), 3*(N+1)),    # P
        zeros(N,N),                 # J_JT
        LinearAlgebra.LDLt{T,SymTridiagonal{T}}(SymTridiagonal(zeros(N), zeros(N-1))),
        zeros(N, 3*(N+1))           # P0
    )
end
```

```julia
struct DiffEqSolverCache <: AbstractSolverCache
    S1 :: Vector{T}
    S2 :: Vector{T}

    DiffEqSolverCache(N::Integer) = new(zeros(T,3*(N+1)), zeros(T,3*(N+1)))
end
```

```julia
function FilamentCache(N=20; Cm=32, ω=200, Solver=SolverDiffEq)
    InextensibilityCache = NoHydroProjectionCache
    SolverCache = DiffEqSolverCache
    tmp = zeros(3*(N+1))
    FilamentCache{FerromagneticContinuous, InextensibilityCache, SolverCache}(
        N, N+1, Cm,
        view(tmp,1:3:3*(N+1)), view(tmp,2:3:3*(N+1)), view(tmp,3:3:3*(N+1)),
        zeros(3*(N+1), 3*(N+1)), # A
        InextensibilityCache(N), # P
        FerromagneticContinuous(ω, zeros(3*(N+1))),
        SolverCache(N)
    )
end
```
```julia
function stiffness_matrix!(f::AbstractFilamentCache)
    N, μ, A = f.N, f.μ, f.A
    @inbounds for j in axes(A, 2), i in axes(A, 1)
        A[i, j] = j == i ? 1 : 0
    end
    @inbounds for i in 1 : 3
        A[i,i]   =  1
        A[i,3+i] = -2
        A[i,6+i] =  1

        A[3+i,i]   = -2
        A[3+i,3+i] =  5
        A[3+i,6+i] = -4
        A[3+i,9+i] =  1

        A[3*(N-1)+i,3*(N-3)+i] =  1
        A[3*(N-1)+i,3*(N-2)+i] = -4
        A[3*(N-1)+i,3*(N-1)+i] =  5
        A[3*(N-1)+i,3*N+i]     = -2

        A[3*N+i,3*(N-2)+i] =  1
        A[3*N+i,3*(N-1)+i] = -2
        A[3*N+i,3*N+i]     =  1

        for j in 2 : N-2
            A[3*j+i,3*j+i]     =  6
            A[3*j+i,3*(j-1)+i] = -4
            A[3*j+i,3*(j+1)+i] = -4
            A[3*j+i,3*(j-2)+i] =  1
            A[3*j+i,3*(j+2)+i] =  1
        end
    end
    rmul!(A, -μ^4)
    nothing
end
```

```julia
function update_separate_coordinates!(f::AbstractFilamentCache, r)
    N, x, y, z = f.N, f.x, f.y, f.z
    @inbounds for i in 1 : length(x)
        x[i] = r[3*i-2]
        y[i] = r[3*i-1]
        z[i] = r[3*i]
    end
    nothing
end

function update_united_coordinates!(f::AbstractFilamentCache, r)
    N, x, y, z = f.N, f.x, f.y, f.z
    @inbounds for i in 1 : length(x)
        r[3*i-2] = x[i]
        r[3*i-1] = y[i]
        r[3*i]   = z[i]
    end
    nothing
end

function update_united_coordinates(f::AbstractFilamentCache)
    r = zeros(T, 3*length(f.x))
    update_united_coordinates!(f, r)
    r
end
```

```julia
function initialize!(initial_conf_type::Symbol, f::AbstractFilamentCache)
    N, x, y, z = f.N, f.x, f.y, f.z
    if initial_conf_type == :StraightX
        x .= range(0, stop=1, length=N+1)
        y .= 0
        z .= 0
    else
        error("Unknown initial configuration requested.")
    end
    update_united_coordinates(f)
end
```

```julia
function magnetic_force!(::FerromagneticContinuous, f::AbstractFilamentCache, t)
    # TODO: generalize this for different magnetic fields as well
    N, μ, Cm, ω, F = f.N, f.μ, f.Cm, f.F.ω, f.F.F
    F[1]         = -μ * Cm * cos(ω*t)
    F[2]         = -μ * Cm * sin(ω*t)
    F[3*(N+1)-2] =  μ * Cm * cos(ω*t)
    F[3*(N+1)-1] =  μ * Cm * sin(ω*t)
    nothing
end
```

```julia
struct SolverDiffEq <: AbstractSolver end

function (f::FilamentCache)(dr, r, p, t)
    @views f.x, f.y, f.z = r[1:3:end], r[2:3:end], r[3:3:end]
    jacobian!(f)
    projection!(f)
    magnetic_force!(f.F, f, t)
    A, P, F, S1, S2 = f.A, f.P.P, f.F.F, f.Sc.S1, f.Sc.S2

    # implement dr = P * (A*r + F) in an optimized way to avoid temporaries
    mul!(S1, A, r)
    S1 .+= F
    mul!(S2, P, S1)
    copyto!(dr, S2)
    return dr
end
```

```julia
function jacobian!(f::FilamentCache)
    N, x, y, z, J = f.N, f.x, f.y, f.z, f.P.J
    @inbounds for i in 1 : N
        J[i, 3*i-2]     = -2 * (x[i+1]-x[i])
        J[i, 3*i-1]     = -2 * (y[i+1]-y[i])
        J[i, 3*i]       = -2 * (z[i+1]-z[i])
        J[i, 3*(i+1)-2] =  2 * (x[i+1]-x[i])
        J[i, 3*(i+1)-1] =  2 * (y[i+1]-y[i])
        J[i, 3*(i+1)]   =  2 * (z[i+1]-z[i])
    end
    nothing
end
```

```julia
function projection!(f::FilamentCache)
    # implement P[:] = I - J'/(J*J')*J in an optimized way to avoid temporaries
    J, P, J_JT, J_JT_LDLT, P0 = f.P.J, f.P.P, f.P.J_JT, f.P.J_JT_LDLT, f.P.P0
    mul!(J_JT, J, J')
    LDLt_inplace!(J_JT_LDLT, J_JT)
    ldiv!(P0, J_JT_LDLT, J)
    mul!(P, P0', J)
    subtract_from_identity!(P)
    nothing
end
```

```julia
function subtract_from_identity!(A)
    lmul!(-1, A)
    @inbounds for i in 1 : size(A,1)
        A[i,i] += 1
    end
    nothing
end
```

```julia
function LDLt_inplace!(L::LinearAlgebra.LDLt{T,SymTridiagonal{T}}, A::Matrix{T}) where {T<:Real}
    n = size(A,1)
    dv, ev = L.data.dv, L.data.ev
    @inbounds for (i,d) in enumerate(diagind(A))
        dv[i] = A[d]
    end
    @inbounds for (i,d) in enumerate(diagind(A,-1))
        ev[i] = A[d]
    end
    @inbounds @simd for i in 1 : n-1
        ev[i]   /= dv[i]
        dv[i+1] -= abs2(ev[i]) * dv[i]
    end
    L
end
```
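For reference, this is the constrained structure the implementation above encodes (restating the in-code comments, not adding new functionality): `jacobian!` fills the Jacobian $J$ of the inextensibility constraints $\lVert \mathbf{r}_{i+1} - \mathbf{r}_i \rVert^2 = \mathrm{const}$, and `projection!` forms the projector

$$P = I - J^T \left(J J^T\right)^{-1} J,$$

so the right-hand side $\dot{\mathbf{r}} = P (A \mathbf{r} + F)$ computed in the call overload only moves the filament along directions compatible with the constraints.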
p, t)->(mul!(J, f.P.P, f.A); nothing)), r0, (0., time_end))
    sol = solve(prob, solver, dense=false, reltol=reltol, abstol=abstol)
end
```

This method runs the model with the `TRBDF2` method and the default parameters.

```julia
sol = run(SolverDiffEq())
plot(sol,vars = (0,25))
```

The model quickly falls into a highly oscillatory mode which then dominates throughout the rest of the solution.

# Work-Precision Diagrams

Now let's build the problem and solve it once at high accuracy to get a reference solution:

```julia
N=20
f = FilamentCache(N, Solver=SolverDiffEq)
r0 = initialize!(:StraightX, f)
stiffness_matrix!(f)
prob = ODEProblem(f, r0, (0., 0.01))
sol = solve(prob, Vern9(), reltol=1e-14, abstol=1e-14)
test_sol = TestSolution(sol);
```

## Omissions

```julia;eval=false
abstols=1 ./10 .^(3:8)
reltols=1 ./10 .^(3:8)
setups = [
  Dict(:alg => CVODE_BDF()),
  Dict(:alg => Rosenbrock23(autodiff=false)),
  Dict(:alg => Rodas4(autodiff=false)),
  Dict(:alg => radau()),
  Dict(:alg=>Exprb43(autodiff=false)),
  Dict(:alg=>Exprb32(autodiff=false)),
  Dict(:alg=>ImplicitEulerExtrapolation(autodiff=false)),
  Dict(:alg=>ImplicitDeuflhardExtrapolation(autodiff=false)),
  Dict(:alg=>ImplicitHairerWannerExtrapolation(autodiff=false)),
];
wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false)
plot(wp)
```

Rosenbrock23, Rodas4, Exprb32, Exprb43, the extrapolation methods, and Rodas5 do not perform well at all and are thus dropped from future tests. For reference, their most accurate runs land in the 10^(2.5) second range (ImplicitEulerExtrapolation takes over a day to run and had to be stopped prematurely), so they are roughly 500x slower than CVODE_BDF and would make the benchmarks take forever. It looks like `radau` fails on this problem at high tolerances, so its values should be ignored since it exits early. It is thus removed from the next sections.
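Because a failed run can silently distort a work-precision diagram, it is worth checking a solver's return status directly instead of trusting its error values. Below is a minimal sketch of that check, assuming `prob` from above and the ODEInterface wrapper that provides `radau`, as in the setups; the specific tolerances are only for illustration.

```julia
# Minimal sketch: detect an early exit by inspecting the return code and the
# final time reached, rather than reading the work-precision numbers at face value.
sol = solve(prob, radau(), reltol=1e-3, abstol=1e-3)
@show sol.retcode    # anything other than :Success indicates a failed solve
@show sol.t[end]     # an early exit stops short of the requested end time
```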
The EPIRK methods currently do not work on this problem ```julia sol = solve(prob, EPIRK4s3B(autodiff=false), dt=2^-3) ``` but would be called like: ```julia;eval=false abstols=1 ./10 .^(3:5) reltols=1 ./10 .^(3:5) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => HochOst4(),:dts=>2.0.^(-3:-1:-5)), Dict(:alg => EPIRK4s3B(),:dts=>2.0.^(-3:-1:-5)), Dict(:alg => EXPRB53s3(),:dts=>2.0.^(-3:-1:-5)), ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ## High Tolerance (Low Accuracy) ### Endpoint Error ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => BS3()), Dict(:alg => Tsit5()), Dict(:alg => ImplicitEuler(autodiff=false)), Dict(:alg => Trapezoid(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => rodas()), Dict(:alg => dop853()), Dict(:alg => lsoda()), Dict(:alg => ROCK2()), Dict(:alg => ROCK4()), Dict(:alg => ESERK5()) ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => ImplicitEuler(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => ABDF2(autodiff=false)), Dict(:alg => QNDF(autodiff=false)), Dict(:alg => RadauIIA5(autodiff=false)), ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => CVODE_BDF(linear_solver=:GMRES)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false,linsolve=LinSolveGMRES())), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false,linsolve=LinSolveGMRES())), ]; names = [ "CVODE-BDF", "CVODE-BDF (GMRES)", "TRBDF2", "TRBDF2 (GMRES)", "KenCarp4", "KenCarp4 (GMRES)", ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; names=names, appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ### Timeseries Error ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => Trapezoid(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => rodas()), Dict(:alg => lsoda()), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => ROCK2()), Dict(:alg => ROCK4()), Dict(:alg => ESERK5()) ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` Timeseries errors seem to match final point errors very closely in this problem, so these are turned off in future benchmarks. 
(Confirmed in the other cases) ### Dense Error ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => ROCK2()), Dict(:alg => ROCK4()), Dict(:alg => ESERK5()) ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false, dense_errors = true, error_estimate=:L2) plot(wp) ``` Dense errors seem to match timeseries errors very closely in this problem, so these are turned off in future benchmarks. (Confirmed in the other cases) ## Low Tolerance (High Accuracy) ```julia abstols=1 ./10 .^(6:12) reltols=1 ./10 .^(6:12) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => Vern7()), Dict(:alg => Vern9()), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => dop853()), Dict(:alg => ROCK4()) ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ```julia abstols=1 ./10 .^(6:12) reltols=1 ./10 .^(6:12) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => radau()), Dict(:alg => RadauIIA5(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno5(autodiff=false)), Dict(:alg => KenCarp5(autodiff=false)), Dict(:alg => lsoda()), ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ### Timeseries Error ```julia;eval=false abstols=1 ./10 .^(6:12) reltols=1 ./10 .^(6:12) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => radau()), Dict(:alg => RadauIIA5(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno5(autodiff=false)), Dict(:alg => KenCarp5(autodiff=false)), Dict(:alg => lsoda()), ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false, error_estimate = :l2) plot(wp) ``` ### Dense Error ```julia;eval=false abstols=1 ./10 .^(6:12) reltols=1 ./10 .^(6:12) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => radau()), Dict(:alg => RadauIIA5(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno5(autodiff=false)), Dict(:alg => KenCarp5(autodiff=false)), Dict(:alg => lsoda()), ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false, dense_errors=true, error_estimate = :L2) plot(wp) ``` # No Jacobian Work-Precision Diagrams In the previous cases the analytical Jacobian is given and is used by the solvers. Now we will solve the same problem without the analytical Jacobian. Note that the pre-caching means that the model is not compatible with autodifferentiation by ForwardDiff. Thus all of the native Julia solvers are set to `autodiff=false` to use DiffEqDiffTools.jl's numerical differentiation backend. We'll only benchmark the methods that did well before. 
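For intuition, numerical differentiation builds the Jacobian one column at a time from forward differences, J[:, j] ≈ (f(u + εeⱼ) − f(u))/ε. The sketch below illustrates the idea only; `fd_jacobian!` is a hypothetical helper, not the DiffEqDiffTools.jl API, and `f` is the in-place `FilamentCache` right-hand side defined above.

```julia
# Minimal forward-difference Jacobian sketch (hypothetical helper, for intuition only):
function fd_jacobian!(J, f!, u, t; ε = sqrt(eps(eltype(u))))
    du0 = similar(u); du = similar(u); up = copy(u)
    f!(du0, u, nothing, t)          # baseline evaluation f(u)
    for j in 1:length(u)
        up[j] += ε
        f!(du, up, nothing, t)      # perturbed evaluation f(u + ε e_j)
        J[:, j] .= (du .- du0) ./ ε
        up[j] = u[j]                # restore the perturbed entry
    end
    J
end

J = zeros(length(r0), length(r0))
fd_jacobian!(J, f, r0, 0.0)
```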
```julia N=20 f = FilamentCache(N, Solver=SolverDiffEq) r0 = initialize!(:StraightX, f) stiffness_matrix!(f) prob = ODEProblem(ODEFunction(f, jac=nothing), r0, (0., 0.01)) sol = solve(prob, Vern9(), reltol=1e-14, abstol=1e-14) test_sol = TestSolution(sol.t, sol.u); ``` ## High Tolerance (Low Accuracy) ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => BS3()), Dict(:alg => Tsit5()), Dict(:alg => ImplicitEuler(autodiff=false)), Dict(:alg => Trapezoid(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => rodas()), Dict(:alg => dop853()), Dict(:alg => lsoda()) ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => BS3()), Dict(:alg => Tsit5()), Dict(:alg => ImplicitEuler(autodiff=false)), Dict(:alg => Trapezoid(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => rodas()), Dict(:alg => dop853()), Dict(:alg => lsoda()), Dict(:alg => ROCK2()), Dict(:alg => ROCK4()), Dict(:alg => ESERK5()) ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ```julia abstols=1 ./10 .^(3:8) reltols=1 ./10 .^(3:8) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => CVODE_BDF(linear_solver=:GMRES)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false,linsolve=LinSolveGMRES())), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false,linsolve=LinSolveGMRES())), ]; names = [ "CVODE-BDF", "CVODE-BDF (GMRES)", "TRBDF2", "TRBDF2 (GMRES)", "KenCarp4", "KenCarp4 (GMRES)", ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; names=names, appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ## Low Tolerance (High Accuracy) ```julia abstols=1 ./10 .^(6:12) reltols=1 ./10 .^(6:12) setups = [ Dict(:alg => CVODE_BDF()), Dict(:alg => radau()), Dict(:alg => RadauIIA5(autodiff=false)), Dict(:alg => TRBDF2(autodiff=false)), Dict(:alg => Kvaerno3(autodiff=false)), Dict(:alg => KenCarp3(autodiff=false)), Dict(:alg => Kvaerno4(autodiff=false)), Dict(:alg => KenCarp4(autodiff=false)), Dict(:alg => Kvaerno5(autodiff=false)), Dict(:alg => KenCarp5(autodiff=false)), Dict(:alg => lsoda()), ]; wp = WorkPrecisionSet(prob, abstols, reltols, setups; appxsol=test_sol, maxiters=Int(1e6), verbose = false) plot(wp) ``` ## Conclusion Sundials' `CVODE_BDF` does the best in this test. When the Jacobian is given, the ESDIRK methods `TRBDF2` and `KenCarp3` are able to do almost as well as it until `<1e-6` error is needed. When Jacobians are not given, Sundials is the fastest without competition. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/allen_cahn_fdm_wpd.jmd
docs
9072
---
title: Allen-Cahn FDM Work-Precision Diagrams
author: HAO HAO
---

```julia
using ApproxFun, OrdinaryDiffEq, Sundials
using DiffEqDevTools
using LinearAlgebra
using Plots; gr()
```

Here is the Allen-Cahn equation using FDM.

```julia
# Define the linear and nonlinear terms
function lin_term(N)
    dx = 1/(N + 1)
    du = 1/2 * ones(N - 1) # super diagonal
    dl = -1/2 * ones(N - 1) # lower diagonal
    DiffEqArrayOperator(0.01*(1/dx) * diagm(-1 => dl, 1 => du))
end

function nl_term(N)
    function (du,u,p,t)
        du .= u .- u.^3
    end
end

# Construct the problem
function allen_cahn(N)
    f1 = lin_term(N)
    f2 = nl_term(N)
    dx = 1 / (N + 1)
    xs = (1:N) * dx
    f0 = x -> .53*x + .47*sin(-1.5*pi*x) - x
    u0 = f0.(xs)
    prob = SplitODEProblem(f1, f2, u0, (0.0, 1.0))
    xs, prob
end;
```

Reference solution using RadauIIA5 is below:

```julia
xs, prob = allen_cahn(100)

sol = solve(prob, RadauIIA5(autodiff=false); abstol=1e-14, reltol=1e-14, dt=1e-4, adaptive=false)
test_sol = TestSolution(sol);

tslices = [0.0 0.25 0.50 0.75 1.]
ys = hcat((sol(t) for t in tslices)...)
labels = ["t = $t" for t in tslices]
plot(xs, ys, label=labels)
```

Linear solvers

```julia
const LS_Dense = LinSolveFactorize(lu)
```

## High tolerances

## In-family comparisons

1.IMEX methods (dense linear solver)

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(), :dts => 1e-3 * multipliers),
          Dict(:alg => CNAB2(), :dts => 1e-4 * multipliers),
          Dict(:alg => CNLF2(), :dts => 1e-4 * multipliers),
          Dict(:alg => SBDF2(), :dts => 1e-3 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, low order")
```

1.IMEX methods (Krylov linear solver)

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(linsolve=LinSolveGMRES()), :dts => 1e-3 * multipliers),
          Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers),
          Dict(:alg => CNLF2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers),
          Dict(:alg => SBDF2(linsolve=LinSolveGMRES()), :dts => 1e-3 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, low order")
```
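To make the splitting concrete: a `SplitODEProblem` hands the IMEX integrators two pieces whose sum is the full right-hand side, with the stiff linear piece integrated implicitly and the nonlinear piece explicitly. Here is a minimal sketch evaluating both halves by hand, mirroring `lin_term` and `nl_term` above (all names are local to the sketch):

```julia
# Sketch: evaluate du = f1*u + f2(u) by hand for the Allen-Cahn splitting above.
N  = 100
dx = 1/(N + 1)
xs = (1:N) * dx
u  = (x -> .53*x + .47*sin(-1.5*pi*x) - x).(xs)
L  = 0.01*(1/dx) * diagm(-1 => -1/2*ones(N-1), 1 => 1/2*ones(N-1))
du_lin = L * u            # stiff piece: integrated implicitly by IMEX methods
du_nl  = u .- u.^3        # nonstiff piece: integrated explicitly
du = du_lin .+ du_nl      # full right-hand side
```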
2.ExpRK methods

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers),
          Dict(:alg => NorsettEuler(krylov=true, m=5), :dts => 1e-3 * multipliers),
          Dict(:alg => NorsettEuler(krylov=true, m=20), :dts => 1e-3 * multipliers),
          Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers),
          Dict(:alg => ETDRK2(krylov=true, m=5), :dts => 1e-2 * multipliers),
          Dict(:alg => ETDRK2(krylov=true, m=20), :dts => 1e-2 * multipliers)]
labels = hcat("NorsettEuler (caching)", "NorsettEuler (m=5)", "NorsettEuler (m=20)",
              "ETDRK2 (caching)", "ETDRK2 (m=5)", "ETDRK2 (m=20)")
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="ExpRK methods, low order")
```

## Between family comparisons

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => CNAB2(), :dts => 1e-4 * multipliers),
          Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers),
          Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers)]
labels = ["CNAB2 (dense linsolve)" "CNAB2 (Krylov linsolve)" "ETDRK2 (caching)"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="Between family, low orders")
```

## Low tolerances

## In-family comparisons

1.IMEX methods (dense linear solver)

```julia
abstols = 0.1 .^ (7:13)
reltols = 0.1 .^ (4:10)
setups = [Dict(:alg => KenCarp3()),
          Dict(:alg => KenCarp4()),
          Dict(:alg => KenCarp5()),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Dense)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Dense)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense))]
labels = hcat("KenCarp3", "KenCarp4", "KenCarp5", "ARKODE3", "ARKODE4", "ARKODE5")
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, medium order")
```

1.IMEX methods (Krylov linear solver)

```julia
abstols = 0.1 .^ (7:13)
reltols = 0.1 .^ (4:10)
setups = [Dict(:alg => KenCarp3(linsolve=LinSolveGMRES())),
          Dict(:alg => KenCarp4(linsolve=LinSolveGMRES())),
          Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:GMRES)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:GMRES)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES))]
labels = ["KenCarp3" "KenCarp4" "KenCarp5" "ARKODE3" "ARKODE4" "ARKODE5"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, medium order")
```

2.ExpRK methods

```julia
abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (4:8)
multipliers = 0.5 .^ (0:4)
setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers),
          Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers),
          Dict(:alg =>
ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK3 (m=5)", "ETDRK4 (caching)", "ETDRK4 (m=5)", "HochOst4 (caching)", "HochOst4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense)), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES)), Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("KenCarp5 (dense linsolve)", "ARKODE (dense linsolve)", "KenCarp5 (Krylov linsolve)", "ARKODE (Krylov linsolve)", "ETDRK3 (m=5)", "ETDRK4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/allen_cahn_spectral_wpd.jmd
docs
9109
---
title: Allen-Cahn Pseudospectral Methods Work-Precision Diagrams
author: HAO HAO
---

```julia
using ApproxFun, OrdinaryDiffEq, Sundials
using DiffEqDevTools
using LinearAlgebra
using Plots; gr()
```

Here is the Allen-Cahn equation using Chebyshev spectral methods.

```julia
function cheb(N)
    N==0 && return (0,1)
    x = cos.(pi*(0:N)/N)
    c = [2; ones(N-1,1); 2].*(-1).^(0:N)
    X = hcat([x for i in 1:N+1]...)
    dX = X-X'
    D  = (c*(1 ./c)')./(dX+I)               # off-diagonal entries
    D  = D .- Diagonal(vec(sum(D,dims=2)))  # diagonal entries
    D,x
end
N = 128
ChebD2,x = cheb(N)
xx = x
x = x[2:N]
w = .53*x + .47*sin.(-1.5*pi*x) - x # use w = u-x to make BCs homogeneous
u = [1;w+x;-1]

ϵ=0.01
D2=ϵ*(ChebD2^2)[2:N, 2:N]
function allen_cahn(du,u,x,t)
    @. du = (u + x) - (u + x)^3
end
```

Reference solution using RadauIIA5 is below:

```julia
prob = SplitODEProblem(DiffEqArrayOperator(D2), allen_cahn, w, (0.0,5.0), x)
sol = solve(prob, RadauIIA5(autodiff=false); reltol=1e-14,abstol=1e-14)
test_sol = TestSolution(sol)

tslices=[0.0 1.0 2.0 3.0 5.0]
ys=hcat(([1;x.+sol(t);-1] for t in tslices)...)
labels=["t=$t" for t in tslices]
plot(xx,ys,label=labels)
```

## High tolerances

## In-family comparisons

1.IMEX methods (dense linear solver)

```julia
abstols = 0.1 .^ (5:8)
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(), :dts => 1e-3 * multipliers),
          Dict(:alg => CNAB2(), :dts => 5e-3 * multipliers),
          Dict(:alg => CNLF2(), :dts => 5e-4 * multipliers),
          Dict(:alg => SBDF2(), :dts => 1e-3 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp1 = WorkPrecisionSet(prob,abstols,reltols,setups;
                             print_names=true,names=labels, numruns=5,seconds=5,
                             save_everystep=false,appxsol=test_sol,maxiters=Int(1e5));

plot(wp1,label=labels,markershape=:auto,title="IMEX methods, dense linsolve, low order")
```

1.IMEX methods (Krylov linear solver)

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(linsolve=LinSolveGMRES()), :dts => 1e-3 * multipliers),
          Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 5e-3 * multipliers),
          Dict(:alg => CNLF2(linsolve=LinSolveGMRES()), :dts => 5e-4 * multipliers),
          Dict(:alg => SBDF2(linsolve=LinSolveGMRES()), :dts => 1e-3 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp1 = WorkPrecisionSet(prob,abstols,reltols,setups;
                             print_names=true, names=labels, numruns=5, error_estimate=:l2,
                             save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp1, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, low order")
```

2.
ExpRK methods ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers), Dict(:alg => NorsettEuler(krylov=true, m=5), :dts => 1e-3 * multipliers), Dict(:alg => NorsettEuler(krylov=true, m=20), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK2(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK2(krylov=true, m=20), :dts => 1e-2 * multipliers)] labels = hcat("NorsettEuler (caching)", "NorsettEuler (m=5)", "NorsettEuler (m=20)", "ETDRK2 (caching)", "ETDRK2 (m=5)", "ETDRK2 (m=20)") @time wp2 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp2, label=labels, markershape=:auto, title="ExpRK methods, low order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => CNAB2(), :dts => 5e-3 * multipliers), Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 5e-3 * multipliers), Dict(:alg => ETDRK2(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = ["CNAB2 (dense linsolve)" "CNAB2 (Krylov linsolve)" "ETDRK2 (m=5)"] @time wp3 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp3, label=labels, markershape=:auto, title="Between family, low orders") ``` ## Low tolerances ## In-family comparisons 1.IMEX methods (dense/band linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => KenCarp3()), Dict(:alg => KenCarp4()), Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1))] labels = hcat("KenCarp3", "KenCarp4", "KenCarp5", "ARKODE3", "ARKODE4", "ARKODE5") @time wp4 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp4, label=labels, markershape=:auto, title="IMEX methods, dense/band linsolve, medium order") ``` 1.IMEX methods (krylov linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => KenCarp3(linsolve=LinSolveGMRES())), Dict(:alg => KenCarp4(linsolve=LinSolveGMRES())), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:GMRES)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:GMRES)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES))] labels = ["KenCarp3" "KenCarp4" "KenCarp5" "ARKODE3" "ARKODE4" "ARKODE5"] @time wp4 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp4, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, medium order") ``` 2.ExpRK methods ```julia abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ETDRK3(), :dts 
=> 1e-2 * multipliers), Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK3 (m=5)", "ETDRK4 (caching)", "ETDRK4 (m=5)", "HochOst4 (caching)", "HochOst4 (m=5)") @time wp5 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp5, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense)), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES)), Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("KenCarp5 (dense linsolve)", "ARKODE (dense linsolve)", "KenCarp5 (Krylov linsolve)", "ARKODE (Krylov linsolve)", "ETDRK3 (m=5)", "ETDRK4 (m=5)") @time wp6 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp6, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/burgers_fdm_wpd.jmd
docs
9190
--- title: Burgers FDM Work-Precision Diagrams author: HAO HAO --- ```julia using ApproxFun, OrdinaryDiffEq, Sundials using DiffEqDevTools using LinearAlgebra using Plots; gr() ``` Here is the Burgers equation using FDM. ```julia function lin_term(N, ϵ) dx = 1/(N + 1) d = -2 * ones(N) # main diagonal du = ones(N - 1) # off diagonal DiffEqArrayOperator((ϵ/dx^2) * diagm(-1 => du, 0 => d, 1 => du)) end function nl_term(N) dx = 1/(N + 1) du = ones(N - 1) # super diagonal dl = -ones(N - 1) # lower diagonal D = (-1/(4*dx)) * diagm(-1 => dl, 1 => du) tmp = zeros(N) function (du,u,p,t) @. tmp = u^2 mul!(du, D, tmp) end end # Construct the problem function burgers(N, ϵ) f1 = lin_term(N, ϵ) f2 = nl_term(N) dx = 1 / (N + 1) xs = (1:N) * dx μ0 = 0.3; σ0 = 0.05 f0 = x -> exp(-(x - μ0)^2 / (2 * σ0^2)) u0 = f0.(xs) prob = SplitODEProblem(f1, f2, u0, (0.0, 1.0)) xs, prob end; ``` Reference solution using Vern9 is below: ```julia xs, prob = burgers(512, 1e-3) sol = solve(prob, Vern9(); abstol=1e-14, reltol=1e-14) test_sol = TestSolution(sol); tslices = [0.0 0.25 0.50 0.75 1.00] ys = hcat((sol(t) for t in tslices)...) labels = ["t = $t" for t in tslices] plot(xs, ys, label=labels) ``` ## High tolerances ## In-family comparisons 1.IMEX methods (dense linear solver) ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => IMEXEuler(), :dts => 1e-3 * multipliers), Dict(:alg => CNAB2(), :dts => 1e-4 * multipliers), Dict(:alg => CNLF2(), :dts => 1e-4 * multipliers), Dict(:alg => SBDF2(), :dts => 1e-3 * multipliers)] labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, low order") ``` 1.IMEX methods (Krylov linear solver) ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => IMEXEuler(linsolve=LinSolveGMRES()), :dts => 1e-3 * multipliers), Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers), Dict(:alg => CNLF2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers), Dict(:alg => SBDF2(linsolve=LinSolveGMRES()), :dts => 1e-3 * multipliers)] labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, low order") ``` 2. 
ExpRK methods ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers), Dict(:alg => NorsettEuler(krylov=true, m=5), :dts => 1e-3 * multipliers), Dict(:alg => NorsettEuler(krylov=true, m=20), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers)] #Dict(:alg => ETDRK2(krylov=true, m=20), :dts => 1e-2 * multipliers)) matrix contains Inf or NaN #Dict(:alg => ETDRK2(krylov=true, m=20), :dts => 1e-2 * multipliers) matrix contains Inf or NaN labels = hcat("NorsettEuler (caching)", "NorsettEuler (m=5)", "NorsettEuler (m=20)", "ETDRK2 (caching)")#, "ETDRK2 (m=5)"), "ETDRK2 (m=20)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="ExpRK methods, low order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => CNAB2(), :dts => 1e-4 * multipliers), Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers)] labels = ["CNAB2 (dense linsolve)" "CNAB2 (Krylov linsolve)" "ETDRK2 (m=5)"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="Between family, low orders") ``` ## Low tolerances ## In-family comparisons 1.IMEX methods (dense linear solver) ```julia abstols = 0.1 .^ (8:13) reltols = 0.1 .^ (5:10) setups = [Dict(:alg => KenCarp3()), Dict(:alg => KenCarp4()), Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Dense)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Dense)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense))] labels = hcat("KenCarp3", "KenCarp4", "KenCarp5", "ARKODE3", "ARKODE4", "ARKODE5") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, medium order") ``` 1.IMEX methods (Krylov linear solver) ```julia abstols = 0.1 .^ (8:13) reltols = 0.1 .^ (5:10) setups = [Dict(:alg => KenCarp3(linsolve=LinSolveGMRES())), Dict(:alg => KenCarp4(linsolve=LinSolveGMRES())), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:GMRES)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:GMRES)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES))] labels = ["KenCarp3" "KenCarp4" "KenCarp5" "ARKODE3" "ARKODE4" "ARKODE5"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, medium order") ``` 2.ExpRK methods ```julia abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => 
ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK3 (m=5)", "ETDRK4 (caching)", "ETDRK4 (m=5)", "HochOst4 (caching)", "HochOst4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (8:13) reltols = 0.1 .^ (5:10) multipliers = 0.5 .^ (0:5) setups = [Dict(:alg => KenCarp4()), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense)), Dict(:alg => KenCarp4(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES)), Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("KenCarp4 (dense linsolve)", "ARKODE (dense linsolve)", "KenCarp4 (Krylov linsolve)", "ARKODE (Krylov linsolve)", "ETDRK3 (m=5)", "ETDRK4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));#162s plot(wp, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/burgers_spectral_wpd.jmd
docs
6115
---
title: Burgers Pseudospectral Methods Work-Precision Diagrams
author: HAO HAO
---

```julia
using ApproxFun, OrdinaryDiffEq, Sundials
using DiffEqDevTools
using LinearAlgebra
using Plots; gr()
```

Here is the Burgers equation using Fourier spectral methods.

```julia
S = Fourier()
n = 512
x = points(S, n)
D2 = Derivative(S,2)[1:n,1:n]
D  = (Derivative(S) → S)[1:n,1:n]
T = ApproxFun.plan_transform(S, n)
Ti = ApproxFun.plan_itransform(S, n)

û₀ = T*cos.(cos.(x.-0.1))
A = 0.03*D2
tmp = similar(û₀)
p = (D,D2,T,Ti,tmp,similar(tmp))
function burgers_nl(dû,û,p,t)
    D,D2,T,Ti,u,tmp = p
    mul!(tmp, D, û)
    mul!(u, Ti, tmp)
    mul!(tmp, Ti, û)
    @. tmp = tmp*u
    mul!(u, T, tmp)
    @. dû = - u
end
```

Reference solution using Rodas5 is below:

```julia
prob = SplitODEProblem(DiffEqArrayOperator(Diagonal(A)), burgers_nl, û₀, (0.0,5.0), p)
sol = solve(prob, Rodas5(autodiff=false); reltol=1e-12,abstol=1e-12)
test_sol = TestSolution(sol)

tslices=[0.0 1.0 2.0 3.0 5.0]
ys=hcat((Ti*sol(t) for t in tslices)...)
labels=["t=$t" for t in tslices]
plot(x,ys,label=labels)
```

## High tolerances

```julia
diag_linsolve=LinSolveFactorize(W->let tmp = tmp
    for i in 1:size(W, 1)
        tmp[i] = W[i, i]
    end
    Diagonal(tmp)
end)
```

## In-family comparisons

1.IMEX methods (diagonal linear solver)

```julia
abstols = 0.1 .^ (5:8)
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(linsolve=diag_linsolve), :dts => 1e-3 * multipliers),
          Dict(:alg => CNAB2(linsolve=diag_linsolve), :dts => 5e-3 * multipliers),
          Dict(:alg => CNLF2(linsolve=diag_linsolve), :dts => 5e-3 * multipliers),
          Dict(:alg => SBDF2(linsolve=diag_linsolve), :dts => 1e-3 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp1 = WorkPrecisionSet(prob,abstols,reltols,setups;
                             print_names=true,names=labels, numruns=5,seconds=5,
                             save_everystep=false,appxsol=test_sol,maxiters=Int(1e5));

plot(wp1,label=labels,markershape=:auto,title="IMEX methods, diagonal linsolve, low order")
```

2.
ExpRK methods ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers)] labels = hcat("NorsettEuler", "ETDRK2 (caching)") @time wp2 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp2, label=labels, markershape=:auto, title="ExpRK methods, low order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => CNAB2(linsolve=diag_linsolve), :dts => 5e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers)] labels = ["CNAB2 (diagonal linsolve)" "ETDRK2"] @time wp3 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp3, label=labels, markershape=:auto, title="Between family, low orders") ``` ## Low tolerances ## In-family comparisons 1.IMEX methods (band linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1))] labels = hcat("ARKODE3", "ARKODE4", "ARKODE5") @time wp4 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp4, label=labels, markershape=:auto, title="IMEX methods, band linsolve, medium order") ``` 2.ExpRK methods ```julia abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK4 (caching)", "HochOst4 (caching)") @time wp5 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp5, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers)] labels = hcat("ARKODE (nondiagonal linsolve)", "ETDRK3 ()", "ETDRK4 ()") #"ARKODE (Krylov linsolve)") @time wp6 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp6, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/kdv_fdm_wpd.jmd
docs
9412
---
title: KdV FDM Work-Precision Diagrams
author: HAO HAO
---

```julia
using ApproxFun, OrdinaryDiffEq, Sundials
using DiffEqDevTools
using LinearAlgebra
using Plots; gr()
```

Here is the KdV equation using FDM.

```julia
# Define the linear and nonlinear terms
function lin_term(N)
    dx = 1/(N + 1)
    du = ones(N-1)         # off diagonal
    du2 = -ones(N-1)       # off diagonal
    d = (1/2) * ones(N-2)
    d2 = (-1/2) * ones(N-2)
    DiffEqArrayOperator(5e-5*(1/dx^3) * diagm(-2 => d2, -1 => du, 1 => du2, 2 => d))
end

function nl_term(N)
    dx = 1/(N + 1)
    du = ones(N - 1) # super diagonal
    dl = -ones(N - 1) # lower diagonal
    D = (0.2/dx) * diagm(-1 => dl, 1 => du)
    tmp = zeros(N)
    function (du,u,p,t)
        @. tmp = u^2
        mul!(du, D, tmp)
    end
end

# Construct the problem
function kdv(N)
    f1 = lin_term(N)
    f2 = nl_term(N)
    dx = 1 / (N + 1)
    xs = (1:N) * dx
    μ0 = 0.3; σ0 = 0.05
    f0 = x -> 0.9*exp(-(x - μ0)^2 / (2 * σ0^2))
    u0 = f0.(xs)
    prob = SplitODEProblem(f1, f2, u0, (0.0, 1.0))
    xs, prob
end;
```

Reference solution using Tsit5 is below:

```julia
xs, prob = kdv(200)
sol = solve(prob, Tsit5(); abstol=1e-11, reltol=1e-11, dt=1e-7, adaptive=false)
test_sol = TestSolution(sol);

tslices = [0.0 0.25 0.50 0.75 1.00]
ys = hcat((sol(t) for t in tslices)...)
labels = ["t = $t" for t in tslices]
fn=plot(xs, ys, label=labels)
```

Linear solvers

```julia
const LS_Dense = LinSolveFactorize(lu)
```

## High tolerances

## In-family comparisons

1.IMEX methods (dense linear solver)

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(), :dts => 1e-3 * multipliers),
          Dict(:alg => CNAB2(), :dts => 1e-5 * multipliers),
          Dict(:alg => CNLF2(), :dts => 1e-4 * multipliers),
          Dict(:alg => SBDF2(), :dts => 1e-3 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, low order")
```

1.IMEX methods (Krylov linear solver)

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [#Dict(:alg => IMEXEuler(linsolve=LinSolveGMRES()), :dts => 1e-5 * multipliers),
          Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-5 * multipliers),
          Dict(:alg => CNLF2(linsolve=LinSolveGMRES()), :dts => 1e-5 * multipliers),
          Dict(:alg => SBDF2(linsolve=LinSolveGMRES()), :dts => 1e-4 * multipliers)]
labels = ["CNAB2" "CNLF2" "SBDF2"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, low order")
```
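As a sanity check on the discretization, the interior rows of the third-derivative stencil in `lin_term` can be verified directly: the centered weights (-1/2, 1, 0, -1, 1/2)/dx^3 reproduce u''' exactly for cubic polynomials. Below is a minimal sketch of that check (all names are local to the sketch):

```julia
# Sketch: apply the centered third-derivative stencil from lin_term to u(x) = x^3,
# whose third derivative is exactly 6 everywhere.
N  = 200
dx = 1/(N + 1)
xs = (1:N) * dx
D3 = (1/dx^3) * diagm(-2 => -1/2*ones(N-2), -1 => ones(N-1),
                      1  => -ones(N-1),     2 => 1/2*ones(N-2))
d3u = D3 * xs.^3
maximum(abs.(d3u[3:N-2] .- 6))   # interior rows: ≈ 0 up to amplified rounding error
```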
2.ExpRK methods

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => NorsettEuler(), :dts => 1e-6 * multipliers),
          Dict(:alg => NorsettEuler(krylov=true, m=5), :dts => 1e-3 * multipliers),
          #Dict(:alg => NorsettEuler(krylov=true, m=20), :dts => 1e-3 * multipliers), matrix contains Infs or NaNs
          Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers),
          Dict(:alg => ETDRK2(krylov=true, m=5), :dts => 1e-2 * multipliers),
          Dict(:alg => ETDRK2(krylov=true, m=20), :dts => 1e-2 * multipliers)]
labels = hcat("NorsettEuler (caching)", "NorsettEuler (m=5)",
              "ETDRK2 (caching)", "ETDRK2 (m=5)", "ETDRK2 (m=20)")
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="ExpRK methods, low order")
```

## Between family comparisons

```julia
abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => CNAB2(), :dts => 1e-5 * multipliers),
          Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-5 * multipliers),
          Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers)]
labels = ["CNAB2 (dense linsolve)" "CNAB2 (Krylov linsolve)" "ETDRK2 (caching)"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="Between family, low orders")
```

## Low tolerances

## In-family comparisons

1.IMEX methods (dense linear solver)

```julia
abstols = 0.1 .^ (7:13)
reltols = 0.1 .^ (4:10)
setups = [Dict(:alg => KenCarp3()),
          Dict(:alg => KenCarp4()),
          Dict(:alg => KenCarp5()),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Dense)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Dense)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense))]
labels = hcat("KenCarp3", "KenCarp4", "KenCarp5", "ARKODE3", "ARKODE4", "ARKODE5")
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, medium order")
```

1.IMEX methods (Krylov linear solver)

```julia
abstols = 0.1 .^ (7:13)
reltols = 0.1 .^ (4:10)
setups = [Dict(:alg => KenCarp3(linsolve=LinSolveGMRES())),
          Dict(:alg => KenCarp4(linsolve=LinSolveGMRES())),
          Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:GMRES)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:GMRES)),
          Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES))]
labels = ["KenCarp3" "KenCarp4" "KenCarp5" "ARKODE3" "ARKODE4" "ARKODE5"]
@time wp = WorkPrecisionSet(prob,abstols,reltols,setups;
                            print_names=true, names=labels, numruns=5, error_estimate=:l2,
                            save_everystep=false, appxsol=test_sol, maxiters=Int(1e5));

plot(wp, label=labels, markershape=:auto, title="IMEX methods, medium order")
```

2.ExpRK methods

```julia
abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much
reltols = 0.1 .^ (4:8)
multipliers = 0.5 .^ (0:4)
setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers),
          #Dict(:alg => ETDRK3(krylov=true, m=5), :dts =>
1e-2 * multipliers),matrix contains Infs or NaNs Dict(:alg => ETDRK4(), :dts => 1e-3 * multipliers), #Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers),matrix contains Infs or NaNs Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers)] #Dict(:alg => HochOst4(krylov=true, m=5), :dts => 1e-2 * multipliers)] matrix contains Infs or NaNs labels = hcat("ETDRK3 (caching)", "ETDRK4 (caching)",# "ETDRK3 (m=5)", "ETDRK4 (m=5)" "HochOst4 (caching)")#, "HochOst4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense)), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES)), Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-3 * multipliers)] labels = hcat("KenCarp5 (dense linsolve)", "ARKODE (dense linsolve)", "KenCarp5 (Krylov linsolve)", "ARKODE (Krylov linsolve)", "ETDRK3 (m=5)", "ETDRK4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/kdv_spectral_wpd.jmd
docs
5947
---
title: KdV Pseudospectral Methods Work-Precision Diagrams
author: HAO HAO
---

```julia
using ApproxFun, OrdinaryDiffEq, Sundials
using DiffEqDevTools
using LinearAlgebra
using Plots; gr()
```

Here is the KdV equation using Fourier spectral methods.

```julia
S = Fourier()
n = 512
x = points(S, n)
D2 = Derivative(S,2)[1:n,1:n]
D  = (Derivative(S) → S)[1:n,1:n]
T = ApproxFun.plan_transform(S, n)
Ti = ApproxFun.plan_itransform(S, n)

D3 = (Derivative(S,3) → S)[1:n,1:n]
û₀ = T*cos.(x)
δ=0.022
tmp = similar(û₀)
p = (D,T,Ti,similar(tmp),tmp)
function kdv(dû,û,p,t)
    D,T,Ti,u,tmp = p
    mul!(u,D,û)
    mul!(tmp,Ti,u)
    mul!(u,Ti,û)
    @. tmp=u*tmp
    mul!(u,T,tmp)
    @. dû = 6*u
end
```

Reference solution using RadauIIA5 is below:

```julia
prob = SplitODEProblem(DiffEqArrayOperator(-D3), kdv, û₀, (0.0,5.0), p)
sol = solve(prob, RadauIIA5(autodiff=false); reltol=1e-14,abstol=1e-14)
test_sol = TestSolution(sol)

tslices=[0.0 1.0 2.0 3.0 5.0]
ys=hcat((Ti*sol(t) for t in tslices)...)
labels=["t=$t" for t in tslices]
plot(x,ys,label=labels)
```

## High tolerances

## In-family comparisons

1.IMEX methods (diagonal linear solver)

```julia
# Diagonal linear solver, as defined in the Burgers pseudospectral benchmark;
# it factorizes W by keeping only its diagonal.
diag_linsolve=LinSolveFactorize(W->let tmp = tmp
    for i in 1:size(W, 1)
        tmp[i] = W[i, i]
    end
    Diagonal(tmp)
end)

abstols = 0.1 .^ (5:8)
reltols = 0.1 .^ (1:4)
multipliers = 0.5 .^ (0:3)
setups = [Dict(:alg => IMEXEuler(linsolve=diag_linsolve), :dts => 1e-5 * multipliers),
          Dict(:alg => CNAB2(linsolve=diag_linsolve), :dts => 5e-7 * multipliers),
          Dict(:alg => CNLF2(linsolve=diag_linsolve), :dts => 5e-7 * multipliers),
          Dict(:alg => SBDF2(linsolve=diag_linsolve), :dts => 1e-5 * multipliers)]
labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"]
@time wp1 = WorkPrecisionSet(prob,abstols,reltols,setups;
                             print_names=true,names=labels, numruns=5,seconds=5,
                             save_everystep=false,appxsol=test_sol,maxiters=Int(1e5));

plot(wp1,label=labels,markershape=:auto,title="IMEX methods, diagonal linsolve, low order")
```

2.
ExpRK methods ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers)] labels = hcat("NorsettEuler", "ETDRK2 (caching)") @time wp2 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp2, label=labels, markershape=:auto, title="ExpRK methods, low order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => CNAB2(linsolve=diag_linsolve), :dts => 5e-5 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers)] labels = ["CNAB2 (diagonal linsolve)" "ETDRK2"] @time wp3 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp3, label=labels, markershape=:auto, title="Between family, low orders") ``` ## Low tolerances ## In-family comparisons 1.IMEX methods (band linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1))] labels = hcat("ARKODE3", "ARKODE4", "ARKODE5") @time wp4 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp4, label=labels, markershape=:auto, title="IMEX methods, band linsolve, medium order") ``` 2.ExpRK methods ```julia abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK4 (caching)", "HochOst4 (caching)")#,"ETDRK4 (m=5)" "ETDRK3 (m=5)" "HochOst4 (m=5)") @time wp5 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp5, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers)] labels = hcat("ARKODE (nondiagonal linsolve)", "ETDRK3 ()", "ETDRK4 ()") @time wp6 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp6, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/ks_fdm_wpd.jmd
docs
9466
--- title: KS FDM Work-Precision Diagrams author: HAO HAO --- ```julia using ApproxFun, OrdinaryDiffEq, Sundials using DiffEqDevTools using LinearAlgebra using Plots; gr() ``` Here is the KS equation using FDM. ```julia # Define the linear and nonlinear terms function lin_term(N) #which is -(D2+D4) dx = 1/(N + 1) d2 = (-2) * ones(N) # main diagonal du2 = ones(N - 1) # off diagonal d4 = 6 * ones(N) # main diagonal du4 = (-4) * ones(N - 1) # off diagonal duu4 = ones(N - 2) DiffEqArrayOperator(-0.0004*((1/dx^2) * diagm(-1 => du2, 0 => d2, 1 => du2) +(1/dx^4) * diagm(-2 => duu4, -1 => du4, 0 => d4, 1 => du4, 2 => duu4))) end function nl_term(N) dx = 1/(N + 1) du = ones(N - 1) # super diagonal dl = -ones(N - 1) # lower diagonal D = (-0.2/(4*dx)) * diagm(-1 => dl, 1 => du) tmp = zeros(N) function (du,u,p,t) @. tmp = u^2 mul!(du, D, tmp) end end # Construct the problem function ks(N) f1 = lin_term(N) f2 = nl_term(N) dx = 1 / (N + 1) xs = (1:N) * dx μ0 = 0.3; σ0 = 0.05 f0 = x -> 0.6*exp(-(x - μ0)^2 / (2 * σ0^2)) u0 = f0.(xs) prob = SplitODEProblem(f1, f2, u0, (0.0, 1.0)) xs, prob end; ``` Reference solution using RadauIIA5 is below: ```julia xs, prob = ks(200) sol = solve(prob, RadauIIA5(autodiff=false); abstol=1e-14, reltol=1e-14) test_sol = TestSolution(sol); tslices = [0.0 0.25 0.50 0.75 1.] ys = hcat((sol(t) for t in tslices)...) labels = ["t = $t" for t in tslices] plot(xs, ys, label=labels) ``` Linear solvers ```julia const LS_Dense = LinSolveFactorize(lu) ``` ## High tolerances ## In-family comparisons 1.IMEX methods (dense linear solver) ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => IMEXEuler(), :dts => 1e-3 * multipliers), Dict(:alg => CNAB2(), :dts => 1e-4 * multipliers), Dict(:alg => CNLF2(), :dts => 1e-4 * multipliers), Dict(:alg => SBDF2(), :dts => 1e-3 * multipliers)] labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, low order") ``` 1.IMEX methods (Krylov linear solver) ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => IMEXEuler(linsolve=LinSolveGMRES()), :dts => 1e-9 * multipliers), Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-9 * multipliers), Dict(:alg => CNLF2(linsolve=LinSolveGMRES()), :dts => 1e-9 * multipliers), Dict(:alg => SBDF2(linsolve=LinSolveGMRES()), :dts => 1e-9 * multipliers)] labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, low order") ``` 2. 
ExpRK methods ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers), Dict(:alg => NorsettEuler(krylov=true, m=5), :dts => 1e-3 * multipliers), Dict(:alg => NorsettEuler(krylov=true, m=20), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK2(krylov=true, m=20), :dts => 1e-2 * multipliers)] labels = hcat("NorsettEuler (caching)", "NorsettEuler (m=5)", "NorsettEuler (m=20)", "ETDRK2 (caching)", "ETDRK2 (m=5)", "ETDRK2 (m=20)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="ExpRK methods, low order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => CNAB2(), :dts => 1e-4 * multipliers), Dict(:alg => CNAB2(linsolve=LinSolveGMRES()), :dts => 1e-9 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-3 * multipliers)] labels = ["CNAB2 (dense linsolve)" "CNAB2 (Krylov linsolve)" "ETDRK2 (caching)"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="Between family, low orders") ``` ## Low tolerances ## In-family comparisons 1.IMEX methods (dense linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => KenCarp3()), Dict(:alg => KenCarp4()), Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Dense)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Dense)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense))] labels = hcat("KenCarp3", "KenCarp4", "KenCarp5", "ARKODE3", "ARKODE4", "ARKODE5") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, dense linsolve, medium order") ``` 1.IMEX methods (Krylov linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => KenCarp3(linsolve=LinSolveGMRES())), Dict(:alg => KenCarp4(linsolve=LinSolveGMRES())), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:GMRES)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:GMRES)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES))] labels = ["KenCarp3" "KenCarp4" "KenCarp5" "ARKODE3" "ARKODE4" "ARKODE5"] @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="IMEX methods, Krylov linsolve, medium order") ``` 2.ExpRK methods ```julia abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => 
ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK3 (m=5)", "ETDRK4 (caching)", "ETDRK4 (m=5)", "HochOst4 (caching)", "HochOst4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => KenCarp5()), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Dense)), Dict(:alg => KenCarp5(linsolve=LinSolveGMRES())), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:GMRES)), Dict(:alg => ETDRK3(krylov=true, m=5), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(krylov=true, m=5), :dts => 1e-2 * multipliers)] labels = hcat("KenCarp5 (dense linsolve)", "ARKODE (dense linsolve)", "KenCarp5 (Krylov linsolve)", "ARKODE (Krylov linsolve)", "ETDRK3 (m=5)", "ETDRK4 (m=5)") @time wp = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
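The accuracy of the difference stencils assembled in `lin_term` can be sanity-checked in isolation. The following sketch applies the same centered second-difference construction to a smooth test function; `N = 100` and `sin(pi*x)` are assumptions chosen for illustration (the test function vanishes at both boundaries, matching the homogeneous stencil):

```julia
# Sketch: verify the centered second-difference operator used in `lin_term`
# against the analytic second derivative of sin(pi*x).
using LinearAlgebra

N = 100
dx = 1 / (N + 1)
xs = (1:N) * dx
D2 = (1 / dx^2) * diagm(-1 => ones(N - 1), 0 => -2 .* ones(N), 1 => ones(N - 1))

u = sin.(pi .* xs)                        # vanishes at x = 0 and x = 1
err = maximum(abs.(D2 * u .- (-pi^2) .* u))
println("max error = $err (should scale like dx^2)")
```

Halving `dx` should shrink the error by roughly a factor of four, confirming the second-order accuracy of the stencil.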
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MOLPDE/ks_spectral_wpd.jmd
docs
6154
--- title: KS Pseudospectral Methods Work-Precision Diagrams author: HAO HAO --- ```julia using ApproxFun, OrdinaryDiffEq, Sundials using DiffEqDevTools using LinearAlgebra using Plots; gr() ``` Here is the Kuramoto-Sivashinsky equation using Fourier spectral methods. ```julia S = Fourier() n = 512 x = points(S, n) D2 = Derivative(S,2)[1:n,1:n] D = (Derivative(S) → S)[1:n,1:n] T = ApproxFun.plan_transform(S, n) Ti = ApproxFun.plan_itransform(S, n) D4 = Derivative(S,4)[1:n,1:n] û₀ = T*(cos.(x./16).*(1 .+ sin.(x./2.04))) tmp=similar(û₀) q = (D,T,Ti,tmp,similar(tmp),similar(tmp)) function kuramoto_sivashinsky(dû,û,q,t) D,T,Ti,tmp,u,uc = q mul!(u, D, û) mul!(tmp, Ti, u) mul!(u, Ti, û) @. tmp=tmp*u mul!(u,T, tmp) #mul!(uc, D2, û) @. dû = - u end ``` Reference solution using RadauIIA5 is below: ```julia prob = SplitODEProblem(DiffEqArrayOperator(-Diagonal(D4+D2)), kuramoto_sivashinsky, û₀, (0.0,5.0), q) sol = solve(prob,RadauIIA5(autodiff=false); reltol=1e-14,abstol=1e-14) test_sol = TestSolution(sol) tslices=[0.0 1.0 2.0 3.0 5.0] ys=hcat((Ti*sol(t) for t in tslices)...) labels=["t=$t" for t in tslices] plot(x,ys,label=labels) ``` ## High tolerances ```julia diag_linsolve=LinSolveFactorize(W->let tmp = tmp for i in 1:size(W, 1) tmp[i] = W[i, i] end Diagonal(tmp) end) ``` ## In-family comparisons 1.IMEX methods (diagonal linear solver) ```julia abstols = 0.1 .^ (5:8) reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => IMEXEuler(linsolve=diag_linsolve), :dts => 1e-3 * multipliers), Dict(:alg => CNAB2(linsolve=diag_linsolve), :dts => 5e-3 * multipliers), Dict(:alg => CNLF2(linsolve=diag_linsolve), :dts => 5e-3 * multipliers), Dict(:alg => SBDF2(linsolve=diag_linsolve), :dts => 1e-3 * multipliers)] labels = ["IMEXEuler" "CNAB2" "CNLF2" "SBDF2"] @time wp1 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true,names=labels, numruns=5,seconds=5, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)); plot(wp1,label=labels,markershape=:auto,title="IMEX methods, diagonal linsolve, low order") ``` 2. 
ExpRK methods ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => NorsettEuler(), :dts => 1e-3 * multipliers), Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers)] labels = hcat("NorsettEuler (caching)", "ETDRK2 (caching)") @time wp2 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp2, label=labels, markershape=:auto, title="ExpRK methods, low order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (5:8) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (1:4) multipliers = 0.5 .^ (0:3) setups = [Dict(:alg => CNAB2(linsolve=diag_linsolve), :dts => 5e-3 * multipliers) Dict(:alg => ETDRK2(), :dts => 1e-2 * multipliers)] labels = ["CNAB2 (diagonal linsolve)" "ETDRK2"] @time wp3 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp3, label=labels, markershape=:auto, title="Between family, low orders") ``` ## Low tolerances ## In-family comparisons 1.IMEX methods (band linear solver) ```julia abstols = 0.1 .^ (7:13) reltols = 0.1 .^ (4:10) setups = [Dict(:alg => ARKODE(Sundials.Implicit(), order=3, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=4, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1))] labels = hcat("ARKODE3", "ARKODE4", "ARKODE5") @time wp4 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp4, label=labels, markershape=:auto, title="IMEX methods, band linsolve, medium order") ``` 2.ExpRK methods ```julia abstols = 0.1 .^ (7:11) # all fixed dt methods so these don't matter much reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers), Dict(:alg => HochOst4(), :dts => 1e-2 * multipliers)] labels = hcat("ETDRK3 (caching)", "ETDRK4 (caching)", "HochOst4 (caching)") @time wp5 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp5, label=labels, markershape=:auto, title="ExpRK methods, medium order") ``` ## Between family comparisons ```julia abstols = 0.1 .^ (7:11) reltols = 0.1 .^ (4:8) multipliers = 0.5 .^ (0:4) setups = [Dict(:alg => ARKODE(Sundials.Implicit(), order=5, linear_solver=:Band, jac_upper=1, jac_lower=1)), Dict(:alg => ETDRK3(), :dts => 1e-2 * multipliers), Dict(:alg => ETDRK4(), :dts => 1e-2 * multipliers)] labels = hcat("ARKODE (nondiagonal linsolve)", "ETDRK3 (caching)", "ETDRK4 (caching)") @time wp6 = WorkPrecisionSet(prob,abstols,reltols,setups; print_names=true, names=labels, numruns=5, error_estimate=:l2, save_everystep=false, appxsol=test_sol, maxiters=Int(1e5)); plot(wp6, label=labels, markershape=:auto, title="Between family, medium order") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
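The `diag_linsolve` trick above works because the stiff operator is diagonal in Fourier space, so the `W = I - γJ` matrix arising in the implicit stages can be treated as a `Diagonal` and each solve costs O(n). A minimal sketch of why the diagonal solve agrees with the dense one; the size `n` and shift `γ` below are arbitrary placeholder assumptions:

```julia
# Sketch: for a Diagonal Jacobian, solving with just the diagonal of W is exact.
using LinearAlgebra

n = 8
J = Diagonal(-(1.0:n) .^ 2)     # stand-in for the Fourier-diagonal operator
γ = 0.1                          # placeholder implicit-step shift
W = I - γ * J                    # the matrix factorized inside the IMEX solver
b = ones(n)

x = Diagonal(diag(W)) \ b        # the O(n) solve `diag_linsolve` performs
@assert x ≈ Matrix(W) \ b        # agrees with the dense solve
println("diagonal and dense solves agree")
```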
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MethodOfLinesPDE/MOL_fdm.jmd
docs
5035
--- title: Burgers FDM Work-Precision Diagrams with Various MethodOfLines Methods author: Alex Jones --- This benchmark is for MethodOfLines.jl, an automatic PDE discretization package. It compares the performance of various discretization methods for the Burgers equation. ```julia using MethodOfLines, DomainSets, OrdinaryDiffEq, ModelingToolkit, DiffEqDevTools, LinearAlgebra, LinearSolve, Plots ``` Here is the Burgers equation with Dirichlet and Neumann boundary conditions: ```julia # pdesys1 has Dirichlet BCs, pdesys2 has Neumann BCs const N = 30 @parameters x t @variables u(..) Dx = Differential(x) Dt = Differential(t) x_min = 0.0 x_max = 1.0 t_min = 0.0 t_max = 20.0 solver = FBDF() analytic_u(p, t, x) = x / (t + 1) analytic = [u(t, x) ~ analytic_u([], t, x)] eq = Dt(u(t, x)) ~ -u(t, x) * Dx(u(t, x)) bcs1 = [u(0, x) ~ x, u(t, x_min) ~ analytic_u([], t, x_min), u(t, x_max) ~ analytic_u([], t, x_max)] bcs2 = [u(0, x) ~ x, Dx(u(t, x_min)) ~ 1 / (t + 1), Dx(u(t, x_max)) ~ 1 / (t + 1)] domains = [t ∈ Interval(t_min, t_max), x ∈ Interval(x_min, x_max)] @named pdesys1 = PDESystem(eq, bcs1, domains, [t, x], [u(t, x)], analytic=analytic) @named pdesys2 = PDESystem(eq, bcs2, domains, [t, x], [u(t, x)], analytic=analytic) ``` Here is a uniform discretization with the Upwind scheme: ```julia discupwind1 = MOLFiniteDifference([x => N], t, advection_scheme=UpwindScheme()) discupwind2 = MOLFiniteDifference([x => N-1], t, advection_scheme=UpwindScheme(), grid_align=edge_align) ``` Here is a uniform discretization with the WENO scheme: ```julia discweno1 = MOLFiniteDifference([x => N], t, advection_scheme=WENOScheme()) discweno2 = MOLFiniteDifference([x => N-1], t, advection_scheme=WENOScheme(), grid_align=edge_align) ``` Here is a non-uniform discretization with the Upwind scheme, using a Chebyshev-spaced grid (nonuniform WENO is not implemented yet): ```julia gridnu1 = chebyspace(domains[2], N) gridnu2 = chebyspace(domains[2], N-1) discnu1 = MOLFiniteDifference([x => gridnu1], t, advection_scheme=UpwindScheme()) discnu2 = MOLFiniteDifference([x => gridnu2], t, advection_scheme=UpwindScheme(), grid_align=edge_align) ``` Here are the problems for pdesys1: ```julia probupwind1 = discretize(pdesys1, discupwind1; analytic=pdesys1.analytic_func) probupwind2 = discretize(pdesys1, discupwind2; analytic=pdesys1.analytic_func) probweno1 = discretize(pdesys1, discweno1; analytic=pdesys1.analytic_func) probweno2 = discretize(pdesys1, discweno2; analytic=pdesys1.analytic_func) probnu1 = discretize(pdesys1, discnu1; analytic=pdesys1.analytic_func) probnu2 = discretize(pdesys1, discnu2; analytic=pdesys1.analytic_func) probs1 = [probupwind1, probupwind2, probnu1, probnu2, probweno1, probweno2] ``` ## Work-Precision Plot for Burgers Equation, Dirichlet BCs ```julia dummy_appxsol = [nothing for i in 1:length(probs1)] abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg => solver, :prob_choice => 1), Dict(:alg => solver, :prob_choice => 2), Dict(:alg => solver, :prob_choice => 3), Dict(:alg => solver, :prob_choice => 4), Dict(:alg => solver, :prob_choice => 5), Dict(:alg => solver, :prob_choice => 6),] names = ["Uniform Upwind, center_align", "Uniform Upwind, edge_align", "Nonuniform Upwind, center_align", "Nonuniform Upwind, edge_align", "WENO, center_align", "WENO, edge_align"]; wp = WorkPrecisionSet(probs1, abstols, reltols, setups; names=names, save_everystep=false, appxsol = dummy_appxsol, maxiters=Int(1e5), numruns=10, wrap=Val(false)) plot(wp) ``` Here are the 
problems for pdesys2: ```julia probupwind1 = discretize(pdesys2, discupwind1; analytic=pdesys2.analytic_func) probupwind2 = discretize(pdesys2, discupwind2; analytic=pdesys2.analytic_func) probweno1 = discretize(pdesys2, discweno1; analytic=pdesys2.analytic_func) probweno2 = discretize(pdesys2, discweno2; analytic=pdesys2.analytic_func) probnu1 = discretize(pdesys2, discnu1; analytic=pdesys2.analytic_func) probnu2 = discretize(pdesys2, discnu2; analytic=pdesys2.analytic_func) probs2 = [probupwind1, probupwind2, probnu1, probnu2, probweno1, probweno2] ``` ## Work-Precision Plot for Burgers Equation, Neumann BCs ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg => solver, :prob_choice => 1), Dict(:alg => solver, :prob_choice => 2), Dict(:alg => solver, :prob_choice => 3), Dict(:alg => solver, :prob_choice => 4), Dict(:alg => solver, :prob_choice => 5), Dict(:alg => solver, :prob_choice => 6),] names = ["Uniform Upwind, center_align", "Uniform Upwind, edge_align", "Nonuniform Upwind, center_align", "Nonuniform Upwind, edge_align", "WENO, center_align", "WENO, edge_align"]; wp = WorkPrecisionSet(probs2, abstols, reltols, setups; names=names, save_everystep=false, maxiters=Int(1e5), numruns=10, wrap=Val(false)) plot(wp) ```
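For a spot check outside of the work-precision machinery, one of the discretized problems can be solved directly and compared against the analytic solution. The tolerances below and the symbolic-indexing calls (`sol[x]`, `sol[u(t, x)]`) are assumptions based on the usual MethodOfLines solution interface; treat this as an illustrative sketch rather than part of the benchmark.

```julia
# Sketch (reuses probweno1, analytic_u, u, t, x from above): direct solve of
# one discretization, checked against the analytic solution at the final time.
sol = solve(probweno1, FBDF(); abstol=1e-8, reltol=1e-6)
xgrid = sol[x]                               # discrete grid (assumed interface)
u_num = sol[u(t, x)][end, :]                 # solution at the last saved time
u_exact = [analytic_u([], sol.t[end], xi) for xi in xgrid]
println("max abs error: ", maximum(abs.(u_num .- u_exact)))
```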
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MultiLanguage/ode_wrapper_packages.jmd
docs
10044
--- title: ODE Solver Multi-Language Wrapper Package Work-Precision Benchmarks (MATLAB, SciPy, Julia, deSolve (R)) author: Chris Rackauckas --- The following benchmarks demonstrate the performance differences due to using similar algorithms from wrapper packages in the main scripting languages across a range of stiff and non-stiff ODEs. Solver time and error are both taken into account in order to ensure that the interpretations are correct. These were run with Julia 1.7, MATLAB 2019B, deSolve 1.3.0, and SciPy 1.6.1. These benchmarks are generated using the following bindings: - [MATLABDiffEq.jl](https://github.com/JuliaDiffEq/MATLABDiffEq.jl) (MATLAB) - [SciPyDiffEq.jl](https://github.com/JuliaDiffEq/SciPyDiffEq.jl) (SciPy) - [deSolveDiffEq.jl](https://github.com/JuliaDiffEq/deSolveDiffEq.jl) (deSolve) - [OrdinaryDiffEq.jl](https://github.com/JuliaDiffEq/OrdinaryDiffEq.jl) (OrdinaryDiffEq.jl) - [Sundials.jl](https://github.com/JuliaDiffEq/Sundials.jl) (Sundials) - [ODEInterfaceDiffEq.jl](https://github.com/JuliaDiffEq/ODEInterfaceDiffEq.jl) (Hairer and Netlib) The respective repos verify the interop overhead: MATLAB, ODEInterface, and Sundials overhead is negligible, SciPy is accelerated 3x over SciPy+Numba setups due to the Julia JIT on the ODE function, and deSolve sees a 3x overhead over the pure-R version. Error and timing are compared together to ensure the methods are solving to the same accuracy. More wrappers will continue to be added as necessary. ## Setup ```julia using ParameterizedFunctions, MATLABDiffEq, OrdinaryDiffEq, ODEInterfaceDiffEq, Plots, Sundials, SciPyDiffEq, deSolveDiffEq using DiffEqDevTools using LinearAlgebra, StaticArrays ``` #### Non-Stiff Problem 1: Lotka-Volterra ```julia f = @ode_def_bare LotkaVolterra begin dx = a*x - b*x*y dy = -c*y + d*x*y end a b c d p = [1.5,1,3,1] tspan = (0.0,10.0) u0 = [1.0,1.0] prob = ODEProblem(f,u0,tspan,p) staticprob = ODEProblem{false}(f,SVector{2}(u0),tspan,SVector{4}(p)) sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) test_sol = TestSolution(sol) setups = [ Dict(:alg=>DP5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern7()) Dict(:prob_choice => 2, :alg=>DP5()) Dict(:prob_choice => 2, :alg=>Tsit5()) Dict(:prob_choice => 2, :alg=>Vern7()) Dict(:alg=>dopri5()) Dict(:alg=>MATLABDiffEq.ode45()) Dict(:alg=>MATLABDiffEq.ode113()) Dict(:alg=>SciPyDiffEq.RK45()) Dict(:alg=>SciPyDiffEq.LSODA()) Dict(:alg=>SciPyDiffEq.odeint()) Dict(:alg=>deSolveDiffEq.lsoda()) Dict(:alg=>deSolveDiffEq.ode45()) Dict(:alg=>CVODE_Adams()) ] labels = [ "Julia: DP5" "Julia: Tsit5" "Julia: Vern7" "Julia: DP5 Static" "Julia: Tsit5 Static" "Julia: Vern7 Static" "Hairer: dopri5" "MATLAB: ode45" "MATLAB: ode113" "SciPy: RK45" "SciPy: LSODA" "SciPy: odeint" "deSolve: lsoda" "deSolve: ode45" "Sundials: Adams" ] abstols = 1.0 ./ 10.0 .^ (6:13) reltols = 1.0 ./ 10.0 .^ (3:10) wp = WorkPrecisionSet([prob,staticprob],abstols,reltols,setups; names = labels,print_names = true, appxsol=[test_sol,test_sol],dense=false, save_everystep=false,numruns=100,maxiters=10000000, timeseries_errors=false,verbose=false) plot(wp,title="Non-stiff 1: Lotka-Volterra",legend=:outertopleft, color=permutedims([repeat([:LightGreen],3)...,repeat([:DarkGreen],3)..., :Red,repeat([:Orange],2)...,repeat([:Yellow],3)..., repeat([:Blue],2)...,:Purple]),size = (800,350), xticks = 10.0 .^ (-12:1:5), yticks = 10.0 .^ (-6:0.5:5), bottom_margin=5Plots.mm) ``` #### Non-Stiff Problem 2: Rigid Body ```julia f = @ode_def_bare RigidBodyBench begin dy1 = -2*y2*y3 dy2 = 1.25*y1*y3 dy3 = -0.5*y1*y2 + 
0.25*sin(t)^2 end u0 = [1.0;0.0;0.9] prob = ODEProblem(f,u0,(0.0,100.0)) staticprob = ODEProblem{false}(f,SVector{3}(u0),(0.0,100.0)) sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) test_sol = TestSolution(sol) setups = [Dict(:alg=>DP5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern7()) Dict(:prob_choice => 2, :alg=>DP5()) Dict(:prob_choice => 2, :alg=>Tsit5()) Dict(:prob_choice => 2, :alg=>Vern7()) Dict(:alg=>dopri5()) Dict(:alg=>MATLABDiffEq.ode45()) Dict(:alg=>MATLABDiffEq.ode113()) Dict(:alg=>SciPyDiffEq.RK45()) Dict(:alg=>SciPyDiffEq.LSODA()) Dict(:alg=>SciPyDiffEq.odeint()) Dict(:alg=>deSolveDiffEq.lsoda()) Dict(:alg=>deSolveDiffEq.ode45()) Dict(:alg=>CVODE_Adams()) ] labels = [ "Julia: DP5" "Julia: Tsit5" "Julia: Vern7" "Julia: DP5 Static" "Julia: Tsit5 Static" "Julia: Vern7 Static" "Hairer: dopri5" "MATLAB: ode45" "MATLAB: ode113" "SciPy: RK45" "SciPy: LSODA" "SciPy: odeint" "deSolve: lsoda" "deSolve: ode45" "Sundials: Adams" ] abstols = 1.0 ./ 10.0 .^ (6:13) reltols = 1.0 ./ 10.0 .^ (3:10) wp = WorkPrecisionSet([prob,staticprob],abstols,reltols,setups; names = labels,print_names = true, appxsol=[test_sol,test_sol],dense=false, save_everystep=false,numruns=100,maxiters=10000000, timeseries_errors=false,verbose=false) plot(wp,title="Non-stiff 2: Rigid-Body",legend=:outertopleft, color=permutedims([repeat([:LightGreen],3)...,repeat([:DarkGreen],3)..., :Red,repeat([:Orange],2)...,repeat([:Yellow],3)..., repeat([:Blue],2)...,:Purple]),size = (800,350), xticks = 10.0 .^ (-12:1:5), yticks = 10.0 .^ (-6:0.5:5), bottom_margin=5Plots.mm) ``` #### Stiff Problem 1: ROBER ```julia rober = @ode_def begin dy₁ = -k₁*y₁+k₃*y₂*y₃ dy₂ = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ dy₃ = k₂*y₂^2 end k₁ k₂ k₃ u0 = [1.0,0.0,0.0] p = [0.04,3e7,1e4] prob = ODEProblem(rober,u0,(0.0,1e5),p) staticprob = ODEProblem{false}(rober,SVector{3}(u0),(0.0,1e5),SVector{3}(p)) sol = solve(prob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14) test_sol = TestSolution(sol) abstols = 1.0 ./ 10.0 .^ (7:12) reltols = 1.0 ./ 10.0 .^ (3:8); setups = [Dict(:alg=>Rosenbrock23()) Dict(:alg=>Rodas4()) Dict(:alg=>Rodas5()) Dict(:prob_choice => 2, :alg=>Rosenbrock23()) Dict(:prob_choice => 2, :alg=>Rodas4()) Dict(:prob_choice => 2, :alg=>Rodas5()) Dict(:alg=>rodas()) Dict(:alg=>radau()) Dict(:alg=>MATLABDiffEq.ode23s()) Dict(:alg=>MATLABDiffEq.ode15s()) Dict(:alg=>SciPyDiffEq.LSODA()) Dict(:alg=>SciPyDiffEq.BDF()) Dict(:alg=>SciPyDiffEq.odeint()) Dict(:alg=>deSolveDiffEq.lsoda()) Dict(:alg=>CVODE_BDF()) ] labels = [ "Julia: Rosenbrock23" "Julia: Rodas4" "Julia: Rodas5" "Julia: Rosenbrock23 Static" "Julia: Rodas4 Static" "Julia: Rodas5 Static" "Hairer: rodas" "Hairer: radau" "MATLAB: ode23s" "MATLAB: ode15s" "SciPy: LSODA" "SciPy: BDF" "SciPy: odeint" "deSolve: lsoda" "Sundials: CVODE" ] wp = WorkPrecisionSet([prob,staticprob],abstols,reltols,setups; names = labels,print_names = true, dense=false,verbose = false, save_everystep=false,appxsol=[test_sol,test_sol], maxiters=Int(1e5)) plot(wp,title="Stiff 1: ROBER", legend=:outertopleft, color=permutedims([repeat([:LightGreen],3)...,repeat([:DarkGreen],3)..., :Red,:Red,repeat([:Orange],2)...,repeat([:Yellow],3)..., repeat([:Blue],1)...,:Purple]),size = (800,350), xticks = 10.0 .^ (-12:1:5), yticks = 10.0 .^ (-6:0.5:5), bottom_margin=5Plots.mm) ``` #### Stiff Problem 2: HIRES ```julia f = @ode_def Hires begin dy1 = -1.71*y1 + 0.43*y2 + 8.32*y3 + 0.0007 dy2 = 1.71*y1 - 8.75*y2 dy3 = -10.03*y3 + 0.43*y4 + 0.035*y5 dy4 = 8.32*y2 + 1.71*y3 - 1.12*y4 dy5 = -1.745*y5 + 0.43*y6 + 0.43*y7 dy6 = -280.0*y6*y8 + 0.69*y4 + 
1.71*y5 - 0.43*y6 + 0.69*y7 dy7 = 280.0*y6*y8 - 1.81*y7 dy8 = -280.0*y6*y8 + 1.81*y7 end u0 = zeros(8) u0[1] = 1 u0[8] = 0.0057 prob = ODEProblem(f,u0,(0.0,321.8122)) staticprob = ODEProblem{false}(f,SVector{8}(u0),(0.0,321.8122)) sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14) test_sol = TestSolution(sol) abstols = 1.0 ./ 10.0 .^ (5:10) reltols = 1.0 ./ 10.0 .^ (1:6); setups = [Dict(:alg=>Rosenbrock23()) Dict(:alg=>Rodas4()) Dict(:alg=>RadauIIA5()) Dict(:prob_choice => 2, :alg=>Rosenbrock23()) Dict(:prob_choice => 2, :alg=>Rodas4()) Dict(:prob_choice => 2, :alg=>RadauIIA5()) Dict(:alg=>rodas()) Dict(:alg=>radau()) Dict(:alg=>MATLABDiffEq.ode23s()) Dict(:alg=>MATLABDiffEq.ode15s()) Dict(:alg=>SciPyDiffEq.LSODA()) Dict(:alg=>SciPyDiffEq.BDF()) Dict(:alg=>SciPyDiffEq.odeint()) Dict(:alg=>deSolveDiffEq.lsoda()) Dict(:alg=>CVODE_BDF()) ] labels = [ "Julia: Rosenbrock23" "Julia: Rodas4" "Julia: RadauIIA5" "Julia: Rosenbrock23 Static" "Julia: Rodas4 Static" "Julia: RadauIIA5 Static" "Hairer: rodas" "Hairer: radau" "MATLAB: ode23s" "MATLAB: ode15s" "SciPy: LSODA" "SciPy: BDF" "SciPy: odeint" "deSolve: lsoda" "Sundials: CVODE" ] wp = WorkPrecisionSet([prob,staticprob],abstols,reltols,setups; names = labels,print_names = true, dense=false,verbose = false, save_everystep=false,appxsol=[test_sol,test_sol], maxiters=Int(1e5),numruns=100) plot(wp,title="Stiff 2: Hires",legend=:outertopleft, color=permutedims([repeat([:LightGreen],3)...,repeat([:DarkGreen],3)..., :Red,:Red,repeat([:Orange],2)...,repeat([:Yellow],3)..., repeat([:Blue],1)...,:Purple]),size = (800,350), xticks = 10.0 .^ (-12:1:5), yticks = 10.0 .^ (-6:0.5:5), bottom_margin=5Plots.mm) ```
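The `prob_choice => 2` entries throughout these benchmarks select the `StaticArrays`-based problems. A minimal sketch of that pattern in isolation; the Lotka-Volterra right-hand side below is written out by hand rather than with `@ode_def_bare`, purely for illustration:

```julia
# Sketch: out-of-place RHS returning an SVector, which is what the "Static"
# entries in the work-precision plots above use.
using OrdinaryDiffEq, StaticArrays

lv(u, p, t) = SA[p[1]*u[1] - p[2]*u[1]*u[2],
                 -p[3]*u[2] + p[4]*u[1]*u[2]]
p = SA[1.5, 1.0, 3.0, 1.0]
staticprob = ODEProblem{false}(lv, SA[1.0, 1.0], (0.0, 10.0), p)
sol = solve(staticprob, Tsit5(); save_everystep=false)
println("final state: ", sol.u[end])
```

For small fixed-size systems like these, the static version avoids heap allocation inside the RHS, which is where most of the "Static" speedup in the plots comes from.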
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/MultiLanguage/special_benchmarks.jmd
docs
1647
--- title: Special Multi-Language Differential Equation Solver Comparison Benchmarks author: Chris Rackauckas --- The following are extra multi-language benchmarks which are harder to fit into the standard SciMLBenchmarks format. As such, they are less complete: they generally do not make use of work-precision diagrams, but they give information about the general status of performance development. - [Torchdiffeq vs DifferentialEquations.jl (/ DiffEqFlux.jl) Benchmarks](https://gist.github.com/ChrisRackauckas/cc6ac746e2dfd285c28e0584a2bfd320) - [torchdiffeq vs Julia DiffEqFlux Neural ODE Training Benchmark](https://gist.github.com/ChrisRackauckas/4a4d526c15cc4170ce37da837bfc32c4) - [torchsde vs DifferentialEquations.jl / DiffEqFlux.jl](https://gist.github.com/ChrisRackauckas/6a03e7b151c86b32d74b41af54d495c6) - [JITCODE vs SciPy vs DifferentialEquations.jl on large network dynamics](https://github.com/PIK-ICoN/NetworkDynamicsBenchmarks) - [DifferentialEquations.jl vs MuJoCo and DiffTaichi](https://arxiv.org/abs/2012.06684) - [DiffEqFlux.jl / DifferentialEquations.jl vs Jax on an epidemic model](https://gist.github.com/ChrisRackauckas/62a063f23cccf3a55a4ac9f6e497739a) - [DifferentialEquations.jl vs SciPy vs NumbaLSODA on a stiff ODE](https://gist.github.com/ChrisRackauckas/fd62e005c4c86520306338b6bdae6b79) - [DifferentialEquations.jl vs SciPy vs NumbaLSODA](https://github.com/Nicholaswogan/NumbaLSODA/tree/main/benchmark) - [Brusselator Stiff Partial Differential Equation Benchmark: Julia DifferentialEquations.jl vs Python SciPy](https://gist.github.com/ChrisRackauckas/0bdbea0079a8a3ce28522e9bc8473bf0)
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NBodySimulator/acceleration_functions.jmd
docs
8002
--- title: Acceleration function benchmarks author: Sebastian Micluța-Câmpeanu, Mikhail Vaganov --- Solving the equations of motion for an N-body problem implies solving a (large) system of differential equations. In `DifferentialEquations.jl` these are represented through ODE or SDE problems. To build the problem we need a function that describes the equations. In the case of N-body problems, this function gives the accelerations for the particles in the system. Here we will test the performance of several acceleration functions used in N-body simulations. The systems that will be used are not necessarily realistic, since we are not solving the problem; we just time how fast an acceleration function call is. ```julia using BenchmarkTools, NBodySimulator using NBodySimulator: gather_bodies_initial_coordinates, gather_accelerations_for_potentials, gather_simultaneous_acceleration, gather_group_accelerations using StaticArrays const SUITE = BenchmarkGroup(); function acceleration(simulation) (u0, v0, n) = gather_bodies_initial_coordinates(simulation) acceleration_functions = gather_accelerations_for_potentials(simulation) simultaneous_acceleration = gather_simultaneous_acceleration(simulation) function soode_system!(dv, v, u, p, t) @inbounds for i = 1:n a = MVector(0.0, 0.0, 0.0) for acceleration! in acceleration_functions acceleration!(a, u, v, t, i); end dv[:, i] .= a end for acceleration! in simultaneous_acceleration acceleration!(dv, u, v, t); end end return soode_system! end ``` ## Gravitational potential ```julia let SUITE=SUITE G = 6.67e-11 # m^3/kg/s^2 N = 200 # number of bodies/particles m = 1.0 # mass of each of them v = 10.0 # mean velocity L = 20.0 # size of the cell side bodies = generate_bodies_in_cell_nodes(N, m, v, L) g_parameters = GravitationalParameters(G) system = PotentialNBodySystem(bodies, Dict(:gravitational => g_parameters)) tspan = (0.0, 1.0) simulation = NBodySimulation(system, tspan) f = acceleration(simulation) u0, v0, n = gather_bodies_initial_coordinates(simulation) dv = zero(v0) b = @benchmarkable $f(dv, $v0, $u0, $g_parameters, 0.) setup=(dv=zero($v0)) evals=1 SUITE["gravitational"] = b end ``` ## Coulomb potential ```julia let SUITE=SUITE n = 200 bodies = ChargedParticle[] L = 20.0 m = 1.0 q = 1.0 count = 1 dL = L / (ceil(n^(1 / 3)) + 1) for x = dL / 2:dL:L, y = dL / 2:dL:L, z = dL / 2:dL:L if count > n break end r = SVector(x, y, z) v = SVector(.0, .0, .0) body = ChargedParticle(r, v, m, q) push!(bodies, body) count += 1 end k = 9e9 τ = 0.01 * dL / sqrt(2 * k * q * q / (dL * m)) t1 = 0.0 t2 = 1000 * τ potential = ElectrostaticParameters(k, 0.45 * L) system = PotentialNBodySystem(bodies, Dict(:electrostatic => potential)) pbc = CubicPeriodicBoundaryConditions(L) simulation = NBodySimulation(system, (t1, t2), pbc) f = acceleration(simulation) u0, v0, n = gather_bodies_initial_coordinates(simulation) dv = zero(v0) b = @benchmarkable $f(dv, $v0, $u0, $potential, 0.) 
setup=(dv=zero($v0)) evals=1 SUITE["coulomb"] = b end ``` ## Magnetic dipole potential ```julia let SUITE=SUITE n = 200 bodies = MagneticParticle[] L = 20.0 m = 1.0 count = 1 dL = L / (ceil(n^(1 / 3)) + 1) for x = dL / 2:dL:L, y = dL / 2:dL:L, z = dL / 2:dL:L if count > n break end r = SVector(x, y, z) v = SVector(.0, .0, .0) mm = rand(SVector{3}) body = MagneticParticle(r, v, m, mm) push!(bodies, body) count += 1 end μ_4π = 1e-7 t1 = 0.0 # s t2 = 1.0 # s τ = (t2 - t1) / 100 parameters = MagnetostaticParameters(μ_4π) system = PotentialNBodySystem(bodies, Dict(:magnetic => parameters)) simulation = NBodySimulation(system, (t1, t2)) f = acceleration(simulation) u0, v0, n = gather_bodies_initial_coordinates(simulation) dv = zero(v0) b = @benchmarkable $f(dv, $v0, $u0, $parameters, 0.) setup=(dv=zero($v0)) evals=1 SUITE["magnetic_dipole"] = b end ``` ## Lennard Jones potential ```julia let SUITE=SUITE T = 120.0 # K T0 = 90.0 # K kb = 8.3144598e-3 # kJ/(K*mol) ϵ = T * kb σ = 0.34 # nm ρ = 1374/1.6747# Da/nm^3 N = 200 m = 39.95# Da = 216 # number of bodies/particles L = (m*N/ρ)^(1/3)#10.229σ R = 0.5*L v_dev = sqrt(kb * T / m) bodies = generate_bodies_in_cell_nodes(N, m, v_dev, L) τ = 0.5e-3 # ps or 1e-12 s t1 = 0.0 t2 = 2000τ lj_parameters = LennardJonesParameters(ϵ, σ, R) lj_system = PotentialNBodySystem(bodies, Dict(:lennard_jones => lj_parameters)); pbc = CubicPeriodicBoundaryConditions(L) simulation = NBodySimulation(lj_system, (t1, t2), pbc, kb) f = acceleration(simulation) u0, v0, n = gather_bodies_initial_coordinates(simulation) dv = zero(v0) b = @benchmarkable $f(dv, $v0, $u0, $lj_parameters, 0.) setup=(dv=zero($v0)) evals=1 SUITE["lennard_jones"] = b end ``` ## WaterSPCFw model ```julia function acceleration(simulation::NBodySimulation{<:WaterSPCFw}) (u0, v0, n) = gather_bodies_initial_coordinates(simulation) (o_acelerations, h_acelerations) = gather_accelerations_for_potentials(simulation) group_accelerations = gather_group_accelerations(simulation) simultaneous_acceleration = gather_simultaneous_acceleration(simulation) function soode_system!(dv, v, u, p, t) @inbounds for i = 1:n a = MVector(0.0, 0.0, 0.0) for acceleration! in o_acelerations acceleration!(a, u, v, t, 3 * (i - 1) + 1); end dv[:, 3 * (i - 1) + 1] .= a end @inbounds for i in 1:n, j in (2, 3) a = MVector(0.0, 0.0, 0.0) for acceleration! in h_acelerations acceleration!(a, u, v, t, 3 * (i - 1) + j); end dv[:, 3 * (i - 1) + j] .= a end @inbounds for i = 1:n for acceleration! in group_accelerations acceleration!(dv, u, v, t, i); end end for acceleration! in simultaneous_acceleration acceleration!(dv, u, v, t); end end return soode_system! 
end let SUITE=SUITE T = 370 # K T0 = 275 # K kb = 8.3144598e-3 # kJ/(K*mol) ϵOO = 0.1554253*4.184 # kJ σOO = 0.3165492 # nm ρ = 997/1.6747# Da/nm^3 mO = 15.999 # Da mH = 1.00794 # Da mH2O = mO+2*mH N = 200 L = (mH2O*N/ρ)^(1/3) R = 0.9 # ~3*σOO Rel = 0.49*L v_dev = sqrt(kb * T /mH2O) τ = 0.5e-3 # ps t1 = 0τ t2 = 5τ # ps k_bond = 1059.162*4.184*1e2 # kJ/(mol*nm^2) k_angle = 75.90*4.184 # kJ/(mol*rad^2) rOH = 0.1012 # nm ∠HOH = 113.24*pi/180 # rad qH = 0.41 qO = -0.82 k = 138.935458 # bodies = generate_bodies_in_cell_nodes(N, mH2O, v_dev, L) jl_parameters = LennardJonesParameters(ϵOO, σOO, R) e_parameters = ElectrostaticParameters(k, Rel) spc_parameters = SPCFwParameters(rOH, ∠HOH, k_bond, k_angle) pbc = CubicPeriodicBoundaryConditions(L) water = WaterSPCFw(bodies, mH, mO, qH, qO, jl_parameters, e_parameters, spc_parameters); simulation = NBodySimulation(water, (t1, t2), pbc, kb); f = acceleration(simulation) u0, v0, n = gather_bodies_initial_coordinates(simulation) dv = zero(v0) b = @benchmarkable $f(dv, $v0, $u0, $spc_parameters, 0.) setup=(dv=zero($v0)) evals=1 SUITE["water_spcfw"] = b end ``` Here are the results of the benchmarks ```julia r = run(SUITE) minimum(r) ``` and ```julia memory(r) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
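All of the suites above follow the same `BenchmarkTools` pattern: a fresh `dv` buffer per sample via `setup`, and `evals=1` so the destination is not reused across evaluations. Here is a self-contained sketch of that pattern; the `f!` below is a placeholder stand-in, not one of the acceleration functions:

```julia
# Sketch of the @benchmarkable pattern used throughout this file.
using BenchmarkTools

f!(dv, v) = (dv .= 2 .* v)              # placeholder for an acceleration call
v0 = rand(3, 10)
b = @benchmarkable f!(dv, $v0) setup=(dv = zero($v0)) evals=1
res = run(b)
println(minimum(res))                   # minimum-time estimate, as reported above
```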
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NBodySimulator/liquid_argon.jmd
docs
9598
--- title: Liquid argon benchmarks author: Sebastian Micluța-Câmpeanu, Mikhail Vaganov --- The purpose of these benchmarks is to compare several integrators for use in molecular dynamics simulation. We will use a simulation of liquid argon from the examples of NBodySimulator as a test case. ```julia using ProgressLogging using NBodySimulator, OrdinaryDiffEq, StaticArrays using Plots, DataFrames, StatsPlots function setup(t) T = 120.0 # K kb = 1.38e-23 # J/K ϵ = T * kb # J σ = 3.4e-10 # m ρ = 1374 # kg/m^3 m = 39.95 * 1.6747 * 1e-27 # kg N = 350 L = (m*N/ρ)^(1/3) R = 3.5σ v_dev = sqrt(kb * T / m) # m/s _L = L / σ _σ = 1.0 _ϵ = 1.0 _m = 1.0 _v = v_dev / sqrt(ϵ / m) _R = R / σ bodies = generate_bodies_in_cell_nodes(N, _m, _v, _L) lj_parameters = LennardJonesParameters(_ϵ, _σ, _R) pbc = CubicPeriodicBoundaryConditions(_L) lj_system = PotentialNBodySystem(bodies, Dict(:lennard_jones => lj_parameters)); simulation = NBodySimulation(lj_system, (0.0, t), pbc, _ϵ/T) return simulation end ``` In order to compare different integration methods we will consider a fixed simulation time and change the timestep (or tolerances in the case of adaptive methods). ```julia function benchmark(energyerr, rts, bytes, allocs, nt, nf, t, configs) simulation = setup(t) prob = SecondOrderODEProblem(simulation) for config in configs alg = config.alg sol, rt, b, gc, memalloc = @timed solve(prob, alg(); save_everystep=false, progress=true, progress_name="$alg", config...) result = NBodySimulator.SimulationResult(sol, simulation) ΔE = total_energy(result, t) - total_energy(result, 0) energyerr[alg] = ΔE rts[alg] = rt bytes[alg] = b allocs[alg] = memalloc nt[alg] = sol.destats.naccept nf[alg] = sol.destats.nf + sol.destats.nf2 end end function run_benchmark!(results, t, integrators, tol...; c=ones(length(integrators))) @progress "Benchmark at t=$t" for τ in zip(tol...) runtime = Dict() ΔE = Dict() nt = Dict() nf = Dict() b = Dict() allocs = Dict() cfg = config(integrators, c, τ...) GC.gc() benchmark(ΔE, runtime, b, allocs, nt, nf, t, cfg) get_tol(idx) = haskey(cfg[idx], :dt) ? cfg[idx].dt : (cfg[idx].abstol, cfg[idx].reltol) for (idx,i) in enumerate(integrators) push!(results, [string(i), runtime[i], get_tol(idx)..., abs(ΔE[i]), nt[i], nf[i], c[idx]]) end end return results end ``` We will consider symplectic integrators first ```julia symplectic_integrators = [ VelocityVerlet, VerletLeapfrog, PseudoVerletLeapfrog, McAte2, CalvoSanz4, McAte5, Yoshida6, KahanLi8, SofSpa10 ]; ``` Since for each method there is a different cost for a timestep, we need to take that into account when choosing the tolerances (`dt`s or `abstol`&`reltol`) for the solvers. This cost was estimated using the commented code below and the results were hardcoded in order to prevent fluctuations in the results between runs due to differences in calibration times. The calibration is based on running a simulation with equal tolerances for all solvers and then computing the cost as the runtime / number of timesteps. The absolute value of the cost is not very relevant, so the cost was normalized to the cost of one `VelocityVerlet` step. 
```julia config(integrators, c, τ) = [ (alg=a, dt=τ*cₐ) for (a,cₐ) in zip(integrators, c)] t = 35.0 τs = 1e-3 # warmup c_symplectic = ones(length(symplectic_integrators)) benchmark(Dict(), Dict(), Dict(), Dict(), Dict(), Dict(), 10., config(symplectic_integrators, c_symplectic, τs)) # results = DataFrame(:integrator=>String[], :runtime=>Float64[], :τ=>Float64[], # :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); # run_benchmark!(results, t, symplectic_integrators, τs) # c_symplectic .= results[!, :runtime] ./ results[!, :timesteps] # c_Verlet = c_symplectic[1] # c_symplectic /= c_Verlet c_symplectic = [ 1.00, # VelocityVerlet 1.05, # VerletLeapfrog 0.98, # PseudoVerletLeapfrog 1.02, # McAte2 2.38, # CalvoSanz4 2.92, # McAte5 3.74, # Yoshida6 8.44, # KahanLi8 15.76 # SofSpa10 ] ``` Let us now benchmark the solvers for a fixed simulation time and variable timestep ```julia t = 40.0 τs = 10 .^range(-4, -3, length=10) results = DataFrame(:integrator=>String[], :runtime=>Float64[], :τ=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results, t, symplectic_integrators, τs, c=c_symplectic) ``` The energy error as a function of runtime is given by ```julia @df results plot(:EnergyError, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Runtime (s)") ``` Looking at the runtime as a function of timesteps, we can observe that we have a linear dependency for each method, and the slope is the previously computed cost per step. ```julia @df results plot(:timesteps, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Number of timesteps", ylabel="Runtime (s)") ``` We can also look at the energy error history ```julia function benchmark(energyerr, rts, ts, t, configs) simulation = setup(t) prob = SecondOrderODEProblem(simulation) for config in configs alg = config.alg sol, rt = @timed solve(prob, alg(); progress=true, progress_name="$alg", config...) result = NBodySimulator.SimulationResult(sol, simulation) ΔE(t) = total_energy(result, t) - total_energy(result, 0) energyerr[alg] = [ΔE(t) for t in sol.t[2:10^2:end]] rts[alg] = rt ts[alg] = sol.t[2:10^2:end] end end ΔE = Dict() rt = Dict() ts = Dict() configs = config(symplectic_integrators, c_symplectic, 2.3e-4) benchmark(ΔE, rt, ts, 40., configs) plt = plot(xlabel="Rescaled Time", ylabel="Energy error", legend=:bottomleft); for c in configs plot!(plt, ts[c.alg], abs.(ΔE[c.alg]), label="$(c.alg), $(rt[c.alg])s") end plt ``` Now, let us compare some adaptive methods ```julia adaptive_integrators=[ # Non-stiff ODE methods Tsit5, Vern7, Vern9, # DPRKN DPRKN6, DPRKN8, DPRKN12, ]; ``` Similarly to the case of symplectic methods, we will take into account the average cost per timestep in order to have a fair comparison between the solvers. 
```julia config(integrators, c, at, rt) = [ (alg=a, abstol=at*2^cₐ, reltol=rt*2^cₐ) for (a,cₐ) in zip(integrators, c)] t = 35.0 ats = 10 .^range(-14, -4, length=10) rts = 10 .^range(-14, -4, length=10) # warmup c_adaptive = ones(length(adaptive_integrators)) benchmark(Dict(), Dict(), Dict(), Dict(), Dict(), Dict(), 10., config(adaptive_integrators, c_adaptive, ats[1], rts[1])) # results = DataFrame(:integrator=>String[], :runtime=>Float64[], :abstol=>Float64[], # :reltol=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); # run_benchmark!(results, t, adaptive_integrators, ats[1], rts[1]) # c_adaptive .= results[!, :runtime] ./ results[!, :timesteps] # c_adaptive /= c_Verlet c_adaptive = [ 3.55, # Tsit5, 7.84, # Vern7, 11.38, # Vern9 3.56, # DPRKN6, 5.10, # DPRKN8, 8.85 # DPRKN12, ] ``` Let us now benchmark the solvers for a fixed simulation time and variable timestep ```julia t = 40.0 results = DataFrame(:integrator=>String[], :runtime=>Float64[], :abstol=>Float64[], :reltol=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results, t, adaptive_integrators, ats, rts, c=c_adaptive) ``` The energy error as a function of runtime is given by ```julia @df results plot(:EnergyError, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Runtime (s)") ``` If we consider the number of function evaluations instead, we obtain ```julia @df results plot(:EnergyError, :f_evals, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Number of f evals") ``` We will now compare the best performing solvers ```julia t = 40.0 symplectic_integrators = [ VelocityVerlet, VerletLeapfrog, PseudoVerletLeapfrog, McAte2, CalvoSanz4 ] c_symplectic = [ 1.00, # VelocityVerlet 1.05, # VerletLeapfrog 0.98, # PseudoVerletLeapfrog 1.02, # McAte2 2.38, # CalvoSanz4 ] results1 = DataFrame(:integrator=>String[], :runtime=>Float64[], :τ=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results1, t, symplectic_integrators, τs, c=c_symplectic) adaptive_integrators=[ DPRKN6, DPRKN8, DPRKN12, ] c_adaptive = [ 3.56, # DPRKN6, 5.10, # DPRKN8, 8.85 # DPRKN12, ] results2 = DataFrame(:integrator=>String[], :runtime=>Float64[], :abstol=>Float64[], :reltol=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results2, t, adaptive_integrators, ats, rts, c=c_adaptive) append!(results1, results2, cols=:union) results1 ``` The energy error as a function of runtime is given by ```julia @df results1 plot(:EnergyError, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Runtime (s)") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
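The hardcoded cost vectors above came from a runtime-per-accepted-step measurement. Here is a minimal sketch of that estimate on a toy ODE; the scalar problem and the two fixed-step methods are placeholders, and in practice a warmup solve is needed first to exclude compilation time (as done in the warmup calls above):

```julia
# Sketch: cost per step = runtime / accepted steps, normalized to the first
# method, mirroring how c_symplectic was originally computed.
using OrdinaryDiffEq

toyprob = ODEProblem((u, p, t) -> -u, 1.0, (0.0, 1.0))
costs = Float64[]
for alg in (Euler(), Midpoint())        # placeholder fixed-step methods
    rt = @elapsed sol = solve(toyprob, alg; dt=1e-4)
    push!(costs, rt / sol.destats.naccept)
end
costs ./= costs[1]                      # normalize to the first method's step cost
@show costs
```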
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NBodySimulator/liquid_argon_long.jmd
docs
7220
--- title: Liquid argon benchmarks author: Sebastian Micluța-Câmpeanu, Mikhail Vaganov --- The purpose of these benchmarks is to compare several integrators for use in molecular dynamics simulation. We will use a simulation of liquid argon from the examples of NBodySimulator as a test case. ```julia using ProgressLogging using NBodySimulator, OrdinaryDiffEq, StaticArrays using Plots, DataFrames, StatsPlots function setup(t) T = 120.0 # K kb = 1.38e-23 # J/K ϵ = T * kb # J σ = 3.4e-10 # m ρ = 1374 # kg/m^3 m = 39.95 * 1.6747 * 1e-27 # kg N = 350 L = (m*N/ρ)^(1/3) R = 3.5σ v_dev = sqrt(kb * T / m) # m/s _L = L / σ _σ = 1.0 _ϵ = 1.0 _m = 1.0 _v = v_dev / sqrt(ϵ / m) _R = R / σ bodies = generate_bodies_in_cell_nodes(N, _m, _v, _L) lj_parameters = LennardJonesParameters(_ϵ, _σ, _R) pbc = CubicPeriodicBoundaryConditions(_L) lj_system = PotentialNBodySystem(bodies, Dict(:lennard_jones => lj_parameters)); simulation = NBodySimulation(lj_system, (0.0, t), pbc, _ϵ/T) return simulation end ``` In order to compare different integration methods we will consider a fixed simulation time and change the timestep (or tolerances in the case of adaptive methods). ```julia function benchmark(energyerr, rts, bytes, allocs, nt, nf, t, configs) simulation = setup(t) prob = SecondOrderODEProblem(simulation) for config in configs alg = config.alg sol, rt, b, gc, memalloc = @timed solve(prob, alg(); save_everystep=false, progress=true, progress_name="$alg", config...) result = NBodySimulator.SimulationResult(sol, simulation) ΔE = total_energy(result, t) - total_energy(result, 0) energyerr[alg] = ΔE rts[alg] = rt bytes[alg] = b allocs[alg] = memalloc nt[alg] = sol.destats.naccept nf[alg] = sol.destats.nf + sol.destats.nf2 end end function run_benchmark!(results, t, integrators, tol...; c=ones(length(integrators))) @progress "Benchmark at t=$t" for τ in zip(tol...) runtime = Dict() ΔE = Dict() nt = Dict() nf = Dict() b = Dict() allocs = Dict() cfg = config(integrators, c, τ...) GC.gc() benchmark(ΔE, runtime, b, allocs, nt, nf, t, cfg) get_tol(idx) = haskey(cfg[idx], :dt) ? 
cfg[idx].dt : (cfg[idx].abstol, cfg[idx].reltol) for (idx,i) in enumerate(integrators) push!(results, [string(i), runtime[i], get_tol(idx)..., abs(ΔE[i]), nt[i], nf[i], c[idx]]) end end return results end ``` We will consider symplectic integrators first ```julia symplectic_integrators = [ VelocityVerlet, VerletLeapfrog, PseudoVerletLeapfrog, McAte2, CalvoSanz4, McAte5, Yoshida6, KahanLi8, SofSpa10 ]; ``` ```julia config(integrators, c, τ) = [ (alg=a, dt=τ*cₐ) for (a,cₐ) in zip(integrators, c)] t = 35.0 τs = 1e-3 # warmup c_symplectic = ones(length(symplectic_integrators)) benchmark(Dict(), Dict(), Dict(), Dict(), Dict(), Dict(), 10., config(symplectic_integrators, c_symplectic, τs)) # results = DataFrame(:integrator=>String[], :runtime=>Float64[], :τ=>Float64[], # :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); # run_benchmark!(results, t, symplectic_integrators, τs) # c_symplectic .= results[!, :runtime] ./ results[!, :timesteps] # c_Verlet = c_symplectic[1] # c_symplectic /= c_Verlet c_symplectic = [ 1.00, # VelocityVerlet 1.05, # VerletLeapfrog 0.98, # PseudoVerletLeapfrog 1.02, # McAte2 2.38, # CalvoSanz4 2.92, # McAte5 3.74, # Yoshida6 8.44, # KahanLi8 15.76 # SofSpa10 ] ``` We will consider a longer simulation time ```julia t = 200.0 results = DataFrame(:integrator=>String[], :runtime=>Float64[], :τ=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results, t, symplectic_integrators, τs, c=c_symplectic) ``` The energy error as a function of runtime is given by ```julia @df results plot(:EnergyError, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Runtime (s)") ``` Now, let us compare some adaptive methods ```julia adaptive_integrators=[ # Non-stiff ODE methods Tsit5, Vern7, Vern9, # DPRKN DPRKN6, DPRKN8, DPRKN12, ]; ``` ```julia config(integrators, c, at, rt) = [ (alg=a, abstol=at*2^cₐ, reltol=rt*2^cₐ) for (a,cₐ) in zip(integrators, c)] t = 35.0 ats = 10 .^range(-14, -4, length=10) rts = 10 .^range(-14, -4, length=10) # warmup c_adaptive = ones(length(adaptive_integrators)) benchmark(Dict(), Dict(), Dict(), Dict(), Dict(), Dict(), 10., config(adaptive_integrators, c_adaptive, ats[1], rts[1])) # results = DataFrame(:integrator=>String[], :runtime=>Float64[], :abstol=>Float64[], # :reltol=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); # run_benchmark!(results, t, adaptive_integrators, ats[1], rts[1]) # c_adaptive .= results[!, :runtime] ./ results[!, :timesteps] # c_adaptive /= c_Verlet c_adaptive = [ 3.55, # Tsit5, 7.84, # Vern7, 11.38, # Vern9 3.56, # DPRKN6, 5.10, # DPRKN8, 8.85 # DPRKN12, ] ``` We will consider a longer simulation time ```julia t = 200.0 results = DataFrame(:integrator=>String[], :runtime=>Float64[], :abstol=>Float64[], :reltol=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results, t, adaptive_integrators, ats, rts, c=c_adaptive) ``` The energy error as a function of runtime is given by ```julia @df results plot(:EnergyError, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Runtime (s)") ``` We will now compare the best performing solvers ```julia t = 200.0 symplectic_integrators = [ VelocityVerlet, VerletLeapfrog, PseudoVerletLeapfrog, McAte2, CalvoSanz4 ] c_symplectic = [ 1.00, # VelocityVerlet 1.05, # VerletLeapfrog 0.98, # PseudoVerletLeapfrog 1.02, # McAte2 2.38, # CalvoSanz4 ] results1 = 
DataFrame(:integrator=>String[], :runtime=>Float64[], :τ=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results1, t, symplectic_integrators, τs, c=c_symplectic) adaptive_integrators=[ DPRKN6, DPRKN8, DPRKN12, ] c_adaptive = [ 3.56, # DPRKN6, 5.10, # DPRKN8, 8.85 # DPRKN12, ] results2 = DataFrame(:integrator=>String[], :runtime=>Float64[], :abstol=>Float64[], :reltol=>Float64[], :EnergyError=>Float64[], :timesteps=>Int[], :f_evals=>Int[], :cost=>Float64[]); run_benchmark!(results2, t, adaptive_integrators, ats, rts, c=c_adaptive) append!(results1, results2, cols=:union) results1 ``` The energy error as a function of runtime is given by ```julia @df results1 plot(:EnergyError, :runtime, group=:integrator, xscale=:log10, yscale=:log10, xlabel="Energy error", ylabel="Runtime (s)") ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
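The summary plots above rely on the `DataFrames` + `StatsPlots` combination. Here is a minimal, self-contained sketch of that plotting pattern; the numbers are made-up placeholders for illustration only, not benchmark results:

```julia
# Sketch of the @df grouped log-log plot used for the energy-error summaries.
using DataFrames, StatsPlots

df = DataFrame(integrator = ["A", "A", "B", "B"],
               EnergyError = [1e-3, 1e-5, 5e-4, 2e-6],   # placeholder values
               runtime     = [0.1, 1.0, 0.3, 2.5])       # placeholder values
@df df plot(:EnergyError, :runtime, group=:integrator,
            xscale=:log10, yscale=:log10,
            xlabel="Energy error", ylabel="Runtime (s)")
```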
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffDDE/Mackey_Glass_wpd.jmd
docs
6056
--- title: Mackey and Glass Work-Precision Diagrams author: David Widmann, Chris Rackauckas --- # Mackey and Glass We study algorithms for solving constant delay differential equations with a test problem from W.H. Enright and H. Hayashi, "The evaluation of numerical software for delay differential equations", 1997. It is a model of blood production that was published by M. C. Mackey and L. Glass in "Oscillation and chaos in physiological control systems", 1977, and is given by ```math \begin{equation} y'(t) = \frac{0.2y(t-14)}{1 + y(t-14)^{10}} - 0.1y(t) \end{equation} ``` ```julia using DelayDiffEq, DiffEqDevTools, Plots using DDEProblemLibrary: prob_dde_DDETST_A1 as prob gr() sol = solve(prob, MethodOfSteps(Vern9(); fpsolve = NLFunctional(; max_iter = 1000)); reltol=1e-14, abstol=1e-14) test_sol = TestSolution(sol) plot(sol) ``` ## Low order RK methods ### High tolerances First we test final error estimates of continuous RK methods of low order at high tolerances. `OwrenZen4`, `OwrenZen5`, and `RK4` yield the best error estimates. ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` Next we test average interpolation errors: ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` As before, `OwrenZen4` and `OwrenZen5` perform well over the whole range of investigated tolerances. ### Low tolerances We repeat our tests with low tolerances. ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` And once again we also test the interpolation errors: ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` Apparently `Tsit5` and `DP5` perform quite well at low tolerances, but only `OwrenZen5`, `OwrenZen4` and `RK4` achieve interpolation errors of around 1e-9. ## Lazy interpolants ### High tolerances We repeat our tests with the Verner methods which, in contrast to the methods above, use lazy interpolants. As reference we include `OwrenZen4`. 
```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` And we obtain the following interpolation errors: ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` `Vern6`, `Vern7`, and `Vern9` are outperformed by `OwrenZen4`. ### Low tolerances Again, we repeat our tests at low tolerances. ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` `Vern6`, `Vern7`, and `Vern9` show similar results at low tolerances, and perform even better than `OwrenZen4`. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
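The two error measures used above can also be reproduced by hand for a single solve: `:final` compares only the endpoint, while `:L2` samples the dense interpolant across the interval. Here is a sketch; the tolerances and the 200-point sampling grid are arbitrary choices, and `sol` is the dense reference solution computed at the top of this document:

```julia
# Sketch: endpoint error vs. sampled interpolation error for one DDE solve.
using LinearAlgebra

sol_lo = solve(prob, MethodOfSteps(Tsit5()); abstol=1e-6, reltol=1e-3)
final_err = norm(sol_lo.u[end] .- sol(sol_lo.t[end]))
ts = range(prob.tspan[1], prob.tspan[2], length=200)
l2_err = sqrt(sum(norm(sol_lo(t) .- sol(t))^2 for t in ts) / length(ts))
println("final error = $final_err, sampled L2 error = $l2_err")
```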
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffDDE/Wheldon_Kirk_Finlay_wpd.jmd
docs
6181
--- title: Wheldon, Kirk, and Finlay Work-Precision Diagrams author: David Widmann, Chris Rackauckas --- # Wheldon, Kirk, and Finlay We study algorithms for solving constant delay differential equations with a test problem from W.H. Enright and H. Hayashi, "The evaluation of numerical software for delay differential equations", 1997. It is a model of chronic granulocytic leukemia that was published by T. Wheldon, J. Kirk and H. Finlay in "Cyclical granulopoiesis in chronic granulocytic leukemia: A simulation study", 1974, and is given by ```math \begin{align} y_1'(t) &= \frac{1.1}{1 + \sqrt{10}y_1(t-20)^{5/4}} - \frac{10y_1(t)}{1 + 40y_2(t)} \\ y_2'(t) &= \frac{100y_1(t)}{1 + 40y_2(t)} - 2.43y_2(t) \end{align} ``` ```julia using DelayDiffEq, DiffEqDevTools, Plots using DDEProblemLibrary: prob_dde_DDETST_A2 as prob gr() sol = solve(prob, MethodOfSteps(Vern9(); fpsolve = NLFunctional(; max_iter = 1000)); reltol=1e-14, abstol=1e-14) test_sol = TestSolution(sol) plot(sol) ``` ## Low order RK methods ### High tolerances First we compare final errors of solutions with low order RK methods at high tolerances. ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` Next we test interpolation errors: ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` Both interpolation tests and tests of final error show similar results. `BS3` does quite well but only `OwrenZen4`, `OwrenZen5`, and `RK4` achieve interpolation errors of about 1e-5. ### Low tolerances We repeat our tests at low tolerances. ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` Out of the compared methods, `Tsit5`, `DP5`, and `OwrenZen5` seem to be the best methods for this problem at low tolerances, but also `OwrenZen4` performs similarly well. `OwrenZen5` and `OwrenZen4` can even achieve interpolation errors below 1e-9. 
## Lazy interpolants ### High tolerances We compare the Verner methods, which use lazy interpolants, at high tolerances. As reference we include `OwrenZen4`. ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ### Low tolerances We repeat these tests and compare the Verner methods also at low tolerances. ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(OwrenZen4()))] wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` It seems `Vern6` and `Vern7` are both well suited for the problem at low tolerances and outperform `OwrenZen4`, whereas at high tolerances `OwrenZen4` is more efficient. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffODE/FitzhughNagumo_wpd.jmd
docs
6267
--- title: Fitzhugh-Nagumo Work-Precision Diagrams author: Chris Rackauckas --- # Fitzhugh-Nagumo The purpose of this is to see how the errors scale on a standard nonlinear problem. ```julia using OrdinaryDiffEq, ParameterizedFunctions, ODE, ODEInterface, ODEInterfaceDiffEq, LSODA, Sundials, DiffEqDevTools, StaticArrays using Plots; gr() f = @ode_def FitzhughNagumo begin dv = v - v^3/3 -w + l dw = τinv*(v + a - b*w) end a b τinv l p = SA[0.7,0.8,1/12.5,0.5] prob = ODEProblem{true, SciMLBase.FullSpecialize}(f,[1.0;1.0],(0.0,10.0),p) probstatic = ODEProblem{false}(f,SA[1.0;1.0],(0.0,10.0),p) abstols = 1.0 ./ 10.0 .^ (6:13) reltols = 1.0 ./ 10.0 .^ (3:10); sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Vern7(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; ``` ```julia plot(sol) ``` ## Low Order ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) #fails Dict(:alg=>dopri5()) Dict(:alg=>BS5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>Vern6(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) plot(wp) ``` ### Interpolation ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) # fails Dict(:alg=>BS5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>Vern6(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,numruns=100,maxiters=10000,error_estimate=:L2,dense_errors=true) plot(wp) ``` ## Higher Order ```julia setups = [Dict(:alg=>DP8()) Dict(:alg=>dop853()) #Dict(:alg=>ode78()) # fails Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) plot(wp) ``` ```julia setups = [Dict(:alg=>DP8()) Dict(:alg=>Vern7()) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>CVODE_Adams()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=6)) Dict(:alg=>lsoda()) Dict(:alg=>odex()) Dict(:alg=>ddeabm()) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) plot(wp) ``` ### Interpolation ```julia setups = [Dict(:alg=>DP8()) #Dict(:alg=>ode78()) # fails Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,numruns=100,maxiters=1000,error_estimate=:L2,dense_errors=true) plot(wp) ``` ## Comparison with Non-RK methods Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an Adams-Bashforth-Moulton: ```julia setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>VCABM()) Dict(:alg=>Vern9(), :prob_choice => 2) Dict(:alg=>VCABM(), :prob_choice => 2) Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))] solnames = ["Tsit5","Vern9","VCABM","Vern9 Static","VCABM 
Static","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))] solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))] solnames = ["1","2","3","4","5"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ## Conclusion As expected, the algorithms are all pretty matched on time for this problem. However, you can clearly see the OrdinaryDiffEq.jl algorithms solving to a much higher accuracy and still faster, especially when the interpolations are involved. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffODE/LotkaVolterra_wpd.jmd
docs
7059
--- title: Lotka-Volterra Work-Precision Diagrams author: Chris Rackauckas --- ## Lotka-Volterra The purpose of this problem is to test the performance on easy problems. Since it's periodic, the error is naturally low, and so most of the difference will come down to startup times and, when measuring the interpolations, the algorithm choices. ```julia using OrdinaryDiffEq, ParameterizedFunctions, ODE, ODEInterfaceDiffEq, LSODA, Sundials, DiffEqDevTools, StaticArrays f = @ode_def LotkaVolterra begin dx = a*x - b*x*y dy = -c*y + d*x*y end a b c d p = SA[1.5,1.0,3.0,1.0] prob = ODEProblem{true, SciMLBase.FullSpecialize}(f,[1.0;1.0],(0.0,10.0),p) probstatic = ODEProblem{false}(f,SA[1.0;1.0],(0.0,10.0),p) abstols = 1.0 ./ 10.0 .^ (6:13) reltols = 1.0 ./ 10.0 .^ (3:10); sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Vern7(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; using Plots; gr() ``` ```julia plot(sol) ``` ### Low Order ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) # fail Dict(:alg=>dopri5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>Vern6(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,maxiters=10000,numruns=100) plot(wp) ``` Here we see the OrdinaryDiffEq.jl algorithms once again far in the lead. ### Interpolation Error Since the problem is periodic, the real measure of error is the error throughout the solution. ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>Vern6(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,maxiters=10000,error_estimate=:L2,dense_errors=true,numruns=100) plot(wp) ``` Here we see the power of algorithm specific interpolations. 
The ODE.jl algorithm is only able to reach $10^{-7}$ error even at a tolerance of $10^{-13}$, while the DifferentialEquations.jl algorithms are below $10^{-10}$. ## Higher Order ```julia setups = [Dict(:alg=>DP8()) Dict(:alg=>dop853()) #Dict(:alg=>ode78()) # fails Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,maxiters=1000,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>odex()) Dict(:alg=>ddeabm()) Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2) Dict(:alg=>CVODE_Adams()) Dict(:alg=>lsoda()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=6)) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,maxiters=1000,numruns=100) plot(wp) ``` Again we look at interpolations: ```julia setups = [Dict(:alg=>DP8()) #Dict(:alg=>ode78()) Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,dense=true,maxiters=1000,error_estimate=:L2,numruns=100) plot(wp) ``` Again, the ODE.jl algorithms suffer when measuring the interpolations due to relying on an order 3 Hermite polynomial instead of an algorithm-specific order-matching interpolation which uses the timesteps.
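For intuition, the `:L2` dense error used above can be approximated by hand: sample both interpolants on a fine grid and take the root-mean-square difference. A rough sketch (not part of the benchmark set), assuming the `prob` and tight-tolerance reference `sol` from the setup:

```julia
# A rough sketch of what the :L2 dense error measures, assuming `prob`
# and the reference `sol` from the setup above.
lowsol = solve(prob, Tsit5(); abstol=1e-6, reltol=1e-3)
ts = range(prob.tspan[1], prob.tspan[2]; length=1000)
l2 = sqrt(sum(sum(abs2, lowsol(t) .- sol(t)) for t in ts) / length(ts))
```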
## Comparison with Non-RK methods Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an Adams-Bashforth-Moulton: ```julia setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>VCABM()) Dict(:alg=>Vern9(), :prob_choice => 2) Dict(:alg=>VCABM(), :prob_choice => 2) Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))] solnames = ["Tsit5","Vern9","VCABM","Vern9 Static","VCABM Static","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))] solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))] solnames = ["1","2","3","4","5"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ## Conclusion The OrdinaryDiffEq.jl algorithms are quicker and still solve to a much higher accuracy, especially when the interpolations are involved. ODE.jl errors a lot. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffODE/Pleiades_wpd.jmd
docs
6141
--- title: Pleiades Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, ODE, ODEInterfaceDiffEq, LSODA, Sundials, DiffEqDevTools f = (du,u,p,t) -> begin @inbounds begin x = view(u,1:7) # x y = view(u,8:14) # y v = view(u,15:21) # x′ w = view(u,22:28) # y′ du[1:7] .= v du[8:14].= w for i in 15:28 du[i] = zero(u[1]) end for i=1:7,j=1:7 if i != j r = ((x[i]-x[j])^2 + (y[i] - y[j])^2)^(3/2) du[14+i] += j*(x[j] - x[i])/r du[21+i] += j*(y[j] - y[i])/r end end end end prob = ODEProblem{true, SciMLBase.FullSpecialize}(f,[3.0,3.0,-1.0,-3.0,2.0,-2.0,2.0,3.0,-3.0,2.0,0,0,-4.0,4.0,0,0,0,0,0,1.75,-1.5,0,0,0,-1.25,1,0,0],(0.0,3.0)) abstols = 1.0 ./ 10.0 .^ (6:9) reltols = 1.0 ./ 10.0 .^ (3:6); using Plots; gr() ``` ```julia sol = solve(prob,Vern8(),abstol=1/10^12,reltol=1/10^10,maxiters=1000000) test_sol = TestSolution(sol); plot(sol) ``` ## Low Order ODE.jl had to be discarded. The error estimate is off: since it throws errors and aborts, it artificially lowers the measured error while the measured time is severely diminished. ```julia #setups = [Dict(:alg=>ode45())] #wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=10000) #plot(wp) ``` ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>dopri5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=10000) plot(wp) ``` ### Interpolation ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,numruns=100,maxiters=10000,error_estimate=:L2,dense_errors=true) plot(wp) ``` ## Higher Order Once again ODE.jl had to be discarded since it errors. ```julia #setups = [Dict(:alg=>ode78())] #wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) #plot(wp) ``` ```julia setups = [Dict(:alg=>DP8()) Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>dop853()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) plot(wp) ``` ```julia setups = [Dict(:alg=>odex()) Dict(:alg=>Vern7()) Dict(:alg=>CVODE_Adams()) Dict(:alg=>lsoda()) Dict(:alg=>Vern6()) Dict(:alg=>Tsit5()) Dict(:alg=>ddeabm()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=6)) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=20) plot(wp) ``` ### Interpolations ```julia setups = [Dict(:alg=>DP8()) Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,numruns=100,maxiters=1000,error_estimate=:L2,dense_errors=true) plot(wp) ``` ## Comparison with Non-RK methods Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an Adams-Bashforth-Moulton: ```julia setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>VCABM()) Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))] solnames = ["Tsit5","Vern9","VCABM","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ```
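The next comparisons vary the internal options of the extrapolation methods, where `sequence` selects the step-number sequence (`:romberg` and `:bulirsch` are compared below). A sketch of what those knobs mean on a single solve, assuming the `prob` from the setup above:

```julia
# A sketch of the extrapolation options varied below, assuming `prob`
# from the setup above.
alg = ExtrapolationMidpointHairerWanner(min_order=2, max_order=11,
                                        init_order=4,   # order the adaptive order-control starts at
                                        threading=true) # parallelize the internal stages
sol = solve(prob, alg; abstol=1e-9, reltol=1e-6)
length(sol.t)  # number of accepted steps taken
```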
```julia setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))] solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))] solnames = ["1","2","3","4","5"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ## Conclusion One big conclusion is that, once again, ODE.jl fails to run on difficult problems. Its minimum timestep is essentially machine epsilon, and so this shows some fatal flaws in its timestepping algorithm. The OrdinaryDiffEq.jl algorithms come out as faster in each case than the ODEInterface algorithms. Overall, the Verner methods have a really good showing once again. The `CVODE_Adams` method does really well here when the tolerances are higher. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffODE/RigidBody_wpd.jmd
docs
5448
--- title: Rigid Body Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, ParameterizedFunctions, ODE, ODEInterfaceDiffEq, LSODA, Sundials, DiffEqDevTools, StaticArrays k(t) = 0.25*sin(t)^2 g = @ode_def RigidBody begin dy1 = I₁*y2*y3 dy2 = I₂*y1*y3 dy3 = I₃*y1*y2 + k(t) end I₁ I₂ I₃ p = SA[-2.0,1.25,-0.5] prob = ODEProblem{true, SciMLBase.FullSpecialize}(g,[1.0;0.0;0.9],(0.0,10.0),p) probstatic = ODEProblem{false}(g,SA[1.0;0.0;0.9],(0.0,10.0),p) abstols = 1.0 ./ 10.0 .^ (6:13) reltols = 1.0 ./ 10.0 .^ (3:10); sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Vern7(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; using Plots; gr() ``` ```julia plot(sol) ``` ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) # fails Dict(:alg=>dopri5()) Dict(:alg=>Tsit5()) Dict(:alg=>Vern6()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>Vern6(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=true,numruns=100,maxiters=10000) plot(wp) ``` The DifferentialEquations.jl algorithms once again pull ahead. This is the first benchmark we've run where `ode45` doesn't fail. However, it still doesn't do as well as `Tsit5`. One reason why it does so well is that the maximum norm that ODE.jl uses (as opposed to the L2 norm of Sundials, DifferentialEquations, and ODEInterface) seems to do really well on this problem. `dopri5` does surprisingly badly in this test. ## Higher Order ```julia setups = [Dict(:alg=>DP8()) Dict(:alg=>dop853()) #Dict(:alg=>ode78()) # fails Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) plot(wp) ``` ```julia setups = [Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>odex()) Dict(:alg=>CVODE_Adams()) Dict(:alg=>lsoda()) Dict(:alg=>ddeabm()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=6)) ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100,maxiters=1000) plot(wp) ``` ## Comparison with Non-RK methods Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an Adams-Bashforth-Moulton: ```julia setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>VCABM()) Dict(:alg=>Vern9(), :prob_choice => 2) Dict(:alg=>VCABM(), :prob_choice => 2) Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))] solnames = ["Tsit5","Vern9","VCABM","Vern9 Static","VCABM Static","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4,
threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))] solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))] solnames = ["1","2","3","4","5"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ### Conclusion Once again, the OrdinaryDiffEq.jl algorithms pull far ahead in terms of speed and accuracy. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffODE/ThreeBody_wpd.jmd
docs
7989
--- title: Three Body Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, ODE, ODEInterfaceDiffEq, LSODA, Sundials, DiffEqDevTools, StaticArrays using Plots; gr() ## Define the ThreeBody Problem const threebody_μ = parse(Float64,"0.012277471") const threebody_μ′ = 1 - threebody_μ function f(du,u,p,t) @inbounds begin # 1 = y₁ # 2 = y₂ # 3 = y₁' # 4 = y₂' D₁ = ((u[1]+threebody_μ)^2 + u[2]^2)^(3/2) D₂ = ((u[1]-threebody_μ′)^2 + u[2]^2)^(3/2) du[1] = u[3] du[2] = u[4] du[3] = u[1] + 2u[4] - threebody_μ′*(u[1]+threebody_μ)/D₁ - threebody_μ*(u[1]-threebody_μ′)/D₂ du[4] = u[2] - 2u[3] - threebody_μ′*u[2]/D₁ - threebody_μ*u[2]/D₂ end end function f(u,p,t) @inbounds begin # 1 = y₁ # 2 = y₂ # 3 = y₁' # 4 = y₂' D₁ = ((u[1]+threebody_μ)^2 + u[2]^2)^(3/2) D₂ = ((u[1]-threebody_μ′)^2 + u[2]^2)^(3/2) du1 = u[3] du2 = u[4] du3 = u[1] + 2u[4] - threebody_μ′*(u[1]+threebody_μ)/D₁ - threebody_μ*(u[1]-threebody_μ′)/D₂ du4 = u[2] - 2u[3] - threebody_μ′*u[2]/D₁ - threebody_μ*u[2]/D₂ end SA[du1,du2,du3,du4] end t₀ = 0.0; T = parse(Float64,"17.0652165601579625588917206249") tspan = (t₀,2T) prob = ODEProblem{true, SciMLBase.FullSpecialize}(f,[0.994, 0.0, 0.0, parse(Float64,"-2.00158510637908252240537862224")],tspan) probstatic = ODEProblem{false}(f,SA[0.994, 0.0, 0.0, parse(Float64,"-2.00158510637908252240537862224")],tspan) sol = solve(prob,Vern7(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Vern7(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; abstols = 1.0 ./ 10.0 .^ (3:13); reltols = 1.0 ./ 10.0 .^ (0:10); ``` See that it's periodic in the chosen timespan: ```julia sol = solve(prob,Vern9(),abstol=1e-14,reltol=1e-14) @show sol[1] - sol[end] @show sol[end] - prob.u0; ``` This three-body problem is known to be a tough problem. Let's see how the algorithms do at standard tolerances. ### 5th Order Runge-Kutta Methods ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) #fails Dict(:alg=>BS5()) Dict(:alg=>Tsit5()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>dopri5())]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100) plot(wp) ``` #### Full save, but no dense ```julia setups = [Dict(:alg=>DP5(),:dense=>false) #Dict(:alg=>ode45()) # Fails Dict(:alg=>BS5(),:dense=>false) Dict(:alg=>Tsit5(),:dense=>false) Dict(:alg=>Tsit5(),:dense=>false, :prob_choice => 2) Dict(:alg=>dopri5())]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,numruns=100) plot(wp) ``` #### Dense ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) #fails Dict(:alg=>BS5()) Dict(:alg=>Tsit5()) Dict(:alg=>Tsit5(), :prob_choice => 2) Dict(:alg=>dopri5())]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,numruns=100) plot(wp) ``` In these tests we see that most of the algorithms are close, with `BS5` and `DP5` performing much better than `Tsit5`. `ode45` errors.
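The three configurations above differ only in what is saved. A small sketch of the difference, assuming the `prob` from the setup (the exact fallback behavior of `sol(t)` without dense output is an implementation detail):

```julia
# A sketch of the saving modes compared above, assuming `prob` from the setup.
densesol = solve(prob, Tsit5())                        # dense output: densesol(t) uses the RK interpolant
fullsol  = solve(prob, Tsit5(); dense=false)           # every step saved, but no interpolant coefficients
endsol   = solve(prob, Tsit5(); save_everystep=false)  # only the endpoints are stored
densesol(17.0)    # continuous evaluation anywhere in tspan
length(endsol.t)  # == 2
```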
### Higher Order Algorithms ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>TanYam7()) Dict(:alg=>DP8()) Dict(:alg=>dop853()) Dict(:alg=>Vern6()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>Vern9()) Dict(:alg=>Vern6(), :prob_choice => 2) Dict(:alg=>Vern7(), :prob_choice => 2) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>Vern9(), :prob_choice => 2)]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,dense=false,numruns=100,verbose=false) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,numruns=100) plot(wp) ``` In this test we see `Vern7` and `Vern8` shine. ### Other Algorithms Once again we separate ODE.jl because it fails. We also separate Sundials' `CVODE_Adams` since it fails at high tolerances. ```julia #setups = [Dict(:alg=>ode78()) # Dict(:alg=>VCABM()) # Dict(:alg=>CVODE_Adams())]; #wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,dense=false,numruns=100) ``` ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>lsoda()) Dict(:alg=>Vern8()) Dict(:alg=>Vern8(), :prob_choice => 2) Dict(:alg=>ddeabm()) Dict(:alg=>odex()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=6)) ]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100) plot(wp) ``` Again, on cheap function calculations the Adams methods are shown to not be efficient once the error is sufficiently small. Also, as seen in other places, the extrapolation methods do not fare as well as the Runge-Kutta methods. ## Comparison with Non-RK methods Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an Adams-Bashforth-Moulton: ```julia abstols = 1.0 ./ 10.0 .^ (3:13); reltols = 1.0 ./ 10.0 .^ (0:10); setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>Vern9(), :prob_choice => 2) Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))] solnames = ["Tsit5","Vern9","Vern9 Static","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))] solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, 
max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))] solnames = ["1","2","3","4","5"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;appxsol=test_sol,names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ### Conclusion As in the other tests, the OrdinaryDiffEq.jl algorithms with the Verner Efficient methods are the most efficient solvers at stringent tolerances for most of the tests, while the order 5 methods do well at cruder tolerances. ODE.jl fails to run the test problems without erroring. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffODE/linear_wpd.jmd
docs
10473
--- title: 100 Independent Linear Work-Precision Diagrams author: Chris Rackauckas --- For these tests we will solve a diagonal system of 100 independent linear differential equations. This will demonstrate the efficiency of the implementation of the methods for handling large systems, since the system is large enough that array handling matters, but `f` is cheap enough that it is not simply a game of calculating `f` as few times as possible. We will be mostly looking at the efficiency of the work-horse Dormand-Prince Order 4/5 Pairs: one from DifferentialEquations.jl (`DP5`), one from ODE.jl (`rk45`), one from ODEInterface (Hairer's famous `dopri5`), and one from SUNDIALS' ARKODE suite. Also included is `Tsit5`. While all other ODE programs have gone with the traditional choice of using the Dormand-Prince 4/5 pair as the default, DifferentialEquations.jl uses `Tsit5` as one of the default algorithms. It's very new (2011) and not widely known, but the theory and the implementation show it's more efficient than DP5. Thus we include it just to show off how re-designing a library from the ground up in a language for rapid code and rapid development has its advantages. ## Setup ```julia using OrdinaryDiffEq, Sundials, DiffEqDevTools, Plots, ODEInterfaceDiffEq, ODE, LSODA using Random Random.seed!(123) gr() # 2D Linear ODE function f(du,u,p,t) @inbounds for i in eachindex(u) du[i] = 1.01*u[i] end end function f_analytic(u₀,p,t) u₀*exp(1.01*t) end tspan = (0.0,10.0) prob = ODEProblem(ODEFunction{true, SciMLBase.FullSpecialize}(f,analytic=f_analytic),rand(100,100),tspan) abstols = 1.0 ./ 10.0 .^ (3:13) reltols = 1.0 ./ 10.0 .^ (0:10); ``` ### Speed Baseline First a baseline. These are all testing the same Dormand-Prince order 5/4 algorithm of each package. While all use the same Runge-Kutta tableau, they exhibit different behavior due to different choices of adaptive timestepping algorithms and tuning. First we will test with all extra saving features turned off to put DifferentialEquations.jl in "speed mode". ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>ode45()) Dict(:alg=>dopri5()) Dict(:alg=>ARKODE(Sundials.Explicit(),etable=Sundials.DORMAND_PRINCE_7_4_5)) Dict(:alg=>Tsit5())] solnames = ["OrdinaryDiffEq";"ODE";"ODEInterface";"Sundials ARKODE";"OrdinaryDiffEq Tsit5"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames,save_everystep=false,numruns=100) plot(wp) ``` ### Full Saving ```julia setups = [Dict(:alg=>DP5(),:dense=>false) Dict(:alg=>ode45(),:dense=>false) Dict(:alg=>dopri5()) # dense=false by default: no nonlinear interpolation Dict(:alg=>ARKODE(Sundials.Explicit(),etable=Sundials.DORMAND_PRINCE_7_4_5),:dense=>false) Dict(:alg=>Tsit5(),:dense=>false)] solnames = ["OrdinaryDiffEq";"ODE";"ODEInterface";"Sundials ARKODE";"OrdinaryDiffEq Tsit5"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames,numruns=100) plot(wp) ``` ### Continuous Output Now we include continuous output. This has a large overhead because at every timepoint the matrix of rates `k` has to be deep copied. ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>ode45()) Dict(:alg=>dopri5()) Dict(:alg=>ARKODE(Sundials.Explicit(),etable=Sundials.DORMAND_PRINCE_7_4_5)) Dict(:alg=>Tsit5())] solnames = ["OrdinaryDiffEq";"ODE";"ODEInterface";"Sundials ARKODE";"OrdinaryDiffEq Tsit5"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames,numruns=100) plot(wp) ``` ### Other Runge-Kutta Algorithms Now let's test it against a smattering of other Runge-Kutta algorithms.
First we will test it with all overheads off. Let's do the Order 5 (and the 2/3 pair) algorithms: ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>BS3()) Dict(:alg=>BS5()) Dict(:alg=>Tsit5())] wp = WorkPrecisionSet(prob,abstols,reltols,setups;save_everystep=false,numruns=100) plot(wp) ``` ## Higher Order Now let's see how OrdinaryDiffEq.jl fares with some higher order algorithms: ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>Vern6()) Dict(:alg=>TanYam7()) Dict(:alg=>Vern7()) Dict(:alg=>Vern8()) Dict(:alg=>DP8()) Dict(:alg=>Vern9())] wp = WorkPrecisionSet(prob,abstols,reltols,setups;save_everystep=false,numruns=100) plot(wp) ``` ## Higher Order With Many Packages Now we test OrdinaryDiffEq against the high order methods of the other packages: ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>Vern7()) Dict(:alg=>dop853()) Dict(:alg=>ode78()) Dict(:alg=>odex()) Dict(:alg=>lsoda()) Dict(:alg=>ddeabm()) Dict(:alg=>ARKODE(Sundials.Explicit(),order=8)) Dict(:alg=>CVODE_Adams())] wp = WorkPrecisionSet(prob,abstols,reltols,setups;save_everystep=false,numruns=100) plot(wp) ``` ## Interpolation Error Now we will look at the error using an interpolation measurement instead of at the timestepping points. Since the DifferentialEquations.jl algorithms have higher order interpolants than the ODE.jl algorithms, one would expect this would magnify the difference. First the order 4/5 comparison: ```julia setups = [Dict(:alg=>DP5()) #Dict(:alg=>ode45()) Dict(:alg=>Tsit5())] wp = WorkPrecisionSet(prob,abstols,reltols,setups;error_estimate=:L2,dense_errors=true,numruns=100) plot(wp) ``` Note that all of ODE.jl uses a 3rd order Hermite interpolation, while the DifferentialEquations.jl algorithms use interpolations which are specialized to the algorithm. For example, `DP5` and `Tsit5` both use "free" order 4 interpolations, which are both as fast as the Hermite interpolation while achieving far less error.
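Because `prob` carries an analytic solution, a single solve can report its own errors directly, which is what these diagrams aggregate. A sketch, assuming the setup above and the error-reporting keywords from DiffEqBase:

```julia
# A sketch (not part of the benchmark set): with an analytic solution
# attached, the solution object carries its own error estimates.
sol = solve(prob, Tsit5(); abstol=1e-6, reltol=1e-3, dense_errors=true)
sol.errors[:final]  # error at the final time
sol.errors[:L2]     # dense (interpolation) L2 error, as plotted here
```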
At higher order: ```julia setups = [Dict(:alg=>DP5()) Dict(:alg=>Vern7()) #Dict(:alg=>ode78()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;error_estimate=:L2,dense_errors=true,numruns=100) plot(wp) ``` ## Comparison with Fixed Timestep RK4 Let's run the first benchmark but add some fixed timestep RK4 methods to see the difference: ```julia abstols = 1.0 ./ 10.0 .^ (3:13) reltols = 1.0 ./ 10.0 .^ (0:10); dts = [1,1/2,1/4,1/10,1/20,1/40,1/60,1/80,1/100,1/140,1/240] setups = [Dict(:alg=>DP5()) Dict(:alg=>ode45()) Dict(:alg=>dopri5()) Dict(:alg=>RK4(),:dts=>dts) Dict(:alg=>Tsit5())] solnames = ["DifferentialEquations";"ODE";"ODEInterface";"DifferentialEquations RK4";"DifferentialEquations Tsit5"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ## Comparison with Non-RK methods Now let's test Tsit5 and Vern9 against parallel extrapolation methods and an Adams-Bashforth-Moulton: ```julia setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>VCABM()) Dict(:alg=>AitkenNeville(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true))] solnames = ["Tsit5","Vern9","VCABM","AitkenNeville","Midpoint Deuflhard","Midpoint Hairer Wanner"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointDeuflhard(min_order=1, max_order=9, init_order=9, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=false)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :romberg, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, sequence = :bulirsch, threading=true))] solnames = ["Deuflhard","No threads","standard","Romberg","Bulirsch"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia setups = [Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=11, init_order=4, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=11, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=2, max_order=15, init_order=10, threading=true)) Dict(:alg=>ExtrapolationMidpointHairerWanner(min_order=5, max_order=7, init_order=6, threading=true))] solnames = ["1","2","3","4","5"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;names=solnames, save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (12:15) reltols = 1.0 ./ 10.0 .^ (9:12) setups = [Dict(:alg=>Tsit5()) Dict(:alg=>Vern9()) Dict(:alg=>VCABM()) #Dict(:alg=>AitkenNeville(threading = OrdinaryDiffEq.PolyesterThreads())) Dict(:alg=>ExtrapolationMidpointDeuflhard(threading = OrdinaryDiffEq.PolyesterThreads())) Dict(:alg=>ExtrapolationMidpointHairerWanner(threading = OrdinaryDiffEq.PolyesterThreads())) Dict(:alg=>odex()) Dict(:alg=>dop853()) Dict(:alg=>CVODE_Adams()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups; 
save_everystep=false,verbose=false,numruns=100) plot(wp) ``` ## Conclusion DifferentialEquations.jl's default choice of `Tsit5` does well for quick and easy solving at normal tolerances. However, at low tolerances the higher order algorithms are faster. In every case, the DifferentialEquations.jl algorithms are far in the lead, many times an order of magnitude faster than the competitors. `Vern7` with its included 7th order interpolation looks to be a good workhorse for scientific computing in the floating point range. These benchmarks, along with many others, are why these algorithms were chosen as part of the defaults. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffSDE/BasicSDEWeakWorkPrecision.jmd
docs
9367
--- title: SDE Basic Weak Work-Precision Diagrams author: Chris Rackauckas --- # SDE Basic Weak Work-Precision Diagrams In this notebook we will run some benchmarks for the weak error on some simple sample SDEs. The weak error is defined as: $$E_W = \mathbb{E}[Y_\delta(t)] - \mathbb{E}[Y(t)]$$ and is thus a measure of how close the mean of the numerical solution is to the mean of the true solution. Other moments can be measured as well, but the mean is a good stand-in for other properties. Note that convergence of the mean is calculated on a sample. Thus there are actually two sources of error. We have not only the error between the numerical and actual results, but we also have the error of the mean to the true mean due to only taking a finite sample. Using the normal confidence interval of the mean due to the Central Limit Theorem, the error due to finite sampling is $$E_S = \sqrt{V[Y(t)]}/\sqrt{N}$$ where $N$ is the number of samples. In practice, $$E = \min(E_W,E_S)$$ Thus in each case, we will determine the variance of the true solution and use that to estimate the sample error, and the goal is thus to find the numerical method that achieves the sample error most efficiently. ```julia using StochasticDiffEq, DiffEqDevTools, ParameterizedFunctions, SDEProblemLibrary using Plots; gr() import SDEProblemLibrary: prob_sde_additive, prob_sde_linear, prob_sde_wave const N = 1000 ``` ### Additive Noise Problem $$dX_{t}=\left(\frac{\beta}{\sqrt{1+t}}-\frac{1}{2\left(1+t\right)}X_{t}\right)dt+\frac{\alpha\beta}{\sqrt{1+t}}dW_{t},\thinspace\thinspace\thinspace X_{0}=\frac{1}{2}$$ where $\alpha=\frac{1}{10}$ and $\beta=\frac{1}{20}$. Actual Solution: $$X_{t}=\frac{1}{\sqrt{1+t}}X_{0}+\frac{\beta}{\sqrt{1+t}}\left(t+\alpha W_{t}\right).$$ ```julia prob = prob_sde_additive reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [ Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRA1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRA1()) Dict(:alg=>SRIW1()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns_error=N, save_everystep = false, parallel_type = :none, error_estimate=:weak_final)# plot(wp) ``` ```julia sample_size = Int[10;1e2;1e3;1e4] se = get_sample_errors(prob,setups[6],numruns=sample_size, sample_error_runs = 100_000,solution_runs=100) ``` ```julia times = [wp[i].times for i in 1:length(wp)] times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)] plot!([se[end];se[end]],times,color=:red,linestyle=:dash,label="Sample Error: 1000",lw=3) ``` ```julia prob = prob_sde_additive reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [ Dict(:alg=>SRA1()) Dict(:alg=>SRA2()) Dict(:alg=>SRA3()) Dict(:alg=>SOSRA()) Dict(:alg=>SOSRA2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns_error=N, save_everystep = false, maxiters = 1e7, parallel_type = :none, error_estimate=:weak_final) plot(wp) ``` ```julia sample_size = Int[10;1e2;1e3;1e4] se = get_sample_errors(prob,setups[4],numruns=sample_size, sample_error_runs = 100_000,solution_runs=100) ``` ```julia times = [wp[i].times for i in 1:length(wp)] times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)] plot!([se[end];se[end]],times,color=:red,linestyle=:dash,label="Sample Error: 1000",lw=3) ``` ### Scalar Noise
We will use the linear SDE (also known as the Black-Scholes equation) $$dX_{t}=\alpha X_{t}dt+\beta X_{t}dW_{t},\thinspace\thinspace\thinspace X_{0}=\frac{1}{2}$$ where $\alpha=\frac{1}{10}$ and $\beta=\frac{1}{20}$. Actual Solution: $$X_{t}=X_{0}e^{\left(\beta-\frac{\alpha^{2}}{2}\right)t+\alpha W_{t}}.$$ ```julia prob = prob_sde_linear reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>SRIW1()) Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns_error=N, save_everystep = false, maxiters = 1e7, parallel_type = :none, error_estimate=:weak_final) plot(wp) ``` ```julia sample_size = Int[10;1e2;1e3;1e4] se = get_sample_errors(prob,setups[1],numruns=sample_size, sample_error_runs = 100_000,solution_runs=100) ``` ```julia times = [wp[i].times for i in 1:length(wp)] times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)] plot!([se[end];se[end]],times,color=:red,linestyle=:dash,label="Sample Error: 1000",lw=3) ``` ```julia prob = prob_sde_linear reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2),:adaptive=>false) Dict(:alg=>SRI()) Dict(:alg=>SRIW1()) Dict(:alg=>SRIW2()) Dict(:alg=>SOSRI()) Dict(:alg=>SOSRI2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns_error=N, save_everystep = false, maxiters = 1e7, parallel_type = :none, error_estimate=:weak_final) plot(wp) ``` ```julia sample_size = Int[10;1e2;1e3;1e4] se = get_sample_errors(prob,setups[6],numruns=sample_size, sample_error_runs = 100_000,solution_runs=100) ``` ```julia times = [wp[i].times for i in 1:length(wp)] times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)] plot!([se[end];se[end]],times,color=:red,linestyle=:dash,label="Sample Error: 1000",lw=3) ``` ### Scalar Wave SDE $$dX_{t}=-\left(\frac{1}{10}\right)^{2}\sin\left(X_{t}\right)\cos^{3}\left(X_{t}\right)dt+\frac{1}{10}\cos^{2}\left(X_{t}\right)dW_{t},\thinspace\thinspace\thinspace X_{0}=\frac{1}{2}$$ Actual Solution: $$X_{t}=\arctan\left(\frac{1}{10}W_{t}+\tan\left(X_{0}\right)\right).$$ ```julia prob = prob_sde_wave reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [ Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns_error=N, save_everystep = false, maxiters = 1e7, parallel_type = :none, error_estimate=:weak_final) plot(wp) ``` ```julia sample_size = Int[10;1e2;1e3;1e4] se = get_sample_errors(prob,setups[4],numruns=sample_size, sample_error_runs = 100_000,solution_runs=100) ``` ```julia times = [wp[i].times for i in 1:length(wp)] times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)] plot!([se[end];se[end]],times,color=:red,linestyle=:dash,label="Sample Error: 1000",lw=3) ``` ```julia prob = prob_sde_wave reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2))
Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2),:adaptive=>false) Dict(:alg=>SRI()) Dict(:alg=>SRIW1()) Dict(:alg=>SRIW2()) Dict(:alg=>SOSRI()) Dict(:alg=>SOSRI2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns_error=N, save_everystep = false, maxiters = 1e7, parallel_type = :none, error_estimate=:weak_final) plot(wp) ``` ```julia sample_size = Int[10;1e2;1e3;1e4] se = get_sample_errors(prob,setups[6],numruns=sample_size, sample_error_runs = 100_000,solution_runs=100) ``` ```julia times = [wp[i].times for i in 1:length(wp)] times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)] plot!([se[end];se[end]],times,color=:red,linestyle=:dash,label="Sample Error: 1000",lw=3) ``` ## Summary In the additive noise problem, the `EM` and `RKMil` algorithms are not effective at reaching the sample error. In the other two problems, the `EM` and `RKMil` algorithms are as efficient as the higher order methods at achieving the maximal weak error. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffSDE/BasicSDEWorkPrecision.jmd
docs
9117
--- title: SDE Basic Work-Precision Diagrams author: Chris Rackauckas --- # SDE Work-Precision Diagrams In this notebook we will run some simple work-precision diagrams for the SDE integrators. These problems are additive and diagonal noise SDEs which can utilize the specialized Rossler methods. These problems are very well-behaved, meaning that adaptive timestepping should not be a significant advantage (unlike more difficult and realistic problems). Thus these tests will measure both the efficiency gains of the Rossler methods and the overhead of adaptivity. ```julia using StochasticDiffEq, Plots, DiffEqDevTools, SDEProblemLibrary import SDEProblemLibrary: prob_sde_additivesystem, prob_sde_additive, prob_sde_2Dlinear, prob_sde_linear, prob_sde_wave gr() const N = 1000 ``` In this notebook, the error that will be measured is the strong error. The strong error is defined as $$ E = \mathbb{E}\left[\left|Y_\delta(t) - Y(t)\right|\right] $$ where $Y_\delta$ is the numerical approximation to $Y$. This is the same as saying, for a given Wiener trajectory $W(t)$, how well does the numerical trajectory match the real trajectory? Note that this is not how well the mean or other moments match the true mean/variance/etc. (that's the weak error); this is how close the trajectory is to the true trajectory, which is a stronger notion. In a sense, this is measuring pathwise convergence, rather than just convergence in distribution. ### Additive Noise Problem \begin{equation} dX_{t}=\left(\frac{\beta}{\sqrt{1+t}}-\frac{1}{2\left(1+t\right)}X_{t}\right)dt+\frac{\alpha\beta}{\sqrt{1+t}}dW_{t},\thinspace\thinspace\thinspace X_{0}=\frac{1}{2} \end{equation} where $\alpha=\frac{1}{10}$ and $\beta=\frac{1}{20}$. Actual Solution: \begin{equation} X_{t}=\frac{1}{\sqrt{1+t}}X_{0}+\frac{\beta}{\sqrt{1+t}}\left(t+\alpha W_{t}\right). \end{equation} First let's solve this using a system of SDEs, repeating this same problem 4 times. ```julia prob = prob_sde_additivesystem prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>SRIW1()) Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRA1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRA1()) ] names = ["SRIW1","EM","RKMil","SRIW1 Fixed","SRA1 Fixed","SRA1"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,maxiters=1e7,error_estimate=:l2) plot(wp) ``` ```julia prob = prob_sde_additivesystem prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [ Dict(:alg=>SRA1()) Dict(:alg=>SRA2()) Dict(:alg=>SRA3()) Dict(:alg=>SOSRA()) Dict(:alg=>SOSRA2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,maxiters=1e7,error_estimate=:l2) plot(wp) ``` Now as a scalar SDE.
```julia prob = prob_sde_additive prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>SRIW1()) Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRA1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRA1()) ] names = ["SRIW1","EM","RKMil","SRIW1 Fixed","SRA1 Fixed","SRA1"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,maxiters=1e7,error_estimate=:l2) plot(wp) ``` ```julia prob = prob_sde_additive prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [ Dict(:alg=>SRA1()) Dict(:alg=>SRA2()) Dict(:alg=>SRA3()) Dict(:alg=>SOSRA()) Dict(:alg=>SOSRA2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,error_estimate=:l2) plot(wp) ``` ### Diagonal Noise We will use a 4x2 matrix of independent linear SDEs (also known as the Black-Scholes equation) \begin{equation} dX_{t}=\alpha X_{t}dt+\beta X_{t}dW_{t},\thinspace\thinspace\thinspace X_{0}=\frac{1}{2} \end{equation} where $\alpha=\frac{1}{10}$ and $\beta=\frac{1}{20}$. Actual Solution: \begin{equation} X_{t}=X_{0}e^{\left(\beta-\frac{\alpha^{2}}{2}\right)t+\alpha W_{t}}. \end{equation} ```julia prob = prob_sde_2Dlinear prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>SRIW1()) Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) ] names = ["SRIW1","EM","RKMil","SRIW1 Fixed"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,maxiters=1e7,error_estimate=:l2) plot(wp) ``` ```julia prob = prob_sde_2Dlinear prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2),:adaptive=>false) Dict(:alg=>SRI()) Dict(:alg=>SRIW1()) Dict(:alg=>SRIW2()) Dict(:alg=>SOSRI()) Dict(:alg=>SOSRI2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,maxiters=1e7,error_estimate=:l2) plot(wp) ``` Now just the scalar Black-Scholes: ```julia prob = prob_sde_linear prob = remake(prob,tspan=(0.0,1.0)) reltols = 1.0 ./ 10.0 .^ (1:5) abstols = reltols#[0.0 for i in eachindex(reltols)] setups = [Dict(:alg=>SRIW1()) Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false) ] names = ["SRIW1","EM","RKMil","SRIW1 Fixed"] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,maxiters=1e7,error_estimate=:l2) plot(wp) ``` ```julia setups = [Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2)) Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2),:adaptive=>false) Dict(:alg=>SRI()) Dict(:alg=>SRIW1()) Dict(:alg=>SRIW2()) Dict(:alg=>SOSRI()) Dict(:alg=>SOSRI2()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,maxiters=1e7,error_estimate=:l2) plot(wp) ``` Now a scalar wave SDE: \begin{equation}
Now a scalar wave SDE:

\begin{equation}
dX_{t}=-\left(\frac{1}{10}\right)^{2}\sin\left(X_{t}\right)\cos^{3}\left(X_{t}\right)dt+\frac{1}{10}\cos^{2}\left(X_{t}\right)dW_{t},\thinspace\thinspace\thinspace X_{0}=\frac{1}{2}
\end{equation}

Actual Solution:

\begin{equation}
X_{t}=\arctan\left(\frac{1}{10}W_{t}+\tan\left(X_{0}\right)\right).
\end{equation}

```julia
prob = prob_sde_wave
prob = remake(prob,tspan=(0.0,1.0))

reltols = 1.0 ./ 10.0 .^ (1:5)
abstols = reltols#[0.0 for i in eachindex(reltols)]

setups = [Dict(:alg=>SRIW1())
          Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1))
          Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false)
          Dict(:alg=>SRIW1(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 1),:adaptive=>false)
          ]
names = ["SRIW1","EM","RKMil","SRIW1 Fixed"]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,maxiters=1e7,error_estimate=:l2)
plot(wp)
```

Note that in this last problem, the adaptivity algorithm accurately detects that the error is already low enough, and does not increase the number of steps as the tolerance drops further.

```julia
setups = [Dict(:alg=>EM(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2))
          Dict(:alg=>RKMil(),:dts=>1.0./5.0.^((1:length(reltols)) .+ 2),:adaptive=>false)
          Dict(:alg=>SRI())
          Dict(:alg=>SRIW1())
          Dict(:alg=>SRIW2())
          Dict(:alg=>SOSRI())
          Dict(:alg=>SOSRI2())
          ]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,maxiters=1e7,error_estimate=:l2)
plot(wp)
```

### Conclusion

The RSwM3 adaptivity algorithm does not appear to have any significant overhead, even on problems which do not necessitate adaptive timestepping. In addition, the Rossler methods are shown to be orders of magnitude more efficient and should be used whenever applicable. The Oval2 tests show that these results are only magnified as the problem difficulty increases.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/NonStiffSDE/LotkaVolterraSDE.jmd
docs
3719
---
title: SDE Lotka-Volterra Work-Precision Diagrams
author: Chris Rackauckas
---

```julia
using StochasticDiffEq, DiffEqDevTools, ParameterizedFunctions
using Plots; gr()
const N = 100

f = @ode_def LotkaVolterraTest begin
  dx = a*x - b*x*y
  dy = -c*y + d*x*y
end a b c d
p = [1.5,1.0,3.0,1.0]

function g(du,u,p,t)
  du .= 0.1u
end

u0 = [1.0;1.0]
tspan = (0.0,10.0)
prob = SDEProblem(f,g,u0,tspan,p);
```

```julia
sol = solve(prob,SRIW1(),abstol=1e-4,reltol=1e-4)
plot(sol)
```

## Strong Error

The starting `dt`s were chosen as the largest in the `1/4^i` sequence which were stable. All larger `dt`s contained trajectories which would veer off to infinity.

```julia
reltols = 1.0 ./ 4.0 .^ (2:4)
abstols = reltols#[0.0 for i in eachindex(reltols)]

setups = [Dict(:alg=>SRIW1())
          Dict(:alg=>EM(),:dts=>1.0./12.0.^((1:length(reltols)) .+ 1.5))
          Dict(:alg=>RKMil(),:dts=>1.0./12.0.^((1:length(reltols)) .+ 1.5),:adaptive=>false)
          Dict(:alg=>SRIW1(),:dts=>1.0./4.0.^((1:length(reltols)) .+ 5),:adaptive=>false)
          Dict(:alg=>SRIW2())
          Dict(:alg=>SOSRI())
          Dict(:alg=>SOSRI2())
          ]
test_dt = 1/10^2
appxsol_setup = Dict(:alg=>SRIW1(),:abstol=>1e-4,:reltol=>1e-4)
wp = WorkPrecisionSet(prob,abstols,reltols,setups,test_dt;
                      maxiters = 1e7,
                      verbose=false,save_everystep=false,
                      parallel_type = :threads,
                      appxsol_setup = appxsol_setup,
                      numruns_error=N,error_estimate=:final)
plot(wp)
```

## Weak Error

```julia
reltols = 1.0 ./ 4.0 .^ (2:4)
abstols = reltols#[0.0 for i in eachindex(reltols)]

setups = [Dict(:alg=>SRIW1())
          Dict(:alg=>EM(),:dts=>1.0./12.0.^((1:length(reltols)) .+ 1.5))
          Dict(:alg=>RKMil(),:dts=>1.0./12.0.^((1:length(reltols)) .+ 1.5),:adaptive=>false)
          Dict(:alg=>SRIW1(),:dts=>1.0./4.0.^((1:length(reltols)) .+ 5),:adaptive=>false)
          Dict(:alg=>SRIW2())
          Dict(:alg=>SOSRI())
          Dict(:alg=>SOSRI2())
          ]
test_dt = 1e-2
appxsol_setup = Dict(:alg=>SRIW1(),:abstol=>1e-4,:reltol=>1e-4)
wp = WorkPrecisionSet(prob,abstols,reltols,setups,test_dt;
                      maxiters = 1e7,
                      verbose=false,save_everystep=false,
                      parallel_type = :none,
                      appxsol_setup = appxsol_setup,
                      numruns_error=N,error_estimate=:weak_final)
plot(wp;legend=:topleft)
```

```julia
sample_size = Int[10;1e2;1e3]
se = get_sample_errors(prob,setups[6],test_dt,numruns=sample_size,
                       appxsol_setup = appxsol_setup,
                       sample_error_runs = 100_000,solution_runs=20)
```

```julia
plot(wp;legend=:topleft)
times = [wp[i].times for i in 1:length(wp)]
times = [minimum(minimum(t) for t in times),maximum(maximum(t) for t in times)]
plot!([se[end];se[end]],times,color=:orange,linestyle=:dash,label="Sample Error: 1000",lw=3)
```

## Conclusion

These results show that in both strong and weak error, the high order method is more efficient. The strong and weak errors track each other well for the methods tested on this problem, with the strong error slightly higher than the weak error. To reach the sample error for 100 trajectories, the higher order method is around 5x faster. To reach the sampling error for 10000 trajectories, the higher order method is nearly 100x faster.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNErrorsVsTime/allen_cahn_et.jmd
docs
9061
---
title: Allen-Cahn PDE Physics-Informed Neural Network (PINN) Loss Function Error vs Time Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup Code

```julia
using NeuralPDE
using Integrals, IntegralsCubature, IntegralsCuba
using OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
using DelimitedFiles
using QuasiMonteCarlo
import ModelingToolkit: Interval, infimum, supremum

function allen_cahn(strategy, minimizer, maxIters)

    ## DECLARATIONS
    @parameters t x1 x2 x3 x4
    @variables u(..)

    Dt = Differential(t)
    Dxx1 = Differential(x1)^2
    Dxx2 = Differential(x2)^2
    Dxx3 = Differential(x3)^2
    Dxx4 = Differential(x4)^2

    # Discretization
    tmax = 1.0
    x1width = 1.0
    x2width = 1.0
    x3width = 1.0
    x4width = 1.0

    tMeshNum = 10
    x1MeshNum = 10
    x2MeshNum = 10
    x3MeshNum = 10
    x4MeshNum = 10

    dt = tmax / tMeshNum
    dx1 = x1width / x1MeshNum
    dx2 = x2width / x2MeshNum
    dx3 = x3width / x3MeshNum
    dx4 = x4width / x4MeshNum

    domains = [t ∈ Interval(0.0, tmax),
               x1 ∈ Interval(0.0, x1width),
               x2 ∈ Interval(0.0, x2width),
               x3 ∈ Interval(0.0, x3width),
               x4 ∈ Interval(0.0, x4width)]

    ts = 0.0:dt:tmax
    x1s = 0.0:dx1:x1width
    x2s = 0.0:dx2:x2width
    x3s = 0.0:dx3:x3width
    x4s = 0.0:dx4:x4width

    # Operators
    Δu = Dxx1(u(t, x1, x2, x3, x4)) + Dxx2(u(t, x1, x2, x3, x4)) + Dxx3(u(t, x1, x2, x3, x4)) + Dxx4(u(t, x1, x2, x3, x4)) # Laplacian

    # Equation
    eq = Dt(u(t, x1, x2, x3, x4)) - Δu - u(t, x1, x2, x3, x4) + u(t, x1, x2, x3, x4) * u(t, x1, x2, x3, x4) * u(t, x1, x2, x3, x4) ~ 0  #ALLEN CAHN EQUATION

    initialCondition = 1 / (2 + 0.4 * (x1 * x1 + x2 * x2 + x3 * x3 + x4 * x4)) # see PNAS paper

    bcs = [u(0, x1, x2, x3, x4) ~ initialCondition]  #from literature

    ## NEURAL NETWORK
    n = 10 #neuron number
    chain = Lux.Chain(Lux.Dense(5, n, tanh), Lux.Dense(n, n, tanh), Lux.Dense(n, 1)) #Neural network from the Lux library

    indvars = [t, x1, x2, x3, x4] #physically independent variables
    depvars = [u(t, x1, x2, x3, x4)] #dependent (target) variable

    dim = length(domains)

    losses = []
    error = []
    times = []

    dx_err = 0.2

    error_strategy = GridTraining(dx_err)

    discretization_ = PhysicsInformedNN(chain, error_strategy)
    @named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
    prob_ = discretize(pde_system_, discretization_)

    function loss_function_(θ, p)
        return prob_.f.f(θ, nothing)
    end

    cb_ = function (p, l)
        deltaT_s = time_ns() #Start a clock when the callback begins; this will also measure the computation time of the uniform error
        ctime = time_ns() - startTime - timeCounter #This variable is the time to use for the time benchmark plot
        append!(times, ctime / 10^9) #Conversion nanosec to seconds
        append!(losses, l)
        loss_ = loss_function_(p, nothing)
        append!(error, loss_)
        timeCounter = timeCounter + time_ns() - deltaT_s #timeCounter sums all delays due to the callback functions of the previous iterations

        #if (ctime/10^9 > time) #if I exceed the limit time I stop the training
        #    return true #Stop the minimizer and continue from line 142
        #end

        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)

    discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
    prob = NeuralPDE.discretize(pde_system, discretization)

    timeCounter = 0.0
    startTime = time_ns() #Record the initial time (t=0) before starting the training
    res = Optimization.solve(prob, minimizer, callback=cb_, maxiters=maxIters)

    phi = discretization.phi

    params = res.minimizer

    # Model prediction
    domain = [ts, x1s, x2s, x3s, x4s]

    u_predict = [reshape([first(phi([t, x1, x2, x3, x4], res.minimizer)) for x1 in x1s for x2 in x2s for x3 in x3s for x4 in x4s], (length(x1s), length(x2s), length(x3s), length(x4s))) for t in ts]  #matrix of model's prediction

    return [error, params, domain, times, losses]
end
```
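A detail worth calling out in the callback above: the reported times are wall-clock times with the callback's own bookkeeping subtracted out, accumulated in `timeCounter`. Stripped of the PINN specifics, the pattern reduces to the following minimal sketch (`expensive_iteration` is a stand-in for one optimizer step, not a function from these packages):

```julia
expensive_iteration() = sum(abs2, randn(10_000))  # stand-in for one optimizer step

let times = Float64[], timecounter = 0.0, starttime = time_ns()
    for _ in 1:5
        expensive_iteration()                 # work we actually want to time
        t0 = time_ns()                        # clock the callback itself...
        push!(times, (time_ns() - starttime - timecounter) / 1e9)
        sleep(0.01)                           # stand-in for costly error logging
        timecounter += time_ns() - t0         # ...and exclude it from later reports
    end
    times                                     # elapsed time, net of callback overhead
end
```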
```julia
maxIters = [(1,1,1,1,1,1,1000),(1,1,1,1,300,300,300)] #iters for ADAM/BFGS
# maxIters = [(1,1,1,1,1,1,10),(1,1,1,3,3,3,3)] #iters for ADAM/BFGS

strategies = [NeuralPDE.QuadratureTraining(quadrature_alg = CubaCuhre(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.QuadratureTraining(quadrature_alg = HCubatureJL(), reltol = 1e-4, abstol = 1e-4, maxiters = 100, batch = 0),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLh(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLp(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.GridTraining(0.2),
              NeuralPDE.StochasticTraining(400 ; bcs_points= 50),
              NeuralPDE.QuasiRandomTraining(400 ; bcs_points= 50)]

strategies_short_name = ["CubaCuhre",
                         "HCubatureJL",
                         "CubatureJLh",
                         "CubatureJLp",
                         "GridTraining",
                         "StochasticTraining",
                         "QuasiRandomTraining"]

minimizers = [ADAM(0.005),BFGS()]
minimizers_short_name = ["ADAM","BFGS"]

# Run models
error_res = Dict()
domains = Dict()
params_res = Dict()  #to use same params for the next run
times = Dict()
losses_res = Dict()
```

## Solve

```julia
## Convergence

for min = 1:length(minimizers) # minimizer
    for strat = 1:length(strategies) # strategy
        # println(string(strategies_short_name[strat], "  ", minimizers_short_name[min]))
        res = allen_cahn(strategies[strat], minimizers[min], maxIters[min][strat])
        push!(error_res,  string(strat,min) => res[1])
        push!(params_res, string(strat,min) => res[2])
        push!(domains,    string(strat,min) => res[3])
        push!(times,      string(strat,min) => res[4])
        push!(losses_res, string(strat,min) => res[5])
    end
end
```
plot!(error, times["12"], error_res["12"], yaxis=:log10, label = string(strategies_short_name[1], " + " , minimizers_short_name[2])) plot!(error, times["22"], error_res["22"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[2])) plot!(error, times["32"], error_res["32"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[2])) plot!(error, times["42"], error_res["42"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[2])) plot!(error, times["52"], error_res["52"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[2])) plot!(error, times["62"], error_res["62"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[2])) plot!(error, times["72"], error_res["72"], yaxis=:log10, title = string("Allen Cahn convergence ADAM/LBFGS"), ylabel = "log(error)",xlabel = "t", label = string(strategies_short_name[7], " + " , minimizers_short_name[2])) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNErrorsVsTime/diffusion_et.jmd
docs
7542
---
title: Diffusion PDE Physics-Informed Neural Network (PINN) Loss Function Error vs Time Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE
using Integrals, IntegralsCubature, IntegralsCuba
using OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
using DelimitedFiles
using QuasiMonteCarlo
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function diffusion(strategy, minimizer, maxIters)

    ## DECLARATIONS
    @parameters x t
    @variables u(..)
    Dt = Differential(t)
    Dxx = Differential(x)^2

    eq = Dt(u(x,t)) - Dxx(u(x,t)) ~ -exp(-t) * (sin(pi * x) - pi^2 * sin(pi * x))

    bcs = [u(x,0) ~ sin(pi*x),
           u(-1,t) ~ 0.,
           u(1,t) ~ 0.]

    domains = [x ∈ Interval(-1.0,1.0),
               t ∈ Interval(0.0,1.0)]

    dx = 0.2; dt = 0.1
    xs,ts = [infimum(domain.domain):dx/10:supremum(domain.domain) for (dx,domain) in zip([dx,dt],domains)]

    indvars = [x,t]
    depvars = [u(x,t)]

    chain = Lux.Chain(Lux.Dense(2,10,tanh),Lux.Dense(10,10,tanh),Lux.Dense(10,1))

    losses = []
    error = []
    times = []

    dx_err = [0.2,0.1]

    error_strategy = GridTraining(dx_err)

    discretization_ = PhysicsInformedNN(chain,error_strategy)
    @named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
    prob_ = discretize(pde_system_, discretization_)

    function loss_function_(θ, p)
        return prob_.f.f(θ, nothing)
    end

    cb_ = function (p,l)
        deltaT_s = time_ns() #Start a clock when the callback begins; this will also measure the computation time of the uniform error
        ctime = time_ns() - startTime - timeCounter #This variable is the time to use for the time benchmark plot
        append!(times, ctime/10^9) #Conversion nanosec to seconds
        append!(losses, l)
        loss_ = loss_function_(p,nothing)
        append!(error, loss_)
        timeCounter = timeCounter + time_ns() - deltaT_s #timeCounter sums all delays due to the callback functions of the previous iterations
        return false
    end

    discretization = PhysicsInformedNN(chain,strategy)
    @named pde_system = PDESystem(eq,bcs,domains,indvars,depvars)
    prob = discretize(pde_system,discretization)

    timeCounter = 0.0
    startTime = time_ns() #Record the initial time (t=0) before starting the training
    res = Optimization.solve(prob, minimizer; callback=cb_, maxiters=maxIters)

    phi = discretization.phi

    params = res.minimizer

    # Model prediction
    domain = [x,t]

    u_predict = reshape([first(phi([x,t],res.minimizer)) for x in xs for t in ts],(length(xs),length(ts)))

    return [error, params, domain, times, u_predict, losses]
end
```
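Note that the forcing term in `eq` is manufactured so that the problem has the closed-form solution $u(x,t) = e^{-t}\sin(\pi x)$, which also satisfies the initial and boundary conditions above. A quick numerical spot-check (helper names are ours, not part of the benchmark):

```julia
u_t(x, t)  = -exp(-t)*sin(pi*x)        # ∂u/∂t of u = exp(-t)sin(πx)
u_xx(x, t) = -pi^2*exp(-t)*sin(pi*x)   # ∂²u/∂x²
rhs(x, t)  = -exp(-t)*(sin(pi*x) - pi^2*sin(pi*x))
maximum(abs(u_t(x,t) - u_xx(x,t) - rhs(x,t)) for x in -1:0.1:1, t in 0:0.1:1)  # ≈ 0
```

This means the `error` traces recorded here (grid-evaluated loss values) could, if desired, be complemented with a true error against this exact solution.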
```julia
maxIters = [(5000,5000,5000,5000,5000,5000),(300,300,300,300,300,300)] #iters for ADAM/LBFGS
# maxIters = [(5,5,5,5,5,5),(3,3,3,3,3,3)] #iters for ADAM/LBFGS

strategies = [#NeuralPDE.QuadratureTraining(quadrature_alg = CubaCuhre(), reltol = 1e-4, abstol = 1e-3, maxiters = 10, batch = 10),
              NeuralPDE.QuadratureTraining(quadrature_alg = HCubatureJL(), reltol = 1e-4, abstol=1e-5, maxiters=100, batch = 0),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLh(), reltol = 1e-4, abstol=1e-5, maxiters=100),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLp(), reltol = 1e-4, abstol=1e-5, maxiters=100),
              NeuralPDE.GridTraining([0.2,0.1]),
              NeuralPDE.StochasticTraining(400 ; bcs_points= 50),
              NeuralPDE.QuasiRandomTraining(400 ; bcs_points= 50)]

strategies_short_name = [#"CubaCuhre",
                         "HCubatureJL",
                         "CubatureJLh",
                         "CubatureJLp",
                         #"CubaVegas",
                         #"CubaSUAVE"]
                         "GridTraining",
                         "StochasticTraining",
                         "QuasiRandomTraining"]

minimizers = [ADAM(0.001),
              #BFGS()]
              LBFGS()]

minimizers_short_name = ["ADAM",
                         "LBFGS"]
                         # "BFGS"]

# Run models
error_res = Dict()
domains = Dict()
params_res = Dict()  #to use same params for the next run
times = Dict()
prediction = Dict()
losses_res = Dict()
```

## Solve

```julia
print("Starting run")
## Convergence

for min = 1:length(minimizers) # minimizer
    for strat = 1:length(strategies) # strategy
        # println(string(strategies_short_name[strat], "  ", minimizers_short_name[min]))
        res = diffusion(strategies[strat], minimizers[min], maxIters[min][strat])
        push!(error_res,  string(strat,min) => res[1])
        push!(params_res, string(strat,min) => res[2])
        push!(domains,    string(strat,min) => res[3])
        push!(times,      string(strat,min) => res[4])
        push!(prediction, string(strat,min) => res[5])
        push!(losses_res, string(strat,min) => res[6])
    end
end
```

## Results

```julia
current_label = string(strategies_short_name[1], " + " , minimizers_short_name[1])
error = Plots.plot(times["11"], error_res["11"], yaxis=:log10, label = current_label)#, xlims = (0,100))#legend = true)#, size=(1200,700))
plot!(error, times["21"], error_res["21"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[1]))
plot!(error, times["31"], error_res["31"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[1]))
plot!(error, times["41"], error_res["41"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[1]))
plot!(error, times["51"], error_res["51"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[1]))
plot!(error, times["61"], error_res["61"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[1]))

plot!(error, times["12"], error_res["12"], yaxis=:log10, label = string(strategies_short_name[1], " + " , minimizers_short_name[2]))
plot!(error, times["22"], error_res["22"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[2]))
plot!(error, times["32"], error_res["32"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[2]))
plot!(error, times["42"], error_res["42"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[2]))
plot!(error, times["52"], error_res["52"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[2]))
plot!(error, times["62"], error_res["62"], yaxis=:log10, title = string("Diffusion convergence ADAM/LBFGS"), ylabel = "log(error)", xlabel = "t", label = string(strategies_short_name[6], " + " , minimizers_short_name[2]))
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNErrorsVsTime/hamilton_jacobi_et.jmd
docs
9168
---
title: Hamilton-Jacobi PDE Physics-Informed Neural Network (PINN) Loss Function Error vs Time Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

```julia
using NeuralPDE
using Integrals, IntegralsCubature, IntegralsCuba
using OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
using DelimitedFiles
using QuasiMonteCarlo
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function hamilton_jacobi(strategy, minimizer, maxIters)

    ## DECLARATIONS
    @parameters t x1 x2 x3 x4
    @variables u(..)

    Dt = Differential(t)

    Dx1 = Differential(x1)
    Dx2 = Differential(x2)
    Dx3 = Differential(x3)
    Dx4 = Differential(x4)

    Dxx1 = Differential(x1)^2
    Dxx2 = Differential(x2)^2
    Dxx3 = Differential(x3)^2
    Dxx4 = Differential(x4)^2

    # Discretization
    tmax = 1.0
    x1width = 1.0
    x2width = 1.0
    x3width = 1.0
    x4width = 1.0

    tMeshNum = 10
    x1MeshNum = 10
    x2MeshNum = 10
    x3MeshNum = 10
    x4MeshNum = 10

    dt = tmax / tMeshNum
    dx1 = x1width / x1MeshNum
    dx2 = x2width / x2MeshNum
    dx3 = x3width / x3MeshNum
    dx4 = x4width / x4MeshNum

    domains = [t ∈ Interval(0.0, tmax),
               x1 ∈ Interval(0.0, x1width),
               x2 ∈ Interval(0.0, x2width),
               x3 ∈ Interval(0.0, x3width),
               x4 ∈ Interval(0.0, x4width)]

    ts = 0.0:dt:tmax
    x1s = 0.0:dx1:x1width
    x2s = 0.0:dx2:x2width
    x3s = 0.0:dx3:x3width
    x4s = 0.0:dx4:x4width

    λ = 1.0f0

    # Operators
    Δu = Dxx1(u(t, x1, x2, x3, x4)) + Dxx2(u(t, x1, x2, x3, x4)) + Dxx3(u(t, x1, x2, x3, x4)) + Dxx4(u(t, x1, x2, x3, x4)) # Laplacian
    ∇u = [Dx1(u(t, x1, x2, x3, x4)), Dx2(u(t, x1, x2, x3, x4)), Dx3(u(t, x1, x2, x3, x4)), Dx4(u(t, x1, x2, x3, x4))]

    # Equation
    eq = Dt(u(t, x1, x2, x3, x4)) + Δu - λ * sum(∇u .^ 2) ~ 0  #HAMILTON-JACOBI-BELLMAN EQUATION

    terminalCondition = log((1 + x1 * x1 + x2 * x2 + x3 * x3 + x4 * x4) / 2) # see PNAS paper

    bcs = [u(tmax, x1, x2, x3, x4) ~ terminalCondition]  #PNAS paper again

    ## NEURAL NETWORK
    n = 10 #neuron number
    chain = Lux.Chain(Lux.Dense(5, n, tanh), Lux.Dense(n, n, tanh), Lux.Dense(n, 1)) #Neural network from the Lux library

    indvars = [t, x1, x2, x3, x4] #physically independent variables
    depvars = [u(t, x1, x2, x3, x4)] #dependent (target) variable

    dim = length(domains)

    losses = []
    error = []
    times = []

    dx_err = 0.2

    error_strategy = GridTraining(dx_err)

    discretization_ = PhysicsInformedNN(chain, error_strategy)
    @named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
    prob_ = discretize(pde_system_, discretization_)

    function loss_function_(θ, p)
        return prob_.f.f(θ, nothing)
    end

    cb_ = function (p, l)
        deltaT_s = time_ns() #Start a clock when the callback begins; this will also measure the computation time of the uniform error
        ctime = time_ns() - startTime - timeCounter #This variable is the time to use for the time benchmark plot
        append!(times, ctime / 10^9) #Conversion nanosec to seconds
        append!(losses, l)
        loss_ = loss_function_(p, nothing)
        append!(error, loss_)
        timeCounter = timeCounter + time_ns() - deltaT_s #timeCounter sums all delays due to the callback functions of the previous iterations
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)

    discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
    prob = NeuralPDE.discretize(pde_system, discretization)

    timeCounter = 0.0
    startTime = time_ns() #Record the initial time (t=0) before starting the training
    res = Optimization.solve(prob, minimizer, callback=cb_, maxiters=maxIters)

    phi = discretization.phi

    params = res.minimizer

    # Model prediction
    domain = [ts, x1s, x2s, x3s, x4s]

    u_predict = [reshape([first(phi([t, x1, x2, x3, x4], res.minimizer)) for x1 in x1s for x2 in x2s for x3 in x3s for x4 in x4s], (length(x1s), length(x2s), length(x3s), length(x4s))) for t in ts]  #matrix of model's prediction

    return [error, params, domain, times, losses]
end

maxIters = [(1,1,1,1000,1000,1000,1000),(1,1,1,300,300,300,300)] #iters for ADAM/LBFGS
# maxIters = [(1,1,1,1,1,2,2),(1,1,1,3,3,3,3)] #iters for ADAM/LBFGS

strategies = [NeuralPDE.QuadratureTraining(quadrature_alg = CubaCuhre(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.QuadratureTraining(quadrature_alg = HCubatureJL(), reltol = 1e-4, abstol = 1e-4, maxiters = 100, batch = 0),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLh(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLp(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.GridTraining(0.2),
              NeuralPDE.StochasticTraining(400 ; bcs_points= 50),
              NeuralPDE.QuasiRandomTraining(400 ; bcs_points= 50)]

strategies_short_name = ["CubaCuhre",
                         "HCubatureJL",
                         "CubatureJLh",
                         "CubatureJLp",
                         "GridTraining",
                         "StochasticTraining",
                         "QuasiRandomTraining"]

minimizers = [ADAM(0.005),
              #BFGS()]
              LBFGS()]

minimizers_short_name = ["ADAM",
                         "LBFGS"]
                         #"BFGS"]

# Run models
error_res = Dict()
domains = Dict()
params_res = Dict()  #to use same params for the next run
times = Dict()
losses_res = Dict()
```
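For context, this is the Hamilton-Jacobi-Bellman example from the PNAS paper referenced in the comments: with terminal condition $u(T, x) = g(x) = \log\left((1 + \lVert x \rVert^2)/2\right)$, the equation $\partial_t u + \Delta u - \lambda \lVert \nabla u \rVert^2 = 0$ admits, via a Cole-Hopf-type transformation, the stochastic representation

$$u(t, x) = -\frac{1}{\lambda} \ln\left(\mathbb{E}\left[\exp\left(-\lambda\, g\big(x + \sqrt{2}\, W_{T - t}\big)\right)\right]\right),$$

which can be evaluated by Monte Carlo to obtain reference values. We note this only as background; the script below benchmarks the grid-evaluated loss, not the error against this representation.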
## Solve

```julia
print("Starting run")
## Convergence

for min = 1:length(minimizers) # minimizer
    for strat = 1:length(strategies) # strategy
        # println(string(strategies_short_name[strat], "  ", minimizers_short_name[min]))
        res = hamilton_jacobi(strategies[strat], minimizers[min], maxIters[min][strat])
        push!(error_res,  string(strat,min) => res[1])
        push!(params_res, string(strat,min) => res[2])
        push!(domains,    string(strat,min) => res[3])
        push!(times,      string(strat,min) => res[4])
        push!(losses_res, string(strat,min) => res[5])
    end
end
```
## Results

```julia
#Plotting the first strategy with the first minimizer out from the loop to initialize the canvas
current_label = string(strategies_short_name[1], " + " , minimizers_short_name[1])
error = Plots.plot(times["11"], error_res["11"], yaxis=:log10, label = current_label)#, xlims = (0,10))#legend = true)#, size=(1200,700))
plot!(error, times["21"], error_res["21"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[1]))
plot!(error, times["31"], error_res["31"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[1]))
plot!(error, times["41"], error_res["41"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[1]))
plot!(error, times["51"], error_res["51"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[1]))
plot!(error, times["61"], error_res["61"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[1]))
plot!(error, times["71"], error_res["71"], yaxis=:log10, label = string(strategies_short_name[7], " + " , minimizers_short_name[1]))

plot!(error, times["12"], error_res["12"], yaxis=:log10, label = string(strategies_short_name[1], " + " , minimizers_short_name[2]))
plot!(error, times["22"], error_res["22"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[2]))
plot!(error, times["32"], error_res["32"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[2]))
plot!(error, times["42"], error_res["42"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[2]))
plot!(error, times["52"], error_res["52"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[2]))
plot!(error, times["62"], error_res["62"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[2]))
plot!(error, times["72"], error_res["72"], yaxis=:log10, title = string("Hamilton Jacobi convergence ADAM/LBFGS"), ylabel = "log(error)", xlabel = "t", label = string(strategies_short_name[7], " + " , minimizers_short_name[2]))
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNErrorsVsTime/level_set_et.jmd
docs
8939
---
title: Level Set PDE Physics-Informed Neural Network (PINN) Loss Function Error vs Time Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE
using Integrals, IntegralsCubature, IntegralsCuba
using OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
using DelimitedFiles
using QuasiMonteCarlo
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function level_set(strategy, minimizer, maxIters)

    ## DECLARATIONS
    @parameters t x y
    @variables u(..)

    Dt = Differential(t)
    Dx = Differential(x)
    Dy = Differential(y)

    # Discretization
    xwidth = 1.0 #ft
    ywidth = 1.0
    tmax = 1.0 #min
    xScale = 1.0
    yScale = 1.0
    xMeshNum = 10
    yMeshNum = 10
    tMeshNum = 10

    dx = xwidth / xMeshNum
    dy = ywidth / yMeshNum
    dt = tmax / tMeshNum

    domains = [t ∈ Interval(0.0, tmax),
               x ∈ Interval(0.0, xwidth),
               y ∈ Interval(0.0, ywidth)]

    xs = 0.0:dx:xwidth
    ys = 0.0:dy:ywidth
    ts = 0.0:dt:tmax

    # Definitions
    x0 = 0.5
    y0 = 0.5
    Uwind = [0.0, 2.0]  #wind vector

    # Operators
    gn = (Dx(u(t, x, y))^2 + Dy(u(t, x, y))^2)^0.5  #gradient's norm
    ∇u = [Dx(u(t, x, y)), Dy(u(t, x, y))]
    n = ∇u / gn  #unit normal vector

    #U = ((Uwind[1]*n[1] + Uwind[2]*n[2])^2)^0.5 #inner product between wind and normal vector

    R0 = 0.112471
    ϕw = 0#0.156927*max((0.44*U)^0.04086,1.447799)
    ϕs = 0
    S = R0 * (1 + ϕw + ϕs)

    # Equation
    eq = Dt(u(t, x, y)) + S * gn ~ 0  #LEVEL SET EQUATION

    initialCondition = (xScale * (x - x0)^2 + (yScale * (y - y0)^2))^0.5 - 0.2  #Distance from ignition

    bcs = [u(0, x, y) ~ initialCondition]  #from literature

    ## NEURAL NETWORK
    n = 10 #neuron number
    chain = Lux.Chain(Lux.Dense(3, n, tanh), Lux.Dense(n, n, tanh), Lux.Dense(n, 1)) #Neural network from the Lux library

    indvars = [t, x, y] #physically independent variables
    depvars = [u(t, x, y)] #dependent (target) variable

    dim = length(domains)

    losses = []
    error = []
    times = []

    dx_err = 0.1

    error_strategy = GridTraining(dx_err)

    discretization_ = PhysicsInformedNN(chain, error_strategy)
    @named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
    prob_ = discretize(pde_system_, discretization_)

    function loss_function_(θ, p)
        return prob_.f.f(θ, nothing)
    end

    cb_ = function (p, l)
        deltaT_s = time_ns() #Start a clock when the callback begins; this will also measure the computation time of the uniform error
        ctime = time_ns() - startTime - timeCounter #This variable is the time to use for the time benchmark plot
        append!(times, ctime / 10^9) #Conversion nanosec to seconds
        append!(losses, l)
        loss_ = loss_function_(p, nothing)
        append!(error, loss_)
        timeCounter = timeCounter + time_ns() - deltaT_s #timeCounter sums all delays due to the callback functions of the previous iterations
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)

    discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
    prob = NeuralPDE.discretize(pde_system, discretization)

    timeCounter = 0.0
    startTime = time_ns() #Record the initial time (t=0) before starting the training
    res = Optimization.solve(prob, minimizer, callback=cb_, maxiters=maxIters)

    phi = discretization.phi

    params = res.minimizer

    # Model prediction
    domain = [ts, xs, ys]

    u_predict = [reshape([first(phi([t, x, y], res.minimizer)) for x in xs for y in ys], (length(xs), length(ys))) for t in ts]  #matrix of model's prediction

    return [error, params, domain, times, losses]  #add numeric solution
end

#level_set(NeuralPDE.QuadratureTraining(algorithm = CubaCuhre(), reltol = 1e-8, abstol = 1e-8, maxiters = 100), ADAM(0.01), 500)

maxIters = [(1,1,1,1000,1000,1000,1000),(1,1,1,500,500,500,500)] #iters for ADAM/LBFGS
# maxIters = [(1,1,1,2,2,2,2),(1,1,1,2,2,2,2)] #iters for ADAM/LBFGS

strategies = [NeuralPDE.QuadratureTraining(quadrature_alg = CubaCuhre(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.QuadratureTraining(quadrature_alg = HCubatureJL(), reltol = 1e-4, abstol = 1e-4, maxiters = 100, batch = 0),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLh(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLp(), reltol = 1e-4, abstol = 1e-4, maxiters = 100),
              NeuralPDE.GridTraining(0.1),
              NeuralPDE.StochasticTraining(400 ; bcs_points= 50),
              NeuralPDE.QuasiRandomTraining(400 ; bcs_points= 50)]

strategies_short_name = ["CubaCuhre",
                         "HCubatureJL",
                         "CubatureJLh",
                         "CubatureJLp",
                         "GridTraining",
                         "StochasticTraining",
                         "QuasiRandomTraining"]

minimizers = [ADAM(0.005),
              #BFGS()]
              LBFGS()]

minimizers_short_name = ["ADAM",
                         "LBFGS"]
                         # "BFGS"]

# Run models
prediction_res = Dict()
error_res = Dict()
domains = Dict()
params_res = Dict()  #to use same params for the next run
times = Dict()
losses_res = Dict()
```
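Since $\phi_w = \phi_s = 0$ here, the spread rate reduces to the constant $S = R_0$, and the problem has a simple exact solution away from the ignition point: the signed-distance initial condition is simply translated in time, $u(t,x,y) = \sqrt{(x - x_0)^2 + (y - y_0)^2} - 0.2 - S t$, so the fire front (zero level set) is a circle of radius $0.2 + R_0 t$ expanding from $(x_0, y_0)$. A finite-difference spot-check of the PDE residual (helper names are ours):

```julia
let x0 = 0.5, y0 = 0.5, S = 0.112471, h = 1e-6
    u(t, x, y) = sqrt((x - x0)^2 + (y - y0)^2) - 0.2 - S*t
    # residual of u_t + S*|∇u|, valid away from the center (x0, y0)
    ut(t, x, y) = (u(t + h, x, y) - u(t - h, x, y)) / (2h)
    ux(t, x, y) = (u(t, x + h, y) - u(t, x - h, y)) / (2h)
    uy(t, x, y) = (u(t, x, y + h) - u(t, x, y - h)) / (2h)
    ut(0.3, 0.9, 0.2) + S*hypot(ux(0.3, 0.9, 0.2), uy(0.3, 0.9, 0.2))  # ≈ 0
end
```

This gives a cheap reference against which the learned `u_predict` could be compared.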
## Solve

```julia
## Convergence

for min = 1:length(minimizers) # minimizer
    for strat = 1:length(strategies) # strategy
        # println(string(strategies_short_name[strat], "  ", minimizers_short_name[min]))
        res = level_set(strategies[strat], minimizers[min], maxIters[min][strat])
        push!(error_res,  string(strat,min) => res[1])
        push!(params_res, string(strat,min) => res[2])
        push!(domains,    string(strat,min) => res[3])
        push!(times,      string(strat,min) => res[4])
        push!(losses_res, string(strat,min) => res[5])
    end
end
```
## Results

```julia
#Plotting the first strategy with the first minimizer out from the loop to initialize the canvas
current_label = string(strategies_short_name[1], " + " , minimizers_short_name[1])
error = Plots.plot(times["11"], error_res["11"], yaxis=:log10, label = current_label)# xlims = (0,10))#legend = true)#, size=(1200,700))
plot!(error, times["21"], error_res["21"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[1]))
plot!(error, times["31"], error_res["31"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[1]))
plot!(error, times["41"], error_res["41"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[1]))
plot!(error, times["51"], error_res["51"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[1]))
plot!(error, times["61"], error_res["61"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[1]))
plot!(error, times["71"], error_res["71"], yaxis=:log10, label = string(strategies_short_name[7], " + " , minimizers_short_name[1]))

plot!(error, times["12"], error_res["12"], yaxis=:log10, label = string(strategies_short_name[1], " + " , minimizers_short_name[2]))
plot!(error, times["22"], error_res["22"], yaxis=:log10, label = string(strategies_short_name[2], " + " , minimizers_short_name[2]))
plot!(error, times["32"], error_res["32"], yaxis=:log10, label = string(strategies_short_name[3], " + " , minimizers_short_name[2]))
plot!(error, times["42"], error_res["42"], yaxis=:log10, label = string(strategies_short_name[4], " + " , minimizers_short_name[2]))
plot!(error, times["52"], error_res["52"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[2]))
plot!(error, times["62"], error_res["62"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[2]))
plot!(error, times["72"], error_res["72"], yaxis=:log10, title = string("Level Set convergence ADAM/LBFGS"), ylabel = "log(error)", xlabel = "t", label = string(strategies_short_name[7], " + " , minimizers_short_name[2]))
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNErrorsVsTime/nernst_planck_et.jmd
docs
8627
---
title: Nernst-Planck PDE Physics-Informed Neural Network (PINN) Loss Function Error vs Time Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE
using Integrals, IntegralsCubature, IntegralsCuba
using OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
using DelimitedFiles
using QuasiMonteCarlo
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function nernst_planck(strategy, minimizer, maxIters)

    ## DECLARATIONS
    @parameters t x y z
    @variables c(..)

    Dt = Differential(t)
    Dx = Differential(x)
    Dy = Differential(y)
    Dz = Differential(z)
    Dxx = Differential(x)^2
    Dyy = Differential(y)^2
    Dzz = Differential(z)^2

    ## DOMAINS AND OPERATORS

    # Discretization
    xwidth = 1.0
    ywidth = 1.0
    zwidth = 1.0
    tmax = 1.0
    xMeshNum = 10
    yMeshNum = 10
    zMeshNum = 10
    tMeshNum = 10

    dx = xwidth/xMeshNum
    dy = ywidth/yMeshNum
    dz = zwidth/zMeshNum
    dt = tmax/tMeshNum

    domains = [t ∈ Interval(0.0,tmax),
               x ∈ Interval(0.0,xwidth),
               y ∈ Interval(0.0,ywidth),
               z ∈ Interval(0.0,zwidth)]

    xs = 0.0 : dx : xwidth
    ys = 0.0 : dy : ywidth
    zs = 0.0 : dz : zwidth
    ts = 0.0 : dt : tmax

    # Constants
    D = 1  #dummy
    ux = 10 #dummy
    uy = 10 #dummy
    uz = 10 #dummy

    # Operators
    div = - D*(Dxx(c(t,x,y,z)) + Dyy(c(t,x,y,z)) + Dzz(c(t,x,y,z))) +
          (ux*Dx(c(t,x,y,z)) + uy*Dy(c(t,x,y,z)) + uz*Dz(c(t,x,y,z)))

    # Equation
    eq = Dt(c(t,x,y,z)) + div ~ 0  #NERNST-PLANCK EQUATION

    # Initial condition
    bcs = [c(0,x,y,z) ~ 0]

    ## NEURAL NETWORK
    n = 16 #neuron number
    chain = Lux.Chain(Lux.Dense(4,n,tanh),Lux.Dense(n,n,tanh),Lux.Dense(n,1)) #Neural network from the Lux library

    indvars = [t,x,y,z] #independent variables
    depvars = [c(t,x,y,z)] #dependent (target) variable

    dim = length(domains)

    losses = []
    error = []
    times = []

    dx_err = 0.2

    error_strategy = GridTraining(dx_err)

    discretization_ = PhysicsInformedNN(chain, error_strategy)
    @named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
    prob_ = discretize(pde_system_, discretization_)

    function loss_function_(θ, p)
        return prob_.f.f(θ, nothing)
    end

    cb_ = function (p,l)
        deltaT_s = time_ns() #Start a clock when the callback begins; this will also measure the computation time of the uniform error
        ctime = time_ns() - startTime - timeCounter #This variable is the time to use for the time benchmark plot
        append!(times, ctime/10^9) #Conversion nanosec to seconds
        append!(losses, l)
        loss_ = loss_function_(p,nothing)
        append!(error, loss_)
        timeCounter = timeCounter + time_ns() - deltaT_s #timeCounter sums all delays due to the callback functions of the previous iterations
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)

    discretization = NeuralPDE.PhysicsInformedNN(chain,strategy)
    prob = NeuralPDE.discretize(pde_system,discretization)

    timeCounter = 0.0
    startTime = time_ns() #Record the initial time (t=0) before starting the training
    res = Optimization.solve(prob, minimizer, callback = cb_, maxiters=maxIters)

    phi = discretization.phi

    params = res.minimizer

    # Model prediction
    domain = [ts, xs, ys, zs]

    u_predict = [reshape([phi([t,x,y,z],res.minimizer) for x in xs for y in ys for z in zs], (length(xs),length(ys),length(zs))) for t in ts]

    return [error, params, domain, times]
end

maxIters = [(1,1,1,1000,1000,1000,1000),(1,1,1,300,300,300,300)] #iters for ADAM/LBFGS
# maxIters = [(1,1,1,10,10,10,10),(1,1,1,3,3,3,3)] #iters for ADAM/LBFGS

strategies = [NeuralPDE.QuadratureTraining(quadrature_alg = CubaCuhre(), reltol = 1e-4, abstol = 1e-4, maxiters = 50),
              NeuralPDE.QuadratureTraining(quadrature_alg = HCubatureJL(), reltol = 1e-4, abstol = 1e-4, maxiters = 50, batch = 0),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLh(), reltol = 1e-4, abstol = 1e-4, maxiters = 50),
              NeuralPDE.QuadratureTraining(quadrature_alg = CubatureJLp(), reltol = 1e-4, abstol = 1e-4, maxiters = 50),
              NeuralPDE.GridTraining(0.2),
              NeuralPDE.StochasticTraining(400 ; bcs_points= 50),
              NeuralPDE.QuasiRandomTraining(400 ; bcs_points= 50)]

strategies_short_name = ["CubaCuhre",
                         "HCubatureJL",
                         "CubatureJLh",
                         "CubatureJLp",
                         "GridTraining",
                         "StochasticTraining",
                         "QuasiRandomTraining"]

minimizers = [ADAM(0.005),
              #BFGS()]
              LBFGS()]

minimizers_short_name = ["ADAM",
                         "LBFGS"]
                         # "BFGS"]

# Run models
error_res = Dict()
domains = Dict()
params_res = Dict()  #to use same params for the next run
times = Dict()
```
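As in the sibling benchmarks, the recorded `error` is not a true PDE error: `loss_function_` re-evaluates the physics-informed loss of the current parameters on a fixed `GridTraining` discretization, giving a sampling-strategy-independent proxy so that runs trained with different strategies are judged on common ground. The idea in isolation (hypothetical names, independent of NeuralPDE internals):

```julia
# Judge candidate parameters on one fixed evaluation grid,
# no matter which sampling strategy produced them.
residual(θ, x) = θ[1]*x^2 + θ[2] - cos(x)   # stand-in for a PDE residual at x
eval_grid = 0:0.1:1                          # fixed, GridTraining-style grid

proxy_error(θ) = sum(abs2, residual(θ, x) for x in eval_grid) / length(eval_grid)

θ_a, θ_b = [0.0, 1.0], [-0.5, 1.0]
proxy_error(θ_a), proxy_error(θ_b)           # comparable across strategies
```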
## Solve

```julia
## Convergence

for strat = 1:length(strategies) # strategy
    for min = 1:length(minimizers) # minimizer
        # println(string(strategies_short_name[strat], "  ", minimizers_short_name[min]))
        res = nernst_planck(strategies[strat], minimizers[min], maxIters[min][strat])
        push!(error_res,  string(strat,min) => res[1])
        push!(params_res, string(strat,min) => res[2])
        push!(domains,    string(strat,min) => res[3])
        push!(times,      string(strat,min) => res[4])
    end
end
```
error_res["52"], yaxis=:log10, label = string(strategies_short_name[5], " + " , minimizers_short_name[2])) plot!(error, times["62"], error_res["62"], yaxis=:log10, label = string(strategies_short_name[6], " + " , minimizers_short_name[2])) plot!(error, times["72"], error_res["72"], yaxis=:log10, title = string("Nernst Planck convergence ADAM/LBFGS"), ylabel = "log(error)", xlabel = "t",label = string(strategies_short_name[7], " + " , minimizers_short_name[2])) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNOptimizers/1d_diffusion.jmd
docs
4109
---
title: Diffusion Equation Physics-Informed Neural Network (PINN) Optimizer Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE, OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function solve(opt)
    strategy = QuadratureTraining()

    @parameters x t
    @variables u(..)
    Dt = Differential(t)
    Dxx = Differential(x)^2

    eq = Dt(u(x,t)) - Dxx(u(x,t)) ~ -exp(-t) * (sin(pi * x) - pi^2 * sin(pi * x))

    bcs = [u(x,0) ~ sin(pi*x),
           u(-1,t) ~ 0.,
           u(1,t) ~ 0.]

    domains = [x ∈ Interval(-1.0,1.0),
               t ∈ Interval(0.0,1.0)]

    chain = Lux.Chain(Lux.Dense(2,18,tanh),Lux.Dense(18,18,tanh),Lux.Dense(18,1))

    discretization = PhysicsInformedNN(chain,strategy)

    indvars = [x, t] #physically independent variables
    depvars = [u(x,t)] #dependent (target) variable

    loss = []
    initial_time = nothing

    times = []

    cb_ = function (p,l)
        if initial_time == nothing
            initial_time = time()
        end
        push!(times, time() - initial_time)
        #println("Current loss for $opt is: $l")
        push!(loss, l)
        # println(l )
        # println(time() - initial_time)
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)
    prob = discretize(pde_system, discretization)

    if opt == "both"
        res = Optimization.solve(prob, ADAM(); callback = cb_, maxiters=50)
        prob = remake(prob,u0=res.minimizer)
        res = Optimization.solve(prob, BFGS(); callback = cb_, maxiters=150)
    else
        res = Optimization.solve(prob, opt; callback = cb_, maxiters=200)
    end

    times[1] = 0.01 # replace the ≈0 first timestamp so it can be shown on the log-scale time axis

    return loss, times #add numeric solution
end
```

```julia
opt1 = ADAM()
opt2 = ADAM(0.005)
opt3 = ADAM(0.05)
opt4 = RMSProp()
opt5 = RMSProp(0.005)
opt6 = RMSProp(0.05)
opt7 = OptimizationOptimJL.BFGS()
opt8 = OptimizationOptimJL.LBFGS()
```

## Solve

```julia
loss_1, times_1 = solve(opt1)
loss_2, times_2 = solve(opt2)
loss_3, times_3 = solve(opt3)
loss_4, times_4 = solve(opt4)
loss_5, times_5 = solve(opt5)
loss_6, times_6 = solve(opt6)
loss_7, times_7 = solve(opt7)
loss_8, times_8 = solve(opt8)
loss_9, times_9 = solve("both")
```
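One caveat about this comparison: each `solve(opt)` call rebuilds and retrains the network, so the differences between the curves below mix optimizer behavior with initialization variance. If one wanted identical starting weights per run, seeding the RNG before each call is the obvious sketch, under the assumption (which may not hold across NeuralPDE versions) that the initial parameters are drawn from the global RNG:

```julia
using Random

Random.seed!(1234)   # pin the random initialization (assumes the global RNG is used)
loss_a, times_a = solve(opt1)
Random.seed!(1234)   # same seed → same initial weights for the second optimizer
loss_b, times_b = solve(opt2)
```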
"#A6ED18" "#980000" "#FF912B"]) ``` ```julia @show loss_1[end], loss_2[end], loss_3[end], loss_4[end], loss_5[end], loss_6[end], loss_7[end], loss_8[end], loss_9[end] ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNOptimizers/1d_poisson_nernst_planck.jmd
docs
6717
---
title: Nernst-Planck Equation Physics-Informed Neural Network (PINN) Optimizer Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE, OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
import ModelingToolkit: Interval, infimum, supremum
```

```julia
t_ref = 1.0       # s
x_ref = 0.38      # dm
C_ref = 0.16      # mol/dm^3
Phi_ref = 1.0     # V

epsilon = 78.5    # non-dim (relative permittivity)
F = 96485.3415    # A s mol^-1
R = 831.0         # kg dm^2 s^-2 K^-1 mol^-1
T = 298.0         # K

z_Na = 1.0        # non-dim
z_Cl = -1.0       # non-dim
D_Na = 0.89e-7    # dm^2 s^-1
D_Cl = 1.36e-7    # dm^2 s^-1

u_Na = D_Na * abs(z_Na) * F / (R * T)
u_Cl = D_Cl * abs(z_Cl) * F / (R * T)

t_max = 0.01 / t_ref    # non-dim
x_max = 0.38 / x_ref    # non-dim
Na_0 = 0.16 / C_ref     # non-dim
Cl_0 = 0.16 / C_ref     # non-dim
Phi_0 = 4.0 / Phi_ref   # non-dim

Na_anode = 0.0            # non-dim
Na_cathode = 2.0 * Na_0   # non-dim
Cl_anode = 1.37 * Cl_0    # non-dim
Cl_cathode = 0.0          # non-dim

Pe_Na = x_ref^2 / ( t_ref * D_Na )  # non-dim
Pe_Cl = x_ref^2 / ( t_ref * D_Cl )  # non-dim

M_Na = x_ref^2 / ( t_ref * Phi_ref * u_Na )  # non-dim
M_Cl = x_ref^2 / ( t_ref * Phi_ref * u_Cl )  # non-dim

Po_1 = (epsilon * Phi_ref) / (F * x_ref * C_ref)  # non-dim

dx = 0.01  # non-dim
```
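The scalings above collect into standard dimensionless groups: with reference scales $x_{\mathrm{ref}}$, $t_{\mathrm{ref}}$, $C_{\mathrm{ref}}$, $\Phi_{\mathrm{ref}}$, the code defines, for each ion $i \in \{\mathrm{Na}, \mathrm{Cl}\}$,

$$\mathrm{Pe}_i = \frac{x_{\mathrm{ref}}^2}{t_{\mathrm{ref}} D_i}, \qquad M_i = \frac{x_{\mathrm{ref}}^2}{t_{\mathrm{ref}}\, \Phi_{\mathrm{ref}}\, u_i}, \qquad u_i = \frac{D_i \lvert z_i \rvert F}{R T},$$

so $1/\mathrm{Pe}_i$ and $1/M_i$ weight the diffusion and electromigration terms in the residuals below. (This just restates the code's definitions for readability.)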
```julia
function solve(opt)
    strategy = QuadratureTraining()

    @parameters t,x
    @variables Phi(..),Na(..),Cl(..)
    Dt = Differential(t)
    Dx = Differential(x)
    Dxx = Differential(x)^2

    eqs = [
            ( Dxx(Phi(t,x)) ~ ( 1.0 / Po_1 ) *
                              ( z_Na * Na(t,x) + z_Cl * Cl(t,x) ) )
            ,
            ( Dt(Na(t,x)) ~ ( 1.0 / Pe_Na ) * Dxx(Na(t,x))
                          + z_Na / ( abs(z_Na) * M_Na )
                          * ( Dx(Na(t,x)) * Dx(Phi(t,x)) + Na(t,x) * Dxx(Phi(t,x)) ) )
            ,
            ( Dt(Cl(t,x)) ~ ( 1.0 / Pe_Cl ) * Dxx(Cl(t,x))
                          + z_Cl / ( abs(z_Cl) * M_Cl )
                          * ( Dx(Cl(t,x)) * Dx(Phi(t,x)) + Cl(t,x) * Dxx(Phi(t,x)) ) )
          ]

    bcs = [
            Phi(t,0.0) ~ Phi_0,
            Phi(t,x_max) ~ 0.0
            ,
            Na(0.0,x) ~ Na_0,
            Na(t,0.0) ~ Na_anode,
            Na(t,x_max) ~ Na_cathode
            ,
            Cl(0.0,x) ~ Cl_0,
            Cl(t,0.0) ~ Cl_anode,
            Cl(t,x_max) ~ Cl_cathode
          ]

    # Space and time domains ###################################################
    domains = [
                t ∈ Interval(0.0, t_max),
                x ∈ Interval(0.0, x_max)
              ]

    # Neural network, Discretization ###########################################
    dim = length(domains)
    output = length(eqs)
    neurons = 16
    chain1 = Lux.Chain( Lux.Dense(dim, neurons, tanh),
                        Lux.Dense(neurons, neurons, tanh),
                        Lux.Dense(neurons, neurons, tanh),
                        Lux.Dense(neurons, 1))
    chain2 = Lux.Chain( Lux.Dense(dim, neurons, tanh),
                        Lux.Dense(neurons, neurons, tanh),
                        Lux.Dense(neurons, neurons, tanh),
                        Lux.Dense(neurons, 1))
    chain3 = Lux.Chain( Lux.Dense(dim, neurons, tanh),
                        Lux.Dense(neurons, neurons, tanh),
                        Lux.Dense(neurons, neurons, tanh),
                        Lux.Dense(neurons, 1))

    discretization = PhysicsInformedNN([chain1, chain2, chain3], strategy)

    indvars = [t, x] #physically independent variables
    depvars = [Phi, Na, Cl] #dependent (target) variable

    loss = []
    initial_time = 0

    times = []

    cb = function (p,l)
        if initial_time == 0
            initial_time = time()
        end
        push!(times, time() - initial_time)
        #println("Current loss for $opt is: $l")
        push!(loss, l)
        return false
    end

    @named pde_system = PDESystem(eqs, bcs, domains, indvars, depvars)
    prob = discretize(pde_system, discretization)

    if opt == "both"
        res = Optimization.solve(prob, ADAM(); callback = cb, maxiters=50)
        prob = remake(prob,u0=res.minimizer)
        res = Optimization.solve(prob, BFGS(); callback = cb, maxiters=150)
    else
        res = Optimization.solve(prob, opt; callback = cb, maxiters=200)
    end

    times[1] = 0.001 # replace the ≈0 first timestamp so it can be shown on the log-scale time axis

    return loss, times #add numeric solution
end
```

```julia
opt1 = ADAM()
opt2 = ADAM(0.005)
opt3 = ADAM(0.05)
opt4 = RMSProp()
opt5 = RMSProp(0.005)
opt6 = RMSProp(0.05)
opt7 = OptimizationOptimJL.BFGS()
opt8 = OptimizationOptimJL.LBFGS()
```

## Solve

```julia
loss_1, times_1 = solve(opt1)
loss_2, times_2 = solve(opt2)
loss_3, times_3 = solve(opt3)
loss_4, times_4 = solve(opt4)
loss_5, times_5 = solve(opt5)
loss_6, times_6 = solve(opt6)
loss_7, times_7 = solve(opt7)
loss_8, times_8 = solve(opt8)
loss_9, times_9 = solve("both")
```

## Results

```julia
p = plot([times_1, times_2, times_3, times_4, times_5, times_6, times_7, times_8, times_9],
         [loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9],
         xlabel="time (s)", ylabel="loss", xscale=:log10, yscale=:log10,
         labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"],
         legend=:bottomleft,
         linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"])
```

```julia
p = plot([loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9[2:end]],
         xlabel="iterations", ylabel="loss", yscale=:log10,
         labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"],
         legend=:bottomleft,
         linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"])
```
```julia
@show loss_1[end], loss_2[end], loss_3[end], loss_4[end], loss_5[end], loss_6[end], loss_7[end], loss_8[end], loss_9[end]
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNOptimizers/allen_cahn.jmd
docs
5197
---
title: Allen-Cahn Equation Physics-Informed Neural Network (PINN) Optimizer Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE, OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function solve(opt)
    strategy = QuadratureTraining()

    @parameters t x1 x2 x3 x4
    @variables u(..)

    Dt = Differential(t)

    Dxx1 = Differential(x1)^2
    Dxx2 = Differential(x2)^2
    Dxx3 = Differential(x3)^2
    Dxx4 = Differential(x4)^2

    # Discretization
    tmax = 1.0
    x1width = 1.0
    x2width = 1.0
    x3width = 1.0
    x4width = 1.0

    tMeshNum = 10
    x1MeshNum = 10
    x2MeshNum = 10
    x3MeshNum = 10
    x4MeshNum = 10

    dt = tmax/tMeshNum
    dx1 = x1width/x1MeshNum
    dx2 = x2width/x2MeshNum
    dx3 = x3width/x3MeshNum
    dx4 = x4width/x4MeshNum

    domains = [t ∈ Interval(0.0,tmax),
               x1 ∈ Interval(0.0,x1width),
               x2 ∈ Interval(0.0,x2width),
               x3 ∈ Interval(0.0,x3width),
               x4 ∈ Interval(0.0,x4width)]

    ts = 0.0 : dt : tmax
    x1s = 0.0 : dx1 : x1width
    x2s = 0.0 : dx2 : x2width
    x3s = 0.0 : dx3 : x3width
    x4s = 0.0 : dx4 : x4width

    # Operators
    Δu = Dxx1(u(t,x1,x2,x3,x4)) + Dxx2(u(t,x1,x2,x3,x4)) + Dxx3(u(t,x1,x2,x3,x4)) + Dxx4(u(t,x1,x2,x3,x4)) # Laplacian

    # Equation
    eq = Dt(u(t,x1,x2,x3,x4)) - Δu - u(t,x1,x2,x3,x4) + u(t,x1,x2,x3,x4)*u(t,x1,x2,x3,x4)*u(t,x1,x2,x3,x4) ~ 0  #ALLEN CAHN EQUATION

    initialCondition = 1/(2 + 0.4 * (x1*x1 + x2*x2 + x3*x3 + x4*x4)) # see PNAS paper

    bcs = [u(0,x1,x2,x3,x4) ~ initialCondition]  #from literature

    ## NEURAL NETWORK
    n = 20 #neuron number
    chain = Lux.Chain(Lux.Dense(5,n,tanh),Lux.Dense(n,n,tanh),Lux.Dense(n,1)) #Neural network from the Lux library

    discretization = PhysicsInformedNN(chain, strategy)

    indvars = [t,x1,x2,x3,x4] #physically independent variables
    depvars = [u] #dependent (target) variable

    loss = []
    initial_time = 0

    times = []

    cb = function (p,l)
        if initial_time == 0
            initial_time = time()
        end
        push!(times, time() - initial_time)
        #println("Current loss for $opt is: $l")
        push!(loss, l)
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)
    prob = discretize(pde_system, discretization)

    if opt == "both"
        res = Optimization.solve(prob, ADAM(); callback = cb, maxiters=50)
        prob = remake(prob,u0=res.minimizer)
        res = Optimization.solve(prob, BFGS(); callback = cb, maxiters=150)
    else
        res = Optimization.solve(prob, opt; callback = cb, maxiters=200)
    end

    times[1] = 0.001 # replace the ≈0 first timestamp so it can be shown on the log-scale time axis

    return loss, times #add numeric solution
end
```

```julia
opt1 = ADAM()
opt2 = ADAM(0.005)
opt3 = ADAM(0.05)
opt4 = RMSProp()
opt5 = RMSProp(0.005)
opt6 = RMSProp(0.05)
opt7 = OptimizationOptimJL.BFGS()
opt8 = OptimizationOptimJL.LBFGS()
```

## Solve

```julia
loss_1, times_1 = solve(opt1)
loss_2, times_2 = solve(opt2)
loss_3, times_3 = solve(opt3)
loss_4, times_4 = solve(opt4)
loss_5, times_5 = solve(opt5)
loss_6, times_6 = solve(opt6)
loss_7, times_7 = solve(opt7)
loss_8, times_8 = solve(opt8)
loss_9, times_9 = solve("both")
```
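The `"both"` run above implements a common two-stage strategy: run ADAM first to descend into a good basin, then warm-start BFGS from the resulting parameters via `remake(prob, u0 = res.minimizer)` for fast local convergence. In isolation, the pattern looks like this sketch on a toy objective (our own example, not part of the benchmark):

```julia
using Optimization, OptimizationFlux, OptimizationOptimJL

rosenbrock(u, p) = (1 - u[1])^2 + 100*(u[2] - u[1]^2)^2
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, [0.0, 0.0])

res1  = Optimization.solve(prob, ADAM(0.01); maxiters = 200)  # stage 1: explore
prob2 = remake(prob, u0 = res1.minimizer)                     # warm start
res2  = Optimization.solve(prob2, BFGS(); maxiters = 100)     # stage 2: polish
res2.minimizer  # ≈ [1.0, 1.0]
```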
Results ```julia p = plot([times_1, times_2, times_3, times_4, times_5, times_6, times_7, times_8, times_9], [loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9],xlabel="time (s)", ylabel="loss", xscale=:log10, yscale=:log10, labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"], legend=:bottomleft, linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"]) ``` ```julia p = plot([loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9[2:end]], xlabel="iterations", ylabel="loss", yscale=:log10, labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"], legend=:bottomleft, linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"]) ``` ```julia @show loss_1[end], loss_2[end], loss_3[end], loss_4[end], loss_5[end], loss_6[end], loss_7[end], loss_8[end], loss_9[end] ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNOptimizers/burgers_equation.jmd
docs
9728
---
title: Burgers Equation Physics-Informed Neural Network (PINN) Optimizer Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE, OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
import ModelingToolkit: Interval, infimum, supremum
```

```julia
# Physical and numerical parameters (fixed)
nu = 0.07
nx = 10001 #101
x_max = 2.0 * pi
dx = x_max / (nx - 1.0)
nt = 2 #10
dt = dx * nu
t_max = dt * nt

# Analytic function
analytic_sol_func(t, x) = -2*nu*(-(-8*t + 2*x)*exp(-(-4*t + x)^2/(4*nu*(t + 1)))/
                          (4*nu*(t + 1)) - (-8*t + 2*x - 12.5663706143592)*
                          exp(-(-4*t + x - 6.28318530717959)^2/(4*nu*(t + 1)))/
                          (4*nu*(t + 1)))/(exp(-(-4*t + x - 6.28318530717959)^2/
                          (4*nu*(t + 1))) + exp(-(-4*t + x)^2/(4*nu*(t + 1)))) + 4
```

```julia
function burgers(strategy, minimizer)

    @parameters x t
    @variables u(..)
    Dt = Differential(t)
    Dx = Differential(x)
    Dxx = Differential(x)^2

    eq = Dt(u(x, t)) + u(x, t) * Dx(u(x, t)) ~ nu * Dxx(u(x, t))

    bcs = [u(x, 0.0) ~ analytic_sol_func(0.0, x),
           u(0.0, t) ~ u(x_max, t)]

    domains = [x ∈ Interval(0.0, x_max),
               t ∈ Interval(0.0, t_max)]

    chain = Lux.Chain(Lux.Dense(2, 16, tanh), Lux.Dense(16, 16, tanh), Lux.Dense(16, 1))

    discretization = PhysicsInformedNN(chain, strategy)

    indvars = [x, t]  # physically independent variables
    depvars = [u]     # dependent (target) variable

    dim = length(domains)

    losses = []
    error = []
    times = []

    dx_err = 0.00005

    error_strategy = GridTraining(dx_err)

    discretization_ = PhysicsInformedNN(chain, error_strategy)
    @named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
    prob_ = discretize(pde_system_, discretization_)

    function loss_function__(θ)
        return prob_.f.f(θ, nothing)
    end

    cb = function (p, l)
        timeCounter = 0.0
        deltaT_s = time_ns() # Start a clock when the callback begins; this will also measure the computation of the uniform error
        ctime = time_ns() - startTime - timeCounter # This variable is the time to use for the time benchmark plot
        append!(times, ctime / 10^9) # Convert nanoseconds to seconds
        append!(losses, l)
        append!(error, loss_function__(p))
        #println(length(losses), " Current loss is: ", l, " uniform error is, ", loss_function__(p))

        timeCounter = timeCounter + time_ns() - deltaT_s # timeCounter sums all delays due to the callback functions of the previous iterations
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)

    discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
    prob = NeuralPDE.discretize(pde_system, discretization)

    startTime = time_ns() # Fix initial time (t=0) before starting the training

    if minimizer == "both"
        res = Optimization.solve(prob, ADAM(); callback=cb, maxiters=5)
        prob = remake(prob, u0=res.minimizer)
        res = Optimization.solve(prob, BFGS(); callback=cb, maxiters=15)
    else
        res = Optimization.solve(prob, minimizer; callback=cb, maxiters=500)
    end

    phi = discretization.phi
    params = res.minimizer

    return [error, params, times, losses]
end
```

## Solve

```julia
# Settings:
#maxIters = [(0,0,0,0,0,0,20000),(300,300,300,300,300,300,300)] #iters

strategies = [NeuralPDE.QuadratureTraining()]
strategies_short_name = ["QuadratureTraining"]

minimizers = [ADAM(), ADAM(0.000005), ADAM(0.0005), RMSProp(), RMSProp(0.00005), RMSProp(0.05), BFGS(), LBFGS()]
minimizers_short_name = ["ADAM", "ADAM(0.000005)", "ADAM(0.0005)", "RMS", "RMS(0.00005)", "RMS(0.05)", "BFGS", "LBFGS"]
```

```julia
# Run models
error_res = Dict()
params_res = Dict()
times = Dict()
losses_res = Dict()

print("Starting run \n")

for min in 1:length(minimizers) # minimizer
    for strat in 1:length(strategies) # strategy
        #println(string(strategies_short_name[1], " ", minimizers_short_name[min]))
        res = burgers(strategies[strat], minimizers[min])
        push!(error_res, string(strat,min) => res[1])
        push!(params_res, string(strat,min) => res[2])
        push!(times, string(strat,min) => res[3])
        push!(losses_res, string(strat,min) => res[4])
    end
end
```

## Results

```julia
# PLOT ERROR VS ITER: to compare between minimizers, keeping the same strategy (easily adjustable to compare between strategies)
error_iter = Plots.plot(1:length(error_res["11"]), error_res["11"], yaxis=:log10, title = string("Burger error vs iter"), ylabel = "Error", label = string(minimizers_short_name[1]), ylims = (0.0001,1))
plot!(error_iter, 1:length(error_res["12"]), error_res["12"], yaxis=:log10, label = string(minimizers_short_name[2]))
plot!(error_iter, 1:length(error_res["13"]), error_res["13"], yaxis=:log10, label = string(minimizers_short_name[3]))
plot!(error_iter, 1:length(error_res["14"]), error_res["14"], yaxis=:log10, label = string(minimizers_short_name[4]))
plot!(error_iter, 1:length(error_res["15"]), error_res["15"], yaxis=:log10, label = string(minimizers_short_name[5]))
plot!(error_iter, 1:length(error_res["16"]), error_res["16"], yaxis=:log10, label = string(minimizers_short_name[6]))
plot!(error_iter, 1:length(error_res["17"]), error_res["17"], yaxis=:log10, label = string(minimizers_short_name[7]))
plot!(error_iter, 1:length(error_res["18"]), error_res["18"], yaxis=:log10, label = string(minimizers_short_name[8]))

Plots.plot(error_iter)
```

```julia
#Use after having modified the analysis setting correctly --> Error vs iter: to compare different strategies, keeping the same minimizer
#error_iter = Plots.plot(1:length(error_res["11"]), error_res["11"], yaxis=:log10, title = string("Burger error vs iter"), ylabel = "Error", label = string(strategies_short_name[1]), ylims = (0.0001,1))
#plot!(error_iter, 1:length(error_res["21"]), error_res["21"], yaxis=:log10, label = string(strategies_short_name[2]))
#plot!(error_iter, 1:length(error_res["31"]), error_res["31"], yaxis=:log10, label = string(strategies_short_name[3]))
#plot!(error_iter, 1:length(error_res["41"]), error_res["41"], yaxis=:log10, label = string(strategies_short_name[4]))
#plot!(error_iter, 1:length(error_res["51"]), error_res["51"], yaxis=:log10, label = string(strategies_short_name[5]))
#plot!(error_iter, 1:length(error_res["61"]), error_res["61"], yaxis=:log10, label = string(strategies_short_name[6]))
#plot!(error_iter, 1:length(error_res["71"]), error_res["71"], yaxis=:log10, label = string(strategies_short_name[7]))
```

```julia
# PLOT ERROR VS TIME: to compare between minimizers, keeping the same strategy
error_time = plot(times["11"], error_res["11"], yaxis=:log10, label = string(minimizers_short_name[1]),title = string("Burger error vs time"), ylabel = "Error", size = (1500,500))
plot!(error_time, times["12"], error_res["12"], yaxis=:log10, label = string(minimizers_short_name[2]))
plot!(error_time, times["13"], error_res["13"], yaxis=:log10, label = string(minimizers_short_name[3]))
plot!(error_time, times["14"], error_res["14"], yaxis=:log10, label = string(minimizers_short_name[4]))
plot!(error_time, times["15"], error_res["15"], yaxis=:log10, label = string(minimizers_short_name[5]))
plot!(error_time, times["16"], error_res["16"], yaxis=:log10, label = string(minimizers_short_name[6]))
plot!(error_time, times["17"], error_res["17"], yaxis=:log10, label = string(minimizers_short_name[7]))
plot!(error_time, times["18"], error_res["18"], yaxis=:log10, label = string(minimizers_short_name[8]))

Plots.plot(error_time)
```

```julia
#Use after having modified the analysis setting correctly --> Error vs time: to compare different strategies, keeping the same minimizer
#error_time = plot(times["11"], error_res["11"], yaxis=:log10, label = string(strategies_short_name[1]),title = string("Burger error vs time"), ylabel = "Error", size = (1500,500))
#plot!(error_time, times["21"], error_res["21"], yaxis=:log10, label = string(strategies_short_name[2]))
#plot!(error_time, times["31"], error_res["31"], yaxis=:log10, label = string(strategies_short_name[3]))
#plot!(error_time, times["41"], error_res["41"], yaxis=:log10, label = string(strategies_short_name[4]))
#plot!(error_time, times["51"], error_res["51"], yaxis=:log10, label = string(strategies_short_name[5]))
#plot!(error_time, times["61"], error_res["61"], yaxis=:log10, label = string(strategies_short_name[6]))
#plot!(error_time, times["71"], error_res["71"], yaxis=:log10, label = string(strategies_short_name[7]))
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNOptimizers/hamilton_jacobi.jmd
docs
5393
---
title: Hamilton-Jacobi PDE Physics-Informed Neural Network (PINN) Optimizer Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup

```julia
using NeuralPDE, OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function solve(opt)
    strategy = QuadratureTraining()

    ## DECLARATIONS
    @parameters t x1 x2 x3 x4
    @variables u(..)

    Dt = Differential(t)

    Dx1 = Differential(x1)
    Dx2 = Differential(x2)
    Dx3 = Differential(x3)
    Dx4 = Differential(x4)

    Dxx1 = Differential(x1)^2
    Dxx2 = Differential(x2)^2
    Dxx3 = Differential(x3)^2
    Dxx4 = Differential(x4)^2

    # Discretization
    tmax = 1.0
    x1width = 1.0
    x2width = 1.0
    x3width = 1.0
    x4width = 1.0

    tMeshNum = 10
    x1MeshNum = 10
    x2MeshNum = 10
    x3MeshNum = 10
    x4MeshNum = 10

    dt = tmax/tMeshNum
    dx1 = x1width/x1MeshNum
    dx2 = x2width/x2MeshNum
    dx3 = x3width/x3MeshNum
    dx4 = x4width/x4MeshNum

    domains = [t ∈ Interval(0.0,tmax),
               x1 ∈ Interval(0.0,x1width),
               x2 ∈ Interval(0.0,x2width),
               x3 ∈ Interval(0.0,x3width),
               x4 ∈ Interval(0.0,x4width)]

    ts = 0.0 : dt : tmax
    x1s = 0.0 : dx1 : x1width
    x2s = 0.0 : dx2 : x2width
    x3s = 0.0 : dx3 : x3width
    x4s = 0.0 : dx4 : x4width

    λ = 1.0f0

    # Operators
    Δu = Dxx1(u(t,x1,x2,x3,x4)) + Dxx2(u(t,x1,x2,x3,x4)) + Dxx3(u(t,x1,x2,x3,x4)) + Dxx4(u(t,x1,x2,x3,x4)) # Laplacian
    ∇u = [Dx1(u(t,x1,x2,x3,x4)), Dx2(u(t,x1,x2,x3,x4)),Dx3(u(t,x1,x2,x3,x4)),Dx4(u(t,x1,x2,x3,x4))]

    # Equation
    eq = Dt(u(t,x1,x2,x3,x4)) + Δu - λ*sum(∇u.^2) ~ 0 # HAMILTON-JACOBI-BELLMAN EQUATION

    terminalCondition = log((1 + x1*x1 + x2*x2 + x3*x3 + x4*x4)/2) # see PNAS paper

    bcs = [u(tmax,x1,x2,x3,x4) ~ terminalCondition] # PNAS paper again

    ## NEURAL NETWORK
    n = 20 # neuron number

    chain = Lux.Chain(Lux.Dense(5,n,tanh),Lux.Dense(n,n,tanh),Lux.Dense(n,1)) # neural network built with Lux

    discretization = PhysicsInformedNN(chain, strategy)

    indvars = [t,x1,x2,x3,x4] # physically independent variables
    depvars = [u]             # dependent (target) variable

    loss = []
    initial_time = 0

    times = []

    cb = function (p,l)
        if initial_time == 0
            initial_time = time()
        end
        push!(times, time() - initial_time)
        #println("Current loss for $opt is: $l")
        push!(loss, l)
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)
    prob = discretize(pde_system, discretization)

    if opt == "both"
        res = Optimization.solve(prob, ADAM(); callback = cb, maxiters=50)
        prob = remake(prob,u0=res.minimizer)
        res = Optimization.solve(prob, BFGS(); callback = cb, maxiters=150)
    else
        res = Optimization.solve(prob, opt; callback = cb, maxiters=200)
    end

    times[1] = 0.001

    return loss, times # add numeric solution
end
```

```julia
opt1 = ADAM()
opt2 = ADAM(0.005)
opt3 = ADAM(0.05)
opt4 = RMSProp()
opt5 = RMSProp(0.005)
opt6 = RMSProp(0.05)
opt7 = OptimizationOptimJL.BFGS()
opt8 = OptimizationOptimJL.LBFGS()
```

## Solve

```julia
loss_1, times_1 = solve(opt1)
loss_2, times_2 = solve(opt2)
loss_3, times_3 = solve(opt3)
loss_4, times_4 = solve(opt4)
loss_5, times_5 = solve(opt5)
loss_6, times_6 = solve(opt6)
loss_7, times_7 = solve(opt7)
loss_8, times_8 = solve(opt8)
loss_9, times_9 = solve("both")
```

## Results

```julia
p = plot([times_1, times_2, times_3, times_4, times_5, times_6, times_7, times_8, times_9], [loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9], xlabel="time (s)", ylabel="loss", xscale=:log10, yscale=:log10, labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"], legend=:bottomleft, linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"])
```

```julia
p = plot([loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9], xlabel="iterations", ylabel="loss", yscale=:log10, labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"], legend=:bottomleft, linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"])
```

```julia
@show loss_1[end], loss_2[end], loss_3[end], loss_4[end], loss_5[end], loss_6[end], loss_7[end], loss_8[end], loss_9[end]
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/PINNOptimizers/poisson.jmd
docs
4181
---
title: Poisson PDE Physics-Informed Neural Network (PINN) Optimizer Benchmarks
author: Kirill Zubov, Zoe McCarthy, Yingbo Ma, Francesco Calisto, Valerio Pagliarino, Simone Azeglio, Luca Bottero, Emmanuel Luján, Valentin Sulzer, Ashutosh Bharambe, Nand Vinchhi, Kaushik Balakrishnan, Devesh Upadhyay, Chris Rackauckas
---

Adapted from [NeuralPDE: Automating Physics-Informed Neural Networks (PINNs) with Error Approximations](https://arxiv.org/abs/2107.09443). Uses the [NeuralPDE.jl](https://neuralpde.sciml.ai/dev/) library from the [SciML Scientific Machine Learning Open Source Organization](https://sciml.ai/) for the implementation of physics-informed neural networks (PINNs) and other science-guided AI techniques.

## Setup Code

```julia
using NeuralPDE, OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Lux, Plots
import ModelingToolkit: Interval, infimum, supremum
```

```julia
function solve(opt)
    strategy = QuadratureTraining()

    @parameters x y
    @variables u(..)

    Dxx = Differential(x)^2
    Dyy = Differential(y)^2

    # 2D PDE
    eq = Dxx(u(x,y)) + Dyy(u(x,y)) ~ -sin(pi*x)*sin(pi*y)

    # Boundary conditions
    bcs = [u(0,y) ~ 0.f0, u(1,y) ~ -sin(pi*1)*sin(pi*y),
           u(x,0) ~ 0.f0, u(x,1) ~ -sin(pi*x)*sin(pi*1)]

    # Space and time domains
    domains = [x ∈ Interval(0.0,1.0),
               y ∈ Interval(0.0,1.0)]

    # Neural network
    dim = 2 # number of dimensions
    chain = Lux.Chain(Lux.Dense(dim,16,tanh),Lux.Dense(16,16,tanh),Lux.Dense(16,1))

    discretization = PhysicsInformedNN(chain,strategy)

    indvars = [x, y]      # physically independent variables
    depvars = [u(x,y)]    # dependent (target) variable

    loss = []
    initial_time = nothing

    times = []

    cb = function (p,l)
        if initial_time == nothing
            initial_time = time()
        end
        push!(times, time() - initial_time)
        #println("Current loss for $opt is: $l")
        push!(loss, l)
        return false
    end

    @named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)
    prob = discretize(pde_system, discretization)

    if opt == "both"
        res = Optimization.solve(prob, ADAM(); callback = cb, maxiters=50)
        prob = remake(prob,u0=res.minimizer)
        res = Optimization.solve(prob, BFGS(); callback = cb, maxiters=150)
    else
        res = Optimization.solve(prob, opt; callback = cb, maxiters=200)
    end

    times[1] = 0.001

    return loss, times # add numeric solution
end
```

```julia
opt1 = ADAM()
opt2 = ADAM(0.005)
opt3 = ADAM(0.05)
opt4 = RMSProp()
opt5 = RMSProp(0.005)
opt6 = RMSProp(0.05)
opt7 = OptimizationOptimJL.BFGS()
opt8 = OptimizationOptimJL.LBFGS()
```

## Solve

```julia
loss_1, times_1 = solve(opt1)
loss_2, times_2 = solve(opt2)
loss_3, times_3 = solve(opt3)
loss_4, times_4 = solve(opt4)
loss_5, times_5 = solve(opt5)
loss_6, times_6 = solve(opt6)
loss_7, times_7 = solve(opt7)
loss_8, times_8 = solve(opt8)
loss_9, times_9 = solve("both")
```

## Results

```julia
p = plot([times_1, times_2, times_3, times_4, times_5, times_6, times_7, times_8, times_9], [loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9], xlabel="time (s)", ylabel="loss", xscale=:log10, yscale=:log10, labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"], legend=:bottomleft, linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"])
```

```julia
p = plot([loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, loss_8, loss_9], xlabel="iterations", ylabel="loss", yscale=:log10, labels=["ADAM(0.001)" "ADAM(0.005)" "ADAM(0.05)" "RMSProp(0.001)" "RMSProp(0.005)" "RMSProp(0.05)" "BFGS()" "LBFGS()" "ADAM + BFGS"], legend=:bottomleft, linecolor=["#2660A4" "#4CD0F4" "#FEC32F" "#F763CD" "#44BD79" "#831894" "#A6ED18" "#980000" "#FF912B"])
```

```julia
@show loss_1[end], loss_2[end], loss_3[end], loss_4[end], loss_5[end], loss_6[end], loss_7[end], loss_8[end], loss_9[end]
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
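
For reference, this Poisson problem has the closed-form solution u(x,y) = sin(πx)sin(πy)/(2π²) (substituting into Δu = -sin(πx)sin(πy) verifies it, and it vanishes on the boundary, consistent with the BCs above), which makes an easy visual check against the trained network; a minimal self-contained sketch:

```julia
using Plots
# Closed-form solution of the 2D Poisson problem posed above.
analytic_u(x, y) = sin(pi*x)*sin(pi*y)/(2*pi^2)
xs = ys = range(0.0, 1.0; length=101)
heatmap(xs, ys, analytic_u, xlabel="x", ylabel="y", title="Analytic u(x,y)")
```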
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/ParameterEstimation/FitzHughNagumoParameterEstimation.jmd
docs
9813
---
title: FitzHugh-Nagumo Parameter Estimation Benchmarks
author: Vaibhav Dixit, Chris Rackauckas
---

# Parameter estimation of FitzHugh-Nagumo model using optimisation methods

```julia
using ParameterizedFunctions, OrdinaryDiffEq, DiffEqParamEstim
using BlackBoxOptim, NLopt, Plots, QuadDIRECT
gr(fmt=:png)
```

```julia
loc_bounds = Tuple{Float64,Float64}[(0, 1), (0, 1), (0, 1), (0, 1)]
glo_bounds = Tuple{Float64,Float64}[(0, 5), (0, 5), (0, 5), (0, 5)]
loc_init = [0.5,0.5,0.5,0.5]
glo_init = [2.5,2.5,2.5,2.5]
```

```julia
fitz = @ode_def FitzhughNagumo begin
    dv = v - v^3/3 - w + l
    dw = τinv*(v + a - b*w)
end a b τinv l
```

```julia
p = [0.7,0.8,0.08,0.5]        # Parameters used to construct the dataset
r0 = [1.0; 1.0]               # initial value
tspan = (0.0, 30.0)           # sample of 3000 observations over the (0,30) timespan
prob = ODEProblem(fitz, r0, tspan,p)
tspan2 = (0.0, 3.0)           # sample of 300 observations with a timestep of 0.01
prob_short = ODEProblem(fitz, r0, tspan2,p)
```

```julia
dt = 30.0/3000
tf = 30.0
tinterval = 0:dt:tf
t = collect(tinterval)
```

```julia
h = 0.01
M = 300
tstart = 0.0
tstop = tstart + M * h
tinterval_short = 0:h:tstop
t_short = collect(tinterval_short)
```

```julia
# Generate Data
data_sol_short = solve(prob_short,Vern9(),saveat=t_short,reltol=1e-9,abstol=1e-9)
data_short = convert(Array, data_sol_short) # This operation produces a column-major dataset: observations as columns, equations as rows
data_sol = solve(prob,Vern9(),saveat=t,reltol=1e-9,abstol=1e-9)
data = convert(Array, data_sol)
```

#### Plot of the solution

##### Short Solution

```julia
plot(data_sol_short)
```

##### Longer Solution

```julia
plot(data_sol)
```

## Local Solution from the short data set

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short)
res1 = bboptimize(obj_short;SearchRange = glo_bounds, MaxSteps = 7e3)
# Lower tolerance could lead to smaller fitness (more accuracy)
```

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9)
res1 = bboptimize(obj_short;SearchRange = glo_bounds, MaxSteps = 7e3)
# Change in tolerance makes it worse
```

```julia
obj_short = build_loss_objective(prob_short,Vern9(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9,abstol=1e-9)
res1 = bboptimize(obj_short;SearchRange = glo_bounds, MaxSteps = 7e3)
# Using the more accurate Vern9() reduces the fitness marginally and leads to some increase in time taken
```

## Using NLopt

#### Global Optimisation

```julia
obj_short = build_loss_objective(prob_short,Vern9(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9,abstol=1e-9)
```

```julia
opt = Opt(:GN_ORIG_DIRECT_L, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_CRS2_LM, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ISRES, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ESCH, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

Now local optimization algorithms are used to check the global ones; these use the local constraints, different initial values, and time step.

```julia
opt = Opt(:LN_BOBYQA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_NELDERMEAD, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_SLSQP, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_COBYLA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_NEWUOA_BOUND, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_PRAXIS, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_SBPLX, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_MMA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

### Now the longer problem is solved for a global solution

The Vern9 solver with reltol=1e-9 and abstol=1e-9 is used, and the dataset is increased to 3000 observations per variable with the same integration time step of 0.01.
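
For intuition, the objective returned by `build_loss_objective` is (up to solver details) the squared Euclidean distance between the data and the ODE solution at the sample times. A hand-rolled sketch of that computation (the name `naive_l2_cost` is ours, not part of DiffEqParamEstim; assumes `prob`, `t`, and `data` from the setup are in scope):

```julia
# A hand-rolled analogue of the L2Loss objective, for intuition only.
function naive_l2_cost(p)
    _prob = remake(prob, p=p)
    sol = solve(_prob, Vern9(), saveat=t, reltol=1e-9, abstol=1e-9)
    sol.retcode == :Success || return Inf   # penalize failed integrations
    sum(abs2, Array(sol) .- data)           # squared L2 distance to the data
end

naive_l2_cost([0.7, 0.8, 0.08, 0.5])  # ≈ 0 at the true parameters
```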
```julia
obj = build_loss_objective(prob,Vern9(),L2Loss(t,data),tstops=t,reltol=1e-9,abstol=1e-9)
res1 = bboptimize(obj;SearchRange = glo_bounds, MaxSteps = 4e3)
```

```julia
opt = Opt(:GN_ORIG_DIRECT_L, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_CRS2_LM, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 20000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ISRES, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 50000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ESCH, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 20000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:LN_BOBYQA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_NELDERMEAD, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-9)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_SLSQP, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[1.0,1.0,1.0,1.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

As seen with other problems, the longer sample proves extremely challenging for some of the global optimizers. A few recover accurate values, while others struggle with accuracy.

#### Using QuadDIRECT

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short)
lower = [0,0,0,0]
upper = [1,1,1,1]
splits = ([0,0.3,0.7],[0,0.3,0.7],[0,0.3,0.7],[0,0.3,0.7])
@time root, x0 = analyze(obj_short,splits,lower,upper)
```

```julia
minimum(root)
```

```julia
obj = build_loss_objective(prob,Vern9(),L2Loss(t,data),tstops=t,reltol=1e-9,abstol=1e-9)
lower = [0,0,0,0]
upper = [5,5,5,5]
splits = ([0,0.5,1],[0,0.5,1],[0,0.5,1],[0,0.5,1])
@time root, x0 = analyze(obj,splits,lower,upper)
```

```julia
minimum(root)
```

# Conclusion

It is observed that lower tolerances lead to higher accuracy, but too low a tolerance can affect the convergence time drastically. Also, fitting a shorter timespan seems to be easier in comparison (quite intuitively). NLopt methods give great accuracy on the shorter problem, with many of the algorithms reaching 0 fitness; BBO performs very well on it, with only marginal change across tolerance values. For global optimization of the longer problem there is some difference in performance among the algorithms: :LN_BOBYQA gives accurate results for the local optimization, and :GN_ISRES and :GN_CRS2_LM give the highest accuracy among the global ones. BBO also fails to perform well on the longer problem. QuadDIRECT performs well on the shorter problem but fails to give good results on the longer version.
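
The tolerance trade-off noted above can be probed directly by timing single objective evaluations at different solver tolerances; a minimal sketch (assumes `prob_short`, `t_short`, and `data_short` from the setup are in scope, and that the objective is directly callable as in the `bboptimize` calls above):

```julia
# Probe how solver tolerance affects one objective evaluation's cost and value.
for tol in (1e-3, 1e-6, 1e-9, 1e-12)
    obj_tol = build_loss_objective(prob_short, Tsit5(),
                                   L2Loss(t_short, data_short),
                                   tstops=t_short, reltol=tol, abstol=tol)
    t_eval = @elapsed fitness = obj_tol([0.7, 0.8, 0.08, 0.5])
    println("tol = $tol  =>  loss at true p = $fitness, eval time = $(t_eval)s")
end
```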
```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/ParameterEstimation/LorenzParameterEstimation.jmd
docs
14845
---
title: Lorenz Parameter Estimation Benchmarks
author: finmod, Chris Rackauckas, Vaibhav Dixit
---

# Estimate the parameters of the Lorenz system from the dataset

Note: If data is generated with a fixed time step method and then tested against with the same time step, there is a bias introduced, since it's no longer about hitting the true solution but rather just about retrieving the same values that the ODE was first generated with! Thus this version uses adaptive timestepping for all portions, so that the tests are against the true solution.

```julia
using ParameterizedFunctions, OrdinaryDiffEq, DiffEqParamEstim
using BlackBoxOptim, NLopt, Plots, QuadDIRECT
gr(fmt=:png)
```

```julia
Xiang2015Bounds = Tuple{Float64, Float64}[(9, 11), (20, 30), (2, 3)] # for local optimizations
xlow_bounds = [9.0,20.0,2.0]
xhigh_bounds = [11.0,30.0,3.0]
LooserBounds = Tuple{Float64, Float64}[(0, 22), (0, 60), (0, 6)] # for global optimization
GloIniPar = [0.0, 0.5, 0.1] # for global optimizations
LocIniPar = [9.0, 20.0, 2.0] # for local optimization
```

```julia
g1 = @ode_def LorenzExample begin
    dx = σ*(y-x)
    dy = x*(ρ-z) - y
    dz = x*y - β*z
end σ ρ β

p = [10.0,28.0,2.66] # Parameters used to construct the dataset
r0 = [1.0; 0.0; 0.0] #[-11.8,-5.1,37.5] PODES Initial values of the system in space # [0.1, 0.0, 0.0]
tspan = (0.0, 30.0) # PODES sample of 3000 observations over the (0,30) timespan
prob = ODEProblem(g1, r0, tspan,p)
tspan2 = (0.0, 3.0) # Xiang test sample of 300 observations with a timestep of 0.01
prob_short = ODEProblem(g1, r0, tspan2,p)
```

```julia
dt = 30.0/3000
tf = 30.0
tinterval = 0:dt:tf
t = collect(tinterval)
```

```julia
h = 0.01
M = 300
tstart = 0.0
tstop = tstart + M * h
tinterval_short = 0:h:tstop
t_short = collect(tinterval_short)
```

```julia
# Generate Data
data_sol_short = solve(prob_short,Vern9(),saveat=t_short,reltol=1e-9,abstol=1e-9)
data_short = convert(Array, data_sol_short) # This operation produces a column-major dataset: observations as columns, equations as rows
data_sol = solve(prob,Vern9(),saveat=t,reltol=1e-9,abstol=1e-9)
data = convert(Array, data_sol)
```

Plot the data

```julia
plot(data_sol_short,vars=(1,2,3)) # the short solution
plot(data_sol,vars=(1,2,3)) # the longer solution
interpolation_sol = solve(prob,Vern7(),saveat=t,reltol=1e-12,abstol=1e-12)
plot(interpolation_sol,vars=(1,2,3))
```

```julia
xyzt = plot(data_sol_short, plotdensity=10000,lw=1.5)
xy = plot(data_sol_short, plotdensity=10000, vars=(1,2))
xz = plot(data_sol_short, plotdensity=10000, vars=(1,3))
yz = plot(data_sol_short, plotdensity=10000, vars=(2,3))
xyz = plot(data_sol_short, plotdensity=10000, vars=(1,2,3))
plot(plot(xyzt,xyz),plot(xy, xz, yz, layout=(1,3),w=1), layout=(2,1), size=(800,600))
```

```julia
xyzt = plot(data_sol, plotdensity=10000,lw=1.5)
xy = plot(data_sol, plotdensity=10000, vars=(1,2))
xz = plot(data_sol, plotdensity=10000, vars=(1,3))
yz = plot(data_sol, plotdensity=10000, vars=(2,3))
xyz = plot(data_sol, plotdensity=10000, vars=(1,2,3))
plot(plot(xyzt,xyz),plot(xy, xz, yz, layout=(1,3),w=1), layout=(2,1), size=(800,600))
```

## Find a local solution for the three parameters from a short data set

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short)
res1 = bboptimize(obj_short;SearchRange = LooserBounds, MaxSteps = 7e3)
# Tolerance is still too high to get close enough
```

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9)
res1 = bboptimize(obj_short;SearchRange = LooserBounds, MaxSteps = 7e3)
# With the tolerance lower, it achieves the correct solution in 3.5 seconds.
```

```julia
obj_short = build_loss_objective(prob_short,Vern9(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9,abstol=1e-9)
res1 = bboptimize(obj_short;SearchRange = LooserBounds, MaxSteps = 7e3)
# With the more accurate solver Vern9 in the solution of the ODE, the convergence is less efficient!
# Fastest BlackBoxOptim: 3.5 seconds
```

# Using NLopt

First, the global optimization algorithms

```julia
obj_short = build_loss_objective(prob_short,Vern9(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9,abstol=1e-9)
```

```julia
opt = Opt(:GN_ORIG_DIRECT_L, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Accurate 3.2 seconds
```

```julia
opt = Opt(:GN_CRS2_LM, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Accurate 3.0 seconds
```

```julia
opt = Opt(:GN_ISRES, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Accurate to single precision 8.2 seconds
```

```julia
opt = Opt(:GN_ESCH, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Approximately accurate; good starting values for local optimization
```

Next, the local optimization algorithms that could be used after the global algorithms as a check on the solution and its precision. All the local optimizers are started from LocIniPar and with the narrow bounds of the Xiang 2015 paper.
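
Since the NLopt cells below all share the same boilerplate, that pattern can be factored into a small helper; a sketch (the helper `run_nlopt` is ours, not part of NLopt.jl, and only wraps calls already used above):

```julia
# Hypothetical convenience wrapper for the repeated NLopt pattern below;
# purely a local helper to cut boilerplate.
function run_nlopt(alg, cost; lb, ub, x0, xtol=1e-12, maxevals=10000)
    opt = Opt(alg, length(x0))
    lower_bounds!(opt, lb)
    upper_bounds!(opt, ub)
    min_objective!(opt, cost)
    xtol_rel!(opt, xtol)
    maxeval!(opt, maxevals)
    NLopt.optimize(opt, x0)   # returns (minf, minx, ret)
end

# e.g. run_nlopt(:LN_BOBYQA, obj_short.cost_function2;
#                lb=[9.0,20.0,2.0], ub=[11.0,30.0,3.0], x0=LocIniPar)
```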
```julia
opt = Opt(:LN_BOBYQA, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # 0.1 seconds
```

```julia
opt = Opt(:LN_NELDERMEAD, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.29 sec
```

```julia
opt = Opt(:LD_SLSQP, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.21 sec
```

```julia
opt = Opt(:LN_COBYLA, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 1.84 sec
```

```julia
opt = Opt(:LN_NEWUOA_BOUND, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.18 sec ROUNDOFF LIMITED
```

```julia
opt = Opt(:LN_PRAXIS, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.18 sec
```

```julia
opt = Opt(:LN_SBPLX, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.65 sec
```

```julia
opt = Opt(:LD_MMA, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.7 sec
```

```julia
opt = Opt(:LD_LBFGS, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.12 sec
```

```julia
opt = Opt(:LD_TNEWTON_PRECOND_RESTART, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Accurate 0.15 sec
```

## Now let's solve the longer version for a global solution

Notice from the plotting above that this ODE problem is chaotic and tends to diverge over time. In the longer version of parameter estimation, the dataset is increased to 3000 observations per variable with the same integration time step of 0.01. The Vern9 solver with reltol=1e-9 and abstol=1e-9 has been established to be accurate on the time interval [0,50].
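
Before the optimizer runs, the chaotic sensitivity itself is worth a quick look: even a 0.1% parameter perturbation visibly decorrelates the trajectories over this time span. A minimal sketch, reusing `g1`, `r0`, `tspan`, and `p` from the setup:

```julia
# Two trajectories whose parameters differ by 0.1% diverge on (0, 30).
sol_true = solve(ODEProblem(g1, r0, tspan, p), Vern9(), reltol=1e-9, abstol=1e-9)
p_pert   = p .* 1.001
sol_pert = solve(ODEProblem(g1, r0, tspan, p_pert), Vern9(), reltol=1e-9, abstol=1e-9)
plot(sol_true, vars=(0,1), label="p", lw=1.5)          # x(t) with true parameters
plot!(sol_pert, vars=(0,1), label="p .* 1.001", ls=:dash)
```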
```julia
# BB with Vern9 converges very slowly. The final values are within the NarrowBounds.
obj = build_loss_objective(prob,Vern9(),L2Loss(t,data),tstops=t,reltol=1e-9,abstol=1e-9)
res1 = bboptimize(obj;SearchRange = LooserBounds, MaxSteps = 4e3) # Default adaptive_de_rand_1_bin_radiuslimited 33 sec [10.2183, 24.6711, 2.28969]
#res1 = bboptimize(obj;SearchRange = LooserBounds, Method = :adaptive_de_rand_1_bin, MaxSteps = 4e3) # Method 32 sec [13.2222, 25.8589, 2.56176]
#res1 = bboptimize(obj;SearchRange = LooserBounds, Method = :dxnes, MaxSteps = 2e3) # Method dxnes 119 sec [16.8648, 24.393, 2.29119]
#res1 = bboptimize(obj;SearchRange = LooserBounds, Method = :xnes, MaxSteps = 2e3) # Method xnes 304 sec [19.1647, 24.9479, 2.39467]
#res1 = bboptimize(obj;SearchRange = LooserBounds, Method = :de_rand_1_bin_radiuslimited, MaxSteps = 2e3) # Method 44 sec [13.805, 24.6054, 2.37274]
#res1 = bboptimize(obj;SearchRange = LooserBounds, Method = :generating_set_search, MaxSteps = 2e3) # Method 195 sec [19.1847, 24.9492, 2.39412]
```

```julia
# using Evolutionary
# N = 3
# @time result, fitness, cnt = cmaes(obj, N; μ = 3, λ = 12, iterations = 1000) # cmaes( rastrigin, N; μ = 15, λ = P, tol = 1e-8)
```

```julia
opt = Opt(:GN_ORIG_DIRECT_L, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Fails to converge
```

```julia
opt = Opt(:GN_CRS2_LM, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 20000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Hit and miss: converges to approximately accurate values for local opt. 91 seconds
```

```julia
opt = Opt(:GN_ISRES, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 50000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Approximately accurate within local bounds
```

```julia
opt = Opt(:GN_ESCH, 3)
lower_bounds!(opt,[0.0,0.0,0.0])
upper_bounds!(opt,[22.0,60.0,6.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 20000)
@time (minf,minx,ret) = NLopt.optimize(opt,GloIniPar) # Approximately accurate
```

This parameter estimation on the longer sample proves to be extremely challenging for the global optimizers. BlackBoxOptim is best at optimizing the objective function. All of the global algorithms produce final parameter estimates that could be used as starting values for further refinement with the local optimization algorithms.

```julia
opt = Opt(:LN_BOBYQA, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Claims SUCCESS but does not iterate to the true values.
```

```julia
opt = Opt(:LN_NELDERMEAD, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-9)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Inaccurate final values
```

```julia
opt = Opt(:LD_SLSQP, 3)
lower_bounds!(opt,[9.0,20.0,2.0])
upper_bounds!(opt,[11.0,30.0,3.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,LocIniPar) # Inaccurate final values
```

No local optimizer can improve the global solution to the true values.
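
A natural middle ground, suggested by the observations above, is to chain the two stages and seed a local polish with a global result; a minimal sketch of that hand-off (reusing `obj`, `GloIniPar`, and the bounds from the cells above):

```julia
# Stage 1: rough global search.
opt_g = Opt(:GN_CRS2_LM, 3)
lower_bounds!(opt_g, [0.0,0.0,0.0]); upper_bounds!(opt_g, [22.0,60.0,6.0])
min_objective!(opt_g, obj.cost_function2)
maxeval!(opt_g, 20000)
(minf_g, minx_g, _) = NLopt.optimize(opt_g, GloIniPar)

# Stage 2: local polish started from the global minimizer.
opt_l = Opt(:LN_BOBYQA, 3)
lower_bounds!(opt_l, [0.0,0.0,0.0]); upper_bounds!(opt_l, [22.0,60.0,6.0])
min_objective!(opt_l, obj.cost_function2)
xtol_rel!(opt_l, 1e-12); maxeval!(opt_l, 10000)
(minf_l, minx_l, _) = NLopt.optimize(opt_l, minx_g)
```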
#### Using QuadDIRECT

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short)
lower = [0.0,0.0,0.0]
upper = [50.0,50.0,50.0]
splits = ([1.0,5.0,15.0],[0,10,20],[0,10,20])
@time root, x0 = analyze(obj_short,splits,lower,upper)
```

```julia
minimum(root)
```

```julia
obj = build_loss_objective(prob,Vern9(),L2Loss(t,data),tstops=t,reltol=1e-9,abstol=1e-9)
lower = [0.0,0.0,0.0]
upper = [50.0,50.0,50.0]
splits = ([0,5.0,15.0],[0,15,30],[0,2,5])
@time root, x0 = analyze(obj,splits,lower,upper)
```

```julia
minimum(root)
```

# Conclusion:

1) As expected, the Lorenz system is extremely sensitive to the initial values in space. Starting the integration from `r0 = [0.1,0.0,0.0]` produces convergence with the short sample of 300 observations. This can be achieved by all the global optimizers as well as most of the local optimizers. Instead, starting from `r0 = [-11.8,-5.1,37.5]`, as in PODES, with the shorter sample shrinks the number of successful algorithms to 3: `BBO`, `:GN_CRS2_LM`, and `:LD_SLSQP`. For the longer sample, all the algorithms fail.

2) When trying to hit the real data, having a low enough tolerance on the numerical solution is key. If the numerical solution is too rough, then we can never actually hone in on the true parameters, since even with the true parameters we would erroneously induce numerical error. Maybe this could be adaptive?

3) Excessively low tolerance in the numerical solution is inefficient and delays the convergence of the estimation.

4) The estimation method and the global versus local optimization make a huge difference in the timings. Here, BBO always finds the correct solution for a global optimization setup. For local optimization, most methods in NLopt, like :LN_BOBYQA, solve the problem in <0.05 seconds. Such an algorithm can scale a local optimization, but we are aiming to scale a global optimization.

5) QuadDIRECT performs very well on the shorter problem but doesn't give great results for the longer one in the Lorenz case; more can be read about the algorithm [here](https://github.com/timholy/QuadDIRECT.jl).

6) Fitting shorter timespans is easier... maybe this can lead to determining a minimal sample size for the optimizers and the estimator to succeed.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/ParameterEstimation/LotkaVolterraParameterEstimation.jmd
docs
10124
---
title: Lotka-Volterra Parameter Estimation Benchmarks
author: Vaibhav Dixit, Chris Rackauckas
---

# Parameter estimation of Lotka-Volterra model using optimisation methods

```julia
using ParameterizedFunctions, OrdinaryDiffEq, DiffEqParamEstim
using BlackBoxOptim, NLopt, Plots, RecursiveArrayTools, QuadDIRECT
gr(fmt=:png)
```

```julia
loc_bounds = Tuple{Float64, Float64}[(0, 5), (0, 5), (0, 5), (0, 5)]
glo_bounds = Tuple{Float64, Float64}[(0, 10), (0, 10), (0, 10), (0, 10)]
loc_init = [1,0.5,3.5,1.5]
glo_init = [5,5,5,5]
```

```julia
f = @ode_def LotkaVolterraTest begin
    dx = a*x - b*x*y
    dy = -c*y + d*x*y
end a b c d
```

```julia
u0 = [1.0,1.0]          # initial values
tspan = (0.0,10.0)
p = [1.5,1.0,3.0,1.0]   # parameters used, these need to be estimated from the data
tspan = (0.0, 30.0)     # sample of 3000 observations over the (0,30) timespan
prob = ODEProblem(f, u0, tspan,p)
tspan2 = (0.0, 3.0)     # sample of 300 observations over the (0,3) timespan
prob_short = ODEProblem(f, u0, tspan2,p)
```

```julia
dt = 30.0/3000
tf = 30.0
tinterval = 0:dt:tf
t = collect(tinterval)
```

```julia
h = 0.01
M = 300
tstart = 0.0
tstop = tstart + M * h
tinterval_short = 0:h:tstop
t_short = collect(tinterval_short)
```

```julia
# Generate Data
data_sol_short = solve(prob_short,Tsit5(),saveat=t_short,reltol=1e-9,abstol=1e-9)
data_short = convert(Array, data_sol_short)
data_sol = solve(prob,Tsit5(),saveat=t,reltol=1e-9,abstol=1e-9)
data = convert(Array, data_sol)
```

#### Plot of the solution

##### Short Solution

```julia
p1 = plot(data_sol_short)
```

##### Longer Solution

```julia
p2 = plot(data_sol)
```

### Local Solution from the short data set

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short)
res1 = bboptimize(obj_short;SearchRange = glo_bounds, MaxSteps = 7e3)
# Lower tolerance could lead to smaller fitness (more accuracy)
```

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9)
res1 = bboptimize(obj_short;SearchRange = glo_bounds, MaxSteps = 7e3)
# Change in tolerance makes it worse
```

```julia
obj_short = build_loss_objective(prob_short,Vern9(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9,abstol=1e-9)
res1 = bboptimize(obj_short;SearchRange = glo_bounds, MaxSteps = 7e3)
# Using the more accurate Vern9() reduces the fitness marginally and leads to some increase in time taken
```

# Using NLopt

#### Global Optimisation first

```julia
obj_short = build_loss_objective(prob_short,Vern9(),L2Loss(t_short,data_short),tstops=t_short,reltol=1e-9,abstol=1e-9)
```

```julia
opt = Opt(:GN_ORIG_DIRECT_L, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_CRS2_LM, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ISRES, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ESCH, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

Now local optimization algorithms are used to check the global ones; these use the local constraints, different initial values, and time step.

```julia
opt = Opt(:LN_BOBYQA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_NELDERMEAD, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_SLSQP, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_COBYLA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_NEWUOA_BOUND, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_PRAXIS, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_SBPLX, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_MMA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_TNEWTON_PRECOND_RESTART, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj_short.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

## Now the longer problem is solved for a global solution

The Vern9 solver with reltol=1e-9 and abstol=1e-9 is used, and the dataset is increased to 3000 observations per variable with the same integration time step of 0.01.
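
Since the true parameters are known here, any recovered estimate can be scored directly before comparing optimizers; a small sketch of such a check (the helper name `param_error` is ours, purely illustrative):

```julia
# Relative error of a recovered parameter vector against the known truth.
param_error(est, truth) = maximum(abs.(est .- truth) ./ abs.(truth))

# e.g. after an NLopt run: (minf, minx, ret) = NLopt.optimize(opt, glo_init)
# param_error(minx, [1.5, 1.0, 3.0, 1.0])  # < 1e-3 would indicate a tight fit
```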
```julia
obj = build_loss_objective(prob,Vern9(),L2Loss(t,data),tstops=t,reltol=1e-9,abstol=1e-9)
res1 = bboptimize(obj;SearchRange = glo_bounds, MaxSteps = 4e3)
```

```julia
opt = Opt(:GN_ORIG_DIRECT_L, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_CRS2_LM, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 20000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ISRES, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 50000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:GN_ESCH, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[10.0,10.0,10.0,10.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 20000)
@time (minf,minx,ret) = NLopt.optimize(opt,glo_init)
```

```julia
opt = Opt(:LN_BOBYQA, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LN_NELDERMEAD, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-9)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

```julia
opt = Opt(:LD_SLSQP, 4)
lower_bounds!(opt,[0.0,0.0,0.0,0.0])
upper_bounds!(opt,[5.0,5.0,5.0,5.0])
min_objective!(opt, obj.cost_function2)
xtol_rel!(opt,1e-12)
maxeval!(opt, 10000)
@time (minf,minx,ret) = NLopt.optimize(opt,loc_init)
```

#### Using QuadDIRECT

```julia
obj_short = build_loss_objective(prob_short,Tsit5(),L2Loss(t_short,data_short),tstops=t_short)
lower = [0.0,0.0,0.0,0.0]
upper = [5.0,5.0,5.0,5.0]
splits = ([0.0,1.0,3.0],[0.0,1.0,3.0],[0.0,1.0,3.0],[0.0,1.0,3.0])
root, x0 = analyze(obj_short,splits,lower,upper)
```

```julia
minimum(root)
```

```julia
obj = build_loss_objective(prob,Vern9(),L2Loss(t,data),tstops=t,reltol=1e-9,abstol=1e-9)
lower = [0.0,0.0,0.0,0.0]
upper = [10.0,10.0,10.0,10.0]
splits = ([0.0,3.0,6.0],[0.0,3.0,6.0],[0.0,3.0,6.0],[0.0,3.0,6.0])
root, x0 = analyze(obj,splits,lower,upper)
```

```julia
minimum(root)
```

Parameter estimation on the longer sample proves to be extremely challenging for some of the global optimizers. A few give accurate values and BlackBoxOptim also performs quite well, while others struggle with accuracy.

# Conclusion

In general we observe that lower tolerances lead to higher accuracy, but too low a tolerance can affect the convergence time drastically. Also, fitting a shorter timespan seems to be easier in comparison (quite intuitively). NLopt methods give great accuracy on the shorter problem, with many of the algorithms reaching 0 fitness; BBO performs very well on it, with only marginal change across `tol` values. For global optimization of the longer problem there is some difference in performance among the algorithms, with `LD_SLSQP`, `GN_ESCH`, `GN_ISRES`, and `GN_ORIG_DIRECT_L` performing among the worst; BBO also gives somewhat high fitness in comparison. QuadDIRECT gives accurate results on the shorter problem but doesn't perform very well on the longer one.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffDDE/QuorumSensing.jmd
docs
10127
---
title: Quorum Sensing Work-Precision Diagrams
author: David Widmann, Chris Rackauckas
---

# Quorum Sensing

Here we test a model of quorum sensing of Pseudomonas putida IsoF in continuous cultures with constant delay which was published by K. Buddrus-Schiemann et al. in "Analysis of N-Acylhomoserine Lactone Dynamics in Continuous Cultures of Pseudomonas Putida IsoF By Use of ELISA and UHPLC/qTOF-MS-derived Measurements and Mathematical Models", Analytical and Bioanalytical Chemistry, 2014.

```julia
using DelayDiffEq, DiffEqDevTools, DDEProblemLibrary, Plots
import DDEProblemLibrary: prob_dde_qs
gr()

sol = solve(prob_dde_qs, MethodOfSteps(Vern9(); fpsolve = NLFunctional(; max_iter = 1000)); reltol=1e-14, abstol=1e-14)
plot(sol)
```

Particularly, we are interested in the third, low-level component of the system:

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Vern9(); fpsolve = NLFunctional(; max_iter = 1000)); reltol=1e-14, abstol=1e-14, save_idxs=3)
test_sol = TestSolution(sol)
plot(sol)
```

## Qualitative comparisons

First we compare the quality of the solution's third component for different algorithms, using the default tolerances.

### RK methods

```julia
sol = solve(prob_dde_qs, MethodOfSteps(BS3()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Tsit5()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(RK4()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(DP5()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(DP8()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(OwrenZen3()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(OwrenZen4()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(OwrenZen5()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

### Rosenbrock methods

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Rosenbrock23()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Rosenbrock32()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Rodas4()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Rodas5()); reltol=1e-4, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

### Lazy interpolants

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Vern7()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

```julia
sol = solve(prob_dde_qs, MethodOfSteps(Vern9()); reltol=1e-3, abstol=1e-6, save_idxs=3)
p = plot(sol);
scatter!(p,sol.t, sol.u)
p
```

## Quantitative comparisons

Now we compare these methods quantitatively.

### High tolerances

#### RK methods

We start with RK methods at high tolerances.
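
For intuition, each point of the work-precision diagrams below corresponds to one timed solve plus an error measurement against the reference; a hand-rolled sketch of a single such sample (assuming `prob_dde_qs` and the reference `sol` from above are in scope):

```julia
# One work-precision sample by hand: time a solve, then measure its final error
# against the high-accuracy reference solution `sol` computed above.
t_solve = @elapsed low_sol = solve(prob_dde_qs, MethodOfSteps(Tsit5());
                                   reltol=1e-3, abstol=1e-6, save_idxs=3)
final_error = abs(low_sol.u[end] - sol.u[end])
println("time = $(t_solve)s, final error = $final_error")
```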
```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` We also compare interpolation errors: ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` And the maximal interpolation error: ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(BS3())), Dict(:alg=>MethodOfSteps(Tsit5())), Dict(:alg=>MethodOfSteps(RK4())), Dict(:alg=>MethodOfSteps(DP5())), Dict(:alg=>MethodOfSteps(OwrenZen3())), Dict(:alg=>MethodOfSteps(OwrenZen4())), Dict(:alg=>MethodOfSteps(OwrenZen5()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L∞) plot(wp) ``` Since the correct solution is in the range of 1e-7, we see that most solutions, even at the lower end of tested tolerances, always lead to relative maximal interpolation errors of at least 1e-1 (and usually worse). `RK4` performs slightly better with relative maximal errors of at least 1e-2. This matches our qualitative analysis above. #### Rosenbrock methods We repeat these tests with Rosenbrock methods, and include `RK4` as reference. ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Rosenbrock23())), Dict(:alg=>MethodOfSteps(Rosenbrock32())), Dict(:alg=>MethodOfSteps(Rodas4())), Dict(:alg=>MethodOfSteps(RK4()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Rosenbrock23())), Dict(:alg=>MethodOfSteps(Rosenbrock32())), Dict(:alg=>MethodOfSteps(Rodas4())), Dict(:alg=>MethodOfSteps(RK4()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Rosenbrock23())), Dict(:alg=>MethodOfSteps(Rosenbrock32())), Dict(:alg=>MethodOfSteps(Rodas4())), Dict(:alg=>MethodOfSteps(RK4()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L∞) plot(wp) ``` Out of the tested Rosenbrock methods `Rodas4` and `Rosenbrock23` perform best at high tolerances. #### Lazy interpolants Finally we test the Verner methods with lazy interpolants, and include `Rosenbrock23` as reference. 
```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(Rosenbrock23()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(Rosenbrock23()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (4:7) reltols = 1.0 ./ 10.0 .^ (1:4) setups = [Dict(:alg=>MethodOfSteps(Vern6())), Dict(:alg=>MethodOfSteps(Vern7())), Dict(:alg=>MethodOfSteps(Vern8())), Dict(:alg=>MethodOfSteps(Vern9())), Dict(:alg=>MethodOfSteps(Rosenbrock23()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L∞) plot(wp) ``` All in all, at high tolerances `Rodas5` and `Rosenbrock23` are the best methods for solving this stiff DDE. ### Low tolerances #### Rosenbrock methods We repeat our tests of Rosenbrock methods `Rosenbrock23` and `Rodas5` at low tolerances: ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Rosenbrock23())), Dict(:alg=>MethodOfSteps(Rodas4())), Dict(:alg=>MethodOfSteps(Rodas5()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:final) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Rosenbrock23())), Dict(:alg=>MethodOfSteps(Rodas4())), Dict(:alg=>MethodOfSteps(Rodas5()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (8:11) reltols = 1.0 ./ 10.0 .^ (5:8) setups = [Dict(:alg=>MethodOfSteps(Rosenbrock23())), Dict(:alg=>MethodOfSteps(Rodas4())), Dict(:alg=>MethodOfSteps(Rodas5()))] wp = WorkPrecisionSet(prob_dde_qs,abstols,reltols,setups; save_idxs=3,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L∞) plot(wp) ``` Thus at low tolerances `Rodas5` outperforms `Rosenbrock23`. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
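As a reading aid for the diagrams above, the following is a minimal sketch (not part of the original benchmark) of what a single work-precision point corresponds to: one solve at a fixed tolerance pair, with the error measured against a tightly-solved reference and the runtime supplying the work axis. It assumes only the packages and `prob_dde_qs` loaded at the top of this file.

```julia
# Illustrative sketch: one hand-computed work-precision point.
ref = solve(prob_dde_qs, MethodOfSteps(Vern9(); fpsolve = NLFunctional(; max_iter = 1000));
            reltol=1e-12, abstol=1e-12, save_idxs=3)   # tight reference
approx = solve(prob_dde_qs, MethodOfSteps(Rosenbrock23());
               reltol=1e-3, abstol=1e-6, save_idxs=3)
# error_estimate=:final compares only the values at the final time
final_err = abs(approx.u[end] - ref.u[end])
# the work axis is simply the runtime of the solve
runtime = @elapsed solve(prob_dde_qs, MethodOfSteps(Rosenbrock23());
                         reltol=1e-3, abstol=1e-6, save_idxs=3)
println("final error = $final_err, runtime = $runtime s")
```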
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffODE/Bruss.jmd
docs
17152
--- title: Brusselator Work-Precision Diagrams author: Chris Rackauckas and Utkarsh --- ```julia using OrdinaryDiffEq, DiffEqDevTools, Sundials, ParameterizedFunctions, Plots, ODEInterfaceDiffEq, LSODA, SparseArrays, LinearSolve, LinearAlgebra, IncompleteLU, AlgebraicMultigrid, Symbolics, ModelingToolkit gr() const N = 8 xyd_brusselator = range(0,stop=1,length=N) brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a function brusselator_2d_loop(du, u, p, t) A, B, alpha, dx = p alpha = alpha/dx^2 @inbounds for I in CartesianIndices((N, N)) i, j = Tuple(I) x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N) du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t) du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + A*u[i,j,1] - u[i,j,1]^2*u[i,j,2] end end p = (3.4, 1., 10., step(xyd_brusselator)) input = rand(N,N,2) output = similar(input) sparsity_pattern = Symbolics.jacobian_sparsity(brusselator_2d_loop,output,input,p,0.0) jac_sparsity = Float64.(sparse(sparsity_pattern)) f = ODEFunction{true, SciMLBase.FullSpecialize}(brusselator_2d_loop;jac_prototype=jac_sparsity) function init_brusselator_2d(xyd) N = length(xyd) u = zeros(N, N, 2) for I in CartesianIndices((N, N)) x = xyd[I[1]] y = xyd[I[2]] u[I,1] = 22*(y*(1-y))^(3/2) u[I,2] = 27*(x*(1-x))^(3/2) end u end u0 = init_brusselator_2d(xyd_brusselator) prob = ODEProblem(f,u0,(0.,11.5),p); ``` ```julia prob_mtk = ODEProblem(modelingtoolkitize(prob),[],(0.0,11.5),jac=true,sparse=true); ``` Also comparing with MethodOfLines.jl: ```julia using MethodOfLines, DomainSets @parameters x y t @variables u(..) v(..) Dt = Differential(t) Dx = Differential(x) Dy = Differential(y) Dxx = Differential(x)^2 Dyy = Differential(y)^2 ∇²(u) = Dxx(u) + Dyy(u) brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5. x_min = y_min = t_min = 0.0 x_max = y_max = 1.0 t_max = 11.5 α = 10. u0_mol(x,y,t) = 22(y*(1-y))^(3/2) v0_mol(x,y,t) = 27(x*(1-x))^(3/2) eq = [Dt(u(x,y,t)) ~ 1. 
+ v(x,y,t)*u(x,y,t)^2 - 4.4*u(x,y,t) + α*∇²(u(x,y,t)) + brusselator_f(x, y, t), Dt(v(x,y,t)) ~ 3.4*u(x,y,t) - v(x,y,t)*u(x,y,t)^2 + α*∇²(v(x,y,t))] domains = [x ∈ Interval(x_min, x_max), y ∈ Interval(y_min, y_max), t ∈ Interval(t_min, t_max)] bcs = [u(x,y,0) ~ u0_mol(x,y,0), u(0,y,t) ~ u(1,y,t), u(x,0,t) ~ u(x,1,t), v(x,y,0) ~ v0_mol(x,y,0), v(0,y,t) ~ v(1,y,t), v(x,0,t) ~ v(x,1,t)] @named pdesys = PDESystem(eq,bcs,domains,[x,y,t],[u(x,y,t),v(x,y,t)]) # Method of lines discretization dx = 1/N dy = 1/N order = 2 discretization = MOLFiniteDifference([x=>dx, y=>dy], t; approx_order = order, jac = true, sparse = true, wrap = Val(false)) # Convert the PDE system into an ODE problem prob_mol = discretize(pdesys,discretization) ``` ```julia using Base.Experimental: Const, @aliasscope macro vp(expr) nodes = (Symbol("llvm.loop.vectorize.predicate.enable"), 1) if expr.head != :for error("Syntax error: loopinfo needs a for loop") end push!(expr.args[2].args, Expr(:loopinfo, nodes)) return esc(expr) end struct Brusselator2DLoop <: Function N::Int s::Float64 end function (b::Brusselator2DLoop)(du, unc, p, t) N = b.N s = b.s A, B, alpha, dx = p alpha = alpha/abs2(dx) u = Base.Experimental.Const(unc) Base.Experimental.@aliasscope begin @inbounds @fastmath begin b = ((abs2(-0.3) + abs2(-0.6)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[N,1,1] + u[2,1,1] + u[1,2,1] + u[1,N,1] - 4u[1,1,1]) + B + abs2(u[1,1,1])*u[1,1,2] - (A + 1)*u[1,1,1] + b du2 = alpha*(u[N,1,2] + u[2,1,2] + u[1,2,2] + u[1,N,2] - 4u[1,1,2]) + A*u[1,1,1] - abs2(u[1,1,1])*u[1,1,2] du[1,1,1] = du1 du[1,1,2] = du2 @vp for i = 2:N-1 x = (i-1)*s ip1 = i+1 im1 = i-1 b = ((abs2(x-0.3) + abs2(-0.6)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[im1,1,1] + u[ip1,1,1] + u[i,2,1] + u[i,N,1] - 4u[i,1,1]) + B + abs2(u[i,1,1])*u[i,1,2] - (A + 1)*u[i,1,1] + b du2 = alpha*(u[im1,1,2] + u[ip1,1,2] + u[i,2,2] + u[i,N,2] - 4u[i,1,2]) + A*u[i,1,1] - abs2(u[i,1,1])*u[i,1,2] du[i,1,1] = du1 du[i,1,2] = du2 end b = ((abs2(0.7) + abs2(-0.6)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[N-1,1,1] + u[1,1,1] + u[N,2,1] + u[N,N,1] - 4u[N,1,1]) + B + abs2(u[N,1,1])*u[N,1,2] - (A + 1)*u[N,1,1] + b du2 = alpha*(u[N-1,1,2] + u[1,1,2] + u[N,2,2] + u[N,N,2] - 4u[N,1,2]) + A*u[N,1,1] - abs2(u[N,1,1])*u[N,1,2] du[N,1,1] = du1 du[N,1,2] = du2 for j = 2:N-1 y = (j-1)*s jp1 = j+1 jm1 = j-1 b0 = ((abs2(-0.3) + abs2(y-0.6)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du[1,j,1] = alpha*(u[N,j,1] + u[2,j,1] + u[1,jp1,1] + u[1,jm1,1] - 4u[1,j,1]) + B + abs2(u[1,j,1])*u[1,j,2] - (A + 1)*u[1,j,1] + b0 du[1,j,2] = alpha*(u[N,j,2] + u[2,j,2] + u[1,jp1,2] + u[1,jm1,2] - 4u[1,j,2]) + A*u[1,j,1] - abs2(u[1,j,1])*u[1,j,2] @vp for i = 2:N-1 x = (i-1)*s b = ((abs2(x-0.3) + abs2(y-0.6)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[i-1,j,1] + u[i+1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) + B + abs2(u[i,j,1])*u[i,j,2] - (A + 1)*u[i,j,1] + b du2 = alpha*(u[i-1,j,2] + u[i+1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) + A*u[i,j,1] - abs2(u[i,j,1])*u[i,j,2] du[i,j,1] = du1 du[i,j,2] = du2 end bN = ((abs2(0.7) + abs2(y-0.6)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du[N,j,1] = alpha*(u[N-1,j,1] + u[1,j,1] + u[N,jp1,1] + u[N,jm1,1] - 4u[N,j,1]) + B + abs2(u[N,j,1])*u[N,j,2] - (A + 1)*u[N,j,1] + bN du[N,j,2] = alpha*(u[N-1,j,2] + u[1,j,2] + u[N,jp1,2] + u[N,jm1,2] - 4u[N,j,2]) + A*u[N,j,1] - abs2(u[N,j,1])*u[N,j,2] end b = ((abs2(-0.3) + abs2(0.4)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[N,N,1] + u[2,N,1] + u[1,1,1] + u[1,N-1,1] - 4u[1,N,1]) + B + abs2(u[1,N,1])*u[1,N,2] - (A + 1)*u[1,N,1] + b du2 
= alpha*(u[N,N,2] + u[2,N,2] + u[1,1,2] + u[1,N-1,2] - 4u[1,N,2]) + A*u[1,N,1] - abs2(u[1,N,1])*u[1,N,2] du[1,N,1] = du1 du[1,N,2] = du2 @vp for i = 2:N-1 x = (i-1)*s ip1 = i+1 im1 = i-1 b = ((abs2(x-0.3) + abs2(0.4)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[im1,N,1] + u[ip1,N,1] + u[i,1,1] + u[i,N-1,1] - 4u[i,N,1]) + B + abs2(u[i,N,1])*u[i,N,2] - (A + 1)*u[i,N,1] + b du2 = alpha*(u[im1,N,2] + u[ip1,N,2] + u[i,1,2] + u[i,N-1,2] - 4u[i,N,2]) + A*u[i,N,1] - abs2(u[i,N,1])*u[i,N,2] du[i,N,1] = du1 du[i,N,2] = du2 end b = ((abs2(0.7) + abs2(0.4)) <= abs2(0.1)) * (t >= 1.1) * 5.0 du1 = alpha*(u[N-1,N,1] + u[1,N,1] + u[N,1,1] + u[N,N-1,1] - 4u[N,N,1]) + B + abs2(u[N,N,1])*u[N,N,2] - (A + 1)*u[N,N,1] + b du2 = alpha*(u[N-1,N,2] + u[1,N,2] + u[N,1,2] + u[N,N-1,2] - 4u[N,N,2]) + A*u[N,N,1] - abs2(u[N,N,1])*u[N,N,2] du[N,N,1] = du1 du[N,N,2] = du2 end end end function fast_bruss(N) xyd_brusselator = range(0,stop=1,length=N) brusselator_2d_loop = Brusselator2DLoop(N,Float64(step(xyd_brusselator))) p = (3.4, 1., 10., step(xyd_brusselator)) input = rand(N,N,2) output = similar(input) sparsity_pattern = Symbolics.jacobian_sparsity(brusselator_2d_loop,output,input,p,0.0) jac_sparsity = Float64.(sparse(sparsity_pattern)) f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity) u0 = zeros(N, N, 2) @inbounds for I in CartesianIndices((N, N)) x = xyd_brusselator[I[1]] y = xyd_brusselator[I[2]] u0[I,1] = 22*(y*(1-y))^(3/2) u0[I,2] = 27*(x*(1-x))^(3/2) end return ODEProblem(f,u0,(0.,11.5),p) end fastprob = fast_bruss(N) ``` ```julia sol = solve(prob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(prob_mtk,CVODE_BDF(linear_solver = :KLU),abstol=1/10^14,reltol=1/10^14) sol3 = solve(prob_mol,CVODE_BDF(linear_solver = :KLU),abstol=1/10^14,reltol=1/10^14,wrap=Val(false)) ``` ```julia test_sol = [sol,sol2,sol,sol3] probs = [prob,prob_mtk,fastprob,prob_mol]; ``` ```julia plot(sol,vars = 1) ``` ```julia plot(sol,vars = 10) ``` ## Setup Preconditioners ### OrdinaryDiffEq ```julia function incompletelu(W,du,u,p,t,newW,Plprev,Prprev,solverdata) if newW === nothing || newW Pl = ilu(convert(AbstractMatrix,W), τ = 50.0) else Pl = Plprev end Pl,nothing end function algebraicmultigrid(W,du,u,p,t,newW,Plprev,Prprev,solverdata) if newW === nothing || newW Pl = aspreconditioner(ruge_stuben(convert(AbstractMatrix,W))) else Pl = Plprev end Pl,nothing end ``` ### Sundials ```julia const jaccache = prob_mtk.f.jac(prob.u0,prob.p,0.0) const W = I - 1.0*jaccache prectmp = ilu(W, τ = 50.0) const preccache = Ref(prectmp) function psetupilu(p, t, u, du, jok, jcurPtr, gamma) if !jok prob_mtk.f.jac(jaccache,u,p,t) jcurPtr[] = true # W = I - gamma*J @. W = -gamma*jaccache idxs = diagind(W) @. @view(W[idxs]) = @view(W[idxs]) + 1 # Build preconditioner on W preccache[] = ilu(W, τ = 5.0) end end function precilu(z,r,p,t,y,fy,gamma,delta,lr) ldiv!(z,preccache[],r) end prectmp2 = aspreconditioner(ruge_stuben(W, presmoother = AlgebraicMultigrid.Jacobi(rand(size(W,1))), postsmoother = AlgebraicMultigrid.Jacobi(rand(size(W,1))))) const preccache2 = Ref(prectmp2) function psetupamg(p, t, u, du, jok, jcurPtr, gamma) if !jok prob_mtk.f.jac(jaccache,u,p,t) jcurPtr[] = true # W = I - gamma*J @. W = -gamma*jaccache idxs = diagind(W) @. 
@view(W[idxs]) = @view(W[idxs]) + 1 # Build preconditioner on W preccache2[] = aspreconditioner(ruge_stuben(W, presmoother = AlgebraicMultigrid.Jacobi(rand(size(W,1))), postsmoother = AlgebraicMultigrid.Jacobi(rand(size(W,1))))) end end function precamg(z,r,p,t,y,fy,gamma,delta,lr) ldiv!(z,preccache2[],r) end ``` ## Compare Problem Implementations ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [ Dict(:alg => KenCarp47(linsolve=KLUFactorization())), Dict(:alg => KenCarp47(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg => KenCarp47(linsolve=KLUFactorization()), :prob_choice => 3), Dict(:alg => KenCarp47(linsolve=KLUFactorization()), :prob_choice => 4), Dict(:alg => KenCarp47(linsolve=KrylovJL_GMRES())), Dict(:alg => KenCarp47(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg => KenCarp47(linsolve=KrylovJL_GMRES()), :prob_choice => 3), Dict(:alg => KenCarp47(linsolve=KrylovJL_GMRES()), :prob_choice => 4),] names = ["KenCarp47 KLU","KenCarp47 KLU MTK","KenCarp47 KLU FastBruss", "KenCarp47 KLU MOL", "KenCarp47 GMRES", "KenCarp47 GMRES MTK", "KenCarp47 GMRES FastBruss", "KenCarp47 GMRES MOL"]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;names = names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10,wrap=Val(false)) plot(wp) ``` ## High Tolerances This is the speed when you just want the answer. ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [ Dict(:alg=>CVODE_BDF(linear_solver = :KLU), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver = :GMRES)), Dict(:alg=>CVODE_BDF(linear_solver = :GMRES), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1)), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precamg,psetup=psetupamg,prec_side=1)), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precamg,psetup=psetupamg,prec_side=1), :prob_choice => 2), ] names = ["CVODE MTK KLU","CVODE GMRES","CVODE MTK GMRES", "CVODE iLU GMRES", "CVODE AMG GMRES", "CVODE iLU MTK GMRES", "CVODE AMG MTK GMRES"]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;names=names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [ Dict(:alg=>KenCarp47(linsolve=KLUFactorization())), Dict(:alg=>KenCarp47(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg=>KenCarp47(linsolve=UMFPACKFactorization())), Dict(:alg=>KenCarp47(linsolve=UMFPACKFactorization()), :prob_choice => 2), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES())), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES(),precs=incompletelu,concrete_jac=true)), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES(),precs=incompletelu,concrete_jac=true), :prob_choice => 2), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES(),precs=algebraicmultigrid,concrete_jac=true)), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES(),precs=algebraicmultigrid,concrete_jac=true), :prob_choice => 2), ] names = ["KenCarp47 KLU","KenCarp47 KLU MTK","KenCarp47 UMFPACK", "KenCarp47 UMFPACK MTK", "KenCarp47 GMRES", "KenCarp47 GMRES MTK", "KenCarp47 iLU GMRES", "KenCarp47 iLU GMRES MTK", "KenCarp47 AMG GMRES", "KenCarp47 AMG GMRES MTK"]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;names = names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [ Dict(:alg=>TRBDF2()), 
Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp47()), # Dict(:alg=>QNDF()), # bad Dict(:alg=>FBDF()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [ Dict(:alg=>KenCarp47(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>FBDF(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver = :KLU), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), ] names = ["KenCarp47 KLU MTK", "KenCarp47 GMRES MTK", "FBDF KLU MTK", "FBDF GMRES MTK", "CVODE MTK KLU", "CVODE iLU MTK GMRES" ]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;names = names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. ```julia abstols = 1.0 ./ 10.0 .^ (7:12) reltols = 1.0 ./ 10.0 .^ (4:9) setups = [ Dict(:alg=>CVODE_BDF(linear_solver = :KLU), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver = :GMRES)), Dict(:alg=>CVODE_BDF(linear_solver = :GMRES), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1)), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precamg,psetup=psetupamg,prec_side=1)), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precamg,psetup=psetupamg,prec_side=1), :prob_choice => 2), ] names = ["CVODE MTK KLU","CVODE GMRES","CVODE MTK GMRES", "CVODE iLU GMRES", "CVODE AMG GMRES", "CVODE iLU MTK GMRES", "CVODE AMG MTK GMRES"]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;names = names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [ Dict(:alg=>KenCarp47(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg=>KenCarp47(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>FBDF(linsolve=KLUFactorization()), :prob_choice => 2), Dict(:alg=>FBDF(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>Rodas5P(linsolve=KrylovJL_GMRES()), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver = :KLU), :prob_choice => 2), Dict(:alg=>CVODE_BDF(linear_solver=:GMRES,prec=precilu,psetup=psetupilu,prec_side=1), :prob_choice => 2), ] names = ["KenCarp47 KLU MTK", "KenCarp47 GMRES MTK", "FBDF KLU MTK", "FBDF GMRES MTK", "Rodas5P GMRES MTK", "CVODE MTK KLU", "CVODE iLU MTK GMRES" ]; wp = WorkPrecisionSet(probs,abstols,reltols,setups;names = names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
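As an aside on why the sparse and Krylov setups above pay off, here is a small illustrative check (not part of the original benchmark) of how few entries the detected Jacobian sparsity pattern actually stores compared to a dense Jacobian. It uses the `N` and `jac_sparsity` defined earlier in this file.

```julia
using SparseArrays
ndof = 2 * N^2                      # unknowns: u and v on the N x N grid
println("dense Jacobian entries: ", ndof^2)
println("stored sparse entries:  ", nnz(jac_sparsity))
println("fill ratio:             ", nnz(jac_sparsity) / ndof^2)
```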
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffODE/Hires.jmd
docs
10640
--- title: HIRES Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, ParameterizedFunctions, Plots, ODE, ODEInterfaceDiffEq, LSODA, DiffEqDevTools, Sundials using LinearAlgebra, StaticArrays gr() #gr(fmt=:png) f = @ode_def Hires begin dy1 = -1.71*y1 + 0.43*y2 + 8.32*y3 + 0.0007 dy2 = 1.71*y1 - 8.75*y2 dy3 = -10.03*y3 + 0.43*y4 + 0.035*y5 dy4 = 8.32*y2 + 1.71*y3 - 1.12*y4 dy5 = -1.745*y5 + 0.43*y6 + 0.43*y7 dy6 = -280.0*y6*y8 + 0.69*y4 + 1.71*y5 - 0.43*y6 + 0.69*y7 dy7 = 280.0*y6*y8 - 1.81*y7 dy8 = -280.0*y6*y8 + 1.81*y7 end u0 = zeros(8) u0[1] = 1 u0[8] = 0.0057 prob = ODEProblem{true, SciMLBase.FullSpecialize}(f,u0,(0.0,321.8122)) probstatic = ODEProblem{false}(f,SVector{8}(u0),(0.0,321.8122)) sol = solve(prob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Rodas5P(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; abstols = 1.0 ./ 10.0 .^ (4:11) reltols = 1.0 ./ 10.0 .^ (1:8); ``` ```julia plot(sol) ``` ```julia plot(sol,tspan=(0.0,5.0)) ``` ## Omissions The following were omitted from the tests due to convergence failures. ODE.jl's adaptivity is not able to stabilize its algorithms, while GeometricIntegratorsDiffEq has not upgraded to Julia 1.0. GeometricIntegrators.jl's methods either failed to converge at comparable dts or, on some computers, errored due to type conversions. ```julia #sol = solve(prob,ode23s()); println("Total ODE.jl steps: $(length(sol))") #using GeometricIntegratorsDiffEq #try # sol = solve(prob,GIRadIIA3(),dt=1/10) #catch e # println(e) #end ``` The stabilized explicit methods are not stable enough to handle this problem well. While they don't diverge, they are really slow. ```julia setups = [ #Dict(:alg=>ROCK2()), #Dict(:alg=>ROCK4()) #Dict(:alg=>ESERK5()) ] ``` ## High Tolerances This is the speed when you just want the answer. 
```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>TRBDF2()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>rodas()), Dict(:alg=>radau()), Dict(:alg=>RadauIIA5()), Dict(:alg=>ROS34PW1a()), Dict(:alg=>lsoda()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;dense = false,verbose=false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>Kvaerno3()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>TRBDF2()), Dict(:alg=>KenCarp3()), # Dict(:alg=>SDIRK2()), # Removed because it's bad Dict(:alg=>radau())] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;dense = false,verbose=false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>KenCarp5()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp4(), :prob_choice => 2), Dict(:alg=>KenCarp3()), Dict(:alg=>ARKODE(order=5)), Dict(:alg=>ARKODE()), Dict(:alg=>ARKODE(order=3))] names = ["Rosenbrock23" "Rosenbrock23 Static" "KenCarp5" "KenCarp4" "KenCarp4 Static" "KenCarp3" "ARKODE5" "ARKODE4" "ARKODE3"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names=names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;dense = false,verbose=false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>TRBDF2()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), Dict(:alg=>ABDF2()), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>Exprb43()), Dict(:alg=>Exprb32()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. 
```julia abstols = 1.0 ./ 10.0 .^ (7:13) reltols = 1.0 ./ 10.0 .^ (4:10) setups = [ Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), Dict(:alg=>CVODE_BDF()), Dict(:alg=>ddebdf()), Dict(:alg=>Rodas5()), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>rodas()), Dict(:alg=>radau()), Dict(:alg=>lsoda()), Dict(:alg=>RadauIIA5()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia setups = [Dict(:alg=>GRK4A()), Dict(:alg=>Rodas5()), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>Kvaerno5()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>lsoda()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rodas4()), Dict(:alg=>radau()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2) plot(wp) ``` ```julia setups = [Dict(:alg=>Rodas5()), Dict(:alg=>Rodas5(), :prob_choice => 2), Dict(:alg=>KenCarp5()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp4(), :prob_choice => 2), Dict(:alg=>KenCarp3()), Dict(:alg=>ARKODE(order=5)), Dict(:alg=>ARKODE()), Dict(:alg=>ARKODE(order=3))] names = ["Rodas5" "Rodas5 Static" "KenCarp5" "KenCarp4" "KenCarp4 Static" "KenCarp3" "ARKODE5" "ARKODE4" "ARKODE3"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names=names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2) plot(wp) ``` The following algorithms were removed since they failed. 
```julia #setups = [#Dict(:alg=>Hairer4()), #Dict(:alg=>Hairer42()), #Dict(:alg=>Rodas3()), #Dict(:alg=>Kvaerno4()), #Dict(:alg=>KenCarp5()), #Dict(:alg=>Cash4()) #] #wp = WorkPrecisionSet(probs,abstols,reltols,setups; # save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) #plot(wp) ``` Multithreading with Parallel Extrapolation Methods ```julia #Setting BLAS to one thread to measure gains LinearAlgebra.BLAS.set_num_threads(1) abstols = 1.0 ./ 10.0 .^ (10:12) reltols = 1.0 ./ 10.0 .^ (7:9) setups = [ Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>QNDF()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>seulex()), Dict(:alg=>ImplicitEulerExtrapolation(min_order = 4, init_order = 7,threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerExtrapolation(min_order = 4, init_order = 7,threading = false)), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 4, init_order = 7, threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 4, init_order = 7, threading = false)), Dict(:alg=>ImplicitHairerWannerExtrapolation(min_order = 3, init_order = 6,threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitHairerWannerExtrapolation(min_order = 3, init_order = 6,threading = false)), ] solnames = ["CVODE_BDF","KenCarp4","Rodas4","Rodas4 Static","Rodas5P","Rodas5P Static","QNDF","lsoda","radau","seulex","ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)", "ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names = solnames,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp, title = "Implicit Methods: HIRES",legend=:outertopleft,size = (1000,500), xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm) ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
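To make the "Static" entries in the plots above concrete, here is an illustrative timing sketch (not part of the original benchmark) comparing the in-place problem against the StaticArrays formulation at a single tolerance; it uses the `prob` and `probstatic` defined at the top of this file.

```julia
# Warm-up solves so that compilation time is excluded from the timings.
solve(prob, Rodas5P(); abstol=1e-8, reltol=1e-8)
solve(probstatic, Rodas5P(); abstol=1e-8, reltol=1e-8)
t_inplace = @elapsed solve(prob, Rodas5P(); abstol=1e-8, reltol=1e-8, save_everystep=false)
t_static  = @elapsed solve(probstatic, Rodas5P(); abstol=1e-8, reltol=1e-8, save_everystep=false)
println("in-place: $(t_inplace) s, static: $(t_static) s")
```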
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffODE/Orego.jmd
docs
10359
--- title: OREGO Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, DiffEqDevTools, ParameterizedFunctions, Plots, ODE, ODEInterfaceDiffEq, LSODA, Sundials gr() #gr(fmt=:png) using LinearAlgebra, StaticArrays f = @ode_def Orego begin dy1 = p1*(y2+y1*(1-p2*y1-y2)) dy2 = (y3-(1+y1)*y2)/p1 dy3 = p3*(y1-y3) end p1 p2 p3 p = SA[77.27,8.375e-6,0.161] prob = ODEProblem{true, SciMLBase.FullSpecialize}(f,[1.0,2.0,3.0],(0.0,30.0),p) probstatic = ODEProblem{false}(f,SA[1.0,2.0,3.0],(0.0,30.0),p) sol = solve(prob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Rodas5P(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; abstols = 1.0 ./ 10.0 .^ (4:11) reltols = 1.0 ./ 10.0 .^ (1:8); ``` ```julia plot_prob = ODEProblem(f,[1.0,2.0,3.0],(0.0,400.0),p) sol = solve(plot_prob,CVODE_BDF()) plot(sol,yscale=:log10) ``` ## Omissions and Tweaking The following were omitted from the tests due to convergence failures. ODE.jl's adaptivity is not able to stabilize its algorithms, while GeometricIntegratorsDiffEq has not upgraded to Julia 1.0. GeometricIntegrators.jl's methods either failed to converge at comparable dts or, on some computers, errored due to type conversions. ```julia #sol = solve(prob,ode23s()); println("Total ODE.jl steps: $(length(sol))") #using GeometricIntegratorsDiffEq #try # sol = solve(prob,GIRadIIA3(),dt=1/10) #catch e # println(e) #end ``` ```julia sol = solve(prob,ARKODE(),abstol=1e-5,reltol=1e-1); ``` ```julia sol = solve(prob,ARKODE(nonlinear_convergence_coefficient = 1e-3),abstol=1e-5,reltol=1e-1); ``` ```julia sol = solve(prob,ARKODE(order=3),abstol=1e-5,reltol=1e-1); ``` ```julia sol = solve(prob,ARKODE(order=3,nonlinear_convergence_coefficient = 1e-5),abstol=1e-5,reltol=1e-1); ``` ```julia sol = solve(prob,ARKODE(order=5),abstol=1e-5,reltol=1e-1); ``` The stabilized explicit methods are not stable enough to handle this problem well. While they don't diverge, they are really slow. ```julia setups = [ #Dict(:alg=>ROCK2()) #Unstable #Dict(:alg=>ROCK4()) #needs more iterations #Dict(:alg=>ESERK5()), ] ``` The EPIRK and exponential methods also fail: ```julia sol = solve(prob,EXPRB53s3(),dt=2.0^(-8)); sol = solve(prob,EPIRK4s3B(),dt=2.0^(-8)); sol = solve(prob,EPIRK5P2(),dt=2.0^(-8)); ``` PDIRK44 also fails: ```julia sol = solve(prob,PDIRK44(),dt=2.0^(-8)); ``` ## High Tolerances This is the speed when you just want the answer. 
```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>TRBDF2()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>rodas()), Dict(:alg=>radau()), Dict(:alg=>RadauIIA5()), Dict(:alg=>ROS34PW1a()), Dict(:alg=>lsoda()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;dense = false,verbose=false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>Kvaerno3()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>TRBDF2()), Dict(:alg=>KenCarp3()), Dict(:alg=>lsoda()), # Dict(:alg=>SDIRK2()), # Removed because it's bad Dict(:alg=>radau())] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;dense = false,verbose = false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>KenCarp5()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp4(), :prob_choice => 2), Dict(:alg=>KenCarp3()), Dict(:alg=>ARKODE(order=5)), Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-6)), Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-5,order=3)) ] names = ["Rosenbrock23" "Rosenbrock23 Static" "KenCarp5" "KenCarp4" "KenCarp4 Static" "KenCarp3" "ARKODE5" "ARKODE4" "ARKODE3"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names=names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. 
```julia abstols = 1.0 ./ 10.0 .^ (7:13) reltols = 1.0 ./ 10.0 .^ (4:10) setups = [ Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>Rodas4P()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>ddebdf()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>rodas()), Dict(:alg=>radau()), Dict(:alg=>RadauIIA5()), Dict(:alg=>lsoda()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>GRK4A()), Dict(:alg=>Rodas5()), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>Kvaerno5()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp4(), :prob_choice => 2), Dict(:alg=>KenCarp5()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(probs,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` The following algorithms were removed since they failed. 
```julia #setups = [Dict(:alg=>Hairer4()), #Dict(:alg=>Hairer42()), #Dict(:alg=>Rodas3()), #Dict(:alg=>Kvaerno4()), #Dict(:alg=>Cash4()) #] #wp = WorkPrecisionSet(probs,abstols,reltols,setups; # save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) #plot(wp) ``` Multithreading benchmarks with Parallel Extrapolation Methods ```julia #Checking for threading print(Threads.nthreads()) ``` ```julia #Setting BLAS to one thread to measure gains LinearAlgebra.BLAS.set_num_threads(1) abstols = 1.0 ./ 10.0 .^ (10:12) reltols = 1.0 ./ 10.0 .^ (7:9) setups = [ Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>QNDF()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>seulex()), Dict(:alg=>ImplicitEulerExtrapolation(init_order = 4,threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerExtrapolation(init_order = 4,threading = false)), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(init_order = 4, threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(init_order = 4, threading = false)), Dict(:alg=>ImplicitHairerWannerExtrapolation(init_order = 5,threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitHairerWannerExtrapolation(init_order = 5,threading = false)), ] solnames = ["CVODE_BDF","KenCarp4","Rodas4","Rodas4 Static","Rodas5P","Rodas5P Static","QNDF","lsoda","radau","seulex","ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)", "ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names = solnames,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp, title = "Implicit Methods: OREGO",legend=:outertopleft,size = (1000,500), xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm) ``` ### Conclusion At high tolerances, `Rosenbrock23` hits the error estimates and is fast. At lower tolerances and normal user tolerances, `Rodas4` and `Rodas5` are extremely fast. When you get down to `reltol=1e-9`, `radau` begins to become as efficient as `Rodas4`, and it continues to do well below that. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
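Since the diagrams above switch between `error_estimate=:final`, `:l2`, and `:L2`, here is an illustrative sketch (not part of the original benchmark) of how the first two measures can be computed by hand; `:L2` is the analogous continuous-time norm evaluated through the dense interpolants. It uses the `prob` defined at the top of this file.

```julia
ref    = solve(prob, Rodas5P(); abstol=1e-13, reltol=1e-13)   # tight reference
approx = solve(prob, Rosenbrock23(); abstol=1e-6, reltol=1e-3)
# :final — error at the final time only
final_err = norm(approx.u[end] - ref(approx.t[end]))
# :l2 — a discrete l2 average of the error over the solver's own time points
l2_err = sqrt(sum(norm(approx.u[i] - ref(approx.t[i]))^2 for i in eachindex(approx.t)) /
              length(approx.t))
println("final error = $final_err, l2 error = $l2_err")
```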
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffODE/Pollution.jmd
docs
13605
--- title: POLLU Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, DiffEqDevTools, Sundials, ParameterizedFunctions, Plots, ODE, ODEInterfaceDiffEq, LSODA, LinearSolve using ProfileSVG, BenchmarkTools, Profile gr() # gr(fmt=:png) using LinearAlgebra const k1=.35e0 const k2=.266e2 const k3=.123e5 const k4=.86e-3 const k5=.82e-3 const k6=.15e5 const k7=.13e-3 const k8=.24e5 const k9=.165e5 const k10=.9e4 const k11=.22e-1 const k12=.12e5 const k13=.188e1 const k14=.163e5 const k15=.48e7 const k16=.35e-3 const k17=.175e-1 const k18=.1e9 const k19=.444e12 const k20=.124e4 const k21=.21e1 const k22=.578e1 const k23=.474e-1 const k24=.178e4 const k25=.312e1 function f(dy,y,p,t) r1 = k1 *y[1] r2 = k2 *y[2]*y[4] r3 = k3 *y[5]*y[2] r4 = k4 *y[7] r5 = k5 *y[7] r6 = k6 *y[7]*y[6] r7 = k7 *y[9] r8 = k8 *y[9]*y[6] r9 = k9 *y[11]*y[2] r10 = k10*y[11]*y[1] r11 = k11*y[13] r12 = k12*y[10]*y[2] r13 = k13*y[14] r14 = k14*y[1]*y[6] r15 = k15*y[3] r16 = k16*y[4] r17 = k17*y[4] r18 = k18*y[16] r19 = k19*y[16] r20 = k20*y[17]*y[6] r21 = k21*y[19] r22 = k22*y[19] r23 = k23*y[1]*y[4] r24 = k24*y[19]*y[1] r25 = k25*y[20] dy[1] = -r1-r10-r14-r23-r24+ r2+r3+r9+r11+r12+r22+r25 dy[2] = -r2-r3-r9-r12+r1+r21 dy[3] = -r15+r1+r17+r19+r22 dy[4] = -r2-r16-r17-r23+r15 dy[5] = -r3+r4+r4+r6+r7+r13+r20 dy[6] = -r6-r8-r14-r20+r3+r18+r18 dy[7] = -r4-r5-r6+r13 dy[8] = r4+r5+r6+r7 dy[9] = -r7-r8 dy[10] = -r12+r7+r9 dy[11] = -r9-r10+r8+r11 dy[12] = r9 dy[13] = -r11+r10 dy[14] = -r13+r12 dy[15] = r14 dy[16] = -r18-r19+r16 dy[17] = -r20 dy[18] = r20 dy[19] = -r21-r22-r24+r23+r25 dy[20] = -r25+r24 end function fjac(J,y,p,t) J .= 0.0 J[1,1] = -k1-k10*y[11]-k14*y[6]-k23*y[4]-k24*y[19] J[1,11] = -k10*y[1]+k9*y[2] J[1,6] = -k14*y[1] J[1,4] = -k23*y[1]+k2*y[2] J[1,19] = -k24*y[1]+k22 J[1,2] = k2*y[4]+k9*y[11]+k3*y[5]+k12*y[10] J[1,13] = k11 J[1,20] = k25 J[1,5] = k3*y[2] J[1,10] = k12*y[2] J[2,4] = -k2*y[2] J[2,5] = -k3*y[2] J[2,11] = -k9*y[2] J[2,10] = -k12*y[2] J[2,19] = k21 J[2,1] = k1 J[2,2] = -k2*y[4]-k3*y[5]-k9*y[11]-k12*y[10] J[3,1] = k1 J[3,4] = k17 J[3,16] = k19 J[3,19] = k22 J[3,3] = -k15 J[4,4] = -k2*y[2]-k16-k17-k23*y[1] J[4,2] = -k2*y[4] J[4,1] = -k23*y[4] J[4,3] = k15 J[5,5] = -k3*y[2] J[5,2] = -k3*y[5] J[5,7] = 2k4+k6*y[6] J[5,6] = k6*y[7]+k20*y[17] J[5,9] = k7 J[5,14] = k13 J[5,17] = k20*y[6] J[6,6] = -k6*y[7]-k8*y[9]-k14*y[1]-k20*y[17] J[6,7] = -k6*y[6] J[6,9] = -k8*y[6] J[6,1] = -k14*y[6] J[6,17] = -k20*y[6] J[6,2] = k3*y[5] J[6,5] = k3*y[2] J[6,16] = 2k18 J[7,7] = -k4-k5-k6*y[6] J[7,6] = -k6*y[7] J[7,14] = k13 J[8,7] = k4+k5+k6*y[6] J[8,6] = k6*y[7] J[8,9] = k7 J[9,9] = -k7-k8*y[6] J[9,6] = -k8*y[9] J[10,10] = -k12*y[2] J[10,2] = -k12*y[10]+k9*y[11] J[10,9] = k7 J[10,11] = k9*y[2] J[11,11] = -k9*y[2]-k10*y[1] J[11,2] = -k9*y[11] J[11,1] = -k10*y[11] J[11,9] = k8*y[6] J[11,6] = k8*y[9] J[11,13] = k11 J[12,11] = k9*y[2] J[12,2] = k9*y[11] J[13,13] = -k11 J[13,11] = k10*y[1] J[13,1] = k10*y[11] J[14,14] = -k13 J[14,10] = k12*y[2] J[14,2] = k12*y[10] J[15,1] = k14*y[6] J[15,6] = k14*y[1] J[16,16] = -k18-k19 J[16,4] = k16 J[17,17] = -k20*y[6] J[17,6] = -k20*y[17] J[18,17] = k20*y[6] J[18,6] = k20*y[17] J[19,19] = -k21-k22-k24*y[1] J[19,1] = -k24*y[19]+k23*y[4] J[19,4] = k23*y[1] J[19,20] = k25 J[20,20] = -k25 J[20,1] = k24*y[19] J[20,19] = k24*y[1] return end u0 = zeros(20) u0[2] = 0.2 u0[4] = 0.04 u0[7] = 0.1 u0[8] = 0.3 u0[9] = 0.01 u0[17] = 0.007 prob = ODEProblem(ODEFunction{true, SciMLBase.FullSpecialize}(f, jac=fjac),u0,(0.0,60.0)) sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14) 
test_sol = TestSolution(sol) abstols = 1.0 ./ 10.0 .^ (4:11) reltols = 1.0 ./ 10.0 .^ (1:8); ``` ```julia plot(sol) ``` ```julia plot(sol,tspan=(0.0,5.0)) ``` ## Omissions The following were omitted from the tests due to convergence failures. ODE.jl's adaptivity is not able to stabilize its algorithms, while GeometricIntegratorsDiffEq has not upgraded to Julia 1.0. GeometricIntegrators.jl's methods either failed to converge at comparable dts or, on some computers, errored due to type conversions. ```julia #sol = solve(prob,ode23s()); println("Total ODE.jl steps: $(length(sol))") #using GeometricIntegratorsDiffEq #try # sol = solve(prob,GIRadIIA3(),dt=1/10) #catch e # println(e) #end ``` The stabilized explicit methods fail. ```julia setups = [ #Dict(:alg=>ROCK2()), #Dict(:alg=>ROCK4()) #Dict(:alg=>ESERK5()) ] ``` The EPIRK and exponential methods also fail: ```julia sol = solve(prob,EXPRB53s3(),dt=2.0^(-8)); sol = solve(prob,EPIRK4s3B(),dt=2.0^(-8)); sol = solve(prob,EPIRK5P2(),dt=2.0^(-8)); ``` ## High Tolerances This is the speed when you just want the answer. ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>TRBDF2()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>rodas()), Dict(:alg=>radau()), Dict(:alg=>lsoda()), Dict(:alg=>RadauIIA5()), ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups;dense = false,verbose = false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Kvaerno3()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>TRBDF2()), Dict(:alg=>KenCarp3()), Dict(:alg=>Rodas4()), Dict(:alg=>lsoda()), Dict(:alg=>radau())] wp = WorkPrecisionSet(prob,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups;dense = false,verbose = false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>KenCarp5()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp3()), Dict(:alg=>ARKODE(order=5)), Dict(:alg=>ARKODE()), Dict(:alg=>ARKODE(order=3))] names = ["Rosenbrock23" "KenCarp5" "KenCarp4" "KenCarp3" "ARKODE5" "ARKODE4" "ARKODE3"] wp = WorkPrecisionSet(prob,abstols,reltols,setups; names=names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>TRBDF2()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), Dict(:alg=>ABDF2()), Dict(:alg=>FBDF()), #Dict(:alg=>QNDF()), #Dict(:alg=>Exprb43()), #matrix contains Infs or NaNs #Dict(:alg=>Exprb32()), #matrix contains Infs or NaNs ] wp = WorkPrecisionSet(prob,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), 
Dict(:alg=>TRBDF2()), Dict(:alg=>ImplicitEulerExtrapolation(linsolve = RFLUFactorization())), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(linsolve = RFLUFactorization())), Dict(:alg=>ImplicitHairerWannerExtrapolation(linsolve = RFLUFactorization())), Dict(:alg=>ABDF2()), Dict(:alg=>FBDF()), #Dict(:alg=>QNDF()), #Dict(:alg=>Exprb43()), #matrix contains Infs or NaNs #Dict(:alg=>Exprb32()), #matrix contains Infs or NaNs ] wp = WorkPrecisionSet(prob,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5)) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. ```julia abstols = 1.0 ./ 10.0 .^ (7:13) reltols = 1.0 ./ 10.0 .^ (4:10) setups = [ Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>Rodas4P()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>ddebdf()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas5P()), Dict(:alg=>rodas()), Dict(:alg=>radau()), Dict(:alg=>lsoda()) ] wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>GRK4A()), Dict(:alg=>Rodas5()), Dict(:alg=>Kvaerno4()), Dict(:alg=>Kvaerno5()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp5()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas5P()), Dict(:alg=>radau()), Dict(:alg=>ImplicitEulerExtrapolation(min_order = 3)), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), ] wp = WorkPrecisionSet(prob,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false, dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia wp = WorkPrecisionSet(prob,abstols,reltols,setups; appxsol=test_sol,maxiters=Int(1e5),error_estimate=:L2,numruns=10) plot(wp) ``` The following algorithms were removed since they failed. 
```julia #setups = [#Dict(:alg=>Hairer4()), #Dict(:alg=>Hairer42()), #Dict(:alg=>Rodas3()), #Dict(:alg=>Cash4()) #] #wp = WorkPrecisionSet(prob,abstols,reltols,setups; # save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) #plot(wp) ``` Multithreading benchmarks with Parallel Extrapolation Methods ```julia #Setting BLAS to one thread to measure gains LinearAlgebra.BLAS.set_num_threads(1) abstols = 1.0 ./ 10.0 .^ (11:13) reltols = 1.0 ./ 10.0 .^ (8:10) setups = [ Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas5()), Dict(:alg=>Rodas5P()), Dict(:alg=>QNDF()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>seulex()), Dict(:alg=>ImplicitEulerExtrapolation(min_order = 5, init_order = 3,threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerExtrapolation(min_order = 5, init_order = 3,threading = false)), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 5, threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 5, threading = false)), Dict(:alg=>ImplicitHairerWannerExtrapolation(threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitHairerWannerExtrapolation(threading = false)), ] solnames = ["CVODE_BDF","KenCarp4","Rodas4","Rodas5","Rodas5P","QNDF","lsoda","radau","seulex","ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)", "ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"] wp = WorkPrecisionSet(prob,abstols,reltols,setups; names = solnames,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp, title = "Implicit Methods: POLLUTION",legend=:outertopleft,size = (1000,500), xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm) ``` ### Conclusion Sundials' `CVODE_BDF` is the best here. `lsoda` does well at high tolerances, but its cost grows fast when tolerances get too low. `KenCarp4` or `Rodas5` is a decent substitute when necessary. ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
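All of the stiff solvers above lean on the hand-written Jacobian `fjac`, so a cheap consistency check is worth showing. The sketch below is illustrative only (not part of the original benchmark) and assumes ForwardDiff.jl is available; it compares `fjac` against an automatic-differentiation Jacobian at a random state.

```julia
using ForwardDiff
y = rand(20)
J_analytic = zeros(20, 20)
fjac(J_analytic, y, nothing, 0.0)            # hand-written Jacobian
buf = similar(y)                             # output buffer for the in-place rhs
J_ad = ForwardDiff.jacobian((du, u) -> f(du, u, nothing, 0.0), buf, y)
println("max abs deviation: ", maximum(abs, J_analytic .- J_ad))
```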
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffODE/ROBER.jmd
docs
12290
--- title: ROBER Work-Precision Diagrams author: Chris Rackauckas --- ```julia using OrdinaryDiffEq, DiffEqDevTools, Sundials, ParameterizedFunctions, Plots, ODE, ODEInterfaceDiffEq, LSODA gr() using LinearAlgebra, StaticArrays rober = @ode_def begin dy₁ = -k₁*y₁+k₃*y₂*y₃ dy₂ = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃ dy₃ = k₂*y₂^2 end k₁ k₂ k₃ prob = ODEProblem{true, SciMLBase.FullSpecialize}(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) probstatic = ODEProblem{false}(rober,SA[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4)) sol = solve(prob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14) sol2 = solve(probstatic,Rodas5P(),abstol=1/10^14,reltol=1/10^14) probs = [prob,probstatic] test_sol = [sol,sol2]; abstols = 1.0 ./ 10.0 .^ (4:11) reltols = 1.0 ./ 10.0 .^ (1:8); ``` ```julia plot(sol,labels=["y1","y2","y3"]) ``` ## Omissions and Tweaking The following were omitted from the tests due to convergence failures. ODE.jl's adaptivity is not able to stabilize its algorithms, while GeometricIntegratorsDiffEq has not upgraded to Julia 1.0. GeometricIntegrators.jl's methods either failed to converge at comparable dts or, on some computers, errored due to type conversions. ```julia #sol = solve(prob,ode23s()); println("Total ODE.jl steps: $(length(sol))") #using GeometricIntegratorsDiffEq #try # sol = solve(prob,GIRadIIA3(),dt=1/10) #catch e # println(e) #end ``` `ARKODE` needs a lower `nonlinear_convergence_coefficient` in order to not diverge. ```julia #sol = solve(prob,ARKODE(nonlinear_convergence_coefficient = 1e-6),abstol=1e-5,reltol=1e-1); # Noisy, output omitted ``` ```julia sol = solve(prob,ARKODE(nonlinear_convergence_coefficient = 1e-7),abstol=1e-5,reltol=1e-1); ``` Note that `1e-7` matches the value from the Sundials manual, which was required for their example to converge on this problem. The default is `1e-1`. ```julia #sol = solve(prob,ARKODE(order=3),abstol=1e-4,reltol=1e-1); # Doesn't diverge but doesn't finish ``` ```julia #sol = solve(prob,ARKODE(order=5),abstol=1e-4,reltol=1e-1); # Noisy, output omitted ``` ```julia #sol = solve(prob,ARKODE(order=5,nonlinear_convergence_coefficient = 1e-9),abstol=1e-5,reltol=1e-1); # Noisy, output omitted ``` Additionally, the ROCK methods do not perform well on this benchmark. ```julia setups = [ #Dict(:alg=>ROCK2()) #Unstable #Dict(:alg=>ROCK4()) #needs more iterations ] ``` Some of the bad Rosenbrocks fail: ```julia setups = [ #Dict(:alg=>Hairer4()), #Dict(:alg=>Hairer42()), #Dict(:alg=>Cash4()), ] ``` The EPIRK and exponential methods also fail: ```julia sol = solve(prob,EXPRB53s3(),dt=2.0^(-8)); sol = solve(prob,EPIRK4s3B(),dt=2.0^(-8)); sol = solve(prob,EPIRK5P2(),dt=2.0^(-8)); ``` PDIRK44 also fails: ```julia sol = solve(prob,PDIRK44(),dt=2.0^(-8)); ``` In fact, all non-adaptive methods fail on this problem. ## High Tolerances This is the speed when you just want the answer. `ode23s` from ODE.jl was removed since it fails. Note that at high tolerances Sundials' `CVODE_BDF` fails as well so it's excluded from this test. 
```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>TRBDF2()), Dict(:alg=>rodas()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>RadauIIA5()), Dict(:alg=>ROS34PW1a()), ] gr() wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>Kvaerno3()), Dict(:alg=>KenCarp4()), Dict(:alg=>TRBDF2()), Dict(:alg=>KenCarp3()), Dict(:alg=>lsoda()), # Dict(:alg=>SDIRK2()), # Removed because it's bad Dict(:alg=>radau())] names = ["Rosenbrock23" "Rosenbrock23 Static" "Kvaerno3" "KenCarp4" "TRBDF2" "KenCarp3" "lsoda" "radau"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;names=names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>KenCarp5()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp4(), :prob_choice => 2), Dict(:alg=>KenCarp3()), Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-9,order=5)), Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-8)), Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-7,order=3)) ] names = ["Rosenbrock23" "Rosenbrock23 Static" "KenCarp5" "KenCarp4" "KenCarp4 Static" "KenCarp3" "ARKODE5" "ARKODE4" "ARKODE3"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names=names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>TRBDF2()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), #Dict(:alg=>ABDF2()), # Maxiters Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), #Dict(:alg=>Exprb43()), #SingularException #Dict(:alg=>Exprb32()), #SingularException ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Timeseries Errors ```julia abstols = 1.0 ./ 10.0 .^ (5:8) reltols = 1.0 ./ 10.0 .^ (1:4); setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>TRBDF2()), Dict(:alg=>rodas()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>RadauIIA5()), Dict(:alg=>ROS34PW1a()), ] gr() wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate=:l2, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>Kvaerno3()), Dict(:alg=>KenCarp4()), Dict(:alg=>TRBDF2()), Dict(:alg=>KenCarp3()), # Dict(:alg=>SDIRK2()), # Removed because it's bad Dict(:alg=>radau())] names = ["Rosenbrock23" "Rosenbrock23 Static" "Kvaerno3" "KenCarp4" "TRBDF2" "KenCarp3" "radau"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;names=names, appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2,numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Rosenbrock23()), Dict(:alg=>Rosenbrock23(), :prob_choice => 2), Dict(:alg=>TRBDF2()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), 
Dict(:alg=>ImplicitHairerWannerExtrapolation()), #Dict(:alg=>ABDF2()), # Maxiters Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), #Dict(:alg=>Exprb43()), #SingularException #Dict(:alg=>Exprb32()), #SingularException ] wp = WorkPrecisionSet(probs,abstols,reltols,setups;verbose=false,error_estimate=:l2, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ### Low Tolerances This is the speed at lower tolerances, measuring what's good when accuracy is needed. ```julia abstols = 1.0 ./ 10.0 .^ (7:12) reltols = 1.0 ./ 10.0 .^ (4:9) setups = [#Dict(:alg=>Rodas5()), Dict(:alg=>FBDF()), Dict(:alg=>QNDF()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>ddebdf()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), #Dict(:alg=>Rodas5P()), Dict(:alg=>rodas()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>RadauIIA5()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia setups = [Dict(:alg=>Kvaerno4()), Dict(:alg=>Kvaerno5()), Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>KenCarp47()), Dict(:alg=>KenCarp47(), :prob_choice => 2), Dict(:alg=>KenCarp5()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), #Dict(:alg=>Rodas5P()), #Dict(:alg=>Rodas5()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>ImplicitEulerExtrapolation()), Dict(:alg=>ImplicitEulerBarycentricExtrapolation()), Dict(:alg=>ImplicitHairerWannerExtrapolation()), ] wp = WorkPrecisionSet(probs,abstols,reltols,setups; save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` ```julia abstols = 1.0 ./ 10.0 .^ (10:12) reltols = 1.0 ./ 10.0 .^ (7:9) setups = [Dict(:alg=>Rodas4()) Dict(:alg=>Rodas4(), :prob_choice => 2) Dict(:alg=>Rodas5()) Dict(:alg=>Rodas5(), :prob_choice => 2) Dict(:alg=>Rodas5P()) Dict(:alg=>Rodas5P(), :prob_choice => 2)] names = ["Rodas4" "Rodas4 Static" "Rodas5" "Rodas5 Static" "Rodas5P" "Rodas5P Static"] wp = WorkPrecisionSet(probs,abstols,reltols,setups;names=names, save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10) plot(wp) ``` Multithreading benchmarks with Parallel Extrapolation Methods ```julia #Setting BLAS to one thread to measure gains LinearAlgebra.BLAS.set_num_threads(1) abstols = 1.0 ./ 10.0 .^ (10:12) reltols = 1.0 ./ 10.0 .^ (7:9) setups = [ Dict(:alg=>CVODE_BDF()), Dict(:alg=>KenCarp4()), Dict(:alg=>Rodas4()), Dict(:alg=>Rodas4(), :prob_choice => 2), Dict(:alg=>Rodas5P()), Dict(:alg=>Rodas5P(), :prob_choice => 2), Dict(:alg=>QNDF()), Dict(:alg=>lsoda()), Dict(:alg=>radau()), Dict(:alg=>seulex()), Dict(:alg=>ImplicitEulerExtrapolation(threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerExtrapolation(threading = false)), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 4, threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitEulerBarycentricExtrapolation(min_order = 4, threading = false)), Dict(:alg=>ImplicitHairerWannerExtrapolation(threading = OrdinaryDiffEq.PolyesterThreads())), Dict(:alg=>ImplicitHairerWannerExtrapolation(threading = false)), ] solnames = ["CVODE_BDF","KenCarp4","Rodas4","Rodas4 Static","Rodas5P","Rodas5P Static","QNDF","lsoda","radau","seulex","ImplEulerExtpl (threaded)", "ImplEulerExtpl (non-threaded)", "ImplEulerBaryExtpl (threaded)","ImplEulerBaryExtpl (non-threaded)","ImplHWExtpl (threaded)","ImplHWExtpl (non-threaded)"] wp = WorkPrecisionSet(probs,abstols,reltols,setups; names = 
solnames,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10)
plot(wp, title = "Implicit Methods: ROBER",legend=:outertopleft,size = (1000,500),
     xticks = 10.0 .^ (-15:1:1), yticks = 10.0 .^ (-6:0.3:5), bottom_margin= 5Plots.mm)
```

### Conclusion

At high tolerances, `Rosenbrock23` and `lsoda` hit the error estimates and are fast. At lower tolerances and normal user tolerances, `Rodas4` and `Rodas5` are extremely fast. `lsoda` does quite well across both ends. When you get down to `reltol=1e-9`, `radau` begins to become as efficient as `Rodas4`, and it continues to do well below that.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
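As a closing usage sketch of the recommendation above (the solver choice follows the results; the tolerance values here are only illustrative):

```julia
# Sketch only: at standard user tolerances, a high-order Rosenbrock method
# such as Rodas5P is a strong default for ROBER-like stiff problems.
# `prob` is the ROBER ODEProblem defined at the top of this document.
sol = solve(prob, Rodas5P(), abstol = 1e-8, reltol = 1e-8)
```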
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffODE/VanDerPol.jmd
docs
13583
---
title: VanDerPol Work-Precision Diagrams
author: Chris Rackauckas
---

```julia
using OrdinaryDiffEq, DiffEqDevTools, Sundials, ParameterizedFunctions, Plots, ODE, ODEInterfaceDiffEq, LSODA
gr()
using LinearAlgebra, StaticArrays

van = @ode_def begin
  dy = μ*((1-x^2)*y - x)
  dx = 1*y
end μ

abstols = 1.0 ./ 10.0 .^ (5:9)
reltols = 1.0 ./ 10.0 .^ (2:6)
prob = ODEProblem{true, SciMLBase.FullSpecialize}(van,[1.0;1.0],(0.0,6.3),1e6)
probstatic = ODEProblem{false}(van,SA[0;2.],(0.0,6.3),1e6)
sol = solve(prob,CVODE_BDF(),abstol=1/10^14,reltol=1/10^14)
sol2 = solve(probstatic,Rodas5P(),abstol=1/10^14,reltol=1/10^14)
probs = [prob,probstatic]
test_sol = [sol,sol2];
```

### Plot Test

```julia
plot(sol,ylim=[-4;4])
```

```julia
plot(sol)
```

## Omissions And Tweaking

The following were omitted from the tests due to convergence failures. ODE.jl's adaptivity is not able to stabilize its algorithms, while GeometricIntegratorsDiffEq has not upgraded to Julia 1.0. GeometricIntegrators.jl's methods either fail to converge at comparable dts or, on some computers, error due to type conversions.

```julia
#sol = solve(prob,ode23s()); println("Total ODE.jl steps: $(length(sol))")
#using GeometricIntegratorsDiffEq
#try
#    sol = solve(prob,GIRadIIA3(),dt=1/1000)
#catch e
#    println(e)
#end
```

`ARKODE` needs a lower `nonlinear_convergence_coefficient` in order to not diverge.

```julia
sol = solve(prob,ARKODE(),abstol=1e-4,reltol=1e-2);
```

```julia
sol = solve(prob,ARKODE(nonlinear_convergence_coefficient = 1e-6),abstol=1e-4,reltol=1e-1);
```

```julia
sol = solve(prob,ARKODE(order=3),abstol=1e-4,reltol=1e-1);
```

```julia
sol = solve(prob,ARKODE(nonlinear_convergence_coefficient = 1e-6,order=3),abstol=1e-4,reltol=1e-1);
```

```julia
sol = solve(prob,ARKODE(order=5,nonlinear_convergence_coefficient = 1e-3),abstol=1e-4,reltol=1e-1);
```

```julia
sol = solve(prob,ARKODE(order=5,nonlinear_convergence_coefficient = 1e-4),abstol=1e-4,reltol=1e-1);
```

Additionally, the ROCK methods do not perform well on this benchmark.

```julia
setups = [
          #Dict(:alg=>ROCK2()) #Unstable
          #Dict(:alg=>ROCK4()) #needs more iterations
          #Dict(:alg=>ESERK5()),
          ]
```

Some of the bad Rosenbrocks fail:

```julia
setups = [
  #Dict(:alg=>Hairer4()),
  #Dict(:alg=>Hairer42()),
  #Dict(:alg=>Cash4()),
]
```

The EPIRK and exponential methods also fail:

```julia
sol = solve(prob,EXPRB53s3(),dt=2.0^(-8));
sol = solve(prob,EPIRK4s3B(),dt=2.0^(-8));
sol = solve(prob,EPIRK5P2(),dt=2.0^(-8));
```

## Low Order and High Tolerance

This tests the case where accuracy is not needed as much and quick robust solutions are necessary. Note that `ARKODE`'s convergence coefficient must be lowered to `1e-7` in order to converge.

#### Final timepoint error

This measures the efficiency to get the value at the endpoint correct.
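By default, `WorkPrecisionSet` measures this final-time error; the timeseries chunks later in this document pass `error_estimate=:l2` to measure the average error over the saved timeseries instead. A minimal sketch of the two calls, assuming the `probs`, `setups`, and tolerance vectors of the surrounding chunks:

```julia
# Sketch only: endpoint error (the default) vs. timeseries (L2) error.
wp_final = WorkPrecisionSet(probs, abstols, reltols, setups; appxsol = test_sol)
wp_l2    = WorkPrecisionSet(probs, abstols, reltols, setups; appxsol = test_sol,
                            error_estimate = :l2)
```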
```julia
abstols = 1.0 ./ 10.0 .^ (4:7)
reltols = 1.0 ./ 10.0 .^ (1:4)
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>Rosenbrock23(), :prob_choice => 2),
          Dict(:alg=>FBDF()),
          Dict(:alg=>QNDF()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>ddebdf()),
          Dict(:alg=>rodas()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>radau())]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>Rosenbrock23(), :prob_choice => 2),
          Dict(:alg=>Rodas3()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>rodas()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>radau()),
          Dict(:alg=>RadauIIA5()),
          Dict(:alg=>ROS34PW1a()),
]
gr()
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>Rosenbrock23(), :prob_choice => 2),
          Dict(:alg=>Kvaerno3()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>KenCarp3()),
          Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-6)),
          Dict(:alg=>SDIRK2()),
          Dict(:alg=>radau())]
names = ["Rosenbrock23" "Rosenbrock23 Static" "Kvaerno3" "KenCarp4" "TRBDF2" "KenCarp3" "ARKODE" "SDIRK2" "radau"]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      names=names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>Rosenbrock23(), :prob_choice => 2),
          Dict(:alg=>KenCarp5()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>KenCarp4(), :prob_choice => 2),
          Dict(:alg=>KenCarp3()),
          Dict(:alg=>ARKODE(order=5,nonlinear_convergence_coefficient = 1e-4)),
          Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-6)),
          Dict(:alg=>ARKODE(nonlinear_convergence_coefficient = 1e-6,order=3))]
names = ["Rosenbrock23" "Rosenbrock23 Static" "KenCarp5" "KenCarp4" "KenCarp4 Static" "KenCarp3" "ARKODE5" "ARKODE4" "ARKODE3"]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      names=names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>ImplicitEulerExtrapolation()),
          Dict(:alg=>ImplicitEulerExtrapolation()),
          Dict(:alg=>ImplicitEulerBarycentricExtrapolation()),
          Dict(:alg=>ImplicitHairerWannerExtrapolation()),
          Dict(:alg=>ABDF2()),
          Dict(:alg=>FBDF()),
          #Dict(:alg=>QNDF()), # ???
          #Dict(:alg=>Exprb43()), # Diverges
          #Dict(:alg=>Exprb32()), # SingularException
]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10)
plot(wp)
```

Notice that `KenCarp4` is the same overarching algorithm as `ARKODE` here (though with major differences in stage predictors and adaptivity). In this case, `KenCarp4` is more robust and more efficient than `ARKODE`. `CVODE_BDF` does quite well here, which is unusual for it on small equations. You can see that the low-order Rosenbrock methods `Rosenbrock23` and `Rodas3` dominate this test.

#### Timeseries error

Now we measure the average error of the timeseries.
```julia
abstols = 1.0 ./ 10.0 .^ (4:7)
reltols = 1.0 ./ 10.0 .^ (1:4)
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>Rosenbrock23(), :prob_choice => 2),
          Dict(:alg=>FBDF()),
          #Dict(:alg=>QNDF()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>ddebdf()),
          Dict(:alg=>rodas()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>radau())]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      error_estimate=:l2,appxsol=test_sol,maxiters=Int(1e5),seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>Rosenbrock23(), :prob_choice => 2),
          Dict(:alg=>Rodas3()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>rodas()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>radau()),
          Dict(:alg=>RadauIIA5()),
          Dict(:alg=>ROS34PW1a()),
]
gr()
wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate=:l2,
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e5),numruns=10)
plot(wp)
```

### Higher accuracy tests

Now we transition to higher accuracy tests. In this domain higher order methods are stable and much more efficient.

```julia
abstols = 1.0 ./ 10.0 .^ (7:11)
reltols = 1.0 ./ 10.0 .^ (4:8)
setups = [Dict(:alg=>Rodas3()),
          #Dict(:alg=>FBDF()), #Diverges
          #Dict(:alg=>QNDF()),
          Dict(:alg=>Rodas4P()),
          Dict(:alg=>Rodas5P()),
          Dict(:alg=>Rodas5P(), :prob_choice => 2),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>Rodas4(), :prob_choice => 2),
          Dict(:alg=>rodas()),
          Dict(:alg=>radau()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>RadauIIA5()),
          Dict(:alg=>Rodas5()),
          Dict(:alg=>ImplicitEulerExtrapolation()),
          Dict(:alg=>ImplicitEulerBarycentricExtrapolation()),
          Dict(:alg=>ImplicitHairerWannerExtrapolation()),
]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e6),seconds=5)
plot(wp)
```

```julia
abstols = 1.0 ./ 10.0 .^ (7:11)
reltols = 1.0 ./ 10.0 .^ (4:8)
setups = [Dict(:alg=>Rodas3()),
          Dict(:alg=>Kvaerno4()),
          Dict(:alg=>Kvaerno5()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>KenCarp5()),
          Dict(:alg=>ARKODE()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>Rodas5P()),
          Dict(:alg=>Rodas5P(), :prob_choice => 2),
          Dict(:alg=>radau()),
          Dict(:alg=>Rodas5())]
names = ["Rodas3" "Kvaerno4" "Kvaerno5" "CVODE_BDF" "KenCarp4" "KenCarp5" "ARKODE" "Rodas4" "Rodas5P" "Rodas5P Static" "radau" "Rodas5"]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      names=names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e6),seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rodas3()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>radau()),
          Dict(:alg=>Rodas5()),
          Dict(:alg=>Rodas5P()),
          Dict(:alg=>Rodas5P(), :prob_choice => 2)]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e6),seconds=5)
plot(wp)
```

#### Timeseries Errors

```julia
abstols = 1.0 ./ 10.0 .^ (7:11)
reltols = 1.0 ./ 10.0 .^ (4:8)
setups = [Dict(:alg=>Rodas3()),
          #Dict(:alg=>FBDF()), #Diverges
          #Dict(:alg=>QNDF()),
          Dict(:alg=>Rodas4P()),
          Dict(:alg=>Rodas5P(), :prob_choice => 2),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>Rodas4(), :prob_choice => 2),
          Dict(:alg=>rodas()),
          Dict(:alg=>radau()),
          Dict(:alg=>lsoda()),
          Dict(:alg=>RadauIIA5()),
          Dict(:alg=>Rodas5())]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;error_estimate=:l2,
                      save_everystep=false,appxsol=test_sol,maxiters=Int(1e6),seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>Rodas3()),
          Dict(:alg=>Kvaerno4()),
          Dict(:alg=>Kvaerno5()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>KenCarp5()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>radau()),
          Dict(:alg=>Rodas5()),
          Dict(:alg=>Rodas5P()),
          Dict(:alg=>Rodas5P(), :prob_choice => 2)]
names = ["Rodas3" "Kvaerno4" "Kvaerno5" "CVODE_BDF" "KenCarp4" "KenCarp5" "Rodas4" "radau" "Rodas5" "Rodas5P" "Rodas5P Static"]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      names=names,appxsol=test_sol,maxiters=Int(1e6),error_estimate=:l2,seconds=5)
plot(wp)
```

```julia
setups = [Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>Rodas4()),
          Dict(:alg=>radau()),
          Dict(:alg=>Rodas5()),
          Dict(:alg=>Rodas5P()),
          Dict(:alg=>Rodas5P(), :prob_choice => 2)]
wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      appxsol=test_sol,maxiters=Int(1e6),error_estimate=:l2,seconds=5)
plot(wp)
```

Multithreading benchmarks with Parallel Extrapolation Methods

```julia
#Setting BLAS to one thread to measure gains
LinearAlgebra.BLAS.set_num_threads(1)

abstols = 1.0 ./ 10.0 .^ (7:11)
reltols = 1.0 ./ 10.0 .^ (4:8)

setups = [Dict(:alg=>ImplicitHairerWannerExtrapolation()),
          Dict(:alg=>ImplicitHairerWannerExtrapolation(threading = true)),
          Dict(:alg=>ImplicitHairerWannerExtrapolation(threading = OrdinaryDiffEq.PolyesterThreads())),
          ]
names = ["unthreaded","threaded","Polyester"];

wp = WorkPrecisionSet(probs,abstols,reltols,setups;
                      names = names,save_everystep=false,appxsol=test_sol,maxiters=Int(1e5))
plot(wp)
```

The timeseries test is a little odd here because of the high peaks in the VanDerPol oscillator. At a certain accuracy, the steps try to resolve those peaks and so the error becomes higher. While the higher-order Julia-based Rosenbrock methods (`Rodas4` and `Rodas4P`) are not viable at higher tolerances, they dominate for a large portion of this benchmark. When the tolerance gets low enough, `radau`, with its adaptive high order (up to order 13), takes the lead.

### Conclusion

`Rosenbrock23` and `Rodas3` do well when tolerances are higher. At most standard tolerances, `Rodas4` and `Rodas4P` do extremely well. Only when the tolerances get very low does `radau` do well. The Julia Rosenbrock methods vastly outperform their Fortran counterparts. `CVODE_BDF` is a top performer in the final timepoint errors at low accuracy, but take that with a grain of salt because the problem is periodic, which means it's getting the spikes wrong but the low parts correct. `ARKODE` does poorly in these tests. `lsoda` does quite well in both the low and high accuracy domains, but is never the top.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
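For reference, the system benchmarked in this document, as defined by the `@ode_def` block at the top, is the stiff van der Pol oscillator with $\mu = 10^6$:

```math
\begin{aligned}
y' &= \mu\left((1 - x^2)\,y - x\right),\\
x' &= y.
\end{aligned}
```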
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffSDE/Oval2LongRun.jmd
docs
2609
---
title: Oval2 Long Run
author: Chris Rackauckas
---

```julia
using StochasticDiffEq, SDEProblemLibrary, Random
prob = SDEProblemLibrary.oval2ModelExample(largeFluctuations=true,useBigs=false)
```

```julia
Random.seed!(250)
prob = remake(prob,tspan = (0.0,500.0))
sol = solve(prob,SRIW1(),dt=(1/2)^(18),progress=true,qmax=1.125,
            saveat=0.1,abstol=1e-5,reltol=1e-3,maxiters=1e7);
Random.seed!(250)
prob = remake(prob,tspan = (0.0,500.0))
@time sol = solve(prob,SRIW1(),dt=(1/2)^(18),progress=true,qmax=1.125,
                  saveat=0.1,abstol=1e-5,reltol=1e-3,maxiters=1e7);
```

```julia
println(maximum(sol[:,2]))
using Plots; gr()
lw = 2
lw2 = 3
p1 = plot(sol,vars=(0,16),
          title="(A) Timeseries of Ecad Concentration",xguide="Time (s)",
          yguide="Concentration",guidefont=font(16),tickfont=font(16),
          linewidth=lw,leg=false)
```

```julia
p2 = plot(sol,vars=(0,17),
          title="(B) Timeseries of Vim Concentration",xguide="Time (s)",
          yguide="Concentration",guidefont=font(16),
          tickfont=font(16),linewidth=lw,leg=false)
```

```julia
prob = remake(prob,tspan = (0.0,1.0)) ## Little Run

# Note: these calls originally used the pre-1.0 keyword name `progressbar`;
# modern StochasticDiffEq uses `progress`, matching the calls above.
sol = solve(prob,EM(),dt=(1/2)^(20),
            progress=true,saveat=0.1)
println("EM")
@time sol = solve(prob,EM(),dt=(1/2)^(20),
                  progress=true,saveat=0.1)

sol = solve(prob,SRI(),dt=(1/2)^(18),adaptive=false,
            progress=true,save_everystep=false)
println("SRI")
@time sol = solve(prob,SRI(),dt=(1/2)^(18),adaptive=false,
                  progress=true,save_everystep=false)

sol = solve(prob,SRIW1(),dt=(1/2)^(18),adaptive=false,
            adaptivealg=:RSwM3,progress=false,qmax=4,saveat=0.1)
println("SRIW1")
@time sol = solve(prob,SRIW1(),dt=(1/2)^(18),adaptive=false,
                  adaptivealg=:RSwM3,progress=false,qmax=4,saveat=0.1)

sol = solve(prob,SRI(),dt=(1/2)^(18),
            adaptivealg=:RSwM3,progress=false,qmax=1.125,
            saveat=0.1,abstol=1e-6,reltol=1e-4)
println("SRI Adaptive")
@time sol = solve(prob,SRI(),dt=(1/2)^(18),
                  adaptivealg=:RSwM3,progress=false,qmax=1.125,
                  saveat=0.1,abstol=1e-6,reltol=1e-4)
@show length(sol.t)

sol = solve(prob,SRIW1(),dt=(1/2)^(18),
            adaptivealg=:RSwM3,progress=false,qmax=1.125,
            saveat=0.1,abstol=1e-6,reltol=1e-4)
println("SRIW1 Adaptive")
@time sol = solve(prob,SRIW1(),dt=(1/2)^(18),
                  adaptivealg=:RSwM3,progress=false,qmax=1.125,
                  saveat=0.1,abstol=1e-6,reltol=1e-4)
@show length(sol.t)
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffSDE/Oval2LongTimes.jmd
docs
2546
--- title: Oval2 Long Times author: Chris Rackauckas --- ```julia using StochasticDiffEq, SDEProblemLibrary, Random Random.seed!(200) prob = SDEProblemLibrary.oval2ModelExample(largeFluctuations=true,useBigs=false) using LinearAlgebra BLAS.set_num_threads(1) ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SRIW1(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-5,reltol=1e-3) end ``` ```julia Random.seed!(200) @time for i in 1:10 @show i sol = solve(prob,ImplicitEM(),dt=1/60000) end ``` ```julia Random.seed!(200) @time for i in 1:10 @show i sol = solve(prob,ImplicitRKMil(),dt=1/50000) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-4,reltol=1e-2) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI2(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-4,reltol=1e-4) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI2(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-5,reltol=1e-3) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-3,reltol=1e-2) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-4,reltol=1e-4) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-2,reltol=1e-2) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-5,reltol=1e-3) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-2,reltol=1e-1) end ``` ```julia Random.seed!(200) @time for i in 1:10 sol = solve(prob,SOSRI2(),dt=(1/2)^(18),qmax=1.125, saveat=0.1,maxiters=1e7,abstol=1e-4,reltol=1e-1) end ``` ```julia Random.seed!(200) @time for i in 1:10 @show i sol = solve(prob,ImplicitEM(),dt=1/50000) end ``` ```julia Random.seed!(200) @time for i in 1:10 @show i sol = solve(prob,ImplicitRKMil(),dt=1/40000) end ``` ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffSDE/Oval2Timings.jmd
docs
11360
--- title: Oval2 Timings author: Chris Rackauckas --- ```julia using StochasticDiffEq, SDEProblemLibrary, Random, Base.Threads prob = SDEProblemLibrary.oval2ModelExample(largeFluctuations=true,useBigs=false) prob_func(prob,i,repeat) = remake(prob,seed=i) prob = EnsembleProblem(remake(prob,tspan=(0.0,1.0)),prob_func=prob_func) js = 16:21 dts = 1.0 ./ 2.0 .^ (js) trajectories = 1000 fails = Array{Int}(undef,length(dts),3) times = Array{Float64}(undef,length(dts),3) ``` ## Timing Runs ```julia sol = solve(prob,SRIW1(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-7),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SRIW1(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-7),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? Inf : adaptive_time println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SRI(error_terms=2),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-7),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SRI(error_terms=2),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-7),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SRI(),EnsembleThreads(),abstol=2.0^(-14),reltol=2.0^(-18),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SRI(),EnsembleThreads(),abstol=2.0^(-14),reltol=2.0^(-18),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SRI(tableau=StochasticDiffEq.constructSRIOpt1()),EnsembleThreads(),abstol=2.0^(-7),reltol=2.0^(-4),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SRI(tableau=StochasticDiffEq.constructSRIOpt1()),EnsembleThreads(),abstol=2.0^(-7),reltol=2.0^(-4),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-7),reltol=2.0^(-4),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-7),reltol=2.0^(-4),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? 
adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-7),reltol=2.0^(-6),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-7),reltol=2.0^(-6),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-12),reltol=2.0^(-15),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-12),reltol=2.0^(-15),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-7),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-7),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-12),reltol=2.0^(-15),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI(),EnsembleThreads(),abstol=2.0^(-12),reltol=2.0^(-15),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI2(),EnsembleThreads(),abstol=2.0^(-12),reltol=2.0^(-15),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI2(),EnsembleThreads(),abstol=2.0^(-12),reltol=2.0^(-15),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. 
Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI2(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-11),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI2(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-11),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia sol = solve(prob,SOSRI2(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-11),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=Threads.nthreads()) adaptive_time = @elapsed sol = solve(prob,SOSRI2(),EnsembleThreads(),abstol=2.0^(-13),reltol=2.0^(-11),maxiters=Int(1e11),qmax=1.125,save_everystep=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) best_adaptive_time = numfails != 0 ? adaptive_time : min(best_adaptive_time,adaptive_time) println("The number of Adaptive Fails is $numfails. Elapsed time was $adaptive_time") ``` ```julia for j in eachindex(js) println("j = $j") sol =solve(prob,EM(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=Threads.nthreads()) t1 = @elapsed sol = solve(prob,EM(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) println("The number of Euler-Maruyama Fails is $numfails. Elapsed time was $t1") fails[j,1] = numfails times[j,1] = t1 end ``` ```julia for j in 1:4 println("j = $j") sol =solve(prob,SRIW1(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=Threads.nthreads()) t1 = @elapsed sol = solve(prob,SRIW1(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) println("The number of SRIW1 Fails is $numfails. Elapsed time was $t1") fails[j,3] = numfails times[j,3] = t1 end ``` ```julia js = 17:21 dts = 1.0 ./2.0 .^ (js) for j in 1:6 println("j = $j") sol =solve(prob,ImplicitEM(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=Threads.nthreads()) t1 = @elapsed sol = solve(prob,ImplicitEM(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) println("The number of Implicit-EM Fails is $numfails. Elapsed time was $t1") end ``` ```julia js = 17:21 dts = 1.0 ./ 2.0 .^(js) for j in 1:6 println("j = $j") sol =solve(prob,ImplicitRKMil(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=Threads.nthreads()) t1 = @elapsed sol = solve(prob,ImplicitRKMil(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=trajectories) numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories]) println("The number of Implicit-RKMil Fails is $numfails. 
Elapsed time was $t1")
end
```

```julia
for j in 1:6
  println("j = $j")
  sol = solve(prob,RKMil(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=Threads.nthreads())
  t1 = @elapsed sol = solve(prob,RKMil(),EnsembleThreads(),dt=dts[j],maxiters=Int(1e11),save_everystep=false,verbose=false,trajectories=trajectories)
  numfails = sum([Int(any(isnan,sol[i]) || sol[i].t[end] != 1) for i in 1:trajectories])
  println("The number of RKMil Fails is $numfails. Elapsed time was $t1")
  fails[j,2] = numfails
  times[j,2] = t1
end
```

```julia
using Plots, LaTeXStrings, Plots.PlotMeasures # LaTeXStrings provides the L"..." label; PlotMeasures provides the px margin unit
lw = 3

p2 = plot(dts,times,xscale=:log2,yscale=:log2,guidefont=font(16),tickfont=font(14),yguide="Elapsed Time (s)",xguide=L"Chosen $\Delta t$",top_margin=50px,linewidth=lw,lab=["Euler-Maruyama" "RK-Mil" "RosslerSRI"],legendfont=font(14))
plot!(dts,repeat([best_adaptive_time],length(dts)),linewidth=lw,line=:dash,lab="ESRK+RSwM3",left_margin=75px) # repeat replaces the pre-1.0 repmat; length(dts) keeps the reference line matched to the x values
scatter!([2.0^(-20);2.0^(-20);2.0^(-18)],[times[5,1];times[5,2];times[3,3]],markersize=20,c=:red,lab="")
plot(p2,size=(800,800))
```

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
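Every timing chunk above repeats the same warm-up/measure/failure-count pattern. The following is a minimal sketch of a helper that captures that pattern; the function name is hypothetical and not part of the benchmark, and it assumes the `prob` (an `EnsembleProblem`) and `trajectories` defined at the top of this document.

```julia
# Sketch only: solve once at a small trajectory count to trigger compilation,
# then time a full ensemble and count trajectories that went unstable (NaNs)
# or stopped before the final time t = 1.
function time_ensemble(prob, alg, trajectories; kwargs...)
    solve(prob, alg, EnsembleThreads(); trajectories = Threads.nthreads(), kwargs...) # warm-up
    t = @elapsed sol = solve(prob, alg, EnsembleThreads(); trajectories = trajectories, kwargs...)
    numfails = count(i -> any(isnan, sol[i]) || sol[i].t[end] != 1, 1:trajectories)
    (time = t, fails = numfails)
end
```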
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffSDE/QuadraticStiffness.jmd
docs
3704
---
title: Quadratic Stiffness Benchmarks
author: Chris Rackauckas
---

# Quadratic Stiffness

In this notebook we will explore the quadratic stiffness problem. References:

The composite Euler method for stiff stochastic differential equations
Kevin Burrage, Tianhai Tian

And

S-ROCK: CHEBYSHEV METHODS FOR STIFF STOCHASTIC DIFFERENTIAL EQUATIONS
ASSYR ABDULLE AND STEPHANE CIRILLI

This is a scalar SDE with two parameters. The first controls the deterministic stiffness and the latter controls the diffusion stiffness.

```julia
using SDEProblemLibrary, StochasticDiffEq, DiffEqDevTools
import SDEProblemLibrary: prob_sde_stiffquadito
using Plots; gr()
const N = 10
```

```julia
prob = remake(prob_sde_stiffquadito,p=(50.0,1.0))
sol = solve(prob,SRIW1())
plot(sol)
```

```julia
prob = remake(prob_sde_stiffquadito,p=(500.0,1.0))
sol = solve(prob,SRIW1())
plot(sol)
```

## Top dts

Let's first determine the maximum dts which are allowed. Anything higher is mostly unstable.

### Deterministic Stiffness Mild

```julia
prob = remake(prob_sde_stiffquadito,p=(50.0,1.0))
@time sol = solve(prob,SRIW1())
@time sol = solve(prob,SRIW1(),adaptive=false,dt=0.01)
@time sol = solve(prob,ImplicitRKMil(),dt=0.005)
@time sol = solve(prob,EM(),dt=0.01);
```

### Deterministic Stiffness High

```julia
prob = remake(prob_sde_stiffquadito,p=(500.0,1.0))
@time sol = solve(prob,SRIW1())
@time sol = solve(prob,SRIW1(),adaptive=false,dt=0.002)
@time sol = solve(prob,ImplicitRKMil(),dt=0.001)
@time sol = solve(prob,EM(),dt=0.002);
```

### Mixed Stiffness

```julia
prob = remake(prob_sde_stiffquadito,p=(5000.0,70.0))
@time sol = solve(prob,SRIW1(),dt=0.0001)
@time sol = solve(prob,SRIW1(),adaptive=false,dt=0.00001)
@time sol = solve(prob,ImplicitRKMil(),dt=0.00001)
@time sol = solve(prob,EM(),dt=0.00001);
```

Notice that in this problem, the stiffness in the noise term still prevents the semi-implicit integrators from doing well. In that case, the advantage of implicitness does not take effect, and thus explicit methods do well. When we don't care about the error, Euler-Maruyama is fastest. When there's mixed stiffness, the adaptive algorithm is unstable.

## Work-Precision Diagrams

```julia
prob = remake(prob_sde_stiffquadito,p=(50.0,1.0))

reltols = 1.0 ./ 10.0 .^ (3:5)
abstols = reltols#[0.0 for i in eachindex(reltols)]
setups = [Dict(:alg=>SRIW1()),
          Dict(:alg=>EM(),:dts=>1.0./8.0.^((1:length(reltols)) .+ 1)),
          Dict(:alg=>SRIW1(),:dts=>1.0./8.0.^((1:length(reltols)) .+ 1),:adaptive=>false)
          #Dict(:alg=>RKMil(),:dts=>1.0./8.0.^((1:length(reltols)) .+ 1),:adaptive=>false),
          ]
names = ["SRIW1","EM","SRIW1 Fixed"] #"RKMil",
wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,error_estimate=:l2)
plot(wp)
```

```julia
prob = remake(prob_sde_stiffquadito,p=(500.0,1.0))

reltols = 1.0 ./ 10.0 .^ (3:5)
abstols = reltols#[0.0 for i in eachindex(reltols)]
setups = [Dict(:alg=>SRIW1()),
          Dict(:alg=>EM(),:dts=>1.0./8.0.^((1:length(reltols)) .+ 2)),
          Dict(:alg=>SRIW1(),:dts=>1.0./8.0.^((1:length(reltols)) .+ 2),:adaptive=>false)
          #Dict(:alg=>RKMil(),:dts=>1.0./8.0.^((1:length(reltols)) .+ 2),:adaptive=>false),
          ]
names = ["SRIW1","EM","SRIW1 Fixed"] #"RKMil",
wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=N,names=names,error_estimate=:l2,print_names=true)
plot(wp)
```

## Conclusion

Noise stiffness is tough. Right now the best solution is to run an explicit integrator with a low enough dt. Adaptivity does have a cost in this case, likely due to memory management.
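A minimal sketch of that recommendation, reusing the mixed-stiffness parameters and the fixed dt already shown in the sections above:

```julia
# Sketch only: an explicit fixed-step method with a small enough dt
# for the noise-stiff regime, per the conclusion above.
prob = remake(prob_sde_stiffquadito, p = (5000.0, 70.0))
sol = solve(prob, EM(), dt = 0.00001)
```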
```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/StiffSDE/StochasticHeat.jmd
docs
4053
---
title: Stochastic Heat Equation Benchmarks
author: Chris Rackauckas
---

# Stochastic Heat Equation Benchmarks

In this notebook we will benchmark against the stochastic heat equation with Dirichlet BCs and scalar noise. The function for generating the problem is as follows:

Stochastic Heat Equation with scalar multiplicative noise

S-ROCK: CHEBYSHEV METHODS FOR STIFF STOCHASTIC DIFFERENTIAL EQUATIONS
ASSYR ABDULLE AND STEPHANE CIRILLI

Raising D or k increases stiffness

```julia
using StochasticDiffEq, DiffEqNoiseProcess, LinearAlgebra, Statistics

function generate_stiff_stoch_heat(D=1,k=1;N = 100, t_end = 3.0, adaptivealg = :RSwM3)
    A = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1]))
    dx = 1/N
    A = D/(dx^2) * A
    function f(du,u,p,t)
        mul!(du,A,u)
    end
    #=
    function f(::Type{Val{:analytic}},u0,p,t,W)
        exp((A-k/2)*t+W*I)*u0 # no -k/2 for Strat
    end
    =#
    function g(du,u,p,t)
        @. du = k*u
    end
    SDEProblem(f,g,ones(N),(0.0,t_end),noise=WienerProcess(0.0,0.0,0.0,rswm=RSWM(adaptivealg=adaptivealg)))
end

N = 100
D = 1; k = 1
A = Array(Tridiagonal([1.0 for i in 1:N-1],[-2.0 for i in 1:N],[1.0 for i in 1:N-1]))
dx = 1/N
A = D/(dx^2) * A;
```

Now let's solve it with high accuracy.

```julia
prob = generate_stiff_stoch_heat(1.0,1.0)
@time sol = solve(prob,SRIW1(),progress=true,abstol=1e-6,reltol=1e-6);
```

## Highest dt

Let's try to find the highest possible dt:

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),SRIW1());
```

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),SRIW1(),progress=true,adaptive=false,dt=0.00005);
```

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),EM(),progress=true,adaptive=false,dt=0.00005);
```

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),ImplicitRKMil(),progress=true,dt=0.1);
```

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),ImplicitRKMil(),progress=true,dt=0.01);
```

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),ImplicitRKMil(),progress=true,dt=0.001);
```

```julia
@time sol = solve(generate_stiff_stoch_heat(1.0,1.0),ImplicitEM(),progress=true,dt=0.001);
```

## Simple Error Analysis

Now let's check the error at an arbitrary timepoint in there. Our analytical solution only exists in the Stratonovich sense, so we are limited in the methods we can calculate errors for.

```julia
function simple_error(alg;kwargs...)
    sol = solve(generate_stiff_stoch_heat(1.0,1.0,t_end=0.25),alg;kwargs...);
    sum(abs2,sol[end] - exp(A*sol.t[end]+sol.W[end]*I)*prob.u0)
end
```

```julia
mean(simple_error(EulerHeun(),dt=0.00005) for i in 1:400)
```

```julia
mean(simple_error(ImplicitRKMil(interpretation=:Stratanovich),dt=0.1) for i in 1:400)
```

```julia
mean(simple_error(ImplicitRKMil(interpretation=:Stratanovich),dt=0.01) for i in 1:400)
```

```julia
mean(simple_error(ImplicitRKMil(interpretation=:Stratanovich),dt=0.001) for i in 1:400)
```

```julia
mean(simple_error(ImplicitEulerHeun(),dt=0.001) for i in 1:400)
```

```julia
mean(simple_error(ImplicitEulerHeun(),dt=0.01) for i in 1:400)
```

```julia
mean(simple_error(ImplicitEulerHeun(),dt=0.1) for i in 1:400)
```

## Interesting Property

Note that RSwM1 and RSwM2 are not stable on this problem.

```julia
sol = solve(generate_stiff_stoch_heat(1.0,1.0,adaptivealg=:RSwM1),SRIW1());
```

## Conclusion

In this problem, the implicit methods do not have a stepsize limit. This is because the stiffness is almost entirely deterministic, coming from the diffusion term. In that case, if we do not care about the error too much, the implicit methods dominate.
Of course, as the tolerance gets lower, there is a tradeoff point where the higher order methods will become more efficient. But the explicit methods are clearly stability-bound, and thus unless we want an error on the order of 10^-10 we are better off using an implicit method here.

```julia, echo = false
using SciMLBenchmarks
SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file])
```
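For reference, the semidiscretization generated by `generate_stiff_stoch_heat` above is the linear matrix SDE with scalar multiplicative noise

```math
du = A u\, dt + k\, u \circ dW_t, \qquad A = \frac{D}{\Delta x^2}\,\operatorname{tridiag}(1, -2, 1),
```

which, in the Stratonovich interpretation (cf. the commented-out analytic solution in the generator), has the solution $u(t) = \exp\left(A t + k W_t I\right) u_0$, matching the expression used in `simple_error`.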
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
0.1.3
f4076dd5a103010d48bb6c4e50c5526f6622fa96
benchmarks/Testing/test.jmd
docs
507
--- title: Test author: Chris Rackauckas --- This is a test of the builder system. It often gets bumped in PRs related to CI. ```julia using InteractiveUtils versioninfo() ``` ```julia Threads.nthreads() ``` ```julia using Plots plot(rand(10,10)) ``` ```math \begin{equation} y'(t) = \frac{0.2y(t-14)}{1 + y(t-14)^{10}} - 0.1y(t) \end{equation} ``` $\alpha$ ``u_0`` ## Appendix ```julia, echo = false using SciMLBenchmarks SciMLBenchmarks.bench_footer(WEAVE_ARGS[:folder],WEAVE_ARGS[:file]) ```
SciMLBenchmarks
https://github.com/SciML/SciMLBenchmarks.jl.git
[ "MIT" ]
3.6.1
db6084790646024c59a036eb1e694c646b3b738e
docs/make.jl
code
1040
using Documenter, DiffEqBayes cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true) cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true) ENV["PLOTS_TEST"] = "true" ENV["GKSwstype"] = "100" include("pages.jl") makedocs(sitename = "DiffEqBayes.jl", authors = "Chris Rackauckas, Vaibhav Kumar Dixit et al.", clean = true, doctest = false, modules = [DiffEqBayes], strict = [ :doctest, :linkcheck, :parse_error, :example_block, # Other available options are # :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block ], format = Documenter.HTML(assets = ["assets/favicon.ico"], canonical = "https://docs.sciml.ai/DiffEqBayes/stable/"), pages = pages) deploydocs(repo = "github.com/SciML/DiffEqBayes.jl.git"; push_preview = true)
DiffEqBayes
https://github.com/SciML/DiffEqBayes.jl.git
[ "MIT" ]
3.6.1
db6084790646024c59a036eb1e694c646b3b738e
docs/pages.jl
code
113
pages = ["index.md", "Methods" => "methods.md", "Examples" => ["examples.md", "examples/pendulum.md"], ]
DiffEqBayes
https://github.com/SciML/DiffEqBayes.jl.git
[ "MIT" ]
3.6.1
db6084790646024c59a036eb1e694c646b3b738e
src/DiffEqBayes.jl
code
808
""" $(DocStringExtensions.README) """ module DiffEqBayes using DiffEqBase, Distributions, Turing, MacroTools using RecursiveArrayTools, ModelingToolkit, LinearAlgebra using Parameters, Distributions, Optim, Requires using Distances, DocStringExtensions, Random, StanSample using DynamicHMC, TransformVariables, LogDensityProblemsAD, TransformedLogDensities STANDARD_PROB_GENERATOR(prob, p) = remake(prob; u0 = eltype(p).(prob.u0), p = p) function STANDARD_PROB_GENERATOR(prob::EnsembleProblem, p) EnsembleProblem(remake(prob.prob; u0 = eltype(p).(prob.prob.u0), p = p)) end include("turing_inference.jl") # include("abc_inference.jl") include("stan_string.jl") include("stan_inference.jl") include("dynamichmc_inference.jl") export turing_inference, stan_inference, dynamichmc_inference end # module
DiffEqBayes
https://github.com/SciML/DiffEqBayes.jl.git
[ "MIT" ]
3.6.1
db6084790646024c59a036eb1e694c646b3b738e
src/abc_inference.jl
code
1820
function createabcfunction(prob, t, distancefunction, alg; save_idxs = nothing,
                           sample_u0 = false, kwargs...)
    function simfunc(params, constants, data)
        local u0
        if sample_u0
            u0 = save_idxs === nothing ? params[1:length(prob.u0)] :
                 params[1:length(save_idxs)]
            if length(u0) < length(prob.u0)
                # Fill in the unsampled tail of u0 from the problem's initial condition,
                # starting one past the sampled entries so u0 ends up the same length as prob.u0.
                for i in (length(u0) + 1):length(prob.u0)
                    push!(u0, prob.u0[i])
                end
            end
        else
            u0 = prob.u0
        end
        sol = solve(prob, alg, u0 = u0, p = params, saveat = t, save_idxs = save_idxs,
                    kwargs...)
        if size(sol, 2) < length(t)
            return Inf, nothing
        else
            simdata = convert(Array, sol)
            return distancefunction(data, simdata), nothing
        end
    end
end

function abc_inference(prob::DiffEqBase.DEProblem, alg, t, data, priors; ϵ = 0.001,
                       distancefunction = euclidean, ABCalgorithm = ABCSMC, progress = false,
                       num_samples = 500, maxiterations = 10^5, save_idxs = nothing,
                       sample_u0 = false, parallel = false, kwargs...)
    abcsetup = ABCalgorithm(createabcfunction(prob, t, distancefunction, alg;
                                              save_idxs = save_idxs, sample_u0 = sample_u0,
                                              kwargs...),
                            length(priors),
                            ϵ,
                            ApproxBayes.Prior(priors);
                            nparticles = num_samples,
                            maxiterations = maxiterations)

    abcresult = runabc(abcsetup, data, progress = progress, parallel = parallel)
    return abcresult
end
DiffEqBayes
https://github.com/SciML/DiffEqBayes.jl.git
[ "MIT" ]
3.6.1
db6084790646024c59a036eb1e694c646b3b738e
src/dynamichmc_inference.jl
code
6102
""" $(TYPEDEF) Defines a callable that returns the log density for given parameter values when called with a `NamedTuple` `(parameters = ..., σ = ...)` where `parameters` is a vector of parameters, and `σ` is the vector of noise scales. For a common use case, see [`dynamichmc_inference`](@ref). # Fields $(FIELDS) """ Base.@kwdef struct DynamicHMCPosterior{TA, TP, TD, TT, TR, TS, TK, TI} "Algorithm for the ODE solver." algorithm::TA "An ODE problem definition (`DiffEqBase.DEProblem`)." problem::TP "Time values at which the simulated path is compared to `data`." t::TT "Data, as a matrix with each time value in a column." data::TD "Priors for parameters, an iterable with the same length as the number of parameters." parameter_priors::TR """ Priors for the noise scales (currently the standard deviation of a normal distribution), one for each variable. """ σ_priors::TS "Keyword arguments passed on the the ODE solver `solve`." solve_kwargs::TK sample_u0::Bool save_idxs::TI end function (P::DynamicHMCPosterior)(θ) @unpack parameters, σ = θ @unpack algorithm, problem, data, t, parameter_priors = P @unpack σ_priors, solve_kwargs, sample_u0, save_idxs = P T = eltype(parameters) nu = save_idxs === nothing ? length(problem.u0) : length(save_idxs) u0 = convert.(T, sample_u0 ? parameters[1:nu] : problem.u0) p = convert.(T, sample_u0 ? parameters[(nu + 1):end] : parameters) if length(u0) < length(problem.u0) # assumes u is ordered such that the observed variables are in the beginning, consistent with ordered theta for i in length(u0):length(problem.u0) push!(u0, convert(T, problem.u0[i])) end end _saveat = t === nothing ? Float64[] : t sol = solve(problem, algorithm; u0 = u0, p = p, saveat = _saveat, save_idxs = save_idxs, solve_kwargs...) failure = size(sol, 2) < length(_saveat) failure && return T(0) * sum(σ) + T(-Inf) log_likelihood = sum(sum(map(logpdf, Normal.(0.0, σ), sol[:, i] .- data[:, i])) for (i, t) in enumerate(t)) log_prior_parameters = sum(map(logpdf, parameter_priors, parameters)) log_prior_σ = sum(map(logpdf, σ_priors, σ)) log_likelihood + log_prior_parameters + log_prior_σ end # function (P::DynamicHMCPosterior)(θ) # @unpack parameters, σ = θ # @unpack algorithm, problem, data, t, parameter_priors, σ_priors, solve_kwargs = P # prob = remake(problem, u0 = convert.(eltype(parameters), problem.u0), p = parameters) # solution = solve(prob, algorithm; solve_kwargs...) # any((s.retcode ≠ :Success && s.retcode ≠ :Terminated) for s in solution) && return -Inf # log_likelihood = sum(sum(logpdf.(Normal.(0.0, σ), solution(t) .- data[:, i])) # for (i, t) in enumerate(t)) # log_prior_parameters = sum(map(logpdf, parameter_priors, parameters)) # log_prior_σ = sum(map(logpdf, σ_priors, σ)) # log_likelihood + log_prior_parameters + log_prior_σ # end """ $(SIGNATURES) Run MCMC for an ODE problem. Return a `NamedTuple`, which is similar to the one returned by `DynamicHMC.mcmc_with_warmup`, with an added field `posterior` which contains a vector of posterior values (transformed from `ℝⁿ`). # Arguments - `problem` is the ODE problem - `algorithm` is the ODE algorithm - `t` is the time values at which the solution is compared to `data` - `data` is a matrix of data, with one column for each element in `t` - `parameter_priors` is an iterable with the length of the number of paramers, and is used as a prior on it, should have comparable structure. - `parameter_transformations`: a `TransformVariables` transformation to mapping `ℝⁿ` to the vector of valid parameters. 
# Keyword arguments

- `rng` is the random number generator used for MCMC. Defaults to the global one.

- `num_samples` is the number of MCMC draws (default: 1000)

- `AD_gradient_kind` is passed on to `LogDensityProblems.ADgradient`; make sure to `import`
  the corresponding library.

- `solve_kwargs` is passed on to `solve`

- `mcmc_kwargs` are passed on as keyword arguments to `DynamicHMC.mcmc_with_warmup`
"""
function dynamichmc_inference(problem::DiffEqBase.DEProblem, algorithm, t, data,
                              parameter_priors,
                              parameter_transformations = as(Vector, asℝ₊,
                                                             length(parameter_priors));
                              σ_priors = fill(Normal(0, 5), size(data, 1)),
                              sample_u0 = false, rng = Random.GLOBAL_RNG,
                              num_samples = 1000, AD_gradient_kind = Val(:ForwardDiff),
                              save_idxs = nothing, solve_kwargs = (),
                              mcmc_kwargs = (initialization = (q = zeros(length(parameter_priors) +
                                                                         (save_idxs === nothing ?
                                                                          length(data[:, 1]) :
                                                                          length(save_idxs))),),))
    P = DynamicHMCPosterior(; algorithm = algorithm, problem = problem, t = t, data = data,
                            parameter_priors = parameter_priors, σ_priors = σ_priors,
                            solve_kwargs = solve_kwargs, sample_u0 = sample_u0,
                            save_idxs = save_idxs)
    trans = as((parameters = parameter_transformations,
                σ = as(Vector, asℝ₊, length(σ_priors))))
    ℓ = TransformedLogDensity(trans, P)
    ∇ℓ = LogDensityProblemsAD.ADgradient(AD_gradient_kind, ℓ)
    results = mcmc_with_warmup(rng, ∇ℓ, num_samples; mcmc_kwargs...)
    chain = if haskey(results, :chain) # DynamicHMC < 3.3.0
        results.chain
    else
        eachcol(results.posterior_matrix)
    end
    posterior = map(Base.Fix1(TransformVariables.transform, trans), chain)
    merge((; posterior), results)
end
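# The docstring above describes the interface; the following is a hypothetical usage
# sketch (not from the package documentation). The decay model, prior, and noise level
# are illustrative assumptions only.
#
# ```julia
# # Hypothetical example: infer the single parameter of exponential decay
# # u' = -p*u from noisy observations of the solution.
# using OrdinaryDiffEq, Distributions, DiffEqBayes
# import ForwardDiff # backend for the default AD_gradient_kind = Val(:ForwardDiff)
#
# f(u, p, t) = -p[1] .* u
# prob = ODEProblem(f, [1.0], (0.0, 4.0), [0.5])
# t = collect(range(0.5, 4.0, length = 8))
# data = Array(solve(prob, Tsit5(), saveat = t)) .+ 0.05 .* randn(1, length(t))
#
# result = dynamichmc_inference(prob, Tsit5(), t, data,
#                               (truncated(Normal(0.5, 1.0), 0.0, 3.0),))
# posterior_p = [draw.parameters[1] for draw in result.posterior]
# ```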
DiffEqBayes
https://github.com/SciML/DiffEqBayes.jl.git