licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | code | 5260 | include("../docs/example_data.jl")
dat, evts = UnfoldSim.predef_eeg(; noiselevel = 10, return_epoched = true)
dat_e, evts_e, times = example_data("sort_data")
@testset "ERP image basic" begin
plot_erpimage(dat;)
end
@testset "ERP image naked" begin
f = Figure(; figure_padding = 0)
plot_erpimage!(
f,
dat;
layout = (; hidespines = (), hidedecorations = (), use_colorbar = false),
)
end
@testset "ERP image sorted and naked" begin
f = Figure(; figure_padding = 0)
plot_erpimage!(
f,
dat_e;
sortvalues = evts_e.Δlatency,
layout = (; hidespines = (), hidedecorations = (), use_colorbar = false),
)
end
@testset "ERP image with changing erpblur to zero" begin
plot_erpimage(dat; erpblur = 0)
end
@testset "ERP image with GridPosition" begin
f = Figure()
plot_erpimage!(f[1, 1], dat)
end
@testset "ERP image with sortvalues" begin
plot_erpimage(times, dat_e; sortvalues = evts_e.Δlatency)
end
@testset "ERP image with sortindex" begin
plot_erpimage(
times,
dat_e;
sortindex = rand(1:length(evts_e.Δlatency), length(evts_e.Δlatency)),
)
end
@testset "ERP image normalized" begin
dat_norm = dat_e[:, :] .- mean(dat_e, dims = 2)
plot_erpimage(times, dat_norm; sortvalues = evts_e.Δlatency)
end
@testset "ERP image with and without sorting" begin
f = Figure()
plot_erpimage!(f[1, 1], times, dat_e; axis = (; title = "Bad"))
plot_erpimage!(
f[2, 1],
times,
dat_e;
sortvalues = evts_e.Δlatency,
axis = (; title = "Good"),
)
f
#save("erpimage.png", f)
end
@testset "ERP image with different labels" begin
f = Figure()
dat_norm = dat_e[:, :] .- mean(dat_e, dims = 2)
plot_erpimage!(f[1, 1], times, dat_e; axis = (; ylabel = "test"))
plot_erpimage!(
f[2, 1],
times,
dat_e;
sortvalues = evts_e.Δlatency,
axis = (; ylabel = "test"),
)
plot_erpimage!(f[1, 2], times, dat_norm;)
plot_erpimage!(f[2, 2], times, dat_norm; sortvalues = evts_e.Δlatency)
f
end
@testset "ERP image with mean plot" begin
plot_erpimage(dat; meanplot = true)
end
@testset "ERP image with meanplot and show_sortval" begin
plot_erpimage(
times,
dat_e;
sortvalues = evts_e.Δlatency,
meanplot = true,
show_sortval = true,
)
end
@testset "ERP image with show_sortval" begin
dat_e, evts_e, times = example_data("sort_data")
plot_erpimage(times, dat_e; sortvalues = evts_e.Δlatency, show_sortval = true)
end
@testset "ERP image with Observables" begin
obs = Observable(dat)
f = plot_erpimage(obs)
obs[] = rand(size(to_value(dat))...)
end
@testset "ERP image with Observables and sort_val" begin
obs = Observable(dat)
sort_val = Observable(evts_e.Δlatency)
f = plot_erpimage(times, dat_e; sortvalues = sort_val, show_sortval = true)
sort_val[] = rand(Int, size(to_value(evts_e.Δlatency))...)
end
@testset "ERP image with Observables and title as Observable" begin
obs = Observable(dat)
chan_i = Observable(1)
sort_val = Observable(evts_e.Δlatency)
str = @lift("ERP image: channel " * string($chan_i))
f = plot_erpimage(
times,
dat_e;
sortvalues = sort_val,
show_sortval = true,
axis = (; title = str),
)
str[] = "TEST"
end
@testset "ERP image with sortval_xlabel" begin
str = Observable("&&&")
plot_erpimage(
times,
dat_e;
sortvalues = evts_e.Δlatency,
meanplot = true,
show_sortval = true,
sortval_xlabel = str,
)
str[] = "TEST2"
end
@testset "ERP image with sortval_xlabel" begin
sortval = Observable(evts_e.Δlatency)
plot_erpimage(times, dat_e; sortvalues = sortval, meanplot = true, show_sortval = true)
sortval = Observable(evts_e.continuous)
end
@testset "check error of empty sortvalues" begin
err1 = nothing
t() = error(plot_erpimage(times, dat_e; show_sortval = true))
try
t()
catch err1
end
@test err1 == ErrorException("`show_sortval` needs non-empty `sortvalues` argument")
end
@testset "check error of all NaN sortvalues" begin
tmp = fill(NaN, size(dat_e, 2))
err1 = nothing
t() = error(plot_erpimage(times, dat_e; sortvalues = tmp, show_sortval = true))
try
t()
catch err1
end
@test err1 ==
ErrorException("`show_sortval` can not take `sortvalues` with all NaN-values")
end
@testset "check length mismatch" begin
tmp = fill(NaN, size(dat_e, 1))
err1 = nothing
t() = error(plot_erpimage(times, dat_e; sortvalues = tmp, show_sortval = true))
try
t()
catch err1
end
@test err1 == ErrorException(
"The length of sortvalues differs from the length of data trials. This leads to incorrect sorting.",
)
end
@testset "ERP image: meanplot axis" begin
plot_erpimage(dat; meanplot = true, meanplot_axis = (; title = "test"))
end
@testset "ERP image: sortplot axis" begin
plot_erpimage(
times,
dat_e;
sortvalues = evts_e.Δlatency,
show_sortval = true,
sortplot_axis = (; title = "test"),
)
end
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | code | 6262 | include("../docs/example_data.jl") # we need more specified example data
results_plot, positions = example_data()
@testset "PCP: data input DataFrame" begin
plot_parallelcoordinates(results_plot)
end
@testset "PCP: data input Matrix" begin
tmp = DataFrame(channel = results_plot.channel, estimate = results_plot.estimate)
grouped = groupby(tmp, :channel)
mat = Matrix(reduce(hcat, [group.estimate for group in grouped])')
plot_parallelcoordinates(mat)
end
@testset "PCP: Figure, 64 channels, 1 condition" begin
plot_parallelcoordinates(results_plot; mapping = (color = :coefname, y = :estimate))
end
@testset "PCP: show_legend = false" begin
plot_parallelcoordinates(
results_plot;
mapping = (color = :coefname, y = :estimate),
layout = (; show_legend = false),
)
end
@testset "PCP: Figure, 64 channels, 1 condition, bigger size" begin
plot_parallelcoordinates(
Figure(size = (1200, 800)),
results_plot;
mapping = (color = :coefname, y = :estimate),
)
end
@testset "PCP: Figure, 5 channels (filtered), 1 condition" begin
results_plot2 = filter(row -> row.channel <= 5, results_plot) # select channels
plot_parallelcoordinates(results_plot2; mapping = (color = :coefname, y = :estimate))
end
@testset "PCP: Figure, 5 channels (subsetted), 1 condition" begin
plot_parallelcoordinates(
subset(results_plot, :channel => x -> x .<= 5);
mapping = (; color = :coefname),
)
end
@testset "change colormap" begin
# https://docs.makie.org/stable/explanations/colors/index.html
# Use only categorical with high contrast between adjacent colors.
f = Figure()
plot_parallelcoordinates(
f[1, 1],
subset(results_plot, :channel => x -> x .<= 5);
mapping = (; color = :coefname),
visual = (; colormap = :tab10),
axis = (; title = "colormap = tab10"),
)
plot_parallelcoordinates(
f[2, 1],
subset(results_plot, :channel => x -> x .<= 5);
mapping = (; color = :coefname),
visual = (; colormap = :Accent_3),
axis = (; title = "colormap = Accent_3"),
)
f
end
@testset "PCP: GridPosition" begin
f = Figure()
plot_parallelcoordinates(
f[1, 1],
results_plot;
mapping = (color = :coefname, y = :estimate),
)
f
end
@testset "PCP: 3 conditions and 5 channels" begin
uf_5chan = example_data("UnfoldLinearModelMultiChannel")
plot_parallelcoordinates(
uf_5chan;
mapping = (; color = :coefname),
legend = (; valign = :center),
)
end
@testset "Bending 1" begin
# check that the points actually go through the provided points
f, b, c, d = UnfoldMakie.parallelcoordinates(
Figure(),
[0 1 0 2 0 3 0 4.0; -1 0 -2 0 -3 0 -4 0]',
normalize = :no,
bend = true,
)
f
end
@testset "Bending 2" begin
# check that the points actually go through the provided points
f = Figure()
plot_parallelcoordinates(
f[1, 1],
subset(results_plot, :channel => x -> x .< 10),
bend = true,
)
f
end
@testset "Normalisation of axis" begin
f = Figure()
plot_parallelcoordinates(
f[1, 1],
subset(results_plot, :channel => x -> x .< 10);
mapping = (; color = :coefname),
axis = (; title = "normalize = nothing"),
)
plot_parallelcoordinates(
f[2, 1],
subset(results_plot, :channel => x -> x .< 10);
mapping = (; color = :coefname),
normalize = :minmax,
axis = (; title = "normalize = :minmax"),
)
f
end
@testset "Axis labels" begin
plot_parallelcoordinates(
subset(results_plot, :channel => x -> x .< 5);
visual = (; color = "#6BAED6"),
ax_labels = ["Fz", "Cz", "O1", "O2"],
)
end
@testset "Axis tick labels" begin
f = Figure(size = (400, 800))
plot_parallelcoordinates(
f[1, 1],
subset(results_plot, :channel => x -> x .< 5, :time => x -> x .< 0);
ax_labels = ["Fz", "Cz", "O1", "O2"],
ax_ticklabels = :all,
normalize = :minmax,
axis = (; title = "ax_ticklabels = :all"),
) # show all ticks on all axes
plot_parallelcoordinates(
f[2, 1],
subset(results_plot, :channel => x -> x .< 5, :time => x -> x .< 0);
ax_labels = ["Fz", "Cz", "O1", "O2"],
ax_ticklabels = :left,
normalize = :minmax,
axis = (; title = "ax_ticklabels = :left"),
) # show all ticks on the left axis, but only extremities on others
plot_parallelcoordinates(
f[3, 1],
subset(results_plot, :channel => x -> x .< 5, :time => x -> x .< 0);
ax_labels = ["Fz", "Cz", "O1", "O2"],
ax_ticklabels = :outmost,
normalize = :minmax,
axis = (; title = "ax_ticklabels = :outmost"),
) # show ticks on extremities of all axes
plot_parallelcoordinates(
f[4, 1],
subset(results_plot, :channel => x -> x .< 5, :time => x -> x .< 0);
ax_labels = ["Fz", "Cz", "O1", "O2"],
ax_ticklabels = :none,
normalize = :minmax,
axis = (; title = "ax_ticklabels = :none"),
) # disable all ticks
f
end
@testset "transparency" begin
uf_5chan = example_data("UnfoldLinearModelMultiChannel")
f = Figure()
plot_parallelcoordinates(
f[1, 1],
uf_5chan;
mapping = (; color = :coefname),
visual = (; alpha = 0.1),
axis = (; title = "alpha = 0.1"),
)
plot_parallelcoordinates(
f[2, 1],
uf_5chan,
mapping = (; color = :coefname),
visual = (; alpha = 0.9),
axis = (; title = "alpha = 0.9"),
)
f
end
@testset "styling" begin
r1, positions = example_data()
r2 = deepcopy(r1)
r2.coefname .= "B" # create a second category
r2.estimate .+= rand(length(r2.estimate)) * 0.1
results_plot = vcat(r1, r2)
f = Figure()
plot_parallelcoordinates(
f[1, 1],
subset(results_plot, :channel => x -> x .< 8, :time => x -> x .< 0);
mapping = (; color = :coefname),
normalize = :minmax,
ax_labels = ["FP1", "F3", "F7", "FC3", "C3", "C5", "P3", "P7"],
)
f
end
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | code | 948 | using BSplineKit, Unfold
m0 = example_data("UnfoldLinearModel")
m1 = example_data("UnfoldLinearModelwith1Spline")
m2 = example_data("UnfoldLinearModelwith2Splines")
@testset "Spline plot: no splines" begin
err1 = nothing
t() = error(plot_splines(m0))
try
t()
catch err1
end
@test err1 == AssertionError(
"No spline term is found in UnfoldModel. Does your UnfoldModel really have a `spl(...)` or other `AbstractSplineTerm`?",
)
end
@testset "Spline plot: basic" begin
plot_splines(m1)
end
@testset "Spline plot: two spline terms" begin
plot_splines(m2)
end
@testset "Spline plot: spline_axis check" begin
plot_splines(m2; spline_axis = (; ylabel = "test"))
end
@testset "Spline plot: density_axis check" begin
plot_splines(m2, density_axis = (; ylabel = "test"))
end
@testset "Spline plot: superlabel_axis check" begin
plot_splines(m2; superlabel_config = (; fontsize = 60))
end
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | code | 1864 | dat, positions = TopoPlots.example_data()
data_for_topoplot = UnfoldMakie.eeg_array_to_dataframe(rand(10)')
@testset "topoplot: basic" begin
plot_topoplot(dat[:, 50, 1]; positions)
end
@testset "topoplot: data input as DataFrame" begin
plot_topoplot(
data_for_topoplot;
positions = positions[1:10],
axis = (; title = "Topoplot"),
)
end
@testset "topoplot: data input as AbstractVector" begin
d = rand(128)
p = rand(Point2f, 128)
plot_topoplot(d; positions = p)
end
@testset "topoplot: data input as SubDataFrame" begin
d = DataFrame(:estimate => rand(20), :label => string.(1:20))
d1 = @view(d[1:10, :])
plot_topoplot(d1; positions = rand(Point2f, 10))
end
@testset "topoplot: highliht an electrode" begin
plot_topoplot(dat[:, 50, 1]; positions, high_chan = 2)
end
@testset "topoplot: highliht several electrodes" begin
plot_topoplot(dat[:, 50, 1]; positions, high_chan = [1, 2])
end
@testset "topoplot: no legend" begin
plot_topoplot(dat[:, 50, 1]; positions = positions, layout = (; use_colorbar = false))
end
@testset "topoplot: xlabel" begin
plot_topoplot(dat[:, 50, 1]; positions = positions, axis = (; xlabel = "[50 ms]"))
end
@testset "topoplot: GridLayout" begin
f = Figure()
plot_topoplot!(f[1, 1], dat[:, 150, 1]; positions = positions)
f
end
@testset "topoplot: labels" begin
labels = ["s$i" for i = 1:size(dat[:, 150, 1], 1)]
plot_topoplot(dat[:, 150, 1], positions = positions; labels = labels)
end
@testset "topoplot: GridSubposition" begin
f = Figure()
plot_topoplot!(
f[1, 1][1, 1],
data_for_topoplot;
positions = rand(Point2f, 10),
labels = string.(1:10),
)
f
end
@testset "topoplot: positions through labels" begin
plot_topoplot(dat[1:19, 50, 1]; labels = TopoPlots.CHANNELS_10_20)
end
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | code | 7141 | # simple checks
dat, positions = TopoPlots.example_data()
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, :, 1], string.(1:length(positions)))
bin_width = 80
@testset "toposeries: bin_width" begin
plot_topoplotseries(df; bin_width = 80, positions = positions)
end
@testset "toposeries: bin_num" begin
plot_topoplotseries(df; bin_num = 5, positions = positions)
end
@testset "toposeries: bin_num" begin
plot_topoplotseries(df; bin_num = 5, positions = positions, axis = (; xlabel = "test"))
end
@testset "toposeries: checking other y value" begin
df.cont = df.time .* 3
plot_topoplotseries(df; bin_num = 5, positions = positions, mapping = (; col = :cont))
end
#= @testset "toposeries: Δbin deprecated" begin #fail
plot_topoplotseries(df, Δbin; positions = positions)
end =#
@testset "toposeries: nrows = 2" begin
plot_topoplotseries(df; bin_num = 5, nrows = 2, positions = positions)
end
@testset "toposeries: nrows = 5" begin
plot_topoplotseries(df; bin_num = 5, nrows = 3, positions = positions)
end
@testset "toposeries: nrows = -6" begin
plot_topoplotseries(df; bin_num = 5, nrows = -6, positions = positions)
end
@testset "error checking: bin_width and bin_num specified" begin
err1 = nothing
t() = error(plot_topoplotseries(df; bin_width = 80, bin_num = 5, positions = positions))
try
t()
catch err1
end
@test err1 ==
ErrorException("Ambigious parameters: specify only `bin_width` or `bin_num`.")
end
@testset "error checking: bin_width and bin_num not specified" begin
err1 = nothing
t() = error(plot_topoplotseries(df; positions = positions))
try
t()
catch err1
end
@test err1 == ErrorException(
"You haven't specified `bin_width` or `bin_num`. Such option is available only with categorical `mapping.col` or `mapping.row`.",
)
end
@testset "toposeries: channel names" begin
plot_topoplotseries(df; bin_width = 80, positions = positions, labels = raw_ch_names)
end # doesnt work rn
@testset "toposeries: xlabel" begin
f = Figure()
ax = Axis(f[1, 1])
plot_topoplotseries!(f[1, 1], df; bin_width = 80, positions = positions)
text!(ax, 0, 0, text = "Time [ms] ", align = (:center, :center), offset = (0, -120))
hidespines!(ax) # delete unnecessary spines (lines)
hidedecorations!(ax, label = false)
f
end
@testset "toposeries: one time point (what is it?)" begin
plot_topoplotseries(
df;
bin_width = 80,
positions = positions,
combinefun = x -> x[end÷2],
)
end
@testset "toposeries: differend comb functions" begin
f = Figure(size = (500, 500))
plot_topoplotseries!(
f[1, 1],
df;
bin_width,
positions = positions,
combinefun = mean,
axis = (; xlabel = "", title = "combinefun = mean"),
)
plot_topoplotseries!(
f[2, 1],
df;
bin_width,
positions = positions,
combinefun = median,
axis = (; xlabel = "", title = "combinefun = median"),
)
plot_topoplotseries!(
f[3, 1],
df;
bin_width,
positions = positions,
combinefun = std,
axis = (; title = "combinefun = std"),
)
f
end
@testset "toposeries: no colorbar" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
layout = (; use_colorbar = false),
)
end
@testset "toposeries: GridPosition with a title" begin
f = Figure()
ax = Axis(f[1:2, 1:5], aspect = DataAspect(), title = "Just a title")
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, :, 1], string.(1:length(positions)))
bin_width = 80
a = plot_topoplotseries!(
f[1:2, 1:5],
df;
bin_width,
positions = positions,
layout = (; use_colorbar = true),
)
hidespines!(ax)
hidedecorations!(ax, label = false)
f
end
@testset "toposeries: specified xlabel" begin
plot_topoplotseries(df; bin_width, positions = positions, axis = (; xlabel = "test"))
end
@testset "toposeries: adjustable colorrange" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
visual = (; colorrange = (-3, 3)),
)
end
@testset "toposeries: visual.colorrange and colorbar.colorrange" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
colorbar = (; colorrange = (-1, 1)),
visual = (; colorrange = (-1, 1)),
)
end
@testset "toposeries: adjusted ylim_topo" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
axis = (; ylim_topo = (0, 0.7)),
)
end
@testset "basic eeg_topoplot_series" begin
df = DataFrame(
:erp => repeat(1:64, 100),
:cont_cuts => repeat(1:20, 5 * 64),
:label => repeat(1:64, 100),
:col_coord => repeat(1:5, 20 * 64),
:row_coord => repeat(1:1, 6400),
) # simulated data
UnfoldMakie.eeg_topoplot_series(
df;
bin_width = 5,
positions = positions,
col = :col_coord,
row = :row_coord,
)
end
@testset "toposeries: GridSubposition" begin
f = Figure(size = (500, 500))
plot_topoplotseries!(
f[2, 1][1, 1],
df;
bin_width,
positions = positions,
combinefun = mean,
axis = (; title = "combinefun = mean"),
)
end
@testset "eeg_array_to_dataframe" begin
eeg_array_to_dataframe(rand(2, 2))
end
@testset "contours" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
visual = (; enlarge = 0.9, contours = (; linewidth = 1, color = :black)),
)
end
@testset "contours.levels" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
visual = (;
enlarge = 0.9,
contours = (; linewidth = 1, color = :black, levels = 3),
),
)
end
@testset "contours.levels" begin
plot_topoplotseries(
df;
bin_width,
positions = positions,
visual = (;
enlarge = 0.9,
contours = (; linewidth = 1, color = :black, levels = [0, 0.2]),
),
)
end
@testset "adjustable colorbar" begin #need to be elaborated
f = Figure()
plot_topoplotseries!(
f[1, 1],
df;
bin_width = 80,
positions = positions,
colorbar = (; height = 100, width = 30),
axis = (; aspect = AxisAspect(1)),
)
#Box(f[1, 1], color = (:red, 0.2), strokewidth = 0)
f
end
@testset "toposeries: precision" begin
df.time = df.time .+ 0.5555
plot_topoplotseries(df; bin_num = 5, positions = positions)
end
@testset "toposeries: colgap" begin
with_theme(colgap = 50) do
plot_topoplotseries(df, bin_num = 5; positions = positions)
end
end
@testset "toposeries: colgap for subsets" begin
f = Figure()
plot_topoplotseries!(
f[1, 1],
df,
bin_num = 5;
positions = positions,
topoplot_axes = (; limits = (-0.05, 1.05, -0.1, 1.05)),
)
f
end
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | code | 5427 | # advanced features: facetting and interactivity
dat, positions = TopoPlots.example_data()
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, :, 1], string.(1:length(positions)))
bin_width = 80
@testset "14 topoplots, 4 rows" begin # horrific
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, :, 1], string.(1:length(positions)))
plot_topoplotseries(
df;
bin_num = 14,
nrows = 4,
positions = positions,
visual = (; label_scatter = false),
)
end
@testset "facetting by layout" begin # could be changed to nrwos = "auto"
df = UnfoldMakie.eeg_array_to_dataframe(
dat[:, 200:1:206, 1],
string.(1:length(positions)),
)
plot_topoplotseries(
df;
bin_width = 1,
mapping = (; layout = :time),
positions = positions,
)
end
@testset "error checking: bin_width and bin_num specified" begin
err1 = nothing
t() = error(plot_topoplotseries(df; bin_width = 80, bin_num = 5, positions = positions))
try
t()
catch err1
end
@test err1 ==
ErrorException("Ambigious parameters: specify only `bin_width` or `bin_num`.")
end
@testset "error checking: bin_width or bin_num with categorical columns" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:2, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B"], size(df, 1) ÷ 2)
err1 = nothing
t() = error(
plot_topoplotseries(
df;
bin_num = 5,
col_labels = true,
mapping = (; col = :condition),
positions = positions,
),
)
try
t()
catch err1
end
@test err1 == ErrorException(
"Parameters `bin_width` or `bin_num` are only allowed with continonus `mapping.col` or `mapping.row`, while you specifed categorical.",
)
end
@testset "categorical columns" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:2, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B"], size(df, 1) ÷ 2)
plot_topoplotseries(
df;
col_labels = true,
mapping = (; col = :condition),
positions = positions,
)
end
@testset "4 conditions" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:4, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B", "C", "D"], size(df, 1) ÷ 4)
plot_topoplotseries(
df;
positions = positions,
axis = (; xlabel = "Conditions"),
mapping = (; col = :condition),
)
end
@testset "4 conditions in 2 rows" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:4, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B", "C", "D"], size(df, 1) ÷ 4)
plot_topoplotseries(
df;
nrows = 2,
positions = positions,
mapping = (; col = :condition),
)
end
#=
@testset "4 condtions in rows" begin # TBD
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:4, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B", "C", "D"], size(df, 1) ÷ 4)
plot_topoplotseries(
df;
bin_num = 3,
positions = positions,
mapping = (; row = :condition),
)
end
=#
@testset "topoplot axes configuration" begin # TBD
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:4, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B", "C", "D"], size(df, 1) ÷ 4)
plot_topoplotseries(
df;
nrows = 2,
positions = positions,
mapping = (; col = :condition),
axis = (; title = "axis title", xlabel = "Conditions"),
topoplot_axes = (;
rightspinevisible = true,
xlabelvisible = false,
title = "single topoplot title",
),
)
end
@testset "change xlabel" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:2, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B"], size(df, 1) ÷ 2)
plot_topoplotseries(
df;
col_labels = true,
mapping = (; col = :condition),
axis = (; xlabel = "test"),
positions = positions,
)
end
# use with WGlMakie
@testset "interactive data" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:2, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B"], size(df, 1) ÷ 2)
df_obs = Observable(df)
f = Figure()
plot_topoplotseries!(
f[1, 1],
df_obs;
col_labels = true,
mapping = (; col = :condition),
positions = positions,
)
f
df = to_value(df_obs)
df.estimate .= rand(length(df.estimate))
df_obs[] = df
end
@testset "interactive scatter markers" begin
df = UnfoldMakie.eeg_array_to_dataframe(dat[:, 1:2, 1], string.(1:length(positions)))
df.condition = repeat(["A", "B"], size(df, 1) ÷ 2)
obs_tuple = Observable((0, 0, 0))
plot_topoplotseries(
df;
col_labels = true,
mapping = (; col = :condition),
positions = positions,
visual = (; label_scatter = (markersize = 15, strokewidth = 2)),
interactive_scatter = obs_tuple,
)
end
#=
@testset "interactive data in eeg_array_to_dataframe" begin
data_obs3 = Observable(UnfoldMakie.eeg_array_to_dataframe(rand(10, 20)))
plot_topoplotseries!(Figure(), data_obs3; bin_num = 5, positions = rand(Point2f, 10))
data_obs3[] = UnfoldMakie.eeg_array_to_dataframe(rand(10, 20))
end
=#
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | docs | 11743 | # 
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->
[](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->
[](https://unfoldtoolbox.github.io/UnfoldMakie.jl/dev)
[](https://github.com/unfoldtoolbox/UnfoldMakie.jl/actions)
[](https://codecov.io/gh/behinger/UnfoldMakie.jl)
[](https://doi.org/10.5281/zenodo.10235220)
|rERP|EEG visualisation|EEG Simulations|BIDS pipeline|Decode EEG data|Statistical testing|
|---|---|---|---|---|---|
| <a href="https://github.com/unfoldtoolbox/Unfold.jl/tree/main"><img src="https://github-production-user-asset-6210df.s3.amazonaws.com/10183650/277623787-757575d0-aeb9-4d94-a5f8-832f13dcd2dd.png"></a> | <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl"><img src="https://github-production-user-asset-6210df.s3.amazonaws.com/10183650/277623793-37af35a0-c99c-4374-827b-40fc37de7c2b.png"></a>|<a href="https://github.com/unfoldtoolbox/UnfoldSim.jl"><img src="https://github-production-user-asset-6210df.s3.amazonaws.com/10183650/277623795-328a4ccd-8860-4b13-9fb6-64d3df9e2091.png"></a>|<a href="https://github.com/unfoldtoolbox/UnfoldBIDS.jl"><img src="https://github-production-user-asset-6210df.s3.amazonaws.com/10183650/277622460-2956ca20-9c48-4066-9e50-c5d25c50f0d1.png"></a>|<a href="https://github.com/unfoldtoolbox/UnfoldDecode.jl"><img src="https://github-production-user-asset-6210df.s3.amazonaws.com/10183650/277622487-802002c0-a1f2-4236-9123-562684d39dcf.png"></a>|<a href="https://github.com/unfoldtoolbox/UnfoldStats.jl"><img src="https://github-production-user-asset-6210df.s3.amazonaws.com/10183650/277623799-4c8f2b5a-ea84-4ee3-82f9-01ef05b4f4c6.png"></a>|
A toolbox for visualizations of EEG/ERP data and Unfold.jl models.
Based on three libraries
- [Unfold](https://github.com/unfoldtoolbox/unfold.jl/) - for performing deconvolution regression;
- [Makie](https://makie.juliaplots.org/stable/) - very flexible visualisation library ([Maki-e](https://en.wikipedia.org/wiki/Maki-e) means "visualisation" on Japanese);
- [Algebra of Graphics](https://github.com/MakieOrg/AlgebraOfGraphics.jl) - Makie-based visualisation library, allowing flexible mapping.
This grants users high performance, and highly customizable plots.
We currently support 9 general ERP plots:
<img src="https://raw.githubusercontent.com/unfoldtoolbox/UnfoldMakie.jl/8plots3/docs/src/assets/complex_plot.png" width="300" align="right">
- 
ERP plots
- 
Butterfly plots
- 
Topography plots
- 
Topography time series
- 
ERP grid
- 
ERP images
- 
Channel images
- 
Parallel coordinates
- Circular topoplots
And 2 Unfold-specific plots:
- Design matrices
- Splines plot
## Install
### Installing Julia
<details>
<summary>Click to expand</summary>
The recommended way to install julia is [juliaup](https://github.com/JuliaLang/juliaup).
It allows you to, e.g., easily update Julia at a later point, but also to test out alpha/beta versions etc.
TLDR: If you don't want to read the explicit instructions, just copy the following command
#### Windows
AppStore -> JuliaUp, or `winget install julia -s msstore` in CMD
#### Mac & Linux
`curl -fsSL https://install.julialang.org | sh` in any shell
</details>
### Installing Unfold
```julia
using Pkg
Pkg.add("UnfoldMakie")
```
## Quickstart
```julia
using UnfoldMakie
using CairoMakie # backend
using Unfold, UnfoldSim # Fit / Simulation
data, evts = UnfoldSim.predef_eeg(; noiselevel = 12, return_epoched = true)
data = reshape(data, 1, size(data)...) # simulate a single channel
times = range(0, step = 1 / 100, length = size(data, 2))
m = fit(UnfoldModel, @formula(0 ~ 1 + condition), evts, data, times)
plot_erp(coeftable(m))
```
## Contributions
Contributions are very welcome. These can be typos, bug reports, feature requests, speed improvements, new solvers, better code, better documentation.
### How to Contribute
You are very welcome to submit issues and start pull requests!
### Adding Documentation
1. We recommend to write a Literate.jl document and place it in `docs/literate/FOLDER/FILENAME.jl` with `FOLDER` being `HowTo`, `Explanation`, `Tutorial` or `Reference` ([recommended reading on the 4 categories](https://documentation.divio.com/)).
2. Literate.jl converts the `.jl` file to a `.md` automatically and places it in `docs/src/generated/FOLDER/FILENAME.md`.
3. Edit [make.jl](https://github.com/unfoldtoolbox/Unfold.jl/blob/main/docs/make.jl) with a reference to `docs/src/generated/FOLDER/FILENAME.md`.
## Citation
If you use these visualizations, please cite:
[](https://doi.org/10.5281/zenodo.6531996)
## Contributors
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tbody>
<tr>
<td align="center" valign="top" width="14.28%"><a href="http://www.benediktehinger.de"><img src="https://avatars.githubusercontent.com/u/10183650?v=4?s=100" width="100px;" alt="Benedikt Ehinger"/><br /><sub><b>Benedikt Ehinger</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/issues?q=author%3Abehinger" title="Bug reports">🐛</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=behinger" title="Code">💻</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=behinger" title="Documentation">📖</a> <a href="#ideas-behinger" title="Ideas, Planning, & Feedback">🤔</a> <a href="#infra-behinger" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#maintenance-behinger" title="Maintenance">🚧</a> <a href="#question-behinger" title="Answering Questions">💬</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/pulls?q=is%3Apr+reviewed-by%3Abehinger" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=behinger" title="Tests">⚠️</a> <a href="#tutorial-behinger" title="Tutorials">✅</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/vladdez"><img src="https://avatars.githubusercontent.com/u/33777074?v=4?s=100" width="100px;" alt="Vladimir Mikheev"/><br /><sub><b>Vladimir Mikheev</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/issues?q=author%3Avladdez" title="Bug reports">🐛</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=vladdez" title="Code">💻</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=vladdez" title="Documentation">📖</a> <a href="#ideas-vladdez" title="Ideas, Planning, & Feedback">🤔</a> <a href="#maintenance-vladdez" title="Maintenance">🚧</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/pulls?q=is%3Apr+reviewed-by%3Avladdez" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=vladdez" title="Tests">⚠️</a> <a href="#tutorial-vladdez" title="Tutorials">✅</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Link250"><img src="https://avatars.githubusercontent.com/u/4541950?v=4?s=100" width="100px;" alt="Quantum"/><br /><sub><b>Daniel Baumgartner</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=Link250" title="Code">💻</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=Link250" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/NiklasMGaertner"><img src="https://avatars.githubusercontent.com/u/54365174?v=4?s=100" width="100px;" alt="NiklasMGaertner"/><br /><sub><b>Niklas Gärtner</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=NiklasMGaertner" title="Code">💻</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=NiklasMGaertner" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/SorenDoring"><img src="https://avatars.githubusercontent.com/u/54365184?v=4?s=100" width="100px;" alt="SorenDoring"/><br /><sub><b>Soren Doring</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=SorenDoring" title="Code">💻</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=SorenDoring" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/lokmanfl"><img src="https://avatars.githubusercontent.com/u/44772645?v=4?s=100" width="100px;" alt="lokmanfl"/><br /><sub><b>Fadil Furkan Lokman</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=lokmanfl" title="Code">💻</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=lokmanfl" title="Documentation">📖</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://github.com/jschepers"><img src="https://avatars.githubusercontent.com/u/22366977?v=4?s=100" width="100px;" alt="Judith Schepers"/><br /><sub><b>Judith Schepers</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/issues?q=author%3Ajschepers" title="Bug reports">🐛</a> <a href="#ideas-jschepers" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=jschepers" title="Documentation">📖</a></td>
</tr>
<tr>
<td align="center" valign="top" width="14.28%"><a href="https://reneskukies.de/"><img src="https://avatars.githubusercontent.com/u/57703446?v=4?s=100" width="100px;" alt="René Skukies"/><br /><sub><b>René Skukies</b></sub></a><br /><a href="https://github.com/unfoldtoolbox/UnfoldMakie.jl/commits?author=ReneSkukies" title="Documentation">📖</a></td>
</tr>
</tbody>
</table>
<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->
<!-- ALL-CONTRIBUTORS-LIST:END -->
## Acknowledgements
Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) – Project-ID 251654672 – TRR 161” / “Gefördert durch die Deutsche Forschungsgemeinschaft (DFG) – Projektnummer 251654672 – TRR 161.
Funded by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany´s Excellence Strategy – EXC 2075 – 390740016
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | docs | 233 | ```@autodocs
Modules = [UnfoldMakie]
Order = [:function]
```
Internally, we use a `PlotConfig` struct to keep track of common plotting options, so that all functions have a similar API.
```@docs
PlotConfig
UnfoldMakie.PCPTicks
``` | UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | docs | 241 | ```@docs
UnfoldMakie.RelativeAxis
UnfoldMakie.eeg_topoplot_series
UnfoldMakie.config_kwargs!
UnfoldMakie.eeg_array_to_dataframe
UnfoldMakie.rel_to_abs_bbox
UnfoldMakie.to_positions
UnfoldMakie.data_binning
UnfoldMakie.cart3d_to_spherical
``` | UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.5.8 | a24dcd72b75b13a7d8a90c21a4a1d1a23e6db576 | docs | 1525 | # UnfoldMakie Documentation
```@raw html
<img src="assets/complex_plot.png" width="300" align="right"/>
```
This is the documentation of the UnfoldMakie.jl package for the Julia programming language.
## Highlights of UnfoldMakie.jl
- **11 plot functions for displaying ERPs.**
Each plot emphasizes certain dimensions while collapsing others.
- **Fast plotting**
Plot one figure with 20 topoplots in 1 second? No problemo!
- **Highly adaptable.**
The package is primarily based on [Unfold.jl](https://github.com/unfoldtoolbox/unfold.jl/) and [Makie.jl](https://makie.juliaplots.org/stable/).
- **Many usage examples**
You can find many user-friendly examples of how to use and adapt the plots in this documentation.
- **Scientific colormaps by default**
According to our study [(Mikheev, 2024)](https://apertureneuro.org/article/116386-the-art-of-brainwaves-a-survey-on-event-related-potential-visualization-practices), 40% of EEG researchers do not know about the issue of scientific color maps. As default color maps we use `Reverse(:RdBu)` (based on [colorbrewer](https://colorbrewer2.org/#type=sequential&scheme=BuGn&n=3)) and `Roma` (based on the [Scientific Colour Maps](https://www.fabiocrameri.ch/colourmaps/) by Fabio Crameri).
- **Interactivity**
Several plots make use of `Observables.jl` which allows fast updating of the underlying data. Several plots already have predefined interactive features, e.g. you can click on labels to enable / disable them. See `plot_topoplotseries` and `plot_erpimage` for examples.
| UnfoldMakie | https://github.com/unfoldtoolbox/UnfoldMakie.jl.git |
|
[
"MIT"
] | 0.2.0 | e1cf03d4095f0169e8f979a3c039e21c258bfa4f | code | 8814 | module ExpectationMaximizationPCA
using LinearAlgebra
"""
EMPCA(μ, n_comp, data, weights; basis_vecs, scores, kwargs...)
Performs expectation-maximization principal component analysis (EMPCA) on `data` with `n_comp` basis vectors using `weights` as the weights. Pre-allocated arrays for `basis_vecs`, and `scores`, can be passed via keyword arguments.
"""
function EMPCA(
μ::AbstractVector,
n_comp::Int,
data::AbstractMatrix,
weights::AbstractMatrix;
basis_vecs::AbstractMatrix=Array{Float64}(undef, size(data, 1), n_comp),
scores::AbstractMatrix=Array{Float64}(undef, n_comp, size(data, 2)),
kwargs...)
EMPCA!(basis_vecs, scores, μ, copy(data), weights; kwargs...)
return basis_vecs, scores
end
"""
EMPCA!(basis_vecs, scores, μ, data_tmp, weights; use_log, kwargs...)
Performs in-place (modifying `basis_vecs`, `scores`, and `data_tmp`) expectation-maximization principal component analysis (EMPCA) on `data_tmp` using `weights` as the weights.
# Keyword Arguments
- `use_log::false`: whether you want to perform EMPCA on the log of `data` instead
- `inds::AbstractUnitRange=axes(basis_vecs, 2)`: which indices of `basis_vecs` you want to use
- `vec_by_vec::Bool=true`: whether you want to perform EMPCA one vector at a time (generally preffered) or all at once
"""
function EMPCA!(basis_vecs::AbstractMatrix, scores::AbstractMatrix, μ::AbstractVector, data_tmp::AbstractMatrix, weights::AbstractMatrix; use_log::Bool=false, inds::AbstractUnitRange=axes(basis_vecs, 2), vec_by_vec::Bool=true, kwargs...)
# if you want to perform EMPCA on the log of the data, modify `data_tmp` and `weights` appropriately
if use_log
weights .*= (data_tmp .^ 2)
mask = weights.!=0
data_tmp[mask] = log.(view(data_tmp ./ μ, mask))
data_tmp[.!mask] .= 0
else
data_tmp .-= μ
end
if length(inds) > 0
@assert inds[1] > 0
vec_by_vec ?
_empca_vec_by_vec!(basis_vecs, scores, data_tmp, weights; inds=inds, kwargs...) :
_empca_all_at_once!(basis_vecs, scores, data_tmp, weights; inds=inds, kwargs...)
end
end
"""
_solve(dm, data, w)
Get optimal score(s) for modeling `data` with the basis vectors in the design matrix (`dm`) with weights (`w`) using generalized least squares (GLS)
"""
function _solve(
dm::AbstractVecOrMat{T},
data::AbstractVector,
w::AbstractVector) where {T<:Real}
return (dm' * (w .* dm)) \ (dm' * (w .* data))
end
"""
_solve_scores!(basis_vec, scores, data, weights)
Fill `scores` with those that optimally model `data` with the `basis_vec` and weights (`w`) using generalized least squares (GLS)
"""
function _solve_scores!(basis_vec::AbstractVector, scores::AbstractVector, data::AbstractMatrix, weights::AbstractMatrix)
for i in axes(data, 2)
scores[i] = _solve(basis_vec, view(data, :, i), view(weights, :, i))
end
end
"""
_solve_scores!(basis_vecs, scores, data, weights)
Fill `scores` with those that optimally model `data` with the `basis_vecs` and weights (`w`) using generalized least squares (GLS)+
"""
function _solve_scores!(basis_vecs::AbstractMatrix, scores::AbstractMatrix, data::AbstractMatrix, weights::AbstractMatrix; inds::AbstractUnitRange=axes(basis_vecs, 2))
for i in axes(data, 2)
scores[inds, i] .= _solve(view(basis_vecs, :, inds), view(data, :, i), view(weights, :, i))
end
end
"""
_solve_eigenvectors!(basis_vecs, scores, data, weights)
Fill `basis_vecs` with those that optimally model `data` with the `scores` and weights (`w`)
"""
function _solve_eigenvectors!(basis_vecs::AbstractMatrix, scores::AbstractMatrix, data::AbstractMatrix, weights::AbstractMatrix; inds::AbstractUnitRange=axes(basis_vecs, 2))
nvar = size(basis_vecs, 1)
cw = Array{Float64}(undef, size(data, 2))
for i in inds
c = view(scores, i, :)
for j in 1:nvar
cw[:] = c .* view(weights, j, :)
cwc = dot(c, cw)
iszero(cwc) ? basis_vecs[j, i] = 0 : basis_vecs[j, i] = dot(view(data, j, :), cw) / cwc
end
data .-= view(basis_vecs, :, i) * c'
end
basis_vecs[:, 1] ./= norm(view(basis_vecs, :, 1))
_reorthogonalize(basis_vecs)
end
"""
_solve_eigenvectors!(basis_vecs, scores, data, weights)
Fill `basis_vec` with the one that optimally model `data` with the `scores` and weights (`w`)
"""
function _solve_eigenvectors!(basis_vec::AbstractVector, scores::AbstractVector, data::AbstractMatrix, weights::AbstractMatrix)
nvar = length(basis_vec)
cw = Array{Float64}(undef, size(data, 2))
for j in 1:nvar
cw[:] = scores .* view(weights, j, :)
cwc = dot(scores, cw)
iszero(cwc) ? basis_vec[j] = 0 : basis_vec[j] = dot(view(data, j, :), cw) / cwc
end
# Renormalize the answer
basis_vec ./= norm(basis_vec)
end
"""
_reorthogonalize!(basis_vec)
Modifies `basis_vec` to ensure all basis vectors are orthagonal and normalized
"""
function _reorthogonalize!(basis_vec::AbstractMatrix; inds=2:size(basis_vec, 2), kwargs...)
nvec = size(basis_vec, 2)
@assert inds[1] > 1
if nvec > 1
for i in inds
_reorthogonalize_vec_i!(basis_vec, i; kwargs...)
end
end
end
"""
_reorthogonalize!(basis_vec, i)
Modifies `basis_vec[:, i]` to ensure it orthagonal to `basis_vec[:, 1:i-1]` and normalized
"""
function _reorthogonalize_vec_i!(basis_vec::AbstractMatrix, i::Int; extra_vec::Union{Nothing, AbstractVector}=nothing)
#- Renormalize and re-orthogonalize the answer
if !isnothing(extra_vec)
_reorthogonalize_no_renorm!(view(basis_vec, :, i), extra_vec)
end
for j in 1:(i-1)
_reorthogonalize_no_renorm!(view(basis_vec, :, i), view(basis_vec, :, j))
end
basis_vec[:, i] ./= norm(view(basis_vec, :, i))
end
"""
_reorthogonalize!(basis_vec1, basis_vec2)
Modifies `basis_vec1` to be orthagonal to `basis_vec2` without normalizing
"""
function _reorthogonalize_no_renorm!(basis_vec1::AbstractVector, basis_vec2::AbstractVector)
basis_vec1 .-= dot(basis_vec1, basis_vec2) .* basis_vec2 / sum(abs2, basis_vec2)
end
"""
_random_orthonormal!(A)
Fill `A` with orthonormal basis vectors
"""
function _random_orthonormal!(A::AbstractMatrix; inds::AbstractUnitRange=axes(A, 2))
keep_going = true
i = 0
while keep_going
i += 1
A[:, inds] .= randn(size(A, 1), length(inds))
for i in inds
for j in 1:(i-1)
A[:, i] .-= dot(view(A, :, j), view(A, :, i)) .* view(A, :, j)
end
A[:, i] ./= norm(view(A, :, i))
end
keep_going = any(isnan.(A)) && (i < 100)
end
if i > 99; println("_random_orthonormal!() in empca failed for some reason") end
return A
end
"""
_empca_all_at_once!(basis_vec, scores, data, weights; niter, kwargs...)
Performs in-place EMPCA, improving all basis vectors and scores with each iteration
# Keyword Arguments
- `niter::Int=100`: the amount of iterations used
"""
function _empca_all_at_once!(basis_vec::AbstractMatrix, scores::AbstractMatrix, data::AbstractMatrix, weights::AbstractMatrix; niter::Int=100, inds::AbstractUnitRange=axes(basis_vec, 2), kwargs...)
#- Basic dimensions
@assert size(data) == size(weights)
@assert size(scores, 1) == size(basis_vec, 2)
@assert size(scores, 2) == size(data, 2)
@assert size(basis_vec, 1) == size(data, 1)
#- Starting random guess
basis_vec .= _random_orthonormal!(basis_vec; inds=inds, kwargs...)
_solve_scores!(basis_vec, scores, data, weights)
_data = copy(data)
for k in 1:niter
_solve_eigenvectors!(basis_vec, scores, _data, weights; inds=inds)
_data .= data
_solve_scores!(basis_vec, scores, _data, weights; inds=inds)
end
return basis_vec, scores
end
"""
_empca_vec_by_vec!(basis_vec, scores, data, weights; niter, kwargs...)
Performs in-place EMPCA, finishing one basis vector (and its scores) before moving onto the next
# Keyword Arguments
- `niter::Int=100`: the amount of iterations used
"""
function _empca_vec_by_vec!(basis_vec::AbstractMatrix, scores::AbstractMatrix, data::AbstractMatrix, weights::AbstractMatrix; niter::Int=100, inds::AbstractUnitRange=axes(basis_vec, 2), kwargs...)
#- Basic dimensions
nvar, nobs = size(data)
@assert size(data) == size(weights)
@assert size(scores, 1) == size(basis_vec, 2)
@assert size(scores, 2) == nobs
@assert size(basis_vec, 1) == nvar
_data = copy(data)
for i in 1:inds[end]
if i in inds
basis_vec[:, i] .= randn(nvar)
# basis_vec[:, i] ./= norm(view(basis_vec, :, i))
_reorthogonalize_vec_i!(basis_vec, i; kwargs...) # actually useful
_solve_scores!(view(basis_vec, :, i), view(scores, i, :), data, weights)
for k in 1:niter
_solve_eigenvectors!(view(basis_vec, :, i), view(scores, i, :), _data, weights)
_reorthogonalize_vec_i!(basis_vec, i; kwargs...) # actually useful
_solve_scores!(view(basis_vec, :, i), view(scores, i, :), _data, weights)
end
end
_data .-= view(basis_vec, :, i) * view(scores, i, :)'
end
return basis_vec, scores
end
end # module
| ExpectationMaximizationPCA | https://github.com/christiangil/ExpectationMaximizationPCA.jl.git |
|
[
"MIT"
] | 0.2.0 | e1cf03d4095f0169e8f979a3c039e21c258bfa4f | code | 1209 | using Test
using LinearAlgebra
using MultivariateStats
using Statistics
using Random
import ExpectationMaximizationPCA as EMPCA
# Shared fixtures: two smooth basis signals (a sine and a cosine over one
# period) mixed with seeded random scores, giving a rank-2 data matrix `d`
# of size (nx, 50).
nx = 200
b1 = sin.(((1:nx) ./ nx) * 2π)
b2 = cos.(((1:nx) ./ nx) * 2π)
d = rand(Random.MersenneTwister(0), 50)' .* b1 + (0.2 .* rand(Random.MersenneTwister(1), 50)') .* b2
# With uniform weights, EMPCA should reproduce classical PCA up to sign flips
# of the basis vectors, which are corrected before comparing.
@testset "Replicating exact PCA" begin
    M = fit(PCA, d; maxoutdim=2)
    basis_vecs, scores = EMPCA.EMPCA(vec(mean(d;dims=2)), 2, d, ones(size(d)))
    for i in 1:2
        # flip component i if it matches -proj better than +proj
        # NOTE(review): `scores[:, i]` flips the scores of *observation* i, not
        # component i (`scores[i, :]`); harmless here since only `basis_vecs`
        # is asserted on, but it looks unintended — confirm
        s = sum(abs, M.proj[:, i] - basis_vecs[:, i]) < sum(abs, M.proj[:, i] + basis_vecs[:, i])
        basis_vecs[:, i] .*= 2*s-1
        scores[:, i] .*= 2*s-1
    end
    @test all(isapprox.(M.proj,basis_vecs; atol=1e-6, rtol=1e-6))
    println()
end
# With heteroscedastic noise (edges noisier than the center), weighted EMPCA
# should reach a lower χ² reconstruction error than unweighted PCA.
@testset "Better than PCA in a χ²-sense" begin
    σ = ((((((1:nx) .- nx/2).^2) ./ (nx/2)^2) .+ 1)* ones(50)') ./ 6 # edges are twice as noisy as the center
    dn = d + σ .* randn(Random.MersenneTwister(2), size(d))
    M = fit(PCA, dn; maxoutdim=2)
    basis_vecs, scores = EMPCA.EMPCA(vec(mean(dn;dims=2)), 2, dn, 1 ./ σ.^2)
    @test sum(abs2, (dn - (basis_vecs * scores .+ vec(mean(dn;dims=2)))) ./ σ) < sum(abs2, (dn - reconstruct(M, predict(M, dn))) ./ σ)
    println()
end
| ExpectationMaximizationPCA | https://github.com/christiangil/ExpectationMaximizationPCA.jl.git |
|
[
"MIT"
] | 0.2.0 | e1cf03d4095f0169e8f979a3c039e21c258bfa4f | docs | 641 | # ExpectationMaximizationPCA.jl
ExpectationMaximizationPCA.jl is a Julia rewrite of [empca](https://github.com/sbailey/empca) which provides Weighted Expectation Maximization PCA, an iterative method for solving PCA while properly weighting data.
The paper S. Bailey 2012, PASP, 124, 1015 describes the underlying math
and is available as a pre-print at:
http://arxiv.org/abs/1208.4122
If you use this code in an academic paper, please include a citation
as described in CITATION.txt, and optionally an acknowledgement such as:
> This work uses the Weighted EMPCA code by Stephen Bailey,
> available at https://github.com/sbailey/empca/
| ExpectationMaximizationPCA | https://github.com/christiangil/ExpectationMaximizationPCA.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 937 | using Pkg
# Make the local helper package (shared benchmark problem definitions) available.
Pkg.develop(; path=joinpath(@__DIR__, "SparseConnectivityTracerBenchmarks"))
using BenchmarkTools
using SparseConnectivityTracer
using SparseConnectivityTracer: HessianTracer, DictHessianPattern, Shared
include("jacobian.jl")
include("hessian.jl")
include("nlpmodels.jl")
# Top-level suite: NLP problems, plus Jacobian/Hessian detection with both the
# global and the local tracer-based detectors.
suite = BenchmarkGroup()
suite["OptimizationProblems"] = optbench([:britgas])
suite["Jacobian"]["Global"] = jacbench(TracerSparsityDetector())
suite["Jacobian"]["Local"] = jacbench(TracerLocalSparsityDetector())
suite["Hessian"]["Global"] = hessbench(TracerSparsityDetector())
suite["Hessian"]["Local"] = hessbench(TracerLocalSparsityDetector())
# Shared tracers: rerun the Hessian benchmarks with a dictionary-based Hessian
# pattern marked `Shared`, passed in via the `hessian_tracer_type` keyword.
P = DictHessianPattern{Int,BitSet,Dict{Int,BitSet},Shared}
H = HessianTracer{P}
suite["Hessian"]["Global shared"] = hessbench(
    TracerSparsityDetector(; hessian_tracer_type=H)
)
suite["Hessian"]["Local shared"] = hessbench(
    TracerLocalSparsityDetector(; hessian_tracer_type=H)
)
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 191 | using PkgJogger
using SparseConnectivityTracer
# Use PkgJogger.@jog to create the JogSparseConnectivityTracer module
@jog SparseConnectivityTracer
SUITE = JogSparseConnectivityTracer.suite()
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2377 | using BenchmarkTools
using SparseConnectivityTracer
using Random: MersenneTwister
#=
Test cases taken from the article:
> "On efficient Hessian computation using the edge pushing algorithm in Julia"
> https://www.tandfonline.com/doi/full/10.1080/10556788.2018.1480625
=#
"""
    hessbench(detector)

Assemble the Hessian sparsity-detection benchmark suite (arrow-head and
random-sparsity structured problems) for the given `detector`.
"""
function hessbench(detector)
    group = BenchmarkGroup()
    group["ArrowHead"] = hessbench_arrowhead(detector)
    group["RandomSparsity"] = hessbench_randomsparsity(detector)
    return group
end
## 4.1 Arrow-head structure
# Arrow-head structured Hessian test case (Sec. 4.1 of the edge-pushing paper):
# `K` controls the bandwidth/head width of the resulting Hessian.
struct ArrowHead
    K::Int
end

# Evaluate the arrow-head objective at `x`:
# ∑ᵢ [ cos(∑ⱼ₌₁ᴷ x[i+j]) + ∑ⱼ₌₁ᴷ (x[i] + x[j])² ], skipping out-of-range x[i+j].
function (ah::ArrowHead)(x::AbstractVector)
    bandwidth = ah.K
    n = length(x)
    return sum(1:n) do i
        band_sum = sum(
            x[i + j] for j in 1:bandwidth if (i + j) in eachindex(x);
            init=zero(eltype(x)),
        )
        head_sum = sum((x[i] + x[j])^2 for j in 1:bandwidth)
        cos(band_sum) + head_sum
    end
end
"""
    hessbench_arrowhead(detector)

Benchmark `hessian_sparsity` with `detector` on arrow-head problems of varying
size `N` and bandwidth `K`.
"""
function hessbench_arrowhead(detector)
    suite = BenchmarkGroup()
    # Commented-out cases (N, K) are included in the JuMP paper linked above,
    # but excluded here to accelerate the benchmark suite.
    for (N, K) in [
        ## Table 1
        (200, 16),
        # (400, 16),
        # (800, 16),
        ## Table 2
        (3200, 2),
        # (3200, 4),
        # (3200, 8),
    ]
        x = rand(N)
        f = ArrowHead(K)
        suite["N=$N, K=$K"] = @benchmarkable hessian_sparsity($f, $x, $detector)
    end
    return suite
end
# 4.2 Random sparsity structure
# Random-sparsity structured Hessian test case (Sec. 4.2): term i couples x[i]
# with a fixed random subset of coordinates.
struct RandomSparsity
    rand_sets::Vector{Vector{Int}}
end

# Draw N index subsets of size K, seeded per row so the structure is reproducible.
function RandomSparsity(N::Integer, K::Integer)
    subsets = [rand(MersenneTwister(123 + i), 1:N, K) for i in 1:N]
    return RandomSparsity(subsets)
end

# Objective: ∑ᵢ [ (xᵢ - 1)² + ∏ of x over the i-th random subset ].
function (rs::RandomSparsity)(x::AbstractVector)
    return sum(eachindex(x, rs.rand_sets)) do i
        (x[i] - 1)^2 + prod(x[rs.rand_sets[i]])
    end
end
"""
    hessbench_randomsparsity(detector)

Benchmark `hessian_sparsity` with `detector` on random-sparsity problems of
varying size `N` and subset size `K`.
"""
function hessbench_randomsparsity(detector)
    suite = BenchmarkGroup()
    # Commented-out cases (N, K) are included in the JuMP paper linked above,
    # but excluded here to accelerate the benchmark suite.
    for (N, K) in [
        ## Table 3
        (400, 2),
        # (400, 4),
        # (400, 8),
        ## Table 4
        (100, 32),
        # (200, 32),
        # (400, 32),
    ]
        x = rand(N)
        f = RandomSparsity(N, K)
        suite["N=$N, K=$K"] = @benchmarkable hessian_sparsity($f, $x, $detector)
    end
    return suite
end
# 4.3 Logistic regression
# TODO: Add this test case
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2334 | using BenchmarkTools
using SparseConnectivityTracer
using SparseConnectivityTracerBenchmarks.ODE: Brusselator!, brusselator_2d_loop!
using Random: MersenneTwister
using SparseArrays: sprand
using SimpleDiffEq: ODEProblem, solve, SimpleEuler
using Flux: Conv
"""
    jacbench(detector)

Assemble the Jacobian sparsity-detection benchmark suite (iterated sparse
matmuls, the Brusselator PDE, and a convolutional layer) for the given
`detector`.
"""
function jacbench(detector)
    group = BenchmarkGroup()
    group["SparseMul"] = jacbench_sparsemul(detector)
    group["Brusselator"] = jacbench_brusselator(detector)
    group["Conv"] = jacbench_conv(detector)
    return group
end
## Iterated sparse mul
# A chain of `depth` matrix-vector products with fixed (seeded) sparse layers.
struct IteratedSparseMul{M<:AbstractMatrix}
    As::Vector{M}
end

# Build `depth` random n×n sparse layers with density `p`, seeded per layer so
# the sparsity structure is reproducible.
function IteratedSparseMul(; n::Integer, p::Real=0.1, depth::Integer=5)
    layers = [sprand(MersenneTwister(123 + i), n, n, p) for i in 1:depth]
    return IteratedSparseMul(layers)
end

# Apply the layers in order: x ↦ A_depth ⋯ A_2 A_1 x.
function (ism::IteratedSparseMul)(x::AbstractVector)
    @assert length(x) == size(ism.As[1], 1)
    return foldl((v, A) -> A * v, ism.As; init=x)
end
"""
    jacbench_sparsemul(detector)

Benchmark `jacobian_sparsity` with `detector` on iterated sparse matrix-vector
products, sweeping over layer density `p` (size `n` and `depth` are fixed).
"""
function jacbench_sparsemul(detector)
    suite = BenchmarkGroup()
    for n in [50], p in [0.01, 0.25], depth in [5]
        x = rand(n)
        f = IteratedSparseMul(; n, p, depth)
        suite["n=$n, p=$p, depth=$depth"] = @benchmarkable jacobian_sparsity(
            $f, $x, $detector
        )
    end
    return suite
end
## Brusselator
"""
    jacbench_brusselator(detector)

Benchmark `jacobian_sparsity` with `detector` on the Brusselator for two grid
sizes: once through the plain RHS operator `f!(y, x)` and once through a full
`SimpleEuler` ODE solve of the same right-hand side.
"""
function jacbench_brusselator(detector)
    suite = BenchmarkGroup()
    for N in (6, 24)
        f! = Brusselator!(N)
        x = rand(N, N, 2)
        y = similar(x)
        suite["operator"]["N=$N"] = @benchmarkable jacobian_sparsity($f!, $y, $x, $detector)
        solver = SimpleEuler()
        # The ODEProblem is built inside the closure from the traced input `x`.
        # (A previously assigned standalone `prob` was never used and has been removed.)
        function brusselator_ode_solve(x)
            return solve(ODEProblem(brusselator_2d_loop!, x, (0.0, 1.0), f!.params), solver; dt=0.5).u[end]
        end
        suite["ODE"]["N=$N"] = @benchmarkable jacobian_sparsity(
            $brusselator_ode_solve, $x, $detector
        )
    end
    return suite
end
## Convolution
"""
    jacbench_conv(detector)

Benchmark `jacobian_sparsity` with `detector` on a single Flux `Conv` layer
(5×5 kernel, 3 → 2 channels) applied to inputs of two spatial sizes.
"""
function jacbench_conv(detector)
    # TODO: benchmark local sparsity tracers on LeNet-5 CNN
    layer = Conv((5, 5), 3 => 2)
    suite = BenchmarkGroup()
    for N in (28, 128)
        suite["N=$N"] = @benchmarkable jacobian_sparsity(
            $layer, $(rand(N, N, 3, 1)), $detector
        )
    end
    return suite
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 497 | using BenchmarkTools
using OptimizationProblems: ADNLPProblems
using SparseConnectivityTracerBenchmarks.Optimization:
compute_jac_sparsity_sct, compute_hess_sparsity_sct
"""
    optbench(names)

Build a benchmark suite that, for each OptimizationProblems problem in `names`,
measures tracer-based detection of the constraint-Jacobian and the
Lagrangian-Hessian sparsity patterns.
"""
function optbench(names::Vector{Symbol})
    suite = BenchmarkGroup()
    for problem in names
        model = ADNLPProblems.eval(problem)()
        suite[problem]["Jacobian"] = @benchmarkable compute_jac_sparsity_sct($model)
        suite[problem]["Hessian"] = @benchmarkable compute_hess_sparsity_sct($model)
    end
    return suite
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 304 | # To run benchmarks locally, BenchmarkCI should be added to root project. Then call:
# ```bash
# julia benchmark/run_benchmarks.jl
# ```
using BenchmarkCI
# Detect whether we are running inside GitHub Actions.
on_CI = haskey(ENV, "GITHUB_ACTIONS")
# Compare the current state against the main branch.
BenchmarkCI.judge(; baseline="origin/main")
# On CI, post the judgement (e.g. as a PR comment); locally, just display it.
on_CI ? BenchmarkCI.postjudge() : BenchmarkCI.displayjudgement()
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 759 | module SparseConnectivityTracerBenchmarks
# ODE-based benchmark problems (the Brusselator reaction-diffusion system).
module ODE
    include("brusselator.jl")
    export Brusselator!, brusselator_2d_loop!
end

# Nonlinear-programming benchmark problems: sparsity of constraint Jacobians
# and Lagrangian Hessians, detected with SparseConnectivityTracer and compared
# against JuMP-derived ground truth.
module Optimization
    using ADTypes: ADTypes
    using SparseConnectivityTracer
    import SparseConnectivityTracer as SCT
    using ADNLPModels: ADNLPModels
    using NLPModels: NLPModels, AbstractNLPModel
    using NLPModelsJuMP: NLPModelsJuMP
    using OptimizationProblems: OptimizationProblems
    using LinearAlgebra
    using SparseArrays
    include("nlpmodels.jl")
    export optimization_problem_names
    export compute_jac_sparsity_sct, compute_hess_sparsity_sct
    export compute_jac_and_hess_sparsity_sct, compute_jac_and_hess_sparsity_and_value_jump
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1348 | # Brusselator example taken from Gowda et al.
# "Sparsity Programming: Automated Sparsity-Aware Optimizations in Differentiable Programming"
# https://openreview.net/pdf?id=rJlPdcY38B
#! format: off
# Time/space-dependent forcing: 5 inside a disc of radius 0.1 around (0.3, 0.6)
# once t >= 1.1, else 0. NOTE: `ifelse` (rather than `? :`) evaluates both
# branches eagerly, which keeps this traceable by operator-overloading
# sparsity detectors — presumably why it is used here; confirm before changing.
brusselator_f(x, y, t) = ifelse((((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) && (t >= 1.1), 5., 0.)
# Periodic boundary wrap for grid indices: N+1 -> 1 and 0 -> N.
limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a
# In-place RHS of the two-species Brusselator reaction-diffusion system on an
# N×N grid with periodic boundaries; `u` and `du` have shape (N, N, 2) and
# `p = (A, B, alpha, xyd, dx, N)`.
function brusselator_2d_loop!(du, u, p, t)
    A, B, alpha, xyd, dx, N = p; alpha = alpha/dx^2  # fold grid spacing into the diffusion coefficient
    @inbounds for I in CartesianIndices((N, N))
        i, j = Tuple(I)
        x, y = xyd[I[1]], xyd[I[2]]
        # wrapped (periodic) neighbor indices for the 5-point Laplacian stencil
        ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N)
        # species 1: diffusion + B + u1²u2 - (A+1)u1 + forcing
        du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) +
                    B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t)
        # species 2: diffusion + A·u1 - u1²u2
        du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) +
                    A*u[i,j,1] - u[i,j,1]^2*u[i,j,2]
    end
end
#! format: on
# Bundles the grid size `N` with the parameter tuple expected by
# `brusselator_2d_loop!`, and makes the pair callable as an operator.
struct Brusselator!{P}
    N::Int
    params::P
end

# BUGFIX: this used to be `Base.show(b!::Brusselator!) = "Brusselator(N=...)"`,
# which overloaded the *one-argument* `show` to return a string instead of
# printing anything, so the object never displayed as intended. Overload the
# standard two-argument form and print.
Base.show(io::IO, b!::Brusselator!) = print(io, "Brusselator!(N=$(b!.N))")

"""
    Brusselator!(N::Integer)

Construct a Brusselator operator on an `N`×`N` grid with the standard parameter
values `A = B = alpha = dx = 1` and unit grid coordinates `xyd`.
"""
function Brusselator!(N::Integer)
    A = 1.0
    B = 1.0
    alpha = 1.0
    xyd = fill(1.0, N)
    dx = 1.0
    params = (; A, B, alpha, xyd, dx, N)
    return Brusselator!(N, params)
end

# Calling the operator evaluates the PDE right-hand side in place: y .= f(x).
(b!::Brusselator!)(y, x) = brusselator_2d_loop!(y, x, b!.params, nothing)
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 3553 | #=
Given an optimization problem `min f(x) s.t. c(x) <= 0`, we study
- the Jacobian of the constraints `c(x)`
- the Hessian of the Lagrangian `L(x,y) = f(x) + yᵀc(x)`
Package ecosystem overview: https://jso.dev/ecosystems/models/
- NLPModels.jl: abstract interface `AbstractNLPModel` for nonlinear optimization problems (with utilities to query objective, constraints, and their derivatives). See API at https://jso.dev/NLPModels.jl/stable/api/
- ADNLPModels.jl: concrete `ADNLPModel <: AbstractNLPModel` created from pure Julia code with autodiff
- NLPModelsJuMP.jl: concrete `MathOptNLPModel <: AbstractNLPModel` converted from a `JuMP.Model`
- OptimizationProblems.jl: suite of benchmark problems available in two formulations:
- OptimizationProblems.ADNLPProblems: spits out `ADNLPModel`
- OptimizationProblems.PureJuMP: spits out `JuMP.Model`
=#
# Names of every problem registered in OptimizationProblems.jl's metadata table.
optimization_problem_names() = map(Symbol, OptimizationProblems.meta[!, :name])
## SCT
#=
Here we use OptimizationProblems.ADNLPProblems because we need the problems in pure Julia.
https://jso.dev/OptimizationProblems.jl/stable/tutorial/#Problems-in-ADNLPModel-syntax:-ADNLPProblems
=#
"""
    myconstraints(nlp, x)

Evaluate the constraint vector `c(x)` of `nlp` at `x`, allocating the output.
"""
function myconstraints(nlp::AbstractNLPModel, x::AbstractVector)
    cx = similar(x, nlp.meta.ncon)
    NLPModels.cons!(nlp, x, cx)
    return cx
end
"""
    mylagrangian(nlp, x)

Evaluate a Lagrangian `L(x, y) = f(x) + yᵀ c(x)` of `nlp` at `x` with random
multipliers `y` (their particular values do not affect sparsity detection).
"""
function mylagrangian(nlp::AbstractNLPModel, x::AbstractVector)
    objective = NLPModels.obj(nlp, x)
    cx = myconstraints(nlp, x)
    multipliers = randn(length(cx))
    return objective + dot(multipliers, cx)
end
"""
    compute_jac_sparsity_sct(nlp)

Detect the sparsity pattern of the constraint Jacobian of `nlp` at its starting
point `x0`, using SparseConnectivityTracer's global `TracerSparsityDetector`.
"""
function compute_jac_sparsity_sct(nlp::AbstractNLPModel)
    constraints = Base.Fix1(myconstraints, nlp)
    return ADTypes.jacobian_sparsity(constraints, nlp.meta.x0, TracerSparsityDetector())
end
"""
    compute_hess_sparsity_sct(nlp)

Detect the sparsity pattern of the Lagrangian Hessian of `nlp` at its starting
point `x0`, using SparseConnectivityTracer's global `TracerSparsityDetector`.
"""
function compute_hess_sparsity_sct(nlp::AbstractNLPModel)
    lagrangian = Base.Fix1(mylagrangian, nlp)
    return ADTypes.hessian_sparsity(lagrangian, nlp.meta.x0, TracerSparsityDetector())
end
"""
    compute_jac_and_hess_sparsity_sct(name)

Instantiate problem `name` from `OptimizationProblems.ADNLPProblems` and return
its `(jacobian_sparsity, hessian_sparsity)` as detected by
SparseConnectivityTracer.
"""
function compute_jac_and_hess_sparsity_sct(name::Symbol)
    nlp = OptimizationProblems.ADNLPProblems.eval(name)()
    return (compute_jac_sparsity_sct(nlp), compute_hess_sparsity_sct(nlp))
end
## Generic
"""
    compute_jac_sparsity_and_value(nlp)

Return `(jac_sparsity, jac)`: the Boolean sparsity pattern and the numeric value
of the constraint Jacobian of `nlp` at `x0`, assembled from the model's COO
(row/column/value) representation.
"""
function compute_jac_sparsity_and_value(nlp::AbstractNLPModel)
    nvar, ncon = nlp.meta.nvar, nlp.meta.ncon
    x0 = nlp.meta.x0
    rows, cols = NLPModels.jac_structure(nlp)
    vals = NLPModels.jac_coord(nlp, x0)
    pattern = sparse(rows, cols, ones(Bool, length(rows)), ncon, nvar)
    jac = sparse(rows, cols, vals, ncon, nvar)
    return pattern, jac
end
"""
    compute_hess_sparsity_and_value(nlp)

Return `(hess_sparsity, hess)`: the Boolean sparsity pattern and the numeric
value of the Lagrangian Hessian of `nlp` at `x0` (with random multipliers),
symmetrized from the stored lower triangle.
"""
function compute_hess_sparsity_and_value(nlp::AbstractNLPModel)
    nvar, ncon = nlp.meta.nvar, nlp.meta.ncon
    x0 = nlp.meta.x0
    multipliers = rand(ncon)
    rows, cols = NLPModels.hess_structure(nlp)
    vals = NLPModels.hess_coord(nlp, x0, multipliers)
    pattern = sparse(Symmetric(sparse(rows, cols, ones(Bool, length(rows)), nvar, nvar), :L))
    hess = sparse(Symmetric(sparse(rows, cols, vals, nvar, nvar), :L))
    return pattern, hess
end
## JuMP
#=
Here we use OptimizationProblems.PureJuMP because JuMP is the ground truth, but we translate with NLPModelsJuMP to easily query the stuff we need.
https://jso.dev/OptimizationProblems.jl/stable/tutorial/#Problems-in-JuMP-syntax:-PureJuMP
https://jso.dev/NLPModelsJuMP.jl/stable/tutorial/#MathOptNLPModel
=#
"""
    compute_jac_and_hess_sparsity_and_value_jump(name)

Instantiate the PureJuMP problem called `name`, translate it with
NLPModelsJuMP, and return `((jac_pattern, jac), (hess_pattern, hess))`.
"""
function compute_jac_and_hess_sparsity_and_value_jump(name::Symbol)
    jump_model = OptimizationProblems.PureJuMP.eval(name)()
    nlp = NLPModelsJuMP.MathOptNLPModel(jump_model)
    jac_result = compute_jac_sparsity_and_value(nlp)
    hess_result = compute_hess_sparsity_and_value(nlp)
    return (jac_result, hess_result)
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1136 | using SparseConnectivityTracer
using ADTypes
using Documenter
using DocumenterMermaid
# Create index.md from README
cp(joinpath(@__DIR__, "..", "README.md"), joinpath(@__DIR__, "src", "index.md"); force=true)
# Run `using SparseConnectivityTracer` before every doctest automatically.
DocMeta.setdocmeta!(
    SparseConnectivityTracer,
    :DocTestSetup,
    :(using SparseConnectivityTracer);
    recursive=true,
)
# Build the HTML documentation; ADTypes is included so its reexported
# `jacobian_sparsity`/`hessian_sparsity` docstrings resolve.
makedocs(;
    modules=[SparseConnectivityTracer, ADTypes],
    authors="Adrian Hill <gh@adrianhill.de>",
    sitename="SparseConnectivityTracer.jl",
    format=Documenter.HTML(;
        canonical = "https://adrhill.github.io/SparseConnectivityTracer.jl",
        edit_link = "main",
        assets = ["assets/favicon.ico"],
    ),
    pages=[
        "Getting Started" => "index.md",
        "User Documentation" =>
            ["user/global_vs_local.md", "user/limitations.md", "user/api.md"],
        "Developer Documentation" => [
            "internals/how_it_works.md",
            "internals/adding_overloads.md",
            "internals/api.md",
        ],
    ],
    # Missing docstrings only warn instead of failing the build.
    warnonly=[:missing_docs],
)
# Push the built docs to gh-pages, tracking the `main` branch.
deploydocs(; repo="github.com/adrhill/SparseConnectivityTracer.jl", devbranch="main")
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 5259 | # WARNING: If you are following the "Adding Overloads" guide's advice to copy an existing package extension,
# copy another, less complicated one!
module SparseConnectivityTracerDataInterpolationsExt
if isdefined(Base, :get_extension)
using SparseConnectivityTracer: AbstractTracer, Dual, primal, tracer
using SparseConnectivityTracer: GradientTracer, gradient_tracer_1_to_1
using SparseConnectivityTracer: HessianTracer, hessian_tracer_1_to_1
using FillArrays: Fill # from FillArrays.jl
import DataInterpolations:
LinearInterpolation,
QuadraticInterpolation,
LagrangeInterpolation,
AkimaInterpolation,
ConstantInterpolation,
QuadraticSpline,
CubicSpline,
BSplineInterpolation,
BSplineApprox,
CubicHermiteSpline,
# PCHIPInterpolation,
QuinticHermiteSpline
else
using ..SparseConnectivityTracer: AbstractTracer, Dual, primal, tracer
using ..SparseConnectivityTracer: GradientTracer, gradient_tracer_1_to_1
using ..SparseConnectivityTracer: HessianTracer, hessian_tracer_1_to_1
using ..FillArrays: Fill # from FillArrays.jl
import ..DataInterpolations:
LinearInterpolation,
QuadraticInterpolation,
LagrangeInterpolation,
AkimaInterpolation,
ConstantInterpolation,
QuadraticSpline,
CubicSpline,
BSplineInterpolation,
BSplineApprox,
CubicHermiteSpline,
# PCHIPInterpolation,
QuinticHermiteSpline
end
#========================#
# General interpolations #
#========================#
# We assume that with the exception of ConstantInterpolation and LinearInterpolation,
# all interpolations have a non-zero second derivative at some point in the input domain.
# The Boolean flags mirror the other overloads in this file: `false` means the
# corresponding derivative is NOT globally zero (cf. ConstantInterpolation below,
# which passes `true`).
for I in (
    :QuadraticInterpolation,
    :LagrangeInterpolation,
    :AkimaInterpolation,
    :QuadraticSpline,
    :CubicSpline,
    :BSplineInterpolation,
    :BSplineApprox,
    :CubicHermiteSpline,
    :QuinticHermiteSpline,
)
    # 1D Interpolations (uType<:AbstractVector)
    @eval function (interp::$(I){uType})(t::GradientTracer) where {uType<:AbstractVector}
        return gradient_tracer_1_to_1(t, false)
    end
    @eval function (interp::$(I){uType})(t::HessianTracer) where {uType<:AbstractVector}
        return hessian_tracer_1_to_1(t, false, false)
    end
    # ND Interpolations (uType<:AbstractMatrix)
    # Every output state depends on the same scalar input, so the tracer is
    # broadcast into a lazy `Fill` of length `nstates`.
    @eval function (interp::$(I){uType})(t::GradientTracer) where {uType<:AbstractMatrix}
        t = gradient_tracer_1_to_1(t, false)
        nstates = size(interp.u, 1)
        return Fill(t, nstates)
    end
    @eval function (interp::$(I){uType})(t::HessianTracer) where {uType<:AbstractMatrix}
        t = hessian_tracer_1_to_1(t, false, false)
        nstates = size(interp.u, 1)
        return Fill(t, nstates)
    end
end
# Some Interpolations require custom overloads on `Dual` due to mutation of caches.
# Primal and tracer parts are evaluated separately and recombined.
for I in (
    :LagrangeInterpolation,
    :BSplineInterpolation,
    :BSplineApprox,
    :CubicHermiteSpline,
    :QuinticHermiteSpline,
)
    # 1D: scalar primal and scalar tracer -> single Dual
    @eval function (interp::$(I){uType})(d::Dual) where {uType<:AbstractVector}
        p = interp(primal(d))
        t = interp(tracer(d))
        return Dual(p, t)
    end
    # ND: vector primal and tracer `Fill` -> element-wise Duals
    @eval function (interp::$(I){uType})(d::Dual) where {uType<:AbstractMatrix}
        p = interp(primal(d))
        t = interp(tracer(d))
        return Dual.(p, t)
    end
end
#=======================#
# ConstantInterpolation #
#=======================#
# Piecewise-constant: first and second derivatives are zero almost everywhere,
# hence all Boolean flags are `true` (derivative globally treated as zero).
# 1D Interpolations (uType<:AbstractVector)
function (interp::ConstantInterpolation{uType})(
    t::GradientTracer
) where {uType<:AbstractVector}
    return gradient_tracer_1_to_1(t, true)
end
function (interp::ConstantInterpolation{uType})(
    t::HessianTracer
) where {uType<:AbstractVector}
    return hessian_tracer_1_to_1(t, true, true)
end
# ND Interpolations (uType<:AbstractMatrix)
function (interp::ConstantInterpolation{uType})(
    t::GradientTracer
) where {uType<:AbstractMatrix}
    t = gradient_tracer_1_to_1(t, true)
    nstates = size(interp.u, 1)
    return Fill(t, nstates)
end
function (interp::ConstantInterpolation{uType})(
    t::HessianTracer
) where {uType<:AbstractMatrix}
    t = hessian_tracer_1_to_1(t, true, true)
    nstates = size(interp.u, 1)
    return Fill(t, nstates)
end
#=====================#
# LinearInterpolation #
#=====================#
# Piecewise-linear: first derivative is generally nonzero (`false`),
# second derivative is zero almost everywhere (`true`).
# 1D Interpolations (uType<:AbstractVector)
function (interp::LinearInterpolation{uType})(
    t::GradientTracer
) where {uType<:AbstractVector}
    return gradient_tracer_1_to_1(t, false)
end
function (interp::LinearInterpolation{uType})(
    t::HessianTracer
) where {uType<:AbstractVector}
    return hessian_tracer_1_to_1(t, false, true)
end
# ND Interpolations (uType<:AbstractMatrix)
function (interp::LinearInterpolation{uType})(
    t::GradientTracer
) where {uType<:AbstractMatrix}
    t = gradient_tracer_1_to_1(t, false)
    nstates = size(interp.u, 1)
    return Fill(t, nstates)
end
function (interp::LinearInterpolation{uType})(
    t::HessianTracer
) where {uType<:AbstractMatrix}
    t = hessian_tracer_1_to_1(t, false, true)
    nstates = size(interp.u, 1)
    return Fill(t, nstates)
end
end # module SparseConnectivityTracerDataInterpolationsExt
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2426 | module SparseConnectivityTracerLogExpFunctionsExt
if isdefined(Base, :get_extension)
import SparseConnectivityTracer as SCT
using LogExpFunctions:
LogExpFunctions,
cexpexp,
cloglog,
log1mexp,
log1mlogistic,
log1pexp,
log1pmx,
log1psq,
log2mexp,
logabssinh,
logaddexp,
logcosh,
logexpm1,
logistic,
logit,
logit1mexp,
logitexp,
loglogistic,
logmxp1,
logsubexp,
xexpx,
xexpy,
xlog1py,
xlogx,
xlogy
else
import ..SparseConnectivityTracer as SCT
using ..LogExpFunctions:
LogExpFunctions,
cexpexp,
cloglog,
log1mexp,
log1mlogistic,
log1pexp,
log1pmx,
log1psq,
log2mexp,
logabssinh,
logaddexp,
logcosh,
logexpm1,
logistic,
logit,
logit1mexp,
logitexp,
loglogistic,
logmxp1,
logsubexp,
xexpx,
xexpy,
xlog1py,
xlogx,
xlogy
end
## 1-to-1 functions
# All of these are smooth with globally nonzero first AND second derivatives,
# so both classification predicates return `false` (derivative not zero).
ops_1_to_1 = (
    xlogx,
    xexpx,
    logistic,
    logit,
    logcosh,
    logabssinh,
    log1psq,
    log1pexp,
    log1mexp,
    log2mexp,
    logexpm1,
    log1pmx,
    logmxp1,
    cloglog,
    cexpexp,
    loglogistic,
    logitexp,
    log1mlogistic,
    logit1mexp,
    # softplus, # alias for log1pexp
    # invsoftplus, # alias for logexpm1
)
for op in ops_1_to_1
    T = typeof(op)
    @eval SCT.is_der1_zero_global(::$T) = false
    @eval SCT.is_der2_zero_global(::$T) = false
end
## 2-to-1 functions
# Bivariate functions with nonzero first, second, and cross derivatives.
ops_2_to_1 = (xlogy, xlog1py, xexpy, logaddexp, logsubexp)
for op in ops_2_to_1
    T = typeof(op)
    @eval SCT.is_der1_arg1_zero_global(::$T) = false
    @eval SCT.is_der2_arg1_zero_global(::$T) = false
    @eval SCT.is_der1_arg2_zero_global(::$T) = false
    @eval SCT.is_der2_arg2_zero_global(::$T) = false
    @eval SCT.is_der_cross_zero_global(::$T) = false
end
## Generate overloads
eval(SCT.generate_code_1_to_1(:LogExpFunctions, ops_1_to_1))
eval(SCT.generate_code_2_to_1(:LogExpFunctions, ops_2_to_1))
## List operators for later testing
SCT.test_operators_1_to_1(::Val{:LogExpFunctions}) = ops_1_to_1
SCT.test_operators_2_to_1(::Val{:LogExpFunctions}) = ops_2_to_1
SCT.test_operators_1_to_2(::Val{:LogExpFunctions}) = ()
end # module
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2918 | # NNlib activation functions on tracers.
# Parametric activation functions with two or more arguments are ignored.
module SparseConnectivityTracerNNlibExt
if isdefined(Base, :get_extension)
import SparseConnectivityTracer as SCT
using NNlib:
NNlib,
celu,
elu,
gelu,
hardswish,
hardtanh,
hardσ,
leakyrelu,
lisht,
logcosh,
logσ,
mish,
relu,
relu6,
selu,
sigmoid_fast,
softplus,
softshrink,
softsign,
swish,
tanh_fast,
tanhshrink,
trelu,
σ
else
import ..SparseConnectivityTracer as SCT
using ..NNlib:
NNlib,
celu,
elu,
gelu,
hardswish,
hardtanh,
hardσ,
leakyrelu,
lisht,
logcosh,
logσ,
mish,
relu,
relu6,
selu,
sigmoid_fast,
softplus,
softshrink,
softsign,
swish,
tanh_fast,
tanhshrink,
trelu,
σ
end
## 1-to-1
# ops_1_to_1_s:
# x -> f != 0
# ∂f/∂x != 0
# ∂²f/∂x² != 0
ops_1_to_1_s = (
    # ReLU-like activation functions
    celu,
    elu,
    gelu,
    selu,
    # Other activation functions
    σ, # sigmoid
    hardswish,
    lisht,
    logσ,
    logcosh,
    mish,
    sigmoid_fast,
    softplus,
    softsign,
    swish,
    tanh_fast,
    tanhshrink,
)
for op in ops_1_to_1_s
    T = typeof(op)
    @eval SCT.is_der1_zero_global(::$T) = false
    @eval SCT.is_der2_zero_global(::$T) = false
end
# Local refinements: celu/elu/selu are linear (second derivative zero) on the
# positive branch; hardswish is constant below -3 and linear outside [-3, 3].
SCT.is_der2_zero_local(::typeof(celu), x) = x > 0
SCT.is_der2_zero_local(::typeof(elu), x) = x > 0
SCT.is_der2_zero_local(::typeof(selu), x) = x > 0
SCT.is_der1_zero_local(::typeof(hardswish), x) = x < -3
SCT.is_der2_zero_local(::typeof(hardswish), x) = x < -3 || x > 3
# ops_1_to_1_f:
# x -> f != 0
# ∂f/∂x != 0
# ∂²f/∂x² == 0
ops_1_to_1_f = (
    # ReLU-like activation functions
    leakyrelu,
    relu,
    relu6,
    trelu,
    # Other activation functions
    hardσ,
    hardtanh,
    softshrink,
)
for op in ops_1_to_1_f
    T = typeof(op)
    @eval SCT.is_der1_zero_global(::$T) = false
    @eval SCT.is_der2_zero_global(::$T) = true
end
# Local refinements: first derivative vanishes on the flat segments of these
# piecewise-linear activations.
SCT.is_der1_zero_local(::typeof(relu), x) = x < 0
SCT.is_der1_zero_local(::typeof(relu6), x) = x < 0 || x > 6
SCT.is_der1_zero_local(::typeof(trelu), x) = x < 1
SCT.is_der1_zero_local(::typeof(hardσ), x) = x < -3 || x > 3
SCT.is_der1_zero_local(::typeof(hardtanh), x) = x < -1 || x > 1
SCT.is_der1_zero_local(::typeof(softshrink), x) = x > -0.5 && x < 0.5
ops_1_to_1 = union(ops_1_to_1_s, ops_1_to_1_f)
## Overload
eval(SCT.generate_code_1_to_1(:NNlib, ops_1_to_1))
## List operators for later testing
SCT.test_operators_1_to_1(::Val{:NNlib}) = ops_1_to_1
SCT.test_operators_2_to_1(::Val{:NNlib}) = ()
SCT.test_operators_1_to_2(::Val{:NNlib}) = ()
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2284 | module SparseConnectivityTracerNaNMathExt
if isdefined(Base, :get_extension)
import SparseConnectivityTracer as SCT
using NaNMath: NaNMath
else
import ..SparseConnectivityTracer as SCT
using ..NaNMath: NaNMath
end
## 1-to-1
# ops_1_to_1_s:
# x -> f != 0
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# NaN-returning counterparts of Base functions; derivative structure matches
# the corresponding Base classifications in src/operators.jl.
ops_1_to_1_s = (
    NaNMath.sqrt,
    NaNMath.sin,
    NaNMath.cos,
    NaNMath.tan,
    NaNMath.asin,
    NaNMath.acos,
    NaNMath.acosh,
    NaNMath.atanh,
    NaNMath.log,
    NaNMath.log2,
    NaNMath.log10,
    NaNMath.log1p,
    NaNMath.lgamma,
)
for op in ops_1_to_1_s
    T = typeof(op)
    @eval SCT.is_der1_zero_global(::$T) = false
    @eval SCT.is_der2_zero_global(::$T) = false
end
ops_1_to_1 = ops_1_to_1_s
## 2-to-1
# ops_2_to_1_ssc:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y != 0
ops_2_to_1_ssc = (NaNMath.pow,)
for op in ops_2_to_1_ssc
    T = typeof(op)
    @eval SCT.is_der1_arg1_zero_global(::$T) = false
    @eval SCT.is_der2_arg1_zero_global(::$T) = false
    @eval SCT.is_der1_arg2_zero_global(::$T) = false
    @eval SCT.is_der2_arg2_zero_global(::$T) = false
    @eval SCT.is_der_cross_zero_global(::$T) = false
end
# ops_2_to_1_ffz:
# ∂f/∂x != 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
ops_2_to_1_ffz = (NaNMath.max, NaNMath.min)
for op in ops_2_to_1_ffz
    T = typeof(op)
    @eval SCT.is_der1_arg1_zero_global(::$T) = false
    @eval SCT.is_der2_arg1_zero_global(::$T) = true
    @eval SCT.is_der1_arg2_zero_global(::$T) = false
    @eval SCT.is_der2_arg2_zero_global(::$T) = true
    @eval SCT.is_der_cross_zero_global(::$T) = true
end
# Locally, max/min depend only on the selected argument.
SCT.is_der1_arg1_zero_local(::typeof(NaNMath.max), x, y) = x < y
SCT.is_der1_arg2_zero_local(::typeof(NaNMath.max), x, y) = y < x
SCT.is_der1_arg1_zero_local(::typeof(NaNMath.min), x, y) = x > y
SCT.is_der1_arg2_zero_local(::typeof(NaNMath.min), x, y) = y > x
ops_2_to_1 = union(ops_2_to_1_ssc, ops_2_to_1_ffz)
## Overloads
eval(SCT.generate_code_1_to_1(:NaNMath, ops_1_to_1))
eval(SCT.generate_code_2_to_1(:NaNMath, ops_2_to_1))
## List operators for later testing
SCT.test_operators_1_to_1(::Val{:NaNMath}) = ops_1_to_1
SCT.test_operators_2_to_1(::Val{:NaNMath}) = ops_2_to_1
SCT.test_operators_1_to_2(::Val{:NaNMath}) = ()
end # module
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 4002 | module SparseConnectivityTracerSpecialFunctionsExt
if isdefined(Base, :get_extension)
import SparseConnectivityTracer as SCT
using SpecialFunctions:
SpecialFunctions,
airyai,
airyaiprime,
airyaiprimex,
airyaix,
airybi,
airybiprime,
airybiprimex,
airybix,
besseli,
besselix,
besselj,
besselj0,
besselj1,
besseljx,
besselk,
besselkx,
bessely,
bessely0,
bessely1,
besselyx,
beta,
cosint,
digamma,
ellipe,
ellipk,
erf,
erfc,
erfcinv,
erfcx,
erfinv,
expint,
expinti,
expintx,
gamma,
invdigamma,
jinc,
logbeta,
logerfc,
loggamma,
sinint,
sphericalbesselj,
sphericalbessely,
trigamma
else
import ..SparseConnectivityTracer as SCT
using ..SpecialFunctions:
SpecialFunctions,
airyai,
airyaiprime,
airyaiprimex,
airyaix,
airybi,
airybiprime,
airybiprimex,
airybix,
besseli,
besselix,
besselj,
besselj0,
besselj1,
besseljx,
besselk,
besselkx,
bessely,
bessely0,
bessely1,
besselyx,
beta,
cosint,
digamma,
ellipe,
ellipk,
erf,
erfc,
erfcinv,
erfcx,
erfinv,
expint,
expinti,
expintx,
gamma,
invdigamma,
jinc,
logbeta,
logerfc,
loggamma,
sinint,
sphericalbesselj,
sphericalbessely,
trigamma
end
#=
Complex functions are ignored.
Functions with more than 2 arguments are ignored.
TODO: add functions with integer arguments.
=#
## 1-to-1
# ops_1_to_1_s:
# x -> f != 0
# ∂f/∂x != 0
# ∂²f/∂x² != 0
ops_1_to_1_s = (
# Gamma Function
gamma,
loggamma,
digamma,
invdigamma,
trigamma,
# Exponential and Trigonometric Integrals
expinti,
sinint,
cosint,
# Error functions, Dawson's and Fresnel Integrals
erf,
erfc,
erfcinv,
erfcx,
logerfc,
erfinv,
# Airy and Related Functions
airyai,
airyaiprime,
airybi,
airybiprime,
airyaix,
airyaiprimex,
airybix,
airybiprimex,
# Bessel Functions
besselj0,
besselj1,
bessely0,
bessely1,
jinc,
# Elliptic Integrals
ellipk,
ellipe,
)
for op in ops_1_to_1_s
T = typeof(op)
@eval SCT.is_der1_zero_global(::$T) = false
@eval SCT.is_der2_zero_global(::$T) = false
end
ops_1_to_1 = ops_1_to_1_s
## 2-to-1
# ops_2_to_1_ssc:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y != 0
# Two-argument methods (e.g. incomplete gamma, order+argument Bessel calls).
ops_2_to_1_ssc = (
    # Gamma Function
    gamma,
    loggamma,
    beta,
    logbeta,
    # Exponential and Trigonometric Integrals
    expint,
    expintx,
    # Error functions, Dawson's and Fresnel Integrals
    erf,
    # Bessel Functions
    besselj,
    besseljx,
    sphericalbesselj,
    bessely,
    besselyx,
    sphericalbessely,
    besseli,
    besselix,
    besselk,
    besselkx,
)
for op in ops_2_to_1_ssc
    T = typeof(op)
    @eval SCT.is_der1_arg1_zero_global(::$T) = false
    @eval SCT.is_der2_arg1_zero_global(::$T) = false
    @eval SCT.is_der1_arg2_zero_global(::$T) = false
    @eval SCT.is_der2_arg2_zero_global(::$T) = false
    @eval SCT.is_der_cross_zero_global(::$T) = false
end
ops_2_to_1 = ops_2_to_1_ssc
## Overloads
eval(SCT.generate_code_1_to_1(:SpecialFunctions, ops_1_to_1))
eval(SCT.generate_code_2_to_1(:SpecialFunctions, ops_2_to_1))
## List operators for later testing
SCT.test_operators_1_to_1(::Val{:SpecialFunctions}) = ops_1_to_1
SCT.test_operators_2_to_1(::Val{:SpecialFunctions}) = ops_2_to_1
SCT.test_operators_1_to_2(::Val{:SpecialFunctions}) = ()
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1937 | module SparseConnectivityTracer
using ADTypes: ADTypes, jacobian_sparsity, hessian_sparsity
using SparseArrays: SparseArrays
using SparseArrays: sparse
using Random: AbstractRNG, SamplerType
using LinearAlgebra: LinearAlgebra, Symmetric
using LinearAlgebra: Diagonal, diag, diagind
using FillArrays: Fill
using DocStringExtensions: DocStringExtensions, TYPEDEF, TYPEDFIELDS
if !isdefined(Base, :get_extension)
using Requires
end
# Custom index-set types used by sparsity patterns
include("settypes/duplicatevector.jl")
include("settypes/recursiveset.jl")
include("settypes/sortedvector.jl")
# Core data structures and operator classification
include("patterns.jl")
include("tracers.jl")
include("exceptions.jl")
include("operators.jl")
# Operator overloads on tracer types
include("overloads/conversion.jl")
include("overloads/gradient_tracer.jl")
include("overloads/hessian_tracer.jl")
include("overloads/special_cases.jl")
include("overloads/ifelse_global.jl")
include("overloads/dual.jl")
include("overloads/arrays.jl")
include("overloads/utils.jl")
include("overloads/ambiguities.jl")
# User-facing entry points
include("trace_functions.jl")
include("adtypes_interface.jl")
export TracerSparsityDetector
export TracerLocalSparsityDetector
# Reexport ADTypes interface
export jacobian_sparsity, hessian_sparsity
# On Julia versions without native package extensions (`Base.get_extension`),
# load the extension files lazily via Requires.jl when the weak dependency
# is loaded by the user.
function __init__()
    @static if !isdefined(Base, :get_extension)
        @require LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688" include(
            "../ext/SparseConnectivityTracerLogExpFunctionsExt.jl"
        )
        @require NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" include(
            "../ext/SparseConnectivityTracerNaNMathExt.jl"
        )
        @require NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" include(
            "../ext/SparseConnectivityTracerNNlibExt.jl"
        )
        @require SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" include(
            "../ext/SparseConnectivityTracerSpecialFunctionsExt.jl"
        )
        # NOTE: DataInterpolations extension is not loaded on Julia <1.10
    end
end
end # module
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 5547 | #= This file implements the ADTypes interface for `AbstractSparsityDetector`s =#
# Default pattern/tracer types used when a detector is constructed without
# explicit `gradient_tracer_type`/`hessian_tracer_type` keyword arguments.
const DEFAULT_GRADIENT_PATTERN = IndexSetGradientPattern{Int,BitSet}
const DEFAULT_GRADIENT_TRACER = GradientTracer{DEFAULT_GRADIENT_PATTERN}
const DEFAULT_HESSIAN_PATTERN = DictHessianPattern{Int,BitSet,Dict{Int,BitSet},NotShared}
const DEFAULT_HESSIAN_TRACER = HessianTracer{DEFAULT_HESSIAN_PATTERN}
"""
TracerSparsityDetector <: ADTypes.AbstractSparsityDetector
Singleton struct for integration with the sparsity detection framework of [ADTypes.jl](https://github.com/SciML/ADTypes.jl).
Computes global sparsity patterns over the entire input domain.
For local sparsity patterns at a specific input point, use [`TracerLocalSparsityDetector`](@ref).
# Example
```jldoctest
julia> using SparseConnectivityTracer
julia> detector = TracerSparsityDetector()
TracerSparsityDetector()
julia> jacobian_sparsity(diff, rand(4), detector)
3×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 6 stored entries:
1 1 ⋅ ⋅
⋅ 1 1 ⋅
⋅ ⋅ 1 1
julia> f(x) = x[1] + x[2]*x[3] + 1/x[4];
julia> hessian_sparsity(f, rand(4), detector)
4×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 3 stored entries:
⋅ ⋅ ⋅ ⋅
⋅ ⋅ 1 ⋅
⋅ 1 ⋅ ⋅
⋅ ⋅ ⋅ 1
```
"""
struct TracerSparsityDetector{TG<:GradientTracer,TH<:HessianTracer} <:
       ADTypes.AbstractSparsityDetector end
# Positional constructor from explicit tracer types.
function TracerSparsityDetector(
    ::Type{TG}, ::Type{TH}
) where {TG<:GradientTracer,TH<:HessianTracer}
    return TracerSparsityDetector{TG,TH}()
end
# Keyword constructor with package defaults.
function TracerSparsityDetector(;
    gradient_tracer_type::Type{TG}=DEFAULT_GRADIENT_TRACER,
    hessian_tracer_type::Type{TH}=DEFAULT_HESSIAN_TRACER,
) where {TG<:GradientTracer,TH<:HessianTracer}
    return TracerSparsityDetector(gradient_tracer_type, hessian_tracer_type)
end
# ADTypes interface: dispatch to the internal global tracing functions,
# passing only the tracer *type* (values are irrelevant for global patterns).
function ADTypes.jacobian_sparsity(f, x, ::TracerSparsityDetector{TG,TH}) where {TG,TH}
    return _jacobian_sparsity(f, x, TG)
end
function ADTypes.jacobian_sparsity(f!, y, x, ::TracerSparsityDetector{TG,TH}) where {TG,TH}
    return _jacobian_sparsity(f!, y, x, TG)
end
function ADTypes.hessian_sparsity(f, x, ::TracerSparsityDetector{TG,TH}) where {TG,TH}
    return _hessian_sparsity(f, x, TH)
end
"""
TracerLocalSparsityDetector <: ADTypes.AbstractSparsityDetector
Singleton struct for integration with the sparsity detection framework of [ADTypes.jl](https://github.com/SciML/ADTypes.jl).
Computes local sparsity patterns at an input point `x`.
For global sparsity patterns, use [`TracerSparsityDetector`](@ref).
# Example
Local sparsity patterns are less convervative than global patterns and need to be recomputed for each input `x`:
```jldoctest
julia> using SparseConnectivityTracer
julia> detector = TracerLocalSparsityDetector()
TracerLocalSparsityDetector()
julia> f(x) = x[1] * x[2]; # J_f = [x[2], x[1]]
julia> jacobian_sparsity(f, [1, 0], detector)
1×2 SparseArrays.SparseMatrixCSC{Bool, Int64} with 1 stored entry:
⋅ 1
julia> jacobian_sparsity(f, [0, 1], detector)
1×2 SparseArrays.SparseMatrixCSC{Bool, Int64} with 1 stored entry:
1 ⋅
julia> jacobian_sparsity(f, [0, 0], detector)
1×2 SparseArrays.SparseMatrixCSC{Bool, Int64} with 0 stored entries:
⋅ ⋅
julia> jacobian_sparsity(f, [1, 1], detector)
1×2 SparseArrays.SparseMatrixCSC{Bool, Int64} with 2 stored entries:
1 1
```
`TracerLocalSparsityDetector` can compute sparsity patterns of functions that contain comparisons and `ifelse` statements:
```jldoctest
julia> f(x) = x[1] > x[2] ? x[1:3] : x[2:4];
julia> jacobian_sparsity(f, [1, 2, 3, 4], TracerLocalSparsityDetector())
3×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 3 stored entries:
⋅ 1 ⋅ ⋅
⋅ ⋅ 1 ⋅
⋅ ⋅ ⋅ 1
julia> jacobian_sparsity(f, [2, 1, 3, 4], TracerLocalSparsityDetector())
3×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 3 stored entries:
1 ⋅ ⋅ ⋅
⋅ 1 ⋅ ⋅
⋅ ⋅ 1 ⋅
```
```jldoctest
julia> f(x) = x[1] + max(x[2], x[3]) * x[3] + 1/x[4];
julia> hessian_sparsity(f, [1.0, 2.0, 3.0, 4.0], TracerLocalSparsityDetector())
4×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 2 stored entries:
⋅ ⋅ ⋅ ⋅
⋅ ⋅ ⋅ ⋅
⋅ ⋅ 1 ⋅
⋅ ⋅ ⋅ 1
```
"""
struct TracerLocalSparsityDetector{TG<:GradientTracer,TH<:HessianTracer} <:
       ADTypes.AbstractSparsityDetector end
# Positional constructor from explicit tracer types.
function TracerLocalSparsityDetector(
    ::Type{TG}, ::Type{TH}
) where {TG<:GradientTracer,TH<:HessianTracer}
    return TracerLocalSparsityDetector{TG,TH}()
end
# Keyword constructor with package defaults.
function TracerLocalSparsityDetector(;
    gradient_tracer_type::Type{TG}=DEFAULT_GRADIENT_TRACER,
    hessian_tracer_type::Type{TH}=DEFAULT_HESSIAN_TRACER,
) where {TG<:GradientTracer,TH<:HessianTracer}
    return TracerLocalSparsityDetector(gradient_tracer_type, hessian_tracer_type)
end
# ADTypes interface: dispatch to the internal *local* tracing functions,
# which evaluate at the concrete input point `x`.
function ADTypes.jacobian_sparsity(f, x, ::TracerLocalSparsityDetector{TG,TH}) where {TG,TH}
    return _local_jacobian_sparsity(f, x, TG)
end
function ADTypes.jacobian_sparsity(
    f!, y, x, ::TracerLocalSparsityDetector{TG,TH}
) where {TG,TH}
    return _local_jacobian_sparsity(f!, y, x, TG)
end
function ADTypes.hessian_sparsity(f, x, ::TracerLocalSparsityDetector{TG,TH}) where {TG,TH}
    return _local_hessian_sparsity(f, x, TH)
end
## Pretty printing
# Hide the type parameters in `show` when the detector uses the package
# defaults, so the common case prints compactly (e.g. `TracerSparsityDetector()`).
for detector in (:TracerSparsityDetector, :TracerLocalSparsityDetector)
    @eval function Base.show(io::IO, d::$detector{TG,TH}) where {TG,TH}
        if TG == DEFAULT_GRADIENT_TRACER && TH == DEFAULT_HESSIAN_TRACER
            print(io, $detector, "()")
        else
            print(io, $detector, "{", TG, ",", TH, "}()")
        end
        return nothing
    end
end
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 368 | struct MissingPrimalError <: Exception
fn::Function
tracer::AbstractTracer
end
# Explain a `MissingPrimalError` to the user and point to
# `TracerLocalSparsityDetector` as the remedy.
function Base.showerror(io::IO, e::MissingPrimalError)
    print(io, "Function ", e.fn, " requires primal value(s).")
    print(io, '\n')
    hint = "A dual-number tracer for local sparsity detection can be used via `TracerLocalSparsityDetector`."
    print(io, hint)
    return nothing
end
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 13818 | ## Operator definitions
# We use a system of letters to categorize operators:
# z: first- and second-order derivatives (FOD, SOD) are zero
# f: FOD ∂f/∂x is non-zero, SOD ∂²f/∂x² is zero
# s: FOD ∂f/∂x is non-zero, SOD ∂²f/∂x² is non-zero
# c: Cross-derivative ∂²f/∂x∂y is non-zero
#! format: off
##=================================#
# Operators for functions f: ℝ → ℝ #
#==================================#
# Classification predicates for unary operators: `true` means the respective
# derivative is identically zero. Methods are added per operator via `@eval`.
function is_der1_zero_global end
function is_der2_zero_global end
# Fallbacks for local derivatives:
# by default, the local classification equals the global one.
is_der1_zero_local(f::F, x) where {F} = is_der1_zero_global(f)
is_der2_zero_local(f::F, x) where {F} = is_der2_zero_global(f)
# ops_1_to_1_s:
# x -> f != 0
# ∂f/∂x != 0
# ∂²f/∂x² != 0
ops_1_to_1_s = (
    # trigonometric functions
    cos, cosd, cosh, cospi, cosc,
    sin, sind, sinh, sinpi, sinc,
    tan, tand, tanh,
    # reciprocal trigonometric functions
    csc, cscd, csch,
    sec, secd, sech,
    cot, cotd, coth,
    # inverse trigonometric functions
    acos, acosd, acosh,
    asin, asind, asinh,
    atan, atand, atanh,
    asec, asech,
    acsc, acsch,
    acot, acoth,
    # exponentials
    exp, exp2, exp10, expm1,
    log, log2, log10, log1p,
    # roots
    sqrt, cbrt,
    # absolute values
    abs2,
    # other
    inv,
)
# Both first and second derivatives are globally nonzero.
for op in ops_1_to_1_s
    T = typeof(op)
    @eval is_der1_zero_global(::$T) = false
    @eval is_der2_zero_global(::$T) = false
end
# ops_1_to_1_f:
# x -> f != 0
# ∂f/∂x != 0
# ∂²f/∂x² == 0
ops_1_to_1_f = (
    +, -,
    identity,
    abs, hypot,
    # angles
    deg2rad, rad2deg, mod2pi,
    # floats
    float, prevfloat, nextfloat,
    big, widen,
    # linalg
    transpose, adjoint,
    # complex
    conj, real,
)
# (Locally) linear operators: nonzero first, zero second derivative.
for op in ops_1_to_1_f
    T = typeof(op)
    @eval is_der1_zero_global(::$T) = false
    @eval is_der2_zero_global(::$T) = true
end
# ops_1_to_1_z:
# x -> f != 0
# ∂f/∂x == 0
# ∂²f/∂x² == 0
ops_1_to_1_z = (
    round, floor, ceil, trunc,
    sign, !
)
# Piecewise-constant operators: output depends on input, but both derivatives
# are zero almost everywhere.
for op in ops_1_to_1_z
    T = typeof(op)
    @eval is_der1_zero_global(::$T) = true
    @eval is_der2_zero_global(::$T) = true
end
# ops_1_to_1_i:
# x -> f == 0
# ∂f/∂x == 0
# ∂²f/∂x² == 0
ops_1_to_1_i = (
    zero, one, oneunit,
    typemin, typemax, eps,
    floatmin, floatmax, maxintfloat,
)
# Operators that depend only on the input *type*, never on its value.
for op in ops_1_to_1_i
    T = typeof(op)
    @eval is_der1_zero_global(::$T) = true
    @eval is_der2_zero_global(::$T) = true
end
ops_1_to_1 = union(
    ops_1_to_1_s,
    ops_1_to_1_f,
    ops_1_to_1_z,
    ops_1_to_1_i,
)
##==================================#
# Operators for functions f: ℝ² → ℝ #
#===================================#
# Classification predicates for binary operators: `true` means the respective
# (partial or cross) derivative is identically zero.
function is_der1_arg1_zero_global end
function is_der2_arg1_zero_global end
function is_der1_arg2_zero_global end
function is_der2_arg2_zero_global end
function is_der_cross_zero_global end
# Fallbacks for local derivatives:
# by default, the local classification equals the global one.
is_der1_arg1_zero_local(f::F, x, y) where {F} = is_der1_arg1_zero_global(f)
is_der2_arg1_zero_local(f::F, x, y) where {F} = is_der2_arg1_zero_global(f)
is_der1_arg2_zero_local(f::F, x, y) where {F} = is_der1_arg2_zero_global(f)
is_der2_arg2_zero_local(f::F, x, y) where {F} = is_der2_arg2_zero_global(f)
is_der_cross_zero_local(f::F, x, y) where {F} = is_der_cross_zero_global(f)
# ops_2_to_1_ssc:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y != 0
# Fully coupled smooth binary operators (two-argument methods).
ops_2_to_1_ssc = (
    ^, hypot, atan, log
)
for op in ops_2_to_1_ssc
    T = typeof(op)
    @eval is_der1_arg1_zero_global(::$T) = false
    @eval is_der2_arg1_zero_global(::$T) = false
    @eval is_der1_arg2_zero_global(::$T) = false
    @eval is_der2_arg2_zero_global(::$T) = false
    @eval is_der_cross_zero_global(::$T) = false
end
# ops_2_to_1_ssz:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y == 0
ops_2_to_1_ssz = ()
#=
for op in ops_2_to_1_ssz
T = typeof(op)
@eval is_der2_arg1_zero_global(::$T) = false
@eval is_der1_arg1_zero_global(::$T) = false
@eval is_der1_arg2_zero_global(::$T) = false
@eval is_der2_arg2_zero_global(::$T) = false
@eval is_der_cross_zero_global(::$T) = true
end
=#
# ops_2_to_1_sfc:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y != 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y != 0
ops_2_to_1_sfc = ()
#=
for op in ops_2_to_1_sfc
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = false
@eval is_der2_arg1_zero_global(::$T) = false
@eval is_der1_arg2_zero_global(::$T) = false
@eval is_der2_arg2_zero_global(::$T) = true
@eval is_der_cross_zero_global(::$T) = false
end
=#
# ops_2_to_1_sfz:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y != 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
ops_2_to_1_sfz = ()
#=
for op in ops_2_to_1_sfz
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = false
@eval is_der2_arg1_zero_global(::$T) = false
@eval is_der1_arg2_zero_global(::$T) = false
@eval is_der2_arg2_zero_global(::$T) = true
@eval is_der_cross_zero_global(::$T) = true
end
=#
# ops_2_to_1_fsc:
# ∂f/∂x != 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y != 0
ops_2_to_1_fsc = (
    /,
    # ldexp, # TODO: removed for now
)
for op in ops_2_to_1_fsc
    T = typeof(op)
    @eval is_der1_arg1_zero_global(::$T) = false
    @eval is_der2_arg1_zero_global(::$T) = true
    @eval is_der1_arg2_zero_global(::$T) = false
    @eval is_der2_arg2_zero_global(::$T) = false
    @eval is_der_cross_zero_global(::$T) = false
end
# gradient of x/y: [1/y -x/y²]
# Locally, the derivative w.r.t. the denominator vanishes when x == 0.
is_der1_arg2_zero_local(::typeof(/), x, y) = iszero(x)
# ops_2_to_1_fsz:
# ∂f/∂x != 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y == 0
ops_2_to_1_fsz = ()
#=
for op in ops_2_to_1_fsz
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = false
@eval is_der2_arg1_zero_global(::$T) = true
@eval is_der1_arg2_zero_global(::$T) = false
@eval is_der2_arg2_zero_global(::$T) = false
@eval is_der_cross_zero_global(::$T) = true
end
=#
# ops_2_to_1_ffc:
# ∂f/∂x != 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y != 0
# Bilinear operator: only the cross derivative is nonzero at second order.
ops_2_to_1_ffc = (
    *,
)
for op in ops_2_to_1_ffc
    T = typeof(op)
    @eval is_der1_arg1_zero_global(::$T) = false
    @eval is_der2_arg1_zero_global(::$T) = true
    @eval is_der1_arg2_zero_global(::$T) = false
    @eval is_der2_arg2_zero_global(::$T) = true
    @eval is_der_cross_zero_global(::$T) = false
end
# gradient of x*y: [y x]
# Locally, each partial derivative vanishes when the *other* factor is zero.
is_der1_arg1_zero_local(::typeof(*), x, y) = iszero(y)
is_der1_arg2_zero_local(::typeof(*), x, y) = iszero(x)
# ops_2_to_1_ffz:
# ∂f/∂x != 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
ops_2_to_1_ffz = (
    +, -,
    mod, rem,
    min, max,
)
for op in ops_2_to_1_ffz
    T = typeof(op)
    @eval is_der1_arg1_zero_global(::$T) = false
    @eval is_der2_arg1_zero_global(::$T) = true
    @eval is_der1_arg2_zero_global(::$T) = false
    @eval is_der2_arg2_zero_global(::$T) = true
    @eval is_der_cross_zero_global(::$T) = true
end
# The local derivative of `mod(x, y)` w.r.t. `y` is `-fld(x, y)` (a.e.).
# It vanishes iff `fld(x, y) == 0`, i.e. iff `0 <= x < y` (for y > 0)
# or `y < x <= 0` (for y < 0).
# FIX: the previous condition `ifelse(y > zero(y), y > x, x > y)` ignored the
# sign of `x` and wrongly reported a zero derivative e.g. for x = -1, y = 2,
# where fld(-1, 2) == -1.
is_der1_arg2_zero_local(::typeof(mod), x, y) =
    ifelse(y > zero(y), zero(x) <= x < y, y < x <= zero(x))
# max/min locally depend only on the selected argument.
is_der1_arg1_zero_local(::typeof(max), x, y) = x < y
is_der1_arg2_zero_local(::typeof(max), x, y) = y < x
is_der1_arg1_zero_local(::typeof(min), x, y) = x > y
is_der1_arg2_zero_local(::typeof(min), x, y) = y > x
# ops_2_to_1_szz:
# ∂f/∂x != 0
# ∂²f/∂x² != 0
# ∂f/∂y == 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
ops_2_to_1_szz = ()
#=
for op in ops_2_to_1_szz
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = false
@eval is_der2_arg1_zero_global(::$T) = false
@eval is_der1_arg2_zero_global(::$T) = true
@eval is_der2_arg2_zero_global(::$T) = true
@eval is_der_cross_zero_global(::$T) = true
end
=#
# ops_2_to_1_zsz:
# ∂f/∂x == 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² != 0
# ∂²f/∂x∂y == 0
ops_2_to_1_zsz = ()
#=
for op in ops_2_to_1_zsz
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = true
@eval is_der2_arg1_zero_global(::$T) = true
@eval is_der1_arg2_zero_global(::$T) = false
@eval is_der2_arg2_zero_global(::$T) = false
@eval is_der_cross_zero_global(::$T) = true
end
=#
# ops_2_to_1_fzz:
# ∂f/∂x != 0
# ∂²f/∂x² == 0
# ∂f/∂y == 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
# Second argument only contributes its sign, so its derivative is zero a.e.
ops_2_to_1_fzz = (
    copysign, flipsign,
)
for op in ops_2_to_1_fzz
    T = typeof(op)
    @eval is_der1_arg1_zero_global(::$T) = false
    @eval is_der2_arg1_zero_global(::$T) = true
    @eval is_der1_arg2_zero_global(::$T) = true
    @eval is_der2_arg2_zero_global(::$T) = true
    @eval is_der_cross_zero_global(::$T) = true
end
# ops_2_to_1_zfz:
# ∂f/∂x == 0
# ∂²f/∂x² == 0
# ∂f/∂y != 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
ops_2_to_1_zfz = ()
#=
for op in ops_2_to_1_zfz
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = true
@eval is_der2_arg1_zero_global(::$T) = true
@eval is_der1_arg2_zero_global(::$T) = false
@eval is_der2_arg2_zero_global(::$T) = true
@eval is_der_cross_zero_global(::$T) = true
end
=#
# ops_2_to_1_zfz:
# ∂f/∂x == 0
# ∂²f/∂x² == 0
# ∂f/∂y == 0
# ∂²f/∂y² == 0
# ∂²f/∂x∂y == 0
ops_2_to_1_zzz = (
# division
div, fld, fld1, cld,
# comparisons
isequal, isapprox, isless, ==, <, >, <=, >=,
)
for op in ops_2_to_1_zzz
T = typeof(op)
@eval is_der1_arg1_zero_global(::$T) = true
@eval is_der2_arg1_zero_global(::$T) = true
@eval is_der1_arg2_zero_global(::$T) = true
@eval is_der2_arg2_zero_global(::$T) = true
@eval is_der_cross_zero_global(::$T) = true
end
# Union of all binary-operator classes defined above (used to generate overloads).
ops_2_to_1 = union(
    # Including second-order only
    ops_2_to_1_ssc,
    ops_2_to_1_ssz,
    # Including second- and first-order
    ops_2_to_1_sfc,
    ops_2_to_1_sfz,
    ops_2_to_1_fsc,
    ops_2_to_1_fsz,
    # Including first-order only
    ops_2_to_1_ffc,
    ops_2_to_1_ffz,
    # Including zero-order
    ops_2_to_1_szz,
    ops_2_to_1_zsz,
    ops_2_to_1_fzz,
    ops_2_to_1_zfz,
    ops_2_to_1_zzz,
)
#===================================#
# Operators for functions f: ℝ → ℝ² #
#===================================#

# Function stubs; methods are generated per operator by the `@eval` loops below.
function is_der1_out1_zero_global end
function is_der2_out1_zero_global end
function is_der1_out2_zero_global end
function is_der2_out2_zero_global end

# Fallbacks for local derivatives:
# by default, the local (value-dependent) classification equals the global one.
is_der1_out1_zero_local(f::F, x) where {F} = is_der1_out1_zero_global(f)
is_der2_out1_zero_local(f::F, x) where {F} = is_der2_out1_zero_global(f)
is_der1_out2_zero_local(f::F, x) where {F} = is_der1_out2_zero_global(f)
is_der2_out2_zero_local(f::F, x) where {F} = is_der2_out2_zero_global(f)
# ops_1_to_2_ss:
# ∂f₁/∂x   != 0
# ∂²f₁/∂x² != 0
# ∂f₂/∂x   != 0
# ∂²f₂/∂x² != 0
# Both outputs are fully nonlinear in the input (e.g. trigonometric pairs).
ops_1_to_2_ss = (
    sincos,
    sincosd,
    sincospi,
)
for op in ops_1_to_2_ss
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = false
    @eval is_der2_out1_zero_global(::$T) = false
    @eval is_der1_out2_zero_global(::$T) = false
    @eval is_der2_out2_zero_global(::$T) = false
end
# The following four classes are currently unpopulated; their generator loops
# are kept commented out so they can be enabled when operators are added.

# ops_1_to_2_sf:
# ∂f₁/∂x   != 0
# ∂²f₁/∂x² != 0
# ∂f₂/∂x   != 0
# ∂²f₂/∂x² == 0
ops_1_to_2_sf = ()
#=
for op in ops_1_to_2_sf
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = false
    @eval is_der2_out1_zero_global(::$T) = false
    @eval is_der1_out2_zero_global(::$T) = false
    @eval is_der2_out2_zero_global(::$T) = true
end
=#

# ops_1_to_2_sz:
# ∂f₁/∂x   != 0
# ∂²f₁/∂x² != 0
# ∂f₂/∂x   == 0
# ∂²f₂/∂x² == 0
ops_1_to_2_sz = ()
#=
for op in ops_1_to_2_sz
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = false
    @eval is_der2_out1_zero_global(::$T) = false
    @eval is_der1_out2_zero_global(::$T) = true
    @eval is_der2_out2_zero_global(::$T) = true
end
=#

# ops_1_to_2_fs:
# ∂f₁/∂x   != 0
# ∂²f₁/∂x² == 0
# ∂f₂/∂x   != 0
# ∂²f₂/∂x² != 0
ops_1_to_2_fs = ()
#=
for op in ops_1_to_2_fs
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = false
    @eval is_der2_out1_zero_global(::$T) = true
    @eval is_der1_out2_zero_global(::$T) = false
    @eval is_der2_out2_zero_global(::$T) = false
end
=#

# ops_1_to_2_ff:
# ∂f₁/∂x   != 0
# ∂²f₁/∂x² == 0
# ∂f₂/∂x   != 0
# ∂²f₂/∂x² == 0
ops_1_to_2_ff = ()
#=
for op in ops_1_to_2_ff
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = false
    @eval is_der2_out1_zero_global(::$T) = true
    @eval is_der1_out2_zero_global(::$T) = false
    @eval is_der2_out2_zero_global(::$T) = true
end
=#
# ops_1_to_2_fz:
# ∂f₁/∂x   != 0
# ∂²f₁/∂x² == 0
# ∂f₂/∂x   == 0
# ∂²f₂/∂x² == 0
ops_1_to_2_fz = (
    # frexp, # TODO: removed for now
)
#=
for op in ops_1_to_2_fz
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = false
    @eval is_der2_out1_zero_global(::$T) = true
    @eval is_der1_out2_zero_global(::$T) = true
    @eval is_der2_out2_zero_global(::$T) = true
end
=#

# ops_1_to_2_zs:
# ∂f₁/∂x   == 0
# ∂²f₁/∂x² == 0
# ∂f₂/∂x   != 0
# ∂²f₂/∂x² != 0
ops_1_to_2_zs = ()
#=
for op in ops_1_to_2_zs
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = true
    @eval is_der2_out1_zero_global(::$T) = true
    @eval is_der1_out2_zero_global(::$T) = false
    @eval is_der2_out2_zero_global(::$T) = false
end
=#

# ops_1_to_2_zf:
# ∂f₁/∂x   == 0
# ∂²f₁/∂x² == 0
# ∂f₂/∂x   != 0
# ∂²f₂/∂x² == 0
ops_1_to_2_zf = ()
#=
for op in ops_1_to_2_zf
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = true
    @eval is_der2_out1_zero_global(::$T) = true
    @eval is_der1_out2_zero_global(::$T) = false
    @eval is_der2_out2_zero_global(::$T) = true
end
=#

# ops_1_to_2_zz:
# ∂f₁/∂x   == 0
# ∂²f₁/∂x² == 0
# ∂f₂/∂x   == 0
# ∂²f₂/∂x² == 0
# Both outputs are piecewise constant in the input.
ops_1_to_2_zz = ()
#=
for op in ops_1_to_2_zz
    T = typeof(op)
    @eval is_der1_out1_zero_global(::$T) = true
    @eval is_der2_out1_zero_global(::$T) = true
    @eval is_der1_out2_zero_global(::$T) = true
    @eval is_der2_out2_zero_global(::$T) = true
end
=#
# Union of all ℝ → ℝ² operator classes defined above.
ops_1_to_2 = union(
    ops_1_to_2_ss,
    ops_1_to_2_sf,
    ops_1_to_2_fs,
    ops_1_to_2_ff,
    ops_1_to_2_sz,
    ops_1_to_2_zs,
    ops_1_to_2_fz,
    ops_1_to_2_zf,
    ops_1_to_2_zz,
)

#! format: on
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 8409 | """
    AbstractPattern

Abstract supertype of all sparsity pattern representations.

## Type hierarchy
```
AbstractPattern
├── AbstractGradientPattern: used in GradientTracer
│   └── IndexSetGradientPattern
└── AbstractHessianPattern: used in HessianTracer
    ├── IndexSetHessianPattern
    └── DictHessianPattern
```
"""
abstract type AbstractPattern end
"""
    shared(pattern)

Indicates whether patterns **always** share memory and whether operators are **allowed** to mutate their `AbstractTracer` arguments.
Returns either the `Shared()` or `NotShared()` trait.
If `NotShared()`, patterns **can** share memory and operators are **prohibited** from mutating `AbstractTracer` arguments.

## Note
In practice, memory sharing is limited to second-order information in `AbstractHessianPattern`.
"""
shared(::P) where {P<:AbstractPattern} = shared(P)
shared(::Type{P}) where {P<:AbstractPattern} = NotShared()  # conservative default

# Trait types returned by `shared`.
abstract type SharingBehavior end
struct Shared <: SharingBehavior end
struct NotShared <: SharingBehavior end

# Allow using the sharing traits in boolean contexts.
Base.Bool(::Shared) = true
Base.Bool(::NotShared) = false
"""
    myempty(T)
    myempty(tracer::AbstractTracer)
    myempty(pattern::AbstractPattern)

Constructor for an empty tracer or pattern of type `T` representing a new number (usually an empty pattern).
"""
myempty

"""
    create_patterns(P, xs, is)

Convenience constructor for patterns of type `P` for multiple inputs `xs` and their indices `is`.
"""
create_patterns

"""
    gradient(tracer_or_pattern)

Return a representation of non-zero values ``∇f(x)_{i} ≠ 0`` in the gradient.
"""
gradient

"""
    hessian(tracer_or_pattern)

Return a representation of non-zero values ``∇²f(x)_{ij} ≠ 0`` in the Hessian.
"""
hessian
#===========#
# Utilities #
#===========#

# Fresh, empty set/dict containers, constructible from an instance or a type.
function myempty(::S) where {S<:AbstractSet}
    return S()
end
function myempty(::Type{S}) where {S<:AbstractSet}
    return S()
end
function myempty(::D) where {D<:AbstractDict}
    return D()
end
function myempty(::Type{D}) where {D<:AbstractDict}
    return D()
end

# A set of type `S` containing the single input index `i`.
function seed(::Type{S}, i::Integer) where {S<:AbstractSet}
    return S(i)
end

# In-place union of two same-typed sets.
function myunion!(a::S, b::S) where {S<:AbstractSet}
    return union!(a, b)
end
# Merge dict-of-sets `b` into `a` key by key: rows present in both are unioned
# in place; rows only in `b` are inserted by reference (no copy).
function myunion!(a::D, b::D) where {I<:Integer,S<:AbstractSet{I},D<:AbstractDict{I,S}}
    for (key, row) in b
        if haskey(a, key)
            union!(a[key], row)
        else
            push!(a, key => row)
        end
    end
    return a
end
# Convert a Hessian representation to a set of index tuples `(i, j)`.
tuple_set(s::AbstractSet{Tuple{I,I}}) where {I<:Integer} = s  # already in tuple form
function tuple_set(d::AbstractDict{I,S}) where {I<:Integer,S<:AbstractSet{I}}
    out = Set{Tuple{I,I}}()
    for (k, vs) in d
        for v in vs
            push!(out, (k, v))
        end
    end
    return out
end
"""
    product(a::S{T}, b::S{T})::S{Tuple{T,T}}

Inner product of set-like inputs `a` and `b`.
"""
function product(a::AbstractSet{I}, b::AbstractSet{I}) where {I<:Integer}
    # Since the Hessian is symmetric, only index tuples (i, j) with i ≤ j are kept.
    out = Set{Tuple{I,I}}()
    for i in a, j in b
        if i <= j
            push!(out, (i, j))
        end
    end
    return out
end
# Accumulate the symmetric outer product of two gradient index sets into `hessian`.
function union_product!(
    hessian::H, gradient_x::G, gradient_y::G
) where {I<:Integer,G<:AbstractSet{I},H<:AbstractSet{Tuple{I,I}}}
    for i in gradient_x, j in gradient_y
        # Only store the upper triangle: the Hessian is symmetric.
        i <= j && push!(hessian, (i, j))
    end
    return hessian
end
# Some custom set types don't support `push!`:
# fall back to building the product separately and merging it with `union!`.
for S in (:DuplicateVector, :SortedVector, :RecursiveSet)
    @eval function union_product!(
        hessian::$S{Tuple{I,I}}, gradient_x::$S{I}, gradient_y::$S{I}
    ) where {I<:Integer}
        hxy = product(gradient_x, gradient_y)
        return union!(hessian, hxy)
    end
end
# Dict-backed variant: for each first-order index `i`, record the second-order
# partners `j` (upper triangle only) in the row set `hessian[i]`.
function union_product!(
    hessian::AbstractDict{I,S}, gradient_x::S, gradient_y::S
) where {I<:Integer,S<:AbstractSet{I}}
    for i in gradient_x
        # A row is created for every i, even when no j qualifies below.
        row = get!(hessian, i, S())
        for j in gradient_y
            if i <= j # symmetric Hessian
                push!(row, j)
            end
        end
    end
    return hessian
end
#=========================#
# AbstractGradientPattern #
#=========================#

# For use with GradientTracer.

"""
    AbstractGradientPattern <: AbstractPattern

Abstract supertype of sparsity patterns representing a vector.
For use with [`GradientTracer`](@ref).

## Expected interface

* [`myempty`](@ref)
* [`create_patterns`](@ref)
* [`gradient`](@ref)
* [`shared`](@ref)
"""
abstract type AbstractGradientPattern <: AbstractPattern end

"""
$(TYPEDEF)

Gradient sparsity pattern represented by a set.

## Fields
$(TYPEDFIELDS)
"""
struct IndexSetGradientPattern{I<:Integer,S<:AbstractSet{I}} <: AbstractGradientPattern
    "Set of indices ``i`` of non-zero values ``∇f(x)_i ≠ 0`` in the gradient."
    gradient::S
end

# An empty pattern holds an empty index set.
function myempty(::Type{IndexSetGradientPattern{I,S}}) where {I,S}
    return IndexSetGradientPattern{I,S}(myempty(S))
end

# One singleton pattern per input, seeded with the input's global index.
function create_patterns(::Type{P}, xs, is) where {I,S,P<:IndexSetGradientPattern{I,S}}
    sets = seed.(Ref(S), is)
    return P.(sets)
end

# Tracer compatibility
gradient(s::IndexSetGradientPattern) = s.gradient
#========================#
# AbstractHessianPattern #
#========================#

# For use with HessianTracer.

"""
    AbstractHessianPattern <: AbstractPattern

Abstract supertype of sparsity patterns representing both gradient and Hessian sparsity.
For use with [`HessianTracer`](@ref).

## Expected interface

* [`myempty`](@ref)
* [`create_patterns`](@ref)
* [`gradient`](@ref)
* [`hessian`](@ref)
* [`shared`](@ref)
"""
abstract type AbstractHessianPattern <: AbstractPattern end

"""
$(TYPEDEF)

Hessian sparsity pattern represented by two sets.

## Fields
$(TYPEDFIELDS)

## Internals
The last type parameter `SB` is a subtype of `SharingBehavior` (`Shared` or `NotShared`),
indicating whether the `hessian` field of this object is shared among all intermediate scalar quantities involved in a function.
"""
struct IndexSetHessianPattern{
    I<:Integer,G<:AbstractSet{I},H<:AbstractSet{Tuple{I,I}},SB<:SharingBehavior
} <: AbstractHessianPattern
    "Set of indices ``i`` of non-zero values ``∇f(x)_i ≠ 0`` in the gradient."
    gradient::G
    "Set of index-tuples ``(i, j)`` of non-zero values ``∇²f(x)_{ij} ≠ 0`` in the Hessian."
    hessian::H
end

# The sharing trait is encoded in the fourth type parameter.
shared(::Type{IndexSetHessianPattern{I,G,H,Shared}}) where {I,G,H} = Shared()
shared(::Type{IndexSetHessianPattern{I,G,H,NotShared}}) where {I,G,H} = NotShared()

function myempty(::Type{IndexSetHessianPattern{I,G,H,SB}}) where {I,G,H,SB}
    return IndexSetHessianPattern{I,G,H,SB}(myempty(G), myempty(H))
end

function create_patterns(
    ::Type{P}, xs, is
) where {I,G,H,S,P<:IndexSetHessianPattern{I,G,H,S}}
    # NOTE(review): `seed.(G, is)` relies on types broadcasting as scalars;
    # the Dict variant below uses `seed.(Ref(S), is)` — both forms are equivalent.
    gradients = seed.(G, is)
    hessian = myempty(H)
    # Even if `NotShared`, sharing a single reference to `hessian` is allowed upon initialization,
    # since mutation is prohibited when `isshared` is false.
    return P.(gradients, Ref(hessian))
end

# Tracer compatibility
gradient(s::IndexSetHessianPattern) = s.gradient
hessian(s::IndexSetHessianPattern) = s.hessian
"""
$(TYPEDEF)

Hessian sparsity pattern represented by a set and a dictionary.

## Fields
$(TYPEDFIELDS)

## Internals
The last type parameter `shared` is a subtype of `SharingBehavior` (`Shared` or `NotShared`),
indicating whether the `hessian` field of this object is shared among all intermediate scalar quantities involved in a function.
"""
struct DictHessianPattern{
    I<:Integer,S<:AbstractSet{I},D<:AbstractDict{I,S},shared<:SharingBehavior
} <: AbstractHessianPattern
    "Set of indices ``i`` of non-zero values ``∇f(x)_i ≠ 0`` in the gradient."
    gradient::S
    "Dictionary representing index-tuples ``(i, j)`` of non-zero values ``∇²f(x)_{ij} ≠ 0`` in the Hessian. For a given key ``i``, values in the set ``{j}`` represent index-tuples ``{i, j}``."
    hessian::D
end

# The sharing trait is encoded in the fourth type parameter.
shared(::Type{DictHessianPattern{I,S,D,Shared}}) where {I,S,D} = Shared()
shared(::Type{DictHessianPattern{I,S,D,NotShared}}) where {I,S,D} = NotShared()

function myempty(::Type{DictHessianPattern{I,S,D,SB}}) where {I,S,D,SB}
    return DictHessianPattern{I,S,D,SB}(myempty(S), myempty(D))
end

function create_patterns(::Type{P}, xs, is) where {I,S,D,SB,P<:DictHessianPattern{I,S,D,SB}}
    gradients = seed.(Ref(S), is)
    hessian = myempty(D)
    # Even if `NotShared`, sharing a single reference to `hessian` is allowed upon initialization,
    # since mutation is prohibited when `isshared` is false.
    return P.(gradients, Ref(hessian))
end

# Tracer compatibility
gradient(p::DictHessianPattern) = p.gradient
hessian(p::DictHessianPattern) = p.hessian
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 5632 | #= This file handles the actual tracing of functions:
1) creating tracers from inputs
2) evaluating the function with the created tracers
3) parsing the resulting tracers into an output matrix
=#
#==================#
# Enumerate inputs #
#==================#
"""
trace_input(T, x)
trace_input(T, xs)
Enumerates input indices and constructs the specified type `T` of tracer.
Supports [`GradientTracer`](@ref), [`HessianTracer`](@ref) and [`Dual`](@ref).
"""
trace_input(::Type{T}, xs) where {T<:Union{AbstractTracer,Dual}} = trace_input(T, xs, 1)
# If possible, this should call `similar` and have the function signature `A{T} -> A{Int}`.
# For some array types, this function signature isn't possible,
# e.g. on `Symmetric`, where symmetry doesn't hold for the index matrix.
function allocate_index_matrix(A::AbstractArray)
    return similar(A, Int)
end
function allocate_index_matrix(A::Symmetric)
    return Matrix{Int}(undef, size(A)...)
end
function trace_input(::Type{T}, xs::AbstractArray, i) where {T<:Union{AbstractTracer,Dual}}
    # Assign one global index per array entry, offset by the starting index `i`.
    is = allocate_index_matrix(xs)
    is .= reshape(1:length(xs), size(xs)) .+ (i - 1)
    return create_tracers(T, xs, is)
end
function trace_input(::Type{T}, xs::Diagonal, i) where {T<:Union{AbstractTracer,Dual}}
    # Only the diagonal entries act as inputs.
    # NOTE(review): the offset `i` is ignored here (indices come from `diagind`) —
    # confirm callers always pass i == 1 for Diagonal inputs.
    ts = create_tracers(T, diag(xs), diagind(xs))
    return Diagonal(ts)
end
function trace_input(::Type{T}, x::Real, i::Integer) where {T<:Union{AbstractTracer,Dual}}
    # Scalar input: wrap in a one-element array, then unwrap the single tracer.
    return only(create_tracers(T, [x], [i]))
end
#=========================#
# Trace through functions #
#=========================#

# Evaluate `f` on tracer inputs; return both input and output tracers.
function trace_function(::Type{T}, f, x) where {T<:Union{AbstractTracer,Dual}}
    xt = trace_input(T, x)
    yt = f(xt)
    return xt, yt
end

# In-place variant: the output buffer mirrors `y` with tracer element type.
function trace_function(::Type{T}, f!, y, x) where {T<:Union{AbstractTracer,Dual}}
    xt = trace_input(T, x)
    yt = similar(y, T)
    f!(yt, xt)
    return xt, yt
end
# Normalize inputs so downstream code can treat everything as an array;
# arrays are passed through untouched (no copy).
function to_array(x::Real)
    return [x]
end
to_array(x::AbstractArray) = x
# Utilities
_tracer_or_number(x::Real) = x          # plain numbers pass through unchanged
_tracer_or_number(d::Dual) = tracer(d)  # Duals are reduced to their tracer part
#================#
# GradientTracer #
#================#

# Compute the sparsity pattern of the Jacobian of `y = f(x)`.
function _jacobian_sparsity(
    f, x, ::Type{T}=DEFAULT_GRADIENT_TRACER
) where {T<:GradientTracer}
    xt, yt = trace_function(T, f, x)
    return jacobian_pattern_to_mat(to_array(xt), to_array(yt))
end

# Compute the sparsity pattern of the Jacobian of `f!(y, x)`.
function _jacobian_sparsity(
    f!, y, x, ::Type{T}=DEFAULT_GRADIENT_TRACER
) where {T<:GradientTracer}
    xt, yt = trace_function(T, f!, y, x)
    return jacobian_pattern_to_mat(to_array(xt), to_array(yt))
end

# Compute the local sparsity pattern of the Jacobian of `y = f(x)` at `x`.
# Local variants wrap inputs in `Dual` so primal values are available during tracing.
function _local_jacobian_sparsity(
    f, x, ::Type{T}=DEFAULT_GRADIENT_TRACER
) where {T<:GradientTracer}
    D = Dual{eltype(x),T}
    xt, yt = trace_function(D, f, x)
    return jacobian_pattern_to_mat(to_array(xt), to_array(yt))
end

# Compute the local sparsity pattern of the Jacobian of `f!(y, x)` at `x`.
function _local_jacobian_sparsity(
    f!, y, x, ::Type{T}=DEFAULT_GRADIENT_TRACER
) where {T<:GradientTracer}
    D = Dual{eltype(x),T}
    xt, yt = trace_function(D, f!, y, x)
    return jacobian_pattern_to_mat(to_array(xt), to_array(yt))
end
# Assemble a sparse (m × n) Bool Jacobian pattern in COO form:
# row i gets an entry in column j for every input index j in output tracer i.
function jacobian_pattern_to_mat(
    xt::AbstractArray{T}, yt::AbstractArray{<:Real}
) where {T<:GradientTracer}
    n = length(xt)
    m = length(yt)
    rows = Int[]
    cols = Int[]
    vals = Bool[]
    for (i, y) in enumerate(yt)
        # Constant outputs (plain numbers or empty tracers) contribute no entries.
        y isa T || continue
        isemptytracer(y) && continue
        for j in gradient(y)
            push!(rows, i)
            push!(cols, j)
            push!(vals, true)
        end
    end
    return sparse(rows, cols, vals, m, n)
end

# Dual tracers: strip primal values, then reuse the tracer method above.
function jacobian_pattern_to_mat(
    xt::AbstractArray{D}, yt::AbstractArray{<:Real}
) where {P,T<:GradientTracer,D<:Dual{P,T}}
    return jacobian_pattern_to_mat(tracer.(xt), _tracer_or_number.(yt))
end
#===============#
# HessianTracer #
#===============#

# Compute the sparsity pattern of the Hessian of a scalar function `y = f(x)`.
function _hessian_sparsity(f, x, ::Type{T}=DEFAULT_HESSIAN_TRACER) where {T<:HessianTracer}
    xt, yt = trace_function(T, f, x)
    return hessian_pattern_to_mat(to_array(xt), yt)
end

# Compute the local sparsity pattern of the Hessian of a scalar function `y = f(x)` at `x`.
function _local_hessian_sparsity(
    f, x, ::Type{T}=DEFAULT_HESSIAN_TRACER
) where {T<:HessianTracer}
    D = Dual{eltype(x),T}
    xt, yt = trace_function(D, f, x)
    return hessian_pattern_to_mat(to_array(xt), yt)
end

# Assemble a sparse n×n Bool matrix from the Hessian index tuples of `yt`.
function hessian_pattern_to_mat(xt::AbstractArray{T}, yt::T) where {T<:HessianTracer}
    n = length(xt)
    I = Int[] # row indices
    J = Int[] # column indices
    V = Bool[] # values
    if !isemptytracer(yt)
        # Stored tuples cover one triangle; each (i, j) is mirrored to (j, i).
        for (i, j) in tuple_set(hessian(yt))
            push!(I, i)
            push!(J, j)
            push!(V, true)
            # TODO: return `Symmetric` instead on next breaking release
            push!(I, j)
            push!(J, i)
            push!(V, true)
        end
    end
    h = sparse(I, J, V, n, n)
    return h
end

function hessian_pattern_to_mat(
    xt::AbstractArray{D1}, yt::D2
) where {P1,P2,T<:HessianTracer,D1<:Dual{P1,T},D2<:Dual{P2,T}}
    return hessian_pattern_to_mat(tracer.(xt), tracer(yt))
end

# Constant (plain-number) outputs have an all-zero Hessian pattern.
function hessian_pattern_to_mat(xt::AbstractArray{T}, yt::Number) where {T<:HessianTracer}
    return hessian_pattern_to_mat(xt, myempty(T))
end
function hessian_pattern_to_mat(
    xt::AbstractArray{D1}, yt::Number
) where {P1,T<:HessianTracer,D1<:Dual{P1,T}}
    return hessian_pattern_to_mat(tracer.(xt), myempty(T))
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 4442 | """
AbstractTracer
Abstract supertype of tracers.
## Type hierarchy
```
AbstractTracer
├── GradientTracer
└── HessianTracer
```
Note that [`Dual`](@ref) is not an `AbstractTracer`.
"""
abstract type AbstractTracer{P<:AbstractPattern} <: Real end
#================#
# GradientTracer #
#================#
"""
$(TYPEDEF)
`Real` number type keeping track of non-zero gradient entries.
## Fields
$(TYPEDFIELDS)
"""
struct GradientTracer{P<:AbstractGradientPattern} <: AbstractTracer{P}
"Sparse representation of non-zero entries in the gradient."
pattern::P
"Indicator whether gradient in tracer contains only zeros."
isempty::Bool
function GradientTracer{P}(gradient::P, isempty::Bool=false) where {P}
return new{P}(gradient, isempty)
end
end
GradientTracer{P}(::Real) where {P} = myempty(GradientTracer{P})
GradientTracer{P}(t::GradientTracer{P}) where {P} = t
GradientTracer(t::GradientTracer) = t
isemptytracer(t::GradientTracer) = t.isempty
pattern(t::GradientTracer) = t.pattern
gradient(t::GradientTracer) = gradient(pattern(t))
#===============#
# HessianTracer #
#===============#

"""
$(TYPEDEF)

`Real` number type keeping track of non-zero gradient and Hessian entries.

## Fields
$(TYPEDFIELDS)
"""
struct HessianTracer{P<:AbstractHessianPattern} <: AbstractTracer{P}
    "Sparse representation of non-zero entries in the gradient and the Hessian."
    pattern::P
    "Indicator whether gradient and Hessian in tracer both contain only zeros."
    isempty::Bool

    function HessianTracer{P}(pattern::P, isempty::Bool=false) where {P}
        return new{P}(pattern, isempty)
    end
end

# Constructing a tracer from a plain number discards all information → empty tracer.
HessianTracer{P}(::Real) where {P} = myempty(HessianTracer{P})
HessianTracer{P}(t::HessianTracer{P}) where {P} = t
HessianTracer(t::HessianTracer) = t

# Accessors
isemptytracer(t::HessianTracer) = t.isempty
pattern(t::HessianTracer) = t.pattern
gradient(t::HessianTracer) = gradient(pattern(t))
hessian(t::HessianTracer) = hessian(pattern(t))
#================================#
# Dual numbers for local tracing #
#================================#

"""
$(TYPEDEF)

Dual `Real` number type keeping track of the results of a primal computation as well as a tracer.

## Fields
$(TYPEDFIELDS)
"""
struct Dual{P<:Real,T<:AbstractTracer} <: Real
    primal::P
    tracer::T

    function Dual{P,T}(primal::P, tracer::T) where {P<:Number,T<:AbstractTracer}
        # Guard against accidentally nesting tracers inside the primal slot.
        if P <: AbstractTracer || P <: Dual
            error("Primal value of Dual tracer can't be an AbstractTracer.")
        end
        return new{P,T}(primal, tracer)
    end
end

# Accessors
primal(d::Dual) = d.primal
tracer(d::Dual) = d.tracer

# Forward pattern queries to the tracer part.
gradient(d::Dual{P,T}) where {P,T<:GradientTracer} = gradient(tracer(d))
gradient(d::Dual{P,T}) where {P,T<:HessianTracer} = gradient(tracer(d))
hessian(d::Dual{P,T}) where {P,T<:HessianTracer} = hessian(tracer(d))
isemptytracer(d::Dual) = isemptytracer(tracer(d))

Dual{P,T}(d::Dual{P,T}) where {P<:Real,T<:AbstractTracer} = d
Dual(primal::P, tracer::T) where {P,T} = Dual{P,T}(primal, tracer)

# A Dual from a plain number keeps the value but starts with an empty tracer.
function Dual{P,T}(x::Real) where {P<:Real,T<:AbstractTracer}
    return Dual(convert(P, x), myempty(T))
end
#===========#
# Utilities #
#===========#

# Forward the sharing trait of a HessianTracer to its pattern type.
shared(::Type{T}) where {P,T<:HessianTracer{P}} = shared(P)

# Empty tracers are flagged with `isempty = true`.
myempty(::T) where {T<:AbstractTracer} = myempty(T)
myempty(::Type{GradientTracer{P}}) where {P} = GradientTracer{P}(myempty(P), true)
myempty(::Type{HessianTracer{P}}) where {P} = HessianTracer{P}(myempty(P), true)
"""
    create_tracers(T, xs, indices)

Convenience constructor for [`GradientTracer`](@ref), [`HessianTracer`](@ref) and [`Dual`](@ref)
from multiple inputs `xs` and their indices `is`.
"""
function create_tracers(
    ::Type{T}, xs::AbstractArray{<:Real,N}, indices::AbstractArray{<:Integer,N}
) where {P<:AbstractPattern,T<:AbstractTracer{P},N}
    # One pattern per input, wrapped into the requested tracer type.
    patterns = create_patterns(P, xs, indices)
    return T.(patterns)
end

function create_tracers(
    ::Type{D}, xs::AbstractArray{<:Real,N}, indices::AbstractArray{<:Integer,N}
) where {P,T,D<:Dual{P,T},N}
    # Duals pair each primal input value with its tracer.
    tracers = create_tracers(T, xs, indices)
    return D.(xs, tracers)
end

# Pretty-printing of Dual tracers
name(::Type{T}) where {T<:GradientTracer} = "GradientTracer"
name(::Type{T}) where {T<:HessianTracer} = "HessianTracer"
name(::Type{D}) where {P,T,D<:Dual{P,T}} = "Dual-$(name(T))"
name(::T) where {T<:AbstractTracer} = name(T)
name(::D) where {D<:Dual} = name(D)
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 274 | ## Special overloads to avoid ambiguity errors
eval(generate_code_2_to_1_typed(:Base, ^, Integer))
eval(generate_code_2_to_1_typed(:Base, ^, Rational))
eval(generate_code_2_to_1_typed(:Base, ^, Irrational{:ℯ}))
eval(generate_code_2_to_1_typed(:Base, isless, AbstractFloat))
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 6414 | """
second_order_or(tracers)
Compute the most conservative elementwise OR of tracer sparsity patterns,
including second-order interactions to update the `hessian` field of `HessianTracer`.
This is functionally equivalent to:
```julia
reduce(^, tracers)
```
"""
function second_order_or(ts::AbstractArray{T}) where {T<:AbstractTracer}
# TODO: improve performance
return reduce(second_order_or, ts; init=myempty(T))
end
function second_order_or(a::T, b::T) where {T<:GradientTracer}
return gradient_tracer_2_to_1(a, b, false, false)
end
function second_order_or(a::T, b::T) where {T<:HessianTracer}
return hessian_tracer_2_to_1(a, b, false, false, false, false, false)
end
"""
first_order_or(tracers)
Compute the most conservative elementwise OR of tracer sparsity patterns,
excluding second-order interactions of `HessianTracer`.
This is functionally equivalent to:
```julia
reduce(+, tracers)
```
"""
function first_order_or(ts::AbstractArray{T}) where {T<:AbstractTracer}
# TODO: improve performance
return reduce(first_order_or, ts; init=myempty(T))
end
function first_order_or(a::T, b::T) where {T<:GradientTracer}
return gradient_tracer_2_to_1(a, b, false, false)
end
function first_order_or(a::T, b::T) where {T<:HessianTracer}
return hessian_tracer_2_to_1(a, b, false, true, false, true, true)
end
#===========#
# Utilities #
#===========#

# Split an array of Duals into (array of primal values, array of tracers).
function split_dual_array(A::AbstractArray{D}) where {D<:Dual}
    primals = map(d -> d.primal, A)
    tracers = map(d -> d.tracer, A)
    return primals, tracers
end
#==================#
# LinearAlgebra.jl #
#==================#

# TODO: replace `second_order_or` by less conservative sparsity patterns when possible

## Determinant
# Conservative pattern: the determinant depends on all entries.
LinearAlgebra.det(A::AbstractMatrix{T}) where {T<:AbstractTracer} = second_order_or(A)
LinearAlgebra.logdet(A::AbstractMatrix{T}) where {T<:AbstractTracer} = second_order_or(A)
function LinearAlgebra.logabsdet(A::AbstractMatrix{T}) where {T<:AbstractTracer}
    t1 = second_order_or(A)
    t2 = sign(t1) # corresponds to sign of det(A): set first- and second-order derivatives to zero
    return (t1, t2)
end

## Norm
# The 1- and Inf-norms are piecewise linear in the entries, so second-order
# interactions are excluded; other values of `p` keep them.
function LinearAlgebra.norm(A::AbstractArray{T}, p::Real=2) where {T<:AbstractTracer}
    if isone(p) || isinf(p)
        return first_order_or(A)
    else
        return second_order_or(A)
    end
end
function LinearAlgebra.opnorm(A::AbstractMatrix{T}, p::Real=2) where {T<:AbstractTracer}
    if isone(p) || isinf(p)
        return first_order_or(A)
    else
        return second_order_or(A)
    end
end
## Eigenvalues
# Conservative: eigenvalues/eigenvectors depend on all matrix entries.
# Keyword arguments mirror the Base signatures but are ignored for patterns.
function LinearAlgebra.eigmax(
    A::Union{T,AbstractMatrix{T}}; permute::Bool=true, scale::Bool=true
) where {T<:AbstractTracer}
    return second_order_or(A)
end
function LinearAlgebra.eigmin(
    A::Union{T,AbstractMatrix{T}}; permute::Bool=true, scale::Bool=true
) where {T<:AbstractTracer}
    return second_order_or(A)
end
function LinearAlgebra.eigen(
    A::AbstractMatrix{T};
    permute::Bool=true,
    scale::Bool=true,
    sortby::Union{Function,Nothing}=nothing,
) where {T<:AbstractTracer}
    LinearAlgebra.checksquare(A)
    n = size(A, 1)
    t = second_order_or(A)
    # `Fill` avoids materializing n (resp. n²) copies of the same tracer.
    values = Fill(t, n)
    vectors = Fill(t, n, n)
    return LinearAlgebra.Eigen(values, vectors)
end
## Inverse
# Conservative: every entry of the inverse depends on all entries of `A`.
function Base.inv(A::StridedMatrix{T}) where {T<:AbstractTracer}
    LinearAlgebra.checksquare(A)
    t = second_order_or(A)
    return Fill(t, size(A)...)
end
# The inverse of a diagonal matrix is diagonal: invert each diagonal tracer
# entry elementwise instead of falling back to the dense `inv` above.
function Base.inv(D::Diagonal{T}) where {T<:AbstractTracer}
    # Broadcasting replaces the previous manual `1:length(...)` fill loop
    # (same elementwise result, idiomatic and allocation-equivalent).
    return Diagonal(inv.(D.diag))
end
# Pseudoinverse: an n×m input yields an m×n output, each entry with the full pattern.
# Tolerance keywords mirror the Base signature but are ignored for patterns.
function LinearAlgebra.pinv(
    A::AbstractMatrix{T}; atol::Real=0.0, rtol::Real=0.0
) where {T<:AbstractTracer}
    n, m = size(A)
    t = second_order_or(A)
    return Fill(t, m, n)
end
LinearAlgebra.pinv(D::Diagonal{T}) where {T<:AbstractTracer} = inv(D)

## Division
# Left division is implemented via the pseudoinverse pattern above.
function LinearAlgebra.:\(
    A::AbstractMatrix{T}, B::AbstractVecOrMat
) where {T<:AbstractTracer}
    Ainv = LinearAlgebra.pinv(A)
    return Ainv * B
end
## Exponential
# Matrix exponential: conservative dense pattern on a square matrix.
function Base.exp(A::AbstractMatrix{T}) where {T<:AbstractTracer}
    LinearAlgebra.checksquare(A)
    n = size(A, 1)
    t = second_order_or(A)
    return Fill(t, n, n)
end

## Matrix power
function LinearAlgebra.:^(A::AbstractMatrix{T}, p::Integer) where {T<:AbstractTracer}
    LinearAlgebra.checksquare(A)
    n = size(A, 1)
    if iszero(p)
        # A^0 is the identity matrix, a constant: all patterns are empty.
        return Fill(myempty(T), n, n)
    else
        t = second_order_or(A)
        return Fill(t, n, n)
    end
end
# `D^0` via literal powers (e.g. `D^0` written in source) is also constant.
function Base.literal_pow(::typeof(^), D::Diagonal{T}, ::Val{0}) where {T<:AbstractTracer}
    ts = similar(D.diag)
    ts .= myempty(T)
    return Diagonal(ts)
end
#==========================#
# LinearAlgebra.jl on Dual #
#==========================#

# `Duals` should use LinearAlgebra's generic fallback implementations
# to compute the "least conservative" sparsity patterns possible on a scalar level.

# The following three methods are a temporary fix for issue #108.
# TODO: instead overload `lu` on AbstractMatrix of Duals.
function LinearAlgebra.det(A::AbstractMatrix{D}) where {D<:Dual}
    primals, tracers = split_dual_array(A)
    # Bug fix: this method previously delegated to `logdet`, so the returned
    # primal value was log(det(A)) instead of det(A). Delegate to `det` on
    # both the primal and the tracer part.
    p = LinearAlgebra.det(primals)
    t = LinearAlgebra.det(tracers)
    return D(p, t)
end
# Log-determinant on Duals: split into primal/tracer parts and recombine.
function LinearAlgebra.logdet(A::AbstractMatrix{D}) where {D<:Dual}
    primals, tracers = split_dual_array(A)
    p = LinearAlgebra.logdet(primals)
    t = LinearAlgebra.logdet(tracers)
    return D(p, t)
end
# `logabsdet` returns a tuple (log|det|, sign); both components are recombined.
function LinearAlgebra.logabsdet(A::AbstractMatrix{D}) where {D<:Dual}
    primals, tracers = split_dual_array(A)
    p1, p2 = LinearAlgebra.logabsdet(primals)
    t1, t2 = LinearAlgebra.logabsdet(tracers)
    return (D(p1, t1), D(p2, t2))
end
#==============#
# SparseArrays #
#==============#

# Helper function needed in SparseArrays's sparsematrix, sparsevector and higherorderfns.
# On Tracers, `iszero` and `!iszero` don't return a boolean,
# but we need a function that does to handle the structure of the array.
# A tracer counts as structurally zero iff it is an empty tracer;
# a Dual additionally requires its primal value to be zero.
if VERSION >= v"1.9" # _iszero was added in JuliaSparse/SparseArrays.jl#177
    SparseArrays._iszero(t::AbstractTracer) = isemptytracer(t)
    SparseArrays._iszero(d::Dual) = isemptytracer(tracer(d)) && iszero(primal(d))
    SparseArrays._isnotzero(t::AbstractTracer) = !isemptytracer(t)
    SparseArrays._isnotzero(d::Dual) = !isemptytracer(tracer(d)) || !iszero(primal(d))
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2363 | ##===============#
# AbstractTracer #
#================#
Base.promote_rule(::Type{T}, ::Type{N}) where {T<:AbstractTracer,N<:Real} = T
Base.promote_rule(::Type{N}, ::Type{T}) where {T<:AbstractTracer,N<:Real} = T
Base.convert(::Type{T}, x::Real) where {T<:AbstractTracer} = myempty(T)
Base.convert(::Type{T}, t::T) where {T<:AbstractTracer} = t
Base.convert(::Type{<:Real}, t::T) where {T<:AbstractTracer} = t
#=======#
# Duals #
#=======#

# Promote only the primal type; the tracer type must match.
function Base.promote_rule(::Type{Dual{P1,T}}, ::Type{Dual{P2,T}}) where {P1,P2,T}
    PP = Base.promote_type(P1, P2) # TODO: possible method call error?
    return Dual{PP,T}
end
function Base.promote_rule(::Type{Dual{P,T}}, ::Type{N}) where {P,T,N<:Real}
    PP = Base.promote_type(P, N) # TODO: possible method call error?
    return Dual{PP,T}
end
function Base.promote_rule(::Type{N}, ::Type{Dual{P,T}}) where {P,T,N<:Real}
    PP = Base.promote_type(P, N) # TODO: possible method call error?
    return Dual{PP,T}
end

Base.convert(::Type{D}, x::Real) where {P,T,D<:Dual{P,T}} = Dual(x, myempty(T))
Base.convert(::Type{D}, d::D) where {P,T,D<:Dual{P,T}} = d
# NOTE(review): converting a Dual to a plain `Real` type returns another Dual
# with converted primal, so the tracer is never dropped — confirm intended.
Base.convert(::Type{N}, d::D) where {N<:Real,P,T,D<:Dual{P,T}} = Dual(convert(N, primal(d)), tracer(d))
function Base.convert(::Type{Dual{P1,T}}, d::Dual{P2,T}) where {P1,P2,T}
    return Dual(convert(P1, primal(d)), tracer(d))
end
#===========================#
# Explicit type conversions #
#===========================#

for T in (:Int, :Integer, :Float64, :Float32)
    # Currently only defined on Dual to avoid invalidations.
    # Conversion only succeeds for constants (empty tracers); otherwise it would
    # silently discard sparsity information, hence the InexactError.
    @eval function Base.$T(d::Dual)
        isemptytracer(d) || throw(InexactError(Symbol($T), $T, d))
        return $T(primal(d))
    end
end
#=======================#
# Named type promotions #
#=======================#

# Widening a tracer type is a no-op: patterns don't depend on numeric precision.
for f in (:big, :widen, :float)
    @eval Base.$f(::Type{T}) where {T<:AbstractTracer} = T
    @eval Base.$f(::Type{D}) where {P,T,D<:Dual{P,T}} = $f(P) # only return primal type
end

#=============================#
# Constant functions on types #
#=============================#

# Methods on variables are in operators.jl
# Constants carry no derivative information → empty tracer.
for f in
    (:zero, :one, :oneunit, :typemin, :typemax, :eps, :floatmin, :floatmax, :maxintfloat)
    @eval Base.$f(::Type{T}) where {T<:AbstractTracer} = myempty(T)
    @eval Base.$f(::Type{D}) where {P,T,D<:Dual{P,T}} = $f(P) # only return primal
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 374 |
# Special overloads for Dual numbers

# Predicates forward to the primal value on Duals (a concrete value exists to
# inspect), but must error on plain tracers, which carry no primal value.
for fn in (
    :iseven,
    :isfinite,
    :isinf,
    :isinteger,
    :ismissing,
    :isnan,
    :isnothing,
    :isodd,
    :isone,
    :isreal,
    :iszero,
)
    @eval Base.$fn(d::D) where {D<:Dual} = $fn(primal(d))
    @eval function Base.$fn(t::T) where {T<:AbstractTracer}
        throw(MissingPrimalError($fn, t))
    end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 8985 | SCT = SparseConnectivityTracer
## 1-to-1

# Propagate a gradient tracer through a unary scalar function.
# If the first derivative is known to vanish, the output depends on no input
# and an empty tracer is returned; otherwise the tracer passes through.
@noinline function gradient_tracer_1_to_1(
    t::T, is_der1_zero::Bool
) where {T<:GradientTracer}
    keep_pattern = !is_der1_zero || isemptytracer(t)
    return keep_pattern ? t : myempty(T)
end
# Unary propagation on the pattern level: wrap the propagated gradient set.
function gradient_tracer_1_to_1_inner(
    p::P, is_der1_zero::Bool
) where {P<:IndexSetGradientPattern}
    return P(gradient_tracer_1_to_1_inner(gradient(p), is_der1_zero)) # return pattern
end
# This is only required because it is called by HessianTracer with IndexSetHessianPattern
# Otherwise, we would just have the method on IndexSetGradientPattern above.
# Set-level unary propagation: a vanishing first derivative empties the set.
function gradient_tracer_1_to_1_inner(
    s::S, is_der1_zero::Bool
) where {S<:AbstractSet{<:Integer}}
    return is_der1_zero ? myempty(S) : s # return set
end
# Generate method overloads of the unary function `M.f` for `GradientTracer`
# and for `Dual` numbers wrapping a `GradientTracer`.
function generate_code_gradient_1_to_1(M::Symbol, f::Function)
    fname = nameof(f)
    is_der1_zero_g = is_der1_zero_global(f)

    expr_gradienttracer = quote
        function $M.$fname(t::$SCT.GradientTracer)
            return $SCT.gradient_tracer_1_to_1(t, $is_der1_zero_g)
        end
    end

    # If the derivative is globally zero, the Dual overload can skip tracer
    # propagation entirely and just evaluate the primal.
    expr_dual = if is_der1_zero_g
        quote
            function $M.$fname(d::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(d)
                return $M.$fname(x)
            end
        end
    else
        # Otherwise the local derivative is checked at runtime on the primal.
        quote
            function $M.$fname(d::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(d)
                p_out = $M.$fname(x)

                t = $SCT.tracer(d)
                is_der1_zero = $SCT.is_der1_zero_local($M.$fname, x)
                t_out = $SCT.gradient_tracer_1_to_1(t, is_der1_zero)
                return $SCT.Dual(p_out, t_out)
            end
        end
    end
    return Expr(:block, expr_gradienttracer, expr_dual)
end
## 2-to-1
# Propagate two gradient tracers through a binary scalar function.
# Empty tracers short-circuit to the unary rule; otherwise the index sets of
# both arguments are combined on the pattern level.
@noinline function gradient_tracer_2_to_1(
    tx::T, ty::T, is_der1_arg1_zero::Bool, is_der1_arg2_zero::Bool
) where {T<:GradientTracer}
    # TODO: add tests for isempty
    if tx.isempty && ty.isempty
        return tx # empty tracer
    elseif ty.isempty
        return gradient_tracer_1_to_1(tx, is_der1_arg1_zero)
    elseif tx.isempty
        return gradient_tracer_1_to_1(ty, is_der1_arg2_zero)
    else
        g_out = gradient_tracer_2_to_1_inner(
            pattern(tx), pattern(ty), is_der1_arg1_zero, is_der1_arg2_zero
        )
        return T(g_out) # return tracer
    end
end
# Binary propagation on the pattern level: combine both gradient index sets.
function gradient_tracer_2_to_1_inner(
    px::P, py::P, is_der1_arg1_zero::Bool, is_der1_arg2_zero::Bool
) where {P<:IndexSetGradientPattern}
    return P(
        gradient_tracer_2_to_1_inner(
            gradient(px), gradient(py), is_der1_arg1_zero, is_der1_arg2_zero
        ),
    ) # return pattern
end
# This is only required because it is called by HessianTracer with IndexSetHessianPattern
# Otherwise, we would just have the method on IndexSetGradientPattern above.
# Set-level binary propagation: keep each argument's index set only if its
# partial derivative can be nonzero; the result is the union of the kept sets.
function gradient_tracer_2_to_1_inner(
    sx::S, sy::S, is_der1_arg1_zero::Bool, is_der1_arg2_zero::Bool
) where {S<:AbstractSet{<:Integer}}
    keep_x = !is_der1_arg1_zero
    keep_y = !is_der1_arg2_zero
    if keep_x && keep_y
        return union(sx, sy) # return set
    elseif keep_x
        return sx
    elseif keep_y
        return sy
    else
        return myempty(S)
    end
end
# Generate method overloads of the binary function `M.f` for `GradientTracer`
# and for `Dual` numbers wrapping a `GradientTracer`.
function generate_code_gradient_2_to_1(M::Symbol, f::Function)
    fname = nameof(f)
    is_der1_arg1_zero_g = is_der1_arg1_zero_global(f)
    is_der1_arg2_zero_g = is_der1_arg2_zero_global(f)

    expr_tracer_tracer = quote
        function $M.$fname(tx::T, ty::T) where {T<:$SCT.GradientTracer}
            return $SCT.gradient_tracer_2_to_1(
                tx, ty, $is_der1_arg1_zero_g, $is_der1_arg2_zero_g
            )
        end
    end

    # Skip tracer propagation only if both partial derivatives vanish globally.
    expr_dual_dual = if is_der1_arg1_zero_g && is_der1_arg2_zero_g
        quote
            function $M.$fname(dx::D, dy::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(dx)
                y = $SCT.primal(dy)
                return $M.$fname(x, y)
            end
        end
    else
        # Otherwise local derivatives are checked at runtime on the primals.
        quote
            function $M.$fname(dx::D, dy::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(dx)
                y = $SCT.primal(dy)
                p_out = $M.$fname(x, y)

                tx = $SCT.tracer(dx)
                ty = $SCT.tracer(dy)
                is_der1_arg1_zero = $SCT.is_der1_arg1_zero_local($M.$fname, x, y)
                is_der1_arg2_zero = $SCT.is_der1_arg2_zero_local($M.$fname, x, y)
                t_out = $SCT.gradient_tracer_2_to_1(
                    tx, ty, is_der1_arg1_zero, is_der1_arg2_zero
                )
                return $SCT.Dual(p_out, t_out)
            end
        end
    end

    # Overloads mixing a tracer/Dual argument with a plain `Real` argument.
    exprs_typed = generate_code_gradient_2_to_1_typed(M, f, Real)
    return Expr(:block, expr_tracer_tracer, expr_dual_dual, exprs_typed)
end
# Generate overloads of the binary function `M.f` mixing one tracer/Dual
# argument with one plain `Z` (non-tracer) argument. With one argument held
# constant, propagation reduces to the unary 1-to-1 rule.
function generate_code_gradient_2_to_1_typed(
    M::Symbol, # Symbol indicating Module of f, usually `:Base`
    f::Function, # function to overload
    Z::Type, # external non-tracer-type to overload on
)
    fname = nameof(f)
    is_der1_arg1_zero_g = is_der1_arg1_zero_global(f)
    is_der1_arg2_zero_g = is_der1_arg2_zero_global(f)

    expr_tracer_type = quote
        function $M.$fname(tx::$SCT.GradientTracer, ::$Z)
            return $SCT.gradient_tracer_1_to_1(tx, $is_der1_arg1_zero_g)
        end
    end
    expr_type_tracer = quote
        function $M.$fname(::$Z, ty::$SCT.GradientTracer)
            return $SCT.gradient_tracer_1_to_1(ty, $is_der1_arg2_zero_g)
        end
    end

    # Dual-with-constant overloads: skip propagation when the corresponding
    # partial derivative is globally zero, otherwise check it locally.
    expr_dual_type = if is_der1_arg1_zero_g
        quote
            function $M.$fname(dx::D, y::$Z) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(dx)
                return $M.$fname(x, y)
            end
        end
    else
        quote
            function $M.$fname(dx::D, y::$Z) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(dx)
                p_out = $M.$fname(x, y)

                tx = $SCT.tracer(dx)
                is_der1_arg1_zero = $SCT.is_der1_arg1_zero_local($M.$fname, x, y)
                t_out = $SCT.gradient_tracer_1_to_1(tx, is_der1_arg1_zero)
                return $SCT.Dual(p_out, t_out)
            end
        end
    end
    expr_type_dual = if is_der1_arg2_zero_g
        quote
            function $M.$fname(x::$Z, dy::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                y = $SCT.primal(dy)
                return $M.$fname(x, y)
            end
        end
    else
        quote
            function $M.$fname(x::$Z, dy::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                y = $SCT.primal(dy)
                p_out = $M.$fname(x, y)

                ty = $SCT.tracer(dy)
                is_der1_arg2_zero = $SCT.is_der1_arg2_zero_local($M.$fname, x, y)
                t_out = $SCT.gradient_tracer_1_to_1(ty, is_der1_arg2_zero)
                return $SCT.Dual(p_out, t_out)
            end
        end
    end
    return Expr(:block, expr_tracer_type, expr_type_tracer, expr_dual_type, expr_type_dual)
end
## 1-to-2

# Propagate one gradient tracer into two outputs (e.g. for `sincos`),
# applying the unary propagation rule once per output.
@noinline function gradient_tracer_1_to_2(
    t::T, is_der1_out1_zero::Bool, is_der1_out2_zero::Bool
) where {T<:GradientTracer}
    isemptytracer(t) && return (t, t) # TODO: add test
    return (
        gradient_tracer_1_to_1(t, is_der1_out1_zero),
        gradient_tracer_1_to_1(t, is_der1_out2_zero),
    )
end
# Generate method overloads of the unary, two-output function `M.f`
# (e.g. `sincos`) for `GradientTracer` and `Dual` numbers wrapping one.
function generate_code_gradient_1_to_2(M::Symbol, f::Function)
    fname = nameof(f)
    is_der1_out1_zero_g = is_der1_out1_zero_global(f)
    is_der1_out2_zero_g = is_der1_out2_zero_global(f)

    expr_gradienttracer = quote
        function $M.$fname(t::$SCT.GradientTracer)
            return $SCT.gradient_tracer_1_to_2(t, $is_der1_out1_zero_g, $is_der1_out2_zero_g)
        end
    end

    # Skip tracer propagation only when both outputs' derivatives vanish.
    expr_dual = if is_der1_out1_zero_g && is_der1_out2_zero_g
        quote
            function $M.$fname(d::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(d)
                return $M.$fname(x)
            end
        end
    else
        quote
            function $M.$fname(d::D) where {P,T<:$SCT.GradientTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(d)
                p_out1, p_out2 = $M.$fname(x)

                t = $SCT.tracer(d)
                is_der1_out2_zero = $SCT.is_der1_out2_zero_local($M.$fname, x)
                is_der1_out1_zero = $SCT.is_der1_out1_zero_local($M.$fname, x)
                t_out1, t_out2 = $SCT.gradient_tracer_1_to_2(
                    t, is_der1_out1_zero, is_der1_out2_zero
                )
                return ($SCT.Dual(p_out1, t_out1), $SCT.Dual(p_out2, t_out2)) # TODO: this was wrong, add test
            end
        end
    end
    return Expr(:block, expr_gradienttracer, expr_dual)
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 14043 | SCT = SparseConnectivityTracer
## 1-to-1
# 𝟙[∇γ] = 𝟙[∂φ]⋅𝟙[∇α]
# 𝟙[∇²γ] = 𝟙[∂φ]⋅𝟙[∇²α] ∨ 𝟙[∂²φ]⋅(𝟙[∇α] ∨ 𝟙[∇α]ᵀ)

# Propagate a Hessian tracer through a unary scalar function. Empty tracers
# pass through; otherwise propagation happens on the pattern level, dispatched
# on whether the Hessian storage is shared between tracers.
@noinline function hessian_tracer_1_to_1(
    t::T, is_der1_zero::Bool, is_der2_zero::Bool
) where {P<:AbstractHessianPattern,T<:HessianTracer{P}}
    if isemptytracer(t) # TODO: add test
        return t
    else
        p_out = hessian_tracer_1_to_1_inner(
            pattern(t), is_der1_zero, is_der2_zero, shared(P)
        )
        return T(p_out) # return tracer
    end
end
# Unary propagation on the pattern level when Hessian storage is NOT shared:
# both the gradient and Hessian index sets of the output are computed fresh.
function hessian_tracer_1_to_1_inner(
    p::P, is_der1_zero::Bool, is_der2_zero::Bool, ::NotShared
) where {P<:AbstractHessianPattern}
    g = gradient(p)
    h = hessian(p)
    g_out = gradient_tracer_1_to_1_inner(g, is_der1_zero) # 𝟙[∇γ] = 𝟙[∂φ]⋅𝟙[∇α]
    h_out = if is_der1_zero && is_der2_zero # 𝟙[∇²γ] = 0
        myempty(h)
    elseif !is_der1_zero && is_der2_zero # 𝟙[∇²γ] = 𝟙[∂φ]⋅𝟙[∇²α]
        h
    elseif is_der1_zero && !is_der2_zero # 𝟙[∇²γ] = 𝟙[∇α] ∨ 𝟙[∇α]ᵀ
        # TODO: this branch of the code currently isn't tested.
        # Covering it would require a scalar 1-to-1 function with local overloads,
        # such that ∂f/∂x == 0 and ∂²f/∂x² != 0.
        union_product!(myempty(h), g, g)
    else # !is_der1_zero && !is_der2_zero, 𝟙[∇²γ] = 𝟙[∇²α] ∨ (𝟙[∇α] ∨ 𝟙[∇α]ᵀ)
        union_product!(copy(h), g, g)
    end
    return P(g_out, h_out) # return pattern
end

# NOTE: mutates argument p and should arguably be called `hessian_tracer_1_to_1_inner!`
# Shared-storage variant: the common Hessian set is grown in place and can
# only accumulate second-order information, never remove it.
function hessian_tracer_1_to_1_inner(
    p::P, is_der1_zero::Bool, is_der2_zero::Bool, ::Shared
) where {P<:AbstractHessianPattern}
    g = gradient(p)
    g_out = gradient_tracer_1_to_1_inner(g, is_der1_zero)
    # shared Hessian patterns can't remove second-order information, only add to it.
    h = hessian(p)
    h_out = if is_der2_zero # 𝟙[∇²γ] = 𝟙[∂φ]⋅𝟙[∇²α]
        h
    else # 𝟙[∇²γ] = 𝟙[∇²α] ∨ (𝟙[∇α] ∨ 𝟙[∇α]ᵀ)
        union_product!(h, g, g)
    end
    return P(g_out, h_out) # return pattern
end
# Generate method overloads of the unary function `M.f` for `HessianTracer`
# and for `Dual` numbers wrapping a `HessianTracer`.
function generate_code_hessian_1_to_1(M::Symbol, f::Function)
    fname = nameof(f)
    is_der1_zero_g = is_der1_zero_global(f)
    is_der2_zero_g = is_der2_zero_global(f)

    expr_hessiantracer = quote
        ## HessianTracer
        function $M.$fname(t::$SCT.HessianTracer)
            return $SCT.hessian_tracer_1_to_1(t, $is_der1_zero_g, $is_der2_zero_g)
        end
    end

    # BUG FIX: the fast path that skips tracer propagation is only valid when
    # *both* the first and second derivatives are globally zero. The previous
    # condition checked `is_der1_zero_g` twice (`is_der1_zero_g && is_der1_zero_g`),
    # so a function with zero first but nonzero second derivative would have
    # incorrectly dropped its Hessian pattern.
    expr_dual = if is_der1_zero_g && is_der2_zero_g
        quote
            function $M.$fname(d::D) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(d)
                return $M.$fname(x)
            end
        end
    else
        # Otherwise local first/second derivatives are checked at runtime.
        quote
            function $M.$fname(d::D) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(d)
                p_out = $M.$fname(x)

                t = $SCT.tracer(d)
                is_der1_zero = $SCT.is_der1_zero_local($M.$fname, x)
                is_der2_zero = $SCT.is_der2_zero_local($M.$fname, x)
                t_out = $SCT.hessian_tracer_1_to_1(t, is_der1_zero, is_der2_zero)
                return $SCT.Dual(p_out, t_out)
            end
        end
    end
    return Expr(:block, expr_hessiantracer, expr_dual)
end
## 2-to-1
# Propagate two Hessian tracers through a binary scalar function.
# Empty tracers short-circuit to the unary rule; otherwise the patterns of
# both arguments are combined according to the derivative-zero flags.
@noinline function hessian_tracer_2_to_1(
    tx::T,
    ty::T,
    is_der1_arg1_zero::Bool,
    is_der2_arg1_zero::Bool,
    is_der1_arg2_zero::Bool,
    is_der2_arg2_zero::Bool,
    is_der_cross_zero::Bool,
) where {P<:AbstractHessianPattern,T<:HessianTracer{P}}
    # TODO: add tests for isempty
    if tx.isempty && ty.isempty
        return tx # empty tracer
    elseif ty.isempty
        return hessian_tracer_1_to_1(tx, is_der1_arg1_zero, is_der2_arg1_zero)
    elseif tx.isempty
        return hessian_tracer_1_to_1(ty, is_der1_arg2_zero, is_der2_arg2_zero)
    else
        p_out = hessian_tracer_2_to_1_inner(
            pattern(tx),
            pattern(ty),
            is_der1_arg1_zero,
            is_der2_arg1_zero,
            is_der1_arg2_zero,
            is_der2_arg2_zero,
            is_der_cross_zero,
            shared(P),
        )
        return T(p_out) # return tracer
    end
end
# Binary propagation on the pattern level when Hessian storage is NOT shared:
# the output Hessian collects both inputs' Hessian sets plus every
# non-vanishing first-order self- and cross-product.
function hessian_tracer_2_to_1_inner(
    px::P,
    py::P,
    is_der1_arg1_zero::Bool,
    is_der2_arg1_zero::Bool,
    is_der1_arg2_zero::Bool,
    is_der2_arg2_zero::Bool,
    is_der_cross_zero::Bool,
    ::NotShared,
) where {P<:AbstractHessianPattern}
    gx, hx = gradient(px), hessian(px)
    gy, hy = gradient(py), hessian(py)
    g_out = gradient_tracer_2_to_1_inner(gx, gy, is_der1_arg1_zero, is_der1_arg2_zero)

    h_out = myempty(hx)
    !is_der1_arg1_zero && myunion!(h_out, hx) # hessian alpha
    !is_der1_arg2_zero && myunion!(h_out, hy) # hessian beta
    !is_der2_arg1_zero && union_product!(h_out, gx, gx) # product alpha
    !is_der2_arg2_zero && union_product!(h_out, gy, gy) # product beta
    !is_der_cross_zero && union_product!(h_out, gx, gy) # cross product 1
    !is_der_cross_zero && union_product!(h_out, gy, gx) # cross product 2
    return P(g_out, h_out) # return pattern
end

# NOTE: mutates arguments px and py and should arguably be called `hessian_tracer_1_to_1_inner!`
# Shared-storage variant: both patterns must reference the same Hessian set,
# which is grown in place; the union of hx and hy is therefore a no-op.
function hessian_tracer_2_to_1_inner(
    px::P,
    py::P,
    is_der1_arg1_zero::Bool,
    is_der2_arg1_zero::Bool,
    is_der1_arg2_zero::Bool,
    is_der2_arg2_zero::Bool,
    is_der_cross_zero::Bool,
    ::Shared,
) where {P<:AbstractHessianPattern}
    gx, hx = gradient(px), hessian(px)
    gy, hy = gradient(py), hessian(py)
    hx !== hy && error("Expected shared Hessians, got $hx, $hy.")
    h_out = hx # union of hx and hy can be skipped since they are the same object

    g_out = gradient_tracer_2_to_1_inner(gx, gy, is_der1_arg1_zero, is_der1_arg2_zero)
    !is_der2_arg1_zero && union_product!(h_out, gx, gx) # product alpha
    !is_der2_arg2_zero && union_product!(h_out, gy, gy) # product beta
    !is_der_cross_zero && union_product!(h_out, gx, gy) # cross product 1
    !is_der_cross_zero && union_product!(h_out, gy, gx) # cross product 2
    return P(g_out, h_out) # return pattern
end
# Generate method overloads of the binary function `M.f` for `HessianTracer`
# and for `Dual` numbers wrapping a `HessianTracer`.
function generate_code_hessian_2_to_1(
    M::Symbol, # Symbol indicating Module of f, usually `:Base`
    f::Function, # function to overload
    Z::Type=Real, # external non-tracer-type to overload on
)
    fname = nameof(f)
    is_der1_arg1_zero_g = is_der1_arg1_zero_global(f)
    is_der2_arg1_zero_g = is_der2_arg1_zero_global(f)
    is_der1_arg2_zero_g = is_der1_arg2_zero_global(f)
    is_der2_arg2_zero_g = is_der2_arg2_zero_global(f)
    is_der_cross_zero_g = is_der_cross_zero_global(f)

    expr_tracer_tracer = quote
        function $M.$fname(tx::T, ty::T) where {T<:$SCT.HessianTracer}
            return $SCT.hessian_tracer_2_to_1(
                tx,
                ty,
                $is_der1_arg1_zero_g,
                $is_der2_arg1_zero_g,
                $is_der1_arg2_zero_g,
                $is_der2_arg2_zero_g,
                $is_der_cross_zero_g,
            )
        end
    end

    # The Dual-Dual fast path may skip tracer propagation only when every
    # first-, second- and cross-derivative is globally zero.
    expr_dual_dual =
        if is_der1_arg1_zero_g &&
            is_der2_arg1_zero_g &&
            is_der1_arg2_zero_g &&
            is_der2_arg2_zero_g &&
            is_der_cross_zero_g
            quote
                function $M.$fname(
                    dx::D, dy::D
                ) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                    x = $SCT.primal(dx)
                    y = $SCT.primal(dy)
                    return $M.$fname(x, y)
                end
            end
        else
            quote
                function $M.$fname(
                    dx::D, dy::D
                ) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                    x = $SCT.primal(dx)
                    y = $SCT.primal(dy)
                    p_out = $M.$fname(x, y)

                    tx = $SCT.tracer(dx)
                    ty = $SCT.tracer(dy)
                    is_der1_arg1_zero = $SCT.is_der1_arg1_zero_local($M.$fname, x, y)
                    is_der2_arg1_zero = $SCT.is_der2_arg1_zero_local($M.$fname, x, y)
                    is_der1_arg2_zero = $SCT.is_der1_arg2_zero_local($M.$fname, x, y)
                    is_der2_arg2_zero = $SCT.is_der2_arg2_zero_local($M.$fname, x, y)
                    is_der_cross_zero = $SCT.is_der_cross_zero_local($M.$fname, x, y)
                    t_out = $SCT.hessian_tracer_2_to_1(
                        tx,
                        ty,
                        is_der1_arg1_zero,
                        is_der2_arg1_zero,
                        is_der1_arg2_zero,
                        is_der2_arg2_zero,
                        is_der_cross_zero,
                    )
                    return $SCT.Dual(p_out, t_out)
                end
            end
        end

    # BUG FIX: forward the caller-supplied type `Z` instead of hard-coding
    # `Real`, so overloads against a custom scalar type are generated as the
    # `Z` parameter promises. Behavior is unchanged for the default `Z = Real`.
    exprs_typed = generate_code_hessian_2_to_1_typed(M, f, Z)
    return Expr(:block, expr_tracer_tracer, expr_dual_dual, exprs_typed)
end
# Generate overloads of the binary function `M.f` mixing one tracer/Dual
# argument with one plain `Z` (non-tracer) argument. With one argument held
# constant, propagation reduces to the unary 1-to-1 rule (no cross terms).
function generate_code_hessian_2_to_1_typed(
    M::Symbol, # Symbol indicating Module of f, usually `:Base`
    f::Function, # function to overload
    Z::Type, # external non-tracer-type to overload on
)
    fname = nameof(f)
    is_der1_arg1_zero_g = is_der1_arg1_zero_global(f)
    is_der2_arg1_zero_g = is_der2_arg1_zero_global(f)
    is_der1_arg2_zero_g = is_der1_arg2_zero_global(f)
    is_der2_arg2_zero_g = is_der2_arg2_zero_global(f)

    expr_tracer_type = quote
        function $M.$fname(tx::$SCT.HessianTracer, y::$Z)
            return $SCT.hessian_tracer_1_to_1(tx, $is_der1_arg1_zero_g, $is_der2_arg1_zero_g)
        end
    end
    expr_type_tracer = quote
        function $M.$fname(x::$Z, ty::$SCT.HessianTracer)
            return $SCT.hessian_tracer_1_to_1(ty, $is_der1_arg2_zero_g, $is_der2_arg2_zero_g)
        end
    end

    # Dual-with-constant overloads: skip propagation when both derivatives of
    # the traced argument vanish globally, otherwise check locally at runtime.
    expr_dual_type = if is_der1_arg1_zero_g && is_der2_arg1_zero_g
        quote
            function $M.$fname(dx::D, y::$Z) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(dx)
                return $M.$fname(x, y)
            end
        end
    else
        quote
            function $M.$fname(dx::D, y::$Z) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                x = $SCT.primal(dx)
                p_out = $M.$fname(x, y)

                tx = $SCT.tracer(dx)
                is_der1_arg1_zero = $SCT.is_der1_arg1_zero_local($M.$fname, x, y)
                is_der2_arg1_zero = $SCT.is_der2_arg1_zero_local($M.$fname, x, y)
                t_out = $SCT.hessian_tracer_1_to_1(tx, is_der1_arg1_zero, is_der2_arg1_zero)
                return $SCT.Dual(p_out, t_out)
            end
        end
    end
    expr_type_dual = if is_der1_arg2_zero_g && is_der2_arg2_zero_g
        quote
            function $M.$fname(x::$Z, dy::D) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                y = $SCT.primal(dy)
                return $M.$fname(x, y)
            end
        end
    else
        quote
            function $M.$fname(x::$Z, dy::D) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                y = $SCT.primal(dy)
                p_out = $M.$fname(x, y)

                ty = $SCT.tracer(dy)
                is_der1_arg2_zero = $SCT.is_der1_arg2_zero_local($M.$fname, x, y)
                is_der2_arg2_zero = $SCT.is_der2_arg2_zero_local($M.$fname, x, y)
                t_out = $SCT.hessian_tracer_1_to_1(ty, is_der1_arg2_zero, is_der2_arg2_zero)
                return $SCT.Dual(p_out, t_out)
            end
        end
    end
    return Expr(:block, expr_tracer_type, expr_type_tracer, expr_dual_type, expr_type_dual)
end
## 1-to-2

# Propagate one Hessian tracer into two outputs (e.g. `sincos`), reusing the
# unary propagation rule once per output.
@noinline function hessian_tracer_1_to_2(
    t::T,
    is_der1_out1_zero::Bool,
    is_der2_out1_zero::Bool,
    is_der1_out2_zero::Bool,
    is_der2_out2_zero::Bool,
) where {T<:HessianTracer}
    isemptytracer(t) && return (t, t) # TODO: add test
    first_out = hessian_tracer_1_to_1(t, is_der1_out1_zero, is_der2_out1_zero)
    second_out = hessian_tracer_1_to_1(t, is_der1_out2_zero, is_der2_out2_zero)
    return (first_out, second_out)
end
# Generate method overloads of the unary, two-output function `M.f`
# (e.g. `sincos`) for `HessianTracer` and `Dual` numbers wrapping one.
function generate_code_hessian_1_to_2(M::Symbol, f::Function)
    fname = nameof(f)
    is_der1_out1_zero_g = is_der1_out1_zero_global(f)
    is_der2_out1_zero_g = is_der2_out1_zero_global(f)
    is_der1_out2_zero_g = is_der1_out2_zero_global(f)
    is_der2_out2_zero_g = is_der2_out2_zero_global(f)

    expr_hessiantracer = quote
        function $M.$fname(t::$SCT.HessianTracer)
            return $SCT.hessian_tracer_1_to_2(
                t,
                $is_der1_out1_zero_g,
                $is_der2_out1_zero_g,
                $is_der1_out2_zero_g,
                $is_der2_out2_zero_g,
            )
        end
    end

    # Skip tracer propagation only when all output derivatives vanish globally.
    expr_dual =
        if is_der1_out1_zero_g &&
            is_der2_out1_zero_g &&
            is_der1_out2_zero_g &&
            is_der2_out2_zero_g
            quote
                function $M.$fname(d::D) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                    x = $SCT.primal(d)
                    return $M.$fname(x)
                end
            end
        else
            quote
                function $M.$fname(d::D) where {P,T<:$SCT.HessianTracer,D<:$SCT.Dual{P,T}}
                    x = $SCT.primal(d)
                    p_out1, p_out2 = $M.$fname(x)

                    # BUG FIX: propagate the *tracer* of the Dual. The previous
                    # code passed the Dual `d` itself to `hessian_tracer_1_to_2`,
                    # which only has methods for `HessianTracer` and would
                    # therefore throw a MethodError at runtime.
                    t = $SCT.tracer(d)
                    is_der1_out1_zero = $SCT.is_der1_out1_zero_local($M.$fname, x)
                    is_der2_out1_zero = $SCT.is_der2_out1_zero_local($M.$fname, x)
                    is_der1_out2_zero = $SCT.is_der1_out2_zero_local($M.$fname, x)
                    is_der2_out2_zero = $SCT.is_der2_out2_zero_local($M.$fname, x)
                    t_out1, t_out2 = $SCT.hessian_tracer_1_to_2(
                        t,
                        is_der1_out1_zero,
                        is_der2_out1_zero,
                        is_der1_out2_zero,
                        is_der2_out2_zero,
                    )
                    return ($SCT.Dual(p_out1, t_out1), $SCT.Dual(p_out2, t_out2))
                end
            end
        end
    return Expr(:block, expr_hessiantracer, expr_dual)
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2260 | @static if VERSION >= v"1.8"
# `ifelse` on a tracer condition: the branch cannot be decided while tracing,
# so the result must cover the sparsity patterns of both branches.
function Base.ifelse(::AbstractTracer, x, y)
    size(x) != size(y) && throw(
        DimensionMismatch(
            "Output sizes of x and y in `ifelse(condition, x, y)` don't match in size.",
        ),
    )
    return output_union(x, y)
end

## output union on scalar outputs
function output_union(tx::T, ty::T) where {T<:GradientTracer}
    return T(output_union(pattern(tx), pattern(ty))) # return tracer
end
function output_union(px::P, py::P) where {P<:IndexSetGradientPattern}
    return P(union(gradient(px), gradient(py))) # return pattern
end

function output_union(tx::T, ty::T) where {T<:HessianTracer}
    return T(output_union(pattern(tx), pattern(ty))) # return tracer
end
function output_union(px::P, py::P) where {P<:AbstractHessianPattern}
    return output_union(px, py, shared(P)) # return pattern
end
# Shared storage: both branches reference the same Hessian set, so only the
# gradient index sets need to be merged.
function output_union(px::P, py::P, ::Shared) where {P<:AbstractHessianPattern}
    g_out = union(gradient(px), gradient(py))
    hx, hy = hessian(px), hessian(py)
    hx !== hy && error("Expected shared Hessians, got $hx, $hy.")
    return P(g_out, hx) # return pattern
end
function output_union(px::P, py::P, ::NotShared) where {P<:IndexSetHessianPattern}
    g_out = union(gradient(px), gradient(py))
    h_out = union(hessian(px), hessian(py))
    return P(g_out, h_out) # return pattern
end
function output_union(px::P, py::P, ::NotShared) where {P<:DictHessianPattern}
    g_out = union(gradient(px), gradient(py))
    # `myunion!` mutates its first argument, hence the deepcopy of px's dict.
    h_out = myunion!(deepcopy(hessian(px)), hessian(py))
    return P(g_out, h_out) # return pattern
end

# If only one branch carries tracers, that branch's pattern is the union.
output_union(tx::AbstractTracer, y) = tx
output_union(x, ty::AbstractTracer) = ty

## output union on AbstractArray outputs
function output_union(
    tx::AbstractArray{T}, ty::AbstractArray{T}
) where {T<:AbstractTracer}
    return output_union.(tx, ty)
end
function output_union(tx::AbstractArray{T}, y::AbstractArray) where {T<:AbstractTracer}
    return tx
end
function output_union(x::AbstractArray, ty::AbstractArray{T}) where {T<:AbstractTracer}
    return ty
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 974 | ## Rounding
# Rounding destroys derivative information: plain tracers round to an empty
# tracer, Duals round only their primal value.
Base.round(t::T, ::RoundingMode; kwargs...) where {T<:AbstractTracer} = myempty(T)

function Base.round(
    d::D, mode::RoundingMode; kwargs...
) where {P,T<:AbstractTracer,D<:Dual{P,T}}
    return round(primal(d), mode; kwargs...) # only return primal
end

# `round(T, x)` variants targeting integer-like types behave the same way.
for RR in (Real, Integer, Bool)
    Base.round(::Type{R}, ::T) where {R<:RR,T<:AbstractTracer} = myempty(T)
    function Base.round(::Type{R}, d::D) where {R<:RR,P,T<:AbstractTracer,D<:Dual{P,T}}
        return round(R, primal(d)) # only return primal
    end
end
## Random numbers
# Random samples carry no derivative information, so tracers sample as empty.
Base.rand(::AbstractRNG, ::SamplerType{T}) where {T<:AbstractTracer} = myempty(T)
function Base.rand(
    rng::AbstractRNG, ::SamplerType{D}
) where {P,T<:AbstractTracer,D<:Dual{P,T}}
    # Sample a random primal and wrap it with an empty tracer.
    p = rand(rng, P)
    # This unfortunately can't just return the primal value.
    # Random.jl will otherwise throw "TypeError: in typeassert, expected Dual{P,T}, got a value of type P".
    t = myempty(T)
    return Dual(p, t)
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1690 | dims = (Symbol("1_to_1"), Symbol("2_to_1"), Symbol("1_to_2"))
# Generate both Gradient and Hessian code with one call to `generate_code_X_to_Y`
# For each arity, `generate_code_<arity>(M, f)` concatenates the expressions
# produced by the gradient and Hessian generators into a single block.
for d in dims
    f = Symbol("generate_code_", d)
    g = Symbol("generate_code_gradient_", d)
    h = Symbol("generate_code_hessian_", d)
    @eval function $f(M::Symbol, f::Function)
        expr_g = $g(M, f)
        expr_h = $h(M, f)
        return Expr(:block, expr_g, expr_h)
    end
end

# Allow all `generate_code_*` functions to be called on several operators at once
# by adding a vectorized method that maps over an operator collection.
for d in dims
    for f in (
        Symbol("generate_code_", d),
        Symbol("generate_code_gradient_", d),
        Symbol("generate_code_hessian_", d),
    )
        @eval function $f(M::Symbol, ops::Union{AbstractVector,Tuple})
            exprs = [$f(M, op) for op in ops]
            return Expr(:block, exprs...)
        end
    end
end
# Overloads of 2-argument functions on arbitrary types
# Generate gradient and Hessian overloads of `f` against the external scalar
# type `Z` (e.g. `Real`) in a single expression block.
function generate_code_2_to_1_typed(M::Symbol, f::Function, Z::Type)
    expr_g = generate_code_gradient_2_to_1_typed(M, f, Z)
    expr_h = generate_code_hessian_2_to_1_typed(M, f, Z)
    return Expr(:block, expr_g, expr_h)
end
function generate_code_2_to_1_typed(M::Symbol, ops::Union{AbstractVector,Tuple}, Z::Type)
    exprs = [generate_code_2_to_1_typed(M, op, Z) for op in ops]
    return Expr(:block, exprs...)
end

## Overload operators
# Evaluate the generated expressions to actually define all Base overloads.
eval(generate_code_1_to_1(:Base, ops_1_to_1))
eval(generate_code_2_to_1(:Base, ops_2_to_1))
eval(generate_code_1_to_2(:Base, ops_1_to_2))

## List operators for later testing
test_operators_1_to_1(::Val{:Base}) = ops_1_to_1
test_operators_2_to_1(::Val{:Base}) = ops_2_to_1
test_operators_1_to_2(::Val{:Base}) = ops_1_to_2
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1376 | """
DuplicateVector
Vector that can have duplicate values, for which union is just concatenation.
"""
struct DuplicateVector{T} <: AbstractSet{T}
    # Backing storage; may contain duplicates until `collect` deduplicates it.
    data::Vector{T}

    DuplicateVector{T}(data::AbstractVector) where {T} = new{T}(convert(Vector{T}, data))
    DuplicateVector{T}(x) where {T} = new{T}([convert(T, x)])
    DuplicateVector{T}() where {T} = new{T}(T[])
end

# Both printing paths share the same compact representation.
function Base.show(io::IO, dv::DuplicateVector)
    return print(io, "DuplicateVector($(dv.data))")
end
function Base.show(io::IO, ::MIME"text/plain", dv::DuplicateVector)
    return print(io, "DuplicateVector($(dv.data))")
end

Base.eltype(::Type{DuplicateVector{T}}) where {T} = T

# Counting requires deduplication first. TODO: slow
Base.length(dv::DuplicateVector) = length(collect(dv))

Base.copy(dv::DuplicateVector{T}) where {T} = DuplicateVector{T}(copy(dv.data))

# Union is plain concatenation; duplicates are tolerated by construction.
function Base.union!(lhs::S, rhs::S) where {S<:DuplicateVector}
    append!(lhs.data, rhs.data)
    return lhs
end
Base.union(lhs::S, rhs::S) where {S<:DuplicateVector} = S([lhs.data; rhs.data])

# NOTE: deduplicates the backing vector in place before returning it.
Base.collect(dv::DuplicateVector) = unique!(dv.data)

Base.iterate(dv::DuplicateVector) = iterate(collect(dv))
Base.iterate(dv::DuplicateVector, state::Integer) = iterate(collect(dv), state)

# All ordered pairs (i, j) with i <= j, used for symmetric Hessian patterns.
function product(a::DuplicateVector{T}, b::DuplicateVector{T}) where {T}
    pairs = Tuple{T,T}[]
    for j in b.data, i in a.data
        i <= j && push!(pairs, (i, j))
    end
    return DuplicateVector{Tuple{T,T}}(pairs)
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2867 | """
RecursiveSet
Lazy union of sets.
"""
# A RecursiveSet is a binary tree: leaves hold a concrete `Set` in `s`,
# inner nodes hold `nothing` in `s` and represent the lazy union of their
# two children. Unions are O(1); flattening happens in `collect`.
mutable struct RecursiveSet{T} <: AbstractSet{T}
    # Leaf payload; `nothing` on inner nodes.
    s::Union{Nothing,Set{T}}
    # Children of an inner node; `nothing` on leaves.
    child1::Union{Nothing,RecursiveSet{T}}
    child2::Union{Nothing,RecursiveSet{T}}

    # Leaf constructors.
    function RecursiveSet{T}(s::Union{AbstractSet,AbstractVector}) where {T}
        return new{T}(Set{T}(s), nothing, nothing)
    end
    function RecursiveSet{T}(x) where {T}
        return new{T}(Set{T}(convert(T, x)), nothing, nothing)
    end
    function RecursiveSet{T}() where {T}
        return new{T}(Set{T}(), nothing, nothing)
    end
    # Inner-node constructor: lazy union of two existing sets.
    function RecursiveSet{T}(rs1::RecursiveSet{T}, rs2::RecursiveSet{T}) where {T}
        return new{T}(nothing, rs1, rs2)
    end
end

# Pretty-printer that walks the tree, indenting children by `offset`.
function print_recursiveset(io::IO, rs::RecursiveSet{T}; offset) where {T}
    if !isnothing(rs.s)
        print(io, "RecursiveSet{$T} containing $(rs.s)")
    else
        print(io, "RecursiveSet{$T} with two children:")
        print(io, "\n ", " "^offset, "1: ")
        print_recursiveset(io, rs.child1; offset=offset + 2)
        print(io, "\n ", " "^offset, "2: ")
        print_recursiveset(io, rs.child2; offset=offset + 2)
    end
end
function Base.show(io::IO, rs::RecursiveSet)
    return print_recursiveset(io, rs; offset=0)
end
function Base.show(io::IO, ::MIME"text/plain", rs::RecursiveSet)
    return print_recursiveset(io, rs; offset=0)
end

Base.eltype(::Type{RecursiveSet{T}}) where {T} = T
Base.length(rs::RecursiveSet) = length(collect(rs)) # TODO: slow

function Base.copy(rs::RecursiveSet{T}) where {T}
    if !isnothing(rs.s)
        return RecursiveSet{T}(copy(rs.s))
    else
        # NOTE(review): children are shared, not copied — a later `union!`
        # on a shared child remains visible through this copy; confirm that
        # this aliasing is acceptable for all callers.
        return RecursiveSet{T}(rs.child1, rs.child2)
    end
end

# O(1) lazy union: allocate a fresh inner node pointing at both operands.
function Base.union(rs1::RecursiveSet{T}, rs2::RecursiveSet{T}) where {T}
    return RecursiveSet{T}(rs1, rs2)
end

# In-place union: turn `rs1` into an inner node over (old rs1, rs2).
function Base.union!(rs1::RecursiveSet{T}, rs2::RecursiveSet{T}) where {T}
    new_rs1 = if isnothing(rs1.s)
        RecursiveSet{T}(rs1.child1, rs1.child2)
    else
        RecursiveSet{T}(rs1.s)
    end
    # BUG FIX: `rs1.s` must be cleared when `rs1` becomes an inner node.
    # Previously a leaf `rs1` kept its payload, so `collect` took the leaf
    # branch and silently dropped every element contributed by `rs2`.
    rs1.s = nothing
    rs1.child1 = new_rs1
    rs1.child2 = rs2
    return rs1
end

# Flatten the tree into `accumulator` by recursive traversal.
function collect_aux!(accumulator::Set{T}, rs::RecursiveSet{T})::Nothing where {T}
    if !isnothing(rs.s)
        union!(accumulator, rs.s::Set{T})
    else
        collect_aux!(accumulator, rs.child1::RecursiveSet{T})
        collect_aux!(accumulator, rs.child2::RecursiveSet{T})
    end
    return nothing
end

function Base.collect(rs::RecursiveSet{T}) where {T}
    accumulator = Set{T}()
    collect_aux!(accumulator, rs)
    return collect(accumulator)
end

# Iteration materializes the flattened set first.
Base.iterate(rs::RecursiveSet) = iterate(collect(rs))
Base.iterate(rs::RecursiveSet, i::Integer) = iterate(collect(rs), i)

# All ordered pairs (i, j) with i <= j, used for symmetric Hessian patterns.
function product(a::RecursiveSet{T}, b::RecursiveSet{T}) where {T}
    # TODO: slow
    return RecursiveSet{Tuple{T,T}}(
        vec(collect((i, j) for i in collect(a), j in collect(b) if i <= j))
    )
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 3216 | """
SortedVector
Sorted vector without duplicates, designed for fast set unions with merging.
"""
struct SortedVector{T} <: AbstractSet{T}
    # Invariant: `data` is sorted in ascending order.
    data::Vector{T}

    function SortedVector{T}(data::AbstractVector; sorted=false) where {T}
        # `sorted=true` is a caller promise that allows skipping the sort.
        sorted_data = if sorted
            data
        else
            sort(data)
        end
        return new{T}(convert(Vector{T}, sorted_data))
    end
    # Single-element and empty vectors are trivially sorted.
    function SortedVector{T}(x) where {T}
        return new{T}([convert(T, x)])
    end
    function SortedVector{T}() where {T}
        return new{T}(T[])
    end
end
# Plain vectors convert by sorting (input order is not assumed).
function Base.convert(::Type{SortedVector{T}}, v::Vector{T}) where {T}
    return SortedVector{T}(v; sorted=false)
end

Base.show(io::IO, v::SortedVector) = print(io, "SortedVector($(v.data))")
function Base.show(io::IO, ::MIME"text/plain", dv::SortedVector)
    return print(io, "SortedVector($(dv.data))")
end

Base.eltype(::Type{SortedVector{T}}) where {T} = T
Base.length(v::SortedVector) = length(v.data)
# The backing vector is copied, so the copy is independent of the original.
Base.copy(v::SortedVector{T}) where {T} = SortedVector{T}(copy(v.data); sorted=true)
# Merge two sorted, duplicate-free vectors `left` and `right` into `result`,
# dropping values that occur in both. `result` is resized to fit.
function merge_sorted!(result::Vector{T}, left::Vector{T}, right::Vector{T}) where {T}
    resize!(result, length(left) + length(right))
    li, ri, k = 1, 1, 1
    # Walk both inputs in lockstep while neither is exhausted.
    while li <= length(left) && ri <= length(right)
        l, r = left[li], right[ri]
        if l < r
            result[k] = l
            li += 1
        elseif r < l
            result[k] = r
            ri += 1
        else
            # Equal elements are emitted once; advance past both.
            result[k] = l
            li += 1
            ri += 1
        end
        k += 1
    end
    # At most one of the inputs still has a tail; copy it verbatim.
    while li <= length(left)
        result[k] = left[li]
        li += 1
        k += 1
    end
    while ri <= length(right)
        result[k] = right[ri]
        ri += 1
        k += 1
    end
    # Shrink to the number of elements actually written (duplicates removed).
    resize!(result, k - 1)
    return result
end

# In-place variant: merge `other` into `result` (a defensive copy of the
# current contents is needed because `result` is also the output buffer).
function merge_sorted!(result::Vector{T}, other::Vector{T}) where {T}
    return merge_sorted!(result, copy(result), other)
end

# Out-of-place variant: allocate the output buffer, then merge into it.
function merge_sorted(left::Vector{T}, right::Vector{T}) where {T}
    merged = similar(left, length(left) + length(right))
    merge_sorted!(merged, left, right)
    return merged
end
# Set union via sorted-merge; the result is sorted by construction.
function Base.union(v1::SortedVector{T}, v2::SortedVector{T}) where {T}
    return SortedVector{T}(merge_sorted(v1.data, v2.data); sorted=true)
end
# In-place union mutates the backing vector of `v1`.
function Base.union!(v1::SortedVector{T}, v2::SortedVector{T}) where {T}
    merge_sorted!(v1.data, v2.data)
    return v1
end

# Collection and iteration expose the backing vector directly (no copy).
Base.collect(v::SortedVector) = v.data
Base.iterate(v::SortedVector) = iterate(v.data)
Base.iterate(v::SortedVector, state::Integer) = iterate(v.data, state)

# All ordered pairs (i, j) with i <= j; the generated order is already sorted.
function product(v1::SortedVector{T}, v2::SortedVector{T}) where {T}
    prod_data = vec(collect((i, j) for i in v1.data, j in v2.data if i <= j))
    return SortedVector{Tuple{T,T}}(prod_data; sorted=true)
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1318 | using ADTypes: jacobian_sparsity, hessian_sparsity
using SparseConnectivityTracer
using SparseArrays
using Test
# Global sparsity detection: returned patterns must hold for all input values.
@testset "Global" begin
    sd = TracerSparsityDetector()

    x = rand(10)
    y = zeros(9)
    # Out-of-place and in-place `diff` should yield the same pattern.
    J1 = jacobian_sparsity(diff, x, sd)
    J2 = jacobian_sparsity((y, x) -> y .= diff(x), y, x, sd)
    @test J1 == J2
    @test J1 isa SparseMatrixCSC{Bool,Int}
    @test J2 isa SparseMatrixCSC{Bool,Int}
    @test nnz(J1) == nnz(J2) == 18

    # `sum ∘ diff` is linear, so its Hessian pattern is all zeros.
    H1 = hessian_sparsity(x -> sum(diff(x)), x, sd)
    @test H1 ≈ zeros(10, 10)

    x = rand(5)
    f(x) = x[1] + x[2] * x[3] + 1 / x[4] + 1 * x[5]
    H2 = hessian_sparsity(f, x, sd)
    @test H2 isa SparseMatrixCSC{Bool,Int}
    @test H2 ≈ [
        0 0 0 0 0
        0 0 1 0 0
        0 1 0 0 0
        0 0 0 1 0
        0 0 0 0 0
    ]
end
# Local sparsity detection: patterns are evaluated at the given input point,
# so `max(x[1], x[5])` only contributes its locally active branch.
@testset "Local" begin
    lsd = TracerLocalSparsityDetector()

    fl1(x) = x[1] + x[2] * x[3] + 1 / x[4] + x[2] * max(x[1], x[5])
    # Here x[5] > x[1], so `max` selects x[5] and the interaction is x[2]*x[5].
    HL1 = hessian_sparsity(fl1, [1.0 3.0 5.0 1.0 2.0], lsd)
    @test HL1 ≈ [
        0 0 0 0 0
        0 0 1 0 1
        0 1 0 0 0
        0 0 0 1 0
        0 1 0 0 0
    ]

    # With x[1] > x[5], the interaction switches to x[1]*x[2].
    HL2 = hessian_sparsity(fl1, [4.0 3.0 5.0 1.0 2.0], lsd)
    @test HL2 ≈ [
        0 1 0 0 0
        1 0 1 0 0
        0 1 0 0 0
        0 0 0 1 0
        0 0 0 0 0
    ]
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 100 | using PkgJogger
using SparseConnectivityTracer
PkgJogger.@test_benchmarks SparseConnectivityTracer
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 812 | using ADTypes
using ADTypes: AbstractSparsityDetector
using ReferenceTests
using SparseConnectivityTracer
using SparseConnectivityTracer: DuplicateVector, RecursiveSet, SortedVector
using SparseConnectivityTracerBenchmarks.ODE: Brusselator!
using Test
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("tracers_definitions.jl")
function test_brusselator(detector::AbstractSparsityDetector)
    # The Jacobian pattern of the 6×6 Brusselator right-hand side must match
    # the stored reference pattern.
    n = 6
    brusselator! = Brusselator!(n)
    u = rand(n, n, 2)
    du = similar(u)
    pattern = ADTypes.jacobian_sparsity(brusselator!, du, u, detector)
    @test_reference "references/pattern/jacobian/Brusselator.txt" BitMatrix(pattern)
end
@testset "$T" for T in GRADIENT_TRACERS
detector = TracerSparsityDetector(; gradient_tracer_type=T)
test_brusselator(detector)
yield()
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 6111 | using SparseConnectivityTracer: # 1-to-1
is_der1_zero_global,
is_der2_zero_global,
is_der1_zero_local,
is_der2_zero_local
using SparseConnectivityTracer: # 2-to-1
is_der1_arg1_zero_global,
is_der2_arg1_zero_global,
is_der1_arg2_zero_global,
is_der2_arg2_zero_global,
is_der_cross_zero_global,
is_der1_arg1_zero_local,
is_der2_arg1_zero_local,
is_der1_arg2_zero_local,
is_der2_arg2_zero_local,
is_der_cross_zero_local
using SparseConnectivityTracer: # 1-to-2
is_der1_out1_zero_global,
is_der2_out1_zero_global,
is_der1_out2_zero_global,
is_der2_out2_zero_global,
is_der2_out1_zero_local,
is_der1_out1_zero_local,
is_der1_out2_zero_local,
is_der2_out2_zero_local
using SparseConnectivityTracer: # testing
test_operators_2_to_1,
test_operators_1_to_1,
test_operators_1_to_2
using SpecialFunctions: SpecialFunctions
using NNlib: NNlib
using LogExpFunctions: LogExpFunctions
using NaNMath: NaNMath
using Test
using ForwardDiff: derivative, gradient, hessian
# Second derivative of a scalar function via nested ForwardDiff derivatives.
second_derivative(f, x) = derivative(y -> derivative(f, y), x)
# Shared test tolerances: absolute tolerance for "is this derivative zero?"
# checks and the number of random trials per operator. Declared `const` so
# these script-level globals stay type-stable inside the test helpers.
const DEFAULT_ATOL = 1e-8
const DEFAULT_TRIALS = 20
## Random inputs
# Fallback: draw a random sample from (0, 1).
random_input(op) = rand()
# Operators whose domain requires arguments of magnitude at least 1.
random_input(::Union{typeof(acosh),typeof(acoth),typeof(acsc),typeof(asec)}) = 1 + rand()
# sincosd takes degrees; sample from (0, 180).
random_input(::typeof(sincosd)) = 180 * rand()
# LogExpFunctions.jl
random_input(::typeof(LogExpFunctions.log1mexp)) = -rand() # log1mexp(x) is defined for x < 0
random_input(::typeof(LogExpFunctions.log2mexp)) = -rand() # log2mexp(x) is defined for x < 0
random_input(::typeof(LogExpFunctions.logitexp)) = -rand() # logitexp(x) is defined for x < 0
random_input(::typeof(LogExpFunctions.logit1mexp)) = -rand() # logit1mexp(x) is defined for x < 0
# NaNMath.jl
random_input(::typeof(NaNMath.acosh)) = 1 + rand() # Range: [1, ∞)
# Binary operators draw each argument independently via `random_input`.
random_first_input(op) = random_input(op)
random_second_input(op) = random_input(op)
## Skip tests on functions that don't support ForwardDiff's Dual numbers
# `!` cannot be differentiated with Dual numbers, so always report success.
correct_classification_1_to_1(op::typeof(!), x; atol) = true
## Derivatives and special cases
# First and second ForwardDiff derivatives of a unary operator at `x`.
function both_derivatives_1_to_1(op, x)
    return (derivative(op, x), second_derivative(op, x))
end
# `big` and `widen` behave like the identity on values.
both_derivatives_1_to_1(::Union{typeof(big),typeof(widen)}, x) =
    both_derivatives_1_to_1(identity, x)
# These ignore the value of `x`, so both derivatives vanish.
both_derivatives_1_to_1(::Union{typeof(floatmin),typeof(floatmax),typeof(maxintfloat)}, x) =
    both_derivatives_1_to_1(zero, x)
# Gradient and Hessian of the splatted binary operator at the point [x, y].
function both_derivatives_2_to_1(op, x, y)
    splatted = Base.splat(op)
    point = [x, y]
    return (gradient(splatted, point), hessian(splatted, point))
end
# First and second derivatives of a two-output operator, as length-2 vectors.
function both_derivatives_1_to_2(op, x)
    # Wrap the operator as a vector-valued function so ForwardDiff can
    # differentiate both outputs at once.
    function as_vector(z)
        out = op(z)
        return [out[1], out[2]]
    end
    return (derivative(as_vector, x), second_derivative(as_vector, x))
end
## 1-to-1
# A derivative that the classification declares zero (globally or locally at
# `x`) must actually vanish there; otherwise the classification is wrong.
function correct_classification_1_to_1(op, x; atol)
    first_der, second_der = both_derivatives_1_to_1(op, x)
    der1_classified_zero = is_der1_zero_global(op) | is_der1_zero_local(op, x)
    der2_classified_zero = is_der2_zero_global(op) | is_der2_zero_local(op, x)
    der1_classified_zero && !isapprox(first_der, 0; atol) && return false
    der2_classified_zero && !isapprox(second_der, 0; atol) && return false
    return true
end
@testset verbose = true "1-to-1" begin
@testset "$m" for m in (Base, SpecialFunctions, NNlib, LogExpFunctions, NaNMath)
@testset "$op" for op in test_operators_1_to_1(Val(Symbol(m)))
@test all(
correct_classification_1_to_1(op, random_input(op); atol=DEFAULT_ATOL) for
_ in 1:DEFAULT_TRIALS
)
yield()
end
end
end;
## 2-to-1
# Check every classified-zero partial derivative of a binary operator against
# the actual gradient and Hessian at (x, y).
function correct_classification_2_to_1(op, x, y; atol)
    grad, hess = both_derivatives_2_to_1(op, x, y)
    ∂f∂x, ∂f∂y = grad[1], grad[2]
    ∂²f∂x², ∂²f∂y², ∂²f∂x∂y = hess[1, 1], hess[2, 2], hess[1, 2]
    # Pair each "classified as zero" flag with the derivative it constrains,
    # in the same order as the original if/elseif chain.
    checks = (
        (is_der1_arg1_zero_global(op) | is_der1_arg1_zero_local(op, x, y), ∂f∂x),
        (is_der2_arg1_zero_global(op) | is_der2_arg1_zero_local(op, x, y), ∂²f∂x²),
        (is_der1_arg2_zero_global(op) | is_der1_arg2_zero_local(op, x, y), ∂f∂y),
        (is_der2_arg2_zero_global(op) | is_der2_arg2_zero_local(op, x, y), ∂²f∂y²),
        (is_der_cross_zero_global(op) | is_der_cross_zero_local(op, x, y), ∂²f∂x∂y),
    )
    for (classified_zero, value) in checks
        classified_zero && !isapprox(value, 0; atol) && return false
    end
    return true
end
@testset verbose = true "2-to-1" begin
@testset "$m" for m in (Base, SpecialFunctions, NNlib, LogExpFunctions, NaNMath)
@testset "$op" for op in test_operators_2_to_1(Val(Symbol(m)))
@test all(
correct_classification_2_to_1(
op, random_first_input(op), random_second_input(op); atol=DEFAULT_ATOL
) for _ in 1:DEFAULT_TRIALS
)
yield()
end
end
end;
## 1-to-2
# Check every classified-zero derivative of a two-output operator against the
# actual first and second derivatives at `x`.
function correct_classification_1_to_2(op, x; atol)
    first_ders, second_ders = both_derivatives_1_to_2(op, x)
    ∂f₁∂x, ∂f₂∂x = first_ders[1], first_ders[2]
    ∂²f₁∂x², ∂²f₂∂x² = second_ders[1], second_ders[2]
    # Same ordering of checks as the original if/elseif chain.
    checks = (
        (is_der1_out1_zero_global(op) | is_der1_out1_zero_local(op, x), ∂f₁∂x),
        (is_der2_out1_zero_global(op) | is_der2_out1_zero_local(op, x), ∂²f₁∂x²),
        (is_der1_out2_zero_global(op) | is_der1_out2_zero_local(op, x), ∂f₂∂x),
        (is_der2_out2_zero_global(op) | is_der2_out2_zero_local(op, x), ∂²f₂∂x²),
    )
    for (classified_zero, value) in checks
        classified_zero && !isapprox(value, 0; atol) && return false
    end
    return true
end
@testset verbose = true "1-to-2" begin
@testset "$m" for m in (Base, SpecialFunctions, NNlib, LogExpFunctions, NaNMath)
@testset "$op" for op in test_operators_1_to_2(Val(Symbol(m)))
@test all(
correct_classification_1_to_2(op, random_input(op); atol=DEFAULT_ATOL) for
_ in 1:DEFAULT_TRIALS
)
yield()
end
end
end;
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 862 | using ComponentArrays
using SparseConnectivityTracer
using Test
# Elementwise square: plain-vector and ComponentVector variants (out-of-place).
f(x::AbstractVector) = abs2.(x)
f_comp(x::ComponentVector) = ComponentVector(; a=abs2.(x.a), b=abs2.(x.b))
# In-place counterparts writing into a preallocated output.
function f!(y::AbstractVector, x::AbstractVector)
    y .= abs2.(x)
    return y
end
function f_comp!(y::ComponentVector, x::ComponentVector)
    y.a .= abs2.(x.a)
    y.b .= abs2.(x.b)
    return y
end
# ComponentVector inputs must yield exactly the same sparsity patterns as the
# equivalent flat vectors: Jacobians (out- and in-place) and Hessians.
x_comp = ComponentVector(; a=rand(2), b=rand(3))
y_comp = ComponentVector(; a=rand(2), b=rand(3))
x = Vector(x_comp)
y = Vector(y_comp)
detector = TracerSparsityDetector()
@test jacobian_sparsity(f_comp, x_comp, detector) == jacobian_sparsity(f, x, detector)
@test jacobian_sparsity(f_comp!, similar(y_comp), x_comp, detector) ==
    jacobian_sparsity(f!, similar(y), x, detector)
@test hessian_sparsity(sum ∘ f_comp, x_comp, detector) ==
    hessian_sparsity(sum ∘ f, x, detector)
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2197 | using ADTypes
using ADTypes: AbstractSparsityDetector
using Flux: Conv, relu
using ReferenceTests
using SparseConnectivityTracer
using SparseConnectivityTracer: DuplicateVector, RecursiveSet, SortedVector
using Test
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("tracers_definitions.jl")
const INPUT_FLUX = reshape(
[
0.2677768300138966
1.1934917429169245
-1.0496617141319355
0.456668782925957
0.09678342859916624
-0.7962039825333248
-0.6138709208787495
-0.6809396498148278
0.4938230574627916
0.7847107012511034
0.7423059724033608
-0.6914378396432983
1.2062310319178624
-0.19647670394840708
0.10708057449244994
-0.4787927739226245
0.045072020113458774
-1.219617669693635
],
3,
3,
2,
1,
) # WHCN
const WEIGHTS_FLUX = reshape(
[
0.311843398150865
0.488663701947109
0.648497438559604
-0.41742794246238
0.174865988551499
1.061745573803265
-0.72434245370475
-0.05213963181095
],
2,
2,
2,
1,
)
const BIAS_FLUX = [0.1]
const LAYER = Conv(WEIGHTS_FLUX, BIAS_FLUX) # Conv((2, 2), 2 => 1)
const LAYER_RELU = Conv(WEIGHTS_FLUX, BIAS_FLUX, relu) # Conv((2, 2), 2 => 1, relu)
function test_flux_conv(detector::AbstractSparsityDetector)
    # Pattern of the plain (linear) convolution layer vs. the stored reference.
    pattern = jacobian_sparsity(LAYER, INPUT_FLUX, detector)
    @test_reference "references/pattern/jacobian/NNlib/conv.txt" BitMatrix(pattern)
end
function test_flux_conv_local(detector::AbstractSparsityDetector)
    # Pattern of the relu convolution layer vs. the stored reference; only
    # exercised with the local detector in the testsets of this file.
    pattern = jacobian_sparsity(LAYER_RELU, INPUT_FLUX, detector)
    @test_reference "references/pattern/jacobian/NNlib/conv_relu.txt" BitMatrix(pattern)
end
@testset "Global" begin
@testset "$T" for T in GRADIENT_TRACERS
detector = TracerSparsityDetector(; gradient_tracer_type=T)
test_flux_conv(detector)
end
end
@testset "Local" begin
@testset "$T" for T in GRADIENT_TRACERS
detector = TracerLocalSparsityDetector(; gradient_tracer_type=T)
test_flux_conv(detector)
test_flux_conv_local(detector)
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2639 | using SparseConnectivityTracer
using Test
using JuliaFormatter: JuliaFormatter
using Aqua: Aqua
using JET: JET
using ExplicitImports: ExplicitImports
# Load package extensions so they get tested by ExplicitImports.jl
using DataInterpolations: DataInterpolations
using NaNMath: NaNMath
using NNlib: NNlib
using SpecialFunctions: SpecialFunctions
@testset "Code formatting" begin
@info "...with JuliaFormatter.jl"
@test JuliaFormatter.format(SparseConnectivityTracer; verbose=false, overwrite=false)
end
@testset "Aqua tests" begin
@info "...with Aqua.jl"
Aqua.test_all(
SparseConnectivityTracer;
ambiguities=false,
deps_compat=(check_extras=false,),
stale_deps=(ignore=[:Requires],),
persistent_tasks=false,
)
end
@testset "JET tests" begin
@info "...with JET.jl"
JET.test_package(SparseConnectivityTracer; target_defined_modules=true)
end
@testset "ExplicitImports tests" begin
@info "...with ExplicitImports.jl"
@testset "Improper implicit imports" begin
@test ExplicitImports.check_no_implicit_imports(SparseConnectivityTracer) ===
nothing
end
@testset "Improper explicit imports" begin
@test ExplicitImports.check_no_stale_explicit_imports(
SparseConnectivityTracer;
ignore=(
# Used in code generation, which ExplicitImports doesn't pick up
:AbstractTracer,
:AkimaInterpolation,
:BSplineApprox,
:BSplineInterpolation,
:CubicHermiteSpline,
:CubicSpline,
:LagrangeInterpolation,
:QuadraticInterpolation,
:QuadraticSpline,
:QuinticHermiteSpline,
),
) === nothing
@test ExplicitImports.check_all_explicit_imports_via_owners(
SparseConnectivityTracer
) === nothing
# TODO: test in the future when `public` is more common
# @test ExplicitImports.check_all_explicit_imports_are_public(
# SparseConnectivityTracer
# ) === nothing
end
@testset "Improper qualified accesses" begin
@test ExplicitImports.check_all_qualified_accesses_via_owners(
SparseConnectivityTracer
) === nothing
@test ExplicitImports.check_no_self_qualified_accesses(SparseConnectivityTracer) ===
nothing
# TODO: test in the future when `public` is more common
# @test ExplicitImports.check_all_qualified_accesses_are_public(
# SparseConnectivityTracer
# ) === nothing
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2831 | using Dates: now
using LinearAlgebra
using OptimizationProblems
using SparseArrays
using Test
using SparseConnectivityTracerBenchmarks.Optimization:
compute_jac_and_hess_sparsity_sct,
compute_jac_and_hess_sparsity_and_value_jump,
optimization_problem_names
"""
    compare_patterns(truth; sct, jump)

Compare two Boolean sparsity patterns of the same matrix, asserting (via
`@test`) that every entry where they disagree is numerically zero in the
ground-truth matrix `truth`. Return a short message describing whether the
SCT pattern is a subset, superset, or neither of the JuMP pattern.
"""
function compare_patterns(
    truth::AbstractMatrix{<:Real}; sct::AbstractMatrix{Bool}, jump::AbstractMatrix{Bool}
)
    difference = jump - sct
    if nnz(difference) > 0
        # All pattern differences must sit on numerical zeros of the truth.
        rows, cols, _ = findnz(difference)
        truth_coeffs = [truth[r, c] for (r, c) in zip(rows, cols)]
        @test maximum(abs, truth_coeffs) < 1e-7
    end
    nnz_sct = nnz(sct)
    nnz_jump = nnz(jump)
    diagonal = (difference == Diagonal(difference)) ? "[diagonal difference only]" : ""
    if all(>(0), nonzeros(difference))
        return "SCT ($nnz_sct nz) ⊂ JuMP ($nnz_jump nz) $diagonal"
    elseif all(<(0), nonzeros(difference))
        return "SCT ($nnz_sct nz) ⊃ JuMP ($nnz_jump nz) $diagonal"
    else
        return "SCT ($nnz_sct nz) ≠ JuMP ($nnz_jump nz) $diagonal"
    end
end
#=
Please look at the warnings displayed at the end.
=#
# Collected (name, message) pairs for problems where SCT and JuMP disagree.
jac_inconsistencies = []
hess_inconsistencies = []
@testset "$name" for name in optimization_problem_names()
@info "$(now()) - $name"
(jac_sparsity_sct, hess_sparsity_sct) = compute_jac_and_hess_sparsity_sct(name)
((jac_sparsity_jump, jac), (hess_sparsity_jump, hess)) = compute_jac_and_hess_sparsity_and_value_jump(
name
)
@testset verbose = true "Jacobian comparison" begin
if jac_sparsity_sct == jac_sparsity_jump
@test jac_sparsity_sct == jac_sparsity_jump
else
@test_broken jac_sparsity_sct == jac_sparsity_jump
message = compare_patterns(jac; sct=jac_sparsity_sct, jump=jac_sparsity_jump)
@warn "Inconsistency for Jacobian of $name: $message"
push!(jac_inconsistencies, (name, message))
end
end
@testset verbose = true "Hessian comparison" begin
if hess_sparsity_sct == hess_sparsity_jump
@test hess_sparsity_sct == hess_sparsity_jump
else
@test_broken hess_sparsity_sct == hess_sparsity_jump
message = compare_patterns(hess; sct=hess_sparsity_sct, jump=hess_sparsity_jump)
@warn "Inconsistency for Hessian of $name: $message"
push!(hess_inconsistencies, (name, message))
end
end
yield()
end;
# Re-emit all recorded mismatches at the end of the run for visibility.
if !isempty(jac_inconsistencies) || !isempty(hess_inconsistencies)
    @warn "Inconsistencies were detected"
    for (name, message) in jac_inconsistencies
        @warn "Inconsistency for Jacobian of $name: $message"
    end
    for (name, message) in hess_inconsistencies
        @warn "Inconsistency for Hessian of $name: $message"
    end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 3772 | using Pkg
Pkg.develop(;
path=joinpath(@__DIR__, "..", "benchmark", "SparseConnectivityTracerBenchmarks")
)
using SparseConnectivityTracer
using Compat: pkgversion
using Documenter: Documenter, DocMeta
using Test
DocMeta.setdocmeta!(
SparseConnectivityTracer,
:DocTestSetup,
:(using SparseConnectivityTracer);
recursive=true,
)
GROUP = get(ENV, "JULIA_SCT_TEST_GROUP", "Core")
@testset verbose = true "SparseConnectivityTracer.jl" begin
if GROUP in ("Core", "All")
if VERSION >= v"1.10"
@testset verbose = true "Linting" begin
@info "Testing linting..."
include("linting.jl")
end
end
end
if GROUP in ("Core", "All")
@testset "Doctests" begin
Documenter.doctest(SparseConnectivityTracer)
end
end
if GROUP in ("Core", "All")
@testset verbose = true "Set types" begin
@testset "Correctness" begin
include("settypes/correctness.jl")
end
@testset "SortedVector" begin
include("settypes/sortedvector.jl")
end
end
end
if GROUP in ("Core", "All")
@info "Testing operator classification..."
@testset "Operator classification" begin
include("classification.jl")
end
end
if GROUP in ("Core", "All")
@info "Testing simple examples..."
@testset verbose = true "Simple examples" begin
@testset "Tracer Construction" begin
include("test_constructors.jl")
end
@testset "GradientTracer" begin
include("test_gradient.jl")
end
@testset "HessianTracer" begin
include("test_hessian.jl")
end
@testset "Array overloads" begin
include("test_arrays.jl")
end
@testset "ComponentArrays" begin
include("componentarrays.jl")
end
end
end
if GROUP in ("Core", "All")
@info "Testing package extensions..."
@testset verbose = true "Package extensions" begin
for ext in (:LogExpFunctions, :NaNMath, :NNlib, :SpecialFunctions)
@testset "$ext" begin
@info "...$ext"
include("ext/test_$ext.jl")
end
end
# Some extensions are only loaded in newer Julia releases
if VERSION >= v"1.10"
for ext in (:DataInterpolations,)
@testset "$ext" begin
@info "...$ext"
include("ext/test_$ext.jl")
end
end
end
end
end
if GROUP in ("Core", "All")
@info "Testing real-world examples..."
@testset verbose = true "Real-world examples" begin
@testset "Brusselator" begin
include("brusselator.jl")
end
if pkgversion(NNlib) >= v"0.9.18" # contains NNlib PR #592
@testset "Flux.jl" begin
include("flux.jl")
end
end
end
end
if GROUP in ("Core", "All")
@info "Testing ADTypes integration..."
@testset "ADTypes integration" begin
include("adtypes.jl")
end
end
if GROUP in ("Benchmarks", "All")
@info "Testing benchmarks correctness..."
@testset "Benchmarks correctness" begin
include("benchmarks_correctness.jl")
end
end
if GROUP in ("NLPModels", "All")
@info "Testing NLPModels..."
@testset "NLPModels" begin
include("nlpmodels.jl")
end
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 11314 | import SparseConnectivityTracer as SCT
using SparseConnectivityTracer
using SparseConnectivityTracer: GradientTracer, IndexSetGradientPattern
using Test
using LinearAlgebra: Symmetric, Diagonal, diagind
using LinearAlgebra: det, logdet, logabsdet, norm, opnorm
using LinearAlgebra: eigen, eigmax, eigmin
using LinearAlgebra: inv, pinv
using SparseArrays: sparse, spdiagm
#=========================#
# Weird function wrappers #
#=========================#
# These print better stack traces than lambda functions.
# Call `f` on a sparse copy of the input.
struct SparsifyInput{F}
    f::F
end
(s::SparsifyInput)(x) = s.f(sparse(x))
# Call `f` on a sparse diagonal matrix built from the input vector.
struct SpdiagmifyInput{F}
    f::F
end
(s::SpdiagmifyInput)(x) = s.f(spdiagm(x))
# Reduce `f`'s array output to a scalar via `sum`.
struct SumOutputs{F}
    f::F
end
(s::SumOutputs)(x) = sum(s.f(x))
# Named wrappers fixing the `p` argument of norms, selecting one output of
# `logabsdet`, and fixing matrix-power exponents.
norm1(A) = norm(A, 1)
norm2(A) = norm(A, 2)
norminf(A) = norm(A, Inf)
opnorm1(A) = opnorm(A, 1)
opnorm2(A) = opnorm(A, 2)
opnorminf(A) = opnorm(A, Inf)
logabsdet_first(A) = first(logabsdet(A))
logabsdet_last(A) = last(logabsdet(A))
pow0(A) = A^0
pow3(A) = A^3
#===================#
# Testing utilities #
#===================#
detector = TracerSparsityDetector()
# Predicates over whole pattern matrices.
allone(A) = all(isone, A)
allzero(A) = all(iszero, A)
# Short-hand for Jacobian pattern of `x -> sum(f(A))`
Jsum(f, A) = jacobian_sparsity(SumOutputs(f), A, detector)
# Test whether all entries in Jacobian are zero
testJ0(f, A) = @testset "Jacobian" begin
    @test allzero(Jsum(f, A))
end
# Test whether all entries in Jacobian are one where inputs were non-zero.
testJ1(f, A) = @testset "Jacobian" begin
    @test allone(Jsum(f, A))
end
# Diagonal input: only entries on `diagind(A)` may appear in the pattern.
function testJ1(f, A::Diagonal)
    @testset "Jacobian" begin
        jac = Jsum(f, A)
        di = diagind(A)
        for (i, x) in enumerate(jac)
            if i in di
                @test isone(x)
            else
                @test iszero(x)
            end
        end
    end
end
# Short-hand for Hessian pattern of `x -> sum(f(A))`
Hsum(f, A) = hessian_sparsity(SumOutputs(f), A, detector)
# Test whether all entries in Hessian are zero
testH0(f, A) = @testset "Hessian" begin
    @test allzero(Hsum(f, A))
end
# Test whether all entries in Hessian are one where inputs were non-zero.
testH1(f, A) = @testset "Hessian" begin
    @test allone(Hsum(f, A))
end
# Diagonal input: Hessian entries may only couple diagonal input positions.
function testH1(f, A::Diagonal)
    @testset "Hessian" begin
        hess = Hsum(f, A)
        di = diagind(A)
        for I in CartesianIndices(A)
            i, j = Tuple(I)
            x = hess[I]
            if i in di && j in di
                @test isone(x)
            else
                @test iszero(x)
            end
        end
    end
end
#===================#
# Arrays to test on #
#===================#
# Matrix fixtures: dense square and rectangular, symmetric, and diagonal.
mat33 = rand(3, 3)
mat34 = rand(3, 4)
sym33 = Symmetric(rand(3, 3))
dia33 = Diagonal(rand(3))
# Groupings used by the testsets below.
ALL_MATRICES = (mat33, mat34, sym33, dia33)
SQUARE_MATRICES = (mat33, sym33, dia33)
NONDIAG_MATRICES = (mat33, mat34, sym33)
NONDIAG_SQUARE_MATRICES = (mat33, sym33)
DIAG_MATRICES = (dia33,)
DIAG_SQUARE_MATRICES = (dia33,)
# Human-readable label for test set names.
arrayname(A) = "$(typeof(A)) $(size(A))"
#=================#
# TEST START HERE #
#=================#
@testset "Scalar functions" begin
@testset "det $(arrayname(A))" for A in NONDIAG_MATRICES
testJ1(det, A)
testH1(det, A)
end
@testset "det $(arrayname(A))" for A in DIAG_MATRICES
@test Jsum(det, A) == [1 0 0 0 1 0 0 0 1;]
@test Hsum(det, A) == [
0 0 0 0 1 0 0 0 1
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
1 0 0 0 1 0 0 0 0
]
end
@testset "logdet $(arrayname(A))" for A in ALL_MATRICES
testJ1(logdet, A)
testH1(logdet, A)
end
@testset "norm(A, 1) $(arrayname(A))" for A in ALL_MATRICES
testJ1(norm1, A)
testH0(norm1, A)
end
@testset "norm(A, 2) $(arrayname(A))" for A in ALL_MATRICES
testJ1(norm2, A)
testH1(norm2, A)
end
@testset "norm(A, Inf) $(arrayname(A))" for A in ALL_MATRICES
testJ1(norminf, A)
testH0(norminf, A)
end
@testset "eigmax $(arrayname(A))" for A in ALL_MATRICES
testJ1(eigmax, A)
testH1(eigmax, A)
end
@testset "eigmin $(arrayname(A))" for A in ALL_MATRICES
testJ1(eigmin, A)
testH1(eigmin, A)
end
@testset "opnorm(A, 1) $(arrayname(A))" for A in ALL_MATRICES
testJ1(opnorm1, A)
testH0(opnorm1, A)
end
@testset "opnorm(A, 2) $(arrayname(A))" for A in ALL_MATRICES
testJ1(opnorm2, A)
testH1(opnorm2, A)
end
@testset "opnorm(A, Inf) $(arrayname(A))" for A in ALL_MATRICES
testJ1(opnorminf, A)
testH0(opnorminf, A)
end
@testset "first(logabsdet(A)) $(arrayname(A))" for A in ALL_MATRICES
testJ1(logabsdet_first, A)
testH1(logabsdet_first, A)
end
@testset "last(logabsdet(A)) $(arrayname(A))" for A in ALL_MATRICES
testJ0(logabsdet_last, A)
testH0(logabsdet_last, A)
end
if VERSION >= v"1.9"
@testset "`SparseMatrixCSC` (3×3)" begin
A = rand(3, 3)
v = rand(3)
# TODO: this is a temporary solution until sparse matrix inputs are supported (#28)
@testset "det" begin
testJ1(SparsifyInput(det), A)
testH1(SparsifyInput(det), A)
testJ1(SpdiagmifyInput(det), v)
testH1(SpdiagmifyInput(det), v)
end
@testset "logdet" begin
testJ1(SparsifyInput(logdet), A)
testH1(SparsifyInput(logdet), A)
testJ1(SpdiagmifyInput(logdet), v)
testH1(SpdiagmifyInput(logdet), v)
end
@testset "norm" begin
testJ1(SparsifyInput(norm), A)
testH1(SparsifyInput(norm), A)
testJ1(SpdiagmifyInput(norm), v)
testH1(SpdiagmifyInput(norm), v)
end
@testset "eigmax" begin
testJ1(SparsifyInput(eigmax), A)
testH1(SparsifyInput(eigmax), A)
testJ1(SpdiagmifyInput(eigmax), v)
testH1(SpdiagmifyInput(eigmax), v)
end
@testset "eigmin" begin
testJ1(SparsifyInput(eigmin), A)
testH1(SparsifyInput(eigmin), A)
testJ1(SpdiagmifyInput(eigmin), v)
testH1(SpdiagmifyInput(eigmin), v)
end
@testset "opnorm(x, 1)" begin
testJ1(SparsifyInput(opnorm1), A)
testH0(SparsifyInput(opnorm1), A)
testJ1(SpdiagmifyInput(opnorm1), v)
testH0(SpdiagmifyInput(opnorm1), v)
end
@testset "first(logabsdet(x))" begin
testJ1(SparsifyInput(logabsdet_first), A)
testH1(SparsifyInput(logabsdet_first), A)
testJ1(SpdiagmifyInput(logabsdet_first), v)
testH1(SpdiagmifyInput(logabsdet_first), v)
end
@testset "last(logabsdet(x))" begin
testJ0(SparsifyInput(logabsdet_last), A)
testH0(SparsifyInput(logabsdet_last), A)
testJ0(SpdiagmifyInput(logabsdet_last), v)
testH0(SpdiagmifyInput(logabsdet_last), v)
end
end
end
end
@testset "Matrix-valued functions" begin
# Functions that only work on square matrices
@testset "inv $(arrayname(A))" for A in NONDIAG_SQUARE_MATRICES
testJ1(inv, A)
testH1(inv, A)
end
@testset "inv $(arrayname(A))" for A in DIAG_SQUARE_MATRICES
@test Hsum(inv, A) == [
1 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 1 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 1
]
end
@testset "exp $(arrayname(A))" for A in NONDIAG_SQUARE_MATRICES
testJ1(exp, A)
testH1(exp, A)
end
@testset "exp $(arrayname(A))" for A in DIAG_SQUARE_MATRICES
testJ1(exp, A)
@test Hsum(exp, A) == [
1 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 1 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 1
]
end
@testset "pow0 $(arrayname(A))" for A in SQUARE_MATRICES
testJ0(pow0, A)
testH0(pow0, A)
end
@testset "pow3 $(arrayname(A))" for A in SQUARE_MATRICES
testJ1(pow3, A)
testH1(pow3, A)
end
if VERSION >= v"1.9"
A = rand(3, 3)
v = rand(3)
@testset "`SparseMatrixCSC` (3×3)" begin
# TODO: this is a temporary solution until sparse matrix inputs are supported (#28)
testJ1(SparsifyInput(exp), A)
testH1(SparsifyInput(exp), A)
testJ0(SparsifyInput(pow0), A)
testH0(SparsifyInput(pow0), A)
testJ1(SparsifyInput(pow3), A)
testH1(SparsifyInput(pow3), A)
testJ1(SpdiagmifyInput(exp), v)
testH1(SpdiagmifyInput(exp), v)
if VERSION >= v"1.10"
# issue with custom _mapreducezeros in SparseArrays on Julia 1.6
testJ0(SpdiagmifyInput(pow0), v)
testH0(SpdiagmifyInput(pow0), v)
testJ1(SpdiagmifyInput(pow3), v)
testH1(SpdiagmifyInput(pow3), v)
end
end
end
# Functions that work on all matrices
@testset "pinv $(arrayname(A))" for A in ALL_MATRICES
testJ1(pinv, A)
testH1(pinv, A)
end
if VERSION >= v"1.9"
@testset "`SparseMatrixCSC` (3×4)" begin
testJ1(SparsifyInput(pinv), rand(3, 4))
testH1(SparsifyInput(pinv), rand(3, 4))
end
end
end
S = BitSet
P = IndexSetGradientPattern{Int,S}
TG = GradientTracer{P}
@testset "Matrix division" begin
t1 = TG(P(S([1, 3, 4])))
t2 = TG(P(S([2, 4])))
t3 = TG(P(S([8, 9])))
t4 = TG(P(S([8, 9])))
A = [t1 t2; t3 t4]
s_out = S([1, 2, 3, 4, 8, 9])
x = rand(2)
b = A \ x
@test all(t -> SCT.gradient(t) == s_out, b)
end
@testset "Eigenvalues" begin
t1 = TG(P(S([1, 3, 4])))
t2 = TG(P(S([2, 4])))
t3 = TG(P(S([8, 9])))
t4 = TG(P(S([8, 9])))
A = [t1 t2; t3 t4]
s_out = S([1, 2, 3, 4, 8, 9])
values, vectors = eigen(A)
@test size(values) == (2,)
@test size(vectors) == (2, 2)
@test all(t -> SCT.gradient(t) == s_out, values)
@test all(t -> SCT.gradient(t) == s_out, vectors)
end
if VERSION >= v"1.9"
@testset "SparseMatrixCSC construction" begin
t1 = TG(P(S(1)))
t2 = TG(P(S(2)))
t3 = TG(P(S(3)))
SA = sparse([t1 t2; t3 0])
@test length(SA.nzval) == 3
res = opnorm(SA, 1)
@test SCT.gradient(res) == S([1, 2, 3])
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 6609 | # Test construction and conversions of internal tracer types
using SparseConnectivityTracer: AbstractTracer, GradientTracer, HessianTracer, Dual
using SparseConnectivityTracer: primal, tracer, isemptytracer, myempty, name
using SparseConnectivityTracer: IndexSetGradientPattern
using Test
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("tracers_definitions.jl")
function test_nested_duals(::Type{T}) where {T<:AbstractTracer}
    # Wrapping a Dual's primal inside another Dual must be rejected.
    inner = Dual(1.0, myempty(T))
    @test_throws ErrorException Dual(inner, myempty(T))
end
# Constant-producing functions on a tracer type must return an empty tracer
# of that type...
function test_constant_functions(::Type{T}) where {T<:AbstractTracer}
    @testset "$f" for f in (
        zero, one, oneunit, typemin, typemax, eps, floatmin, floatmax, maxintfloat
    )
        t = f(T)
        @test isa(t, T)
        @test isemptytracer(t)
    end
end
# ...while on a Dual type they must strip the tracer and return the primal
# type's constant.
function test_constant_functions(::Type{D}) where {P,T,D<:Dual{P,T}}
    @testset "$f" for f in (
        zero, one, oneunit, typemin, typemax, eps, floatmin, floatmax, maxintfloat
    )
        out = f(D)
        @test out isa P
        @test out == f(P)
    end
end
# `big`/`widen`/`float` must act as identities on tracer types, and on Dual
# types must widen only the primal type.
function test_type_conversion_functions(::Type{T}) where {T}
    @testset "$f" for f in (big, widen, float)
        test_type_conversion_functions(T, f)
    end
end
function test_type_conversion_functions(::Type{T}, f::Function) where {T<:AbstractTracer}
    @test f(T) == T
end
function test_type_conversion_functions(::Type{D}, f::Function) where {P,T,D<:Dual{P,T}}
    @testset "Primal type $P_IN" for P_IN in (Int, Float32, Irrational)
        P_OUT = f(P_IN)
        @test f(Dual{P_IN,T}) == P_OUT # NOTE: this tests Dual{P_IN,T}, not Dual{P,T}
    end
end
# Casting a tracer or a plain number to a tracer type must yield an empty
# tracer of that type.
function test_type_casting(::Type{T}) where {T<:AbstractTracer}
    t_in = myempty(T)
    @testset "$T to $T" begin
        t_out = T(t_in)
        @test t_out isa T
        @test isemptytracer(t_out)
    end
    @testset "$N to $T" for N in (Int, Float32, Irrational)
        t_out = T(one(N))
        @test t_out isa T
        @test isemptytracer(t_out)
    end
end
# Casting to a Dual type must preserve (convert) the primal and attach an
# empty tracer.
function test_type_casting(::Type{D}) where {P,T,D<:Dual{P,T}}
    d_in = Dual(one(P), myempty(T))
    @testset "$(name(D)) to $(name(D))" begin
        d_out = D(d_in)
        @test primal(d_out) == primal(d_in)
        @test tracer(d_out) isa T
        @test isemptytracer(d_out)
    end
    @testset "$P2 to $(name(D))" for P2 in (Int, Float32, Irrational)
        p_in = one(P2)
        d_out = D(p_in)
        @test primal(d_out) == P(p_in)
        @test tracer(d_out) isa T
        @test isemptytracer(d_out)
    end
end
# All `similar` method signatures must produce arrays with tracer eltype and
# the requested size.
function test_similar(::Type{T}) where {T<:AbstractTracer}
    A = rand(Int, 2, 3)
    # 2-arg from matrix of Reals
    B = similar(A, T)
    @test eltype(B) == T
    @test size(B) == (2, 3)
    # 1-arg from matrix of tracers
    B1 = similar(B)
    @test eltype(B1) == T
    @test size(B1) == (2, 3)
    # 2-arg from matrix of tracers
    B2 = similar(B, T)
    @test eltype(B2) == T
    @test size(B2) == (2, 3)
    # 2-arg from matrix of tracers, custom size
    B3 = similar(B, 4, 5)
    @test eltype(B3) == T
    @test size(B3) == (4, 5)
    # 3-arg from matrix of Reals
    B4 = similar(A, T, 4, 5)
    @test eltype(B4) == T
    @test size(B4) == (4, 5)
    # 3-arg from matrix of tracers
    B5 = similar(B, T, 5, 6)
    @test eltype(B5) == T
    @test size(B5) == (5, 6)
end
# Same checks for Dual tracer eltypes, starting from a primal type that
# intentionally differs from the Dual's primal.
function test_similar(::Type{D}) where {P,T,D<:Dual{P,T}}
    # Test `similar`
    P2 = Float16 # using something different than P
    @test P2 != P # this is important for following tests
    A = rand(P2, 2, 3)
    # 2-arg from matrix of Reals P2
    B = similar(A, D)
    @test eltype(B) == D
    @test size(B) == (2, 3)
    # 1-arg from matrix of tracers
    B1 = similar(B)
    @test eltype(B1) == D
    @test size(B1) == (2, 3)
    # 2-arg from matrix of tracers
    B2 = similar(B, D)
    @test eltype(B2) == D
    @test size(B2) == (2, 3)
    # 2-arg from matrix of tracers, custom size
    B3 = similar(B, 4, 5)
    @test eltype(B3) == D
    @test size(B3) == (4, 5)
    # 3-arg from matrix of Reals
    B4 = similar(A, D, 4, 5)
    @test eltype(B4) == D
    @test size(B4) == (4, 5)
    # 3-arg from matrix of tracers
    B5 = similar(B, D, 5, 6)
    @test eltype(B5) == D
    @test size(B5) == (5, 6)
end
@testset "GradientTracer" begin
P = Float32
DUAL_GRADIENT_TRACERS = [Dual{P,T} for T in GRADIENT_TRACERS]
ALL_GRADIENT_TRACERS = (GRADIENT_TRACERS..., DUAL_GRADIENT_TRACERS...)
@testset "Nested Duals on HessianTracer" for T in GRADIENT_TRACERS
test_nested_duals(T)
end
@testset "Constant functions on $T" for T in ALL_GRADIENT_TRACERS
test_constant_functions(T)
end
@testset "Type conversions on $T" for T in ALL_GRADIENT_TRACERS
test_type_conversion_functions(T)
end
@testset "Type casting on $T" for T in ALL_GRADIENT_TRACERS
test_type_casting(T)
end
@testset "similar on $T" for T in ALL_GRADIENT_TRACERS
test_similar(T)
end
end
@testset "HessianTracer" begin
P = Float32
DUAL_HESSIAN_TRACERS = [Dual{P,T} for T in HESSIAN_TRACERS]
ALL_HESSIAN_TRACERS = (HESSIAN_TRACERS..., DUAL_HESSIAN_TRACERS...)
@testset "Nested Duals on HessianTracer" for T in HESSIAN_TRACERS
test_nested_duals(T)
end
@testset "Constant functions on $T" for T in ALL_HESSIAN_TRACERS
test_constant_functions(T)
end
@testset "Type conversions on $T" for T in ALL_HESSIAN_TRACERS
test_type_conversion_functions(T)
end
@testset "Type casting on $T" for T in ALL_HESSIAN_TRACERS
test_type_casting(T)
end
@testset "similar on $T" for T in ALL_HESSIAN_TRACERS
test_similar(T)
end
end
@testset "Explicit type conversions on Dual" begin
@testset "$T" for T in union(GRADIENT_TRACERS, HESSIAN_TRACERS)
P = IndexSetGradientPattern{Int,BitSet}
T = GradientTracer{P}
p = P(BitSet(2))
t_full = T(p)
t_empty = myempty(T)
d_full = Dual(1.0, t_full)
d_empty = Dual(1.0, t_empty)
@testset "Non-empty tracer" begin
@testset "$TOUT" for TOUT in (Int, Integer, Float64, Float32)
@test_throws InexactError TOUT(d_full)
end
end
@testset "Empty tracer" begin
@testset "$TOUT" for TOUT in (Int, Integer, Float64, Float32)
out = TOUT(d_empty)
@test out isa TOUT # not a Dual!
@test isone(out)
end
end
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 9749 | using SparseConnectivityTracer
using SparseConnectivityTracer: GradientTracer, Dual, MissingPrimalError
using Test
using Compat: Returns
using Random: rand, GLOBAL_RNG
using LinearAlgebra: det, dot, logdet
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("tracers_definitions.jl")
# Real number types used to test conversions and comparisons against tracers.
REAL_TYPES = (Float64, Int, Bool, UInt8, Float16, Rational{Int})
# These exists to be able to quickly run tests in the REPL.
# NOTE: J gets overwritten inside the testsets.
detector = TracerSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "Jacobian Global" begin
@testset "$P" for P in GRADIENT_PATTERNS
T = GradientTracer{P}
detector = TracerSparsityDetector(; gradient_tracer_type=T)
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "Trivial examples" begin
f(x) = [x[1]^2, 2 * x[1] * x[2]^2, sin(x[3])]
@test J(f, rand(3)) == [1 0 0; 1 1 0; 0 0 1]
@test J(identity, rand()) ≈ [1;;]
@test J(Returns(1), 1) ≈ [0;;]
end
# Test GradientTracer on functions with zero derivatives
@testset "Zero derivatives" begin
x = rand(2)
g(x) = [x[1] * x[2], ceil(x[1] * x[2]), x[1] * round(x[2])]
@test J(g, x) == [1 1; 0 0; 1 0]
@test J(!, true) ≈ [0;;]
end
# Code coverage
@testset "Miscellaneous" begin
@test J(x -> [sincos(x)...], 1) ≈ [1; 1]
@test J(typemax, 1) ≈ [0;;]
@test J(x -> x^(2//3), 1) ≈ [1;;]
@test J(x -> (2//3)^x, 1) ≈ [1;;]
@test J(x -> x^ℯ, 1) ≈ [1;;]
@test J(x -> ℯ^x, 1) ≈ [1;;]
@test J(x -> 0, 1) ≈ [0;;]
# Test special cases on empty tracer
@test J(x -> zero(x)^(2//3), 1) ≈ [0;;]
@test J(x -> (2//3)^zero(x), 1) ≈ [0;;]
@test J(x -> zero(x)^ℯ, 1) ≈ [0;;]
@test J(x -> ℯ^zero(x), 1) ≈ [0;;]
end
# Conversions
@testset "Conversion" begin
@testset "to $T" for T in REAL_TYPES
@test J(x -> convert(T, x), 1.0) ≈ [1;;]
end
end
@testset "Round" begin
@test J(round, 1.1) ≈ [0;;]
@test J(x -> round(Int, x), 1.1) ≈ [0;;]
@test J(x -> round(Bool, x), 1.1) ≈ [0;;]
@test J(x -> round(Float16, x), 1.1) ≈ [0;;]
@test J(x -> round(x, RoundNearestTiesAway), 1.1) ≈ [0;;]
@test J(x -> round(x; digits=3, base=2), 1.1) ≈ [0;;]
end
@testset "Random" begin
@test J(x -> rand(typeof(x)), 1) ≈ [0;;]
@test J(x -> rand(GLOBAL_RNG, typeof(x)), 1) ≈ [0;;]
end
@testset "LinearAlgebra" begin
@test J(x -> dot(x[1:2], x[4:5]), rand(5)) == [1 1 0 1 1]
end
@testset "MissingPrimalError" begin
@testset "$f" for f in (
iseven,
isfinite,
isinf,
isinteger,
ismissing,
isnan,
isnothing,
isodd,
isone,
isreal,
iszero,
)
@test_throws MissingPrimalError J(f, rand())
end
end
@testset "ifelse and comparisons" begin
if VERSION >= v"1.8"
@test J(x -> ifelse(x[2] < x[3], x[1] + x[2], x[3] * x[4]), [1 2 3 4]) ==
[1 1 1 1]
@test J(x -> ifelse(x[2] < x[3], x[1] + x[2], 1.0), [1 2 3 4]) == [1 1 0 0]
@test J(x -> ifelse(x[2] < x[3], 1.0, x[3] * x[4]), [1 2 3 4]) == [0 0 1 1]
end
function f_ampgo07(x)
return (x[1] <= 0) * convert(eltype(x), Inf) +
sin(x[1]) +
sin(10//3 * x[1]) +
log(abs(x[1])) - 84//100 * x[1] + 3
end
@test J(f_ampgo07, [1.0]) ≈ [1;;]
# Error handling when applying non-dual tracers to "local" functions with control flow
# TypeError: non-boolean (SparseConnectivityTracer.GradientTracer{BitSet}) used in boolean context
@test_throws TypeError J(
x -> x[1] > x[2] ? x[3] : x[4], [1.0, 2.0, 3.0, 4.0]
) == [0 0 1 1;]
end
yield()
end
end
@testset "Jacobian Local" begin
@testset "$P" for P in GRADIENT_PATTERNS
T = GradientTracer{P}
detector = TracerLocalSparsityDetector(; gradient_tracer_type=T)
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "Trivial examples" begin
# Multiplication
@test J(x -> x[1] * x[2], [1.0, 1.0]) == [1 1;]
@test J(x -> x[1] * x[2], [1.0, 0.0]) == [0 1;]
@test J(x -> x[1] * x[2], [0.0, 1.0]) == [1 0;]
@test J(x -> x[1] * x[2], [0.0, 0.0]) == [0 0;]
# Division
@test J(x -> x[1] / x[2], [1.0, 1.0]) == [1 1;]
@test J(x -> x[1] / x[2], [0.0, 0.0]) == [1 0;]
# Maximum
@test J(x -> max(x[1], x[2]), [1.0, 2.0]) == [0 1;]
@test J(x -> max(x[1], x[2]), [2.0, 1.0]) == [1 0;]
@test J(x -> max(x[1], x[2]), [1.0, 1.0]) == [1 1;]
# Minimum
@test J(x -> min(x[1], x[2]), [1.0, 2.0]) == [1 0;]
@test J(x -> min(x[1], x[2]), [2.0, 1.0]) == [0 1;]
@test J(x -> min(x[1], x[2]), [1.0, 1.0]) == [1 1;]
end
# Comparisons
@testset "Comparisons" begin
@test J(x -> x[1] > x[2] ? x[3] : x[4], [1.0, 2.0, 3.0, 4.0]) == [0 0 0 1;]
@test J(x -> x[1] > x[2] ? x[3] : x[4], [2.0, 1.0, 3.0, 4.0]) == [0 0 1 0;]
@test J(x -> x[1] < x[2] ? x[3] : x[4], [1.0, 2.0, 3.0, 4.0]) == [0 0 1 0;]
@test J(x -> x[1] < x[2] ? x[3] : x[4], [2.0, 1.0, 3.0, 4.0]) == [0 0 0 1;]
@test J(x -> x[1] >= x[2] ? x[1] : x[2], [1.0, 2.0]) == [0 1;]
@test J(x -> x[1] >= x[2] ? x[1] : x[2], [2.0, 1.0]) == [1 0;]
@test J(x -> x[1] >= x[2] ? x[1] : x[2], [1.0, 1.0]) == [1 0;]
@test J(x -> x[1] >= x[2] ? x[1] : x[2], [1.0, 2.0]) == [0 1;]
@test J(x -> x[1] >= x[2] ? x[1] : x[2], [2.0, 1.0]) == [1 0;]
@test J(x -> x[1] >= x[2] ? x[1] : x[2], [1.0, 1.0]) == [1 0;]
@test J(x -> x[1] <= x[2] ? x[1] : x[2], [1.0, 2.0]) == [1 0;]
@test J(x -> x[1] <= x[2] ? x[1] : x[2], [2.0, 1.0]) == [0 1;]
@test J(x -> x[1] <= x[2] ? x[1] : x[2], [1.0, 1.0]) == [1 0;]
@test J(x -> x[1] == x[2] ? x[1] : x[2], [1.0, 2.0]) == [0 1;]
@test J(x -> x[1] == x[2] ? x[1] : x[2], [2.0, 1.0]) == [0 1;]
@test J(x -> x[1] == x[2] ? x[1] : x[2], [1.0, 1.0]) == [1 0;]
@testset "Comparison with $T" for T in REAL_TYPES
_1 = oneunit(T)
@test J(x -> x[1] > _1 ? x[1] : x[2], [0.0, 2.0]) == [0 1;]
@test J(x -> x[1] > _1 ? x[1] : x[2], [2.0, 0.0]) == [1 0;]
@test J(x -> x[1] >= _1 ? x[1] : x[2], [0.0, 2.0]) == [0 1;]
@test J(x -> x[1] >= _1 ? x[1] : x[2], [2.0, 0.0]) == [1 0;]
@test J(x -> x[1] < _1 ? x[1] : x[2], [0.0, 2.0]) == [1 0;]
@test J(x -> x[1] < _1 ? x[1] : x[2], [2.0, 0.0]) == [0 1;]
@test J(x -> isless(x[1], _1) ? x[1] : x[2], [0.0, 2.0]) == [1 0;]
@test J(x -> isless(x[1], _1) ? x[1] : x[2], [2.0, 0.0]) == [0 1;]
@test J(x -> x[1] <= _1 ? x[1] : x[2], [0.0, 2.0]) == [1 0;]
@test J(x -> x[1] <= _1 ? x[1] : x[2], [2.0, 0.0]) == [0 1;]
@test J(x -> _1 > x[2] ? x[1] : x[2], [0.0, 2.0]) == [0 1;]
@test J(x -> _1 > x[2] ? x[1] : x[2], [2.0, 0.0]) == [1 0;]
@test J(x -> _1 >= x[2] ? x[1] : x[2], [0.0, 2.0]) == [0 1;]
@test J(x -> _1 >= x[2] ? x[1] : x[2], [2.0, 0.0]) == [1 0;]
@test J(x -> _1 < x[2] ? x[1] : x[2], [0.0, 2.0]) == [1 0;]
@test J(x -> _1 < x[2] ? x[1] : x[2], [2.0, 0.0]) == [0 1;]
@test J(x -> _1 <= x[2] ? x[1] : x[2], [0.0, 2.0]) == [1 0;]
@test J(x -> _1 <= x[2] ? x[1] : x[2], [2.0, 0.0]) == [0 1;]
end
end
# Code coverage
@testset "Miscellaneous" begin
@test J(x -> [sincos(x)...], 1) ≈ [1; 1]
@test J(typemax, 1) ≈ [0;;]
@test J(x -> x^(2//3), 1) ≈ [1;;]
@test J(x -> (2//3)^x, 1) ≈ [1;;]
@test J(x -> x^ℯ, 1) ≈ [1;;]
@test J(x -> ℯ^x, 1) ≈ [1;;]
@test J(x -> 0, 1) ≈ [0;;]
end
# Conversions
@testset "Conversion" begin
@testset "Conversion to $T" for T in REAL_TYPES
@test J(x -> convert(T, x), 1.0) ≈ [1;;]
end
end
@testset "Round" begin
@test J(round, 1.1) ≈ [0;;]
@test J(x -> round(Int, x), 1.1) ≈ [0;;]
@test J(x -> round(Bool, x), 1.1) ≈ [0;;]
@test J(x -> round(x, RoundNearestTiesAway), 1.1) ≈ [0;;]
@test J(x -> round(x; digits=3, base=2), 1.1) ≈ [0;;]
end
@testset "Random" begin
@test J(x -> rand(typeof(x)), 1) ≈ [0;;]
@test J(x -> rand(GLOBAL_RNG, typeof(x)), 1) ≈ [0;;]
end
@testset "LinearAlgebra." begin
@test J(logdet, [1.0 -1.0; 2.0 2.0]) == [1 1 1 1] # (#68)
@test J(x -> log(det(x)), [1.0 -1.0; 2.0 2.0]) == [1 1 1 1]
@test J(x -> dot(x[1:2], x[4:5]), [0, 1, 0, 1, 0]) == [1 0 0 0 1]
end
yield()
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 12379 | using SparseConnectivityTracer
using SparseConnectivityTracer: Dual, HessianTracer, MissingPrimalError
using SparseConnectivityTracer: create_tracers, pattern, shared
using Test
using Random: rand, GLOBAL_RNG
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("tracers_definitions.jl")
REAL_TYPES = (Float64, Int, Bool, UInt8, Float16, Rational{Int})
# These exists to be able to quickly run tests in the REPL.
# NOTE: H gets overwritten inside the testsets.
detector = TracerSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
P = first(HESSIAN_PATTERNS)
T = HessianTracer{P}
D = Dual{Int,T}
@testset "Global Hessian" begin
@testset "$P" for P in HESSIAN_PATTERNS
T = HessianTracer{P}
detector = TracerSparsityDetector(; hessian_tracer_type=T)
H(f, x) = hessian_sparsity(f, x, detector)
@testset "Trivial examples" begin
@test H(identity, rand()) ≈ [0;;]
@test H(sqrt, rand()) ≈ [1;;]
@test H(x -> 1 * x, rand()) ≈ [0;;]
@test H(x -> x * 1, rand()) ≈ [0;;]
end
# Code coverage
@testset "Miscellaneous" begin
@test H(sign, 1) ≈ [0;;]
@test H(typemax, 1) ≈ [0;;]
@test H(x -> x^(2//3), 1) ≈ [1;;]
@test H(x -> (2//3)^x, 1) ≈ [1;;]
@test H(x -> x^ℯ, 1) ≈ [1;;]
@test H(x -> ℯ^x, 1) ≈ [1;;]
@test H(x -> 0, 1) ≈ [0;;]
@test H(x -> sum(sincosd(x)), 1.0) ≈ [1;;]
@test H(x -> sum(diff(x) .^ 3), rand(4)) == [
1 1 0 0
1 1 1 0
0 1 1 1
0 0 1 1
]
end
# Conversions
@testset "Conversion" begin
@testset "to $T" for T in REAL_TYPES
@test H(x -> convert(T, x), 1.0) ≈ [0;;]
@test H(x -> convert(T, x^2), 1.0) ≈ [1;;]
@test H(x -> convert(T, x)^2, 1.0) ≈ [1;;]
end
end
@testset "Round" begin
@test H(round, 1.1) ≈ [0;;]
@test H(x -> round(Int, x), 1.1) ≈ [0;;]
@test H(x -> round(Bool, x), 1.1) ≈ [0;;]
@test H(x -> round(Float16, x), 1.1) ≈ [0;;]
@test H(x -> round(x, RoundNearestTiesAway), 1.1) ≈ [0;;]
@test H(x -> round(x; digits=3, base=2), 1.1) ≈ [0;;]
end
@testset "Random" begin
@test H(x -> rand(typeof(x)), 1) ≈ [0;;]
@test H(x -> rand(GLOBAL_RNG, typeof(x)), 1) ≈ [0;;]
end
@testset "Basic operators" begin
@test H(x -> x[1] / x[2] + x[3] / 1 + 1 / x[4], rand(4)) == [
0 1 0 0
1 1 0 0
0 0 0 0
0 0 0 1
]
@test H(x -> x[1] * x[2] + x[3] * 1 + 1 * x[4], rand(4)) == [
0 1 0 0
1 0 0 0
0 0 0 0
0 0 0 0
]
@test H(x -> (x[1] * x[2]) * (x[3] * x[4]), rand(4)) == [
0 1 1 1
1 0 1 1
1 1 0 1
1 1 1 0
]
@test H(x -> (x[1] + x[2]) * (x[3] + x[4]), rand(4)) == [
0 0 1 1
0 0 1 1
1 1 0 0
1 1 0 0
]
@test H(x -> (x[1] + x[2] + x[3] + x[4])^2, rand(4)) == [
1 1 1 1
1 1 1 1
1 1 1 1
1 1 1 1
]
@test H(x -> 1 / (x[1] + x[2] + x[3] + x[4]), rand(4)) == [
1 1 1 1
1 1 1 1
1 1 1 1
1 1 1 1
]
@test H(x -> (x[1] - x[2]) + (x[3] - 1) + (1 - x[4]), rand(4)) == [
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
]
x = rand(5)
foo(x) = x[1] + x[2] * x[3] + 1 / x[4] + 1 * x[5]
@test H(foo, x) == [
0 0 0 0 0
0 0 1 0 0
0 1 0 0 0
0 0 0 1 0
0 0 0 0 0
]
bar(x) = foo(x) + x[2]^x[5]
@test H(bar, x) == [
0 0 0 0 0
0 1 1 0 1
0 1 0 0 0
0 0 0 1 0
0 1 0 0 1
]
end
@testset "Zero derivatives" begin
h = H(x -> copysign(x[1] * x[2], x[3] * x[4]), rand(4))
if Bool(shared(T))
@test h == [
0 1 0 0
1 0 0 0
0 0 0 1
0 0 1 0
]
else
@test h == [
0 1 0 0
1 0 0 0
0 0 0 0
0 0 0 0
]
end
h = H(x -> div(x[1] * x[2], x[3] * x[4]), rand(4))
if Bool(shared(T))
@test Matrix(h) == [
0 1 0 0
1 0 0 0
0 0 0 1
0 0 1 0
]
else
@test h == [
0 0 0 0
0 0 0 0
0 0 0 0
0 0 0 0
]
end
end
@testset "shared Hessian" begin
function dead_end(x)
z = x[1] * x[2]
return x[3] * x[4]
end
h = H(dead_end, rand(4))
if Bool(shared(T))
@test h == [
0 1 0 0
1 0 0 0
0 0 0 1
0 0 1 0
]
else
@test h == [
0 0 0 0
0 0 0 0
0 0 0 1
0 0 1 0
]
end
end
# Missing primal errors
@testset "MissingPrimalError" begin
@testset "$f" for f in (
iseven,
isfinite,
isinf,
isinteger,
ismissing,
isnan,
isnothing,
isodd,
isone,
isreal,
iszero,
)
@test_throws MissingPrimalError H(f, rand())
end
end
@testset "ifelse and comparisons" begin
if VERSION >= v"1.8"
@test H(x -> ifelse(x[1], x[1]^x[2], x[3] * x[4]), rand(4)) == [
1 1 0 0
1 1 0 0
0 0 0 1
0 0 1 0
]
@test H(x -> ifelse(x[1], x[1]^x[2], 1.0), rand(4)) == [
1 1 0 0
1 1 0 0
0 0 0 0
0 0 0 0
]
@test H(x -> ifelse(x[1], 1.0, x[3] * x[4]), rand(4)) == [
0 0 0 0
0 0 0 0
0 0 0 1
0 0 1 0
]
end
function f_ampgo07(x)
return (x[1] <= 0) * convert(eltype(x), Inf) +
sin(x[1]) +
sin(10//3 * x[1]) +
log(abs(x[1])) - 84//100 * x[1] + 3
end
@test H(f_ampgo07, [1.0]) ≈ [1;;]
# Error handling when applying non-dual tracers to "local" functions with control flow
# TypeError: non-boolean (SparseConnectivityTracer.GradientTracer{BitSet}) used in boolean context
@test_throws TypeError H(x -> x[1] > x[2] ? x[1]^x[2] : x[3] * x[4], rand(4))
end
yield()
end
end
@testset "Local Hessian" begin
@testset "$P" for P in HESSIAN_PATTERNS
T = HessianTracer{P}
detector = TracerLocalSparsityDetector(; hessian_tracer_type=T)
H(f, x) = hessian_sparsity(f, x, detector)
@testset "Trivial examples" begin
f1(x) = x[1] + x[2] * x[3] + 1 / x[4] + x[2] * max(x[1], x[5])
@test H(f1, [1.0 3.0 5.0 1.0 2.0]) == [
0 0 0 0 0
0 0 1 0 1
0 1 0 0 0
0 0 0 1 0
0 1 0 0 0
]
@test H(f1, [4.0 3.0 5.0 1.0 2.0]) == [
0 1 0 0 0
1 0 1 0 0
0 1 0 0 0
0 0 0 1 0
0 0 0 0 0
]
f2(x) = ifelse(x[2] < x[3], x[1] * x[2], x[3] * x[4])
h = H(f2, [1 2 3 4])
if Bool(shared(T))
@test h == [
0 1 0 0
1 0 0 0
0 0 0 1
0 0 1 0
]
else
@test h == [
0 1 0 0
1 0 0 0
0 0 0 0
0 0 0 0
]
end
h = H(f2, [1 3 2 4])
if Bool(shared(T))
@test h == [
0 1 0 0
1 0 0 0
0 0 0 1
0 0 1 0
]
else
@test h == [
0 0 0 0
0 0 0 0
0 0 0 1
0 0 1 0
]
end
end
@testset "Shared Hessian" begin
function dead_end(x)
z = x[1] * x[2]
return x[3] * x[4]
end
h = H(dead_end, rand(4))
if Bool(shared(T))
@test h == [
0 1 0 0
1 0 0 0
0 0 0 1
0 0 1 0
]
else
@test h == [
0 0 0 0
0 0 0 0
0 0 0 1
0 0 1 0
]
end
end
@testset "Miscellaneous" begin
@test H(sign, 1) ≈ [0;;]
@test H(typemax, 1) ≈ [0;;]
@test H(x -> x^(2//3), 1) ≈ [1;;]
@test H(x -> (2//3)^x, 1) ≈ [1;;]
@test H(x -> x^ℯ, 1) ≈ [1;;]
@test H(x -> ℯ^x, 1) ≈ [1;;]
@test H(x -> 0, 1) ≈ [0;;]
# Test special cases on empty tracer
@test H(x -> zero(x)^(2//3), 1) ≈ [0;;]
@test H(x -> (2//3)^zero(x), 1) ≈ [0;;]
@test H(x -> zero(x)^ℯ, 1) ≈ [0;;]
@test H(x -> ℯ^zero(x), 1) ≈ [0;;]
end
@testset "Conversion" begin
@testset "to $T" for T in REAL_TYPES
@test H(x -> convert(T, x), 1.0) ≈ [0;;]
@test H(x -> convert(T, x^2), 1.0) ≈ [1;;]
@test H(x -> convert(T, x)^2, 1.0) ≈ [1;;]
end
end
@testset "Round" begin
@test H(round, 1.1) ≈ [0;;]
@test H(x -> round(Int, x), 1.1) ≈ [0;;]
@test H(x -> round(Bool, x), 1.1) ≈ [0;;]
@test H(x -> round(x, RoundNearestTiesAway), 1.1) ≈ [0;;]
@test H(x -> round(x; digits=3, base=2), 1.1) ≈ [0;;]
end
@testset "Random" begin
@test H(x -> rand(typeof(x)), 1) ≈ [0;;]
@test H(x -> rand(GLOBAL_RNG, typeof(x)), 1) ≈ [0;;]
end
yield()
end
end
@testset "Shared IndexSetHessianPattern - same objects" begin
@testset "$P" for P in HESSIAN_PATTERNS_SHARED
T = HessianTracer{P}
function multi_output_for_shared_test(x::AbstractArray)
z = ones(eltype(x), size(x))
y1 = x[1]^2 * z[1]
y2 = z[2] * x[2]^2
y3 = x[1] * x[2]
y4 = z[1] * z[2] # entirely new tracer
y = [y1, y2, y3, y4]
return y
end
x = rand(2)
xt = create_tracers(T, x, eachindex(x))
yt = multi_output_for_shared_test(xt)
@test pattern(yt[1]).hessian === pattern(yt[2]).hessian
@test pattern(yt[1]).hessian === pattern(yt[3]).hessian
@test_broken pattern(yt[1]).hessian === pattern(yt[4]).hessian
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1459 | using SparseConnectivityTracer: AbstractTracer, GradientTracer, HessianTracer, Dual
using SparseConnectivityTracer: IndexSetGradientPattern
using SparseConnectivityTracer: IndexSetHessianPattern, DictHessianPattern
using SparseConnectivityTracer: DuplicateVector, RecursiveSet, SortedVector
using SparseConnectivityTracer: Shared, NotShared
# Pattern and tracer type collections shared across the test suite.
# Gradient patterns cover each supported index-set representation.
GRADIENT_PATTERNS = (
    IndexSetGradientPattern{Int,BitSet},
    IndexSetGradientPattern{Int,Set{Int}},
    IndexSetGradientPattern{Int,DuplicateVector{Int}},
    IndexSetGradientPattern{Int,SortedVector{Int}},
)
# Hessian patterns whose first-order part is shared across tracers.
HESSIAN_PATTERNS_SHARED = (
    IndexSetHessianPattern{Int,BitSet,Set{Tuple{Int,Int}},Shared},
    DictHessianPattern{Int,BitSet,Dict{Int,BitSet},Shared},
)
# Fix: the BitSet/Set{Tuple} entry was listed twice (copy-paste duplicate),
# causing the same pattern to be tested twice wherever this tuple is iterated
# directly. NOTE(review): possibly a Set{Int}-based variant was intended
# instead of the duplicate — confirm with the package authors.
HESSIAN_PATTERNS_NOTSHARED = (
    IndexSetHessianPattern{Int,BitSet,Set{Tuple{Int,Int}},NotShared},
    IndexSetHessianPattern{
        Int,DuplicateVector{Int},DuplicateVector{Tuple{Int,Int}},NotShared
    },
    IndexSetHessianPattern{Int,SortedVector{Int},SortedVector{Tuple{Int,Int}},NotShared},
    # TODO: test on RecursiveSet
    DictHessianPattern{Int,BitSet,Dict{Int,BitSet},NotShared},
    DictHessianPattern{Int,Set{Int},Dict{Int,Set{Int}},NotShared},
)
HESSIAN_PATTERNS = union(HESSIAN_PATTERNS_SHARED, HESSIAN_PATTERNS_NOTSHARED)
# Lazy generators of concrete tracer types, one per pattern.
GRADIENT_TRACERS = (GradientTracer{P} for P in GRADIENT_PATTERNS)
HESSIAN_TRACERS = (HessianTracer{P} for P in HESSIAN_PATTERNS)
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 9576 | using SparseConnectivityTracer
using SparseConnectivityTracer: DEFAULT_GRADIENT_TRACER, DEFAULT_HESSIAN_TRACER
using SparseConnectivityTracer: trace_input, Dual, primal
using DataInterpolations
using DataInterpolations: AbstractInterpolation
using LinearAlgebra: I
using Test
# Extract the primal (plain number) part of `x`: ordinary numbers are returned
# unchanged, `Dual` tracers yield their stored primal value.
myprimal(x) = x
myprimal(d::Dual) = primal(d)
#===========#
# Test data #
#===========#
t = [0.0, 1.0, 2.5, 4.0, 6.0];
t_scalar = 2.0
t_vector = [2.0, 2.5, 3.0]
t_range = 2:5
test_inputs = (t_scalar, t_vector, t_range)
u = sin.(t) # vector
du = cos.(t)
ddu = -sin.(t)
#==================#
# Test definitions #
#==================#
# Bundles an interpolation object with flags describing which of its
# derivatives vanish identically, so the reference Jacobian/Hessian patterns
# below can be constructed accordingly.
struct InterpolationTest{N,I<:AbstractInterpolation} # N = output dim. of interpolation
    interp::I
    is_der1_zero::Bool  # first derivative is identically zero (e.g. constant interp.)
    is_der2_zero::Bool  # second derivative is identically zero (e.g. linear interp.)
end
# Convenience constructor: `N` is passed as a value and lifted into the type.
function InterpolationTest(
    N, interp::I; is_der1_zero=false, is_der2_zero=false
) where {I<:AbstractInterpolation}
    return InterpolationTest{N,I}(interp, is_der1_zero, is_der2_zero)
end
# Human-readable testset label, e.g. "1-dim LinearInterpolation{...}".
testname(t::InterpolationTest{N}) where {N} = "$N-dim $(typeof(t.interp))"
#================#
# Jacobian Tests #
#================#
# Run the Jacobian sparsity checks for `t` on every entry of `test_inputs`
# (scalar, vector, and range); dispatch below selects the matching method.
function test_jacobian(t::InterpolationTest)
    @testset "Jacobian" begin
        for input in test_inputs
            test_jacobian(t, input)
        end
    end
end
function test_jacobian(t::InterpolationTest{N}, input::Real) where {N}
N_IN = length(input)
N_OUT = N * N_IN
Jref = t.is_der1_zero ? zeros(N, N_IN) : ones(N, N_IN)
@testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
@testset "Global Jacobian sparsity" begin
J = jacobian_sparsity(t.interp, input, TracerSparsityDetector())
@test J ≈ Jref
end
@testset "Local Jacobian sparsity" begin
J = jacobian_sparsity(t.interp, input, TracerLocalSparsityDetector())
@test J ≈ Jref
end
end
end
function test_jacobian(t::InterpolationTest{1}, input::AbstractVector)
N = 1
N_IN = length(input)
N_OUT = N * N_IN
Jref = t.is_der1_zero ? zeros(N_IN, N_IN) : I(N_IN)
@testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
@testset "Global Jacobian sparsity" begin
J = jacobian_sparsity(x -> vec(t.interp(x)), input, TracerSparsityDetector())
@test J ≈ Jref
end
@testset "Local Jacobian sparsity" begin
J = jacobian_sparsity(
x -> vec(t.interp(x)), input, TracerLocalSparsityDetector()
)
@test J ≈ Jref
end
end
end
# Jacobian check for an N-dimensional interpolant evaluated at a vector of
# points; the output is flattened with `vec`, giving N*N_IN scalar outputs.
function test_jacobian(t::InterpolationTest{N}, input::AbstractVector) where {N}
    N_IN = length(input)
    N_OUT = N * N_IN
    # Construct reference Jacobian: vec(interp(input)) stacks the N outputs of
    # each evaluation point, so input i affects exactly the N consecutive rows
    # belonging to point i (a block-"diagonal" pattern).
    Jref = zeros(Bool, N_OUT, N_IN)
    if !t.is_der1_zero
        for (i, col) in enumerate(eachcol(Jref)) # iterate over inputs (columns)
            i0 = 1 + N * (i - 1)
            irange = i0:(i0 + N - 1)
            col[irange] .= true
        end
    end
    @testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
        @testset "Global Jacobian sparsity" begin
            J = jacobian_sparsity(x -> vec(t.interp(x)), input, TracerSparsityDetector())
            @test J ≈ Jref
        end
        @testset "Local Jacobian sparsity" begin
            J = jacobian_sparsity(
                x -> vec(t.interp(x)), input, TracerLocalSparsityDetector()
            )
            @test J ≈ Jref
        end
    end
end
#===============#
# Hessian Tests #
#===============#
function test_hessian(t::InterpolationTest)
@testset "Hessian" begin
for input in test_inputs
test_hessian(t, input)
end
end
end
function test_hessian(t::InterpolationTest{1}, input::Real)
N = 1
N_IN = length(input)
N_OUT = N * N_IN
Href = t.is_der2_zero ? zeros(N_IN, N_IN) : ones(N_IN, N_IN)
@testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
@testset "Global Hessian sparsity" begin
H = hessian_sparsity(t.interp, input, TracerSparsityDetector())
@test H ≈ Href
end
@testset "Local Hessian sparsity" begin
H = hessian_sparsity(t.interp, input, TracerLocalSparsityDetector())
@test H ≈ Href
end
end
end
function test_hessian(t::InterpolationTest{N}, input::Real) where {N} # N ≠ 1
N_IN = length(input)
N_OUT = N * N_IN
Href = t.is_der2_zero ? zeros(N_IN, N_IN) : ones(N_IN, N_IN)
@testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
@testset "Global Hessian sparsity" begin
H = hessian_sparsity(x -> sum(t.interp(x)), input, TracerSparsityDetector())
@test H ≈ Href
end
@testset "Local Hessian sparsity" begin
H = hessian_sparsity(
x -> sum(t.interp(x)), input, TracerLocalSparsityDetector()
)
@test H ≈ Href
end
end
end
function test_hessian(t::InterpolationTest{1}, input::AbstractVector)
N = 1
N_IN = length(input)
N_OUT = N * N_IN
Href = t.is_der2_zero ? zeros(N_IN, N_IN) : I(N_IN)
@testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
@testset "Global Hessian sparsity" begin
H = hessian_sparsity(x -> sum(t.interp(x)), input, TracerSparsityDetector())
@test H ≈ Href
end
@testset "Local Hessian sparsity" begin
H = hessian_sparsity(
x -> sum(t.interp(x)), input, TracerLocalSparsityDetector()
)
@test H ≈ Href
end
end
end
function test_hessian(t::InterpolationTest{N}, input::AbstractVector) where {N} # N ≠ 1
N_IN = length(input)
N_OUT = N * N_IN
Href = t.is_der2_zero ? zeros(N_IN, N_IN) : I(N_IN)
@testset "input type $(typeof(input)): $N_IN inputs, $N states, $N_OUT outputs" begin
@testset "Global Hessian sparsity" begin
H = hessian_sparsity(x -> sum(t.interp(x)), input, TracerSparsityDetector())
@test H ≈ Href
end
@testset "Local Hessian sparsity" begin
H = hessian_sparsity(
x -> sum(t.interp(x)), input, TracerLocalSparsityDetector()
)
@test H ≈ Href
end
end
end
function test_output(t::InterpolationTest)
@testset "Output sizes and values" begin
@testset "input type: $(typeof(input))" for input in test_inputs
out_ref = t.interp(input)
s_ref = size(out_ref)
@testset "$T" for T in (DEFAULT_GRADIENT_TRACER, DEFAULT_HESSIAN_TRACER)
t_tracer = trace_input(T, input)
out_tracer = t.interp(t_tracer)
s_tracer = size(out_tracer)
@test s_tracer == s_ref
end
@testset "$T" for T in (
Dual{eltype(input),DEFAULT_GRADIENT_TRACER},
Dual{eltype(input),DEFAULT_HESSIAN_TRACER},
)
t_dual = trace_input(T, input)
out_dual = t.interp(t_dual)
s_dual = size(out_dual)
@test s_dual == s_ref
@test myprimal.(out_dual) ≈ out_ref
end
end
end
end
#===========#
# Run tests #
#===========#
@testset "1D Interpolations" begin
@testset "$(testname(t))" for t in (
InterpolationTest(
1, ConstantInterpolation(u, t); is_der1_zero=true, is_der2_zero=true
),
InterpolationTest(1, LinearInterpolation(u, t); is_der2_zero=true),
InterpolationTest(1, QuadraticInterpolation(u, t)),
InterpolationTest(1, LagrangeInterpolation(u, t)),
InterpolationTest(1, AkimaInterpolation(u, t)),
InterpolationTest(1, QuadraticSpline(u, t)),
InterpolationTest(1, CubicSpline(u, t)),
InterpolationTest(1, BSplineInterpolation(u, t, 3, :ArcLen, :Average)),
InterpolationTest(1, BSplineApprox(u, t, 3, 4, :ArcLen, :Average)),
InterpolationTest(1, PCHIPInterpolation(u, t)),
InterpolationTest(1, CubicHermiteSpline(du, u, t)),
InterpolationTest(1, QuinticHermiteSpline(ddu, du, u, t)),
)
test_jacobian(t)
test_hessian(t)
test_output(t)
yield()
end
end
for N in (2, 5)
local um = rand(N, length(t)) # matrix
@testset "$(N)D Interpolations" begin
@testset "$(testname(t))" for t in (
InterpolationTest(
N, ConstantInterpolation(um, t); is_der1_zero=true, is_der2_zero=true
),
# InterpolationTest(N, LinearInterpolation(um, t); is_der2_zero=true), # TODO: include once https://github.com/SciML/DataInterpolations.jl/pull/335 is settled
InterpolationTest(N, QuadraticInterpolation(um, t)),
InterpolationTest(N, LagrangeInterpolation(um, t)),
## The following interpolations appear to not be supported on N dimensions as of DataInterpolations v6.2.0:
# InterpolationTest(N, AkimaInterpolation(um, t)),
# InterpolationTest(N, BSplineApprox(um, t, 3, 4, :ArcLen, :Average)),
# InterpolationTest(N, QuadraticSpline(um, t)),
# InterpolationTest(N, CubicSpline(um, t)),
# InterpolationTest(N, BSplineInterpolation(um, t, 3, :ArcLen, :Average)),
# InterpolationTest(N, PCHIPInterpolation(um, t)),
)
test_jacobian(t)
test_hessian(t)
test_output(t)
yield()
end
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2724 | using SparseConnectivityTracer
using LogExpFunctions
using Test
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("../tracers_definitions.jl")
# Unary LogExpFunctions that are evaluated at a positive point in the local
# tests below (their domains require/accept positive input there).
lef_1_to_1_pos_input = (
    xlogx,
    logistic,
    logit,
    log1psq,
    log1pexp,
    logexpm1,
    softplus,
    invsoftplus,
    log1pmx,
    logmxp1,
    logcosh,
    logabssinh,
    cloglog,
    cexpexp,
    loglogistic,
    log1mlogistic,
)
# Unary functions that require a negative evaluation point locally.
lef_1_to_1_neg_input = (log1mexp, log2mexp, logitexp, logit1mexp)
lef_1_to_1 = union(lef_1_to_1_pos_input, lef_1_to_1_neg_input)
# Binary functions; both arguments carry derivative information.
lef_2_to_1 = (xlogy, xlog1py, xexpy, logaddexp, logsubexp)
@testset "Jacobian Global" begin
detector = TracerSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "1-to-1 functions" begin
@testset "$f" for f in lef_1_to_1
@test J(x -> f(x[1]), rand(2)) == [1 0]
end
end
@testset "2-to-1 functions" begin
@testset "$f" for f in lef_2_to_1
@test J(x -> f(x[1], x[2]), rand(3)) == [1 1 0]
end
end
end
@testset "Jacobian Local" begin
detector = TracerLocalSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "1-to-1 functions" begin
@testset "$f" for f in lef_1_to_1_pos_input
@test J(x -> f(x[1]), [0.5, 1.0]) == [1 0]
end
@testset "$f" for f in lef_1_to_1_neg_input
@test J(x -> f(x[1]), [-0.5, 1.0]) == [1 0]
end
end
@testset "2-to-1 functions" begin
@testset "$f" for f in lef_2_to_1
@test J(x -> f(x[1], x[2]), [0.5, 1.0, 2.0]) == [1 1 0]
end
end
end
@testset "Hessian Global" begin
detector = TracerSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@testset "1-to-1 functions" begin
@testset "$f" for f in lef_1_to_1
@test H(x -> f(x[1]), rand(2)) == [1 0; 0 0]
end
end
@testset "2-to-1 functions" begin
@testset "$f" for f in lef_2_to_1
@test H(x -> f(x[1], x[2]), rand(3)) == [1 1 0; 1 1 0; 0 0 0]
end
end
end
@testset "Hessian Local" begin
detector = TracerLocalSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@testset "1-to-1 functions" begin
@testset "$f" for f in lef_1_to_1_pos_input
@test H(x -> f(x[1]), [0.5, 1.0]) == [1 0; 0 0]
end
@testset "$f" for f in lef_1_to_1_neg_input
@test H(x -> f(x[1]), [-0.5, 1.0]) == [1 0; 0 0]
end
end
@testset "2-to-1 functions" begin
@testset "$f" for f in lef_2_to_1
@test H(x -> f(x[1], x[2]), [0.5, 1.0, 2.0]) == [1 1 0; 1 1 0; 0 0 0]
end
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 3613 | using SparseConnectivityTracer
using NNlib: NNlib
using Test
NNLIB_ACTIVATIONS_S = (
NNlib.σ,
NNlib.celu,
NNlib.elu,
NNlib.gelu,
NNlib.hardswish,
NNlib.lisht,
NNlib.logσ,
NNlib.logcosh,
NNlib.mish,
NNlib.selu,
NNlib.softplus,
NNlib.softsign,
NNlib.swish,
NNlib.sigmoid_fast,
NNlib.tanhshrink,
NNlib.tanh_fast,
)
NNLIB_ACTIVATIONS_F = (
NNlib.hardσ,
NNlib.hardtanh,
NNlib.leakyrelu,
NNlib.relu,
NNlib.relu6,
NNlib.softshrink,
NNlib.trelu,
)
NNLIB_ACTIVATIONS = union(NNLIB_ACTIVATIONS_S, NNLIB_ACTIVATIONS_F)
@testset "Jacobian Global" begin
detector = TracerSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "$f" for f in NNLIB_ACTIVATIONS
@test J(f, 1) ≈ [1;;]
end
end
@testset "Jacobian Local" begin
detector = TracerLocalSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@test J(NNlib.relu, -1) ≈ [0;;]
@test J(NNlib.relu, 1) ≈ [1;;]
@test J(NNlib.elu, -1) ≈ [1;;]
@test J(NNlib.elu, 1) ≈ [1;;]
@test J(NNlib.celu, -1) ≈ [1;;]
@test J(NNlib.celu, 1) ≈ [1;;]
@test J(NNlib.selu, -1) ≈ [1;;]
@test J(NNlib.selu, 1) ≈ [1;;]
@test J(NNlib.relu6, -1) ≈ [0;;]
@test J(NNlib.relu6, 1) ≈ [1;;]
@test J(NNlib.relu6, 7) ≈ [0;;]
@test J(NNlib.trelu, 0.9) ≈ [0;;]
@test J(NNlib.trelu, 1.1) ≈ [1;;]
@test J(NNlib.swish, -5) ≈ [1;;]
@test J(NNlib.swish, 0) ≈ [1;;]
@test J(NNlib.swish, 5) ≈ [1;;]
@test J(NNlib.hardswish, -5) ≈ [0;;]
@test J(NNlib.hardswish, 0) ≈ [1;;]
@test J(NNlib.hardswish, 5) ≈ [1;;]
@test J(NNlib.hardσ, -4) ≈ [0;;]
@test J(NNlib.hardσ, 0) ≈ [1;;]
@test J(NNlib.hardσ, 4) ≈ [0;;]
@test J(NNlib.hardtanh, -2) ≈ [0;;]
@test J(NNlib.hardtanh, 0) ≈ [1;;]
@test J(NNlib.hardtanh, 2) ≈ [0;;]
@test J(NNlib.softshrink, -1) ≈ [1;;]
@test J(NNlib.softshrink, 0) ≈ [0;;]
@test J(NNlib.softshrink, 1) ≈ [1;;]
end
@testset "Global Hessian" begin
detector = TracerSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@testset "First-order differentiable" begin
@testset "$f" for f in NNLIB_ACTIVATIONS_F
@test H(f, 1) ≈ [0;;]
end
end
@testset "Second-order differentiable" begin
@testset "$f" for f in NNLIB_ACTIVATIONS_S
@test H(f, 1) ≈ [1;;]
end
end
end
@testset "Local Hessian" begin
detector = TracerLocalSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@test H(NNlib.relu, -1) ≈ [0;;]
@test H(NNlib.relu, 1) ≈ [0;;]
@test H(NNlib.elu, -1) ≈ [1;;]
@test H(NNlib.elu, 1) ≈ [0;;]
@test H(NNlib.celu, -1) ≈ [1;;]
@test H(NNlib.celu, 1) ≈ [0;;]
@test H(NNlib.selu, -1) ≈ [1;;]
@test H(NNlib.selu, 1) ≈ [0;;]
@test H(NNlib.relu6, -1) ≈ [0;;]
@test H(NNlib.relu6, 1) ≈ [0;;]
@test H(NNlib.relu6, 7) ≈ [0;;]
@test H(NNlib.trelu, 0.9) ≈ [0;;]
@test H(NNlib.trelu, 1.1) ≈ [0;;]
@test H(NNlib.swish, -5) ≈ [1;;]
@test H(NNlib.swish, 0) ≈ [1;;]
@test H(NNlib.swish, 5) ≈ [1;;]
@test H(NNlib.hardswish, -5) ≈ [0;;]
@test H(NNlib.hardswish, 0) ≈ [1;;]
@test H(NNlib.hardswish, 5) ≈ [0;;]
@test H(NNlib.hardσ, -4) ≈ [0;;]
@test H(NNlib.hardσ, 0) ≈ [0;;]
@test H(NNlib.hardσ, 4) ≈ [0;;]
@test H(NNlib.hardtanh, -2) ≈ [0;;]
@test H(NNlib.hardtanh, 0) ≈ [0;;]
@test H(NNlib.hardtanh, 2) ≈ [0;;]
@test H(NNlib.softshrink, -1) ≈ [0;;]
@test H(NNlib.softshrink, 0) ≈ [0;;]
@test H(NNlib.softshrink, 1) ≈ [0;;]
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 2511 | using SparseConnectivityTracer
using NaNMath
using Test
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("../tracers_definitions.jl")
# Unary NaNMath functions; each depends on its single argument, so its global
# Jacobian/Hessian sparsity with respect to that argument is dense.
nan_1_to_1 = (
    NaNMath.sqrt,
    NaNMath.sin,
    NaNMath.cos,
    NaNMath.tan,
    NaNMath.asin,
    NaNMath.acos,
    NaNMath.acosh,
    NaNMath.atanh,
    NaNMath.log,
    NaNMath.log2,
    NaNMath.log10,
    NaNMath.log1p,
    NaNMath.lgamma,
)
@testset "Jacobian Global" begin
detector = TracerSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "1-to-1 functions" begin
@testset "$f" for f in nan_1_to_1
@test J(x -> f(x[1]), rand(2)) == [1 0]
end
end
@testset "2-to-1 functions" begin
@test J(x -> NaNMath.pow(x[1], x[2]), rand(3)) == [1 1 0]
@test J(x -> NaNMath.max(x[1], x[2]), rand(3)) == [1 1 0]
@test J(x -> NaNMath.min(x[1], x[2]), rand(3)) == [1 1 0]
end
end
@testset "Jacobian Local" begin
detector = TracerLocalSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@testset "2-to-1 functions" begin
@test J(x -> NaNMath.max(x[1], x[2]), [1.0, 2.0, 0.0]) == [0 1 0]
@test J(x -> NaNMath.max(x[1], x[2]), [2.0, 1.0, 0.0]) == [1 0 0]
@test J(x -> NaNMath.min(x[1], x[2]), [1.0, 2.0, 0.0]) == [1 0 0]
@test J(x -> NaNMath.min(x[1], x[2]), [2.0, 1.0, 0.0]) == [0 1 0]
end
end
@testset "Hessian Global" begin
detector = TracerSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@testset "1-to-1 functions" begin
@testset "$f" for f in nan_1_to_1
@test H(x -> f(x[1]), rand(2)) == [1 0; 0 0]
end
end
@testset "2-to-1 functions" begin
@test H(x -> NaNMath.pow(x[1], x[2]), rand(3)) == [1 1 0; 1 1 0; 0 0 0]
@test H(x -> NaNMath.max(x[1], x[2]), rand(3)) == zeros(Bool, 3, 3)
@test H(x -> NaNMath.min(x[1], x[2]), rand(3)) == zeros(Bool, 3, 3)
end
end
@testset "Hessian Local" begin
detector = TracerLocalSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@testset "2-to-1 functions" begin
@test H(x -> NaNMath.max(x[1], x[2]), [1.0, 2.0, 0.0]) == zeros(Bool, 3, 3)
@test H(x -> NaNMath.max(x[1], x[2]), [2.0, 1.0, 0.0]) == zeros(Bool, 3, 3)
@test H(x -> NaNMath.min(x[1], x[2]), [1.0, 2.0, 0.0]) == zeros(Bool, 3, 3)
@test H(x -> NaNMath.min(x[1], x[2]), [2.0, 1.0, 0.0]) == zeros(Bool, 3, 3)
end
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 1043 |
using SparseConnectivityTracer
using SpecialFunctions: erf, beta
using Test
# Load definitions of GRADIENT_TRACERS, GRADIENT_PATTERNS, HESSIAN_TRACERS and HESSIAN_PATTERNS
include("../tracers_definitions.jl")
@testset "Jacobian Global" begin
detector = TracerSparsityDetector()
J(f, x) = jacobian_sparsity(f, x, detector)
@test J(x -> erf(x[1]), rand(2)) == [1 0]
@test J(x -> beta(x[1], x[2]), rand(3)) == [1 1 0]
end
# TODO: add tests
# @testset "Jacobian Local" begin
# detector = TracerLocalSparsityDetector()
# J(f, x) = jacobian_sparsity(f, x, detector)
# end
@testset "Global Hessian" begin
detector = TracerSparsityDetector()
H(f, x) = hessian_sparsity(f, x, detector)
@test H(x -> erf(x[1]), rand(2)) == [
1 0
0 0
]
@test H(x -> beta(x[1], x[2]), rand(3)) == [
1 1 0
1 1 0
0 0 0
]
end
# TODO: add tests
# @testset "Local Hessian" begin
# detector = TracerLocalSparsityDetector()
# H(f, x) = hessian_sparsity(f, x, detector)
# end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 618 | using SparseConnectivityTracer
using SparseConnectivityTracer: DuplicateVector, RecursiveSet, SortedVector, product
using Test
@testset "$S" for S in (
BitSet, Set{Int}, DuplicateVector{Int}, RecursiveSet{Int}, SortedVector{Int}
)
x = S.(1:10)
y = (x[1] ∪ x[3]) ∪ (x[3] ∪ ((x[5] ∪ x[7]) ∪ x[1]))
@test length(string(x)) > 0
@test eltype(y) == Int
@test length(y) == 4
@test sort(collect(y)) == [1, 3, 5, 7]
@test sort(collect(copy(y))) == [1, 3, 5, 7]
P = collect(product(y, y)) # (1,1), (1,3), (1,5), (1,7), (3,3), (3,5), (3,7), (5,5), (5,7), (7,7)
@test length(P) == 10
end
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | code | 933 | using SparseConnectivityTracer
using SparseConnectivityTracer: SortedVector
using Test
@testset "SortedVector merging" begin
@testset "$T - ($k1, $k2)" for T in (Int32, Int64),
k1 in (0, 10, 100, 1000),
k2 in (0, 10, 100, 1000)
@test all(1:100) do _
x = SortedVector{T}(rand(T(1):T(1000), k1); sorted=false)
y = SortedVector{T}(sort(rand(T(1):T(1000), k2)); sorted=true)
z = union(x, y)
eltype(z) == T || return false
issorted(z.data) || return false
Set(z.data) == union(Set(x.data), Set(y.data)) || return false
if k1 > 0 && k2 > 0
xc = collect(x)
yc = collect(y)
zc = collect(z)
zc[1] == min(xc[1], yc[1]) || return false
zc[end] == max(xc[end], yc[end]) || return false
end
return true
end
end
end;
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 7780 | # SparseConnectivityTracer.jl
## Version `v0.6.6`
* ![Bugfix][badge-bugfix] Fix detector display by replacing `println` with `print` ([#201])
* ![Enhancement][badge-enhancement] Improve code generation for 2-to-1 overloads on arbitrary types ([#197], [#202])
* ![Maintenance][badge-maintenance] Update package tests and CI workflow ([#198], [#199])
## Version `v0.6.5`
* ![Bugfix][badge-bugfix] Fix LogExpFunctions.jl compat entry ([#195])
* ![Documentation][badge-docs] Fix "How it works" documentation ([#193])
## Version `v0.6.4`
* ![Enhancement][badge-enhancement] Shorter printing of default detectors ([#190])
* ![Documentation][badge-docs] Consistently refer to `TracerSparsityDetector` as `detector` ([#191])
* ![Maintenance][badge-maintenance] Make imports explicit, test with ExplicitImports.jl ([#188])
## Version `v0.6.3`
* ![Feature][badge-feature] Add DataInterpolations.jl package extension ([#178])
* ![Feature][badge-feature] Add LogExpFunctions.jl package extension ([#184])
* ![Feature][badge-feature] Add NaNMath.jl package extension ([#187])
* ![Feature][badge-feature] Support two-argument `atan` and `log` ([#185])
* ![Documentation][badge-docs] Document limitations of operator overloading utils ([#180])
* ![Documentation][badge-docs] Reexport ADTypes interface ([#182])
* ![Documentation][badge-docs] Update developer documentation URLs ([#186])
* ![Maintenance][badge-maintenance] Reorganize code and update code generation utilities ([#179], [#183])
## Version `v0.6.2`
* ![Feature][badge-feature] Return only primal value when applying non-differentiable methods to `Dual` numbers ([#169])
* ![Feature][badge-feature] Increase sparsity for Diagonal inputs ([#165])
* ![Feature][badge-feature] Add more methods on `round`, `rand` ([#162])
* ![Documentation][badge-docs] This release brings large updates to the documentation:
* Document limitations ([#175])
* Document global vs. local patterns ([#176])
* Add "How it works" developer documentation ([#174])
* Add developer documentation on custom overloads ([#177])
* ![Enhancement][badge-enhancement] Refactor type conversions ([#173], [#168], [#166])
* ![Enhancement][badge-enhancement] Make comparisons regular operators ([#169])
* ![Bugfix][badge-bugfix] Fix Hessian on NNlib activation functions `celu`, `elu`, `selu`, `hardswish` ([#162])
* ![Bugfix][badge-bugfix] Fix `isless` ([#161])
## Version `v0.6.1`
* ![Enhancement][badge-enhancement] Improve the performance of Hessian pattern tracing by an order of magnitude:
* Internally represent Hessian patterns with dictionaries ([#155], [#158])
* Increase performance via symmetry of Hessian ([#151])
* ![Feature][badge-feature] Support ComponentArrays ([#146])
* ![Feature][badge-feature] Support boolean not (`!`) ([#150])
* ![Feature][badge-feature] Support `isless` ([#149])
## Version `v0.6.0`
* ![BREAKING][badge-breaking] Remove `ConnectivityTracer` ([#140])
* ![BREAKING][badge-breaking] Remove legacy interface ([#140])
* instead of `jacobian_pattern(f, x)`, use `jacobian_sparsity(f, x, TracerSparsityDetector())`
* instead of `hessian_pattern(f, x)`, use `hessian_sparsity(f, x, TracerSparsityDetector())`
* instead of `local_jacobian_pattern(f, x)`, use `jacobian_sparsity(f, x, TracerLocalSparsityDetector())`
* instead of `local_hessian_pattern(f, x)`, use `hessian_sparsity(f, x, TracerLocalSparsityDetector())`
* ![Bugfix][badge-bugfix] Remove overloads on `similar` to reduce amount of invalidations ([#132])
* ![Bugfix][badge-bugfix] Fix sparse array construction ([#142])
* ![Enhancement][badge-enhancement] Add array overloads ([#131])
* ![Enhancement][badge-enhancement] Generalize sparsity pattern representations ([#139], [#119])
* ![Enhancement][badge-enhancement] Reduce allocations of new tracers ([#128])
* ![Enhancement][badge-enhancement] Reduce compile times ([#119])
[badge-breaking]: https://img.shields.io/badge/BREAKING-red.svg
[badge-deprecation]: https://img.shields.io/badge/deprecation-orange.svg
[badge-feature]: https://img.shields.io/badge/feature-green.svg
[badge-enhancement]: https://img.shields.io/badge/enhancement-blue.svg
[badge-bugfix]: https://img.shields.io/badge/bugfix-purple.svg
[badge-experimental]: https://img.shields.io/badge/experimental-lightgrey.svg
[badge-maintenance]: https://img.shields.io/badge/maintenance-gray.svg
[badge-docs]: https://img.shields.io/badge/docs-orange.svg
[#202]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/202
[#201]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/201
[#199]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/199
[#198]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/198
[#197]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/197
[#195]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/195
[#193]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/193
[#191]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/191
[#190]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/190
[#188]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/188
[#186]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/186
[#185]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/185
[#184]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/184
[#183]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/183
[#182]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/182
[#180]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/180
[#179]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/179
[#178]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/178
[#177]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/177
[#176]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/176
[#175]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/175
[#174]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/174
[#173]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/173
[#169]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/169
[#168]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/168
[#166]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/166
[#165]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/165
[#162]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/162
[#161]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/161
[#158]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/158
[#155]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/155
[#151]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/151
[#150]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/150
[#149]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/149
[#146]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/146
[#142]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/142
[#140]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/140
[#139]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/139
[#132]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/132
[#131]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/131
[#128]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/128
[#126]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/126
[#119]: https://github.com/adrhill/SparseConnectivityTracer.jl/pull/119
<!--
# Badges
![BREAKING][badge-breaking]
![Deprecation][badge-deprecation]
![Feature][badge-feature]
![Enhancement][badge-enhancement]
![Bugfix][badge-bugfix]
![Experimental][badge-experimental]
![Maintenance][badge-maintenance]
![Documentation][badge-docs]
-->
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 6581 | # SparseConnectivityTracer.jl
| | |
|:--------------|:--------------------------------------------------------------------|
| Documentation | [](https://adrhill.github.io/SparseConnectivityTracer.jl/stable/) [](https://adrhill.github.io/SparseConnectivityTracer.jl/dev/) [](https://github.com/adrhill/SparseConnectivityTracer.jl/blob/main/CHANGELOG.md) |
| Build Status | [](https://github.com/adrhill/SparseConnectivityTracer.jl/actions/workflows/CI.yml?query=branch%3Amain) [](https://codecov.io/gh/adrhill/SparseConnectivityTracer.jl) [](https://github.com/JuliaTesting/Aqua.jl) [](https://github.com/aviatesk/JET.jl) |
| Code Style | [](https://github.com/invenia/BlueStyle) [](https://github.com/SciML/ColPrac) |
| Downloads | [](http://juliapkgstats.com/pkg/SparseConnectivityTracer) [](https://juliahub.com/ui/Packages/General/SparseConnectivityTracer?t=2) |
| Citation | [](https://zenodo.org/doi/10.5281/zenodo.13138554) |
Fast Jacobian and Hessian sparsity detection via operator-overloading.
## Installation
To install this package, open the Julia REPL and run
```julia-repl
julia> ]add SparseConnectivityTracer
```
## Examples
### Jacobian
For functions `y = f(x)` and `f!(y, x)`, the sparsity pattern of the Jacobian can be obtained
by computing a single forward-pass through the function:
```julia-repl
julia> using SparseConnectivityTracer
julia> detector = TracerSparsityDetector();
julia> x = rand(3);
julia> f(x) = [x[1]^2, 2 * x[1] * x[2]^2, sin(x[3])];
julia> jacobian_sparsity(f, x, detector)
3×3 SparseArrays.SparseMatrixCSC{Bool, Int64} with 4 stored entries:
1 ⋅ ⋅
1 1 ⋅
⋅ ⋅ 1
```
As a larger example, let's compute the sparsity pattern from a convolutional layer from [Flux.jl](https://github.com/FluxML/Flux.jl):
```julia-repl
julia> using SparseConnectivityTracer, Flux
julia> detector = TracerSparsityDetector();
julia> x = rand(28, 28, 3, 1);
julia> layer = Conv((3, 3), 3 => 2);
julia> jacobian_sparsity(layer, x, detector)
1352×2352 SparseArrays.SparseMatrixCSC{Bool, Int64} with 36504 stored entries:
⎡⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠻⣷⣤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⎤
⎢⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠙⢿⣦⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠀⠀⠙⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⣀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⠻⣷⣄⠀⎥
⎢⢤⣤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⠛⢦⣤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⠳⣤⣤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⠓⎥
⎢⠀⠙⢿⣦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠉⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⣄⠀⠀⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠻⣷⣄⠀⠀⠀⠀⎥
⎢⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⠀⠀⎥
⎣⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠛⢿⣦⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣷⣄⎦
```
### Hessian
For scalar functions `y = f(x)`, the sparsity pattern of the Hessian of $f$ can be obtained
by computing a single forward-pass through `f`:
```julia-repl
julia> x = rand(5);
julia> f(x) = x[1] + x[2]*x[3] + 1/x[4] + 1*x[5];
julia> hessian_sparsity(f, x, detector)
5×5 SparseArrays.SparseMatrixCSC{Bool, Int64} with 3 stored entries:
⋅ ⋅ ⋅ ⋅ ⋅
⋅ ⋅ 1 ⋅ ⋅
⋅ 1 ⋅ ⋅ ⋅
⋅ ⋅ ⋅ 1 ⋅
⋅ ⋅ ⋅ ⋅ ⋅
julia> g(x) = f(x) + x[2]^x[5];
julia> hessian_sparsity(g, x, detector)
5×5 SparseArrays.SparseMatrixCSC{Bool, Int64} with 7 stored entries:
⋅ ⋅ ⋅ ⋅ ⋅
⋅ 1 1 ⋅ 1
⋅ 1 ⋅ ⋅ ⋅
⋅ ⋅ ⋅ 1 ⋅
⋅ 1 ⋅ ⋅ 1
```
For more detailed examples, take a look at the [documentation](https://adrianhill.de/SparseConnectivityTracer.jl/dev).
### Local tracing
`TracerSparsityDetector` returns conservative sparsity patterns over the entire input domain of `x`.
It is not compatible with functions that require information about the primal values of a computation (e.g. `iszero`, `>`, `==`).
To compute a less conservative sparsity pattern at an input point `x`, use `TracerLocalSparsityDetector` instead.
Note that patterns computed with `TracerLocalSparsityDetector` depend on the input `x` and have to be recomputed when `x` changes:
```julia-repl
julia> using SparseConnectivityTracer
julia> detector = TracerLocalSparsityDetector();
julia> f(x) = ifelse(x[2] < x[3], x[1] ^ x[2], x[3] * x[4]);
julia> hessian_sparsity(f, [1 2 3 4], detector)
4×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 4 stored entries:
1 1 ⋅ ⋅
1 1 ⋅ ⋅
⋅ ⋅ ⋅ ⋅
⋅ ⋅ ⋅ ⋅
julia> hessian_sparsity(f, [1 3 2 4], detector)
4×4 SparseArrays.SparseMatrixCSC{Bool, Int64} with 2 stored entries:
⋅ ⋅ ⋅ ⋅
⋅ ⋅ ⋅ ⋅
⋅ ⋅ ⋅ 1
⋅ ⋅ 1 ⋅
```
## ADTypes.jl compatibility
SparseConnectivityTracer uses [ADTypes.jl](https://github.com/SciML/ADTypes.jl)'s interface for [sparsity detection](https://sciml.github.io/ADTypes.jl/stable/#Sparsity-detector),
making it compatible with [DifferentiationInterface.jl](https://github.com/gdalle/DifferentiationInterface.jl)'s [sparse automatic differentiation](https://gdalle.github.io/DifferentiationInterface.jl/DifferentiationInterface/stable/tutorial2/) functionality.
In fact, the functions `jacobian_sparsity` and `hessian_sparsity` are re-exported from ADTypes.
## Related packages
* [SparseDiffTools.jl](https://github.com/JuliaDiff/SparseDiffTools.jl): automatic sparsity detection via Symbolics.jl and Cassette.jl
* [SparsityTracing.jl](https://github.com/PALEOtoolkit/SparsityTracing.jl): automatic Jacobian sparsity detection using an algorithm based on SparsLinC by Bischof et al. (1996)
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 10806 | # Adding Overloads
!!! danger "Internals may change"
The developer documentation might refer to internals which can change without warning in a future release of SparseConnectivityTracer.
Only functionality that is exported or part of the [user documentation](@ref api) adheres to semantic versioning.
Having read our guide [*"How SparseConnectivityTracer works"*](@ref how-sct-works), you might want to add your own methods on
[`GradientTracer`](@ref SparseConnectivityTracer.GradientTracer),
[`HessianTracer`](@ref SparseConnectivityTracer.HessianTracer) and
[`Dual`](@ref SparseConnectivityTracer.Dual)
to improve the performance of your functions or to work around some of SCT's [limitations](@ref limitations).
## Avoid hand-written overloads
!!! warning "Don't overload manually"
If you want to overload a `Function` that takes `Real` arguments,
we strongly discourage you from manually adding methods to your function that use our internal tracer types.
Instead, use the same code generation mechanisms that we use.
This page of the documentation shows you how.
!!! tip "Copy one of our package extensions"
The easiest way to add overloads is to copy one of our package extensions, [e.g. our NNlib extension](https://github.com/adrhill/SparseConnectivityTracer.jl/blob/main/ext/SparseConnectivityTracerNNlibExt.jl), and to modify it.
Please upstream your additions by opening a pull request! We will help you out to get your feature merged.
## Operator classification
SCT currently supports three types of functions:
1. **1-to-1**: operators with one input and one output
2. **2-to-1**: operators with two inputs and one output
3. **1-to-2**: operators with one input and two outputs
Depending on the type of function you're dealing with, you will have to specify the way in which your function is differentiable:
| In | Out | Examples | Methods you need to implement |
|:--:|:---:|:-------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------|
| 1 | 1 | `sin`, `cos`, `abs` | `is_der1_zero_global`, `is_der2_zero_global` |
| 2 | 1 | `+`, `*`, `>`, `isequal` | `is_der1_arg1_zero_global`, `is_der2_arg1_zero_global`, `is_der1_arg2_zero_global`, `is_der2_arg2_zero_global`, `is_der_cross_zero_global` |
| 1 | 2 | `sincos` | `is_der1_out1_zero_global`, `is_der2_out1_zero_global`, `is_der1_out2_zero_global`, `is_der2_out2_zero_global` |
!!! details "Methods you have to implement for 1-to-1 operators"
| Function | Meaning |
|:-------------------------------------------|:--------------------------------------------------------|
| `is_der1_zero_global(::typeof(f)) = false` | $\frac{\partial f}{\partial x} \neq 0$ for some $x$ |
| `is_der2_zero_global(::typeof(f)) = false` | $\frac{\partial^2 f}{\partial x^2} \neq 0$ for some $x$ |
Optionally, to increase the sparsity of [`TracerLocalSparsityDetector`](@ref), you can additionally implement
| Function | Meaning |
|:---------------------------------------------|:---------------------------------------------------------|
| `is_der1_zero_local(::typeof(f), x) = false` | $\frac{\partial f}{\partial x} \neq 0$ for given $x$ |
| `is_der2_zero_local(::typeof(f), x) = false` | $\frac{\partial^2 f}{\partial x^2} \neq 0$ for given $x$ |
These fall back to
```julia
is_der1_zero_local(f::F, x) where {F} = is_der1_zero_global(f)
is_der2_zero_local(f::F, x) where {F} = is_der2_zero_global(f)
```
!!! details "Methods you have to implement for 2-to-1 operators"
| Function | Meaning |
|:------------------------------------------------|:-------------------------------------------------------------------|
| `is_der1_arg1_zero_global(::typeof(f)) = false` | $\frac{\partial f}{\partial x} \neq 0$ for some $x,y$ |
| `is_der2_arg1_zero_global(::typeof(f)) = false` | $\frac{\partial^2 f}{\partial x^2} \neq 0$ for some $x,y$ |
| `is_der1_arg2_zero_global(::typeof(f)) = false` | $\frac{\partial f}{\partial y} \neq 0$ for some $x,y$ |
| `is_der2_arg2_zero_global(::typeof(f)) = false` | $\frac{\partial^2 f}{\partial y^2} \neq 0$ for some $x,y$ |
| `is_der_cross_zero_global(::typeof(f)) = false` | $\frac{\partial^2 f}{\partial x \partial y} \neq 0$ for some $x,y$ |
Optionally, to increase the sparsity of [`TracerLocalSparsityDetector`](@ref), you can additionally implement
| Function | Meaning |
|:-----------------------------------------------------|:--------------------------------------------------------------------|
| `is_der1_arg1_zero_local(::typeof(f), x, y) = false` | $\frac{\partial f}{\partial x} \neq 0$ for given $x,y$ |
| `is_der2_arg1_zero_local(::typeof(f), x, y) = false` | $\frac{\partial^2 f}{\partial x^2} \neq 0$ for given $x,y$ |
| `is_der1_arg2_zero_local(::typeof(f), x, y) = false` | $\frac{\partial f}{\partial x} \neq 0$ for given $x,y$ |
| `is_der2_arg2_zero_local(::typeof(f), x, y) = false` | $\frac{\partial^2 f}{\partial x^2} \neq 0$ for given $x,y$ |
| `is_der_cross_zero_local(::typeof(f), x, y) = false` | $\frac{\partial^2 f}{\partial x \partial y} \neq 0$ for given $x,y$ |
These fall back to
```julia
is_der1_arg1_zero_local(f::F, x, y) where {F} = is_der1_arg1_zero_global(f)
is_der2_arg1_zero_local(f::F, x, y) where {F} = is_der2_arg1_zero_global(f)
is_der1_arg2_zero_local(f::F, x, y) where {F} = is_der1_arg2_zero_global(f)
is_der2_arg2_zero_local(f::F, x, y) where {F} = is_der2_arg2_zero_global(f)
is_der_cross_zero_local(f::F, x, y) where {F} = is_der_cross_zero_global(f)
```
!!! details "Methods you have to implement for 1-to-2 operators"
| Function | Meaning |
|:-----------------------------------------------|:----------------------------------------------------------|
| `is_der1_out1_zero_global(::typeof(f)) = false` | $\frac{\partial f_1}{\partial x} \neq 0$ for some $x$ |
| `is_der2_out1_zero_global(::typeof(f)) = false` | $\frac{\partial^2 f_1}{\partial x^2} \neq 0$ for some $x$ |
| `is_der1_out2_zero_global(::typeof(f)) = false` | $\frac{\partial f_2}{\partial x} \neq 0$ for some $x$ |
| `is_der2_out2_zero_global(::typeof(f)) = false` | $\frac{\partial^2 f_2}{\partial x^2} \neq 0$ for some $x$ |
Optionally, to increase the sparsity of [`TracerLocalSparsityDetector`](@ref), you can additionally implement
| Function | Meaning |
|:--------------------------------------------------|:-----------------------------------------------------------|
| `is_der1_out1_zero_local(::typeof(f), x) = false` | $\frac{\partial f_1}{\partial x} \neq 0$ for given $x$ |
| `is_der2_out1_zero_local(::typeof(f), x) = false` | $\frac{\partial^2 f_1}{\partial x^2} \neq 0$ for given $x$ |
| `is_der1_out2_zero_local(::typeof(f), x) = false` | $\frac{\partial f_2}{\partial x} \neq 0$ for given $x$ |
| `is_der2_out2_zero_local(::typeof(f), x) = false` | $\frac{\partial^2 f_2}{\partial x^2} \neq 0$ for given $x$ |
These fall back to
```julia
is_der1_out1_zero_local(f::F, x) where {F} = is_der1_out1_zero_global(f)
is_der2_out1_zero_local(f::F, x) where {F} = is_der2_out1_zero_global(f)
is_der1_out2_zero_local(f::F, x) where {F} = is_der1_out2_zero_global(f)
is_der2_out2_zero_local(f::F, x) where {F} = is_der2_out2_zero_global(f)
```
## [Overloading](@id code-gen)
After implementing the required classification methods for a function, the function has not been overloaded on our tracer types yet.
SCT provides three functions that generate code via meta-programming:
* 1-to-1: `eval(SCT.generate_code_1_to_1(module_symbol, f))`
* 2-to-1: `eval(SCT.generate_code_2_to_1(module_symbol, f))`
* 1-to-2: `eval(SCT.generate_code_1_to_2(module_symbol, f))`
You are required to call the function that matches your type of operator.
!!! tip "Code generation"
We will take a look at the code generation mechanism in the example below.
## Example
For some examples on how to overload methods, take a look at our [package extensions](https://github.com/adrhill/SparseConnectivityTracer.jl/tree/main/ext).
Let's look at the `relu` activation function from `ext/SparseConnectivityTracerNNlibExt.jl`, which is a 1-to-1 operator defined as $\text{relu}(x) = \text{max}(0, x)$.
### Step 1: Classification
The `relu` function has a non-zero first-order derivative $\frac{\partial f}{\partial x}=1$ for inputs $x>0$.
The second derivative is zero everywhere.
We therefore implement:
```@example overload
import SparseConnectivityTracer as SCT
using NNlib
SCT.is_der1_zero_global(::typeof(relu)) = false
SCT.is_der2_zero_global(::typeof(relu)) = true
SCT.is_der1_zero_local(::typeof(relu), x) = x < 0
```
!!! warning "import SparseConnectivityTracer"
Note that we imported SCT to extend its operator classification methods on `typeof(relu)`.
### Step 2: Overloading
The `relu` function has not been overloaded on our tracer types yet.
Let's call the code generation utilities from the [*"Overloading"*](@ref code-gen) section for this purpose:
```@example overload
eval(SCT.generate_code_1_to_1(:NNlib, relu))
```
The `relu` function is now ready to be called with SCT's tracer types.
!!! details "What is the eval call doing?"
Let's call `generate_code_1_to_1` without wrapping it `eval`:
```@example overload
SCT.generate_code_1_to_1(:NNlib, relu)
```
As you can see, this returns a `quote`, a type of expression containing our generated Julia code.
**We have to use quotes:**
The code generation mechanism lives in SCT, but the generated code has to be evaluated in the package extension, not SCT.
As you can see in the generated quote, we handle the necessary name-spacing for you.
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 1238 | # [Internals Reference](@id internal-api)
!!! danger "Internals may change"
This part of the developer documentation **exclusively** refers to internals that may change without warning in a future release of SparseConnectivityTracer.
Anything written on this page should be treated as if it was undocumented.
Only functionality that is exported or part of the [user documentation](@ref api) adheres to semantic versioning.
```@index
```
## Tracer Types
```@docs
SparseConnectivityTracer.AbstractTracer
SparseConnectivityTracer.GradientTracer
SparseConnectivityTracer.HessianTracer
SparseConnectivityTracer.Dual
```
## Patterns
```@docs
SparseConnectivityTracer.AbstractPattern
```
### Gradient Patterns
```@docs
SparseConnectivityTracer.AbstractGradientPattern
SparseConnectivityTracer.IndexSetGradientPattern
```
### Hessian Patterns
```@docs
SparseConnectivityTracer.AbstractHessianPattern
SparseConnectivityTracer.IndexSetHessianPattern
SparseConnectivityTracer.DictHessianPattern
```
### Traits
```@docs
SparseConnectivityTracer.shared
```
### Utilities
```@docs
SparseConnectivityTracer.gradient
SparseConnectivityTracer.hessian
SparseConnectivityTracer.myempty
SparseConnectivityTracer.create_patterns
``` | SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 7628 | # [How SparseConnectivityTracer works](@id how-sct-works)
!!! danger "Internals may change"
The developer documentation might refer to internals which can change without warning in a future release of SparseConnectivityTracer.
Only functionality that is exported or part of the [user documentation](@ref api) adheres to semantic versioning.
## Tracers are scalars
SparseConnectivityTracer (SCT) works by pushing `Real` number types called tracers through generic functions using operator overloading.
Currently, two tracer types are provided:
* [`GradientTracer`](@ref SparseConnectivityTracer.GradientTracer): used for Jacobian sparsity patterns
* [`HessianTracer`](@ref SparseConnectivityTracer.HessianTracer): used for Hessian sparsity patterns
When used alone, these tracers compute [**global** sparsity patterns](@ref TracerSparsityDetector).
Alternatively, these can be used inside of a dual number type [`Dual`](@ref SparseConnectivityTracer.Dual),
which keeps track of the primal computation and allows tracing through comparisons and control flow.
This is how [**local** sparsity patterns](@ref TracerLocalSparsityDetector) are computed.
!!! tip "Tip: View SparseConnectivityTracer as binary ForwardDiff"
SparseConnectivityTracer's `Dual{T, GradientTracer}` can be thought of as a binary version of [ForwardDiff](https://github.com/JuliaDiff/ForwardDiff.jl)'s own `Dual` number type.
This is a good mental model for SparseConnectivityTracer if you are familiar with ForwardDiff and its limitations.
## Index sets
Let's take a look at a scalar function $f: \mathbb{R}^n \rightarrow \mathbb{R}$.
For a given input $\mathbf{x} \in \mathbb{R}^n$,
the gradient of $f$ is defined as $\left(\nabla f(\mathbf{x})\right)_{i} = \frac{\partial f}{\partial x_i}$
and the Hessian as $\left(\nabla^2 f(\mathbf{x})\right)_{i,j} = \frac{\partial^2 f}{\partial x_i \partial x_j}$.
Sparsity patterns correspond to the mask of non-zero values in the gradient and Hessian.
Instead of saving the values of individual partial derivatives, they can efficiently be represented by the set of indices corresponding to non-zero values:
* Gradient patterns are represented by sets of indices $\left\{i \;\big|\; \frac{\partial f}{\partial x_i} \neq 0\right\}$
* Hessian patterns are represented by sets of index tuples $\left\{(i, j) \;\Big|\; \frac{\partial^2 f}{\partial x_i \partial x_j} \neq 0\right\}$
!!! warning "Global vs. Local"
As shown in the page [*"Global vs. Local"*](@ref global-vs-local),
global sparsity patterns are the index sets over all $\mathbf{x}\in\mathbb{R}^n$,
whereas local patterns are the index sets for a given point $\mathbf{x}$.
For a given function $f$, global sparsity patterns are therefore always supersets of local sparsity patterns
and more "conservative" in the sense that they are less sparse.
## Motivating example
Let's take a look at the computational graph of the equation $f(\mathbf{x}) = x_1 + x_2x_3 + \text{sgn}(x_4)$,
where $\text{sgn}$ is the [sign function](https://en.wikipedia.org/wiki/Sign_function):
```mermaid
flowchart LR
subgraph Inputs
X1["$$x_1$$"]
X2["$$x_2$$"]
X3["$$x_3$$"]
X4["$$x_4$$"]
end
PLUS((+))
TIMES((*))
SIGN((sgn))
PLUS2((+))
X1 --> |"{1}"| PLUS
X2 --> |"{2}"| TIMES
X3 --> |"{3}"| TIMES
X4 --> |"{4}"| SIGN
TIMES --> |"{2,3}"| PLUS
PLUS --> |"{1,2,3}"| PLUS2
SIGN --> |"{}"| PLUS2
PLUS2 --> |"{1,2,3}"| RES["$$y=f(x)$$"]
```
To obtain a sparsity pattern, each scalar input $x_i$ gets seeded with a corresponding singleton index set $\{i\}$ [^1].
Since addition and multiplication have non-zero derivatives with respect to both of their inputs,
their outputs accumulate and propagate the index sets of their inputs (annotated on the edges of the graph above).
The sign function has zero derivatives for any input value. It therefore doesn't propagate the index set ${4}$ corresponding to the input $x_4$. Instead, it returns an empty set.
[^1]: $\frac{\partial x_i}{\partial x_j} \neq 0$ only holds for $i=j$
The resulting **global** gradient sparsity pattern $\left(\nabla f(\mathbf{x})\right)_{i} \neq 0$ for $i$ in $\{1, 2, 3\}$ matches the analytical gradient
```math
\nabla f(\mathbf{x}) = \begin{bmatrix}
\frac{\partial f}{\partial x_1} \\
\frac{\partial f}{\partial x_2} \\
\frac{\partial f}{\partial x_3} \\
\frac{\partial f}{\partial x_4}
\end{bmatrix}
=
\begin{bmatrix}
1 \\
x_3 \\
x_2 \\
0
\end{bmatrix} \quad .
```
!!! tip "From Global to Local"
Note that the **local** sparsity pattern could be more sparse in case $x_2$ and/or $x_3$ are zero.
Computing such local sparsity patterns requires [`Dual`](@ref SparseConnectivityTracer.Dual) numbers with information about the primal computation.
These are used to evaluate the **local** differentiability of operations like multiplication.
## Toy implementation
As mentioned above, SCT uses operator overloading to keep track of index sets.
Let's start by implementing our own `MyGradientTracer` type:
```@example toytracer
struct MyGradientTracer
indexset::Set
end
```
We can now overload operators from Julia Base using our type:
```@example toytracer
import Base: +, *, sign
Base.:+(a::MyGradientTracer, b::MyGradientTracer) = MyGradientTracer(union(a.indexset, b.indexset))
Base.:*(a::MyGradientTracer, b::MyGradientTracer) = MyGradientTracer(union(a.indexset, b.indexset))
Base.sign(x::MyGradientTracer) = MyGradientTracer(Set()) # return empty index set
```
Let's create a vector of tracers to represent our input and evaluate our function with it:
```@example toytracer
f(x) = x[1] + x[2]*x[3] + sign(x[4])
xtracer = [
MyGradientTracer(Set(1)),
MyGradientTracer(Set(2)),
MyGradientTracer(Set(3)),
MyGradientTracer(Set(4)),
]
ytracer = f(xtracer)
```
Compared to this toy implementation, SCT adds some utilities to automatically create `xtracer` and parse the output `ytracer` into a sparse matrix, which we will omit here.
[`jacobian_sparsity(f, x, TracerSparsityDetector())`](@ref TracerSparsityDetector) calls these three steps of (1) tracer creation, (2) function evaluation and (3) output parsing in sequence:
```@example toytracer
using SparseConnectivityTracer
x = rand(4)
jacobian_sparsity(f, x, TracerSparsityDetector())
```
## Tracing Jacobians
Our toy implementation above doesn't just work on scalar functions, but also on vector valued functions:
```@example toytracer
g(x) = [x[1], x[2]*x[3], x[1]+x[4]]
g(xtracer)
```
By stacking individual `MyGradientTracer`s row-wise, we obtain the sparsity pattern of the Jacobian of $g$
```math
J_g(\mathbf{x})=
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & x_3 & x_2 & 0 \\
1 & 0 & 0 & 1
\end{pmatrix} \quad .
```
We obtain the same result using SCT's `jacobian_sparsity`:
```@example toytracer
jacobian_sparsity(g, x, TracerSparsityDetector())
```
## Tracing Hessians
In the sections above, we outlined how to implement our own [`GradientTracer`](@ref SparseConnectivityTracer.GradientTracer) from scratch.
[`HessianTracer`](@ref SparseConnectivityTracer.HessianTracer) use the same operator overloading approach but are a bit more involved as they contain two index sets:
one for the gradient pattern and one for the Hessian pattern.
These sets are updated based on whether the first- and second-order derivatives of an operator are zero or not.
!!! tip "To be published"
Look forward to our upcoming publication of SparseConnectivityTracer,
where we will go into more detail on the implementation of `HessianTracer`!
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 678 |
```@meta
CurrentModule = Main
CollapsedDocStrings = true
```
# [API Reference](@id api)
SparseConnectivityTracer uses [ADTypes.jl](https://github.com/SciML/ADTypes.jl)'s [interface for sparsity detection](https://sciml.github.io/ADTypes.jl/stable/#Sparsity-detector).
In fact, the functions `jacobian_sparsity` and `hessian_sparsity` are re-exported from ADTypes.
```@docs
ADTypes.jacobian_sparsity
ADTypes.hessian_sparsity
```
To compute **global** sparsity patterns of `f(x)` over the entire input domain `x`, use
```@docs
TracerSparsityDetector
```
To compute **local** sparsity patterns of `f(x)` at a specific input `x`, use
```@docs
TracerLocalSparsityDetector
```
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 1819 | # [Global vs. Local Sparsity](@id global-vs-local)
Let's motivate the difference between local and global sparsity patterns by taking a look at the function $f(\mathbf{x}) = x_1x_2$.
The corresponding Jacobian is:
```math
J_f = \begin{bmatrix}
\frac{\partial f}{\partial x_1} &
\frac{\partial f}{\partial x_2}
\end{bmatrix}
=
\begin{bmatrix}
x_2 & x_1
\end{bmatrix}
```
Depending on the values of $\mathbf{x}$, the resulting **local** Jacobian sparsity pattern could be either:
* $[1\; 1]$ for $x_1 \neq 0$, $x_2 \neq 0$
* $[1\; 0]$ for $x_1 = 0$, $x_2 \neq 0$
* $[0\; 1]$ for $x_1 \neq 0$, $x_2 = 0$
* $[0\; 0]$ for $x_1 = 0$, $x_2 = 0$
These are computed by [`TracerLocalSparsityDetector`](@ref):
```@repl localvsglobal
using SparseConnectivityTracer
detector = TracerLocalSparsityDetector();
f(x) = x[1]*x[2];
jacobian_sparsity(f, [1, 1], detector)
jacobian_sparsity(f, [0, 1], detector)
jacobian_sparsity(f, [1, 0], detector)
jacobian_sparsity(f, [0, 0], detector)
```
In contrast to this, [`TracerSparsityDetector`](@ref) computes a conservative union over all sparsity patterns in $\mathbf{x} \in \mathbb{R}^2$.
The resulting **global** pattern therefore does not depend on the input.
All of the following function calls are equivalent:
```@repl localvsglobal
detector = TracerSparsityDetector()
jacobian_sparsity(f, [1, 1], detector)
jacobian_sparsity(f, [0, 1], detector)
jacobian_sparsity(f, [1, 0], detector)
jacobian_sparsity(f, [0, 0], detector)
jacobian_sparsity(f, rand(2), detector)
```
!!! tip "Global vs. Local"
Global sparsity patterns are the union of all local sparsity patterns over the entire input domain.
For a given function, they are therefore always supersets of local sparsity patterns
and more "conservative" in the sense that they are less sparse.
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.6.6 | e5efbf152d3e14a513f19a9119b810340b7ac86b | docs | 5108 | # [Limitations](@id limitations)
## Sparsity patterns are conservative approximations
Sparsity patterns returned by SparseConnectivityTracer (SCT) can in some cases be overly conservative, meaning that they might contain "too many ones".
If you observe an overly conservative pattern, [please open a feature request](https://github.com/adrhill/SparseConnectivityTracer.jl/issues) so we know where to add more method overloads to increase the sparsity.
!!! warning "SCT's no-false-negatives policy"
If you ever observe a sparsity pattern that contains too many zeros, we urge you to [open a bug report](https://github.com/adrhill/SparseConnectivityTracer.jl/issues)!
## Function must be composed of generic Julia functions
SCT can't trace through non-Julia code.
However, if you know the sparsity pattern of an external, non-Julia function,
you might be able to work around it by adding methods on SCT's tracer types.
## Function types must be generic
When computing the sparsity pattern of a function,
it must be written generically enough to accept numbers of type `T<:Real` (or `AbstractArray{<:Real}`) as inputs.
!!! details "Example: Overly restrictive type annotations"
Let's see this mistake in action:
```@example notgeneric
using SparseConnectivityTracer
detector = TracerSparsityDetector()
relu_bad(x::AbstractFloat) = max(zero(x), x)
outer_function_bad(xs) = sum(relu_bad, xs)
nothing # hide
```
Since tracers and dual numbers are `Real` numbers and not `AbstractFloat`s,
`relu_bad` throws a `MethodError`:
```@repl notgeneric
xs = [1.0, -2.0, 3.0];
outer_function_bad(xs)
jacobian_sparsity(outer_function_bad, xs, detector)
```
This is easily fixed by loosening type restrictions or adding an additional methods on `Real`:
```@example notgeneric
relu_good(x) = max(zero(x), x)
outer_function_good(xs) = sum(relu_good, xs)
nothing # hide
```
```@repl notgeneric
jacobian_sparsity(outer_function_good, xs, detector)
```
## Limited control flow
Only [`TracerLocalSparsityDetector`](@ref) supports comparison operators (`<`, `==`, ...), indicator functions (`iszero`, `iseven`, ...) and control flow.
[`TracerSparsityDetector`](@ref) does not support any boolean functions and control flow (with the exception of `ifelse`).
This might seem unintuitive but follows from our policy stated above: SCT guarantees conservative sparsity patterns.
Using an approach based on operator-overloading, this means that global sparsity detection isn't allowed to hit any branching code.
`ifelse` is the only exception, since it allows us to evaluate both branches.
!!! warning "Common control flow errors"
By design, SCT will throw errors instead of returning wrong sparsity patterns. Common error messages include:
```julia
ERROR: TypeError: non-boolean [tracer type] used in boolean context
```
```julia
ERROR: Function [function] requires primal value(s).
A dual-number tracer for local sparsity detection can be used via `TracerLocalSparsityDetector`.
```
!!! details "Why does TracerSparsityDetector not support control flow and comparisons?"
Let us motivate the design decision above by a simple example function:
```@example ctrlflow
function f(x)
if x[1] > x[2]
return x[1]
else
return x[2]
end
end
nothing # hide
```
The desired **global** Jacobian sparsity pattern over the entire input domain $x \in \mathbb{R}^2$ is `[1 1]`.
Two **local** sparsity patterns are possible:
`[1 0]` for $\{x | x_1 > x_2\}$,
`[0 1]` for $\{x | x_1 \le x_2\}$.
The local sparsity patterns of [`TracerLocalSparsityDetector`](@ref) are easy to compute using operator overloading by using [dual numbers](@ref SparseConnectivityTracer.Dual)
which contain primal values on which we can evaluate comparisons like `>`:
```@repl ctrlflow
using SparseConnectivityTracer
jacobian_sparsity(f, [2, 1], TracerLocalSparsityDetector())
jacobian_sparsity(f, [1, 2], TracerLocalSparsityDetector())
```
The global sparsity pattern is **impossible** to compute when code branches with an if-else condition,
since we can only ever hit one branch during run-time.
If we made comparisons like `>` return `true` or `false`, we'd get the local patterns `[1 0]` and `[0 1]` respectively.
But SCT's policy is to guarantee conservative sparsity patterns, which means that "false positives" (ones) are acceptable, but "false negatives" (zeros) are not.
    In our opinion, the right thing to do here is to throw an error:
```@repl ctrlflow
jacobian_sparsity(f, [1, 2], TracerSparsityDetector())
```
In some cases, we can work around this by using `ifelse`.
Since `ifelse` is a method, it can evaluate "both branches" and take a conservative union of both resulting sparsity patterns:
```@repl ctrlflow
f(x) = ifelse(x[1] > x[2], x[1], x[2])
jacobian_sparsity(f, [1, 2], TracerSparsityDetector())
```
| SparseConnectivityTracer | https://github.com/adrhill/SparseConnectivityTracer.jl.git |
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | code | 274 | using Documenter, CryptoUtils
# Build the HTML documentation for CryptoUtils.jl with Documenter.jl
makedocs(modules = [CryptoUtils],
    sitename = "CryptoUtils.jl",
    pages = Any[
        "Home" => "index.md",
        "Functions" => "api.md"
    ])
# Publish the generated site to the repository's gh-pages branch
deploydocs(repo = "github.com/fcasal/CryptoUtils.jl.git",
)
| CryptoUtils | https://github.com/fcasal/CryptoUtils.jl.git |
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | code | 14079 | module CryptoUtils
import Primes: isprime, primes
export legendre, jacobi, sqrt_mod_prime, find_quadratic_non_residue, is_quadratic_residue,
convergents, continued_fraction, surd
export n2b, b2n
export random_prime, safe_prime, tower_two_prime, get_first_primes, twin_primes,
is_generator, get_safe_prime_generator
export factor_with_ed, wiener
################################################################
## Number theory utilities ##
################################################################
"""
legendre(a::Integer, p::Integer)
Return the Legendre symbol of `(a, p)`.
`p` should be an odd prime number.
"""
function legendre(a::Integer, p::Integer)
if p % 2 == 0 && !isprime(p)
throw("Argument p should be an odd prime.")
end
b = div(p - 1, 2)
return powermod(a, b, p)
end
"""
jacobi(n::Integer, k::Integer)
Return the Jacobi symbol of `n, k`.
`k` should be an odd number.
"""
function jacobi(n::Integer, k::Integer)
rem(k, 2) != zero(k) || throw("Argument k=$k should be odd.")
n = mod(n, k)
t = 1
while n != zero(n)
while rem(n, 2) == 0
n = div(n, 2)
r = k % 8
if r == 3 || r == 5
t = -t
end
end
n, k = k, n
if n % 4 == 3 && k % 4 == 3
t = -t
end
n = mod(n, k)
end
if k == oneunit(k)
return t
else
return 0
end
end
"""
find_quadratic_non_residue(p::Integer)
Return a random number `R` which has no square root mod `p`,
i.e., `x^2 == R mod p` has no solutions.
"""
function find_quadratic_non_residue(p::Integer)
two = big"2"
qnr = rand(two:p - 1)
while jacobi(qnr, p) != -1
qnr = rand(two:p)
end
return qnr
end
"""
tonelli_shanks(a::Integer, p::Integer)
Implements the Tonelli Shanks algorithm for computing
square roots modulo a prime number.
It assumes such square roots exist.
"""
function tonelli_shanks(a::Integer, p::Integer)
b = find_quadratic_non_residue(p)
t = p - 1
s = 0
while t % 2 == 0
t = div(t, 2)
s += 1
end
M = s
Q = t
c = powermod(b, t, p)
t = powermod(a, t, p)
r = powermod(a, div(Q + 1, 2), p)
while t != 1
i = 1
tpow = powermod(t, 2, p)
while tpow != 1
tpow = mod(tpow * tpow, p)
i += 1
end
po = M - i - 1
b = powermod(c, big"2"^po, p)
M = i
c = powermod(b, 2, p)
t = mod(t * powermod(b, 2, p), p)
r = mod(r * b, p)
end
return r
end
"""
hoc_sqrt(a::Integer, p::Integer)
Algorithm from Handbook of cryptography, Koblitz pp 48-49.
Finds a solution to `x^2 == a mod p`.
It assumes such solution exists.
Running time highly depends on |alpha|, assuming
`p-1 = 2^alpha * s`, for an odd `s`.
"""
function hoc_sqrt(a::Integer, p::Integer)
n = find_quadratic_non_residue(p)
s = p - 1
alpha = 0
while s % 2 == 0
s = div(s, 2)
alpha += 1
end
inv_a = invmod(a, p)
b = powermod(n, s, p)
r = powermod(a, div(s + 1, 2), p)
r_sqr = powermod(r, 2, p)
two = BigInt(2)
expon = two^(alpha - 1)
for i in 1:alpha - 1
expon = div(expon, two)
d = powermod(inv_a * r_sqr, expon, p)
if d == p - 1
r = mod(r * b, p)
r_sqr = powermod(r, two, p)
end
b = powermod(b, two, p)
end
return r
end
"""
is_quadratic_residue(a::Integer, p::Integer) -> Bool
Return true or false depending on wheter `a` is a quadratic residue mod `p`.
That is, it checks if `x^2 == a mod p` has solutions.
"""
function is_quadratic_residue(a::Integer, p::Integer)
if p == oftype(p, 2)
return true
end
return jacobi(a, p) == 1
end
"""
sqrt_mod_prime(a::Integer, p::Integer) -> Integer
Solves `x^2 == a mod p` and returns one of the square roots `r`.
The other root is `p - r`.
If there are no solutions, throws an exception.
```julia
julia> sqrt_mod_prime(33^2, 73)
33
```
"""
function sqrt_mod_prime(a::Integer, p::Integer)
a = mod(a, p)
is_quadratic_residue(a, p) || throw("$a is not a quadratic residue mod $p.")
if p % 2 == 0
return a
elseif p % 4 == 3
return powermod(a, div(p + 1, 4), p)
elseif p % 8 == 5
d = powermod(a, div(p - 1, 4), p)
if d == 1
r = powermod(a, div(p + 3, 8), p)
elseif d == p - 1
r = mod(2 * a * powermod(4 * a, div(p - 5, 8), p), p)
end
return r
# If p-1 is of the form 2^k*s for large k, use tonelli-shanks.
# Here k is large if k > 100
elseif mod(p - 1, 1267650600228229401496703205376) == 0
return tonelli_shanks(a, p)
# depends on size of k
else
return hoc_sqrt(a, p)
end
end
"""
continued_fraction(a::T, b::T) where T <: Integer
Return the continued fraction of the rational `a/b`.
# Example
```julia
julia> continued_fraction(31, 73)
6-element Array{Int64,1}:
0
2
2
1
4
2
```
"""
function continued_fraction(a::T, b::T) where T <: Integer
r = a // b
p = Integer(floor(r))
fraction = typeof(a)[]
push!(fraction, p)
f = r - p
while f != 0
r = 1 // f
p = Integer(floor(r))
push!(fraction, p)
f = r - p
end
return fraction
end
"""
convergents(a::T, b::T) where T <: Integer
Return the convergents of a rational `a/b`.
# Example
```julia
julia> convergents(31, 73)
6-element Array{Rational,1}:
0//1
1//2
2//5
3//7
14//33
31//73
```
"""
function convergents(a::T, b::T) where T <: Integer
fraction = continued_fraction(a, b)
return convergents(fraction)
end
"""
convergents(cont_fraction::Array)
Return the convergents given the continued fraction of a rational.
"""
function convergents(cont_fraction::Array)
nums = BigInt[0, 1]
dens = BigInt[1, 0]
converg = Rational[]
for a in cont_fraction
next_num = a * nums[end] + nums[end - 1]
next_den = a * dens[end] + dens[end - 1]
push!(converg, next_num // next_den)
push!(nums, next_num)
push!(dens, next_den)
end
return converg
end
"""
solve_quadratic(a::Integer, b::Integer, c::Integer)
Return solutions to the equation `a*x^2 + b*x + c`.
Assumes the solutions are integer.
"""
function solve_quadratic(a::Integer, b::Integer, c::Integer)
d = isqrt(b^2 - 4 * a * c)
return div(-b + d, 2 * a), div(-b - d, 2 * a)
end
"""
surd(n::BigInt, k::Int64)
Return largest integer smaller or equal than the `k`-th root of `n`.
"""
function surd(n::BigInt, k::Int64)
if k == 1
return n
elseif k == 0
return big"1"
end
low = 1
high = n
mid = div(low + high, 2)
while abs(mid^k - n) >= 0.49
if mid^k < n
low = mid
elseif mid^k > n
high = mid
end
mid = div(low + high, 2)
if high - low == 1
return low
end
end
return floor(mid)
end
"""
is_generator(g::Integer, q::Integer, factors::Array) -> Bool
Returns true if `g` is a generator of `Z_q` where `q` is prime and
`factors` is the prime factorization of `q - 1 = p1^e1 * p2^e2 ... * pk^ek`.
```
q = 2^7 * 5 + 1
is_generator(2, q, [2, 5]) -> false
is_generator(3, q, [2, 5]) -> true
```
"""
function is_generator(g::Integer, q::Integer, factors::Array)::Bool
if q % 2 == 0 && !isprime(q)
throw("Argument q should be an odd prime.")
end
n = q - 1
for factor in factors
if powermod(g, div(n,factor), q) == 1
return false
end
end
return true
end
"""
get_safe_prime_generator(q::BigInt) -> BigInt
Returns a generator of `Z_q`, where `q = 2 * p + 1` with `q, p` primes.
"""
function get_safe_prime_generator(q::BigInt)::BigInt
if q % 2 == 0 && !isprime(q)
throw("Argument q should be an odd prime.")
end
p = div(q - 1, 2)
factors = [2, p]
@label sample_generator
g = rand(1 : q - 1)
if !is_generator(g, q, factors)
@goto sample_generator
end
return g
end
################################################################
## Prime number utilities ##
################################################################
"""
random_prime(bitsize::Integer) -> BigInt
Return a random prime with `bitsize` bits.
```
julia> random_prime(42)
2458636110727
```
"""
function random_prime(bitsize::Integer)::BigInt
n = oneunit(BigInt)
two = BigInt(2)
lo = two^(bitsize - 1)
hi = two^bitsize
while !isprime(n)
n = rand(lo:hi)
end
return n
end
"""
tower_two_prime(bitsize::Integer, tower_len::Integer) -> BigInt
Return a random prime of the form `2^towerlen * q + 1`
with `bitsize` bits and where `q` is also a prime.
```
julia> tower_two_prime(22, 6)
2362433
```
"""
function tower_two_prime(bitsize::Integer, tower_len::Integer)::BigInt
n = oneunit(BigInt)
tower = big"2"^tower_len
lo = big"2"^(bitsize - tower_len - 1)
hi = big"2"^(bitsize - tower_len)
while !isprime(n)
n = tower * random_prime(bitsize - tower_len) + 1
end
return n
end
"""
safe_prime(bitsize::Integer) -> BigInt
Return a random safe-prime `q` of the form `q = 2 * p + 1`
where `p` is also a prime number.
The returning prime number has `bitsize` bits.
```
julia> safe_prime(10)
1439
```
"""
function safe_prime(bitsize::Integer)::BigInt
n = oneunit(BigInt)
two = BigInt(2)
while !isprime(n)
p = random_prime(bitsize - 1)
n = two * p + 1
end
return n
end
"""
get_first_primes(k::Integer) -> Collection
Output the first `k` prime numbers.
```julia
julia> get_first_primes(10)
10-element Array{Int64,1}:
2
3
5
7
11
13
17
19
23
29
```
"""
function get_first_primes(k::Integer)
if k <= zero(k)
return []
end
l = log(k + 1)
hi = max(2, Int(ceil((k + 1) * (l + log(l)))))
res = primes(hi)
res = res[1:min(length(res), k)]
return res
end
"""
twin_primes(bitsize::Integer)
Return a pair of prime numbers `p, p + 2`
with `bitsize` bits.
This might take a while to run.
"""
function twin_primes(bitsize::Integer)
q = 1
p = 1
while !isprime(q)
p = random_prime(bitsize)
q = p + 2
end
return p, q
end
################################################################
## Conversion utilities ##
################################################################
"""
n2b(n::Integer) -> String
Converts a number to its bytes representation,
effectively writing the number in base 256, and returning
the corresponding bytes.
```julia
julia> n2b(22405534230753963835153736737)
"Hello world!"
```
"""
function n2b(n::Integer)::String
res::String = ""
while n != zero(n)
n, r = divrem(n, 256)
res = Char(r) * res
end
return res
end
"""
b2n(str::String) -> BigInt
Converts a byte-string to a number,
converting the string from base 256 to base 10.
```julia
julia> b2n("Hello world!")
22405534230753963835153736737
```
"""
function b2n(str::String)::BigInt
res::BigInt = 0
for (i, char) in enumerate(reverse(str))
res += big"256"^(i - 1) * Int(char)
end
return res
end
################################################################
## Cryptography utilities ##
################################################################
"""
factor_with_ed(n::Integer, e::Integer, d::Integer) -> (Integer, Integer)
Factors `n = p*q` given `(e, d)` such that `e*d = 1 mod phi(n)`
Stinson page 204 - algorithm 5.10
"""
function factor_with_ed(n::Integer, e::Integer, d::Integer)
k = e * d - 1
s = 0
r = k
while r % 2 == 0
s += 1
r = div(r, 2)
end
while true
w = rand(2:n - 1)
x = gcd(w, n)
if 1 < x < n
return x, div(n, x)
end
v = powermod(w, r, n)
v0 = v
while v != 1
v0 = v
v = powermod(v, 2, n)
end
if v0 == n - 1
continue
end
x = gcd(v0 + 1, n)
if 1 < x < n
return x, div(n, x)
end
end
end
"""
wiener(n::Integer, e::Integer, dujella_bound=20)
Factors the semiprime `n`, assuming Wiener's attack holds:
`d < n^(1/4)`, where `d*e = 1 mod phi(n)`.
Uses Dujella extension attack. Increasing the `dujella_bound` argument
slows the running time but increases chances of finding the correct `d`
in case `d ~ n^(1/4)`.
"""
function wiener(n::Integer, e::Integer, dujella_bound = 20)
# usual aproximation of k/d
# convergs = convergents(e, n)
# better aproximation of k/d
convergs = convergents(e, n + 1 - 2 * isqrt(n))
# ignore first convergent 0/1
deleteat!(convergs, 1)
old_d = 1
# ciphertext to test decryption exponent
test_cipher = powermod(2, e, n)
# build (r, s) pairs
# with 1 < s < r < dujella_bound
# and gcd(r, s) = 1
# https://bib.irb.hr/datoteka/383127.dujececc.pdf
pairs_rs = Pair[]
for r in 1:dujella_bound
for s in 1:r
if gcd(r, s) == 1
push!(pairs_rs, r => s)
end
end
end
for fraction in convergs
k = numerator(fraction)
d = denominator(fraction)
# regular Wiener attack
if powermod(test_cipher, d, n) == 2
phi = div(e * d - 1, k)
p, q = solve_quadratic(1, phi - n - 1, n)
@assert p * q == n
return p, q
end
# Dujella extension
for p in pairs_rs
dujella = p.first * d + p.second * old_d
if powermod(test_cipher, dujella, n) == 2
return factor_with_ed(n, e, dujella)
end
end
old_d = d
end
end
end # module
| CryptoUtils | https://github.com/fcasal/CryptoUtils.jl.git |
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | code | 3188 |
# Short Weierstrass curve y^2 = x^3 + a*x + b with arithmetic done modulo n
# (the slope formulas in `double`/`add` assume this form)
struct EllipticCurve
    a::BigInt
    b::BigInt
    n::BigInt
end
# Affine curve point (x, y); the coordinates (-1, -1) act as a sentinel
# for the point at infinity (see `isInfinityPoint` / `InfinityPoint`)
struct EllipticPoint
    x::BigInt
    y::BigInt
end
# True when P is the sentinel (-1, -1) encoding the point at infinity
isInfinityPoint(P::EllipticPoint) = P.x == -1 && P.y == -1
"""
    double(P::EllipticPoint, E::EllipticCurve)

Point doubling: return `2P` on `E` using the tangent slope
`lambda = (3x^2 + a) / (2y) mod n`.
"""
function double(P::EllipticPoint, E::EllipticCurve)::EllipticPoint
    # Doubling the identity gives the identity
    if isInfinityPoint(P)
        return P
    end
    # Modular inverse of 2y via the extended Euclidean algorithm.
    # NOTE(review): `inv` is only a valid inverse when g == 1; the disabled
    # branch below suggests gcd handling (e.g. ECM-style factoring) was
    # planned but never finished — confirm before relying on g != 1 cases.
    g::BigInt, inv::BigInt, _ = gcdx(2 * P.y, E.n)
    # if g != 1
    #     return g
    # elseif g == E.n
    #     return InfinityPoint()
    # end
    lambda::BigInt = mod((3 * P.x^2 + E.a) * inv, E.n)
    new_x::BigInt = mod(lambda^2 - 2 * P.x, E.n)
    new_y::BigInt = mod(lambda * (P.x - new_x) - P.y, E.n)
    return EllipticPoint(new_x, new_y)
end
"""
    add(P::EllipticPoint, Q::EllipticPoint, E::EllipticCurve)

Point addition `P + Q` on `E` via the chord slope
`lambda = (Qy - Py) / (Qx - Px) mod n`; dispatches to `double` when `P == Q`.

NOTE(review): the `@time` macros below print timings on every call — they
look like leftover benchmarking instrumentation. Also, the case `P == -Q`
(equal x, opposite y) is not handled and would not return infinity; confirm.
"""
function add(P::EllipticPoint, Q::EllipticPoint, E::EllipticCurve)::EllipticPoint
    # The point at infinity is the additive identity
    if isInfinityPoint(P)
        return Q
    elseif isInfinityPoint(Q)
        return P
    end
    # Equal points need the tangent formula, not the chord formula
    if P.x == Q.x && P.y == Q.y
        return double(P, E)
    end
    @time delta_x = mod(Q.x - P.x, E.n)
    # Modular inverse of delta_x; only valid when g == 1 (see `double`)
    @time g::BigInt, inv::BigInt, _ = gcdx(delta_x, E.n)
    # if 1 < g < E.n
    #     return g
    # elseif g == E.n
    #     return InfinityPoint()
    # end
    @time delta_y = mod(Q.y - P.y, E.n)
    @time lambda = mod(delta_y * inv, E.n)
    @time new_x = mod(lambda^2 - P.x - Q.x, E.n)
    @time new_y = mod(lambda * (P.x - new_x) - P.y, E.n)
    return EllipticPoint(new_x, new_y)
end
# Construct the sentinel point representing the group identity (infinity)
InfinityPoint() = EllipticPoint(-1, -1)
"""
    multiply(k::BigInt, P::EllipticPoint, E::EllipticCurve)

Compute the scalar multiple `k * P` on `E` by left-to-right double-and-add.
"""
function multiply(k::BigInt, P::EllipticPoint, E::EllipticCurve)::EllipticPoint
    Q = InfinityPoint()
    # Walk the bits of k from most to least significant. This replaces the
    # previous `string(k, base = 2)` round-trip, which allocated a fresh
    # string on every call; bit shifts avoid that allocation entirely.
    for i in (ndigits(k, base = 2) - 1):-1:0
        Q = double(Q, E)
        if isodd(k >> i)
            Q = add(Q, P, E)
        end
    end
    return Q
end
# Micro-benchmark for `add`: adds a point to itself a few times.
# The constants appear to be the NIST P-256 parameters and base point
# (a = -3, 256-bit prime p) — TODO confirm.
function test_add()
    p = big"115792089210356248762697446949407573530086143415290314195533631308867097853951"
    a = big"-3"
    b = big"41058363725152142129326129780047268409114441015993725554835256314039467401291"
    E = EllipticCurve(a, b, p)
    gx = big"48439561293906451759052585252797914202762949526041747995844080717082404635286"
    gy = big"36134250956749795798585127919587881956611106672985015071877198253568414405109"
    P = EllipticPoint(gx, gy)
    # Note: add(P, P, E) dispatches to `double` since both arguments are equal
    P = add(P, P, E)
    P = add(P, P, E)
    P = add(P, P, E)
    # for i=1:100000
    #     P = add(P, P, E)
    # end
end
# Micro-benchmark for `multiply`: 100_000 scalar multiplications on the
# same (apparently P-256) curve used by `test_add`.
function test_mult()
    p = big"115792089210356248762697446949407573530086143415290314195533631308867097853951"
    a = big"-3"
    b = big"41058363725152142129326129780047268409114441015993725554835256314039467401291"
    E = EllipticCurve(a, b, p)
    gx = big"48439561293906451759052585252797914202762949526041747995844080717082404635286"
    gy = big"36134250956749795798585127919587881956611106672985015071877198253568414405109"
    P = EllipticPoint(gx, gy)
    for i=1:100000
        P = multiply(big"123456", P, E)
    end
end
# Recorded baseline timings for the benchmarks above:
# initial
# 1.211539 seconds (5.45 M allocations: 153.242 MiB, 11.24% gc time)
# 21.950572 seconds (112.61 M allocations: 2.961 GiB, 12.85% gc time)
@time test_add()
# @time test_mult()
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | code | 99 | using CryptoUtils
using Test
# Placeholder test suite — no tests have been implemented yet
@testset "CryptoUtils.jl" begin
    # Write your own tests here.
end
| CryptoUtils | https://github.com/fcasal/CryptoUtils.jl.git |
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | docs | 2314 | # CryptoUtils
[](https://fcasal.github.io/CryptoUtils.jl/dev/)
## Features
This package supports a number of cryptographic and number theory related primitives:
* generation of random prime numbers and safe primes;
* Jacobi and Legendre symbols;
* modular square roots;
* continued fractions of rationals and convergents;
* "bytes to number" and "number to bytes" conversion functions;
* RSA attacks:
* Wiener's attack with Dujella's extension;
* Factor `n = p*q`, given `e` and `d`.
## Installation
To install the package, simply run from the julia REPL:
```
julia> ]add CryptoUtils
```
## Documentation
Access the full [documentation](https://fcasal.github.io/CryptoUtils.jl/dev/).
## Example
```julia
julia> using CryptoUtils
julia> p = random_prime(512); q = random_prime(512); n = p*q # generate semiprime n
131745220523048272797760331579564741491214753542423769574982847786514776153155250573474208964071358407384440164414712528658264269591534035958596966591477455650082238340858337257106511821924740760868219278777077341070367621453029038918351903399126114774879588852878367966142856170535138849910785240708139225987
julia> e = 65537
65537
julia> d = invmod(e, (p-1)*(q-1)) # compute RSA decryption exponent
32362883641310315451899592262377172791965856192371946631485250568611645044625881242387678564972226360689108476233462883544705990145324113781489121643593621753163078450834460663942035227770596133499206721223993086064885467845603112395435294663436699341967664046213003429586468421266641276398515468366056248785
julia> factor_with_ed(n, e, d) # factor n with e and d
(11209007052907094316298587866392085453240213973638699831846376613053337678939099626874977325024647359864974367465362518878257931790980202563932031187056729, 11753513928682888932534842071560505691719602160983337271901213762089936749492510218729499032535262339600976674663969869808030961514878135483359095845990203)
julia> p
11209007052907094316298587866392085453240213973638699831846376613053337678939099626874977325024647359864974367465362518878257931790980202563932031187056729
julia> q
11753513928682888932534842071560505691719602160983337271901213762089936749492510218729499032535262339600976674663969869808030961514878135483359095845990203
```
| CryptoUtils | https://github.com/fcasal/CryptoUtils.jl.git |
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | docs | 685 | # CryptoUtils
## Number to bytes conversion
```@docs
CryptoUtils.b2n
CryptoUtils.n2b
```
## Prime numbers
```@docs
CryptoUtils.random_prime
CryptoUtils.safe_prime
CryptoUtils.tower_two_prime
CryptoUtils.get_first_primes
CryptoUtils.twin_primes
```
## Number theory
```@docs
CryptoUtils.find_quadratic_non_residue
CryptoUtils.is_quadratic_residue
CryptoUtils.sqrt_mod_prime
CryptoUtils.jacobi
CryptoUtils.legendre
CryptoUtils.continued_fraction
CryptoUtils.convergents
CryptoUtils.surd
CryptoUtils.hoc_sqrt
CryptoUtils.tonelli_shanks
CryptoUtils.is_generator
CryptoUtils.get_safe_prime_generator
```
## Cryptography
```@docs
CryptoUtils.factor_with_ed
CryptoUtils.wiener
```
| CryptoUtils | https://github.com/fcasal/CryptoUtils.jl.git |
|
[
"MIT"
] | 0.1.1 | 5cf33e96d18c8aa607bc45b9c0db0e19367ad24c | docs | 2129 | # CryptoUtils.jl
## Features
This package supports a number of cryptographic and number theory related primitives:
* generation of random prime numbers and safe primes;
* Jacobi and Legendre symbols;
* modular square roots;
* continued fractions of rationals and convergents;
* "bytes to number" and "number to bytes" conversion;
* RSA attacks:
* Wiener's attack with Dujella's extension;
* Factor `n = p*q`, given `e` and `d`.
## Installation
To install the package, simply run from the julia REPL:
```
julia> ]add CryptoUtils
```
## Example
```julia
julia> using CryptoUtils
julia> p = random_prime(512); q = random_prime(512); n = p*q # generate semiprime n
131745220523048272797760331579564741491214753542423769574982847786514776153155250573474208964071358407384440164414712528658264269591534035958596966591477455650082238340858337257106511821924740760868219278777077341070367621453029038918351903399126114774879588852878367966142856170535138849910785240708139225987
julia> e = 65537
65537
julia> d = invmod(e, (p-1)*(q-1)) # compute RSA decryption exponent
32362883641310315451899592262377172791965856192371946631485250568611645044625881242387678564972226360689108476233462883544705990145324113781489121643593621753163078450834460663942035227770596133499206721223993086064885467845603112395435294663436699341967664046213003429586468421266641276398515468366056248785
julia> factor_with_ed(n, e, d) # factor n with e and d
(11209007052907094316298587866392085453240213973638699831846376613053337678939099626874977325024647359864974367465362518878257931790980202563932031187056729, 11753513928682888932534842071560505691719602160983337271901213762089936749492510218729499032535262339600976674663969869808030961514878135483359095845990203)
julia> p
11209007052907094316298587866392085453240213973638699831846376613053337678939099626874977325024647359864974367465362518878257931790980202563932031187056729
julia> q
11753513928682888932534842071560505691719602160983337271901213762089936749492510218729499032535262339600976674663969869808030961514878135483359095845990203
```
## Index
```@index
``` | CryptoUtils | https://github.com/fcasal/CryptoUtils.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 621 | using Isoplot
using Documenter
# Run doctests with `using Isoplot` preloaded in every docstring example
DocMeta.setdocmeta!(Isoplot, :DocTestSetup, :(using Isoplot); recursive=true)
# Build the HTML documentation site
makedocs(;
    modules=[Isoplot],
    authors="C. Brenhin Keller",
    repo="https://github.com/JuliaGeochronology/Isoplot.jl/blob/{commit}{path}#{line}",
    sitename="Isoplot.jl",
    format=Documenter.HTML(;
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://JuliaGeochronology.github.io/Isoplot.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
# Deploy the built site to gh-pages, tracking the `main` branch
deploydocs(;
    repo="github.com/JuliaGeochronology/Isoplot.jl",
    devbranch = "main",
)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 3120 | using Isoplot, Plots, VectorizedStatistics
# --- Example: Bayesian, Pb-loss-aware eruption age estimation from U-Pb zircon data
cd(@__DIR__)
# Example U-Pb dataset (MacLennan et al. 2020)
# Columns: 207/235, 1σ abs, 206/238, 1σ abs, correlation  (was mislabeled "206/236")
data = [1.1009 0.00093576 0.123906 0.00002849838 0.319
        1.1003 0.00077021 0.123901 0.00003531178 0.415
        1.0995 0.00049477 0.123829 0.00002538494 0.434
        1.0992 0.00060456 0.123813 0.00003652483 0.616
        1.1006 0.00071539 0.123813 0.00002228634 0.321
        1.0998 0.00076986 0.123802 0.00002537941 0.418
        1.0992 0.00065952 0.123764 0.00003589156 0.509
        1.0981 0.00109810 0.123727 0.00003959264 0.232
        1.0973 0.00052670 0.123612 0.00002966688 0.470
        1.0985 0.00087880 0.123588 0.00002842524 0.341
        1.0936 0.00054680 0.123193 0.00003264614 0.575
        1.0814 0.00051366 0.121838 0.00003045950 0.587 ]
# Turn into UPbAnalysis objects
analyses = UPbAnalysis.(eachcol(data)...,)
# Screen for discordance (keep analyses < 0.2 % discordant)
t = discordance.(analyses) .< 0.2
@info "Excluding $(count(.!t)) discordant analyses"
analyses = analyses[t]
# Plot in Wetherill concordia space
hdl = plot(xlabel="²⁰⁷Pb/²³⁵U", ylabel="²⁰⁶Pb/²³⁸U", grid=false, framestyle=:box)
plot!(hdl, analyses, color=:darkblue, alpha=0.3, label="")
concordiacurve!(hdl) # Add concordia curve
savefig(hdl, "concordia.pdf")
display(hdl)
# Plot in rankorder plot
age_06_38 = last.(age.(analyses))
rankorder_plot = rankorder(val.(age_06_38), σ1.(age_06_38), ylabel="Age (Ma)")
savefig(rankorder_plot, "rank_order.pdf")
display(rankorder_plot)
## --- Bayesian Pb-loss-aware eruption age estimation
nsteps = 10^6
tmindist, t0dist = metropolis_min(nsteps, UniformDistribution, analyses; burnin=10^4)
tpbloss = CI(t0dist)
terupt = CI(tmindist)
display(terupt)
println("Eruption/deposition age: $terupt Ma (95% CI)")
# Add to concordia plot
I = rand(1:length(tmindist), 1000) # Pick 1000 random samples from the posterior distribution
concordialine!(hdl, t0dist[I], tmindist[I], color=:darkred, alpha=0.02, label="Model: $terupt Ma") # Add to Concordia plot
display(hdl)
terupt
## --- Histogram of distribution of eruption age
h = histogram(tmindist, xlabel="Age [Ma]", ylabel="Probability Density", normalize=true, label="Eruption age", color=:darkblue, alpha=0.65, linealpha=0.1, framestyle=:box)
ylims!(h, 0, last(ylims()))
savefig(h, "EruptionAge.pdf")
display(h)
## --- Show eruption age relative to distribution of upper intercepts
uis = upperintercept.(vmean(t0dist), analyses)
h = rankorder(Isoplot.val.(uis), Isoplot.err.(uis))
plot!(h,1:length(uis),fill(terupt.lower,length(uis)),fillto=terupt.upper,color=:blue,fillalpha=0.5,linealpha=0, label="Model ($terupt Ma, 95% CI)")
plot!(h,1:length(uis),fill(terupt.mean,length(uis)),linecolor=:black,linestyle=:dot,label="",legend=:topleft,fg_color_legend=:white,framestyle=:box)
## --- Histogram of distribution of time of Pb-loss
h = histogram(t0dist, xlabel="Age [Ma]", ylabel="Probability Density", normalize=true, label="Time of Pb-loss", color=:darkblue, alpha=0.65, linealpha=0.1, framestyle=:box)
xlims!(h, 0, last(xlims()))
ylims!(h, 0, last(ylims()))
savefig(h, "PbLoss.pdf")
display(h)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 2352 | using Isoplot, Plots, Distributions, DelimitedFiles
# Example Detrital zircon U-Pb dataset (Karlstrom et al. 2018)
cd(@__DIR__)
data = readdlm("data/sixtymileA.csv", ',')
# Turn into UPbAnalysis objects
analyses = UPbAnalysis.(eachcol(data)...,)
cc = plot(xlabel="²⁰⁷Pb/²³⁵U", ylabel="²⁰⁶Pb/²³⁸U", framestyle=:box)
plot!(cc, analyses, color=:black, alpha=0.05, label="")
# Screen for discordance
t = first.(age.(analyses)) .< 1000
@info "Excluding $(count(.!t)) Paleoproterozoic analyses"
analyses = analyses[t]
# Plot in Wetherill concordia space
plot!(cc, analyses, color=:darkblue, alpha=0.5, label="")
ylims!(cc, 0, last(ylims(cc)))
xlims!(cc, 0, last(xlims(cc)))
concordiacurve!(cc) # Add concordia curve
savefig(cc, "concordia.pdf")
display(cc)
## --- Bayesian Pb-loss-aware deposition age estimation
nsteps = 10^6
tmindist, t0dist = metropolis_min(nsteps, UniformDistribution, analyses, burnin=10^4,lossprior=Normal(0,30))
## ---
tpbloss = CI(t0dist)
tdepo = CI(tmindist)
display(tdepo)
display(tpbloss)
# Add to concordia plot
I = rand(1:length(tmindist), 200) # Pick 100 random samples from the posterior distribution
concordialine!(cc, t0dist[I], tmindist[I], truncate=true, color=:darkred, alpha=0.05, label="Deposition: $tdepo Ma", legend=:bottomright) # Add to Concordia plot
savefig("concordiadepo.pdf")
display(cc)
tdepo
## --- Histogram of distribution of deposition age
te = histogram(tmindist, xlabel="Age [Ma]", ylabel="Probability Density", normalize=true, label="Deposition: $tdepo Ma", color=:darkred, fill=true, alpha=0.75, linealpha=0.1, framestyle=:box)
ylims!(te, 0, last(ylims()))
savefig(te, "depositionage.pdf")
display(te)
## --- Histogram of distribution of time of Pb-loss
tpb = histogram(t0dist, xlabel="Age [Ma]", ylabel="Probability Density", normalize=true, label="Pb-loss: $tpbloss Ma", color=:darkblue, fill=true, alpha=0.65, linealpha=0.1, framestyle=:box)
xlims!(tpb, 0, last(xlims()))
ylims!(tpb, 0, last(ylims()))
savefig(tpb, "pbloss.pdf")
display(tpb)
## --- Plot stacked
h = plot(cc,te,tpb, layout=(3,1), size=(500,1000), left_margin=(8,:mm))
savefig(h, "depositionestimation.pdf")
display(h)
## --- Standard deposition age estimation
nsteps = 10^6
tmindist68 = metropolis_min(nsteps, UniformDistribution, first.(age.(analyses)), burnin=10^4)
tdep68 = CI(tmindist68)
display(tdep68)
## ---
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 3324 | using Isoplot, Plots, VectorizedStatistics
# Example U-Pb dataset (MacLennan et al. 2020)
# 207/235 1σ abs 206/236 1σ abs correlation
data = [1.1009 0.00093576 0.123906 0.00002849838 0.319
1.1003 0.00077021 0.123901 0.00003531178 0.415
1.0995 0.00049477 0.123829 0.00002538494 0.434
1.0992 0.00060456 0.123813 0.00003652483 0.616
1.1006 0.00071539 0.123813 0.00002228634 0.321
1.0998 0.00076986 0.123802 0.00002537941 0.418
1.0992 0.00065952 0.123764 0.00003589156 0.509
1.0981 0.00109810 0.123727 0.00003959264 0.232
1.0973 0.00052670 0.123612 0.00002966688 0.470
1.0985 0.00087880 0.123588 0.00002842524 0.341
1.0936 0.00054680 0.123193 0.00003264614 0.575
1.0814 0.00051366 0.121838 0.00003045950 0.587 ]
# Turn into UPbAnalysis objects
analyses = UPbAnalysis.(eachcol(data)...,)
# Screen for discordance
t = discordance.(analyses) .< 0.2
@info "Excluding $(count(.!t)) discordant analyses"
analyses = analyses[t]
# Plot in Wetherill concordia space
cc = plot(xlabel="²⁰⁷Pb/²³⁵U", ylabel="²⁰⁶Pb/²³⁸U", framestyle=:box)
plot!(cc, analyses, color=:darkblue, alpha=0.3, label="")
concordiacurve!(cc) # Add concordia curve
savefig(cc, "concordia.pdf")
display(cc)
## --- Log likelihood distribution / demonstration of burnin
nsteps = 10^6
tmindist, tmaxdist, t0dist, lldist, acceptancedist = metropolis_minmax(nsteps, UniformDistribution, analyses)
ll = plot(lldist, label="", xlabel="Step number", ylabel="Log likelihood", color=:darkblue, framestyle=:box, alpha=0.2)
savefig(ll, "lldist.pdf")
display(ll)
## --- Bayesian Pb-loss-aware eruption age estimation
nsteps = 10^7
tmindist, t0dist = metropolis_min(nsteps, UniformDistribution, analyses, burnin=10^4)
# ---
tpbloss = CI(t0dist)
terupt = CI(tmindist)
display(terupt)
display(tpbloss)
# Add to concordia plot
I = rand(1:length(tmindist), 200) # Pick 100 random samples from the posterior distribution
concordialine!(cc, t0dist[I], tmindist[I], color=:darkred, alpha=0.05, label="Eruption: $terupt Ma", legend=:bottomright) # Add to Concordia plot
savefig("concordiaterupt.pdf")
display(cc)
terupt
## --- Histogram of distribution of eruption age
te = histogram(tmindist, xlabel="Age [Ma]", ylabel="Probability Density", normalize=true, label="Eruption: $terupt Ma", color=:darkred, fill=true, alpha=0.75, linealpha=0.1, framestyle=:box)
ylims!(te, 0, last(ylims()))
savefig(te, "eruptionage.pdf")
display(te)
## --- Histogram of distribution of time of Pb-loss
tpb = histogram(t0dist, xlabel="Age [Ma]", ylabel="Probability Density", normalize=true, label="Pb-loss: $tpbloss Ma", color=:darkblue, fill=true, alpha=0.65, linealpha=0.1, framestyle=:box)
xlims!(tpb, 0, last(xlims()))
ylims!(tpb, 0, last(ylims()))
savefig(tpb, "pbloss.pdf")
display(tpb)
## --- Plot stacked
h = plot(cc,te,tpb, layout=(3,1), size=(500,1000), left_margin=(8,:mm))
savefig(h, "eruptionestimation.pdf")
display(h)
## --- Standard eruption age estimation
nsteps = 10^7
tmindist = metropolis_min(nsteps, UniformDistribution, first.(age.(analyses)), burnin=10^4)
teruptold = CI(tmindist)
display(teruptold)
## --- Stacked plot of screening options
# h = plot(ccfiltered,cc, layout=(2,1), size=(500,660), left_margin=(4,:mm))
# savefig(h, "filteredunfiltered.pdf")
## ---
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 5964 | module MakieExt
using Isoplot
using Makie
using Measurements
import Isoplot: concordialine, concordialine!, concordiacurve, concordiacurve!
# Teach Makie to draw Ellipse and Analysis objects directly as filled polygons:
# an Analysis is first rendered to its covariance Ellipse, then to a point list
Makie.convert_arguments(P::Type{<:Poly},e::Ellipse) = convert_arguments(P,as_points(e.x,e.y))
Makie.convert_arguments(P::Type{<:Poly},a::Analysis) = convert_arguments(P,Ellipse(a))
Makie.plottype(::Ellipse) = Poly
Makie.plottype(::Analysis) = Poly
"""
    as_points(v1, v2)

Pair up two equal-length coordinate collections into a `Vector{Point2f}`
suitable for Makie plotting primitives.

Throws a `DimensionMismatch` if `v1` and `v2` differ in length.
"""
function as_points(v1, v2)
    # Fixed: this previously called `Throw(...)` (undefined), which raised an
    # UndefVarError instead of the intended DimensionMismatch
    length(v1) == length(v2) || throw(DimensionMismatch("Arguments must be the same length"))
    pointList = Point2f[]
    for i in 1:lastindex(v1)
        push!(pointList, Point2f(v1[i], v2[i]))
    end
    return pointList
end
# Plot a line between two times in Wetherill Concordia space
@recipe(ConcordiaLine, t₀, t₁) do scene
    Attributes(
        concordiaType = :Wetherril, # NOTE(review): established (misspelled) key; renaming would break callers
        color = :black,
        linewidth = 1,
        errorcolor = (:black,0.15)
    )
end
# Draw the discordia chord from time t₀ to t₁, with a shaded uncertainty band
# propagated through Measurements.jl arithmetic
function Makie.plot!(cline::ConcordiaLine)
    # Concordia ratios (as Measurements, via the λ235U/λ238U decay constants) at both endpoints
    r75₀ = ratio(cline.t₀[], λ235U)
    r68₀ = ratio(cline.t₀[], λ238U)
    r75₁ = ratio(cline.t₁[], λ235U)
    r68₁ = ratio(cline.t₁[], λ238U)
    slope = 0.0 ± 0.0
    intercept = 0.0 ± 0.0
    if cline[:concordiaType][] == :Wetherril
        # Chord through the two concordia points
        slope = (r68₁-r68₀)/(r75₁-r75₀)
        intercept = r68₀ - r75₀*slope
    elseif cline[:concordiaType][] == :TeraWasserburg
        #TODO
    else
        throw(ArgumentError("concordiaType must be :Wetherril or :TeraWasserburg"))
    end
    # Evaluate the chord on a 50-point grid between the endpoint x-values
    xmin = val(r75₀)
    xmax = val(r75₁)
    x = Observable(collect(range(xmin, xmax, length=50)))
    y = intercept .+ slope .* x[]
    y_val = Observable(val.(y))
    y_err = err.(y)
    # Closed polygon tracing the +1σ edge out and the -1σ edge back
    upperError = as_points(x[],y_val[] .+ y_err)
    lowerError = as_points(reverse(x[]),reverse(y_val[] .- y_err))
    errorPoly = Observable([upperError;lowerError])
    poly!(cline,errorPoly,color=cline[:errorcolor][],strokewidth =0)
    lines!(cline,x,y_val,color = cline[:color][], linewidth = cline[:linewidth][])
    return cline
end
# Full recipe for the concordia curve itself, with 2σ decay-constant uncertainty
# band and labeled age tick marks
@recipe(ConcordiaCurve,t₀, t₁) do scene
    Attributes(
        concordiaType = :Wetherril,
        color = :black,
        linewidth = 1,
        errorcolor = (:black,0.15),
        agefontsize = 10,
        ageticksize = 5,
        agetickcolor = :black
    )
end
function Makie.plot!(ccurve::ConcordiaCurve)
    # Uncertainty of 235 decay constant relative to the 238 decay constant
    σₜ = λ235U_jaffey.val .* sqrt((λ238U.err/λ238U.val).^2 + (λ235U_jaffey.err/λ235U_jaffey.val).^2) # 1/Years
    # Plot the concordia curve
    tlim = [ccurve.t₀[],ccurve.t₁[]]
    # xl = [ccurve.t₀[],ccurve.t₁[]]
    # xl, yl = Plots.xlims(hdl), Plots.ylims(hdl) # Note current size of figure
    # tlim = age.(max.(xl, 0.0), λ235U_jaffey.val) # Calculate time range of current window
    dt = tlim[2] - tlim[1]
    # Pad the requested time range by 10% on each side (clamped at t = 0)
    tmin = max(tlim[1]-0.1dt, 0.0)
    tmax = tlim[2]+0.1dt
    t = range(tmin, tmax, length=1000) # Time vector, including padding
    xratio = Observable(Float64[])
    yratio = Observable(Float64[])
    errx = Float64[]
    erry = Float64[]
    scale = floor(log10(tlim[2]-tlim[1])) # Characteristic timescale (order of magnitude)
    trange = round.(tlim./10.0^scale) # Minimum and maximum time to a round number
    majorstep = 0.5
    tticks = (trange[1]:majorstep:trange[2]).*10.0^scale # Time ticks, to a round number
    tickx = Observable(Float64[])
    ticky = Observable(Float64[])
    if ccurve[:concordiaType][] == :Wetherril
        xratio[]= ratio.(t, λ235U_jaffey.val) # X axis values
        yratio[] = ratio.(t, λ238U.val)# Y axis values
        # 2σ envelope from the relative λ235/λ238 uncertainty, as a closed polygon
        errx = [ratio.(t, λ235U_jaffey.val-σₜ*2); reverse(ratio.(t, λ235U_jaffey.val+σₜ*2))]
        erry = [yratio[]; reverse(yratio[])]
        tickx[] = ratio.(tticks, λ235U_jaffey.val) # X axis values
        ticky[] = ratio.(tticks, λ238U.val)# Y axis values
    elseif ccurve[:concordiaType][] == :TeraWasserburg
        # NOTE(review): this branch currently duplicates the Wetherril computation
        # verbatim — Tera-Wasserburg coordinates appear not yet implemented (cf.
        # the #TODO in the ConcordiaLine recipe); confirm before relying on it
        xratio[]= ratio.(t, λ235U_jaffey.val) # X axis values
        yratio[] = ratio.(t, λ238U.val)# Y axis values
        errx = [ratio.(t, λ235U_jaffey.val-σₜ*2); reverse(ratio.(t, λ235U_jaffey.val+σₜ*2))]
        erry = [yratio[]; reverse(yratio[])]
        tickx[] = ratio.(tticks, λ235U_jaffey.val) # X axis values
        ticky[] = ratio.(tticks, λ238U.val)# Y axis values
    else
        throw(ArgumentError("concordiaType must be :Wetherril or :TeraWasserburg"))
    end
    errorPts = Observable(as_points(errx,erry))
    poly!(ccurve,errorPts,color=ccurve[:errorcolor][],strokewidth =0)
    lines!(ccurve,xratio, yratio,color = ccurve[:color][], linewidth = ccurve[:linewidth][])
    # # Plot age markers with text labels
    scatter!(ccurve,tickx,ticky,markersize = ccurve[:ageticksize][],color = ccurve[:agetickcolor][],transform_marker=true)
    # Nudge tick labels off the curve; only label ticks comfortably inside the x-range
    xoffset = (maximum(tickx[])-minimum(tickx[]))/200
    yoffset = (maximum(ticky[])-minimum(ticky[]))/100
    t = (minimum(xratio[])+8*xoffset) .< tickx[] .< maximum(xratio[])
    ticklabels = Observable(string.(round.(Int, tticks[t])))
    #can probably make these mobile based on zoom level
    tickLabelX = Observable(tickx[][t].-xoffset)
    tickLabelY = Observable(ticky[][t].+yoffset)
    text!(ccurve,tickLabelX,tickLabelY,text=ticklabels,fontsize = ccurve[:agefontsize][],transform_marker=true)
    # Plots.annotate!(hdl, r75tticks[t].-xoffset,r68tticks[t].+yoffset,ticklabels)
    return ccurve
end
end
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 5804 | module PlotsExt
using Isoplot
using Plots: Shape, plot, plot!
import Plots
using Measurements
# export plot, plot!
const PlotOrSubplot = Union{Plots.Plot, Plots.Subplot}
Base.retry_load_extensions()
# Plot 2d uncertainty ellipses of any sort
Plots.Shape(e::Ellipse{T}) where {T} = Shape{T,T}(e.x, e.y)
Plots.plot(e::Union{Data,Vector{<:Data}}, args...; kwargs...) = plot!(plot(), e, args...; kwargs...)
# Forward Analysis -> Ellipse -> Shape for both Plot and Subplot targets, so that
# analyses (and vectors thereof) plot directly as their covariance ellipses
for P in (Plots.Plot, Plots.Subplot)
    @eval Plots.plot!(hdl::($P), a::Analysis, args...; kwargs...) = plot!(hdl, Ellipse(a), args...; kwargs...)
    @eval Plots.plot!(hdl::($P), a::Vector{<:Analysis}, args...; kwargs...) = plot!(hdl, Ellipse.(a), args...; kwargs...)
    @eval Plots.plot!(hdl::($P), e::Ellipse, args...; kwargs...) = plot!(hdl, Shape(e), args...; kwargs...)
    @eval Plots.plot!(hdl::($P), e::Vector{<:Ellipse}, args...; kwargs...) = plot!(hdl, Shape.(e), args...; kwargs...)
end
# Plot a line between two times in Wetherill Concordia space
Isoplot.concordialine(t₀, t₁; framestyle=:box, kwargs...) = concordialine!(plot(xlims=ratio.((first(t₀), first(t₁)), λ235U.val)), t₀, t₁; framestyle, kwargs...)
# Scalar method: draw a single discordia chord from t₀ to t₁ with an uncertainty
# ribbon propagated through Measurements arithmetic
function Isoplot.concordialine!(hdl::PlotOrSubplot, t₀::Number, t₁::Number; truncate::Bool=false, kwargs...)
    xl = Plots.xlims(hdl)
    r75₀ = ratio(t₀, λ235U.val)
    r68₀ = ratio(t₀, λ238U.val)
    r75₁ = ratio(t₁, λ235U.val)
    r68₁ = ratio(t₁, λ238U.val)
    slope = (r68₁-r68₀)/(r75₁-r75₀)
    intercept = r68₀ - r75₀*slope
    # With truncate=true, clip the chord to [r75₀, r75₁] ∩ current x-limits;
    # otherwise span the full current x-range
    x = if truncate
        xmin = max(first(xl), val(r75₀))
        xmax = min(last(xl), val(r75₁))
        range(xmin, xmax, length=50)
    else
        range(xl..., length=50)
    end
    y = intercept .+ slope .* x
    plot!(hdl, x, val.(y); ribbon=err.(y), kwargs...)
    Plots.xlims!(hdl, xl) # Restore original x-limits
end
# Collection method: draw one faint chord per (t₀, t₁) posterior sample, plus
# their mean chord (opaque) carrying the legend label
function Isoplot.concordialine!(hdl::PlotOrSubplot, t₀::Collection, t₁::Collection; truncate::Bool=false, label="", color=:black, alpha=0.05, kwargs...)
    xl = Plots.xlims(hdl)
    r75₀ = ratio.(t₀, λ235U.val)
    r68₀ = ratio.(t₀, λ238U.val)
    r75₁ = ratio.(t₁, λ235U.val)
    r68₁ = ratio.(t₁, λ238U.val)
    slope = @. (r68₁-r68₀)/(r75₁-r75₀)
    intercept = @. r68₀ - r75₀*slope
    x = if truncate
        xmin = max(first(xl), vminimum(r75₀))
        xmax = min(last(xl), vmaximum(r75₁))
        collect(range(xmin, xmax, length=50))
    else
        collect(range(xl..., length=50))
    end
    # Evaluate every sampled chord on the shared x grid
    y(slope, intercept) = @. intercept + slope * x
    ys = y.(slope, intercept)
    plot!(hdl, x, ys; label="", color, alpha, kwargs...)
    # Overplot the mean chord at full opacity with the user-supplied label
    plot!(hdl, x, sum(ys)./length(ys); label, color, alpha=1, kwargs...)
    Plots.xlims!(hdl, xl) # Restore original x-limits
end
# Plot the Wetherill Concordia curve
# Adds the concordia curve (solid: Jaffey λ235U; dashed: Schoene λ235U), its 2σ
# decay-constant uncertainty envelope, and labeled age tick marks, over the
# current axis limits of `hdl`
function Isoplot.concordiacurve!(hdl::PlotOrSubplot=Plots.current())
    # Uncertainty of 235 decay constant relative to the 238 decay constant
    σₜ = λ235U_jaffey.val .* sqrt((λ238U.err/λ238U.val).^2 + (λ235U_jaffey.err/λ235U_jaffey.val).^2) # 1/Years
    # Plot the concordia curve
    xl, yl = Plots.xlims(hdl), Plots.ylims(hdl) # Note current size of figure
    tlim = age.(max.(xl, 0.0), λ235U_jaffey.val) # Calculate time range of current window
    dt = tlim[2] - tlim[1]
    tmin = max(tlim[1]-0.1dt, 0.0)
    tmax = tlim[2]+0.1dt
    t = range(tmin, tmax, length=1000) # Time vector, including padding
    r75t = ratio.(t, λ235U_jaffey.val) # X axis values
    r68t = ratio.(t, λ238U.val) # Y axis values
    x = [ratio.(t, λ235U_jaffey.val-σₜ*2); reverse(ratio.(t, λ235U_jaffey.val+σₜ*2))]
    y = [r68t; reverse(r68t)]
    Plots.plot!(hdl, Shape(x,y), color=:black, alpha=0.15, label="") # Two-sigma concordia uncertainty
    Plots.plot!(hdl, r75t, r68t, color=:black, label="") # Concordia line
    r75t_Schoene = ratio.(t, λ235U.val) # X axis values
    Plots.plot!(hdl, r75t_Schoene,r68t,color=:black,style=:dash, label="") # Concordia line (Schoene λ235U)
    Plots.xlims!(hdl, xl) # Ensure that figure size hasn't changed
    Plots.ylims!(hdl, yl)
    # Calculate desired range of age markers
    scale = floor(log10(tlim[2]-tlim[1])) # Characteristic timescale (order of magnitude)
    trange = round.(tlim./10.0^scale) # Minimum and maximum time to a round number
    majorstep = 0.5
    tticks = (trange[1]:majorstep:trange[2]).*10.0^scale # Time ticks, to a round number
    r75tticks = ratio.(tticks, λ235U_jaffey.val) # X axis values
    r68tticks = ratio.(tticks, λ238U.val) # Y axis values
    # Plot age markers with text labels
    Plots.plot!(hdl, r75tticks,r68tticks, color=:black, seriestype=:scatter, ms=2, label="")
    # Offset labels slightly from their markers; only label ticks well inside the x-range
    xoffset = (xl[2]-xl[1])/200
    yoffset = (yl[2]-yl[1])/100
    t = (xl[1]+8*xoffset) .< r75tticks .< xl[2]
    ticklabels = Plots.text.(string.(round.(Int, tticks[t])), 10, :right)
    Plots.annotate!(hdl, r75tticks[t].-xoffset,r68tticks[t].+yoffset,ticklabels)
    return hdl
end
# Rank-order plots
# Plot `data` sorted ascending against rank index (i0 + scale*rank), with error bars
Isoplot.rankorder(args...; framestyle=:box, kwargs...) = rankorder!(plot(), args...; framestyle, kwargs...)
# Convenience method: combine separate value and sigma vectors into Measurements
Isoplot.rankorder!(h::PlotOrSubplot, data, sigma, i0::Number=0; kwargs...) = rankorder!(h, data .± sigma, i0; kwargs...)
function Isoplot.rankorder!(h::PlotOrSubplot, data::Vector{<:Measurement}, i0::Number=0;
        scale=1,
        label="",
        mscolor=:auto,
        seriestype=:scatter,
        xticks=Float64[],
        kwargs...
    )
    x = i0 .+ scale.*(1:length(data))
    plot!(h, x, sort(data); label, mscolor, seriestype, xticks, kwargs...)
end
end | Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 1662 | module Isoplot
using VectorizedStatistics
using LoopVectorization: @turbo
using LinearAlgebra
using Distributions
using Measurements
# A type alias for array-ish types
const Collection{T} = Union{AbstractArray{T}, NTuple{N,T}} where N
const Collection1D{T} = Union{AbstractVector{T}, NTuple{N,T}} where N
# Age of Earth and the Solar System
const t🜨 = 4.567e3 #Myr
# Abstract types which we'll subtype later
include("analysis.jl")
export age, ratio, CI, Age, Interval, Ellipse
# Weighted means and linear regression (York fit etc.)
include("regression.jl")
export wmean, awmean, gwmean, distwmean, mswd
export lsqfit, yorkfit
include("U-Pb.jl")
export UPbAnalysis, discordance, age68, age75, stacey_kramers
include("concordia.jl")
export upperintercept, lowerintercept, intercepts
# Additional isotopic systems (decay constants + Analysis subtypes)
include("U-Th.jl")
include("Re-Os.jl")
include("Lu-Hf.jl")
include("Sm-Nd.jl")
include("Rb-Sr.jl")
include("K-Ar.jl")
export UThAnalysis, ReOsAnalysis, LuHfAnalysis, SmNdAnalysis, RbSrAnalysis
# Plotting stubs implemented by the Plots/Makie package extensions
include("generic_plotting.jl")
export concordiacurve, concordiacurve!, concordialine, concordialine!,
    rankorder, rankorder!
# Bayesian eruption/deposition age estimation
include("metropolis.jl")
export metropolis_min, metropolis_min!,
    metropolis_minmax, metropolis_minmax!
include("distributions.jl")
export UniformDistribution, TriangularDistribution,
    HalfNormalDistribution, ExponentialDistribution,
    MeltsZirconDistribution, MeltsVolcanicZirconDistribution
include("show.jl")
# extra exports for pkg extensions
export Data, Analysis, Collection, val, err, vminimum, vmaximum, datalimits
end # module Isoplot
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 421 | # Renne et al. 2011 reply 10.1016/j.gca.2011.06.021
# κ parameter for the Fish Canyon sanidine standard (Renne et al. 2011 reply)
const κFCs = 1.6417E-03 ± 0.0045E-03
# ⁴⁰K decay constants: ϵ (electron-capture) and β branches, summed to the total λ40K
const λ40Kϵ = 0.5757E-4 ± 0.0017E-4 # 1/Myr
const λ40Kβ = 4.9548E-4 ± 0.0134E-4 # 1/Myr
const λ40K = λ40Kβ + λ40Kϵ
export λ40K, λ40Kβ, λ40Kϵ, κFCs
# σκσλϵ = 7.1903E-13
# σκσλβ = -6.5839E-13
# σλϵσλβ = -3.4711E-14 # 1/Myr^2
#
# Σ = [ err(κFCs)^2 σκσλϵ σκσλβ
# σκσλϵ err(λ40Kϵ)^2 σλϵσλβ
# σκσλβ σλϵσλβ err(λ40Kβ)^2]
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 324 | # Söderlund et al. (2004) 0.1016/S0012-821X(04)00012-3
# ¹⁷⁶Lu decay constant, Söderlund et al. (2004), calibrated against U-Pb
λ176Lu = 1.867e-5 ± 0.008e-5 # 1/Myr, calibrated against U-Pb # 2-sigma?
export λ176Lu
# Core type for Lu-Hf analyses: means μ, 1σ uncertainties σ, covariance matrix Σ
struct LuHfAnalysis{T} <: Analysis{T}
    μ::Vector{T}
    σ::Vector{T}
    Σ::Matrix{T}
end
# Uncorrelated-uncertainty constructor: Σ is diagonal with σᵢ² on the diagonal
LuHfAnalysis(μ::Vector{T}, σ::Vector{T}) where {T} = LuHfAnalysis{T}(μ, σ, diagm(σ).^2)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 307 | # Nebel et al. (2011) 10.1016/j.epsl.2010.11.004
# ⁸⁷Rb decay constant, Nebel et al. (2011), calibrated against U-Pb
λ87Rb = 1.393e-5 ± 0.004e-5/2 # 1/Myr, calibrated against U-Pb
export λ87Rb
# Core type for Rb-Sr analyses: means μ, 1σ uncertainties σ, covariance matrix Σ
struct RbSrAnalysis{T} <: Analysis{T}
    μ::Vector{T}
    σ::Vector{T}
    Σ::Matrix{T}
end
# Uncorrelated-uncertainty constructor: Σ is diagonal with σᵢ² on the diagonal
RbSrAnalysis(μ::Vector{T}, σ::Vector{T}) where {T} = RbSrAnalysis{T}(μ, σ, diagm(σ).^2)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 320 | # Selby et al. (2007) 10.1016/j.gca.2007.01.008
# ¹⁸⁷Re decay constant, Selby et al. (2007), calibrated against U-Pb
λ187Re = 1.6689e-5 ± 0.0031e-5 # 1/Myr, calibrated against U-Pb. # 2-sigma?
export λ187Re
# Core type for Re-Os analyses: means μ, 1σ uncertainties σ, covariance matrix Σ
struct ReOsAnalysis{T} <: Analysis{T}
    μ::Vector{T}
    σ::Vector{T}
    Σ::Matrix{T}
end
# Uncorrelated-uncertainty constructor: Σ is diagonal with σᵢ² on the diagonal
ReOsAnalysis(μ::Vector{T}, σ::Vector{T}) where {T} = ReOsAnalysis{T}(μ, σ, diagm(σ).^2)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 351 | λ147Sm = 6.524-6 ± 0.024e-6/2 # 1/Myr, Villa et al. (2020) 10.1016/j.gca.2020.06.022
# NOTE(review): the λ147Sm literal on the line above reads `6.524-6` (which
# evaluates to 0.524), almost certainly a typo for the scientific-notation
# literal `6.524e-6` [1/Myr] — confirm against Villa et al. (2020)
λ146Sm = log(2)/(103.1 ± 4.5) # 1/Myr, from the 103.1 ± 4.5 Myr half-life of Meissner et al. (1987)
export λ147Sm, λ146Sm # fixed: previously exported λ147Sm twice and never exported λ146Sm

"""
    SmNdAnalysis(μ::Vector, σ::Vector, Σ::Matrix)

Core type for Sm-Nd analyses: means `μ`, 1σ uncertainties `σ`, and covariance
matrix `Σ` of the measured isotope ratios.
"""
struct SmNdAnalysis{T} <: Analysis{T}
    μ::Vector{T}
    σ::Vector{T}
    Σ::Matrix{T}
end
# Uncorrelated-uncertainty constructor: Σ is diagonal with σᵢ² on the diagonal
SmNdAnalysis(μ::Vector{T}, σ::Vector{T}) where {T} = SmNdAnalysis{T}(μ, σ, diagm(σ).^2)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 3023 | # Decay constants:
# ²³⁸U decay constant from the 4468.3 ± 2.4 Myr half-life of Jaffey et al. (1971)
const λ238U = log(2)/(4.4683e3 ± 0.0024e3) # Jaffey, 1/Myr
# ²³⁵U decay constant of Schoene et al.; full (external) uncertainty
const λ235U = 9.8569E-4 ± 0.0110E-4/2 # Schoene, 1/Myr
const λ235U_internal = 9.8569E-4 ± 0.0017E-4/2 # Schoene, 1/Myr, including only internal uncertainty [U-238 years]
export λ238U, λ235U
# ²³⁵U decay constant from the 703.81 ± 0.48 Myr half-life of Jaffey et al. (1971)
const λ235U_jaffey = log(2)/(7.0381e2 ± 0.0048e2) # Jaffey, 1/Myr
export λ235U_jaffey
"""
```
struct UPbAnalysis{T} <: Analysis{T}
```
Core type for U-Pb analyses.
Has fields
```
μ :: Vector{T<:AbstractFloat}
σ :: Vector{T<:AbstractFloat}
Σ :: Matrix{T<:AbstractFloat}
```
where `μ` contains the means
```
μ = [r²⁰⁷Pb²³⁵U, r²⁰⁶Pb²³⁸U]
```
where `σ` contains the standard deviations
```
σ = [σ²⁰⁷Pb²³⁵U, σ²⁰⁶Pb²³⁸U]
```
and Σ contains the covariance matrix
```
Σ = [σ₇_₅^2      ρ*σ₇_₅*σ₃_₈
     ρ*σ₇_₅*σ₃_₈ σ₃_₈^2]
```
where `ρ` is the error correlation between the two ratios.
If `σ` is not provided, it will be automatically calculated from `Σ`,
given that `σ.^2 = diag(Σ)`.
"""
struct UPbAnalysis{T} <: Analysis{T}
    μ::Vector{T}
    σ::Vector{T}
    Σ::Matrix{T}
end
"""
```julia
UPbAnalysis(r²⁰⁷Pb²³⁵U, σ²⁰⁷Pb²³⁵U, r²⁰⁶Pb²³⁸U, σ²⁰⁶Pb²³⁸U, correlation; T=Float64)
```
Construct a `UPbAnalysis` object from individual isotope ratios and (1-sigma!) uncertainties,
building the full covariance matrix from the error `correlation` coefficient.
### Examples
```
julia> UPbAnalysis(22.6602, 0.0175, 0.40864, 0.00017, 0.83183)
UPbAnalysis{Float64}([22.6602, 0.40864], [0.00030625000000000004 2.4746942500000003e-6; 2.4746942500000003e-6 2.8900000000000004e-8])
```
"""
function UPbAnalysis(r²⁰⁷Pb²³⁵U::Number, σ²⁰⁷Pb²³⁵U::Number, r²⁰⁶Pb²³⁸U::Number, σ²⁰⁶Pb²³⁸U::Number, correlation::Number; T=Float64)
    # Off-diagonal covariance = ρ σ₇₅ σ₆₈
    cov = σ²⁰⁷Pb²³⁵U * σ²⁰⁶Pb²³⁸U * correlation
    Σ = T[σ²⁰⁷Pb²³⁵U^2 cov
          cov σ²⁰⁶Pb²³⁸U^2]
    σ = T[σ²⁰⁷Pb²³⁵U, σ²⁰⁶Pb²³⁸U]
    μ = T[r²⁰⁷Pb²³⁵U, r²⁰⁶Pb²³⁸U]
    UPbAnalysis(μ, σ, Σ)
end
# When only μ and Σ are given, derive σ from the diagonal of Σ
UPbAnalysis(μ::Vector{T}, Σ::Matrix{T}) where {T} = UPbAnalysis{T}(μ, sqrt.(diag(Σ)), Σ)
# 75 and 68 ages
"""
    age(d::UPbAnalysis)

Return the `(²⁰⁷Pb/²³⁵U age, ²⁰⁶Pb/²³⁸U age)` of `d` as a tuple of
`Measurement`s, in the same time units as 1/λ (Myr); decay-constant
uncertainties are propagated via the `Measurement`-valued λ235U and λ238U.
"""
function age(d::UPbAnalysis)
    return age75(d), age68(d)
end

"Return the ²⁰⁶Pb/²³⁸U age of `d` as a `Measurement`."
function age68(d::UPbAnalysis)
    r68 = d.μ[2] ± d.σ[2]
    return log(1 + r68)/λ238U
end

"Return the ²⁰⁷Pb/²³⁵U age of `d` as a `Measurement`."
function age75(d::UPbAnalysis)
    r75 = d.μ[1] ± d.σ[1]
    return log(1 + r75)/λ235U
end
# Percent discordance
"""
    discordance(d::UPbAnalysis)

Percent discordance of `d`: the difference between the ²⁰⁷Pb/²³⁵U and
²⁰⁶Pb/²³⁸U ages, relative to the ²⁰⁷Pb/²³⁵U age, times 100.
"""
function discordance(d::UPbAnalysis)
    t75 = log(1 + d.μ[1])/λ235U.val
    t68 = log(1 + d.μ[2])/λ238U.val
    return (t75 - t68) / t75 * 100
end
# Add custom methods to Base.rand to sample from a UPbAnalysis:
# draw (r²⁰⁷Pb²³⁵U, r²⁰⁶Pb²³⁸U) pairs from its multivariate normal MvNormal(μ, Σ)
Base.rand(d::UPbAnalysis) = rand(MvNormal(d.μ, d.Σ))
Base.rand(d::UPbAnalysis, n::Integer) = rand(MvNormal(d.μ, d.Σ), n)
Base.rand(d::UPbAnalysis, dims::Dims) = rand(MvNormal(d.μ, d.Σ), dims)
"""
    stacey_kramers(t)

Two-stage terrestrial Pb isotope evolution model of Stacey & Kramers (1975):
return the model `(²⁰⁶Pb/²⁰⁴Pb, ²⁰⁷Pb/²⁰⁴Pb)` of common lead at time `t` [Ma
before present]. Returns `(NaN, NaN)` for `t >= 4570`.
"""
function stacey_kramers(t)
    if 3700 <= t < 4570
        # First stage: 4570-3700 Ma
        t0 = 3700
        r64 = 11.152
        r74 = 12.998
        U_Pb = 7.19
    elseif t < 3700
        # Second stage: 3700 Ma to present
        t0 = 0
        r64 = 18.700
        r74 = 15.628
        U_Pb = 9.74
    else
        t0 = NaN
        r64 = NaN
        r74 = NaN
        U_Pb = NaN
    end
    # Remove the radiogenic ingrowth accumulated between t and the stage
    # reference time t0. 206Pb grows by 238U decay:
    r64 -= ((exp(val(λ238U)*t)-1) - (exp(val(λ238U)*t0)-1)) * U_Pb
    # 207Pb grows by 235U decay (fixed: previously used λ238U here), with the
    # 238U/204Pb ratio converted to 235U/204Pb via the present-day 238U/235U
    # ratio of 137.818:
    r74 -= ((exp(val(λ235U)*t)-1) - (exp(val(λ235U)*t0)-1)) * U_Pb/137.818
    return r64, r74
end
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 353 | # Cheng et al. (2013) 10.1016/j.epsl.2013.04.006
# ²³⁴U and ²³⁰Th decay constants from the half-lives of Cheng et al. (2013)
const λ234U = log(2)/(245620e-6 ± 260e-6/2) # 1/Myr
const λ230Th = log(2)/(75584e-6 ± 110e-6/2) # 1/Myr
export λ234U, λ230Th
# Core type for U-Th analyses: means μ, 1σ uncertainties σ, covariance matrix Σ
struct UThAnalysis{T} <: Analysis{T}
    μ::Vector{T}
    σ::Vector{T}
    Σ::Matrix{T}
end
# Uncorrelated-uncertainty constructor: Σ is diagonal with σᵢ² on the diagonal
UThAnalysis(μ::Vector{T}, σ::Vector{T}) where {T} = UThAnalysis{T}(μ, σ, diagm(σ).^2)
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 3931 | # Our overarching analysis type.
# Must contain a vector of means μ, standard deviations σ, and a covariance matrix Σ
# `Data` is the root supertype of all plottable objects (e.g. Ellipse);
# `Analysis` subtypes carry μ/σ/Σ fields by convention (see e.g. UPbAnalysis)
abstract type Data{T} end
abstract type Analysis{T<:AbstractFloat} <: Data{T} end
"""
    age(r, λ)

Radiometric age corresponding to a daughter/parent ratio `r`, for a decay
constant `λ` (time units are 1/λ's). Inverse of `ratio`.
"""
function age(r::Number, λ::Number)
    return log(1 + r)/λ
end

"""
    ratio(t, λ)

Daughter/parent ratio accumulated over time `t` for a decay constant `λ`.
Inverse of `age`.
"""
function ratio(t::Number, λ::Number)
    return exp(λ*t) - 1
end
# Extend Base.isnan to return true if any component of the Analysis is NaN
# (checked across the means, standard deviations, and the covariance matrix)
Base.isnan(a::Analysis) = any(isnan, a.μ) || any(isnan, a.σ) || any(isnan, a.Σ)
# A moment in time
"""
    Age(mean, sigma)

A moment in time, with mean and standard deviation.
"""
struct Age{T<:AbstractFloat}
    mean::T
    sigma::T
end
Age(μ, σ) = Age(float(μ), float(σ))
Age(x) = Age(val(x), err(x)) # e.g. from a Measurement
# A duration of time
"""
    Interval(min, min_sigma, max, max_sigma)

A duration of time, bounded by two uncertain endpoints.
"""
struct Interval{T<:AbstractFloat}
    min::T
    min_sigma::T
    max::T
    max_sigma::T
end
Interval(lμ, lσ, uμ, uσ) = Interval(float(lμ), float(lσ), float(uμ), float(uσ))
Interval(l, u) = Interval(val(l), err(l), val(u), err(u))
# Endpoints of an Interval, as Ages
Base.min(x::Interval{T}) where {T} = Age{T}(x.min, x.min_sigma)
Base.max(x::Interval{T}) where {T} = Age{T}(x.max, x.max_sigma)
# A confidence or credible interval with 95% bounds
"""
    CI(x::AbstractVector)

Summarize a distribution of values `x` by its mean, standard deviation,
median, and 2.5th/97.5th percentiles (i.e., a 95% confidence/credible interval).
"""
struct CI{T<:AbstractFloat}
    mean::T
    sigma::T
    median::T
    lower::T
    upper::T
end
function CI(x::AbstractVector{T}) where {T}
    # Copy before summarizing: vmedian!/vpercentile! permute their input
    xₜ = copy(x)
    Tₒ = float(T)
    mean = vmean(xₜ)
    CI{Tₒ}(mean,
        vstd(xₜ; mean),
        vmedian!(xₜ),
        vpercentile!(xₜ, 2.5),
        vpercentile!(xₜ, 97.5),
    )
end
# A type to hold a 2d covariance ellipse for any pair of measurements:
# `x`/`y` trace the ellipse margin, (x₀, y₀) is the center, (σx₀, σy₀) the 1σ
# uncertainties of the underlying measurements
struct Ellipse{T} <: Data{T}
    x::Vector{T}
    y::Vector{T}
    x₀::T
    y₀::T
    σx₀::T
    σy₀::T
end
# Make an ellipse from a Analysis object
function Ellipse(d::Analysis;
        sigmalevel::Number=2.447746830680816, # bivariate p=0.05 level: sqrt(invlogccdf(Chisq(2), log(0.05)))
        npoints::Integer=50,
    )
    a, b, θ = ellipseparameters(d, sigmalevel)
    return Ellipse(d, a, b, θ; npoints)
end
# Make an ellipse if given x and y positions, major and minor axes, and rotation
function Ellipse(d::Analysis, a, b, θ; npoints::Integer=50)
    x₀, y₀ = d.μ[1], d.μ[2]
    # Parametric ellipse, rotated by θ and centered on (x₀, y₀)
    t = range(0, 2π, length=npoints)
    x = a*cos(θ)*cos.(t) .- b*sin(θ)*sin.(t) .+ x₀
    y = a*sin(θ)*cos.(t) .+ b*cos(θ)*sin.(t) .+ y₀
    return Ellipse(x, y, x₀, y₀, d.σ[1], d.σ[2])
end
# Non-exported function: return semimajor and minor axes for a given U-Pb analysis
function ellipseparameters(d::Analysis{T}, sigmalevel::Number) where T
    # Quickly exit if any NaNs
    any(isnan, d.Σ) && return T.((NaN, NaN, NaN))
    # Calculate eigenvectors and eigenvalues from the covariance matrix.
    # V: matrix of eigenvectors, D: diagonal matrix of eigenvalues
    F = eigen(d.Σ)
    # Find index of semimajor and semiminor axes
    major = argmax(F.values)
    minor = argmin(F.values)
    v = view(F.vectors, :, major)
    # Calculate angle of major axis of ellipse from horizontal
    θ = atan(v[2]/v[1])
    # Calculate length of semimajor and semiminor axes for given p-value
    a = T(sigmalevel)*sqrt(abs(F.values[major]))
    b = T(sigmalevel)*sqrt(abs(F.values[minor]))
    return a, b, θ
end
# Overall bounding box (xmin, xmax, ymin, ymax) enclosing a set of ellipses
function datalimits(ellipses::Array{<:Ellipse})
    xmin = minimum(minimum.(x.(ellipses)))
    xmax = maximum(maximum.(x.(ellipses)))
    ymin = minimum(minimum.(y.(ellipses)))
    ymax = maximum(maximum.(y.(ellipses)))
    return xmin, xmax, ymin, ymax
end
datalimits(analyses::Array{<:Analysis}) = datalimits(Ellipse.(analyses))
# Margin-coordinate accessors for Ellipse
x(e::Ellipse) = e.x
y(e::Ellipse) = e.y
# Convenience methods for possibly obtaining values or uncertainties
# Generic fallback methods for things that don't have uncertainties
val(x) = x
err(x::T) where {T} = zero(T)
# Specialized methods for `CI`s
val(x::CI{T}) where {T} = x.mean::T
err(x::CI{T}) where {T} = x.sigma::T
# Specialized methods for `Age`s
val(x::Age{T}) where {T} = x.mean::T
err(x::Age{T}) where {T} = x.sigma::T
# Specialized methods for `Measurement`s
val(x::Measurement{T}) where {T} = x.val::T
err(x::Measurement{T}) where {T} = x.err::T
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 5819 |
# Vertical (²⁰⁶Pb/²³⁸U) misfit between a discordia line of given `slope` through
# (r75, r68) and the concordia curve at time t; zero where the line intercepts concordia
Δ68(t,(slope,r75,r68)) = slope * (exp(λ235U.val*t) - 1 - r75) + r68 - exp(λ238U.val*t) + 1
# Analytical derivative dΔ68/dt, for use in Newton's method
dΔ68(t,(slope,r75,r68)) = slope * λ235U.val*exp(λ235U.val*t) - λ238U.val*exp(λ238U.val*t)
"""
    newton_zero(f, df, x0, args::Tuple, iterations=10)

Find a zero of `f(x, args)` by Newton's method, starting from `x0` and using
the analytical derivative `df(x, args)`. Runs a fixed number of `iterations`
(no convergence test) and returns the final iterate.
"""
function newton_zero(f, df, x0, args::Tuple, iterations=10)
    x = x0
    for _ in 1:iterations
        x -= f(x, args) / df(x, args)
    end
    return x
end
"""
    upperintercept(tₗₗ, s::Ellipse, sigmalevel=2.447746830680816)

Upper concordia intercept (as a `Measurement`) of the Pb-loss array projected
from the time of lead loss `tₗₗ` through the uncertainty ellipse `s`.
Returns `NaN ± NaN` when `tₗₗ` is older than either apparent age, or when no
valid intercept exists.
"""
function upperintercept(tₗₗ::Number, s::Ellipse{T}, sigmalevel::T=2.447746830680816) where {T<:AbstractFloat}
    # bivariate p=0.05 level: sqrt(invlogccdf(Chisq(2), log(0.05)))
    # Get ratios from our ellipse
    r75, r68 = s.x, s.y
    r75₀, r68₀ = s.x₀, s.y₀
    σ75₀, σ68₀ = s.σx₀, s.σy₀
    # Return early if our lead loss time is too old or anything is NaN'd
    tₗₗ < age(r68₀,λ238U.val) || return T(NaN) ± T(NaN)
    tₗₗ < age(r75₀,λ235U.val) || return T(NaN) ± T(NaN)
    # If reversely discordant, move to the closest point on Concordia rather
    # than projecting down a fictive "lead gain" array, increasing uncertainty
    # by sqrt(MSWD) if discordance is large
    age68 = age(r68₀ ± σ68₀, λ238U.val)
    age75 = age(r75₀ ± σ75₀, λ235U.val)
    age75.val > age68.val || return first(wmean([age68, age75], corrected=true))
    # Calculate isotopic ratios of our time of Pb-loss
    r75ₗₗ = ratio(tₗₗ, λ235U.val)
    r68ₗₗ = ratio(tₗₗ, λ238U.val)
    # Central slope, from the ellipse center through the Pb-loss point
    slope₀ = (r68₀-r68ₗₗ)/(r75₀-r75ₗₗ)
    # Find the values on the margin of the ellipse with the
    # largest and smallest angular difference from the center
    r75₋, r68₋ = argmax(x->atan((x[2]-r68ₗₗ)/(x[1]-r75ₗₗ)), zip(r75, r68))
    slope₋ = (r68₋-r68ₗₗ)/(r75₋-r75ₗₗ)
    0 < slope₋ < Inf || return T(NaN) ± T(NaN)
    r75₊, r68₊ = argmin(x->atan((x[2]-r68ₗₗ)/(x[1]-r75ₗₗ)), zip(r75, r68))
    slope₊ = (r68₊-r68ₗₗ)/(r75₊-r75ₗₗ)
    0 < slope₊ < Inf || return T(NaN) ± T(NaN)
    # Find the upper intercept of our Pb-loss arrays with Concordia
    ui₀ = newton_zero(Δ68, dΔ68, t🜨, (slope₀,r75₀,r68₀))
    0 < ui₀ < t🜨 || return T(NaN) ± T(NaN)
    ui₋ = newton_zero(Δ68, dΔ68, t🜨, (slope₋,r75₋,r68₋))
    ui₊ = newton_zero(Δ68, dΔ68, t🜨, (slope₊,r75₊,r68₊))
    # Direct uncertainty, from spread in intercepts given size of ellipse
    σ = (val(ui₊) - val(ui₋))/2sigmalevel
    # Include also uncertainty, from lower intercept if tₗₗ (and ui) are `Measurement`s
    return val(ui₀) ± σcombined(ui₀, σ)
end
# Combine the spread-derived σ with any Measurement uncertainty already on m
σcombined(m::Measurement, σ) = sqrt(err(m)^2 + σ^2)
σcombined(m, σ) = σ # If m is not a Measurement
# Convenience method: build the 50-point ellipse from the analysis first
upperintercept(tₗₗ::Number, d::UPbAnalysis) = upperintercept(tₗₗ, Ellipse(d; npoints=50))
"""
    upperintercept(tₗₗ::Number, d::UPbAnalysis{T}, nresamplings::Integer)

Monte Carlo estimate of the upper Concordia intercept for a single analysis
`d` given a time of initial Pb-loss `tₗₗ`: draw `nresamplings` random
(r75, r68) ratio pairs from `d`, project a line from the Pb-loss point on
Concordia through each draw, and return the vector of upper-intercept ages.
Returns a vector of `NaN`s if `tₗₗ` exceeds either single-isotope age of
the analysis (or if anything is NaN).
"""
function upperintercept(tₗₗ::Number, d::UPbAnalysis{T}, nresamplings::Integer) where T
    # Allocate output up front so the early-return paths below can use it
    # (fixes a bug where they referenced an undefined variable)
    ui = zeros(T, nresamplings)
    # Get ratios
    r75₀, r68₀ = d.μ
    # Return early (all NaN) if our lead loss time is too old or anything is NaN'd
    tₗₗ < log(r68₀+1)/λ238U.val || return fill!(ui, T(NaN))
    tₗₗ < log(r75₀+1)/λ235U.val || return fill!(ui, T(NaN))
    # Calculate isotopic ratios of our time of Pb-loss
    r75ₗₗ = exp(λ235U.val*tₗₗ) - 1
    r68ₗₗ = exp(λ238U.val*tₗₗ) - 1
    # Draw random ratio pairs from the analysis and intersect the Pb-loss
    # array through each one with Concordia (Newton iteration from t🜨)
    samples = rand(d, nresamplings)
    @assert axes(samples,2) == eachindex(ui)
    @inbounds for i in axes(samples,2)
        r75, r68 = view(samples, :, i)
        slope = (r68-r68ₗₗ)/(r75-r75ₗₗ)
        ui[i] = newton_zero(Δ68, dΔ68, t🜨, (slope,r75,r68))
    end
    return ui
end
# Monte Carlo upper intercepts for a collection of analyses: resample a
# regression line through the data `nresamplings` times and intersect each
# line with Concordia (Newton iteration starting from t🜨)
function upperintercept(d::Collection{UPbAnalysis{T}}, nresamplings::Integer) where {T}
    slopes, intercepts = fit_lines(d, nresamplings)
    ui = Vector{T}(undef, nresamplings)
    @inbounds for i in eachindex(ui, slopes, intercepts)
        ui[i] = newton_zero(Δ68, dΔ68, t🜨, (slopes[i], zero(T), intercepts[i]))
    end
    return ui
end
# Single-fit upper intercept: York regression through all analyses, then
# intersect the resulting line with Concordia starting from t🜨
function upperintercept(d::Collection{UPbAnalysis{T}}) where {T}
    fit = yorkfit(d)
    return newton_zero(Δ68, dΔ68, t🜨, (fit.slope, fit.xm, fit.ym))
end
# Monte Carlo lower intercepts for a collection of analyses: resample a
# regression line through the data `nresamplings` times and intersect each
# line with Concordia (Newton iteration starting from zero)
function lowerintercept(d::Collection{UPbAnalysis{T}}, nresamplings::Integer) where {T}
    slopes, intercepts = fit_lines(d, nresamplings)
    li = Vector{T}(undef, nresamplings)
    @inbounds for i in eachindex(li, slopes, intercepts)
        li[i] = newton_zero(Δ68, dΔ68, zero(T), (slopes[i], zero(T), intercepts[i]))
    end
    return li
end
# Single-fit lower intercept: York regression through all analyses, then
# intersect the resulting line with Concordia starting from zero
function lowerintercept(d::Collection{UPbAnalysis{T}}) where {T}
    fit = yorkfit(d)
    return newton_zero(Δ68, dΔ68, zero(T), (fit.slope, fit.xm, fit.ym))
end
# Monte Carlo upper AND lower intercepts in one pass: resample regression
# lines through the data and intersect each with Concordia from both ends
# (t🜨 for the upper intercept, zero for the lower)
function intercepts(d::Collection{UPbAnalysis{T}}, nresamplings::Integer) where {T}
    slopes, intercepts = fit_lines(d, nresamplings)
    ui = Vector{T}(undef, nresamplings)
    li = Vector{T}(undef, nresamplings)
    @inbounds for i in eachindex(ui, li, slopes, intercepts)
        line = (slopes[i], zero(T), intercepts[i])
        ui[i] = newton_zero(Δ68, dΔ68, t🜨, line)
        li[i] = newton_zero(Δ68, dΔ68, zero(T), line)
    end
    return ui, li
end
# Single-fit upper and lower intercepts: one York regression through all
# analyses, intersected with Concordia from t🜨 and from zero respectively
function intercepts(d::Collection{UPbAnalysis{T}}) where {T}
    fit = yorkfit(d)
    line = (fit.slope, fit.xm, fit.ym)
    ui = newton_zero(Δ68, dΔ68, t🜨, line)
    li = newton_zero(Δ68, dΔ68, zero(T), line)
    return ui, li
end
# Resample `nresamplings` straight-line fits through a collection of U-Pb
# analyses: draw one random (r75, r68) pair from each analysis per resampling,
# then ordinary-least-squares regress r68 on r75 across the collection.
# Returns the vectors `(slopes, intercepts)`.
function fit_lines(d::Collection{UPbAnalysis{T}}, nresamplings::Integer) where {T}
    nanalyses = length(d)
    # Draw random ratio pairs from each analysis (one 2×nresamplings matrix each)
    randratios = rand.(d, nresamplings)
    # Design matrix: first column of ones (intercept term), second column r75
    design = ones(T, nanalyses, 2)
    # Response vector of r68 ratios
    r68 = zeros(T, nanalyses)
    # Allocate output slopes and intercepts
    slopes = zeros(T, nresamplings)
    intercepts = zeros(T, nresamplings)
    @inbounds for n in eachindex(slopes, intercepts)
        for i in eachindex(d, randratios)
            design[i,2] = randratios[i][1,n]
            r68[i] = randratios[i][2,n]
        end
        # Linear regression: r68 ≈ intercept + slope * r75
        # (or perhaps alternatively in some cases ϕ = ldiv!(lu!(design), r68)?)
        ϕ = design\r68
        intercepts[n], slopes[n] = ϕ[1], ϕ[2]
    end
    return slopes, intercepts
end
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
|
[
"MIT"
] | 0.3.6 | b9a419e46fe4516f3c0b52041d84e2bb5be9cb81 | code | 2708 |
# Prior/relative-likelihood distributions over relative age within a sample,
# each discretized as a 100-element vector over a unit interval.
# The standard Uniform distribution. Maximum entropy?
const UniformDistribution = ones(100)
# Triangular with the high end near eruption/deposition
const TriangularDistribution = collect(range(2,0,length=100))
# Similar to Triangular, a reasonable option if more ages are expected near time of eruption/deposition
# NOTE(review): divisor 0.150374848 presumably normalizes the half-normal pdf
# truncated at 10/3 sigma to unit mean — confirm against the generating script
const HalfNormalDistribution = pdf.(Normal(0,1), range(0,10/3,length=100))/0.150374848
# Best for survivorship processes, apparently including Ar-Ar partial degassing / entrainment / inheritance
# NOTE(review): rate constant 7.94734 looks like an empirical fit — confirm source
const ExponentialDistribution = 7.94734 .* exp.(-7.94734 .* range(0,1,length=100))
# The expected distribution for zircon saturation in a single magma body
# (tabulated numerically; presumably from MELTS thermodynamic simulations — confirm provenance)
const MeltsZirconDistribution =
    [0.282361, 0.28919, 0.296019, 0.302849, 0.30968, 0.316567, 0.323614, 0.33064, 0.337727, 0.344848, 0.352146, 0.359642,0.367482, 0.375622, 0.384052, 0.392828, 0.401968, 0.411518, 0.421442, 0.43171, 0.44213, 0.45295, 0.464036, 0.47539, 0.486938, 0.498644, 0.51075, 0.523026, 0.535688, 0.548764, 0.562124, 0.575927, 0.590363, 0.604879, 0.620415, 0.636022, 0.652333, 0.669112, 0.686441, 0.704341, 0.72283, 0.742036, 0.761964, 0.782541, 0.803718, 0.825707, 0.848386, 0.871895,0.896139, 0.920462, 0.946071, 0.972964, 0.999905, 1.02776, 1.05664, 1.08637, 1.11731, 1.14919, 1.18202, 1.21582, 1.24956, 1.28342, 1.31828, 1.35427, 1.39153, 1.43006, 1.46879, 1.50812, 1.5477, 1.58888, 1.63149, 1.6748, 1.71724, 1.76126, 1.80668, 1.85101, 1.89546, 1.94144, 1.98379, 2.02785, 2.06738, 2.10669, 2.1377, 2.16306, 2.17843, 2.17924, 2.16073, 2.11744, 2.04444, 1.93323, 1.7923, 1.62527, 1.44425, 1.25401, 1.0528, 0.843628, 0.632687, 0.421876, 0.211064, 0.000252985]
# As above, but truncated by eruption at an eruptible melt percentage
const MeltsVolcanicZirconDistribution =
    [0.54933, 0.556409, 0.563488, 0.570567, 0.577653, 0.584759, 0.591912, 0.599251, 0.606793, 0.614519, 0.622425, 0.630421, 0.63852, 0.646681, 0.654972, 0.663533, 0.672274, 0.681233, 0.690399, 0.699787, 0.709334, 0.719174, 0.729157, 0.739423, 0.749935, 0.760644, 0.771726, 0.782974, 0.794507, 0.806296, 0.818297, 0.830517, 0.842957, 0.855411, 0.866744, 0.878127, 0.889792, 0.901792, 0.914121, 0.926689, 0.939557, 0.952834, 0.966425, 0.980333, 0.994521, 1.00914, 1.02403, 1.03928, 1.05487, 1.0705, 1.08587, 1.10097, 1.11608, 1.13153, 1.1474, 1.16353, 1.18025, 1.19743, 1.21504, 1.23312, 1.25034, 1.26711, 1.28441, 1.30212, 1.32024, 1.33892, 1.35769, 1.37491, 1.3923, 1.41046, 1.42924, 1.44775, 1.46432, 1.48171, 1.49969, 1.51516, 1.53001, 1.54571, 1.5566, 1.56814, 1.57522, 1.58168, 1.58206, 1.57869, 1.56907, 1.55064, 1.51982, 1.4737, 1.40944, 1.32047, 1.21218, 1.09157, 0.965488, 0.834108, 0.697552, 0.558304, 0.418827, 0.279262, 0.139695, 0.000127237]
| Isoplot | https://github.com/JuliaGeochronology/Isoplot.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.