licenses (sequence, lengths 1–3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2–8) | text (string, lengths 25–67.1M) | package_name (string, lengths 2–41) | repo (string, lengths 33–86)
---|---|---|---|---|---|---|---|---
["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 6231 |
include("args.jl")
using DelimitedFiles
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
return tran_lambda, tran_den
end
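# Note on the mapping above: the m×n flow-grid fields are bilinearly upsampled to the
# nz×nx wave grid before the rock-physics transform; Patchy (defined in args.jl) then
# converts water saturation to the Lamé parameter λ and the bulk density consumed by
# the wave solver.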
if !isdir("figures_summary")
mkdir("figures_summary")
end
iter = 100
# Prj_names = "Brie_true3_set2_noupdate";
Prj_names = "CO2"
K_name = "/K$iter.txt"
K = readdlm(Prj_names*K_name)
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_true[survey_indices[i]]
p = out_p_true[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
sess = Session();init(sess);
vps = Array{PyObject}(undef, n_survey)
for i=1:n_survey
vps[i] = sqrt((lambdas[i] + 2.0 * tf_shear_sat1)/dens[i]) # vp = sqrt((λ+2μ)/ρ); tf_shear_sat1 is a single nz×nx field, not per-survey
end
V = run(sess, vps);
S = run(sess, out_sw_true);
P = run(sess, out_p_true);
z_inj = (9-1)*h + h/2.0
x_inj = (3-1)*h + h/2.0
z_prod = (9-1)*h + h/2.0
x_prod = (28-1)*h + h/2.0
rc("axes", titlesize=30)
rc("axes", labelsize=30)
rc("xtick", labelsize=28)
rc("ytick", labelsize=28)
rc("legend", fontsize=30)
fig1,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(V[(iPrj-1)*3+jPrj], extent=[0,n*h,m*h,0], vmin=3350, vmax=3500);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# cb = fig1.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Vp")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">", s=128)
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
end
fig1.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig1.add_axes([0.91, 0.08, 0.01, 0.82])
cb1 = fig1.colorbar(ims[1], cax=cbar_ax)
cb1.set_label("Vp (m/s)")
savefig("figures_summary/predicted_Vp_evo_CO2.pdf",bbox_inches="tight",pad_inches = 0);
fig2,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(S[survey_indices[(iPrj-1)*3+jPrj], :, :], extent=[0,n*h,m*h,0], vmin=0.0, vmax=0.6);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# if iPrj ==2 && jPrj == 3
# cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Saturation")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">", s=128)
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
end
# fig2.subplots_adjust(wspace=0.04, hspace=0.042)
fig2.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig2.add_axes([0.91, 0.08, 0.01, 0.82])
cb2 = fig2.colorbar(ims[1], cax=cbar_ax)
cb2.set_label("Saturation")
savefig("figures_summary/predicted_Saturation_evo_CO2.pdf",bbox_inches="tight",pad_inches = 0);
# fig3,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
# ims = Array{Any}(undef, 9)
# for iPrj = 1:3
# for jPrj = 1:3
# ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(P[survey_indices[(iPrj-1)*3+jPrj], :, :]*1.4504e-04, extent=[0,n*h,m*h,0], vmin=-2500.0, vmax=500);
# axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 3 || iPrj == 3
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# # if iPrj ==2 && jPrj == 3
# # cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# # cb.set_label("Saturation")
# axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
# axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
# end
# end
# # fig2.subplots_adjust(wspace=0.04, hspace=0.042)
# fig3.subplots_adjust(wspace=0.02, hspace=0.18)
# cbar_ax = fig3.add_axes([0.91, 0.08, 0.01, 0.82])
# cb3 = fig3.colorbar(ims[1], cax=cbar_ax)
# cb3.set_label("Potential (psi)")
# savefig("figures_summary/Potential_evo_patchy_true.pdf",bbox_inches="tight",pad_inches = 0);
# iter = 100
# Prj_names = ["CO2", "CO2_1src", "CO2_2surveys", "CO2_6surveys"]
# K_name = "/K$iter.txt"
# fig,axs = subplots(2,2, figsize=[18,8], sharex=true, sharey=true)
# for iPrj = 1:2
# for jPrj = 1:2
# # println(ax)
# A = readdlm(Prj_names[(iPrj-1)*2 + jPrj] * K_name)
# im = axs[iPrj,jPrj].imshow(A, extent=[0,n*h,m*h,0]);
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 2 || iPrj == 2
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# axs[iPrj,jPrj].text(-0.1,1.1,string("(" * Char((iPrj-1)*2 + jPrj+'a'-1) * ")"),transform=axs[iPrj,jPrj].transAxes,size=12,weight="bold")
# end
# end
# fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.9,
# wspace=0.1, hspace=0.2)
# cb_ax = fig.add_axes([0.93, 0.1, 0.02, 0.8])
# cbar = fig.colorbar(im, cax=cb_ax)
# cb = fig.colorbar()
# clim([20, 120])
# cb.set_label("Permeability (md)")
# fig = figure()
# ax = fig.add_subplot(111) # The big subplot
# ax1 = fig.add_subplot(211)
# ax2 = fig.add_subplot(212)
# # Turn off axis lines and ticks of the big subplot
# ax.spines["top"].set_color("none")
# ax.spines["bottom"].set_color("none")
# ax.spines["left"].set_color("none")
# ax.spines["right"].set_color("none")
# ax.tick_params(labelcolor="w", top="off", bottom="off", left="off", right="off")
# # Set common labels
# ax.set_xlabel("common xlabel")
# ax.set_ylabel("common ylabel")
# ax1.set_title('ax1 title')
# ax2.set_title('ax2 title')
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 1299 |
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.so')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.dylib')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.dll')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
end
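# Pattern note: each branch above wraps a compiled kernel loaded via
# tf.load_op_library with tf.custom_gradient, so the hand-written adjoint kernel
# sat_op_grad supplies the gradient; only the shared-library suffix
# (.so/.dylib/.dll) differs between platforms.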
sat_op = py"sat_op" | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 6667 | using ArgParse
function parse_commandline()
s = ArgParseSettings()
@add_arg_table s begin
"--generate_data"
arg_type = Bool
default = false
"--version"
arg_type = String
default = "0000"
"--gpuIds"
arg_type = String
default = "0"
"--indStage"
arg_type = Int64
default = 2
"--verbose"
arg_type = Bool
default = false
end
return parse_args(s)
end
args = parse_commandline()
if !isdir("./$(args["version"])")
mkdir("./$(args["version"])")
end
if !isdir("./$(args["version"])/Stage$(args["indStage"])")
mkdir("./$(args["version"])/Stage$(args["indStage"])")
end
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
include("ops_imseq.jl")
include("../Ops/FWI/fwi_util.jl")
include("fwi_util_op.jl")
np = pyimport("numpy")
# NOTE Parameters
# const ALPHA = 0.006323996017182
# const SRC_CONST = 5.6146
# const GRAV_CONST = 1.0/144.0
const ALPHA = 1.0
const SRC_CONST = 86400.0
const GRAV_CONST = 1.0
# NOTE Hyperparameter for flow simulation
m = 15
n = 30
h = 30.0 # meter
NT = 50
dt_survey = 5
Δt = 20.0 # day
z = (1:m)*h|>collect
x = (1:n)*h|>collect
X, Z = np.meshgrid(x, z)
# ρw = 996.9571
# ρo = 640.7385
# μw = 1.0
# μo = 3.0
ρw = 501.9
ρo = 1053.0
μw = 0.1
μo = 1.0
# K_init = 20.0 .* ones(m,n)
g = 9.8*GRAV_CONST
ϕ = 0.25 .* ones(m,n)
qw = zeros(NT, m, n)
qw[:,9,3] .= 0.005 * (1/h^2)/10.0 * SRC_CONST
qo = zeros(NT, m, n)
qo[:,9,28] .= -0.005 * (1/h^2)/10.0 * SRC_CONST
sw0 = zeros(m, n)
survey_indices = collect(1:dt_survey:NT+1) # 11 surveys (every dt_survey=5 steps over NT=50)
n_survey = length(survey_indices)
# NOTE Hyperparameter for fwi_op
# argsparse.jl
# ENV["CUDA_VISIBLE_DEVICES"] = 1
# ENV["PARAMDIR"] = "Src/params/"
# config = tf.ConfigProto(device_count = Dict("GPU"=>0))
dz = 3 # meters
dx = 3
nz = Int64(round((m * h) / dz)) + 1
nx = Int64(round((n * h) / dx)) + 1
nPml = 64
nSteps = 3001
dt = 0.00025
f0 = 50.0
nPad = 32 - mod((nz+2*nPml), 32)
nz_pad = nz + 2*nPml + nPad
nx_pad = nx + 2*nPml
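# Worked example of the padding arithmetic: with m=15, n=30, h=30.0, dz=dx=3 we get
# nz=151, nx=301; then nz+2*nPml = 279, mod(279,32) = 23, so nPad = 9 and nz_pad = 288,
# a multiple of 32 (presumably to match the GPU kernel's block size); nx_pad = 429.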
# reflection
# x_src = collect(5:20:nx-5)
# z_src = 5ones(Int64, size(x_src))
# x_rec = collect(5:1:nx-5)
# z_rec = 5 .* ones(Int64, size(x_rec))
# xwell
# # z_src = collect(5:10:nz-5) #14->11srcs 10->15srcs
# # z_src = collect(5:10:nz-5)
z_src = collect(5:10:nz-5)
x_src = 5ones(Int64, size(z_src))
z_rec = collect(5:1:nz-5)
x_rec = (nx-5) .* ones(Int64, size(z_rec))
# para_fname = "./$(args["version"])/para_file.json"
# survey_fname = "./$(args["version"])/survey_file.json"
# data_dir_name = "./$(args["version"])/Data"
# paraGen(nz, nx, dz, dx, nSteps, dt, f0, nPml, nPad, filter_para, isAc, para_fname, survey_fname, data_dir_name)
# surveyGen(z_src, x_src, z_rec, x_rec, survey_fname)
cp_nopad = 3500.0 .* ones(nz, nx) # initial cp
cs = cp_nopad ./ sqrt(3.0)
den = 2200.0 .* ones(nz, nx)
cp_pad = 3500.0 .* ones(nz_pad, nx_pad) # initial cp
cs_pad = cp_pad ./ sqrt(3.0)
den_pad = 2200.0 .* ones(nz_pad, nx_pad)
cp_pad_value = 3500.0
# tf_cp = constant(cp)
tf_cs = constant(cs_pad)
tf_den = constant(den_pad)
# src = Matrix{Float64}(undef, 1, 2001)
# # src[1,:] = Float64.(reinterpret(Float32, read("../Ops/FWI/Src/params/ricker_10Hz.bin")))
# src[1,:] = Float64.(reinterpret(Float32, read("../Ops/FWI/Src/params/Mar_source_2001.bin")))
src = sourceGene(f0, nSteps, dt)
tf_stf = constant(repeat(src, outer=length(z_src)))
# tf_para_fname = tf.strings.join([para_fname])
tf_gpu_id0 = constant(0, dtype=Int32)
tf_gpu_id1 = constant(1, dtype=Int32)
gpu_id_array = [parse(Int, ss) for ss in split(args["gpuIds"],"_")]
nGpus = length(gpu_id_array)
tf_gpu_id_array = constant(gpu_id_array, dtype=Int32)
tf_shot_ids0 = constant(collect(Int32, 0:length(x_src)-1), dtype=Int32)
tf_shot_ids1 = constant(collect(Int32, 13:25), dtype=Int32)
# NOTE Hyperparameter for rock physics
tf_bulk_fl1 = constant(2.735e9)
tf_bulk_fl2 = constant(0.125e9) # to displace fl1
tf_bulk_sat1 = constant(den .* (cp_nopad.^2 .- 4.0/3.0 .* cp_nopad.^2 ./3.0)) # vp/vs ratio as sqrt(3)
tf_bulk_min = constant(36.6e9)
tf_shear_sat1 = constant(den .* cp_nopad.^2 ./3.0)
tf_ϕ_pad = tf.image.resize_bilinear(tf.reshape(constant(ϕ), (1, m, n, 1)), (nz, nx)) # upsample the porosity
tf_ϕ_pad = cast(tf_ϕ_pad, Float64)
tf_ϕ_pad = squeeze(tf_ϕ_pad)
tf_shear_pad = tf.pad(tf_shear_sat1, [nPml (nPml+nPad); nPml nPml],
constant_values=den[1,1] * cp_nopad[1,1]^2 /3.0) / 1e6
function Gassman(sw)
tf_bulk_fl_mix = 1.0/( (1-sw)/tf_bulk_fl1 + sw/tf_bulk_fl2 )
temp = tf_bulk_sat1/(tf_bulk_min - tf_bulk_sat1) - tf_bulk_fl1/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl1) + tf_bulk_fl_mix/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl_mix)
tf_bulk_new = tf_bulk_min / (1.0/temp + 1.0)
# tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo) *16.018463373960138;
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
# tf_cp_new = sqrt((tf_bulk_new + 4.0/3.0 * tf_shear_sat1)/tf_den_new)
tf_lambda_new = tf_bulk_new - 2.0/3.0 * tf_shear_sat1
return tf_lambda_new, tf_den_new
end
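# For reference, Gassman above implements Gassmann fluid substitution with a Reuss
# (isostress) mixture of the two fluid bulk moduli:
#   K_fl(Sw) = [ (1-Sw)/K_fl1 + Sw/K_fl2 ]^(-1)
#   K_sat2/(K_min - K_sat2) = K_sat1/(K_min - K_sat1)
#                             - K_fl1/(ϕ (K_min - K_fl1)) + K_fl/(ϕ (K_min - K_fl))
# and returns λ = K_sat2 - (2/3) μ together with the fluid-substituted density
# ρ = den + ϕ Sw (ρw - ρo).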
tf_brie_coef = Variable(2.0*30.0)
# tf_brie_coef = constant(3.0)
# tf_brie_coef = constant(2.0)
function Brie(sw)
tf_bulk_fl_mix = (tf_bulk_fl1-tf_bulk_fl2)*(1-sw)^(tf_brie_coef/30.0) + tf_bulk_fl2
temp = tf_bulk_sat1/(tf_bulk_min - tf_bulk_sat1) - tf_bulk_fl1/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl1) + tf_bulk_fl_mix/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl_mix)
tf_bulk_new = tf_bulk_min / (1.0/temp + 1.0)
# tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo) *16.018463373960138;
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
# tf_cp_new = sqrt((tf_bulk_new + 4.0/3.0 * tf_shear_sat1)/tf_den_new)
tf_lambda_new = tf_bulk_new - 2.0/3.0 * tf_shear_sat1
return tf_lambda_new, tf_den_new
end
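# Brie above swaps the Reuss mix for the Brie et al. fluid-mixing law
#   K_fl(Sw) = (K_fl1 - K_fl2) (1-Sw)^e + K_fl2,  with exponent e = tf_brie_coef/30,
# so the Variable initialized to 2.0*30.0 corresponds to e = 2; the 1/30 scaling
# presumably keeps the optimization variable well conditioned when e is inverted for.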
function RockLinear(sw)
# tf_lambda_new = constant(7500.0*1e6 .* ones(nz,nx)) + (17400.0-7500.0)*1e6 * sw
tf_lambda_new = constant(7500.0*1e6 .* ones(nz,nx)) + (9200.0-7500.0)*1e6 * sw
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
return tf_lambda_new, tf_den_new
end
tf_patch_temp = tf_bulk_sat1/(tf_bulk_min - tf_bulk_sat1) -
tf_bulk_fl1/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl1) +
tf_bulk_fl2/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl2)
tf_bulk_sat2 = tf_bulk_min/(1.0/tf_patch_temp + 1.0)
function Patchy(sw)
tf_bulk_new = 1/( (1-sw)/(tf_bulk_sat1+4.0/3.0*tf_shear_sat1)
+ sw/(tf_bulk_sat2+4.0/3.0*tf_shear_sat1) ) - 4.0/3.0*tf_shear_sat1
tf_lambda_new = tf_bulk_new - 2.0/3.0 * tf_shear_sat1
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
return tf_lambda_new, tf_den_new
end
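# Patchy above is Hill's patchy-saturation average: the P-wave moduli
# M_i = K_sat,i + (4/3) μ of the rock fully saturated with each fluid are averaged
# harmonically,
#   M(Sw) = [ (1-Sw)/M_1 + Sw/M_2 ]^(-1),  K_new = M - (4/3) μ,  λ = K_new - (2/3) μ,
# where K_sat2 (computed just above via the Gassmann relation) is the modulus under
# full fluid-2 saturation.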
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 2036 |
if Sys.islinux()
py"""
import tensorflow as tf
import socket
if socket.gethostname() != "Dolores":
libFwiOp = tf.load_op_library('../Ops/FWI/build/libFwiOp.so')
else:
libFwiOp = tf.load_op_library('../Ops/FWI/build_dolores/libFwiOp.so')
@tf.custom_gradient
def fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
def grad(dy):
return libFwiOp.fwi_op_grad(dy, tf.constant(1.0,dtype=tf.float64),λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit, grad
def fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libFwiOp = tf.load_op_library('../Ops/FWI/build/libFwiOp.so')
@tf.custom_gradient
def fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
def grad(dy):
return libFwiOp.fwi_op_grad(dy, tf.constant(1.0,dtype=tf.float64),λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit, grad
def fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libFwiOp = tf.load_op_library('../Ops/FWI/build/libFwiOp.so')
@tf.custom_gradient
def fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
def grad(dy):
return libFwiOp.fwi_op_grad(dy, tf.constant(1.0,dtype=tf.float64),λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit, grad
def fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit
"""
end
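# fwi_op returns the scalar FWI data misfit and carries a custom adjoint-state
# gradient (fwi_op_grad); fwi_obs_op runs the same forward modeling but only to
# synthesize the "observed" data (presumably written under the data directory
# recorded in para_fname), so it intentionally exposes no gradient.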
fwi_op = py"fwi_op"
fwi_obs_op = py"fwi_obs_op" | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 2301 | using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libLaplacian = tf.load_op_library('../Ops/Laplacian/build/libLaplacian.so')
@tf.custom_gradient
def laplacian_op(coef,func,h,rhograv):
p = libLaplacian.laplacian(coef,func,h,rhograv)
def grad(dy):
return libLaplacian.laplacian_grad(dy, coef, func, h, rhograv)
return p, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libLaplacian = tf.load_op_library('../Ops/Laplacian/build/libLaplacian.dylib')
@tf.custom_gradient
def laplacian_op(coef,func,h,rhograv):
p = libLaplacian.laplacian(coef,func,h,rhograv)
def grad(dy):
return libLaplacian.laplacian_grad(dy, coef, func, h, rhograv)
return p, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libLaplacian = tf.load_op_library('../Ops/Laplacian/build/libLaplacian.dll')
@tf.custom_gradient
def laplacian_op(coef,func,h,rhograv):
p = libLaplacian.laplacian(coef,func,h,rhograv)
def grad(dy):
return libLaplacian.laplacian_grad(dy, coef, func, h, rhograv)
return p, grad
"""
end
laplacian_op = py"laplacian_op"
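# A minimal, commented-out sketch of a finite-difference gradient check for the
# custom op (assumes the PyTensorFlow helpers constant/gradients/Session/init/run
# used elsewhere in this repo; the 10×10 fields are hypothetical):
# coef = constant(ones(10, 10)); func = constant(rand(10, 10))
# J = sum(laplacian_op(coef, func, constant(1.0), constant(0.0)))
# g = gradients(J, func)
# sess = Session(); init(sess)
# run(sess, g)  # compare entries against (J(func + ε e_k) - J(func)) / ε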
if Sys.islinux()
py"""
import tensorflow as tf
libUpwlapOp = tf.load_op_library('../Ops/Upwlap/build/libUpwlapOp.so')
@tf.custom_gradient
def upwlap_op(perm,mobi,func,h,rhograv):
out = libUpwlapOp.upwlap_op(perm,mobi,func,h,rhograv)
def grad(dy):
return libUpwlapOp.upwlap_op_grad(dy, out, perm,mobi,func,h,rhograv)
return out, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libUpwlapOp = tf.load_op_library('../Ops/Upwlap/build/libUpwlapOp.dylib')
@tf.custom_gradient
def upwlap_op(perm,mobi,func,h,rhograv):
out = libUpwlapOp.upwlap_op(perm,mobi,func,h,rhograv)
def grad(dy):
return libUpwlapOp.upwlap_op_grad(dy, out, perm,mobi,func,h,rhograv)
return out, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libUpwlapOp = tf.load_op_library('../Ops/Upwlap/build/libUpwlapOp.dll')
@tf.custom_gradient
def upwlap_op(perm,mobi,func,h,rhograv):
out = libUpwlapOp.upwlap_op(perm,mobi,func,h,rhograv)
def grad(dy):
return libUpwlapOp.upwlap_op_grad(dy, out, perm,mobi,func,h,rhograv)
return out, grad
"""
end
upwlap_op = py"upwlap_op" | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 5967 | #=
Main program for FWI
=#
using ArgParse
function parse_commandline()
s = ArgParseSettings()
@add_arg_table s begin
"--generate_data"
arg_type = Bool
default = false
"--version"
arg_type = String
default = "0000"
"--verbose"
arg_type = Bool
default = false
end
return parse_args(s)
end
args = parse_commandline()
if !isdir("./$(args["version"])")
mkdir("./$(args["version"])")
end
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using DelimitedFiles # explicit import for writedlm used below
using Random
Random.seed!(233)
include("ops_imseq.jl")
include("../Ops/FWI/fwi_util.jl")
include("fwi_util_op.jl")
np = pyimport("numpy")
nz = 134
nx = 384
dz = 24. # meters
dx = 24.
nSteps = 2001
dt = 0.0025
f0 = 4.5
filter_para = [0, 0.1, 100.0, 200.0]
isAc = true
nPml = 32
nPad = 32 - mod((nz+2*nPml), 32)
nz_pad = nz + 2*nPml + nPad
nx_pad = nx + 2*nPml
# reflection
x_src = collect(4:8:384)
z_src = 2ones(Int64, size(x_src))
x_rec = collect(3:381)
z_rec = 2ones(Int64, size(x_rec))
# xwell
# z_src = collect(5:10:nz-5) #14->11srcs 10->15srcs
# x_src = 5ones(Int64, size(z_src))
# z_rec = collect(5:1:nz-5)
# x_rec = (nx-5) .* ones(Int64, size(z_rec))
para_fname = "./$(args["version"])/para_file.json"
survey_fname = "./$(args["version"])/survey_file.json"
data_dir_name = "./$(args["version"])/Data"
paraGen(nz_pad, nx_pad, dz, dx, nSteps, dt, f0, nPml, nPad, filter_para, isAc, para_fname, survey_fname, data_dir_name)
surveyGen(z_src, x_src, z_rec, x_rec, survey_fname)
tf_cp = constant(reshape(reinterpret(Float32,read("Mar_models/Model_Cp_true.bin")),(nz_pad, nx_pad)), dtype=Float64)
cs = zeros(nz_pad, nx_pad)
den = 1000.0 .* ones(nz_pad, nx_pad)
cp_pad_value = 3000.0
# tf_cp = constant(cp)
tf_cs = constant(cs)
tf_den = constant(den)
src = Matrix{Float64}(undef, 1, 2001)
# # src[1,:] = Float64.(reinterpret(Float32, read("../Ops/FWI/Src/params/ricker_10Hz.bin")))
src[1,:] = Float64.(reinterpret(Float32, read("../Ops/FWI/Src/params/Mar_source_2001.bin")))
# src = sourceGene(f0, nSteps, dt)
tf_stf = constant(repeat(src, outer=length(z_src)))
# tf_para_fname = tf.strings.join([para_fname])
tf_gpu_id0 = constant(0, dtype=Int32)
tf_gpu_id1 = constant(1, dtype=Int32)
nGpus = 2
tf_gpu_id_array = constant(collect(0:nGpus-1), dtype=Int32)
tf_shot_ids0 = constant(collect(Int32, 0:length(x_src)-1), dtype=Int32)
shot_id_points = Int32.(trunc.(collect(LinRange(0, length(z_src)-1, nGpus+1))))
function pad_cp(cp)
tran_cp = cast(cp, Float64)
return tf.pad(tran_cp, [nPml (nPml+nPad); nPml nPml], constant_values=3000.0)
end
# NOTE Generate Data
if args["generate_data"]
println("Generate Test Data...")
if !isdir("./$(args["version"])/Data")
mkdir("./$(args["version"])/Data")
end
res = fwi_obs_op(tf_cp, tf_cs, tf_den, tf_stf, tf_gpu_id0, tf_shot_ids0, para_fname)
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 24
config.inter_op_parallelism_threads = 24
sess = Session(config=config); init(sess);
run(sess, res)
error("Generate Data: Stop")
end
cp_init = reshape(reinterpret(Float32,read("Mar_models/Model_Cp_init_1D.bin")),(nz_pad, nx_pad))
tf_cp_inv = Variable(cp_init, dtype=Float64)
Mask = ones(nz_pad, nx_pad)
Mask[nPml+1:nPml+10,:] .= 0.0
tf_cp_inv_msk = tf_cp_inv .* constant(Mask) + constant(cp_init[1,1] .* (1. .- Mask))
# NOTE Compute FWI loss
# loss = constant(0.0)
# for i = 1:nGpus
# global loss
# tf_shot_ids = constant(collect(shot_id_points[i] : shot_id_points[i+1]), dtype=Int32)
# loss += fwi_op(tf_cp_inv_msk, tf_cs, tf_den, tf_stf, tf_gpu_id_array[i], tf_shot_ids, para_fname)
# end
loss = fwi_op(tf_cp_inv_msk, tf_cs, tf_den, tf_stf, tf_gpu_id_array[1], tf_shot_ids0, para_fname)
gradCp = gradients(loss, tf_cp_inv)
if args["verbose"]
sess = Session(); init(sess)
println("Initial loss = ", run(sess, loss))
g = gradients(loss, tf_cp_inv) # this file inverts for Cp, not for permeability
G = run(sess, g)
pcolormesh(G); savefig("test.png"); close("all")
end
# Optimization
__cnt = 0
# invK = zeros(m,n)
function print_loss(l, Cp, gradCp)
global __cnt, __l, __Cp, __gradCp
if mod(__cnt,1)==0
println("\niter=$__iter, eval=$__cnt, current loss=",l)
# println("a=$a, b1=$b1, b2=$b2")
end
__cnt += 1
__l = l
__Cp = Cp
__gradCp = gradCp
end
__iter = 0
function print_iter(rk)
global __iter, __l
if mod(__iter,1)==0
println("\n************* ITER=$__iter *************\n")
end
__iter += 1
open("./$(args["version"])/loss.txt", "a") do io
writedlm(io, Any[__iter __l])
end
open("./$(args["version"])/Cp$__iter.txt", "w") do io
writedlm(io, __Cp)
end
open("./$(args["version"])/gradCp$__iter.txt", "w") do io
writedlm(io, __gradCp)
end
end
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 24
config.inter_op_parallelism_threads = 24
sess = Session(config=config); init(sess);
# cp_low_bd = 1500. .* ones(nz_pad, nx_pad)
# cp_high_bd = 5500. .* ones(nz_pad, nx_pad)
# cp_high_bd[nPml+1:nPml+10,:] .= 1500.0
opt = ScipyOptimizerInterface(loss, var_list=[tf_cp_inv], var_to_bounds=Dict(tf_cp_inv=> (1500.0, 5500.0)), method="L-BFGS-B",
options=Dict("maxiter"=> 100, "ftol"=>1e-6, "gtol"=>1e-6))
@info "Optimization Starts..."
ScipyOptimizerMinimize(sess, opt, loss_callback=print_loss, step_callback=print_iter, fetches=[loss,tf_cp_inv,gradCp])
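# Note: with TensorFlow's ScipyOptimizerInterface, loss_callback receives the
# `fetches` at every function evaluation while step_callback fires once per
# L-BFGS-B iteration; that is how print_loss/print_iter above checkpoint the loss,
# Cp, and its gradient to disk.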
# adam = AdamOptimizer(learning_rate=50.0)
# op = minimize(adam, loss)
# sess = Session(); init(sess);
# for iter = 1:1000
# _, misfit, cp, cpgrad = run(sess, [op, loss, tf_cp_inv, gradCp])
# open("./$(args["version"])/Cp$iter.txt", "w") do io
# writedlm(io, cp)
# end
# open("./$(args["version"])/loss.txt", "a") do io
# writedlm(io, Any[iter misfit])
# end
# open("./$(args["version"])/gradCp$iter.txt", "w") do io
# writedlm(io, cpgrad)
# end
# end
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 6076 |
#=
Main program for two phase flow inversion
=#
include("args.jl")
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
# tran_lambda, tran_den = Brie(sw)
tran_lambda_pad = tf.pad(tran_lambda, [nPml (nPml+nPad); nPml nPml], constant_values=3500.0^2*2200.0/3.0) /1e6
tran_den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], constant_values=2200.0)
return tran_lambda_pad, tran_den_pad
end
# NOTE Generate Data
if args["generate_data"]
println("Generate Test Data...")
K = 20.0 .* ones(m,n) # millidarcy
K[8:10,:] .= 120.0
# K[17:21,:] .= 100.0
# for i = 1:m
# for j = 1:n
# if i <= (14 - 24)/(30 - 1)*(j-1) + 24 && i >= (12 - 18)/(30 - 1)*(j-1) + 18
# K[i,j] = 100.0
# end
# end
# end
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_true[survey_indices[i]]
p = out_p_true[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
misfit = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
if !isdir("./$(args["version"])/Data$i")
mkdir("./$(args["version"])/Data$i")
end
para_fname = "./$(args["version"])/para_file$i.json"
survey_fname = "./$(args["version"])/survey_file$i.json"
paraGen(nz_pad, nx_pad, dz, dx, nSteps, dt, f0, nPml, nPad, para_fname, survey_fname, "./$(args["version"])/Data$i/")
# shot_inds = collect(1:3:length(z_src)) .+ mod(i-1,3) # 5src rotation
# shot_inds = i # 1src rotation
shot_inds = collect(1:length(z_src)) # all sources
surveyGen(z_src[shot_inds], x_src[shot_inds], z_rec, x_rec, survey_fname)
tf_shot_ids0 = constant(collect(0:length(shot_inds)-1), dtype=Int32)
misfit[i] = fwi_obs_op(lambdas[i], tf_shear_pad, dens[i], tf_stf, tf_gpu_id0, tf_shot_ids0, para_fname)
end
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 24
config.inter_op_parallelism_threads = 24
sess = Session(config=config); init(sess);
run(sess, misfit)
error("Generate Data: Stop")
end
if args["indStage"] == 2
K_init = 20.0 .* ones(m,n)
elseif args["indStage"] == 1
error("indStage == 1")
else
ls = readdlm("./$(args["version"])/Stage$(args["indStage"]-1)/loss.txt")
Ls = Int64((ls[end,1]))
K_init = readdlm("./$(args["version"])/Stage$(args["indStage"]-1)/K$Ls.txt")
end
tfCtxInit = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K_init,g,ϕ,qw,qo, sw0, false)
out_sw_init, out_p_init = imseq(tfCtxInit)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_init[survey_indices[i]]
p = out_p_init[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
# NOTE Compute FWI loss
loss = constant(0.0)
for i = 1:args["indStage"]
global loss
para_fname = "./$(args["version"])/para_file$i.json"
survey_fname = "./$(args["version"])/survey_file$i.json"
# shot_inds = collect(1:3:length(z_src)) .+ mod(i-1,3)
# shot_inds = i
shot_inds = collect(1:length(z_src)) # all sources
tf_shot_ids0 = constant(collect(0:length(shot_inds)-1), dtype=Int32)
loss += fwi_op(lambdas[i], tf_shear_pad, dens[i], tf_stf, tf_gpu_id_array[mod(i,nGpus)], tf_shot_ids0, para_fname) # mod(i,2)
end
gradK = gradients(loss, tfCtxInit.K)
if args["verbose"]
sess = Session(); init(sess)
println("Initial loss = ", run(sess, loss))
g = gradients(loss, tfCtxInit.K)
G = run(sess, g)
pcolormesh(G); savefig("test.png"); close("all")
end
# Optimization
__cnt = 0
# invK = zeros(m,n)
function print_loss(l, K, gradK, brie_coef)
global __cnt, __l, __K, __gradK, __brie_coef
if mod(__cnt,1)==0
println("\niter=$__iter, eval=$__cnt, current loss=",l)
# println("a=$a, b1=$b1, b2=$b2")
end
__cnt += 1
__l = l
__K = K
__gradK = gradK
__brie_coef = brie_coef
end
__iter = 0
function print_iter(rk)
global __iter, __l
if mod(__iter,1)==0
println("\n************* ITER=$__iter *************\n")
end
__iter += 1
open("./$(args["version"])/Stage$(args["indStage"])/loss.txt", "a") do io
writedlm(io, Any[__iter __l])
end
open("./$(args["version"])/Stage$(args["indStage"])/K$__iter.txt", "w") do io
writedlm(io, __K)
end
open("./$(args["version"])/Stage$(args["indStage"])/gradK$__iter.txt", "w") do io
writedlm(io, __gradK)
end
open("./$(args["version"])/Stage$(args["indStage"])/brie_coef.txt", "a") do io
writedlm(io, Any[__iter __brie_coef])
end
end
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 24
config.inter_op_parallelism_threads = 24
sess = Session(config=config); init(sess);
opt = ScipyOptimizerInterface(loss, var_list=[tfCtxInit.K], var_to_bounds=Dict(tfCtxInit.K=> (10.0, 130.0)), method="L-BFGS-B",
options=Dict("maxiter"=> 100, "ftol"=>1e-6, "gtol"=>1e-6))
# opt = ScipyOptimizerInterface(loss, var_list=[tfCtxInit.K, tf_brie_coef], var_to_bounds=Dict(tfCtxInit.K=> (10.0, 130.0), tf_brie_coef=>(1.0,100.0)), method="L-BFGS-B",
# options=Dict("maxiter"=> 100, "ftol"=>1e-6, "gtol"=>1e-6))
@info "Optimization Starts..."
# ScipyOptimizerMinimize(sess, opt, loss_callback=print_loss, step_callback=print_iter, fetches=[loss,tfCtxInit.K,gradK])
ScipyOptimizerMinimize(sess, opt, loss_callback=print_loss, step_callback=print_iter, fetches=[loss,tfCtxInit.K,gradK, tf_brie_coef])
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 3208 |
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using DelimitedFiles
using Random
Random.seed!(233)
np = pyimport("numpy")
include("poisson_op.jl")
include("laplacian_op.jl")
include("sat_op.jl")
const K_CONST = 9.869232667160130e-16 * 86400 * 1e3
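# Unit conversions folded into K_CONST: 1 millidarcy = 9.869233e-16 m^2, the factor
# 86400 turns per-second rates into per-day (Δt is in days), and the factor 1e3
# presumably absorbs viscosities given in centipoise (1 cP = 1e-3 Pa·s).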
mutable struct Ctx
m; n; h; NT; Δt; Z; X; ρw; ρo;
μw; μo; K; g; ϕ; qw; qo; sw0
end
function tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo,sw0,ifTrue)
tf_h = constant(h)
# tf_NT = constant(NT)
tf_Δt = constant(Δt)
tf_Z = constant(Z)
tf_X= constant(X)
tf_ρw = constant(ρw)
tf_ρo = constant(ρo)
tf_μw = constant(μw)
tf_μo = constant(μo)
# tf_K = isa(K,Array) ? Variable(K) : K
if ifTrue
tf_K = constant(K)
else
tf_K = Variable(K)
end
tf_g = constant(g)
# tf_ϕ = Variable(ϕ)
tf_ϕ = constant(ϕ)
tf_qw = constant(qw)
tf_qo = constant(qo)
tf_sw0 = constant(sw0)
return Ctx(m,n,tf_h,NT,tf_Δt,tf_Z,tf_X,tf_ρw,tf_ρo,tf_μw,tf_μo,tf_K,tf_g,tf_ϕ,tf_qw,tf_qo,tf_sw0)
end
function Krw(Sw)
return Sw ^ 1.5
end
function Kro(So)
return So ^1.5
end
function ave_normal(quantity, m, n)
aa = sum(quantity)
return aa/(m*n)
end
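# ave_normal subtracts the mean so the source term integrates to zero, presumably the
# compatibility condition for the pure-Neumann pressure equation solved in onestep.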
# variables : sw, u, v, p
# (time dependent) parameters: qw, qo, ϕ
function onestep(sw, p, m, n, h, Δt, Z, ρw, ρo, μw, μo, K, g, ϕ, qw, qo)
# step 1: update p
# λw = Krw(sw)/μw
# λo = Kro(1-sw)/μo
λw = sw.*sw/μw
λo = (1-sw).*(1-sw)/μo
λ = λw + λo
q = qw + qo + λw/(λo+1e-16).*qo
# q = qw + qo
potential_c = (ρw - ρo)*g .* Z
# Step 1: implicit potential
Θ = upwlap_op(K * K_CONST, λo, potential_c, h, constant(0.0))
load_normal = (Θ+q/ALPHA) - ave_normal(Θ+q/ALPHA, m, n)
# p = poisson_op(λ.*K* K_CONST, load_normal, h, constant(0.0), constant(1))
p = upwps_op(K * K_CONST, λ, load_normal, p, h, constant(0.0), constant(0)) # potential p = pw - ρw*g*h
# step 2: implicit transport
sw = sat_op(sw, p, K * K_CONST, ϕ, qw, qo, μw, μo, sw, Δt, h)
return sw, p
end
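# Schematically, one sequential implicit step above solves
#   pressure:  ∇·( K (λw+λo) ∇p ) = q̃,  with λw = Sw^2/μw and λo = (1-Sw)^2/μo
#              (quadratic mobilities are inlined here; the Krw/Kro helpers above
#              use exponent 1.5 and are left unused),
#   transport: ϕ ∂Sw/∂t + ∇·( (λw/λ) u ) = qw, advanced implicitly by sat_op,
# where the gravity potential (ρw - ρo) g Z enters through the upwinded operators.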
"""
impes(tf_ctx)
Solve the two phase flow equation.
`qw` and `qo` -- `NT x m x n` numerical array, `qw[i,:,:]` the corresponding value of qw at i*Δt
`sw0` and `p0` -- initial value for `sw` and `p`. `m x n` numerical array.
"""
function imseq(tf_ctx)
ta_sw, ta_p = TensorArray(NT+1), TensorArray(NT+1)
ta_sw = write(ta_sw, 1, tf_ctx.sw0)
ta_p = write(ta_p, 1, constant(zeros(tf_ctx.m, tf_ctx.n)))
i = constant(1, dtype=Int32)
function condition(i, tas...)
i <= tf_ctx.NT
end
function body(i, tas...)
ta_sw, ta_p = tas
sw, p = onestep(read(ta_sw, i), read(ta_p, i), tf_ctx.m, tf_ctx.n, tf_ctx.h, tf_ctx.Δt, tf_ctx.Z, tf_ctx.ρw, tf_ctx.ρo, tf_ctx.μw, tf_ctx.μo, tf_ctx.K, tf_ctx.g, tf_ctx.ϕ, tf_ctx.qw[i], tf_ctx.qo[i])
ta_sw = write(ta_sw, i+1, sw)
ta_p = write(ta_p, i+1, p)
i+1, ta_sw, ta_p
end
_, ta_sw, ta_p = while_loop(condition, body, [i; ta_sw; ta_p;])
out_sw, out_p = stack(ta_sw), stack(ta_p)
end
function vis(val, args...;kwargs...)
close("all")
ns = Int64.(round.(LinRange(1,size(val,1),9)))
for i = 1:9
subplot(330+i)
imshow(val[ns[i],:,:], args...;kwargs...)
colorbar()
end
end
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 1349 |
using PyPlot
using DelimitedFiles
if !isdir("figures_summary")
mkdir("figures_summary")
end
m = 15
n = 30
h = 30.0
dz = 3.0 # meters
dx = 3.0
nz = Int64(round((m * h) / dz)) + 1
nx = Int64(round((n * h) / dx)) + 1
z_src = (collect(5:10:nz-5) .- 1 ) .* dz .+ dz/2.0
x_src = (5-1) .* ones(Int64, size(z_src)) .* dx .+ dx/2.0
z_rec = (collect(5:1:nz-5) .- 1) .* dz .+ dz/2.0
x_rec = (nx-5-1) .* ones(Int64, size(z_rec)) .*dx .+ dx/2.0
z_inj = (9-1)*h + h/2.0
x_inj = (3-1)*h + h/2.0
z_prod = (9-1)*h + h/2.0
x_prod = (28-1)*h + h/2.0
iter = 100
Prj_names = ["CO2_patchy_pgs"]
K_name = "/K$iter.txt"
rc("axes", titlesize=20)
rc("axes", labelsize=18)
rc("xtick", labelsize=18)
rc("ytick", labelsize=18)
rc("legend", fontsize=20)
for indStage = 2:11
figure()
iPrj = 1
K = readdlm(Prj_names[iPrj] * "/Stage$indStage/" * K_name)
imshow(K, extent=[0,n*h,m*h,0]);
xlabel("Distance (m)")
ylabel("Depth (m)")
cb = colorbar()
clim([20, 120])
cb.set_label("Permeability (md)")
shot_inds = collect(1:length(z_src))
scatter(x_src[shot_inds], z_src[shot_inds], c="w", marker="*")
scatter(x_rec, z_rec, s=16.0, c="r", marker="v")
scatter(x_inj, z_inj, c="r", marker=">")
scatter(x_prod, z_prod, c="r", marker="<")
savefig("figures_summary/K_$(Prj_names[iPrj])_stage_$indStage.pdf", bbox_inches="tight",pad_inches = 0, dpi = 300);
end
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 1236 |
using DelimitedFiles
using PyPlot
close("all")
if !isdir("figures_summary")
mkdir("figures_summary")
end
Prj_names = ["CO2", "CO2_1src", "CO2_2surveys", "Brie_3_nocoefupdate", "Brie_tune_coef_true3_start2"]
rc("axes", titlesize=14)
rc("axes", labelsize=14)
rc("xtick", labelsize=14)
rc("ytick", labelsize=14)
rc("legend", fontsize=14)
figure()
L1 = readdlm("$(Prj_names[1])/loss.txt")
l1=semilogy(L1[:,1], L1[:,2]/L1[1,2], label="Baseline")
legend()
L2 = readdlm("$(Prj_names[2])/loss.txt")
l2=semilogy(L2[:,1], L2[:,2]/L2[1,2], label="One source")
legend()
L3 = readdlm("$(Prj_names[3])/loss.txt")
l3=semilogy(L3[:,1], L3[:,2]/L3[1,2], label="Two surveys")
legend()
grid(ls="--")
xlabel("Iteration Number")
ylabel("Normalized misfit")
savefig("figures_summary/loss.pdf", bbox_inches="tight",pad_inches = 0, dpi = 300);
figure()
L4 = readdlm("$(Prj_names[4])/loss.txt")
l4=semilogy(L4[:,1], L4[:,2]/L4[1,2], label="Exact coefficient")
legend()
L5 = readdlm("$(Prj_names[5])/loss.txt")
l5=semilogy(L5[:,1], L5[:,2]/L5[1,2], label="Inexact coefficient")
legend()
grid(ls="--")
xlabel("Iteration Number")
ylabel("Normalized misfit")
savefig("figures_summary/loss_brie.pdf", bbox_inches="tight",pad_inches = 0, dpi = 300); | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 92 | using PyPlot
include("args.jl")
Sw = constant(collect(0:0.001:1))
lambda_brie_3, den_brie_3 = Brie(Sw) # Brie returns the tuple (λ, ρ)
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 6256 |
include("args.jl")
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
return tran_lambda, tran_den
end
if !isdir("figures_summary")
mkdir("figures_summary")
end
K = 20.0 .* ones(m,n) # millidarcy
K[8:10,:] .= 120.0
# K[17:21,:] .= 100.0
# for i = 1:m
# for j = 1:n
# if i <= (14 - 24)/(30 - 1)*(j-1) + 24 && i >= (12 - 18)/(30 - 1)*(j-1) + 18
# K[i,j] = 100.0
# end
# end
# end
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_true[survey_indices[i]]
p = out_p_true[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
sess = Session();init(sess);
vps = Array{PyObject}(undef, n_survey)
for i=1:n_survey
vps[i] = sqrt((lambdas[i] + 2.0 * tf_shear_sat1)/dens[i]) # vp = sqrt((λ+2μ)/ρ); tf_shear_sat1 is a single nz×nx field, not per-survey
end
V = run(sess, vps);
S = run(sess, out_sw_true);
P = run(sess, out_p_true);
z_inj = (9-1)*h + h/2.0
x_inj = (3-1)*h + h/2.0
z_prod = (9-1)*h + h/2.0
x_prod = (28-1)*h + h/2.0
rc("axes", titlesize=30)
rc("axes", labelsize=30)
rc("xtick", labelsize=28)
rc("ytick", labelsize=28)
rc("legend", fontsize=30)
fig1,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(V[(iPrj-1)*3+jPrj], extent=[0,n*h,m*h,0], vmin=3350, vmax=3500);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# cb = fig1.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Vp")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
end
end
fig1.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig1.add_axes([0.91, 0.08, 0.01, 0.82])
cb1 = fig1.colorbar(ims[1], cax=cbar_ax)
cb1.set_label("Vp (m/s)")
savefig("figures_summary/Vp_evo_patchy_true.pdf",bbox_inches="tight",pad_inches = 0);
fig2,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(S[survey_indices[(iPrj-1)*3+jPrj], :, :], extent=[0,n*h,m*h,0], vmin=0.0, vmax=0.6);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# if iPrj ==2 && jPrj == 3
# cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Saturation")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
end
end
# fig2.subplots_adjust(wspace=0.04, hspace=0.042)
fig2.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig2.add_axes([0.91, 0.08, 0.01, 0.82])
cb2 = fig2.colorbar(ims[1], cax=cbar_ax)
cb2.set_label("Saturation")
savefig("figures_summary/Saturation_evo_patchy_true.pdf",bbox_inches="tight",pad_inches = 0);
fig3,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(P[survey_indices[(iPrj-1)*3+jPrj], :, :]*1.4504e-04, extent=[0,n*h,m*h,0], vmin=-2500.0, vmax=500);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# if iPrj ==2 && jPrj == 3
# cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Saturation")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
end
end
# fig2.subplots_adjust(wspace=0.04, hspace=0.042)
fig3.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig3.add_axes([0.91, 0.08, 0.01, 0.82])
cb3 = fig3.colorbar(ims[1], cax=cbar_ax)
cb3.set_label("Potential (psi)")
savefig("figures_summary/Potential_evo_patchy_true.pdf",bbox_inches="tight",pad_inches = 0);
# iter = 100
# Prj_names = ["CO2", "CO2_1src", "CO2_2surveys", "CO2_6surveys"]
# K_name = "/K$iter.txt"
# fig,axs = subplots(2,2, figsize=[18,8], sharex=true, sharey=true)
# for iPrj = 1:2
# for jPrj = 1:2
# # println(ax)
# A = readdlm(Prj_names[(iPrj-1)*2 + jPrj] * K_name)
# im = axs[iPrj,jPrj].imshow(A, extent=[0,n*h,m*h,0]);
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 2 || iPrj == 2
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# axs[iPrj,jPrj].text(-0.1,1.1,string("(" * Char((iPrj-1)*2 + jPrj+'a'-1) * ")"),transform=axs[iPrj,jPrj].transAxes,size=12,weight="bold")
# end
# end
# fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.9,
# wspace=0.1, hspace=0.2)
# cb_ax = fig.add_axes([0.93, 0.1, 0.02, 0.8])
# cbar = fig.colorbar(im, cax=cb_ax)
# cb = fig.colorbar()
# clim([20, 120])
# cb.set_label("Permeability (md)")
# fig = figure()
# ax = fig.add_subplot(111) # The big subplot
# ax1 = fig.add_subplot(211)
# ax2 = fig.add_subplot(212)
# # Turn off axis lines and ticks of the big subplot
# ax.spines["top"].set_color("none")
# ax.spines["bottom"].set_color("none")
# ax.spines["left"].set_color("none")
# ax.spines["right"].set_color("none")
# ax.tick_params(labelcolor="w", top="off", bottom="off", left="off", right="off")
# # Set common labels
# ax.set_xlabel("common xlabel")
# ax.set_ylabel("common ylabel")
# ax1.set_title('ax1 title')
# ax2.set_title('ax2 title')
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 2446 |
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libPoissonOp = tf.load_op_library('../Ops/Poisson/build/libPoissonOp.so')
@tf.custom_gradient
def poisson_op(coef,g,h,rhograv,index):
p = libPoissonOp.poisson_op(coef,g,h,rhograv,index)
def grad(dy):
return libPoissonOp.poisson_op_grad(dy, p, coef, g, h, rhograv, index)
return p, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libPoissonOp = tf.load_op_library('../Ops/Poisson/build/libPoissonOp.dylib')
@tf.custom_gradient
def poisson_op(coef,g,h,rhograv,index):
p = libPoissonOp.poisson_op(coef,g,h,rhograv,index)
def grad(dy):
return libPoissonOp.poisson_op_grad(dy, p, coef, g, h, rhograv, index)
return p, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libPoissonOp = tf.load_op_library('../Ops/Poisson/build/libPoissonOp.dll')
@tf.custom_gradient
def poisson_op(coef,g,h,rhograv,index):
p = libPoissonOp.poisson_op(coef,g,h,rhograv,index)
def grad(dy):
return libPoissonOp.poisson_op_grad(dy, p, coef, g, h, rhograv, index)
return p, grad
"""
end
poisson_op = py"poisson_op"
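# Usage elsewhere in this repo: poisson_op(coef, rhs, h, ρg, index) solves the
# variable-coefficient Poisson problem for the flow potential; upwps_op defined
# below is its upwind-weighted counterpart and additionally takes a mobility field
# and a reference potential (funcref).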
if Sys.islinux()
py"""
import tensorflow as tf
libUpwpsOp = tf.load_op_library('../Ops/Upwps/build/libUpwpsOp.so')
@tf.custom_gradient
def upwps_op(permi,mobi,src,funcref,h,rhograv,index):
pres = libUpwpsOp.upwps_op(permi,mobi,src,funcref,h,rhograv,index)
def grad(dy):
return libUpwpsOp.upwps_op_grad(dy, pres, permi,mobi,src,funcref,h,rhograv,index)
return pres, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libUpwpsOp = tf.load_op_library('../Ops/Upwps/build/libUpwpsOp.dylib')
@tf.custom_gradient
def upwps_op(permi,mobi,src,funcref,h,rhograv,index):
pres = libUpwpsOp.upwps_op(permi,mobi,src,funcref,h,rhograv,index)
def grad(dy):
return libUpwpsOp.upwps_op_grad(dy, pres, permi,mobi,src,funcref,h,rhograv,index)
return pres, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libUpwpsOp = tf.load_op_library('../Ops/Upwps/build/libUpwpsOp.dll')
@tf.custom_gradient
def upwps_op(permi,mobi,src,funcref,h,rhograv,index):
pres = libUpwpsOp.upwps_op(permi,mobi,src,funcref,h,rhograv,index)
def grad(dy):
return libUpwpsOp.upwps_op_grad(dy, pres, permi,mobi,src,funcref,h,rhograv,index)
return pres, grad
"""
end
upwps_op = py"upwps_op"
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 6175 |
include("args.jl")
using DelimitedFiles
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
return tran_lambda, tran_den
end
if !isdir("figures_summary")
mkdir("figures_summary")
end
iter = 100
Prj_names = "Brie_true3_set2_noupdate";
K_name = "/K$iter.txt"
K = readdlm(Prj_names*K_name)
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_true[survey_indices[i]]
p = out_p_true[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
sess = Session();init(sess);
vps = Array{PyObject}(undef, n_survey)
for i=1:n_survey
vps[i] = sqrt((lambdas[i] + 2.0 * tf_shear_sat1)/dens[i]) # vp = sqrt((λ+2μ)/ρ); tf_shear_sat1 is a single nz×nx field, not per-survey
end
V = run(sess, vps);
S = run(sess, out_sw_true);
P = run(sess, out_p_true);
z_inj = (9-1)*h + h/2.0
x_inj = (3-1)*h + h/2.0
z_prod = (9-1)*h + h/2.0
x_prod = (28-1)*h + h/2.0
rc("axes", titlesize=30)
rc("axes", labelsize=30)
rc("xtick", labelsize=28)
rc("ytick", labelsize=28)
rc("legend", fontsize=30)
fig1,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(V[(iPrj-1)*3+jPrj], extent=[0,n*h,m*h,0], vmin=3350, vmax=3500);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# cb = fig1.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Vp")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
end
end
fig1.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig1.add_axes([0.91, 0.08, 0.01, 0.82])
cb1 = fig1.colorbar(ims[1], cax=cbar_ax)
cb1.set_label("Vp (m/s)")
savefig("figures_summary/predicted_Vp_evo.pdf",bbox_inches="tight",pad_inches = 0);
fig2,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(S[survey_indices[(iPrj-1)*3+jPrj], :, :], extent=[0,n*h,m*h,0], vmin=0.0, vmax=0.6);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# if iPrj ==2 && jPrj == 3
# cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Saturation")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
end
end
# fig2.subplots_adjust(wspace=0.04, hspace=0.042)
fig2.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig2.add_axes([0.91, 0.08, 0.01, 0.82])
cb2 = fig2.colorbar(ims[1], cax=cbar_ax)
cb2.set_label("Saturation")
savefig("figures_summary/predicted_Saturation_evo.pdf",bbox_inches="tight",pad_inches = 0);
# fig3,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
# ims = Array{Any}(undef, 9)
# for iPrj = 1:3
# for jPrj = 1:3
# ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(P[survey_indices[(iPrj-1)*3+jPrj], :, :]*1.4504e-04, extent=[0,n*h,m*h,0], vmin=-2500.0, vmax=500);
# axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 3 || iPrj == 3
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# # if iPrj ==2 && jPrj == 3
# # cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# # cb.set_label("Saturation")
# axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
# axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
# end
# end
# # fig2.subplots_adjust(wspace=0.04, hspace=0.042)
# fig3.subplots_adjust(wspace=0.02, hspace=0.18)
# cbar_ax = fig3.add_axes([0.91, 0.08, 0.01, 0.82])
# cb3 = fig3.colorbar(ims[1], cax=cbar_ax)
# cb3.set_label("Potential (psi)")
# savefig("figures_summary/Potential_evo_patchy_true.pdf",bbox_inches="tight",pad_inches = 0);
# iter = 100
# Prj_names = ["CO2", "CO2_1src", "CO2_2surveys", "CO2_6surveys"]
# K_name = "/K$iter.txt"
# fig,axs = subplots(2,2, figsize=[18,8], sharex=true, sharey=true)
# for iPrj = 1:2
# for jPrj = 1:2
# # println(ax)
# A = readdlm(Prj_names[(iPrj-1)*2 + jPrj] * K_name)
# im = axs[iPrj,jPrj].imshow(A, extent=[0,n*h,m*h,0]);
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 2 || iPrj == 2
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# axs[iPrj,jPrj].text(-0.1,1.1,string("(" * Char((iPrj-1)*2 + jPrj+'a'-1) * ")"),transform=axs[iPrj,jPrj].transAxes,size=12,weight="bold")
# end
# end
# fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.9,
# wspace=0.1, hspace=0.2)
# cb_ax = fig.add_axes([0.93, 0.1, 0.02, 0.8])
# cbar = fig.colorbar(im, cax=cb_ax)
# cb = fig.colorbar()
# clim([20, 120])
# cb.set_label("Permeability (md)")
# fig = figure()
# ax = fig.add_subplot(111) # The big subplot
# ax1 = fig.add_subplot(211)
# ax2 = fig.add_subplot(212)
# # Turn off axis lines and ticks of the big subplot
# ax.spines["top"].set_color("none")
# ax.spines["bottom"].set_color("none")
# ax.spines["left"].set_color("none")
# ax.spines["right"].set_color("none")
# ax.tick_params(labelcolor="w", top="off", bottom="off", left="off", right="off")
# # Set common labels
# ax.set_xlabel("common xlabel")
# ax.set_ylabel("common ylabel")
# ax1.set_title('ax1 title')
# ax2.set_title('ax2 title')
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |

["MIT"] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 1299 |
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.so')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.dylib')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.dll')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
end
sat_op = py"sat_op" | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 5635 | using ArgParse
function parse_commandline()
s = ArgParseSettings()
@add_arg_table s begin
"--generate_data"
arg_type = Bool
default = false
"--version"
arg_type = String
default = "0000"
"--verbose"
arg_type = Bool
default = false
end
return parse_args(s)
end
args = parse_commandline()
if !isdir("./$(args["version"])")
mkdir("./$(args["version"])")
end
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
include("ops_imseq.jl")
include("../Ops/FWI/fwi_util.jl")
include("fwi_util_op.jl")
np = pyimport("numpy")
# NOTE Parameters
# const ALPHA = 0.006323996017182
# const SRC_CONST = 5.6146
# const GRAV_CONST = 1.0/144.0
const ALPHA = 1.0
const SRC_CONST = 86400.0
const GRAV_CONST = 1.0
# NOTE Hyperparameter for flow simulation
m = 15
n = 30
h = 30.0 # meter
NT = 50
dt_survey = 5
Δt = 20.0 # day
z = (1:m)*h|>collect
x = (1:n)*h|>collect
X, Z = np.meshgrid(x, z)
# ρw = 996.9571
# ρo = 640.7385
# μw = 1.0
# μo = 3.0
ρw = 501.9
ρo = 1053.0
μw = 0.1
μo = 1.0
K_init = 20.0 .* ones(m,n)
g = 9.8*GRAV_CONST
ϕ = 0.25 .* ones(m,n)
qw = zeros(NT, m, n)
qw[:,9,3] .= 0.005 * (1/h^2)/10.0 * SRC_CONST
qo = zeros(NT, m, n)
qo[:,9,28] .= -0.005 * (1/h^2)/10.0 * SRC_CONST
sw0 = zeros(m, n)
survey_indices = collect(1:dt_survey:NT+1) # 11 surveys (every dt_survey=5 steps over NT=50)
n_survey = length(survey_indices)
# NOTE Hyperparameter for fwi_op
# argsparse.jl
# ENV["CUDA_VISIBLE_DEVICES"] = 1
# ENV["PARAMDIR"] = "Src/params/"
# config = tf.ConfigProto(device_count = Dict("GPU"=>0))
dz = 3 # meters
dx = 3
nz = Int64(round((m * h) / dz)) + 1
nx = Int64(round((n * h) / dx)) + 1
nPml = 64
nSteps = 3001
dt = 0.00025
f0 = 50.0
nPad = 32 - mod((nz+2*nPml), 32)
nz_pad = nz + 2*nPml + nPad
nx_pad = nx + 2*nPml
# reflection
# x_src = collect(5:20:nx-5)
# z_src = 5ones(Int64, size(x_src))
# x_rec = collect(5:1:nx-5)
# z_rec = 5 .* ones(Int64, size(x_rec))
# xwell
# # z_src = collect(5:10:nz-5) #14->11srcs 10->15srcs
# # z_src = collect(5:10:nz-5)
z_src = collect(5:10:nz-5)
x_src = 5ones(Int64, size(z_src))
z_rec = collect(5:1:nz-5)
x_rec = (nx-5) .* ones(Int64, size(z_rec))
# para_fname = "./$(args["version"])/para_file.json"
# survey_fname = "./$(args["version"])/survey_file.json"
# data_dir_name = "./$(args["version"])/Data"
# paraGen(nz, nx, dz, dx, nSteps, dt, f0, nPml, nPad, filter_para, isAc, para_fname, survey_fname, data_dir_name)
# surveyGen(z_src, x_src, z_rec, x_rec, survey_fname)
cp_nopad = 3500.0 .* ones(nz, nx) # initial cp
cs = cp_nopad ./ sqrt(3.0)
den = 2200.0 .* ones(nz, nx)
cp_pad = 3500.0 .* ones(nz_pad, nx_pad) # initial cp
cs_pad = cp_pad ./ sqrt(3.0)
den_pad = 2200.0 .* ones(nz_pad, nx_pad)
cp_pad_value = 3500.0
# tf_cp = constant(cp)
tf_cs = constant(cs_pad)
tf_den = constant(den_pad)
# src = Matrix{Float64}(undef, 1, 2001)
# # src[1,:] = Float64.(reinterpret(Float32, read("../Ops/FWI/Src/params/ricker_10Hz.bin")))
# src[1,:] = Float64.(reinterpret(Float32, read("../Ops/FWI/Src/params/Mar_source_2001.bin")))
src = sourceGene(f0, nSteps, dt)
tf_stf = constant(repeat(src, outer=length(z_src)))
# tf_para_fname = tf.strings.join([para_fname])
tf_gpu_id0 = constant(0, dtype=Int32)
tf_gpu_id1 = constant(1, dtype=Int32)
nGpus = 4
# tf_gpu_id_array = constant(collect(0:nGpus-1), dtype=Int32)
tf_gpu_id_array = constant([0,1,2,3], dtype=Int32)
tf_shot_ids0 = constant(collect(Int32, 0:length(x_src)-1), dtype=Int32)
tf_shot_ids1 = constant(collect(Int32, 13:25), dtype=Int32)
# NOTE Hyperparameter for rock physics
tf_bulk_fl1 = constant(2.735e9)
tf_bulk_fl2 = constant(0.125e9) # to displace fl1
tf_bulk_sat1 = constant(den .* (cp_nopad.^2 .- 4.0/3.0 .* cp_nopad.^2 ./3.0)) # vp/vs ratio as sqrt(3)
tf_bulk_min = constant(36.6e9)
tf_shear_sat1 = constant(den .* cp_nopad.^2 ./3.0)
tf_ϕ_pad = tf.image.resize_bilinear(tf.reshape(constant(ϕ), (1, m, n, 1)), (nz, nx)) # upsample the porosity
tf_ϕ_pad = cast(tf_ϕ_pad, Float64)
tf_ϕ_pad = squeeze(tf_ϕ_pad)
tf_shear_pad = tf.pad(tf_shear_sat1, [nPml (nPml+nPad); nPml nPml],
constant_values=den[1,1] * cp_nopad[1,1]^2 /3.0) / 1e6
function Gassman(sw)
tf_bulk_fl_mix = 1.0/( (1-sw)/tf_bulk_fl1 + sw/tf_bulk_fl2 )
temp = tf_bulk_sat1/(tf_bulk_min - tf_bulk_sat1) - tf_bulk_fl1/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl1) + tf_bulk_fl_mix/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl_mix)
tf_bulk_new = tf_bulk_min / (1.0/temp + 1.0)
# tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo) *16.018463373960138;
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
# tf_cp_new = sqrt((tf_bulk_new + 4.0/3.0 * tf_shear_sat1)/tf_den_new)
tf_lambda_new = tf_bulk_new - 2.0/3.0 * tf_shear_sat1
return tf_lambda_new, tf_den_new
end
function RockLinear(sw)
# tf_lambda_new = constant(7500.0*1e6 .* ones(nz,nx)) + (17400.0-7500.0)*1e6 * sw
tf_lambda_new = constant(7500.0*1e6 .* ones(nz,nx)) + (9200.0-7500.0)*1e6 * sw
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
return tf_lambda_new, tf_den_new
end
tf_patch_temp = tf_bulk_sat1/(tf_bulk_min - tf_bulk_sat1) -
tf_bulk_fl1/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl1) +
tf_bulk_fl2/tf_ϕ_pad /(tf_bulk_min - tf_bulk_fl2)
tf_bulk_sat2 = tf_bulk_min/(1.0/tf_patch_temp + 1.0)
function Patchy(sw)
tf_bulk_new = 1/( (1-sw)/(tf_bulk_sat1+4.0/3.0*tf_shear_sat1)
+ sw/(tf_bulk_sat2+4.0/3.0*tf_shear_sat1) ) - 4.0/3.0*tf_shear_sat1
tf_lambda_new = tf_bulk_new - 2.0/3.0 * tf_shear_sat1
tf_den_new = constant(den) + tf_ϕ_pad .* sw * (ρw - ρo)
return tf_lambda_new, tf_den_new
end
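# Minimal usage sketch (hypothetical uniform saturation on the nz x nx wave grid):
# λ_new, ρ_new = Patchy(constant(0.3 * ones(nz, nx)))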
# ===================== file boundary =====================
if Sys.islinux()
py"""
import tensorflow as tf
libFwiOp = tf.load_op_library('../Ops/FWI/build/libFwiOp.so')
@tf.custom_gradient
def fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
def grad(dy):
return libFwiOp.fwi_op_grad(dy, tf.constant(1.0,dtype=tf.float64),λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit, grad
def fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libFwiOp = tf.load_op_library('../Ops/FWI/build/libFwiOp.so')
@tf.custom_gradient
def fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
def grad(dy):
return libFwiOp.fwi_op_grad(dy, tf.constant(1.0,dtype=tf.float64),λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit, grad
def fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libFwiOp = tf.load_op_library('../Ops/FWI/build/libFwiOp.so')
@tf.custom_gradient
def fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
def grad(dy):
return libFwiOp.fwi_op_grad(dy, tf.constant(1.0,dtype=tf.float64),λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit, grad
def fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname):
misfit = libFwiOp.fwi_obs_op(λ,μ,ρ,stf,gpu_id,shot_ids,para_fname)
return misfit
"""
end
fwi_op = py"fwi_op"
fwi_obs_op = py"fwi_obs_op"
# ===================== file boundary =====================
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libLaplacian = tf.load_op_library('../Ops/Laplacian/build/libLaplacian.so')
@tf.custom_gradient
def laplacian_op(coef,func,h,rhograv):
p = libLaplacian.laplacian(coef,func,h,rhograv)
def grad(dy):
return libLaplacian.laplacian_grad(dy, coef, func, h, rhograv)
return p, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libLaplacian = tf.load_op_library('../Ops/Laplacian/build/libLaplacian.dylib')
@tf.custom_gradient
def laplacian_op(coef,func,h,rhograv):
p = libLaplacian.laplacian(coef,func,h,rhograv)
def grad(dy):
return libLaplacian.laplacian_grad(dy, coef, func, h, rhograv)
return p, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libLaplacian = tf.load_op_library('../Ops/Laplacian/build/libLaplacian.dll')
@tf.custom_gradient
def laplacian_op(coef,func,h,rhograv):
p = libLaplacian.laplacian(coef,func,h,rhograv)
def grad(dy):
return libLaplacian.laplacian_grad(dy, coef, func, h, rhograv)
return p, grad
"""
end
laplacian_op = py"laplacian_op"
if Sys.islinux()
py"""
import tensorflow as tf
libUpwlapOp = tf.load_op_library('../Ops/Upwlap/build/libUpwlapOp.so')
@tf.custom_gradient
def upwlap_op(perm,mobi,func,h,rhograv):
out = libUpwlapOp.upwlap_op(perm,mobi,func,h,rhograv)
def grad(dy):
return libUpwlapOp.upwlap_op_grad(dy, out, perm,mobi,func,h,rhograv)
return out, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libUpwlapOp = tf.load_op_library('../Ops/Upwlap/build/libUpwlapOp.dylib')
@tf.custom_gradient
def upwlap_op(perm,mobi,func,h,rhograv):
out = libUpwlapOp.upwlap_op(perm,mobi,func,h,rhograv)
def grad(dy):
return libUpwlapOp.upwlap_op_grad(dy, out, perm,mobi,func,h,rhograv)
return out, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libUpwlapOp = tf.load_op_library('../Ops/Upwlap/build/libUpwlapOp.dll')
@tf.custom_gradient
def upwlap_op(perm,mobi,func,h,rhograv):
out = libUpwlapOp.upwlap_op(perm,mobi,func,h,rhograv)
def grad(dy):
return libUpwlapOp.upwlap_op_grad(dy, out, perm,mobi,func,h,rhograv)
return out, grad
"""
end
upwlap_op = py"upwlap_op"
# ===================== file boundary =====================
include("args.jl")
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
# tran_lambda_pad = tf.pad(tran_lambda, [nPml (nPml+nPad); nPml nPml], constant_values=3500.0^2*2200.0/3.0) /1e6
# tran_den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], constant_values=2200.0)
return tran_lambda, tran_den
end
args["version"] = "CO2"
lambdasObs = Array{PyObject}(undef, n_survey)
densObs = Array{PyObject}(undef, n_survey)
for iSur = 2:n_survey
lp = readdlm("./$(args["version"])/FWI_stage$(iSur)/loss.txt")
Lp = Int64((lp[end,1]))
lambdasObs[iSur] = constant(readdlm("./$(args["version"])/FWI_stage$(iSur)/Lambda$Lp.txt")*1e6)
densObs[iSur] = constant(readdlm("./$(args["version"])/FWI_stage$(iSur)/Den$Lp.txt")*1e6)
end
tfCtxInit = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K_init,g,ϕ,qw,qo, sw0, false)
out_sw_init, out_p_init = imseq(tfCtxInit)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_init[survey_indices[i]]
p = out_p_init[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
lambdasObs[1] = lambdas[1]
densObs[1] = dens[1]
function objective_function(lambdasObs, lambdas, densObs, dens)
# tf.nn.l2_loss(lambdasObs - lambdas) + tf.nn.l2_loss(densObs - dens)
# tf.nn.l2_loss(densObs - dens)
loss = constant(0.0)
for i=1:n_survey
loss += tf.nn.l2_loss(lambdasObs[i][10:nz-10,10:nx-10] - lambdas[i][10:nz-10,10:nx-10])
end
return loss
end
J = objective_function(lambdasObs, lambdas, densObs, dens)
gradK = gradients(J, tfCtxInit.K)
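# gradients(J, K) backpropagates through the while_loop time stepping in imseq and
# the custom ops (upwps_op, sat_op, upwlap_op), yielding dJ/dK on the coarse flow grid.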
if !isdir("./$(args["version"])/flow_fit_results")
mkdir("./$(args["version"])/flow_fit_results")
end
__cnt = 0
# invK = zeros(m,n)
function print_loss(l, invK, grad)
global __cnt, __l, __K, __grad
if mod(__cnt,1)==0
println("\niter=$__iter, eval=$__cnt, current loss=",l)
# println("a=$a, b1=$b1, b2=$b2")
end
__l = l
__K = invK
__grad = grad
__cnt += 1
end
__iter = 0
function print_iter(rk)
global __iter, __l, __K, __grad
if mod(__iter,1)==0
println("\n************* ITER=$__iter *************\n")
end
__iter += 1
open("./$(args["version"])/flow_fit_results/loss.txt", "a") do io
writedlm(io, Any[__iter __l])
end
writedlm("./$(args["version"])/flow_fit_results/K$__iter.txt", __K)
writedlm("./$(args["version"])/flow_fit_results/gradK$__iter.txt", __grad)
end
# u = readdlm("...") --> plot(u[:,2])
# config = tf.ConfigProto()
# config.intra_op_parallelism_threads = 24
# config.inter_op_parallelism_threads = 24
sess = Session(); init(sess);
opt = ScipyOptimizerInterface(J, var_list=[tfCtxInit.K], var_to_bounds=Dict(tfCtxInit.K=> (10.0, 130.0)), method="L-BFGS-B",
options=Dict("maxiter"=> 100, "ftol"=>1e-12, "gtol"=>1e-12))
ScipyOptimizerMinimize(sess, opt, loss_callback=print_loss, step_callback=print_iter, fetches=[J, tfCtxInit.K, gradK])
# ===================== file boundary =====================
function padding(lambda, den)
tran_lambda = cast(lambda, Float64)
tran_den = cast(den, Float64)
lambda_pad = tf.pad(tran_lambda, [nPml (nPml+nPad); nPml nPml], constant_values=1.0/3.0*3500.0^2*2200.0/1e6)
den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], constant_values=2200.0)
return lambda_pad, den_pad
end
# global loss
function fwi_sep(iSur)
if !isdir("./$(args["version"])/FWI_stage$iSur/")
mkdir("./$(args["version"])/FWI_stage$iSur/")
end
if iSur <= 2
lambda_init = den .* (cp_nopad.^2 .- 2.0 .* cp_nopad.^2 ./3.0) /1e6
den_init = den
else
lp = readdlm("./$(args["version"])/FWI_stage$(iSur-1)/loss.txt")
Lp = Int64((lp[end,1]))
lambda_init = readdlm("./$(args["version"])/FWI_stage$(iSur-1)/Lambda$Lp.txt")
den_init = readdlm("./$(args["version"])/FWI_stage$(iSur-1)/Den$Lp.txt")
# den_init = den
end
tf_lambda_inv = Variable(lambda_init, dtype=Float64)
tf_den_inv = Variable(den_init, dtype=Float64)
tf_lambda_inv_pad, tf_den_inv_pad = padding(tf_lambda_inv, tf_den_inv)
shot_id_points = Int32.(trunc.(collect(LinRange(0, length(x_src)-1, nGpus+1))))
shot_jump = 1
# NOTE Compute FWI loss
para_fname = "./$(args["version"])/para_file$iSur.json"
loss = constant(0.0)
for i = 1:nGpus
# global loss
tf_shot_ids = constant(collect(shot_id_points[i]:shot_jump:shot_id_points[i+1]), dtype=Int32)
loss += fwi_op(tf_lambda_inv_pad, tf_shear_pad, tf_den_inv_pad, tf_stf, tf_gpu_id_array[i], tf_shot_ids, para_fname)
end
gradLambda = gradients(loss, tf_lambda_inv)
gradDen= gradients(loss, tf_den_inv)
# loss = fwi_op(tf_lambda_inv_pad, tf_shear_pad, tf_den_inv_pad, tf_stf, tf_gpu_id_array[1], tf_shot_ids0, para_fname)
# Optimization
__cnt = 0
__l = 0
__Lambda = zeros(nz,nx)
__gradLambda = zeros(nz,nx)
__Den = zeros(nz,nx)
__gradDen = zeros(nz,nx)
# invK = zeros(m,n)
function print_loss(l, Lambda, Den, gradLambda, gradDen)
# global __l, __Lambda, __gradLambda, __Den, __gradDen
if mod(__cnt,1)==0
println("\niter=$__iter, eval=$__cnt, current loss=",l)
# println("a=$a, b1=$b1, b2=$b2")
end
__cnt += 1
__l = l
__Lambda = Lambda
__gradLambda = gradLambda
__Den = Den
__gradDen = gradDen
end
__iter = 0
function print_iter(rk)
# global __iter
if mod(__iter,1)==0
println("\n************* ITER=$__iter *************\n")
end
__iter += 1
open("./$(args["version"])/FWI_stage$iSur/loss.txt", "a") do io
println("\n outer_iter=$__iter, current loss=", __l)
writedlm(io, Any[__iter __l])
end
open("./$(args["version"])/FWI_stage$iSur/Lambda$__iter.txt", "w") do io
writedlm(io, __Lambda)
end
open("./$(args["version"])/FWI_stage$iSur/gradLambda$__iter.txt", "w") do io
writedlm(io, __gradLambda)
end
open("./$(args["version"])/FWI_stage$iSur/Den$__iter.txt", "w") do io
writedlm(io, __Den)
end
open("./$(args["version"])/FWI_stage$iSur/gradDen$__iter.txt", "w") do io
writedlm(io, __gradDen)
end
end
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 24
config.inter_op_parallelism_threads = 24
sess = Session(config=config); init(sess);
lambda_lb = 5800.0
lambda_ub = 9000.0
opt = ScipyOptimizerInterface(loss, var_list=[tf_lambda_inv, tf_den_inv], var_to_bounds=Dict(tf_lambda_inv=> (lambda_lb, lambda_ub),tf_den_inv=> (2100.0, 2200.0)), method="L-BFGS-B", options=Dict("maxiter"=> 100, "ftol"=>1e-12, "gtol"=>1e-12))
# # lambda_lb = (2500.0^2 - 2.0 * 3000.0^2/3.0) * 2500.0 /1e6
# # lambda_ub = (4000.0^2 - 2.0 * 3000.0^2/3.0) * 2500.0 /1e6
# lambda_lb = 5800.0
# lambda_ub = 9000.0
# opt = ScipyOptimizerInterface(loss, var_list=[tf_lambda_inv], var_to_bounds=Dict(tf_lambda_inv=> (lambda_lb, lambda_ub)), method="L-BFGS-B", options=Dict("maxiter"=> 100, "ftol"=>1e-12, "gtol"=>1e-12))
@info "Optimization Starts..."
ScipyOptimizerMinimize(sess, opt, loss_callback=print_loss, step_callback=print_iter, fetches=[loss,tf_lambda_inv,tf_den_inv,gradLambda,gradDen])
end
# ===================== file boundary =====================
#=
Main program for two phase flow inversion
=#
include("args.jl")
include("main_fwi_sepinv.jl")
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
tran_lambda_pad = tf.pad(tran_lambda, [nPml (nPml+nPad); nPml nPml], constant_values=3500.0^2*2200.0/3.0) /1e6
tran_den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], constant_values=2200.0)
return tran_lambda_pad, tran_den_pad
end
# NOTE Generate Data
if args["generate_data"]
println("Generate Test Data...")
K = 20.0 .* ones(m,n) # millidarcy
K[8:10,:] .= 120.0
# K[17:21,:] .= 100.0
# for i = 1:m
# for j = 1:n
# if i <= (14 - 24)/(30 - 1)*(j-1) + 24 && i >= (12 - 18)/(30 - 1)*(j-1) + 18
# K[i,j] = 100.0
# end
# end
# end
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_true[survey_indices[i]]
p = out_p_true[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
misfit = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
if !isdir("./$(args["version"])/Data$i")
mkdir("./$(args["version"])/Data$i")
end
para_fname = "./$(args["version"])/para_file$i.json"
survey_fname = "./$(args["version"])/survey_file$i.json"
paraGen(nz_pad, nx_pad, dz, dx, nSteps, dt, f0, nPml, nPad, para_fname, survey_fname, "./$(args["version"])/Data$i/")
# shot_inds = collect(1:3:length(z_src)) .+ mod(i-1,3) # 5src rotation
# shot_inds = i # 1src rotation
shot_inds = collect(1:length(z_src)) # all sources
surveyGen(z_src[shot_inds], x_src[shot_inds], z_rec, x_rec, survey_fname)
tf_shot_ids0 = constant(collect(0:length(shot_inds)-1), dtype=Int32)
misfit[i] = fwi_obs_op(lambdas[i], tf_shear_pad, dens[i], tf_stf, tf_gpu_id0, tf_shot_ids0, para_fname)
end
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 24
config.inter_op_parallelism_threads = 24
sess = Session(config=config); init(sess);
run(sess, misfit)
error("Generate Data: Stop")
end
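# With the observed gathers on disk, invert each survey independently; every FWI
# stage warm-starts from the previous stage's Lambda/Den estimates (see fwi_sep).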
for iSur = 2:n_survey
fwi_sep(iSur)
end
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 3110 | using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using DelimitedFiles
using Random
Random.seed!(233)
np = pyimport("numpy")
include("poisson_op.jl")
include("laplacian_op.jl")
include("sat_op.jl")
const K_CONST = 9.869232667160130e-16 * 86400 * 1e3
mutable struct Ctx
m; n; h; NT; Δt; Z; X; ρw; ρo;
μw; μo; K; g; ϕ; qw; qo; sw0
end
function tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo,sw0,ifTrue)
tf_h = constant(h)
# tf_NT = constant(NT)
tf_Δt = constant(Δt)
tf_Z = constant(Z)
tf_X= constant(X)
tf_ρw = constant(ρw)
tf_ρo = constant(ρo)
tf_μw = constant(μw)
tf_μo = constant(μo)
# tf_K = isa(K,Array) ? Variable(K) : K
if ifTrue
tf_K = constant(K)
else
tf_K = Variable(K)
end
tf_g = constant(g)
# tf_ϕ = Variable(ϕ)
tf_ϕ = constant(ϕ)
tf_qw = constant(qw)
tf_qo = constant(qo)
tf_sw0 = constant(sw0)
return Ctx(m,n,tf_h,NT,tf_Δt,tf_Z,tf_X,tf_ρw,tf_ρo,tf_μw,tf_μo,tf_K,tf_g,tf_ϕ,tf_qw,tf_qo,tf_sw0)
end
function Krw(Sw)
return Sw ^ 1.5
end
function Kro(So)
return So ^1.5
end
function ave_normal(quantity, m, n)
aa = sum(quantity)
return aa/(m*n)
end
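# Subtracting this mean from the Poisson source in onestep (see load_normal below)
# keeps the pure-Neumann pressure problem compatible: the right-hand side must
# integrate to zero for a solution to exist.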
# variables : sw, u, v, p
# (time dependent) parameters: qw, qo, ϕ
function onestep(sw, p, m, n, h, Δt, Z, ρw, ρo, μw, μo, K, g, ϕ, qw, qo)
# step 1: update p
# λw = Krw(sw)/μw
# λo = Kro(1-sw)/μo
λw = sw.*sw/μw
λo = (1-sw).*(1-sw)/μo
λ = λw + λo
q = qw + qo + λw/(λo+1e-16).*qo
potential_c = (ρw - ρo)*g .* Z
# Step 1: implicit potential
Θ = upwlap_op(K * K_CONST, λo, potential_c, h, constant(0.0))
load_normal = (Θ+q/ALPHA) - ave_normal(Θ+q/ALPHA, m, n)
p = upwps_op(K * K_CONST, λ, load_normal, p, h, constant(0.0), constant(0)) # potential p = pw - ρw*g*h
# step 2: implicit transport
sw = sat_op(sw, p, K * K_CONST, ϕ, qw, qo, μw, μo, sw, Δt, h)
return sw, p
end
"""
imseq(tf_ctx)
Solve the two phase flow equation with an implicit-sequential scheme.
`qw` and `qo` -- `NT x m x n` numerical arrays; `qw[i,:,:]` is the value of `qw` at time i*Δt
`sw0` -- `m x n` initial water saturation (the initial potential is set to zero)
"""
function imseq(tf_ctx)
ta_sw, ta_p = TensorArray(NT+1), TensorArray(NT+1)
ta_sw = write(ta_sw, 1, tf_ctx.sw0)
ta_p = write(ta_p, 1, constant(zeros(tf_ctx.m, tf_ctx.n)))
i = constant(1, dtype=Int32)
function condition(i, tas...)
i <= tf_ctx.NT
end
function body(i, tas...)
ta_sw, ta_p = tas
sw, p = onestep(read(ta_sw, i), read(ta_p, i), tf_ctx.m, tf_ctx.n, tf_ctx.h, tf_ctx.Δt, tf_ctx.Z, tf_ctx.ρw, tf_ctx.ρo, tf_ctx.μw, tf_ctx.μo, tf_ctx.K, tf_ctx.g, tf_ctx.ϕ, tf_ctx.qw[i], tf_ctx.qo[i])
ta_sw = write(ta_sw, i+1, sw)
ta_p = write(ta_p, i+1, p)
i+1, ta_sw, ta_p
end
_, ta_sw, ta_p = while_loop(condition, body, [i; ta_sw; ta_p;])
out_sw, out_p = stack(ta_sw), stack(ta_p)
end
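# Minimal usage sketch (placeholder inputs; qw and qo are NT x m x n arrays):
# K = 20.0 .* ones(m, n)   # permeability in millidarcy
# ctx = tfCtxGen(m, n, h, NT, Δt, Z, X, ρw, ρo, μw, μo, K, g, ϕ, qw, qo, sw0, true)
# out_sw, out_p = imseq(ctx)
# sess = Session(); init(sess)
# S, P = run(sess, [out_sw, out_p])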
function vis(val, args...;kwargs...)
close("all")
ns = Int64.(round.(LinRange(1,size(val,1),9)))
for i = 1:9
subplot(330+i)
imshow(val[ns[i],:,:], args...;kwargs...)
colorbar()
end
end
# ===================== file boundary =====================
include("args.jl")
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
# tran_lambda_pad = tf.pad(tran_lambda, [nPml (nPml+nPad); nPml nPml], constant_values=3500.0^2*2200.0/3.0) /1e6
# tran_den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], constant_values=2200.0)
return tran_lambda, tran_den
end
if !isdir("figures_summary")
mkdir("figures_summary")
end
lambdasObs = Array{PyObject}(undef, n_survey-1)
densObs = Array{PyObject}(undef, n_survey-1)
for iSur = 2:n_survey
lp = readdlm("./CO2/FWI_stage$(iSur)/loss.txt")
Lp = Int64((lp[end,1]))
lambdasObs[iSur-1] = readdlm("./CO2/FWI_stage$(iSur)/Lambda$Lp.txt")
densObs[iSur-1] = readdlm("./CO2/FWI_stage$(iSur)/Den$Lp.txt")
end
m = 15
n = 30
h = 30.0
dz = 3.0 # meters
dx = 3.0
nz = Int64(round((m * h) / dz)) + 1
nx = Int64(round((n * h) / dx)) + 1
z_src = (collect(5:10:nz-5) .- 1 ) .* dz .+ dz/2.0
x_src = (5-1)ones(Int64, size(z_src)) .* dx .+ dx/2.0
z_rec = (collect(5:1:nz-5) .- 1) .* dz .+ dz/2.0
x_rec = (nx-5-1) .* ones(Int64, size(z_rec)) .*dx .+ dx/2.0
z_inj = (9-1)*h + h/2.0
x_inj = (3-1)*h + h/2.0
z_prod = (9-1)*h + h/2.0
x_prod = (28-1)*h + h/2.0
rc("axes", titlesize=20)
rc("axes", labelsize=18)
rc("xtick", labelsize=18)
rc("ytick", labelsize=18)
rc("legend", fontsize=20)
figure()
K = readdlm("CO2/flow_fit_results/K100.txt")
imshow(K, extent=[0,n*h,m*h,0]);
xlabel("Distance (m)")
ylabel("Depth (m)")
cb = colorbar()
clim([20, 120])
cb.set_label("Permeability (md)")
shot_inds = collect(1:length(z_src))
scatter(x_src[shot_inds], z_src[shot_inds], c="w", marker="*")
scatter(x_rec, z_rec, s=16.0, c="r", marker="v")
scatter(x_inj, z_inj, c="r", marker=">")
scatter(x_prod, z_prod, c="r", marker="<")
savefig("figures_summary/K_sep_fit.pdf", bbox_inches="tight",pad_inches = 0, dpi = 300);
rc("axes", titlesize=30)
rc("axes", labelsize=30)
rc("xtick", labelsize=28)
rc("ytick", labelsize=28)
rc("legend", fontsize=30)
fig1,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(lambdasObs[(iPrj-1)*3+jPrj], extent=[0,n*h,m*h,0], vmin=5500, vmax=9000);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj+1)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# cb = fig1.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("λ")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">", s=128)
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
end
# fig1.subplots_adjust(wspace=0.02, hspace=0.042)
fig1.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig1.add_axes([0.91, 0.08, 0.01, 0.82])
cb1 = fig1.colorbar(ims[1], cax=cbar_ax)
cb1.set_label("λ (MPa)")
savefig("figures_summary/Lambda_FWI_sep_inv.pdf",bbox_inches="tight",pad_inches = 0); | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 2446 | using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libPoissonOp = tf.load_op_library('../Ops/Poisson/build/libPoissonOp.so')
@tf.custom_gradient
def poisson_op(coef,g,h,rhograv,index):
p = libPoissonOp.poisson_op(coef,g,h,rhograv,index)
def grad(dy):
return libPoissonOp.poisson_op_grad(dy, p, coef, g, h, rhograv, index)
return p, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libPoissonOp = tf.load_op_library('../Ops/Poisson/build/libPoissonOp.dylib')
@tf.custom_gradient
def poisson_op(coef,g,h,rhograv,index):
p = libPoissonOp.poisson_op(coef,g,h,rhograv,index)
def grad(dy):
return libPoissonOp.poisson_op_grad(dy, p, coef, g, h, rhograv, index)
return p, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libPoissonOp = tf.load_op_library('../Ops/Poisson/build/libPoissonOp.dll')
@tf.custom_gradient
def poisson_op(coef,g,h,rhograv,index):
p = libPoissonOp.poisson_op(coef,g,h,rhograv,index)
def grad(dy):
return libPoissonOp.poisson_op_grad(dy, p, coef, g, h, rhograv, index)
return p, grad
"""
end
poisson_op = py"poisson_op"
if Sys.islinux()
py"""
import tensorflow as tf
libUpwpsOp = tf.load_op_library('../Ops/Upwps/build/libUpwpsOp.so')
@tf.custom_gradient
def upwps_op(permi,mobi,src,funcref,h,rhograv,index):
pres = libUpwpsOp.upwps_op(permi,mobi,src,funcref,h,rhograv,index)
def grad(dy):
return libUpwpsOp.upwps_op_grad(dy, pres, permi,mobi,src,funcref,h,rhograv,index)
return pres, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libUpwpsOp = tf.load_op_library('../Ops/Upwps/build/libUpwpsOp.dylib')
@tf.custom_gradient
def upwps_op(permi,mobi,src,funcref,h,rhograv,index):
pres = libUpwpsOp.upwps_op(permi,mobi,src,funcref,h,rhograv,index)
def grad(dy):
return libUpwpsOp.upwps_op_grad(dy, pres, permi,mobi,src,funcref,h,rhograv,index)
return pres, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libUpwpsOp = tf.load_op_library('../Ops/Upwps/build/libUpwpsOp.dll')
@tf.custom_gradient
def upwps_op(permi,mobi,src,funcref,h,rhograv,index):
pres = libUpwpsOp.upwps_op(permi,mobi,src,funcref,h,rhograv,index)
def grad(dy):
return libUpwpsOp.upwps_op_grad(dy, pres, permi,mobi,src,funcref,h,rhograv,index)
return pres, grad
"""
end
upwps_op = py"upwps_op"
# ===================== file boundary =====================
include("args.jl")
using DelimitedFiles
function sw_p_to_lambda_den(sw, p)
sw = tf.reshape(sw, (1, m, n, 1))
p = tf.reshape(p, (1, m, n, 1))
sw = tf.image.resize_bilinear(sw, (nz, nx))
p = tf.image.resize_bilinear(p, (nz, nx))
sw = cast(sw, Float64)
p = cast(p, Float64)
sw = squeeze(sw)
p = squeeze(p)
# tran_lambda, tran_den = Gassman(sw)
# tran_lambda, tran_den = RockLinear(sw) # test linear relationship
tran_lambda, tran_den = Patchy(sw)
return tran_lambda, tran_den
end
if !isdir("figures_summary")
mkdir("figures_summary")
end
iter = 100
Prj_names = "CO2/flow_fit_results";
K_name = "/K$iter.txt"
K = readdlm(Prj_names*K_name)
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
lambdas = Array{PyObject}(undef, n_survey)
dens = Array{PyObject}(undef, n_survey)
for i = 1:n_survey
sw = out_sw_true[survey_indices[i]]
p = out_p_true[survey_indices[i]]
lambdas[i], dens[i] = sw_p_to_lambda_den(sw, p)
end
sess = Session();init(sess);
vps = Array{PyObject}(undef, n_survey)
for i=1:n_survey
vps[i] = sqrt((lambdas[i] + 2.0 * tf_shear_sat1)/dens[i])
end
V = run(sess, vps);
S = run(sess, out_sw_true);
P = run(sess, out_p_true);
z_inj = (9-1)*h + h/2.0
x_inj = (3-1)*h + h/2.0
z_prod = (9-1)*h + h/2.0
x_prod = (28-1)*h + h/2.0
rc("axes", titlesize=30)
rc("axes", labelsize=30)
rc("xtick", labelsize=28)
rc("ytick", labelsize=28)
rc("legend", fontsize=30)
fig1,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(V[(iPrj-1)*3+jPrj], extent=[0,n*h,m*h,0], vmin=3350, vmax=3500);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# cb = fig1.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Vp")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">", s=128)
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
end
fig1.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig1.add_axes([0.91, 0.08, 0.01, 0.82])
cb1 = fig1.colorbar(ims[1], cax=cbar_ax)
cb1.set_label("Vp (m/s)")
savefig("figures_summary/predicted_Vp_evo_sep.pdf",bbox_inches="tight",pad_inches = 0);
fig2,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(S[survey_indices[(iPrj-1)*3+jPrj], :, :], extent=[0,n*h,m*h,0], vmin=0.0, vmax=0.6);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
if jPrj == 1
axs[iPrj,jPrj].set_ylabel("Depth (m)")
end
if iPrj == 3
axs[iPrj,jPrj].set_xlabel("Distance (m)")
end
# if iPrj ==2 && jPrj == 3
# cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Saturation")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">", s=128)
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
end
# fig2.subplots_adjust(wspace=0.04, hspace=0.042)
fig2.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig2.add_axes([0.91, 0.08, 0.01, 0.82])
cb2 = fig2.colorbar(ims[1], cax=cbar_ax)
cb2.set_label("Saturation")
savefig("figures_summary/predicted_Saturation_evo_sep.pdf",bbox_inches="tight",pad_inches = 0);
# fig3,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
# ims = Array{Any}(undef, 9)
# for iPrj = 1:3
# for jPrj = 1:3
# ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(P[survey_indices[(iPrj-1)*3+jPrj], :, :]*1.4504e-04, extent=[0,n*h,m*h,0], vmin=-2500.0, vmax=500);
# axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 3 || iPrj == 3
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# # if iPrj ==2 && jPrj == 3
# # cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# # cb.set_label("Saturation")
# axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">")
# axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<")
# end
# end
# # fig2.subplots_adjust(wspace=0.04, hspace=0.042)
# fig3.subplots_adjust(wspace=0.02, hspace=0.18)
# cbar_ax = fig3.add_axes([0.91, 0.08, 0.01, 0.82])
# cb3 = fig3.colorbar(ims[1], cax=cbar_ax)
# cb3.set_label("Potential (psi)")
# savefig("figures_summary/Potential_evo_patchy_true.pdf",bbox_inches="tight",pad_inches = 0);
# iter = 100
# Prj_names = ["CO2", "CO2_1src", "CO2_2surveys", "CO2_6surveys"]
# K_name = "/K$iter.txt"
# fig,axs = subplots(2,2, figsize=[18,8], sharex=true, sharey=true)
# for iPrj = 1:2
# for jPrj = 1:2
# # println(ax)
# A = readdlm(Prj_names[(iPrj-1)*2 + jPrj] * K_name)
# im = axs[iPrj,jPrj].imshow(A, extent=[0,n*h,m*h,0]);
# if jPrj == 1 || jPrj == 1
# axs[iPrj,jPrj].set_ylabel("Depth (m)")
# end
# if iPrj == 2 || iPrj == 2
# axs[iPrj,jPrj].set_xlabel("Distance (m)")
# end
# axs[iPrj,jPrj].text(-0.1,1.1,string("(" * Char((iPrj-1)*2 + jPrj+'a'-1) * ")"),transform=axs[iPrj,jPrj].transAxes,size=12,weight="bold")
# end
# end
# fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.9,
# wspace=0.1, hspace=0.2)
# cb_ax = fig.add_axes([0.93, 0.1, 0.02, 0.8])
# cbar = fig.colorbar(im, cax=cb_ax)
# cb = fig.colorbar()
# clim([20, 120])
# cb.set_label("Permeability (md)")
# fig = figure()
# ax = fig.add_subplot(111) # The big subplot
# ax1 = fig.add_subplot(211)
# ax2 = fig.add_subplot(212)
# # Turn off axis lines and ticks of the big subplot
# ax.spines["top"].set_color("none")
# ax.spines["bottom"].set_color("none")
# ax.spines["left"].set_color("none")
# ax.spines["right"].set_color("none")
# ax.tick_params(labelcolor="w", top="off", bottom="off", left="off", right="off")
# # Set common labels
# ax.set_xlabel("common xlabel")
# ax.set_ylabel("common ylabel")
# ax1.set_title('ax1 title')
# ax2.set_title('ax2 title')
# ===================== file boundary =====================
using PyTensorFlow
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.so')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.dylib')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libSatOp = tf.load_op_library('../Ops/Saturation/build/libSatOp.dll')
@tf.custom_gradient
def sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h):
sat = libSatOp.sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
def grad(dy):
return libSatOp.sat_op_grad(dy, sat, s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
return sat, grad
"""
end
sat_op = py"sat_op"
# ===================== file boundary =====================
export laplacian_op, poisson_op, sat_op, upwlap_op, upwps_op, fwi_op, fwi_obs_op, sat_op2, eikonal
OPS_DIR = joinpath(@__DIR__, "../deps/CustomOps")
@doc raw"""
fwi_op(lambda::Union{PyObject, Array{Float64}},mu::Union{PyObject, Array{Float64}},
den::Union{PyObject, Array{Float64}},stf::Union{PyObject, Array{Float64}},
gpu_id::Union{PyObject, Integer},shot_ids::Union{PyObject, Array{T}},para_fname::String) where T<:Integer
Computes the FWI loss function.
- `lambda` : Lame's first parameter (unit: MPa)
- `mu` : Lame's second parameter (shear modulus, unit: MPa)
- `den` : Density
- `stf` : Source time functions
- `gpu_id` : The ID of GPU to run this FWI operator
- `shot_ids` : The source function IDs (determining the location of sources)
- `para_fname` : Parameter file location
"""
function fwi_op(lambda::Union{PyObject, Array{Float64}},mu::Union{PyObject, Array{Float64}},
den::Union{PyObject, Array{Float64}},stf::Union{PyObject, Array{Float64}},
gpu_id::Union{PyObject, Integer},shot_ids::Union{PyObject, Array{T}},para_fname::String) where T<:Integer
lambda = convert_to_tensor(lambda, dtype=Float64)
mu = convert_to_tensor(mu, dtype=Float64)
den = convert_to_tensor(den, dtype=Float64)
stf = convert_to_tensor(stf, dtype=Float64)
gpu_id = convert_to_tensor(gpu_id, dtype=Int32)
shot_ids = convert_to_tensor(shot_ids, dtype=Int32)
fwi_op = load_op_and_grad("$OPS_DIR/FWI/build/libFwiOp", "fwi_op")
fwi_op(lambda,mu,den,stf,gpu_id,shot_ids,para_fname)
end
@doc raw"""
fwi_obs_op(lambda::Union{PyObject, Array{Float64}},mu::Union{PyObject, Array{Float64}},
den::Union{PyObject, Array{Float64}},stf::Union{PyObject, Array{Float64}},
gpu_id::Union{PyObject, Integer},shot_ids::Union{PyObject, Array{T}},para_fname::String) where T<:Integer
Generates the observation data and store them as files which will be used by [`fwi_op`](@ref)
For the meaning of parameters, see [`fwi_op`](@ref).
"""
function fwi_obs_op(lambda::Union{PyObject, Array{Float64}},mu::Union{PyObject, Array{Float64}},
den::Union{PyObject, Array{Float64}},stf::Union{PyObject, Array{Float64}},
gpu_id::Union{PyObject, Integer},shot_ids::Union{PyObject, Array{T}},para_fname::String) where T<:Integer
lambda = convert_to_tensor(lambda, dtype=Float64)
mu = convert_to_tensor(mu, dtype=Float64)
den = convert_to_tensor(den, dtype=Float64)
stf = convert_to_tensor(stf, dtype=Float64)
gpu_id = convert_to_tensor(gpu_id, dtype=Int32)
shot_ids = convert_to_tensor(shot_ids, dtype=Int32)
fwi_obs_op = load_op("$OPS_DIR/FWI/build/libFwiOp", "fwi_obs_op")
fwi_obs_op(lambda, mu, den, stf, gpu_id, shot_ids, para_fname)
end
@doc raw"""
laplacian_op(coef::Union{PyObject, Array{Float64}}, f::Union{PyObject, Array{Float64}},
h::Union{PyObject, Float64}, ρ::Union{PyObject, Float64})
Computes the Laplacian of function $f(\mathbf{x})$; here ($\mathbf{x}=[z\quad x]^T$)
```math
-\nabla\cdot\left(c(\mathbf{x}) \nabla \left(f(\mathbf{x}) -\rho \begin{bmatrix}z \\ 0\end{bmatrix} \right)\right)
```
"""
function laplacian_op(coef::Union{PyObject, Array{Float64}}, f::Union{PyObject, Array{Float64}},
h::Union{PyObject, Float64}, ρ::Union{PyObject, Float64})
coef = convert_to_tensor(coef, dtype=Float64)
f = convert_to_tensor(f, dtype=Float64)
h = convert_to_tensor(h, dtype=Float64)
ρ = convert_to_tensor(ρ, dtype=Float64)
laplacian = load_op_and_grad("$OPS_DIR/Laplacian/build/libLaplacian", "laplacian")
laplacian(coef, f, h, ρ)
end
@doc raw"""
poisson_op(c::Union{PyObject, Float64}, g::Union{PyObject, Float64},
h::Union{PyObject, Float64}, ρ::Union{PyObject, Float64}, index::Union{Integer, PyObject}=0)
Solves the Poisson equation ($\mathbf{x}=[z\quad x]^T$)
$\begin{aligned}
-\nabla\cdot\left(c(\mathbf{x}) \nabla \left(u(\mathbf{x}) -\rho \begin{bmatrix}z \\ 0\end{bmatrix} \right)\right) &= g(\mathbf{x}) & \mathbf{x}\in \Omega\\
\frac{\partial u(\mathbf{x})}{\partial n} &= 0 & \mathbf{x}\in \partial\Omega\\
\end{aligned}$
Here $\Omega=[0,n_zh]\times [0, n_xh]$. The equation is solved using finite difference method, where the step size in each direction is $h$. Mathematically, the solution to the PDE is determined up to a constant. Numerically, we discretize the equation with the scheme
```math
(A+E_{11})\mathbf{u} = \mathbf{f}
```
where $A$ is the finite difference coefficient matrix,
```math
(E_{11})_{ij} = \left\{ \begin{matrix}1 & i=j=1 \\ 0 & \text{ otherwise }\end{matrix}\right.
```
- `index` : `Int32`, when `index=1`, `SparseLU` is used to solve the linear system; otherwise the function invokes algebraic multigrid method from `amgcl`.
"""
function poisson_op(c::Union{PyObject, Array{Float64}}, g::Union{PyObject, Array{Float64}},
h::Union{PyObject, Float64},
ρ::Union{PyObject, Float64}, index::Union{PyObject, Integer}=0)
c = convert_to_tensor(c, dtype=Float64)
g = convert_to_tensor(g, dtype=Float64)
h = convert_to_tensor(h, dtype=Float64)
ρ = convert_to_tensor(ρ, dtype=Float64)
index = convert_to_tensor(index, dtype=Int64)
poisson_op = load_op_and_grad("$OPS_DIR/Poisson/build/libPoissonOp", "poisson_op")
poisson_op(c, g, h, ρ, index)
end
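# A minimal call sketch (hypothetical 10 x 10 grid, unit coefficient, h = 1.0);
# index = 1 selects the SparseLU branch described in the docstring:
# u = poisson_op(ones(10, 10), zeros(10, 10), 1.0, 0.0, 1)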
@doc raw"""
sat_op(s0::Union{PyObject, Array{Float64}},pt::Union{PyObject, Array{Float64}},
permi::Union{PyObject, Array{Float64}},poro::Union{PyObject, Array{Float64}},
qw::Union{PyObject, Array{Float64}},qo::Union{PyObject, Array{Float64}},
muw::Union{PyObject, Float64},muo::Union{PyObject, Float64},
sref::Union{PyObject, Array{Float64}},dt::Union{PyObject, Float64},h::Union{PyObject, Float64})
Solves the following discretized equation
```math
\phi (S_2^{n + 1} - S_2^n) - \nabla \cdot \left( {{m_2}(S_2^{n + 1})K\nabla \Psi _2^n} \right) \Delta t= \left(q_2^n + q_1^n \frac{m_2(S^{n+1}_2)}{m_1(S^{n+1}_2)}\right) \Delta t
```
where
```math
m_2(s) = \frac{s^2}{\mu_w}\qquad m_1(s) = \frac{(1-s)^2}{\mu_o}
```
This is a nonlinear equation and is solved with the Newton-Raphson method.
- `s0` : $n_z\times n_x$, saturation of fluid, i.e., $S_2^n$
- `pt` : $n_z\times n_x$, potential of fluid, i.e., $\Psi_2^n$
- `permi` : $n_z\times n_x$, permeability, i.e., $K$
- `poro` : $n_z\times n_x$, porosity, i.e., $\phi$
- `qw` : $n_z\times n_x$, injection or production rate of fluid 1, $q_2^n$
- `qo` : $n_z\times n_x$, injection or production rate of fluid 2, $q_1^n$
- `muw` : viscosity of fluid 1, i.e., $\mu_w$
- `muo` : viscosity of fluid 2, i.e., $\mu_o$
- `sref` : $n_z\times n_x$, initial guess for $S_2^{n+1}$
- `dt` : Time step size
- `h` : Spatial step size
"""
function sat_op(s0::Union{PyObject, Array{Float64}},pt::Union{PyObject, Array{Float64}},
permi::Union{PyObject, Array{Float64}},poro::Union{PyObject, Array{Float64}},
qw::Union{PyObject, Array{Float64}},qo::Union{PyObject, Array{Float64}},
muw::Union{PyObject, Float64},muo::Union{PyObject, Float64},
sref::Union{PyObject, Array{Float64}},dt::Union{PyObject, Float64},h::Union{PyObject, Float64})
s0 = convert_to_tensor(s0, dtype=Float64)
pt = convert_to_tensor(pt, dtype=Float64)
permi = convert_to_tensor(permi, dtype=Float64)
poro = convert_to_tensor(poro, dtype=Float64)
qw = convert_to_tensor(qw, dtype=Float64)
qo = convert_to_tensor(qo, dtype=Float64)
muw = convert_to_tensor(muw, dtype=Float64)
muo = convert_to_tensor(muo, dtype=Float64)
sref = convert_to_tensor(sref, dtype=Float64)
dt = convert_to_tensor(dt, dtype=Float64)
h = convert_to_tensor(h, dtype=Float64)
sat_op = load_op_and_grad("$OPS_DIR/Saturation/build/libSatOp", "sat_op")
sat_op(s0,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
end
@doc raw"""
sat_op2(s0::Union{PyObject, Array{Float64}},
dporodt::Union{PyObject, Array{Float64}},
pt::Union{PyObject, Array{Float64}},
permi::Union{PyObject, Array{Float64}},
poro::Union{PyObject, Array{Float64}},
qw::Union{PyObject, Array{Float64}},
qo::Union{PyObject, Array{Float64}},
muw::Union{PyObject, Float64},
muo::Union{PyObject, Float64},
sref::Union{PyObject, Array{Float64}},
dt::Union{PyObject, Float64},
h::Union{PyObject, Float64})
Solves the following discretized equation
```math
\phi (S_2^{n + 1} - S_2^n) + \Delta t \dot \phi S_2^{n+1} - \nabla \cdot \left( {{m_2}(S_2^{n + 1})K\nabla \Psi _2^n} \right) \Delta t= \left(q_2^n + q_1^n \frac{m_2(S^{n+1}_2)}{m_1(S^{n+1}_2)}\right) \Delta t
```
where
```math
m_2(s) = \frac{s^2}{\mu_w}\qquad m_1(s) = \frac{(1-s)^2}{\mu_o}
```
This is a nonlinear equation and is solved with the Newton-Raphson method.
- `s0` : $n_z\times n_x$, saturation of fluid, i.e., $S_2^n$
- `dporodt` : $n_z\times n_x$, rate of porosity, $\dot \phi$
- `pt` : $n_z\times n_x$, potential of fluid, i.e., $\Psi_2^n$
- `permi` : $n_z\times n_x$, permeability, i.e., $K$
- `poro` : $n_z\times n_x$, porosity, i.e., $\phi$
- `qw` : $n_z\times n_x$, injection or production rate of fluid 1, $q_2^n$
- `qo` : $n_z\times n_x$, injection or production rate of fluid 2, $q_1^n$
- `muw` : viscosity of fluid 1, i.e., $\mu_w$
- `muo` : viscosity of fluid 2, i.e., $\mu_o$
- `sref` : $n_z\times n_x$, initial guess for $S_2^{n+1}$
- `dt` : Time step size
- `h` : Spatial step size
"""
function sat_op2(s0::Union{PyObject, Array{Float64}},
dporodt::Union{PyObject, Array{Float64}},
pt::Union{PyObject, Array{Float64}},
permi::Union{PyObject, Array{Float64}},
poro::Union{PyObject, Array{Float64}},
qw::Union{PyObject, Array{Float64}},
qo::Union{PyObject, Array{Float64}},
muw::Union{PyObject, Float64},
muo::Union{PyObject, Float64},
sref::Union{PyObject, Array{Float64}},
dt::Union{PyObject, Float64},
h::Union{PyObject, Float64})
s0 = convert_to_tensor(s0, dtype=Float64)
dporodt = convert_to_tensor(dporodt, dtype=Float64)
pt = convert_to_tensor(pt, dtype=Float64)
permi = convert_to_tensor(permi, dtype=Float64)
poro = convert_to_tensor(poro, dtype=Float64)
qw = convert_to_tensor(qw, dtype=Float64)
qo = convert_to_tensor(qo, dtype=Float64)
muw = convert_to_tensor(muw, dtype=Float64)
muo = convert_to_tensor(muo, dtype=Float64)
sref = convert_to_tensor(sref, dtype=Float64)
dt = convert_to_tensor(dt, dtype=Float64)
h = convert_to_tensor(h, dtype=Float64)
saturation_ = load_op_and_grad("$OPS_DIR/Saturation2/build/libSaturation","saturation")
saturation_(s0,dporodt,pt,permi,poro,qw,qo,muw,muo,sref,dt,h)
end
@doc raw"""
upwlap_op(perm::Union{PyObject, Array{Float64}},
mobi::Union{PyObject, Array{Float64}},
func::Union{PyObject, Array{Float64}},
h::Union{PyObject, Float64},
rhograv::Union{PyObject, Float64})
Computes the Laplacian of function $f(\mathbf{x})$; here $\mathbf{x}=[z\quad x]^T$.
```math
\nabla\cdot\left(m(\mathbf{x})K(\mathbf{x}) \nabla \left(f(\mathbf{x}) -\rho \begin{bmatrix}z \\ 0\end{bmatrix} \right)\right)
```
The permeability on the computational grid is computed with the harmonic mean;
the mobility is computed with an upwind scheme.
- `perm` : $n_z\times n_x$, permeability of fluid, i.e., $K$
- `mobi` : $n_z\times n_x$, mobility of fluid, i.e., $m$
- `func` : $n_z\times n_x$, potential of fluid, i.e., $f$
- `h` : `Float64`, spatial step size
- `rhograv` : `Float64`, i.e., $\rho$
"""
function upwlap_op(perm::Union{PyObject, Array{Float64}},
mobi::Union{PyObject, Array{Float64}},
func::Union{PyObject, Array{Float64}},
h::Union{PyObject, Float64},rhograv::Union{PyObject, Float64})
perm = convert_to_tensor(perm, dtype=Float64)
mobi = convert_to_tensor(mobi, dtype=Float64)
func = convert_to_tensor(func, dtype=Float64)
h = convert_to_tensor(h, dtype=Float64)
rhograv = convert_to_tensor(rhograv, dtype=Float64)
upwlap_op = load_op_and_grad("$OPS_DIR/Upwlap/build/libUpwlapOp", "upwlap_op")
upwlap_op(perm,mobi,func,h,rhograv)
end
@doc raw"""
upwps_op(perm::Union{PyObject, Array{Float64}},mobi::Union{PyObject, Array{Float64}},
src,funcref,h::Union{PyObject, Float64},rhograv::Union{PyObject, Float64},index::Union{PyObject, Integer})
Solves the Poisson equation
```math
-\nabla\cdot\left(m(\mathbf{x})K(\mathbf{x}) \nabla \left(u(\mathbf{x}) -\rho \begin{bmatrix}z \\ 0\end{bmatrix} \right)\right) = g(\mathbf{x})
```
See [`poisson_op`](@ref) for a detailed description of the discretization.
- `perm` : $n_z\times n_x$, permeability of fluid, i.e., $K$
- `mobi` : $n_z\times n_x$, mobility of fluid, i.e., $m$
- `src` : $n_z\times n_x$, source function, i.e., $g(\mathbf{x})$
- `funcref` : $n_z\times n_x$, currently not used
- `h` : `Float64`, spatial step size
- `rhograv` : `Float64`, i.e., $\rho$
- `index` : `Int32`, when `index=1`, `SparseLU` is used to solve the linear system; otherwise the function invokes algebraic multigrid method from `amgcl`.
"""
function upwps_op(perm::Union{PyObject, Array{Float64}},mobi::Union{PyObject, Array{Float64}},
src,funcref,h::Union{PyObject, Float64},rhograv::Union{PyObject, Float64},index::Union{PyObject, Integer})
perm = convert_to_tensor(perm, dtype=Float64)
mobi = convert_to_tensor(mobi, dtype=Float64)
funcref = convert_to_tensor(funcref, dtype=Float64)
h = convert_to_tensor(h, dtype=Float64)
rhograv = convert_to_tensor(rhograv, dtype=Float64)
index = convert_to_tensor(index, dtype=Int64)
upwps_op = load_op_and_grad("$OPS_DIR/Upwps/build/libUpwpsOp", "upwps_op")
upwps_op(perm,mobi,src,funcref,h,rhograv,index)
end
export time_fractional_op, time_fractional_t_op
@doc raw"""
time_fractional_t_op(i::Union{Integer, PyObject}, ta::PyObject,
α::Union{Float64, PyObject}, Δt::Union{Float64, PyObject})
Returns the coefficients for the time fractional derivative
```math
{}_0^CD_t^\alpha f(t) = \frac{1}{\Gamma(1-\alpha)}\int_0^t \frac{f'(\tau)d\tau}{(t-\tau)^\alpha}
```
The discretization scheme used here is
$\begin{aligned}
& {}_0^CD_\tau^\alpha u(\tau_n) \\
= &\frac{\Delta \tau^{-\alpha}}{\Gamma(2-\alpha)}\left[G_0 u_n - \sum_{k=1}^{n-1}(G_{n-k-1}-G_{n-k})u_k + G_n u_0 \right] + \mathcal{O}(\Delta \tau^{2-\alpha})
\end{aligned}$
Here
```math
G_m = (m+1)^{1-\alpha} - m^{1-\alpha}, \quad m\geq 0, \quad 0<\alpha<1
```
The function returns
- `c` : $\frac{\Delta \tau^{-\alpha}}{\Gamma(2-\alpha)}$
- `cum` : $- \sum_{k=1}^{n-1}(G_{n-k-1}-G_{n-k})u_k + G_n u_0$
"""
function time_fractional_t_op(i::Union{Integer, PyObject}, ta::PyObject,
α::Union{Float64, PyObject}, Δt::Union{Float64, PyObject})
α = convert_to_tensor(α, dtype=Float64)
i = convert_to_tensor(i, dtype=Int32)
function aki(k,i)
a = cast(i-k, Float64)
return (a+2.0)^(1-α)-2*(a+1.0)^(1-α)+a^(1-α)
end
function cond1(k, i, ta, cum)
k<=i
end
function body1(k, i, ta, cum)
u = read(ta, k)
return k+1, i, ta, cum+aki(k,i)*u
end
cum = tf.zeros_like(read(ta, 1))
k = constant(1, dtype=Int32)
_, _, _, cum = while_loop(cond1, body1, [k, i, ta, cum])
c = Δt^(-α)/exp(tf.math.lgamma(2-α))
return c, cum
end
@doc raw"""
time_fractional_op(α::Union{Float64, PyObject}, f_fun::Function,
T::Float64, u0::Union{Float64, Array{Float64}, PyObject},
NT::Int64, θ=missing)
Returns a $(NT+1)\times \texttt{size}(u_0)$ solution array. The function solves the following time fractional differential equation with explicit scheme
```math
{}_0^CD_t^\alpha u(t) = f(t, u, \theta)
```
"""
function time_fractional_op(α::Union{Float64, PyObject}, f_fun::Function,
T::Float64, u0::Union{Float64, Array{Float64}, PyObject},
NT::Int64, θ=missing)
Δt = T/NT
ta = TensorArray(NT+1)
ta = write(ta, 1, convert_to_tensor(u0))
function condition(i, ta)
i<=NT+1
end
function body(i, ta)
# time = (i-1)*Δt,
u = read(ta, i-1)
k = cast(i,Float64)-1
c, cum = time_fractional_t_op(k, ta, α, Δt)
F = f_fun(k*Δt, u, θ)
ta = write(ta, i, F/c - cum)
i+1, ta
end
i = constant(2, dtype=Int32)
_, out = while_loop(condition, body, [i, ta])
return stack(out)
end
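# Sketch (assumed test problem): the fractional relaxation equation
# {}_0^C D_t^0.8 u = -u with u(0) = 1 on [0, 1], discretized with 100 steps:
# u = time_fractional_op(0.8, (t, u, θ) -> -u, 1.0, 1.0, 100)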
@doc raw"""
eikonal(f::Union{Array{Float64}, PyObject},
srcx::Int64,srcy::Int64,h::Float64)
Solves the Eikonal equation
$$|\nabla u(x)| = f(x)$$
where $f(x)$ is the reciprocal of speeds.
"""
function eikonal(f::Union{Array{Float64}, PyObject},
srcx::Int64,srcy::Int64,h::Float64)
n_, m_ = size(f) # m width, n depth
n = n_-1
m = m_-1
eikonal_ = load_op_and_grad("$OPS_DIR/Eikonal/build/libEikonal","eikonal")
f,srcx,srcy,m,n,h = convert_to_tensor([f,srcx,srcy,m,n,h], [Float64,Int64,Int64,Int64,Int64,Float64])
f = reshape(f, (-1,))
u = eikonal_(f,srcx,srcy,m,n,h)
u = set_shape(u, (length(f),))
reshape(u, (n_, m_))
end
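# Sketch (assumed grid): traveltimes in a constant 2000 m/s medium on a
# 101 x 101 grid with 10 m spacing and a source at grid index (50, 50):
# f = ones(101, 101) ./ 2000.0   # f is the reciprocal of speed
# u = eikonal(f, 50, 50, 10.0)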
# ===================== file boundary =====================
export FWI, FWIExample, compute_observation, plot, compute_misfit
@with_kw mutable struct FWI
nz::Int64 = 134
nx::Int64 = 384
dz::Float64 = 24.
dx::Float64 = 24.
dt::Float64 = 0.0025
nSteps::Int64 = 2000
f0::Float64 = 4.5
nPml::Int64 = 32
nPad::Int64 = 32 - mod((nz+2*nPml), 32)
nz_pad::Int64 = nz + 2*nPml + nPad
nx_pad::Int64 = nx + 2*nPml
para_fname::String = "para_file.json"
survey_fname::String = "survey_file.json"
data_dir_name::String = "Data"
WORKSPACE::String = mktempdir()
mask::Union{Missing, Array{Float64, 2}, PyObject} = missing
mask_neg::Union{Missing, Array{Float64, 2}, PyObject} = missing
ind_src_x::Union{Missing, Array{Int64, 1}} = missing
ind_src_z::Union{Missing, Array{Int64, 1}} = missing
ind_rec_x::Union{Missing, Array{Int64, 1}} = missing
ind_rec_z::Union{Missing, Array{Int64, 1}} = missing
end
"""
FWI(nz::Int64, nx::Int64, dz::Float64, dx::Float64, nSteps::Int64, dt::Float64;
ind_src_z::Array{Int64, 1}, ind_src_x::Array{Int64, 1}, ind_rec_z::Array{Int64, 1}, ind_rec_x::Array{Int64, 1},
kwargs...)
Creates a `FWI` structure that holds geometry data and settings.
Here `nz = n + 1`, `nx = m + 1`, `dz` and `dx` are mesh sizes.
`nSteps` is the number of total iterations. `dt` is the time step.
`ind_src_z` and `ind_src_x` are arrays of source locations.
`ind_rec_z` and `ind_rec_x` are arrays of receiver locations.
"""
function FWI(nz::Int64, nx::Int64, dz::Float64, dx::Float64, nSteps::Int64, dt::Float64;
ind_src_z::Array{Int64, 1}, ind_src_x::Array{Int64, 1}, ind_rec_z::Array{Int64, 1}, ind_rec_x::Array{Int64, 1},
kwargs...)
fwi = FWI(nx = nx, nz = nz, dx = dx, dz = dz, nSteps = nSteps, dt = dt;
ind_src_x = ind_src_x,
ind_src_z = ind_src_z,
ind_rec_x = ind_rec_x,
ind_rec_z = ind_rec_z,
kwargs...)
Mask = zeros(fwi.nz_pad, fwi.nx_pad)
Mask[fwi.nPml+1:fwi.nPml+nz, fwi.nPml+1:fwi.nPml+nx] .= 1.0
Mask[fwi.nPml+1:fwi.nPml+10,:] .= 0.0
fwi.mask = constant(Mask)
fwi.mask_neg = 1 - constant(Mask)
@assert length(ind_rec_x)==length(ind_rec_z)
@assert length(ind_src_x)==length(ind_src_z)
paraGen(fwi.nz_pad, fwi.nx_pad, dz, dx, nSteps, dt, fwi.f0, fwi.nPml, fwi.nPad,
joinpath(fwi.WORKSPACE,fwi.para_fname),
joinpath(fwi.WORKSPACE,fwi.survey_fname),
joinpath(fwi.WORKSPACE,fwi.data_dir_name))
surveyGen(ind_src_z, ind_src_x, ind_rec_z, ind_rec_x, joinpath(fwi.WORKSPACE,fwi.survey_fname))
return fwi
end
function PyPlot.:plot(fwi::FWI)
close("all")
x1, y1, x2, y2 = 0.0, 0.0, fwi.nx_pad*fwi.dx, fwi.nz_pad*fwi.dz
plot(LinRange(x1, x2, 100), y1*ones(100), "k")
plot(LinRange(x1, x2, 100), y2*ones(100), "k")
plot(x1*ones(100), LinRange(y1, y2, 100), "k")
plot(x2*ones(100), LinRange(y1, y2, 100), "k")
x1, y1, x2, y2 = fwi.nPml * fwi.dx, fwi.nPml * fwi.dz, (fwi.nx_pad - fwi.nPml) * fwi.dx, (fwi.nz_pad - fwi.nPml) * fwi.dz
plot(LinRange(x1, x2, 100), y1*ones(100), "g", label="PML Boundary")
plot(LinRange(x1, x2, 100), y2*ones(100), "g")
plot(x1*ones(100), LinRange(y1, y2, 100), "g")
plot(x2*ones(100), LinRange(y1, y2, 100), "g")
plot( (fwi.nPml .+ fwi.ind_rec_x .- 1) * fwi.dx, (fwi.nPml + fwi.nPad .+ fwi.ind_rec_z .- 1) * fwi.dz, "r^", label="Receiver", markersize=1)
plot( (fwi.nPml .+ fwi.ind_src_x .- 1) * fwi.dx, (fwi.nPml + fwi.nPad .+ fwi.ind_src_z .- 1) * fwi.dz, "bv", label="Source", markersize=1)
gca().invert_yaxis()
xlabel("Distance")
ylabel("Depth")
legend()
axis("equal")
end
function FWIExample()
ind_src_x = collect(4:8:384)
ind_src_z = 2ones(Int64, size(ind_src_x))
ind_rec_x = collect(3:381)
ind_rec_z = 2ones(Int64, size(ind_rec_x))
FWI(134,384, 24., 24., 2000, 0.0025;
ind_src_x = ind_src_x,
ind_src_z = ind_src_z,
ind_rec_x = ind_rec_x,
ind_rec_z = ind_rec_z)
end
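# Usage sketch (the velocity and density values below are assumptions, not
# repository defaults):
# fwi = FWIExample()
# cp = 3000.0 * ones(fwi.nz, fwi.nx); cs = zeros(fwi.nz, fwi.nx); ρ = 2200.0 * ones(fwi.nz, fwi.nx)
# stf = sourceGene(fwi.f0, fwi.nSteps, fwi.dt)[1, :]
# sess = Session(); init(sess)
# d = compute_observation(sess, fwi, cp, cs, ρ, stf, [1])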
@doc raw"""
compute_observation(sess::PyObject, fwi::FWI,
cp::Union{Array{Float64}, PyObject},
cs::Union{Array{Float64}, PyObject},
ρ::Union{Array{Float64}, PyObject},
stf_array::Union{Array{Float64}, PyObject},
shot_ids::Union{Missing, Array{<:Integer}} = missing;
gpu_id::Int64 = 0)
Computes the observations using given parameters. Note that `shot_ids` are 1-based. If it is missing, all shots are used.
"""
function compute_observation(sess::PyObject, fwi::FWI,
cp::Union{Array{Float64}, PyObject},
cs::Union{Array{Float64}, PyObject},
ρ::Union{Array{Float64}, PyObject},
stf_array::Union{Array{Float64}, PyObject},
shot_ids::Union{Missing, Array{<:Integer}} = missing;
gpu_id::Int64 = 0)
cp_pad, cs_pad, ρ_pad = try_pad(fwi, cp, cs, ρ)
stf_array = constant(stf_array)
if length(size(stf_array))==1
stf_array = repeat(stf_array', length(fwi.ind_src_z), 1)
end
λ_pad, μ_pad = velocity_to_moduli(cp_pad, cs_pad, ρ_pad)
if ismissing(shot_ids)
shot_ids = collect(1:length(fwi.ind_src_x))
end
shot_ids = shot_ids .- 1
shot_ids_ = constant(shot_ids, dtype=Int32)
data = fwi_obs_op(λ_pad, μ_pad, ρ_pad, stf_array, gpu_id, shot_ids_, joinpath(fwi.WORKSPACE, fwi.para_fname) )
run(sess, data)
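# fwi_obs_op writes each synthetic gather to WORKSPACE/Data/Shot<id>.bin as
# Float32; read the files back into a Float64 array (nshots x nSteps x nrec).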
data = zeros(length(shot_ids), fwi.nSteps, length(fwi.ind_rec_z))
for i = 1:length(shot_ids)
A = read("$(fwi.WORKSPACE)/Data/Shot$(shot_ids[i]).bin")
data[i,:,:] = reshape(reinterpret(Float32,A),(fwi.nSteps ,length(fwi.ind_rec_z)))
end
data
end
@doc raw"""
compute_misfit(fwi::FWI,
cp::Union{Array{Float64}, PyObject},
cs::Union{Array{Float64}, PyObject},
ρ::Union{Array{Float64}, PyObject},
stf_array::Union{Array{Float64}, PyObject},
shot_ids::Union{Array{Int64}, PyObject, Missing} = missing;
gpu_id::Int64 = 0, is_masked::Bool = false,
cp_ref::Union{Array{Float64}, PyObject, Missing} = missing,
cs_ref::Union{Array{Float64}, PyObject, Missing} = missing,
ρ_ref::Union{Array{Float64}, PyObject, Missing} = missing)
Computes the misfit function for the simulation parameters $c_p$, $c_s$, $\rho$, and source time functions `stf_array`
- If `is_masked` is false, `compute_misfit` will add the mask `fwi.mask` to all variables.
- `gpu_id` is an integer in {0,1,2,...,#gpus-1}
- `shot_ids` is 1-based. If it is missing, all shots are used.
"""
function compute_misfit(fwi::FWI,
cp::Union{Array{Float64}, PyObject},
cs::Union{Array{Float64}, PyObject},
ρ::Union{Array{Float64}, PyObject},
stf_array::Union{Array{Float64}, PyObject},
shot_ids::Union{Array{Int64}, PyObject, Missing} = missing;
gpu_id::Int64 = 0, is_masked::Bool = false,
cp_ref::Union{Array{Float64}, PyObject, Missing} = missing,
cs_ref::Union{Array{Float64}, PyObject, Missing} = missing,
ρ_ref::Union{Array{Float64}, PyObject, Missing} = missing)
cp_pad, cs_pad, ρ_pad = try_pad(fwi, cp, cs, ρ)
if !ismissing(cp_ref)
cp_ref, cs_ref, ρ_ref = try_pad(fwi, cp_ref, cs_ref, ρ_ref)
end
cp_masked, cs_masked,ρ_masked = cp_pad, cs_pad, ρ_pad
if !is_masked
cp_masked = cp_pad .* fwi.mask + cp_ref .* fwi.mask_neg
cs_masked = cs_pad .* fwi.mask + cs_ref .* fwi.mask_neg
ρ_masked = ρ_pad .* fwi.mask + ρ_ref .* fwi.mask_neg
end
λ_masked, μ_masked = velocity_to_moduli(cp_masked, cs_masked,ρ_masked)
stf_array = constant(stf_array)
if length(size(stf_array))==1
stf_array = repeat(stf_array', length(fwi.ind_src_z), 1)
end
if ismissing(shot_ids)
shot_ids = collect(1:length(fwi.ind_src_x))
end
shot_ids = constant(shot_ids, dtype=Int32) - 1
misfit = fwi_op(λ_masked, μ_masked, ρ_masked, stf_array, gpu_id, shot_ids, joinpath(fwi.WORKSPACE, fwi.para_fname))
end
#------------------------------------------------------------------------------------
function padding(fwi::FWI, cp::Union{PyObject, Array{Float64,2}})
cp = constant(cp)
nz, nx, nPml, nPad = fwi.nz, fwi.nx, fwi.nPml, fwi.nPad
nz_orig, nx_orig = size(cp)
if nz_orig!=nz || nx_orig!=nx
@info "resize image to required size"
tran_cp = tf.reshape(cp, (1, nz_orig, nx_orig, 1))
cp = squeeze(tf.image.resize_bilinear(tran_cp, (nz, nx)))
end
# pad the (possibly resized) model; the PML/pad region mirrors the interior
cp_pad = tf.pad(cp, [nPml (nPml+nPad); nPml nPml], "SYMMETRIC")
cp_pad = cast(cp_pad, Float64)
return cp_pad
end
function padding(fwi::FWI, cp::Union{PyObject, Array{Float64,2}}, cq...)
o = Array{PyObject}(undef, 1 + length(cq))
o[1] = padding(fwi, cp)
for i = 1:length(cq)
o[i+1] = padding(fwi, cq[i])
end
o
end
function try_pad(fwi::FWI, cp::Union{PyObject, Array{Float64,2}})
cp = convert_to_tensor(cp, dtype=Float64)
if size(cp)!=(fwi.nz_pad, fwi.nx_pad)
return padding(fwi, cp)
else
return cp
end
end
function try_pad(fwi::FWI, cp::Union{PyObject, Array{Float64,2}},cq...)
o = Array{PyObject}(undef, 1 + length(cq))
o[1] = try_pad(fwi, cp)
for i = 1:length(cq)
o[i+1] = try_pad(fwi, cq[i])
end
o
end
# ===================== file boundary =====================
module FwiFlow
using Conda
using PyCall
using Reexport
@reexport using ADCME
using LinearAlgebra
using PyPlot
using Random
using JSON
using DataStructures
using Dierckx
using Parameters
using MAT
export DATADIR
DATADIR = "$(@__DIR__)/../docs/data"
function ADCME.:Session(args...;kwargs...)
config = tf.ConfigProto(
device_count = Dict("GPU"=> 0) # do not use any GPU devices for all ops except FWI
)
sess = tf.Session(config=config)
end
include("Core.jl")
include("Utils.jl")
include("FWI.jl")
end
# ===================== file boundary =====================
export paraGen, surveyGen, padding, velocity_to_moduli
"""
paraGen(nz::Int64, nx::Int64, dz::Real, dx::Real, nSteps::Int64, dt::Real,
f0::Real, nPml::Int64, nPad::Int64, para_fname::String, survey_fname::String, data_dir_name::String;
if_win::Bool=false, filter_para=nothing, if_src_update::Bool=false, scratch_dir_name::String="")
Generates a parameter file consumed by [`fwi_op`](@ref) and [`fwi_obs_op`](@ref)
- `nSteps` : Number of time steps in the simulation
- `dt` : Time step size
- `f0` : Source reference frequency for CPML (usually chosen as the dominant frequency)
- `nPml` : Number of points in the CPML boundary condition
- `nPad` : Padding width in the z direction, in order to make the GPU memory coalesced
- `if_win` : Whether to apply a window function to data gathers
- `filter_para` : Band-pass filter parameters applied to the data (`nothing` disables filtering)
- `if_src_update` : Whether to update source signatures
- `survey_fname` : The name of the survey file
- `data_dir_name` : Location for storing observation data (generated by [`fwi_obs_op`](@ref), in the form of `.bin` files)
- `scratch_dir_name` : Temporary data location. If present, will output intermediate files for QC.
"""
function paraGen(nz::Int64, nx::Int64, dz::Real, dx::Real, nSteps::Int64, dt::Real,
f0::Real, nPml::Int64, nPad::Int64, para_fname::String, survey_fname::String, data_dir_name::String;
if_win::Bool=false, filter_para=nothing, if_src_update::Bool=false, scratch_dir_name::String="")
para = OrderedDict()
para["nz"] = nz
para["nx"] = nx
para["dz"] = dz
para["dx"] = dx
para["nSteps"] = nSteps
para["dt"] = dt
para["f0"] = f0
para["nPoints_pml"] = nPml
para["nPad"] = nPad
if if_win != false
para["if_win"] = true
end
if filter_para != nothing
para["filter"] = filter_para
end
if if_src_update != false
para["if_src_update"] = true
end
para["survey_fname"] = survey_fname
para["data_dir_name"] = data_dir_name
if !isdir(data_dir_name)
mkdir(data_dir_name)
end
# if nStepsWrap != nothing
# para["nStepsWrap"] = nStepsWrap
# end
if(scratch_dir_name != "")
para["scratch_dir_name"] = scratch_dir_name
if !isdir(scratch_dir_name)
mkdir(scratch_dir_name)
end
end
para_string = JSON.json(para)
open(para_fname,"w") do f
write(f, para_string)
end
end
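# Usage sketch (argument values are illustrative, mirroring the FWI tutorial):
# paraGen(nz_pad, nx_pad, dz, dx, nSteps, dt, 4.5, nPml, nPad,
#         "para_file.json", "survey_file.json", "Data")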
# all shots share the same number of receivers
@doc raw"""
surveyGen(z_src::Array{T}, x_src::Array{T},
z_rec::Array{T}, x_rec::Array{T}, survey_fname::String; Windows=nothing, Weights=nothing) where T<:Integer
Generates the survey parameter file.
- `z_src` : $z$ coordinates of sources
- `x_src` : $x$ coordinates of sources
- `z_rec` : $z$ coordinates of receivers
- `x_rec` : $x$ coordinates of receivers
- `survey_fname` : The name of the survey file
- `Windows` : Optional per-shot time windows; `Windows["shot$(i-1)"]` supplies the `:start` and `:end` sample indices
- `Weights` : Optional per-shot trace weights; `Weights["shot$(i-1)"]` supplies `:weights`
"""
function surveyGen(z_src::Array{T}, x_src::Array{T},
z_rec::Array{T}, x_rec::Array{T}, survey_fname::String; Windows=nothing, Weights=nothing) where T<:Integer
nsrc = length(x_src)
nrec = length(x_rec)
survey = OrderedDict()
survey["nShots"] = nsrc
for i = 1:nsrc
shot = OrderedDict()
shot["z_src"] = z_src[i]
shot["x_src"] = x_src[i]
shot["nrec"] = nrec
shot["z_rec"] = z_rec
shot["x_rec"] = x_rec
if Windows != nothing
shot["win_start"] = Windows["shot$(i-1)"][:start]
shot["win_end"] = Windows["shot$(i-1)"][:end]
end
if Weights != nothing
# shot["weights"] = Int64.(Weights["shot$(i-1)"][:weights])
shot["weights"] = Weights["shot$(i-1)"][:weights]
end
survey["shot$(i-1)"] = shot
end
survey_string = JSON.json(survey)
open(survey_fname,"w") do f
write(f, survey_string)
end
end
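# Usage sketch (illustrative index vectors):
# surveyGen(ind_src_z, ind_src_x, ind_rec_z, ind_rec_x, "survey_file.json")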
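# Usage sketch (illustrative index vectors):
# surveyGen(ind_src_z, ind_src_x, ind_rec_z, ind_rec_x, "survey_file.json")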
"""
sourceGene(f::Float64, nStep::Integer, delta_t::Float64)
Generates a time-integrated Ricker wavelet.
"""
function sourceGene(f::Float64, nStep::Integer, delta_t::Float64)
# Ricker wavelet generation and integration for source
# Dongzhuo Li @ Stanford
# May, 2015
e = pi*pi*f*f;
t_delay = 1.2/f;
source = Matrix{Float64}(undef, 1, nStep)
for it = 1:nStep
source[it] = (1-2*e*(delta_t*(it-1)-t_delay)^2)*exp(-e*(delta_t*(it-1)-t_delay)^2);
end
for it = 2:nStep
source[it] = source[it] + source[it-1];
end
source = source * delta_t;
end
"""
Get `vs` upper and lower bounds from a log point cloud
- 1st row of Bounds: `vp ref line`
- 2nd row of Bounds: `vs high ref line`
- 3rd row of Bounds: `vs low ref line`
"""
function cs_bounds_cloud(cpImg, Bounds)
cs_high_itp = Spline1D(Bounds[1,:], Bounds[2,:]; k=1)
cs_low_itp = Spline1D(Bounds[1,:], Bounds[3,:]; k=1)
csHigh = zeros(size(cpImg))
csLow = zeros(size(cpImg))
for i = 1:size(cpImg, 1)
for j = 1:size(cpImg, 2)
csHigh[i,j] = cs_high_itp(cpImg[i,j])
csLow[i,j] = cs_low_itp(cpImg[i,j])
end
end
return csHigh, csLow
end
"""
klauderWave(fmin, fmax, t_sweep, nStepTotal, nStepDelay, delta_t)
Generates a Klauder wavelet.
"""
function klauderWave(fmin, fmax, t_sweep, nStepTotal, nStepDelay, delta_t)
nStep = nStepTotal - nStepDelay
source = Matrix{Float64}(undef, 1, nStep+nStep-1)
source_half = Matrix{Float64}(undef, 1, nStep-1)
K = (fmax - fmin) / t_sweep
f0 = (fmin + fmax) / 2.0
t_axis = delta_t:delta_t:(nStep-1)*delta_t
source_half = sin.(pi * K .* t_axis .* (t_sweep .- t_axis)) .* cos.(2.0 * pi * f0 .* t_axis) ./ (pi*K.*t_axis*t_sweep)
for i = 1:nStep-1
source[i] = source_half[end-i+1]
end
for i = nStep+1:2*nStep-1
source[i] = source_half[i-nStep]
end
source[nStep] = 1.0
source_crop = source[:,nStep-nStepDelay:end]
return source_crop
end
@doc raw"""
padding(cp, cs, den, nz_orig, nx_orig, nz, nx, nPml, nPad)
Adds PML boundaries to `cp`, `cs` and `den`.
The original `nz_orig x nx_orig` grid is resampled to `nz x nx`.
# Note
`nPad` is used to make the number of nodes in the $z$ direction a multiple of 32 (for coalesced memory access).
"""
function padding(cp, cs, den, nz_orig, nx_orig, nz, nx, nPml, nPad)
tran_cp = tf.reshape(cp, (1, nz_orig, nx_orig, 1))
tran_cs = tf.reshape(cs, (1, nz_orig, nx_orig, 1))
tran_den = tf.reshape(den, (1, nz_orig, nx_orig, 1))
tran_cp = squeeze(tf.image.resize_bilinear(tran_cp, (nz, nx)))
tran_cs = squeeze(tf.image.resize_bilinear(tran_cs, (nz, nx)))
tran_den = squeeze(tf.image.resize_bilinear(tran_den, (nz, nx)))
# cp_pad = tf.pad(tran_cp, [nPml (nPml+nPad); nPml nPml], constant_values=5500.0)
# cs_pad = tf.pad(tran_cs, [nPml (nPml+nPad); nPml nPml], constant_values=0.0)
# den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], constant_values=2500.0)
cp_pad = tf.pad(tran_cp, [nPml (nPml+nPad); nPml nPml], "SYMMETRIC")
cs_pad = tf.pad(tran_cs, [nPml (nPml+nPad); nPml nPml], "SYMMETRIC")
den_pad = tf.pad(tran_den, [nPml (nPml+nPad); nPml nPml], "SYMMETRIC")
cp_pad = cast(cp_pad, Float64)
cs_pad = cast(cs_pad, Float64)
den_pad = cast(den_pad, Float64)
return cp_pad, cs_pad, den_pad
end
"""
velocity_to_moduli(cp::Union{Array{Float64, 2}, PyObject},
cs::Union{Array{Float64, 2}, PyObject},
den::Union{Array{Float64, 2}, PyObject})
Computes the Lamé parameters using velocities.
"""
function velocity_to_moduli(cp::Union{Array{Float64, 2}, PyObject},
cs::Union{Array{Float64, 2}, PyObject},
den::Union{Array{Float64, 2}, PyObject})
lambda = (cp.*cp - 2.0 * cs.*cs) .* den / 1e6
mu = cs.*cs .* den / 1e6
return lambda, mu
end | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 2120 | using FwiFlow
using ADCME
using MAT
using PyPlot
using LinearAlgebra
oz = 0.0
ox = 0.0
dz_orig = 24.0
dx_orig = 24.0
nz_orig = 134
nx_orig = 384
dz = dz_orig/1.0
dx = dx_orig/1.0
nz = Int64(round((dz_orig * nz_orig) / dz));
nx = Int64(round((dx_orig * nx_orig) / dx))
dt = 0.0025
nSteps = 2000
ind_src_x = collect(4:8:384)
ind_src_z = 2ones(Int64, size(ind_src_x))
ind_rec_x = collect(3:381)
ind_rec_z = 2ones(Int64, size(ind_rec_x))
fwi = FWI(nz, nx, dz, dx, nSteps, dt;
ind_src_x = ind_src_x, ind_src_z = ind_src_z, ind_rec_x = ind_rec_x, ind_rec_z = ind_rec_z)
stf = matread("$(DATADIR)/sourceF_4p5_2_high.mat")["sourceF"][:]
cp = Float64.(reshape(reinterpret(Float32,read("$DATADIR/Model_Cp_true.bin")), (fwi.nz_pad, fwi.nx_pad)))|>Array
cs = zeros(fwi.nz_pad, fwi.nx_pad)
ρ = 2500.0 .* ones(fwi.nz_pad, fwi.nx_pad)
shot_ids = collect(1:length(ind_src_z))
sess = Session()
@info "Computing forward..."
@time obs = compute_observation(sess, fwi, cp, cs, ρ, stf, shot_ids, gpu_id=0)
close("all")
imshow(obs[10,:,:], vmax=2000, vmin=-2000, extent=[0, fwi.nx*fwi.dx, fwi.dt*(fwi.nSteps-1), 0])
xlabel("Receiver Location (m)")
ylabel("Time (s)")
colorbar()
axis("normal")
set_cmap("gray")
savefig("obs.png")
cs_init = zeros(fwi.nz_pad, fwi.nx_pad)
ρ_init = 2500.0 .* ones(fwi.nz_pad, fwi.nx_pad)
cp_init = Float64.(reshape(reinterpret(Float32,read("$DATADIR/Model_Cp_init_1D.bin")), (fwi.nz_pad, fwi.nx_pad)))|>Array
# make variables
cs_inv = Variable(cs_init)
ρ_inv = Variable(ρ_init)
cp_inv = Variable(cp_init)
# allocate GPUs
loss = constant(0.0)
nGpus = length(use_gpu())
shot_id_points = Int64.(trunc.(collect(LinRange(1, length(ind_src_z), nGpus+1))))
@info "$nGpus GPU(s) is(are) available."
for i = 1:nGpus
global loss
shot_ids = collect(shot_id_points[i]:shot_id_points[i+1])
loss += compute_misfit(fwi, cp_inv, cs_inv, ρ_inv,
stf , shot_ids; gpu_id = i-1,
is_masked = false, cp_ref = cp_init, cs_ref = cs_init, ρ_ref = ρ_init)
end
sess = Session(); init(sess)
err = run(sess, loss)
@info "Initial error = ", err
BFGS!(sess, loss)
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 11837 | @testset "laplacian" begin
h = 1.0
rho = 1000.0
G = 9.8
len_z = 16
len_x = 32
nz = Int(len_z/h + 1)
nx = Int(len_x/h + 1)
tf_h=constant(1.0)
coef = rand(nz, nx)
func = rand(nz, nx)
tf_coef = constant(coef)
tf_func = constant(func)
# gradient check -- v
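# Taylor test: with s(γ) = f(m + γv) - f(m) and w(γ) = s(γ) - γ⟨v, ∇f(m)⟩,
# s should decay as O(γ) while w decays as O(γ²) iff the AD gradient is correct;
# the same check is repeated for the other operators below.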
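# Taylor test: with s(γ) = f(m + γv) - f(m) and w(γ) = s(γ) - γ⟨v, ∇f(m)⟩,
# s should decay as O(γ) while w decays as O(γ²) iff the AD gradient is correct;
# the same check is repeated for the other operators below.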
function scalar_function(m)
# return sum(tanh(laplacian(m, tf_func, tf_h, constant(rho*G))))
return sum(tanh(laplacian(tf_coef, m, tf_h, constant(rho*G))))
end
# m_ = tf_coef
m_ = tf_func
v_ = 0.01*rand(nz, nx)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("laplacian.png"); close("all")
end
@testset "poisson" begin
# gradient check -- v
h = 1.0
rho = 1000.0
G = 9.8
len_z = 16
len_x = 32
nz = Int(len_z/h + 1)
nx = Int(len_x/h + 1)
tf_h=constant(1.0)
coef = zeros(nz, nx)
rhs = zeros(nz, nx)
for i = 1:nz
for j = 1:nx
rhs[i,j] = -sin(2*pi/len_z*(i-1)*h) * sin(2*pi/len_x*(j-1)*h)
coef[i,j] = 1.0 - cos(2*pi/len_z*(i-1)*h) * sin(2*pi/len_x*(j-1)*h) * len_z / (2*pi*rho*G)
# rhs[i,j] = 2.0*(i-1)*h*exp(-(((i-1)*h)^2) -(((j-1)*h)^2)) * rho * G
# coef[i,j] = 1.0 + exp(-(((i-1)*h)^2) -(((j-1)*h)^2))
end
end
tf_coef = constant(coef)
tf_rhs = constant(rhs)
function scalar_function(m)
return sum(tanh(poisson_op(tf_coef,m,tf_h,constant(rho*G), constant(0))))
# return sum(tanh(poisson_op(m,tf_rhs,tf_h, constant(rho*G), constant(0))))
end
m_ = tf_rhs
# m_ = tf_coef
v_ = 0.01*rand(nz, nx)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 20^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("poisson.png"); close("all")
end
@testset "sat_op" begin
function ave_normal(quantity, m, n)
aa = sum(quantity)
return aa/(m*n)
end
# TODO:
# ALPHA = 0.006323996017182
ALPHA = 1.0          # `const` is not legal in a @testset's local scope
SRC_CONST = 86400.0
K_CONST = 9.869232667160130e-16 * 86400
nz=20
nx=30
sw = constant(zeros(nz, nx))
swref = constant(zeros(nz,nx))
μw = constant(0.001)
μo = constant(0.003)
K = constant(100.0 .* ones(nz, nx))
ϕ = constant(0.25 .* ones(nz, nx))
dt = constant(30.0)
h = constant(100.0 * 0.3048)
q1 = zeros(nz,nx)
q2 = zeros(nz,nx)
q1[10,5] = 0.002 * (1/(100.0 * 0.3048)^2)/20.0/0.3048 * SRC_CONST
q2[10,25] = -0.002 * (1/(100.0 * 0.3048)^2)/20.0/0.3048 * SRC_CONST
qw = constant(q1)
qo = constant(q2)
λw = sw.*sw/μw
λo = (1-sw).*(1-sw)/μo
λ = λw + λo
f = λw/λ
q = qw + qo + λw/(λo+1e-16).*qo
# Θ = laplacian_op(K.*λo, potential_c, h, constant(0.0))
Θ = upwlap_op(K*K_CONST, λo, constant(zeros(nz,nx)), h, constant(0.0))
load_normal = (Θ+q/ALPHA) - ave_normal(Θ+q/ALPHA, nz, nx)
tf_comp_p0 = upwps_op(K*K_CONST, λ, load_normal, constant(zeros(nz,nx)), h, constant(0.0), constant(2))
sess = Session()
init(sess)
p0 = run(sess, tf_comp_p0)
tf_p0 = constant(p0)
# s = sat_op(sw,p0,K,ϕ,qw,qo,sw,dt,h)
# function step(sw)
# λw = sw.*sw
# λo = (1-sw).*(1-sw)
# λ = λw + λo
# f = λw/λ
# q = qw + qo + λw/(λo+1e-16).*qo
# # Θ = laplacian_op(K.*λo, constant(zeros(nz,nx)), h, constant(0.0))
# Θ = upwlap_op(K, λo, constant(zeros(nz,nx)), h, constant(0.0))
# # Θ = constant(zeros(nz,nx))
# load_normal = (Θ+q/ALPHA) - ave_normal(Θ+q/ALPHA, nz, nx)
# p = poisson_op(λ.*K, load_normal, h, constant(0.0), constant(0)) # potential p = pw - ρw*g*h
# # p = upwps_op(K, λ, load_normal, constant(zeros(nz,nx)), h, constant(0.0), constant(0))
# sw = sat_op(sw,p,K,ϕ,qw,qo,μw,μo,sw,dt,h)
# return sw
# end
# NT=100
# function evolve(sw, NT, qw, qo)
# # qw_arr = constant(qw) # qw: NT x m x n array
# # qo_arr = constant(qo)
# tf_sw = TensorArray(NT+1)
# function condition(i, ta)
# tf.less(i, NT+1)
# end
# function body(i, tf_sw)
# sw_local = step(read(tf_sw, i))
# i+1, write(tf_sw, i+1, sw_local)
# end
# tf_sw = write(tf_sw, 1, sw)
# i = constant(1, dtype=Int32)
# _, out = while_loop(condition, body, [i;tf_sw])
# read(out, NT+1)
# end
# s = evolve(sw, NT, qw, qo)
# J = tf.nn.l2_loss(s)
# tf_grad_K = gradients(J, K)
# sess = Session()
# init(sess)
# # P = run(sess,p0)
# # error("")
# S=run(sess, s)
# imshow(S);colorbar();
# error("")
# grad_K = run(sess, tf_grad_K)
# imshow(grad_K);colorbar();
# error("")
# TODO:
# gradient check -- v
function scalar_function(m)
# return sum(tanh(sat_op(m,tf_p0,K*K_CONST,ϕ,qw,qo,μw,μo,constant(zeros(nz,nx)),dt,h)))
return sum(tanh(sat_op(sw,m,K*K_CONST,ϕ,qw,qo,μw,μo,constant(zeros(nz,nx)),dt,h)))
# return sum(tanh(sat_op(sw,tf_p0,m,ϕ,qw,qo,μw,μo,constant(zeros(nz,nx)),dt,h)))
# return sum(tanh(sat_op(sw,tf_p0,K*K_CONST,m,qw,qo,μw,μo,constant(zeros(nz,nx)),dt,h)))
end
# m_ = sw
# v_ = 0.1 * rand(nz,nx)
m_ = tf_p0
v_ = 5e5 .* rand(nz,nx)
# m_ = K*K_CONST
# v_ = 10 .* rand(nz,nx) *K_CONST
# m_ = ϕ
# v_ = 0.1 * rand(nz,nx)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("sat_op.png"); close("all")
end
@testset "upwlap_op" begin
h = 1.0
rho = 1000.0
G = 9.8
len_z = 16
len_x = 32
nz = Int(len_z/h + 1)
nx = Int(len_x/h + 1)
tf_h=constant(1.0)
perm = rand(nz, nx)
mobi = rand(nz, nx)
func = rand(nz, nx)
tf_perm = constant(perm)
tf_mobi = constant(mobi)
tf_func = constant(func)
# gradient check -- v
function scalar_function(m)
# return sum(tanh(upwlap_op(m, tf_mobi, tf_func, tf_h, constant(rho*G))))
# return sum(tanh(upwlap_op(tf_perm, m, tf_func, tf_h, constant(rho*G))))
return sum(tanh(upwlap_op(tf_perm, tf_mobi, m, tf_h, constant(rho*G))))
end
# m_ = constant(rand(10,20))
# m_ = tf_perm
# m_ = tf_mobi
m_ = tf_func
v_ = rand(nz, nx)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 20^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("upwlap_op.png"); close("all")
end
@testset "upwps_op" begin
h = 20.0
rho = 1000.0
G = 9.8
len_z = 16*20
len_x = 32*20
nz = Int(len_z/h + 1)
nx = Int(len_x/h + 1)
tf_h=constant(1.0)
coef = zeros(nz, nx)
ones_cons = ones(nz, nx)
rhs = zeros(nz, nx)
for i = 1:nz
for j = 1:nx
rhs[i,j] = -sin(2*pi/len_z*(i-1)*h) * sin(2*pi/len_x*(j-1)*h)
coef[i,j] = 1.0 - cos(2*pi/len_z*(i-1)*h) * sin(2*pi/len_x*(j-1)*h) * len_z / (2*pi*rho*G)
# rhs[i,j] = 2.0*(i-1)*h*exp(-(((i-1)*h)^2) -(((j-1)*h)^2)) * rho * G
# coef[i,j] = 1.0 + exp(-(((i-1)*h)^2) -(((j-1)*h)^2))
end
end
# coef = 1.0 .+ rand(nz, nx)
# rhs = rand(nz, nx)
tf_coef = constant(coef)
tf_rhs = constant(rhs)
tf_funcref = constant(rand(nz, nx))
tf_ones = constant(ones_cons)
function scalar_function(m)
# return sum(tanh(upwps_op(tf_coef, tf_ones, m, tf_funcref, tf_h, constant(rho*G), constant(0))))
# return sum(tanh(upwps_op(m, tf_ones, tf_rhs, tf_ones, tf_h, constant(rho*G), constant(0))))
return sum(tanh(upwps_op(tf_ones, m, tf_rhs, tf_ones, tf_h, constant(rho*G), constant(0))))
end
# m_ = tf_rhs
m_ = tf_coef
v_ = 0.01*rand(nz, nx)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("upwps_op.png"); close("all")
end
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 73 | using FwiFlow
using Test
using PyCall
using PyPlot
matplotlib.use("Agg") | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 10045 | using Revise
using FwiFlow
using PyCall
using LinearAlgebra
using DelimitedFiles
using PyPlot
using MAT
matplotlib.use("agg")
np = pyimport("numpy")
# mode
# 0 -- generate data
# 1 -- running inverse modeling
mode = 1
# datamode
# 0 -- small data
# 1 -- large data
datamode = 0
sparsity = 0.01
noise = 0.0
if length(ARGS)==3
global mode = parse(Int64, ARGS[1])
global datamode = parse(Int64, ARGS[2])
global sparsity = parse(Float64, ARGS[3])
elseif length(ARGS)==4
global mode = parse(Int64, ARGS[1])
global datamode = parse(Int64, ARGS[2])
global sparsity = parse(Float64, ARGS[3])
global noise = parse(Float64, ARGS[4])
end
FLDR = "datamode$(datamode)sparsity$sparsity"
if !isdir(FLDR)
mkdir(FLDR)
end
# data structure for flow simulation
const K_CONST = 9.869232667160130e-16 * 86400 * 1e3
const ALPHA = 1.0
mutable struct Ctx
m; n; h; NT; Δt; Z; X; ρw; ρo;
μw; μo; K; g; ϕ; qw; qo; sw0
end
function tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo,sw0,ifTrue)
tf_h = constant(h)
# tf_NT = constant(NT)
tf_Δt = constant(Δt)
tf_Z = constant(Z)
tf_X= constant(X)
tf_ρw = constant(ρw)
tf_ρo = constant(ρo)
tf_μw = constant(μw)
tf_μo = constant(μo)
# tf_K = isa(K,Array) ? Variable(K) : K
if ifTrue
tf_K = constant(K)
else
tf_K = Variable(K)
end
tf_g = constant(g)
# tf_ϕ = Variable(ϕ)
tf_ϕ = constant(ϕ)
tf_qw = constant(qw)
tf_qo = constant(qo)
tf_sw0 = constant(sw0)
return Ctx(m,n,tf_h,NT,tf_Δt,tf_Z,tf_X,tf_ρw,tf_ρo,tf_μw,tf_μo,tf_K,tf_g,tf_ϕ,tf_qw,tf_qo,tf_sw0)
end
if mode == 0
global Krw, Kro
# LET-type
Lw = 1.8
Ew = 2.1
Tw = 2.3
Krwo = 0.6
function Krw(Sw)
return Krwo*Sw^Lw/(Sw^Lw + Ew*(1-Sw)^Tw)
end
function Kro(So)
return So^Lw / (So^Lw + Ew*(1-So)^Tw)
end
elseif mode == 1
global Krw, Kro, θ1, θ2
θ1 = Variable(ae_init([1,20,20,20,1]))
θ2 = Variable(ae_init([1,20,20,20,1]))
function Krw(Sw)
Sw_ = tf.reshape(Sw, (-1,1))
y = ae(Sw_, [20,20,20,1],θ1)
tf.reshape((tanh(y)+1)/2, Sw.shape)
end
function Kro(So)
So_ = tf.reshape(So, (-1,1))
y = ae(So_, [20,20,20,1],θ2)
tf.reshape(1-(tanh(y)+1)/2, So.shape)
end
end
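# In mode 1 the relative permeabilities are parameterized by small neural
# networks (via `ae`); the (tanh(y)+1)/2 transform squashes the outputs into
# (0, 1) so the learned curves remain physically admissible.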
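# In mode 1 the relative permeabilities are parameterized by small neural
# networks (via `ae`); the (tanh(y)+1)/2 transform squashes the outputs into
# (0, 1) so the learned curves remain physically admissible.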
function krw_and_kro()
sw = LinRange(0,1,100)|>collect|>constant
Krw(sw), Kro(1-sw)
end
# IMPES for flow simulation
function ave_normal(quantity, m, n)
aa = sum(quantity)
return aa/(m*n)
end
function onestep(sw, p, m, n, h, Δt, Z, ρw, ρo, μw, μo, K, g, ϕ, qw, qo)
# step 1: update p
λw = Krw(sw)/μw
λo = Kro(1-sw)/μo
# λw = sw.*sw/μw
# λo = (1-sw).*(1-sw)/μo
λ = λw + λo
q = qw + qo + λw/(λo+1e-16).*qo
# q = qw + qo
potential_c = (ρw - ρo)*g .* Z
# Step 1: implicit potential
Θ = upwlap_op(K * K_CONST, λo, potential_c, h, constant(0.0))
load_normal = (Θ+q/ALPHA) - ave_normal(Θ+q/ALPHA, m, n)
# p = poisson_op(λ.*K* K_CONST, load_normal, h, constant(0.0), constant(1))
p = upwps_op(K * K_CONST, λ, load_normal, p, h, constant(0.0), constant(0)) # potential p = pw - ρw*g*h
# step 2: implicit transport
sw = sat_op(sw, p, K * K_CONST, ϕ, qw, qo, μw, μo, sw, Δt, h)
return sw, p
end
function imseq(tf_ctx)
ta_sw, ta_p = TensorArray(NT+1), TensorArray(NT+1)
ta_sw = write(ta_sw, 1, tf_ctx.sw0)
ta_p = write(ta_p, 1, constant(zeros(tf_ctx.m, tf_ctx.n)))
i = constant(1, dtype=Int32)
function condition(i, tas...)
i <= tf_ctx.NT
end
function body(i, tas...)
ta_sw, ta_p = tas
sw, p = onestep(read(ta_sw, i), read(ta_p, i), tf_ctx.m, tf_ctx.n, tf_ctx.h, tf_ctx.Δt, tf_ctx.Z, tf_ctx.ρw, tf_ctx.ρo, tf_ctx.μw, tf_ctx.μo, tf_ctx.K, tf_ctx.g, tf_ctx.ϕ, tf_ctx.qw[i], tf_ctx.qo[i])
ta_sw = write(ta_sw, i+1, sw)
ta_p = write(ta_p, i+1, p)
i+1, ta_sw, ta_p
end
_, ta_sw, ta_p = while_loop(condition, body, [i, ta_sw, ta_p])
out_sw, out_p = stack(ta_sw), stack(ta_p)
end
# visualization functions
function plot_saturation_series(S)
z_inj = (Int(round(0.6*m))-1)*h + h/2.0
x_inj = (Int(round(0.1*n))-1)*h + h/2.0
z_prod = (Int(round(0.6*m))-1)*h + h/2.0
x_prod = (Int(round(0.9*n))-1)*h + h/2.0
fig2,axs = subplots(3,3, figsize=[30,15], sharex=true, sharey=true)
ims = Array{Any}(undef, 9)
for iPrj = 1:3
for jPrj = 1:3
@info iPrj, jPrj
ims[(iPrj-1)*3+jPrj] = axs[iPrj,jPrj].imshow(S[survey_indices[(iPrj-1)*3+jPrj], :, :], extent=[0,n*h,m*h,0], vmin=0.0, vmax=0.6);
axs[iPrj,jPrj].title.set_text("Snapshot $((iPrj-1)*3+jPrj)")
            if jPrj == 1
                axs[iPrj,jPrj].set_ylabel("Depth (m)")
            end
            if iPrj == 3
                axs[iPrj,jPrj].set_xlabel("Distance (m)")
            end
# if iPrj ==2 && jPrj == 3
# cb = fig2.colorbar(ims[(iPrj-1)*3+jPrj], ax=axs[iPrj,jPrj])
# cb.set_label("Saturation")
axs[iPrj,jPrj].scatter(x_inj, z_inj, c="r", marker=">", s=128)
axs[iPrj,jPrj].scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
end
# fig2.subplots_adjust(wspace=0.04, hspace=0.042)
fig2.subplots_adjust(wspace=0.02, hspace=0.18)
cbar_ax = fig2.add_axes([0.91, 0.08, 0.01, 0.82])
cb2 = fig2.colorbar(ims[1], cax=cbar_ax)
cb2.set_label("Saturation")
# savefig("figures_summary/Saturation_evo_patchy_init.pdf",bbox_inches="tight",pad_inches = 0);
end
function plot_saturation(S)
z_inj = (Int(round(0.6*m))-1)*h + h/2.0
x_inj = (Int(round(0.1*n))-1)*h + h/2.0
z_prod = (Int(round(0.6*m))-1)*h + h/2.0
x_prod = (Int(round(0.9*n))-1)*h + h/2.0
imshow(S[end, :, :], extent=[0,n*h,m*h,0], vmin=0.0, vmax=0.6)
ylabel("Depth (m)")
xlabel("Distance (m)")
colorbar()
scatter(x_inj, z_inj, c="r", marker=">", s=128)
scatter(x_prod, z_prod, c="r", marker="<", s=128)
end
function plot_kr(krw, kro, w=missing, o=missing)
close("all")
plot(LinRange(0,1,100), krw, "b-", label="\$K_{rw}\$")
plot(LinRange(0,1,100), kro, "r-", label="\$K_{ro}\$")
if !ismissing(w)
plot(LinRange(0,1,100), w, "g--", label="True \$K_{rw}\$")
plot(LinRange(0,1,100), o, "c--", label="True \$K_{ro}\$")
end
xlabel("\$S_w\$")
ylabel("\$K_r\$")
legend()
end
# parameters for flow simulation
const SRC_CONST = 86400.0 #
const GRAV_CONST = 9.8 # gravity constant
if datamode==0
# Hyperparameter for flow simulation
global m = 15
global n = 30
global h = 30.0 # meter
global NT = 50
global dt_survey = 5
global Δt = 20.0 # day
else
global m = 45
global n = 90
global h = 10.0 # meter
global NT = 100
global dt_survey = 10
global Δt = 10.0 # day
end
z = (1:m)*h|>collect
x = (1:n)*h|>collect
X, Z = np.meshgrid(x, z)
ρw = 501.9
ρo = 1053.0
μw = 0.1
μo = 1.0
K_init = 20.0 .* ones(m,n) # initial guess of permeability
g = GRAV_CONST
ϕ = 0.25 .* ones(m,n)
qw = zeros(NT, m, n)
qw[:,Int(round(0.6*m)),Int(round(0.1*n))] .= 0.005 * (1/h^2)/10.0 * SRC_CONST
qo = zeros(NT, m, n)
qo[:,Int(round(0.6*m)),Int(round(0.9*n))] .= -0.005 * (1/h^2)/10.0 * SRC_CONST
sw0 = zeros(m, n)
survey_indices = collect(1:dt_survey:NT+1) # 11 survey stages
n_survey = length(survey_indices)
K = 20.0 .* ones(m,n) # millidarcy
K[Int(round(0.52*m)):Int(round(0.67*m)),:] .= 120.0
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
krw, kro = krw_and_kro()
using Random; Random.seed!(233)
obs_ids = rand(1:m*n, Int(round(m*n*sparsity)))
obs = tf.reshape(out_sw_true[1:NT+1], (NT+1,-1))[:, obs_ids]
# executing the simulation
if mode == 0
# generate data
sess = Session(); init(sess)
S,obs_ = run(sess, [out_sw_true, obs])
krw_, kro_ = run(sess, [krw, kro])
matwrite("$FLDR/Data.mat", Dict("S"=>S, "krw"=>krw_, "kro"=>kro_, "obs"=>obs_))
close("all");plot_kr(krw_, kro_); savefig("$FLDR/krwo.png")
close("all");plot_saturation(S);
X, Y = np.meshgrid(collect((0:n-1)*h), collect((0:m-1)*h))
x = X[:][obs_ids] .+h/2; y = Y[:][obs_ids].+h/2
scatter(x, y, marker = "*", s=80, color="magenta")
savefig("$FLDR/sat.png")
# savefig("$FLDR/sat.pdf")
# plot_saturation_series(S)
# savefig("$FLDR/satnn.pdf")
# savefig("$FLDR/satall.pdf")
else
dat = Dict{String, Any}("loss" => Float64[], "errw"=>Float64[], "erro"=>Float64[])
summary = (vs, i, loss_)->begin
global dat
S, krw_, kro_, θ1, θ2 = vs
errw = norm(krw_-wref)
erro = norm(kro_-oref)
# dat["S$i"] = S
dat["w$i"] = krw_
dat["o$i"] = kro_
dat["loss"] = [dat["loss"]; loss_]
dat["errw"] = [dat["errw"]; errw]
dat["erro"] = [dat["erro"]; erro]
dat["theta1"] = θ1
dat["theta2"] = θ2
if mod(i, 10)==1
close("all");plot_kr(krw_, kro_, wref, oref); savefig("$FLDR/krwo$i.png")
close("all");plot_saturation(S); savefig("$FLDR/sat$i.png")
close("all");
semilogy(dat["errw"], label="\$k_{r1}\$");
semilogy(dat["erro"], label="\$k_{r2}\$"); legend()
xlabel("Iteration"); ylabel("Error"); savefig("$FLDR/err.png")
close("all");semilogy(dat["loss"]); xlabel("Iteration"); ylabel("Loss"); savefig("$FLDR/loss.png")
end
@show i, loss_, errw, erro
matwrite("$FLDR/invData.mat", dat)
end
d = matread("$FLDR/Data.mat")
Sref, wref, oref, obsref = d["S"], d["krw"][:], d["kro"][:], d["obs"]
# loss = sum((out_sw_true - Sref)^2)
using Random; Random.seed!(233)
obs = obs .* (1. .+ noise * randn(length(obs)))
loss = sum((obsref-obs)^2)
sess = Session(); init(sess)
BFGS!(sess, loss, 500;vars = [out_sw_true, krw, kro, θ1, θ2], callback = summary)
end
# dd = matread("$FLDR/invData.mat")
# run(sess, [assign(θ1,dd["theta1"]),assign(θ2,dd["theta2"])] )
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | code | 1746 | using MAT
using PyPlot
using PyCall
mpl = pyimport("tikzplotlib")
# Error plot
errw = []
erro = []
ss = [0.01,0.05,0.1,0.5]
d = matread("datamode0sparsity0.01/invData.mat")
errw = d["errw"][:]/10
erro = d["erro"][:]/10
close("all")
semilogy(errw, label="\$S_1\$")
semilogy(erro, label="\$S_2\$")
xlabel("Iteration")
ylabel("Error")
legend()
mpl.save("errwo.tex")
savefig("errwo.png")
# Estimation plot
function plot_kr(krw, kro, w=missing, o=missing)
close("all")
plot(LinRange(0,1,100), krw, "b-", label="\$K_{rw}\$")
plot(LinRange(0,1,100), kro, "r-", label="\$K_{ro}\$")
if !ismissing(w)
plot(LinRange(0,1,100), w, "g--", label="True \$K_{rw}\$")
plot(LinRange(0,1,100), o, "c--", label="True \$K_{ro}\$")
end
xlabel("\$S_w\$")
ylabel("\$K_r\$")
legend()
end
d = matread("datamode0sparsity0.01/invData.mat")
dd = matread("datamode0sparsity0.01/Data.mat")
w = d["w491"][:]
o = d["o491"][:]
wref = dd["krw"][:]
oref = dd["kro"][:]
close("all");plot_kr(w, o, wref, oref); savefig("krwo.png")
mpl.save("krwo.tex")
rc("axes", titlesize=20)
rc("axes", labelsize=18)
rc("xtick", labelsize=18)
rc("ytick", labelsize=18)
rc("legend", fontsize=20)
# true model
figure()
m = 15
n = 30
h = 30
z_inj = (Int(round(0.6*m))-1)*h + h/2.0
x_inj = (Int(round(0.1*n))-1)*h + h/2.0
z_prod = (Int(round(0.6*m))-1)*h + h/2.0
x_prod = (Int(round(0.9*n))-1)*h + h/2.0
K = 20.0 .* ones(m,n)
K[8:10,:] .= 120.0
imshow(K, extent=[0,n*h,m*h,0]);
xlabel("Distance (m)")
ylabel("Depth (m)")
cb = colorbar()
clim([20, 120])
cb.set_label("Permeability (md)")
scatter(x_inj, z_inj, c="r", marker=">", s=64)
scatter(x_prod, z_prod, c="r", marker="<", s=64)
savefig("K.pdf", bbox_inches="tight",pad_inches = 0, dpi = 300);
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 3735 | # FwiFlow: Wave and Flow Inversion with Intrusive Automatic Differentiation

| Quick Install | Documentation |
| ------------------------------- | ------------------------------------------------------------ |
| `using Pkg; Pkg.add("FwiFlow")` | [](https://lidongzh.github.io/FwiFlow.jl/dev) |
## Highlights
- GPU-accelerated FWI module with checkpointing schemes;
- AMG-accelerated implicit pressure-implicit saturation scheme;
- Time Fractional Partial Differential Equations
## Philosophy
We treat physical simulations as a chain of multiple differentiable operators, such as discrete Laplacian evaluation, a Poisson solver and a single implicit time stepping for nonlinear PDEs. They are like building blocks that can be assembled to make simulation tools for new physical models.
Those operators are differentiable and integrated in a computational graph so that the gradients can be computed automatically and efficiently by analyzing the dependencies in the graph. Independent operators are executed in parallel. With the gradients we can perform gradient-based PDE-constrained optimization for inverse problems.
FwiFlow is built on [ADCME](https://github.com/kailaix/ADCME.jl), a powerful static graph based automatic differentiation library for scientific computing (with TensorFlow backend). FwiFlow implements the idea of **Intrusive Automatic Differentiation**.
<p align="center">
<img src="docs/src/assets/op.png" width="50%">
</p>
## Applications
The following examples are for inversion
| <img src="docs/src/assets/marmousi_inv.png" width="270"> | <img src="docs/src/assets/flow.png" width="270"> | <img src="docs/src/assets/diagram.png" width="270"> |
| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------------------------------------- |
| [Full-waveform Inversion](https://lidongzh.github.io/FwiFlow.jl/dev/tutorials/fwi/) | [Two Phase Flow](https://lidongzh.github.io/FwiFlow.jl/dev/tutorials/flow/) | FWI-Two Phase Flow Coupled Inversion |
| <img src="docs/src/assets/frac.png" width="270"> | | |
| [Time Fractional PDE](https://lidongzh.github.io/FwiFlow.jl/dev/tutorials/timefrac/) | | |
## Research Papers
1. **Dongzhuo Li** (co-first author), **Kailai Xu** (co-first author), Jerry M. Harris, and Eric Darve. [*Coupled Time‐Lapse Full‐Waveform Inversion for Subsurface Flow Problems Using Intrusive Automatic Differentiation*](https://arxiv.org/abs/1912.07552), *Water Resources Research*, *56*(8), p.e2019WR027032 (https://doi.org/10.1029/2019WR027032).
2. **Kailai Xu** (co-first author), **Dongzhuo Li** (co-first author), Eric Darve, and Jerry M. Harris. [*Learning Hidden Dynamics using Intrusive Automatic Differentiation*](http://arxiv.org/abs/1912.07547).
## Misc
The **TorchFWI** package, which shares the elastic FWI part, can be found [*here*](https://github.com/lidongzh/TorchFWI). It may be helpful if one wants to experiment with PyTorch. <br>
An older version of this repository can be found [*here*](https://github.com/lidongzh/TwoPhaseFlowFWI). It contains all scripts to recreate results in paper 1.
## LICENSE
MIT License
Copyright (c) 2019 Dongzhuo Li and Kailai Xu
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 1196 | * download amgcl and decompress it to `TwoPhaseFlowFWI` directory with name `amgcl` (not `amgcl-master`)
* Create a CmakeLists.txt in `Ops/AMG`
```
cmake_minimum_required(VERSION 3.9)
project(amgcl_eigen)
set (CMAKE_CXX_STANDARD 11)
# --- Find AMGCL ------------------------------------------------------------
include_directories("../../amgcl")
include_directories(/usr/local/include/eigen3)
include_directories(/usr/local/include ${CONDA_INC})
# ---------------------------------------------------------------------------
add_executable(main main.cpp)
# target_link_libraries(amgcl_eigen)
```
* Replace `include_directories(/usr/local/include ${CONDA_INC})` here with the include directory of the `boost` library
* `cmake` and `make` in the `build` directory
* `./main ../cz308.mtx`, expected output
```
Solver
======
Type: BiCGStab
Unknowns: 308
Memory footprint: 0.00 B
Preconditioner
==============
Number of levels: 1
Operator complexity: 1.00
Grid complexity: 1.00
Memory footprint: 106.38 K
level unknowns nonzeros memory
---------------------------------------------
0 308 3182 106.38 K (100.00%)
1 2.09874e-13
``` | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 1184 | # FwiFlow
<img src="assets/diagram.png" style="zoom:67%;" />
This project considers the coupling of the wave equation and a two-phase incompressible immiscible flow equation, mainly for monitoring CO2 injection or water injection
u_tt = m(x) u_xx + f(x,t)
m_t = grad(a(x)grad(m)) + b(x)*grad(m)
The time scale T_2 for the second equation is much larger than that of the first one T_1
T_2 >> T_1
a(x), b(x) are unknown functions and will be calibrated using observation data d_i(x), which depends on u_i for each observation time i
# Instruction
1. Compile AdvectionDiffusion
```
cd Ops/AdvectionDiffusion/
mkdir build
cd build
cmake ..
make -j
```
2. Test AdvectionDiffusion and Generate Data (required)
```
julia> include("cdtest.jl")
julia> include("gradtest.jl")
```
3. Compile CUFA
```
cd Ops/FWI/CUFD/Src
make -j
```
4. Compile Wrapper
```
cd Ops/FWI/ops/build
cmake ..
make -j
```
5. Generate data
```
julia> include("generate_m.jl")
python main_calc_obs_data.py
python fwitest.py
```
6. Test Wrapper
```
cd src
```
```
julia> include("fwi_gradient_check.jl")
julia> include("coupled_gradient_check")
```
7. Run experiments
```
julia> include("learn_m.jl")
```
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 315 | # API Reference

## Low Level Custom Operators
```@docs
poisson_op
laplacian_op
fwi_obs_op
fwi_op
sat_op
sat_op2
upwlap_op
upwps_op
time_fractional_op
time_fractional_t_op
```
## FWI
```@docs
FWI
```
## Utility Functions
```@autodocs
Modules = [FwiFlow]
Pages = ["utils.jl"]
```
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 3442 | # Getting Started
## Installation
```julia
using Pkg
Pkg.add("FwiFlow")
```
## Intelligent Automatic Differentiation (IAD): Philosophy
We treat physical simulations as a chain of multiple differentiable operators, such as discrete Laplacian evaluation, a Poisson solver and a single implicit time stepping for nonlinear PDEs. They are like building blocks that can be assembled to make simulation tools for new physical models.
Those operators are differentiable and integrated in a computational graph so that the gradients can be computed automatically and efficiently via analyzing the dependency in the graph. Independent operators are executed in parallel. With the gradients we can perform gradient-based PDE-constrained optimization for inverse problems.
FwiFlow is built on [ADCME](https://github.com/kailaix/ADCME.jl), a powerful static graph based automatic differentiation library for scientific computing (with TensorFlow backend). FwiFlow implements the idea of **Intelligent Automatic Differentiation**.

## Tutorials
Here are some examples to start with (`*` denotes advanced examples)
- [FWI](https://lidongzh.github.io/FwiFlow.jl/dev/tutorials/fwi/)
- [Two Phase Flow Inversion](https://lidongzh.github.io/FwiFlow.jl/dev/tutorials/flow/)
- *[Coupled Inversion](https://github.com/lidongzh/FwiFlow.jl/tree/master/docs/codes/src_fwi_coupled)
- *[Coupled Inversion: Channel Flow](https://github.com/lidongzh/FwiFlow.jl/tree/master/docs/codes/src_fwi_channel)
## FwiFlow: Application of IAD to FWI and Two Phase Flow Coupled Inversion
This framework uses waveform data to invert for intrinsic parameters (e.g., permeability and porosity) in subsurface problems, with coupled flow physics, rock physics, and wave physics models.

IAD provides three levels of user control with
- built-in differentiable operators from modern deep-learning infrastructures (TensorFlow), and customized operators that can either
- encapsulate analytic adjoint gradient computation or
- handle the forward simulation and compute the corresponding gradient for a single time step.
This intelligent strategy strikes a good balance between computational efficiency and programming efficiency and would serve as a paradigm for a wide range of PDE-constrained geophysical inverse problems.
### Physical Models
### Flow Physics
The flow physics component maps from intrinsic properties such as permeability to flow properties, such as fluid saturation. We use a model of two-phase flow in porous media as an example. The governing equations are conservation of mass, Darcy's law, and other relationships.
### Rock Physics
The rock physics model describes the relationship between fluid properties and rock elastic properties. As one fluid phase displaces the other, the bulk modulus and density of rocks vary.
### Wave Physics
The elastic wave equation maps from elastic properties to wavefields, such as particle velocity and stress, which can be recorded by receiver arrays as seismic waveform data.
### The Adjoint Method & Automatic Differentiation
The discrete adjoint method and reverse-mode automatic differentiation originate from the same mathematical formulation.

| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 28 | # Trouble Shooting
## AMG
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 7413 | # Flow Inversion
This section is an example of solving the flow equation with the Newton-Raphson method. The governing equations are derived from conservation of mass of each phase, and conservation of momentum or Darcy's law for each phase. First, we have
$$\frac{\partial }{{\partial t}}(\phi {S_i}{\rho _i}) + \nabla \cdot ({\rho _i}{\mathbf{v}_i}) = {\rho _i}{q_i}, \quad i = 1,2 \tag{1}$$
The saturation of the two phases satisfies
$$S_{1} + S_{2} = 1\tag{2}$$
and the Darcy's law yields
$${\mathbf{v}_i} = - \frac{{K{k_{ri}(S_i)}}}{{{\tilde{\mu}_i}}}(\nabla {P_i} - g{\rho _i}\nabla Z), \quad i=1,2 \tag{3}$$
Here, $K$ is the permeability tensor, but in our case we assume it is a space-varying scalar value. $k_{ri}(S_i)$ is a function of $S_i$; typically, the higher the saturation, the easier the corresponding phase is to flow. $\tilde \mu_i$ is the viscosity [^pcl]. $Z$ is the depth coordinate, $\rho_i$ is the density, $\phi$ is the porosity, $q_i$ is the source, $P_i$ is the fluid pressure, and $g$ is the gravitational constant.
[^pcl]: [This paper](https://arxiv.org/abs/2002.10521) gives a description of common relative permeability models and proposes a method to calibrate an empirical model from indirect data.
The fluid pressure $P_i$ is related to $S_i$ via the capillary pressure
$$P_2 = P_1 - P_c(S_2)\tag{4}$$
where $P_c$ is a function of the saturation of the wetting phase 2.
In Equations 1-4, the state variables are $S_1$, $S_2$, $\mathbf{v}_i$, $P_1$, and $P_2$. There are 6 equations and 6 variables (taking dimension into consideration) in total, and thus the system is complete.
!!! info
    In all the numerical schemes below, we adopt the finite volume method. The discretized domain shown below is partitioned into cells, and the state variables $\Psi_2, S_1, S_2$ and the capillary potential $\Psi_c$ are defined per cell.

**Step 1: Parameters Setup**
```julia
const K_CONST = 9.869232667160130e-16 * 86400 * 1e3
const ALPHA = 1.0
mutable struct Ctx
m; n; h; NT; Δt; Z; X; ρw; ρo;
μw; μo; K; g; ϕ; qw; qo; sw0
end
function tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo,sw0,ifTrue)
tf_h = constant(h)
# tf_NT = constant(NT)
tf_Δt = constant(Δt)
tf_Z = constant(Z)
tf_X= constant(X)
tf_ρw = constant(ρw)
tf_ρo = constant(ρo)
tf_μw = constant(μw)
tf_μo = constant(μo)
# tf_K = isa(K,Array) ? Variable(K) : K
if ifTrue
tf_K = constant(K)
else
tf_K = Variable(K)
end
tf_g = constant(g)
# tf_ϕ = Variable(ϕ)
tf_ϕ = constant(ϕ)
tf_qw = constant(qw)
tf_qo = constant(qo)
tf_sw0 = constant(sw0)
return Ctx(m,n,tf_h,NT,tf_Δt,tf_Z,tf_X,tf_ρw,tf_ρo,tf_μw,tf_μo,tf_K,tf_g,tf_ϕ,tf_qw,tf_qo,tf_sw0)
end
function Krw(Sw)
return Sw ^ 1.5
end
function Kro(So)
return So ^1.5
end
function ave_normal(quantity, m, n)
aa = sum(quantity)
return aa/(m*n)
end
```
**Step 2: Implementing the Numerical Scheme**
The major simulation code consists of a nonlinear implicit time step for (1),
$$\phi (S_2^{n + 1} - S_2^n) - \nabla \cdot \left( {{m_{2}}(S_2^{n + 1})K\nabla \Psi _2^n} \right) \Delta t =
\left(q_2^n + q_1^n \frac{m_2(S^{n+1}_2)}{m_1(S^{n+1}_2)}\right)
\Delta t \tag{5}$$
Here $m_i(s) = \frac{k_{ri}(s)}{\tilde \mu_i}$ and $\Psi_i = P_i - \rho_i g Z$.
In [`sat_op`](@ref) we solve the nonlinear equation (5) with a Newton-Raphson scheme.
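For intuition, here is a minimal standalone sketch of a Newton-Raphson iteration in a scalar setting. This is illustrative only (the actual multi-dimensional solve happens inside the `sat_op` kernel); `r`, `dr`, and `s0` are hypothetical stand-ins for the residual of (5), its derivative, and the previous saturation:

```julia
# Illustrative scalar Newton-Raphson: solve r(s) = 0 for the new saturation.
function newton_solve(r, dr, s0; tol = 1e-10, maxiter = 20)
    s = s0
    for _ in 1:maxiter
        δ = r(s) / dr(s)       # Newton update
        s -= δ
        abs(δ) < tol && break
    end
    return s
end

# Example: solve ϕ*(s - 0.2) - 0.1*(1 - s)^2 = 0 with ϕ = 0.25
newton_solve(s -> 0.25 * (s - 0.2) - 0.1 * (1 - s)^2,
             s -> 0.25 + 0.2 * (1 - s), 0.2)
```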
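For intuition, here is a minimal standalone sketch of a Newton-Raphson iteration in a scalar setting. This is illustrative only (the actual multi-dimensional solve happens inside the `sat_op` kernel); `r`, `dr`, and `s0` are hypothetical stand-ins for the residual of (5), its derivative, and the previous saturation:

```julia
# Illustrative scalar Newton-Raphson: solve r(s) = 0 for the new saturation.
function newton_solve(r, dr, s0; tol = 1e-10, maxiter = 20)
    s = s0
    for _ in 1:maxiter
        δ = r(s) / dr(s)       # Newton update
        s -= δ
        abs(δ) < tol && break
    end
    return s
end

# Example: solve ϕ*(s - 0.2) - 0.1*(1 - s)^2 = 0 with ϕ = 0.25
newton_solve(s -> 0.25 * (s - 0.2) - 0.1 * (1 - s)^2,
             s -> 0.25 + 0.2 * (1 - s), 0.2)
```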
```julia
# variables : sw, u, v, p
# (time dependent) parameters: qw, qo, ϕ
function onestep(sw, p, m, n, h, Δt, Z, ρw, ρo, μw, μo, K, g, ϕ, qw, qo)
# step 1: update p
# λw = Krw(sw)/μw
# λo = Kro(1-sw)/μo
λw = sw.*sw/μw
λo = (1-sw).*(1-sw)/μo
λ = λw + λo
q = qw + qo + λw/(λo+1e-16).*qo
# q = qw + qo
potential_c = (ρw - ρo)*g .* Z
# Step 1: implicit potential
Θ = upwlap_op(K * K_CONST, λo, potential_c, h, constant(0.0))
load_normal = (Θ+q/ALPHA) - ave_normal(Θ+q/ALPHA, m, n)
# p = poisson_op(λ.*K* K_CONST, load_normal, h, constant(0.0), constant(1))
p = upwps_op(K * K_CONST, λ, load_normal, p, h, constant(0.0), constant(0)) # potential p = pw - ρw*g*h
# step 2: implicit transport
sw = sat_op(sw, p, K * K_CONST, ϕ, qw, qo, μw, μo, sw, Δt, h)
return sw, p
end
function imseq(tf_ctx)
ta_sw, ta_p = TensorArray(NT+1), TensorArray(NT+1)
ta_sw = write(ta_sw, 1, tf_ctx.sw0)
ta_p = write(ta_p, 1, constant(zeros(tf_ctx.m, tf_ctx.n)))
i = constant(1, dtype=Int32)
function condition(i, tas...)
i <= tf_ctx.NT
end
function body(i, tas...)
ta_sw, ta_p = tas
sw, p = onestep(read(ta_sw, i), read(ta_p, i), tf_ctx.m, tf_ctx.n, tf_ctx.h, tf_ctx.Δt, tf_ctx.Z, tf_ctx.ρw, tf_ctx.ρo, tf_ctx.μw, tf_ctx.μo, tf_ctx.K, tf_ctx.g, tf_ctx.ϕ, tf_ctx.qw[i], tf_ctx.qo[i])
ta_sw = write(ta_sw, i+1, sw)
ta_p = write(ta_p, i+1, p)
i+1, ta_sw, ta_p
end
_, ta_sw, ta_p = while_loop(condition, body, [i, ta_sw, ta_p])
out_sw, out_p = stack(ta_sw), stack(ta_p)
end
```
**Step 3: Forward Computation**
We now first generate the synthetic data.
```julia
using FwiFlow
using PyCall
using LinearAlgebra
using DelimitedFiles
np = pyimport("numpy")
const SRC_CONST = 86400.0 #
const GRAV_CONST = 9.8 # gravity constant
# Hyperparameter for flow simulation
m = 15
n = 30
h = 30.0 # meter
NT = 50
dt_survey = 5
Δt = 20.0 # day
z = (1:m)*h|>collect
x = (1:n)*h|>collect
X, Z = np.meshgrid(x, z)
ρw = 501.9
ρo = 1053.0
μw = 0.1
μo = 1.0
K_init = 20.0 .* ones(m,n) # initial guess of permeability
g = GRAV_CONST
ϕ = 0.25 .* ones(m,n)
qw = zeros(NT, m, n)
qw[:,9,3] .= 0.005 * (1/h^2)/10.0 * SRC_CONST
qo = zeros(NT, m, n)
qo[:,9,28] .= -0.005 * (1/h^2)/10.0 * SRC_CONST
sw0 = zeros(m, n)
survey_indices = collect(1:dt_survey:NT+1) # 11 stages
n_survey = length(survey_indices)
K = 20.0 .* ones(m,n) # millidarcy
K[8:10,:] .= 120.0
tfCtxTrue = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K,g,ϕ,qw,qo, sw0, true)
out_sw_true, out_p_true = imseq(tfCtxTrue)
```

**Step 4: Inversion**
We now conduct inversion. The unknown variable is stored in `tfCtxInit.K`.
```julia
tfCtxInit = tfCtxGen(m,n,h,NT,Δt,Z,X,ρw,ρo,μw,μo,K_init,g,ϕ,qw,qo, sw0, false)
out_sw_init, out_p_init = imseq(tfCtxInit)
sess = Session(); init(sess)
O = run(sess, out_sw_init)
# vis(O)  # hypothetical user-supplied visualization helper (not defined in this tutorial)
# NOTE Compute FWI loss
loss = sum((out_sw_true-out_sw_init)^2)
opt = ScipyOptimizerInterface(loss, options=Dict("maxiter"=> 100, "ftol"=>1e-12, "gtol"=>1e-12),var_to_bounds = Dict(tfCtxInit.K=>(10.0, 130.0)))
__cnt = 0
__loss = 0
out = []
function print_loss(l)
if mod(__cnt,1)==0
println("iter $__cnt, current loss=",l)
end
global __loss = l
global __cnt += 1
end
__iter = 0
function step_callback(rk)
if mod(__iter,1)==0
println("================ ITER $__iter ===============")
end
println("$__loss")
push!(out, __loss)
global __iter += 1
end
sess = Session(); init(sess)
ScipyOptimizerMinimize(sess, opt, loss_callback=print_loss,
step_callback=step_callback, fetches=[loss])
```

We can visualize `K` with
```julia
imshow(run(sess, tfCtxInit.K), extent=[0,n*h,m*h,0]);
xlabel("Distance (m)")
ylabel("Depth (m)")
cb = colorbar()
clim([20, 120])
cb.set_label("Permeability (md)")
```


| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 2511 | # Full Waveform Inversion
The following script shows how to use the high-level API.
```julia
using FwiFlow
using ADCME
using MAT
using PyPlot
using LinearAlgebra
oz = 0.0
ox = 0.0
dz_orig = 24.0
dx_orig = 24.0
nz_orig = 134
nx_orig = 384
dz = dz_orig/1.0
dx = dx_orig/1.0
nz = Int64(round((dz_orig * nz_orig) / dz));
nx = Int64(round((dx_orig * nx_orig) / dx))
dt = 0.0025
nSteps = 2000
para_fname = "para_file.json"
survey_fname = "survey_file.json"
data_dir_name = "Data"
# source and receiver locations
ind_src_x = collect(4:8:384)
ind_src_z = 2ones(Int64, size(ind_src_x))
ind_rec_x = collect(3:381)
ind_rec_z = 2ones(Int64, size(ind_rec_x))
fwi = FWI(nz, nx, dz, dx, nSteps, dt;
ind_src_x = ind_src_x, ind_src_z = ind_src_z, ind_rec_x = ind_rec_x, ind_rec_z = ind_rec_z)
stf = matread("$(DATADIR)/sourceF_4p5_2_high.mat")["sourceF"][:]
cp = Float64.(reshape(reinterpret(Float32,read("$DATADIR/Model_Cp_true.bin")), (fwi.nz_pad, fwi.nx_pad)))|>Array
cs = zeros(fwi.nz_pad, fwi.nx_pad)
ρ = 2500.0 .* ones(fwi.nz_pad, fwi.nx_pad)
shot_ids = collect(1:length(ind_src_z))
sess = Session()
obs = compute_observation(sess, fwi, cp, cs, ρ, stf, shot_ids, gpu_id=0)
obs_ = obs[10,:,:]
@assert norm(matread("test_data.mat")["obs"]-obs_)≈0.0
@info "Forward test passed!"
close("all")
imshow(obs[10,:,:], vmax=2000, vmin=-2000, extent=[0, fwi.nx*fwi.dx, fwi.dt*(fwi.nSteps-1), 0])
xlabel("Receiver Location (m)")
ylabel("Time (s)")
colorbar()
axis("normal")
set_cmap("gray")
savefig("Utils.png")
cs_init = zeros(fwi.nz, fwi.nx)
ρ_init = 2500.0 .* ones(fwi.nz, fwi.nx)
cp_init_ = Float64.(reshape(reinterpret(Float32,read("$DATADIR/Model_Cp_init_1D.bin")), (fwi.nz_pad, fwi.nx_pad)))|>Array
cp_init = cp_init_[fwi.nPml+1:fwi.nPml+fwi.nz, fwi.nPml+1:fwi.nPml+fwi.nx]
# make variables
cs_inv = Variable(cs_init)
ρ_inv = Variable(ρ_init)
cp_inv = Variable(cp_init)
# allocate GPUs
loss = constant(0.0)
nGpus = length(use_gpu())
shot_id_points = Int64.(trunc.(collect(LinRange(1, length(ind_src_z), nGpus+1))))
for i = 1:nGpus
global loss
shot_ids = collect(shot_id_points[i]:shot_id_points[i+1])
loss += compute_misfit(fwi, cp_inv, cs_inv, ρ_inv,
stf , shot_ids; gpu_id = i-1,
is_masked = false, cp_ref = cp_init, cs_ref = cs_init, ρ_ref = ρ_init)
end
sess = Session(); init(sess)
run(sess, assign([cs_inv, ρ_inv, cp_inv], [cs, ρ, cp]))
err = run(sess, loss)
if !(err≈0.0)
error("misfit is wrong!")
end
init(sess)
BFGS!(sess, loss)
```
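After `BFGS!` converges, the inverted model can be pulled out of the session for inspection. A minimal sketch (the output file name is illustrative):

```julia
cp_est = run(sess, cp_inv)
matwrite("cp_inverted.mat", Dict("cp" => cp_est))  # illustrative file name
```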
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 5948 | # Full Waveform Inversion using Low Level APIs
In this example, we perform full-waveform inversion (FWI) with `FwiFlow`. For an explanation of FWI, see the documentation [here](https://wayneweiqiang.github.io/ADSeismic.jl/dev/forward_simulation/).
First we load all necessary packages
```julia
using FwiFlow
using PyCall
using LinearAlgebra
using DelimitedFiles
using MAT
np = pyimport("numpy")
```
We specify the parameters for the computational domain and numerical methods. The computational domain is shown below

```julia
oz = 0.0
ox = 0.0
dz_orig = 24.0
dx_orig = 24.0
nz_orig = 134
nx_orig = 384
dz = dz_orig/1.0
dx = dx_orig/1.0
nz = Int64(round((dz_orig * nz_orig) / dz));
nx = Int64(round((dx_orig * nx_orig) / dx))
dt = 0.0025
nSteps = 2000
para_fname = "para_file.json"
survey_fname = "survey_file.json"
data_dir_name = "Data"
```
We set the PML width, padding width (optional; the padding is for performance, see [`padding`](@ref)) and the mask, which removes the effect of sources and makes inversion more stable.
```julia
nPml = 32
nPad = 32 - mod((nz+2*nPml), 32)
nz_pad = nz + 2*nPml + nPad
nx_pad = nx + 2*nPml
Mask = zeros(nz_pad, nx_pad)
Mask[nPml+1:nPml+nz, nPml+1:nPml+nx] .= 1.0
Mask[nPml+1:nPml+10,:] .= 0.0
mask = constant(Mask)
mask_neg = constant(1.0 .- Mask)
```
The source and receiver indices are given by
```julia
ind_src_x = collect(4:8:384)
ind_src_z = 2ones(Int64, size(ind_src_x))
ind_rec_x = collect(3:381)
ind_rec_z = 2ones(Int64, size(ind_rec_x))
```
Next, we load source time functions (`stf_load`) from the file.
```julia
stf_load = matread("$(DATADIR)/sourceF_4p5_2_high.mat")["sourceF"]
stf_array = repeat(stf_load, length(ind_src_z), 1)
```
Each source time function has the following profile

We load the true P-wave velocity, S-wave velocity, and density to generate synthetic observation data
```julia
cp_true_pad = reshape(reinterpret(Float32,read("$DATADIR/Model_Cp_true.bin")) , (nz_pad, nx_pad))
cs_true_pad = zeros(nz_pad, nx_pad)
den_true_pad = 2500.0 .* ones(nz_pad, nx_pad)
tf_cp_pad = Variable(cp_true_pad, dtype=Float64) # original scale as
tf_cs_pad = constant(cs_true_pad, dtype=Float64)
tf_den_pad = constant(den_true_pad, dtype=Float64)
function vel2moduli(cp,cs,den)
lambda = (cp^2 - 2.0 * cs^2) .* den / 1e6
mu = cs^2 .* den / 1e6
return lambda, mu
end
tf_lambda_inv_pad, tf_mu_inv_pad = vel2moduli(tf_cp_pad, tf_cs_pad, tf_den_pad)
```
We use [`paraGen`](@ref) and [`surveyGen`](@ref) to generate parameter files
```julia
f0 = 4.5
paraGen(nz_pad, nx_pad, dz, dx, nSteps, dt, f0, nPml, nPad, para_fname, survey_fname, data_dir_name)
surveyGen(ind_src_z, ind_src_x, ind_rec_z, ind_rec_x, survey_fname)
```
At this point, we should be able to see two files in the current directory

Finally, we execute the forward wave equation and save the observation data to files. In the following script, we explicitly specify the ID of the GPU where the operator is executed.
```julia
tf_shot_ids = collect(0:length(ind_src_z)-1)
dummy = fwi_obs_op(tf_lambda_inv_pad, tf_mu_inv_pad, tf_den_pad, stf_array, 0, tf_shot_ids, para_fname) # use GPU:0
sess = Session(); init(sess);
run(sess, dummy)
```
In the `Data` folder, there will be 48 `Shot*.bin` files (one per source). We can visualize the result with the following script
```julia
A=read("Data/Shot10.bin");imshow(reshape(reinterpret(Float32,A),(nSteps ,length(ind_rec_z))), aspect="auto", vmax=2000, vmin=-2000, extent=[0, nx*dx, dt*(nSteps-1), 0])
xlabel("Receiver Location (m)")
ylabel("Time (s)")
colorbar()
set_cmap("gray")
```

We now consider the inversion problem: assume that we do not know the P-wave velocity. We mark it as an independent variable to be updated using `Variable`. Additionally, for better coalesced memory access on GPU, we pad the variables to multiples of 32 in the $z$ direction.
```julia
cs_init_pad = zeros(nz_pad, nx_pad)
den_init_pad = 2500.0 .* ones(nz_pad, nx_pad)
cp_init_pad = reshape(reinterpret(Float32,read("$DATADIR/Model_Cp_init_1D.bin")), (nz_pad, nx_pad))
tf_cp_inv = Variable(cp_init_pad[nPml+1:nPml+nz, nPml+1:nPml+nx], dtype=Float64) # original scale as
tf_cs_inv = constant(cs_init_pad[nPml+1:nPml+nz, nPml+1:nPml+nx], dtype=Float64)
tf_den_inv = constant(den_init_pad[nPml+1:nPml+nz, nPml+1:nPml+nx], dtype=Float64)
tf_cp_ref_pad = constant(cp_init_pad, dtype=Float64) # original scale as
tf_cs_ref_pad = constant(cs_init_pad, dtype=Float64)
tf_den_ref_pad = constant(den_init_pad, dtype=Float64)
tf_cp_inv_pad, tf_cs_inv_pad, tf_den_inv_pad = padding(tf_cp_inv, tf_cs_inv,
tf_den_inv, nz_orig, nx_orig, nz, nx, nPml, nPad)
```
Likewise, to remove the effect of extreme values close to the sources, we use masks
```julia
tf_cp_msk_pad = tf_cp_inv_pad .* mask + tf_cp_ref_pad .* mask_neg
tf_cs_msk_pad = tf_cs_inv_pad .* mask + tf_cs_ref_pad .* mask_neg
tf_den_msk_pad = tf_den_inv_pad .* mask + tf_den_ref_pad .* mask_neg
tf_lambda_inv_pad, tf_mu_inv_pad = vel2moduli(tf_cp_msk_pad, tf_cs_msk_pad,tf_den_msk_pad)
```
The inversion can be done in parallel on multi-GPU machines. This is done by specifying the GPU IDs for different `fwi_op` calls.
```julia
loss = constant(0.0)
nGpus = length(use_gpu())
shot_id_points = Int32.(trunc.(collect(LinRange(0, length(ind_src_z)-1, nGpus+1))))
for i = 1:nGpus
global loss
tf_shot_ids = collect(shot_id_points[i]:shot_id_points[i+1])
loss += fwi_op(tf_lambda_inv_pad, tf_mu_inv_pad, tf_den_inv_pad,
stf_array, i-1, tf_shot_ids, para_fname)
end
```
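As a concrete illustration of the partitioning (a hypothetical setup with 41 shots on 4 GPUs), `shot_id_points` splits the shot indices into contiguous chunks:

```julia
Int32.(trunc.(collect(LinRange(0, 40, 5))))   # → Int32[0, 10, 20, 30, 40]
# GPU 0 runs shots 0:10, GPU 1 runs 10:20, and so on;
# note that adjacent chunks share their endpoint shot.
```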
Finally, we trigger the BFGS optimizer with two lines of code.
```julia
sess = Session(); init(sess)
BFGS!(sess, loss)
```
Here is a snapshot of multi-GPU execution. Note we have only used approximately 2% of total memory, which means we could actually place about 50 times the current number of sources on each GPU!
 | FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.3.1 | 32fa60d65971f3e409a959dedccb0b5c4e29f76e | docs | 2510 | # Time Fractional Differential Equation
Fractional calculus has motivated the development and application of novel algorithms and models for describing anomalous diffusion. In this section, we discuss how to carry out numerical simulation of time fractional differential equations and the corresponding inversion. The major operators in FwiFlow are [`time_fractional_op`](@ref) and [`time_fractional_t_op`](@ref).
We consider the function
```math
u(t) = t^\beta
```
Then the corresponding time derivative is
```math
D^\alpha_{t_0}u(t) = \frac{\Gamma(\beta+1)}{\Gamma(\beta-\alpha+1)} t^{\beta-\alpha}
```
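For instance, with $\beta = 8$ and $\alpha = 0.8$ (the values used in the script below), the formula gives
```math
D^{0.8}_{0} t^{8} = \frac{\Gamma(9)}{\Gamma(8.2)}\, t^{7.2}
```
which is exactly the right-hand side `du(t, α)` supplied to the solver.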
```julia
using FwiFlow
Γ = x->exp(tf.math.lgamma(convert_to_tensor(x)))
function u(t, α)
t^β
end
function du(t, α)
Γ(β+1)/Γ(β-α+1)* t^(β-α)
end
β = 8.0
α = 0.8
NT = 100
f = (t, u, θ) -> du(t, α)
uout = time_fractional_op(α, f, 1.0, 0.0, NT)
sess = Session(); init(sess)
uval = run(sess, uout)
```
We can compare the exact solution and the numerical solution and they are nearly the same.

We can also measure the convergence order in terms of mean square error (MSE)
| $\alpha=0.2$ | Rate | $\alpha=0.5$ | Rate | $\alpha=0.8$ | Rate |
| ------------ | ---- | ------------ | ---- | ------------ | ---- |
| 7.1536E-04 | | 3.4743E-03 | | 1.1166E-02 | |
| 2.2060E-04 | 1.70 | 1.2516E-03 | 1.47 | 4.7746E-03 | 1.23 |
| 6.7502E-05 | 1.71 | 4.5055E-04 | 1.47 | 2.0644E-03 | 1.21 |
| 2.0484E-05 | 1.72 | 1.6170E-04 | 1.48 | 8.9669E-04 | 1.20 |
| 6.1669E-06 | 1.73 | 5.7842E-05 | 1.48 | 3.9018E-04 | 1.20 |
Now we consider **inverse modeling**. In this problem, $\alpha$ is not known and we can only observe part of the solution. For example, we only know $u(1.0)$, the initial condition, and the source function, and we want to estimate $\alpha$. We can mark $\alpha$ as an independent variable for optimization by using the `Variable` keyword.
```julia
using FwiFlow
Γ = x->exp(tf.math.lgamma(convert_to_tensor(x)))
function u(t, α)
t^β
end
function du(t, α)
Γ(β+1)/Γ(β-α+1)* t^(β-α)
end
β = 8.0
α = Variable(0.5)
NT = 200
f = (t, u, θ) -> du(t, 0.8)
uout = time_fractional_op(α, f, 1.0, 0.0, NT)
loss = (uout[NT+1] - 1.0)^2
sess = Session(); init(sess)
opt = ScipyOptimizerInterface(loss, method="L-BFGS-B",options=Dict("maxiter"=> 100, "ftol"=>1e-12, "gtol"=>1e-12), var_to_bounds=Dict(α=>(0.0,1.0)))
ScipyOptimizerMinimize(sess, opt)
@show run(sess, α)
```
We have the estimation
```math
\alpha = 0.8036829691152347
```
| FwiFlow | https://github.com/lidongzh/FwiFlow.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 603 | using ServiceSolicitation
using Documenter
makedocs(;
modules=[ServiceSolicitation],
authors="Chris Elrod <elrodc@gmail.com> and contributors",
repo="https://github.com/"chriselrod"/ServiceSolicitation.jl/blob/{commit}{path}#L{line}",
sitename="ServiceSolicitation.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://"chriselrod".github.io/ServiceSolicitation.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/"chriselrod"/ServiceSolicitation.jl",
)
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 651 | module ServiceSolicitation
using ThreadingUtilities, VectorizationBase
using VectorizationBase: num_threads, cache_linesize
using StrideArraysCore: object_and_preserve
using Requires
export batch, num_threads
include("request.jl")
include("batch.jl")
include("unsignediterator.jl")
# reset_workers!() = WORKERS[] = UInt128((1 << (num_threads() - 1)) - 1)
reset_workers!() = WORKERS[] = (one(UInt128) << (Threads.nthreads() - 1)) - one(UInt128) # mark the nthreads()-1 worker threads (all but the calling thread) as available
function __init__()
reset_workers!()
resize!(STATES, num_threads() * cache_linesize())
STATES .= 0x00
@require ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" include("forwarddiff.jl")
end
end
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 7631 | struct BatchClosure{F, A, B}
f::F
end
function (b::BatchClosure{F,A,B})(p::Ptr{UInt}) where {F,A,B}
(offset, args) = ThreadingUtilities.load(p, A, 1)
(offset, start) = ThreadingUtilities.load(p, UInt, offset)
(offset, stop ) = ThreadingUtilities.load(p, UInt, offset)
b.f(args, start+one(UInt), stop)
B && free_local_threads!()
nothing
end
@inline function batch_closure(f::F, args::A, ::Val{B}) where {F,A,B}
bc = BatchClosure{F,A,B}(f)
@cfunction($bc, Cvoid, (Ptr{UInt},))
end
@inline function setup_batch!(p::Ptr{UInt}, fptr::Ptr{Cvoid}, argtup, start::UInt, stop::UInt)
offset = ThreadingUtilities.store!(p, fptr, 0)
offset = ThreadingUtilities.store!(p, argtup, offset)
offset = ThreadingUtilities.store!(p, start, offset)
offset = ThreadingUtilities.store!(p, stop, offset)
nothing
end
@inline function launch_batched_thread!(cfunc, tid, argtup, start, stop)
p = ThreadingUtilities.taskpointer(tid)
fptr = Base.unsafe_convert(Ptr{Cvoid}, cfunc)
while true
if ThreadingUtilities._atomic_cas_cmp!(p, ThreadingUtilities.SPIN, ThreadingUtilities.STUP)
setup_batch!(p, fptr, argtup, start, stop)
@assert ThreadingUtilities._atomic_cas_cmp!(p, ThreadingUtilities.STUP, ThreadingUtilities.TASK)
return
elseif ThreadingUtilities._atomic_cas_cmp!(p, ThreadingUtilities.WAIT, ThreadingUtilities.STUP)
setup_batch!(p, fptr, argtup, start, stop)
@assert ThreadingUtilities._atomic_cas_cmp!(p, ThreadingUtilities.STUP, ThreadingUtilities.LOCK)
ThreadingUtilities.wake_thread!(tid % UInt)
return
end
ThreadingUtilities.pause()
end
end
function add_var!(q, argtup, gcpres, ::Type{T}, argtupname, gcpresname, k) where {T}
parg_k = Symbol(argtupname, :_, k)
garg_k = Symbol(gcpresname, :_, k)
if T <: Tuple
push!(q.args, Expr(:(=), parg_k, Expr(:ref, argtupname, k)))
t = Expr(:tuple)
for (j,p) ∈ enumerate(T.parameters)
add_var!(q, t, gcpres, p, parg_k, garg_k, j)
end
push!(argtup.args, t)
else
push!(q.args, Expr(:(=), Expr(:tuple, parg_k, garg_k), Expr(:call, :object_and_preserve, Expr(:ref, argtupname, k))))
push!(argtup.args, parg_k)
push!(gcpres.args, garg_k)
end
end
@generated function _batch_no_reserve(
f!::F, threadmask, nthread, torelease, Nr, Nd, ulen, args::Vararg{Any,K}
) where {F,K}
q = quote
threads = UnsignedIteratorEarlyStop(threadmask, nthread)
Ndp = Nd + one(Nd)
end
block = quote
start = zero(UInt)
i = 0x00000000
tid = 0x00000000
tm = mask(threads)
while true
VectorizationBase.assume(tm ≠ zero(tm))
tz = trailing_zeros(tm) % UInt32
stop = start + ifelse(i < Nr, Ndp, Nd)
i += 0x00000001
tz += 0x00000001
tid += tz
tm >>>= tz
launch_batched_thread!(cfunc, tid, argtup, start, stop)
start = stop
i == nthread && break
end
f!(argtup, start, ulen)
end
gcpr = Expr(:gc_preserve, block, :cfunc)
argt = Expr(:tuple)
for k ∈ 1:K
add_var!(q, argt, gcpr, args[k], :args, :gcp, k)
end
push!(q.args, :(argtup = $argt), :(cfunc = batch_closure(f!, argtup, Val{false}())), gcpr)
final = quote
tm = mask(threads)
tid = 0x00000000
while true
VectorizationBase.assume(tm ≠ zero(tm))
tz = trailing_zeros(tm) % UInt32
tz += 0x00000001
tm >>>= tz
tid += tz
ThreadingUtilities.__wait(tid)
iszero(tm) && break
end
free_threads!(torelease)
nothing
end
push!(q.args, final)
q
end
@generated function _batch_reserve(
f!::F, threadmask, nthread, unused_threads, torelease, Nr, Nd, ulen, args::Vararg{Any,K}
) where {F,K}
q = quote
nbatch = nthread + one(nthread)
threads = UnsignedIteratorEarlyStop(threadmask, nthread)
Ndp = Nd + one(Nd)
nres_per = Base.udiv_int(unused_threads, nbatch)
nres_rem = unused_threads - nres_per * nbatch
nres_prr = nres_per + one(nres_per)
end
block = quote
start = zero(UInt)
i = zero(nres_rem)
tid = 0x00000000
tm = mask(threads)
wait_mask = zero(worker_type())
while true
VectorizationBase.assume(tm ≠ zero(tm))
tz = trailing_zeros(tm) % UInt32
reserve = ifelse(i < nres_rem, nres_prr, nres_per)
tz += 0x00000001
stop = start + ifelse(i < Nr, Ndp, Nd)
tid += tz
tid_to_launch = tid
wait_mask |= (one(wait_mask) << (tid - one(tid)))
tm >>>= tz
reserved_threads = zero(worker_type())
for _ ∈ 1:reserve
VectorizationBase.assume(tm ≠ zero(tm))
tz = trailing_zeros(tm) % UInt32
tz += 0x00000001
tid += tz
tm >>>= tz
reserved_threads |= (one(reserve) << (tid - one(tid)))
end
reserve_threads!(tid_to_launch, reserved_threads)
launch_batched_thread!(cfunc, tid_to_launch, argtup, start, stop)
i += one(i)
start = stop
i == nthread && break
end
f!(argtup, start, ulen)
end
gcpr = Expr(:gc_preserve, block, :cfunc)
argt = Expr(:tuple)
for k ∈ 1:K
add_var!(q, argt, gcpr, args[k], :args, :gcp, k)
end
push!(q.args, :(argtup = $argt), :(cfunc = batch_closure(f!, argtup, Val{true}())), gcpr)
final = quote
tid = 0x00000000
while true
VectorizationBase.assume(wait_mask ≠ zero(wait_mask))
tz = (trailing_zeros(wait_mask) % UInt32) + 0x00000001
wait_mask >>>= tz
tid += tz
ThreadingUtilities.__wait(tid)
iszero(wait_mask) && break
end
nothing
end
push!(q.args, final)
q
end
function batch(
f!::F, (len, nbatches)::Tuple{Vararg{Integer,2}}, args::Vararg{Any,K}
) where {F,K}
myid = Base.Threads.threadid()
threads, torelease = request_threads(myid, nbatches - one(nbatches))
nthread = length(threads)
ulen = len % UInt
if iszero(nthread)
f!(args, one(UInt), ulen)
return
end
nbatch = nthread + one(nthread)
Nd = Base.udiv_int(ulen, nbatch % UInt) # reasonable for `ulen` to be ≥ 2^32
Nr = ulen - Nd * nbatch
_batch_no_reserve(f!, mask(threads), nthread, torelease, Nr, Nd, ulen, args...)
end
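# Usage sketch (illustrative, not part of the package; mirrors test/runtests.jl).
# `f!` receives the argument tuple plus inclusive (UInt) start/stop indices:
#
#   function scale_range!((dest, src), start, stop)
#       @inbounds for i in start:stop
#           dest[i] = 2 * src[i]
#       end
#   end
#   x = rand(100); y = similar(x);
#   batch(scale_range!, (length(x), num_threads()), y, x)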
function batch(
f!::F, (len, nbatches, reserve_per_worker)::Tuple{Vararg{Integer,3}}, args::Vararg{Any,K}
) where {F,K}
myid = Base.Threads.threadid()
requested_threads = reserve_per_worker*nbatches
threads, torelease = request_threads(myid, requested_threads - one(nbatches))
nthread = length(threads)
ulen = len % UInt
if iszero(nthread)
f!(args, one(UInt), ulen)
return
end
total_threads = nthread + one(nthread)
nbatch = min(total_threads, nbatches % UInt32)
Nd = Base.udiv_int(ulen, nbatch % UInt)
Nr = ulen - Nd * nbatch
unused_threads = total_threads - nbatch
if iszero(unused_threads)
_batch_no_reserve(f!, mask(threads), nthread, torelease, Nr, Nd, ulen, args...)
else
_batch_reserve(f!, mask(threads), nthread, unused_threads, torelease, Nr, Nd, ulen, args...)
end
nothing
end
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 1576 |
import .ForwardDiff
const DiffResult = ForwardDiff.DiffResults.DiffResult
function cld_fast(n, d)
x = Base.udiv_int(n, d)
x += n != d*x
end
store_val!(r::Base.RefValue{T}, x::T) where {T} = (r[] = x)
store_val!(r::Ptr{T}, x::T) where {T} = Base.unsafe_store!(r, x)
function evaluate_chunks!(f::F, (r,Δx,x), start, stop, ::ForwardDiff.Chunk{C}) where {F,C}
cfg = ForwardDiff.GradientConfig(f, x, ForwardDiff.Chunk{C}(), nothing)
N = length(x)
last_stop = cld_fast(N, C)
is_last = last_stop == stop
stop -= is_last
xdual = cfg.duals
seeds = cfg.seeds
ForwardDiff.seed!(xdual, x)
for c ∈ start:stop
i = (c-1) * C + 1
ForwardDiff.seed!(xdual, x, i, seeds)
ydual = f(xdual)
ForwardDiff.extract_gradient_chunk!(Nothing, Δx, ydual, i, C)
ForwardDiff.seed!(xdual, x, i)
end
if is_last
lastchunksize = C + N - last_stop*C
lastchunkindex = N - lastchunksize + 1
ForwardDiff.seed!(xdual, x, lastchunkindex, seeds, lastchunksize)
_ydual = f(xdual)
ForwardDiff.extract_gradient_chunk!(Nothing, Δx, _ydual, lastchunkindex, lastchunksize)
store_val!(r, ForwardDiff.value(_ydual))
end
end
function threaded_gradient!(f::F, Δx::AbstractVector, x::AbstractVector, ::ForwardDiff.Chunk{C}) where {F,C}
N = length(x)
d = cld_fast(N, C)
r = Ref{eltype(Δx)}()
batch((d,min(d,VectorizationBase.num_threads())), r, Δx, x) do rΔxx,start,stop
evaluate_chunks!(f, rΔxx, start, stop, ForwardDiff.Chunk{C}())
end
r[]
end
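# Usage sketch (illustrative; this file is only loaded once ForwardDiff is,
# via the @require hook in ServiceSolicitation.__init__):
#
#   f(x) = sum(abs2, x)
#   x = rand(64); Δx = similar(x);
#   fx = threaded_gradient!(f, Δx, x, ForwardDiff.Chunk{8}())
#   # fx == f(x), and Δx now holds the gradient of f at x.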
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 3131 | worker_size() = VectorizationBase.nextpow2(num_threads())
worker_type() = VectorizationBase.mask_type(worker_size())
worker_pointer_type() = Ptr{worker_type()}
const WORKERS = Ref(zero(UInt128)) # 0 = unavailable, 1 = available
const STATES = UInt8[]
worker_pointer() = Base.unsafe_convert(worker_pointer_type(), pointer_from_objref(WORKERS))
function reserved(id)
p = Base.unsafe_convert(worker_pointer_type(), STATES)
vload(p, VectorizationBase.lazymul(cache_linesize(), id))
end
function reserve_threads!(id, reserve)
p = Base.unsafe_convert(worker_pointer_type(), STATES)
vstore!(p, VectorizationBase.lazymul(cache_linesize(), id), reserve)
nothing
end
function free_threads!(freed_threads)
ThreadingUtilities._atomic_or!(worker_pointer(), freed_threads)
nothing
end
function free_local_threads!()
tid = Base.Threads.threadid()
tmask = one(worker_type()) << (tid - one(tid))
r = reserved(tid) | tmask
reserve_threads!(tid, zero(worker_type()))
free_threads!(r)
end
function _request_threads(id::UInt32, num_requested::UInt32)
reserved_threads = reserved(id)
reserved_count = count_ones(reserved_threads)
no_threads = zero(worker_type())
# reserved_count ≥ num_requested && return reserved_threads, no_threads
reserved_count ≥ num_requested && return UnsignedIteratorEarlyStop(reserved_threads, num_requested), no_threads
# to get more, we xchng, setting all to `0`
# then see which we need, and free those we aren't using.
wp = worker_pointer()
_all_threads = all_threads = ThreadingUtilities._atomic_xchg!(wp, no_threads)
additional_threads = count_ones(all_threads)
# num_requested === StaticInt{-1}() && return reserved_threads, all_threads
num_requested === StaticInt{-1}() && return UnsignedIteratorEarlyStop(reserved_threads | all_threads), all_threads
excess = additional_threads + reserved_count - num_requested
# signed(excess) ≤ 0 && return reserved_threads, all_threads
signed(excess) ≤ 0 && return UnsignedIteratorEarlyStop(reserved_threads | all_threads), all_threads
# we need to return the `excess` to the pool.
lz = leading_zeros(all_threads) % UInt32
# i = 8
while true
# start by trying to trim off excess from lz
lz += excess%UInt32
m = (one(worker_type()) << (UInt32(worker_size()) - lz)) - one(worker_type())
masked = (all_threads & m) ⊻ all_threads
excess -= count_ones(masked)
all_threads &= (~masked)
# @show bitstring(masked), count_ones(masked), bitstring(unused_threads), excess, lz, bitstring(all_threads)
excess == 0 && break
# i -= 1
# @assert i > 0
end
ThreadingUtilities._atomic_store!(wp, _all_threads & (~all_threads))
return UnsignedIteratorEarlyStop(reserved_threads | all_threads, num_requested), all_threads
end
function request_threads(id, num_requested)
_request_threads(id % UInt32, num_requested % UInt32)
end
reserved_threads(id) = UnsignedIteratorEarlyStop(reserved(id))
reserved_threads(id, count) = UnsignedIteratorEarlyStop(reserved(id), count % UInt32)
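# Usage sketch (illustrative): request worker threads, hand work to them via
# ThreadingUtilities (see launch_batched_thread! in batch.jl), then return
# them to the pool.
#
#   threads, torelease = request_threads(Base.Threads.threadid(), 4)
#   for (i, tid) in threads
#       # launch a task on thread `tid` here
#   end
#   free_threads!(torelease)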
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 3358 |
struct UnsignedIterator{U}
u::U
end
Base.IteratorSize(::Type{<:UnsignedIterator}) = Base.HasShape{1}()
Base.IteratorEltype(::Type{<:UnsignedIterator}) = Base.HasEltype()
Base.eltype(::UnsignedIterator) = UInt32
Base.length(u::UnsignedIterator) = count_ones(u.u)
Base.size(u::UnsignedIterator) = (count_ones(u.u),)
# @inline function Base.iterate(u::UnsignedIterator, uu = u.u)
# tz = trailing_zeros(uu) % UInt32
# # tz ≥ 0x00000020 && return nothing
# tz > 0x0000001f && return nothing
# uu ⊻= (0x00000001 << tz)
# tz, uu
# end
@inline function Base.iterate(u::UnsignedIterator, (i,uu) = (0x00000000,u.u))
tz = trailing_zeros(uu) % UInt32
tz == 0x00000020 && return nothing
i += tz
tz += 0x00000001
uu >>>= tz
(i, (i+0x00000001,uu))
end
"""
UnsignedIteratorEarlyStop(thread_mask[, num_threads = count_ones(thread_mask)])
Iterator, returning `(i,t) = Tuple{UInt32,UInt32}`, where `i` iterates from `1,2,...,num_threads`, and `t` gives the threadids to call `ThreadingUtilities.taskpointer` with.
Unfortunately, codegen is suboptimal when this is used in the ergonomic `for (i,tid) ∈ thread_iterator` fashion. If you want to microoptimize, you'd get better performance from a pattern like:
```julia
function sumk(u,l = count_ones(u) % UInt32)
uu = ServiceSolicitation.UnsignedIteratorEarlyStop(u,l)
s = zero(UInt32); state = ServiceSolicitation.initial_state(uu)
while true
iter = iterate(uu, state)
iter === nothing && break
(i,t),state = iter
s += t
end
s
end
```
This iterator will iterate at least once; it's important to check and exit early with a single threaded version.
"""
struct UnsignedIteratorEarlyStop{U}
u::U
i::UInt32
end
UnsignedIteratorEarlyStop(u) = UnsignedIteratorEarlyStop(u, count_ones(u) % UInt32)
UnsignedIteratorEarlyStop(u, i) = UnsignedIteratorEarlyStop(u, i % UInt32)
mask(u::UnsignedIteratorEarlyStop) = getfield(u, :u)
Base.IteratorSize(::Type{<:UnsignedIteratorEarlyStop}) = Base.HasShape{1}()
Base.IteratorEltype(::Type{<:UnsignedIteratorEarlyStop}) = Base.HasEltype()
Base.eltype(::UnsignedIteratorEarlyStop) = Tuple{UInt32,UInt32}
Base.length(u::UnsignedIteratorEarlyStop) = getfield(u, :i)
Base.size(u::UnsignedIteratorEarlyStop) = (getfield(u, :i),)
function initial_state(u::UnsignedIteratorEarlyStop)
# LLVM should figure this out if you check?
VectorizationBase.assume(0x00000000 ≠ u.i)
(0x00000000,0x00000000,u.u)
end
@inline function Base.iterate(u::UnsignedIteratorEarlyStop, (i,j,uu) = initial_state(u))
# VectorizationBase.assume(u.i ≤ 0x00000020)
# VectorizationBase.assume(j ≤ count_ones(uu))
# iszero(j) && return nothing
j == u.i && return nothing
VectorizationBase.assume(uu ≠ zero(uu))
j += 0x00000001
tz = trailing_zeros(uu) % UInt32
tz += 0x00000001
i += tz
uu >>>= tz
((j,i), (i,j,uu))
end
function Base.show(io::IO, u::UnsignedIteratorEarlyStop)
l = length(u)
s = Vector{Int32}(undef, l)
if l > 0
s .= last.(u)
end
print("Thread ($l) Iterator: U", s)
end
# @inline function Base.iterate(u::UnsignedIteratorEarlyStop, (i,uu) = (0xffffffff,u.u))
# tz = trailing_zeros(uu) % UInt32
# tz == 0x00000020 && return nothing
# tz += 0x00000001
# i += tz
# uu >>>= tz
# (i, (i,uu))
# end
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | code | 814 | using ServiceSolicitation
# using Aqua
using Test
@testset "ServiceSolicitation.jl" begin
# Aqua.test_all(ServiceSolicitation)
function rangemap!(f::F, allargs, start, stop) where {F}
dest = first(allargs)
args = Base.tail(allargs)
@inbounds @simd for i ∈ start:stop
dest[i] = f(Base.unsafe_getindex.(args, i)...)
end
nothing
end
function tmap!(f::F, args::Vararg{AbstractArray,K}) where {K,F}
dest = first(args)
N = length(dest)
mapfun! = (allargs, start, stop) -> rangemap!(f, allargs, start, stop)
batch(mapfun!, (N, num_threads()), args...)
dest
end
x = rand(1024); y = rand(length(x)); z = similar(x);
foo(x,y) = exp(-0.5abs2(x-y))
@test tmap!(foo, z, x, y) ≈ foo.(x, y)
end
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | docs | 575 | # ServiceSolicitation
[](https://chriselrod.github.io/ServiceSolicitation.jl/stable)
[](https://chriselrod.github.io/ServiceSolicitation.jl/dev)
[](https://github.com/chriselrod/ServiceSolicitation.jl/actions)
[](https://codecov.io/gh/chriselrod/ServiceSolicitation.jl)
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.0 | 1dae89038e5ec31538bb5e9cc24be87e4fbf5e8f | docs | 137 | ```@meta
CurrentModule = ServiceSolicitation
```
# ServiceSolicitation
```@index
```
```@autodocs
Modules = [ServiceSolicitation]
```
| ServiceSolicitation | https://github.com/chriselrod/ServiceSolicitation.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 514 | using ITensorGaussianMPS
using Documenter
DocMeta.setdocmeta!(
ITensorGaussianMPS, :DocTestSetup, :(using ITensorGaussianMPS); recursive=true
)
makedocs(;
modules=[ITensorGaussianMPS],
authors="ITensor developers",
sitename="ITensorGaussianMPS.jl",
format=Documenter.HTML(;
canonical="https://ITensor.github.io/ITensorGaussianMPS.jl",
edit_link="main",
assets=String[],
),
pages=["Home" => "index.md"],
)
deploydocs(; repo="github.com/ITensor/ITensorGaussianMPS.jl", devbranch="main")
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 2680 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
# Electrons
# Half filling
N = 100
Nf_up = N ÷ 2
Nf_dn = N ÷ 2
Nf = Nf_up + Nf_dn
@show N, Nf
# Maximum MPS link dimension
_maxlinkdim = 200
@show _maxlinkdim
# DMRG cutoff
_cutoff = 1e-8
# Hopping
t = 1.0
# Electron-electron on-site interaction
U = 1.0
@show t, U
# Make the free fermion Hamiltonian for the up spins
os_up = OpSum()
for n in 1:(N - 1)
os_up .+= -t, "Cdagup", n, "Cup", n + 1
os_up .+= -t, "Cdagup", n + 1, "Cup", n
end
# Make the free fermion Hamiltonian for the down spins
os_dn = OpSum()
for n in 1:(N - 1)
os_dn .+= -t, "Cdagdn", n, "Cdn", n + 1
os_dn .+= -t, "Cdagdn", n + 1, "Cdn", n
end
# Hopping Hamiltonians for the up and down spins
h_up = hopping_hamiltonian(os_up)
h_dn = hopping_hamiltonian(os_dn)
# Get the Slater determinant
Φ_up = slater_determinant_matrix(h_up, Nf_up)
Φ_dn = slater_determinant_matrix(h_dn, Nf_dn)
# Create an MPS from the Slater determinants.
s = siteinds("Electron", N; conserve_qns=true)
println("Making free fermion starting MPS")
@time ψ0 = slater_determinant_to_mps(
s, Φ_up, Φ_dn; eigval_cutoff=1e-4, cutoff=_cutoff, maxdim=_maxlinkdim
)
@show maxlinkdim(ψ0)
# The total non-interacting part of the Hamiltonian
os_noninteracting = OpSum()
for n in 1:(N - 1)
os_noninteracting .+= -t, "Cdagup", n, "Cup", n + 1
os_noninteracting .+= -t, "Cdagdn", n, "Cdn", n + 1
os_noninteracting .+= -t, "Cdagup", n + 1, "Cup", n
os_noninteracting .+= -t, "Cdagdn", n + 1, "Cdn", n
end
H_noninteracting = MPO(os_noninteracting, s)
@show inner(ψ0', H_noninteracting, ψ0)
@show sum(diag(Φ_up' * h_up * Φ_up)) + sum(diag(Φ_dn' * h_dn * Φ_dn))
# The total interacting Hamiltonian
os_interacting = OpSum()
for n in 1:(N - 1)
os_interacting .+= -t, "Cdagup", n, "Cup", n + 1
os_interacting .+= -t, "Cdagdn", n, "Cdn", n + 1
os_interacting .+= -t, "Cdagup", n + 1, "Cup", n
os_interacting .+= -t, "Cdagdn", n + 1, "Cdn", n
end
for n in 1:N
os_interacting .+= U, "Nupdn", n
end
H = MPO(os_interacting, s)
#@show norm(prod(H) - prod(H_noninteracting))
# Random starting state
ψr = random_mps(s, n -> n ≤ Nf ? (isodd(n) ? "↑" : "↓") : "0")
println("Random starting state energy")
@show flux(ψr)
@show inner(ψr', H, ψr)
println()
println("Free fermion starting state energy")
@show flux(ψ0)
@show inner(ψ0', H, ψ0)
println("\nStart from random product state")
er, ψ̃r = @time dmrg(H, ψr; nsweeps=10, maxdim=[10, 20, _maxlinkdim], cutoff=_cutoff)
@show er
@show flux(ψ̃r)
println("\nStart from free fermion state")
e0, ψ̃0 = @time dmrg(H, ψ0; nsweeps=5, maxdim=_maxlinkdim, cutoff=_cutoff)
@show e0
@show flux(ψ̃0)
nothing
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 2353 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
# Electrons
# Half filling
Nx, Ny = 6, 3
N = Nx * Ny
Nf = N
Nf_up = N ÷ 2
Nf_dn = N - Nf_up
@show Nx, Ny
@show N, Nf
# Maximum MPS link dimension
_maxlinkdim = 1_000
@show _maxlinkdim
# DMRG cutoff
_cutoff = 1e-5
# Hopping
t = 1.0
# Electron-electron on-site interaction
U = 4.0
@show t, U
lattice = square_lattice(Nx, Ny; yperiodic=true)
# Make the free fermion Hamiltonian for the up spins
os_up = OpSum()
for b in lattice
os_up .+= -t, "Cdagup", b.s1, "Cup", b.s2
os_up .+= -t, "Cdagup", b.s2, "Cup", b.s1
end
# Make the free fermion Hamiltonian for the down spins
os_dn = OpSum()
for b in lattice
os_dn .+= -t, "Cdagdn", b.s1, "Cdn", b.s2
os_dn .+= -t, "Cdagdn", b.s2, "Cdn", b.s1
end
# Hopping Hamiltonians for the up and down spins
h_up = hopping_hamiltonian(os_up)
h_dn = hopping_hamiltonian(os_dn)
# Get the Slater determinant
Φ_up = slater_determinant_matrix(h_up, Nf_up)
Φ_dn = slater_determinant_matrix(h_dn, Nf_dn)
println()
println("Exact free fermion energy: ", tr(Φ_up'h_up * Φ_up) + tr(Φ_dn'h_dn * Φ_dn))
println()
# Create an MPS from the Slater determinants.
s = siteinds("Electron", N; conserve_qns=true)
println("Making free fermion starting MPS")
@time ψ0 = slater_determinant_to_mps(
s, Φ_up, Φ_dn; eigval_cutoff=1e-4, cutoff=_cutoff, maxdim=_maxlinkdim
)
@show maxlinkdim(ψ0)
os = OpSum()
for b in lattice
os .+= -t, "Cdagup", b.s1, "Cup", b.s2
os .+= -t, "Cdagdn", b.s1, "Cdn", b.s2
os .+= -t, "Cdagup", b.s2, "Cup", b.s1
os .+= -t, "Cdagdn", b.s2, "Cdn", b.s1
end
for n in 1:N
os .+= U, "Nupdn", n
end
H = MPO(os, s)
# Random starting state
ψr = random_mps(s, n -> n ≤ Nf ? (isodd(n) ? "↑" : "↓") : "0")
println("\nRandom starting state energy")
@show flux(ψr)
@show inner(ψr', H, ψr)
println("\nFree fermion MPS starting state energy")
@show flux(ψ0)
@show inner(ψ0', H, ψ0)
println("\nStart from random product state")
dmrg_kwargs = (;
nsweeps=10,
maxdim=[10, 20, 100, 200, _maxlinkdim],
cutoff=_cutoff,
noise=[1e-7, 1e-8, 1e-10, 0.0],
)
@time dmrg(H, ψr; dmrg_kwargs...)
println("\nStart from free fermion state")
@time dmrg(H, ψ0; nsweeps=10, maxdim=_maxlinkdim, cutoff=_cutoff)
nothing
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 1850 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
# Half filling
N = 20
Nf_up = N ÷ 2
Nf_dn = N ÷ 2
Nf = Nf_up + Nf_dn
@show N, Nf
# Maximum MPS link dimension
_maxlinkdim = 50
@show _maxlinkdim
# DMRG cutoff
_cutoff = 1e-8
# Hopping
t = 1.0
# Electron-electron on-site interaction
U = 1.0
@show t, U
# Make the free fermion Hamiltonian for the up spins
os_up = OpSum()
for n in 1:(N - 1)
os_up .+= -t, "Cdagup", n, "Cup", n + 1
os_up .+= -t, "Cdagup", n + 1, "Cup", n
end
# Make the free fermion Hamiltonian for the down spins
os_dn = OpSum()
for n in 1:(N - 1)
os_dn .+= -t, "Cdagdn", n, "Cdn", n + 1
os_dn .+= -t, "Cdagdn", n + 1, "Cdn", n
end
# Hopping Hamiltonians for the up and down spins
h_up = hopping_hamiltonian(os_up)
h_dn = hopping_hamiltonian(os_dn)
# Get the Slater determinant
Φ_up = slater_determinant_matrix(h_up, Nf_up)
Φ_dn = slater_determinant_matrix(h_dn, Nf_dn)
# Create an MPS from the Slater determinants.
s = siteinds("Electron", N; conserve_qns=true)
println("Making free fermion starting MPS")
@time ψ0 = slater_determinant_to_mps(
s, Φ_up, Φ_dn; eigval_cutoff=1e-4, cutoff=_cutoff, maxdim=_maxlinkdim
)
@show maxlinkdim(ψ0)
# The total interacting Hamiltonian
os = os_up + os_dn
for n in 1:N
os .+= U, "Nupdn", n
end
H = MPO(os, s)
println("Free fermion starting state energy")
@show flux(ψ0)
@show inner(ψ0', H, ψ0)
println("\nStart from free fermion state")
e, ψ = @time dmrg(H, ψ0; nsweeps=5, maxdim=_maxlinkdim, cutoff=_cutoff)
@show e
@show flux(ψ)
using ITensorGaussianMPS: correlation_matrix_to_gmps, correlation_matrix_to_mps, entropy
Λ_up = correlation_matrix(ψ, "Cdagup", "Cup")
Λ_dn = correlation_matrix(ψ, "Cdagdn", "Cdn")
ψ̃0 = correlation_matrix_to_mps(s, Λ_up, Λ_dn; eigval_cutoff=1e-2, maxblocksize=4)
@show inner(ψ̃0, ψ)
@show inner(ψ̃0', H, ψ̃0)
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 1441 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
# Half filling
N = 50
Nf = N ÷ 2
@show N, Nf
# Maximum MPS link dimension
_maxlinkdim = 100
@show _maxlinkdim
# DMRG cutoff
_cutoff = 1e-12
# Hopping
t = 1.0
# Electron-electron on-site interaction
U = 1.0
@show t, U
# Free fermion Hamiltonian
os = OpSum()
for n in 1:(N - 1)
os .+= -t, "Cdag", n, "C", n + 1
os .+= -t, "Cdag", n + 1, "C", n
end
# Hopping Hamiltonian with N spinless fermions
h = hopping_hamiltonian(os)
# Get the Slater determinant
Φ = slater_determinant_matrix(h, Nf)
# Create an MPS for the free fermion ground state
s = siteinds("Fermion", N; conserve_qns=true)
println("Making free fermion starting MPS")
@time ψ0 = slater_determinant_to_mps(
s, Φ; eigval_cutoff=1e-4, cutoff=_cutoff, maxdim=_maxlinkdim
)
@show maxlinkdim(ψ0)
# Make an interacting Hamiltonian
for n in 1:(N - 1)
os .+= U, "N", n, "N", n + 1
end
H = MPO(os, s)
# Random starting state
ψr = random_mps(s, n -> n ≤ Nf ? "1" : "0")
println("\nRandom state starting energy")
@show flux(ψr)
@show inner(ψr', H, ψr)
println("\nFree fermion starting energy")
@show flux(ψ0)
@show inner(ψ0', H, ψ0)
println("\nRun dmrg with random starting state")
@time dmrg(H, ψr; nsweeps=20, maxdim=[10, 20, 40, _maxlinkdim], cutoff=_cutoff)
println("\nRun dmrg with free fermion starting state")
@time dmrg(H, ψ0; nsweeps=4, maxdim=_maxlinkdim, cutoff=_cutoff)
nothing
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 2364 | # This script shows a minimal example of the GMPS-MPS conversion
# of the ground state of quadratic fermionic Hamiltonian with pairing terms.
using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
ITensors.disable_contraction_sequence_optimization()
let
N = 8
sites = siteinds("Fermion", N; conserve_qns=false, conserve_nfparity=true)
_maxlinkdim = 100
# DMRG cutoff
_cutoff = 1e-13
# Hopping
t = -1.0
# Electron-electron on-site interaction
U = 0.0
# Pairing
Delta = 1.00
@show t, U, Delta
# Free fermion Hamiltonian
os_h = OpSum()
for n in 1:(N - 1)
os_h .+= -t, "Cdag", n, "C", n + 1
os_h .+= -t, "Cdag", n + 1, "C", n
end
os_p = OpSum()
for n in 1:(N - 1)
os_p .+= Delta / 2.0, "Cdag", n, "Cdag", n + 1
os_p .+= -Delta / 2.0, "Cdag", n + 1, "Cdag", n
os_p .+= -Delta / 2.0, "C", n, "C", n + 1
os_p .+= Delta / 2.0, "C", n + 1, "C", n
end
os = os_h + os_p
h = quadratic_hamiltonian(os)
hb = ITensorGaussianMPS.reverse_interleave(h)
# Rebuild the quadratic terms from the blocked matrix `hb` (illustrative only; the MPO `H` below is built directly from os_h + os_p)
os_new = OpSum()
for i in 1:N
for j in 1:N
if abs(hb[i, j]) > 1e-8
os_new .+= -t, "Cdag", i, "C", j
os_new .+= t, "C", i, "Cdag", j
os_new .+= Delta / 2.0 * sign(i - j), "C", i, "C", j
os_new .+= -Delta / 2.0 * sign(i - j), "Cdag", i, "Cdag", j
end
end
end
H = ITensors.MPO(os_h + os_p, sites)
#Get Ground state
@assert ishermitian(h)
e = eigvals(Hermitian(h))
@show e
E, V = eigen_gaussian(h)
@show sum(E[1:N])
Φ = V[:, 1:N]
c = real.(conj(Φ) * transpose(Φ))
#Get (G)MPS
psi = ITensorGaussianMPS.correlation_matrix_to_mps(
sites, c; eigval_cutoff=1e-10, maxblocksize=14, cutoff=1e-11
)
@show eltype(psi[1])
cdagc = correlation_matrix(psi, "C", "Cdag")
cc = correlation_matrix(psi, "C", "C")
println("\nFree fermion starting energy")
@show flux(psi)
@show inner(psi', H, psi)
println("\nRun dmrg with GMPS starting state")
_, psidmrg = dmrg(H, psi; nsweeps=12, maxdim=[10, 20, 40, _maxlinkdim], cutoff=_cutoff)
cdagc_dmrg = correlation_matrix(psidmrg, "C", "Cdag")
cc_dmrg = correlation_matrix(psidmrg, "C", "C")
@show norm(cdagc_dmrg - cdagc)
@show norm(cc_dmrg - cc)
@show inner(psidmrg', H, psidmrg)
@show(abs(inner(psidmrg, psi)))
#return
end
nothing
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 486 | module ITensorGaussianMPS
using Compat
using ITensors
using ITensors.NDTensors
using LinearAlgebra
import LinearAlgebra: Givens
export slater_determinant_to_mps,
correlation_matrix_to_mps,
slater_determinant_to_gmps,
correlation_matrix_to_gmps,
hopping_hamiltonian,
hopping_operator,
quadratic_hamiltonian,
quadratic_operator,
slater_determinant_matrix,
slater_determinant_to_gmera,
eigen_gaussian
include("gmps.jl")
include("gmera.jl")
include("linalg.jl")
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 5794 | # brick wall scanning for a single MERA layer with treatment to the tail
function correlation_matrix_to_gmps_brickwall_tailed(
Λ0::AbstractMatrix{ElT},
inds::Vector{Int};
eigval_cutoff::Float64=1e-8,
maxblocksize::Int=size(Λ0, 1),
) where {ElT<:Number}
Λ = Hermitian(Λ0)
N = size(Λ, 1)
V = Circuit{ElT}([])
#ns = Vector{real(ElT)}(undef, 2*N)
err_tot = 0.0
indsnext = Int[]
relinds = Int[]
for i in 1:N
if i % 2 == 0
append!(indsnext, inds[i])
append!(relinds, i)
continue
end
blocksize = 0
n = 0.0
err = 0.0
p = Int[]
uB = 0.0
# find the block whose lowest eigenvalue is within tolerance
for blocksize in 1:maxblocksize
j = min(i + blocksize, N)
ΛB = deepcopy(Λ[i:j, i:j]) #@view Λ[i:j, i:j] # \LambdaB is still part of Lambda
nB, uB = eigen(Hermitian(ΛB))
# sort by -(n * log(n) + (1 - n) * log(1 - n)) in ascending order
p = sortperm(nB; by=entropy)
n = nB[p[1]]
err = min(n, 1 - n)
err ≤ eigval_cutoff && break
end
# keep the node if the err cannot be reduced
if i + maxblocksize >= N && err > eigval_cutoff
append!(indsnext, inds[i])
append!(relinds, i)
continue
end
err_tot += err
#ns[i] = n # eigenvalue
v = deepcopy(uB[:, p[1]]) #@view uB[:, p[1]] # eigenvector of the correlation matrix
g, _ = givens_rotations(v) # convert eigenvector into givens rotation
shift!(g, i - 1) # shift rotation location
# In-place version of:
# V = g * V
lmul!(g, V)
#@show g
Λ = Hermitian(g * Λ * g') #isolate current site i
end
return Λ, V, indsnext, relinds
end
# shift givens rotation indexes according to the inds
function shiftByInds!(G::Circuit, inds::Vector{Int})
for (n, g) in enumerate(G.rotations)
G.rotations[n] = Givens(inds[g.i1], inds[g.i2], g.c, g.s)
end
return G
end
"""
correlation_matrix_to_gmera(Λ::AbstractMatrix{ElT}; eigval_cutoff::Float64 = 1e-8, maxblocksize::Int = size(Λ0, 1))
Diagonalize a correlation matrix through MERA layers,
output gates and eigenvalues of the correlation matrix
"""
# Combine gates for each MERA layer
function correlation_matrix_to_gmera(
Λ0::AbstractMatrix{ElT}; eigval_cutoff::Float64=1e-8, maxblocksize::Int=size(Λ0, 1)
) where {ElT<:Number}
Λ = Hermitian(Λ0)
N = size(Λ, 1)
Nnew = N - 1
inds = collect(1:N)
V = Circuit{ElT}([])
Λtemp = deepcopy(Λ)
layer = 0 # layer label of MERA
while N > Nnew # conditioned on the reduction of nodes
N = Nnew
# indsnext: next layer indexes with original matrix labels
# relinds: next layer indexes with labels from the last layer
Λr, C, indsnext, relinds = correlation_matrix_to_gmps_brickwall_tailed(
Λtemp, inds; eigval_cutoff=eigval_cutoff, maxblocksize=maxblocksize
)
shiftByInds!(C, inds) # shift the index back to the original matrix
inds = indsnext
Λtemp = deepcopy(Λr[relinds, relinds]) # project to even site for next layer based on keeping indexes relinds
Nnew = size(Λtemp, 1)
lmul!(C, V) # add vector of givens rotation C into the larger vector V
#V = C * V
layer += 1
#Λ = ITensors.Hermitian(C * Λ * C')
end
# gmps for the final layer
Λr, C = correlation_matrix_to_gmps(
Λtemp; eigval_cutoff=eigval_cutoff, maxblocksize=maxblocksize
)
shiftByInds!(C, inds)
lmul!(C, V)
Λ = V * Λ0 * V'
ns = real.(diag(Λ))
return ns, V
end
# output the MERA gates and eigenvalues of correlation matrix from WF
function slater_determinant_to_gmera(Φ::AbstractMatrix; kwargs...)
return correlation_matrix_to_gmera(conj(Φ) * transpose(Φ); kwargs...)
end
# ouput the MPS based on the MERA gates
function correlation_matrix_to_mera(
s::Vector{<:Index},
Λ::AbstractMatrix;
eigval_cutoff::Float64=1e-8,
maxblocksize::Int=size(Λ, 1),
kwargs...,
)
@assert size(Λ, 1) == size(Λ, 2)
ns, C = correlation_matrix_to_gmera(
Λ; eigval_cutoff=eigval_cutoff, maxblocksize=maxblocksize
)
if all(hastags("Fermion"), s)
U = [ITensor(s, g) for g in reverse(C.rotations)]
ψ = MPS(s, n -> round(Int, ns[n]) + 1, U; kwargs...)
elseif all(hastags("Electron"), s)
isodd(length(s)) && error(
"For Electron type, must have even number of sites of alternating up and down spins.",
)
N = length(s)
if isspinful(s)
error(
"correlation_matrix_to_mps(Λ::AbstractMatrix) currently only supports spinless Fermions or Electrons that do not conserve Sz. Use correlation_matrix_to_mps(Λ_up::AbstractMatrix, Λ_dn::AbstractMatrix) to use spinful Fermions/Electrons.",
)
else
sf = siteinds("Fermion", 2 * N; conserve_qns=true)
end
U = [ITensor(sf, g) for g in reverse(C.rotations)]
ψf = MPS(sf, n -> round(Int, ns[n]) + 1, U; kwargs...)
ψ = MPS(N)
for n in 1:N
i, j = 2 * n - 1, 2 * n
C = combiner(sf[i], sf[j])
c = combinedind(C)
ψ[n] = ψf[i] * ψf[j] * C
ψ[n] *= δ(dag(c), s[n])
end
else
error("All sites must be Fermion or Electron type.")
end
return ψ
end
function slater_determinant_to_mera(s::Vector{<:Index}, Φ::AbstractMatrix; kwargs...)
return correlation_matrix_to_mera(s, conj(Φ) * transpose(Φ); kwargs...)
end
# G the circuit from the gates, N is the total number of sites
function UmatFromGates(G::Circuit, N::Int)
U = Matrix{Float64}(I, N, N)
n = size(G.rotations, 1)
for k in 1:n
rot = G.rotations[k]
U = rot * U
end
return U
end
# compute the energy of the state based on the gates
function EfromGates(H::Matrix{<:Number}, U::Matrix{<:Number})
Htemp = U * H * U'
Etot = 0
N = size(U, 1)
for i in 1:N
if Htemp[i, i] < 0.0
Etot += Htemp[i, i]
end
end
return Etot
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 34443 | import Base: sortperm, size, length, eltype, conj, transpose, copy, *
using ITensors: alias
using ITensorMPS: ITensorMPS
abstract type AbstractSymmetry end
struct ConservesNfParity{T} <: AbstractSymmetry
data::T
end
struct ConservesNf{T} <: AbstractSymmetry
data::T
end
#
# Single particle von Neumann entanglement entropy
#
function entropy(n::Number)
(n ≤ 0 || n ≥ 1) && return 0
return -(n * log(n) + (1 - n) * log(1 - n))
end
entropy(ns::Vector{Float64}) = sum(entropy, ns)
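# For example, a half-filled mode has maximal single-particle entropy,
# entropy(0.5) ≈ log(2), while entropy(0.0) == entropy(1.0) == 0
# (a fully occupied or empty mode is unentangled).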
#
# Linear Algebra tools
#
"""
frobenius_distance(M1::AbstractMatrix, M2::AbstractMatrix)
Computes the Frobenius distance `√tr((M1-M2)'*(M1-M2))`.
"""
function frobenius_distance(M1::AbstractMatrix, M2::AbstractMatrix)
return sqrt(abs(tr(M1'M1) + tr(M2'M2) - tr(M1'M2) - tr(M2'M1)))
end
#
# Rotations
#
struct Circuit{T} <: LinearAlgebra.AbstractRotation{T}
rotations::Vector{Givens{T}}
end
Base.adjoint(R::Circuit) = Adjoint(R)
function Base.show(io::IO, ::MIME"text/plain", C::Circuit{T}) where {T}
print(io, "Circuit{$T}:\n")
return show(io, "text/plain", C.rotations)
end
function Base.copy(aR::Adjoint{<:Any,Circuit{T}}) where {T}
return Circuit{T}(reverse!([r' for r in aR.parent.rotations]))
end
function LinearAlgebra.lmul!(G::Givens, R::Circuit)
push!(R.rotations, G)
return R
end
function LinearAlgebra.lmul!(R::Circuit, A::AbstractArray)
@inbounds for i in 1:length(R.rotations)
lmul!(R.rotations[i], A)
end
return A
end
function LinearAlgebra.rmul!(A::AbstractMatrix, adjR::Adjoint{<:Any,<:Circuit})
R = adjR.parent
@inbounds for i in 1:length(R.rotations)
rmul!(A, adjoint(R.rotations[i]))
end
return A
end
Base.:*(g1::Circuit, g2::Circuit) = Circuit(vcat(g2.rotations, g1.rotations))
LinearAlgebra.lmul!(g1::Circuit, g2::Circuit) = append!(g2.rotations, g1.rotations)
Base.:*(A::Circuit, B::Union{<:Hermitian,<:Diagonal}) = A * convert(Matrix, B)
Base.:*(A::Adjoint{<:Any,<:Circuit}, B::Hermitian) = copy(A) * convert(Matrix, B)
Base.:*(A::Adjoint{<:Any,<:Circuit}, B::Diagonal) = copy(A) * convert(Matrix, B)
function Base.:*(A::Adjoint{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:Circuit})
return convert(Matrix, A) * B
end
function LinearAlgebra.rmul!(A::AbstractMatrix, R::Circuit)
@inbounds for i in reverse(1:length(R.rotations))
rmul!(A, R.rotations[i])
end
return A
end
function Base.:*(A::AbstractMatrix, B::Adjoint{<:Any,<:Circuit})
AB = copy(A)
rmul!(AB, B)
return AB
end
function replace!(f, G::Circuit)
for i in eachindex(G.rotations)
G.rotations[i] = f(G.rotations[i])
end
return G
end
function replace_indices!(f, G::Circuit)
return replace!(g -> Givens(f(g.i1), f(g.i2), g.c, g.s), G)
end
function shift!(G::Circuit, i::Int)
return replace_indices!(j -> j + i, G)
end
function scale!(G::Circuit, i::Int)
return replace_indices!(j -> j * i, G)
end
function conj!(G::Circuit)
return replace!(g -> Givens(g.i1, g.i2, g.c, g.s'), G)
end
ngates(G::Circuit) = length(G.rotations)
#
# Free fermion tools
#
is_creation_operator(o::Op) = is_creation_operator(ITensors.name(o))
is_creation_operator(o::String) = is_creation_operator(OpName(o))
is_creation_operator(::OpName) = false
is_creation_operator(::OpName"Cdag") = true
is_creation_operator(::OpName"Cdagup") = true
is_creation_operator(::OpName"Cdagdn") = true
is_creation_operator(::OpName"c†") = true
is_creation_operator(::OpName"c†↑") = true
is_creation_operator(::OpName"c†↓") = true
is_annihilation_operator(o::Op) = is_annihilation_operator(ITensors.name(o))
is_annihilation_operator(o::String) = is_annihilation_operator(OpName(o))
is_annihilation_operator(::OpName) = false
is_annihilation_operator(::OpName"C") = true
is_annihilation_operator(::OpName"Cup") = true
is_annihilation_operator(::OpName"Cdn") = true
is_annihilation_operator(::OpName"c") = true
is_annihilation_operator(::OpName"c↑") = true
is_annihilation_operator(::OpName"c↓") = true
expand_to_ladder_operators(o::Op) = expand_to_ladder_operators(ITensors.name(o))
expand_to_ladder_operators(o::String) = expand_to_ladder_operators(OpName(o))
expand_to_ladder_operators(opname::OpName) = opname # By default does nothing
expand_to_ladder_operators(::OpName"N") = ["Cdag", "C"]
expand_to_ladder_operators(::OpName"Nup") = ["Cdagup", "Cup"]
expand_to_ladder_operators(::OpName"Ndn") = ["Cdagdn", "Cdn"]
expand_to_ladder_operators(opname::OpName"n↑") = expand_to_ladder_operators(alias(opname))
expand_to_ladder_operators(opname::OpName"n↓") = expand_to_ladder_operators(alias(opname))
#interlaced_hamiltonian(h::AbstractMatrix) = h
#blocked_hamiltonian(h::AbstractMatrix) = Hermitian(reverse_interleave(Matrix(h)))
function quadrant(term)
if is_creation_operator(term[1]) && is_annihilation_operator(term[2])
q = (2, 2)
elseif is_annihilation_operator(term[1]) && is_creation_operator(term[2])
q = (1, 1)
elseif is_annihilation_operator(term[1]) && is_annihilation_operator(term[2])
q = (1, 2)
elseif is_creation_operator(term[1]) && is_creation_operator(term[2])
q = (2, 1)
else
error("Unknown quadratic hopping term: $term")
end
return q
end
function single_to_quadratic(term)
site = ITensors.site(term[1])
new_ops = expand_to_ladder_operators(term[1])
return coefficient(term) * Op(new_ops[1], site) * Op(new_ops[2], site)
end
function quadratic_operator(os::OpSum)
os = deepcopy(os)
#os = ITensorMPS.sorteachterm(os, sites)
os = ITensorMPS.sortmergeterms(os)
nterms = length(os)
coefs = Vector{Number}(undef, nterms)
sites = Vector{Tuple{Int,Int}}(undef, nterms)
quads = Vector{Tuple{Int,Int}}(undef, nterms)
nsites = 0
# detect terms and size of lattice
for n in 1:nterms
term = os[n]
#@show term
#@show term.coef
coef = isreal(coefficient(term)) ? real(coefficient(term)) : coefficient(term)
coefs[n] = coef
term = (length(term) == 1) ? single_to_quadratic(term) : term
length(term) ≠ 2 && error("Must create hopping Hamiltonian from quadratic Hamiltonian")
quads[n] = quadrant(term)
sites[n] = ntuple(n -> ITensors.site(term[n]), Val(2))
nsites = max(nsites, maximum(sites[n]))
end
# detect coefficient type
coef_type = mapreduce(typeof, promote_type, coefs)
ElT = isreal(coefs) ? real(coef_type) : coef_type
# fill Hamiltonian matrix with elements
h = zeros(ElT, 2 * nsites, 2 * nsites)
other_quad = i -> i == 2 ? 1 : 2
for n in 1:nterms
quad = quads[n]
offsets = nsites .* (quad .- 1)
if quad[1] != quad[2]
h[(sites[n] .+ offsets)...] += coefs[n]
else
h[(sites[n] .+ offsets)...] += 0.5 * coefs[n]
other_offsets = nsites .* (other_quad.(quad) .- 1)
h[(sites[n] .+ other_offsets)...] += -0.5 * conj(coefs[n])
end
end
return interleave(h)
end
function quadratic_operator(os_up::OpSum, os_dn::OpSum)
h_up = quadratic_operator(os_up)
h_dn = quadratic_operator(os_dn)
@assert size(h_up) == size(h_dn)
N = size(h_up, 1)
h = zeros(eltype(h_up), (2 * N, 2 * N))
n = div(N, 2)
# interlace the blocks of both quadratic hamiltonians
h_up = reverse_interleave(Matrix(h_up))
h_dn = reverse_interleave(Matrix(h_dn))
# super-quadrant (1,1)
h[1:2:N, 1:2:N] = h_up[1:n, 1:n]
h[2:2:N, 2:2:N] = h_dn[1:n, 1:n]
# super-quadrant (2,1)
h[(N + 1):2:(2 * N), 1:2:N] = h_up[(n + 1):(2 * n), 1:n]
h[(N + 2):2:(2 * N), 2:2:N] = h_dn[(n + 1):(2 * n), 1:n]
# super-quadrant (2,2)
h[(N + 1):2:(2 * N), (N + 1):2:(2 * N)] = h_up[(n + 1):N, (n + 1):N]
h[(N + 2):2:(2 * N), (N + 2):2:(2 * N)] = h_dn[(n + 1):N, (n + 1):N]
# super-quadrant (1,2)
h[1:2:N, (N + 1):2:(2 * N)] = h_up[1:n, (n + 1):(2 * n)]
h[2:2:N, (N + 2):2:(2 * N)] = h_dn[1:n, (n + 1):(2 * n)]
#convert from blocked to interlaced format. Odd base-rows are spin-up, even are spin-down.
return interleave(h)
end
quadratic_hamiltonian(os::OpSum) = Hermitian(quadratic_operator(os))
function quadratic_hamiltonian(os_up::OpSum, os_dn::OpSum)
return Hermitian(quadratic_operator(os_up, os_dn))
end
function hopping_operator(os::OpSum; drop_pairing_terms_tol=nothing)
# convert to blocked format
h = reverse_interleave(Matrix(quadratic_hamiltonian(os)))
# check that offdiagonal blocks are 0
N = div(size(h, 1), 2)
if isnothing(drop_pairing_terms_tol)
drop_pairing_terms_tol = eps(real(eltype(h)))
end
if !all(abs.(h[1:N, (N + 1):(2 * N)]) .< drop_pairing_terms_tol)
error("Trying to convert hamiltonian with pairing terms to hopping hamiltonian!")
end
return 2 .* h[(N + 1):(2 * N), (N + 1):(2 * N)]
end
# Make a combined hopping Hamiltonian for spin up and down
function hopping_operator(os_up::OpSum, os_dn::OpSum; drop_pairing_terms_tol=nothing)
# convert to blocked format
h = reverse_interleave(Matrix(quadratic_hamiltonian(os_up, os_dn)))
# check that offdiagonal blocks are 0
N = div(size(h, 1), 2)
if isnothing(drop_pairing_terms_tol)
drop_pairing_terms_tol = eps(real(eltype(h)))
end
if !all(abs.(h[1:N, (N + 1):(2 * N)]) .< drop_pairing_terms_tol)
error("Trying to convert hamiltonian with pairing terms to hopping hamiltonian!")
end
return 2 .* h[(N + 1):(2 * N), (N + 1):(2 * N)]
end
function hopping_hamiltonian(os::OpSum; drop_pairing_terms_tol=nothing)
return Hermitian(hopping_operator(os; drop_pairing_terms_tol))
end
function hopping_hamiltonian(os_up::OpSum, os_dn::OpSum; drop_pairing_terms_tol=nothing)
return Hermitian(hopping_operator(os_up, os_dn; drop_pairing_terms_tol))
end
function slater_determinant_matrix(h::AbstractMatrix, Nf::Int)
_, u = eigen(h)
return u[:, 1:Nf]
end
#
# Correlation matrix diagonalization
#
struct Boguliobov
u::Givens
end
set_data(::ConservesNf, x) = ConservesNf(x)
set_data(::ConservesNfParity, x) = ConservesNfParity(x)
site_stride(::ConservesNf) = 1
site_stride(::ConservesNfParity) = 2
copy(A::T) where {T<:AbstractSymmetry} = T(copy(A.data))
size(A::T) where {T<:AbstractSymmetry} = size(A.data)
size(A::T, dim::Int) where {T<:AbstractSymmetry} = size(A.data, dim)
length(A::T) where {T<:AbstractSymmetry} = length(A.data)
eltype(A::T) where {T<:AbstractSymmetry} = eltype(A.data)
Hermitian(A::T) where {T<:AbstractSymmetry} = set_data(A, Hermitian(A.data))
conj(A::T) where {T<:AbstractSymmetry} = set_data(A, conj(A.data))
transpose(A::T) where {T<:AbstractSymmetry} = set_data(A, transpose(A.data))
"""
givens_rotations(v::AbstractVector)
For a vector `v`, return the `length(v)-1`
Givens rotations `g` and the norm `r` such that:
```julia
g * v ≈ r * [n == 1 ? 1 : 0 for n in 1:length(v)]
```
"""
function givens_rotations(v::AbstractVector{ElT}) where {ElT}
N = length(v)
gs = Circuit{ElT}([])
r = v[1]
for n in reverse(1:(N - 1))
g, r = givens(v, n, n + 1)
v = g * v
lmul!(g, gs)
end
return gs, r
end
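# For example (a sketch):
#   g, r = givens_rotations([0.6, 0.8])
#   # r ≈ 1.0 and g * [0.6, 0.8] ≈ [1.0, 0.0]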
givens_rotations(v::ConservesNf) = return givens_rotations(v.data)
"""
givens_rotations(_v0::ConservesNfParity)
For a vector
```julia
v=_v0.data
```
from a fermionic Gaussian state, return the `4*length(v)-1`
real Givens/Boguliobov rotations `g` and the norm `r` such that:
```julia
g * v ≈ r * [n == 2 ? 1 : 0 for n in 1:length(v)]
```
with `g` being composed of diagonal rotation aligning pairs
of complex numbers in the complex plane, and Givens/Boguliobov Rotations
with real arguments only, acting on the interlaced single-particle space of
annihilation and creation operator coefficients.
"""
function givens_rotations(_v0::ConservesNfParity;)
v0 = _v0.data
N = div(length(v0), 2)
if N == 1
error(
"Givens rotation on 2-element vector not allowed for ConservesNfParity-type calculations. This should have been caught elsewhere.",
)
end
ElT = eltype(v0)
gs = Circuit{ElT}([])
v = copy(v0)
# detect if v is actually number-conserving because only defined in terms of annihilation operators
if norm(v[2:2:end]) < 10 * eps(real(ElT))
r = v[1]
gsca, _ = givens_rotations(v[1:2:end])
replace_indices!(i -> 2 * i - 1, gsca)
gscc = Circuit(copy(gsca.rotations))
replace_indices!(i -> i + 1, gsca)
conj!(gscc)
gsc = interleave(gscc, gsca)
LinearAlgebra.lmul!(gsc, gs)
return gs, r
end
r = v[2]
# Given's rotations from creation-operator coefficients
gscc, _ = givens_rotations(v[2:2:end])
replace_indices!(i -> 2 * i, gscc)
gsca = Circuit(copy(gscc.rotations))
replace_indices!(i -> i - 1, gsca)
conj!(gsca)
gsc = interleave(gscc, gsca)
LinearAlgebra.lmul!(gsc, gs)
# detect if v is actually number-conserving because only defined in terms of creation operators
if norm(v[1:2:end]) < 10 * eps(real(ElT))
return gs, r
end
v = gsc * v
# if we get here, v was actually number-non-conserving, so proceed
# Given's rotations from annihilation-operator coefficients
gsaa, _ = givens_rotations(v[3:2:end])
replace_indices!(i -> 2 * i + 1, gsaa)
gsac = Circuit(copy(gsaa.rotations))
replace_indices!(i -> i + 1, gsac)
conj!(gsac)
gsa = interleave(gsac, gsaa)
v = gsa * v
LinearAlgebra.lmul!(gsa, gs)
# Boguliobov rotation for remaining Bell pair
g1, r = givens(v, 2, 3)
g2 = Givens(1, 4, g1.c, g1.s')
v = g1 * v
v = g2 * v #should have no effect
LinearAlgebra.lmul!(g2, gs)
LinearAlgebra.lmul!(g1, gs)
return gs, r
end
function maybe_drop_pairing_correlations(Λ0::AbstractMatrix{ElT}) where {ElT<:Number}
Λblocked = reverse_interleave(Λ0)
N = div(size(Λblocked, 1), 2)
if all(x -> abs(x) <= 10 * eps(real(eltype(Λ0))), @view Λblocked[1:N, (N + 1):end])
return ConservesNf(Λblocked[(N + 1):end, (N + 1):end])
#return ConservesNfParity(Λ0)
else
return ConservesNfParity(Λ0)
end
end
maybe_drop_pairing_correlations(Λ0::ConservesNf) = Λ0
function maybe_drop_pairing_correlations(Λ0::ConservesNfParity)
return maybe_drop_pairing_correlations(Λ0.data)
end
sortperm(x::ConservesNf) = sortperm(x.data; by=entropy)
sortperm(x::ConservesNfParity) = sortperm(x.data)
function get_error(x::ConservesNf, perm)
n = x.data[first(perm)]
return min(abs(n), abs(1 - n))
end
function get_error(x::ConservesNfParity, perm)
n1 = x.data[first(perm)]
n2 = x.data[last(perm)]
return min(abs(n1), abs(n2))
end
function isolate_subblock_eig(
_Λ::AbstractSymmetry,
startind::Int;
eigval_cutoff::Float64=1e-8,
minblocksize::Int=2,
maxblocksize::Int=div(size(_Λ.data, 1), 1),
)
blocksize = 0
err = 0.0
p = Int[]
ElT = eltype(_Λ.data)
nB = eltype(_Λ.data)[]
uB = 0.0
ΛB = 0.0
i = startind
Λ = _Λ.data
N = size(Λ, 1)
for blocksize in minblocksize:maxblocksize
j = min(site_stride(_Λ) * i + site_stride(_Λ) * blocksize, N)
ΛB = @view Λ[
(site_stride(_Λ) * i + 1 - site_stride(_Λ)):j,
(site_stride(_Λ) * i + 1 - site_stride(_Λ)):j,
]
if typeof(_Λ) <: ConservesNf
nB, uB = eigen(Hermitian(ΛB))
elseif typeof(_Λ) <: ConservesNfParity
m = similar(ΛB)
m .= ΛB
_ΛB = maybe_drop_pairing_correlations(m)
if typeof(_ΛB) <: ConservesNf
nB, uB = eigen(Hermitian(_ΛB.data))
#promote basis uB to non-conserving frame
N2 = size(nB, 1) * 2
nuB = zeros(eltype(uB), N2, N2)
nuB[2:2:N2, 1:2:N2] .= uB
nuB[1:2:N2, 2:2:N2] .= conj(uB)
uB = nuB
nB = interleave(1 .- nB, nB)
elseif typeof(_ΛB) <: ConservesNfParity
nB, uB = ITensorGaussianMPS.diag_corr_gaussian(Hermitian(ΛB))
#try to rotate to real
uB = ITensorGaussianMPS.make_real_if_possible(uB, nB .- 0.5)
if ElT <: Real
if norm(imag.(uB)) <= sqrt(eps(real(ElT)))
uB = real(real.(uB))
else
error(
"Not able to construct real fermionic basis for input correlation matrix. Exiting, retry with complex input type.",
)
end
end
end
end
nB = set_data(_Λ, abs.(nB))
p = sortperm(nB)
err = get_error(nB, p)
err ≤ eigval_cutoff && break
end
v = set_data(_Λ, @view uB[:, p[1]])
return v, nB, err
end
function set_occupations!(_ns::ConservesNf, _nB::ConservesNf, _v::ConservesNf, i::Int)
p = Int[]
ns = _ns.data
nB = _nB.data
p = sortperm(nB; by=entropy)
ns[i] = nB[p[1]]
return nothing
end
function set_occupations!(
_ns::ConservesNfParity, _nB::ConservesNfParity, _v::ConservesNfParity, i::Int
)
p = Int[]
ns = _ns.data
nB = _nB.data
v = _v.data
p = sortperm(nB)
n1 = nB[first(p)]
n2 = nB[last(p)]
ns[2 * i] = n1
ns[2 * i - 1] = n2
if length(v) == 2
# For some reason the last occupations are reversed, so take care of this conditionally here.
# ToDo: Fix this in givens_rotations instead.
if abs(v[1]) >= abs(v[2])
ns[2 * i] = n2
ns[2 * i - 1] = n1
end
end
return nothing
end
stop_gmps_sweep(v::ConservesNfParity) = length(v.data) == 2 ? true : false
stop_gmps_sweep(v::ConservesNf) = false
"""
correlation_matrix_to_gmps(Λ::AbstractMatrix{ElT}; eigval_cutoff::Float64 = 1e-8, maxblocksize::Int = size(Λ0, 1))
Diagonalize a correlation matrix, returning the eigenvalues and eigenvectors
stored in a structure as a set of Givens rotations.
The correlation matrix should be Hermitian, and will be treated as if it is
Hermitian in the algorithm.
If the correlation matrix contains pairing correlations (the `ConservesNfParity` case), it is assumed to be in interlaced format:
Λ[2*i-1:2*i,2*j-1:2*j]=[[c_i c_j^dagger , c_i c_j ], [c_i^dagger c_j^dagger,c_i^dagger c_j]]
Note that this may not be the standard choice in the literature, but it is internally
consistent with the format of single-particle Hamiltonians and Slater determinants employed.
"""
# Default to ConservesNf if no further arguments are given for backward compatibility
function correlation_matrix_to_gmps(
Λ0::AbstractMatrix;
eigval_cutoff::Float64=1e-8,
minblocksize::Int=1,
maxblocksize::Int=size(Λ0, 1),
)
return correlation_matrix_to_gmps(
ConservesNf(Λ0);
eigval_cutoff=eigval_cutoff,
minblocksize=minblocksize,
maxblocksize=maxblocksize,
)
end
function correlation_matrix_to_gmps(
Λ0::AbstractMatrix,
Nsites::Int;
eigval_cutoff::Float64=1e-8,
minblocksize::Int=1,
maxblocksize::Int=size(Λ0, 1),
)
return correlation_matrix_to_gmps(
symmetric_correlation_matrix(Λ0, Nsites);
eigval_cutoff=eigval_cutoff,
minblocksize=minblocksize,
maxblocksize=maxblocksize,
)
end
function correlation_matrix_to_gmps(
Λ0::T;
eigval_cutoff::Float64=1e-8,
minblocksize::Int=1,
maxblocksize::Int=size(Λ0.data, 1),
) where {T<:AbstractSymmetry}
ElT = eltype(Λ0.data)
Λ = T(Hermitian(copy((Λ0.data))))
V = Circuit{ElT}([])
err_tot = 0.0 ### FIXME: keep track of error below
N = size(Λ.data, 1)
#ns = set_data(Λ, Vector{real(ElT)}(undef, N))
for i in 1:div(N, site_stride(Λ))
err = 0.0
v, _, err = isolate_subblock_eig(
Λ,
i;
eigval_cutoff=eigval_cutoff,
minblocksize=minblocksize,
maxblocksize=maxblocksize,
)
if stop_gmps_sweep(v)
break
end
g, _ = givens_rotations(v)
replace_indices!(j -> j + site_stride(Λ) * (i - 1), g)
# In-place version of:
# V = g * V
LinearAlgebra.lmul!(g, V)
Λ = set_data(Λ, Hermitian(g * Matrix(Λ.data) * g'))
end
###return non-wrapped occupations for backwards compatibility
ns = diag(Λ.data)
@assert norm(imag.(ns)) <= sqrt(eps(real(ElT)))
return real(real.(ns)), V
end
function (x::AbstractSymmetry * y::AbstractSymmetry)
if !has_same_symmetry(x, y)
error("Can't multiply two symmetric objects with different symmetries.")
end
return set_data(x, x.data * y.data)
end
has_same_symmetry(::AbstractSymmetry, ::AbstractSymmetry) = false
has_same_symmetry(::ConservesNf, ::ConservesNf) = true
has_same_symmetry(::ConservesNfParity, ::ConservesNfParity) = true
function slater_determinant_to_gmps(Φ::AbstractMatrix, N::Int; kwargs...)
return correlation_matrix_to_gmps(conj(Φ) * transpose(Φ), N; kwargs...)
end
function slater_determinant_to_gmps(Φ::AbstractMatrix; kwargs...)
return correlation_matrix_to_gmps(ConservesNf(conj(Φ) * transpose(Φ)); kwargs...)
end
function slater_determinant_to_gmps(Φ::AbstractSymmetry; kwargs...)
return correlation_matrix_to_gmps(conj(Φ) * transpose(Φ); kwargs...)
end
#
# Turn circuit into MPS
#
function ITensors.ITensor(u::Givens, s1::Index, s2::Index)
U = [
1 0 0 0
0 u.c u.s 0
0 -conj(u.s) u.c 0
0 0 0 1
]
return itensor(U, s2', s1', dag(s2), dag(s1))
end
function ITensors.ITensor(b::Boguliobov, s1::Index, s2::Index)
U = [
b.u.c 0 0 conj(b.u.s)
0 1 0 0
0 0 1 0
-(b.u.s) 0 0 b.u.c
]
return itensor(U, s2', s1', dag(s2), dag(s1))
end
function ITensors.ITensor(sites::Vector{<:Index}, u::ConservesNfParity{Givens{T}}) where {T}
s1 = sites[div(u.data.i1 + 1, 2)]
s2 = sites[div(u.data.i2 + 1, 2)]
if abs(u.data.i2 - u.data.i1) % 2 == 1
return ITensor(Boguliobov(u.data), s1, s2)
else
return ITensor(u.data, s1, s2)
end
end
function ITensors.ITensor(sites::Vector{<:Index}, u::ConservesNf{Givens{T}}) where {T}
return ITensor(sites, u.data)
end
function ITensors.ITensor(sites::Vector{<:Index}, u::Givens)
s1 = sites[u.i1]
s2 = sites[u.i2]
return ITensor(u, s1, s2)
end
function itensors(s::Vector{<:Index}, C::ConservesNfParity)
U = [ITensor(s, set_data(C, g)) for g in reverse(C.data.rotations[begin:2:end])]
return U
end
function itensors(sites::Vector{<:Index}, C::ConservesNf)
return itensors(sites, C.data)
end
function itensors(s::Vector{<:Index}, C::Circuit)
U = [ITensor(s, g) for g in reverse(C.rotations)]
return U
end
"""
MPS(sites::Vector{<:Index}, state, U::Vector{<:ITensor}; kwargs...)
Return an MPS with site indices `sites` by applying the circuit `U` to the starting state `state`.
"""
function ITensors.MPS(sites::Vector{<:Index}, state, U::Vector{<:ITensor}; kwargs...)
return apply(U, productMPS(sites, state); kwargs...)
end
function isspinful(s::Index)
!hasqns(s) && return false
return all(qnblock -> ITensors.hasname(qn(qnblock), ITensors.QNVal("Sz", 0)), space(s))
end
function isspinful(s::Vector{<:Index})
return all(isspinful, s)
end
# Checks whether correlation matrix is of a number conserving system and returns AbstractSymmetry wrapper around correlation matrix
# ToDo: Behaviour assumes (spinless) "Fermion" sites, handle "Electron" sites separately for cases where correlation matrix does not factorize.
function symmetric_correlation_matrix(Λ::AbstractMatrix, s::Vector{<:Index})
if length(s) == size(Λ, 1)
return ConservesNf(Λ)
elseif 2 * length(s) == size(Λ, 1)
return ConservesNfParity(Λ)
else
return error("Correlation matrix is not the same or twice the length of sites")
end
end
function symmetric_correlation_matrix(Λ::AbstractMatrix, Nsites::Int)
if Nsites == size(Λ, 1)
return ConservesNf(Λ)
elseif 2 * Nsites == size(Λ, 1)
return ConservesNfParity(Λ)
else
return error("Correlation matrix is not the same or twice the length of sites")
end
end
function correlation_matrix_to_mps(
s::Vector{<:Index},
Λ::AbstractMatrix;
eigval_cutoff::Float64=1e-8,
maxblocksize::Int=size(Λ, 1),
minblocksize::Int=1,
kwargs...,
)
return correlation_matrix_to_mps(
s,
symmetric_correlation_matrix(Λ, s);
eigval_cutoff=eigval_cutoff,
maxblocksize=maxblocksize,
minblocksize=minblocksize,
kwargs...,
)
end
"""
correlation_matrix_to_mps(s::Vector{<:Index}, Λ::AbstractMatrix{ElT};
eigval_cutoff::Float64 = 1e-8,
maxblocksize::Int = size(Λ, 1),
kwargs...)
Return an approximation to the state represented by the correlation matrix as
a matrix product state (MPS).
The correlation matrix should correspond to a pure state (have all eigenvalues
of zero or one).
"""
function correlation_matrix_to_mps(
s::Vector{<:Index},
Λ0::AbstractSymmetry;
eigval_cutoff::Float64=1e-8,
maxblocksize::Int=size(Λ0.data, 1),
minblocksize::Int=1,
kwargs...,
)
MPS_Elt = eltype(Λ0.data)
Λ = maybe_drop_pairing_correlations(Λ0)
@assert size(Λ.data, 1) == size(Λ.data, 2)
ns, C = correlation_matrix_to_gmps(
Λ; eigval_cutoff=eigval_cutoff, minblocksize=minblocksize, maxblocksize=maxblocksize
)
if all(hastags("Fermion"), s)
U = itensors(s, set_data(Λ, C))
ψ = MPS(MPS_Elt, s, n -> round(Int, ns[site_stride(Λ) * n]) + 1)
ψ = apply(U, ψ; kwargs...)
elseif all(hastags("Electron"), s)
# ToDo: This is not tested properly, Electron sitetype tests currently assume interface with two AbstractSymmetry (correlation matrix) arguments
# FIXME: isodd is not correct here, there shouldn't be any restrictions on the number of electronic sites.
isodd(length(s)) && error(
"For Electron type, must have even number of sites of alternating up and down spins.",
)
N = length(s)
if isspinful(s)
# FIXME: Can we lift this restriction now, at least for ConservesNf?
error(
"correlation_matrix_to_mps(Λ::AbstractMatrix) currently only supports spinless Fermions or Electrons that do not conserve Sz. Use correlation_matrix_to_mps(Λ_up::AbstractMatrix, Λ_dn::AbstractMatrix) to use spinful Fermions/Electrons.",
)
elseif typeof(Λ) <: ConservesNf
sf = siteinds("Fermion", 2 * N; conserve_qns=true)
elseif typeof(Λ) <: ConservesNfParity
# FIXME: Does this also break, even if it doesn't make use of identity blocks? To be safe, issue error.
error(
"ConservesNfParity and Electron site type currently not supported. Please use Fermion sites instead.",
)
sf = siteinds("Fermion", 2 * N; conserve_qns=false, conserve_nfparity=true)
end
U = itensors(sf, set_data(Λ, C))
    ψf = MPS(MPS_Elt, sf, n -> round(Int, ns[site_stride(Λ) * n]) + 1)
    ψf = apply(U, ψf; kwargs...)
    ψ = MPS(N)
for n in 1:N
i, j = 2 * n - 1, 2 * n
C = combiner(sf[i], sf[j])
c = combinedind(C)
ψ[n] = ψf[i] * ψf[j] * C
      ψ[n] *= δ(dag(c), s[n]) # NOTE: this back conversion to Electron will likely not work reliably for ConservesNfParity
end
else
error("All sites must be Fermion or Electron type.")
end
return ψ
end
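# Hedged usage sketch mirroring the package tests (cutoff values are
# illustrative): build an MPS directly from a free-fermion correlation matrix
# on "Fermion" sites.
#
#   using LinearAlgebra
#   N, Nf = 10, 5
#   h = Hermitian(diagm(1 => fill(-1.0, N - 1), -1 => fill(-1.0, N - 1)))
#   Φ = eigen(h).vectors[:, 1:Nf]
#   Λ = conj(Φ) * transpose(Φ)
#   s = siteinds("Fermion", N; conserve_qns=true)
#   ψ = correlation_matrix_to_mps(s, Λ; eigval_cutoff=1e-8, maxblocksize=4)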
"""
slater_determinant_to_mps(s::Vector{<:Index}, Φ::AbstractMatrix; kwargs...)
Given indices and matrix of orbitals representing a Slater determinant,
compute a matrix product state (MPS) approximately having the same correlation
matrices as this Slater determinant.
Optional keyword arguments:
* `eigval_cutoff::Float64=1E-8` - cutoff used to adaptively determine the block size (eigenvalues must be within this cutoff of 0 or 1 for their eigenvectors to be labeled as "inactive" orbitals)
* `maxblocksize::Int` - maximum block size used to compute inactive orbitals. Setting this to a smaller value can lead to faster running times and a smaller MPS bond dimension, though the accuracy may be lower.
"""
function slater_determinant_to_mps(s::Vector{<:Index}, Φ::AbstractMatrix; kwargs...)
return correlation_matrix_to_mps(s, conj(Φ) * transpose(Φ); kwargs...)
end
function slater_determinant_to_mps(s::Vector{<:Index}, Φ::AbstractSymmetry; kwargs...)
return correlation_matrix_to_mps(s, conj(Φ) * transpose(Φ); kwargs...)
end
function slater_determinant_to_mps(
s::Vector{<:Index}, Φ_up::AbstractMatrix, Φ_dn::AbstractMatrix; kwargs...
)
return correlation_matrix_to_mps(
s, conj(Φ_up) * transpose(Φ_up), conj(Φ_dn) * transpose(Φ_dn); kwargs...
)
end
function mapindex(f::Function, C::Circuit)
return Circuit(mapindex.(f, C.rotations))
end
function mapindex(f::Function, g::Givens)
return Givens(f(g.i1), f(g.i2), g.c, g.s)
end
function identity_blocks!(T::Tensor)
# FIXME: This is not generic logic. Only works reliably for QN subspace sizes = 1.
for b in nzblocks(T)
T[b] = Matrix{Float64}(I, dims(T[b]))
end
return T
end
# Creates an ITensor with the specified flux where each nonzero block
# is identity
# TODO: make a special constructor for this.
# TODO: Introduce a modified combiner which keeps track of state-ordering/spaces.
function identity_blocks_itensor(flux::QN, i1::Index, i2::Index)
A = ITensor(flux, i1, i2)
identity_blocks!(tensor(A))
return A
end
function identity_blocks_itensor(i1::ITensors.QNIndex, i2::ITensors.QNIndex)
return identity_blocks_itensor(QN(), i1, i2)
end
function identity_blocks_itensor(i1::Index, i2::Index)
M = Matrix{Float64}(I, dim(i1), dim(i2))
return itensor(M, i1, i2)
end
convert_union_nothing(v::Vector{T}) where {T} = convert(Vector{Union{T,Nothing}}, v)
function interleave(xs...)
nexts = convert_union_nothing(collect(Base.iterate.(xs)))
res = Union{eltype.(xs)...}[]
while any(!isnothing, nexts)
for ii in eachindex(nexts)
if !isnothing(nexts[ii])
(item, state) = nexts[ii]
push!(res, item)
nexts[ii] = iterate(xs[ii], state)
end
end
end
return res
end
function interleave(a::ConservesNf{T}, b::ConservesNf{T}) where {T}
return set_data(a, interleave(a.data, b.data))
end
function interleave(a::ConservesNfParity{T}, b::ConservesNfParity{T}) where {T}
return set_data(
a,
interleave(
interleave(a.data[1:2:end], b.data[1:2:end]),
interleave(a.data[2:2:end], b.data[2:2:end]),
),
)
end
function interleave(M::AbstractMatrix)
@assert size(M, 1) == size(M, 2)
n = div(size(M, 1), 2)
first_half = Vector(1:n)
second_half = Vector((n + 1):(2 * n))
interleaved_inds = interleave(first_half, second_half)
return M[interleaved_inds, interleaved_inds]
end
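# Index-order illustration: for a 2n×2n blocked matrix, `interleave` permutes
# rows and columns from (1, …, n, n+1, …, 2n) to (1, n+1, 2, n+2, …), and
# `reverse_interleave` below undoes it. For n = 2 the permutation is
# [1, 3, 2, 4], which is its own inverse:
#
#   M = Matrix(reshape(1.0:16.0, 4, 4))
#   interleave(M) == M[[1, 3, 2, 4], [1, 3, 2, 4]]
#   reverse_interleave(interleave(M)) == M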
function interleave(g1::Circuit, g2::Circuit)
return Circuit(interleave(g1.rotations, g2.rotations))
end
function reverse_interleave(M::AbstractMatrix)
@assert size(M, 1) == size(M, 2)
n = div(size(M, 1), 2)
first_half = Vector(1:n)
second_half = Vector((n + 1):(2 * n))
interleaved_inds = interleave(first_half, second_half)
ordered_inds = sortperm(interleaved_inds)
return M[ordered_inds, ordered_inds]
end
function correlation_matrix_to_mps(
s::Vector{<:Index},
Λ_up0::AbstractSymmetry,
Λ_dn0::AbstractSymmetry;
eigval_cutoff::Float64=1e-8,
maxblocksize::Int=min(size(Λ_up0, 1), size(Λ_dn0, 1)),
minblocksize::Int=1,
kwargs...,
)
MPS_Elt = promote_type(eltype(Λ_up0.data), eltype(Λ_dn0.data))
Λ_up = maybe_drop_pairing_correlations(Λ_up0)
Λ_dn = maybe_drop_pairing_correlations(Λ_dn0)
@assert size(Λ_up.data, 1) == size(Λ_up.data, 2)
@assert size(Λ_dn.data, 1) == size(Λ_dn.data, 2)
if !(
(typeof(Λ_up) <: ConservesNfParity && typeof(Λ_dn) <: ConservesNfParity) ||
(typeof(Λ_up) <: ConservesNf && typeof(Λ_dn) <: ConservesNf)
)
error("Λ_up and Λ_dn have incompatible subtypes of AbstractSymmetry")
end
N_up = div(size(Λ_up.data, 1), site_stride(Λ_up))
  N_dn = div(size(Λ_dn.data, 1), site_stride(Λ_dn))
N = N_up + N_dn
ns_up, C_up = correlation_matrix_to_gmps(
Λ_up; eigval_cutoff=eigval_cutoff, maxblocksize=maxblocksize
)
ns_dn, C_dn = correlation_matrix_to_gmps(
Λ_dn; eigval_cutoff=eigval_cutoff, maxblocksize=maxblocksize
)
C_up = mapindex(n -> 2n - 1, C_up)
C_dn = mapindex(n -> 2n, C_dn)
C_up_rot = set_data(Λ_up, C_up.rotations)
C_dn_rot = set_data(Λ_dn, C_dn.rotations)
ns_up = set_data(Λ_up, ns_up)
ns_dn = set_data(Λ_dn, ns_dn)
C = Circuit(interleave(C_up_rot, C_dn_rot).data)
ns = interleave(ns_up, ns_dn).data
if all(hastags("Fermion"), s)
U = itensors(s, set_data(Λ_up, C))
ψ = MPS(MPS_Elt, s, n -> round(Int, ns[site_stride(Λ_up) * n]) + 1)
ψ = apply(U, ψ; kwargs...)
elseif all(hastags("Electron"), s)
@assert length(s) == N_up
@assert length(s) == N_dn
if isspinful(s)
if typeof(Λ_up) <: ConservesNf
space_up = [QN(("Nf", 0, -1), ("Sz", 0)) => 1, QN(("Nf", 1, -1), ("Sz", 1)) => 1]
space_dn = [QN(("Nf", 0, -1), ("Sz", 0)) => 1, QN(("Nf", 1, -1), ("Sz", -1)) => 1]
elseif typeof(Λ_up) <: ConservesNfParity
error(
"ConservesNfParity and Electron site type currently not supported. Please use Fermion sites instead.",
)
# FIXME: issue with combiner-logic for subspace-size > 1 in identity_blocks_itensor, see below
space_up = [QN(("NfParity", 0, -2),) => 1, QN(("NfParity", 1, -2),) => 1]
space_dn = [QN(("NfParity", 0, -2),) => 1, QN(("NfParity", 1, -2),) => 1]
end
sf_up = [Index(space_up, "Fermion,Site,n=$(2n-1)") for n in 1:N_up]
sf_dn = [Index(space_dn, "Fermion,Site,n=$(2n)") for n in 1:N_dn]
sf = collect(Iterators.flatten(zip(sf_up, sf_dn)))
else
if typeof(Λ_up) <: ConservesNf
sf = siteinds("Fermion", N; conserve_qns=true, conserve_sz=false)
elseif typeof(Λ_up) <: ConservesNfParity
error(
"ConservesNfParity and Electron site type currently not supported. Please use Fermion sites instead.",
)
sf = siteinds(
"Fermion", N; conserve_qns=false, conserve_sz=false, conserve_nfparity=true
)
end
end
U = itensors(sf, set_data(Λ_up, C))
ψf = MPS(MPS_Elt, sf, n -> round(Int, ns[site_stride(Λ_up) * n]) + 1)
ψf = apply(U, ψf; kwargs...)
ψ = MPS(N_up)
for n in 1:N_up
i, j = 2 * n - 1, 2 * n
C = combiner(sf[i], sf[j])
c = combinedind(C)
ψ[n] = ψf[i] * ψf[j] * C
      # FIXME: combiner loses track of state ordering for QN subspaces > 1 in identity_blocks_itensor
ψ[n] *= identity_blocks_itensor(dag(c), s[n])
end
else
error("All sites must be Fermion or Electron type.")
end
return ψ
end
function correlation_matrix_to_mps(
s::Vector{<:Index},
Λ_up::AbstractMatrix,
Λ_dn::AbstractMatrix;
eigval_cutoff::Float64=1e-8,
maxblocksize::Int=min(size(Λ_up, 1), size(Λ_dn, 1)),
minblocksize::Int=1,
kwargs...,
)
if all(hastags("Electron"), s)
return correlation_matrix_to_mps(
s,
symmetric_correlation_matrix(Λ_up, s),
symmetric_correlation_matrix(Λ_dn, s);
eigval_cutoff=eigval_cutoff,
maxblocksize=maxblocksize,
minblocksize=minblocksize,
kwargs...,
)
elseif all(hastags("Fermion"), s)
# equivalent number of electrons
n_electrons = div(length(s), 2)
return correlation_matrix_to_mps(
s,
symmetric_correlation_matrix(Λ_up, n_electrons),
symmetric_correlation_matrix(Λ_dn, n_electrons);
eigval_cutoff=eigval_cutoff,
maxblocksize=maxblocksize,
minblocksize=minblocksize,
kwargs...,
)
  else
    error("All sites must be Fermion or Electron type.")
  end
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 7116 | """
Some of the functionality in this script is closely related to routines in the following package
https://github.com/Jacupo/F_utilities
and the associated publication
10.21468/SciPostPhysLectNotes.54
"""
"""Takes a single-particle Hamiltonian in blocked Dirac format and finds the fermionic transformation U that diagonalizes it"""
function _eigen_gaussian_blocked(H; noise_scale=nothing)
#make sure H is Hermitian
@assert ishermitian(H)
H = Hermitian(H)
ElT = eltype(H)
#convert from Dirac to Majorana picture
N = size(H, 1)
Ω = build_Ω(ElT, N)
h = real(-im .* (Ω * H * Ω'))
h = (h - h') ./ 2
#@show size(h)
if !isnothing(noise_scale)
noise = rand(size(h)...) * noise_scale
noise = (noise - noise') ./ 2
h = h + noise
end
# Schur diagonalize including reordering
_, O, vals = order_schur(schur(h))
# convert back to Dirac Frame
Fxpxx = build_Fxpxx(N)
U = Ω' * O * (Fxpxx') * Ω
d = vcat(-vals, vals)
if ElT <: Real
U = make_real_if_possible(U, d)
# make another pass with rotation in the complex plane per eigenvector
U .*= exp.(-im * angle.(U[1:1, :]))
@assert norm(imag.(U)) < sqrt(eps(real(ElT)))
U = real(real.(U))
end
return d, U
end
"""Takes a single-particle Hamiltonian in interlaced Dirac format and finds the complex fermionic transformation U that diagonalizes it."""
function eigen_gaussian(H; noise_scale=nothing)
d, U = _eigen_gaussian_blocked(
ITensorGaussianMPS.reverse_interleave(complex(H)); noise_scale=noise_scale
)
nU = similar(U)
n = div(size(H, 1), 2)
nU[1:2:end, :] = U[1:n, :]
nU[2:2:end, :] = U[(n + 1):end, :]
return d, nU
end
"""Takes a single-particle Hamiltonian in interlaced Dirac format and outputs the ground state correlation matrix (with the input Hamiltonians element type)."""
function get_gaussian_GS_corr(H::AbstractMatrix; noise_scale=nothing)
ElT = eltype(H)
d, U = eigen_gaussian(H; noise_scale=noise_scale)
n = div(size(H, 1), 2)
c = conj(U[:, 1:n]) * transpose(U[:, 1:n])
if ElT <: Real && norm(imag.(c)) <= sqrt(eps(real(ElT)))
c = real(real.(c))
end
return c
end
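# Hedged end-to-end sketch (assumption: `H` is a 2N×2N quadratic Hamiltonian in
# interlaced Dirac format, e.g. built with `quadratic_hamiltonian` from an
# OpSum elsewhere in this package):
#
#   d, U = eigen_gaussian(H)       # single-particle spectrum and eigenmodes
#   c = get_gaussian_GS_corr(H)    # ground-state correlation matrix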
"""Takes a single-particle correlation matrix in interlaced Dirac format and finds the fermionic transformation U that diagonalizes it"""
function diag_corr_gaussian(Λ::Hermitian; noise_scale=nothing)
#shift correlation matrix by half so spectrum is symmetric around 0
populations, U = eigen_gaussian(Λ - 0.5 * I; noise_scale=noise_scale)
n = diag(U' * Λ * U)
if !all(abs.(populations - (n - 0.5 * ones(size(n)))) .< sqrt(eps(real(eltype(Λ)))))
@show n
@show populations .+ 0.5
@error(
"The natural orbital populations are not consistent, see above. Try adding symmetric noise to the input matrix."
)
end
return populations .+ 0.5, U
end
"""Takes a single-particle correlation matrix in interlaced Dirac format and finds the fermionic transformation U that diagonalizes it"""
function diag_corr_gaussian(Γ::AbstractMatrix; noise_scale=nothing)
#enforcing hermitianity
Γ = (Γ + Γ') / 2.0
return diag_corr_gaussian(Hermitian(Γ); noise_scale=noise_scale)
end
"""Schur decomposition of skew-hermitian matrix"""
function order_schur(F::LinearAlgebra.Schur)
T = F.Schur
O = F.vectors #column vectors are Schur vectors
N = size(T, 1)
n = div(N, 2)
shuffled_inds = Vector{Int}[]
ElT = eltype(T)
vals = ElT[]
# build a permutation matrix that takes care of the ordering
for i in 1:n
ind = 2 * i - 1
val = T[ind, ind + 1]
if real(val) >= 0
push!(shuffled_inds, [ind, ind + 1])
else
push!(shuffled_inds, [ind + 1, ind])
end
push!(vals, abs(val))
end
# build block local rotation first
  perm = sortperm(real.(vals); rev=true) # order blocks so the largest-magnitude eigenvalue pair sits in the upper-left corner
vals = vals[perm]
shuffled_inds = reduce(vcat, shuffled_inds[perm])
# then permute blocks for overall ordering
T = T[shuffled_inds, shuffled_inds]
O = O[:, shuffled_inds]
return T, O, vals #vals are only positive, and of length n and not N
end
"""Checks if we can make degenerate subspaces of a U0 real by multiplying columns or rows with a phase"""
function make_real_if_possible(U0::AbstractMatrix, spectrum::Vector; sigdigits=12)
# only apply to first half of spectrum due to symmetry around 0
# assumes spectrum symmetric around zero and ordered as vcat(-E,E) where E is ordered in descending magnitude
U = copy(U0)
n = div(length(spectrum), 2)
# Round spectrum for comparison within finite floating point precision.
# Not the cleanest way to compare floating point numbers for approximate equality but should be sufficient here.
rounded_halfspectrum = round.(spectrum[1:n], sigdigits=sigdigits)
approx_unique_eigvals = unique(rounded_halfspectrum)
# loop over degenerate subspaces
for e in approx_unique_eigvals
mask = rounded_halfspectrum .== e
if abs(e) < eps(real(eltype(U0)))
# handle values close to zero separately
# rotate subspace for both positive and negative eigenvalue if they are close enough to zero
mask = vcat(mask, mask)
subspace = U[:, mask]
subspace = make_subspace_real_if_possible(subspace)
U[:, mask] = subspace
else
mask = rounded_halfspectrum .== e
      # rotate subspace for the negative eigenvalue
subspace = U[:, 1:n][:, mask]
subspace = make_subspace_real_if_possible(subspace)
v = @views U[:, 1:n][:, mask]
v .= subspace
      # rotate subspace for the positive eigenvalue
subspace = U[:, (n + 1):end][:, mask]
subspace = make_subspace_real_if_possible(subspace)
v = @views U[:, (n + 1):end][:, mask]
v .= subspace
end
end
return U
end
"""Checks if we can make a degenerate subspace of the eigenbasis of an operator real by multiplying columns or rows with a phase"""
function make_subspace_real_if_possible(U::AbstractMatrix; atol=sqrt(eps(real(eltype(U)))))
if eltype(U) <: Real
return U
end
if size(U, 2) == 1
nU = U .* exp(-im * angle(U[1, 1]))
    if norm(imag.(nU)) <= atol
return nU
else
return U
end
else
n = size(U, 2)
gram = U * U'
    if norm(imag.(gram)) <= atol
D, V = eigen(Hermitian(real.(gram)))
proj = V[:, (size(U, 1) - n + 1):end] * (V[:, (size(U, 1) - n + 1):end])'
@assert norm(proj * U - U) < atol
return complex(V[:, (size(U, 1) - n + 1):end])
else
return U
end
end
end
# transformation matrices (in principle sparse) between and within Majorana and Dirac picture
function build_Ω(T, N::Int)
n = div(N, 2)
nElT = T <: Real ? Complex{T} : T
Ω = zeros(nElT, N, N)
Ω[1:n, 1:n] .= diagm(ones(nElT, n) ./ sqrt(2))
Ω[1:n, (n + 1):N] .= diagm(ones(nElT, n) ./ sqrt(2))
Ω[(n + 1):N, 1:n] .= diagm(ones(nElT, n) * (im / sqrt(2)))
Ω[(n + 1):N, (n + 1):N] .= diagm(ones(nElT, n) * (-im / sqrt(2)))
return Ω
end
function build_Fxpxx(N::Int)
Fxpxx = zeros(Int8, N, N)
n = div(N, 2)
Fxpxx[1:n, 1:2:N] .= diagm(ones(Int8, n))
Fxpxx[(n + 1):N, 2:2:N] .= diagm(ones(Int8, n))
return Fxpxx
end
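# Layout note: `Ω` converts Dirac to Majorana operators, and `Fxpxx` permutes
# the blocked (x…x, p…p) Majorana ordering into the interlaced (x, p, x, p, …)
# ordering. For N = 4 it is the permutation matrix
#
#   build_Fxpxx(4) == [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1]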
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 8361 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
using Test
function expect_compat(psi::MPS, ops::AbstractString...; kwargs...)
if ITensors.version() >= v"0.2"
return expect(psi, ops...; kwargs...)
end
psi = copy(psi)
N = length(psi)
ElT = real(promote_itensor_eltype(psi))
Nops = length(ops)
s = siteinds(psi)
site_range::UnitRange{Int} = get(kwargs, :site_range, 1:N)
Ns = length(site_range)
start_site = first(site_range)
offset = start_site - 1
orthogonalize!(psi, start_site)
psi[start_site] ./= norm(psi[start_site])
ex = ntuple(n -> zeros(ElT, Ns), Nops)
for j in site_range
orthogonalize!(psi, j)
for n in 1:Nops
ex[n][j - offset] = real(scalar(psi[j] * op(ops[n], s[j]) * dag(prime(psi[j], s[j]))))
end
end
return Nops == 1 ? ex[1] : ex
end
@testset "Electron" begin
# Half filling
N = 40
Nf_up = N ÷ 2
Nf_dn = N ÷ 2
Nf = Nf_up + Nf_dn
# Maximum MPS link dimension
_maxlinkdim = 200
# DMRG cutoff
_cutoff = 1e-8
# Hopping
t = 1.0
# Electron-electron on-site interaction
U = 1.0
# Make the free fermion Hamiltonian for the up spins
os_up = OpSum()
for n in 1:(N - 1)
os_up .+= -t, "Cdagup", n, "Cup", n + 1
os_up .+= -t, "Cdagup", n + 1, "Cup", n
end
# Make the free fermion Hamiltonian for the down spins
os_dn = OpSum()
for n in 1:(N - 1)
os_dn .+= -t, "Cdagdn", n, "Cdn", n + 1
os_dn .+= -t, "Cdagdn", n + 1, "Cdn", n
end
# Hopping Hamiltonians for the up and down spins
h_up = hopping_hamiltonian(os_up)
h_dn = hopping_hamiltonian(os_dn)
h_combined = hopping_hamiltonian(os_up, os_dn)
# Get the Slater determinant
Φ_up = slater_determinant_matrix(h_up, Nf_up)
Φ_dn = slater_determinant_matrix(h_dn, Nf_dn)
# Create an MPS from the slater determinants.
s = siteinds("Electron", N; conserve_qns=true)
ψ0 = slater_determinant_to_mps(
s, Φ_up, Φ_dn; eigval_cutoff=1e-4, cutoff=_cutoff, maxdim=_maxlinkdim
)
@test maxlinkdim(ψ0) ≤ _maxlinkdim
# The total non-interacting part of the Hamiltonian
os_noninteracting = OpSum()
for n in 1:(N - 1)
os_noninteracting .+= -t, "Cdagup", n, "Cup", n + 1
os_noninteracting .+= -t, "Cdagdn", n, "Cdn", n + 1
os_noninteracting .+= -t, "Cdagup", n + 1, "Cup", n
os_noninteracting .+= -t, "Cdagdn", n + 1, "Cdn", n
end
H_noninteracting = MPO(os_noninteracting, s)
@test tr(Φ_up' * h_up * Φ_up) + tr(Φ_dn' * h_dn * Φ_dn) ≈ inner(ψ0', H_noninteracting, ψ0) rtol =
1e-3
# The total interacting Hamiltonian
os_interacting = OpSum()
for n in 1:(N - 1)
os_interacting .+= -t, "Cdagup", n, "Cup", n + 1
os_interacting .+= -t, "Cdagdn", n, "Cdn", n + 1
os_interacting .+= -t, "Cdagup", n + 1, "Cup", n
os_interacting .+= -t, "Cdagdn", n + 1, "Cdn", n
end
for n in 1:N
os_interacting .+= U, "Nupdn", n
end
H = MPO(os_interacting, s)
# Random starting state
ψr = random_mps(s, n -> n ≤ Nf ? (isodd(n) ? "↑" : "↓") : "0")
@test flux(ψr) == QN(("Nf", Nf, -1), ("Sz", 0))
@test flux(ψ0) == QN(("Nf", Nf, -1), ("Sz", 0))
@test inner(ψ0', H, ψ0) < inner(ψr', H, ψr)
sweeps = Sweeps(3)
setmaxdim!(sweeps, 10, 20, _maxlinkdim)
setcutoff!(sweeps, _cutoff)
setnoise!(sweeps, 1e-5, 1e-6, 1e-7, 0.0)
er, _ = dmrg(H, ψr, sweeps; outputlevel=0)
sweeps = Sweeps(3)
setmaxdim!(sweeps, _maxlinkdim)
setcutoff!(sweeps, _cutoff)
setnoise!(sweeps, 1e-5, 1e-6, 1e-7, 0.0)
e0, _ = dmrg(H, ψ0, sweeps; outputlevel=0)
@test e0 > inner(ψ0', H_noninteracting, ψ0)
@test e0 < er
end
@testset "Regression test for bug away from half filling" begin
N = 3
t = 1.0
os_up = OpSum()
for n in 1:(N - 1)
os_up .+= -t, "Cdagup", n, "Cup", n + 1
os_up .+= -t, "Cdagup", n + 1, "Cup", n
end
os_dn = OpSum()
for n in 1:(N - 1)
os_dn .+= -t, "Cdagdn", n, "Cdn", n + 1
os_dn .+= -t, "Cdagdn", n + 1, "Cdn", n
end
h_up = hopping_hamiltonian(os_up)
h_dn = hopping_hamiltonian(os_dn)
s = siteinds("Electron", N; conserve_qns=true)
H = MPO(os_up + os_dn, s)
Nf_up, Nf_dn = 1, 0
Φ_up = slater_determinant_matrix(h_up, Nf_up)
Φ_dn = slater_determinant_matrix(h_dn, Nf_dn)
ψ = slater_determinant_to_mps(s, Φ_up, Φ_dn; eigval_cutoff=0.0, cutoff=0.0)
@test inner(ψ', H, ψ) ≈ tr(Φ_up' * h_up * Φ_up) + tr(Φ_dn' * h_dn * Φ_dn)
@test maxlinkdim(ψ) == 2
@test flux(ψ) == QN(("Nf", 1, -1), ("Sz", 1))
ns_up = expect_compat(ψ, "Nup")
ns_dn = expect_compat(ψ, "Ndn")
@test ns_up ≈ diag(Φ_up * Φ_up')
@test ns_dn ≈ diag(Φ_dn * Φ_dn')
@test sum(ns_up) ≈ Nf_up
@test sum(ns_dn) ≈ Nf_dn
end
@testset "Electron - Pairing (currently inactive)" begin
# Keep this testset for when the Electron-sites + pairing bug is fixed
# But skip the tests for now.
is_implemented = false
if !is_implemented
nothing
else
# Half filling
N = 40
Nf_up = N ÷ 2
Nf_dn = N ÷ 2
Nf = Nf_up + Nf_dn
# Maximum MPS link dimension
_maxlinkdim = 200
# DMRG cutoff
_cutoff = 1e-8
# Hopping
t = 1.0
pairing = 1.2
# Electron-electron on-site interaction
U = 1.0
# Make the free fermion Hamiltonian for the up spins
os_up = OpSum()
for n in 1:(N - 1)
os_up .+= -t, "Cdagup", n, "Cup", n + 1
os_up .+= -t, "Cdagup", n + 1, "Cup", n
os_up .+= -pairing, "Cdagup", n + 1, "Cdagup", n
os_up .+= -pairing, "Cup", n, "Cup", n + 1
#os_up .+= -pairing, "Cdagup", n+1,"Cdagup", n
end
# Make the free fermion Hamiltonian for the down spins
os_dn = OpSum()
for n in 1:(N - 1)
os_dn .+= -t, "Cdagdn", n, "Cdn", n + 1
os_dn .+= -t, "Cdagdn", n + 1, "Cdn", n
os_dn .+= -pairing, "Cdn", n, "Cdn", n + 1
os_dn .+= -pairing, "Cdagdn", n + 1, "Cdagdn", n
end
# Hopping Hamiltonians for the up and down spins
h_up = quadratic_hamiltonian(os_up)
h_dn = quadratic_hamiltonian(os_dn)
# Get the Slater determinant, N*2 because of pairing (should pass chemical potential as arg later)
Φ_up = slater_determinant_matrix(h_up, Nf_up * 2)
Φ_dn = slater_determinant_matrix(h_dn, Nf_dn * 2)
# Create an MPS from the slater determinants.
s = siteinds(
"Electron", N; conserve_qns=false, conserve_nfparity=true, conserve_nf=false
)
H_ni_up = MPO(os_up, s)
ψ0 = slater_determinant_to_mps(
s, Φ_up, Φ_dn; eigval_cutoff=1e-4, cutoff=_cutoff, maxdim=_maxlinkdim
)
@show norm(ψ0)
@test maxlinkdim(ψ0) ≤ _maxlinkdim
# The total non-interacting part of the Hamiltonian
os_noninteracting = OpSum()
for n in 1:(N - 1)
os_noninteracting .+= -t, "Cdagdn", n, "Cdn", n + 1
os_noninteracting .+= -t, "Cdagdn", n + 1, "Cdn", n
os_noninteracting .+= -pairing, "Cdn", n, "Cdn", n + 1
os_noninteracting .+= -pairing, "Cdagdn", n + 1, "Cdagdn", n
os_noninteracting .+= -t, "Cdagup", n, "Cup", n + 1
os_noninteracting .+= -t, "Cdagup", n + 1, "Cup", n
os_noninteracting .+= -pairing, "Cdagup", n + 1, "Cdagup", n
os_noninteracting .+= -pairing, "Cup", n, "Cup", n + 1
end
H_noninteracting = MPO(os_noninteracting, s)
@show tr(Φ_up' * h_up * Φ_up),
tr(Φ_dn' * h_dn * Φ_dn), inner(ψ0', H_noninteracting, ψ0),
inner(ψ0', H_ni_up, ψ0)
@test tr(Φ_up' * h_up * Φ_up) + tr(Φ_dn' * h_dn * Φ_dn) ≈
inner(ψ0', H_noninteracting, ψ0) rtol = 1e-3
# The total interacting Hamiltonian
os_interacting = copy(os_noninteracting)
#os_interacting .+= os_noninteracting
for n in 1:N
os_interacting .+= U, "Nupdn", n
end
H = MPO(os_interacting, s)
# Random starting state
ψr = random_mps(s, n -> n ≤ Nf ? (isodd(n) ? "↑" : "↓") : "0")
@show flux(ψr), flux(ψ0)
#@test flux(ψr) == QN(("Nf", Nf, -1), ("Sz", 0))
#@test flux(ψ0) == QN(("Nf", Nf, -1), ("Sz", 0))
@test inner(ψ0', H, ψ0) < inner(ψr', H, ψr)
sweeps = Sweeps(3)
setmaxdim!(sweeps, 10, 20, _maxlinkdim)
setcutoff!(sweeps, _cutoff)
setnoise!(sweeps, 1e-5, 1e-6, 1e-7, 0.0)
er, _ = dmrg(H, ψr, sweeps; outputlevel=0)
sweeps = Sweeps(3)
setmaxdim!(sweeps, _maxlinkdim)
setcutoff!(sweeps, _cutoff)
setnoise!(sweeps, 1e-5, 1e-6, 1e-7, 0.0)
e0, _ = dmrg(H, ψ0, sweeps; outputlevel=0)
@test e0 > inner(ψ0', H_noninteracting, ψ0)
@test e0 < er
end
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 4281 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
using Test
@testset "Basic" begin
# Test Givens rotations
v = randn(6)
g, r = ITensorGaussianMPS.givens_rotations(v)
@test g * v ≈ r * [n == 1 ? 1 : 0 for n in 1:length(v)]
end
@testset "Fermion" begin
N = 10
Nf = N ÷ 2
# Hopping
t = 1.0
# Hopping Hamiltonian
h = Hermitian(diagm(1 => fill(-t, N - 1), -1 => fill(-t, N - 1)))
e, u = eigen(h)
@test h * u ≈ u * Diagonal(e)
E = sum(e[1:Nf])
# Get the Slater determinant
Φ = u[:, 1:Nf]
@test h * Φ ≈ Φ * Diagonal(e[1:Nf])
# Diagonalize the correlation matrix as a
# Gaussian MPS (GMPS) gates
n, gmps = slater_determinant_to_gmera(Φ; maxblocksize=10)
ns = round.(Int, n)
@test sum(ns) == Nf
Λ = conj(Φ) * transpose(Φ)
@test gmps * Λ * gmps' ≈ Diagonal(ns) rtol = 1e-2
@test gmps' * Diagonal(ns) * gmps ≈ Λ rtol = 1e-2
# Form the MPS
s = siteinds("Fermion", N; conserve_qns=true)
ψ = ITensorGaussianMPS.slater_determinant_to_mera(s, Φ; maxblocksize=4)
os = OpSum()
for i in 1:N, j in 1:N
if h[i, j] ≠ 0
os .+= h[i, j], "Cdag", i, "C", j
end
end
H = MPO(os, s)
@test inner(ψ', H, ψ) ≈ E rtol = 1e-5
# Compare to DMRG
sweeps = Sweeps(10)
setmaxdim!(sweeps, 10, 20, 40, 60)
setcutoff!(sweeps, 1E-12)
energy, ψ̃ = dmrg(H, productMPS(s, n -> n ≤ Nf ? "1" : "0"), sweeps; outputlevel=0)
# Create an mps
@test abs(inner(ψ, ψ̃)) ≈ 1 rtol = 1e-5
@test inner(ψ̃', H, ψ̃) ≈ inner(ψ', H, ψ) rtol = 1e-5
@test E ≈ energy
end
@testset "Fermion (complex)" begin
N = 10
Nf = N ÷ 2
# Hopping
θ = π / 8
t = exp(im * θ)
# Hopping Hamiltonian
h = Hermitian(diagm(1 => fill(-t, N - 1), -1 => fill(-conj(t), N - 1)))
e, u = eigen(h)
@test h * u ≈ u * Diagonal(e)
E = sum(e[1:Nf])
# Get the Slater determinant
Φ = u[:, 1:Nf]
@test h * Φ ≈ Φ * Diagonal(e[1:Nf])
# Diagonalize the correlation matrix as a
# Gaussian MPS (GMPS)
n, gmps = slater_determinant_to_gmera(Φ; maxblocksize=4)
ns = round.(Int, n)
@test sum(ns) == Nf
Λ = conj(Φ) * transpose(Φ)
@test gmps * Λ * gmps' ≈ Diagonal(ns) rtol = 1e-2
@test gmps' * Diagonal(ns) * gmps ≈ Λ rtol = 1e-2
# Form the MPS
s = siteinds("Fermion", N; conserve_qns=true)
ψ = ITensorGaussianMPS.slater_determinant_to_mera(s, Φ; maxblocksize=4)
os = OpSum()
for i in 1:N, j in 1:N
if h[i, j] ≠ 0
os .+= h[i, j], "Cdag", i, "C", j
end
end
H = MPO(os, s)
@test inner(ψ', H, ψ) ≈ E rtol = 1e-5
@test inner(ψ', H, ψ) / norm(ψ) ≈ E rtol = 1e-5
# Compare to DMRG
sweeps = Sweeps(10)
setmaxdim!(sweeps, 10, 20, 40, 60)
setcutoff!(sweeps, 1E-12)
energy, ψ̃ = dmrg(H, productMPS(s, n -> n ≤ Nf ? "1" : "0"), sweeps; outputlevel=0)
# Create an mps
@test abs(inner(ψ, ψ̃)) ≈ 1 rtol = 1e-5
@test inner(ψ̃', H, ψ̃) ≈ inner(ψ', H, ψ) rtol = 1e-5
@test E ≈ energy
end
# Build 1-d SSH model
function SSH1dModel(N::Int, t::Float64, vardelta::Float64)
# N should be even
s = siteinds("Fermion", N; conserve_qns=true)
limit = div(N - 1, 2)
t1 = -t * (1 + vardelta / 2)
t2 = -t * (1 - vardelta / 2)
os = OpSum()
for n in 1:limit
os .+= t1, "Cdag", 2 * n - 1, "C", 2 * n
os .+= t1, "Cdag", 2 * n, "C", 2 * n - 1
os .+= t2, "Cdag", 2 * n, "C", 2 * n + 1
os .+= t2, "Cdag", 2 * n + 1, "C", 2 * n
end
if N % 2 == 0
os .+= t1, "Cdag", N - 1, "C", N
os .+= t1, "Cdag", N, "C", N - 1
end
h = hopping_hamiltonian(os)
H = MPO(os, s)
#display(t1)
return (h, H, s)
end
@testset "Energy" begin
N = 2^4
Nf = div(N, 2)
t = 1.0
gapsize = 0
vardelta = gapsize / 2
h, H, s = SSH1dModel(N, t, vardelta)
Φ = slater_determinant_matrix(h, Nf)
E, V = eigen(h)
  E = sort(E) # `sort` returns a copy; assign it so the ordering takes effect
Eana = sum(E[1:Nf])
Λ0 = Φ * Φ'
@test Eana ≈ tr(h * Λ0) rtol = 1e-5
# Diagonalize the correlation matrix as a
# Gaussian MPS (GMPS) and GMERA
ngmps, V1 = ITensorGaussianMPS.correlation_matrix_to_gmps(Λ0; eigval_cutoff=1e-8)
nmera, V1 = ITensorGaussianMPS.correlation_matrix_to_gmera(Λ0; eigval_cutoff=1e-8)#,maxblocksize=6)
@test sum(round.(Int, nmera)) == sum(round.(Int, ngmps))
U = ITensorGaussianMPS.UmatFromGates(V1, N)
Etest = ITensorGaussianMPS.EfromGates(h, U)
@test Eana ≈ Etest rtol = 1e-5
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 6593 | using ITensorGaussianMPS
using ITensorMPS
using ITensors
using LinearAlgebra
using Test
@testset "Basic" begin
# Test Givens rotations
v = randn(6)
g, r = ITensorGaussianMPS.givens_rotations(v)
@test g * v ≈ r * [n == 1 ? 1 : 0 for n in 1:length(v)]
end
@testset "Hamiltonians" begin
N = 8
  t = -0.8 # nearest-neighbor hopping
  mu = 0.0 # on-site chemical potential (defined for reference; unused below)
pairing = 1.2
os = OpSum()
for i in 1:N
if 1 < i < N
js = [i - 1, i + 1]
elseif i == 1
js = [i + 1]
else
js = [i - 1]
end
for j in js
os .+= t, "Cdag", i, "C", j
end
end
h_hop = ITensorGaussianMPS.hopping_hamiltonian(os)
for i in 1:N
if 1 < i < N
js = [i - 1, i + 1]
elseif i == 1
js = [i + 1]
else
js = [i - 1]
end
for j in js
os .+= pairing / 2.0, "Cdag", i, "Cdag", j
os .+= -conj(pairing / 2.0), "C", i, "C", j
end
end
h_hopandpair = ITensorGaussianMPS.quadratic_hamiltonian(os)
h_hopandpair_spinful = ITensorGaussianMPS.quadratic_hamiltonian(os, os)
@test all(
abs.(
(
2 .* ITensorGaussianMPS.reverse_interleave(Matrix(h_hopandpair))[
(N + 1):end, (N + 1):end
]
) - h_hop
) .< eps(Float32),
)
end
@testset "Fermion (real and complex)" begin
N = 10
Nf = N ÷ 2
# Hopping
θs = [0.0, π / 8]
for θ in θs
t = exp(im * θ)
# Hopping Hamiltonian
h = Hermitian(diagm(1 => fill(-t, N - 1), -1 => fill(-conj(t), N - 1)))
if θ == 0.0
h = real(h)
end
e, u = eigen(h)
@test h * u ≈ u * Diagonal(e)
E = sum(e[1:Nf])
# Get the Slater determinant
Φ = u[:, 1:Nf]
@test h * Φ ≈ Φ * Diagonal(e[1:Nf])
# Diagonalize the correlation matrix as a
# Gaussian MPS (GMPS)
n, gmps = slater_determinant_to_gmps(Φ, N; maxblocksize=4)
ns = round.(Int, n)
@test sum(ns) == Nf
Λ = conj(Φ) * transpose(Φ)
@test gmps * Λ * gmps' ≈ Diagonal(ns) rtol = 1e-2
@test gmps' * Diagonal(ns) * gmps ≈ Λ rtol = 1e-2
# Form the MPS
s = siteinds("Fermion", N; conserve_qns=true)
ψ = slater_determinant_to_mps(s, Φ; maxblocksize=4)
os = OpSum()
for i in 1:N, j in 1:N
if h[i, j] ≠ 0
os .+= h[i, j], "Cdag", i, "C", j
end
end
H = MPO(os, s)
@test inner(ψ', H, ψ) ≈ E rtol = 1e-5
@test inner(ψ', H, ψ) / norm(ψ) ≈ E rtol = 1e-5
# Compare to DMRG
sweeps = Sweeps(10)
setmaxdim!(sweeps, 10, 20, 40, 60)
setcutoff!(sweeps, 1E-12)
energy, ψ̃ = dmrg(H, productMPS(s, n -> n ≤ Nf ? "1" : "0"), sweeps; outputlevel=0)
# Create an mps
@test abs(inner(ψ, ψ̃)) ≈ 1 rtol = 1e-5
@test inner(ψ̃', H, ψ̃) ≈ inner(ψ', H, ψ) rtol = 1e-5
@test E ≈ energy
end
end
@testset "Fermion BCS (real,real - no pairing, complex)" begin
N = 12
Nf = N ÷ 2
ts = [1.0, exp(im * pi / 3.0), 1.0]
Deltas = [1.0, 1.0, 0.0]
for (Delta, t) in zip(Deltas, ts)
t = isreal(t) ? real(t) : t
os_h = OpSum()
for n in 1:(N - 1)
os_h .+= -t, "Cdag", n, "C", n + 1
os_h .+= -t', "Cdag", n + 1, "C", n
end
os_p = OpSum()
for n in 1:(N - 1)
os_p .+= Delta / 2.0, "Cdag", n, "Cdag", n + 1
os_p .+= -Delta / 2.0, "Cdag", n + 1, "Cdag", n
os_p .+= -Delta / 2.0, "C", n, "C", n + 1
os_p .+= Delta / 2.0, "C", n + 1, "C", n
end
h = ITensorGaussianMPS.quadratic_hamiltonian(os_h + os_p)
@assert ishermitian(h)
ElT = eltype(h)
e, u = ITensorGaussianMPS.eigen_gaussian(h)
E = sum(e[1:(N)])
Φ = (u[:, 1:N])
@test h * Φ ≈ Φ * Diagonal(e[1:N])
c = conj(Φ) * transpose(Φ)
c2 = ITensorGaussianMPS.get_gaussian_GS_corr(h)
@test norm(c - c2) <= sqrt(eps(real(eltype(h))))
if ElT <: Real
@assert norm(imag.(c)) <= sqrt(eps())
c = real.(c)
end
n, gmps = correlation_matrix_to_gmps(ElT.(c), N; eigval_cutoff=1e-10, maxblocksize=14)
ns = round.(Int, n)
@test sum(ns) == N
Λ = ITensorGaussianMPS.ConservesNfParity(c)
@test gmps * Λ.data * gmps' ≈ Diagonal(ns) rtol = 1e-2
@test gmps' * Diagonal(ns) * gmps ≈ Λ.data rtol = 1e-2
# Form the MPS
s = siteinds("Fermion", N; conserve_qns=false)
h_mpo = MPO(os_h + os_p, s)
psi = correlation_matrix_to_mps(
s, ElT.(c); eigval_cutoff=1e-10, maxblocksize=14, cutoff=1e-11
)
@test eltype(psi[1]) <: ElT
sweeps = Sweeps(5)
_maxlinkdim = 60
_cutoff = 1e-10
setmaxdim!(sweeps, 10, 20, 40, _maxlinkdim)
setcutoff!(sweeps, _cutoff)
E_dmrg, psidmrg = dmrg(h_mpo, psi, sweeps; outputlevel=0)
E_ni_mpo = inner(psi', h_mpo, psi)
@test E_dmrg ≈ E_ni_mpo rtol = 1e-4
@test inner(psidmrg, psi) ≈ 1 rtol = 1e-4
# compare entries of the correlation matrix
cdagc = correlation_matrix(psi, "Cdag", "C")
cdagcdag = correlation_matrix(psi, "Cdag", "Cdag")
ccdag = correlation_matrix(psi, "C", "Cdag")
cc = correlation_matrix(psi, "C", "C")
cblocked = ITensorGaussianMPS.reverse_interleave(c)
tol = 1e-5
@test all(abs.(cblocked[(N + 1):end, (N + 1):end] - cdagc[:, :]) .< tol)
@test all(abs.(cblocked[1:N, 1:N] - ccdag[:, :]) .< tol)
@test all(abs.(cblocked[1:N, (N + 1):end] - cc[:, :]) .< tol)
@test all(abs.(cblocked[(N + 1):end, 1:N] - cdagcdag[:, :]) .< tol)
@show "Completed test for: ", Delta, t
end
end
@testset "Bad Terms" begin
@testset "Bad single" begin
os = OpSum()
os += -1.0, "Nupdn", 1
    @test_throws Any h_hop = ITensorGaussianMPS.hopping_hamiltonian(os)
end
@testset "Bad quadratic" begin
os = OpSum()
os += -1.0, "Ntot", 1, "Ntot", 2
    @test_throws Any h_hop = ITensorGaussianMPS.hopping_hamiltonian(os)
end
end
@testset "Rewrite Hamiltonians" begin
@testset "Spinless" begin
os = OpSum()
os += -1.0, "Cdag", 1, "C", 2
os += -1.0, "Cdag", 2, "C", 1
os += 2, "N", 1
os += 3, "N", 2
h_hop = ITensorGaussianMPS.hopping_hamiltonian(os)
@test h_hop[1, 1] == 2
@test h_hop[2, 2] == 3
end
@testset "Spin $o" for o in ("up", "dn")
os = OpSum()
os += -1.0, "Cdag$o", 1, "C$o", 2
os += -1.0, "Cdag$o", 2, "C$o", 1
os += 2, "N$o", 1
os += 3, "N$o", 2
h_hop = ITensorGaussianMPS.hopping_hamiltonian(os)
@test h_hop[1, 1] == 2
@test h_hop[2, 2] == 3
end
@testset "Spin $o" for o in ("↑", "↓")
os = OpSum()
os += -1.0, "c†$o", 1, "c$o", 2
os += -1.0, "c†$o", 2, "c$o", 1
os += 2, "n$o", 1
os += 3, "n$o", 2
h_hop = ITensorGaussianMPS.hopping_hamiltonian(os)
@test h_hop[1, 1] == 2
@test h_hop[2, 2] == 3
end
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 1369 | using ITensorGaussianMPS
using ITensors
using LinearAlgebra
using Test
const GMPS = ITensorGaussianMPS
@testset "Fermionic Hamiltonian diagonalization in parity-conserving frame" begin
N = 10
# generate random Hamiltonian in non-number-conserving space
H = zeros(ComplexF64, 2 * N, 2 * N)
hoffd = rand(N, N) .- 0.5 + im * (rand(N, N) .- 0.5)
hoffd = (hoffd - transpose(hoffd)) ./ 2
H[1:N, (N + 1):end] = hoffd
H[(N + 1):end, 1:N] = -conj.(hoffd)
hd = rand(N, N) .- 0.5 + im * (rand(N, N) .- 0.5)
hd = (hd + hd') ./ 2
H[1:N, 1:N] = -1 .* conj.(hd)
H[(N + 1):end, (N + 1):end] = hd
H = (H + H') ./ 2
# compare spectrum, which can also accurately be computed via standard eigendecomposition
d, U = GMPS._eigen_gaussian_blocked(Hermitian(H))
d2, _ = eigen(Hermitian(H))
d3, _ = GMPS.eigen_gaussian(Hermitian(GMPS.interleave(H)))
@test sort(d) ≈ sort(d2)
@test sort(d) ≈ sort(d3)
end
@testset "Undoing arbitrary complex rotation within degenerate subspaces" begin
A = (x -> Matrix(qr(x).Q))(randn(5, 3))
U = (x -> Matrix(qr(x).Q))(randn(ComplexF64, 3, 3))
AU = A * U
B = GMPS.make_subspace_real_if_possible(AU)
# verify that same subspace is spanned by real eigenvectors B as original eigenvectors A or AU
@test norm(((B * B' * A) .- A)) <= eps(Float64) * 10^2
@test norm(((B * B' * AU) .- AU)) <= eps(Float64) * 10^2
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | code | 169 | using ITensorGaussianMPS
using LinearAlgebra
using Test
@testset "ITensorGaussianMPS.jl" begin
include("gmps.jl")
include("electron.jl")
include("linalg.jl")
end
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | docs | 3244 | # ITensorGaussianMPS
|**Citation** |**Open-access preprint** |
|:-------------------------------------------------------------------------------:|:-----------------------------------------------------:|
| [](https://doi.org/10.1103/PhysRevB.92.075132) | [](https://arxiv.org/abs/1504.07701) |
A package for creating the matrix product state of a free fermion (Gaussian) state.
## Installation
To install this package, first install Julia, start the Julia REPL by typing `julia` at your command line, and run the command:
```julia
julia>]
pkg> add ITensorGaussianMPS
```
## Examples
This can help create starting states for DMRG. For example:
```julia
using ITensors
using ITensorGaussianMPS
using LinearAlgebra
# Half filling
N = 20
Nf = N÷2
@show N, Nf
# Hopping
t = 1.0
# Free fermion hopping Hamiltonian
h = Hermitian(diagm(1 => fill(-t, N-1), -1 => fill(-t, N-1)))
_, u = eigen(h)
# Get the Slater determinant
Φ = u[:, 1:Nf]
# Create an mps for the free fermion ground state
s = siteinds("Fermion", N; conserve_qns = true)
ψ0 = slater_determinant_to_mps(s, Φ; maxblocksize = 4)
# Make an interacting Hamiltonian
U = 1.0
@show U
os = OpSum()
for b in 1:N-1
os .+= -t,"Cdag",b,"C",b+1
os .+= -t,"Cdag",b+1,"C",b
end
for b in 1:N
os .+= U, "Cdag*C", b
end
H = MPO(os, s)
println("\nFree fermion starting energy")
@show inner(ψ0, H, ψ0)
# Random starting state
ψr = random_mps(s, n -> n ≤ Nf ? "1" : "0")
println("\nRandom state starting energy")
@show inner(ψr, H, ψr)
println("\nRun dmrg with random starting state")
@time dmrg(H, ψr; nsweeps=10, maxdim=[10, 20, 40, 60], cutoff=1e-12)
println("\nRun dmrg with free fermion starting state")
@time dmrg(H, ψ0; nsweeps=4, maxdim=60, cutoff=1e-12)
```
This will output something like:
```julia
(N, Nf) = (20, 10)
U = 1.0
Free fermion starting energy
inner(ψ0, H, ψ0) = -2.3812770621299357
Random state starting energy
inner(ψr, H, ψr) = 10.0
Run dmrg with random starting state
After sweep 1 energy=6.261701784151 maxlinkdim=2 time=0.041
After sweep 2 energy=2.844954346204 maxlinkdim=5 time=0.056
After sweep 3 energy=0.245282430911 maxlinkdim=14 time=0.071
After sweep 4 energy=-1.439072132586 maxlinkdim=32 time=0.098
After sweep 5 energy=-2.220202191945 maxlinkdim=59 time=0.148
After sweep 6 energy=-2.376787647893 maxlinkdim=60 time=0.186
After sweep 7 energy=-2.381484153892 maxlinkdim=60 time=0.167
After sweep 8 energy=-2.381489999291 maxlinkdim=57 time=0.233
After sweep 9 energy=-2.381489999595 maxlinkdim=49 time=0.175
After sweep 10 energy=-2.381489999595 maxlinkdim=49 time=0.172
1.349192 seconds (8.94 M allocations: 1.027 GiB, 18.05% gc time)
Run dmrg with free fermion starting state
After sweep 1 energy=-2.381489929965 maxlinkdim=49 time=0.139
After sweep 2 energy=-2.381489999588 maxlinkdim=49 time=0.165
After sweep 3 energy=-2.381489999594 maxlinkdim=48 time=0.161
After sweep 4 energy=-2.381489999594 maxlinkdim=48 time=0.169
0.637021 seconds (4.59 M allocations: 525.989 MiB, 17.09% gc time)
```
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 0.1.11 | e2dd8eca66ec5093008dbedad0ad2763ed07f774 | docs | 225 | ```@meta
CurrentModule = ITensorGaussianMPS
```
# ITensorGaussianMPS
Documentation for [ITensorGaussianMPS](https://github.com/ITensor/ITensorGaussianMPS.jl).
```@index
```
```@autodocs
Modules = [ITensorGaussianMPS]
```
| ITensorGaussianMPS | https://github.com/ITensor/ITensorGaussianMPS.jl.git |
|
[
"MIT"
] | 1.0.0 | be89a2e27c7d221e16fe456a5f01c944962cc3f5 | code | 8762 | __precompile__()
module DeferredFutures
using AutoHashEquals
using Distributed
using Serialization: AbstractSerializer, serialize_any, serialize_cycle, serialize_type
import Distributed: AbstractRemoteRef
import Serialization: serialize
export @defer, DeferredChannel, DeferredFuture, DeferredRemoteRef, reset!
"""
`DeferredRemoteRef` is the common supertype of `DeferredFuture` and `DeferredChannel` and is
the counterpart of `$AbstractRemoteRef`.
"""
abstract type DeferredRemoteRef <: AbstractRemoteRef end
@auto_hash_equals mutable struct DeferredFuture <: DeferredRemoteRef
outer::RemoteChannel
end
"""
DeferredFuture(pid::Integer=myid()) -> DeferredFuture
Create a `DeferredFuture` on process `pid`. The default `pid` is the current process.
Note that the data in the `DeferredFuture` will still be located wherever it was `put!`
from. The `pid` argument controls where the outermost reference to that data is located.
"""
function DeferredFuture(pid::Integer=myid())
ref = DeferredFuture(RemoteChannel(pid))
finalizer(finalize_ref, ref)
return ref
end
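# Hedged usage sketch (assumes a worker has already been added with
# `addprocs`): the outer reference lives on the caller, but the data stays
# wherever `put!` runs.
#
#   using Distributed
#   df = DeferredFuture()                 # outer reference on myid()
#   remotecall_wait(first(workers())) do
#       put!(df, 42)                      # inner RemoteChannel created on the worker
#   end
#   fetch(df)                             # -> 42, fetched on demand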
"""
show(io::IO, ref::DeferredFuture)
Print a simplified string representation of the `DeferredFuture` with its RemoteChannel
parameters.
"""
function Base.show(io::IO, ref::DeferredFuture)
rc = ref.outer
print(io, "$(typeof(ref).name.name) at ($(rc.where),$(rc.whence),$(rc.id))")
end
"""
serialize(s::AbstractSerializer, ref::DeferredFuture)
Serialize a DeferredFuture such that it can de deserialized by `deserialize` in a cluster.
"""
function serialize(s::AbstractSerializer, ref::DeferredFuture)
serialize_cycle(s, ref) && return
serialize_type(s, DeferredFuture, true)
serialize_any(s, ref.outer)
end
@auto_hash_equals mutable struct DeferredChannel <: DeferredRemoteRef
outer::RemoteChannel
func::Function # Channel generating function used for creating the `RemoteChannel`
end
"""
    DeferredChannel(f::Function, pid::Integer=myid()) -> DeferredChannel
Create a `DeferredChannel` whose underlying remote channel is constructed by `f`.
`f` is a function that, when executed on `pid`, must return an implementation of
`AbstractChannel`.
The default `pid` is the current process.
"""
function DeferredChannel(f::Function, pid::Integer=myid())
ref = DeferredChannel(RemoteChannel(pid), f)
finalizer(finalize_ref, ref)
return ref
end
"""
DeferredChannel(pid::Integer=myid(), num::Integer=1; content::DataType=Any) -> DeferredChannel
Create a `DeferredChannel`. The default `pid` is the current process. When initialized, the
`DeferredChannel` will reference a `Channel{content}(num)` on process `pid`.
Note that the data in the `DeferredChannel` will still be located wherever the first piece
of data was `put!` from. The `pid` argument controls where the outermost reference to that
data is located.
"""
function DeferredChannel(pid::Integer=myid(), num::Integer=1; content::DataType=Any)
ref = DeferredChannel(RemoteChannel(pid), ()->Channel{content}(num))
finalizer(finalize_ref, ref)
return ref
end
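# Hedged usage sketch: a buffered channel of Ints; the inner channel is only
# created on the first `put!`.
#
#   dc = DeferredChannel(myid(), 32; content=Int)
#   put!(dc, 1); put!(dc, 2)
#   take!(dc)    # -> 1 (removes the value)
#   fetch(dc)    # -> 2 (peeks without removing)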
"""
show(io::IO, ref::DeferredChannel)
Print a simplified string representation of the `DeferredChannel` with its RemoteChannel
parameters and its function.
"""
function Base.show(io::IO, ref::DeferredChannel)
rc = ref.outer
print(
io,
"$(typeof(ref).name.name)($(ref.func)) at ($(rc.where),$(rc.whence),$(rc.id))"
)
end
"""
serialize(s::AbstractSerializer, ref::DeferredChannel)
Serialize a DeferredChannel such that it can de deserialized by `deserialize` in a cluster.
"""
function serialize(s::AbstractSerializer, ref::DeferredChannel)
serialize_cycle(s, ref) && return
serialize_type(s, DeferredChannel, true)
serialize_any(s, ref.outer)
serialize(s, ref.func)
end
"""
finalize_ref(ref::DeferredRemoteRef)
This finalizer is attached to both `DeferredFuture` and `DeferredChannel` on construction
and finalizes the inner and outer `RemoteChannel`s.
For more information on finalizing remote references, see the Julia manual[^1].
[^1]: [Remote References and Distributed Garbage Collection](http://docs.julialang.org/en/latest/manual/parallel-computing.html#Remote-References-and-Distributed-Garbage-Collection-1)
"""
function finalize_ref(ref::DeferredRemoteRef)
# finalizes as recommended in Julia docs:
# http://docs.julialang.org/en/latest/manual/parallel-computing.html#Remote-References-and-Distributed-Garbage-Collection-1
# check for ref.outer.where == 0 as the contained RemoteChannel may have already been
# finalized
if ref.outer.where > 0 && isready(ref.outer)
inner = take!(ref.outer)
finalize(inner)
end
finalize(ref.outer)
return nothing
end
"""
reset!{T<:DeferredRemoteRef}(ref::T) -> T
Removes any data from the `DeferredRemoteRef` and allows it to be reinitialized with data.
Returns the input `DeferredRemoteRef`.
"""
function reset!(ref::DeferredRemoteRef)
if isready(ref.outer)
inner = take!(ref.outer)
# as recommended in Julia docs:
# http://docs.julialang.org/en/latest/manual/parallel-computing.html#Remote-References-and-Distributed-Garbage-Collection-1
finalize(inner)
end
return ref
end
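# Hedged sketch: `reset!` returns the ref to its uninitialized state so it can
# be filled again.
#
#   df = DeferredFuture()
#   put!(df, 1); fetch(df)   # -> 1
#   reset!(df)               # finalizes the inner RemoteChannel
#   put!(df, 2); fetch(df)   # -> 2; a second put! without reset! would throw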
"""
put!(ref::DeferredFuture, v) -> DeferredFuture
Store a value to a `DeferredFuture`. `DeferredFuture`s, like `Future`s, are write-once
remote references. A `put!` on an already set `DeferredFuture` throws an `Exception`.
Returns its first argument.
"""
function Base.put!(ref::DeferredFuture, val)
if !isready(ref.outer)
inner = RemoteChannel()
put!(ref.outer, inner)
put!(fetch(ref.outer), val)
return ref
else
throw(ErrorException("DeferredFuture can only be set once."))
end
end
"""
put!(rr::DeferredChannel, val) -> DeferredChannel
Store a value to the `DeferredChannel`. If the channel is full, blocks until space is
available. Returns its first argument.
"""
function Base.put!(ref::DeferredChannel, val)
# On the first call to `put!` create the `RemoteChannel` and `put!` it in the `Future`
if !isready(ref.outer)
inner = RemoteChannel(ref.func)
put!(ref.outer, inner)
end
# `fetch` the `RemoteChannel` and `put!` the value in there
put!(fetch(ref.outer), val)
return ref
end
"""
isready(ref::DeferredRemoteRef) -> Bool
Determine whether a `DeferredRemoteRef` has a value stored to it. Note that this function
can cause race conditions, since by the time you receive its result it may no longer be
true.
"""
function Base.isready(ref::DeferredRemoteRef)
isready(ref.outer) && isready(fetch(ref.outer))
end
"""
fetch(ref::DeferredRemoteRef) -> Any
Wait for and get the value of a remote reference.
"""
function Base.fetch(ref::DeferredRemoteRef)
fetch(fetch(ref.outer))
end
"""
wait(ref::DeferredRemoteRef) -> DeferredRemoteRef
Block the current task until a value becomes available on the `DeferredRemoteRef`. Returns
its first argument.
"""
function Base.wait(ref::DeferredRemoteRef)
wait(ref.outer)
wait(fetch(ref.outer))
return ref
end
# mimics the Future/RemoteChannel indexing behaviour in Base
Base.getindex(ref::DeferredRemoteRef, args...) = getindex(fetch(ref.outer), args...)
"""
close(ref::DeferredChannel)
Closes a `DeferredChannel`. An exception is thrown by:
* `put!` on a closed `DeferredChannel`
* `take!` and `fetch` on an empty, closed `DeferredChannel`
"""
function Base.close(ref::DeferredChannel)
if isready(ref.outer)
inner = fetch(ref.outer)
close(inner)
else
rc = RemoteChannel()
close(rc)
put!(ref.outer, rc)
end
return nothing
end
"""
take!(ref::DeferredChannel, args...)
Fetch value(s) from a `DeferredChannel`, removing the value(s) in the process. Note that
`take!` passes through `args...` to the innermost `AbstractChannel` and the default
`Channel` accepts no `args...`.
"""
Base.take!(ref::DeferredChannel, args...) = take!(fetch(ref.outer), args...)
"""
@defer Future(...)
@defer RemoteChannel(...)
`@defer` transforms a `Future` or `RemoteChannel` construction into a `DeferredFuture` or
`DeferredChannel` construction.
"""
macro defer(ex::Expr)
if ex.head != :call
throw(AssertionError("Expected expression to be a function call, but got $(ex)."))
end
if ex.args[1] == :Future
return Expr(:call, :DeferredFuture, ex.args[2:end]...)
elseif ex.args[1] == :RemoteChannel
return Expr(:call, :DeferredChannel, ex.args[2:end]...)
else
throw(AssertionError("Expected RemoteChannel or Future and got $(ex.args[1])."))
end
end
end # module
| DeferredFutures | https://github.com/invenia/DeferredFutures.jl.git |
|
[
"MIT"
] | 1.0.0 | be89a2e27c7d221e16fe456a5f01c944962cc3f5 | code | 15202 | using DeferredFutures
using Distributed
using Serialization
using Test
@testset "DeferredRemoteRefs" begin
@testset "DeferredFuture Comparison" begin
rc = RemoteChannel()
@test DeferredFuture(rc) == DeferredFuture(rc)
@test hash(DeferredFuture(rc)) == hash(DeferredFuture(rc))
end
@testset "DeferredChannel Comparison" begin
rc = RemoteChannel()
func = () -> RemoteChannel()
@test DeferredChannel(rc, func) == DeferredChannel(rc, func)
@test hash(DeferredChannel(rc, func)) == hash(DeferredChannel(rc, func))
end
@testset "Finalizing" begin
df = DeferredFuture()
finalize(df)
@test_throws Exception isready(df)
@test_throws Exception fetch(df)
@test_throws Exception df[]
@test_throws Exception put!(df, 1)
@test_throws Exception take!(df)
@test_throws Exception wait(df)
finalize(df)
dc = DeferredChannel()
finalize(dc)
@test_throws Exception isready(dc)
@test_throws Exception fetch(dc)
@test_throws Exception close(dc)
@test_throws Exception dc[]
@test_throws Exception put!(dc, 1)
@test_throws Exception take!(dc)
@test_throws Exception wait(dc)
finalize(dc)
dc = DeferredChannel()
close(dc)
@test !isready(dc)
@test_throws Exception fetch(dc)
@test_throws Exception dc[]
@test_throws Exception put!(dc, 1)
@test_throws Exception take!(dc)
@test_throws Exception wait(dc)
close(dc)
finalize(dc)
@test_throws Exception isready(dc)
@test_throws Exception fetch(dc)
@test_throws Exception close(dc)
@test_throws Exception dc[]
@test_throws Exception put!(dc, 1)
@test_throws Exception take!(dc)
@test_throws Exception wait(dc)
df = DeferredFuture()
put!(df, 1)
@test df[] == 1
finalize(df)
@test_throws Exception isready(df)
@test_throws Exception fetch(df)
@test_throws Exception df[]
@test_throws Exception put!(df, 1)
@test_throws Exception take!(df)
@test_throws Exception wait(df)
dc = DeferredChannel()
put!(dc, 1)
@test dc[] == 1
finalize(dc)
@test_throws Exception isready(dc)
@test_throws Exception fetch(dc)
@test_throws Exception close(dc)
@test_throws Exception dc[]
@test_throws Exception put!(dc, 1)
@test_throws Exception take!(dc)
@test_throws Exception wait(dc)
dc = DeferredChannel()
put!(dc, 1)
@test dc[] == 1
close(dc)
@test isready(dc)
@test fetch(dc) == 1
@test dc[] == 1
@test_throws Exception put!(dc, 1)
@test take!(dc) == 1
@test !isready(dc)
@test_throws Exception fetch(dc)
@test_throws Exception dc[]
@test_throws Exception put!(dc, 1)
@test_throws Exception take!(dc)
@test_throws Exception wait(dc)
finalize(dc)
@test_throws Exception isready(dc)
@test_throws Exception fetch(dc)
@test_throws Exception close(dc)
@test_throws Exception dc[]
@test_throws Exception put!(dc, 1)
@test_throws Exception take!(dc)
@test_throws Exception wait(dc)
end
@testset "Distributed DeferredFuture" begin
top = myid()
bottom = addprocs(1)[1]
@everywhere using DeferredFutures
try
val = "hello"
df = DeferredFuture(top)
@test !isready(df)
fut = remotecall_wait(bottom, df) do dfr
put!(dfr, val)
end
@test fetch(fut) == df
@test isready(df)
@test fetch(df) == val
@test wait(df) == df
@test_throws ErrorException put!(df, val)
@test df[] == val
@test df[5] == 'o'
@test df.outer.where == top
@test fetch(df.outer).where == bottom
reset!(df)
@test !isready(df)
put!(df, "world")
@test fetch(df) == "world"
finalize(df)
@test_throws Exception isready(df)
@test_throws Exception fetch(df)
@test_throws Exception df[]
@test_throws Exception put!(df, 1)
@test_throws Exception take!(df)
@test_throws Exception wait(df)
finalize(df)
finally
rmprocs(bottom)
end
end
@testset "Distributed DeferredChannel" begin
top = myid()
bottom = addprocs(1)[1]
@everywhere using DeferredFutures
try
val = "hello"
channel = DeferredChannel(top, 32)
@test !isready(channel)
fut = remotecall_wait(bottom, channel) do dfr
put!(dfr, val)
end
@test fetch(fut) == channel
@test isready(channel)
@test fetch(channel) == val
@test wait(channel) == channel
@test channel[] == val
@test channel[5] == 'o'
put!(channel, "world")
@test take!(channel) == val
@test fetch(channel) == "world"
@test channel.outer.where == top
@test fetch(channel.outer).where == bottom
reset!(channel)
@test !isready(channel)
put!(channel, "world")
@test fetch(channel) == "world"
finalize(channel)
@test_throws Exception isready(channel)
@test_throws Exception fetch(channel)
@test_throws Exception close(channel)
@test_throws Exception channel[]
@test_throws Exception put!(channel, 1)
@test_throws Exception take!(channel)
@test_throws Exception wait(channel)
finalize(channel)
finally
rmprocs(bottom)
end
end
@testset "Allocation" begin
rand_size = 800000000 # sizeof(rand(10000, 10000))
GC.gc()
main_size = Base.summarysize(Distributed)
top = myid()
bottom = addprocs(1)[1]
@everywhere using DeferredFutures
try
df = DeferredFuture(top)
remote_size = remotecall_fetch(bottom, df) do dfr
GC.gc()
main_size = Base.summarysize(Distributed)
# the DeferredFuture is initialized and the data is stored on bottom
put!(dfr, rand(10000, 10000))
main_size
end
GC.gc()
# tests that the data has not been transfered to top
@test Base.summarysize(Distributed) < main_size + rand_size
remote_size_new = remotecall_fetch(bottom) do
GC.gc()
Base.summarysize(Distributed)
end
# tests that the data still exists on bottom
@test remote_size_new >= remote_size + rand_size
finally
rmprocs(bottom)
end
end
@testset "Transfer" begin
rand_size = 800000000 # sizeof(rand(10000, 10000))
GC.gc()
main_size = Base.summarysize(Main)
top = myid()
left, right = addprocs(2)
@everywhere using DeferredFutures
try
df = DeferredFuture(top)
left_remote_size = remotecall_fetch(left, df) do dfr
GC.gc()
main_size = Base.summarysize(Main)
put!(dfr, rand(10000, 10000))
main_size
end
right_remote_size = remotecall_fetch(right, df) do dfr
GC.gc()
main_size = Base.summarysize(Main)
global data = fetch(dfr)
main_size
end
GC.gc()
@test Base.summarysize(Main) < main_size + rand_size
right_remote_size_new = remotecall_fetch(right) do
GC.gc()
Base.summarysize(Main)
end
@test right_remote_size_new >= right_remote_size + rand_size
finally
rmprocs([left, right])
end
end
@testset "@defer" begin
ex = macroexpand(@__MODULE__, :(@defer RemoteChannel(()->Channel(5))))
ex = macroexpand(@__MODULE__, :(@defer RemoteChannel()))
channel = @defer RemoteChannel(()->Channel(32))
put!(channel, 1)
put!(channel, 2)
@test fetch(channel) == 1
@test take!(channel) == 1
@test fetch(channel) == 2
fut = macroexpand(@__MODULE__, :(@defer Future()))
other_future = macroexpand(@__MODULE__, :(@defer Future()))
@test_throws LoadError macroexpand(@__MODULE__, :(@defer mutable struct Foo end))
try
macroexpand(@__MODULE__, :(@defer mutable struct Foo end))
@test false
catch e
@test e.error isa AssertionError
end
@test_throws LoadError macroexpand(@__MODULE__, :(@defer Channel()))
try
macroexpand(@__MODULE__, :(@defer Channel()))
@test false
catch e
@test e.error isa AssertionError
end
close(channel)
end
@testset "Show" begin
rc = RemoteChannel()
rc_params = "($(rc.where),$(rc.whence),$(rc.id))"
@test sprint(show, DeferredFuture(rc)) == "DeferredFuture at $rc_params"
dc = DeferredChannel(rc, print)
@test sprint(show, dc) == "DeferredChannel(print) at $rc_params"
end
@testset "Serialization" begin
@testset "DeferredFuture serialization on same process" begin
df = DeferredFuture(myid())
io = IOBuffer()
serialize(io, df)
seekstart(io)
deserialized_df = deserialize(io)
close(io)
@test deserialized_df == df
end
@testset "DeferredFuture serialization on a cluster" begin
df1 = DeferredFuture(myid())
df2 = DeferredFuture(myid())
io = IOBuffer()
serialize(io, df1)
df1_string = take!(io)
close(io)
put!(df2, 28)
io = IOBuffer()
serialize(io, df2)
df2_string = take!(io)
close(io)
bottom = addprocs(1)[1]
@everywhere using DeferredFutures
@everywhere using Serialization
df3_string = ""
try
df3_string = @fetchfrom bottom begin
io = IOBuffer()
write(io, df1_string)
seekstart(io)
bottom_df1 = deserialize(io)
close(io)
put!(bottom_df1, 37)
io = IOBuffer()
write(io, df2_string)
seekstart(io)
bottom_df2 = deserialize(io)
close(io)
@test isready(bottom_df2) == true
@test fetch(bottom_df2) == 28
reset!(bottom_df2)
df3 = DeferredFuture(myid())
put!(df3, 14)
io = IOBuffer()
serialize(io, df3)
df3_string = take!(io)
close(io)
return df3_string
end
@test isready(df1) == true
@test fetch(df1) == 37
@test isready(df2) == false
@test df3_string != ""
finally
rmprocs(bottom)
end
io = IOBuffer()
write(io, df3_string)
seekstart(io)
bottom_df3 = deserialize(io)
close(io)
@test_broken isready(bottom_df3) == true
@test_broken fetch(bottom_df3) == 14
end
@testset "DeferredChannel serialization on same process" begin
dc = DeferredChannel()
io = IOBuffer()
serialize(io, dc)
seekstart(io)
deserialized_dc = deserialize(io)
close(io)
@test deserialized_dc == dc
end
@testset "DeferredChannel serialization on a cluster" begin
dc1 = DeferredChannel()
dc2 = DeferredChannel()
io = IOBuffer()
serialize(io, dc1)
dc1_string = take!(io)
close(io)
put!(dc2, 28)
io = IOBuffer()
serialize(io, dc2)
dc2_string = take!(io)
close(io)
bottom = addprocs(1)[1]
@everywhere using DeferredFutures
@everywhere using Serialization
dc3_string = ""
try
dc3_string = @fetchfrom bottom begin
io = IOBuffer()
write(io, dc1_string)
seekstart(io)
bottom_dc1 = deserialize(io)
close(io)
put!(bottom_dc1, 37)
io = IOBuffer()
write(io, dc2_string)
seekstart(io)
bottom_dc2 = deserialize(io)
close(io)
@test isready(bottom_dc2) == true
@test fetch(bottom_dc2) == 28
reset!(bottom_dc2)
dc3 = DeferredChannel()
put!(dc3, 14)
io = IOBuffer()
serialize(io, dc3)
dc3_string = take!(io)
close(io)
return dc3_string
end
@test isready(dc1) == true
@test fetch(dc1) == 37
@test isready(dc2) == false
@test dc3_string != ""
finally
rmprocs(bottom)
end
io = IOBuffer()
write(io, dc3_string)
seekstart(io)
bottom_dc3 = deserialize(io)
close(io)
@test_broken isready(bottom_dc3) == true
@test_broken fetch(bottom_dc3) == 14
end
@testset "DeferredFuture serialization as part of another object" begin
pnums = addprocs(1)
@everywhere using DeferredFutures
try
x = ()->3
df = DeferredFuture()
result = @fetch (df, x)
@test result[1] == df
@test result[2]() == 3
finally
rmprocs(pnums)
end
end
@testset "DeferredChannel serialization as part of another object" begin
pnums = addprocs(1)
@everywhere using DeferredFutures
try
x = ()->3
dc = DeferredChannel()
result = @fetch (dc, x)
@test result[1] == dc
@test result[2]() == 3
finally
rmprocs(pnums)
end
end
end
end
| DeferredFutures | https://github.com/invenia/DeferredFutures.jl.git |
|
[
"MIT"
] | 1.0.0 | be89a2e27c7d221e16fe456a5f01c944962cc3f5 | docs | 2020 | # DeferredFutures
[Build Status](https://travis-ci.org/invenia/DeferredFutures.jl)
[Build Status](https://ci.appveyor.com/project/iamed2/deferredfutures-jl/branch/master)
[Code Coverage](https://codecov.io/gh/invenia/DeferredFutures.jl)
A `DeferredFuture` is like a regular Julia `Future`, but it is only initialized when `put!` is called on it.
This means that the data in the `DeferredFuture` lives with the process the data was created on.
The process the `DeferredFuture` itself lives on never needs to fetch the data to itself.
This is useful when there is a lightweight controller process which handles scheduling work on and transferring data between multiple machines.
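For example, a lightweight controller can have one worker produce a large array and another consume it, without the controller ever holding the data. Below is a minimal sketch assuming `addprocs(2)` yields workers 2 and 3; the array size is illustrative only.
```julia
using Distributed
addprocs(2) # assume this yields workers 2 and 3
@everywhere using DeferredFutures

df = DeferredFuture() # the reference lives on process 1; no data is stored yet

# put! on worker 2 initializes the DeferredFuture there, so the large
# array is stored on worker 2 and never travels to process 1
remotecall_wait(2, df) do dfr
    put!(dfr, rand(10_000, 10_000))
end

# worker 3 fetches the data; process 1 still only holds the small reference
total = remotecall_fetch(3, df) do dfr
    sum(fetch(dfr))
end
```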
## Usage
Use a `DeferredFuture` as you would a `Future`.
```julia
julia> DeferredFuture()
DeferredFuture at (1,1,1)
julia> DeferredFuture(3)
DeferredFuture at (3,1,2)
```
You can also use a `DeferredChannel` as you would a `RemoteChannel`.
```julia
julia> DeferredChannel(()->Channel{Int}(10), 4)
DeferredChannel(#1) at (4,1,3)
julia> DeferredChannel(4)
DeferredChannel(DeferredFutures.#2) at (4,1,4)
julia> DeferredChannel(4, 128; content=Int)
DeferredChannel(DeferredFutures.#2) at (4,1,5)
```
Note that `DeferredChannel()` will create a `RemoteChannel` with `RemoteChannel(()->Channel{Any}(1), myid())` by default.
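Once created, a `DeferredChannel` supports the usual channel operations. A small sketch (note that the default channel holds a single `Any` value):
```julia
dc = DeferredChannel()
put!(dc, 1) # initializes the underlying channel and stores the value
fetch(dc)   # returns 1 and leaves the value in the channel
take!(dc)   # returns 1 and removes it
```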
Furthermore, `@defer` can be used when creating a `Future` or `RemoteChannel` to create their deferred counterparts.
```julia
julia> @defer Future()
DeferredFuture at (1,1,6)
julia> @defer RemoteChannel(()->Channel{Int}(10))
DeferredChannel(#3) at (1,1,7)
```
Note that `DeferredFuture(n)` does not control where the data lives, only where the `RemoteChannel` which refers to the data lives.
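For instance, in the hypothetical sketch below the underlying `RemoteChannel` lives on worker 2, but because `put!` runs on worker 3, the data itself is stored on worker 3.
```julia
df = DeferredFuture(2) # the reference lives on worker 2
remotecall_wait(3, df) do dfr
    put!(dfr, rand(1_000)) # the data is created and stored on worker 3
end
```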
## License
DeferredFutures.jl is provided under the [MIT "Expat" License](LICENSE.md).
| DeferredFutures | https://github.com/invenia/DeferredFutures.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | code | 966 | using CUBScout
using Documenter
using BioSequences: @dna_str
DocMeta.setdocmeta!(CUBScout, :DocTestSetup, :(using CUBScout); recursive = true)
makedocs(;
modules = [CUBScout],
authors = "Augustus Pendleton",
repo = "https://github.com/gus-pendleton/CUBScout.jl/blob/{commit}{path}#{line}",
sitename = "CUBScout.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://gus-pendleton.github.io/CUBScout.jl",
edit_link = "main",
assets = String[joinpath("assets", "favicon.ico")],
),
pages = [
"Introduction" => "index.md",
"Inputs" => "inputs.md",
"Counting Codons" => "codons.md",
"Codon Usage Bias" => "cub.md",
"Expressivity Prediction" => "exp.md",
"Functions" => "functions.md",
"References" => "reference.md",
],
)
deploydocs(; repo = "github.com/gus-pendleton/CUBScout.jl", devbranch = "main")
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | code | 647 | module CUBScout
using FASTX: FASTAReader, sequence, identifier, description
using DelimitedFiles: readdlm
using BioSequences: BioSequences, LongDNA, NucSeq, encoded_data, @dna_str, @rna_str
using Artifacts
using Statistics: mean
export CodonDict,
make_CodonDict,
CodonDict_PATH,
DEFAULT_CodonDict,
ALTSTART_CodonDict,
EXAMPLE_DATA_PATH,
find_seqs,
seq_names,
seq_descriptions,
count_codons,
codon_frequency
export b, enc, enc_p, mcb, milc, scuo, all_cub
export e, melp, cai, fop, gcb
include("codon_dict.jl")
include("accessory_functions.jl")
include("CUB_measures.jl")
include("EXP_measures.jl")
end
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | code | 65531 | # B
"""
b(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
b(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
Calculate B from Karlin and Mrazek, 1996.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `ref_seqs`: by default, codon usage bias for each gene is calculated using the whole genome ("self") as a reference subset. If you would like to specify your own subsets to calculate against, such as ribosomal genes, `ref_seqs` takes a named tuple in the form `("subset_name" = Bool[],)`, where `Bool[]` is the same length as the number of sequences in your fasta file, and contains `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. You can provide multiple reference subsets as separate entries in the named tuple, and `CUBScout` will return the calculated measure using each subset. If you are providing multiple sets of sequences and want custom reference sets, `ref_seqs` should be a vector of named tuples corresponding to the vector of sequences.
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon, with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want the same output as you would get from coRdon, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> result = b(EXAMPLE_DATA_PATH); # Calculate measure on example dataset
julia> result_300 = b(EXAMPLE_DATA_PATH, threshold = 300); # Increase threshold length
julia> length(result.self)
3801
julia> length(result_300.self)
1650
julia> round.(result.self[1:5], digits = 6)
5-element Vector{Float64}:
0.209127
0.328976
0.223653
0.539114
0.249196
julia> b(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> b(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
julia> all_genes = find_seqs(EXAMPLE_DATA_PATH, r""); # Get a vector which is true for all genes
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> b(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes,)); # Calculate using ribosomal genes as a reference subset
julia> b(EXAMPLE_DATA_PATH, ref_seqs = (self = all_genes, ribosomal = ribosomal_genes,)); # Calculate using all genes and ribosomal genes as a reference subset
```
"""
function b(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
stop_mask = fill(true, 64)
end
return b(sequences, ref_seqs, uniqueI, stop_mask, rm_start, threshold, names)
end
function b(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
stop_mask = fill(true, 64)
end
if isempty(ref_seqs) & isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
b(sequences[i], ref_seqs, uniqueI, stop_mask, rm_start, threshold, names)
end
elseif isempty(ref_seqs)
Threads.@threads for i = 1:len
@inbounds results[i] =
b(sequences[i], ref_seqs, uniqueI, stop_mask, rm_start, threshold, names[i])
end
elseif isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
b(sequences[i], ref_seqs[i], uniqueI, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
b(sequences[i], ref_seqs[i], uniqueI, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function b(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_seqs,
dict_uniqueI::Vector{Vector{Int32}},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1] # Count matrix 64 (codons) x n sequences
@inbounds names = @views counts[2] # Names of each fasta sequence
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
lengths = @views transpose(sum(count_matrix, dims = 1)) # Find lengths of each gene (in codons)
seqs = @views size(count_matrix, 2) # Count how many genes we have
if isempty(ref_seqs) # If no ref_seqs provided, create a "self" tuple
(ref_seqs = (self = fill(true, seqs),))
else
@inbounds ref_seqs = @views map(x -> x[counts[3]], ref_seqs)
end
countAA = countsbyAA(count_matrix, dict_uniqueI) # Sum total codons for each amino acid for each sequence
@inbounds pa = @views map(x -> x ./ lengths, eachrow(countAA)) # Find frequency of AA in each gene
pa = transpose(reduce(hcat, pa))
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI) # Calculate frequency of each codon for each amino acid in each sequence
@inbounds normsetfreqs = @views map(
x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI),
ref_seqs,
) # Calculate frequency of each codon for each amino acid in reference subset
@inbounds dts = @views map(x -> abs.((normfreq .- x)), normsetfreqs) # Subtract the reference frequency of codon from the frequency of that codon within each gene
dts = map(dts) do y
map((x) -> remove_nan(x, 0.0), y) # Replace nans with 0s (will be summed later)
end
bas = map(dts) do dt
ba = Array{Float64}(undef, size(countAA, 1), size(countAA, 2))
for (i, aa) in enumerate(dict_uniqueI)
@inbounds row = @views sum(dt[aa, :], dims = 1) # Sum up contribution of dt for each amino acid
@inbounds ba[i, :] = row
end
ba
end
bs = map(bas) do ba
@inbounds vec(sum(ba .* pa, dims = 1)) # Multiply ba by pa and sum for each gene sequence
end
return (bs..., Identifier = names)
end
# ENC
"""
enc(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
enc(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
Calculate ENC from Wright, 1990.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon, with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want the same output as you would get from coRdon, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> result = enc(EXAMPLE_DATA_PATH); # Run ENC on example dataset
julia> round.(result.ENC[1:5], digits = 6)
5-element Vector{Float64}:
56.787282
52.725947
59.287949
52.296686
55.262981
julia> result_300 = enc(EXAMPLE_DATA_PATH, threshold = 300); # Increase threshold length
julia> length(result.ENC)
3801
julia> length(result_300.ENC)
1650
julia> enc(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> enc(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
```
"""
function enc(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
return enc(sequences, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
function enc(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
if isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
enc(sequences[i], uniqueI, deg, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
enc(sequences[i], uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function enc(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1] # This returns the codon count matrix
@inbounds names = @views counts[2] # This is the names for each sequence in the file
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
countAA = countsbyAA(count_matrix, dict_uniqueI) # Sum total codons for each amino acid for each sequence
pi_vec = enc_pi(count_matrix, countAA, seqs, dict_uniqueI) # Calculate pi statistic for each gene
@inbounds fa = @views @. (countAA * pi_vec - 1) / (countAA - 1) # Calculate Fa
fa[isnan.(fa)] .= 0.0 # Replace NaN with 0.0 (okay because will sum next)
res = vec(eFFNc(fa, dict_deg)) # Calculate Nc for each gene
return (ENC = res, Identifier = names)
end
# ENC Prime
"""
enc_p(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
enc_p(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
Calculate ENC' from Novembre, 2002.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `ref_seqs`: by default, codon usage bias for each gene is calculated using the whole genome ("self") as a reference subset. If you would like to specify your own subsets to calculate against, such as ribosomal genes, `ref_seqs` takes a named tuple in the form `("subset_name" = Bool[],)`, where `Bool[]` is the same length as the number of sequences in your fasta file, and contains `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. You can provide multiple reference subsets as separate entries in the named tuple, and `CUBScout` will return the calculated measure using each subset. If you are providing multiple sets of sequences and want custom reference sets, `ref_seqs` should be a vector of named tuples corresponding to the vector of sequences.
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon, with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want the same output as you would get from coRdon, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> result = enc_p(EXAMPLE_DATA_PATH); # Calculate measure on example dataset
julia> result_300 = enc_p(EXAMPLE_DATA_PATH, threshold = 300); # Increase threshold length
julia> length(result.self)
3801
julia> length(result_300.self)
1650
julia> round.(result.self[1:5], digits = 6)
5-element Vector{Float64}:
61.0
59.369798
60.749462
61.0
61.0
julia> enc_p(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> enc_p(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
julia> all_genes = find_seqs(EXAMPLE_DATA_PATH, r""); # Get a vector which is true for all genes
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> enc_p(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes,)); # Calculate using ribosomal genes as a reference subset
julia> enc_p(EXAMPLE_DATA_PATH, ref_seqs = (self = all_genes, ribosomal = ribosomal_genes,)); # Calculate using all genes and ribosomal genes as a reference subset
```
"""
function enc_p(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
return enc_p(sequences, ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
function enc_p(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
if isempty(ref_seqs) & isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
enc_p(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
elseif isempty(ref_seqs)
Threads.@threads for i = 1:len
@inbounds results[i] =
enc_p(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
elseif isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
enc_p(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
enc_p(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function enc_p(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_seqs,
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1] # Count matrix 64 (codons) x n sequences
@inbounds names = @views counts[2] # Names of each fasta sequence
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
if isempty(ref_seqs) # If no ref_seqs provided, create a "self" tuple
(ref_seqs = (self = fill(true, seqs),))
else
@inbounds ref_seqs = @views map(x -> x[counts[3]], ref_seqs)
end
countAA = countsbyAA(count_matrix, dict_uniqueI) # Sum total codons for each amino acid for each sequence
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI) # Find frequency of each codon within each amino acid for each gene
@inbounds normsetfreqs = @views map(
x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI),
ref_seqs,
) # Find the frequency of each codon across reference subset
@inbounds dts = @views map(x -> (@. (normfreq - x)^2 / x), normsetfreqs) # Calculate deviation from reference set for each codon
dts = map(dts) do y
map((x) -> remove_nan(x, 0.0), y)
end # Remove NaNs
chisums = map(dts) do dt
chisum = Array{Float64}(undef, size(countAA, 1), size(countAA, 2))
for (i, aa) in enumerate(dict_uniqueI)
@inbounds row = @views sum(dt[aa, :], dims = 1) # Sum up deviations for each amino acid
@inbounds chisum[i, :] = row
end
chisum
end
chisqs = map(x -> x .* countAA, chisums) # Calculate chi-squared values
fas = map(chisqs) do chisq
@inbounds fa = @. (chisq + countAA - dict_deg) / ((countAA - 1) * dict_deg) # Factor in degeneracy to calculate Fa
@inbounds fa[countAA.<5] .= 0.0
fa
end
res = map(x -> vec(eFFNc(x, dict_deg)), fas) # Calculate Nc
return (res..., Identifier = names)
end
# MCB
"""
mcb(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
mcb(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
Calculate MCB from Urrutia and Hurst, 2001.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `ref_seqs`: by default, codon usage bias for each gene is calculated using the whole genome ("self") as a reference subset. If you would like to specify your own subsets to calculate against, such as ribosomal genes, `ref_seqs` takes a named tuple in the form `("subset_name" = Bool[],)`, where `Bool[]` is the same length as the number of sequences in your fasta file, and contains `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. You can provide multiple reference subsets as separate entries in the named tuple, and `CUBScout` will return the calculated measure using each subset. If you are providing multiple sets of sequences and want custom reference sets, `ref_seqs` should be a vector of named tuples corresponding to the vector of sequences.
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon, with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want the same output as you would get from coRdon, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> result = mcb(EXAMPLE_DATA_PATH); # Calculate measure on example dataset
julia> result_300 = mcb(EXAMPLE_DATA_PATH, threshold = 300); # Increase threshold length
julia> length(result.self)
3801
julia> length(result_300.self)
1650
julia> round.(result.self[1:5], digits = 6)
5-element Vector{Float64}:
0.087211
0.178337
0.189682
0.24012
0.149869
julia> mcb(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> mcb(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
julia> all_genes = find_seqs(EXAMPLE_DATA_PATH, r""); # Get a vector which is true for all genes
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> mcb(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes,)); # Calculate using ribosomal genes as a reference subset
julia> mcb(EXAMPLE_DATA_PATH, ref_seqs = (self = all_genes, ribosomal = ribosomal_genes,)); # Calculate using all genes and ribosomal genes as a reference subset
```
"""
function mcb(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
return mcb(sequences, ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
function mcb(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
if isempty(ref_seqs) & isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
mcb(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
elseif isempty(ref_seqs)
Threads.@threads for i = 1:len
@inbounds results[i] =
mcb(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
elseif isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
mcb(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
mcb(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function mcb(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_seqs,
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1] # Count matrix 64 (codons) x n sequences
@inbounds names = @views counts[2] # Names of each fasta sequence
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
if isempty(ref_seqs) # If no ref_seqs provided, create a "self" tuple
(ref_seqs = (self = fill(true, seqs),))
else
@inbounds ref_seqs = @views map(x -> x[counts[3]], ref_seqs)
end
countAA = countsbyAA(count_matrix, dict_uniqueI) # Sum total codons for each amino acid for each sequence
@inbounds A = countAA[dict_deg.>1, :] .> 0 # Mark which degenerate amino acids (deg > 1) are present in each gene
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI) # Find frequency of each codon within each amino acid for each gene
@inbounds normsetfreqs = @views map(
x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI),
ref_seqs,
) # Find the frequency of each codon across reference subset
@inbounds dts = map(x -> (@. (normfreq - x)^2 / x), normsetfreqs)
dts = map(dts) do y
map((x) -> remove_nan(x, 0.0), y)
end # Remove NaNs
no_counts = count_matrix .<= 0
dts = map(dts) do dt
@inbounds dt[no_counts] .= 0
dt
end
bas = map(dts) do dt
ba = Array{Float64}(undef, size(countAA, 1), size(countAA, 2))
for (i, aa) in enumerate(dict_uniqueI)
@inbounds row = @views sum(dt[aa, :], dims = 1)
@inbounds ba[i, :] = row
end
ba
end
mcbs = map(bas) do ba
mat1 = @. ba[dict_deg>1, :] * log10(countAA[dict_deg>1, :])
mat1 = map((x) -> isnan(x) ? 0.0 : x, mat1)
vec(sum(mat1, dims = 1) ./ sum(A, dims = 1))
end
return (mcbs..., Identifier = names)
end
# MILC
"""
milc(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
milc(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
Calculate MILC from Supek and Vlahovicek, 2005.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `ref_seqs`: by default, codon usage bias for each gene is calculated using the whole genome ("self") as a reference subset. If you would like to specify your own subsets to calculate against, such as ribosomal genes, `ref_seqs` takes a named tuple in the form `("subset_name" = Bool[],)`, where `Bool[]` is the same length as the number of sequences in your fasta file, and contains `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. You can provide multiple reference subsets as separate entries in the named tuple, and `CUBScout` will return the calculated measure using each subset. If you are providing multiple sets of sequences and want custom reference sets, `ref_seqs` should be a vector of named tuples corresponding to the vector of sequences.
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon, with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want the same output as you would get from coRdon, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> result = milc(EXAMPLE_DATA_PATH); # Calculate measure on example dataset
julia> result_300 = milc(EXAMPLE_DATA_PATH, threshold = 300); # Increase threshold length
julia> length(result.self)
3801
julia> length(result_300.self)
1650
julia> round.(result.self[1:5], digits = 6)
5-element Vector{Float64}:
0.494826
0.583944
0.499472
0.635493
0.543935
julia> milc(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> milc(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
julia> all_genes = find_seqs(EXAMPLE_DATA_PATH, r""); # Get a vector which is true for all genes
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> milc(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes,)); # Calculate using ribosomal genes as a reference subset
julia> milc(EXAMPLE_DATA_PATH, ref_seqs = (self = all_genes, ribosomal = ribosomal_genes,)); # Calculate using all genes and ribosomal genes as a reference subset
```
"""
function milc(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
return milc(sequences, ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
function milc(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
if isempty(ref_seqs) & isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
milc(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
elseif isempty(ref_seqs)
Threads.@threads for i = 1:len
@inbounds results[i] =
milc(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
elseif isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
milc(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
milc(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function milc(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_seqs,
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1] # Count matrix 64 (codons) x n sequences
@inbounds names = @views counts[2] # Names of each fasta sequence
@inbounds count_matrix = count_matrix[stop_mask, :] # Remove entries if removing stop codons
lengths = @views transpose(sum(count_matrix, dims = 1)) # Find lengths of each gene (in codons)
seqs = @views size(count_matrix, 2) # Count how many genes we have
if isempty(ref_seqs) # If no ref_seqs provided, create a "self" tuple
(ref_seqs = (self = fill(true, seqs),))
else
ref_seqs = @views map(x -> x[counts[3]], ref_seqs)
end
countAA = countsbyAA(count_matrix, dict_uniqueI) # Sum total codons for each amino acid for each sequence
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI) # Find the frequency of each codon for each gene
@inbounds normsetfreqs =
map(x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI), ref_seqs) # Find the frequency of each codon across reference subset
cor = correction_term(countAA, lengths, dict_deg) # Calculate correction term
@inbounds per_codon_mas = map(x -> (@. log(normfreq / x) * count_matrix), normsetfreqs) # Calculate Ma for each codon
fixed_per_codon_mas = map(per_codon_mas) do y
map((x) -> remove_nan(x, 0.0), y)
end # Remove NaNs
mas = map(fixed_per_codon_mas) do pcmas
ma = Array{Float64}(undef, size(countAA, 1), size(countAA, 2)) # Pre-allocate matrix for Ma across each amino acid
for (i, aa) in enumerate(dict_uniqueI)
@inbounds @views row = 2 .* sum(pcmas[aa, :], dims = 1) # Calculate ma for each amino acid
@inbounds ma[i, :] = row
end
ma
end
milcs = map(mas) do ma
@inbounds @views vec(([sum(ma, dims = 1)...] ./ lengths) .- cor) # Calculate MILC for each gene
end
return (milcs..., Identifier = names)
end
# SCUO
"""
scuo(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
scuo(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
Calculate SCUO from Wan et al., 2004.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon, with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want the same output as you would get from coRdon, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> result = scuo(EXAMPLE_DATA_PATH); # Run SCUO on example dataset
julia> round.(result.SCUO[1:5], digits = 6)
5-element Vector{Float64}:
0.143121
0.191237
0.096324
0.345211
0.105744
julia> result_300 = scuo(EXAMPLE_DATA_PATH, threshold = 300); # Increase threshold length
julia> length(result.SCUO)
3801
julia> length(result_300.SCUO)
1650
julia> scuo(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> scuo(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
```
"""
function scuo(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
return scuo(sequences, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
function scuo(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
if isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
scuo(sequences[i], uniqueI, deg, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
scuo(sequences[i], uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function scuo(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds @views count_matrix = counts[1] # This is our codon count matrix
@inbounds @views names = counts[2] # These are the names of each sequence
@inbounds @views count_matrix = count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
countAA = countsbyAA(count_matrix, dict_uniqueI) # Find amino acid count matrix
Ha = scuo_freq(count_matrix, countAA, seqs, dict_uniqueI) # Calculate normalized frequency of each codon
Hmax = log10.(dict_deg) # Maximum possible entropy for each amino acid, given its degeneracy
Oa = @views map(x -> (Hmax .- x) ./ Hmax, eachcol(Ha))
@inbounds Oa = reduce(hcat, Oa)
@inbounds Fa = @views countAA ./ sum(countAA[dict_deg.>1, :], dims = 1)
@inbounds mult = Oa .* Fa
mult = map((x) -> isnan(x) ? 0.0 : x, mult)
res = vec(sum(mult, dims = 1))
return (SCUO = res, Identifier = names)
end
"""
all_cub(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
all_cub(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, ref_seqs = (), rm_start = false, rm_stop = false, threshold = 80)
Calculate all codon usage bias measures at once. Because many measures require the same initial calculations, this is more efficient than calculating them individually.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want output matching coRdon's, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> all_cub_results = all_cub(EXAMPLE_DATA_PATH); # Calculate all six codon usage measures on example dataset
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> all_cub(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes,)); # Calculate all measures using ribosomal genes as a reference subset
```
"""
function all_cub(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
return all_cub(sequences, ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
function all_cub(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
ref_seqs = (),
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
end
if isempty(ref_seqs) && isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
all_cub(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names)
end
elseif isempty(ref_seqs)
Threads.@threads for i = 1:len
@inbounds results[i] =
all_cub(sequences[i], ref_seqs, uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
elseif isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] =
all_cub(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] =
all_cub(sequences[i], ref_seqs[i], uniqueI, deg, stop_mask, rm_start, threshold, names[i])
end
end
return results
end
function all_cub(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_seqs,
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds @views count_matrix = counts[1] # This is our codon count matrix
@inbounds @views seqnames = counts[2] # These are our sequence names
@inbounds @views count_matrix = count_matrix[stop_mask, :] # Remove entries if removing stop codons
lengths = @views transpose(sum(count_matrix, dims = 1)) # Find lengths of each gene (in codons)
seqs = @views size(count_matrix, 2) # Count how many genes we have
if isempty(ref_seqs)
(ref_seqs = (self = fill(true, seqs),)) # If no reference subset provided, make a "self" subset
else
ref_seqs = @views map(x -> x[counts[3]], ref_seqs)
end
countAA = countsbyAA(count_matrix, dict_uniqueI) # Count amino acids in each gene
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI) # Calculate codon frequency within each amino acid within each gene
@inbounds @views normsetfreqs =
map(x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI), ref_seqs) # Calculate codon frequency within each amino acid across reference subsets
# Up to this point, the preliminary calculations are shared by all measures
# Calculating B
@inbounds b_pa = map(x -> x ./ lengths, eachrow(countAA))
b_pa = transpose(reduce(hcat, b_pa))
@inbounds b_dts = map(x -> abs.((normfreq .- x)), normsetfreqs) # Absolute deviation of each gene's codon frequencies from the reference frequencies
b_dts = map(b_dts) do y
map((x) -> remove_nan(x, 0.0), y)
end
b_bas = map(b_dts) do dt
ba = Array{Float64}(undef, size(countAA, 1), size(countAA, 2))
for (i, aa) in enumerate(dict_uniqueI)
@inbounds @views row = @views sum(dt[aa, :], dims = 1)
@inbounds @views ba[i, :] = row
end
ba
end
B_result = map(b_bas) do ba
vec(sum(ba .* b_pa, dims = 1))
end
# End calculating B (B is the result)
# Calculate ENC
pi_vec = enc_pi(count_matrix, countAA, seqs, dict_uniqueI)
@inbounds enc_fa = @. (countAA * pi_vec - 1) / (countAA - 1)
@inbounds enc_fa[isnan.(enc_fa)] .= 0.0
ENC_result = (ENC = vec(eFFNc(enc_fa, dict_deg)),)
# End calculating ENC (ENC is the result)
# Calculate ENC'
@inbounds encp_dts = map(x -> (@. (normfreq - x)^2 / x), normsetfreqs) # Calculate Ma for each codon
encp_dts = map(encp_dts) do y
map((x) -> remove_nan(x, 0.0), y)
end # Remove NaNs
encp_chisums = map(encp_dts) do dt
chisum = Array{Float64}(undef, size(countAA, 1), size(countAA, 2))
for (i, aa) in enumerate(dict_uniqueI)
@inbounds row = @views sum(dt[aa, :], dims = 1)
@inbounds chisum[i, :] = row
end
chisum
end
encp_chisqs = map(x -> x .* countAA, encp_chisums) # Scale the chi-square sums by each amino acid's count
encp_fas = map(encp_chisqs) do chisq
@inbounds fa = @. (chisq + countAA - dict_deg) / ((countAA - 1) * dict_deg)
@inbounds fa[countAA.<5] .= 0.0
fa
end
ENCP_result = map(x -> vec(eFFNc(x, dict_deg)), encp_fas)
#End calculating ENC' (ENCP is the result)
#Start calculating MCB (can use ENCP dts)
no_counts = count_matrix .<= 0
mcb_dts = map(encp_dts) do dt
@inbounds dt[no_counts] .= 0
dt
end
mcb_bas = map(mcb_dts) do dt
ba = Array{Float64}(undef, size(countAA, 1), size(countAA, 2))
for (i, aa) in enumerate(dict_uniqueI)
@inbounds row = @views sum(dt[aa, :], dims = 1)
@inbounds ba[i, :] = row
end
ba
end
A = countAA[dict_deg.>1, :] .> 0
MCB_result = map(mcb_bas) do ba
@inbounds mat1 = @. ba[dict_deg>1, :] * log10(countAA[dict_deg>1, :])
mat1 = map((x) -> isnan(x) ? 0.0 : x, mat1)
@inbounds vec(sum(mat1, dims = 1) ./ sum(A, dims = 1))
end
# End calculating MCB (MCB is the result)
# Start calculating MILC
cor = correction_term(countAA, lengths, dict_deg) # Calculate correction term
@inbounds per_codon_mas = map(x -> (@. log(normfreq / x) * count_matrix), normsetfreqs) # Calculate Ma for each codon
fixed_per_codon_mas = map(per_codon_mas) do y
map((x) -> remove_nan(x, 0.0), y)
end # Remove NaNs
mas = map(fixed_per_codon_mas) do pcmas
ma = Array{Float64}(undef, size(countAA, 1), size(countAA, 2)) # Pre-allocate matrix for Ma across each amino acid
for (i, aa) in enumerate(dict_uniqueI)
@inbounds row = 2 .* sum(pcmas[aa, :], dims = 1) # Calculate ma for each amino acid
@inbounds ma[i, :] = row
end
ma
end
MILC_result = map(mas) do ma
@inbounds @views vec(([sum(ma, dims = 1)...] ./ lengths) .- cor) # Calculate MILC for each gene
end
# End calculating MILC (MILC is the result)
# Start calculating SCUO
Ha = scuo_freq(count_matrix, countAA, seqs, dict_uniqueI)
Hmax = log10.(dict_deg)
Oa = map(x -> (Hmax .- x) ./ Hmax, eachcol(Ha))
Oa = reduce(hcat, Oa)
@inbounds Fa = countAA ./ sum(countAA[dict_deg.>1, :], dims = 1)
@inbounds mult = Oa .* Fa
mult = map((x) -> isnan(x) ? 0.0 : x, mult)
SCUO_result = (SCUO = vec(sum(mult, dims = 1)),)
return (
B = B_result,
ENC = ENC_result,
ENCP = ENCP_result,
MCB = MCB_result,
MILC = MILC_result,
SCUO = SCUO_result,
Identifier = seqnames,
)
end
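# Usage sketch (kept as a comment so nothing runs at include time): `all_cub`
# returns a NamedTuple whose fields hold each measure's result, and
# reference-dependent measures (B, ENC', MCB, MILC) are themselves NamedTuples
# keyed by reference set, per the return statement above.
#
#   results = all_cub(EXAMPLE_DATA_PATH)
#   results.ENC.ENC       # one ENC value per gene
#   results.B.self        # B computed against the default "self" reference set
#   results.SCUO.SCUO     # SCUO values
#   results.Identifier    # gene identifiers, parallel to each result vector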
"""
melp(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, ref_vector::Vector{Bool}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
melp(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, ref_vectors::Vector{Vector{Bool}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
Calculate MELP from Supek and Vlahovicek, 2005.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `ref_vector`: reference subset, which is required for `melp`: a `Vector{Bool}` the same length as the number of sequences in your fasta file, containing `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. If you are providing multiple filepaths and want custom reference sets, `ref_vectors` should be a vector of vectors corresponding to the vector of filepaths.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want output matching coRdon's, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> result = melp(EXAMPLE_DATA_PATH, ribosomal_genes); # Calculate MELP on example dataset
julia> round.(result.MELP[1:5], digits = 6)
5-element Vector{Float64}:
0.929414
1.007671
0.922357
0.951239
1.029531
julia> melp(EXAMPLE_DATA_PATH, ribosomal_genes, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> melp(EXAMPLE_DATA_PATH, ribosomal_genes, rm_start = true); # Remove start codons
```
"""
function melp(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_vector::Vector{Bool},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
milcs = milc(
sequences,
dict,
ref_seqs = (self = fill(true, length(ref_vector)), reference = ref_vector),
rm_start = rm_start,
rm_stop = rm_stop,
threshold = threshold,
names = names
)
return (MELP = milcs.self ./ milcs.reference, Identifier = milcs.Identifier)
end
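# Relationship sketch: MELP is simply the elementwise ratio of the two MILC
# columns computed above. Kept as a comment so nothing runs at include time;
# assumes the example data shipped with CUBScout.
#
#   ribosomal = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal")
#   m = milc(EXAMPLE_DATA_PATH,
#            ref_seqs = (self = fill(true, length(ribosomal)), reference = ribosomal))
#   melp(EXAMPLE_DATA_PATH, ribosomal).MELP == m.self ./ m.reference  # true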
function melp(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
ref_vectors::Vector{Vector{Bool}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
ref_tuples = map(x -> (self = fill(true, length(x)), reference = x), ref_vectors)
milcs = milc(
sequences,
dict,
ref_seqs = ref_tuples,
rm_start = rm_start,
rm_stop = rm_stop,
threshold = threshold,
names = names
)
return map(x -> (MELP = x.self ./ x.reference, Identifier = x.Identifier), milcs)
end
"""
e(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, ref_vector::Vector{Bool}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
e(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, ref_vectors::Vector{Vector{Bool}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
Calculate E from Karlin and Mrazek, 1996.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `ref_vector`: reference subset, which is required for `e`: a `Vector{Bool}` the same length as the number of sequences in your fasta file, containing `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. If you are providing multiple filepaths and want custom reference sets, `ref_vectors` should be a vector of vectors corresponding to the vector of filepaths.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want output matching coRdon's, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> result = e(EXAMPLE_DATA_PATH, ribosomal_genes); # Calculate E on example dataset
julia> round.(result.E[1:5], digits = 6)
5-element Vector{Float64}:
0.762317
1.025839
0.875954
0.986498
1.111275
julia> e(EXAMPLE_DATA_PATH, ribosomal_genes, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> e(EXAMPLE_DATA_PATH, ribosomal_genes, rm_start = true); # Remove start codons
```
"""
function e(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_vector::Vector{Bool},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
bs = b(
sequences,
dict,
ref_seqs = (self = fill(true, length(ref_vector)), reference = ref_vector),
rm_start = rm_start,
rm_stop = rm_stop,
threshold = threshold,
names = names
)
return (E = bs.self ./ bs.reference, Identifier = bs.Identifier)
end
function e(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
ref_vectors::Vector{Vector{Bool}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
ref_tuples = map(x -> (self = fill(true, length(x)), reference = x), ref_vectors)
bs = b(
sequences,
dict,
ref_seqs = ref_tuples,
rm_start = rm_start,
rm_stop = rm_stop,
threshold = threshold,
names = names
)
return map(x -> (E = x.self ./ x.reference, Identifier = x.Identifier), bs)
end
"""
cai(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, ref_vector::Vector{Bool}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
cai(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, ref_vectors::Vector{Vector{Bool}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
Calculate CAI from Sharp and Li, 1987.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `ref_vector`: reference subset, which is required for `cai`: a `Vector{Bool}` the same length as the number of sequences in your fasta file, containing `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. If you are providing multiple filepaths and want custom reference sets, `ref_vectors` should be a vector of vectors corresponding to the vector of filepaths.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want output matching coRdon's, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> result = cai(EXAMPLE_DATA_PATH, ribosomal_genes); # Calculate CAI on example dataset
julia> round.(result.CAI[1:5], digits = 6)
5-element Vector{Float64}:
0.844967
0.88548
0.817348
1.072675
0.834179
julia> cai(EXAMPLE_DATA_PATH, ribosomal_genes, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> cai(EXAMPLE_DATA_PATH, ribosomal_genes, rm_start = true); # Remove start codons
```
"""
function cai(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_vector::Vector{Bool},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
aa_names = dict.AA_nostops
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
aa_names = dict.AA
end
return cai(sequences, ref_vector, uniqueI, deg, stop_mask, aa_names, rm_start, threshold, names)
end
function cai(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
ref_vectors::Vector{Vector{Bool}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
aa_names = dict.AA_nostops
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
aa_names = dict.AA
end
if isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] = cai(
sequences[i],
ref_vectors[i],
uniqueI,
deg,
stop_mask,
aa_names,
rm_start,
threshold,
names
)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] = cai(
sequences[i],
ref_vectors[i],
uniqueI,
deg,
stop_mask,
aa_names,
rm_start,
threshold,
names[i]
)
end
end
return results
end
function cai(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_vector::Vector{Bool},
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
aa_names::Vector{String},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1]
@inbounds names = @views counts[2]
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
@inbounds ref_seqs = (self = fill(true, seqs), reference = ref_vector[counts[3]])
countAA = countsbyAA(count_matrix, dict_uniqueI) # This is the same for all measures
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI)
@inbounds normsetfreqs = @views map(
x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI),
ref_seqs,
)
max_aa = fill(0.0, length(aa_names))
map(dict_uniqueI) do aa
@inbounds max_aa[aa] .= maximum(normsetfreqs.reference[aa])
end
@inbounds max_aa[max_aa.==0] .= 0.5
@inbounds nodeg = dict_uniqueI[dict_deg.==1]
@inbounds map(x -> count_matrix[x, :] .= 0, nodeg)
@inbounds mult = @. log(normfreq / max_aa) * count_matrix
mult = remove_nan.(mult, 0)
@inbounds cai_result = vec(exp.(sum(mult, dims = 1) ./ sum(count_matrix, dims = 1)))
return (CAI = cai_result, Identifier = names)
end
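# A minimal sketch, separate from the package, of the CAI arithmetic for one
# gene: CAI is the geometric mean of each counted codon's relative adaptiveness
# w (its frequency divided by the most-used synonymous codon's frequency in the
# reference set). Safe to run at include time.
let w = [1.0, 0.25] # relative adaptiveness of two synonymous codons
    n = [10, 2]     # how often each codon occurs in the gene
    cai_toy = exp(sum(n .* log.(w)) / sum(n)) # geometric mean of the weights
    @assert isapprox(cai_toy, 0.7937, atol = 1e-4)
end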
"""
fop(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, ref_vector::Vector{Bool}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
fop(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, ref_vectors::Vector{Vector{Bool}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, rm_start = false, rm_stop = false, threshold = 80)
Calculate FOP from Ikemura, 1981.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of sequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each filepath. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `ref_vector`: reference subset, which is required for `fop`: a `Vector{Bool}` the same length as the number of sequences in your fasta file, containing `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. If you are providing multiple filepaths and want custom reference sets, `ref_vectors` should be a vector of vectors corresponding to the vector of filepaths.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want output matching coRdon's, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> result = fop(EXAMPLE_DATA_PATH, ribosomal_genes); # Calculate CAI on example dataset
julia> round.(result.FOP[1:5], digits = 6)
5-element Vector{Float64}:
0.567816
0.566845
0.509695
0.725
0.653784
julia> fop(EXAMPLE_DATA_PATH, ribosomal_genes, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> fop(EXAMPLE_DATA_PATH, ribosomal_genes, rm_start = true); # Remove start codons
```
"""
function fop(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_vector::Vector{Bool},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
aa_names = dict.AA_nostops
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
aa_names = dict.AA
end
return fop(sequences, ref_vector, uniqueI, deg, stop_mask, aa_names, rm_start, threshold, names)
end
function fop(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
ref_vectors::Vector{Vector{Bool}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
deg = dict.deg_nostops
stop_mask = dict.stop_mask
aa_names = dict.AA_nostops
else
uniqueI = dict.uniqueI
deg = dict.deg
stop_mask = fill(true, 64)
aa_names = dict.AA
end
if isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] = fop(
sequences[i],
ref_vectors[i],
uniqueI,
deg,
stop_mask,
aa_names,
rm_start,
threshold,
names
)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] = fop(
sequences[i],
ref_vectors[i],
uniqueI,
deg,
stop_mask,
aa_names,
rm_start,
threshold,
names[i]
)
end
end
return results
end
function fop(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
ref_vector::Vector{Bool},
dict_uniqueI::Vector{Vector{Int32}},
dict_deg::Vector{<:Integer},
stop_mask::Vector{Bool},
aa_names::Vector{String},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1]
@inbounds names = @views counts[2]
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
@inbounds ref_seqs = (self = fill(true, seqs), reference = ref_vector[counts[3]])
countAA = countsbyAA(count_matrix, dict_uniqueI) # This is the same for all measures
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI)
@inbounds normsetfreqs = @views map(
x -> normTotalFreq(count_matrix[:, x], countAA[:, x], dict_uniqueI),
ref_seqs,
)
max_aa = fill(0.0, length(aa_names))
map(dict_uniqueI) do aa
@inbounds max_aa[aa] .= maximum(normsetfreqs.reference[aa])
end
@inbounds max_aa[max_aa.==0] .= 0.5
@inbounds nodeg = dict_uniqueI[dict_deg.==1]
@inbounds map(x -> count_matrix[x, :] .= 0, nodeg)
@inbounds ra = normfreq ./ max_aa
count2 = copy(count_matrix)
@inbounds count2[ra.<0.9] .= 0
fops = vec(sum(count2, dims = 1) ./ sum(count_matrix, dims = 1))
return (FOP = fops, Identifier = names)
end
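# A minimal sketch, separate from the package, of the FOP arithmetic above: a
# codon counts as "optimal" when its within-amino-acid frequency is at least
# 90% of the most-used synonymous codon's, and FOP = optimal codons / total
# codons. Safe to run at include time.
let counts = [12, 3, 5]                # three synonymous codons in one gene
    normfreq = counts ./ sum(counts)   # frequencies within the amino acid
    ra = normfreq ./ maximum(normfreq) # relative adaptiveness
    fop_toy = sum(counts[ra .>= 0.9]) / sum(counts)
    @assert fop_toy == 12 / 20
end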
"""
gcb(sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{String}, Nothing} = nothing, ref_vector = [], perc = 0.05, rm_start = false, rm_stop = false, threshold = 80)
gcb(sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}}, dict::CodonDict = DEFAULT_CodonDict; names::Union{Vector{Vector{String}}, Nothing} = nothing, ref_vectors = [], perc = 0.05, rm_start = false, rm_stop = false, threshold = 80)
Calculate GCB from Merkl, 2003.
# Arguments
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to a fasta file of coding sequences (e.g. .fasta, .fna, .fa), or an IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. There are no quality checks, so each entry is assumed to be an individual coding sequence, in the correct frame, without 5' or 3' untranslated regions. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of BioSequences, with each vector corresponding to a genome. `CUBScout` is multithreaded; if there are multiple threads available, `CUBScout` will allocate a thread for each fasta file or vector of BioSequences. As such, providing a vector of filepaths (or `Vector{<:Vector{<:NucSeq}}`) as an argument will be faster than broadcasting across a vector of paths. Because a single file is only accessed by a single thread, it's never worth using more threads than the total number of files being analyzed.
- `dict`: codon dictionary of type `CodonDict`. The standard genetic code is loaded by default, but if necessary you can create your own codon dictionary using `make_CodonDict`
- `names`: An optional vector of names for each sequence. Only relevant if providing a vector of BioSequences, as names are automatically pulled from fasta files. If `sequences` is of type `Vector{<:Vector{<:NucSeq}}`, `names` should be of type `Vector{Vector{String}}`
- `ref_vector`: optional reference subset; by default `gcb` begins calculations using all genes as a seed. If you want to provide a custom reference set, it should be a `Vector{Bool}` the same length as the number of sequences in your fasta file, containing `true` for sequences you want in your reference subset and `false` for those you don't. You can use `find_seqs()` to generate this vector. If you are providing multiple filepaths and want custom reference sets, `ref_vectors` should be a vector of vectors corresponding to the vector of filepaths.
- `perc`: fraction of "top hits" to be used as the reference set in the next iteration. By default set to 0.05, i.e. the top 5% of genes.
- `rm_start`: whether to ignore the first codon of each sequence. Many organisms use alternative start codons such as TTG and CTG, which in other locations would generally code for leucine. There are a few approaches to deal with this. By default, `CUBScout` keeps each start codon and assigns it as though it were any other codon. Of course, this would slightly change leucine's contribution to codon usage bias. If you set `rm_start` to `true`, the first codon of every sequence is simply discarded. This will also affect the gene's length, which means it could be removed if it falls under the threshold. Other CUB packages (such as R's coRdon with alt.init = TRUE) assign all TTG and CTG codons to methionine, regardless of their location. I disagree with this approach from a biological perspective; those codons still code for leucine most of the time they are used. However, if you want output matching coRdon's, you can supply `ALTSTART_CodonDict` to the `dict` argument, and keep `rm_start` as `false`.
- `rm_stop`: whether to remove stop codons from calculations of codon usage bias.
- `threshold`: minimum length of a gene (in codons) to be used in codon usage bias calculations. By default this is set to 80 codons; any genes less than or equal to that length are discarded. If you want no genes discarded, set `threshold` to 0.
# Examples
```jldoctest
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal"); # Get a vector which is true for ribosomal genes
julia> result = gcb(EXAMPLE_DATA_PATH); # Calculate GCB on example dataset
julia> round.(result.GCB[1:5], digits = 6)
5-element Vector{Float64}:
-0.058765
-0.08659
-0.005496
-0.065659
-0.032062
julia> ribo_result = gcb(EXAMPLE_DATA_PATH, ref_vector = ribosomal_genes); # Calculate GCB with ribosomal genes as reference seed example dataset
julia> round.(ribo_result.GCB[1:5], digits = 6)
5-element Vector{Float64}:
-0.135615
-0.036687
-0.169136
-0.186104
-0.01653
julia> gcb(EXAMPLE_DATA_PATH, ALTSTART_CodonDict); # Code TTG and CTG as methionine
julia> gcb(EXAMPLE_DATA_PATH, rm_start = true); # Remove start codons
```
"""
function gcb(
sequences::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{String}, Nothing} = nothing,
ref_vector = [],
perc = 0.05,
rm_start = false,
rm_stop = false,
threshold = 80,
)
if rm_stop
uniqueI = dict.uniqueI_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
stop_mask = fill(true, 64)
end
return gcb(sequences, ref_vector, uniqueI, perc, stop_mask, rm_start, threshold, names)
end
function gcb(
sequences::Union{Vector{String}, Vector{<:IO}, Vector{<:FASTAReader}, Vector{<:Vector{<:NucSeq}}},
dict::CodonDict = DEFAULT_CodonDict;
names::Union{Vector{Vector{String}}, Nothing} = nothing,
ref_vectors = [],
perc = 0.05,
rm_start = false,
rm_stop = false,
threshold = 80,
)
len = length(sequences)
results = Vector{Any}(undef, len)
if rm_stop
uniqueI = dict.uniqueI_nostops
stop_mask = dict.stop_mask
else
uniqueI = dict.uniqueI
stop_mask = fill(true, 64)
end
if isempty(ref_vectors) && isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] = gcb(
sequences[i],
ref_vectors,
uniqueI,
perc,
stop_mask,
rm_start,
threshold,
names,
)
end
elseif isnothing(names)
Threads.@threads for i = 1:len
@inbounds results[i] = gcb(
sequences[i],
ref_vectors[i],
uniqueI,
perc,
stop_mask,
rm_start,
threshold,
names
)
end
elseif isempty(ref_vectors)
Threads.@threads for i = 1:len
@inbounds results[i] = gcb(
sequences[i],
ref_vectors,
uniqueI,
perc,
stop_mask,
rm_start,
threshold,
names[i]
)
end
else
Threads.@threads for i = 1:len
@inbounds results[i] = gcb(
sequences[i],
ref_vectors[i],
uniqueI,
perc,
stop_mask,
rm_start,
threshold,
names[i]
)
end
end
return results
end
function gcb(
fasta_seq::Union{String, IO, FASTAReader, Vector{<:NucSeq}},
refs,
dict_uniqueI::Vector{Vector{Int32}},
perc::Real,
stop_mask::Vector{Bool},
rm_start::Bool,
threshold::Integer,
names::Union{Vector{String}, Nothing},
)
counts = if typeof(fasta_seq) <: Vector{<:NucSeq}
count_codons(fasta_seq, names = names, remove_start = rm_start, threshold = threshold)
else count_codons(fasta_seq, rm_start, threshold)
end # Count codons in each gene
@inbounds count_matrix = @views counts[1]
@inbounds names = @views counts[2]
@inbounds count_matrix = @views count_matrix[stop_mask, :] # Remove entries if removing stop codons
seqs = @views size(count_matrix, 2) # Count how many genes we have
lengths = @views transpose(sum(count_matrix, dims = 1))
@inbounds seed = isempty(refs) ? fill(true, seqs) : refs[counts[3]] # Make our seed - this will be our initial reference set
countAA = countsbyAA(count_matrix, dict_uniqueI) # Count
normfreq = normFrequency(count_matrix, countAA, seqs, dict_uniqueI)
@inbounds normsetfreq =
@views normTotalFreq(count_matrix[:, seed], countAA[:, seed], dict_uniqueI)
gcb_prev = fill(0.0, seqs)
iter = 0
gcb = []
diff = false
# Now we'd enter the repeat loop
while true
@inbounds cb = log.(normsetfreq ./ nanmean(normfreq, 2))
@inbounds cb[normsetfreq.==0] .= -5
@inbounds gcb = vec(vec(sum(count_matrix .* cb, dims = 1)) ./ lengths)
diff = all(gcb .== gcb_prev)
if diff || iter > 6 # Stop on convergence or after six refinement rounds
break
end
iter += 1
gcb_prev = copy(gcb)
@inbounds tops = sortperm(gcb, rev = true)[1:convert(Int, trunc(perc * seqs))]
seed .= false
@inbounds seed[tops] .= true
@inbounds normsetfreq =
@views normTotalFreq(count_matrix[:, seed], countAA[:, seed], dict_uniqueI)
end
return (GCB = gcb, Identifier = names)
end
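# Iteration sketch: `gcb` alternates between scoring every gene against the
# current reference set's codon bias and re-seeding that reference set with the
# top `perc` fraction of scorers, stopping once scores converge or after six
# re-seeding rounds. With no `ref_vector` supplied, the whole genome seeds the
# first round.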
# Counting Codons for filepaths
const TABLE = let
table = fill(0xff, 2^8)
for (n, v) in [('A', 0), ('C', 1), ('G', 2), ('T', 3), ('U', 3)]
table[UInt8(n)+1] = v
table[UInt8(lowercase(n))+1] = v
end
for n in "SWKYMRBDHVN"
table[UInt8(n)+1] = 0xf0
table[UInt8(lowercase(n))+1] = 0xf0
end
Tuple(table)
end
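# Sanity sketch of the lookup semantics (uses only the constant above; safe to
# run at include time): A/C/G/T and U map to 2-bit values 0-3 in either case,
# IUPAC ambiguity codes map to the 0xf0 sentinel, and any other byte maps to
# 0xff.
@assert TABLE[UInt8('A')+1] == 0x00
@assert TABLE[UInt8('u')+1] == 0x03 # RNA, case-insensitive
@assert TABLE[UInt8('N')+1] == 0xf0 # ambiguous base sentinel
@assert TABLE[UInt8('!')+1] == 0xff # not a nucleotide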
function count_codons!(vector::AbstractVector{<:Integer}, seq::AbstractString, rem::Integer)
fill!(vector, 0)
mask = UInt(1 << 6 - 1)
remaining = rem
kmer = UInt8(0)
for codeunit in codeunits(seq)
value = TABLE[codeunit+0x01]
if value == 0xff
throw(DomainError(codeunit, "Cannot interpret as nucleotide"))
elseif value == 0xf0
remaining += 3 # This will "skip" this Kmer entirely, but keep us in frame
else
remaining -= 1
kmer = (kmer << 2 | value) & mask # Extend the 2-bit packed codon with this base
end
if remaining < 1
@inbounds vector[kmer+1] += 1
remaining = 3 # Completed a codon; reset the per-codon countdown
end
end
end
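# A minimal sketch (safe to run at include time, then discard): counting codons
# in a short in-frame CDS string. Codons are 2-bit packed, so a codon's slot in
# the 64-vector is 16*first + 4*second + third + 1 with A=0, C=1, G=2, T=3.
let buf = zeros(Int, 64)
    count_codons!(buf, "ATGAAATGA", 3) # rem = 3 keeps the start codon
    @assert buf[1] == 1  # AAA
    @assert buf[15] == 1 # ATG
    @assert buf[57] == 1 # TGA
    @assert sum(buf) == 3
end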
"""
count_codons(path::AbstractString, remove_start::Bool = false, threshold::Integer = 0)
count_codons(stream::IO, remove_start::Bool = false, threshold::Integer = 0)
count_codons(reader::FASTAReader, remove_start::Bool = false, threshold::Integer = 0)
count_codons(sequences::Vector{<:NucSeq}; names::Union{Vector{String}, Nothing} = nothing, remove_start::Bool = false, threshold::Integer = 0)
count_codons(sequence::NucSeq, remove_start::Bool = false)
Read a fasta file or BioSequence and return the occurence of each codon for each gene or sequence.
# Arguments
- `path` or `stream` or `reader` or `sequence(s)`: Fasta sequence to analyze. This can be a path to a fasta file of sequences, an IOStream, an open FASTAReader, a BioSequences nucleotide sequence, or a vector of nucleotide sequences. Note that count_codons isn't identifying ORFs - make sure these are actual CDSs in frame.
- `remove_start`: Whether to ignore the initial start codon
- `threshold`: Minimum length of the sequence *in codons* to be returned in the results.
# Output
If providing a single sequence, the result will be a 64x1 Matrix, which corresponds to the 64 codons in alphabetical order. If you want a list of the codons in alphabetical order, this is stored in `CUBScout.DEFAULT_CodonDict.codons`. If analyzing a fasta file or a vector of sequences, the result will be a tuple. The first element of the tuple is a 64xn matrix, where n = # of sequences above the threshold. The second element is a list of corresponding names for each column. The third element is a Boolean vector where `true` corresponds to sequences which did pass the threshold, and `false` is sequences which did not pass the threshold and so are not included in the results matrix. Names are pulled from fasta files and IO streams by default; if you would like to provide a vector of IDs or names when providing a `Vector{<:NucSeq}`, you can.
# Examples
```jldoctest
julia> using BioSequences: @dna_str
julia> example_dna = dna"ATGAAAATGAACTTTTGA"
18nt DNA Sequence:
ATGAAAATGAACTTTTGA
julia> count_codons(example_dna) |> first
1
julia> result = count_codons(EXAMPLE_DATA_PATH);
julia> first(result[1], 5)
5-element Vector{Int32}:
32
7
6
14
11
```
"""
function count_codons(reader::FASTAReader, remove_start::Bool = false, threshold::Integer = 0)
buffer = zeros(Int, 64)
result = Int32[]
names = String[]
length_passes = Bool[]
rem = remove_start ? 6 : 3
for record in reader
count_codons!(buffer, sequence(record), rem)
length_pass = sum(buffer) > threshold
push!(length_passes, length_pass)
if length_pass
@inbounds append!(result, buffer)
@inbounds push!(names, identifier(record))
end
end
@inbounds (reshape(result, 64, :), names, length_passes)
end
function count_codons(path::AbstractString, remove_start::Bool = false, threshold::Integer = 0)
open(FASTAReader, path; copy = false) do reader
count_codons(reader, remove_start, threshold)
end
end
function count_codons(stream::IO, remove_start::Bool = false, threshold::Integer = 0)
FASTAReader(stream) do reader
count_codons(reader, remove_start, threshold)
end
end
# Counting codons for BioSequences
function count_codons(sequence::NucSeq, remove_start::Bool = false)
cod_space = zeros(Int, (4,4,4))
remaining = remove_start ? 6 : 3
index = Int8[0,0,0]
for nuc in sequence
if remaining > 3
remaining -= 1
continue
end
bit = BioSequences.encoded_data(nuc)
if count_ones(bit) == 1 # Unambiguous base: exactly one bit set in the 4-bit encoding
@inbounds index[remaining] = trailing_zeros(bit) + 1
remaining -= 1
else
remaining -= 3
end
if remaining < 1 # Completed a codon; tally it and reset
@inbounds @views cod_space[index[1], index[2], index[3]] += 1
remaining = 3
end
end
return @inbounds @views reshape(cod_space, 64, 1)
end
function count_codons!(cod_array::AbstractArray{<:Integer}, index::AbstractArray{<:Integer}, seq::NucSeq, rem::Integer)
fill!(cod_array, 0)
remaining = rem
for nuc in seq
if remaining > 3
remaining -= 1
continue
end
bit = BioSequences.encoded_data(nuc)
if count_ones(bit) == 1 # Unambiguous base: exactly one bit set in the 4-bit encoding
@inbounds index[remaining] = trailing_zeros(bit) + 1
remaining -= 1
else
remaining -= 3
end
if remaining < 1 # Completed a codon; tally it and reset
@inbounds @views cod_array[index[1], index[2], index[3]] += 1
remaining = 3
end
end
return @inbounds @views reshape(cod_array, 64, 1)
end
function count_codons(sequences::Vector{<:NucSeq}; names::Union{Vector{String}, Nothing} = nothing, remove_start::Bool = false, threshold::Integer = 0)
buffer = zeros(Int, (4,4,4))
i_array = zeros(Int, 3)
result = Int32[]
length_passes = Bool[]
rem = remove_start ? 6 : 3
for cds in sequences
count_codons!(buffer, i_array, cds, rem)
length_pass = sum(buffer) > threshold
push!(length_passes, length_pass)
if length_pass
@inbounds append!(result, buffer)
end
end
name_vec = @inbounds @views isnothing(names) ? nothing : names[length_passes]
@inbounds (reshape(result, 64, :), name_vec, length_passes)
end
function countsbyAA(count_matrix, dict_uniqueI)
aa_matrix = Matrix{Int64}(undef, length(dict_uniqueI), size(count_matrix, 2))
for (i, aa) in enumerate(dict_uniqueI)
for (j, col) in enumerate(eachcol(selectdim(count_matrix, 1, aa)))
@inbounds aa_matrix[i, j] = sum(col)
end
end
return aa_matrix
end
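# A minimal sketch of the aggregation above (safe to run at include time): rows
# of the 64-row codon matrix listed in each amino acid's index vector are
# summed into one row per amino acid. Toy dictionary with a single two-codon
# "amino acid":
let counts = reshape(collect(1:64), 64, 1) # one fake gene
    lysine_rows = [1, 3]                   # AAA and AAG under the standard code
    @assert countsbyAA(counts, [lysine_rows]) == reshape([4], 1, 1) # 1 + 3
end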
function normFrequency(count_matrix, AAcount_matrix, seq_length::Integer, dict_uniqueI)
freq_matrix = zeros(Float64, size(count_matrix, 1), seq_length)
AAs = length(dict_uniqueI)
for column = 1:seq_length
for row = 1:AAs
@inbounds freq_matrix[dict_uniqueI[row], column] =
@views count_matrix[dict_uniqueI[row], column] / AAcount_matrix[row, column]
end
end
return freq_matrix
end
function normTotalFreq(count_matrix, AAcount_matrix, dict_uniqueI)
rowsums_codon = @views sum(count_matrix, dims = 2)
rowsums_aa = @views sum(AAcount_matrix, dims = 2)
freq_vector = zeros(Float64, size(count_matrix, 1))
for (j, aacount) in enumerate(rowsums_aa)
@inbounds freq_vector[dict_uniqueI[j]] =
@views rowsums_codon[dict_uniqueI[j], :] / (aacount)
end
return freq_vector
end
function scuo_freq(count_matrix, AA_count_matrix, seq_length, dict_uniqueI)
freq_matrix = zeros(Float64, size(AA_count_matrix, 1), seq_length)
AAs = length(dict_uniqueI)
for column = 1:seq_length
for row = 1:AAs
@inbounds freqs =
count_matrix[dict_uniqueI[row], column] / AA_count_matrix[row, column]
@inbounds vals = @. -freqs * log10(freqs)
vals = map((x) -> isnan(x) ? 0.0 : x, vals)
@inbounds freq_matrix[row, column] = sum(vals)
end
end
return freq_matrix
end
function correction_term(AAcount_matrix, length_vector, dict_deg)
cor = Float64[]
for (seq, len) in zip(eachcol(AAcount_matrix), length_vector)
@inbounds push!(cor, (sum((seq .> 0) .* (dict_deg .- 1)) / len) - 0.5)
end
return cor
end
function enc_pi(count_matrix, AAcount_matrix, seq_length::Integer, dict_uniqueI)
enc_pi = zeros(Float64, length(dict_uniqueI), seq_length)
AAs = length(dict_uniqueI)
for column = 1:seq_length
for row = 1:AAs
@inbounds freq =
count_matrix[dict_uniqueI[row], column] / AAcount_matrix[row, column]
@inbounds enc_pi[row, column] = sum(freq .^ 2)
end
end
return enc_pi
end
# Effective number of codons (Wright's Nc)
function eFFNc(fa_matrix, dict_deg)
@inbounds red = unique(dict_deg)[unique(dict_deg).!=1]
avgs = map(red) do x
rows = findall(y -> x .== y, dict_deg)
@inbounds avg =
sum(fa_matrix[rows, :], dims = 1) ./ sum(fa_matrix[rows, :] .!= 0, dims = 1)
@inbounds x == 3 || (avg[avg.==0] .= (1 / x))
return length(rows) ./ avg
end
avgs = reduce(vcat, avgs)
if any(avgs .== 0)
threes = findfirst(x -> x == 3, red)
twos = findfirst(x -> x == 2, red)
fours = findfirst(x -> x == 4, red)
cols = findall(x -> x .== 0, avgs[threes, :])
@inbounds avgs[threes, cols] .=
(
avgs[twos, cols] / sum(dict_deg .== 2) +
avgs[fours, cols] / sum(dict_deg .== 4)
) / 2
end
enc = sum(dict_deg .== 1) .+ sum(avgs, dims = 1)
@inbounds enc[enc.>61] .= 61
return enc
end
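# Background note on the calculation above: this is Wright's "effective number
# of codons", Nc = (number of single-codon amino acids) + sum over degeneracy
# classes k of n_k / F̄_k, where F̄_k averages the homozygosity F over the n_k
# amino acids with k synonymous codons, with the usual patch (averaging the
# two- and four-fold values) when no three-fold amino acid yields a usable F.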
"""
find_seqs(path::AbstractString, match_pattern::Regex)
find_seqs(stream::IO, match_pattern::Regex)
find_seqs(reader::FASTAReader, match_pattern::Regex)
Read a fasta file at `path` (or `reader` or `IO` which points to a fasta file) and query the *description* field for a given Regex `match_pattern`. These results can be supplied in either the reference tuples (for codon usage bias functions) or reference vectors (for expressivity measures).
# Examples
```jldoctest
julia> find_seqs(EXAMPLE_DATA_PATH, r"ribosomal")[1:5]
5-element Vector{Bool}:
0
0
0
0
0
```
"""
function find_seqs(path::AbstractString, match_pattern::Regex)
open(FASTAReader, path; copy = false) do reader
match_vector = Bool[]
for record in reader
@inbounds push!(match_vector, occursin(match_pattern, description(record)))
end
return match_vector
end
end
function find_seqs(reader::FASTAReader, match_pattern::Regex)
match_vector = Bool[]
for record in reader
@inbounds push!(match_vector, occursin(match_pattern, description(record)))
end
return match_vector
end
function find_seqs(stream::IO, match_pattern::Regex)
match_vector = Bool[]
FASTAReader(stream) do reader
for record in reader
@inbounds push!(match_vector, occursin(match_pattern, description(record)))
end
end
return match_vector
end
"""
seq_names(path::AbstractString)
seq_names(reader::FASTAReader)
seq_names(stream::IO)
Read a fasta file at `path` and return the *name* fields. Just adds convenience on top of FASTX functions.
# Examples
```jldoctest
julia> seq_name_vector = seq_names(EXAMPLE_DATA_PATH);
julia> seq_name_vector[1]
"lcl|NC_000964.3_cds_NP_387882.1_1"
```
"""
function seq_names(path::AbstractString)
open(FASTAReader, path; copy = false) do reader
name_vector = String[]
for record in reader
@inbounds push!(name_vector, identifier(record))
end
return name_vector
end
end
function seq_names(reader::FASTAReader)
name_vector = String[]
for record in reader
@inbounds push!(name_vector, identifier(record))
end
return name_vector
end
function seq_names(stream::IO)
FASTAReader(stream) do reader
name_vector = String[]
for record in reader
@inbounds push!(name_vector, identifier(record))
end
return name_vector
end
end
"""
seq_descriptions(path::AbstractString)
seq_descriptions(reader::FASTAReader)
seq_descriptions(stream::IO)
Read a fasta file at `path` and return the *description* fields. Just adds convenience on top of FASTX functions.
# Examples
```jldoctest
julia> seq_descr = seq_descriptions(EXAMPLE_DATA_PATH);
julia> seq_descr[1]
"lcl|NC_000964.3_cds_NP_387882.1_1 [gene=dnaA] [locus_tag=BSU_00010] [db_xref=EnsemblGenomes-Gn:BSU00010,EnsemblGenomes-Tr:CAB11777,GOA:P05648,InterPro:IPR001957,InterPro:IPR003593,InterPro:IPR010921,InterPro:IPR013159,InterPro:IPR013317,InterPro:IPR018312,InterPro:IPR020591,InterPro:IPR024633,InterPro:IPR027417,PDB:4TPS,SubtiList:BG10065,UniProtKB/Swiss-Prot:P05648] [protein=chromosomal replication initiator informational ATPase] [protein_id=NP_387882.1] [location=410..1750] [gbkey=CDS]"
```
"""
function seq_descriptions(path::AbstractString)
open(FASTAReader, path; copy = false) do reader
desc_vector = String[]
for record in reader
@inbounds push!(desc_vector, description(record))
end
return desc_vector
end
end
function seq_descriptions(reader::FASTAReader)
desc_vector = String[]
for record in reader
@inbounds push!(desc_vector, description(record))
end
return desc_vector
end
function seq_descriptions(stream::IO)
FASTAReader(stream) do reader
desc_vector = String[]
for record in reader
@inbounds push!(desc_vector, description(record))
end
return desc_vector
end
end
# Functions for dealing with NaNs when necessary
function remove_nan(x, replacement)
isnan(x) ? replacement : x
end
nanmean(x) = mean(filter(!isnan, x))
nanmean(x, y) = mapslices(nanmean, x, dims = y)
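# Quick sketch of the NaN-aware helpers (safe to run at include time): NaNs are
# dropped before averaging rather than propagated.
@assert remove_nan(NaN, 0.0) == 0.0
@assert nanmean([1.0, NaN, 3.0]) == 2.0
@assert nanmean([1.0 NaN; 3.0 5.0], 2) == reshape([1.0, 4.0], 2, 1)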
"""
codon_frequency(codon_counts::Matrix{<:Integer}, form::String, dict::CodonDict = DEFAULT_CodonDict)
Calculate codon frequency from a matrix of codon counts. Accepts as its first argument a `Matrix{<:Integer}` which is a product of `count_codons()`. `form` can be one of four options:
- `net_genomic`: Frequency of each codon across the entire genome (matrix).
- `net_gene`: Frequency of each codon within each gene (column).
- `byAA_genomic`: Frequency of each codon within each amino acid across the entire genome (matrix).
- `byAA_gene`: Frequency of each codon within each amino acid within each gene (column).
If using an alternative genetic code, a custom `CodonDict` can be provided.
# Examples
```jldoctest
julia> codon_counts = count_codons(EXAMPLE_DATA_PATH);
julia> count_matrix = codon_counts[1];
julia> codon_frequency(count_matrix, "net_genomic")[1:5]
5-element Vector{Float64}:
0.04941242971710299
0.017114892645228374
0.021009352696846777
0.022269444158755328
0.022257296747490142
julia> codon_frequency(count_matrix, "net_genomic") |> size
(64, 1)
julia> codon_frequency(count_matrix, "net_gene") |> size
(64, 4237)
```
"""
function codon_frequency(codon_counts::Matrix{<:Integer}, form::String, dict::CodonDict = DEFAULT_CodonDict)
    form in ("net_genomic", "byAA_genomic", "net_gene", "byAA_gene") || error("""Invalid form. Acceptable forms include "net_genomic", "byAA_genomic", "net_gene", or "byAA_gene".""")
if form == "net_genomic"
return @views sum(codon_counts, dims = 2) ./ sum(codon_counts)
elseif form == "net_gene"
geneNet = Float64[]
lengths = @views transpose(sum(codon_counts, dims = 1))
for (x, l) in zip(eachcol(codon_counts), lengths)
append!(geneNet, x ./ l)
end
return reshape(geneNet, 64, :)
else
countAA = countsbyAA(codon_counts, dict.uniqueI)
if form == "byAA_genomic"
freq = normTotalFreq(codon_counts, countAA, dict.uniqueI)
return @views remove_nan.(freq, 0)
end
seqs = @views size(codon_counts, 2)
freq = normFrequency(codon_counts, countAA, seqs, dict.uniqueI)
return @views remove_nan.(freq, 0)
end
end | CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | code | 6829 | """
CodonDict
The `CodonDict` type defines how codons are translated, and is essential for calculating codon usage bias, as it identifies stop codons and each amino acid's degeneracy. A default codon dictionary is provided (`DEFAULT_CodonDict`), or a user can make their own using the `make_CodonDict` function.
### Fields
- `codons`: the 64 codons, in alphabetical order
- `AA`: corresponding amino acid for each codon (64 entries long)
- `AA_nostops`: same as AA, but with stop codons removed
- `uniqueAA`: unique amino acid names including stop codons. Under a standard translation table, this is 21 amino acids long
- `uniqueAA_nostops`: same as uniqueAA, but with stop codons removed
- `uniqueI`: a vector of the same length as uniqueAA, containing vectors of the indices of each codon for that amino acid. For instance, the first entry corresponds to Lysine, and contains the vector `[1, 3]`, corresponding to the positions of codons AAA and AAG in the codons field
- `uniqueI_nostops`: same as uniqueI, but with stop codons removed
- `deg`: a vector of the same length as uniqueAA, containing the degeneracy for each amino acid.
- `deg_nostops`: same as deg, but with stop codons removed
- `stop_mask`: a Boolean vector of length 64 which is false for stop codons. This is used to remove stop codons when calculating codon usage bias.
### Notes
Generally, CUBScout users shouldn't need to interact with the `CodonDict` type, as the standard genetic code is applied by default. Details for constructing a custom `CodonDict` are documented under the `make_CodonDict` function.
"""
struct CodonDict
codons::Vector{LongDNA{2}} # 64 codons (in alphabetical order)
AA::Vector{String} # Amino acids corresponding to each codon
AA_nostops::Vector{String} # Amino acids without stop codons
uniqueAA::Vector{String} # Unique amino acids (21, generally)
uniqueAA_nostops::Vector{String} # Amino acids, stops removed
uniqueI::Vector{Vector{Int32}} # Indices of codons for each amino acid
uniqueI_nostops::Vector{Vector{Int32}} # Indices after stops are removed
deg::Vector{Int32} # Degeneracy of each amino acid
    deg_nostops::Vector{Int32} # Degeneracy of each amino acid, stop codons removed
    stop_mask::Vector{Bool} # Boolean vector of length 64 which is false for stop codons
end
"""
make_CodonDict(filepath::AbstractString, delimiter::AbstractChar = '\t')
Make a custom codon dictionary for organisms with a non-standard genetic code. `filepath` points to a delimited file with two columns and no header. The first column should be codons, and the second column their corresponding amino acids. Avoid spaces and special characters (e.g., write GlutamicAcid instead of Glutamic Acid). Stop codons can be coded as Stop, stop, STOP, or *. If the file is delimited using any character other than tab, supply the delimiter as the second argument as a `Char`, not a `String` (e.g. `','`, not `","`). `make_CodonDict` uses `readdlm` from `DelimitedFiles`; it's a good idea to check whether `readdlm` parses your file correctly before passing it to `make_CodonDict`.
# Examples
```jldoctest
julia> my_CodonDict = make_CodonDict(CUBScout.CodonDict_PATH)
CodonDict(BioSequences.LongSequence{BioSequences.DNAAlphabet{2}}[AAA, AAC, AAG, AAT, ACA, ACC, ACG, ACT, AGA, AGC … TCG, TCT, TGA, TGC, TGG, TGT, TTA, TTC, TTG, TTT], ["Lysine", "Asparagine", "Lysine", "Asparagine", "Threonine", "Threonine", "Threonine", "Threonine", "Arginine", "Serine" … "Serine", "Serine", "Stop", "Cysteine", "Tryptophan", "Cysteine", "Leucine", "Phenylalanine", "Leucine", "Phenylalanine"], ["Lysine", "Asparagine", "Lysine", "Asparagine", "Threonine", "Threonine", "Threonine", "Threonine", "Arginine", "Serine" … "Serine", "Serine", "Serine", "Cysteine", "Tryptophan", "Cysteine", "Leucine", "Phenylalanine", "Leucine", "Phenylalanine"], ["Lysine", "Asparagine", "Threonine", "Arginine", "Serine", "Isoleucine", "Methionine", "Glutamine", "Histidine", "Proline" … "Glutamicacid", "Asparticacid", "Alanine", "Glycine", "Valine", "Stop", "Tyrosine", "Cysteine", "Tryptophan", "Phenylalanine"], ["Lysine", "Asparagine", "Threonine", "Arginine", "Serine", "Isoleucine", "Methionine", "Glutamine", "Histidine", "Proline", "Leucine", "Glutamicacid", "Asparticacid", "Alanine", "Glycine", "Valine", "Tyrosine", "Cysteine", "Tryptophan", "Phenylalanine"], Vector{Int32}[[1, 3], [2, 4], [5, 6, 7, 8], [9, 11, 25, 26, 27, 28], [10, 12, 53, 54, 55, 56], [13, 14, 16], [15], [17, 19], [18, 20], [21, 22, 23, 24] … [33, 35], [34, 36], [37, 38, 39, 40], [41, 42, 43, 44], [45, 46, 47, 48], [49, 51, 57], [50, 52], [58, 60], [59], [62, 64]], Vector{Int32}[[1, 3], [2, 4], [5, 6, 7, 8], [9, 11, 25, 26, 27, 28], [10, 12, 51, 52, 53, 54], [13, 14, 16], [15], [17, 19], [18, 20], [21, 22, 23, 24], [29, 30, 31, 32, 58, 60], [33, 35], [34, 36], [37, 38, 39, 40], [41, 42, 43, 44], [45, 46, 47, 48], [49, 50], [55, 57], [56], [59, 61]], Int32[2, 2, 4, 6, 6, 3, 1, 2, 2, 4 … 2, 2, 4, 4, 4, 3, 2, 2, 1, 2], Int32[2, 2, 4, 6, 6, 3, 1, 2, 2, 4, 6, 2, 2, 4, 4, 4, 2, 2, 1, 2], Bool[1, 1, 1, 1, 1, 1, 1, 1, 1, 1 … 1, 1, 0, 1, 1, 1, 1, 1, 1, 1])
julia> typeof(my_CodonDict)
CodonDict
julia> fieldnames(CodonDict)
(:codons, :AA, :AA_nostops, :uniqueAA, :uniqueAA_nostops, :uniqueI, :uniqueI_nostops, :deg, :deg_nostops, :stop_mask)
```
"""
function make_CodonDict(filepath::AbstractString, delimiter::AbstractChar = '\t')
cod_mat = readdlm(filepath, delimiter)
alph_cod_mat = sortslices(cod_mat, dims = 1, by = x -> x[1], rev = false) # Alphabetize codons
uniqueAA = unique(alph_cod_mat[:, 2]) # Find unique amino acid names
stop = map(x -> occursin(r"Stop|stop|STOP|\*", x), alph_cod_mat[:, 2]) # Search for stop codons
AA_nostops = alph_cod_mat[.!stop, 2] # Filter out stops
uniqueAA_nostops = unique(AA_nostops) # Filter out stops
    indices = [findall(alph_cod_mat[:, 2] .== aa) for aa in uniqueAA] # Find codon indices for each amino acid
indices_nostops = [findall(AA_nostops .== aa) for aa in uniqueAA_nostops]
CodonDict(
LongDNA{2}.(alph_cod_mat[:, 1]),
alph_cod_mat[:, 2],
AA_nostops,
uniqueAA,
uniqueAA_nostops,
indices,
indices_nostops,
length.(indices),
length.(indices_nostops),
.!stop,
)
end
const CodonDict_PATH = joinpath(artifact"codon_dict", "codon_dict.txt")
const DEFAULT_CodonDict = make_CodonDict(CodonDict_PATH)
const ALTSTART_CodonDict =
make_CodonDict(joinpath(artifact"codon_dict_altstart", "codon_dict_altstart.txt"))
"""
EXAMPLE_DATA_PATH
The path to an example dataset, stored as an artifact within the package. This is an .fna file containing coding sequences from Bacillus subtilis subsp. subtilis str. 168, NCBI Accession # NC_000964.3.
"""
const EXAMPLE_DATA_PATH = joinpath(artifact"example_genome", "B_subtilis.fna")
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | code | 1171 | using CUBScout
using Test
@testset "CUBScout.jl" begin
@test isapprox(b(EXAMPLE_DATA_PATH).self[1], 0.2091269922)
@test isapprox(enc(EXAMPLE_DATA_PATH).ENC[1], 56.7872822025)
@test isapprox(enc_p(EXAMPLE_DATA_PATH).self[1], 61.0)
@test isapprox(mcb(EXAMPLE_DATA_PATH).self[1], 0.0872112376)
@test isapprox(milc(EXAMPLE_DATA_PATH).self[1], 0.494825732)
ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal")
@test isapprox(melp(EXAMPLE_DATA_PATH, ribosomal_genes).MELP[1], 0.9294138732)
@test isapprox(cai(EXAMPLE_DATA_PATH, ribosomal_genes).CAI[1], 0.8449667854)
@test isapprox(fop(EXAMPLE_DATA_PATH, ribosomal_genes).FOP[1], 0.567816092)
@test isapprox(gcb(EXAMPLE_DATA_PATH).GCB[1], -0.0587654329)
codon_counts = count_codons(EXAMPLE_DATA_PATH);
@test isapprox(codon_frequency(codon_counts[1], "net_genomic")[1], 0.04941242971710299)
@test isapprox(codon_frequency(codon_counts[1], "net_gene")[1], 0.07158836689038031)
@test isapprox(codon_frequency(codon_counts[1], "byAA_gene")[1], 0.8421052631578947)
@test isapprox(codon_frequency(codon_counts[1], "byAA_genomic")[1], 0.7016640025759265)
end
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 1970 | # CUBScout
[](https://gus-pendleton.github.io/CUBScout.jl/stable/)
[](https://gus-pendleton.github.io/CUBScout.jl/dev/)
[](https://github.com/gus-pendleton/CUBScout.jl/actions/workflows/CI.yml?query=branch%3Amain)
**C**odon **U**sage **B**ias (CUB) in Julia
`CUBScout` helps you work with codons! Beyond counting codons and finding codon frequency, `CUBScout` calculates **C**odon **U**sage **B**ias (CUB) and related expressivity predictions. Currently, `CUBScout` calculates:
- Six measures of codon usage bias:
- B, from Karlin and Mrazek, 1996
- ENC, from Wright, 1990
- ENC', from Novembre, 2002
- MCB, from Urrutia and Hurst, 2001
- MILC, from Supek and Vlahovicek, 2005
- SCUO, from Wan et al., 2004
- Five expressivity measures based on codon usage bias:
- CAI, from Sharp and Li, 1987
- E, from Karlin and Mrazek, 1996
- FOP, from Ikemura, 1981
- GCB, from Merkl, 2003
- MELP, from Supek and Vlahovicek, 2005
`CUBScout` is based off of the fabulous [coRdon](https://www.bioconductor.org/packages/release/bioc/html/coRdon.html) package in R by Anamaria Elek, Maja Kuzman, and Kristian Vlahovicek. I am grateful for their clear code and would encourage you to cite coRdon as well when using `CUBScout`.
You can install `CUBScout` by:
```julia
using Pkg
Pkg.add("CUBScout")
```
Or for the dev version:
```julia
Pkg.add(name = "CUBScout", rev = "main")
```
CUBScout is under active development, and I welcome contributions or suggestions! Additional features I'm working on/would like to incorporate:
- Performance improvements
- Plotting support (e.g. BPlots)
- Additional CUB measures, including S, RCDI, CDC, RCA, RCSU, and RCBS
- Growth predictions derived from CUB, such as those in growthpred and gRodon | CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 9889 | # Counting Codons and Calculating Codon Frequency
In the process of calculating codon usage bias, `CUBScout` developed some handy functions for counting codons, calculating codon frequency, and sorting through a fasta file. These accessory functions underlie the codon usage bias functions, but are available to users as well. If you just want to calculate codon usage bias, you won't need to interact with these functions and can skip this section.
## Counting Codons
The first step in calculating codon usage bias is counting the occurrence of each codon within each coding sequence. The `count_codons` function performs this step quickly and accepts numerous datatypes.
#### Inputs and Outputs
`count_codons` can accept a filepath to a fasta file of sequences, an IOStream, an open FASTAReader, a BioSequences nucleotide sequence, or a vector of nucleotide sequences. If providing a single sequence, the result will be a 64x1 Matrix whose rows correspond to the 64 codons in alphabetical order. (If you want a list of the codons in alphabetical order, it is stored in `CUBScout.DEFAULT_CodonDict.codons`.) If analyzing a fasta file or a vector of sequences, the result will be a tuple. The first element of the tuple is a 64xn matrix, where n is the number of sequences above the length threshold. The second element is a vector of names corresponding to each column. The third element is a Boolean vector in which `true` marks sequences which passed the threshold, and `false` marks sequences which did not pass the threshold and so are not included in the results matrix.
`CUBScout` is loaded with an example dataset, which can be accessed at `CUBScout.EXAMPLE_DATA_PATH`. This string points to a .fna of coding sequences from *B. subtilis*. Let's go ahead and run `count_codons` on our example dataset:
```julia-repl
julia> result = count_codons(EXAMPLE_DATA_PATH);
julia> size(result[1])
(64, 4237)
julia> result[1]
64×4237 Matrix{Int32}:
32 21 1 17 7 33 38 … 33 12 13 14 11 6
7 8 2 7 2 20 19 10 7 5 3 1 2
6 3 2 9 1 10 9 7 8 10 8 8 0
14 7 2 7 1 13 16 12 7 4 4 5 1
11 9 2 8 2 20 23 25 11 5 2 2 1
⋮ ⋮ ⋱ ⋮
0 0 0 0 0 1 0 1 2 0 1 0 0
12 11 3 6 3 10 12 … 4 11 3 10 3 2
4 6 1 3 0 6 14 3 2 1 8 1 2
6 6 0 7 1 5 8 12 6 7 9 2 0
17 8 3 6 6 9 2 5 5 1 4 4 0
julia> first(result[2], 5)
5-element Vector{String}:
"lcl|NC_000964.3_cds_NP_387882.1_1"
"lcl|NC_000964.3_cds_NP_387883.1_2"
"lcl|NC_000964.3_cds_NP_387884.1_3"
"lcl|NC_000964.3_cds_NP_387885.1_4"
"lcl|NC_000964.3_cds_NP_387886.2_5"
```
Each column of the matrix corresponds to a gene in the genome, and each row is a codon. The value of each entry corresponds to the count of that codon. So there are 32 AAA codons in the first gene of the example genome. This gene has the identifier "lcl|NC_000964.3_cds_NP_387882.1_1".
There are a few more arguments you can use to tune how `count_codons` works.
#### rm_start
Whether to remove the first codon from any counts. A more thorough discussion of `CUBScout`'s treatment of start codons can be found in the Codon Usage Bias section "Alternative Start Codons".
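As a minimal sketch (outputs omitted here), removing start codons just means passing `true` as the second positional argument:
```julia
with_starts = count_codons(EXAMPLE_DATA_PATH) # first codon of each gene is counted
without_starts = count_codons(EXAMPLE_DATA_PATH, true) # first codon of each gene is skipped
# For a given gene, the codon total is generally one lower when the start codon is skipped.
```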
#### threshold
This is the minimum length of a sequence, in codons, to be included in the results. If you would like all sequences analyzed, this can be set to 0. If a sequence does not pass the threshold, it won't be included in the results matrix, and its identifier won't be included in the names vector. As we increase the threshold, our matrix becomes smaller as sequences are filtered out. Note that when supplying a filepath as input, `count_codons` dispatches on argument position, so in order to set the threshold I also need to include the `rm_start` argument (`false`):
```julia-repl
julia> result_300 = count_codons(EXAMPLE_DATA_PATH, false, 300);
julia> size(result_300[1])
(64, 1650)
```
#### names
When using `count_codons` on a fasta file, identifiers are automatically pulled from the sequence headers. However, if providing a vector of BioSequences, there aren't names linked to each sequence, and instead identifiers might be stored in a separate vector of the same length. Because `count_codons` removes sequences below the threshold, this poses a quandary: how do you know which sequences were kept and which were removed? By providing a names vector to `count_codons`, as sequences are discarded, their corresponding identifiers will also be discarded from the results. This is an optional argument, and is only relevant if providing `Vector{<:NucSeq}` as an argument. For this reason, however, `count_codons` dispatches on kwargs if provided a `Vector{<:NucSeq}`, instead of position:
```julia-repl
julia> using BioSequences: @dna_str
julia> example_dna1 = dna"ATGAAAATGAACTTTTGA"
18nt DNA Sequence:
ATGAAAATGAACTTTTGA
julia> count_codons(example_dna1)
64×1 Matrix{Int64}:
1
1
0
0
0
⋮
0
0
0
0
1
julia> example_dna2 = dna"ATGAAAATGAACTTTTGAATGAAAATGAACTTTTGAATGAAAATGAACTTTTGA"
54nt DNA Sequence:
ATGAAAATGAACTTTTGAATGAAAATGAACTTTTGAATGAAAATGAACTTTTGA
julia> count_codons([example_dna1, example_dna2])
(Int32[1 3; 1 3; … ; 0 0; 1 3], nothing, Bool[1, 1])
julia> count_codons([example_dna1, example_dna2], threshold = 8)
(Int32[3; 3; … ; 0; 3;;], nothing, Bool[0, 1])
julia> count_codons([example_dna1, example_dna2], threshold = 8, names = ["Example1","Example2"])
(Int32[3; 3; … ; 0; 3;;], ["Example2"], Bool[0, 1])
```
## Codon Frequency
Once you've counted your codons, you may want to calculate the *frequency* at which each codon occurs. `codon_frequency` accepts a count matrix from `count_codons` and calculates codon frequency. The input must be a 64xn matrix of integers, where each row corresponds to one of the 64 codons in alphabetical order, and each column corresponds to a gene sequence.
There are four ways to calculate codon frequency. Let's use an example to illustrate their differences by making a mock genome with two genes in it.
```julia-repl
julia> gene1 = dna"AAAAAGAAATTT"
12nt DNA Sequence:
AAAAAGAAATTT
julia> gene2 = dna"AAGAAGTTTTTCTTC"
15nt DNA Sequence:
AAGAAGTTTTTCTTC
julia> genome = [gene1, gene2]
2-element Vector{LongSequence{DNAAlphabet{4}}}:
AAAAAGAAATTT
AAGAAGTTTTTCTTC
julia> count_result = count_codons(genome)
(Int32[2 0; 0 0; … ; 0 0; 1 1], nothing, Bool[1, 1])
julia> count_matrix = count_result[1]
64×2 Matrix{Int32}:
2 0
0 0
1 2
0 0
0 0
⋮
0 0
0 0
0 2
0 0
1 1
```
We can see that gene1 (column 1) had two counts of AAA (row 1), one of AAG (row 3), and one of TTT (row 64). For gene2 (column 2), we have 2 counts of AAG, 2 counts of TTC (row 62), and 1 count of TTT. AAA and AAG code for lysine, while TTT and TTC code for phenylalanine.
#### "net_genomic"
This will calculate the cumulative codon frequency of each codon across the entire matrix (genome), as a percentage of all codon counts in the matrix. So for example:
```julia-repl
julia> codon_frequency(count_matrix, "net_genomic")
64×1 Matrix{Float64}:
0.2222222222222222
0.0
0.3333333333333333
0.0
0.0
⋮
0.0
0.0
0.2222222222222222
0.0
0.2222222222222222
```
Across our entire genome, we counted 9 codons. AAA, TTC, and TTT all had two counts, and so at rows 1, 62, and 64 we have 0.222 = 2/9. AAG occurred three times, so we have 0.33 at row 3.
#### "net_gene"
This will calculate the codon frequency within each gene (column) in the matrix, as a percentage of all codon counts in that gene.
```julia-repl
julia> codon_frequency(count_matrix, "net_gene")
64×2 Matrix{Float64}:
0.5 0.0
0.0 0.0
0.25 0.4
0.0 0.0
0.0 0.0
⋮
0.0 0.0
0.0 0.0
0.0 0.4
0.0 0.0
0.25 0.2
```
First, we now have a matrix instead of a vector, because we are getting results for each gene instead of summarized across the genome. There were 4 codons in gene 1. There were two counts of AAA, so its codon frequency was 0.5 = 2/4 (row 1). AAG and TTT both had a by-gene frequency of 0.25. There were 5 codons in gene 2. Both AAG (row 3) and TTC (row 62) had 2 counts, so had a by-gene frequency of 0.4 = 2/5.
#### "byAA_genomic"
This will calculate the codon frequency of each codon within each amino acid across the entire matrix (genome). Let's see what happens in this scenario in our mock genome:
```julia-repl
julia> codon_frequency(count_matrix, "byAA_genomic")
64-element Vector{Real}:
0.4
0
0.6
0
0
⋮
0
0
0.5
0
0.5
```
Because we've summarized across the genome we're back to a vector as output. We had 5 codons which coded for lysine across the genome. If we look at our lysine codons (AAA and AAG), we see AAA had a byAA genomic frequency of 0.4 = 2 / 5, while AAG had a byAA genomic frequency of 0.6 = 3/5. For our phenylalanine codons (TTC and TTT), they both occurred twice across the genome, and so had a byAA genomic frequency of 0.5.
#### "byAA_gene"
Finally, we can calculate the codon frequency of each codon within each amino acid within each gene. For example:
```julia-repl
julia> codon_frequency(count_matrix, "byAA_gene")
64×2 Matrix{Real}:
0.666667 0.0
0 0
0.333333 1.0
0 0
0 0
⋮
0 0
0 0
0.0 0.666667
0 0
1.0 0.333333
```
If we look at gene1 (column 1), it exclusively used TTT to code for phenylalanine, but used AAA to code for lysine two-thirds (≈67%) of the time. In contrast, gene2 used AAG to code for lysine 100% of the time.
#### Codon Dictionaries
If calculating byAA codon frequencies, codons need to be translated into amino acids, which is done by supplying a codon dictionary. A more complete description of codon dictionaries, including using alternative genetic codes, can be found in the Codon Usage Bias section "Codon Dictionaries". | CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 14735 |
# Calculating Codon Usage Bias
## Under default parameters
Codon usage bias can be calculated using the functions `b()`, `enc()`, `enc_p()`, `mcb()`, `milc()` and `scuo()`. These functions accept fasta-formatted files or vectors of BioSequences. If analyzing fasta files, these functions can accept filepaths as a string, an open IO, or an open FASTAReader. If providing a vector of BioSequences, the sequence can use either DNA or RNA alphabets. The output is a vector giving the codon usage bias of each coding sequence in the fasta file.
!!! warning
`CUBScout` does not identify ORFs, pause at stop codons, or parse non-nucleotide characters. It is assumed the coding sequences you provide are in-frame and don't contain 5' or 3' untranslated regions. Codons which have non-specific nucleotides, like "W", are skipped. Sequences with characters outside of those recognized by BioSequences will throw an error.
`CUBScout` is loaded with an example dataset, which can accessed at `CUBScout.EXAMPLE_DATA_PATH`. This string points to a .fna of coding sequences from *B. subtilis*. Let's calculate ENC for the genes in this file.
```julia-repl
julia> EXAMPLE_DATA_PATH
"/your/path/to/file/B_subtilis.fna"
julia> enc_result = enc(EXAMPLE_DATA_PATH);
julia> enc_result.ENC
3801-element Vector{Float64}:
56.787282202547104
52.725946690067296
59.287948966886226
52.29668642771212
55.26298060679466
53.44161579771853
⋮
50.30390962534221
56.29539618087172
55.229391962859935
52.58401385627267
60.19275631834157
julia> enc_result.Identifier
3801-element Vector{String}:
"lcl|NC_000964.3_cds_NP_387882.1_1"
"lcl|NC_000964.3_cds_NP_387883.1_2"
"lcl|NC_000964.3_cds_NP_387885.1_4"
"lcl|NC_000964.3_cds_NP_387886.2_5"
"lcl|NC_000964.3_cds_NP_387887.1_6"
"lcl|NC_000964.3_cds_NP_387888.1_7"
⋮
"lcl|NC_000964.3_cds_NP_391981.1_4232"
"lcl|NC_000964.3_cds_NP_391982.1_4233"
"lcl|NC_000964.3_cds_NP_391983.1_4234"
"lcl|NC_000964.3_cds_NP_391984.1_4235"
"lcl|NC_000964.3_cds_NP_391985.1_4236"
```
ENC and SCUO calculate codon usage bias against a theoretical, unbiased distribution, and so simply return a named tuple containing ENC/SCUO and then the gene identifiers. B, ENC', MCB, and MILC calculate an expected codon frequency using a reference set of the genome, and then calculate codon usage bias for each gene against that reference set. As such, these functions return a named tuple which describes which reference set was used, alongside gene identifiers. By default, the codon usage bias is calculated against the codon usage bias of the genome as a whole, which we typically refer to as "self".
```julia-repl
julia> b_result = b(EXAMPLE_DATA_PATH)
(self = [0.20912699220973896, 0.3289759448740455, 0.22365336363593893, 0.5391135258658497, 0.24919594143501034, 0.2880358413249049, 0.31200964304415874, 0.34858035204347476, 0.2455189361074733, 0.4690734561271221 … 0.3629137353834403, 0.3621330537227321, 0.4535285720373026, 0.3357858047622507, 0.28183191395624935, 0.2668809561422238, 0.22381338105820905, 0.4034837015709619, 0.3594626865160133, 0.3724863965444541],)
julia> b_result.self
3801-element Vector{Float64}:
0.20912699220973896
0.3289759448740455
0.22365336363593893
0.5391135258658497
0.24919594143501034
0.2880358413249049
⋮
0.2668809561422238
0.22381338105820905
0.4034837015709619
0.3594626865160133
0.3724863965444541
```
Many of these measures rely on the same initial calculations. If you want to calculate all six measures at the same time, use the function `all_cub()`. This only runs these initial calculations once before calculating individual codon usage measures, and as such is more efficient than running all the functions separately. By default, `all_cub` returns a named tuple, each key of which corresponds to a different codon usage bias measure.
```julia-repl
julia> all_cub_result = all_cub(EXAMPLE_DATA_PATH);
julia> all_cub_result.B.self
3801-element Vector{Float64}:
0.20912699220973896
0.3289759448740455
0.22365336363593893
0.5391135258658497
0.24919594143501034
0.2880358413249049
⋮
0.2668809561422238
0.22381338105820905
0.4034837015709619
0.3594626865160133
0.3724863965444541
julia> all_cub_result.ENC.ENC
3801-element Vector{Float64}:
56.787282202547104
52.725946690067296
59.287948966886226
52.29668642771212
55.26298060679466
53.44161579771853
⋮
50.30390962534221
56.29539618087172
55.229391962859935
52.58401385627267
60.19275631834157
```
## Codon Dictionaries
If you are working with genomes that use the standard genetic code, then feel free to skip this section - you should not need to worry about it. By default, `CUBScout` translates sequences using the standard code, as loaded in `CUBScout.DEFAULT_CodonDict`. However, if your sequences are translated differently, you will need to provide a custom codon dictionary to `CUBScout`.
Codon dictionaries are of a custom type `CodonDict`. You can use `?CodonDict` to see the information this struct holds, which our codon usage bias functions need to correctly translate codons and calculate codon frequency. However, I recommend you **do not** construct a `CodonDict` manually, but instead make one using the `make_CodonDict()` function.
`make_CodonDict` reads a plain text delimited file which lists the 64 codons and their corresponding amino acid. The file should look something like this:
```
AAA Lysine
AAC Asparagine
AAG Lysine
... ...
```
Please follow these formatting guidelines to make sure the table is parsed correctly:
- The first column should be codons, and the second column their corresponding amino acid.
- Do not include headers and avoid trailing whitespace.
- Codons do not need to be alphabetized.
- Avoid spaces and special characters (e.g., write GlutamicAcid instead of Glutamic Acid).
- Stop codons can be coded as Stop, stop, STOP, or *.
- If delimited using any character outside of tab, supply the delimiter as the second argument as a `Char`, not a `String` (e.g. `','` not `","`).
`make_CodonDict` uses `readdlm` from `DelimitedFiles`; it's a good idea to check whether `readdlm` parses your file correctly before passing to `make_CodonDict`.
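For example, a quick sanity check before building the dictionary might look like this (`"my_codons.txt"` is a hypothetical file name):
```julia
using DelimitedFiles
cod_mat = readdlm("my_codons.txt", '\t')
size(cod_mat) # expect (64, 2): codons in column 1, amino acids in column 2
```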
For demonstration purposes, `CUBScout` includes the delimited file used to construct the `DEFAULT_CodonDict`.
```julia-repl
julia> CodonDict_PATH
"your/path/to/codon_dict.txt"
julia> our_CodonDict = make_CodonDict(CodonDict_PATH);
julia> our_CodonDict.codons
64-element Vector{BioSequences.LongSequence{BioSequences.DNAAlphabet{2}}}:
AAA
AAC
AAG
AAT
[...]
julia> our_CodonDict.AA
64-element Vector{String}:
"Lysine"
"Asparagine"
"Lysine"
"Asparagine"
[...]
```
You can supply your custom codon dictionary to any of the codon usage bias functions as the second argument.
```julia-repl
julia> milc(EXAMPLE_DATA_PATH, our_CodonDict)
(self = [0.49482573202153163, 0.5839439121281993, 0.49947166558087047, 0.6354929447434434, 0.5439352548027006, 0.6104721251245075, 0.6256398806438782, 0.6228376952086359, 0.5355298113407091, 0.7832276821181443 … 0.5968814155010973, 0.5964500002803941, 0.5930680822246766, 0.5412999510428169, 0.49866919389111675, 0.5830959504630727, 0.5139438478694085, 0.6164434557282711, 0.6018041071661588, 0.48775477465069617],)
```
## Alternative Start Codons
`CUBScout` provides three options to handle start codons outside of ATG. By default, alternative codons are translated as they would be anywhere else in the sequence. As such, TTG would be counted as leucine, even when in the first position.
If you would like to disregard start codons entirely, set the argument `rm_start = true`. This will decrease the length of each gene sequence by one, but it is my preferred method for dealing with alternative start codons.
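For example, a minimal sketch with `enc` (any of the codon usage bias functions accepts this keyword):
```julia
enc_no_starts = enc(EXAMPLE_DATA_PATH, rm_start = true) # first codon of each gene is ignored
```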
Other packages to calculate codon usage bias, such as [coRdon](https://www.bioconductor.org/packages/release/bioc/html/coRdon.html), handle alternative start codons differently. They encode all TTG and CTG codons as methionine, regardless of their location in the gene. While I disagree with this approach from a biological perspective, you can implement it using the pre-loaded `ALTSTART_CodonDict`.
```julia-repl
julia> scuo_result = scuo(EXAMPLE_DATA_PATH, ALTSTART_CodonDict);
julia> scuo_result.SCUO
3801-element Vector{Float64}:
0.14286111587263958
0.19315278493814017
0.0966128845976179
0.3473543659821751
0.10792236840320082
0.12039525638448735
⋮
0.152064610300728
0.11200912387676948
0.18952246579743504
0.16473723774598686
0.24160824180945173
```
## Custom Reference Sets with `ref_seqs`
B, ENC', MCB, and MILC all calculate an expected codon frequency using a reference set of the genome, and then calculate codon usage bias for each gene against that reference set. By default, this is the entire genome ("self"). However, you can provide your own reference subset(s) to these functions.
First, you'll need a Boolean vector, whose length matches the number of sequences in your fasta file. Genes which you want included in your subset should be `true`; the rest of the vector should be `false`. One way to make this vector is with the `find_seqs` function to look for genes with specific functions.
```julia-repl
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal")
4237-element Vector{Bool}:
0
0
0
0
0
0
⋮
0
0
0
0
1
```
!!! tip
`CUBScout` is designed not to hold the data from your fasta file as an object in your Julia environment. If you want to get sequence identifiers or descriptions outside of codon usage bias functions, there are the convenience functions `seq_names` and `seq_descriptions`:
```julia-repl
julia> seq_names(EXAMPLE_DATA_PATH)[1:5]
5-element Vector{String}:
"lcl|NC_000964.3_cds_NP_387882.1_1"
"lcl|NC_000964.3_cds_NP_387883.1_2"
"lcl|NC_000964.3_cds_NP_387884.1_3"
"lcl|NC_000964.3_cds_NP_387885.1_4"
"lcl|NC_000964.3_cds_NP_387886.2_5"
julia> seq_descriptions(EXAMPLE_DATA_PATH)[1]
"lcl|NC_000964.3_cds_NP_387882.1_1 [gene=dnaA] [locus_tag=BSU_00010] [db_xref=EnsemblGenomes-Gn:BSU00010,EnsemblGenomes-Tr:CAB11777,GOA:P05648,InterPro:IPR001957,InterPro:IPR003593,InterPro:IPR010921,InterPro:IPR013159,InterPro:IPR013317,InterPro:IPR018312,InterPro:IPR020591,InterPro:IPR024633,InterPro:IPR027417,PDB:4TPS,SubtiList:BG10065,UniProtKB/Swiss-Prot:P05648] [protein=chromosomal replication initiator informational ATPase] [protein_id=NP_387882.1] [location=410..1750] [gbkey=CDS]"
```
Once you have your reference vector, you can supply an argument to `ref_seqs` as a named tuple. If you have multiple reference sets you want to use, those can be included as additional entries in the `ref_seqs` tuple.
```julia-repl
julia> b_ribo_result = b(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes,));
julia> b_ribo_result.ribosomal
3801-element Vector{Float64}:
0.27433079214149625
0.3206897249908304
0.25532544766240484
0.5464925047248634
0.22424329272203575
0.22684609299155567
⋮
0.2561376033448253
0.2217345501228918
0.40667338789742696
0.3758568749612823
0.4379807676614555
julia> dna_genes = find_seqs(EXAMPLE_DATA_PATH, r"dna|DNA|Dna")
4237-element Vector{Bool}:
1
1
0
1
0
1
⋮
0
1
0
0
0
julia> b_multi_result = b(EXAMPLE_DATA_PATH, ref_seqs = (ribosomal = ribosomal_genes, DNA = dna_genes));
julia> b_multi_result.ribosomal
3801-element Vector{Float64}:
0.27433079214149625
0.3206897249908304
0.25532544766240484
0.5464925047248634
0.22424329272203575
0.22684609299155567
⋮
0.2561376033448253
0.2217345501228918
0.40667338789742696
0.3758568749612823
0.4379807676614555
julia> b_multi_result.DNA
3801-element Vector{Float64}:
0.2148821062833632
0.3182032724315858
0.23577274334969703
0.5371269155669846
0.2684310325581909
0.2860168153422687
⋮
0.273137416897346
0.21136319951043495
0.3866134722044515
0.3510891124098759
0.3668966776242405
```
## Other Arguments
### `rm_stop`
Whether to remove stop codons from the calculation of codon usage bias. The default is `false`.
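As a minimal sketch, using `b` as an example:
```julia
b_no_stops = b(EXAMPLE_DATA_PATH, rm_stop = true) # stop codons are excluded from the calculation
```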
### `threshold`
The minimum length of a gene in codons to be used when calculating codon usage bias. The default is 80; all genes under that length are discarded. If you want to discard no genes, set `threshold = 0`. You do **not** need to adjust your reference sequence vector when adjusting threshold values.
```julia-repl
julia> b_result_0 = b(EXAMPLE_DATA_PATH, threshold = 0);
julia> b_result_300 = b(EXAMPLE_DATA_PATH, threshold = 300);
julia> length(b_result_0.self)
4237
julia> length(b_result_300.self)
1650
```
### `names`
If providing a vector of BioSequences, `CUBScout` won't be able to provide identifiers for codon usage bias results. As such, you can optionally provide a vector of identifiers as an argument, so you can link results to the original input sequences.
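For example, here is a sketch pairing identifiers with a vector of BioSequences. The sequences and names below are placeholders; real coding sequences would normally need to meet the length threshold, so `threshold = 0` is set here:
```julia
using BioSequences: @dna_str
genes = [dna"ATGAAAATGAACTTTTGA", dna"ATGAAAATGAACTTTTGAATGAAAATGAACTTTTGA"]
result = enc(genes, names = ["geneA", "geneB"], threshold = 0)
result.Identifier # should contain "geneA" and "geneB"
```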
## Analyzing Multiple Files
Often, you might have a directory containing multiple .fna files, each of which you want to analyze. You can provide a vector of filepaths (or FASTAReaders, or IOStreams) to any `CUBScout` function, which will return a vector of results. If using BioSequences, each vector of sequences is considered a genome; providing a `Vector{<:Vector{<:NucSeq}}` functions the same as providing multiple filepaths. If supplying `ref_seqs`, provide a vector of named tuples corresponding to each file. The same goes for providing `names`: provide a `Vector{Vector{String}}` where each vector of names corresponds to each vector of BioSequences. `CUBScout` is multi-threaded, and if Julia is started with multiple threads, it will assign individual threads to process individual files. This means you *should not broadcast* `CUBScout` codon usage bias functions, as that would reduce efficiency. Also, each file is only ever processed by a single thread, so using more threads than you have files is unnecessary.
```julia-repl
julia> enc_p([EXAMPLE_DATA_PATH,EXAMPLE_DATA_PATH])
2-element Vector{Any}:
self = [61.0, 59.36979815371983, 60.7494622549966, 61.0, ...],
Identifier = ["lcl|NC_000964.3_cds_NP_387882.1_1", "lcl|NC_000964.3_cds_NP_387883.1_2", ...]),
self = [61.0, 59.36979815371983, 60.7494622549966, 61.0, ...],
Identifier = ["lcl|NC_000964.3_cds_NP_387882.1_1", "lcl|NC_000964.3_cds_NP_387883.1_2", ...])
julia> enc_p([EXAMPLE_DATA_PATH,EXAMPLE_DATA_PATH], ref_seqs = [(ribosomal = ribosomal_genes,), (ribosomal = ribosomal_genes,)])
2-element Vector{Any}:
self = [61.0, 58.88817312982425, 56.41038374603565, 61.0, ...],
Identifier = ["lcl|NC_000964.3_cds_NP_387882.1_1", "lcl|NC_000964.3_cds_NP_387883.1_2", ...]),
self = [61.0, 58.88817312982425, 56.41038374603565, 61.0, ...],
Identifier = ["lcl|NC_000964.3_cds_NP_387882.1_1", "lcl|NC_000964.3_cds_NP_387883.1_2", ...])
``` | CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 3319 | # Expressivity Predictions
## Under default conditions
Expressivity predictions based on codon usage bias can be calculated with the functions `cai()`, `e()`, `fop()`, `gcb()`, and `melp()`. All expressivity functions (besides `gcb`) require two arguments:
- `sequences`: DNA or RNA sequences to be analyzed, which should be coding sequences only. This can take quite a few forms depending on your use case. It can be a path to fasta file of coding sequences (e.g. .fasta, .fna, .fa), or a IO or FASTAReader pointing to these fasta files. It can also be a vector of BioSequences, if you've already brought them into Julia's environment. If you are analyzing multiple genomes (or sets of sequences), `sequences` could instead be a vector of filepaths, IOStreams, FASTAReaders, or vectors of BioSequences, with each vector corresponding to a genome.
- `ref_vector(s)`: `Vector{Bool}` or `Vector{Vector{Bool}}` identifying reference subsets for each file. Values of `true` should correspond to sequences to be used in the reference subset.
!!! note "Why do expressivity functions accept reference subsets in a different format than codon usage bias functions?"
    You may have noticed that for codon usage bias functions, you need to provide a named tuple of reference sequences, while for expressivity functions, you just need to provide the vector. Why did I make this so complicated for you? Well, for expressivity functions, a reference subset is **required**, and you can only provide a single reference subset. Because this is more strict, the function could be written less flexibly. However, codon usage bias functions can accept no reference subsets, one reference subset, or multiple reference subsets. As such, the named tuple format is necessary to differentiate your input and provide differentiated output.
Let's calculate MELP on our example data, using ribosomal proteins as a reference subset.
```julia-repl
julia> ribosomal_genes = find_seqs(EXAMPLE_DATA_PATH, r"ribosomal");
julia> melp_result = melp(EXAMPLE_DATA_PATH, ribosomal_genes);
julia> melp_result.MELP
3801-element Vector{Float64}:
0.9294138732153456
1.007671319249364
0.9223573085968517
0.9512392602630869
1.0295311265835025
1.0749743120487463
⋮
1.0147105407479773
0.9929945778238751
0.9682178480589456
0.9651731383865032
0.8840414848184831
```
The functions `cai`, `e`, `fop`, and `melp` all accept the same arguments. Their optional arguments are the same as codon usage bias functions, including options to specify a custom `CodonDict`, remove start or stop codons, and set a filtering threshold. They also handle multiple files and multi-threading in the same way, and so I do not recommend broadcasting these functions.
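For instance, swapping in `cai` or `fop` with the same ribosomal reference vector used above:
```julia
cai_result = cai(EXAMPLE_DATA_PATH, ribosomal_genes)
fop_result = fop(EXAMPLE_DATA_PATH, ribosomal_genes)
```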
## GCB-specific Arguments
Because of the iterative way GCB is calculated, its arguments differ slightly from other expressivity functions. Namely:
- GCB uses a "seed" reference subset. By default, this is set to "self", and so is an optional argument. Custom `ref_vector(s)` can be supplied if so desired, as a keyword argument
- GCB iteratively calculates the GCB measure and then uses the genes with the highest GCB values as the reference subset in the next iteration. The `perc` argument specifies what percentage of genes is used as that reference subset. By default, `perc = 0.05`, or 5%. A short sketch of both options follows below.
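A minimal sketch of both forms, assuming the keyword names described above (`ref_vector` and `perc`):
```julia
gcb_default = gcb(EXAMPLE_DATA_PATH) # seeds with "self" and keeps the top 5% each iteration
gcb_custom = gcb(EXAMPLE_DATA_PATH, ref_vector = ribosomal_genes, perc = 0.1)
```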
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 74 | # Index of Functions
```@index
```
```@autodocs
Modules = [CUBScout]
```
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 1181 | ```@meta
CurrentModule = CUBScout
```

# **C**odon **U**sage **B**ias in Julia
[CUBScout](https://github.com/gus-pendleton/CUBScout.jl) helps you work with codons in Julia. You can calculate **C**odon **U**sage **B**ias (CUB) and related expressivity predictions. Currently, `CUBScout` calculates:
- Six measures of codon usage bias:
- B, from Karlin and Mrazek, 1996
- ENC, from Wright, 1990
- ENC', from Novembre, 2002
- MCB, from Urrutia and Hurst, 2001
- MILC, from Supek and Vlahovicek, 2005
- SCUO, from Wan et al., 2004
- Five expressivity measures based on codon usage bias:
- CAI, from Sharp and Li, 1987
- E, from Karlin and Mrazek, 1996
- FOP, from Ikemura, 1981
- GCB, from Merkl, 2003
- MELP, from Supek and Vlahovicek, 2005
`CUBScout` is based off of the fabulous [coRdon](https://www.bioconductor.org/packages/release/bioc/html/coRdon.html) package in R by Anamaria Elek, Maja Kuzman, and Kristian Vlahovicek. I am grateful for their clear code and would encourage you to cite coRdon as well when using `CUBScout`.
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 2207 | # Providing Inputs to CUBScout
## Genomes and CDSs
First, `CUBScout` **only** works with coding sequences. `CUBScout` does not identify ORFs, pause at stop codons, or parse non-nucleotide characters. It is assumed the coding sequences you provide are in-frame and don't contain 5' or 3' untranslated regions. Codons which have non-specific nucleotides, like "W", are skipped. Sequences with characters outside of those recognized by BioSequences will throw an error.
Some `CUBScout` functions, like `count_codons`, are meaningful when applied to a single nucleotide sequence. However, most `CUBScout` functions are designed to work at the genome-level, and calculate metrics that rely on comparisons between multiple genes. Specifically, none of the codon usage bias or expressivity functions accept a single nucleotide sequence; all expect to operate across a set of sequences, whether in a fasta file or vector of BioSequences.
## FASTA Files
Most functions in `CUBScout` accept any FASTA-formatted file (e.g. .fa, .fna, .fasta) where each entry corresponds to coding sequences or open reading frames. `CUBScout` accepts either a `String` which is the complete filepath to a fasta-formatted file, or objects of type `FASTAReader` or `IO` which point to a fasta-formatted file. There is no significant performance advantage among these three options, unless you already have an `IOStream` or `FASTAReader` open for another purpose.
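For example, all three input forms below should be equivalent (a minimal sketch; `FASTAReader` comes from FASTX):
```julia
using FASTX: FASTAReader
enc(EXAMPLE_DATA_PATH) # filepath as a String
open(io -> enc(io), EXAMPLE_DATA_PATH) # open IO
open(reader -> enc(reader), FASTAReader, EXAMPLE_DATA_PATH) # open FASTAReader
```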
## BioSequences
`CUBScout` functions also accept nucleotide sequences from BioSequences (`<:NucSeq`). Keep in mind that most `CUBScout` functions are designed to operate across genomes, and so accept a vector of nucleotide sequences. The vector corresponds to a genome, with each DNA or RNA string corresponding to a coding sequence.
While `CUBScout` functions run slightly faster when supplied BioSequences rather than a filepath, reading sequences into Julia and then running a `CUBScout` function takes longer overall than supplying the filepath directly. Supplying filepaths also uses less memory, and so is generally recommended, unless you already have BioSequences loaded into Julia's environment for a separate reason.
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 1.0.0 | c1074167d7d32d34cb9fa33cc44b79d41916c4f5 | docs | 1805 | # References
Elek A, Kuzman M, Vlahovicek K (2023). coRdon: Codon Usage Analysis and Prediction of Gene Expressivity. R package version 1.18.0, https://github.com/BioinfoHR/coRdon.
Ikemura, T., 1981. Correlation between the abundance of Escherichia coli transfer RNAs and the occurrence of the respective codons in its protein genes: a proposal for a synonymous codon choice that is optimal for the E. coli translational system. Journal of molecular biology, 151(3), pp.389-409.
Karlin, S. and Mrázek, J., 1996. What drives codon choices in human genes?. Journal of molecular biology, 262(4), pp.459-472.
Merkl, R., 2003. A survey of codon and amino acid frequency bias in microbial genomes focusing on translational efficiency. Journal of molecular evolution, 57, pp.453-466.
Novembre, J.A., 2002. Accounting for background nucleotide composition when measuring codon usage bias. Molecular biology and evolution, 19(8), pp.1390-1394.
Sharp, P.M. and Li, W.H., 1987. The codon adaptation index-a measure of directional synonymous codon usage bias, and its potential applications. Nucleic acids research, 15(3), pp.1281-1295.
Supek, F. and Vlahoviček, K., 2005. Comparison of codon usage measures and their applicability in prediction of microbial gene expressivity. BMC bioinformatics, 6, pp.1-15.
Urrutia, A.O. and Hurst, L.D., 2001. Codon usage bias covaries with expression breadth and the rate of synonymous evolution in humans, but this is not evidence for selection. Genetics, 159(3), pp.1191-1199.
Wan, X.F., Xu, D., Kleinhofs, A. and Zhou, J., 2004. Quantitative relationship between synonymous codon usage bias and GC composition across unicellular genomes. BMC Evolutionary Biology, 4, pp.1-11.
Wright, F., 1990. The ‘effective number of codons’ used in a gene. Gene, 87(1), pp.23-29.
| CUBScout | https://github.com/gus-pendleton/CUBScout.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 901 | # Use
#
# DOCUMENTER_DEBUG=true julia --color=yes make.jl local [nonstrict] [fixdoctests]
#
# for local builds.
using Documenter
using AdaptableFunctions
# Doctest setup
DocMeta.setdocmeta!(
AdaptableFunctions,
:DocTestSetup,
:(using AdaptableFunctions);
recursive=true,
)
makedocs(
sitename = "AdaptableFunctions",
modules = [AdaptableFunctions],
format = Documenter.HTML(
prettyurls = !("local" in ARGS),
canonical = "https://oschulz.github.io/AdaptableFunctions.jl/stable/"
),
pages = [
"Home" => "index.md",
"API" => "api.md",
"LICENSE" => "LICENSE.md",
],
doctest = ("fixdoctests" in ARGS) ? :fix : true,
linkcheck = !("nonstrict" in ARGS),
strict = !("nonstrict" in ARGS),
)
deploydocs(
repo = "github.com/oschulz/AdaptableFunctions.jl.git",
forcepush = true,
push_preview = true,
)
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 384 | # This file is a part of AdaptableFunctions.jl, licensed under the MIT License (MIT).
"""
AdaptableFunctions
Provides an API for functions that can be optimized/specialized for specific
input types, sizes and the computing devices the input resides on.
"""
module AdaptableFunctions
import ChangesOfVariables
import InverseFunctions
include("adapt_to_input.jl")
end # module
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 1244 | # This file is a part of AdaptableFunctions.jl, licensed under the MIT License (MIT).
"""
    struct UnadaptedFunction{F,T}
An instance `UnadaptedFunction{F,T}(f)` signifies that a function of type
`F` could not be adapted to inputs of type `T`.
`(f::UnadaptedFunction).f` contains the original function, and
`(f::UnadaptedFunction)(args...; kwargs...)` calls the original function.
"""
struct UnadaptedFunction{F,T}
f::F
end
export UnadaptedFunction
@inline (f::UnadaptedFunction)(args...; kwargs...) = f.f(args...; kwargs...)
@inline InverseFunctions.inverse(f::UnadaptedFunction) =
InverseFunctions.inverse(f.f)
@inline ChangesOfVariables.with_logabsdet_jacobian(f::UnadaptedFunction, x) =
ChangesOfVariables.with_logabsdet_jacobian(f.f, x)
"""
adapt_to_input(f::F, example_input::T) where {F,T}
Generates a function that performs the same operation as `f`, but is
optimized/specialized for the type and size of `example_input` and for
the computing device `example_input` resides on.
Returns the optimized/specialized version of `f`, or
[`UnadaptedFunction{F,T}(f)`](@ref) if this is not possible.
"""
adapt_to_input(f::F, example_input::T) where {F,T} = UnadaptedFunction{F,T}(f)
export adapt_to_input
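# Illustrative sketch (not part of this package's API): a downstream package
# could specialize `adapt_to_input` for its own function type, e.g. to bake the
# input length into the type domain. `SumKernel` and `FixedLengthSum` are
# hypothetical names used only for this example:
#
#     struct SumKernel end
#     struct FixedLengthSum{N} end
#     (::FixedLengthSum)(x) = sum(x)
#     adapt_to_input(::SumKernel, example_input::AbstractVector) =
#         FixedLengthSum{length(example_input)}()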
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 314 | # This file is a part of AdaptableFunctions.jl, licensed under the MIT License (MIT).
import Test
Test.@testset "Package AdaptableFunctions" begin
include("test_aqua.jl")
include("test_adapt_to_input.jl")
include("test_docs.jl")
isempty(Test.detect_ambiguities(AdaptableFunctions))
end # testset
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 458 | # This file is a part of AdaptableFunctions.jl, licensed under the MIT License (MIT).
using AdaptableFunctions
using Test
import InverseFunctions
import ChangesOfVariables
@testset "adapt_to_input" begin
@test adapt_to_input(log, 0.4)(0.7) == log(0.7)
InverseFunctions.test_inverse(adapt_to_input(log, 0.4), 0.7)
ChangesOfVariables.test_with_logabsdet_jacobian(adapt_to_input(log, 0.4), 0.7, (f::UnadaptedFunction{typeof(log)},x) -> 1/x)
end
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 223 | # This file is a part of AdaptableFunctions.jl, licensed under the MIT License (MIT).
import Test
import Aqua
import AdaptableFunctions
Test.@testset "Aqua tests" begin
Aqua.test_all(AdaptableFunctions)
end # testset
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | code | 311 | # This file is a part of AdaptableFunctions.jl, licensed under the MIT License (MIT).
using Test
using AdaptableFunctions
import Documenter
Documenter.DocMeta.setdocmeta!(
AdaptableFunctions,
:DocTestSetup,
:(using AdaptableFunctions);
recursive=true,
)
Documenter.doctest(AdaptableFunctions)
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | docs | 1261 | # AdaptableFunctions.jl
[](https://oschulz.github.io/AdaptableFunctions.jl/stable)
[](https://oschulz.github.io/AdaptableFunctions.jl/dev)
[](LICENSE.md)
[](https://github.com/oschulz/AdaptableFunctions.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/oschulz/AdaptableFunctions.jl)
[](https://github.com/JuliaTesting/Aqua.jl)
## Documentation
* [Documentation for stable version](https://oschulz.github.io/AdaptableFunctions.jl/stable)
* [Documentation for development version](https://oschulz.github.io/AdaptableFunctions.jl/dev)
AdaptableFunctions.jl is a Julia package that provides an API for functions that can be optimized/specialized for specific input types, sizes and the computing devices the input resides on.
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | docs | 306 | # API
## Modules
```@index
Order = [:module]
```
## Types and constants
```@index
Order = [:type, :constant]
```
## Functions and macros
```@index
Order = [:macro, :function]
```
# Documentation
```@autodocs
Modules = [AdaptableFunctions]
Order = [:module, :type, :constant, :macro, :function]
```
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 7ffab6456ca1847cfeac5b96bfb2283079c136b9 | docs | 377 | # AdaptableFunctions.jl
AdaptableFunctions provides an API for functions that can be optimized/specialized for specific input types, sizes and the computing devices the input resides on.
This package defines a function [`adapt_to_input(f, example_input)`](@ref) that can be specialized to generate input-optimized versions of functions for specific function and input types.
| AdaptableFunctions | https://github.com/oschulz/AdaptableFunctions.jl.git |
|
[
"Apache-2.0"
] | 0.1.2 | 86077741eb00a97823cf6806d964574a8dc631e2 | code | 337 | using Documenter
import PoseComposition
pages_in_order = [
"index.md",
"operations.md",
"further_api_reference.md",
]
makedocs(
sitename="PoseComposition.jl",
pages = pages_in_order,
expandfirst = pages_in_order,
)
deploydocs(
repo = "github.com/probcomp/PoseComposition.jl.git",
devbranch = "main",
)
| PoseComposition | https://github.com/probcomp/PoseComposition.jl.git |