#=
/opt/openmpi/bin/mpiexec
=#

#在openmpi中运行openblas中的函数时经常有问题，是不是因为我用的是ifort编译的openmpi？
#改用intelmpi或者用MKL
#在利用MPI的情况下，BLAS的多线程实现会很大的干扰性能，用intelmpi+MKL SEQUENTIAL
#MKL的引用放在最上面
using MKL
MKL.set_threading_layer(MKL.THREADING_SEQUENTIAL)

include("../src/qmcmary.jl")
using ..qmcmary

using Random
using MPI
using Dates


"""
    savehs(hscfg, head, fname)

Append one Hubbard-Stratonovich field configuration to the file `fname`.

Writes `head` on its own line, then one line per imaginary-time slice
(row of `hscfg`) using Julia's vector `string` representation, e.g.
`[1, -1, 1]`.  This exact format is parsed back by `step_theta1`, so the
two must stay in sync.
"""
function savehs(hscfg, head, fname)
    # `do`-block form guarantees the handle is closed even if a write throws
    # (the original bare open/close would leak the handle on error).
    open(fname, "a") do fil
        write(fil, head, "\n")
        for ni in axes(hscfg, 1)
            write(fil, string(hscfg[ni, :]), "\n")
        end
    end
    return nothing
end


"""
    forward(sp, Nt, Nx, Ng, rax, iax, rbx, ibx; hsf=nothing)

Run the forward DQMC sampling pass for one optimization step.

If `hsf === nothing`, a random ±1 Hubbard-Stratonovich configuration is
drawn and 1000 warm-up sweeps are performed; otherwise sampling resumes
from `hsf` with no warm-up.  Then `nsp = 2000` measurement sweeps are run;
after each sweep the configuration is appended (via `savehs`) to the
per-rank scratch file `tmp<rank>_<size>`, which `step_theta1` reads back.

Returns `(tph/nsp, hscfg)`: the mean phase/sign over the measurement
sweeps and the last HS configuration (pass it back as `hsf` to resume).
"""
function forward(sp, Nt, Nx, Ng, rax, iax, rbx, ibx; hsf=nothing)
    comm = MPI.COMM_WORLD
    # Per-rank scratch file; opening with "w" truncates any previous run's data.
    fname = "tmp$(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm))"
    fil = open(fname, "w")
    close(fil)
    hscfg::Matrix{Int} = Matrix{Int}(undef, Nt, Nx)
    nwarm = 0
    if isnothing(hsf)
        # Fresh start: each time slice gets an independent random ±1 field.
        for bi in Base.OneTo(Nt)
            # round(rand()) is 0 or 1, so 2(⋅ - 0.5) is exactly ±1.0,
            # which converts losslessly into the Int matrix.
            cfg = 2(round.(rand(Nx)) .- 0.5)
            hscfg[bi, :] .= cfg
        end
        nwarm = 1000
    else
        # Resume from the previous call's configuration; no warm-up needed.
        # NOTE(review): this rebinds `hscfg` to the caller's matrix — confirm
        # dqmc_step_Gauge1 does not mutate it in place if aliasing matters.
        hscfg = hsf
    end
    sslen, bmats, allbmats, ss = initialize_SS_Gauge1(
        Nt, Ng, sp, hscfg, rax, iax, rbx, ibx
    )
    println("rank: $(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm)) tick1: $(Dates.now())")
    # Warm-up sweeps (results discarded).
    for _ in Base.OneTo(nwarm)
        hscfg, bmats, allbmats, ss, gf, ph = dqmc_step_Gauge1(
            Nt, Ng, sp, hscfg, rax, iax, rbx, ibx, sslen, bmats, allbmats, ss
        )
    end
    println("rank: $(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm)) tick2: $(Dates.now())")
    #
    tph = 0.0
    nsp = 2000
    # Measurement sweeps: accumulate the phase and persist every configuration.
    # The header written by savehs is the 1-based sample index, so the last
    # header in the file equals nsp (step_theta1 relies on this for the count).
    for idx in Base.OneTo(nsp)
        hscfg, bmats, allbmats, ss, gf, ph = dqmc_step_Gauge1(
            Nt, Ng, sp, hscfg, rax, iax, rbx, ibx, sslen, bmats, allbmats, ss
        )
        tph += ph
        savehs(hscfg, string(idx), fname)
    end
    println("rank: $(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm)) tick3: $(Dates.now())")
    return tph/nsp, hscfg
    #println("forward sgn: ", tph/nsp)
end


"""
    step_theta1(sp, Nt, Nx, Ng, rax, iax, rbx, ibx)

Re-read the HS configurations saved by `forward` from this rank's scratch
file `tmp<rank>_<size>` and accumulate the parameter gradients.

For every saved configuration the propagator stack is rebuilt
(`initialize_SS_Gauge1`), the equal-time Green's function and phase are
recomputed (`eq_green_scratch`), and `meas_grada` is evaluated; the
per-parameter gradients are summed over samples and divided by the
sample count, which is recovered from the last header line of the file
(the sample index written by `savehs`).

Returns `(raxbar, iaxbar, rbxbar, ibxbar)` — the mean gradients of the
four parameter vectors, each of length `Nx`.
"""
function step_theta1(sp, Nt, Nx, Ng, rax, iax, rbx, ibx)
    comm = MPI.COMM_WORLD
    fid = open("tmp$(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm))", "r")
    #
    # Re-read the saved configurations and accumulate gradients.
    raxbar = zeros(Nx)
    iaxbar = zeros(Nx)
    rbxbar = zeros(Nx)
    ibxbar = zeros(Nx)
    tph = 0.0
    bistr = "0"
    # The tape layout depends only on the parameters, so build it once
    # outside the loop and reuse it for every sample.
    tapemap = pbpa_calc_tapemap(sp, rax, iax, rbx, ibx)
    println("rank: $(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm)) tick4: $(Dates.now())")
    while !eof(fid)
        # Sample-index header written by savehs; the last one equals the count.
        bistr = readline(fid)
        #println(bistr)
        hscfg = Matrix{Int}(undef, Nt, Nx)
        for bi in Base.OneTo(Nt)
            cfg = readline(fid)
            # Drop the surrounding "[" and "]" and split the "a, b, c" fields.
            cfg = split(cfg[2:end-1], ',')
            for xi in Base.OneTo(Nx)
                # Entries are ±1 only, so anything that is not "1" must be -1.
                hscfg[bi, xi] = strip(cfg[xi]) == "1" ? +1 : -1
            end
        end
        sslen, bmats, allbmats, ss = initialize_SS_Gauge1(
            Nt, Ng, sp, hscfg, rax, iax, rbx, ibx
        )
        ###
        gf, ph = eq_green_scratch(ss)
        #println(ph)
        tph += ph
        #
        drax, diax, drbx, dibx = meas_grada(
            ss, allbmats, sp, hscfg, rax, iax, rbx, ibx; tapemap=tapemap
        )
        raxbar += drax
        iaxbar += diax
        rbxbar += drbx
        ibxbar += dibx
    end
    println("rank: $(MPI.Comm_rank(comm))_$(MPI.Comm_size(comm)) tick5: $(Dates.now())")
    close(fid)
    # Last header == number of samples (stays "0" only if the file was empty).
    nsp = parse(Int, bistr)
    #println(tph/nsp)
    return raxbar/nsp, iaxbar/nsp, rbxbar/nsp, ibxbar/nsp
end



"""
    optimal(L)

Optimize the complex HS-transformation parameters (`rax`, `iax`, `rbx`,
`ibx`) of a DQMC simulation on an `L×L` t'-square lattice, distributed
over MPI ranks, using plain gradient steps on the average sign.

Each of the 100 iterations:
1. runs `forward` on every rank (sampling + scratch-file dump),
2. runs `step_theta1` on every rank to obtain per-rank mean gradients,
3. averages sign and gradients over ranks via `MPI.Reduce` onto rank 0,
4. applies one SGD step with learning rate `1e-3` on rank 0,
5. broadcasts the updated parameters back to all ranks.
"""
function optimal(L)
    MPI.Init()
    #Nx = 3*L^2
    #hk = lattice_kagome(ComplexF64, L, -1.0+0.0im)
    Nx = L^2                       # number of lattice sites
    tp = 0.25                      # next-nearest-neighbour hopping t'
    hk = lattice_tprim_square(ComplexF64, L, -1.0+0.0im, tp+0.0im)
    Ui = 6*ones(Nx)                # on-site interaction strength per site
    Nt = 60                        # imaginary-time slices
    Ng = 4
    dt = 0.1                       # imaginary-time step
    # Initial coupling from the discrete HS identity cosh(λ) = exp(U·dt/2),
    # rescaled by 2/(U·dt).
    rax = @. 2acosh(exp(Ui*dt/2))/Ui/dt
    iax = 1*ones(Nx)
    rbx = 0.5*ones(Nx)
    ibx = 1*ones(Nx)
    #
    comm = MPI.COMM_WORLD
    # Adam moment/step state per parameter vector; kept for the currently
    # disabled Adam update below (the active update is plain SGD).
    t_rax = 0
    m_rax = zeros(Nx)
    v_rax = zeros(Nx)
    t_iax = 0
    m_iax = zeros(Nx)
    v_iax = zeros(Nx)
    t_rbx = 0
    m_rbx = zeros(Nx)
    v_rbx = zeros(Nx)
    t_ibx = 0
    m_ibx = zeros(Nx)
    v_ibx = zeros(Nx)
    #
    hsfig = nothing    # last HS configuration, threaded between iterations
    #
    sp = default_splitting(Nt, hk, Ui; Z2=true, dt=dt)
    for st in Base.OneTo(100)
        sgn, hsfig = forward(sp, Nt, Nx, Ng, rax, iax, rbx, ibx; hsf=hsfig)
        raxbar, iaxbar, rbxbar, ibxbar = step_theta1(sp, Nt, Nx, Ng, rax, iax, rbx, ibx)
        # Reduce blocks until every rank has contributed; Allreduce would
        # behave the same here.
        println("sgn: $(sgn) $(MPI.Comm_rank(comm))")
        # `sgnsum` (renamed from `sum`) so Base.sum is not shadowed.
        sgnsum = MPI.Reduce(sgn, +, 0, comm)
        sgnsum = MPI.bcast(sgnsum, 0, comm)
        sgnsum = sgnsum / MPI.Comm_size(comm)
        #
        # Gradient sums exist only on rank 0 (Reduce returns nothing elsewhere).
        raxbarsum = MPI.Reduce(raxbar, +, 0, comm)
        iaxbarsum = MPI.Reduce(iaxbar, +, 0, comm)
        rbxbarsum = MPI.Reduce(rbxbar, +, 0, comm)
        ibxbarsum = MPI.Reduce(ibxbar, +, 0, comm)
        lr = 1e-3    # SGD learning rate
        if MPI.Comm_rank(comm) == 0
            # Average the rank-summed gradients, then take one SGD step.
            raxbarsum = raxbarsum / MPI.Comm_size(comm)
            iaxbarsum = iaxbarsum / MPI.Comm_size(comm)
            rbxbarsum = rbxbarsum / MPI.Comm_size(comm)
            ibxbarsum = ibxbarsum / MPI.Comm_size(comm)
            println("forward sign: $(sgnsum)")
            #println("t: $(tbarsum)")
            #println("p: $(pbarsum)")
            # Disabled Adam alternative to the SGD step below:
            #lg_rax, m_rax, v_rax, t_rax = next(Adam, raxbarsum, m_rax, v_rax, t_rax; α=0.03)
            #lg_iax, m_iax, v_iax, t_iax = next(Adam, iaxbarsum, m_iax, v_iax, t_iax; α=0.03)
            #lg_rbx, m_rbx, v_rbx, t_rbx = next(Adam, rbxbarsum, m_rbx, v_rbx, t_rbx; α=0.03)
            #lg_ibx, m_ibx, v_ibx, t_ibx = next(Adam, ibxbarsum, m_ibx, v_ibx, t_ibx; α=0.03)
            #rax = rax .+ lg_rax
            #iax = iax .+ lg_iax
            #rbx = rbx .+ lg_rbx
            #ibx = ibx .+ lg_ibx
            rax = rax .+ lr*raxbarsum
            iax = iax .+ lr*iaxbarsum
            rbx = rbx .+ lr*rbxbarsum
            ibx = ibx .+ lr*ibxbarsum
        end
        # Rank 0 holds the updated parameters; every other rank receives them.
        rax = MPI.bcast(rax, 0, comm)
        iax = MPI.bcast(iax, 0, comm)
        rbx = MPI.bcast(rbx, 0, comm)
        ibx = MPI.bcast(ibx, 0, comm)
        #println("$(MPI.Comm_rank(comm)) mt: $(m_the) vt: $(v_the)")
        #println("$(MPI.Comm_rank(comm)) mt: $(m_psi) vt: $(v_psi)")
        if MPI.Comm_rank(comm) == 0
            @show MPI.Comm_rank(comm) rax iax
            @show MPI.Comm_rank(comm) rbx ibx
        end
        #println("$(MPI.Comm_rank(comm)) hscfg: $(hsfig[1:3, :])")
    end
end

# Script entry point: every MPI rank launched via mpiexec runs the
# optimization for a 6×6 lattice.
optimal(6)
