"""
用来更新参数
"""

import torch
import numpy
from torch.nn import Module, Parameter
from torch.optim import Optimizer


class Laye(Module):
    """Toy one-parameter model: maps coordinates x to pa * sum(x ** 3).

    The scalar output plays the role of log|psi(x)| for the update routine.
    """

    def __init__(self):
        super().__init__()
        # Single learnable scale, initialized to 1.
        self.pa = Parameter(torch.ones(1))

    def forward(self, x):
        cubed_sum = (x ** 3).sum()
        return self.pa * cubed_sum


def update_param(optim: Optimizer, model: Module, seq_rup: numpy.ndarray, seq_rdn: numpy.ndarray):
    """Estimate the variational-energy gradient over a batch of walker
    configurations and take one optimizer step.

    For each configuration r the local energy is
        EL(r) = -0.5 * (lap log|psi| + |grad log|psi||^2) + V(r)
    with a Coulomb potential (nucleus assumed at the origin — TODO confirm,
    and the nuclear charge is implicitly Z = 1), and the gradient of the
    energy expectation w.r.t. each parameter p is
        dL/dp = E[EL * dlog|psi|/dp] - E[EL] * E[dlog|psi|/dp]

    Args:
        optim: optimizer whose FIRST param group holds the model parameters
            (only ``param_groups[0]`` is read, as in the original code).
        model: callable mapping an (n_elec, dim) coordinate tensor to a
            scalar log|psi|.
        seq_rup: (seq_len, n_up, dim) spin-up electron coordinates.
        seq_rdn: (seq_len, n_dn, dim) spin-down electron coordinates
            (same seq_len and dim as ``seq_rup``).
    """
    seq_len = seq_rup.shape[0]
    allpara = optim.param_groups[0]['params']
    # Batch accumulators for E[dlog|psi|] and E[EL * dlog|psi|].
    grad_dict = {p: torch.zeros_like(p) for p in allpara}
    elgd_dict = {p: torch.zeros_like(p) for p in allpara}
    tot_elist = []
    nup = seq_rup.shape[1]
    ndn = seq_rdn.shape[1]
    ntot = nup + ndn
    for bidx in range(seq_len):
        optim.zero_grad()
        rup = torch.tensor(seq_rup[bidx, :, :], dtype=torch.float32)
        rdn = torch.tensor(seq_rdn[bidx, :, :], dtype=torch.float32)
        rupdn = torch.cat([rup, rdn])
        rupdn.requires_grad = True
        logabspsi = model(rupdn)
        logabspsi.backward()  # fills rupdn.grad and every p.grad
        # Kinetic energy: -0.5 * |grad log|psi||^2 - 0.5 * lap log|psi|.
        tot_el = -0.5 * torch.sum(torch.square(rupdn.grad))
        hess = torch.autograd.functional.hessian(model, rupdn)
        # BUGFIX: the Laplacian is the TRACE of the Hessian; summing all
        # entries wrongly includes the cross second derivatives. (For the
        # toy cubic model the off-diagonals vanish, which hid this.)
        dim = rupdn.shape[1]
        tot_el = tot_el - 0.5 * hess.reshape(ntot * dim, ntot * dim).diagonal().sum()
        # Potential energy, computed outside the autograd graph — EL is a
        # constant weight in the gradient estimator.
        r = rupdn.detach()
        # BUGFIX: Coulomb terms are 1/r, not 1/r^2 (the previous code never
        # took the square root of the squared distance), and each electron
        # pair is counted once (j > i), not twice.
        for i in range(ntot):
            for j in range(i + 1, ntot):
                tot_el = tot_el + 1.0 / torch.sqrt(torch.sum(torch.square(r[i, :] - r[j, :])))
        # Electron-nucleus attraction, nucleus at the origin.
        for i in range(ntot):
            tot_el = tot_el - 1.0 / torch.sqrt(torch.sum(torch.square(r[i, :])))
        el = tot_el.detach()
        tot_elist.append(el)
        for p in allpara:
            if p.grad is not None:
                # dL = E[EL * dlogpsi] - E[dlogpsi] * E[EL]; means taken below.
                grad_dict[p] += p.grad
                elgd_dict[p] += el * p.grad
    print("tot_elist: ", tot_elist)
    # BUGFIX: use the batch MEAN of the local energies — the original used
    # only the last sample's tot_el — and normalize both sums by seq_len.
    mean_el = sum(tot_elist) / seq_len
    # Manually install the estimated gradient and step.
    optim.zero_grad()
    for p in allpara:
        p.grad = (elgd_dict[p] - grad_dict[p] * mean_el) / seq_len
    optim.step()
        



