'''Energy network architecture is 8.14.18.9.2 (mixed-size training), also
compatible with 8.14.18 (single-size training); force network is 8.14.26.12.
'''
import torch
from torch import nn
import torch.nn.functional as F
from math import pi


class Bond_Feather_Extract(nn.Module):
    """Expand a pairwise-distance matrix into Gaussian radial features.

    Each distance d_ij is mapped to ``exp(-coeff * (d_ij - fp)**2)`` for
    ``nfd`` learnable centers ``fp`` and widths ``coeff`` (trained together
    with the rest of the network).
    """

    def __init__(self, nfd=36):
        """nfd: number of radial (distance) features per atom pair."""
        super(Bond_Feather_Extract, self).__init__()
        # Shape (1,1,1,nfd) so the parameters broadcast over b*N*N*nfd.
        self.fp = nn.Parameter(torch.Tensor(1, 1, 1, nfd))       # Gaussian centers
        self.coeff = nn.Parameter(torch.Tensor(self.fp.size()))  # Gaussian widths
        self.reset_parameters()

    def reset_parameters(self):
        # Centers spread over a typical bond-length range; widths in [2, 3).
        self.fp.data.uniform_(1, 6)
        self.coeff.data.uniform_(2, 3)

    def forward(self, x):
        """Input: b*N*N distance matrix. Output: b*N*N*nfd features.

        Diagonal (self-pair) entries are zeroed; any NaN features are
        forced to 0.
        """
        fd = torch.exp(-self.coeff * (torch.unsqueeze(x, -1) - self.fp) ** 2)
        N = x.shape[1]
        # Zero the i == j entries. Build the mask directly on x's device
        # (the original built it on CPU, copied it, then performed a
        # redundant unsqueeze followed by an equivalent view).
        mask = torch.ones(N, N, dtype=x.dtype, device=x.device) \
               - torch.eye(N, dtype=x.dtype, device=x.device)
        fd = fd * mask.view(1, N, N, 1)
        fd[torch.isnan(fd)] = 0.
        return fd
       
       
class Angle_Feather_Extract(nn.Module):
    """Expand the angles implied by a distance matrix into Gaussian features.

    For every atom triple (i, j, k) the cosine of the angle at vertex i is
    obtained via the law of cosines, then passed through ``nfa`` learnable
    Gaussians ``exp(-coeff * (cos - fp)**2)``.
    """

    def __init__(self, nfa=24):
        """nfa: number of angular features per atom triple."""
        super(Angle_Feather_Extract, self).__init__()
        self.fp = nn.Parameter(torch.Tensor(1, 1, 1, 1, nfa))    # Gaussian centers
        self.coeff = nn.Parameter(torch.Tensor(self.fp.size()))  # Gaussian widths
        self.reset_parameters()

    def reset_parameters(self):
        # Cosines live in [-1, 1]; widths start in [2, 3).
        self.fp.data.uniform_(-1, 1)
        self.coeff.data.uniform_(2, 3)

    def calc_angle(self, d):
        """Law of cosines: cos of the angle at vertex i between j and k.

        Entries involving the diagonal divide 0/0 and come out NaN; they
        are replaced with 0 before returning.
        """
        sq = d * d
        numerator = sq.unsqueeze(-1) + sq.unsqueeze(-2) - sq.unsqueeze(-3)
        denominator = 2 * d.unsqueeze(-1) * d.unsqueeze(-2)
        cos = numerator / denominator
        cos[torch.isnan(cos)] = 0
        return cos

    def forward(self, x):
        """x: adjacency (distance) matrix, b*N*N -> features b*N*N*N*nfa."""
        cos = self.calc_angle(x)  # cosine of every triple's angle
        fa = torch.exp(-self.coeff * (cos.unsqueeze(-1) - self.fp.to(x.device)) ** 2)
        N = x.shape[1]
        # Zero out degenerate triples where the two far atoms coincide (j == k).
        mask = (torch.ones(N, N, dtype=x.dtype) - torch.eye(N, dtype=x.dtype)).to(x.device)
        fa = fa * mask.view(1, 1, N, N, 1)
        fa[torch.isnan(fa)] = 0
        return fa
 
 
class VertexConvLayer(nn.Module):
    """Graph-attention style convolution over vertex features.

    Attention logits come from the concatenation [h_i, h_j, e_ij] of the
    linearly transformed endpoints and the edge features; the attended
    message is blended back residually through a learnable scalar ``beta``.
    """

    def __init__(self, n_vin, n_vout, n_ein, use_bias=True):
        super(VertexConvLayer, self).__init__()
        self.n_vin = n_vin
        self.n_vout = n_vout
        self.n_ein = n_ein
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(n_vin, n_vout))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_vout))
        else:
            self.register_parameter('bias', None)
        # Attention vector over [h_i, h_j, e_ij] plus the residual mixer.
        self.a = nn.Parameter(torch.Tensor(2 * n_vout + n_ein))
        self.beta = nn.Parameter(torch.Tensor(1))
        self.reset_parameters()  # custom initialisation scheme

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.weight)
        self.a.data.uniform_(-0.01, 0.01)
        self.beta.data.uniform_(-0.4, -0.1)
        if self.use_bias:
            nn.init.zeros_(self.bias)

    def forward(self, v, e):
        B, N = v.shape[:2]
        h = torch.matmul(v, self.weight)
        # Build [h_i, h_j, e_ij] for every ordered pair (i, j).
        pair = torch.cat([h.repeat(1, 1, N).view(B, N, N, -1),
                          h.repeat(1, N, 1).view(B, N, N, -1),
                          e], dim=-1)
        logits = F.leaky_relu(torch.matmul(pair, self.a))
        att = F.softmax(logits, dim=-1)
        h_prime = torch.bmm(att, h)
        out = self.beta * h_prime + h
        if self.use_bias:
            out = out + self.bias
        return F.elu(out)
 
 
class EdgeConvLayer(nn.Module):
    """Update edge features from the edge itself and its two endpoints.

    Each edge (i, j) is pushed through a single linear map applied to the
    concatenation [e_ij, v_j, v_i], followed by ELU.
    """

    def __init__(self, n_vin, n_ein, n_eout, use_bias=True):
        super(EdgeConvLayer, self).__init__()
        self.n_vin = n_vin
        self.n_ein = n_ein
        self.n_eout = n_eout
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(2 * n_vin + n_ein, n_eout))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_eout))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.weight)
        if self.use_bias:
            nn.init.zeros_(self.bias)

    def forward(self, v, e):
        B, N = v.shape[:2]
        # repeat(1,N,1) tiles the "other" endpoint v_j; repeat(1,1,N) tiles v_i.
        v_other = v.repeat(1, N, 1).view(B, N, N, -1)
        v_self = v.repeat(1, 1, N).view(B, N, N, -1)
        out = torch.matmul(torch.cat([e, v_other, v_self], dim=-1), self.weight)
        if self.use_bias:
            out = out + self.bias
        return F.elu(out)
 
 
class AngleConvLayer(nn.Module):
    """Attention over atom triples: each edge (i, j) attends over the edges
    (i, k) sharing its first vertex, weighted by angle features a[i, j, k].

    The attended message is mixed back residually via a learnable scalar
    ``beta``, then passed through ELU.
    """

    def __init__(self, n_ein, n_eout, n_ain, use_bias=True):
        super(AngleConvLayer, self).__init__()
        self.n_ein = n_ein
        self.n_eout = n_eout
        self.n_ain = n_ain
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(n_ein, n_eout))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_eout))
        else:
            self.register_parameter('bias', None)
        # Attention vector over [h_ij, h_ik, a_ijk] plus the residual mixer.
        self.a = nn.Parameter(torch.Tensor(2 * n_eout + n_ain))
        self.beta = nn.Parameter(torch.Tensor(1))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.weight)
        self.a.data.uniform_(-0.01, 0.01)
        self.beta.data.uniform_(-0.4, -0.1)
        if self.use_bias:
            nn.init.zeros_(self.bias)

    def forward(self, e, a):
        B, N = e.shape[:2]
        h = torch.matmul(e, self.weight)
        # Build [h_ij, h_ik, a_ijk] for every triple (i, j, k).
        triple = torch.cat([h.repeat(1, 1, 1, N).view(B, N, N, N, -1),
                            h.repeat(1, 1, N, 1).view(B, N, N, N, -1),
                            a], dim=-1)
        logits = F.leaky_relu(torch.matmul(triple, self.a))
        att = F.softmax(logits, dim=-1)
        # Batched matmul per (batch, vertex i): (B*N,N,N) x (B*N,N,C).
        h_prime = torch.bmm(att.view(B * N, N, N),
                            h.view(B * N, N, -1)).view(B, N, N, -1)
        out = self.beta * h_prime + h
        if self.use_bias:
            out = out + self.bias
        return F.elu(out)
        
        
class ForceGraphAttentionLayer(nn.Module):
    '''Graph-attention layer that predicts per-atom force vectors.

    For each ordered pair (i, j) an attention scalar is computed from the
    concatenation [v_i, v_j, e_ij] via a small two-stage projection; the
    force on atom i is the attention-weighted sum of the unit vectors
    pointing from each neighbour j towards i.

    NOTE: ``use_bias`` is accepted for interface consistency with the other
    layers but is currently unused — this layer has no bias term.
    '''
    def __init__(self, n_vin, n_ein, use_bias=True):
        '''n_vin: number of vertex (atom) features; n_ein: number of edge features.'''
        super(ForceGraphAttentionLayer, self).__init__()
        self.use_bias = use_bias  # kept for API symmetry; not used
        self.a = nn.Parameter(torch.Tensor(2 * n_vin + n_ein, 8))
        self.a2 = nn.Parameter(torch.Tensor(8))
        self.reset_parameters()  # consistent with the sibling layers

    def reset_parameters(self):
        # Same distributions and call order as the original inline init.
        self.a.data.uniform_(-1., 1.)
        self.a2.data.uniform_(-1., 1.)

    def forward(self, v, e, c):
        '''v: vertex features, B*N*n_vin
        e: edge features, B*N*N*n_ein (not the raw distance matrix —
           it is concatenated feature-wise with the vertex pairs below)
        c: coordinates, B*N*3
        returns: forces, B*N*3'''
        B, N = v.shape[:2]
        a_input = torch.cat([v.repeat(1, 1, N).view(B, N, N, -1),
                             v.repeat(1, N, 1).view(B, N, N, -1),
                             e], dim=-1)
        attention = torch.nn.LeakyReLU()(torch.matmul(a_input, self.a))  # B*N*N*8
        attention = torch.matmul(attention, self.a2)                     # B*N*N
        attention = attention.view(B, N, N, 1)
        r = c.unsqueeze(-2) - c.unsqueeze(-3)  # r[b,i,j] = c_i - c_j, B*N*N*3
        # Normalise to unit directions; eps keeps the diagonal (r = 0) finite.
        r = r / (r.norm(2, dim=-1, keepdim=True) + 1e-8)
        return (attention * r).sum(dim=-2)
        
        
class GCNE(nn.Module):
    '''Energy network: predicts a scalar per sample from a distance matrix.

    Pipeline: radial/angular feature extraction -> edge conv -> angle conv
    -> batch-norm -> vertex conv -> batch-norm -> edge conv, then linear
    vertex and edge heads whose means are combined with size-dependent
    (1/n polynomial) correction coefficients.
    '''

    def __init__(self, nfe1, nfe2, nfv1, nfe3, mix=True):
        '''nfe1/nfe2/nfe3: edge feature widths after each stage;
        nfv1: vertex feature width;
        mix: enable the pre-fitted 1/n size-correction coefficients
        (for mixed-size training); otherwise all corrections are 0.'''
        super(GCNE, self).__init__()
        nfv0 = 1   # input vertex features (constant ones)
        nfe0 = 36  # radial feature count
        nfa0 = 24  # angular feature count
        self.fe = Bond_Feather_Extract(nfe0)
        self.fa = Angle_Feather_Extract(nfa0)
        self.edge_conv1 = EdgeConvLayer(nfv0, nfe0, nfe1)
        self.angle_conv = AngleConvLayer(nfe1, nfe2, nfa0)
        self.bn1 = nn.BatchNorm1d(nfe2)
        self.vertex_conv = VertexConvLayer(1, nfv1, nfe2)
        self.bn2 = nn.BatchNorm1d(nfv1)
        self.edge_conv2 = EdgeConvLayer(nfv1, nfe2, nfe3)
        self.fcv = nn.Linear(nfv1, 1)
        self.fce = nn.Linear(nfe3, 1)
        if mix:
            # Pre-fitted size-correction coefficients (values presumably
            # obtained from a previous fit — TODO confirm provenance).
            self.cv1 = nn.Parameter(torch.Tensor(1))
            self.cv2 = nn.Parameter(torch.Tensor(1))
            self.ce1 = nn.Parameter(torch.Tensor(1))
            self.ce2 = nn.Parameter(torch.Tensor(1))
            self.cv1.data[0] = -4.715436
            self.cv2.data[0] = 1.611696
            self.ce1.data[0] = -4.100414
            self.ce2.data[0] = -7.658106
        else:
            self.cv1 = self.cv2 = self.ce1 = self.ce2 = 0

    def forward(self, d):
        '''d: distance matrix, b*N*N. Returns energy, shape (b,).'''
        b, n = d.shape[:2]
        e = self.fe(d)
        a = self.fa(d)
        v = torch.ones(b, n, 1).to(d.device)  # uniform initial vertex features
        e = self.edge_conv1(v, e)
        e = self.angle_conv(e, a)

        # BatchNorm over the edge channel: (b,n,n,C) -> (b*n, C, n).
        e = e.view(b * n, n, -1).transpose(1, 2)
        e = self.bn1(e)
        e = e.transpose(1, 2)
        e = e.view(b, n, n, -1)

        v = self.vertex_conv(v, e)

        v = v.transpose(1, 2)
        v = self.bn2(v)
        v = v.transpose(1, 2)

        e = self.edge_conv2(v, e)

        v = self.fcv(v)
        v = torch.nn.LeakyReLU()(v)
        v = v.squeeze(-1)
        e = self.fce(e)
        e = torch.nn.LeakyReLU()(e)
        e = e.squeeze(-1)
        # Size-corrected sum of vertex and edge contributions.
        # (Removed an unused `poly` tensor that was allocated every call.)
        x = (1 + self.cv1 / n + self.cv2 / n / n) * v.mean(dim=-1) \
            + (1 + self.ce1 / n + self.ce2 / n / n) * e.mean(dim=(1, 2))
        return x
        
        
class GCNF(nn.Module):
    '''Force network: predicts per-atom force vectors from coordinates and
    a distance matrix.

    A smooth cosine cutoff gates the first edge features, then an angular
    convolution and two vertex convolutions (each followed by batch-norm)
    feed a force graph-attention layer that emits a b*N*3 tensor.
    '''

    def __init__(self, nfe0, nfa0, nfe1, nfe2, nfv1, nfv2):
        '''nfe0/nfa0: radial/angular feature counts;
        nfe1, nfe2: edge feature widths; nfv1, nfv2: vertex feature widths.'''
        super(GCNF, self).__init__()
        nfv0 = 1  # input vertex features (constant ones)
        self.fe = Bond_Feather_Extract(nfe0)
        self.fa = Angle_Feather_Extract(nfa0)
        self.edge_conv = EdgeConvLayer(nfv0, nfe0, nfe1)
        self.angle_conv = AngleConvLayer(nfe1, nfe2, nfa0)
        self.bn1 = nn.BatchNorm1d(nfe2)
        self.vertex_conv1 = VertexConvLayer(1, nfv1, nfe2)
        self.bn2 = nn.BatchNorm1d(nfv1)
        self.vertex_conv2 = VertexConvLayer(nfv1, nfv2, nfe2)
        self.bn3 = nn.BatchNorm1d(nfv2)
        self.force_conv = ForceGraphAttentionLayer(nfv2, nfe2)
        self.cutoff = 6.  # distance cutoff (units as in training data — TODO confirm)

    def forward(self, c, d):
        '''c: coordinates, b*N*3; d: distance matrix, b*N*N.
        Returns forces, b*N*3.'''
        b, n = d.shape[:2]

        # Smooth cosine cutoff weight in [0, 1]: 0 beyond self.cutoff and
        # on the diagonal (d == 0 gives cd == 1, explicitly zeroed below).
        # Fixed: dropped a pointless d.clone() — the comparison can read d
        # directly since cd was immediately reassigned anyway.
        beyond = d > self.cutoff
        cd = (1 + torch.cos(pi * d / self.cutoff)) / 2
        cd[beyond] = 0
        cd[cd == 1] = 0
        cd = cd.view(b, n, n, 1)

        e = self.fe(d)
        a = self.fa(d)
        v = torch.ones(b, n, 1).to(d.device)
        e = self.edge_conv(v, e)
        e = e * cd  # gate edge features by the cutoff

        e = self.angle_conv(e, a)
        # BatchNorm over the edge channel: (b,n,n,C) -> (b*n, C, n).
        e = e.view(b * n, n, -1).transpose(1, 2)
        e = self.bn1(e)
        e = e.transpose(1, 2)
        e = e.view(b, n, n, -1)

        v = self.vertex_conv1(v, e)
        v = v.transpose(1, 2)
        v = self.bn2(v)
        v = v.transpose(1, 2)

        v = self.vertex_conv2(v, e)
        v = v.transpose(1, 2)
        v = self.bn3(v)
        v = v.transpose(1, 2)

        f = self.force_conv(v, e, c)
        return f
        
        
if __name__ == '__main__':
    # Smoke test: a random batch of 10 systems with 4 atoms each.
    force_net = GCNF(32, 24, 10, 20, 30, 40)
    coords = torch.rand(10, 4, 3)
    dist = torch.rand(10, 4, 4) * 4
    for sample in dist:
        sample.fill_diagonal_(0.)  # self-distances must be zero
    forces = force_net(coords, dist)
    print(forces.size())

    energy_net = GCNE(18, 26, 30, 32)
    energies = energy_net(dist)
    print(energies.size())
