import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
    
   
class GraphConvLayer(nn.Module):
    """Graph convolution layer: tanh(A @ X @ W (+ b)) over a batch of graphs."""
    def __init__(self, n_in, n_out, use_bias=True):
        """n_in/n_out: input/output feature widths; use_bias: add a learnable bias."""
        super(GraphConvLayer, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.use_bias = use_bias
        # Weight matrix of the GCN layer
        self.weight = nn.Parameter(torch.Tensor(n_in, n_out))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_out))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()  # custom parameter initialization
 
    def reset_parameters(self):
        """Kaiming-uniform weights; bias initialized to zero."""
        nn.init.kaiming_uniform_(self.weight)
        if self.use_bias:
            nn.init.zeros_(self.bias)
 
    def forward(self, A, x):
        """A: (B, N, N) batched (pre-normalized) adjacency; x: (B, N, n_in).

        Returns (B, N, n_out).
        """
        support = torch.matmul(x, self.weight)
        out = torch.bmm(A, support)
        if self.use_bias:
            out = out + self.bias
        # torch.tanh replaces the deprecated F.tanh
        return torch.tanh(out)
    

class GraphAttentionLayer(nn.Module):
    """Graph attention layer; the edge weight A_ij is part of the attention input."""
    def __init__(self, n_in, n_out, use_bias=True):
        super().__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(n_in, n_out))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_out))
        else:
            self.register_parameter('bias', None)
        # Attention vector over the concatenation [h_i, h_j, adj_ij]
        self.a = nn.Parameter(torch.Tensor(2*n_out+1))
        # Learnable mixing weight for the residual combination below
        self.beta = nn.Parameter(torch.Tensor(1))
        self.reset_parameters()
 
    def reset_parameters(self):
        """Kaiming-uniform weights; small uniform attention/beta; zero bias."""
        nn.init.kaiming_uniform_(self.weight)
        self.a.data.uniform_(-0.1,0.1)
        self.beta.data.uniform_(-0.5,0.5)
        if self.use_bias:
            nn.init.zeros_(self.bias)
 
    def forward(self, adj, x):
        """adj: (B, N, N) adjacency; x: (B, N, n_in). Returns (B, N, n_out)."""
        B, N = x.shape[:2]
        h = torch.matmul(x, self.weight) #B*N*n_out
        # Pairwise concatenation [h_i, h_j, adj_ij] -> B*N*N*(2*n_out+1)
        a_input = torch.cat([h.repeat(1,1,N).view(B,N*N,self.n_out),
                             h.repeat(1,N,1),
                             adj.view(B,N*N,1)], dim=2).view(B,N,N,2*self.n_out+1)
        # F.leaky_relu (same default slope 0.01) avoids building a new
        # nn.LeakyReLU module on every forward call
        attention = F.softmax(F.leaky_relu(torch.matmul(a_input, self.a)), dim=2) #B*N*N
        h_prime = torch.bmm(attention, h) #B*N*n_out
        # Residual mix of attended and raw projected features
        if self.use_bias:
            out = self.beta*h_prime + h + self.bias
        else:
            out = self.beta*h_prime + h
        return F.elu(out)
    

class GraphAttentionLayer2(nn.Module):
    """Graph attention layer; attention scores are masked by adj after softmax."""
    def __init__(self, n_in, n_out, use_bias=True):
        super(GraphAttentionLayer2, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.use_bias = use_bias
        # Weight matrix of the layer
        self.weight = nn.Parameter(torch.Tensor(n_in, n_out))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_out))
        else:
            self.register_parameter('bias', None)
        # Attention vector over the concatenation [h_i, h_j]
        self.a = nn.Parameter(torch.Tensor(2*n_out))
        # Learnable mixing weight for the residual combination below
        self.beta = nn.Parameter(torch.Tensor(1))
        self.reset_parameters()  # custom parameter initialization
 
    def reset_parameters(self):
        """Kaiming-uniform weights; small uniform attention/beta; zero bias."""
        nn.init.kaiming_uniform_(self.weight)
        self.a.data.uniform_(-0.1,0.1)
        self.beta.data.uniform_(-0.5,0.5)
        if self.use_bias:
            nn.init.zeros_(self.bias)
 
    def forward(self, adj, x):
        """adj: (B, N, N) adjacency mask; x: (B, N, n_in). Returns (B, N, n_out)."""
        B, N = x.shape[:2]
        h = torch.matmul(x, self.weight) #B*N*n_out
        # Pairwise concatenation [h_i, h_j] -> B*N*N*(2*n_out)
        a_input = torch.cat([h.repeat(1,1,N).view(B,N*N,self.n_out),
                             h.repeat(1,N,1)], dim=2).view(B,N,N,2*self.n_out)
        # F.leaky_relu (same default slope 0.01) avoids building a new
        # nn.LeakyReLU module on every forward call
        attention = F.softmax(F.leaky_relu(torch.matmul(a_input, self.a)), dim=2) #B*N*N
        # NOTE(review): masking after softmax leaves rows unnormalized —
        # GraphAttentionLayer3 adds the renormalization; kept as-is here.
        attention = attention * adj
        h_prime = torch.bmm(attention, h) #B*N*n_out
        if self.use_bias:
            out = self.beta*h_prime + h + self.bias
        else:
            out = self.beta*h_prime + h
        return F.elu(out)
    

class GraphAttentionLayer3(nn.Module):
    """Graph attention layer; adj-masked attention renormalized per row,
    with a convex residual combination weighted by beta."""
    def __init__(self, n_in, n_out, use_bias=True):
        super(GraphAttentionLayer3, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.use_bias = use_bias
        # Weight matrix of the layer
        self.weight = nn.Parameter(torch.Tensor(n_in, n_out))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_out))
        else:
            self.register_parameter('bias', None)
        # Attention vector over the concatenation [h_i, h_j]
        self.a = nn.Parameter(torch.Tensor(2*n_out))
        # Convex-combination weight: out = beta*attended + (1-beta)*raw
        self.beta = nn.Parameter(torch.Tensor(1))
        self.reset_parameters()  # custom parameter initialization
 
    def reset_parameters(self):
        """Kaiming-uniform weights; beta initialized in [0, 1); zero bias."""
        nn.init.kaiming_uniform_(self.weight)
        self.a.data.uniform_(-0.1,0.1)
        self.beta.data.uniform_(0,1.)
        if self.use_bias:
            nn.init.zeros_(self.bias)
 
    def forward(self, adj, x):
        """adj: (B, N, N) adjacency mask; x: (B, N, n_in). Returns (B, N, n_out)."""
        B, N = x.shape[:2]
        h = torch.matmul(x, self.weight) #B*N*n_out
        # Pairwise concatenation [h_i, h_j] -> B*N*N*(2*n_out)
        a_input = torch.cat([h.repeat(1,1,N).view(B,N*N,self.n_out),
                             h.repeat(1,N,1)], dim=2).view(B,N,N,2*self.n_out)
        # F.leaky_relu (same default slope 0.01) avoids building a new
        # nn.LeakyReLU module on every forward call
        attention = F.softmax(F.leaky_relu(torch.matmul(a_input, self.a)), dim=2) #B*N*N
        attention = attention * adj
        # NOTE(review): a row of all-zero adj entries would divide by zero here —
        # assumes every node has at least one neighbor; confirm against callers.
        attention = attention / attention.sum(dim=-1, keepdim=True)
        h_prime = torch.bmm(attention, h) #B*N*n_out
        if self.use_bias:
            out = self.beta*h_prime + (1-self.beta)*h + self.bias
        else:
            out = self.beta*h_prime + (1-self.beta)*h
        return F.elu(out)
    
    

class GCN33(nn.Module):
    """Variant of model 25: dual-point features become integers in [0, 14)."""

    def __init__(self, nh=8, ng1=6, ng2=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.fc1 = nn.Linear(ng2, 1)
        self.fc2 = nn.Linear(3, nh2)
        self.fc3 = nn.Linear(nh2, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes; S: dual-graph node features;
        A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom, 60*3."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)

        # Pool the dual-node features surrounding each atom
        h = h[:, L].sum(dim=-2)          # b*N*ng2
        h = torch.tanh(self.fc1(h))

        # Per-element offset, kept positive via softplus
        e = F.one_hot(Z, num_classes=3).float()
        e = F.softplus(self.fc3(torch.tanh(self.fc2(e))))

        return (h + e).squeeze(-1).mean(dim=1)
        

class GCN34(nn.Module):
    """GCN33 with graph-attention layers instead of plain graph convolutions."""

    def __init__(self, nh=8, ng1=6, ng2=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphAttentionLayer(nh, ng1)
        self.gcn2 = GraphAttentionLayer(ng1, ng2)
        self.fc1 = nn.Linear(ng2, 1)
        self.fc2 = nn.Linear(3, nh2)
        self.fc3 = nn.Linear(nh2, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes; S: dual-graph node features;
        A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom, 60*3."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)

        # Pool the dual-node features surrounding each atom
        h = h[:, L].sum(dim=-2)          # b*N*ng2
        h = torch.tanh(self.fc1(h))

        # Per-element offset, kept positive via softplus
        e = F.one_hot(Z, num_classes=3).float()
        e = F.softplus(self.fc3(torch.tanh(self.fc2(e))))

        return (h + e).squeeze(-1).mean(dim=1)
        

class GCN34_3(nn.Module):
    """GCN34 with a third graph-attention layer.

    Bug fix: forward previously applied gcn2 twice and never used gcn3, which
    only ran at all because the default ng2 == ng3; any other ng3 broke it.
    """

    def __init__(self, nh=8, ng1=6, ng2=6, ng3=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14,nh)
        self.gcn1 = GraphAttentionLayer(nh, ng1)
        self.gcn2 = GraphAttentionLayer(ng1, ng2)
        self.gcn3 = GraphAttentionLayer(ng2, ng3)
        self.fc1 = nn.Linear(ng3, 1)
        self.fc2 = nn.Linear(3, nh2)
        self.fc3 = nn.Linear(nh2, 1)

    def forward(self, Z, S, A, L):
        '''Z: element classes
        S: dual-graph node features
        A: adjacency matrix of the C60 dual graph
        L: list of dual nodes adjacent to each C60 atom, 60*3'''
        b, M = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I)
        A = A + torch.eye(M,device=Z.device)
        D = torch.sum(A,dim=1).pow(-1).diag()
        A_hat = D @ A
        A_hat = A_hat.repeat(b,1).view(-1,M,M)
        
        x = self.f0(S)
        x = torch.tanh(x)
        x = self.gcn1(A_hat, x)
        x = self.gcn2(A_hat, x)
        x = self.gcn3(A_hat, x)  # was self.gcn2 again; gcn3 was never used
        
        # Pool the dual-node features surrounding each atom
        x = x[:,L] #b*N*3*ng3
        x = x.sum(dim=-2) #b*N*ng3
        x = self.fc1(x)
        x = torch.tanh(x)
        
        # Per-element offset, kept positive via softplus
        z = torch.nn.functional.one_hot(Z, num_classes=3).float()
        z = self.fc2(z)
        z = torch.tanh(z)
        z = self.fc3(z)
        z = F.softplus(z)
        
        x = x+z
        x = x.squeeze(-1)
        x = x.mean(dim=1)
        return x
        

class GCN35(nn.Module):
    """GCN33 with batch normalization; pooled graph features are concatenated
    with the element embedding instead of summed as separate heads."""

    def __init__(self, nh=8, ng1=6, ng2=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14,nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.fc1 = nn.Linear(ng2+3, nh2)
        self.fc2 = nn.Linear(3, 3)
        self.fc4 = nn.Linear(nh2, 1)
        self.bn = nn.BatchNorm1d(nh2)

    def forward(self, Z, S, A, L):
        '''Z: element classes
        S: dual-graph node features
        A: adjacency matrix of the C60 dual graph
        L: list of dual nodes adjacent to each C60 atom, 60*3'''
        b, M = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I)
        A = A + torch.eye(M,device=Z.device)
        D = torch.sum(A,dim=1).pow(-1).diag()
        A_hat = D @ A
        A_hat = A_hat.repeat(b,1).view(-1,M,M)
        
        x = self.f0(S)
        x = torch.tanh(x)
        x = self.gcn1(A_hat, x)
        x = self.gcn2(A_hat, x)
        
        # Pool the dual-node features surrounding each atom
        x = x[:,L] #b*N*3*ng2
        x = x.sum(dim=-2) #b*N*ng2
        
        z = torch.nn.functional.one_hot(Z, num_classes=3).float()
        z = self.fc2(z)
        z = torch.tanh(z) #3
        
        x = torch.cat([x,z],dim=-1) #ng2+3
        x = self.fc1(x)
        # Flatten (batch, nodes) for BatchNorm1d; the node count is taken from
        # the tensor itself rather than hard-coding 60, so other sizes work.
        N = x.shape[1]
        x = x.view(b*N,-1)
        x = self.bn(x)
        x = x.view(b,N,-1)
        x = torch.tanh(x)
        x = self.fc4(x)
        x = F.softplus(x)
        
        x = x.squeeze(-1)
        x = x.mean(dim=1)
        return x
        

class GCN36(nn.Module):
    """GCN33 with one extra graph-convolution layer."""

    def __init__(self, nh=8, ng1=6, ng2=6, ng3=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.gcn3 = GraphConvLayer(ng2, ng3)
        self.fc1 = nn.Linear(ng3, 1)
        self.fc2 = nn.Linear(3, nh2)
        self.fc3 = nn.Linear(nh2, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes; S: dual-graph node features;
        A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom, 60*3."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)
        h = self.gcn3(A_hat, h)

        # Pool the dual-node features surrounding each atom
        h = h[:, L].sum(dim=-2)          # b*N*ng3
        h = torch.tanh(self.fc1(h))

        # Per-element offset, kept positive via softplus
        e = F.one_hot(Z, num_classes=3).float()
        e = F.softplus(self.fc3(torch.tanh(self.fc2(e))))

        return (h + e).squeeze(-1).mean(dim=1)
        

class GCN37(nn.Module):
    """GCN33 with two extra graph-convolution layers."""

    def __init__(self, nh=8, ng1=6, ng2=6, ng3=6, ng4=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.gcn3 = GraphConvLayer(ng2, ng3)
        self.gcn4 = GraphConvLayer(ng3, ng4)
        self.fc1 = nn.Linear(ng4, 1)
        self.fc2 = nn.Linear(3, nh2)
        self.fc3 = nn.Linear(nh2, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes; S: dual-graph node features;
        A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom, 60*3."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)
        h = self.gcn3(A_hat, h)
        h = self.gcn4(A_hat, h)

        # Pool the dual-node features surrounding each atom
        h = h[:, L].sum(dim=-2)          # b*N*ng4
        h = torch.tanh(self.fc1(h))

        # Per-element offset, kept positive via softplus
        e = F.one_hot(Z, num_classes=3).float()
        e = F.softplus(self.fc3(torch.tanh(self.fc2(e))))

        return (h + e).squeeze(-1).mean(dim=1)
        

class GCN38(nn.Module):
    """GCN34 with one extra graph-attention layer."""

    def __init__(self, nh=8, ng1=6, ng2=6, ng3=6, nh2=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphAttentionLayer(nh, ng1)
        self.gcn2 = GraphAttentionLayer(ng1, ng2)
        self.gcn3 = GraphAttentionLayer(ng2, ng3)
        self.fc1 = nn.Linear(ng3, 1)
        self.fc2 = nn.Linear(3, nh2)
        self.fc3 = nn.Linear(nh2, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes; S: dual-graph node features;
        A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom, 60*3."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)
        h = self.gcn3(A_hat, h)

        # Pool the dual-node features surrounding each atom
        h = h[:, L].sum(dim=-2)          # b*N*ng3
        h = torch.tanh(self.fc1(h))

        # Per-element offset, kept positive via softplus
        e = F.one_hot(Z, num_classes=3).float()
        e = F.softplus(self.fc3(torch.tanh(self.fc2(e))))

        return (h + e).squeeze(-1).mean(dim=1)
        
        
class GCN39(nn.Module):
    """GCN36 without the element-type branch."""

    def __init__(self, nh=8, ng1=6, ng2=6, ng3=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.gcn3 = GraphConvLayer(ng2, ng3)
        self.fc1 = nn.Linear(ng3, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes (only used for its device here); S: dual-graph node
        features; A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)
        h = self.gcn3(A_hat, h)

        # Pool the dual-node features surrounding each atom, then score
        h = h[:, L].sum(dim=-2)          # b*N*ng3
        return self.fc1(h).squeeze(-1).mean(dim=1)
        
        
class GCN39_2(nn.Module):
    """GCN39 with two graph-convolution layers instead of three."""

    def __init__(self, nh=8, ng1=6, ng2=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.fc1 = nn.Linear(ng2, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes (only used for its device here); S: dual-graph node
        features; A: C60 dual-graph adjacency; L: dual nodes adjacent to each atom."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)

        # Pool the dual-node features surrounding each atom, then score
        h = h[:, L].sum(dim=-2)          # b*N*ng2
        return self.fc1(h).squeeze(-1).mean(dim=1)
        
        
class GCN40(nn.Module):
    """GCN36 using only the dual-graph information (no per-atom pooling)."""

    def __init__(self, nh=8, ng1=6, ng2=6, ng3=6):
        super().__init__()
        self.f0 = nn.Embedding(14, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.gcn3 = GraphConvLayer(ng2, ng3)
        self.fc1 = nn.Linear(ng3, 1)

    def forward(self, Z, S, A, L):
        """Z: element classes (only used for its device here); S: dual-graph node
        features; A: C60 dual-graph adjacency; L: accepted for interface
        compatibility but unused."""
        batch, n_dual = S.shape[0], A.shape[0]
        # Row-normalized adjacency with self-loops: D^-1 (A + I), tiled per sample
        A_loop = A + torch.eye(n_dual, device=Z.device)
        deg_inv = torch.sum(A_loop, dim=1).pow(-1).diag()
        A_hat = (deg_inv @ A_loop).repeat(batch, 1).view(-1, n_dual, n_dual)

        h = torch.tanh(self.f0(S))
        h = self.gcn1(A_hat, h)
        h = self.gcn2(A_hat, h)
        h = self.gcn3(A_hat, h)

        # Score each dual node directly and average
        return self.fc1(h).squeeze(-1).mean(dim=1)
