# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 16:03:23 2021

@author: shijie
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import node_subgraph

class TopKPool(nn.Module):
    """Keep the top-k fraction of nodes, scored by a learned linear projection.

    Nodes are scored with a scalar linear layer over the node feature "a";
    the subgraph induced by the highest-scoring nodes is returned.
    """
    def __init__(self, node_dim, topk=0.5):
        """node_dim: width of node feature "a". topk: fraction of nodes kept."""
        super(TopKPool, self).__init__()
        self.topk = topk
        self.l1 = nn.Linear(node_dim, 1, bias=True)

    def forward(self, g):
        # One scalar score per node.
        scores = self.l1(g.ndata["a"]).squeeze()
        # Guard against k == 0 on very small graphs: topk(0).values[-1]
        # would raise an index error. Always keep at least one node.
        k = max(1, int(self.topk * scores.nelement()))
        # Threshold at the k-th largest score; ties at the threshold are all
        # kept, so slightly more than k nodes may survive.
        threshold = scores.topk(k).values[k - 1].item()
        keep = scores >= threshold
        # store_ids=True keeps the original node/edge ids on the subgraph.
        return node_subgraph(g, keep, store_ids=True)

def onehot_node(nodes, num_class=93):
    """Return a float one-hot matrix (len(nodes) x num_class) for node labels."""
    return F.one_hot(nodes.long(), num_classes=num_class).float()

class GCN_NORM(nn.Module):
    """Gated graph convolution layer.

    Each message concatenates (dst, src, edge) features; a sigmoid branch
    gates a linear branch (GLU-style), messages are averaged per node, and a
    linear self-loop term is added. Updates node feature "n1" in place.
    """
    def __init__(self, node_dim, edge_dim, out_dim, activation=None):
        super(GCN_NORM, self).__init__()
        self.activation = activation
        msg_width = 2 * node_dim + edge_dim
        self.L1 = nn.Linear(msg_width, out_dim)
        self.L2 = nn.Linear(msg_width, out_dim)
        self.sig = nn.Sigmoid()
        self.LC = nn.Linear(node_dim, out_dim)

    def message(self, edges):
        # Message = [dst node feat | src node feat | edge feat].
        parts = (edges.dst["n1"], edges.src["n1"], edges.data["e1"])
        return {"m": torch.cat(parts, dim=1)}

    def reduce(self, nodes):
        mail = nodes.mailbox["m"]
        # Sigmoid gate modulates the linearly transformed messages.
        gated = self.sig(self.L2(mail)) * self.L1(mail)
        out = self.LC(nodes.data["n1"]) + gated.mean(1)
        if self.activation:
            out = self.activation(out)
        return {"n1": out}

    def forward(self, g):
        # Mutates g.ndata["n1"] and returns the updated features.
        g.update_all(self.message, self.reduce)
        return g.ndata["n1"]
    
class GCN(nn.Module):
    """Gated graph convolution layer (same scheme as GCN_NORM / GCN1).

    Messages concatenate (dst, src, edge) features, a sigmoid gate modulates
    a linear branch, messages are averaged, and a linear self-loop is added.
    """
    def __init__(self, node_dim, edge_dim, out_dim, activation=None):
        super(GCN, self).__init__()
        self.activation = activation
        self.L1 = nn.Linear(2 * node_dim + edge_dim, out_dim)
        self.L2 = nn.Linear(2 * node_dim + edge_dim, out_dim)
        self.sig = nn.Sigmoid()
        self.LC = nn.Linear(node_dim, out_dim)

    def message(self, edges):
        x = torch.cat((edges.dst["n1"], edges.src["n1"], edges.data["e1"]), dim=1)
        return {"m": x}

    def reduce(self, nodes):
        x = (self.sig(self.L2(nodes.mailbox["m"]))) * self.L1(nodes.mailbox["m"])
        x = self.LC(nodes.data["n1"]) + torch.mean(x, 1)
        # Bug fix: activation defaults to None; the original called it
        # unconditionally, which raised TypeError when no activation was given.
        if self.activation:
            x = self.activation(x)
        return {"n1": x}

    def forward(self, g):
        g.update_all(self.message, self.reduce)
        # Bug fix: the original returned None; every sibling GCN class in this
        # file returns the updated node features, so do the same here.
        return g.ndata["n1"]
        
class GCN1(nn.Module):
    """Gated graph convolution (GLU-style gate, mean aggregation, self-loop).

    Updates node feature "n1" in place and returns the new values.
    """
    def __init__(self, node_dim, edge_dim, out_dim, activation=None):
        super(GCN1, self).__init__()
        self.activation = activation
        in_width = 2 * node_dim + edge_dim
        self.L1 = nn.Linear(in_width, out_dim)
        self.L2 = nn.Linear(in_width, out_dim)
        self.sig = nn.Sigmoid()
        self.LC = nn.Linear(node_dim, out_dim)

    def message(self, edges):
        # Concatenate destination, source and edge features into one message.
        feats = (edges.dst["n1"], edges.src["n1"], edges.data["e1"])
        return {"m": torch.cat(feats, dim=1)}

    def reduce(self, nodes):
        mail = nodes.mailbox["m"]
        gated = self.sig(self.L2(mail)) * self.L1(mail)
        updated = self.LC(nodes.data["n1"]) + gated.mean(1)
        if self.activation:
            updated = self.activation(updated)
        return {"n1": updated}

    def forward(self, g):
        g.update_all(self.message, self.reduce)
        return g.ndata["n1"]
    
class RESGCN(nn.Module):
    """Gated graph convolution with an optional residual connection.

    The residual path is enabled only when the input and output widths match
    (node_dim == out_dim). Updates node feature "n1" in place.
    """
    def __init__(self, node_dim, edge_dim, out_dim, activation=None):
        super(RESGCN, self).__init__()
        # Residual addition is only shape-valid when widths agree.
        self.ifres = node_dim == out_dim
        self.activation = activation
        in_width = 2 * node_dim + edge_dim
        self.L1 = nn.Linear(in_width, out_dim)
        self.L2 = nn.Linear(in_width, out_dim)
        self.sig = nn.Sigmoid()
        self.LC = nn.Linear(node_dim, out_dim)

    def message(self, edges):
        feats = (edges.dst["n1"], edges.src["n1"], edges.data["e1"])
        return {"m": torch.cat(feats, dim=1)}

    def reduce(self, nodes):
        mail = nodes.mailbox["m"]
        # Sigmoid-gated linear transform of the messages, averaged per node.
        gated = self.sig(self.L2(mail)) * self.L1(mail)
        updated = self.LC(nodes.data["n1"]) + gated.mean(1)
        if self.activation:
            updated = self.activation(updated)
        if self.ifres:
            updated = updated + nodes.data["n1"]
        return {"n1": updated}

    def forward(self, g):
        g.update_all(self.message, self.reduce)
        return g.ndata["n1"]
    
class Net(nn.Module):
    """Five-layer RESGCN scorer.

    Per-layer node features are stacked as channels, mixed by a learned 1x1
    channel combination, max-pooled per graph, then scored by a 2-layer MLP.
    """
    def __init__(self, nodes_dim=[54,128,128,128,128,128], edge_dim=5, hidden_dim=64, activation=F.relu):
        super(Net, self).__init__()
        self.GCN1 = RESGCN(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = RESGCN(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = RESGCN(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = RESGCN(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = RESGCN(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        self.L1 = nn.Linear(nodes_dim[-1], hidden_dim)
        self.L2 = nn.Linear(hidden_dim, 1)
        # Learned weighting over the 5 stacked layer outputs (skip connections).
        self.Lconv = nn.Linear(len(nodes_dim) - 1, 1, bias=False)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph with node feature "n1" and edge feature "e1".
        p: per-graph node counts used to split the batched node tensor.
        Returns a (batch, 1) score tensor.
        """
        # Each GCN layer updates g1.ndata["n1"] in place and returns it.
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        self.GCN5(g1)  # layer-5 output read back from g1.ndata["n1"]

        # Stack skip connections: (Natoms, feature, 5 channels).
        h = torch.stack((x1, x2, x3, x4, g1.ndata["n1"]), 2)

        # Mix channels down to one. Bug fix: squeeze only the channel axis —
        # a bare .squeeze() would also drop a size-1 atom dimension and break
        # the pooling below for single-node graphs.
        h = self.Lconv(h).squeeze(-1)  # (Natoms, feature)

        # Per-graph max pooling over atoms.
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        # MLP head; no activation on the final score.
        h = self.act(self.L1(h))
        return self.L2(h)

class Net5(nn.Module):
    """Five-layer RESGCN scorer (duplicate of Net, kept for experiments).

    Layer outputs are stacked as channels, combined by a learned 1x1 channel
    mix, max-pooled per graph, then scored by a 2-layer MLP.
    """
    def __init__(self, nodes_dim=[54,128,128,128,128,128], edge_dim=5, hidden_dim=64, activation=F.relu):
        super(Net5, self).__init__()
        self.GCN1 = RESGCN(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = RESGCN(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = RESGCN(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = RESGCN(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = RESGCN(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        self.L1 = nn.Linear(nodes_dim[-1], hidden_dim)
        self.L2 = nn.Linear(hidden_dim, 1)
        # Weighted combination over the 5 layer outputs (skip connections).
        self.Lconv = nn.Linear(len(nodes_dim) - 1, 1, bias=False)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph ("n1" node / "e1" edge features).
        p: per-graph node counts. Returns a (batch, 1) score tensor.
        """
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        self.GCN5(g1)  # final layer output read from g1.ndata["n1"]

        # (Natoms, feature, 5) stack of skip connections.
        h = torch.stack((x1, x2, x3, x4, g1.ndata["n1"]), 2)

        # Bug fix: squeeze only the channel axis; bare .squeeze() would also
        # remove a size-1 atom dimension for single-node graphs.
        h = self.Lconv(h).squeeze(-1)

        # Max pool atoms within each graph of the batch.
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        h = self.act(self.L1(h))
        return self.L2(h)  # (batch, 1)
    
class Net6(nn.Module):
    """Five-layer RESGCN scorer with a deeper (3-layer) MLP head.

    Layer outputs are stacked as channels, combined by a learned 1x1 channel
    mix, max-pooled per graph, then scored by a 3-layer MLP.
    """
    def __init__(self, nodes_dim=[54,128,128,128,128,128], edge_dim=5, hidden_dims=[80, 80], activation=F.relu):
        super(Net6, self).__init__()
        self.GCN1 = RESGCN(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = RESGCN(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = RESGCN(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = RESGCN(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = RESGCN(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        self.L1 = nn.Linear(nodes_dim[-1], hidden_dims[0])
        self.L2 = nn.Linear(hidden_dims[0], hidden_dims[1])
        self.L3 = nn.Linear(hidden_dims[1], 1)
        # Weighted combination over the 5 layer outputs (skip connections).
        self.Lconv = nn.Linear(len(nodes_dim) - 1, 1, bias=False)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph ("n1" node / "e1" edge features).
        p: per-graph node counts. Returns a (batch, 1) score tensor.
        """
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        self.GCN5(g1)  # final layer output read from g1.ndata["n1"]

        # (Natoms, feature, 5) stack of skip connections.
        h = torch.stack((x1, x2, x3, x4, g1.ndata["n1"]), 2)

        # Bug fix: squeeze only the channel axis; bare .squeeze() would also
        # drop a size-1 atom dimension for single-node graphs.
        h = self.Lconv(h).squeeze(-1)

        # Max pool atoms within each graph of the batch.
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        # Three-layer MLP head; no activation on the final score.
        h = self.act(self.L1(h))
        h = self.act(self.L2(h))
        return self.L3(h)
    
class Net1(nn.Module):
    """Five-layer RESGCN scorer (duplicate of Net, kept for experiments).

    Layer outputs are stacked as channels, combined by a learned 1x1 channel
    mix, max-pooled per graph, then scored by a 2-layer MLP.
    """
    def __init__(self, nodes_dim=[54,128,128,128,128,128], edge_dim=5, hidden_dim=64, activation=F.relu):
        super(Net1, self).__init__()
        self.GCN1 = RESGCN(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = RESGCN(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = RESGCN(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = RESGCN(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = RESGCN(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        self.L1 = nn.Linear(nodes_dim[-1], hidden_dim)
        self.L2 = nn.Linear(hidden_dim, 1)
        # Weighted combination over the 5 layer outputs (skip connections).
        self.Lconv = nn.Linear(len(nodes_dim) - 1, 1, bias=False)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph ("n1" node / "e1" edge features).
        p: per-graph node counts. Returns a (batch, 1) score tensor.
        """
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        self.GCN5(g1)  # final layer output read from g1.ndata["n1"]

        # (Natoms, feature, 5) stack of skip connections.
        h = torch.stack((x1, x2, x3, x4, g1.ndata["n1"]), 2)

        # Bug fix: squeeze only the channel axis; bare .squeeze() would also
        # drop a size-1 atom dimension for single-node graphs.
        h = self.Lconv(h).squeeze(-1)

        # Max pool atoms within each graph of the batch.
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        h = self.act(self.L1(h))
        return self.L2(h)  # (batch, 1)
    
class Net2(nn.Module):
    """Five-layer GCN_NORM scorer.

    All layer outputs are concatenated feature-wise (skip connections),
    max-pooled per graph, then scored by a 2-layer MLP. Dead experiment code
    (runtime-evaluated triple-quoted blocks) removed from the original.
    """
    def __init__(self, nodes_dim=[93,128,256,128,64,48], edge_dim=2, hidden_dim=32, activation=F.relu):
        super(Net2, self).__init__()
        self.GCN1 = GCN_NORM(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = GCN_NORM(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = GCN_NORM(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = GCN_NORM(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = GCN_NORM(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        # Input width is the sum of all layer output widths (concatenation).
        self.L1 = nn.Linear(sum(nodes_dim[1:]), hidden_dim)
        self.L2 = nn.Linear(hidden_dim, 1)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph ("n1" node / "e1" edge features).
        p: per-graph node counts. Returns a (batch, 1) score tensor.
        """
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        self.GCN5(g1)  # final layer output read from g1.ndata["n1"]

        # Concatenate all layer outputs along features, then max-pool per graph.
        h = torch.cat((x1, x2, x3, x4, g1.ndata["n1"]), 1)
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        h = self.act(self.L1(h))
        return self.L2(h)
        #self.GCN6(g1) 
        
class Net3(nn.Module):
    """Five-layer RESGCN scorer with narrower (64-wide) GCN layers.

    Layer outputs are stacked as channels, combined by a learned 1x1 channel
    mix, max-pooled per graph, then scored by a 2-layer MLP.
    """
    def __init__(self, nodes_dim=[54,64,64,64,64,64], edge_dim=5, hidden_dim=128, activation=F.relu):
        super(Net3, self).__init__()
        self.GCN1 = RESGCN(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = RESGCN(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = RESGCN(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = RESGCN(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = RESGCN(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        self.L1 = nn.Linear(nodes_dim[-1], hidden_dim)
        self.L2 = nn.Linear(hidden_dim, 1)
        # Weighted combination over the 5 layer outputs (skip connections).
        self.Lconv = nn.Linear(len(nodes_dim) - 1, 1, bias=False)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph ("n1" node / "e1" edge features).
        p: per-graph node counts. Returns a (batch, 1) score tensor.
        """
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        self.GCN5(g1)  # final layer output read from g1.ndata["n1"]

        # (Natoms, feature, 5) stack of skip connections.
        h = torch.stack((x1, x2, x3, x4, g1.ndata["n1"]), 2)

        # Bug fix: squeeze only the channel axis; bare .squeeze() would also
        # drop a size-1 atom dimension for single-node graphs.
        h = self.Lconv(h).squeeze(-1)

        # Max pool atoms within each graph of the batch.
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        h = self.act(self.L1(h))
        return self.L2(h)

class Net4(nn.Module):
    """Seven-layer GCN1 scorer.

    All layer outputs are concatenated feature-wise (skip connections),
    max-pooled per graph, then scored by a 2-layer MLP. Dead experiment code
    (runtime-evaluated triple-quoted blocks) removed from the original.
    """
    def __init__(self, nodes_dim=[54, 128, 256, 128, 64, 48, 48, 48], edge_dim=5, hidden_dim=32, activation=F.relu):
        super(Net4, self).__init__()
        self.GCN1 = GCN1(nodes_dim[0], edge_dim, nodes_dim[1], activation=activation)
        self.GCN2 = GCN1(nodes_dim[1], edge_dim, nodes_dim[2], activation=activation)
        self.GCN3 = GCN1(nodes_dim[2], edge_dim, nodes_dim[3], activation=activation)
        self.GCN4 = GCN1(nodes_dim[3], edge_dim, nodes_dim[4], activation=activation)
        self.GCN5 = GCN1(nodes_dim[4], edge_dim, nodes_dim[5], activation=activation)
        self.GCN6 = GCN1(nodes_dim[5], edge_dim, nodes_dim[6], activation=activation)
        self.GCN7 = GCN1(nodes_dim[6], edge_dim, nodes_dim[7], activation=activation)
        # Input width is the sum of all layer output widths (concatenation).
        self.L1 = nn.Linear(sum(nodes_dim[1:]), hidden_dim)
        self.L2 = nn.Linear(hidden_dim, 1)
        self.act = activation

    def forward(self, g1, p):
        """g1: batched DGL graph ("n1" node / "e1" edge features).
        p: per-graph node counts. Returns a (batch, 1) score tensor.
        """
        x1 = self.GCN1(g1)
        x2 = self.GCN2(g1)
        x3 = self.GCN3(g1)
        x4 = self.GCN4(g1)
        x5 = self.GCN5(g1)
        x6 = self.GCN6(g1)
        self.GCN7(g1)  # final layer output read from g1.ndata["n1"]

        # Concatenate all layer outputs along features, then max-pool per graph.
        h = torch.cat((x1, x2, x3, x4, x5, x6, g1.ndata["n1"]), 1)
        h = torch.stack([torch.max(x, 0).values for x in torch.split(h, p)], 0)

        h = self.act(self.L1(h))
        return self.L2(h)
    
def testNet():
    """Smoke test: build 50 random graphs, batch them, run a Net6 forward pass."""
    from numpy import random
    import dgl

    nodes = 10          # nodes per graph
    sort_nodes = 54     # number of node label classes
    edge_dim = 4
    n_neibors = 4       # outgoing edges per node

    net = Net6(edge_dim=edge_dim)
    GS = []
    p = []
    for _ in range(50):
        u, v = [], []
        for i in range(nodes):
            for _ in range(n_neibors):
                u.append(i)
                # Pick a random neighbor distinct from the source node.
                a = random.choice(nodes)
                while a == i:
                    a = random.choice(nodes)
                v.append(a)
        g = dgl.graph((u, v))
        # Random node labels one-hot encoded, random edge features.
        g.ndata["n1"] = onehot_node(torch.tensor(random.uniform(0, sort_nodes, nodes)), sort_nodes)
        g.edata["e1"] = torch.tensor(random.uniform(0, 10, (nodes * n_neibors, edge_dim))).float()
        GS.append(g)
        p.append(nodes)

    G = dgl.batch(GS)
    pre_label = net(G, p)
    print(pre_label)
    
if __name__ == "__main__":
    testNet()




