import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import FloatTensor
from torch.nn.parameter import Parameter
from .PairNorm import *

class batch_tGCN(nn.Module):
    """Batched GCN whose adjacency graph is *learned* from the input.

    A pairwise similarity graph is built from a linear projection of the
    node features (``(xW+b)(xW+b)^T``), normalized (softmax or squared
    row-normalization depending on ``args.gnorm``), given self-loops, and
    symmetrically degree-normalized.  ``layer_num`` GCN layers are then
    applied on that graph and the node dimension is max-pooled, yielding
    one vector per sample.
    """

    def __init__(self, in_feat_dim, out_feat_dim, layer_num, g_pool='max', args=None):
        """
        Args:
            in_feat_dim:  feature dimension of each input node.
            out_feat_dim: feature dimension produced by every GCN layer.
            layer_num:    number of GCN layers (>= 1).
            g_pool:       pooling mode over nodes; only 'max' is implemented
                          in forward().
            args:         required namespace with boolean ``xavier_uniform_bias``
                          and ``pairnorm`` flags and a ``gnorm`` string.
        """
        super(batch_tGCN, self).__init__()
        self.in_feat_dim = in_feat_dim
        self.out_feat_dim = out_feat_dim
        self.g_pool = g_pool
        self.args = args

        # Projection used to build the pairwise similarity graph.
        self.transforms = nn.ParameterList([])
        self.transforms_bias = nn.ParameterList([])
        self.transforms.append(
            Parameter(FloatTensor(self.in_feat_dim, self.in_feat_dim), requires_grad=True))
        self.transforms_bias.append(self._new_bias(self.in_feat_dim))

        # First GCN layer: in_feat_dim -> out_feat_dim.
        self.gcn_layers = nn.ParameterList([])
        self.gcn_layers_bias = nn.ParameterList([])
        self.gcn_layers.append(
            Parameter(FloatTensor(self.in_feat_dim, self.out_feat_dim), requires_grad=True))
        self.gcn_layers_bias.append(self._new_bias(self.out_feat_dim))

        # Remaining layers: out_feat_dim -> out_feat_dim.
        self.layer_num = layer_num
        for i in range(self.layer_num - 1):
            self.gcn_layers.append(
                Parameter(FloatTensor(self.out_feat_dim, self.out_feat_dim), requires_grad=True))
            self.gcn_layers_bias.append(self._new_bias(self.out_feat_dim))

        # LayerNorm after every hidden layer unless pairnorm is requested.
        # NOTE(review): when args.pairnorm is True no normalization module is
        # created at all, yet forward() still indexes self.LNs — presumably a
        # PairNorm (star-imported above) was meant to go here; confirm.
        if not self.args.pairnorm:
            self.LNs = nn.ModuleList()
            for i in range(self.layer_num - 1):
                self.LNs.append(nn.LayerNorm(self.out_feat_dim))

        self.activate = nn.LeakyReLU(0.1)
        if self.args.gnorm == 'softmax':
            self.norm = nn.Softmax(dim=2)
        self._init_params()

    def _new_bias(self, dim):
        """Create a bias Parameter.

        Xavier-uniform init requires a 2-D tensor, so the bias is shaped
        (1, dim) in that mode and (dim,) otherwise; broadcasting makes both
        equivalent when added.
        """
        if self.args.xavier_uniform_bias:
            return Parameter(FloatTensor(1, dim), requires_grad=True)
        return Parameter(FloatTensor(dim), requires_grad=True)

    def _init_params(self):
        """Xavier-init all weights; biases get Xavier or zeros per args."""
        torch.nn.init.xavier_uniform_(self.transforms[0].data)
        if self.args.xavier_uniform_bias:
            print("xavier uniform bias")
            torch.nn.init.xavier_uniform_(self.transforms_bias[0].data)
        else:
            torch.nn.init.constant_(self.transforms_bias[0], 0)

        for i in range(self.layer_num):
            torch.nn.init.xavier_uniform_(self.gcn_layers[i].data)
            if self.args.xavier_uniform_bias:
                print("xavier uniform bias")
                torch.nn.init.xavier_uniform_(self.gcn_layers_bias[i].data)
            else:
                torch.nn.init.constant_(self.gcn_layers_bias[i], 0)

    def forward(self, x):
        """Run the learned-graph GCN.

        Args:
            x: float tensor of shape (batch, n_nodes, in_feat_dim).

        Returns:
            Tensor of shape (batch, out_feat_dim) — node dimension max-pooled.
        """
        # Both similarity factors use the same projection, so compute it once
        # (the original computed the identical matmul twice).
        trans = torch.matmul(x, self.transforms[0]) + self.transforms_bias[0]
        graph = torch.matmul(trans, trans.permute(0, 2, 1))  # (B, n, n)

        if self.args.gnorm == 'softmax':
            graph = self.norm(graph)
        else:
            # Square, then row-normalize so each row sums to 1.
            graph = torch.pow(graph, 2)
            graph = graph / graph.sum(2, keepdim=True)

        # Add self-loops and symmetrically normalize: D^-1/2 (A + I) D^-1/2.
        n = graph.size(1)
        # Fix: build the identity on the graph's own device/dtype instead of
        # hard-coded .cuda(), so the module also runs on CPU (and the correct
        # GPU in multi-device setups).
        identity_matrix = torch.eye(n, device=graph.device, dtype=graph.dtype)
        graph = graph + identity_matrix
        degree_matrix = torch.diag_embed(torch.pow(graph.sum(2), -0.5))
        graph = torch.matmul(torch.matmul(degree_matrix, graph), degree_matrix)

        # Hidden GCN layers with LayerNorm + LeakyReLU.
        for i in range(self.layer_num - 1):
            z = torch.matmul(torch.matmul(graph, x), self.gcn_layers[i]) + self.gcn_layers_bias[i]
            # NOTE(review): raises AttributeError when args.pairnorm is True
            # (self.LNs is never created in that mode) — see __init__.
            z = self.LNs[i](z)
            z = self.activate(z)
            x = z

        # Final layer has no norm/activation.
        z = torch.matmul(torch.matmul(graph, x), self.gcn_layers[-1]) + self.gcn_layers_bias[-1]

        # g_pool == 'max': pool over the node dimension.
        z, _ = torch.max(z, 1)
        return z

