import torch 
from torch import float32, nn
from torch.nn import init
import copy
import torch.nn.functional as functional
from functionSim_config import *
import shelve

class SigmodLayer(nn.Module):
    """A stack of ``depth`` bias-free Linear(embedSize -> embedSize) layers.

    ReLU is applied between consecutive layers but NOT after the last one.
    Input and output are both (batch, N, embedSize), in float64 on ``device``
    (``device`` comes from the star-imported functionSim_config).
    """

    def __init__(self, embedSize, depth):
        super(SigmodLayer, self).__init__()
        self.embedSize = embedSize
        self.depth = depth
        # ModuleList registers every sub-layer so parameters are tracked.
        self.layers = nn.ModuleList(
            nn.Linear(embedSize, embedSize, bias=False).to(torch.float64).to(device)
            for _ in range(depth)
        )

    def forward(self, agg_embs):
        """Run the input (B, N, E) through all layers; returns (B, N, E)."""
        out = agg_embs
        last = self.depth - 1
        for idx, layer in enumerate(self.layers):
            out = layer(out)
            if idx != last:
                out = torch.relu(out)
        return out

class functionSim(nn.Module):
    """Graph-based function-similarity model.

    Takes a function graph (adjacency matrix, node feature matrix, node-type
    matrix), runs ``layerSize`` rounds of neighbor aggregation — heterogeneous
    (per-type) when ``use_heterogeous`` is set in functionSim_config — pools
    the node embeddings into one graph embedding, and maps it through a small
    MLP + sigmoid to a score in (0, 1).
    """

    def __init__(self, layerSize, wordSize, embSize, depth):
        """
        :param layerSize: number of message-passing rounds
        :param wordSize: dimensionality of the raw node features
        :param embSize: dimensionality of the hidden node embeddings
        :param depth: depth of each SigmodLayer MLP
        """
        super(functionSim, self).__init__()
        self.layerSize = layerSize
        self.wordSize = wordSize
        self.embSize = embSize
        self.depth = depth
        self.max_simMatrix_lth = 100

        # BUGFIX: the original wrapped each tensor in nn.Parameter and then
        # called .to(device) on the Parameter; .to() can return a plain
        # non-leaf Tensor, in which case the weight is never registered with
        # the module (missing from .parameters() / the optimizer). Creating
        # the tensor on the target device first and wrapping it exactly once
        # keeps it a proper registered Parameter.
        #   w --> w projection
        self.modifySize_word_to_word = nn.Parameter(
            torch.empty(self.wordSize, self.wordSize, dtype=torch.float64, device=device))
        #   w --> e projections
        self.modifySize_word_to_embedding1 = nn.Parameter(
            torch.empty(self.wordSize, self.embSize, dtype=torch.float64, device=device))
        self.modifySize_word_to_embedding2 = nn.Parameter(
            torch.empty(self.wordSize, self.embSize, dtype=torch.float64, device=device))
        #   e --> e projection
        self.modifySize_embedding_to_embedding = nn.Parameter(
            torch.empty(self.embSize, self.embSize, dtype=torch.float64, device=device))

        init.xavier_uniform_(self.modifySize_word_to_word)
        init.xavier_uniform_(self.modifySize_word_to_embedding1)
        init.xavier_uniform_(self.modifySize_word_to_embedding2)
        init.xavier_uniform_(self.modifySize_embedding_to_embedding)

        # Scoring head: embSize -> 32 -> 1 -> sigmoid.
        self.fc1 = nn.Linear(self.embSize, 32, dtype=torch.double).to(device)
        self.fc2 = nn.Linear(32, 1, dtype=torch.double).to(device)
        self.sigmoid = nn.Sigmoid().to(device)

        # Multi-layer fully-connected nets, one per neighbor type plus a
        # shared combiner (sig5). NOTE(review): sig4 is never used by
        # forward() — kept for interface/checkpoint compatibility.
        self.sig1 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig2 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig3 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig4 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig5 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)

    @staticmethod
    def div_with_small_value(n, d, eps=1e-8):
        """Divide ``n`` by ``d`` with the denominator clamped to >= eps.

        Equivalent to the original mask-and-add formulation (values <= eps
        become exactly eps) but keeps ``d``'s dtype — the old float() mask
        silently introduced a float32 intermediate into double math.
        """
        return n / d.clamp(min=eps)

    def cosine_attention(self, v1, v2):
        """Pairwise cosine similarity between two batches of vectors.

        :param v1: (batch, len1, dim)
        :param v2: (batch, len2, dim)
        :return: (batch, len1, len2)
        """
        # Dot products: (batch, len1, dim) x (batch, dim, len2).
        a = torch.bmm(v1, v2.permute(0, 2, 1))
        # L2 norms along the feature dimension.
        v1_norm = v1.norm(p=2, dim=2, keepdim=True)                  # (batch, len1, 1)
        v2_norm = v2.norm(p=2, dim=2, keepdim=True).permute(0, 2, 1)  # (batch, 1, len2)
        # Outer product of the norms via broadcasting: (batch, len1, len2).
        d = v1_norm * v2_norm
        return self.div_with_small_value(a, d)

    def get_type_adj_weight(self, adj1, vtype1):
        """Split one adjacency matrix into three type-specific, normalized ones.

        :param adj1: (batch, N, N) adjacency matrix
        :param vtype1: (batch, N, 3) one-hot node-type matrix (3 types)
        :return: (3, batch, N, N) stacked per-type adjacency, each row
                 normalized by the source node's out-degree
        """
        adj1 = adj1.float()
        # Out-degree per node, kept as (batch, N, 1) for broadcasting; the
        # epsilon avoids division by zero for nodes with no neighbors.
        out_degree = torch.sum(adj1, 2).unsqueeze(2) + 1e-6
        # (batch, N, 3) -> (batch, 3, N) so each type becomes a column mask.
        type_mask = vtype1.transpose(1, 2)
        # The original pre-filled this list with three deepcopies of adj1
        # that were all overwritten — build the results directly instead.
        per_type = []
        for i in range(3):
            # Keep only edges whose target node is of type i:
            # (batch, N, N) * (batch, 1, N), then normalize each row.
            masked = adj1 * type_mask[:, i, :].unsqueeze(1)
            per_type.append(masked / out_degree)
        return torch.stack((per_type[0], per_type[1], per_type[2]))

    def local_graph_inf_agg(self, type_adj1_with_weight, g1_hidden_embed):
        """Aggregate neighbors per type, then combine the three type views.

        (3, b, N, N) @ (b, N, e) -> (3, b, N, e): one aggregated embedding
        per neighbor type. Each slice goes through its own MLP; the results
        are summed (a concat-based combine is a noted future improvement)
        and passed through the shared combiner MLP. Returns (b, N, e).
        """
        tempValue = torch.matmul(type_adj1_with_weight, g1_hidden_embed)
        localValue = self.sig1(tempValue[0])
        staticValue = self.sig2(tempValue[1])
        dynamicValue = self.sig3(tempValue[2])
        combined = localValue + staticValue + dynamicValue
        return self.sig5(combined)

    def gene_graph_embedding(self, functionsEmbedding):
        """Pool node embeddings (B, N, E) into one graph embedding (B, E).

        The pooling operator is chosen by ``graph_embeddings_method`` from
        the config: "mean", "max" or "sum".
        """
        if graph_embeddings_method == "mean":
            return torch.mean(functionsEmbedding, dim=1)
        if graph_embeddings_method == "max":
            return torch.max(functionsEmbedding, dim=1)[0]
        if graph_embeddings_method == "sum":
            return functionsEmbedding.sum(1)
        # Raise instead of `assert False` so the check survives `python -O`.
        raise NotImplementedError(
            "graph aggregation method not implemented: %r" % (graph_embeddings_method,))

    def forward(self, adj1, att1, vtype1):
        """Score one batch of function graphs.

        :param adj1: (batch, N, N) adjacency matrix
        :param att1: (batch, N, wordSize) node feature matrix
        :param vtype1: (batch, N, 3) one-hot node-type matrix (3 types)
        :return: (batch, 1) score in (0, 1)
        """
        if use_heterogeous:
            type_adj1_with_weight = self.get_type_adj_weight(adj1, vtype1)

        torch.cuda.empty_cache()
        # Project raw word features into the hidden space: (B, N, E).
        # (The original also allocated a throwaway zeros tensor here that was
        # immediately overwritten, plus unused r/batch/lth1 locals — removed.)
        g1_hidden_embed = torch.matmul(att1, self.modifySize_word_to_embedding1)
        for _ in range(self.layerSize):
            # Node self-information: (B, N, E) x (E, E) -> (B, N, E).
            node_self_value1 = torch.matmul(g1_hidden_embed, self.modifySize_embedding_to_embedding)
            if use_heterogeous:
                # Heterogeneous aggregation over the three typed adjacencies.
                neighbor_value = self.local_graph_inf_agg(type_adj1_with_weight, g1_hidden_embed)
            else:
                # Homogeneous aggregation: (B, N, N) x (B, N, E) -> (B, N, E).
                neighbor_value = self.sig1(torch.matmul(adj1, g1_hidden_embed))
            # Combine self and neighborhood information.
            g1_hidden_embed = torch.sigmoid(node_self_value1 + neighbor_value)

        last_embed = self.gene_graph_embedding(g1_hidden_embed)
        last_embed = self.fc1(last_embed)
        last_embed = self.fc2(last_embed)
        return self.sigmoid(last_embed)
            
if __name__ == "__main__":
    # Smoke-construct the model with small dimensions.
    funcModel = functionSim(layerSize=2, wordSize=8, embSize=4, depth=2)