import torch
from torch import float32, nn
from torch.nn import init
import copy
from config import *


class SigmodLayer(nn.Module):
    """A stack of `depth` bias-free square linear maps (embedSize -> embedSize)
    in float64, with a ReLU between consecutive layers but no activation
    after the last one.
    """

    def __init__(self, embedSize, depth):
        super(SigmodLayer, self).__init__()
        self.embedSize = embedSize
        self.depth = depth
        # One float64 linear layer per level, moved to the configured device.
        self.layers = nn.ModuleList([
            nn.Linear(embedSize, embedSize, bias=False).to(torch.float64).to(device)
            for _ in range(depth)
        ])

    def forward(self, agg_embs):
        """Run `agg_embs` through the linear stack and return the result."""
        out = agg_embs
        for level, layer in enumerate(self.layers):
            out = layer(out)
            # ReLU everywhere except after the final layer.
            if level < self.depth - 1:
                out = torch.relu(out)
        return out


class RGCN(nn.Module):
    """Relational GCN over a pair of graphs; returns the cosine similarity
    of their pooled graph-level embeddings.

    Per graph the model expects:
        adj   -- (B, N, N) adjacency matrix
        att   -- (B, N, wordSize) node feature matrix
        vtype -- (B, N, 3) one-hot node-type matrix (three neighbor types:
                 local / static / dynamic, per the aggregation below)
    """

    def __init__(self, layerSize, wordSize, embSize):
        super(RGCN, self).__init__()
        self.layerSize = layerSize      # number of propagation rounds
        self.wordSize = wordSize        # input feature dimension
        self.embSize = embSize          # hidden embedding dimension
        self.depth = 1                  # depth of each SigmodLayer MLP
        self.max_simMatrix_lth = 100

        # NOTE(review): the original wrote nn.Parameter(t).to(device); when
        # `device` is a GPU, `.to()` on a Parameter returns a plain non-leaf
        # Tensor, so the weight was never registered with the module or the
        # optimizer. Allocate on the target device first, then wrap.
        def _param(rows, cols):
            # Uninitialized float64 weight; xavier-initialized below.
            return nn.Parameter(
                torch.empty(rows, cols, dtype=torch.float64, device=device))

        #   w --> w
        self.modifySize_word_to_word = _param(self.wordSize, self.wordSize)
        #   w --> e (one projection per input graph)
        self.modifySize_word_to_embedding1 = _param(self.wordSize, self.embSize)
        self.modifySize_word_to_embedding2 = _param(self.wordSize, self.embSize)
        #   e --> e
        self.modifySize_embedding_to_embedding = _param(self.embSize, self.embSize)
        #   3e --> e
        self.modifySize_3_embedding_to_embedding = _param(
            3 * self.embSize, self.embSize)

        init.xavier_uniform_(self.modifySize_word_to_word)
        init.xavier_uniform_(self.modifySize_word_to_embedding1)
        init.xavier_uniform_(self.modifySize_word_to_embedding2)
        init.xavier_uniform_(self.modifySize_embedding_to_embedding)
        init.xavier_uniform_(self.modifySize_3_embedding_to_embedding)

        # Multi-layer fully-connected nets for per-type aggregation.
        # sig4 is currently unused in forward() but is kept so the
        # parameter / state_dict layout stays backward-compatible.
        self.sig1 = SigmodLayer(embedSize=self.embSize, depth=self.depth)
        self.sig2 = SigmodLayer(embedSize=self.embSize, depth=self.depth)
        self.sig3 = SigmodLayer(embedSize=self.embSize, depth=self.depth)
        self.sig4 = SigmodLayer(embedSize=self.embSize, depth=self.depth)
        self.sig5 = SigmodLayer(embedSize=self.embSize, depth=self.depth)
        self.sig1.to(device)
        self.sig2.to(device)
        self.sig3.to(device)
        self.sig4.to(device)
        self.sig5.to(device)

    @staticmethod
    def div_with_small_value(n, d, eps=1e-8):
        """Element-wise n / d with the denominator floored at `eps`.

        Prevents division blow-up; `d` is expected to be non-negative
        (a product of L2 norms at the call site).
        """
        # Equivalent to the original mask arithmetic:
        # d * (d > eps) + eps * (d <= eps)  ==  max(d, eps)  for any d.
        return n / d.clamp(min=eps)

    def cosine_attention(self, v1, v2):
        """Pairwise cosine similarity between two batches of vectors.

        :param v1: (batch, len1, dim)
        :param v2: (batch, len2, dim)
        :return:   (batch, len1, len2)
        """
        # Raw dot products: (batch, len1, dim) x (batch, dim, len2).
        a = torch.bmm(v1, v2.permute(0, 2, 1))

        # L2 norms along the feature dimension.
        v1_norm = v1.norm(p=2, dim=2, keepdim=True)                    # (batch, len1, 1)
        v2_norm = v2.norm(p=2, dim=2, keepdim=True).permute(0, 2, 1)   # (batch, 1, len2)
        # Outer product of the norms via broadcasting: (batch, len1, len2).
        d = v1_norm * v2_norm
        return self.div_with_small_value(a, d)

    def local_graph_inf_agg(self, type_adj1_with_weight, g1_hidden_embed):
        """Aggregate neighbor information per edge type, then combine.

        :param type_adj1_with_weight: (3, B, N, N) normalized per-type adjacency
        :param g1_hidden_embed:       (B, N, E) node embeddings
        :return:                      (B, N, E) aggregated embeddings
        """
        # (3, B, N, N) @ (B, N, E) -> (3, B, N, E): per-type neighbor sums.
        tempValue = torch.matmul(type_adj1_with_weight, g1_hidden_embed)
        localValue = self.sig1(tempValue[0])
        staticValue = self.sig2(tempValue[1])
        dynamicValue = self.sig3(tempValue[2])
        # Plain sum of the three type-specific views; the original author
        # planned to switch to concatenation (3E -> E) later.
        local_graph_final_value = localValue + staticValue + dynamicValue
        return self.sig5(local_graph_final_value)

    def gene_graph_embedding(self, functionsEmbedding):
        """Pool node embeddings (B, N, E) into one graph embedding (B, E)."""
        if graph_embeddings_method == "mean":
            return torch.mean(functionsEmbedding, dim=1)
        if graph_embeddings_method == "max":
            return torch.max(functionsEmbedding, dim=1)[0]
        if graph_embeddings_method == "sum":
            return functionsEmbedding.sum(1)
        # Was `assert False`, which is stripped under `python -O`.
        raise ValueError(
            "unsupported graph_embeddings_method: %r" % (graph_embeddings_method,))

    def get_type_adj_weight(self, adj1, vtype1):
        """Split an adjacency matrix into three per-type adjacency matrices
        with out-degree-normalized edge weights.

        :param adj1:   (B, N, N) adjacency matrix
        :param vtype1: (B, N, 3) one-hot node-type matrix
        :return:       (3, B, N, N) stacked per-type weighted adjacency
        """
        # float64 to match the model's parameters and hidden embeddings;
        # the original cast to float32, which breaks the float64 matmul in
        # local_graph_inf_agg.
        adj1 = adj1.to(torch.float64)
        # Out-degree per node, (B, N, 1); epsilon guards empty rows.
        degree = torch.sum(adj1, 2).unsqueeze(2) + 1e-6
        # (B, N, 3) -> (B, 3, N) so each type's column mask broadcasts over rows.
        tran_vtype = vtype1.transpose(1, 2).to(torch.float64)
        # Masking columns keeps only edges pointing at nodes of type i;
        # dividing by the source out-degree normalizes the weights.
        # (The original deep-copied adj1 three times only to overwrite it.)
        per_type = [
            (adj1 * tran_vtype[:, i, :].unsqueeze(1)) / degree
            for i in range(3)
        ]
        return torch.stack(per_type)

    def forward(self, adj1, att1, vtype1, adj2, att2, vtype2):
        """Embed both graphs and return their cosine similarity, shape (B,).

        :param adj1/adj2:     (B, N, N) adjacency matrices
        :param att1/att2:     (B, N, wordSize) node feature matrices
        :param vtype1/vtype2: (B, N, 3) one-hot node-type matrices
        """
        type_adj1_with_weight = self.get_type_adj_weight(adj1, vtype1)
        type_adj2_with_weight = self.get_type_adj_weight(adj2, vtype2)
        torch.cuda.empty_cache()

        # Project word-level features into the hidden embedding space.
        # (The original pre-allocated float32 zero tensors here and
        # immediately overwrote them, and computed unused lth/r values;
        # that dead code is removed.)
        g1_hidden_embed = torch.matmul(att1, self.modifySize_word_to_embedding1)
        g2_hidden_embed = torch.matmul(att2, self.modifySize_word_to_embedding2)

        for _ in range(self.layerSize):
            # Self term: (B, N, E) @ (E, E) -> (B, N, E).
            node_self_value1 = torch.matmul(
                g1_hidden_embed, self.modifySize_embedding_to_embedding)
            node_self_value2 = torch.matmul(
                g2_hidden_embed, self.modifySize_embedding_to_embedding)

            # Typed-neighbor aggregation for each graph.
            local_hete_graph_final_value = self.local_graph_inf_agg(
                type_adj1_with_weight, g1_hidden_embed)
            local_hete_graph_final_value2 = self.local_graph_inf_agg(
                type_adj2_with_weight, g2_hidden_embed)

            g1_hidden_embed = torch.sigmoid(
                node_self_value1 + local_hete_graph_final_value)
            g2_hidden_embed = torch.sigmoid(
                node_self_value2 + local_hete_graph_final_value2)

        # Graph-level pooling, then batched cosine similarity -> (B,).
        last_embed = self.gene_graph_embedding(g1_hidden_embed)
        last_embed1 = self.gene_graph_embedding(g2_hidden_embed)
        return torch.cosine_similarity(last_embed, last_embed1)


if __name__ == "__main__":
    # Smoke check: the model can be constructed with a small configuration.
    model = RGCN(layerSize=2, wordSize=8, embSize=4)
