import torch
from torch import float32, nn
from torch.nn import init
import copy
import time
import torch.nn.functional as functional
import pynvml
from functionSim_config import *
import shelve


class SigmodLayer(nn.Module):
    """Stack of `depth` bias-free square linear layers (embedSize -> embedSize).

    ReLU is applied between consecutive layers but not after the last one.
    Input and output are both shaped (batch, nodes, embedSize).
    """

    def __init__(self, embedSize, depth):
        super(SigmodLayer, self).__init__()
        self.embedSize = embedSize
        self.depth = depth
        # Each linear layer is cast to float64 and moved to the configured
        # device, matching the double-precision parameters in functionSim.
        self.layers = nn.ModuleList(
            nn.Linear(embedSize, embedSize, bias=False).to(torch.float64).to(device)
            for _ in range(self.depth)
        )

    def forward(self, agg_embs):
        """Apply the linear stack; shape (B, N, E) in and out."""
        out = agg_embs
        last = self.depth - 1
        for idx, layer in enumerate(self.layers):
            out = layer(out)
            if idx != last:
                out = torch.relu(out)
        return out


def get_gpu_memory_usage(str="0", gpu_id=0):
    """Print the current memory usage of one GPU via NVML.

    Args:
        str: label prefixed to the printed line (parameter name kept for
             backward compatibility even though it shadows the builtin).
        gpu_id: index of the GPU to query; was previously hard-coded to 0.
    """
    pynvml.nvmlInit()
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        info = pynvml.nvmlDeviceGetMemoryInfo(handle)
        # info.used is in bytes; 1024**3 converts to GB (the old comment
        # claimed MB, which contradicted both the divisor and the output).
        used_memory = info.used / (1024 * 1024 * 1024)
        print("{}:  显存占用 {} GB".format(str, used_memory))
    finally:
        # Release NVML instead of leaking the initialized library handle;
        # nvmlInit() is called on every invocation, so this stays balanced.
        pynvml.nvmlShutdown()


class functionSim(nn.Module):
    """Siamese GNN that scores the similarity of two function graphs."""

    def __init__(self, layerSize, wordSize, embSize, depth):
        """
        :param layerSize: number of message-passing layers run in forward()
        :param wordSize: dimensionality of the raw node features
        :param embSize: hidden embedding dimensionality
        :param depth: depth of each SigmodLayer MLP stack
        """
        super(functionSim, self).__init__()
        self.layerSize = layerSize
        self.wordSize = wordSize
        self.embSize = embSize
        self.depth = depth
        # Upper bound on similarity-matrix block length (blocked cross-graph ops).
        self.max_simMatrix_lth = 100

        # Learned projection matrices.  Each backing tensor is created with its
        # final dtype/device *before* nn.Parameter wraps it: the old code also
        # called .to(device) on the finished Parameter, which returns a plain,
        # unregistered, non-leaf Tensor whenever an actual copy is required —
        # a silent way to lose gradients if device ever differs.
        def _param(rows, cols):
            # One learnable (rows, cols) float64 matrix on the target device.
            return torch.nn.Parameter(
                torch.empty(rows, cols, dtype=torch.float64, device=device))

        #   w --> w
        self.modifySize_word_to_word = _param(self.wordSize, self.wordSize)
        #   w --> e (one projection per input graph)
        self.modifySize_word_to_embedding1 = _param(self.wordSize, self.embSize)
        self.modifySize_word_to_embedding2 = _param(self.wordSize, self.embSize)
        #   e --> e
        self.modifySize_embedding_to_embedding = _param(self.embSize, self.embSize)
        #   3e --> e (for the concatenation variant of local aggregation)
        self.modifySize_3_embedding_to_embedding = _param(
            3 * self.embSize, self.embSize)

        for weight in (self.modifySize_word_to_word,
                       self.modifySize_word_to_embedding1,
                       self.modifySize_word_to_embedding2,
                       self.modifySize_embedding_to_embedding,
                       self.modifySize_3_embedding_to_embedding):
            init.xavier_uniform_(weight)

        # MLP stacks used by local (sig1-3, sig5) and cross-graph (sig4)
        # aggregation.  Module.to() returns the module itself, so chaining
        # here is equivalent to the old separate .to(device) calls.
        self.sig1 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig2 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig3 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig4 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)
        self.sig5 = SigmodLayer(embedSize=self.embSize, depth=self.depth).to(device)

    @staticmethod
    def div_with_small_value(n, d, eps=1e-8):
        # too small values are replaced by 1e-8 to prevent it from exploding.
        d = d * (d > eps).float() + eps * (d <= eps).float()
        return n / d

    # 用公式实现cosine
    def cosine_attention(self, v1, v2):
        """
        :param v1: (batch, len1, dim)
        :param v2: (batch, len2, dim)
        :return:  (batch, len1, len2)
        """
        # (batch, len1, len2)
        # permute 按照参数下标变换维度  batch dim  len2
        # bmm 矩阵乘
        a = torch.bmm(v1, v2.permute(0, 2, 1))

        # 求第二维度上面的L2范数
        v1_norm = v1.norm(p=2, dim=2, keepdim=True)  # (batch, len1, 1)
        v2_norm = v2.norm(p=2, dim=2, keepdim=True).permute(
            0, 2, 1)  # (batch, 1 , len2)
        # 对应位置上乘，相当于mul,支持广播
        d = v1_norm * v2_norm  # (batch,len1,len2)
        return self.div_with_small_value(a, d)

    def get_type_adj_weight(self, adj1, vtype1):
        '''
            对于一张图输出不同类型的邻接矩阵，
            同时邻居的权重做了归一化,
            temp_weigth，是结点的出度。
        '''
        adj1 = adj1.float()
        temp_weight = torch.sum(adj1, 2)
        #   BN---->BN1  [2,1164,1]
        adj1_neighbor_weight = temp_weight.unsqueeze(2)
        adj1_neighbor_weight += 1e-6
        type_adj1_with_weight = [copy.deepcopy(
            adj1), copy.deepcopy(adj1), copy.deepcopy(adj1)]
        """
            进行矩阵的预处理,将邻接矩阵拆分成只包含同一种类型邻居的矩阵
            B*N*N  * （n*1） 这里是算术乘法  可以得到不同类型的邻居矩阵，实际上应该是n*3
            B*N*N  / （b*N） 这里是算术除法，得到不同的权重
        """
        #   BN3----B3N
        tran_vtype = vtype1.transpose(1, 2)
        for i in range(3):
            #   BNN   * B1N   #NN * 1N
            type_adj1_with_weight[i] = adj1 * tran_vtype[:, i, :].unsqueeze(1)
            #   BNN    /   BN1  BN不行要BN1   维度不变
            type_adj1_with_weight[i] /= adj1_neighbor_weight  # N1
        #   将不同的tensor合起来
        return torch.stack((type_adj1_with_weight[0], type_adj1_with_weight[1], type_adj1_with_weight[2]))

    def cross_interaction_test(self, feature_p, feature_h, corss_weight_edges):
        """
            类似建立边，
            计算一次最开始的相似值，确定两张图之间最相似的结点之间的边
            后续计算为更新后的特征值，增加最开始建立边之间的信息传递

            能否实现
                空间上可能放不下。。。。改成稀疏矩阵*矩阵  问了下gpt，他说pytorch支持这样操作
                现在就是softmaxed_tensor 应该传进来的值
        """
        # b*n*d b*m*n
        a_h = torch.matmul(corss_weight_edges, feature_h)
        a_mean_h = feature_p-a_h
        torch.cuda.empty_cache()
        return a_mean_h

    def cross_interaction_functionSimGMN(self, feature_p, feature_h, r):
        """
            跨图结点
            聚合另一张图同类型结点的信息 + 加个attention，关注更加相似的跨图结点
            这个是只关注最相似的节点，同时不使用和本图的差异，而是直接返回
        """
        lthp = len(feature_p[0])
        m = lthp//r + 1
        feature_p_blocks = []
        for i in range(m):
            feature_p_blocks.append(feature_p[:, i*r:(i+1)*r])
        temp = []
        for i in range(m):
            # 获得分块的attention,一个点和另一张图所有点的cos相似度
            block_attention = self.cosine_attention(
                feature_p_blocks[i], feature_h)  # 2*400*576
            # 开到20就已经相当大了
            # 这个softmax是不是不全。。。也就是说最后的权重之和不是1
            softmaxed_tensor = functional.softmax(
                block_attention*10, dim=2)  # 2*400*576
            a_h = torch.matmul(softmaxed_tensor, feature_h)
            temp.append(a_h)
        a_mean_h = temp[0]  # 2*400*100
        for i in range(1, len(temp)):  # 将相似矩阵合并起来
            a_mean_h = torch.cat((a_mean_h, temp[i]), dim=1)  # 2*563*100
        # a_mean_h=feature_p-a_mean_h
        torch.cuda.empty_cache()
        return a_mean_h

    def cross_interaction_GMN(self, feature_p, feature_h, r):
        """
        跨图结点
        聚合另一张图同类型结点的信息 + 加个attention，关注更加相似的跨图结点

        相似性矩阵计算，拆小块,设置最大的对比个数max_simMatrix_lth=100

        预期是同类型的结点进行跨图交互，使用结点本身的信息。node_self_value
        先用最简单的方法实现一边吧
        """
        lthp = len(feature_p[0])
        m = lthp//r + 1
        feature_p_blocks = []
        for i in range(m):
            feature_p_blocks.append(feature_p[:, i*r:(i+1)*r])
        temp = []
        for i in range(m):
            # 获得分块的attention,一个点和另一张图所有点的cos相似度
            block_attention = self.cosine_attention(
                feature_p_blocks[i], feature_h)  # 2*400*576
            #   两遍能增大重要节点的权重  --->  error，两边，会趋同
            # softmaxed_tensor = functional.softmax(block_attention, dim=2)  #2*400*576
            # softmaxed_tensor = functional.softmax(softmaxed_tensor, dim=2)  #2*400*576

            # 按照之前的思路，应该是整体放大block_attention的值，这样使用softmax便能增大权重
            softmaxed_tensor = functional.softmax(
                block_attention*10, dim=2)  # 2*400*576

            # m*dim  r*m   r*dim
            a_h = torch.matmul(softmaxed_tensor, feature_h)
            temp.append(a_h)
        a_mean_h = temp[0]  # 2*400*100
        for i in range(1, len(temp)):  # 将相似矩阵合并起来
            a_mean_h = torch.cat((a_mean_h, temp[i]), dim=1)  # 2*563*100
        a_mean_h = feature_p-a_mean_h
        torch.cuda.empty_cache()
        return a_mean_h

    def cross_interaction(self, feature_p, feature_h, r):
        """
            跨图交互的另一个实现方法，应该是MGMN的交互方式
        """
        lthp = len(feature_p[0])
        m = lthp//r + 1
        feature_p_blocks = []
        for i in range(m):
            feature_p_blocks.append(feature_p[:, i*r:(i+1)*r])
        temp = []
        for i in range(m):
            # 获得分块的attention,一个点和另一张图所有点的cos相似度
            block_attention = self.cosine_attention(
                feature_p_blocks[i], feature_h)  # 2*400*576
            # 获得另一个矩阵按权重处理后的特征值   感觉是b*100*x*dim
            a_h = feature_h.unsqueeze(
                1) * block_attention.unsqueeze(3)  # 2*400*576*100
            # 将其压缩成一个图的特征值
            att_mean_h = self.div_with_small_value(a_h.sum(dim=2), block_attention.sum(
                dim=2, keepdim=True))  # (batch, len_p, dim) #2*400*100
            temp.append(att_mean_h)
        a_mean_h = temp[0]  # 2*400*100
        for i in range(1, len(temp)):  # 将相似矩阵合并起来
            a_mean_h = torch.cat((a_mean_h, temp[i]), dim=1)  # 2*563*100
        return a_mean_h

    def local_graph_inf_agg(self, type_adj1_with_weight, g1_hidden_embed):
        """
            本图不同类型节点聚合
                同类邻居聚合----》类型聚合
            类型邻接矩阵 * 特征矩阵  3*b*N*N  *   b*N*d  =  3*b*N*e
                能得到一个节点不同类型邻居的聚合向量
            使用拼接的方式结合
        """
        tempValue = torch.matmul(type_adj1_with_weight, g1_hidden_embed)
        localValue = self.sig1(tempValue[0])
        staticValue = self.sig2(tempValue[1])
        dynamicValue = self.sig3(tempValue[2])
        """
            直接加有点呆,预期维度为b*n*e
            后续改成拼接的方式
        """
        local_graph_final_value = localValue+staticValue+dynamicValue
        local_graph_final_value = self.sig5(local_graph_final_value)
        # concatTensor=torch.cat([localValue,staticValue,dynamicValue],dim=2)
        # local_graph_final_value=torch.matmul(concatTensor,self.modifySize_3_embedding_to_embedding)
        return local_graph_final_value

    def get_cross_edges(self, att1, att2, vtype1, vtype2, r):
        """Build the weighted cross-graph edge matrix between two graphs.

        input:
            att1: (b, m, d) node features of graph 1
            att2: (b, n, d) node features of graph 2
            vtype1 / vtype2: (b, m, 3) / (b, n, 3) one-hot node-type matrices
            r: row-block size bounding the similarity matrix held in memory
        output:
            (b, m, n) matrix whose entry (i, j) weights the edge from node j
            of graph 2 to node i of graph 1 (softmax over j, restricted to
            same-type node pairs)
        """
        vtype2 = vtype2.transpose(1, 2)
        lthp = len(att1[0])
        lthh = len(att2[0])
        # Number of row blocks.  Note this yields an extra empty block when
        # lthp is an exact multiple of r (harmless: it contributes 0 rows).
        m = lthp//r + 1
        feature_p_blocks = []
        type1_blocks = []
        for i in range(m):
            feature_p_blocks.append(att1[:, i*r:(i+1)*r])
            type1_blocks.append(vtype1[:, i*r:(i+1)*r])
        temp = []
        for i in range(m):
            # Variant A (active): mask similarities to same-type pairs
            # *before* the softmax.  Recorded scores: 0.353 0.556 0.470 0.919
            block_attention = self.cosine_attention(feature_p_blocks[i], att2)
            # (b, rb, 3) @ (b, 3, n) -> (b, rb, n): 1 where node types match.
            type_neighbor = torch.matmul(type1_blocks[i], vtype2)
            type_block_attention = torch.mul(block_attention, type_neighbor)
            # The *10 scaling sharpens the softmax toward the best matches.
            softmaxed_tensor = functional.softmax(
                type_block_attention*10, dim=2)  # b*r*n
            temp.append(softmaxed_tensor)

            # Variant B (disabled): softmax first, then apply the type mask.
            # Recorded scores: 0.338 0.552 0.441 0.908
            # block_attention=self.cosine_attention(feature_p_blocks[i],att2)
            # softmaxed_tensor = functional.softmax(block_attention*10, dim=2)  #b*r*n
            # type_neighbor = torch.matmul(type1_blocks[i],vtype2)
            # type_block_attention = torch.mul(softmaxed_tensor,type_neighbor)
            # temp.append(type_block_attention)
        res = temp[0]  # 2*400*n
        for i in range(1, len(temp)):  # merge the per-block rows back together
            res = torch.cat((res, temp[i]), dim=1)  # 2*563*n

        # Maximum number of edge weights to keep per row.
        # NOTE(review): with k = min(100000, lthh), k always equals lthh for
        # realistic graph sizes, so the topk/scatter below reproduces `res`
        # unchanged — lower k to actually sparsify.  Confirm intent.
        k = 100000
        k = min(k, lthh)

        # Take the k largest weights along each row...
        top_values, top_indices = torch.topk(res, k, dim=2)
        # ...and scatter them into a zero matrix of the same shape,
        # zeroing everything outside the top-k.
        result_matrix = torch.zeros_like(res)
        result_matrix.scatter_(2, top_indices, top_values)
        torch.cuda.empty_cache()
        return result_matrix

    def gene_graph_embedding(self, functionsEmbedding):
        """Pool node embeddings (B, N, E) into graph-level embeddings (B, E).

        The pooling operator is selected by the module-level config value
        `graph_embeddings_method` ("mean", "max" or "sum").

        :raises ValueError: for an unrecognized method name.  The old code
            used `assert False`, which is silently stripped under `python -O`
            and would have let an unknown config fall through returning an
            unbound name.
        """
        if graph_embeddings_method == "mean":
            return torch.mean(functionsEmbedding, dim=1)
        if graph_embeddings_method == "max":
            # torch.max over a dim returns (values, indices); keep the values.
            return torch.max(functionsEmbedding, dim=1)[0]
        if graph_embeddings_method == "sum":
            return functionsEmbedding.sum(1)
        raise ValueError(
            "unimplemented graph aggregation method: {!r}".format(
                graph_embeddings_method))

    def gene_cross_embedding(self, g1_hidden_embed, g2_hidden_embed, r, cross_edge1):
        """Compute the cross-graph interaction embedding for graph 1.

        Dispatches on the config value `cross_interaction_name`; the selected
        interaction result is passed through the sig4 MLP stack.

        :param g1_hidden_embed: (B, N1, E), e.g. (1, 256, 128)
        :param g2_hidden_embed: (B, N2, E), e.g. (1, 376, 128)
        :param r: row-block size for the blocked similarity computations
        :param cross_edge1: precomputed (B, N1, N2) cross-edge weights, used
            only by the "cross_edge" variant
        :raises ValueError: for an unknown interaction name.  Previously an
            unknown name fell through all branches and crashed later with a
            confusing NameError on the unbound local.
        """
        if cross_interaction_name == "GMN":
            cross_graph_embed = self.cross_interaction_GMN(
                g1_hidden_embed, g2_hidden_embed, r)
        elif cross_interaction_name == "MGMN":
            cross_graph_embed = self.cross_interaction(
                g1_hidden_embed, g2_hidden_embed, r)
        elif cross_interaction_name == "functionGMN":
            cross_graph_embed = self.cross_interaction_functionSimGMN(
                g1_hidden_embed, g2_hidden_embed, r)
        elif cross_interaction_name == "cross_edge":
            cross_graph_embed = self.cross_interaction_test(
                g1_hidden_embed, g2_hidden_embed, cross_edge1)
        else:
            raise ValueError(
                "unknown cross_interaction_name: {!r}".format(
                    cross_interaction_name))
        return self.sig4(cross_graph_embed)

    def forward(self, adj1, att1, vtype1, adj2, att2, vtype2):
        """Compare two function graphs and return their cosine similarity.

        :param adj1, adj2: (B, N, N) adjacency matrices
        :param att1, att2: (B, N, d) node feature matrices
        :param vtype1, vtype2: (B, N, 3) one-hot node-type matrices
        :return: (B,) cosine similarity between the two graph embeddings
        """
        # Row-block size for blocked similarity computations: shrink the
        # blocks for very large graphs to bound peak memory.
        r = 1000 if len(adj1[0]) < 4000 and len(adj2[0]) < 4000 else 200

        def _save_pair(layer_tag, emb_x, emb_y):
            # Persist per-layer embeddings for offline analysis (e.g. to
            # validate the effect of cross-graph interaction).
            base = r"/home/cyw/projects/function_sim_project/all_data/functionEmbedding/{}_layer_{}_cross_{}_hete_{}"
            for axis, emb in (("x", emb_x), ("y", emb_y)):
                # `with` already closes the shelf; the old explicit
                # file.close() inside the block was redundant.
                with shelve.open(base.format(axis, layer_tag,
                                             use_cross_interaction,
                                             use_heterogeous)) as file:
                    file["embedding"] = emb.cpu().detach().numpy()

        if use_heterogeous:
            type_adj1_with_weight = self.get_type_adj_weight(adj1, vtype1)
            type_adj2_with_weight = self.get_type_adj_weight(adj2, vtype2)

        if use_cross_interaction:
            cross_edge1 = self.get_cross_edges(att1, att2, vtype1, vtype2, r)
            cross_edge2 = self.get_cross_edges(att2, att1, vtype2, vtype1, r)

        torch.cuda.empty_cache()

        # Project raw word-level features into the hidden space: (B, N, d) @
        # (d, E) -> (B, N, E).  (The old code first allocated zero tensors of
        # this shape and immediately overwrote them — dead allocations,
        # removed, along with the unused lth/batch locals.)
        g1_hidden_embed = torch.matmul(att1, self.modifySize_word_to_embedding1)
        g2_hidden_embed = torch.matmul(att2, self.modifySize_word_to_embedding2)

        if save_function_embedding:
            _save_pair("0", att1, att2)                          # raw inputs
            _save_pair("1", g1_hidden_embed, g2_hidden_embed)    # after projection

        for i in range(self.layerSize):
            # Node self information: (B, N, E) @ (E, E) -> (B, N, E).
            node_self_value1 = torch.matmul(
                g1_hidden_embed, self.modifySize_embedding_to_embedding)
            node_self_value2 = torch.matmul(
                g2_hidden_embed, self.modifySize_embedding_to_embedding)

            # Same-graph neighbor aggregation: heterogeneous (per node type)
            # or homogeneous (single adjacency pass through sig1).
            if use_heterogeous:
                local_value1 = self.local_graph_inf_agg(
                    type_adj1_with_weight, g1_hidden_embed)
                local_value2 = self.local_graph_inf_agg(
                    type_adj2_with_weight, g2_hidden_embed)
            else:
                local_value1 = self.sig1(torch.matmul(adj1, g1_hidden_embed))
                local_value2 = self.sig1(torch.matmul(adj2, g2_hidden_embed))

            # Combine self + local (+ optional cross-graph) information.
            # This collapses the old four-way branch: the heterogeneous flag
            # only selects which local value feeds the sum.
            if use_cross_interaction:
                cross1 = self.gene_cross_embedding(
                    g1_hidden_embed, g2_hidden_embed, r, cross_edge1)
                cross2 = self.gene_cross_embedding(
                    g2_hidden_embed, g1_hidden_embed, r, cross_edge2)
                g1_hidden_embed = torch.sigmoid(
                    node_self_value1 + local_value1 + cross1)
                g2_hidden_embed = torch.sigmoid(
                    node_self_value2 + local_value2 + cross2)
            else:
                g1_hidden_embed = torch.sigmoid(node_self_value1 + local_value1)
                g2_hidden_embed = torch.sigmoid(node_self_value2 + local_value2)

            # Save every layer's embeddings (layers 0/1 are saved above).
            if save_function_embedding:
                _save_pair(i + 2, g1_hidden_embed, g2_hidden_embed)

        # Pool node embeddings into graph-level vectors and compare.
        last_embed = self.gene_graph_embedding(g1_hidden_embed)
        last_embed1 = self.gene_graph_embedding(g2_hidden_embed)
        return torch.cosine_similarity(last_embed, last_embed1)  # (B,)


if __name__ == "__main__":
    # Smoke-test: construct the model with tiny dimensions.
    funcModel = functionSim(layerSize=2, wordSize=8, embSize=4, depth=2)
