'''
Created on July 1, 2020
PyTorch Implementation of KGIN
@author: Tinglin Huang (tinglin.huang@zju.edu.cn)
'''
__author__ = "huangtinglin"

import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_mean


class Aggregator(nn.Module):
    """
    Relational path-aware convolution layer.

    One layer performs (a) relation-weighted mean aggregation over KG
    neighbors for every entity, and (b) intent-attentive aggregation of
    interacted items for every user.
    """
    def __init__(self, n_users, n_factors):
        super(Aggregator, self).__init__()
        self.n_users = n_users
        self.n_factors = n_factors

    def forward(self, entity_emb, user_emb, latent_emb,
                edge_index, edge_type, interact_mat,
                weight, disen_weight_att):
        num_entities, dim = entity_emb.shape

        # ---- KG aggregation for entities ----
        heads, tails = edge_index
        # edge_type 0 is the user-item "interact" relation, so KG relation
        # ids live in [1, n_relations) and are remapped to [0, n_relations-1).
        relation_emb = weight[edge_type - 1]
        messages = entity_emb[tails] * relation_emb  # [n_edges, dim]
        entity_agg = scatter_mean(src=messages, index=heads,
                                  dim_size=num_entities, dim=0)

        # ---- user -> latent-factor (intent) attention ----
        # How important each intent is to each user.
        logits = torch.mm(user_emb, latent_emb.t())
        score = nn.Softmax(dim=1)(logits).unsqueeze(-1)  # [n_users, n_factors, 1]

        # ---- user aggregation over interacted entities ----
        user_agg = torch.sparse.mm(interact_mat, entity_emb)  # [n_users, dim]
        disen_weight = torch.mm(nn.Softmax(dim=-1)(disen_weight_att), weight)
        disen_weight = disen_weight.expand(self.n_users, self.n_factors, dim)
        # Residual-style refinement with the intent-weighted relation mix.
        user_agg = user_agg + user_agg * (disen_weight * score).sum(dim=1)

        return entity_agg, user_agg


class GraphConv(nn.Module):
    """
    Graph Convolutional Network.

    Stacks `n_hops` Aggregator layers over the KG + interaction graph and
    computes an independence penalty between the latent intents
    (rows of `disen_weight_att`).
    """
    def __init__(self, channel, n_hops, n_users,
                 n_factors, n_relations, interact_mat,
                 ind, node_dropout_rate=0.5, mess_dropout_rate=0.1):
        super(GraphConv, self).__init__()

        self.convs = nn.ModuleList()
        self.interact_mat = interact_mat
        self.n_relations = n_relations
        self.n_users = n_users
        self.n_factors = n_factors
        self.node_dropout_rate = node_dropout_rate
        self.mess_dropout_rate = mess_dropout_rate
        self.ind = ind  # independence criterion: 'mi', 'distance', else cosine

        self.temperature = 0.2  # temperature for the mutual-information score

        initializer = nn.init.xavier_uniform_
        # Relation embeddings; the user-item "interact" relation is excluded.
        weight = initializer(torch.empty(n_relations - 1, channel))
        self.weight = nn.Parameter(weight)  # [n_relations - 1, in_channel]

        # Attention of each latent intent over the KG relations.
        disen_weight_att = initializer(torch.empty(n_factors, n_relations - 1))
        self.disen_weight_att = nn.Parameter(disen_weight_att)

        for _ in range(n_hops):
            self.convs.append(Aggregator(n_users=n_users, n_factors=n_factors))

        self.dropout = nn.Dropout(p=mess_dropout_rate)  # mess dropout

    def _edge_sampling(self, edge_index, edge_type, rate=0.5):
        """Randomly keep a `rate` fraction of the KG edges (node dropout).

        edge_index: [2, n_edges]; edge_type: [n_edges]
        """
        n_edges = edge_index.shape[1]
        random_indices = np.random.choice(n_edges, size=int(n_edges * rate), replace=False)
        return edge_index[:, random_indices], edge_type[random_indices]

    def _sparse_dropout(self, x, rate=0.5):
        """Inverted dropout on a sparse tensor: drop each stored entry with
        probability `rate` and rescale survivors by 1 / (1 - rate)."""
        noise_shape = x._nnz()  # number of stored (non-zero) entries

        # floor(1 - rate + U[0, 1)) is 1 with probability (1 - rate), so each
        # entry is KEPT with probability 1 - rate, matching the 1/(1-rate)
        # rescaling below.
        # Fix: this previously started from `rate`, which kept entries with
        # probability `rate` while still rescaling by 1/(1-rate) —
        # inconsistent for any rate != 0.5.
        random_tensor = 1 - rate
        random_tensor += torch.rand(noise_shape).to(x.device)
        dropout_mask = torch.floor(random_tensor).type(torch.bool)
        i = x._indices()
        v = x._values()

        i = i[:, dropout_mask]
        v = v[dropout_mask]

        # torch.sparse.FloatTensor is deprecated; use the factory consistent
        # with the rest of the file.
        out = torch.sparse_coo_tensor(i, v, x.shape).to(x.device)
        return out * (1. / (1 - rate))

    def _cul_cor(self):
        """Compute the correlation (dependence) among the latent intents;
        lower is better, so it is used directly as a regularizer."""
        def CosineSimilarity(tensor_1, tensor_2):
            # tensor_1, tensor_2: [channel]
            normalized_tensor_1 = tensor_1 / tensor_1.norm(dim=0, keepdim=True)
            normalized_tensor_2 = tensor_2 / tensor_2.norm(dim=0, keepdim=True)
            return (normalized_tensor_1 * normalized_tensor_2).sum(dim=0) ** 2  # no negative
        def DistanceCorrelation(tensor_1, tensor_2):
            # tensor_1, tensor_2: [channel]
            # ref: https://en.wikipedia.org/wiki/Distance_correlation
            channel = tensor_1.shape[0]
            zeros = torch.zeros(channel, channel).to(tensor_1.device)
            zero = torch.zeros(1).to(tensor_1.device)
            tensor_1, tensor_2 = tensor_1.unsqueeze(-1), tensor_2.unsqueeze(-1)
            # pairwise distance matrices
            a_, b_ = torch.matmul(tensor_1, tensor_1.t()) * 2, \
                   torch.matmul(tensor_2, tensor_2.t()) * 2  # [channel, channel]
            tensor_1_square, tensor_2_square = tensor_1 ** 2, tensor_2 ** 2
            a, b = torch.sqrt(torch.max(tensor_1_square - a_ + tensor_1_square.t(), zeros) + 1e-8), \
                   torch.sqrt(torch.max(tensor_2_square - b_ + tensor_2_square.t(), zeros) + 1e-8)  # [channel, channel]
            # double-centered distance matrices -> distance correlation
            A = a - a.mean(dim=0, keepdim=True) - a.mean(dim=1, keepdim=True) + a.mean()
            B = b - b.mean(dim=0, keepdim=True) - b.mean(dim=1, keepdim=True) + b.mean()
            dcov_AB = torch.sqrt(torch.max((A * B).sum() / channel ** 2, zero) + 1e-8)
            dcov_AA = torch.sqrt(torch.max((A * A).sum() / channel ** 2, zero) + 1e-8)
            dcov_BB = torch.sqrt(torch.max((B * B).sum() / channel ** 2, zero) + 1e-8)
            return dcov_AB / torch.sqrt(dcov_AA * dcov_BB + 1e-8)
        def MutualInformation():
            # disen_T: [num_factor, dimension]
            disen_T = self.disen_weight_att.t()

            # normalized_disen_T: [num_factor, dimension]
            normalized_disen_T = disen_T / disen_T.norm(dim=1, keepdim=True)

            pos_scores = torch.sum(normalized_disen_T * normalized_disen_T, dim=1)
            ttl_scores = torch.sum(torch.mm(disen_T, self.disen_weight_att), dim=1)

            pos_scores = torch.exp(pos_scores / self.temperature)
            ttl_scores = torch.exp(ttl_scores / self.temperature)

            mi_score = - torch.sum(torch.log(pos_scores / ttl_scores))
            return mi_score

        # Similarity for each unordered pair of latent-intent weight vectors.
        if self.ind == 'mi':
            return MutualInformation()
        else:
            cor = 0
            for i in range(self.n_factors):
                for j in range(i + 1, self.n_factors):
                    if self.ind == 'distance':
                        # Distance correlation between the relation-attention
                        # vectors of two intents: pushes intents to attend to
                        # relations in different patterns.
                        cor += DistanceCorrelation(self.disen_weight_att[i], self.disen_weight_att[j])
                    else:
                        cor += CosineSimilarity(self.disen_weight_att[i], self.disen_weight_att[j])
        return cor

    def forward(self, user_emb, entity_emb, latent_emb, edge_index, edge_type,
                interact_mat, mess_dropout=True, node_dropout=False):
        """Run all aggregation hops; return residual-summed entity/user
        embeddings plus the intent-correlation regularizer."""
        # Node dropout: randomly thin the graph to reduce overfitting.
        if node_dropout:
            edge_index, edge_type = self._edge_sampling(edge_index, edge_type, self.node_dropout_rate)
            interact_mat = self._sparse_dropout(interact_mat, self.node_dropout_rate)

        entity_res_emb = entity_emb  # [n_entity, channel]
        user_res_emb = user_emb  # [n_users, channel]
        # Total correlation among the latent intents.
        cor = self._cul_cor()
        for i in range(len(self.convs)):
            entity_emb, user_emb = self.convs[i](entity_emb, user_emb, latent_emb,
                                                 edge_index, edge_type, interact_mat,
                                                 self.weight, self.disen_weight_att)
            # Message dropout: zero embedding coordinates with prob mess_dropout_rate.
            if mess_dropout:
                entity_emb = self.dropout(entity_emb)
                user_emb = self.dropout(user_emb)
            entity_emb = F.normalize(entity_emb)
            user_emb = F.normalize(user_emb)

            # Accumulate every hop's output (residual sum).
            entity_res_emb = torch.add(entity_res_emb, entity_emb)
            user_res_emb = torch.add(user_res_emb, user_emb)

        return entity_res_emb, user_res_emb, cor


class Recommender(nn.Module):
    """
    KGIN-based recommender extended with a degree-weighted contrastive loss
    computed from a similarity matrix over the collaborative knowledge graph.
    """
    def __init__(self, data_config, args_config, graph, adj_mat, ckg_adj, cf_adj, kg_graph, cf_coo):
        super(Recommender, self).__init__()

        self.n_users = data_config['n_users']
        self.n_items = data_config['n_items']
        self.n_relations = data_config['n_relations']
        self.n_entities = data_config['n_entities']  # include items
        self.n_nodes = data_config['n_nodes']  # n_users + n_entities

        self.decay = args_config.l2
        self.sim_decay = args_config.sim_regularity
        self.emb_size = args_config.dim
        self.context_hops = args_config.context_hops
        self.n_factors = args_config.n_factors
        self.node_dropout = args_config.node_dropout
        self.node_dropout_rate = args_config.node_dropout_rate
        self.mess_dropout = args_config.mess_dropout
        self.mess_dropout_rate = args_config.mess_dropout_rate
        self.ind = args_config.ind
        self.device = torch.device("cuda:" + str(args_config.gpu_id)) if args_config.cuda \
                                                                      else torch.device("cpu")
        self.beta_u = args_config.user_loss  # weight of the user contrastive loss
        self.beta_i = args_config.item_loss  # weight of the item contrastive loss
        self.temperature = 0.6  # Temperature parameter for contrastive loss

        # Construct CKG (Collaborative Knowledge Graph)
        self.CKG = self._convert_sp_mat_to_sp_tensor(ckg_adj)

        # Per-node 1/sqrt(degree) vector for normalization
        self.degree_matrix = self.get_degree_matrix(cf_adj, kg_graph)

        # Similarity matrix for contrastive loss
        self.sim_matrix = self._create_sim_matrices(cf_coo)

        self.adj_mat = adj_mat
        self.graph = graph
        self.edge_index, self.edge_type = self._get_edges(graph)

        self._init_weight()
        self.all_embed = nn.Parameter(self.all_embed)
        # Intent embeddings are free parameters here rather than the linear
        # combination of relation embeddings described in the KGIN paper.
        # Shape: (n_factors, emb_size)
        self.latent_emb = nn.Parameter(self.latent_emb)

        self.gcn = self._init_model()

    def get_degree_matrix(self, cf_adj, kg_graph):
        """
        Compute the degree matrix for normalization.
        The degree matrix records the number of neighbors for each node in the graph.
        Returns:
            A tensor containing the degree of each node, normalized by 1/sqrt(degree).
        """
        # Step 1: Compute degrees for user-item interactions
        # Convert interaction rows and columns to tensors
        inter_user = torch.tensor(cf_adj[:, 0])  # User indices in interactions
        inter_item = torch.tensor(cf_adj[:, 1])  # Item indices in interactions
        data = torch.tensor([1] * len(inter_item))  # All interactions have a value of 1

        # Create a sparse tensor for user-item interactions
        inter_sparse = torch.sparse_coo_tensor(
            torch.stack([inter_user, inter_item]),  # Indices for sparse tensor
            data,  # Values for sparse tensor
            size=torch.Size([self.n_users, self.n_items])  # Size of the sparse tensor
        )

        # Compute user degrees (sum of interactions per user)
        user_degree = torch.sparse.sum(inter_sparse, dim=1).to_dense()  # Sum over rows
        # Compute item degrees (sum of interactions per item)
        item_degree_inter = torch.sparse.sum(inter_sparse, dim=0).to_dense()  # Sum over columns

        # Step 2: Compute degrees for knowledge graph (KG) entities
        kg_head = kg_graph[:, 0]  # Head entities in KG
        kg_tail = kg_graph[:, -1]  # Tail entities in KG

        # Create bidirectional edges for KG
        kg_head_bidirectional = np.concatenate((kg_head, kg_tail))  # Combine head and tail
        kg_tail_bidirectional = np.concatenate((kg_tail, kg_head))  # Combine tail and head

        # Remove duplicate edges
        edges = list(zip(kg_head_bidirectional, kg_tail_bidirectional))  # Pair head and tail
        edges = list(set(edges))  # Remove duplicates

        # Convert edges to tensors
        head_indices = torch.tensor([x[0] for x in edges])  # Head indices of edges
        tail_indices = torch.tensor([x[1] for x in edges])  # Tail indices of edges
        edge_data = torch.tensor([1] * len(head_indices))  # All edges have a value of 1

        # Create a sparse tensor for KG edges
        # NOTE(review): the size is inferred from the max index seen in
        # `edges`; if the highest-id entity has no edge, entity_degree ends up
        # shorter than n_entities — confirm against the dataset.
        kg_sparse = torch.sparse_coo_tensor(
            torch.stack([head_indices, tail_indices]),  # Indices for sparse tensor
            edge_data  # Values for sparse tensor
        )

        # Compute entity degrees (sum of edges per entity)
        entity_degree = torch.sparse.sum(kg_sparse, dim=1).to_dense()  # Sum over rows

        # Step 3: Combine degrees for users, items, and entities
        # Item degrees from KG (only for items present in the KG)
        item_degree_kg = entity_degree[:self.n_items]
        # Total item degrees (sum of interaction degrees and KG degrees)
        item_degree_total = item_degree_inter + item_degree_kg

        # Combine user degrees, item degrees, and remaining entity degrees
        degree_matrix = torch.cat((user_degree, item_degree_total))  # Users and items
        degree_matrix = torch.cat((degree_matrix, entity_degree[self.n_items:]))  # Remaining entities

        # Step 4: Normalize degrees
        degree_matrix = 1 / torch.sqrt(degree_matrix)  # Normalize by 1/sqrt(degree)
        # Handle infinite values (for nodes with zero degree)
        degree_matrix = torch.where(torch.isinf(degree_matrix), torch.tensor(0.0), degree_matrix)

        # Return the degree matrix, moved to the appropriate device (e.g., GPU)
        return degree_matrix.to(self.device)

    def get_Similarity_matrix(self):
        """
        Compute the similarity matrix for contrastive learning.
        The similarity matrix captures relationships between users and items in the CKG (Collaborative Knowledge Graph).

        Returns:
            A sparse tensor representing the similarity matrix.
        """
        # Step 1: Extract coalesced (deduplicated, sorted) indices from the CKG
        ckg_indices = self.CKG.coalesce().indices()

        # Step 2: Filter indices to include only users and items (ignore entities)
        # NOTE(review): the bound is n_users + n_items + 1 — confirm whether
        # the +1 is intentional (it admits one extra node id).
        user_item_mask = (ckg_indices[0] < self.n_users + self.n_items + 1) & (
                    ckg_indices[1] < self.n_users + self.n_items + 1)
        filtered_indices = ckg_indices[:, user_item_mask]

        # Step 3: Create a sparse tensor for user-item interactions
        interaction_values = torch.ones(filtered_indices.shape[1], device=self.device)
        if filtered_indices.device != self.device:
            filtered_indices = filtered_indices.to(self.device)
        if interaction_values.device != self.device:
            interaction_values = interaction_values.to(self.device)
        interaction_matrix = torch.sparse_coo_tensor(filtered_indices, interaction_values, dtype=torch.float16)

        # Step 4: Compute the similarity matrix by multiplying the interaction matrix with itself
        similarity_matrix = torch.sparse.mm(interaction_matrix, interaction_matrix)

        # Step 5: Add the original interaction matrix to capture direct relationships
        similarity_matrix = similarity_matrix + interaction_matrix
        similarity_matrix = similarity_matrix.coalesce()  # Remove duplicates

        # Step 6: Extract indices and values from the similarity matrix
        similarity_indices = similarity_matrix.indices()
        similarity_values = similarity_matrix.values()

        # Step 7: Normalize the similarity values using node degrees
        # Fix: this previously read `self.Degree`, an attribute that is never
        # assigned anywhere in the class (the vector is stored as
        # `self.degree_matrix`), so calling this method raised AttributeError.
        degree_product = self.degree_matrix[similarity_indices][0] * self.degree_matrix[similarity_indices][1]
        normalized_values = -similarity_values * degree_product

        # Step 8: Construct the final similarity matrix
        final_similarity_matrix = torch.sparse_coo_tensor(
            similarity_indices,
            normalized_values,
            size=[self.n_users + self.n_items + 1, self.n_users + self.n_items + 1]
        )

        return final_similarity_matrix

    def _create_sim_matrices(self, cf_adj):
        """
        Build the normalized node-similarity matrix (A^2 + A, each entry
        scaled by -1/sqrt(d_row * d_col)) as a sparse tensor on self.device.
        """
        n_nodes = self.n_nodes

        # Convert cf_adj to a torch sparse tensor on the target device.
        A = self._convert_sp_mat_to_sp_tensor(cf_adj)

        # Second-order co-occurrence via sparse matrix multiplication.
        print("Performing sparse matrix multiplication on GPU (A @ A)...")
        A_squared = torch.sparse.mm(A, A)

        # A^2 + A: add the direct (first-order) links back in.
        similarity_matrix = (A_squared + A).coalesce()

        # Normalize each entry by the degree product of its endpoints.
        rows, cols = similarity_matrix.indices()
        vals = similarity_matrix.values()

        degree_product = self.degree_matrix[rows] * self.degree_matrix[cols]
        normalize_vals = -vals * degree_product

        # Assemble the final sparse tensor.
        final_sim_matrix = torch.sparse_coo_tensor(
            torch.stack([rows, cols]), normalize_vals, (n_nodes, n_nodes), device=self.device
        )
        print("Similarity and degree matrices calculation finished.")

        return final_sim_matrix

    def _convert_sp_mat_to_sp_tensor(self, X):
        """Convert a scipy sparse matrix to a torch sparse tensor on self.device."""
        coo = X.tocoo()
        i = torch.LongTensor(np.vstack((coo.row, coo.col)))
        v = torch.FloatTensor(coo.data)
        return torch.sparse_coo_tensor(i, v, coo.shape, device=self.device)

    def coo_matrix_to_torch_sparse(self, coo_mat):
        """
        Convert a scipy.sparse.coo_matrix to a torch.sparse_coo_tensor on self.device.

        Args:
            coo_mat (coo_matrix): Input sparse matrix in COO format.

        Returns:
            torch.sparse.Tensor: Sparse tensor on the target device.
        """
        # Step 1: Extract COO indices and data
        indices = torch.LongTensor(np.vstack([coo_mat.row, coo_mat.col]))
        values = torch.FloatTensor(coo_mat.data)
        shape = coo_mat.shape
        # Step 2: Create sparse tensor
        sparse_tensor = torch.sparse_coo_tensor(indices, values, shape, device=self.device)

        return sparse_tensor.to(self.device)

    def _init_weight(self):
        """Initialize node/intent embeddings and the interaction matrix."""
        initializer = nn.init.xavier_uniform_
        self.all_embed = initializer(torch.empty(self.n_nodes, self.emb_size))
        self.latent_emb = initializer(torch.empty(self.n_factors, self.emb_size))

        # [n_users, n_entities]
        self.interact_mat = self._convert_sp_mat_to_sp_tensor(self.adj_mat)

    def _init_model(self):
        """Build the stacked GraphConv module."""
        return GraphConv(channel=self.emb_size,
                         n_hops=self.context_hops,
                         n_users=self.n_users,
                         n_relations=self.n_relations,
                         n_factors=self.n_factors,
                         interact_mat=self.interact_mat,
                         ind=self.ind,
                         node_dropout_rate=self.node_dropout_rate,
                         mess_dropout_rate=self.mess_dropout_rate)

    def _get_indices(self, X):
        """Return the non-zero (row, col) index pairs of a scipy sparse matrix."""
        coo = X.tocoo()
        return torch.LongTensor([coo.row, coo.col]).t()  # [-1, 2]

    def _get_edges(self, graph):
        """Split the (head, tail, relation) edge list of `graph` into
        edge_index [2, n_edges] and edge_type [n_edges] tensors."""
        graph_tensor = torch.tensor(list(graph.edges))  # [-1, 3]
        index = graph_tensor[:, :-1]  # [-1, 2]
        type = graph_tensor[:, -1]  # [-1, 1]
        return index.t().long().to(self.device), type.long().to(self.device)

    def forward(self, batch=None):
        """Compute the joint training loss for a batch.

        Returns: (rec_loss + cl_loss, mf_loss, emb_loss, cor, cl_loss)
        """
        user = batch['users']
        pos_item = batch['pos_items']
        neg_item = batch['neg_items']

        user_emb = self.all_embed[:self.n_users, :]
        item_emb = self.all_embed[self.n_users:, :]
        # Pre-GCN embeddings of the batch, used by the contrastive loss.
        user_emb_rec, pos_item_emb_rec, neg_item_emb_rec = user_emb[user], item_emb[pos_item], item_emb[neg_item]
        # entity_gcn_emb: [n_entity, channel]
        # user_gcn_emb: [n_users, channel]
        entity_gcn_emb, user_gcn_emb, cor = self.gcn(user_emb,
                                                     item_emb,
                                                     self.latent_emb,
                                                     self.edge_index,
                                                     self.edge_type,
                                                     self.interact_mat,
                                                     mess_dropout=self.mess_dropout,
                                                     node_dropout=self.node_dropout)
        u_e = user_gcn_emb[user]
        pos_e, neg_e = entity_gcn_emb[pos_item], entity_gcn_emb[neg_item]
        rec_loss, mf_loss, emb_loss, cor = self.create_bpr_loss(u_e, pos_e, neg_e, cor)
        cl_loss = self.create_cl_loss(user, user_emb_rec, pos_item, pos_item_emb_rec, neg_item, neg_item_emb_rec)
        return rec_loss + cl_loss, mf_loss, emb_loss, cor, cl_loss

    def get_item_similarity(self, pos_item, neg_item):
        """
        Compute the similarity matrix between positive and negative items.
        Args:
            pos_item (torch.Tensor): Indices of the positive items.
            neg_item (torch.Tensor): Indices of the negative items.
        Returns:
            torch.Tensor: A dense similarity matrix between positive and negative items.
        """
        sim = self.sim_matrix.index_select(0, index=pos_item)
        sim = sim.index_select(1, index=neg_item).to_dense()
        # Shift by 1 (stored similarities are negative after normalization).
        sim = 1 + sim
        return sim

    def get_item_loss(self, pos_item, pos_e, neg_item, neg_e):
        """
        Compute the contrastive loss for items based on their embeddings and similarities.
        Args:
            pos_item (torch.Tensor): Indices of the positive items.
            pos_e (torch.Tensor): Embeddings of the positive items.
            neg_item (torch.Tensor): Indices of the negative items.
            neg_e (torch.Tensor): Embeddings of the negative items.
        Returns:
            torch.Tensor: The computed item contrastive loss.
        """
        sim = self.get_item_similarity(pos_item, neg_item)
        normal_pos_embedding = F.normalize(pos_e)
        normal_neg_embedding = F.normalize(neg_e)
        pos_degree = self.degree_matrix[pos_item]
        neg_degree = self.degree_matrix[neg_item]
        # Pairwise degree products, flipped so high-degree pairs weigh less.
        matrix = pos_degree.view(-1, 1) @ neg_degree.view(1, -1)
        matrix = 1 - matrix
        loss = torch.mean(matrix * torch.exp((normal_pos_embedding @ normal_neg_embedding.T) * sim / self.temperature))
        return loss

    def create_cl_loss(self, users, user_emb_rec, pos_item, pos_item_emb_rec, neg_item, neg_item_emb_rec):
        """Weighted sum of the user and item contrastive losses."""
        # Compute user contrastive loss
        user_cl_loss = self.get_user_loss(users, user_emb_rec)
        # Compute item contrastive loss (item ids are offset by n_users in the
        # joint node index space).
        item_cl_loss = self.get_item_loss(
            pos_item + self.n_users, pos_item_emb_rec,
            neg_item + self.n_users, neg_item_emb_rec
        )
        cl_loss = self.beta_u * user_cl_loss + self.beta_i * item_cl_loss
        return cl_loss

    def get_user_loss(self, users, user_emb):
        """
        Compute the contrastive loss for users based on their embeddings and similarities.
        Args:
            users (torch.Tensor): Indices of the user nodes.
            user_emb (torch.Tensor): Embeddings of the user nodes.
        Returns:
            torch.Tensor: The computed user contrastive loss.
        """
        sim = self.get_user_similarity(users)
        # L2-normalize embeddings along the feature dimension.
        normal_embedding = F.normalize(user_emb)
        degree = self.degree_matrix[users]
        matrix = degree.view(-1, 1) @ degree.view(1, -1)
        matrix = 1 - matrix
        loss = torch.mean(matrix * torch.exp((normal_embedding @ normal_embedding.T) * sim / self.temperature))

        return loss

    def get_user_similarity(self, users):
        """
        Compute the similarity matrix for a given set of user nodes.
        Args:
            users (torch.Tensor): Indices of the user nodes.
        Returns:
            torch.Tensor: A dense similarity matrix for the given user nodes.
        """
        # Select the requested rows/columns and densify.
        sim = self.sim_matrix.index_select(0, index=users)
        sim = sim.index_select(1, index=users).to_dense()
        # Shift by 1 (stored similarities are negative after normalization).
        sim = 1 + sim
        return sim

    def generate(self):
        """Return final (entity, user) embeddings with all dropout disabled."""
        user_emb = self.all_embed[:self.n_users, :]
        item_emb = self.all_embed[self.n_users:, :]
        return self.gcn(user_emb,
                        item_emb,
                        self.latent_emb,
                        self.edge_index,
                        self.edge_type,
                        self.interact_mat,
                        mess_dropout=False, node_dropout=False)[:-1]

    def rating(self, u_g_embeddings, i_g_embeddings):
        """Score every (user, item) pair by dot product."""
        return torch.matmul(u_g_embeddings, i_g_embeddings.t())

    def create_bpr_loss(self, users, pos_items, neg_items, cor):
        """BPR loss plus L2 regularization plus the intent-correlation penalty.

        Returns: (total_loss, mf_loss, emb_loss, cor)
        """
        batch_size = users.shape[0]
        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)
        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)
        # mean (not sum) keeps the loss scale independent of batch size.
        mf_loss = -1 * torch.mean(nn.LogSigmoid()(pos_scores - neg_scores))
        # L2 regularizer over the batch embeddings.
        regularizer = (torch.norm(users) ** 2
                       + torch.norm(pos_items) ** 2
                       + torch.norm(neg_items) ** 2) / 2
        emb_loss = self.decay * regularizer / batch_size
        cor_loss = self.sim_decay * cor

        return mf_loss + emb_loss + cor_loss, mf_loss, emb_loss, cor
