import torch
from parser1 import args
import scipy.sparse as sp
import numpy as np


def create_item_graph(items, top_k=11):
    """Build a symmetrically normalized item-item similarity graph.

    Each item is connected to the items whose cosine similarity strictly
    exceeds its row's ``top_k``-th largest similarity (the diagonal
    self-similarity of 1 typically occupies one of those slots), then the
    binary adjacency is normalized as D^{-1/2} A D^{-1/2}.

    Args:
        items: (num_items, dim) dense embedding matrix (CUDA tensor — the
            thresholding constants below are created with ``.cuda()``).
        top_k: how many top similarities per row define the threshold;
            defaults to 11 (the original hard-coded value).

    Returns:
        (num_items, num_items) dense normalized adjacency matrix.
    """
    # Cosine similarity = dot product of L2-normalized rows.
    # clamp_min guards all-zero embedding rows, which would otherwise
    # produce NaN similarities and poison the whole graph.
    norms = torch.norm(items, dim=1, keepdim=True).clamp_min(1e-8)
    normalized_items = items / norms
    sim = torch.mm(normalized_items, normalized_items.t())

    # Per-row threshold: the top_k-th largest similarity in each row.
    row_threshold = torch.topk(sim, top_k, dim=1, largest=True).values[:, -1].unsqueeze(1)
    # Keep only strictly-above-threshold entries as edges (binary adjacency).
    A = torch.where(sim > row_threshold, torch.tensor(1.).cuda(), torch.tensor(0.).cuda())

    # Symmetric degree normalization: D^{-1/2} A D^{-1/2}.
    rowsum = A.sum(dim=1)
    rowsum = torch.pow(rowsum + 1e-8, -0.5).flatten()
    # Safety net: with the +1e-8 epsilon this should never be inf,
    # but zero any inf that slips through rather than propagate it.
    rowsum[rowsum == float('inf')] = 0.
    rowsum_diag = torch.diag(rowsum)

    return rowsum_diag @ A @ rowsum_diag

def calculate_pre(indices, shape):
    """Compute symmetric-normalization weights for a bipartite adjacency.

    Builds a binary sparse matrix from ``indices`` and returns, for each
    nonzero (r, c), the value 1 / sqrt(deg_row(r)) * 1 / sqrt(deg_col(c)),
    with a 1e-7 epsilon added to both degrees to avoid division by zero.

    Args:
        indices: (2, nnz) LongTensor of (row, col) coordinate pairs.
        shape: size of the sparse matrix.

    Returns:
        (nnz,) tensor of normalized edge weights, aligned with ``indices``.
    """
    ones = torch.ones_like(indices[0])
    adj = torch.sparse.FloatTensor(indices, ones, shape).cuda()

    # Per-row / per-column degrees, epsilon-shifted.
    deg_row = 1e-7 + torch.sparse.sum(adj, -1).to_dense()
    deg_col = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense()

    # Gather each edge's D_r^{-1/2} and D_c^{-1/2} factors and combine.
    inv_sqrt_row = torch.pow(deg_row, -0.5)[indices[0]]
    inv_sqrt_col = torch.pow(deg_col, -0.5)[indices[1]]
    return inv_sqrt_row * inv_sqrt_col

def hyper_normal_torch(Hyper_graph):
    """Symmetrically normalize a dense hypergraph incidence matrix.

    Computes D_v^{-1/2} H D_e^{-1/2} D_e^{-1/2} H^T D_v^{-1/2}, where D_v
    (vertex degrees) comes from row sums of H and D_e (hyperedge degrees)
    from column sums, each epsilon-shifted by 1e-8 before the -1/2 power.

    Args:
        Hyper_graph: (num_vertices, num_edges) dense incidence matrix H.

    Returns:
        (num_vertices, num_vertices) normalized vertex-vertex propagation matrix.
    """
    # Vertex-side degree factor D_v^{-1/2}.
    d_v = torch.diag(torch.pow(Hyper_graph.sum(dim=1) + 1e-8, -0.5).flatten())
    # Hyperedge-side degree factor D_e^{-1/2}.
    d_e = torch.diag(torch.pow(Hyper_graph.sum(dim=0) + 1e-8, -0.5).flatten())

    # Two-sided normalization of H H^T through the edge degrees.
    return d_v @ Hyper_graph @ d_e @ d_e @ Hyper_graph.t() @ d_v

def hyper_graph_create(tensor, num_features=128):
    """Build a binary hypergraph incidence matrix from a feature tensor.

    Columns are L2-normalized, the ``num_features`` highest-variance columns
    are kept as hyperedges, and within each kept column the entries strictly
    above that column's ``args.hyper_num``-th largest value become 1.

    Args:
        tensor: (num_nodes, num_raw_features) dense feature matrix
            (CUDA tensor — the thresholding constants below use ``.cuda()``).
        num_features: how many highest-variance columns to retain as
            hyperedges; defaults to 128 (the original hard-coded value).

    Returns:
        (num_nodes, num_features) binary incidence matrix.
    """
    # L2-normalize each column so variance comparison is scale-free.
    norm = torch.norm(tensor, p=2, dim=0, keepdim=True)
    tensor = tensor / norm

    # Keep the num_features columns with the largest variance.
    variances = torch.var(tensor, dim=0)
    top_l_indices = torch.argsort(variances, descending=True)[:num_features]
    selected_features = tensor[:, top_l_indices]

    # Per-column threshold: the args.hyper_num-th largest entry of each column.
    col_threshold = torch.topk(selected_features, args.hyper_num, dim=0, largest=True).values[-1, :].unsqueeze(0)
    # Binarize: strictly-above-threshold entries join the hyperedge.
    sparse_tensor = torch.where(selected_features > col_threshold, torch.tensor(1.).cuda(), torch.tensor(0.).cuda())

    return sparse_tensor

def matrix_to_tensor(cur_matrix):
    """Convert a scipy sparse matrix to a float32 torch sparse CUDA tensor.

    Args:
        cur_matrix: any scipy sparse matrix; converted to COO format first
            so its ``row``/``col``/``data`` arrays are available.

    Returns:
        torch.sparse.FloatTensor on CUDA with the same shape and entries.
    """
    # isinstance (not `type(...) !=`) so coo subclasses are also recognized.
    if not isinstance(cur_matrix, sp.coo_matrix):
        cur_matrix = cur_matrix.tocoo()
    # Stack row/col into the (2, nnz) index layout torch sparse expects.
    indices = torch.from_numpy(np.vstack((cur_matrix.row, cur_matrix.col)).astype(np.int64))
    values = torch.from_numpy(cur_matrix.data)
    shape = torch.Size(cur_matrix.shape)

    return torch.sparse.FloatTensor(indices, values, shape).to(torch.float32).cuda()

def graph_drop(graph, keepRate):
    """Randomly drop edges of a sparse graph, keeping each with prob. keepRate.

    Kept edge values are rescaled by 1/keepRate so the expected edge weight
    is unchanged (inverted-dropout style).

    Args:
        graph: torch sparse tensor whose edges are sampled independently.
        keepRate: probability in (0, 1] of keeping each edge.

    Returns:
        New torch.sparse.FloatTensor with the surviving, rescaled edges.
    """
    values = graph._values()
    indices = graph._indices()
    # Bernoulli(keepRate) mask: rand + keepRate floors to 1 with prob keepRate.
    keep_mask = (torch.rand(values.size()) + keepRate).floor().type(torch.bool)
    kept_values = values[keep_mask] / keepRate
    kept_indices = indices[:, keep_mask]
    return torch.sparse.FloatTensor(kept_indices, kept_values, graph.shape)

