from parser1 import args
import numpy as np
import pickle
import scipy.sparse as sp
from scipy.sparse import csr_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F

def neghyper_graph_create(tensor, hyper_num=None, hyper_rate=None):
    """Build a binary "negative" hypergraph incidence matrix from a feature tensor.

    An entry is set to 1. where its value is strictly below the row's k_row-th
    smallest value OR strictly below the column's k_col-th smallest value
    (i.e. the lowest-scoring entries per row/column), else 0.

    Args:
        tensor: 2-D float tensor of scores/features (any device).
        hyper_num: minimum per-row k; defaults to ``args.hyper_num``.
        hyper_rate: fraction of a dimension used as k; defaults to ``args.hyper_rate``.

    Returns:
        A 0./1. float tensor of the same shape, on ``tensor``'s device.
    """
    if hyper_num is None:
        hyper_num = args.hyper_num
    if hyper_rate is None:
        hyper_rate = args.hyper_rate
    row_k = max(hyper_num, int(hyper_rate * tensor.size(1)))
    col_k = int(hyper_rate * tensor.size(0))
    # Bottom-k along each row / column; the last (k-th) element is the threshold.
    # Computed once and reused for the debug print (the original recomputed topk).
    row_bottom = torch.topk(tensor, row_k, dim=1, largest=False).values
    col_bottom = torch.topk(tensor, col_k, dim=0, largest=False).values
    print(row_bottom.shape, col_bottom.shape, tensor.shape)  # debug trace (shapes only)
    row_threshold = row_bottom[:, -1].unsqueeze(1)
    col_threshold = col_bottom[-1, :].unsqueeze(0)
    one = torch.tensor(1., device=tensor.device)
    zero = torch.tensor(0., device=tensor.device)
    # Strictly-below-threshold entries become 1; the threshold element itself is
    # excluded by the strict `<`, so at most k-1 entries per row/column qualify.
    sparse_tensor = torch.where(tensor < row_threshold, one, zero)
    sparse_tensor = torch.where(tensor < col_threshold, one, sparse_tensor)

    return sparse_tensor

def hyper_graph_create(tensor, hyper_num=None, hyper_rate=None):
    """Build a binary hypergraph incidence matrix from a feature tensor.

    An entry is set to 1. where its value is strictly above the row's k_row-th
    largest value OR strictly above the column's k_col-th largest value
    (i.e. the highest-scoring entries per row/column), else 0.

    Args:
        tensor: 2-D float tensor of scores/features (any device).
        hyper_num: minimum per-row k; defaults to ``args.hyper_num``.
        hyper_rate: fraction of a dimension used as k; defaults to ``args.hyper_rate``.

    Returns:
        A 0./1. float tensor of the same shape, on ``tensor``'s device.
    """
    if hyper_num is None:
        hyper_num = args.hyper_num
    if hyper_rate is None:
        hyper_rate = args.hyper_rate
    row_k = max(hyper_num, int(hyper_rate * tensor.size(1)))
    col_k = int(hyper_rate * tensor.size(0))
    # Top-k along each row / column; the last (k-th) element is the threshold.
    # Computed once and reused for the debug print (the original recomputed topk).
    row_top = torch.topk(tensor, row_k, dim=1, largest=True).values
    col_top = torch.topk(tensor, col_k, dim=0, largest=True).values
    print(row_top.shape, col_top.shape, tensor.shape)  # debug trace (shapes only)
    row_threshold = row_top[:, -1].unsqueeze(1)
    col_threshold = col_top[-1, :].unsqueeze(0)
    one = torch.tensor(1., device=tensor.device)
    zero = torch.tensor(0., device=tensor.device)
    # Strictly-above-threshold entries become 1; the threshold element itself is
    # excluded by the strict `>`, so at most k-1 entries per row/column qualify.
    sparse_tensor = torch.where(tensor > row_threshold, one, zero)
    sparse_tensor = torch.where(tensor > col_threshold, one, sparse_tensor)

    return sparse_tensor

def hyper_normal_torch(Hyper_graph):
    """Return the symmetrically normalized hypergraph propagation matrix.

    Computes D_v^{-1/2} H D_e^{-1} H^T D_v^{-1/2}, where D_v are node (row)
    degrees and D_e are hyperedge (column) degrees; D_e^{-1} is applied as
    two consecutive D_e^{-1/2} factors.

    NOTE: because of the +1e-8 offset before the inverse square root, the
    inf guard can never actually fire; it is kept for parity.
    """
    def _inv_sqrt_degree_diag(degree):
        # degree vector -> diag(1 / sqrt(degree + eps)), with inf zeroed out
        inv = torch.pow(degree + 1e-8, -0.5).flatten()
        inv[inv == float('inf')] = 0.
        return torch.diag(inv)

    node_norm = _inv_sqrt_degree_diag(Hyper_graph.sum(dim=1))  # node degrees
    edge_norm = _inv_sqrt_degree_diag(Hyper_graph.sum(dim=0))  # hyperedge degrees

    return node_norm @ Hyper_graph @ edge_norm @ edge_norm @ Hyper_graph.t() @ node_norm

def matrix_to_tensor(cur_matrix, device='cuda'):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Args:
        cur_matrix: any scipy sparse matrix; converted to COO if needed.
        device: target device for the result (default 'cuda', matching the
            original hard-coded behavior; pass 'cpu' for CPU use).

    Returns:
        A float32 ``torch`` sparse COO tensor on ``device``.
    """
    # isinstance instead of the original `type(...) != sp.coo_matrix` check;
    # .tocoo() on a COO matrix would be a harmless no-op anyway.
    if not isinstance(cur_matrix, sp.coo_matrix):
        cur_matrix = cur_matrix.tocoo()
    # COO layout: a 2 x nnz int64 index matrix plus the nnz data values.
    indices = torch.from_numpy(np.vstack((cur_matrix.row, cur_matrix.col)).astype(np.int64))
    values = torch.from_numpy(cur_matrix.data)
    shape = torch.Size(cur_matrix.shape)

    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the modern API.
    return torch.sparse_coo_tensor(indices, values, shape).to(torch.float32).to(device)

def graph_drop(graph, keepRate):
    """Randomly drop edges of a sparse graph (inverted-dropout style).

    Each edge is kept independently with probability ``keepRate``; surviving
    values are rescaled by 1/keepRate so the expected adjacency is preserved.

    Args:
        graph: torch sparse COO tensor.
        keepRate: keep probability in (0, 1].

    Returns:
        A new sparse COO tensor containing only the surviving edges.
    """
    vals = graph._values()
    idxs = graph._indices()
    # Bug fix: the random mask must live on the same device as the graph —
    # boolean-indexing a CUDA tensor with a CPU mask fails.
    # floor(rand + keepRate) is 1 with probability keepRate, else 0.
    mask = ((torch.rand(vals.size(), device=vals.device) + keepRate).floor()).type(torch.bool)
    newVals = vals[mask] / keepRate
    newIdxs = idxs[:, mask]
    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the modern API.
    return torch.sparse_coo_tensor(newIdxs, newVals, graph.shape)


class Trans_Hyper(nn.Module):
    """Multimodal hypergraph recommendation model.

    Combines (i) LightGCN-style propagation over the normalized user-item
    bipartite graph and (ii) propagation over an item hypergraph built by
    thresholding pretrained image/text features (hyper_graph_create /
    hyper_normal_torch).

    NOTE(review): assumes CUDA is available — feature tensors and the
    hypergraph are created on GPU in __init__.
    """
    def __init__(self):
        super(Trans_Hyper,self).__init__()
        self.n_layers = args.n_layers  # depth of user-item graph propagation
        self.h_layers = args.hyper_graph_layers  # depth of hypergraph propagation
        self.emb_size = args.emb_size  # embedding dimensionality
        # NOTE(review): the file handle from open() is never closed, and
        # pickle.load is unsafe on untrusted files — acceptable only for
        # trusted local training data.
        self.ui_graph = pickle.load(open(args.data_path + args.dataset + '/train_mat', 'rb'))
        # Pretrained per-item modality features (n_item x feat_dim), moved to GPU.
        self.image_feats = torch.from_numpy(
            np.load(args.data_path + '{}/image_feat.npy'.format(args.dataset))).cuda().float()
        self.text_feats = torch.from_numpy(
            np.load(args.data_path + '{}/text_feat.npy'.format(args.dataset))).cuda().float()
        self.n_user = self.ui_graph.shape[0]
        self.n_item = self.ui_graph.shape[1]
        #print("graph:",self.n_item)
        self.user_emb = nn.Embedding(self.n_user,self.emb_size)
        self.item_emb = nn.Embedding(self.n_item,self.emb_size)
        nn.init.xavier_uniform_(self.item_emb.weight)
        nn.init.xavier_uniform_(self.user_emb.weight)
        # Build the symmetric block adjacency A = [[0, R], [R^T, 0]] over
        # (users + items) and its normalized form L = D^-1/2 A D^-1/2.
        A = sp.dok_matrix((self.n_user+self.n_item,self.n_user+self.n_item),dtype=np.float32)
        A = A.tolil()
        R = self.ui_graph.todok()
        A[:self.n_user,self.n_user:] = R
        A[self.n_user:,:self.n_user] = R.T
        sumArr = (A>0).sum(axis=1)
        diag = np.array(sumArr.flatten())[0]+1e-7  # +eps guards isolated nodes (degree 0)
        diag = np.power(diag,-0.5)
        D = sp.diags(diag)
        L = D*A*D
        self.L = sp.coo_matrix(L)
        #hyper_graph = torch.cat([self.image_feats, self.text_feats], dim=1)
        # print(super_graph[0:5])
        # Item hypergraph: thresholded text and image feature matrices are
        # concatenated along the hyperedge dimension, then symmetrically
        # normalized once (reused on every forward pass).
        txt_hyper_graph = hyper_graph_create(self.text_feats)
        img_hyper_graph = hyper_graph_create(self.image_feats)
        #negtxt_hyper_graph = neghyper_graph_create(self.text_feats)
        #negimg_hyper_graph = neghyper_graph_create(self.image_feats)
        hyper_graph = torch.cat([txt_hyper_graph,img_hyper_graph],dim=1)
        #neg_hyper_graph = torch.cat([negtxt_hyper_graph,negimg_hyper_graph],dim=1)
        self.hyper_graph = hyper_normal_torch(hyper_graph)
        #self.neg_hyper_graph = hyper_normal_torch(neg_hyper_graph)

    def sim(self, z1, z2):
        """Cosine-similarity matrix between rows of z1 and rows of z2."""
        z1 = F.normalize(z1)
        z2 = F.normalize(z2)
        # z1 = z1/((z1**2).sum(-1) + 1e-8)
        # z2 = z2/((z2**2).sum(-1) + 1e-8)
        return torch.mm(z1, z2.t())
    def batched_contrastive_loss(self, z1, z2, batch_size=1024):
        """InfoNCE-style contrastive loss between views z1 and z2, computed
        in row batches to bound peak memory (temperature fixed at 0.5).

        For node i the positive pair is (z1_i, z2_i); the denominator pools
        intra-view (z1 vs z1) and inter-view (z1 vs z2) similarities over all
        nodes, minus the intra-view self-similarity.

        Returns the mean loss over all nodes (scalar tensor).
        """
        device = z1.device
        num_nodes = z1.size(0)
        num_batches = (num_nodes - 1) // batch_size + 1
        f = lambda x: torch.exp(x / 0.5)  # exp(sim / tau), tau = 0.5

        indices = torch.arange(0, num_nodes).to(device)
        losses = []

        for i in range(num_batches):
            tmp_i = indices[i * batch_size:(i + 1) * batch_size]

            tmp_refl_sim_list = []
            tmp_between_sim_list = []
            for j in range(num_batches):
                tmp_j = indices[j * batch_size:(j + 1) * batch_size]
                tmp_refl_sim = f(self.sim(z1[tmp_i], z1[tmp_j]))  # intra-view block
                tmp_between_sim = f(self.sim(z1[tmp_i], z2[tmp_j]))  # inter-view block

                tmp_refl_sim_list.append(tmp_refl_sim)
                tmp_between_sim_list.append(tmp_between_sim)

            refl_sim = torch.cat(tmp_refl_sim_list, dim=-1)
            between_sim = torch.cat(tmp_between_sim_list, dim=-1)

            # -log( pos / (sum_intra + sum_inter - self_sim) ) per row.
            # NOTE(review): the +1e-8 sits OUTSIDE the ratio passed to log, so
            # it does not guard against log(0) — confirm whether it was meant
            # inside the division.
            losses.append(-torch.log(between_sim[:, i * batch_size:(i + 1) * batch_size].diag() / (
                        refl_sim.sum(1) + between_sim.sum(1) - refl_sim[:,
                                                               i * batch_size:(i + 1) * batch_size].diag()) + 1e-8))

            # Free the per-batch similarity blocks before the next iteration.
            del refl_sim, between_sim, tmp_refl_sim_list, tmp_between_sim_list

        loss_vec = torch.cat(losses)
        return loss_vec.mean()

    def forward(self,is_val=True):
        """One propagation pass over graph and hypergraph.

        Args:
            is_val: unused in the visible code — TODO confirm caller intent.

        Returns:
            Tuple of (user embeddings, item embeddings, hypergraph-propagated
            item embeddings).
        """
        all_emb = torch.cat([self.user_emb.weight,self.item_emb.weight])
        emb_lsit = [all_emb]
        for layer in range(self.n_layers):
            # Edge-dropped normalized adjacency applied per layer.
            # NOTE(review): matrix_to_tensor(self.L) rebuilds the sparse tensor
            # on every layer of every forward pass — hoisting it out of the
            # loop (or into __init__) would save significant work.
            all_emb = torch.sparse.mm(graph_drop(matrix_to_tensor(self.L),args.keep_rate),all_emb)
            emb_lsit.append(all_emb)
        # Layer-wise mean pooling (LightGCN-style readout).
        all_emb = torch.mean(torch.stack(emb_lsit,dim=1),dim=1)
        user_all_embeddings, item_all_embeddings = torch.split(all_emb, [self.n_user, self.n_item])
        item_emb = self.item_emb.weight
        for layer in range(self.h_layers):
            # NOTE(review): item_emb is never reassigned inside this loop, so
            # every iteration recomputes the same product — likely intended
            # item_emb = torch.mm(...). Also, if h_layers == 0, pos_item_emb is
            # unbound and the return raises NameError — confirm intent.
            pos_item_emb = torch.mm(self.hyper_graph,item_emb)
            #neg_item_emb = torch.mm(self.neg_hyper_graph,item_emb)
        return user_all_embeddings,item_all_embeddings,pos_item_emb