from parser1 import args
import numpy as np
import pickle
import scipy.sparse as sp
from scipy.sparse import csr_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F
from functions import *

class Sp_Hyper(nn.Module):
    """Sparse hypergraph recommender.

    Combines LightGCN-style propagation over the normalised user-item
    bipartite graph with a multimodal (image + text) item hypergraph that
    refines the item embeddings.
    """

    def __init__(self):
        super(Sp_Hyper, self).__init__()
        self.n_layers = args.n_layers          # GCN propagation depth
        self.h_layers = args.hyper_graph_layers  # hypergraph refinement depth
        self.emb_size = args.emb_size
        # NOTE(review): pickle.load on a configured path — only safe for
        # trusted local data; never point this at untrusted input.
        self.ui_graph = pickle.load(open(args.data_path + args.dataset + '/train_mat', 'rb'))
        # Pre-extracted modality features; assumed row-aligned with item ids
        # (one row per item) — TODO confirm against the data pipeline.
        self.image_feats = torch.from_numpy(
            np.load(args.data_path + '{}/image_feat.npy'.format(args.dataset))).cuda().float()
        self.text_feats = torch.from_numpy(
            np.load(args.data_path + '{}/text_feat.npy'.format(args.dataset))).cuda().float()
        self.n_user = self.ui_graph.shape[0]
        self.n_item = self.ui_graph.shape[1]
        self.user_emb = nn.Embedding(self.n_user, self.emb_size)
        self.item_emb = nn.Embedding(self.n_item, self.emb_size)
        nn.init.xavier_uniform_(self.item_emb.weight)
        nn.init.xavier_uniform_(self.user_emb.weight)
        # Build the symmetric bipartite adjacency A = [[0, R], [R^T, 0]] over
        # the joint (user + item) node set, then symmetrically normalise:
        # L = D^{-1/2} A D^{-1/2}.
        A = sp.dok_matrix((self.n_user + self.n_item, self.n_user + self.n_item), dtype=np.float32)
        A = A.tolil()
        R = self.ui_graph.todok()
        A[:self.n_user, self.n_user:] = R
        A[self.n_user:, :self.n_user] = R.T
        sumArr = (A > 0).sum(axis=1)
        diag = np.array(sumArr.flatten())[0] + 1e-7  # epsilon keeps isolated nodes finite
        diag = np.power(diag, -0.5)
        D = sp.diags(diag)
        L = D * A * D
        self.L = matrix_to_tensor(sp.coo_matrix(L))
        # Item hypergraph built from the concatenated image+text features,
        # then normalised (semantics of the helpers live in functions.py).
        hyper_graph = hyper_graph_create(torch.cat([self.image_feats, self.text_feats], dim=1))
        self.hyper_graph = hyper_normal_torch(hyper_graph)

        # Keep the raw interaction edges around for per-epoch edge pruning.
        self.ui_graph = self.ui_graph.tocoo()
        self.edge_indices = torch.from_numpy(
            np.vstack((self.ui_graph.row, self.ui_graph.col)).astype(np.int64)).cuda()
        shape = torch.Size(self.ui_graph.shape)
        self.edge_values = calculate_pre(self.edge_indices, shape)

        # Item-item graph: equal-weight blend of the text and image views.
        txt_item_graph = create_item_graph(self.text_feats)
        img_item_graph = create_item_graph(self.image_feats)
        self.item_graph = 0.5 * txt_item_graph + 0.5 * img_item_graph
        self.project = nn.Linear(self.emb_size, self.emb_size)
        self.sigmod = nn.Sigmoid()

    def pre_epoch_processing(self):
        """Degree-sensitive edge pruning, run once per epoch.

        Samples a ``keep_rate`` fraction of the user-item edges (weighted by
        their normalised values, without replacement), renormalises them, and
        caches the symmetric sparse adjacency in ``self.masked_adj`` for the
        training-time forward pass (``is_val=False``).
        """
        degree_len = int(self.edge_values.size(0) * (args.keep_rate))
        # Weighted sampling without replacement: higher-value edges are more
        # likely to be kept.
        degree_idx = torch.multinomial(self.edge_values, degree_len)
        keep_indices = self.edge_indices[:, degree_idx]  # advanced indexing → a copy
        # Renormalise the surviving edges over the pruned graph.
        keep_values = calculate_pre(keep_indices, torch.Size((self.n_user, self.n_item)))
        all_values = torch.cat((keep_values, keep_values))
        # Shift item column ids into the joint (user + item) index space.
        keep_indices[1] += self.n_user
        # Mirror the edges so the adjacency is symmetric.
        all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)
        self.masked_adj = torch.sparse.FloatTensor(all_indices, all_values, self.L.shape).cuda()

    def forward(self, is_val=True):
        """Propagate embeddings and apply the hypergraph refinement.

        Args:
            is_val: if True, propagate over the full graph ``self.L``;
                otherwise over the edge-dropped ``self.masked_adj`` produced
                by :meth:`pre_epoch_processing`.

        Returns:
            Tuple ``(user_all_embeddings, item_all_embeddings)``.
        """
        all_emb = torch.cat([self.user_emb.weight, self.item_emb.weight])
        emb_list = [all_emb]
        for _ in range(self.n_layers):
            # Both adjacencies are sparse; use torch.sparse.mm consistently
            # (the training branch previously used torch.mm — equivalent here).
            adj = self.L if is_val else self.masked_adj
            all_emb = torch.sparse.mm(adj, all_emb)
            emb_list.append(all_emb)
        # LightGCN-style readout: mean over all layer outputs (incl. layer 0).
        all_emb = torch.mean(torch.stack(emb_list, dim=1), dim=1)
        user_all_embeddings, item_all_embeddings = torch.split(all_emb, [self.n_user, self.n_item])

        # Hypergraph refinement of the item embeddings.
        # NOTE(review): every iteration reads the ORIGINAL item_emb, so all
        # h_layers iterations compute the identical tensor; a stacked design
        # would feed item_emb2 back into item_emb. Preserved as-is so trained
        # behaviour is unchanged — confirm intent with the authors.
        item_emb = self.item_emb.weight
        item_emb2 = None
        for _ in range(self.h_layers):
            item_emb2 = self.sigmod(self.project(torch.mm(self.hyper_graph, item_emb) + item_emb))
        # Fix: the original raised NameError when h_layers == 0; skip the
        # refinement term in that case instead of crashing.
        if item_emb2 is not None:
            item_all_embeddings = item_all_embeddings + item_emb2
        return user_all_embeddings, item_all_embeddings