import torch
import numpy as np
from parser1 import args
import torch.nn as nn
import pickle
import scipy.sparse as sp
from functions import *

class MLP(nn.Module):
    """A single Xavier-initialized linear projection.

    Despite the name, this is one ``nn.Linear`` layer (with bias) used to
    project modality features down to the model embedding size.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        projection = nn.Linear(input_dim, output_dim, bias=True)
        # Xavier/Glorot init on the weight; bias keeps its default init.
        nn.init.xavier_uniform_(projection.weight)
        self.Layer = projection

    def forward(self, x):
        """Return ``x`` projected from ``input_dim`` to ``output_dim``."""
        return self.Layer(x)

class FREEDOM(nn.Module):
    """Graph-based multimodal recommender (FREEDOM-style).

    Combines a LightGCN-style user-item propagation graph with item-item
    graphs built from frozen image/text features, plus a multimodal
    hypergraph over items.  The graph-construction helpers
    (``create_item_graph``, ``hyper_graph_create``, ``hyper_normal_torch``,
    ``calculate_pre``, ``matrix_to_tensor``) come from ``functions`` and
    their exact semantics are not visible in this file.
    """
    def __init__(self):
        super(FREEDOM, self).__init__()
        self.n_layers = args.n_layers  # depth of user-item GCN propagation
        self.emb_size = args.emb_size
        # NOTE(review): file handle from open() is never closed, and
        # pickle.load is only safe on trusted dataset files.
        self.ui_graph = pickle.load(open(args.data_path + args.dataset + '/train_mat', 'rb'))
        # Frozen (non-learned) modality features, moved to GPU as float32.
        self.image_feats = torch.from_numpy(
            np.load(args.data_path + '{}/image_feat.npy'.format(args.dataset))).cuda().float()
        self.text_feats = torch.from_numpy(
            np.load(args.data_path + '{}/text_feat.npy'.format(args.dataset))).cuda().float()
        self.n_user = self.ui_graph.shape[0]
        self.n_item = self.ui_graph.shape[1]
        # print("graph:",self.n_item)
        self.user_emb = nn.Embedding(self.n_user, self.emb_size)
        self.item_emb = nn.Embedding(self.n_item, self.emb_size)
        nn.init.xavier_uniform_(self.item_emb.weight)
        nn.init.xavier_uniform_(self.user_emb.weight)
        # Build the symmetric (n_user+n_item)^2 bipartite adjacency
        # A = [[0, R], [R^T, 0]] and normalize it as D^{-1/2} A D^{-1/2}.
        A = sp.dok_matrix((self.n_user + self.n_item, self.n_user + self.n_item), dtype=np.float32)
        A = A.tolil()
        R = self.ui_graph.todok()
        A[:self.n_user, self.n_user:] = R
        A[self.n_user:, :self.n_user] = R.T
        sumArr = (A > 0).sum(axis=1)  # per-node degree
        diag = np.array(sumArr.flatten())[0] + 1e-7  # epsilon avoids 0^-0.5 for isolated nodes
        diag = np.power(diag, -0.5)
        D = sp.diags(diag)
        L = D * A * D  # normalized Laplacian-like propagation matrix
        # Converted to a sparse torch tensor (device handling is inside
        # matrix_to_tensor — presumably CUDA; confirm in functions.py).
        self.L = matrix_to_tensor(sp.coo_matrix(L))


        # Keep the raw (user, item) edge list for per-epoch edge dropout.
        self.ui_graph = self.ui_graph.tocoo()  #
        self.edge_indices = torch.from_numpy(np.vstack((self.ui_graph.row, self.ui_graph.col)).astype(np.int64)).cuda()  #
        shape = torch.Size(self.ui_graph.shape)
        # Per-edge normalization weights (semantics defined by calculate_pre).
        self.edge_values = calculate_pre(self.edge_indices,shape)

        self.h_layers = args.hyper_graph_layers
        # Item-item similarity graph: weighted mix of text (0.9) and image (0.1).
        txt_item_graph = create_item_graph(self.text_feats)
        img_item_graph = create_item_graph(self.image_feats)
        self.item_graph = 0.9*txt_item_graph+0.1*img_item_graph
        #self.item_graph = sp.coo_matrix(item_graph)
        # Learned projections of each modality into the embedding space.
        self.txt_mlp = MLP(self.text_feats.shape[1],args.emb_size)
        self.img_mlp = MLP(self.image_feats.shape[1],args.emb_size)

        # Multimodal hypergraph: text and image incidence matrices are
        # concatenated along the hyperedge dimension, then normalized.
        txt_hyper_graph = hyper_graph_create(self.text_feats)
        img_hyper_graph = hyper_graph_create(self.image_feats)
        hyper_graph = torch.cat([txt_hyper_graph, img_hyper_graph], dim=1)
        self.hyper_graph = hyper_normal_torch(hyper_graph)

    def pre_epoch_processing(self):
        """Rebuild ``self.masked_adj`` for the coming epoch by sampling a
        ``keep_rate`` fraction of user-item edges (degree-sensitive dropout).
        """
        # degree-sensitive edge pruning
        degree_len = int(self.edge_values.size(0) * (args.keep_rate))
        # multinomial with replacement=False (the default): edges with larger
        # normalized values are more likely to be kept, each at most once.
        degree_idx = torch.multinomial(self.edge_values, degree_len)
        # random sample
        keep_indices = self.edge_indices[:, degree_idx]
        # norm values
        keep_values = calculate_pre(keep_indices, torch.Size((self.n_user, self.n_item)))
        # duplicated for the symmetric (item->user) copies of each edge
        all_values = torch.cat((keep_values, keep_values))
        # update keep_indices to users/items+self.n_users
        # (shift item ids into the joint user+item node index space)
        keep_indices[1] += self.n_user
        # append flipped edges so the adjacency is symmetric
        all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)
        # NOTE(review): torch.sparse.FloatTensor is deprecated in favor of
        # torch.sparse_coo_tensor.
        self.masked_adj = torch.sparse.FloatTensor(all_indices, all_values, self.L.shape).cuda()

    def forward(self,is_drop):
        """Propagate embeddings and return the model outputs.

        Args:
            is_drop: if True, propagate over the per-epoch edge-dropped
                adjacency (``masked_adj``); otherwise over the full graph.

        Returns:
            (user_embeddings, item_embeddings, projected_image_features,
            projected_text_features).
        """
        # LightGCN-style propagation: average the embeddings of all layers.
        all_emb = torch.cat([self.user_emb.weight, self.item_emb.weight])
        emb_lsit = [all_emb]
        for layer in range(self.n_layers):
            if is_drop:
                all_emb = torch.sparse.mm(self.masked_adj, all_emb)
            else:
                all_emb = torch.sparse.mm(self.L, all_emb)
            emb_lsit.append(all_emb)
        all_emb = torch.mean(torch.stack(emb_lsit, dim=1), dim=1)
        user_all_embeddings, item_all_embeddings = torch.split(all_emb, [self.n_user, self.n_item])
        item_emb = self.item_emb.weight
        # NOTE(review): item_emb is never updated inside this loop, so every
        # iteration recomputes identical item_emb1/item_emb2 — for
        # h_layers > 1 this is redundant work (or a missing feedback step —
        # confirm intent).  If h_layers == 0, item_emb1/item_emb2 are unbound
        # and the line below raises NameError.
        for layer in range(self.h_layers):
            item_emb1 = torch.mm(self.item_graph, item_emb)   # item-item graph pass
            item_emb2 = torch.mm(self.hyper_graph,item_emb)   # hypergraph pass
        item_all_embeddings = item_all_embeddings + item_emb1+item_emb2
        # Project raw modality features into the embedding space for the loss.
        img_emb = self.img_mlp(self.image_feats)
        txt_emb = self.txt_mlp(self.text_feats)
        return user_all_embeddings, item_all_embeddings,img_emb,txt_emb
