from parser1 import args
import numpy as np
import pickle
import scipy.sparse as sp
from scipy.sparse import csr_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F


def matrix_to_tensor(cur_matrix):
    """Convert a scipy sparse matrix to a CUDA float32 sparse COO tensor.

    Args:
        cur_matrix: any scipy.sparse matrix (CSR, DOK, COO, ...).

    Returns:
        torch.Tensor: sparse COO tensor of the same shape on the GPU.
    """
    # isinstance (not type(...) !=) handles subclasses; tocoo() is a no-op
    # copy-free cast when the matrix is already COO.
    if not isinstance(cur_matrix, sp.coo_matrix):
        cur_matrix = cur_matrix.tocoo()
    # Stack (row, col) into the 2 x nnz index layout sparse tensors expect.
    indices = torch.from_numpy(np.vstack((cur_matrix.row, cur_matrix.col)).astype(np.int64))
    values = torch.from_numpy(cur_matrix.data)
    shape = torch.Size(cur_matrix.shape)

    # torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is the
    # supported constructor and lets us set dtype directly.
    return torch.sparse_coo_tensor(indices, values, shape, dtype=torch.float32).cuda()

def graph_drop(graph, keepRate):
    """Randomly drop edges of a sparse graph, rescaling survivors by 1/keepRate.

    Each edge is kept independently with probability ``keepRate`` (inverted
    dropout: kept values are divided by keepRate so the expected adjacency
    is unchanged).

    Args:
        graph: coalesced torch sparse COO tensor.
        keepRate: probability in (0, 1] of keeping each edge.

    Returns:
        torch.Tensor: sparse COO tensor with the surviving edges.
    """
    vals = graph._values()
    idxs = graph._indices()
    edgeNum = vals.size()
    # floor(U[0,1) + keepRate) is 1 with probability keepRate.  The mask must
    # live on the same device as vals/idxs (CUDA in this model) — a CPU mask
    # raises a device-mismatch error when indexing a CUDA tensor.
    mask = ((torch.rand(edgeNum, device=vals.device) + keepRate).floor()).type(torch.bool)
    newVals = vals[mask] / keepRate
    newIdxs = idxs[:, mask]
    # torch.sparse.FloatTensor is deprecated; use torch.sparse_coo_tensor.
    return torch.sparse_coo_tensor(newIdxs, newVals, graph.shape)

class Hyper_only(nn.Module):
    """Multimodal hypergraph recommender.

    Builds an item-item hypergraph from projected image/text features and a
    symmetrically normalized user-item bipartite graph (LightGCN-style), then
    combines graph-propagated ID embeddings with hypergraph item embeddings.
    """

    def __init__(self):
        super(Hyper_only, self).__init__()
        self.emb_size = args.emb_size
        self.n_layers = args.nei_layers
        self.hyper_graph_layers = args.hyper_graph_layers
        # Frozen pre-extracted multimodal item features, moved to GPU once.
        self.image_feats = torch.from_numpy(np.load(args.data_path + '{}/image_feat.npy'.format(args.dataset))).cuda().float()
        self.text_feats = torch.from_numpy(np.load(args.data_path + '{}/text_feat.npy'.format(args.dataset))).cuda().float()
        # Project each modality into the shared embedding space.
        self.img_projection = nn.Linear(self.image_feats.shape[-1], self.emb_size)
        self.txt_projection = nn.Linear(self.text_feats.shape[-1], self.emb_size)
        # NOTE(review): pickle.load is unsafe on untrusted files — this assumes
        # the dataset directory is trusted.
        self.ui_graph = pickle.load(open(args.data_path + args.dataset + '/train_mat', 'rb'))
        self.n_user, self.n_item = self.ui_graph.shape
        self.LeakyRelu = nn.LeakyReLU(0.5)
        self.user_emb = nn.Embedding(self.n_user, self.emb_size)
        self.item_emb = nn.Embedding(self.n_item, self.emb_size)
        self.hyper_item_emb = nn.Embedding(self.n_item, self.emb_size)
        self.hyper_weight = nn.Linear(self.emb_size, self.emb_size)  # 3
        self.Relu = nn.ReLU()
        # nn.Dropout takes a drop probability, hence 1 - keep_rate.
        self.Dropout = nn.Dropout(1 - args.keep_rate)
        nn.init.xavier_uniform_(self.item_emb.weight)
        nn.init.xavier_uniform_(self.user_emb.weight)
        nn.init.xavier_uniform_(self.hyper_item_emb.weight)
        # Build the symmetric bipartite adjacency A = [[0, R], [R^T, 0]] and
        # its normalized Laplacian L = D^{-1/2} A D^{-1/2} (LightGCN norm).
        A = sp.dok_matrix((self.n_user + self.n_item, self.n_user + self.n_item), dtype=np.float32)
        A = A.tolil()
        R = self.ui_graph.todok()
        A[:self.n_user, self.n_user:] = R
        A[self.n_user:, :self.n_user] = R.T
        sumArr = (A > 0).sum(axis=1)
        diag = np.array(sumArr.flatten())[0] + 1e-7  # epsilon guards isolated nodes
        diag = np.power(diag, -0.5)
        D = sp.diags(diag)
        L = D * A * D
        self.L = sp.coo_matrix(L)

    def hyper_normal_torch(self, Hyper_graph):
        """Symmetrically normalize a dense hyper-incidence matrix H (torch).

        Returns D_r^{-1/2} H D_c^{-1/2} D_c^{-1/2} H^T D_r^{-1/2}, a square
        (rows x rows) propagation matrix.
        """
        # Row normalization: D_r^{-1/2}
        rowsum = Hyper_graph.sum(dim=1)
        rowsum = torch.pow(rowsum + 1e-8, -0.5).flatten()
        rowsum[rowsum == float('inf')] = 0.
        rowsum_diag = torch.diag(rowsum)
        # Column normalization: D_c^{-1/2}
        colsum = Hyper_graph.sum(dim=0)
        colsum = torch.pow(colsum + 1e-8, -0.5).flatten()
        colsum[colsum == float('inf')] = 0.
        colsum_diag = torch.diag(colsum)

        normalized_hypergraph = rowsum_diag @ Hyper_graph @ colsum_diag @ colsum_diag @ Hyper_graph.t() @ rowsum_diag

        return normalized_hypergraph

    def hyper_normal(self, Hyper_graph):
        """Scipy/numpy counterpart of hyper_normal_torch for sparse H."""
        rowsum = np.array(Hyper_graph.sum(1))
        rowsum = np.power(rowsum + 1e-8, -0.5).flatten()
        rowsum[np.isinf(rowsum)] = 0.
        rowsum_diag = sp.diags(rowsum)

        colsum = np.array(Hyper_graph.sum(0))
        colsum = np.power(colsum + 1e-8, -0.5).flatten()
        colsum[np.isinf(colsum)] = 0.
        colsum_diag = sp.diags(colsum)
        return rowsum_diag * Hyper_graph * colsum_diag * colsum_diag * Hyper_graph.T * rowsum_diag

    def forward(self, is_val=True):
        """Return (user_embeddings, item_embeddings) after propagation.

        ``is_val`` is currently unused; kept for caller compatibility.
        """
        # Project + activate + dropout each modality, concatenate into the
        # hyper-incidence matrix, then normalize it into (n_item, n_item).
        image_feats = self.Dropout(self.Relu(self.img_projection(self.image_feats)))
        text_feats = self.Dropout(self.Relu(self.txt_projection(self.text_feats)))
        hyper_graph = torch.cat([image_feats, text_feats], dim=1)
        hyper_graph = self.hyper_normal_torch(hyper_graph)
        # Safe default so hyper_graph_layers == 0 no longer raises NameError
        # below (each loop iteration restarts from hyper_item_emb.weight, so
        # this also matches the original result for >= 1 layers).
        item_hyper_embedding = self.hyper_item_emb.weight
        for _ in range(self.hyper_graph_layers):
            item_hyper_embedding = self.LeakyRelu(torch.mm(hyper_graph, self.hyper_item_emb.weight))
        all_emb = torch.cat([self.user_emb.weight, self.item_emb.weight])
        emb_list = [all_emb]
        # Hoisted out of the loop: the scipy -> CUDA conversion of self.L is
        # loop-invariant; only the stochastic edge drop belongs per layer.
        norm_adj = matrix_to_tensor(self.L)
        for _ in range(self.n_layers):
            all_emb = torch.sparse.mm(graph_drop(norm_adj, args.keep_rate), all_emb)
            emb_list.append(all_emb)
        # Layer-mean aggregation of all propagation depths (incl. layer 0).
        all_emb = torch.mean(torch.stack(emb_list, dim=1), dim=1)
        user_all_embeddings, item_all_embeddings = torch.split(all_emb, [self.n_user, self.n_item])
        # Fuse hypergraph signal into item embeddings (L2-normalized residual).
        item_all_embeddings = item_all_embeddings + F.normalize(item_hyper_embedding, p=2, dim=1)
        return user_all_embeddings, item_all_embeddings

