from parser1 import args
import numpy as np
import pickle
import scipy.sparse as sp
from scipy.sparse import csr_matrix
import torch
import torch.nn as nn
import torch.nn.functional as F


class Discriminator(nn.Module):
    """WGAN-style critic: maps feature vectors to one unbounded realness score.

    There is deliberately no terminal Sigmoid — the raw score is consumed by
    a Wasserstein-style loss (mean of scores), not by BCE.
    """

    def __init__(self, in_features):
        super().__init__()
        layers = []
        # LeakyReLU is chosen because GAN losses are prone to vanishing gradients.
        for n_in, n_out in ((in_features, 256), (256, 128)):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.BatchNorm1d(n_out))
            layers.append(nn.LeakyReLU(0.2))
        layers.append(nn.Linear(128, 1))  # raw critic score, no activation
        self.disc = nn.Sequential(*layers)

    def forward(self, data):
        """Score a batch: real embeddings give D(x); generated ones give D(G(z))."""
        return self.disc(data)

class Generator(nn.Module):
    """Generator MLP: expands dim -> 2*dim -> 2*dim, then contracts back to dim.

    Every stage is Linear -> BatchNorm1d -> LeakyReLU(0.2); the output keeps
    the input dimensionality so generated vectors live in the same embedding
    space as the originals.
    """

    def __init__(self, dim):
        super().__init__()
        hidden = int(dim * 2)
        self.net = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.BatchNorm1d(hidden),
            nn.LeakyReLU(0.2),

            nn.Linear(hidden, hidden),
            nn.BatchNorm1d(hidden),
            nn.LeakyReLU(0.2),

            nn.Linear(hidden, dim),
            nn.BatchNorm1d(dim),
            nn.LeakyReLU(0.2),
        )

    def forward(self, x):
        # Cast to float32 so integer/double inputs work with the linear layers.
        return self.net(x.float())




class Distill(nn.Module):
    """Distillation MLP: dim -> 2*dim -> 2*dim -> dim with LeakyReLU(0.5).

    Unlike Generator, each stage is Linear -> LeakyReLU -> BatchNorm1d ->
    Dropout, and the final layer has no BatchNorm/Dropout.
    """

    def __init__(self, dim):
        super().__init__()
        hidden = int(dim * 2)
        drop_p = 1 - args.keep_rate  # args.keep_rate is the *keep* probability

        self.net = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.LeakyReLU(0.5),
            nn.BatchNorm1d(hidden),
            nn.Dropout(drop_p),

            nn.Linear(hidden, hidden),
            nn.LeakyReLU(0.5),
            nn.BatchNorm1d(hidden),
            nn.Dropout(drop_p),

            nn.Linear(hidden, dim),
            nn.LeakyReLU(0.5),
        )

    def forward(self, x):
        # Cast to float32 so integer/double inputs work with the linear layers.
        return self.net(x.float())



def matrix_to_tensor(cur_matrix, device='cuda'):
    """Convert a SciPy sparse matrix to a torch sparse COO float32 tensor.

    Fixes two issues with the original implementation:
    - the legacy ``torch.sparse.FloatTensor`` constructor is deprecated and
      rejects float64 values (SciPy's default dtype) before the subsequent
      dtype cast could run; ``torch.sparse_coo_tensor`` casts up front;
    - the target device is now a parameter (default ``'cuda'`` preserves the
      original behaviour for existing callers).

    Args:
        cur_matrix: any scipy.sparse matrix; converted to COO if necessary.
        device: torch device for the result (e.g. 'cuda' or 'cpu').

    Returns:
        A sparse COO tensor of dtype float32 on ``device``.
    """
    if not isinstance(cur_matrix, sp.coo_matrix):
        cur_matrix = cur_matrix.tocoo()
    indices = torch.from_numpy(
        np.vstack((cur_matrix.row, cur_matrix.col)).astype(np.int64))
    values = torch.from_numpy(cur_matrix.data)
    shape = torch.Size(cur_matrix.shape)

    return torch.sparse_coo_tensor(indices, values, shape,
                                   dtype=torch.float32).to(device)

def graph_drop(graph, keepRate):
    """Randomly drop edges of a sparse graph (inverted-dropout style).

    Each edge survives with probability ``keepRate``; surviving edge weights
    are divided by ``keepRate`` so the expected adjacency matrix equals the
    input. Returns a new sparse FloatTensor of the same shape.
    """
    edge_vals = graph._values()
    edge_idx = graph._indices()
    n_edges = edge_vals.size()
    # floor(U + keepRate) equals 1 with probability keepRate for U ~ Uniform[0, 1).
    keep_mask = torch.floor(torch.rand(n_edges) + keepRate).type(torch.bool)
    kept_vals = edge_vals[keep_mask] / keepRate
    kept_idx = edge_idx[:, keep_mask]
    return torch.sparse.FloatTensor(kept_idx, kept_vals, graph.shape)

class Gan_Att(nn.Module):
    """GAN-augmented multimodal graph recommender.

    Pipeline (see ``forward``):
      1. Project pre-extracted image/text item features and build an
         item-item hypergraph propagation matrix from them.
      2. Adversarially distil the hypergraph item embeddings into the ID
         embedding space with a WGAN-style generator/critic pair
         (RMSprop optimizers, weight clipping, negated-mean critic scores).
      3. Run LightGCN-style propagation over the normalized user-item graph
         and fuse the two embedding views via multi-head self-attention.

    NOTE(review): when ``forward(is_val=False)`` is called, the GAN
    sub-networks are optimised *inside* the forward pass (both optimizers
    step and the critic weights are clamped) — callers must account for
    this side effect.
    """

    def __init__(self):
        super(Gan_Att,self).__init__()
        self.emb_size = args.emb_size  # latent embedding dimension
        self.n_layers = args.n_layers  # propagation depth after attention fusion
        self.nei_layers = args.nei_layers  # propagation depth on raw ID embeddings
        self.hyper_graph_layers = args.hyper_graph_layers  # hypergraph propagation depth
        # Pre-extracted multimodal item features, moved to GPU as float32.
        self.image_feats = torch.from_numpy(np.load(args.data_path + '{}/image_feat.npy'.format(args.dataset))).cuda().float()
        self.text_feats = torch.from_numpy(np.load(args.data_path + '{}/text_feat.npy'.format(args.dataset))).cuda().float()
        # Linear projections of each modality into the shared embedding space.
        self.img_projection = nn.Linear(self.image_feats.shape[-1],self.emb_size)
        self.txt_projection = nn.Linear(self.text_feats.shape[-1],self.emb_size)
        # Sparse user-item interaction matrix (n_user x n_item).
        # NOTE(review): the file handle passed to pickle.load is never closed.
        self.ui_graph = pickle.load(open(args.data_path + args.dataset + '/train_mat', 'rb'))
        self.n_user,self.n_item = self.ui_graph.shape
        #print("graph:",self.n_item)
        self.LeakyRelu = nn.LeakyReLU(0.5)
        self.user_emb = nn.Embedding(self.n_user,self.emb_size)
        self.item_emb = nn.Embedding(self.n_item,self.emb_size)
        self.hyper_weight = nn.Linear(self.emb_size, self.emb_size)  # per-layer transform in hypergraph propagation
        self.Relu = nn.ReLU()
        self.Sigmoid = nn.Sigmoid()
        self.Dropout = nn.Dropout(1-args.keep_rate)  # args.keep_rate is the keep probability
        self.c = 0.01  # WGAN weight-clipping bound for the critic

        self.Gen_model = Generator(self.emb_size)
        self.Dis_model = Discriminator(self.emb_size)
        self.label_loss = nn.MSELoss()  # distillation term: ties G(z) to the hypergraph embeddings
        self.Dis_loss = nn.BCELoss(reduction="mean")  # unused: the BCE branch is commented out in forward
        self.Dis_optimizer = torch.optim.RMSprop(self.Dis_model.parameters(), lr=args.d_lr)
        self.Gen_optimizer = torch.optim.RMSprop(self.Gen_model.parameters(), lr=args.d_lr)


        nn.init.xavier_uniform_(self.item_emb.weight)
        nn.init.xavier_uniform_(self.user_emb.weight)
        # Build the symmetrically normalized adjacency L = D^-1/2 A D^-1/2 of
        # the bipartite user-item graph (user rows first, item rows second).
        A = sp.dok_matrix((self.n_user + self.n_item, self.n_user + self.n_item), dtype=np.float32)
        A = A.tolil()
        R = self.ui_graph.todok()
        A[:self.n_user, self.n_user:] = R
        A[self.n_user:, :self.n_user] = R.T
        sumArr = (A > 0).sum(axis=1)
        diag = np.array(sumArr.flatten())[0] + 1e-7  # epsilon avoids division by zero for isolated nodes
        diag = np.power(diag, -0.5)
        D = sp.diags(diag)
        L = D * A * D
        self.L = sp.coo_matrix(L)
        initializer = nn.init.xavier_uniform_
        self.weight_dict = nn.ParameterDict({  # projections used by multi_head_self_attention
            'w_q': nn.Parameter(initializer(torch.empty([self.emb_size, self.emb_size]))),
            'w_k': nn.Parameter(initializer(torch.empty([self.emb_size, self.emb_size]))),
            #'w_v': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
            #'w_self_attention_item': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
            #'w_self_attention_user': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
            'w_self_attention_cat': nn.Parameter(initializer(torch.empty([args.head_num * self.emb_size, self.emb_size]))),
        })
        # Holds the two embedding views per entity: 'nei' (graph propagation)
        # and 'super' (generator/hypergraph); filled in forward().
        self.embedding_dict = {'user': {}, 'item': {}}

    def hyper_normal_torch(self,Hyper_graph):
        """Build a normalized square propagation matrix from a dense incidence matrix.

        Computes ``Dr @ H @ Dc @ Dc @ H.T @ Dr`` with
        Dr = diag(rowsum(H)^-1/2) and Dc = diag(colsum(H)^-1/2); the
        ``Dc @ Dc`` product supplies a diag(colsum^-1) factor, i.e. the
        hypergraph operator D_v^-1/2 H D_e^-1 H^T D_v^-1/2. The result is
        (n_rows x n_rows), here item-by-item.
        """
        # Row Normalization
        rowsum = Hyper_graph.sum(dim=1)
        rowsum = torch.pow(rowsum + 1e-8, -0.5).flatten()
        rowsum[rowsum == float('inf')] = 0.  # defensive; the 1e-8 epsilon already prevents inf
        rowsum_diag = torch.diag(rowsum)
        # Column Normalization
        colsum = Hyper_graph.sum(dim=0)
        colsum = torch.pow(colsum + 1e-8, -0.5).flatten()
        colsum[colsum == float('inf')] = 0.
        colsum_diag = torch.diag(colsum)

        # Normalization Operation
        normalized_hypergraph = rowsum_diag @ Hyper_graph @ colsum_diag @ colsum_diag @ Hyper_graph.t() @ rowsum_diag

        return normalized_hypergraph  # Convert back to NumPy array if needed
    def hyper_normal(self, Hyper_graph):
        """SciPy sparse counterpart of hyper_normal_torch (same formula)."""
        rowsum = np.array(Hyper_graph.sum(1))
        rowsum = np.power(rowsum + 1e-8, -0.5).flatten()
        rowsum[np.isinf(rowsum)] = 0.
        rowsum_diag = sp.diags(rowsum)

        colsum = np.array(Hyper_graph.sum(0))
        colsum = np.power(colsum + 1e-8, -0.5).flatten()
        colsum[np.isinf(colsum)] = 0.
        colsum_diag = sp.diags(colsum)
        return rowsum_diag * Hyper_graph * colsum_diag * colsum_diag * Hyper_graph.T * rowsum_diag

    def para_dict_to_tenser(self, para_dict):
        """
        Stack the values of a dict (or nn.ParameterDict) of equally-shaped
        tensors into one tensor along a new leading axis.

        (The 'tenser' typo in the name is kept to avoid breaking callers.)

        :param para_dict: nn.ParameterDict()
        :return: tensor
        """
        tensors = []

        for beh in para_dict.keys():
            tensors.append(para_dict[beh])
        tensors = torch.stack(tensors, dim=0)

        return tensors

    def multi_head_self_attention(self, trans_w, embedding_t_1, embedding_t):
        """Multi-head attention over embedding views.

        Queries come from ``embedding_t``, keys/values from ``embedding_t_1``
        (both dicts of (N x emb_size) tensors; each entry is one view).

        Args:
            trans_w: dict providing 'w_q', 'w_k', 'w_self_attention_cat'.

        Returns:
            (Z, att): Z of shape (views, N, emb_size) — attended and
            re-projected embeddings — and the detached attention weights.

        Assumes emb_size is divisible by args.head_num (int(d_h) truncates
        otherwise) — TODO confirm upstream validation.
        """

        q = self.para_dict_to_tenser(embedding_t)
        v = k = self.para_dict_to_tenser(embedding_t_1)
        #q = embedding_t
        #v = k = embedding_t_1
        beh, N, d_h = q.shape[0], q.shape[1], self.emb_size / args.head_num  # d_h: per-head dimension

        Q = torch.matmul(q, trans_w['w_q'])
        K = torch.matmul(k, trans_w['w_k'])
        V = v

        # Split into heads: (head, view, N, d_h).
        Q = Q.reshape(beh, N, args.head_num, int(d_h)).permute(2, 0, 1, 3)
        K = K.reshape(beh, N, args.head_num, int(d_h)).permute(2, 0, 1, 3)

        # Broadcast so every query view attends over every key view.
        Q = torch.unsqueeze(Q, 2)
        K = torch.unsqueeze(K, 1)
        V = torch.unsqueeze(V, 1)

        # Scaled dot-product per node; softmax over the key-view axis (dim=2).
        att = torch.mul(Q, K) / torch.sqrt(torch.tensor(d_h))
        att = torch.sum(att, dim=-1)
        att = torch.unsqueeze(att, dim=-1)
        att = F.softmax(att, dim=2)

        Z = torch.mul(att, V)
        Z = torch.sum(Z, dim=2)

        # Concatenate head outputs along the feature axis, then project back
        # from head_num*emb_size to emb_size.
        Z_list = [value for value in Z]
        Z = torch.cat(Z_list, -1)
        Z = torch.matmul(Z, self.weight_dict['w_self_attention_cat'])

        #args.model_cat_rate * F.normalize(Z, p=2, dim=2)
        return Z, att.detach()

    def forward(self,is_val=True):
        """Produce the final user and item embeddings.

        Args:
            is_val: when False, additionally performs one adversarial update
                (critic, then generator) before producing embeddings; when
                True, the GAN sub-networks are only switched to eval mode.

        Returns:
            (user_all_embeddings, item_all_embeddings)
        """
        # 1. Item-item hypergraph from projected multimodal features.
        image_feats = self.Dropout(self.Relu(self.img_projection(self.image_feats)))
        text_feats = self.Dropout(self.Relu(self.txt_projection(self.text_feats)))
        hyper_graph = torch.cat([image_feats, text_feats], dim=1)
        #print(super_graph[0:5])
        hyper_graph = self.hyper_normal_torch(hyper_graph)
        #print(super_graph[0:5])

        # 2. Propagate item ID embeddings over the hypergraph; average the
        # input with all layer outputs.
        item_hyper_list = [self.item_emb.weight]
        for i in range(self.hyper_graph_layers):
            #print(super_graph.shape,item_emb.weight.shape)
            item_hyper_embedding = self.LeakyRelu(torch.mm(hyper_graph, self.item_emb.weight))
            item_hyper_embedding = self.LeakyRelu(self.hyper_weight(item_hyper_embedding))
            item_hyper_list.append(item_hyper_embedding)
        item_hyper_embedding = torch.mean(torch.stack(item_hyper_list, dim=1), dim=1)


        if not is_val:
            # 3. One WGAN-style adversarial step (training only).
            #print(item_super_embedding)

            self.Dis_model.train()
            self.Dis_optimizer.zero_grad()  # clear gradients from the previous critic step
            dx = self.Dis_model(item_hyper_embedding.detach()).view(-1)  # critic score on "real" (hypergraph) embeddings
            #loss_real = self.Dis_loss(dx, torch.ones_like(dx))  # BCE alternative (unused)
            loss_real = -torch.mean(dx)  # WGAN critic loss: push real scores up

            #loss_real.backward()
            #D_x = dx.mean().item()

            gz = self.Gen_model(self.item_emb.weight.detach())
            dgz1 = self.Dis_model(gz.detach())  # detach keeps the generator out of the critic's graph
            #loss_fake = self.Dis_loss(dgz1, torch.zeros_like(dgz1))  # BCE alternative (unused)
            loss_fake = torch.mean(dgz1)  # WGAN critic loss: push generated scores down

            #loss_fake.backward()
            #D_G_z1 = dgz1.mean().item()
            Dloss = (loss_real + loss_fake)/2
            Dloss.backward()
            self.Dis_optimizer.step()  # update the critic

            # Weight clipping enforces the approximate Lipschitz constraint.
            for p in self.Dis_model.parameters():
                p.data.clamp_(-self.c, self.c)

            self.Gen_model.train()
            self.Gen_optimizer.zero_grad()  # clear gradients from the previous generator step
            # Generator update ======================================================
            dgz2 = self.Dis_model(gz)  # re-score: critic weights changed above, so dgz1 is stale
            #Gloss1 = self.Dis_loss(dgz2, torch.ones_like(dgz2))
            Gloss1 = -torch.mean(dgz2)  # adversarial term: fool the updated critic
            Gloss2 = self.label_loss(gz, item_hyper_embedding.detach())  # MSE distillation term
            self.gan_bias = Gloss2.item()  # NOTE(review): only set on training passes (is_val=False)
            Gloss = (Gloss1 + Gloss2)/2
            Gloss.backward()
            self.Gen_optimizer.step()  # update the generator
            #D_G_z2 = dgz2.mean().item()

        else:
            self.Dis_model.eval()
            self.Gen_model.eval()

        # 4. Generator produces the "super" (hypergraph-distilled) view.
        user_hyper_embedding = self.Gen_model(self.user_emb.weight)#-self.gan_bias
        item_hyper_embedding = self.Gen_model(self.item_emb.weight)



        # 5. LightGCN-style propagation of raw ID embeddings with edge dropout.
        all_emb = torch.cat([self.user_emb.weight,self.item_emb.weight])
        emb_lsit = [all_emb]
        for layer in range(self.nei_layers):
            all_emb = torch.sparse.mm(graph_drop(matrix_to_tensor(self.L), args.keep_rate), all_emb)
            emb_lsit.append(all_emb)
        all_emb = torch.mean(torch.stack(emb_lsit, dim=1), dim=1)
        user_nei_embeddings, item_nei_embeddings = torch.split(all_emb, [self.n_user, self.n_item])

        # 6. Fuse the 'nei' and 'super' views with multi-head self-attention.
        self.embedding_dict['user']['nei'] = user_nei_embeddings
        self.embedding_dict['user']['super'] = user_hyper_embedding
        self.embedding_dict['item']['nei'] = item_nei_embeddings
        self.embedding_dict['item']['super'] = item_hyper_embedding
        # print(user_super_embedding[0])
        # print(item_super_embedding[0])
        # print(user_nei_embedding[0])
        # print(item_nei_embedding[0])
        user_attention, _ = self.multi_head_self_attention(self.weight_dict, self.embedding_dict['user'],
                                                           self.embedding_dict['user'])
        item_attention, _ = self.multi_head_self_attention(self.weight_dict, self.embedding_dict['item'],
                                                           self.embedding_dict['item'])
        user_Aemb = user_attention.mean(0)  # average over the view axis
        item_Aemb = item_attention.mean(0)
        # Residual fusion: graph view plus a scaled, L2-normalized attention view.
        user_0_embedding = user_nei_embeddings +  args.model_cat_rate* F.normalize(
            user_Aemb, p=2, dim=1)
        item_0_embedding = item_nei_embeddings +  args.model_cat_rate * F.normalize(
            item_Aemb, p=2, dim=1)

        all_emb = torch.cat([user_0_embedding, item_0_embedding])

        # 7. Final propagation of the fused embeddings.
        # NOTE(review): emb_lsit still holds the layers from step 5, so the
        # mean below mixes pre-fusion and post-fusion embeddings — confirm
        # this is intentional.
        for layer in range(self.n_layers):
            all_emb = torch.sparse.mm(graph_drop(matrix_to_tensor(self.L), args.keep_rate), all_emb)
            emb_lsit.append(all_emb)
        all_emb = torch.mean(torch.stack(emb_lsit, dim=1), dim=1)
        user_all_embeddings, item_all_embeddings = torch.split(all_emb, [self.n_user, self.n_item])


        return user_all_embeddings, item_all_embeddings