import datetime
import math
import pdb

import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from aggregator import LocalAggregator, GlobalAggregator
from torch.nn import Module, Parameter
import torch.nn.functional as F


# Core implementation of the model
class CombineGraph(Module):
    """Session-based recommendation model that fuses a session-level (local)
    item representation with a global neighbor-graph representation, then
    scores all candidate items per session.
    """

    def __init__(self, opt, num_node, adj_all, num, max_seq_len=200):
        """
        :param opt: hyper-parameter namespace (batch_size, hiddenSize,
            dropout_local, dropout_global, n_iter, n_sample, alpha, activate,
            dropout_gcn, lr, l2, lr_dc_step, lr_dc)
        :param num_node: total number of items (size of the embedding table)
        :param adj_all: neighbor table; row index is an itemId, the row holds
            that item's neighbor itemIds
        :param num: occurrence counts aligned with ``adj_all``
        :param max_seq_len: size of the (reverse) positional embedding table,
            i.e. the longest session supported (previously hard-coded to 200)
        """
        super(CombineGraph, self).__init__()
        self.opt = opt

        self.batch_size = opt.batch_size
        self.num_node = num_node
        # hidden size of item / session representations
        self.dim = opt.hiddenSize
        # dropout rates for the local and global branches
        self.dropout_local = opt.dropout_local
        self.dropout_global = opt.dropout_global
        # number of global aggregation hops
        self.hop = opt.n_iter
        self.sample_num = opt.n_sample
        # neighbor table: row index is an itemId, row holds neighbor itemIds
        self.adj_all = trans_to_cuda(torch.Tensor(adj_all)).long()
        # occurrence counts of the neighbors stored in adj_all
        self.num = trans_to_cuda(torch.Tensor(num)).float()

        # Aggregators
        # session-level aggregator (a single layer)
        self.local_agg = LocalAggregator(self.dim, self.opt.alpha, dropout=0.0)
        # one global aggregator per hop
        self.global_agg = []
        for i in range(self.hop):
            if opt.activate == 'relu':
                agg = GlobalAggregator(self.dim, opt.dropout_gcn, act=torch.relu)
            else:
                agg = GlobalAggregator(self.dim, opt.dropout_gcn, act=torch.tanh)
            self.add_module('agg_gcn_{}'.format(i), agg)
            self.global_agg.append(agg)

        # Item representation & position representation
        self.embedding = nn.Embedding(num_node, self.dim)
        # reverse positional embeddings; the table size bounds session length
        self.pos_embedding = nn.Embedding(max_seq_len, self.dim)

        # Parameters
        self.w_1 = nn.Parameter(torch.Tensor(2 * self.dim, self.dim))
        self.w_2 = nn.Parameter(torch.Tensor(self.dim, 1))
        self.glu1 = nn.Linear(self.dim, self.dim)
        self.glu2 = nn.Linear(self.dim, self.dim, bias=False)
        self.linear_transform = nn.Linear(self.dim, self.dim, bias=False)

        self.leakyrelu = nn.LeakyReLU(opt.alpha)
        self.loss_function = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.Adam(self.parameters(), lr=opt.lr, weight_decay=opt.l2)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=opt.lr_dc_step, gamma=opt.lr_dc)

        self.reset_parameters()

    def reset_parameters(self):
        """Uniformly initialize every parameter in [-1/sqrt(dim), 1/sqrt(dim)]."""
        stdv = 1.0 / math.sqrt(self.dim)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def sample(self, target, n_sample):
        """Return the stored neighbors of each target item and their counts.

        NOTE(review): ``n_sample`` is currently unused — the original random
        sub-sampling was disabled, so ALL stored neighbors are returned. The
        parameter is kept for interface compatibility.

        :param target: tensor of target itemIds (any shape; flattened here)
        :param n_sample: intended number of neighbors to sample (unused)
        :return: (neighbor itemIds, neighbor occurrence counts)
        """
        return self.adj_all[target.view(-1)], self.num[target.view(-1)]

    def compute_scores(self, hidden, mask):
        """Score every candidate item for each session.

        :param hidden: final item representations, batch_size*seq_len*dim
        :param mask: sequence mask (1 = real item, 0 = padding), batch_size*seq_len
        :return: batch_size*(num_node-1) prediction scores per candidate item
        """
        # batch_size*seq_len*1
        mask = mask.float().unsqueeze(-1)

        batch_size = hidden.shape[0]
        # renamed from `len` — do not shadow the builtin
        seq_len = hidden.shape[1]
        # reverse positional embeddings; the input is assumed to already be
        # reversed upstream, so the first seq_len rows are used directly
        # — TODO confirm against the data pipeline
        pos_emb = self.pos_embedding.weight[:seq_len]
        # broadcast to batch_size*seq_len*dim
        pos_emb = pos_emb.unsqueeze(0).repeat(batch_size, 1, 1)

        # masked mean of the session's item vectors (s' in Eq. 12), batch_size*dim
        hs = torch.sum(hidden * mask, -2) / torch.sum(mask, 1)
        # batch_size*seq_len*dim
        hs = hs.unsqueeze(-2).repeat(1, seq_len, 1)
        # concat + linear transform of Eq. 11 (no bias term b3 here)
        nh = torch.matmul(torch.cat([pos_emb, hidden], -1), self.w_1)
        # Eq. 11
        nh = torch.tanh(nh)
        # the sum in Eq. 13 (again without bias terms)
        nh = torch.sigmoid(self.glu1(nh) + self.glu2(hs))
        # beta of Eq. 13, zeroed on padding positions
        beta = torch.matmul(nh, self.w_2)
        beta = beta * mask
        # Eq. 14: attention-weighted session representation
        select = torch.sum(beta * hidden, 1)

        # candidate item embeddings; row 0 is padding so it is skipped
        b = self.embedding.weight[1:]  # n_nodes x latent_size
        # Eq. 15: dot-product scores against every candidate
        scores = torch.matmul(select, b.transpose(1, 0))
        return scores

    def forward(self, inputs, adj, mask_item, item):
        """Compute the final item representations for a batch of sessions.

        :param inputs: raw (non-deduplicated) session sequences,
            batch_size*seq_len. The raw order is needed because the global
            neighbor lookup is per position (unlike SR-GNN's adjacency-matrix
            construction, which works on the deduplicated graph).
        :param adj: per-session neighbor matrix fed to the local aggregator
            (not a plain adjacency matrix)
        :param mask_item: sequence mask, batch_size*seq_len
        :param item: deduplicated item list per session (zero padded)
        :return: item representations, batch_size*seq_len*dim
        """
        batch_size = inputs.shape[0]
        seqs_len = inputs.shape[1]
        # initial embeddings of the raw session sequences
        h = self.embedding(inputs)

        # local: session-level item representation
        h_local = self.local_agg(h, adj, mask_item)

        # global: collect multi-hop neighborhoods
        # item_neighbors[0] is the batch itself, batch_size*seq_len
        item_neighbors = [inputs]
        weight_neighbors = []
        support_size = seqs_len

        # with hop = 1 (the default) this runs once and appends the direct
        # neighbors of every item in the batch plus their occurrence counts
        for i in range(1, self.hop + 1):
            item_sample_i, weight_sample_i = self.sample(item_neighbors[-1], self.sample_num)
            support_size *= self.sample_num
            item_neighbors.append(item_sample_i.view(batch_size, support_size))
            weight_neighbors.append(weight_sample_i.view(batch_size, support_size))

        # entity_vectors[0]: batch_size*seq_len*dim (the sessions themselves);
        # entity_vectors[k]: batch_size*(seq_len*sample_num^k)*dim (hop-k neighbors)
        entity_vectors = [self.embedding(i) for i in item_neighbors]
        # weight_vectors[k]: batch_size*(seq_len*sample_num^(k+1))
        weight_vectors = weight_neighbors

        session_info = []
        # zero out padding positions before averaging:
        # mask_item is batch_size*seq_len -> unsqueeze to batch_size*seq_len*1
        item_emb = self.embedding(item) * mask_item.float().unsqueeze(-1)

        # masked mean of the session's item embeddings (Eq. 3),
        # used as s in Eq. 2 downstream
        sum_item_emb = torch.sum(item_emb, 1) / torch.sum(mask_item.float(), -1).unsqueeze(-1)

        # batch_size*1*dim
        sum_item_emb = sum_item_emb.unsqueeze(-2)
        for i in range(self.hop):
            # broadcast the session mean over every entity at this hop
            session_info.append(sum_item_emb.repeat(1, entity_vectors[i].shape[1], 1))

        for n_hop in range(self.hop):
            entity_vectors_next_iter = []
            shape = [batch_size, -1, self.sample_num, self.dim]
            for hop in range(self.hop - n_hop):
                # aggregator for the current iteration
                aggregator = self.global_agg[n_hop]
                vector = aggregator(self_vectors=entity_vectors[hop],
                                    # neighbor embeddings reshaped to
                                    # batch_size*?*sample_num*dim
                                    neighbor_vector=entity_vectors[hop + 1].view(shape),
                                    masks=None,
                                    batch_size=batch_size,
                                    # neighbor counts, batch_size*?*sample_num
                                    neighbor_weight=weight_vectors[hop].view(batch_size, -1, self.sample_num),
                                    # session mean s of Eq. 2
                                    extra_vector=session_info[hop])
                # representations of this hop feed the next iteration
                entity_vectors_next_iter.append(vector)
            entity_vectors = entity_vectors_next_iter

        # after the final iteration only the root level remains (index 0)
        h_global = entity_vectors[0].view(batch_size, seqs_len, self.dim)

        # combine: dropout each view, then element-wise sum
        h_local = F.dropout(h_local, self.dropout_local, training=self.training)
        h_global = F.dropout(h_global, self.dropout_global, training=self.training)
        output = h_local + h_global

        # final node representations, batch_size*seq_len*dim
        return output


def trans_to_cuda(variable):
    """Move a tensor/module to the GPU when CUDA is available; no-op otherwise.

    The previous unconditional ``torch.cuda.memory_summary()`` call was a
    debugging leftover: its return string was discarded (nothing was printed),
    it ran on every single transfer, and it raises on CPU-only builds.
    Removed.
    """
    if torch.cuda.is_available():
        return variable.cuda()
    return variable


def trans_to_cpu(variable):
    """Return *variable* moved to the CPU (a no-op on CPU-only machines)."""
    return variable.cpu() if torch.cuda.is_available() else variable


def forward(model, data):
    """Unpack one mini-batch, run the model, and score the candidates.

    :param model: a CombineGraph instance
    :param data: tuple of (alias_inputs, adj, items, mask, targets, inputs)
    :return: (targets, scores) where scores is batch_size*item_num
    """
    alias_inputs, adj, items, mask, targets, inputs = data
    alias_inputs = trans_to_cuda(alias_inputs).long()
    items = trans_to_cuda(items).long()
    adj = trans_to_cuda(adj).float()
    mask = trans_to_cuda(mask).long()
    inputs = trans_to_cuda(inputs).long()

    # node representations, batch_size*seq_len*hidden_size
    hidden = model(items, adj, mask, inputs)
    # re-order each session's node vectors back into sequence order via the
    # alias indices, then stack into batch_size*seq_len*hidden_size
    seq_hidden = torch.stack(
        [hidden[idx][alias_inputs[idx]]
         for idx in torch.arange(len(alias_inputs)).long()])
    return targets, model.compute_scores(seq_hidden, mask)


def train_test(model, train_data, test_data):
    """Run one training epoch, then evaluate P@20 and MRR@20 on the test set.

    :param model: CombineGraph instance (owns its optimizer/scheduler/loss)
    :param train_data: dataset accepted by torch DataLoader
    :param test_data: dataset accepted by torch DataLoader
    :return: [hit_rate_percent, mrr_percent]
    """
    print('start training: ', datetime.datetime.now())
    model.train()
    total_loss = 0.0
    # num_workers reduced from 4 to 3 so the loader runs on a local machine
    train_loader = torch.utils.data.DataLoader(train_data, num_workers=3, batch_size=model.batch_size,
                                               shuffle=True, pin_memory=True)
    for data in tqdm(train_loader):
        # data is: alias_inputs, adj, items, mask, target, u_input
        model.optimizer.zero_grad()
        targets, scores = forward(model, data)
        targets = trans_to_cuda(targets).long()
        # targets are 1-based itemIds while score columns are 0-based, so
        # shift by one (embedding row 0 is padding)
        loss = model.loss_function(scores, targets - 1)
        loss.backward()
        model.optimizer.step()
        # .item() extracts the Python scalar; accumulating the tensor itself
        # would keep every batch's autograd graph alive (memory leak)
        total_loss += loss.item()
    print('\tLoss:\t%.3f' % total_loss)
    model.scheduler.step()

    print('start predicting: ', datetime.datetime.now())
    model.eval()
    test_loader = torch.utils.data.DataLoader(test_data, num_workers=4, batch_size=model.batch_size,
                                              shuffle=False, pin_memory=True)
    result = []
    hit, mrr = [], []
    # inference only — no_grad avoids building autograd graphs during eval
    with torch.no_grad():
        for data in test_loader:
            # scores: batch_size*item_num prediction scores per session
            targets, scores = forward(model, data)
            # indices of the top-20 candidates; column index = itemId - 1
            sub_scores = scores.topk(20)[1]
            sub_scores = trans_to_cpu(sub_scores).detach().numpy()
            targets = targets.numpy()
            # (the original also zipped test_data.mask here but never used it)
            for score, target in zip(sub_scores, targets):
                hit.append(np.isin(target - 1, score))
                rank = np.where(score == target - 1)[0]
                # reciprocal rank of the target within the top-20, else 0
                mrr.append(0 if len(rank) == 0 else 1 / (rank[0] + 1))

    result.append(np.mean(hit) * 100)
    result.append(np.mean(mrr) * 100)

    return result
