"""
@Filename       : transformer.py
@Create Time    : 2020/11/16 8:14
@Author         : Rylynn
@Description    : 

"""

import datetime
import logging
import time
from collections import Counter

import dgl
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
import torch.optim as optim
from dgl.nn import GATConv
from torch.utils.data import DataLoader
from tqdm import tqdm

from evaluate.metric import run_evaluation
from model.transfer.domain_classifier import DomainClassifier
from util.dataloader import DiffuseSequenceDataSet, sequence_collate_fn
from util.preprocess import load_graph

# Log everything (DEBUG and above) to a timestamped file, appending if it exists.
logging.basicConfig(level=logging.DEBUG,  # minimum severity that gets recorded
                    filename='transformer_{}.log'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S')),
                    filemode='a',
                    format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')


class Transformer(nn.Module):
    """Transformer encoder over user diffusion sequences.

    Embeds each padded sequence of user ids (padding id = 0), adds sinusoidal
    position encodings, runs a 6-layer Transformer encoder, mean-pools over
    the valid (non-padded) positions, and projects to per-node logits for
    next-infected-node prediction.

    NOTE(review): ``conv1``/``conv2`` (GAT layers over the social graph) are
    initialised but never used by ``forward``; they are kept to preserve the
    module's parameter/state-dict layout.
    """

    def __init__(self, config):
        """config must provide 'node_num' and 'embed_dim'."""
        super(Transformer, self).__init__()
        self.config = config
        # Social graph and trainable node features, both moved to the GPU.
        self.g, self.feat = self.load_graph()
        activation = nn.LeakyReLU()
        self.conv1 = GATConv(in_feats=config['embed_dim'],
                             out_feats=256,
                             num_heads=8,
                             activation=activation)
        self.conv2 = GATConv(in_feats=8 * 256,
                             out_feats=config['embed_dim'],
                             num_heads=1,
                             activation=activation)
        self.node_num = config['node_num']
        # +1 slot because id 0 is reserved for padding.
        self.user_embed = nn.Embedding(config['node_num'] + 1, config['embed_dim'], padding_idx=0)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=256, nhead=8)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer=self.encoder_layer, num_layers=6)
        self.linear = nn.Linear(config['embed_dim'], config['node_num'])
        self.softmax = nn.Softmax(dim=1)
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, batch_seqs, batch_seqs_length):
        """Return (batch, node_num) logits for the next infected node.

        No softmax is applied: ``loss`` uses CrossEntropyLoss, which expects
        raw logits.
        """
        out = self.encode(batch_seqs, batch_seqs_length)
        out = self.linear(out)
        return out

    def load_graph(self):
        """Load the social graph and create trainable node features.

        Returns:
            (dgl_graph_on_gpu, (node_num + 1, embed_dim) feature tensor on GPU).
        """
        g, node2id = load_graph('../../data_forest/twitter/edges.txt')
        g.add_node(0)  # dedicated node for the padding id
        # torch.autograd.Variable is deprecated; a plain tensor created with
        # requires_grad=True behaves identically.
        feat = torch.randn(self.config['node_num'] + 1, self.config['embed_dim'],
                           requires_grad=True)
        g = dgl.from_networkx(g)
        g = dgl.add_self_loop(g)

        g = g.to('cuda')
        feat = feat.cuda()
        return g, feat

    def get_sinusoid_encoding_table(self, n_position, d_model):
        """Build the standard sinusoidal position-encoding table.

        Returns:
            FloatTensor of shape (n_position, d_model) where even dimensions
            hold sin(pos / 10000^(2i/d_model)) and odd dimensions the cosine.
        """
        positions = np.arange(n_position)[:, None]  # (n_position, 1)
        dim_idx = np.arange(d_model)[None, :]       # (1, d_model)
        # Dimension pairs (2i, 2i+1) share the same frequency (hence // 2).
        table = positions / np.power(10000, 2 * (dim_idx // 2) / d_model)
        table[:, 0::2] = np.sin(table[:, 0::2])  # even dims -> sin
        table[:, 1::2] = np.cos(table[:, 1::2])  # odd dims  -> cos
        return torch.FloatTensor(table)

    def encode(self, batch_seqs, batch_seqs_length):
        """Encode padded sequences and mean-pool over the valid positions.

        Args:
            batch_seqs: (batch, max_len) LongTensor of user ids, 0-padded.
            batch_seqs_length: iterable of true sequence lengths.

        Returns:
            (batch, embed_dim) pooled sequence representations.
        """
        max_len = batch_seqs.shape[1]
        # Vectorised validity mask: mask[b, t, 0] = 1 iff t < length[b]
        # (replaces a Python loop of per-row tensor allocations).
        lengths = torch.tensor([int(l) for l in batch_seqs_length])
        mask_mtx = (torch.arange(max_len).unsqueeze(0) < lengths.unsqueeze(1)).float().unsqueeze(-1)
        mask_mtx = mask_mtx.cuda()

        # d_model of the positional table must match the user-embedding width
        # for the addition below, so use config['embed_dim'] (was a hard-coded 256).
        position_embed = self.get_sinusoid_encoding_table(max_len, self.config['embed_dim'])
        # Broadcast the (1, max_len, d) table across the batch instead of
        # concatenating one copy per sample.
        batch_position_embed = position_embed.unsqueeze(0).cuda()
        batch_seqs_embed = self.user_embed(batch_seqs)
        seqs_embed = batch_position_embed + batch_seqs_embed
        out = self.transformer_encoder(seqs_embed)
        # TODO: Time-decay effect
        out = out * mask_mtx  # zero out padded positions

        # Mean over valid timesteps only (divide by true length, not max_len).
        out = torch.sum(out, dim=1) / torch.sum(mask_mtx, dim=1)
        return out

    def query_embed(self, key):
        """Look up user embeddings for the given id tensor."""
        return self.user_embed(key)

    def loss(self, probs, true_nodes):
        """Cross-entropy between next-node logits and true node indices."""
        return self.cross_entropy(probs, true_nodes)

def load_story_dict(path='../../data/digg/train.pkl', threshold=30):
    """Map each story id in the training set to a binary domain label.

    A story contributing more than ``threshold`` samples is labelled 1,
    the rest 0. The defaults reproduce the original hard-coded behaviour.

    Args:
        path: pickle file consumed by DiffuseSequenceDataSet.
        threshold: count above which (strictly) a story is labelled 1.

    Returns:
        dict mapping story id -> 0/1 domain label.
    """
    train_dataset = DiffuseSequenceDataSet(path)

    # The dataset yields one (story, seq, next_node) sample per prefix;
    # Counter replaces the manual get()/increment bookkeeping.
    story_count = Counter(story for story, _seq, _next_node in train_dataset)

    story_dict = {story: 1 if count > threshold else 0 for story, count in story_count.items()}
    return story_dict


def train_transformer():
    """Train the Transformer diffusion model on the Twitter dataset.

    Runs next-node prediction training for config['epoches'] epochs,
    evaluates on the test split every 5 epochs, and saves the final model.
    The adversarial domain-classifier phase is currently disabled
    (commented out below) but its objects are still constructed so the
    schedule can be re-enabled without other changes.
    """
    config = {
        'node_num': 12628,
        'embed_dim': 256,
        'state_dim': 256,
        'epoches': 100,
        'lambda': 0.1  # weight of the (disabled) adversarial loss term
    }

    train_dataset = DiffuseSequenceDataSet('../../data_forest/twitter/train.pkl')
    test_dataset = DiffuseSequenceDataSet('../../data_forest/twitter/test.pkl')

    dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True, collate_fn=sequence_collate_fn)
    test_dataloader = DataLoader(test_dataset, batch_size=128, collate_fn=sequence_collate_fn)
    model = Transformer(config=config)
    model = model.cuda()

    # Domain classifier used only by the disabled adversarial phase.
    domain_classifier = DomainClassifier(input_size=config['embed_dim'])
    domain_classifier = domain_classifier.cuda()

    optimizer = optim.Adam(params=list(model.parameters())
                                  # [param for param in domain_classifier.parameters()]
                           , lr=0.001)
    adversarial_optimizer = optim.Adam(params=domain_classifier.parameters(), lr=0.0003)
    # story -> domain label; consumed only by the disabled adversarial block.
    story_dict = load_story_dict()

    adversarial = 2  # phase selector for the (disabled) adversarial schedule
    for epoch in range(config['epoches']):
        average_batch_loss = 0.0
        average_domain_loss = 0.0
        # ------------- Training -------------
        for (story, data, next_node, data_length) in tqdm(dataloader):
            data = data.cuda()
            next_node = next_node.cuda()
            optimizer.zero_grad()
            pred = model(data, data_length)
            loss = model.loss(pred, next_node)

            # -------------- Prediction Training ------------
            loss.backward()
            optimizer.step()
            average_batch_loss += loss.item()

            # -------------- Adversarial Training ------------
            # adversarial_optimizer.zero_grad()
            # # next_node_embed = model.query_embed(next_node.reshape(len(next_node)))
            # seq_encoding = model.encode(data, data_length)
            # pred_domain = domain_classifier(seq_encoding)
            #
            # labels = [story_dict[s] for s in story]
            # labels = torch.LongTensor(labels).cuda()
            # adversarial_loss = domain_classifier.loss(pred_domain, labels)
            #
            # if adversarial == 0:
            #     loss.backward()
            #     optimizer.step()
            #     average_batch_loss += loss.item()
            # elif adversarial == 1:
            #     adversarial_loss.backward()
            #     adversarial_optimizer.step()
            #     average_domain_loss += adversarial_loss.item()
            # else:
            #     loss = loss - config['lambda'] * adversarial_loss
            #     loss.backward()
            #     optimizer.step()
            #     average_domain_loss += adversarial_loss.item()
            #     average_batch_loss += loss.item()

        print('Epoches {}/{}, prediction loss:{} '.format(epoch, config['epoches'],
                                                              average_batch_loss / len(dataloader)))
        logging.info('Epoches {}/{}, prediction loss:{}'.format(epoch, config['epoches'],
                                                              average_batch_loss / len(dataloader)))
        adversarial = (adversarial + 1) % 5

        # --------------     Evaluation   --------------
        if (epoch + 1) % 5 == 0:
            run_evaluation(model, test_dataloader)
        # -------------- Finish Evaluation -------------

    # Pass the path directly so torch.save opens AND closes the file itself;
    # the previous open(...) handle was never closed (resource leak).
    model_file = '../../models/transformer_twitter_{}.pkl'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
    torch.save(model, model_file)


if __name__ == '__main__':
    # Script entry point: run the full training loop.
    train_transformer()
