"""
@Filename       : deep_diffuse.py
@Create Time    : 2020/11/2 16:10
@Author         : Rylynn
@Description    : 

"""
import datetime

import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm

from evaluate.metric import hits_k, map_k, run_evaluation
from util.dataloader import DiffuseSequenceDataSet, sequence_collate_fn


class DeepDiffuse(nn.Module):
    """LSTM encoder with additive attention over the hidden states, followed
    by a linear decoder that scores every candidate node as the next one to
    be infected in the cascade.

    config keys: 'node_num' (number of distinct nodes), 'embed_dim'
    (embedding width), 'state_dim' (LSTM hidden width).
    """

    def __init__(self, config):
        super(DeepDiffuse, self).__init__()
        # +1 reserves one extra embedding row (presumably index 0 is the
        # padding id — TODO confirm against the dataloader).
        self.user_embed = nn.Embedding(config['node_num'] + 1, config['embed_dim'])
        self.lstm = nn.LSTM(config['embed_dim'], config['state_dim'])
        # BUG FIX: the decoder consumes the LSTM output, which has width
        # 'state_dim', not 'embed_dim'. The original only worked because the
        # default config happened to set the two equal.
        self.linear = nn.Linear(config['state_dim'], config['node_num'])

        self.tanh = nn.Tanh()
        # NOTE: attribute name 'att_weigth' (sic) kept so existing saved
        # checkpoints still load. Same in-features fix as self.linear above.
        self.att_weigth = nn.Linear(config['state_dim'], config['state_dim'])
        # BUG FIX: torch.FloatTensor(n) is uninitialized memory; use a small
        # random init so training does not start from garbage values.
        self.att_dot = nn.Parameter(torch.randn(config['state_dim']) * 0.1, requires_grad=True)

        self.softmax = nn.Softmax(dim=1)
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, batch_seqs, batch_seqs_length):
        """Return unnormalized next-node scores of shape (batch, node_num)."""
        out = self.encode(batch_seqs, batch_seqs_length)
        out = self.linear(out)
        return out

    def encode(self, batch_seqs, batch_seqs_length):
        """Encode padded id sequences (batch, max_len) into a single
        attention-pooled representation of shape (batch, state_dim)."""
        batch_seqs_embed = self.user_embed(batch_seqs)
        # enforce_sorted=False: accept batches in any order (the original
        # silently required lengths pre-sorted in decreasing order).
        batch_seqs_pack = rnn_utils.pack_padded_sequence(
            batch_seqs_embed, batch_seqs_length, batch_first=True, enforce_sorted=False)
        out, _ = self.lstm(batch_seqs_pack)
        out, seq_length = rnn_utils.pad_packed_sequence(out, batch_first=True)
        # Additive attention: score_t = att_dot . tanh(W h_t)
        att_score = torch.matmul(self.tanh(self.att_weigth(out)), self.att_dot)
        # BUG FIX: mask padded timesteps before the softmax so they receive
        # zero attention weight (the original leaked mass onto padding).
        max_len = out.shape[1]
        pad_mask = torch.arange(max_len, device=out.device).unsqueeze(0) \
            >= seq_length.to(out.device).unsqueeze(1)
        att_score = att_score.masked_fill(pad_mask, float('-inf'))
        att_score = self.softmax(att_score)
        # Weighted sum over time: (batch, 1, T) @ (batch, T, H) -> (batch, H)
        att_score = att_score.reshape(att_score.shape[0], 1, att_score.shape[1])
        out = torch.bmm(att_score, out)
        out = out.reshape(out.shape[0], out.shape[2])
        return out

    def loss(self, probs, true_nodes):
        """Cross-entropy between predicted scores and ground-truth next nodes."""
        return self.cross_entropy(probs, true_nodes)


def clip_gradient(optimizer, grad_clip):
    """Clamp every gradient tracked by *optimizer* into [-grad_clip, grad_clip]
    to guard against exploding gradients.

    :param optimizer: optimizer whose parameter gradients are clipped in place
    :param grad_clip: absolute clipping threshold
    """
    all_params = (p for group in optimizer.param_groups for p in group["params"])
    for param in all_params:
        if param.grad is None:
            continue
        param.grad.data.clamp_(-grad_clip, grad_clip)


def train_deep_diffuse(dataset, adversarial):
    """Train a DeepDiffuse model on the given cascade dataset and save it.

    :param dataset: dataset directory name under ../../data/ (expects
        train.pkl / test.pkl inside)
    :param adversarial: selects the save path only — when falsy the model is
        written under ../../deep_diffuse/, otherwise under
        ../../models/adversarial/. (The adversarial-training branch that once
        lived here was disabled; its dead commented-out code was removed.)
    """
    config = {
        'node_num': 4400,
        'embed_dim': 64,
        'state_dim': 64,
        'epoches': 25,
    }

    train_dataset = DiffuseSequenceDataSet('../../data/{}/train.pkl'.format(dataset))
    test_dataset = DiffuseSequenceDataSet('../../data/{}/test.pkl'.format(dataset))

    dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True, collate_fn=sequence_collate_fn)
    test_dataloader = DataLoader(test_dataset, batch_size=128, collate_fn=sequence_collate_fn)

    model = DeepDiffuse(config=config)
    model = model.cuda()

    # FIX: pass the parameter iterator directly instead of wrapping it in a
    # pointless list comprehension.
    optimizer = optim.Adam(params=model.parameters(), lr=0.001)

    for epoch in range(config['epoches']):
        model.train()
        average_batch_loss = 0.0
        # ------------- Training -------------
        for (story, data, next_node, data_length) in tqdm(dataloader):
            data = data.cuda()
            next_node = next_node.cuda()
            optimizer.zero_grad()
            pred = model(data, data_length)
            loss = model.loss(pred, next_node)
            # L2 regularization over all parameters, weighted below.
            l2_loss = sum(torch.norm(param, 2) for param in model.parameters())
            loss = 0.0005 * l2_loss + loss
            loss.backward()
            optimizer.step()
            average_batch_loss += loss.item()

        # FIX: report 1-based epoch numbers (was printing 0/25 .. 24/25).
        print('Epoches {}/{}, prediction loss:{} '.format(
            epoch + 1, config['epoches'], average_batch_loss / len(dataloader)))

        # ------------- Evaluation -------------
        if (epoch + 1) % 3 == 0:
            run_evaluation(model, test_dataloader)
        # -------------- Finish Evaluation -------------

    # FIX: the original opened the file without ever closing it; use a
    # context manager so the handle is flushed and released.
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    if not adversarial:
        save_path = '../../deep_diffuse/lstm_{}_{}.pkl'.format(dataset, timestamp)
    else:
        save_path = '../../models/adversarial/deep_diffuse_transfer_{}_{}.pkl'.format(dataset, timestamp)
    with open(file=save_path, mode='wb') as f:
        torch.save(model, f)


if __name__ == '__main__':
    # Script entry point: train on the 'memes' cascades, adversarial save path.
    train_deep_diffuse(dataset='memes', adversarial=True)