"""
@Filename       : gnn_lstm.py
@Create Time    : 2020/11/19 22:38
@Author         : Rylynn
@Description    : Two-layer GAT graph encoder + attention LSTM for predicting the next activated node of a diffusion sequence.

"""
import datetime

import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
import torch.optim as optim
import dgl
from dgl.nn.pytorch import GATConv
from torch.utils.data import DataLoader
from tqdm import tqdm

from evaluate.metric import run_evaluation
from util.dataloader import DiffuseSequenceDataSet, sequence_collate_fn
from util.preprocess import load_graph


class GNNLSTM(nn.Module):
    """Two-layer GAT encoder over a social graph, followed by an LSTM with
    additive attention, predicting the next activated node of a diffusion
    sequence.

    Expected ``config`` keys: ``node_num``, ``embed_dim``, ``state_dim``.
    """

    def __init__(self, config):
        super(GNNLSTM, self).__init__()
        self.config = config
        # self.feat is an nn.Parameter (see load_graph), so assigning it here
        # registers it on the module and the optimizer actually trains the
        # node embedding table.
        self.g, self.feat = self.load_graph()
        activation = nn.LeakyReLU()
        self.conv1 = GATConv(in_feats=config['embed_dim'],
                             out_feats=256,
                             num_heads=8,
                             activation=activation)
        # conv1's 8 heads are concatenated downstream, hence 8 * 256 inputs.
        self.conv2 = GATConv(in_feats=8 * 256,
                             out_feats=config['embed_dim'],
                             num_heads=1,
                             activation=activation)

        self.lstm = nn.LSTM(config['embed_dim'], config['state_dim'], batch_first=True)
        # The LSTM emits state_dim-sized vectors, so every layer consuming its
        # output must take state_dim (the original hard-coded embed_dim, which
        # only worked because embed_dim == state_dim in the default config).
        self.linear = nn.Linear(config['state_dim'], config['node_num'])

        self.tanh = nn.Tanh()
        self.att_weight = nn.Linear(config['state_dim'], config['state_dim'])
        # randn init: a bare torch.FloatTensor(n) is UNINITIALIZED memory and
        # can contain garbage/NaN, poisoning the attention scores.
        self.att_dot = nn.Parameter(torch.randn(config['state_dim']), requires_grad=True)

        self.softmax = nn.Softmax(dim=1)
        self.cross_entropy = nn.CrossEntropyLoss()

    def load_graph(self):
        """Load the cached twitter graph, attach a trainable node-embedding
        table, and move both to the GPU.

        Returns:
            (dgl.DGLGraph, nn.Parameter): self-looped graph on CUDA and a
            ``(node_num + 1, embed_dim)`` embedding table (node 0 reserved
            for sequence padding).
        """
        g, node2id = load_graph('../../data/twitter')
        g.add_node(0)  # node id 0 is the padding node
        # nn.Parameter instead of the deprecated torch.autograd.Variable: a
        # Variable assigned to self is invisible to model.parameters(), so
        # the embeddings were never updated by the optimizer.
        feat = nn.Parameter(
            torch.randn(self.config['node_num'] + 1, self.config['embed_dim']).cuda())
        g = dgl.from_networkx(g)
        g = dgl.add_self_loop(g)
        g = g.to('cuda')
        return g, feat

    def forward(self, batch_seqs, batch_seqs_length):
        """Return unnormalized next-node logits of shape (batch, node_num).

        Logits (not probabilities) are returned on purpose: ``self.loss``
        uses nn.CrossEntropyLoss, which applies log-softmax internally. The
        original also computed a softmax here but discarded it (dead code).
        """
        out = self.encode(batch_seqs, batch_seqs_length)
        return self.linear(out)

    def encode(self, batch_seqs, batch_seqs_length):
        """Encode padded node-id sequences into a (batch, state_dim) vector.

        Node embeddings are refined by the two GAT layers over the whole
        graph, gathered per-sequence, run through the LSTM, then pooled with
        additive attention over the timesteps.
        """
        out = self.conv1(self.g, self.feat)
        out = out.flatten(1)  # concatenate the 8 attention heads
        out = self.conv2(self.g, out)
        out = out.flatten(1)
        batch_seqs_embed = out[batch_seqs]
        # enforce_sorted=False: without it pack_padded_sequence requires the
        # batch pre-sorted by descending length; this works either way.
        batch_seqs_pack = rnn_utils.pack_padded_sequence(
            batch_seqs_embed, batch_seqs_length, batch_first=True, enforce_sorted=False)
        out, _ = self.lstm(batch_seqs_pack)
        out, seq_length = rnn_utils.pad_packed_sequence(out, batch_first=True)

        # Additive attention: score_t = att_dot . tanh(W h_t), softmax over t.
        # NOTE(review): padded timesteps are not masked before the softmax,
        # so they receive nonzero weight — confirm whether that is intended.
        att_score = torch.matmul(self.tanh(self.att_weight(out)), self.att_dot)
        att_score = self.softmax(att_score)          # (batch, seq_len)
        att_score = att_score.unsqueeze(1)           # (batch, 1, seq_len)
        out = torch.bmm(att_score, out).squeeze(1)   # weighted sum of states

        return out

    def loss(self, probs, true_nodes):
        """Cross-entropy between next-node logits and the true node ids."""
        return self.cross_entropy(probs, true_nodes)


def train_gnn_lstm(dataset):
    """Train a GNNLSTM on ``../../data/<dataset>/{train,test}.pkl``.

    Trains for ``config['epoches']`` epochs with Adam plus a manual L2
    penalty, runs evaluation every 5 epochs, and saves the whole model
    (pickled module) under ``../../models/`` with a timestamped name.

    Args:
        dataset: dataset directory name, e.g. 'twitter'.
    """
    config = {
        'node_num': 6177,
        'embed_dim': 64,
        'state_dim': 64,
        'epoches': 100,
        'lr': 0.001,
        'l2_coef': 0.005,  # weight of the manual L2 penalty below
    }

    train_dataset = DiffuseSequenceDataSet('../../data/{}/train.pkl'.format(dataset))
    test_dataset = DiffuseSequenceDataSet('../../data/{}/test.pkl'.format(dataset))

    dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True, collate_fn=sequence_collate_fn)
    test_dataloader = DataLoader(test_dataset, batch_size=64, collate_fn=sequence_collate_fn)
    gnn_lstm = GNNLSTM(config=config)
    gnn_lstm = gnn_lstm.cuda()

    optimizer = optim.Adam(params=gnn_lstm.parameters(), lr=config['lr'])

    for epoch in range(config['epoches']):
        gnn_lstm.train()
        average_batch_loss = 0.0
        # ------------- Training -------------
        for (story, data, next_node, data_length) in tqdm(dataloader):
            data = data.cuda()
            next_node = next_node.cuda()
            optimizer.zero_grad()
            pred = gnn_lstm(data, data_length)
            loss = gnn_lstm.loss(pred, next_node)
            # Manual L2 regularization; torch.norm already returns a scalar,
            # so the original torch.sum wrapper was redundant.
            l2_loss = sum(torch.norm(param, 2) for param in gnn_lstm.parameters())

            loss = loss + l2_loss * config['l2_coef']
            loss.backward()
            optimizer.step()
            average_batch_loss += loss.item()

        # epoch is 0-based; print 1-based against the 1-based total.
        print('Epoches {}/{}, prediction loss:{} '.format(epoch + 1, config['epoches'],
                                                          average_batch_loss / len(dataloader)))
        # --------------     Evaluation   --------------
        if (epoch + 1) % 5 == 0:
            # Switch to eval mode so train-only behavior (dropout etc.) is
            # disabled during evaluation; train() is restored at loop top.
            gnn_lstm.eval()
            run_evaluation(gnn_lstm, test_dataloader)

        # -------------- Finish Evaluation -------------

    # Pass the path directly: the original opened a file handle it never
    # closed; torch.save accepts a path string and closes it itself.
    torch.save(gnn_lstm,
               '../../models/gnn_lstm_{}.pkl'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S')))


if __name__ == '__main__':
    # Script entry point: train on the twitter diffusion data.
    train_gnn_lstm(dataset='twitter')
