"""
@Filename       : hidan.py
@Create Time    : 2020/12/2 22:28
@Author         : Rylynn
@Description    : HiDAN — hierarchical attention model for diffusion (next-node) prediction, plus its training entry point.

"""
import datetime

import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import pickle as pkl

from torch.utils.data import DataLoader
from tqdm import tqdm

from evaluate.metric import run_evaluation
from model.het_diffuse.het_graph_builder import load_vocab_dict
from util.dataloader import DiffuseSequenceDataSet, sequence_collate_fn


class HiDAN(nn.Module):
    """Hierarchical attention network (HiDAN) for diffusion next-node prediction.

    Encodes a padded sequence of user ids with masked self-attention over the
    cascade, fuses the attended context with the raw user embeddings through a
    learned gate, mean-pools over valid positions, and projects the pooled
    vector to logits over all nodes.

    Expected ``config`` keys: ``embed_dim``, ``node_num``, ``hidden_dim``,
    ``dropout`` (a *keep* probability, TF convention: dropout p = 1 - value).

    FIX: all device placement now follows the input tensors instead of
    hard-coded ``.cuda()`` calls, so the model also runs on CPU.
    """

    def __init__(self, config):
        super(HiDAN, self).__init__()
        self.embed_dim = config['embed_dim']
        self.node_num = config['node_num']
        self.hidden_dim = config['hidden_dim']

        # +1 embedding row is reserved, presumably for padding id 0 — confirm
        # against the vocabulary builder.
        self.user_embed = nn.Embedding(self.node_num + 1, self.embed_dim)
        self.user_embed_transform_layer = nn.Linear(self.embed_dim, self.hidden_dim)
        init.xavier_normal_(self.user_embed_transform_layer.weight)

        # Bilinear projections for the cascade self-attention.
        self.context_w = nn.Parameter(torch.randn(self.hidden_dim, self.hidden_dim), requires_grad=True)
        self.target_w = nn.Parameter(torch.randn(self.hidden_dim, self.hidden_dim), requires_grad=True)
        init.xavier_normal_(self.context_w)
        init.xavier_normal_(self.target_w)

        # Gate projections for fusing attended context with user embeddings.
        self.cascade_w = nn.Parameter(torch.randn(self.hidden_dim, self.hidden_dim), requires_grad=True)
        self.user_w = nn.Parameter(torch.randn(self.hidden_dim, self.hidden_dim), requires_grad=True)
        init.xavier_normal_(self.cascade_w)
        init.xavier_normal_(self.user_w)

        self.sigmoid = nn.Sigmoid()
        self.output_linear = nn.Linear(config['hidden_dim'], config['node_num'] + 1)
        init.xavier_normal_(self.output_linear.weight)

        self.softmax = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()
        self.elu = nn.ELU()
        # NOTE(review): config['dropout'] is a keep-probability (TF convention);
        # with the shipped value 1 this makes p = 0, i.e. dropout is disabled.
        self.dropout = nn.Dropout(p=1 - config['dropout'])

        self.cross_entropy = nn.CrossEntropyLoss()

    def cascade_attention(self, user_embed, cas_mask):
        """Masked self-attention over the cascade.

        Args:
            user_embed: (b, l, n) transformed user embeddings.
            cas_mask: (b, l, 1) float validity mask (1 = real position).

        Returns:
            (b, l, n) attended representation.
        """
        bs, sl = user_embed.shape[0], user_embed.shape[1]
        device = user_embed.device  # FIX: was hard-coded .cuda()

        # Strictly upper-triangular mask: entry (i, j) is True iff j > i —
        # identical to the original meshgrid/gt construction, without the
        # torch.meshgrid `indexing` deprecation warning.
        direction_mask = torch.triu(
            torch.ones(sl, sl, dtype=torch.bool, device=device), diagonal=1)
        direction_mask_tile = direction_mask.unsqueeze(0).expand(bs, sl, sl)  # (b, l, l)
        length_mask_tile = cas_mask.bool().view(bs, 1, sl).expand(bs, sl, sl)  # (b, l, l)
        attention_mask = torch.logical_and(direction_mask_tile, length_mask_tile).float()

        context_att = self.dropout(torch.matmul(user_embed, self.context_w))  # (b, l, n)
        target_att = self.dropout(torch.matmul(user_embed, self.target_w))  # (b, l, n)

        # Additive -1e30 pushes masked logits to ~zero probability; the final
        # multiply removes any residual mass on masked positions.
        matching_logits = torch.matmul(context_att, target_att.permute(0, 2, 1)) \
            + (1 - attention_mask) * (-1e30)  # (b, l, l)
        attention_score = self.softmax(matching_logits) * attention_mask

        return torch.matmul(attention_score, user_embed)  # (b, l, n)

    def attention_fusion(self, user_cascade_embed, user_embed, cas_mask):
        """Gated fusion of attended cascade features with raw user embeddings."""
        gate = self.dropout(self.sigmoid(
            torch.matmul(user_cascade_embed, self.cascade_w)
            + torch.matmul(user_embed, self.user_w)))
        user_fusion_embed = gate * user_embed + (1 - gate) * user_cascade_embed
        return user_fusion_embed * cas_mask

    def encode(self, batch_seqs, batch_seqs_length):
        """Encode padded id sequences into one vector each (mean over valid steps).

        Args:
            batch_seqs: (b, l) long tensor of node ids, zero-padded.
            batch_seqs_length: per-sequence true lengths (list or tensor);
                assumed >= 1 per sequence, otherwise the mean divides by zero.

        Returns:
            (b, hidden_dim) sequence encodings.
        """
        device = batch_seqs.device  # FIX: was hard-coded .cuda()
        sl = batch_seqs.shape[1]
        lengths = torch.as_tensor(batch_seqs_length, device=device)
        # (b, l, 1) validity mask; vectorized replacement for the per-row loop.
        mask_mtx = (torch.arange(sl, device=device).unsqueeze(0)
                    < lengths.unsqueeze(1)).float().unsqueeze(-1)

        user_embed = self.user_embed(batch_seqs)  # (b, l, n)
        user_embed = self.dropout(self.elu(self.user_embed_transform_layer(user_embed))) * mask_mtx
        user_cascade_embed = self.cascade_attention(user_embed, mask_mtx)  # (b, l, n)
        user_fusion_embed = self.attention_fusion(user_cascade_embed, user_embed, mask_mtx)  # (b, l, n)
        user_fusion_embed = user_fusion_embed * mask_mtx

        # Mean over valid positions only.
        out = torch.sum(user_fusion_embed, dim=1) / torch.sum(mask_mtx, dim=1)
        return out

    def forward(self, batch_seqs, batch_seqs_length):
        """Return unnormalized next-node logits of shape (b, node_num + 1)."""
        encoding = self.encode(batch_seqs, batch_seqs_length)
        return self.output_linear(encoding)

    def loss(self, preds, labels):
        """Cross-entropy between logits (b, node_num + 1) and integer labels (b,)."""
        return self.cross_entropy(preds, labels)


def train_hidan(dataset):
    """Train HiDAN on the named dataset and evaluate every 3 epochs.

    Args:
        dataset: dataset name (e.g. 'twitter'); selects ../../data/<dataset>/.

    Side effects: prints the average prediction loss per epoch and calls
    ``run_evaluation`` on the test loader every third epoch. Requires CUDA.
    """
    config = {
        'node_num': 12627,
        # 'hidan_twitter': 14000
        # 'twitter': 4900
        # 'hidan_memes': 1110
        # 'memes': 4400,
        # 'node_num': 13755,
        # BUG FIX: honor the caller's argument. Previously this was hard-coded
        # to 'twitter', silently ignoring the `dataset` parameter.
        'dataset': dataset,
        'embed_dim': 64,
        'hidden_dim': 64,
        'state_dim': 64,
        'epoches': 60,
        'lr_alpha': 0.001,
        'lr_beta': 0.01,
        'dropout': 1,  # keep-probability convention: 1 disables dropout
        'train_batch_size': 16,
        'test_batch_size': 16
    }

    vocab_dict, _, _ = load_vocab_dict('../../data', dataset)
    train_dataset = DiffuseSequenceDataSet('../../data/{}/cascade.txt'.format(dataset), vocab_dict)
    test_dataset = DiffuseSequenceDataSet('../../data/{}/cascadetest.txt'.format(dataset), vocab_dict)

    dataloader = DataLoader(train_dataset, batch_size=config['train_batch_size'], shuffle=True,
                            collate_fn=sequence_collate_fn)
    test_dataloader = DataLoader(test_dataset, batch_size=config['test_batch_size'], collate_fn=sequence_collate_fn)

    hidan = HiDAN(config=config).cuda()

    # Adam accepts the parameter generator directly; no list needed.
    optimizer = optim.Adam(hidan.parameters(), lr=config['lr_alpha'])

    for epoch in range(config['epoches']):
        hidan.train()
        average_batch_loss = 0.0
        # ------------- Training -------------
        for (data, next_node, data_length) in tqdm(dataloader):
            data = data.cuda()
            next_node = next_node.cuda()
            optimizer.zero_grad()
            pred = hidan(data, data_length)
            loss = hidan.loss(pred, next_node)

            # Explicit L2 regularization over every parameter (kept as a manual
            # term rather than Adam's weight_decay to preserve the objective).
            l2_loss = sum(torch.norm(param, 2) for param in hidan.parameters())
            loss = loss + 5e-5 * l2_loss

            loss.backward()
            optimizer.step()
            average_batch_loss += loss.item()

        # (The old "domain loss" print was removed: the accumulator was never
        # updated anywhere, so it always reported 0.)
        print('Epoches {}/{}, prediction loss:{} '.format(epoch, config['epoches'],
                                                          average_batch_loss / len(dataloader)))

        # --------------     Evaluation   --------------
        if (epoch + 1) % 3 == 0:
            run_evaluation(hidan, test_dataloader)


# Script entry point: kick off HiDAN training.
if __name__ == '__main__':
    train_hidan('twitter')