import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence,
from language_model.encoder import Encoder, NoisyEncoder
from language_model.decoder import TrainDecoder
import numpy as np
import unittest
import math


class LanguageModelOutput:
    """Lightweight record bundling everything the language model's forward
    pass produces: the decoder output, the final LSTM hidden and cell
    states, and the (noisy) agenda vector the decoder was conditioned on."""

    __slots__ = ['output', 'hid_state', 'cell_state', 'agenda']

    def __init__(self, output, hid_state, cell_state, agenda):
        # Assign each constructor argument to its matching slot.
        for slot, value in zip(self.__slots__, (output, hid_state, cell_state, agenda)):
            setattr(self, slot, value)


class LanguageModel(nn.Module):
    """VAE-style language model: a noisy encoder compresses the input
    sequence into a latent "agenda" vector, and an LSTM decoder reconstructs
    the sequence conditioned on it.  Training loss is reconstruction
    cross-entropy plus a sigmoid-annealed KL-style penalty on the agenda.
    """

    def __init__(self, emb_dim, output_dim, hid_dim, agenda_dim, num_layers, kl_weight_steps, kl_weight_rate, kl_weight_cap,
                 dci_keep_rate):
        """
        Args:
            emb_dim: dimensionality of the input token embeddings.
            output_dim: decoder output (vocabulary) size.
            hid_dim: LSTM hidden size for encoder and decoder.
            agenda_dim: size of the latent agenda vector.
            num_layers: number of stacked LSTM layers.
            kl_weight_steps: number of steps over which the KL weight ramps up.
            kl_weight_rate: steepness of the sigmoid annealing schedule.
            kl_weight_cap: maximum annealed KL weight.
            dci_keep_rate: stored keep rate (not used by the code visible in
                this class; presumably consumed elsewhere — TODO confirm).
        """
        super().__init__()
        # Parameters of the Gaussian noise the NoisyEncoder injects.
        self.noise_mu = 0
        self.noise_sigma = 1
        self.kl_weight_steps = kl_weight_steps
        self.kl_weight_rate = kl_weight_rate
        self.kl_weight_cap = kl_weight_cap

        encoder = Encoder(emb_dim, hid_dim, num_layers=num_layers, bidirectional=True)
        self.noisy_encoder = NoisyEncoder(encoder, self.noise_mu, self.noise_sigma)
        self.dci_keep_rate = dci_keep_rate
        self.agenda_dim = agenda_dim
        # Projects the agenda up to the decoder's initial-state size.
        self.linear = nn.Linear(self.agenda_dim, hid_dim * num_layers)
        self.decoder = TrainDecoder(emb_dim, output_dim, hid_dim * num_layers, hid_dim, num_layers)

    def kl_penalty(self, agenda):
        """Per-batch penalty 0.5 * sum(agenda**2) * sigma / batch_size / 100.

        The trailing /100 is an ad-hoc scaling constant inherited from the
        original implementation; kept for behavioral compatibility.
        """
        batch_size = agenda.size(0)
        return 0.5 * torch.sum(torch.pow(agenda, 2)) * self.noise_sigma / batch_size / 100

    def kl_weight(self, cur_step):
        """Annealed KL weight: sigmoid ramp over ``kl_weight_steps`` steps,
        scaled to a maximum of ``kl_weight_cap``."""
        x = cur_step / float(self.kl_weight_steps)
        return self.kl_weight_cap * self.sigmoid(x, self.kl_weight_rate)

    def forward(self, pack_pad_x):
        """Encode ``pack_pad_x`` (a PackedSequence of embeddings) and decode
        conditioned on the *noisy* agenda.

        Returns:
            LanguageModelOutput — note its ``agenda`` field carries the noisy
            agenda (the one the decoder saw), not the clean encoding.
        """
        agenda, noisy_agenda = self.noisy_encoder(pack_pad_x)
        output, hid_state, cell_state = self.decoder(pack_pad_x, noisy_agenda)
        return LanguageModelOutput(output, hid_state, cell_state, noisy_agenda)

    def loss(self, output, target, agenda, train_step):
        """Reconstruction cross-entropy plus the annealed KL penalty."""
        kl = self.kl_weight(train_step) * self.kl_penalty(agenda)
        rec = F.cross_entropy(output, target)
        return rec + kl

    @staticmethod
    def sigmoid(s, k):
        """Sigmoid centered at s = 0.5 with steepness k:
        1 / (1 + exp(-k * (2s - 1)))."""
        return 1 / (1 + math.exp(-k * (2 * s - 1)))

    def perplexity(self):
        # Not implemented yet.
        pass


class LanguageModelTest(unittest.TestCase):
    """Smoke test: builds a tiny LanguageModel and runs one forward pass."""

    def setUp(self):
        # Fixture construction belongs in setUp (unittest convention),
        # not in __init__; unittest builds a fresh instance per test either way.
        self.lm = LanguageModel(16, 30, 4, 2, 2, 100, 1, 1, 0.8)
        print(self.lm)

        emb = nn.Embedding(20, 16)
        # Lengths must be non-increasing for pack_padded_sequence.
        lens = [3, 3, 3, 3, 2, 1]
        docs = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9],
            [10, 1, 2],
            [2, 3, 1],
            [2, 3, 1],
        ]

        # torch.autograd.Variable is deprecated (no-op since PyTorch 0.4);
        # plain tensors participate in autograd directly.
        data = [emb(torch.LongTensor(d).unsqueeze(0)) for d in docs]
        data = torch.cat(data, 0)
        self.pack_pad_data = pack_padded_sequence(data, lens, batch_first=True)

    def test_lm(self):
        # Forward pass should complete without raising.
        self.lm(self.pack_pad_data)


# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
