import torch.nn as nn
import torch
import numpy as np
import unittest
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence


# todo use namedtuple to define the type of RNN output
# class RNNOutput:
#     pass


class Encoder(nn.Module):
    """Multi-layer (optionally bidirectional) LSTM encoder over packed sequences.

    When ``bidirectional`` is True the requested ``hid_dim`` is split evenly
    across the two directions, so the concatenated per-step output is still
    ``hid_dim`` wide.
    """

    # todo add agenda_dim if possible
    def __init__(self, input_dim, hid_dim, num_layers, bidirectional=True):
        """
        :param input_dim: size of each input feature vector
        :param hid_dim: total hidden size (halved per direction when bidirectional)
        :param num_layers: number of stacked LSTM layers
        :param bidirectional: whether to run the LSTM in both directions
        :raises ValueError: if bidirectional and hid_dim is odd
        """
        super(Encoder, self).__init__()
        if bidirectional and hid_dim % 2 != 0:
            raise ValueError('hid_dim must be even for bidirectional LSTM')
        self.hid_dim = hid_dim // 2 if bidirectional else hid_dim

        # the input of lstm is packed sequence of which the batch dim is the first dim
        self.lstm = nn.LSTM(input_dim, self.hid_dim, num_layers=num_layers,
                            bidirectional=bidirectional, batch_first=True)

    def forward(self, pack_pad_x):
        """Run the LSTM over a packed padded sequence.

        :param pack_pad_x: a ``PackedSequence`` (batch-first)
        :return: output, (hidden state, cell state)
        """
        return self.lstm(pack_pad_x)

    @staticmethod
    def make_agenda(x):
        """Flatten a (num_layers * num_directions, batch, hid) state tensor
        into a (batch, num_layers * num_directions * hid) "agenda" vector.

        Fix: current ``torch.cat`` requires a sequence of tensors, not a bare
        tensor, so unbind along dim 0 explicitly before concatenating (this
        reproduces the old implicit iteration over the first dimension).
        """
        agenda = torch.cat(torch.unbind(x, 0), 1)
        return agenda


class NoisyEncoder(nn.Module):
    """Wraps an Encoder and returns both the clean agenda and a noisy copy.

    Additive Gaussian noise ~ N(noise_mu, noise_sigma) with the agenda's
    shape is sampled on the agenda's own device/dtype.
    """

    # Forward-reference annotation keeps the class importable on its own.
    def __init__(self, encoder: 'Encoder', noise_mu=0, noise_sigma=1):
        """
        :param encoder: underlying Encoder producing the agenda
        :param noise_mu: mean of the additive Gaussian noise
        :param noise_sigma: standard deviation of the additive Gaussian noise
        """
        super(NoisyEncoder, self).__init__()
        self.encoder = encoder
        self.noise_mu = noise_mu
        self.noise_sigma = noise_sigma

    def forward(self, pack_pad_x):
        """
        :param pack_pad_x: pack padded sequence
        :return: agenda, agenda + noise
        """
        _, (hid_state, cell_state) = self.encoder(pack_pad_x)
        agenda = self.encoder.make_agenda(hid_state)

        # Fixes vs. the old code: the keyword is `mean` (not `means`) in
        # current PyTorch; `Variable` is deprecated; and `ones_like` samples
        # on the agenda's device instead of the hard-coded `.cuda()`, which
        # crashed on CPU-only machines.
        means = self.noise_mu * torch.ones_like(agenda)
        stds = self.noise_sigma * torch.ones_like(agenda)
        noise = torch.normal(mean=means, std=stds)
        return agenda, agenda + noise


class EncoderTest(unittest.TestCase):
    """Print-based smoke tests for Encoder and NoisyEncoder."""

    def __init__(self, *args, **kwargs):
        super(EncoderTest, self).__init__(*args, **kwargs)
        self.enc = Encoder(16, 4, 2, True)
        self.noisy_enc = NoisyEncoder(self.enc, 10, 1.0)
        embedding = nn.Embedding(20, 16)
        seq_lens = [3, 3, 3, 3, 2, 1]
        token_ids = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9],
            [10, 1, 2],
            [2, 3, 1],
            [2, 3, 1],
        ]

        # Embed each document as a (1, seq, emb) row, then stack the rows
        # into a (batch, seq, emb) tensor.
        embedded = []
        for ids in token_ids:
            row = Variable(torch.LongTensor(ids)).unsqueeze(0)
            embedded.append(embedding(row))
        print(embedded)
        batch = torch.cat(embedded, 0)
        print(batch)
        self.pack_pad_data = pack_padded_sequence(batch, seq_lens, batch_first=True)
        # print(torch.sum(self.pack_pad_data, 1))  # wrong

    def test_encoder(self):
        output, (hidden, cell) = self.enc(self.pack_pad_data)
        print('encoder out: \n')
        print(output)
        print(self.enc.make_agenda(hidden))

    def test_noisy_encoder(self):
        agenda, noisy_agenda = self.noisy_enc(self.pack_pad_data)
        pad_x, len_x = pad_packed_sequence(self.pack_pad_data, batch_first=True)
        print('pad_x', pad_x.size())
        print('sum', torch.sum(pad_x, 1).size())

        print(agenda)
        print(noisy_agenda)
        print(pad_x)

        # Tile the agenda along the time dimension and append it to every step.
        max_len = max(len_x)
        tiled = agenda.view(agenda.size(0), 1, agenda.size(1)).repeat(1, max_len, 1)
        com = torch.cat((pad_x, tiled), 2)
        print(tiled)
        # print(com)
        # print(com)


if __name__ == '__main__':
    # Discover and run the unittest cases above when executed as a script.
    unittest.main()
