import torch.nn as nn
import torch
import unittest
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence
from language_model.encoder import Encoder, NoisyEncoder


class TrainDecoder(nn.Module):
    """Teacher-forcing decoder: an LSTM over token embeddings with a fixed
    "agenda" vector concatenated at every timestep, followed by a linear
    projection to output-vocabulary logits.

    Args:
        input_dim: dimensionality of the token embeddings.
        output_dim: size of the output vocabulary (width of the linear layer).
        agenda_dim: dimensionality of the agenda vector appended per step.
        hid_dim: LSTM hidden size.
        num_layers: number of stacked LSTM layers.
    """

    def __init__(self, input_dim, output_dim, agenda_dim, hid_dim, num_layers):
        super(TrainDecoder, self).__init__()
        self.hid_dim = hid_dim
        # The LSTM consumes packed sequences whose underlying padded tensor
        # is batch-first: (batch, seq, input_dim + agenda_dim).
        self.lstm = nn.LSTM(input_dim + agenda_dim, self.hid_dim, batch_first=True, num_layers=num_layers)

        # TODO using adaptive softmax to replace the original one
        self.linear = nn.Linear(hid_dim, output_dim)
        # dim=1 is the vocabulary axis of the (batch*seq, output_dim) logits;
        # nn.Softmax() without an explicit dim is deprecated and ambiguous.
        # NOTE: forward() returns raw logits (softmax is not applied there,
        # e.g. for use with nn.CrossEntropyLoss); kept for external callers.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, pack_pad_x, agenda):
        """Run the decoder over a packed batch.

        Args:
            pack_pad_x: PackedSequence of token embeddings; padded form is
                (batch, max_len, input_dim) with batch_first=True.
            agenda: (batch, agenda_dim) vector tiled across all timesteps.

        Returns:
            Tuple ``(logits, hid_state, cell_state)`` where ``logits`` has
            shape (batch * max_len, output_dim) — raw, un-softmaxed scores.
        """
        pad_x, len_x = pad_packed_sequence(pack_pad_x, batch_first=True)
        # Max sequence length. pad_x.size(1) is correct regardless of how the
        # batch is ordered; the previous len_x[0] relied on the lengths being
        # sorted in decreasing order.
        length = pad_x.size(1)
        # Tile the agenda to (batch, max_len, agenda_dim) and append it to
        # every timestep's embedding along the feature axis.
        comb = torch.cat((pad_x, agenda.view(agenda.size(0), 1, agenda.size(1)).repeat(1, length, 1)), 2)
        pack_pad_x = pack_padded_sequence(comb, len_x, batch_first=True)
        x, (hid_state, cell_state) = self.lstm(pack_pad_x)
        x, len_x = pad_packed_sequence(x, batch_first=True)

        # Flatten (batch, max_len, hid_dim) -> (batch*max_len, hid_dim) so a
        # single Linear call produces per-token logits.
        x = x.contiguous()
        x = x.view(-1, x.size(2))

        x = self.linear(x)
        # Softmax intentionally omitted: downstream losses such as
        # CrossEntropyLoss expect raw logits.

        return x, hid_state, cell_state


class DecoderTest(unittest.TestCase):
    """Smoke test harness: builds a tiny embedded batch and exercises the
    pack/pad round trip used by TrainDecoder."""

    def __init__(self, *args, **kwargs):
        super(DecoderTest, self).__init__(*args, **kwargs)

        self.enc = Encoder(16, 4, 2, True)
        self.noisy_enc = NoisyEncoder(self.enc, 10, 1.0, 1.0)

        # TrainDecoder(input_dim, output_dim, agenda_dim, hid_dim, num_layers).
        # BUG FIX: the original call passed only four positional arguments to
        # the five-parameter constructor and raised a TypeError. hid_dim=4 is
        # inserted here — TODO confirm the intended layer sizes.
        self.decoder = TrainDecoder(16, 4, 4, 4, 2)

        emb = nn.Embedding(20, 16)
        # Lengths must be sorted in decreasing order for pack_padded_sequence.
        lens = [3, 3, 3, 3, 2, 1]
        docs = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9],
            [10, 1, 2],
            [2, 3, 1],
            [2, 3, 1],
        ]

        # Embed each doc to (1, seq, 16) and stack into a (batch, seq, 16)
        # tensor before packing.
        data = [emb(Variable(torch.LongTensor(d)).unsqueeze(0)) for d in docs]
        data = torch.cat(data, 0)
        self.pack_pad_data = pack_padded_sequence(data, lens, batch_first=True)

    def test_decoder(self):
        print(pad_packed_sequence(self.pack_pad_data, batch_first=True))


if __name__ == '__main__':
    # Discover and run the DecoderTest cases when executed as a script.
    unittest.main()
