import torch.nn as nn
import torch
# import unittest
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence
import math

# from language_model.encoder import Encoder, NoisyEncoder


class Encoder(nn.Module):
    """Bidirectional (by default) LSTM encoder over packed padded sequences.

    When ``bidirectional`` is True, each direction uses ``hid_dim // 2``
    units so the concatenated forward/backward state still totals
    ``hid_dim`` per layer.
    """

    # todo add agenda_dim if possible
    def __init__(self, input_dim, hid_dim, num_layers, bidirectional=True):
        super(Encoder, self).__init__()
        # Validate before deriving the per-direction size.
        if bidirectional and hid_dim % 2 != 0:
            raise ValueError('hid_dim must be even for bidirectional LSTM')
        self.hid_dim = hid_dim if not bidirectional else hid_dim // 2

        # batch_first=True: the packed input's underlying padded tensor is
        # laid out (batch, seq, feature).
        self.lstm = nn.LSTM(input_dim, self.hid_dim, num_layers=num_layers, bidirectional=bidirectional,
                            batch_first=True)

    def forward(self, pack_pad_x):
        """Run the LSTM.

        :param pack_pad_x: a PackedSequence of embedded inputs
        :return: (packed output, (hidden state, cell state))
        """
        return self.lstm(pack_pad_x)

    @staticmethod
    def make_agenda(x):
        """Flatten a hidden-state tensor of shape (layers*dirs, batch, hid)
        into one agenda vector per example, shape (batch, layers*dirs*hid).

        ``tuple(x)`` iterates the first dimension; modern ``torch.cat``
        requires a sequence of tensors (passing the tensor itself raises
        TypeError), while the result matches the old implicit behavior.
        """
        agenda = torch.cat(tuple(x), dim=1)
        return agenda


class NoisyEncoder(nn.Module):
    """Wraps an Encoder and returns both its agenda and a Gaussian-noised
    copy (the reparameterized sample used by the decoder)."""

    def __init__(self, encoder: "Encoder", noise_mu=0, noise_sigma=1):
        super(NoisyEncoder, self).__init__()
        self.encoder = encoder
        self.noise_mu = noise_mu        # mean of the additive Gaussian noise
        self.noise_sigma = noise_sigma  # std-dev of the additive Gaussian noise

    def forward(self, pack_pad_x):
        """
        :param pack_pad_x: pack padded sequence
        :return: agenda, agenda + noise
        """
        _, (hid_state, cell_state) = self.encoder(pack_pad_x)
        agenda = self.encoder.make_agenda(hid_state)
        # Build the noise parameters on the agenda's own device/dtype so the
        # model also runs on CPU (the old code hard-coded .cuda()).  Note
        # torch.normal's keyword is `mean`; the old `means=` keyword was
        # removed and now raises TypeError.
        means = self.noise_mu * torch.ones_like(agenda)
        std = self.noise_sigma * torch.ones_like(agenda)
        noise = torch.normal(mean=means, std=std)
        return agenda, agenda + noise


class TrainDecoder(nn.Module):
    """LSTM decoder that conditions every time step on a fixed agenda vector.

    The agenda is tiled along the time dimension, concatenated to the input
    embeddings, and fed through the LSTM; a final linear layer projects the
    hidden states to output logits.
    """

    def __init__(self, input_dim, output_dim, agenda_dim, hid_dim, num_layers):
        super(TrainDecoder, self).__init__()
        self.hid_dim = hid_dim
        # batch_first=True: packed input's padded tensor is (batch, seq, feat).
        self.lstm = nn.LSTM(input_dim + agenda_dim, self.hid_dim, batch_first=True, num_layers=num_layers)

        # TODO using adaptive softmax to replace the original one
        self.linear = nn.Linear(hid_dim, output_dim)
        # dim=-1 normalizes over the vocabulary axis; the bare nn.Softmax()
        # constructor is deprecated.  Currently unused in forward() —
        # training consumes raw logits via cross_entropy.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, pack_pad_x, agenda):
        """
        :param pack_pad_x: packed padded (batch-first) embedded input
        :param agenda: (batch, agenda_dim) conditioning vector
        :return: (batch*max_len, output_dim) logits, last hidden state,
                 last cell state
        """
        pad_x, len_x = pad_packed_sequence(pack_pad_x, batch_first=True)
        # Packed batches are sorted by length, so len_x[0] is the padded
        # max length; int() also unwraps the 0-dim tensor newer PyTorch
        # returns for lengths.
        max_len = int(len_x[0])
        tiled_agenda = agenda.unsqueeze(1).repeat(1, max_len, 1)
        comb = torch.cat((pad_x, tiled_agenda), 2)
        pack_pad_x = pack_padded_sequence(comb, len_x, batch_first=True)
        x, (hid_state, cell_state) = self.lstm(pack_pad_x)
        x, len_x = pad_packed_sequence(x, batch_first=True)

        # Flatten (batch, seq, hid) -> (batch*seq, hid) so the linear layer
        # yields per-token logits; rows for padding positions are included.
        x = x.contiguous().view(-1, x.size(2))
        x = self.linear(x)

        return x, hid_state, cell_state


class LanguageModelOutput:
    """Plain value object bundling the results of one forward pass:
    decoder logits, final LSTM hidden/cell states, and the (noisy)
    agenda that conditioned the decoder."""

    __slots__ = ['output', 'hid_state', 'cell_state', 'agenda']

    def __init__(self, output, hid_state, cell_state, agenda):
        # Assign positionally onto the declared slots.
        for slot, value in zip(self.__slots__, (output, hid_state, cell_state, agenda)):
            setattr(self, slot, value)


class LanguageModel(nn.Module):
    """VAE-style language model: a noisy bidirectional LSTM encoder produces
    an "agenda" vector that conditions an LSTM decoder; training balances
    reconstruction cross-entropy against an annealed KL-style penalty on
    the agenda.
    """

    def __init__(self, emb_dim, output_dim, hid_dim, agenda_dim, num_layers, kl_weight_steps, kl_weight_rate, kl_weight_cap,
                 dci_keep_rate):
        super(LanguageModel, self).__init__()
        self.noise_mu = 0
        self.noise_sigma = 1
        # Sigmoid annealing schedule for the KL term: the weight climbs from
        # ~0 toward kl_weight_cap over roughly kl_weight_steps steps, with
        # slope controlled by kl_weight_rate.
        self.kl_weight_steps = kl_weight_steps
        self.kl_weight_rate = kl_weight_rate
        self.kl_weight_cap = kl_weight_cap

        encoder = Encoder(emb_dim, hid_dim, num_layers=num_layers, bidirectional=True)
        self.noisy_encoder = NoisyEncoder(encoder, self.noise_mu, self.noise_sigma)
        self.dci_keep_rate = dci_keep_rate
        self.agenda_dim = agenda_dim
        # NOTE(review): this projection is never used in forward(); kept so
        # existing checkpoints/state_dicts remain loadable — confirm intent.
        self.linear = nn.Linear(self.agenda_dim, hid_dim * num_layers)
        # The bidirectional encoder's concatenated hidden state is
        # hid_dim * num_layers wide, which is the decoder's agenda width.
        self.decoder = TrainDecoder(emb_dim, output_dim, hid_dim * num_layers, hid_dim, num_layers)

    def kl_penalty(self, agenda):
        """0.5 * ||agenda||^2 scaled by noise_sigma, averaged over the batch.

        The trailing /100 is an empirical scaling constant —
        TODO(review): confirm it against the training configuration.
        """
        batch_size, agenda_dim = agenda.size()
        return 0.5 * torch.sum(torch.pow(agenda, 2)) * self.noise_sigma / batch_size / 100

    def kl_weight(self, cur_step):
        """Annealed KL weight in (0, kl_weight_cap) for the given step."""
        x = cur_step / float(self.kl_weight_steps)
        return self.kl_weight_cap * self.sigmoid(x, self.kl_weight_rate)

    def forward(self, pack_pad_x):
        """Encode the packed batch, add noise to the agenda, and decode
        conditioned on the noisy agenda.

        :param pack_pad_x: packed padded (batch-first) embedded input
        :return: a LanguageModelOutput (logits, states, noisy agenda)
        """
        agenda, noisy_agenda = self.noisy_encoder(pack_pad_x)
        output, hid_state, cell_state = self.decoder(pack_pad_x, noisy_agenda)
        return LanguageModelOutput(output, hid_state, cell_state, noisy_agenda)

    def loss(self, output, target, agenda, train_step):
        """Reconstruction cross-entropy plus the annealed KL penalty."""
        kl = self.kl_weight(train_step) * self.kl_penalty(agenda)
        rec = F.cross_entropy(output, target)
        return rec + kl

    @staticmethod
    def sigmoid(s, k):
        """Sigmoid centered at s=0.5 with steepness k."""
        return 1 / (1 + math.exp(-k * (2 * s - 1)))

    def perplexity(self):
        # TODO: not implemented yet
        pass
