# generating sentences from a continuous space https://arxiv.org/pdf/1511.06349.pdf
# RNN-based variational autoencoder
# lstm -> lstm => (linear, linear) => (mu, sigma) => p(z|X) -> decoding lstm -> decoding lstm -> decoding lstm
# z is latent variable of vae

import torch
import torch.nn as nn
# import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence


UNK_CHAR = 3  # unknown character
USE_CUDA = True


class RNN(nn.Module):
    def __init__(self, vocab_size=10, emb_size=100,
                 hid_size=128, latent_size=10,
                 word_dropout=0, unk=UNK_CHAR,
                 feed_latent=False):
        super(RNN, self).__init__()

        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self.hid_size = hid_size
        self.latent_size = latent_size
        self.word_dropout = word_dropout
        self.unk = unk
        self.feed_latent = feed_latent

        # self.emb = nn.Embedding(vocab_size, emb_size)
        self.encode_lstm = nn.LSTM(emb_size, hid_size, batch_first=True)
        self.latent = nn.Linear(hid_size, latent_size * 2)  # mu(mean: latent_size) and sigma(log_var: latent_size)
        self.init_hid = nn.Linear(latent_size, hid_size)
        self.init_c = nn.Linear(latent_size, hid_size)
        self.decoder_lstm = nn.LSTM(emb_size + (latent_size if feed_latent else 0), hid_size, batch_first=True)
        self.out = nn.Linear(hid_size, vocab_size)

    def forward(self, x, len_x):
        x_emb = x  # self.emb(x) do embedding in training code
        packed_x = pack_padded_sequence(x_emb, len_x, batch_first=True)
        _, (h, c) = self.encode_lstm(packed_x)
        latent = self.latent(h[-1])  # use last hidden state of last LSTM layer
        l_mean, l_log_var = latent[:, 0: self.latent_size], latent[:, self.latent_size:]
        l_std = torch.exp(l_log_var / 2.)
        epsilon = Variable(torch.rand(l_std.size()))
        latent = l_mean + l_std * (epsilon.cuda() if USE_CUDA else epsilon)  # z

        init_h = self.init_hid(latent)   # hidden state
        init_c = self.init_c(latent)     # cell state
        init_h = init_h.view(1, init_h.size(0), init_h.size(1))
        init_c = init_c.view(1, init_c.size(0), init_c.size(1))

        if self.word_dropout:
            x_noise = word_dropout_func(x, self.word_dropout, self.unk)
            x_noise = self.emb(x_noise)

            if self.feed_latent:
                latent_ = latent.view(latent.size(0), 1, latent.size(1)).repeat(1, x_emb.size(1), 1)
                x_emb_noise = torch.cat((x_noise, latent_), 2)
            else:
                x_emb_noise = x_noise
            packed_x_noise = pack_padded_sequence(x_emb_noise, len_x, batch_first=True)
        else:
            if self.feed_latent:
                latent_ = latent.view(latent.size(0), 1, latent.size(1)).repeat(1, x_emb.size(1), 1)
                x_emb_noise_ = torch.cat((x_emb, latent_), 2)
                packed_x_noise = pack_padded_sequence(x_emb_noise_, len_x, batch_first=True)
            else:
                packed_x_noise = packed_x

        packed_out, _ = self.decoder_lstm(packed_x_noise, (init_h, init_c))
        out, len_out = pad_packed_sequence(packed_out, batch_first=True)
        # print(out.size())
        out = out.contiguous()
        out = self.out(out.view(-1, out.size(2)))
        # print(out)
        return out, (l_mean, l_log_var, latent)

    # fixme add beam search
    def greedy_generate(self, latent, embed, length=10, deterministic=True):
        latent.register_hook(lambda g: print(torch.abs(g).mean().data[0]))
        init_h = self.init_hid(latent)
        init_c = self.init_c(latent)
        init_h = init_h.view(1, init_h.size(0), init_h.size(1))
        init_c = init_c.view(1, init_c.size(0), init_c.size(1))

        x = torch.ones(latent.size(0), 1).long()  # begin character = 1
        x = Variable(x)  # .cuda()

        out = torch.zeros(latent.size(0), length).long()
        h, c = init_h, init_c

        latent_tmp = latent.view(latent.size(0), 1, latent.size(1))

        for i in range(length):
            x_emb = embed(x)
            if USE_CUDA:
                x_emb = x_emb.cuda()
            if self.feed_latent:
                x_emb = torch.cat((x_emb, latent_tmp), 2)
            o, (h, c) = self.decoder_lstm(x_emb, (h, c))
            # h, c= h.detach(), c.detach()

            o = self.out(o.view(o.size(0), -1))

            if deterministic:
                _, o = o.max(1)
            else:
                o = torch.multinomial(nn.Softmax()(o))

            x.data.copy_(o.data)
            o = o.data.cpu().long()
            out[:, i].copy_(o.view(-1))

        return out

    def beam_search(self, latent, embed, length=10, deterministic=True):
        latent.register_hook(lambda g: print(torch.abs(g).mean().data[0]))
        init_h = self.init_hid(latent)
        init_c = self.init_c(latent)
        init_h = init_h.view(1, init_h.size(0), init_h.size(1))
        init_c = init_c.view(1, init_c.size(0), init_c.size(1))

        x = torch.ones(latent.size(0), 1).long()  # begin character = 1
        x = Variable(x).cuda()
        out = torch.zeros(latent.size(0), length).long()
        h, c = init_h, init_c

        latent_tmp = latent.view(latent.size(0), 1, latent.size(1))

        for i in range(length):
            x_emb = embed(x)
            if self.feed_latent:
                x_emb = torch.cat((x_emb, latent_tmp), 2)
            o, (h, c) = self.decoder_lstm(x_emb, (h, c))
            # h, c= h.detach(), c.detach()

            o = self.out(o.view(o.size(0), -1))

            if deterministic:
                _, o = o.max(1)
            else:
                o = torch.multinomial(nn.Softmax()(o))

            x.data.copy_(o.data)
            o = o.data.cpu().long()
            out[:, i].copy_(o.view(-1))

        return out


def word_dropout_func(x, dropout, unk_char):
    """Randomly replace elements of ``x`` with ``unk_char``.

    Args:
        x: input tensor (token ids, or already-embedded values — the mask is
           elementwise over x's full shape, so on embeddings it corrupts
           individual dimensions; confirm that is the intended behavior).
        dropout: probability in [0, 1] that an element is replaced.
        unk_char: replacement value for dropped elements.

    Returns:
        Tensor of the same shape as ``x`` with dropped elements set to
        ``unk_char``.
    """
    # Bernoulli mask: 1 where the element is dropped, 0 where it is kept.
    mask = (torch.rand(x.size()) <= dropout).long()
    mask = Variable(mask)
    # BUG FIX: follow the input's device instead of the module-level
    # USE_CUDA flag, which crashed on CPU-only machines when left True.
    if x.is_cuda:
        mask = mask.cuda()

    # Keep the original value where mask == 0, substitute where mask == 1.
    x = x * (1 - mask) + mask * unk_char
    return x
