from torch.autograd import Variable
import torch.optim as optim
from model import RNN
import torch.nn.functional as F
import pickle
import torch
# from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence
import numpy as np
import torch.nn as nn


# --- Hyperparameters ---------------------------------------------------------
SENT_LEN = 10  # maximum sentence length (not referenced anywhere in this file)
HIDDEN_SIZE = 200  # RNN hidden-state size
BATCH_SIZE = 64  # sequences per training batch
EMB_SIZE = 350  # word-embedding dimensionality
LR = 1e-3  # SGD learning rate
EPOCHS = 100  # passes over the training file
LATENT_SIZE = 200  # VAE latent-vector size
WORD_DROPOUT = 0.01  # NOTE(review): unused -- RNN is constructed with word_dropout=0 (see fixme in __main__)
FEED_LATENT = False  # NOTE(review): unused -- RNN is constructed with feed_latent=False hardcoded
USE_CUDA = True  # move model inputs/targets to GPU


def padding(x, max_size):
    """Zero-pad `x` along dim 0 up to `max_size` rows.

    Args:
        x: Variable of shape (seq_len, emb_dim) with seq_len <= max_size.
        max_size: number of rows the returned tensor should have.

    Returns:
        Variable of shape (max_size, emb_dim); rows past seq_len are zeros.
    """
    shape = x.data.shape
    pad_size = max_size - shape[0]
    # type_as keeps the pad's dtype and device consistent with the input --
    # the original plain torch.zeros(...) would crash torch.cat for CUDA
    # or non-float inputs.
    pad = torch.zeros(pad_size, shape[1]).type_as(x.data)
    return Variable(torch.cat((x.data, pad), 0))


# target = x[:, 1:] + EOS
def train(model, embed, opt, data_loader, epoch, inv_vocab_dict):
    """Train the VAE-RNN for one epoch over `data_loader`.

    Args:
        model: RNN VAE; model(data, lens) returns (logits, (mean, log_var, latent)).
        embed: nn.Embedding used to embed input token ids (not optimized here).
        opt: optimizer over model.parameters().
        data_loader: yields (data, target), each a list of (LongTensor, length)
            pairs sorted by length descending.
        epoch: current epoch number (logging only).
        inv_vocab_dict: id -> token mapping used to print greedy samples.
    """
    model.train()

    train_loss = 0
    for batch_idx, (data, target) in enumerate(data_loader):
        # Split the (tensor, length) pairs into parallel lists.
        data_lens = [l for _, l in data]
        data = [d for d, _ in data]
        target_lens = [l for _, l in target]
        target = [t for t, _ in target]

        # Embed each sequence, zero-pad to the batch max length, and stack
        # into a (batch, max_len, emb) tensor.
        data = [padding(embed(Variable(d)), max(data_lens)).unsqueeze(0) for d in data]
        data = torch.cat(data, 0)
        if USE_CUDA:
            data = data.cuda()

        opt.zero_grad()

        output, (l_mean, l_log_var, latent) = model(data, data_lens)
        # Batches arrive sorted by length descending, so target[0] is longest.
        max_size = target[0].size(0)
        # For sequence i keep only its first target_lens[i] time steps of the
        # flattened (batch * max_size, vocab) logits -- drop padding positions.
        selected_indices = [list(range(max_size * i, max_size * i + s))
                            for i, s in enumerate(target_lens)]
        # flatten list of lists
        selected_indices = torch.LongTensor([v for sublist in selected_indices for v in sublist])
        if USE_CUDA:
            selected_indices = selected_indices.cuda()
        # BUGFIX: select on the graph-connected `output`, not `output.data`.
        # Indexing .data detached the reconstruction term from autograd, so
        # backward() only propagated the KL gradient and the decoder never
        # learned to reconstruct.  Variable() wraps the index for old-API
        # compatibility (it is a no-op on merged Tensor/Variable versions).
        output = torch.index_select(output, 0, Variable(selected_indices))
        target = torch.cat(target, 0)

        if USE_CUDA:
            target = target.cuda()

        rec = F.cross_entropy(output, target)
        # KL(q(z|x) || N(0, I)): summed over latent dims, averaged over batch.
        kl_divergence = (-0.5 * (1 + l_log_var - l_mean ** 2 - torch.exp(l_log_var))).sum(1).mean()
        # Linear KL annealing over the first 1000 batches.
        # NOTE(review): batch_idx restarts at 0 every epoch, so the weight is
        # re-annealed each epoch -- confirm this is intended.
        kl_weight = min(batch_idx / 1000., 1.)
        elbo_loss = rec + kl_divergence          # reported loss (true ELBO)
        loss = rec + kl_weight * kl_divergence   # optimized loss (annealed KL)
        # .data[0] is the pre-0.4 scalar accessor; use .item() on modern PyTorch.
        train_loss += elbo_loss.data[0]
        loss.backward()
        # nn.utils.clip_grad_norm(model.parameters(), 0.5)
        opt.step()
        if batch_idx != 0 and batch_idx % 100 == 0:
            # Print greedy samples decoded from the current batch's latents.
            out = model.greedy_generate(latent, embed=embed).numpy()
            for l in out.tolist():
                print(' '.join([inv_vocab_dict[k] for k in l]))
            print(batch_idx)
            print('Train Epoch: {} [{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), train_loss
            ))
            train_loss = 0
            # .data detaches so the numpy conversion works on the (now
            # grad-tracking) logits.
            pred = output.data.max(1)[1].cpu().numpy()
            t = target.cpu().numpy()
            print(sum(pred == t))


def gen_data_loader(data_file, tokenizer, vocab_dict, vocab_size, batch_size=BATCH_SIZE):
    """Yield (batch, target_batch) pairs read line-by-line from `data_file`.

    Each batch is a list of (LongTensor, length) pairs sorted by length
    descending.  Inputs are [BOS] + token ids; targets are token ids + [EOS];
    out-of-vocabulary tokens map to UNK.  The last three ids are reserved:
    vocab_size - 3 = BOS, vocab_size - 2 = UNK, vocab_size - 1 = EOS.

    Args:
        data_file: path to a text file, one sentence per line.
        tokenizer: callable mapping a line to a list of tokens.
        vocab_dict: token -> id mapping.
        vocab_size: total vocabulary size including the 3 reserved ids.
        batch_size: sequences per yielded batch (last batch may be smaller).
    """
    bos, unk, eos = vocab_size - 3, vocab_size - 2, vocab_size - 1

    def _sorted_desc(batch, target_batch):
        # Sort by input length, longest first (packed-RNN convention).
        order = np.argsort([length for _, length in batch])[::-1]
        return [batch[i] for i in order], [target_batch[i] for i in order]

    batch = []
    target_batch = []
    # `with` closes the file even if the generator is abandoned mid-way
    # (the original only closed it after full exhaustion).
    with open(data_file) as fd:
        for line in fd:
            tokens = tokenizer(line)
            indices = [bos] + [vocab_dict.get(k, unk) for k in tokens]
            target_indices = indices[1:] + [eos]
            batch.append((torch.LongTensor(indices), len(indices)))
            target_batch.append((torch.LongTensor(target_indices), len(target_indices)))

            # BUGFIX: the original's extra `idx != 0` guard meant batch_size=1
            # never yielded until EOF; checking fullness alone is sufficient.
            if len(batch) == batch_size:
                yield _sorted_desc(batch, target_batch)
                batch = []
                target_batch = []

    # BUGFIX: the original unconditionally yielded the leftover batch, which
    # produced an empty ([], []) batch whenever the line count was an exact
    # multiple of batch_size (crashing the consumer on max() of empty lens).
    # The dead `last_batch` copy loop is removed as well.
    if batch:
        yield _sorted_desc(batch, target_batch)


if __name__ == '__main__':
    import sys

    # Usage: python <script> <text_file> <pickled_CountVectorizer_model>
    text_file = sys.argv[1]
    CV_model_file = sys.argv[2]

    # NOTE(review): unpickling runs arbitrary code -- only load trusted model
    # files.  The file handle is also never explicitly closed.
    CV_model = pickle.load(open(CV_model_file, 'rb'))

    # Map each vocabulary token to a contiguous integer id.
    CV_vocab_dict = dict(zip(CV_model.vocabulary_, range(0, len(CV_model.vocabulary_))))
    # Reserve the last three ids: vocab_size-3 = BOS, -2 = UNK, -1 = EOS.
    CV_vocab_size = len(CV_model.vocabulary_) + 3  # plus unknown and EOS BOS
    CV_tokenizer = CV_model.build_tokenizer()

    # fixme when word_dropout
    # NOTE(review): the module-level WORD_DROPOUT and FEED_LATENT constants are
    # ignored here; word_dropout=0 and feed_latent=False are hardcoded.
    m = RNN(vocab_size=CV_vocab_size, emb_size=EMB_SIZE,
            hid_size=HIDDEN_SIZE, latent_size=LATENT_SIZE,
            word_dropout=0, unk=CV_vocab_size - 2,
            feed_latent=False)

    if USE_CUDA:
        m.cuda()

    # NOTE(review): `emb` is not registered with the optimizer below, so the
    # embedding weights stay at their random initialization -- confirm intended.
    emb = nn.Embedding(CV_vocab_size, EMB_SIZE)
    print(m)
    optimizer = optim.SGD(m.parameters(), LR)

    # id -> token mapping used to decode generated samples for printing.
    inverse_vocab_dict = dict([(v, k) for k, v in CV_vocab_dict.items()])
    inverse_vocab_dict[CV_vocab_size - 3] = '<BOS>'
    inverse_vocab_dict[CV_vocab_size - 2] = '<UNK>'
    inverse_vocab_dict[CV_vocab_size - 1] = '<EOS>'

    # The loader is a single-pass generator, so it is rebuilt every epoch.
    for e in range(EPOCHS):
        train_loader = gen_data_loader(text_file, CV_tokenizer, CV_vocab_dict, CV_vocab_size, BATCH_SIZE)
        train(m, emb, optimizer, train_loader, e, inverse_vocab_dict)
