from language_model.lm import LanguageModel
from data.data_loader import gen_data_loader

from torch.autograd import Variable
import torch.optim as optim

# import torch.nn.functional as F
import pickle
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence
# import numpy as np
import torch.nn as nn


SENT_LEN = 10
HIDDEN_SIZE = 200
BATCH_SIZE = 32
EMB_SIZE = 350
LR = 1e-5
EPOCHS = 100
LATENT_SIZE = 200
WORD_DROPOUT = 0.01
FEED_LATENT = False
USE_CUDA = True


def padding(x, max_size):
    """Zero-pad a (seq_len, emb_dim) Variable along dim 0 up to ``max_size``.

    Args:
        x: 2-D ``Variable`` of embedded tokens, shape (seq_len, emb_dim).
        max_size: target length; must be >= x's current length.

    Returns:
        A ``Variable`` of shape (max_size, emb_dim). Gradients still flow
        through the original rows of ``x`` (the previous implementation
        concatenated on ``x.data``, silently detaching the result from the
        autograd graph).
    """
    seq_len, emb_dim = x.data.shape
    pad_size = max_size - seq_len
    if pad_size <= 0:
        # Already at (or beyond) the target length; nothing to append.
        return x
    pad = Variable(torch.zeros(pad_size, emb_dim))
    # Concatenate Variables (not .data) so backprop reaches the embeddings.
    return torch.cat((x, pad), 0)


# target = x[:, 1:] + EOS
def train(model, embed, opt, data_loader, epoch, inv_vocab_dict):
    """Run one training epoch over ``data_loader``.

    Args:
        model: language model; must expose ``model(packed_input)`` returning an
            object with ``.output`` (flattened per-timestep logits) and
            ``.agenda``, plus ``model.loss(output, target, agenda, step)``.
        embed: ``nn.Embedding`` used to embed input token ids.
        opt: optimizer over the trainable parameters.
        data_loader: yields ``(data, target)`` where each is a list of
            ``(tensor, length)`` pairs, sorted by decreasing length
            (required by ``pack_padded_sequence``).
        epoch: epoch index, used only for logging.
        inv_vocab_dict: id -> token map (currently only used by the
            commented-out sampling code upstream; kept for interface
            compatibility).
    """
    model.train()
    train_step = 0
    train_loss = 0
    for batch_idx, (data, target) in enumerate(data_loader):
        train_step += 1

        # Split the (tensor, length) pairs.
        data_lens = [l for _, l in data]
        data = [d for d, _ in data]
        target_lens = [l for _, l in target]
        target = [t for t, _ in target]

        # Embed each sequence and zero-pad to the batch's max length, then
        # stack into a (batch, max_len, emb_dim) tensor.
        data = [padding(embed(Variable(d)), max(data_lens)).unsqueeze(0) for d in data]
        data = torch.cat(data, 0)
        if USE_CUDA:
            data = data.cuda()

        opt.zero_grad()

        # Positions of the real (non-padding) timesteps in the flattened
        # (batch * max_len, vocab) output. Sequences arrive sorted by length,
        # so the first target is the longest.
        max_size = target[0].size(0)
        selected_indices = [list(range(max_size * i, max_size * i + s))
                            for i, s in enumerate(target_lens)]
        # Flatten the list of lists; wrap in Variable so it can index a
        # Variable under old-style autograd.
        selected_indices = Variable(
            torch.LongTensor([v for sublist in selected_indices for v in sublist]))
        if USE_CUDA:
            selected_indices = selected_indices.cuda()

        data = pack_padded_sequence(data, data_lens, batch_first=True)
        lm_output = model(data)
        target = torch.cat(target, 0)
        if USE_CUDA:
            target = target.cuda()

        output = lm_output.output

        # BUGFIX: index the Variable itself, not ``output.data`` -- going
        # through .data detached the logits from the graph, so the
        # reconstruction loss produced no gradient for the network.
        output = torch.index_select(output, 0, selected_indices)

        loss = model.loss(output, target, lm_output.agenda, train_step)
        train_loss += loss.data[0]  # old-torch scalar extraction (pre-0.4 ``.item()``)
        loss.backward()
        # NOTE(review): renamed to clip_grad_norm_ in torch >= 0.4.
        nn.utils.clip_grad_norm(model.parameters(), 0.5)
        opt.step()
        if batch_idx != 0 and batch_idx % 100 == 0:
            print(batch_idx)
            # NOTE(review): ``data`` is a PackedSequence here, so len(data) is
            # the namedtuple arity, not the batch size -- confirm intent.
            print('Train Epoch: {} [{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), train_loss
            ))
            train_loss = 0
            # ``output`` is now attached to the graph; pull raw values via
            # .data before converting to numpy.
            pred = output.max(1)[1].data.cpu().numpy()
            t = target.cpu().numpy()
            print(sum(pred == t))


if __name__ == '__main__':
    import sys

    # Usage: script.py <text_file> <count-vectorizer pickle>
    text_file = sys.argv[1]
    CV_model_file = sys.argv[2]

    # SECURITY NOTE: pickle.load executes arbitrary code from the file --
    # only load vectorizers you produced yourself.
    CV_model = pickle.load(open(CV_model_file, 'rb'))

    # Map each vocabulary token to a contiguous integer id.
    CV_vocab_dict = dict(zip(CV_model.vocabulary_, range(0, len(CV_model.vocabulary_))))
    CV_vocab_size = len(CV_model.vocabulary_) + 3  # plus <BOS>, <UNK>, <EOS>
    CV_tokenizer = CV_model.build_tokenizer()

    m = LanguageModel(emb_dim=EMB_SIZE, output_dim=CV_vocab_size, hid_dim=HIDDEN_SIZE, agenda_dim=LATENT_SIZE,
                      num_layers=2, kl_weight_steps=40000, kl_weight_rate=1.0, kl_weight_cap=1.0, dci_keep_rate=0.8)

    if USE_CUDA:
        m.cuda()

    emb = nn.Embedding(CV_vocab_size, EMB_SIZE)
    print(m)
    # BUGFIX: include the embedding parameters in the optimizer -- previously
    # only m.parameters() was optimized, leaving the input embeddings frozen
    # at their random initialization. Parameters that never receive a
    # gradient are skipped by SGD, so this is backward-compatible.
    optimizer = optim.SGD(list(m.parameters()) + list(emb.parameters()), LR)

    # id -> token map for decoding samples; the three extra ids are the
    # special tokens appended after the vectorizer vocabulary.
    inverse_vocab_dict = dict([(v, k) for k, v in CV_vocab_dict.items()])
    inverse_vocab_dict[CV_vocab_size - 3] = '<BOS>'
    inverse_vocab_dict[CV_vocab_size - 2] = '<UNK>'
    inverse_vocab_dict[CV_vocab_size - 1] = '<EOS>'

    for e in range(EPOCHS):
        # The loader is rebuilt each epoch (it is an exhaustible iterable).
        train_loader = gen_data_loader(text_file, CV_tokenizer, CV_vocab_dict, CV_vocab_size, BATCH_SIZE)
        train(m, emb, optimizer, train_loader, e, inverse_vocab_dict)


