import glob
import unicodedata
import string
import torch
import torch.optim as optim
from model import RNN
import torch.nn.functional as F
from torch.autograd import Variable
from model2 import LanguageModel
from data_loader import gen_data_loader, ALL_CATS, ALL_LETTERS, LEARNING_RATE, N_LETTERS
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence  # , PackedSequence
import torch.nn as nn

BATCH_SIZE = 64
USE_CUDA = True
MAX_LENGTH = 20


def padding(x, max_size):
    """Zero-pad a 2-D Variable along dim 0 up to `max_size` rows.

    The underlying tensor is concatenated with a block of zeros of shape
    (max_size - current_rows, width) and re-wrapped in a fresh Variable
    (so the result is detached from any autograd history of `x`).
    """
    rows, width = x.data.shape
    filler = torch.zeros((max_size - rows, width))
    return Variable(torch.cat((x.data, filler), 0))


def letters_to_tensor(letter_indices, cat_index):
    """One-hot encode a letter sequence together with its category.

    Returns a (seq_len, len(ALL_CATS) + N_LETTERS) float tensor where each
    row is the category one-hot concatenated with that letter's one-hot.
    Raises (via torch.stack on an empty list) if `letter_indices` is empty.
    """
    cat_vec = torch.zeros(len(ALL_CATS))
    cat_vec[cat_index] = 1

    def one_hot_row(letter_idx):
        # Category part is shared across rows; only the letter part varies.
        letter_vec = torch.zeros(N_LETTERS)
        letter_vec[letter_idx] = 1
        return torch.cat((cat_vec, letter_vec))

    return torch.stack([one_hot_row(i) for i in letter_indices])


# target = x[:, 1:] + EOS
def train(model, opt, data_loader):
    """Run one epoch of teacher-forced language-model training.

    Each batch from `data_loader` is (X, target) where — per the unpacking
    below — every X element is a (letter_indices, length, category) triple
    and every target element is a (target_tensor, length) pair.
    Batches are packed with `pack_padded_sequence`, which requires lengths
    sorted in decreasing order — presumably guaranteed by the loader;
    TODO confirm in data_loader.
    """
    model.train()

    train_loss = 0
    train_step = 0
    for batch_idx, (X, target) in enumerate(data_loader):
        train_step += 1
        data_lens = [l for _, l, _ in X]
        data = [d for d, _, _ in X]
        cats = [c for _, _, c in X]

        target_lens = [l for _, l in target]
        target = [t for t, _ in target]

        # padding data
        # One-hot encode each sequence (category + letter) and zero-pad all
        # sequences to the longest length in the batch.
        data = [padding(Variable(letters_to_tensor(d, c)), max(data_lens)).unsqueeze(0) for d, c in zip(data, cats)]
        data = torch.cat(data, 0)

        if USE_CUDA:
            data = data.cuda()

        data = pack_padded_sequence(data, data_lens, batch_first=True)

        opt.zero_grad()

        lm_output = model(data)

        # print(sum([t.size(0) for t in target]))
        max_size = target[0].size(0)  # the max size is the first one
        # For each sequence i, keep only its first target_lens[i] positions
        # out of the row-major flattened (batch * max_size) output.
        selected_indices = [list(range(max_size * i, max_size * i + s)) for s, i in zip(target_lens, range(len(target)))]
        # flatten list of lists
        selected_indices = torch.LongTensor([v for sublist in selected_indices for v in sublist])
        if USE_CUDA:
            selected_indices = selected_indices.cuda()

        target = torch.cat(target, 0)

        if USE_CUDA:
            target = target.cuda()

        output = lm_output.output

        # NOTE(review): indexing `output.data` detaches `output` from the
        # autograd graph, so gradients from the main loss term likely flow
        # only through `lm_output.agenda` — confirm this is intentional.
        output = torch.index_select(output.data, 0, selected_indices)

        loss = model.loss(output, target, lm_output.agenda, train_step)
        # Legacy (pre-0.4) PyTorch APIs: `loss.data[0]` and the
        # non-underscore `clip_grad_norm`.
        train_loss += loss.data[0]
        loss.backward()
        nn.utils.clip_grad_norm(model.parameters(), 0.5)
        opt.step()

        if batch_idx != 0 and batch_idx % 100 == 0:
            # alpha = torch.linspace(0, 1, 10).view(-1, 1).repeat(1, l_mean.size(1))
            # alpha = Variable(alpha).cuda()
            # agenda = l_mean[0:1].repeat(alpha.size(0), 1) * alpha + l_mean[1:2].repeat(alpha.size(0), 1) * (1 - alpha)
            # out = model.greedy_generate(latent, embed=embed).numpy()
            # NOTE(review): train_loss is reset here but never printed or
            # logged — looks like a dropped logging statement; confirm.
            train_loss = 0
            # Print the count of exact next-letter matches in this batch.
            pred = output.max(1)[1].cpu().numpy()
            t = target.cpu().numpy()
            print(sum(pred == t), '\t', t.shape)
            sample(model, 'Chinese', 'A')


# Sample from a category and starting letter
def sample(rnn, category, start_letter='A'):
    """Feed one (category, start_letter) step through the model.

    Builds a single-step packed batch, runs the model, prints the packed
    input and the raw model output, and returns `start_letter` unchanged
    (generation of the rest of the name is not implemented here).
    """
    letter_idx = ALL_LETTERS.find(start_letter)
    cat_idx = ALL_CATS.index(category)
    step = letters_to_tensor([letter_idx], cat_idx)
    batch = padding(Variable(step), 1).unsqueeze(0)
    if USE_CUDA:
        batch = batch.cuda()

    packed = pack_padded_sequence(batch, [1], batch_first=True)
    print(packed)

    lm_output = rnn(packed)

    output_name = start_letter
    agenda = lm_output.agenda
    output = lm_output.output
    print('output is', output)

    return output_name


# Get multiple samples from one category and multiple starting letters
def samples(rnn, category, start_letters='ABC'):
    """Print one sample per starting letter for the given category."""
    for letter in start_letters:
        print(sample(rnn, category, letter))


if __name__ == '__main__':
    import sys

    # Training corpus path is the single positional argument.
    in_file = sys.argv[1]

    # Input width = category one-hot + letter one-hot.
    m = LanguageModel(len(ALL_CATS) + N_LETTERS, N_LETTERS, 128, 128, 2, 400000, 1, 1, 1)
    print(m)

    if USE_CUDA:
        m.cuda()

    optimizer = optim.Adam(m.parameters(), LEARNING_RATE)
    for e in range(10):
        print('epoch: {}'.format(e))
        # A fresh loader is built every epoch, matching the original flow.
        train(m, optimizer, gen_data_loader(in_file, BATCH_SIZE))
