import glob
import unicodedata
import string
import torch
import torch.optim as optim
from model import RNN
import torch.nn.functional as F
from torch.autograd import Variable


# Character vocabulary: ASCII letters plus the punctuation kept by unicode_to_ascii.
ALL_LETTERS = string.ascii_letters + " .,;"
# One extra output slot (index len(ALL_LETTERS)) is reserved for the EOS marker.
N_LETTERS = len(ALL_LETTERS) + 1  # plus EOS
LEARNING_RATE = 0.0001

# Language categories; list order defines each category's one-hot index.
ALL_CATS = ['Arabic',
            'Chinese',
            'Czech',
            'Dutch',
            'English',
            'French',
            'German',
            'Greek',
            'Irish',
            'Italian',
            'Japanese',
            'Korean',
            'Polish',
            'Portuguese',
            'Russian',
            'Scottish',
            'Spanish',
            'Vietnamese']


def find_files(path): return glob.glob(path)


def unicode_to_ascii(s):
    """Strip accents from *s*, keeping only characters present in ALL_LETTERS."""
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) == 'Mn':
            # Drop combining marks (the accents split off by NFD).
            continue
        if ch in ALL_LETTERS:
            kept.append(ch)
    return ''.join(kept)


def letter_to_index(letter):
    """Return the position of *letter* in ALL_LETTERS.

    NOTE(review): str.find yields -1 for characters outside ALL_LETTERS,
    which the one-hot helpers silently map to the last (EOS) column —
    confirm inputs are pre-filtered by unicode_to_ascii.
    """
    position = ALL_LETTERS.find(letter)
    return position


def letter_to_tensor(letter):
    """One-hot encode a single *letter* as a (1, N_LETTERS) float tensor."""
    one_hot = torch.zeros(1, N_LETTERS)
    one_hot[0, letter_to_index(letter)] = 1
    return one_hot


def get_input_tensor(line):
    """Encode *line* as a (len(line), 1, N_LETTERS) stack of one-hot vectors.

    The EOS marker is not included here; targets carry it instead
    (see get_target_tensor).
    """
    encoded = torch.zeros(len(line), 1, N_LETTERS)
    for pos, ch in enumerate(line):
        encoded[pos, 0, letter_to_index(ch)] = 1
    return encoded


def get_category_tensor(cat, all_cat):
    """One-hot encode *cat* as a (1, len(all_cat)) tensor.

    Raises ValueError (via list.index) if *cat* is not in *all_cat*.
    """
    one_hot = torch.zeros(1, len(all_cat))
    one_hot[0, all_cat.index(cat)] = 1
    return one_hot


# input[1:] + [EOS]
def get_target_tensor(line):
    """Return next-letter target indices for *line* as a LongTensor.

    Targets are the input shifted left by one (each position predicts the
    following letter), terminated by the EOS index N_LETTERS - 1.
    Consistency fix: reuses letter_to_index instead of duplicating
    ALL_LETTERS.find inline.
    """
    indices = [letter_to_index(ch) for ch in line[1:]]
    indices.append(N_LETTERS - 1)  # EOS terminates every target sequence
    return torch.LongTensor(indices)


def train_step(rnn_model, opt, cat_tensor, input_tensor, target_tensor):
    """Run one training example through the model and take one optimizer step.

    Feeds the sequence to *rnn_model* one letter at a time, summing NLL loss
    over all time steps, then backpropagates and steps *opt*.

    Returns (last_output, mean_loss_per_letter) where mean loss is a float.
    """
    cat_tensor = Variable(cat_tensor)
    input_tensor = Variable(input_tensor)
    target_tensor = Variable(target_tensor)
    hidden = rnn_model.init_hidden()

    opt.zero_grad()
    loss_ = 0
    output_ = None
    seq_len = input_tensor.size(0)
    for k in range(seq_len):
        output_, hidden = rnn_model(cat_tensor, input_tensor[k], hidden)
        # Slice (not index) the target so it keeps shape (1,): nll_loss
        # rejects 0-dim targets on PyTorch >= 0.4.
        # nll_loss expects log-probabilities; presumably the model ends in
        # log_softmax — TODO confirm against model.RNN.
        loss_ += F.nll_loss(output_, target_tensor[k:k + 1])

    loss_.backward()
    # NOTE(review): if loss goes NaN on long sequences, consider
    # torch.nn.utils.clip_grad_norm_(rnn_model.parameters(), 0.25) here.
    opt.step()

    # .item() replaces the old Tensor.data[0] indexing, which raises on
    # 0-dim loss tensors in PyTorch >= 0.4.
    return output_, loss_.item() / seq_len


def data_iter(path):
    """Yield (category_tensor, input_tensor, target_tensor) per line of *path*.

    Each line is expected to be "category<TAB>name"; lines that do not split
    into exactly two fields are skipped.

    Fix: the original called open() without ever closing the handle; the
    'with' block releases it when the generator is exhausted.
    """
    # assumes the dataset file is UTF-8 encoded — TODO confirm
    with open(path, encoding='utf-8') as fp:
        for raw in fp:
            parts = raw.strip().split('\t')
            if len(parts) != 2:
                continue
            category, line = parts
            line = unicode_to_ascii(line.strip())
            yield (get_category_tensor(category, ALL_CATS),
                   get_input_tensor(line),
                   get_target_tensor(line))


def train(rnn_model, opt, data_loader):
    """One pass over *data_loader*, printing running loss every 1000 steps.

    After each print, samples a few names per language as a qualitative
    progress check.

    Bug fix: the sampling calls previously referenced the global ``m``
    instead of the ``rnn_model`` argument, so training any other model
    instance would sample from the wrong model (or raise NameError).
    """
    sum_loss = 0
    for idx, (cat_tensor, in_tensor, target_tensor) in enumerate(data_loader):
        _, loss = train_step(rnn_model, opt, cat_tensor, in_tensor, target_tensor)
        sum_loss += loss
        if idx % 1000 == 0:
            print(sum_loss)
            sum_loss = 0
            for category, starts in (('Russian', 'RUS'), ('German', 'GER'),
                                     ('Spanish', 'SPA'), ('Chinese', 'CHI')):
                samples(rnn_model, category, starts)


MAX_LENGTH = 20


# Sample from a category and starting letter
def sample(rnn, category, start_letter='A'):
    """Greedily generate one name for *category* starting with *start_letter*.

    Repeatedly feeds the most likely letter back in until the model emits
    EOS or MAX_LENGTH letters have been generated.
    """
    category_tensor = Variable(get_category_tensor(category, ALL_CATS))
    input_tensor = Variable(get_input_tensor(start_letter))
    hidden = rnn.init_hidden()

    output_name = start_letter

    for _ in range(MAX_LENGTH):
        output, hidden = rnn(category_tensor, input_tensor[0], hidden)
        topv, topi = output.data.topk(1)
        # .item() converts the 0-dim index tensor to a plain int; without it
        # ALL_LETTERS[topi] fails on PyTorch >= 0.4.
        topi = topi[0][0].item()
        if topi == N_LETTERS - 1:  # EOS slot: stop generating
            break
        letter = ALL_LETTERS[topi]
        output_name += letter
        input_tensor = Variable(get_input_tensor(letter))
    return output_name


# Get multiple samples from one category and multiple starting letters
def samples(rnn, category, start_letters='ABC'):
    """Print one sampled name per starting letter in *start_letters*."""
    for initial in start_letters:
        generated = sample(rnn, category, initial)
        print(generated)


if __name__ == '__main__':
    import sys

    in_file = sys.argv[1]

    # NOTE(review): keep the global name 'm' — train() appears to reference
    # it directly when printing samples.
    m = RNN(len(ALL_CATS), N_LETTERS, 128, N_LETTERS)
    optimizer = optim.Adam(m.parameters(), LEARNING_RATE)
    print(m)

    for e in range(10):
        print('epoch: {}'.format(e))
        train(m, optimizer, data_iter(in_file))

    # Final qualitative check: a few sampled names per language.
    for lang, initials in (('Russian', 'RUS'), ('German', 'GER'),
                           ('Spanish', 'SPA'), ('Chinese', 'CHI')):
        samples(m, lang, initials)
