import torch
import numpy as np
import string
import unicodedata


# Character vocabulary: ASCII letters plus a small set of punctuation.
ALL_LETTERS = string.ascii_letters + " .,;"
# Total vocabulary size: the letters above plus three special-token slots.
# gen_data_loader uses N_LETTERS - 3 as BOS and N_LETTERS - 1 as EOS;
# N_LETTERS - 2 is presumably the UNK slot — confirm against the model code.
N_LETTERS = len(ALL_LETTERS) + 3  # plus BOS, UNK, EOS
# Optimizer learning rate; not read in this file — presumably consumed by
# the training script (verify against caller).
LEARNING_RATE = 0.0001

# Name-origin categories; a sample's integer label is its index in this list
# (see ALL_CATS.index(cat) in gen_data_loader).
ALL_CATS = ['Arabic',
            'Chinese',
            'Czech',
            'Dutch',
            'English',
            'French',
            'German',
            'Greek',
            'Irish',
            'Italian',
            'Japanese',
            'Korean',
            'Polish',
            'Portuguese',
            'Russian',
            'Scottish',
            'Spanish',
            'Vietnamese']


def unicode_to_ascii(s):
    """Strip accents from *s* and drop any character not in ALL_LETTERS.

    Decomposes to NFD so accented letters split into a base letter plus
    combining marks, then discards the marks (category 'Mn') and any
    remaining character outside the vocabulary.
    """
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) == 'Mn':
            continue  # combining mark (accent) — discard
        if ch in ALL_LETTERS:
            kept.append(ch)
    return ''.join(kept)


def letter_to_index(letter):
    """Return the vocabulary index of *letter*.

    Unknown characters map to the UNK slot (N_LETTERS - 2) rather than the
    -1 that ``str.find`` returns: a raw -1 used as a tensor index (as in
    letter_to_tensor) would silently select the last position, which is
    the EOS slot.
    """
    index = ALL_LETTERS.find(letter)
    return index if index != -1 else N_LETTERS - 2


def letter_to_tensor(letter):
    """Return a one-hot float tensor of shape (1, N_LETTERS) for *letter*."""
    one_hot = torch.zeros(1, N_LETTERS)
    one_hot[0, letter_to_index(letter)] = 1
    return one_hot


# TODO: replace with the torch.utils.data.Dataset / DataLoader API
def gen_data_loader(data_file, batch_size):
    """Yield ``(batch, target_batch)`` pairs read from a TSV file.

    Each line of *data_file* is expected to be ``category<TAB>name``;
    malformed lines are skipped.  Names are accent-stripped via
    unicode_to_ascii before encoding.

    Every yielded ``batch`` is a list of ``(indices, length, cat_index)``
    tuples, where ``indices`` is ``[BOS] + letter indices``; the matching
    ``target_batch`` holds ``(target_tensor, length)`` with targets shifted
    left by one and terminated by EOS.  Both lists are sorted by descending
    sequence length (the layout packed-sequence utilities expect).

    A final, smaller batch is yielded at EOF when non-empty.

    Fixes over the previous version: the file is closed even if the
    generator is abandoned (``with``); the dead ``indices_tensor`` and the
    always-true ``len(indices) > 0`` check are gone; the ``idx != 0`` guard
    that broke ``batch_size == 1`` is removed; the unused last-batch copy
    loop is dropped; an empty trailing batch is no longer yielded.
    """
    vocab_size = N_LETTERS
    bos = vocab_size - 3
    eos = vocab_size - 1

    def sorted_pair(items, targets):
        # Descending-length order for pack_padded_sequence-style consumers.
        order = np.argsort([length for _, length, _ in items])[::-1]
        return [items[i] for i in order], [targets[i] for i in order]

    batch = []
    target_batch = []

    with open(data_file) as fd:
        for line in fd:
            fields = line.strip().split('\t')
            if len(fields) != 2:
                continue  # skip malformed lines
            cat, name = fields

            name = unicode_to_ascii(name)
            indices = [bos] + [letter_to_index(ch) for ch in name]
            target_indices = indices[1:] + [eos]  # next-letter targets, plus EOS

            batch.append((indices, len(indices), ALL_CATS.index(cat)))
            target_batch.append(
                (torch.LongTensor(target_indices), len(target_indices)))

            if len(batch) == batch_size:
                yield sorted_pair(batch, target_batch)
                batch = []
                target_batch = []

    # Flush the final partial batch, if any.
    if batch:
        yield sorted_pair(batch, target_batch)

