import pickle
import re
import time
import unicodedata

import nltk
import numpy as np
import torch
from flask import request

from config import device, logger
from translation.decoder import Decoder
from translation.encoder import Encoder
from translation.transformer import Transformer

checkpoint = 'repo/tran-en/transformer.pt'
logger.info('loading model: {}...'.format(checkpoint))
vocab_size = 15000
# NOTE(review): encoder/decoder are constructed but never passed to Transformer();
# confirm Transformer builds its own sub-modules — otherwise these two are dead objects.
encoder = Encoder(n_src_vocab=vocab_size)
decoder = Decoder(n_tgt_vocab=vocab_size)
model = Transformer()
# map_location keeps a GPU-saved checkpoint loadable on CPU-only hosts.
model.load_state_dict(torch.load(checkpoint, map_location=device))
model = model.to(device)
model.eval()

vocab_file = 'repo/tran-en/vocab.pkl'
logger.info('loading vocab: {}...'.format(vocab_file))
with open(vocab_file, 'rb') as file:
    # pickle is only safe on trusted local artifacts — never load untrusted data this way.
    data = pickle.load(file)
    src_idx2char = data['dict']['src_idx2char']
    src_char2idx = data['dict']['src_char2idx']
    tgt_idx2char = data['dict']['tgt_idx2char']
    tgt_char2idx = data['dict']['tgt_char2idx']


# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip combining marks from *s* after NFD decomposition, yielding plain ASCII.

    See http://stackoverflow.com/a/518232/2809427.
    """
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)


def normalizeString(s):
    """Lowercase and ASCII-fold *s*, put a space before '.', '!' and '?',
    and collapse every other non-letter run into a single space."""
    text = unicodeToAscii(s.lower().strip())
    text = re.sub(r"([.!?])", r" \1", text)
    text = re.sub(r"[^a-zA-Z.!?]+", r" ", text)
    return text


def encode_text(word_map, c):
    """Map every token in *c* to its index in *word_map*, using '<unk>' for misses."""
    indices = []
    for token in c:
        indices.append(word_map.get(token, word_map['<unk>']))
    return indices


def do_translate_en():
    """Handle one translation request.

    Reads the 'text' field from the POSTed form, translates it, and
    returns a tuple of (translated_text, elapsed_seconds).
    """
    start = time.time()
    in_text = request.form['text']
    print('input_text: ' + str(in_text))
    out_text = translate(in_text)
    # time.time() differences are already floats; the old float() round-trip was a no-op.
    elapsed = time.time() - start
    return out_text, elapsed


def translate(in_text):
    """Tokenize and encode *in_text*, run the Transformer's beam search,
    and return the top hypothesis as a string (with <sos>/<eos> stripped).
    """
    sentence_in = in_text.strip().lower()
    tokens = [normalizeString(s.strip()) for s in nltk.word_tokenize(sentence_in)]
    encoded = encode_text(src_char2idx, tokens)
    # np.long was removed in NumPy 1.24; int64 matches torch.LongTensor below.
    # Also avoid shadowing the builtin `input`.
    src = torch.from_numpy(np.array(encoded, dtype=np.int64)).to(device)
    src_length = torch.LongTensor([len(encoded)]).to(device)

    with torch.no_grad():
        nbest_hyps = model.recognize(input=src, input_length=src_length, char_list=tgt_idx2char)

    out_list = []
    for hyp in nbest_hyps:
        out = ''.join(tgt_idx2char[idx] for idx in hyp['yseq'])
        out = out.replace('<sos>', '').replace('<eos>', '')
        out_list.append(out)

    # Hypotheses are assumed ranked best-first; take the top one.
    out_text = out_list[0]
    print('out: {}'.format(out_text))

    return out_text
