"""
@author : Hyunwoong
@when : 2019-12-19
@homepage : https://github.com/gusdnd852
"""

import math
from collections import Counter
import numpy as np
import torch
import sys
import pathlib
import argparse

sys.path.append(str(pathlib.Path(__file__).parent))

from data import *
from models.model.transformer import Transformer
from util.bleu import get_bleu, idx_to_word


def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total


# Build the Transformer once at module import time so repeated inference
# calls can reuse it.  All hyperparameters (d_model, n_heads, ...) and the
# pad/sos indices come from the star-import of `data` above.
model = Transformer(src_pad_idx=src_pad_idx,
                    trg_pad_idx=trg_pad_idx,
                    trg_sos_idx=trg_sos_idx,
                    d_model=d_model,
                    enc_voc_size=enc_voc_size,
                    dec_voc_size=dec_voc_size,
                    max_len=max_len,
                    ffn_hidden=ffn_hidden,
                    n_head=n_heads,
                    n_layers=n_layers,
                    drop_prob=0.00,  # dropout disabled: this script only runs inference
                    device=device,
                    is_inference=True).to(device)

print(f'The model has {count_parameters(model):,} trainable parameters')


def inference_sentence(model, sentence_en):
    """Translate one English sentence and print the source/prediction pair.

    Parameters
    ----------
    model : Transformer
        Model with loaded weights, in inference mode.
    sentence_en : str
        English source sentence (lower-cased internally).

    Returns
    -------
    tuple
        (src_tokens, src_words, output_words, attentions) where `attentions`
        stacks the per-layer encoder self-attention maps.
    """
    sentence_en = sentence_en.lower()
    # Tokenize and numericalize into a batch of size 1.
    src = loader.source.process([loader.source.tokenize(sentence_en)])

    output = model(src)

    j = 0  # single-sentence batch: only index 0 is meaningful
    src_words = idx_to_word(src[j], loader.source.vocab)
    src_tokens = [loader.source.vocab.itos[t] for t in src[j]]
    try:
        # Drop everything from <eos> onwards, otherwise the tail is a run of
        # padding "." tokens.  Use .tolist() instead of .numpy(): .numpy()
        # raises TypeError on CUDA tensors, .tolist() works on both devices.
        output_valid = output[j][:output[j].tolist().index(trg_eos_idx)]
    except ValueError:
        # No <eos> was generated: keep the full output sequence.
        output_valid = output[j]
    output_words = idx_to_word(output_valid, loader.target.vocab)

    print('source :', src_words)
    print('predicted :', output_words)
    print()

    # NOTE(review): assumes each layer's last_attention squeezes to
    # (n_head, src_len, src_len) so the stack is (n_layers, n_head, L, L) — confirm.
    attentions = torch.stack([layer.attention.last_attention.squeeze() for layer in model.encoder.layers], dim=0)

    return src_tokens, src_words, output_words, attentions


def inference_model(sentence_en):
    """Load the saved checkpoint and translate *sentence_en*.

    The checkpoint is re-read from disk on every call, which keeps the
    function self-contained at the cost of some startup time per sentence.

    Parameters
    ----------
    sentence_en : str
        English source sentence.

    Returns
    -------
    tuple
        Whatever `inference_sentence` returns:
        (src_tokens, src_words, output_words, attentions).
    """
    model_path = pathlib.Path(__file__).parent.joinpath('saved/model-saved.pt').resolve()
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted checkpoints (consider weights_only=True on recent torch).
    model.load_state_dict(torch.load(model_path, map_location=device))
    # Bug fix: switch to eval mode so dropout/normalization layers behave
    # deterministically during inference (was left in training mode).
    model.eval()

    with torch.no_grad():
        return inference_sentence(model, sentence_en)


if __name__ == '__main__':
    # Optional -l flag: restore the saved vocabulary/loader state first.
    cli = argparse.ArgumentParser()
    cli.add_argument('-l', action='store_true', help='load loader')
    options = cli.parse_args()
    if options.l:
        load_loader(pathlib.Path(__file__).parent.joinpath('saved'))
    # Translate sentences interactively; an empty line ends the session.
    for sentence_en in iter(lambda: input('Input English sentence : '), ''):
        inference_model(sentence_en.lower())
