import torch
from loguru import logger
from model.TranslationModel import TranslationModel
from utils.data_helper import LoadEnglishGermanDataset
from config.config import cfg
from utils.data_helper import tokenizer_dict


def translate(model, src, data_loader, config):
    """Translate one German sentence into English via greedy decoding.

    Args:
        model: trained TranslationModel (project type).
        src: raw German source sentence (str).
        data_loader: dataset wrapper exposing vocabs, tokenizers, and the
            special-token indices (BOS_IDX / EOS_IDX).
        config: run configuration; must provide `device`.

    Returns:
        The decoded English sentence as a string.
    """
    de_vocab = data_loader.de_vocab
    en_vocab = data_loader.en_vocab
    de_tokenizer = data_loader.tokenizer['de']
    model.eval()
    # Tokenize the source sentence and map each token to its vocabulary id.
    token_ids = [de_vocab[tok] for tok in de_tokenizer(src)]
    seq_len = len(token_ids)
    # Wrap as a [seq_len, 1] tensor: a single sentence in time-major layout.
    src = torch.LongTensor(token_ids).reshape(seq_len, 1)
    logger.debug(f'src tensor shape = {src.shape}')

    # Greedily decode; allow a handful of extra target positions beyond the
    # source length so the model has room to emit '<eos>'.
    return greedy_decode(model,
                         src,
                         max_len=seq_len + 5,
                         start_symbol=data_loader.BOS_IDX,
                         config=config,
                         data_loader=data_loader,
                         tgt_vocab=en_vocab)


def greedy_decode(model, src, max_len, start_symbol, config, data_loader, tgt_vocab):
    """Autoregressive greedy decoding: emit the argmax token at each step.

    Args:
        model: object exposing `encoder`, `decoder`, `classification`.
        src: source token ids, shape [src_len, 1] (time-major, batch of one).
        max_len: upper bound on the generated sequence length.
        start_symbol: id of '<bos>', used as the first decoder input.
        config: provides the target `device`.
        data_loader: provides `EOS_IDX`, the end-of-sequence id.
        tgt_vocab: target vocabulary; maps ids back to token strings.

    Returns:
        The decoded sentence as a space-joined string with the '<bos>' and
        '<eos>' markers stripped out.
    """
    device = config.device
    src = src.to(device)
    # Encode the source once; the memory is reused at every decode step.
    memory = model.encoder(src).to(device)

    # Decoder input starts as a single '<bos>' token, shape [1, 1].
    ys = torch.full((1, 1), start_symbol, dtype=torch.long, device=device)

    partial = ""  # running translation, logged for debugging only
    for _ in range(max_len - 1):
        # Decoder output [tgt_len, 1, embed_dim] -> [1, tgt_len, embed_dim].
        dec_out = model.decoder(ys, memory).transpose(0, 1)
        # Classify only the last position to predict the next token.
        logits = model.classification(dec_out[:, -1])
        next_word = torch.max(logits, dim=1)[1].item()
        # Feed the prediction back in as part of the next decoder input.
        nxt = torch.ones(1, 1).type_as(src.data).fill_(next_word)
        ys = torch.cat([ys, nxt], dim=0)
        # Stop as soon as the end-of-sequence marker is produced.
        if next_word == data_loader.EOS_IDX:
            break
        partial += tgt_vocab.lookup_token(next_word) + " "
        logger.debug(f'Translation: {partial}')
    words = [tgt_vocab.lookup_token(tok) for tok in ys.flatten()]
    return " ".join(words).replace("<bos>", "").replace("<eos>", "")


def translate_german_to_english(src, tokenizer, config):
    """Build the dataset and model, load trained weights, and translate `src`.

    Args:
        src: German sentence to translate (str).
        tokenizer: tokenizer dict forwarded to the dataset loader.
        config: run configuration (paths, model hyper-parameters, device).

    Returns:
        The English translation produced by greedy decoding.
    """
    data_loader = LoadEnglishGermanDataset(cfg.train_corpus_file_paths,
                                           batch_size=config.batch_size,
                                           tokenizer=tokenizer)
    model = TranslationModel(src_vocab_size=len(data_loader.de_vocab),
                             tgt_vocab_size=len(data_loader.en_vocab),
                             d_model=config.d_model,
                             nhead=config.num_head,
                             num_encoder_layers=config.num_encoder_layers,
                             num_decoder_layers=config.num_decoder_layers,
                             dim_feedforward=config.dim_feedforward,
                             dropout=config.dropout)
    model = model.to(config.device)
    # BUG FIX: the original called torch.load() and discarded the result, so
    # inference ran with freshly-initialized random weights. Load the
    # checkpoint onto the target device and actually apply it to the model.
    checkpoint = torch.load(config.model_save_dir + '/model.pkl',
                            map_location=config.device)
    if isinstance(checkpoint, dict):
        # Checkpoint is a state_dict (possibly an OrderedDict of tensors).
        model.load_state_dict(checkpoint)
    else:
        # Checkpoint is a whole pickled model object — use it directly.
        # NOTE(review): can't tell from here which format training saved;
        # both are handled. Confirm against the training script.
        model = checkpoint.to(config.device)
    return translate(model, src, data_loader, config)


if __name__ == '__main__':
    # Demo: translate two German sentences and show the reference English
    # alongside the model output. Each entry is (source, reference).
    examples = [
        ("Eine Gruppe von Menschen steht vor einem Iglu.",
         "A group of people are facing an igloo."),
        ("Ein Mann in einem blauen Hemd steht auf einer Leiter und putzt ein Fenster.",
         "A man in a blue shirt is standing on a ladder cleaning a window."),
    ]

    for german, english in examples:
        result = translate_german_to_english(german, tokenizer_dict, cfg)
        logger.info(f'德语： {german}')
        logger.info(f'翻译： {result}')
        logger.info(f'英语： {english}')
