import torch
from tokenizer import ChineseTokenizer, EnglishTokenizer
from model import TranslationModel
from config import PROCESSED_DATA_DIR, MODELS_DIR, SEQ_LEN


def predict_batch(input_tensor, model, zh_tokenizer, en_tokenizer, device):
    """Greedily decode one batch of source sequences into target token indexes.

    Args:
        input_tensor: source token indexes, shape [batch_size, seq_len].
        model: translation model exposing ``encoder``, ``decoder`` and
            ``transformer`` sub-modules.
        zh_tokenizer / en_tokenizer: source / target tokenizers.
        device: device on which decoding tensors are created.

    Returns:
        A list with one entry per batch element; each entry is the list of
        predicted token indexes (SOS/EOS markers excluded).
    """
    model.eval()

    with torch.no_grad():
        # Encode the source once; the memory is reused at every decode step.
        src_pad_mask = input_tensor == zh_tokenizer.pad_token_index
        memory = model.encoder(input_tensor, src_pad_mask)
        # memory: [batch_size, src_len, d_model]

        n_seqs = input_tensor.shape[0]
        # Every target sequence starts with the SOS token.
        decoder_input = torch.full((n_seqs, 1), en_tokenizer.sos_token_index, device=device)

        results = [[] for _ in range(n_seqs)]
        done = [False] * n_seqs

        # Autoregressive greedy decoding, capped at SEQ_LEN - 1 generated tokens.
        for _ in range(1, SEQ_LEN):
            cur_len = decoder_input.shape[1]
            tgt_mask = model.transformer.generate_square_subsequent_mask(cur_len).to(device)
            tgt_pad_mask = decoder_input == en_tokenizer.pad_token_index
            logits = model.decoder(decoder_input, memory, tgt_mask, tgt_pad_mask, src_pad_mask)
            # logits: [batch_size, tgt_len, en_vocab_size]; only the newest
            # position matters for the next token.
            next_tokens = logits[:, -1, :].argmax(dim=-1)  # [batch_size]

            for i, token in enumerate(next_tokens.tolist()):
                if done[i]:
                    continue
                if token == en_tokenizer.eos_token_index:
                    done[i] = True
                else:
                    results[i].append(token)

            if all(done):
                break

            # Feed the chosen tokens back in as input for the next step.
            decoder_input = torch.cat([decoder_input, next_tokens.unsqueeze(dim=-1)], dim=-1)
    return results


def predict(user_input, model, zh_tokenizer, en_tokenizer, device):
    """Translate a single Chinese sentence into an English string."""
    # Encode the source text to a fixed-length index sequence
    # (no SOS/EOS markers on the source side).
    source_indexes = zh_tokenizer.encode(user_input, SEQ_LEN, add_sos_eos=False)
    source_tensor = torch.tensor([source_indexes]).to(device)  # [1, seq_len]

    predictions = predict_batch(source_tensor, model, zh_tokenizer, en_tokenizer, device)
    return en_tokenizer.decode(predictions[0])


def run_predict():
    """Interactive translation client: read Chinese from stdin, print English.

    Type 'q' or 'quit' to exit the loop. Blank input re-prompts.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # compute device

    # Build tokenizers from the saved vocabulary files.
    zh_tokenizer = ChineseTokenizer.from_vocab(PROCESSED_DATA_DIR / 'zh_vocab.txt')
    en_tokenizer = EnglishTokenizer.from_vocab(PROCESSED_DATA_DIR / 'en_vocab.txt')

    # Create the model and load the trained weights.
    model = TranslationModel(zh_tokenizer.vocab_size, en_tokenizer.vocab_size,
                             zh_tokenizer.pad_token_index, en_tokenizer.pad_token_index).to(device)
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine
    # (without it, torch.load raises when CUDA is unavailable).
    model.load_state_dict(torch.load(MODELS_DIR / 'model.pt', map_location=device))

    # Client interaction loop.
    print('请输入要翻译的中文：（输入q或quit退出系统）')
    while True:
        user_input = input('>')
        if user_input in ['q', 'quit']:
            print('退出系统')
            break

        if not user_input:
            print('请输入要翻译的中文：')
            continue

        english = predict(user_input, model, zh_tokenizer, en_tokenizer, device)
        print(f'英文：{english}')

# Entry point: launch the interactive translation client.
if __name__ == '__main__':
    run_predict()