import config
import torch
from tokenizer import ChineseTokenizer, EnglishTokenizer
from model import TranslationEncoder, TranslationDecoder


def batch_predict(input_tensor, encoder, decoder, en_tokenizer, device):
    """Greedily decode a batch of encoded source sentences.

    Args:
        input_tensor: LongTensor of source token ids, shape [batch_size, seq_len].
        encoder: model producing one context vector per sequence.
        decoder: model mapping (input ids, hidden) -> (logits, hidden).
        en_tokenizer: target-side tokenizer exposing sos_token_id / eos_token_id.
        device: torch device the inputs and constants live on.

    Returns:
        list[list[int]]: generated token ids per batch item, EOS excluded.
    """
    encoder.eval()
    decoder.eval()

    with torch.no_grad():
        batch_size = input_tensor.shape[0]

        # Encode the whole source batch into a context vector per item.
        # context_vector: [batch_size, decoder_hidden_size]
        context_vector = encoder(input_tensor)

        # Add the num_layers dimension expected by the decoder RNN:
        # decoder_hidden: [1, batch_size, decoder_hidden_size]
        decoder_hidden = context_vector.unsqueeze(0)

        # Every sequence starts decoding from the SOS token.
        decoder_input = torch.full(size=(batch_size, 1), fill_value=en_tokenizer.sos_token_id, device=device)

        generated = [[] for _ in range(batch_size)]
        if_finished = [False] * batch_size

        # Step the decoder one token at a time, up to the configured max length.
        for _ in range(config.SEQ_LEN):
            decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)
            # decoder_output: [batch_size, 1, vocab_size] -> greedy pick per item
            predict_indexes = torch.argmax(decoder_output, dim=2)
            # predict_indexes: [batch_size, 1]
            for idx, done in enumerate(if_finished):
                if done:
                    continue
                token_id = predict_indexes[idx].item()
                if token_id == en_tokenizer.eos_token_id:
                    if_finished[idx] = True
                else:
                    generated[idx].append(token_id)
            # Stop early once every sequence has emitted EOS.
            if all(if_finished):
                break
            # Feed this step's predictions back in as the next inputs.
            decoder_input = predict_indexes
    return generated


def predict(encoder, decoder, user_input, zh_tokenizer, en_tokenizer, device):
    """Translate a single Chinese sentence into an English string."""
    # Tokenize the source sentence into a fixed-length id list (no SOS/EOS).
    index_list = zh_tokenizer.encode(user_input, config.SEQ_LEN, add_sos_eos=False)
    # Wrap as a batch of one and move to the target device.
    input_tensor = torch.tensor([index_list]).to(device)
    # Decode the one-item batch, then unwrap the single result and detokenize.
    outputs = batch_predict(input_tensor, encoder, decoder, en_tokenizer, device)
    return en_tokenizer.decode(outputs[0])


def run_predict():
    """Interactive Chinese-to-English translation REPL.

    Loads the vocabularies and trained encoder/decoder weights, then reads
    sentences from stdin and prints their translations until the user
    enters 'q' or 'quit'.
    """
    # Pick the best available device: CUDA, then Apple MPS, then CPU.
    device = torch.device(
        "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu")

    zh_tokenizer = ChineseTokenizer.from_vocab(config.PROCESSED_DIR / 'zh_vocab.txt')
    en_tokenizer = EnglishTokenizer.from_vocab(config.PROCESSED_DIR / 'en_vocab.txt')

    # Load model weights. map_location=device makes checkpoints saved on a
    # GPU loadable on CPU/MPS-only machines — without it torch.load tries to
    # restore tensors onto the original CUDA device and raises.
    encoder = TranslationEncoder(zh_tokenizer.vocab_size, zh_tokenizer.pad_token_id).to(device)
    encoder.load_state_dict(torch.load(config.MODELS_DIR / 'encoder.pt', map_location=device))

    decoder = TranslationDecoder(en_tokenizer.vocab_size, en_tokenizer.pad_token_id).to(device)
    decoder.load_state_dict(torch.load(config.MODELS_DIR / 'decoder.pt', map_location=device))

    # Interactive loop.
    print('中英翻译：（输入q或者quit退出）')

    while True:
        user_input = input('> ')
        if user_input in ['q', 'quit']:
            break
        if user_input.strip() == '':
            continue

        result = predict(encoder, decoder, user_input, zh_tokenizer, en_tokenizer, device)
        print(f"英文:{result}")


# Script entry point: start the interactive translation loop.
if __name__ == '__main__':
    run_predict()
