import torch
from nltk.translate.bleu_score import corpus_bleu
from tqdm import tqdm
from tokenizer import ChineseTokenizer, EnglishTokenizer
from model import TranslationModel
from dataset import get_dataloader
from predict import predict_batch
from config import PROCESSED_DATA_DIR, MODELS_DIR


def evaluate(dataloader, model, zh_tokenizer, en_tokenizer, device):
    """Evaluate the translation model on a dataloader and return corpus BLEU.

    Args:
        dataloader: yields (src, tgt) index tensors, src of shape
            [batch_size, src_len] and tgt of shape [batch_size, tgt_len].
        model: the translation model; switched to eval mode here.
        zh_tokenizer: source-language tokenizer (passed through to predict_batch).
        en_tokenizer: target-language tokenizer; supplies the special-token
            indexes stripped from the references.
        device: torch device the source batch is moved to.

    Returns:
        float: corpus-level BLEU score over the whole dataloader.
    """
    model.eval()
    references = []  # one list of reference token-index lists per sentence (3-D)
    hypotheses = []  # one predicted token-index list per sentence (2-D)

    # Set (not list) for O(1) membership tests in the per-token filter below.
    special_tokens = {en_tokenizer.pad_token_index, en_tokenizer.sos_token_index,
                      en_tokenizer.eos_token_index}

    # Evaluation never needs gradients; disabling autograd saves memory/time.
    with torch.no_grad():
        for src, tgt in tqdm(dataloader, desc="evaluate"):
            src = src.to(device)  # [batch_size, src_len]
            tgt = tgt.tolist()  # (batch_size, tgt_len)

            # Batched prediction for the BLEU hypotheses; presumably returns
            # index lists already free of special tokens — confirm in predict_batch.
            indexes_list = predict_batch(src, model, zh_tokenizer, en_tokenizer, device)
            hypotheses.extend(indexes_list)

            # Strip pad/sos/eos so BLEU compares only real target tokens.
            for indexes in tgt:
                cleaned = [index for index in indexes if index not in special_tokens]
                references.append([cleaned])

    bleu = corpus_bleu(references, hypotheses)
    return bleu


def run_evaluate():
    """Evaluation pipeline: load tokenizers and model, score the test set with BLEU."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # compute device

    # Rebuild tokenizers from the saved vocabulary files.
    zh_tokenizer = ChineseTokenizer.from_vocab(PROCESSED_DATA_DIR / 'zh_vocab.txt')
    en_tokenizer = EnglishTokenizer.from_vocab(PROCESSED_DATA_DIR / 'en_vocab.txt')

    # Create the model and load the trained weights.
    model = TranslationModel(zh_tokenizer.vocab_size, en_tokenizer.vocab_size,
                             zh_tokenizer.pad_token_index, en_tokenizer.pad_token_index).to(device)
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    model.load_state_dict(torch.load(MODELS_DIR / 'model.pt', map_location=device))

    # Test-split dataloader.
    dataloader = get_dataloader(train=False)

    # Run evaluation and report the corpus BLEU score.
    bleu = evaluate(dataloader, model, zh_tokenizer, en_tokenizer, device)
    print(f'BLEU：{bleu:.4f}')


# Script entry point: run the full evaluation pipeline when executed directly.
if __name__ == '__main__':
    run_evaluate()
