from nltk.translate.bleu_score import sentence_bleu
from model import *
import torch
# Evaluate a trained translation Transformer on the WMT16 test set
# (mean cross-entropy loss and mean 1-gram BLEU).
if __name__ == '__main__':

    # Evaluate the trained translation model on the WMT16 test set:
    # report mean cross-entropy loss and mean 1-gram BLEU over all samples.
    state_dict = torch.load("checkpoints/translate-transformer-not-share/best.ckpt", map_location=device)

    # Load checkpoint weights into a freshly constructed model.
    model = TransformerModel(config)
    model.load_state_dict(state_dict)

    loss_fct = CrossEntropyWithPadding(config)
    test_ds = LangPairDataset("test", max_length=128, data_dir="./wmt16")
    test_dl = DataLoader(test_ds, batch_size=1, collate_fn=partial(collate_fct, tokenizer=tokenizer))

    model = model.to(device)
    model.eval()

    collect = {}        # per-sample records keyed by batch index, for later inspection
    loss_collect = []   # per-sample loss values
    bleu_scores = []    # per-sample 1-gram BLEU scores

    # Evaluation only — disable gradient tracking to save memory/compute.
    with torch.no_grad():
        for idx, batch in tqdm(enumerate(test_dl)):
            # NOTE(review): batch tensors are used as-is; presumably collate_fct
            # already places them on `device` — confirm, otherwise add .to(device) here.
            encoder_inputs = batch["encoder_inputs"]
            encoder_inputs_mask = batch["encoder_inputs_mask"]
            decoder_inputs = batch["decoder_inputs"]
            decoder_labels = batch["decoder_labels"]

            # Forward pass (teacher forcing: decoder sees the gold inputs).
            outputs = model(
                encoder_inputs=encoder_inputs,
                decoder_inputs=decoder_inputs,
                encoder_inputs_mask=encoder_inputs_mask
            )

            # Test loss; with batch_size 1 the padded positions must be masked
            # inside the loss (handled by CrossEntropyWithPadding).
            loss = loss_fct(outputs.logits, decoder_labels)

            # Greedy decoding: argmax over the vocabulary, shape [1, seq_len].
            preds = outputs.logits.argmax(dim=-1)
            # Convert token ids back to text for BLEU scoring.
            preds = tokenizer.decode(preds.cpu().numpy())                    # ['predicted sentence']
            decoder_labels = tokenizer.decode(decoder_labels.cpu().numpy())  # ['reference sentence']

            # 1-gram BLEU (weights=(1, 0, 0, 0)) on whitespace-split tokens.
            bleu = sentence_bleu([decoder_labels[0].split()], preds[0].split(), weights=(1, 0, 0, 0))
            bleu_scores.append(bleu)

            collect[idx] = {"loss": loss.item(), "src_inputs": encoder_inputs,
                            "trg_inputs": decoder_inputs, "mask": encoder_inputs_mask,
                            "trg_labels": decoder_labels, "preds": preds}
            loss_collect.append(loss.item())

    # Rank samples by loss (lowest first) for later inspection.
    collect = sorted(collect.items(), key=lambda x: x[1]["loss"])
    print(f"testing loss: {np.array(loss_collect).mean()}")
    # Bug fix: the mean BLEU was computed but its value was discarded — report it.
    if bleu_scores:
        print(f"testing BLEU-1: {sum(bleu_scores) / len(bleu_scores)}")