import os

from config import *
from utils import *
from model import *

# Build the test split and its loader.
# shuffle=False: evaluation does not benefit from shuffling, and a fixed
# order keeps the per-batch logs and the written summary file reproducible.
test_dataset = SummarizationDataset('test')
test_loader = data.DataLoader(test_dataset, batch_size=TEST_BATCH, shuffle=False)

# Restore the best checkpoint.
# os.path.join replaces the hard-coded Windows backslash (r'\...') so the
# script also runs on POSIX systems; map_location lets a checkpoint saved
# on GPU load on whatever DEVICE is configured (including CPU-only hosts).
model = SumModel()
state_dict = torch.load(os.path.join(MODEL_DIR, 'best_nlpcc.pth'), map_location=DEVICE)
model.load_state_dict(state_dict)
model = model.to(DEVICE)

if __name__ == '__main__':
    # Evaluate the restored model on the test set: accumulate the
    # teacher-forced loss, generate summaries, dump generated/reference
    # pairs to a text file, and report ROUGE + average loss.
    model.eval()
    test_preds, test_labels = [], []
    total_loss = 0
    # Open a file to save the generated and the reference summaries.
    with open(os.path.join(OUTPUT_DIR, 'summary_results.txt'), 'w', encoding='utf-8') as f:
        with torch.no_grad():
            for i, test_batch in enumerate(test_loader):
                # `input_ids` instead of `input`: avoid shadowing the builtin.
                input_ids = test_batch['input_ids'].to(DEVICE)
                attention_mask = test_batch['attention_mask'].to(DEVICE)
                labels = test_batch['labels'].to(DEVICE)

                # Forward pass with labels to obtain the per-batch loss.
                outputs = model(input_ids, attention_mask, labels)
                loss = outputs.loss
                total_loss += loss.item()
                print('>> batch:', i + 1, 'loss:', round(loss.item(), 5))

                # Autoregressive generation for the ROUGE evaluation below.
                test_generated = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_length=MAX_SUMMARY_LEN)
                test_preds.extend(test_generated.cpu().numpy())
                test_labels.extend(labels.cpu().numpy())

                # Decode generated and reference summaries and write them to the file.
                generated_texts = tokenizer.batch_decode(test_generated, skip_special_tokens=True)
                reference_texts = tokenizer.batch_decode(labels, skip_special_tokens=True)

                # Strip the spaces the tokenizer inserts between (Chinese) tokens.
                generated_texts = [text.replace(" ", "") for text in generated_texts]
                reference_texts = [text.replace(" ", "") for text in reference_texts]
                for gen_text, ref_text in zip(generated_texts, reference_texts):
                    f.write(f"Generated Summary: {gen_text}\n")
                    f.write(f"Reference Summary: {ref_text}\n\n")

    test_report = evaluate(test_preds, test_labels)
    print(f"TEST ROUGE Scores: {test_report}")
    # Fixed label: this line reports the average test loss, not ROUGE.
    print(f"TEST Loss: {total_loss / len(test_loader)}")  # rouge:56.51 #rouge_chinese:55.50 loss:1.70