import os

import torch.optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils import data

from config import *
from utils import *
from model import *

# Load the train/validation splits; SummarizationDataset presumably reads
# pre-tokenized examples keyed by split name — confirm in its definition.
train_dataset = SummarizationDataset('train')
val_dataset = SummarizationDataset('val')

# Shuffle only the training data; validation order is irrelevant and kept stable.
train_loader = data.DataLoader(train_dataset, batch_size=TRAIN_BATCH, shuffle=True)
val_loader = data.DataLoader(val_dataset, batch_size=TEST_BATCH, shuffle=False)

# Initialize the model and optimizer (AdamW with the configured learning rate).
model = SumModel().to(DEVICE)
optimizer = torch.optim.AdamW(model.parameters(), lr=LR)

# Learning-rate scheduler driven by the validation metric:
#   mode='max'   - we want to MAXIMIZE the tracked metric (ROUGE-L, see the training loop)
#   factor=0.1   - multiply the LR by 0.1 when the metric plateaus
#   patience=2   - tolerate 2 epochs without improvement before reducing the LR
# NOTE(review): `verbose=True` is deprecated in recent PyTorch (>=2.2) and removed
# later — confirm the pinned torch version still accepts it.
scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=2, verbose=True)

if __name__ == '__main__':
    # Best validation scores seen so far; checkpoints are written only on improvement.
    best_rouge_l = 0.0
    best_avg_rouge = 0.0

    for epoch in range(EPOCH):
        # ---------------- training ----------------
        model.train()
        total_loss = 0.0
        print("-------------开始第{}轮训练---------------".format(epoch + 1))
        for batch_idx, batch in enumerate(train_loader):
            input_ids = batch['input_ids'].to(DEVICE)
            attention_mask = batch['attention_mask'].to(DEVICE)
            labels = batch['labels'].to(DEVICE)

            # Forward pass; the model computes the loss itself when labels are given.
            outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss

            # Clear gradients, backpropagate, update parameters.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            # Periodic progress report every STEP batches.
            if (batch_idx + 1) % STEP == 0:
                print(f'Epoch: {epoch + 1}, Batch: {batch_idx + 1}, Loss: {loss.item():.4f}')

        avg_loss = total_loss / len(train_loader)
        print(f"Epoch {epoch + 1} completed. Average Loss: {avg_loss:.4f}")

        # ---------------- validation ----------------
        model.eval()
        val_preds, val_labels = [], []

        print(f"-------------开始第{epoch + 1}轮验证---------------")
        with torch.no_grad():
            for val_batch in val_loader:
                val_input_ids = val_batch['input_ids'].to(DEVICE)
                val_attention_mask = val_batch['attention_mask'].to(DEVICE)
                val_labels_batch = val_batch['labels'].to(DEVICE)

                # Generate summaries (greedy/beam settings come from the model defaults).
                generated_ids = model.generate(
                    input_ids=val_input_ids,
                    attention_mask=val_attention_mask,
                    max_length=MAX_SUMMARY_LEN
                )
                # Collect predicted and reference token ids for scoring.
                val_preds.extend(generated_ids.cpu().numpy())
                val_labels.extend(val_labels_batch.cpu().numpy())

        # Score the epoch; `evaluate` is expected to return a dict with
        # "rouge-1", "rouge-2" and "rouge-l" keys — TODO confirm in utils.
        val_report = evaluate(val_preds, val_labels)
        print(f"Epoch: {epoch + 1}, Validation ROUGE Scores: {val_report}")
        rouge_l_score = val_report["rouge-l"]
        # FIX: this was previously the raw SUM of the three scores despite being
        # named "avg"; dividing by 3 makes it a true mean. Checkpoint selection is
        # unaffected (dividing by a positive constant preserves the ordering).
        avg_rouge = (val_report["rouge-1"] + val_report["rouge-2"] + val_report["rouge-l"]) / 3

        # Feed the monitored metric to the plateau scheduler (mode='max').
        scheduler.step(rouge_l_score)

        # Save the model when either tracked metric improves.
        # FIX: use os.path.join instead of f-string concatenation so the path is
        # correct whether or not MODEL_DIR ends with a separator.
        if rouge_l_score > best_rouge_l:
            best_rouge_l = rouge_l_score
            model_save_path = os.path.join(MODEL_DIR, "best.pth")
            torch.save(model.state_dict(), model_save_path)
            print(f"Model saved to {model_save_path}")

        if avg_rouge > best_avg_rouge:
            best_avg_rouge = avg_rouge
            model_save_path = os.path.join(MODEL_DIR, "best_avg.pth")
            torch.save(model.state_dict(), model_save_path)
            print(f"Model saved to {model_save_path}")