import os
import torch

def save_checkpoint(model, optimizer, epoch, save_model_dir):
    """
    Save model and optimizer state dicts so training can resume later.

    Args:
        model: model whose ``state_dict`` is saved.
        optimizer: optimizer whose ``state_dict`` is saved.
        epoch (int): epoch just completed; embedded in the checkpoint filename.
        save_model_dir (str): directory the checkpoint is written into
            (created if it does not exist).
    """
    # Create the target directory directly. The previous
    # os.makedirs(os.path.dirname(save_path)) form raised FileNotFoundError
    # when save_model_dir was "" (dirname of a bare filename is "").
    os.makedirs(save_model_dir or ".", exist_ok=True)
    save_path = os.path.join(save_model_dir, f"checkpoint_epoch_{epoch}.pt")
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
    }
    torch.save(checkpoint, save_path)
    print(f"✅ 模型已保存到 {save_path}")



def load_checkpoint(model, optimizer, save_model_dir, device):
    """
    Locate the newest checkpoint in ``save_model_dir`` and restore the
    model/optimizer state from it.

    Args:
        model: model to load the saved ``state_dict`` into.
        optimizer: optimizer to load the saved ``state_dict`` into.
        save_model_dir (str): directory that holds ``checkpoint_epoch_N.pt`` files.
        device: passed to ``torch.load`` as ``map_location``.

    Returns:
        tuple: ``(model, optimizer, start_epoch)`` where ``start_epoch`` is 0
        when no checkpoint exists, otherwise the saved epoch + 1.
    """
    # No checkpoint directory at all -> nothing to resume from.
    if not os.path.exists(save_model_dir):
        print("🆕 No checkpoint directory found, start training from scratch.")
        return model, optimizer, 0

    def _epoch_of(filename):
        # "checkpoint_epoch_12.pt" -> 12 (numeric, so 10 ranks above 3).
        return int(filename.split('_')[-1].split('.')[0])

    candidates = [
        name for name in os.listdir(save_model_dir)
        if name.startswith('checkpoint_epoch_') and name.endswith('.pt')
    ]
    if not candidates:
        print("🆕 No checkpoint file found, start training from scratch.")
        return model, optimizer, 0

    # Pick the file with the numerically largest epoch suffix.
    newest = max(candidates, key=_epoch_of)
    checkpoint_path = os.path.join(save_model_dir, newest)

    # Restore model/optimizer state and resume from the next epoch.
    state = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(state['model_state_dict'])
    optimizer.load_state_dict(state['optimizer_state_dict'])
    start_epoch = state['epoch'] + 1

    print(f"✅ 成功加载最新断点：{checkpoint_path}，从第 {start_epoch} 轮开始训练")
    return model, optimizer, start_epoch

