"""
训练相关工具函数
包括模型保存、加载、学习率调度等
"""
import os
import json
import torch
from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR


def save_checkpoint(state, checkpoint_dir, filename='checkpoint.pth'):
    """Persist a training checkpoint to disk.

    Creates ``checkpoint_dir`` if it does not exist, then serializes
    ``state`` with ``torch.save``.

    Args:
        state: Dict of state to serialize (model/optimizer/etc.).
        checkpoint_dir: Directory in which to store the checkpoint.
        filename: Name of the checkpoint file.

    Returns:
        str: Full path of the written checkpoint file.
    """
    os.makedirs(checkpoint_dir, exist_ok=True)
    target_path = os.path.join(checkpoint_dir, filename)
    torch.save(state, target_path)
    return target_path


def load_checkpoint(checkpoint_path, model, optimizer=None, scheduler=None,
                    map_location=None):
    """Load a training checkpoint and restore model/optimizer/scheduler state.

    Args:
        checkpoint_path: Path to the checkpoint file.
        model: Model whose parameters are restored from the checkpoint's
            'model_state_dict' entry (required key).
        optimizer: Optional optimizer; restored if 'optimizer_state_dict'
            is present in the checkpoint.
        scheduler: Optional LR scheduler; restored if 'scheduler_state_dict'
            is present in the checkpoint.
        map_location: Forwarded to ``torch.load``. Pass e.g. ``'cpu'`` to
            resume a GPU-saved checkpoint on a CPU-only machine; the
            default ``None`` keeps the previous behavior.

    Returns:
        tuple: (start_epoch, best_metric). ``start_epoch`` is the saved
        epoch + 1 (0 if the file is missing); ``best_metric`` is the saved
        best metric (0.0 if missing).
    """
    if not os.path.exists(checkpoint_path):
        # Best-effort resume: a missing checkpoint means "start from scratch".
        print(f"[WARNING] 检查点文件不存在: {checkpoint_path}")
        return 0, 0.0

    print(f"[INFO] 加载检查点: {checkpoint_path}")
    # map_location prevents CUDA deserialization errors when the checkpoint
    # was saved on a different device than the one we are resuming on.
    checkpoint = torch.load(checkpoint_path, map_location=map_location)

    model.load_state_dict(checkpoint['model_state_dict'])

    if optimizer is not None and 'optimizer_state_dict' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    if scheduler is not None and 'scheduler_state_dict' in checkpoint:
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

    # Resume at the epoch AFTER the one that was saved.
    start_epoch = checkpoint.get('epoch', 0) + 1
    best_metric = checkpoint.get('best_metric', 0.0)

    print(f"[INFO] 从 epoch {start_epoch} 继续训练，当前最佳指标: {best_metric:.4f}")

    return start_epoch, best_metric


def save_train_state(checkpoint_dir, epoch, best_metric, config):
    """Write resumable training state to ``train_state.json``.

    The file is written into ``checkpoint_dir`` (which must already exist)
    as pretty-printed UTF-8 JSON with non-ASCII characters kept as-is.

    Args:
        checkpoint_dir: Directory to write ``train_state.json`` into.
        epoch: Current epoch number.
        best_metric: Best metric value seen so far.
        config: Training configuration (must be JSON-serializable).
    """
    state_path = os.path.join(checkpoint_dir, 'train_state.json')
    with open(state_path, 'w', encoding='utf-8') as f:
        json.dump(
            {'epoch': epoch, 'best_metric': best_metric, 'config': config},
            f,
            indent=4,
            ensure_ascii=False,
        )


def get_scheduler(optimizer, scheduler_type='step', **kwargs):
    """Build a learning-rate scheduler for ``optimizer``.

    Args:
        optimizer: Optimizer the scheduler will drive.
        scheduler_type: Either ``'step'`` (StepLR) or ``'cosine'``
            (CosineAnnealingLR).
        **kwargs: Scheduler hyperparameters. For ``'step'``:
            ``step_size`` (default 30) and ``gamma`` (default 0.1).
            For ``'cosine'``: ``T_max`` (default 100) and
            ``eta_min`` (default 1e-6).

    Returns:
        The configured scheduler instance.

    Raises:
        ValueError: If ``scheduler_type`` is not recognized.
    """
    if scheduler_type == 'step':
        return StepLR(
            optimizer,
            step_size=kwargs.get('step_size', 30),
            gamma=kwargs.get('gamma', 0.1),
        )
    if scheduler_type == 'cosine':
        return CosineAnnealingLR(
            optimizer,
            T_max=kwargs.get('T_max', 100),
            eta_min=kwargs.get('eta_min', 1e-6),
        )
    raise ValueError(f"不支持的调度器类型: {scheduler_type}")


class EarlyStopping:
    """Signal that training should stop once the monitored metric plateaus.

    The stop signal fires after the metric has failed to improve by more
    than ``min_delta`` for ``patience`` consecutive calls. Once tripped,
    ``early_stop`` stays True.
    """

    def __init__(self, patience=10, min_delta=0.0, mode='max'):
        """
        Args:
            patience: Number of non-improving calls tolerated before stopping.
            min_delta: Minimum change that counts as an improvement.
            mode: 'max' if larger metric values are better, 'min' otherwise.
        """
        self.patience = patience
        self.min_delta = min_delta
        self.mode = mode
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def __call__(self, metric):
        """Record one metric observation and report whether to stop.

        Args:
            metric: Latest value of the monitored metric.

        Returns:
            bool: True once the patience budget has been exhausted.
        """
        # The first observation just establishes the baseline.
        if self.best_score is None:
            self.best_score = metric
            return False

        if self.mode == 'max':
            improved = metric > self.best_score + self.min_delta
        else:
            improved = metric < self.best_score - self.min_delta

        if improved:
            self.best_score = metric
            self.counter = 0
        else:
            self.counter += 1
            # Latch: once tripped, the flag never clears.
            self.early_stop = self.early_stop or self.counter >= self.patience

        return self.early_stop


class AverageMeter:
    """Running tracker for a scalar metric.

    Keeps the most recent value (``val``), the weighted sum (``sum``),
    the number of samples seen (``count``), and their mean (``avg``).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record a new observation.

        Args:
            val: Observed value (typically a per-batch average).
            n: Sample weight, e.g. the batch size behind ``val``.
        """
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
