import os
import pickle

import torch

from training.utils.common_util import async_torch_save


def check_global_checkpoint(config):
    """Check for a global checkpoint and report the fold/epoch to resume from.

    Parameters:
    - config: configuration dict; reads config['trainer']['save_dir'].

    Returns:
    - (current_fold, current_epoch) when a readable checkpoint exists,
      otherwise (None, None). `current_epoch` is already the NEXT epoch to
      run, because save_checkpoint stores epoch + 1.

    A corrupted checkpoint file is deleted (best-effort) so the next run
    starts fresh instead of failing repeatedly.
    """
    checkpoint_path = os.path.join(config['trainer']['save_dir'], 'global_checkpoint.pt')
    if os.path.exists(checkpoint_path):
        print(f"检测到全局检查点: {checkpoint_path}")
        try:
            # weights_only deliberately omitted for compatibility with older PyTorch
            checkpoint = torch.load(checkpoint_path, map_location='cpu')
            if 'current_fold' in checkpoint:
                current_fold = checkpoint['current_fold']
                current_epoch = checkpoint.get('epoch', 0)
                # Fold is 0-based internally, displayed 1-based; the epoch value
                # is already the next epoch to run (stored as epoch + 1).
                print(f"将从折 {current_fold + 1} 轮次 {current_epoch} 开始恢复训练")
                return current_fold, current_epoch
        # torch.load on a truncated/garbage file may raise UnpicklingError
        # (legacy pickle path) as well as EOFError/RuntimeError — catch all
        # three so the cleanup branch actually runs.
        except (EOFError, RuntimeError, pickle.UnpicklingError) as e:
            print(f"错误: 检查点文件已损坏或格式不正确: {e}")
            print(f"删除损坏的检查点文件: {checkpoint_path}")
            try:
                os.remove(checkpoint_path)
            except Exception as remove_error:
                print(f"警告: 无法删除损坏的检查点文件: {remove_error}")
    return None, None


def delete_global_checkpoint(config):
    """Remove the global checkpoint file, if one exists.

    Deletion failures are reported but never raised, so cleanup is
    always best-effort.
    """
    path = os.path.join(config['trainer']['save_dir'], 'global_checkpoint.pt')
    if not os.path.exists(path):
        return
    try:
        os.remove(path)
        print(f"全局检查点已删除: {path}")
    except Exception as e:
        print(f"删除全局检查点时出错: {e}")


def is_fold_completed(config, fold_id):
    """Return True when the given fold already produced a results.txt file."""
    results_path = os.path.join(
        config['trainer']['save_dir'], f'fold_{fold_id}', 'results.txt'
    )
    return os.path.exists(results_path)


def save_checkpoint(model, optimizer, config, fold_id, epoch, best_acc, best_f1, best_kappa, 
                   best_cm=None, best_per_class=None, no_improvement_count=0, history=None, is_best=False, 
                   scaler_state=None):
    """
    Persist the global training checkpoint and, optionally, the best model.

    Parameters:
    - model: model whose state_dict is checkpointed.
    - optimizer: optimizer whose state_dict is checkpointed.
    - config: configuration dict; reads config['trainer']['save_dir'] and the
      optional config['trainer']['save_best_model'] flag (default True).
    - fold_id: index of the fold currently being trained.
    - epoch: epoch that just finished; epoch + 1 is stored so a resumed run
      starts at the next epoch.
    - best_acc / best_f1 / best_kappa: best metrics observed so far.
    - best_cm: best confusion matrix (optional).
    - best_per_class: best per-class metrics (optional).
    - no_improvement_count: early-stopping counter (optional).
    - history: training history (optional; stored as [] when None).
    - is_best: whether the current model is the best seen so far (optional).
    - scaler_state: AMP GradScaler state to checkpoint (optional).
    """
    root_dir = config['trainer']['save_dir']
    fold_dir = os.path.join(root_dir, f'fold_{fold_id}')

    # Store epoch + 1 so resuming picks up at the following epoch.
    state = {
        'current_fold': fold_id,
        'epoch': epoch + 1,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'best_acc': best_acc,
        'best_f1': best_f1,
        'best_kappa': best_kappa,
        'best_cm': best_cm,
        'best_per_class': best_per_class,
        'no_improvement_count': no_improvement_count,
        'history': [] if history is None else history,
        'scaler_state': scaler_state,
    }

    # Single global checkpoint; each save overwrites the previous one.
    async_torch_save(state, os.path.join(root_dir, 'global_checkpoint.pt'),
                     _use_new_zipfile_serialization=False)

    if not is_best:
        return

    # Per-fold best-model snapshot, gated by configuration (default: enabled).
    if config.get('trainer', {}).get('save_best_model', True):
        os.makedirs(fold_dir, exist_ok=True)
        best_path = os.path.join(fold_dir, 'best.model')
        async_torch_save(model.state_dict(), best_path,
                        _use_new_zipfile_serialization=False)
        print(f"Best model saved to {best_path}")
    else:
        print("New best model found, but saving is disabled by configuration")


def load_checkpoint(model, optimizer, config, fold_id, scaler=None):
    """
    Restore training state for the given fold from the global checkpoint.

    Parameters:
    - model: model instance; its weights are loaded in place.
    - optimizer: optimizer instance; its state is loaded in place.
    - config: configuration dict; reads config['trainer']['save_dir'].
    - fold_id: fold about to be trained; the checkpoint is applied only when
      it was saved for this same fold.
    - scaler: optional AMP GradScaler whose state is restored when available.

    Returns:
    - tuple: (epoch, best_acc, best_f1, best_kappa, best_cm, best_per_class,
      no_improvement_count, history, scaler_state, success). `success` is True
      only when a matching, readable checkpoint was loaded; otherwise the
      defaults (0, 0.0, 0.0, 0.0, None, None, 0, [], None, False) are returned.
    """
    checkpoint_path = os.path.join(config['trainer']['save_dir'], 'global_checkpoint.pt')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if os.path.exists(checkpoint_path):
        print(f"Loading global checkpoint from {checkpoint_path}")
        try:
            # weights_only deliberately omitted for compatibility with older PyTorch
            checkpoint = torch.load(checkpoint_path, map_location=device)

            # Apply the checkpoint only if it belongs to the current fold
            if 'current_fold' in checkpoint and checkpoint['current_fold'] == fold_id:
                model.load_state_dict(checkpoint['model_state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                epoch = checkpoint['epoch']
                best_acc = checkpoint['best_acc']
                best_f1 = checkpoint['best_f1']
                best_kappa = checkpoint.get('best_kappa', 0.0)
                best_cm = checkpoint.get('best_cm', None)
                best_per_class = checkpoint.get('best_per_class', None)
                no_improvement_count = checkpoint.get('no_improvement_count', 0)
                history = checkpoint.get('history', [])
                scaler_state = checkpoint.get('scaler_state', None)
                
                # Restore the AMP scaler when both instance and state exist;
                # a failed restore degrades gracefully to scaler_state = None.
                if scaler is not None and scaler_state is not None:
                    try:
                        scaler.load_state_dict(scaler_state)
                        print(f"Loaded scaler state for mixed precision training")
                    except Exception as e:
                        print(f"Failed to load scaler state: {e}")
                        scaler_state = None
                
                print(f"Loaded checkpoint from epoch {epoch}, fold {fold_id}")
                return epoch, best_acc, best_f1, best_kappa, best_cm, best_per_class, no_improvement_count, history, scaler_state, True
            else:
                print(f"Checkpoint is for fold {checkpoint.get('current_fold', 'unknown')}, skipping for current fold {fold_id}")
        # torch.load on a truncated/garbage file may raise UnpicklingError
        # (legacy pickle path) as well as EOFError/RuntimeError — catch all
        # three so the corrupted file is cleaned up (same policy as
        # check_global_checkpoint).
        except (EOFError, RuntimeError, pickle.UnpicklingError) as e:
            print(f"错误: 检查点文件已损坏或格式不正确: {e}")
            print(f"删除损坏的检查点文件: {checkpoint_path}")
            try:
                os.remove(checkpoint_path)
            except Exception as remove_error:
                print(f"警告: 无法删除损坏的检查点文件: {remove_error}")
    return 0, 0.0, 0.0, 0.0, None, None, 0, [], None, False