import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader, random_split
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import torch.optim as optim
import signal
import sys
import atexit
import time
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress, spearmanr
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from tqdm import tqdm
import warnings
import hydra
from omegaconf import DictConfig, OmegaConf

# Silence library warnings (sklearn/scipy/matplotlib are chatty during training).
warnings.filterwarnings("ignore")

# Module-level state shared with the SIGINT/SIGTERM handler and the atexit
# hook, so that an interrupted run can still evaluate and save results.
interrupted = False          # set True by signal_handler; polled by the training loop
current_model = None         # most recently built model (set in train_experiment)
current_test_loader = None   # test-split DataLoader of the active experiment
current_device = None        # torch.device in use
current_logger = None        # experiment logger, once configured
current_output_dir = None    # Path receiving interrupted-run artifacts

def signal_handler(signum, frame):
    """Flag the run as interrupted and announce the upcoming shutdown test.

    Registered for SIGINT/SIGTERM. It only flips the module-level flag;
    the training loop and the atexit hook perform the actual save/test.
    """
    global interrupted, current_logger
    interrupted = True
    message = f"收到中断信号 {signum}，准备保存模型并执行测试..."
    if current_logger:
        current_logger.warning(message)
    else:
        print(message)

def cleanup_and_test():
    """atexit hook: evaluate on the test split after an interrupted run.

    Relies on the module-level ``current_*`` globals populated by
    ``train_experiment``. Does nothing unless a model, a test loader and an
    output directory are all available. Writes predictions (CSV), a scatter
    plot (PNG) and a markdown report into ``current_output_dir``. Failures
    are logged, never raised (we are already shutting down).
    """
    global current_model, current_test_loader, current_device, current_logger, current_output_dir
    
    if current_model is not None and current_test_loader is not None and current_output_dir is not None:
        try:
            if current_logger:
                current_logger.info("开始执行中断后的测试...")
            
            # Make sure the model is in eval mode before scoring.
            current_model.eval()
            
            # Evaluate on the test split.
            # NOTE(review): despite the name ``test_r``, evaluate_model
            # returns the MAE in this position.
            test_loss, test_r, test_slope, test_intercept, test_preds, test_targets, test_p = evaluate_model(
                current_model, current_test_loader, current_device, current_logger, "test"
            )
            
            # Persist raw predictions next to the ground truth.
            df_test = pd.DataFrame({'pred': test_preds, 'true': test_targets, 'split': 'test'})
            df_test.to_csv(current_output_dir / 'interrupted_test_predictions.csv', index=False)
            
            # True-vs-predicted scatter plot for the test split.
            test_mse = mean_squared_error(test_targets, test_preds)
            plt.figure(figsize=(10, 8))
            plt.scatter(test_targets, test_preds, alpha=0.01, s=0.1, c='green')
            plt.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')
            
            # Overlay the fitted regression line when one was computed.
            if not np.isnan(test_slope):
                x_range = np.linspace(0, 13, 100)
                y_reg = test_slope * x_range + test_intercept
                plt.plot(x_range, y_reg, 'g-', linewidth=2, label=f'y={test_slope:.3f}x+{test_intercept:.3f}')
            
            plt.xlabel('True Expression (log1p)')
            plt.ylabel('Predicted Expression (log1p)')
            plt.title('Test Set - Interrupted Training Evaluation')
            plt.xlim(0, 13)
            plt.ylim(0, 13)
            plt.legend(loc='lower right')
            plt.grid(True, alpha=0.3)
            
            # Summary box in the top-left corner of the axes.
            test_stats_text = f'Test Set:\nPearson r = {test_r:.4f}\nP-value = {test_p:.2e}\nSlope = {test_slope:.4f}\nIntercept = {test_intercept:.4f}\nMSE = {test_mse:.4f}\nN = {len(test_targets):,}'
            plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes, 
                    fontsize=12, verticalalignment='top', 
                    bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.8))
            
            plt.tight_layout()
            plt.savefig(current_output_dir / 'interrupted_test_evaluation.png', dpi=150, bbox_inches='tight')
            plt.close()
            
            # Markdown report summarizing the interrupted-run test metrics.
            report_path = current_output_dir / 'interrupted_test_report.md'
            with open(report_path, 'w', encoding='utf-8') as f:
                f.write(f"# 中断训练测试报告\n\n")
                f.write(f"**测试时间**: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"**测试原因**: 训练被中断\n\n")
                f.write(f"## 测试集结果\n")
                f.write(f"- **测试集损失**: {test_loss:.6f}\n")
                f.write(f"- **测试集Pearson r**: {test_r:.6f}\n")
                f.write(f"- **测试集斜率**: {test_slope:.6f}\n")
                f.write(f"- **测试集截距**: {test_intercept:.6f}\n")
                f.write(f"- **测试集MSE**: {test_mse:.6f}\n")
                f.write(f"- **测试集样本数**: {len(test_targets):,}\n\n")
            
            if current_logger:
                current_logger.info(f"中断测试完成！结果已保存到: {current_output_dir}")
                current_logger.info(f"测试集结果: 损失={test_loss:.6f}, Pearson r={test_r:.6f}, 样本数={len(test_targets):,}")
            else:
                print(f"中断测试完成！测试集结果: 损失={test_loss:.6f}, Pearson r={test_r:.6f}, 样本数={len(test_targets):,}")
                
        except Exception as e:
            # Best-effort shutdown path: report the failure and move on.
            if current_logger:
                current_logger.error(f"中断测试失败: {e}")
            else:
                print(f"中断测试失败: {e}")

# Register interrupt handlers so Ctrl-C / SIGTERM still trigger a final test.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
atexit.register(cleanup_and_test)

# ========== Path configuration ==========
# Preprocessed per-replicate ATAC feature tensors (.npy files).
INPUT_FILES = {
    'atac1': '/root/autodl-tmp/GetForYeast/4numpy/NumpyFileOutput/ATAC1.npy',
    'atac2': '/root/autodl-tmp/GetForYeast/4numpy/NumpyFileOutput/ATAC2.npy',
    'atac3': '/root/autodl-tmp/GetForYeast/4numpy/NumpyFileOutput/ATAC3.npy',
    'atac4': '/root/autodl-tmp/GetForYeast/4numpy/NumpyFileOutput/ATAC4.npy'
}

# Root directory under which each experiment creates a timestamped subfolder.
OUTPUT_BASE_DIR = '/root/autodl-tmp/GetForYeast/output'

# Experiment registry: which replicate files feed each training run.
# ('name'/'description' are user-facing log strings; 'output_dir' is the
# subfolder prefix under OUTPUT_BASE_DIR.)
EXPERIMENTS = {
    'single_atac': {
        'name': 'Single ATAC Training',
        'description': '使用单份ATAC数据训练',
        'input_files': ['atac1'],
        'output_dir': 'single_atac'
    },
    'four_atac': {
        'name': 'Four ATAC Training',
        'description': '使用四份ATAC数据合并训练',
        'input_files': ['atac1', 'atac2', 'atac3', 'atac4'],
        'output_dir': 'four_atac'
    }
}

# ========== 数据集类 ==========
class YeastPeakDataset(Dataset):
    """Per-peak yeast dataset backed by a memory-mapped ``.npy`` file.

    Each row of the (samples, peaks, features) array holds 283 motif
    features, 1 accessibility value, 60 condition features, and a final
    expression label. Rows whose label is NaN/Inf are dropped up front.
    """

    def __init__(self, data_path: str):
        self.data = np.load(data_path, mmap_mode='r')
        logging.info(f"加载数据: {data_path}")

        num_samples, num_peaks, num_features = self.data.shape
        # Fixed feature layout within each row.
        self.motif_dim = 283
        self.accessibility_dim = 1
        self.condition_dim = 60
        self.expression_dim = 1

        # Keep only (sample, peak) pairs whose label is finite.
        logging.info("过滤NaN标签...")
        labels = self.data[:, :, -1]
        finite = ~(np.isnan(labels) | np.isinf(labels))
        rows, cols = np.where(finite)
        self.valid_indices = list(zip(rows, cols))
        self.total = len(self.valid_indices)

        logging.info(f"数据集: 样本={num_samples}, peaks/样本={num_peaks}, "
                    f"特征数={num_features}, 有效样本={self.total}")

    def __len__(self):
        return self.total

    def __getitem__(self, idx: int):
        sample_idx, peak_idx = self.valid_indices[idx]
        row = self.data[sample_idx, peak_idx]

        # Slice the three feature groups; each becomes shape (1, dim).
        motif_end = self.motif_dim
        access_end = motif_end + self.accessibility_dim
        cond_end = access_end + self.condition_dim
        motif = torch.tensor(row[:motif_end], dtype=torch.float32).unsqueeze(0)
        accessibility = torch.tensor(row[motif_end:access_end], dtype=torch.float32).unsqueeze(0)
        condition = torch.tensor(row[access_end:cond_end], dtype=torch.float32).unsqueeze(0)

        # Concatenate into a single (1, 344) feature tensor.
        features = torch.cat([motif, accessibility, condition], dim=-1)

        # The label is the last element of the row, shaped (1,).
        label = torch.tensor(row[-1], dtype=torch.float32).unsqueeze(0)

        return {'motif_features': features}, label

class MultiDataset(Dataset):
    """Concatenation of several ``YeastPeakDataset`` instances.

    Indexing walks the cumulative per-dataset sizes to route a global index
    to the owning dataset.
    """

    def __init__(self, data_paths: list):
        # One YeastPeakDataset per input file; sizes cached for index routing.
        self.datasets = []
        self.dataset_sizes = []
        self.total_size = 0

        for path in data_paths:
            dataset = YeastPeakDataset(path)
            self.datasets.append(dataset)
            self.dataset_sizes.append(len(dataset))
            self.total_size += len(dataset)

        logging.info(f"多数据集加载完成，总样本数: {self.total_size}")
        for i, (path, size) in enumerate(zip(data_paths, self.dataset_sizes)):
            logging.info(f"  数据集 {i+1}: {os.path.basename(path)} - {size:,} 样本")

    def __len__(self):
        return self.total_size

    def __getitem__(self, idx: int):
        """Return the sample at global index *idx* (negative indices allowed).

        Raises:
            IndexError: if *idx* is outside [-total_size, total_size).
        """
        # Support Python-style negative indices explicitly. The original
        # code passed negatives straight to dataset 0 and silently wrapped
        # out-of-range indices (the scan loop fell through with
        # dataset_idx=0 and a negative local index); raise instead.
        if idx < 0:
            idx += self.total_size
        offset = 0
        for dataset, size in zip(self.datasets, self.dataset_sizes):
            if idx < offset + size:
                return dataset[idx - offset]
            offset += size
        raise IndexError(f"index {idx} out of range for dataset of size {self.total_size}")

def create_data_loaders(dataset, config, logger):
    """Split *dataset* into train/val/test (70/15/15) DataLoaders.

    The split is deterministic for a fixed ``config.experiment.seed``.
    Only the training loader shuffles; all three share batch size,
    worker count, and pinned memory.
    """
    # 70% train, 15% val, remainder to test (absorbs rounding).
    total_size = len(dataset)
    train_size = int(0.7 * total_size)
    val_size = int(0.15 * total_size)
    test_size = total_size - train_size - val_size

    logger.info(f"数据集划分: 总样本={total_size:,}, 训练集={train_size:,} (70%), "
                f"验证集={val_size:,} (15%), 测试集={test_size:,} (15%)")

    # Seed both the global RNG and the split generator for reproducibility.
    torch.manual_seed(config.experiment.seed)
    splits = random_split(
        dataset,
        [train_size, val_size, test_size],
        generator=torch.Generator().manual_seed(config.experiment.seed)
    )

    # Identical loader settings except that only the first split shuffles.
    loaders = tuple(
        DataLoader(
            split,
            batch_size=config.training.batch_size,
            shuffle=shuffle,
            num_workers=config.training.num_workers,
            pin_memory=True
        )
        for split, shuffle in zip(splits, (True, False, False))
    )
    return loaders

def evaluate_model(model, data_loader, device, logger, split_name="validation"):
    """Score *model* on *data_loader* and log a ranked metric report.

    MAE is the primary model-selection metric. Returns the 7-tuple
    ``(avg_loss, mae, slope, intercept, all_preds, all_targets, spearman_p)``.
    NOTE: the second element is the MAE even though some callers bind it to
    names like ``test_r``.

    Args:
        model: module exposing ``__call__(batch_x)`` and ``compute_loss``.
        data_loader: yields ``(dict_of_tensors, target_tensor)`` batches.
        device: torch device to evaluate on.
        logger: destination for the metric report.
        split_name: label used in log messages (e.g. "validation", "test").
    """
    model.eval()
    total_loss = 0
    all_preds = []
    all_targets = []

    with torch.no_grad():
        for batch_x, batch_y in tqdm(data_loader, desc=f'评估 {split_name}'):
            # Move the batch to the evaluation device.
            batch_x = {k: v.to(device) for k, v in batch_x.items()}
            batch_y = batch_y.to(device)

            # Forward pass + loss.
            outputs = model(batch_x)
            loss = model.compute_loss(outputs, batch_y)
            total_loss += loss.item()

            # Accumulate flattened predictions and targets on the CPU.
            all_preds.extend(outputs.detach().cpu().numpy().flatten().tolist())
            all_targets.extend(batch_y.detach().cpu().numpy().flatten().tolist())

    # Mean loss over batches.
    avg_loss = total_loss / len(data_loader)

    all_preds_np = np.array(all_preds)
    all_targets_np = np.array(all_targets)

    try:
        # ===== Metrics, in order of importance =====
        # (mean_absolute_error / r2_score come from the module-level sklearn
        # imports; the redundant function-local re-imports were removed.)

        # 1. MAE - mean absolute error (primary metric)
        mae = mean_absolute_error(all_targets_np, all_preds_np)

        # 2. Spearman rank correlation
        spearman_rho, spearman_p = spearmanr(all_targets_np, all_preds_np)

        # 3. R² - coefficient of determination
        r2 = r2_score(all_targets_np, all_preds_np)

        # 4. Pearson linear correlation
        pearson_r, pearson_p = pearsonr(all_targets_np, all_preds_np)

        # 5. Kendall's tau - rank agreement
        from scipy.stats import kendalltau
        kendall_tau, kendall_p = kendalltau(all_targets_np, all_preds_np)

        # 6. Regression of predictions on targets (slope/intercept)
        reg = linregress(all_targets_np, all_preds_np)
        slope = reg.slope
        intercept = reg.intercept

        # 7. MSE and RMSE
        mse = mean_squared_error(all_targets_np, all_preds_np)
        rmse = np.sqrt(mse)

        # 8. MAPE (epsilon in the denominator guards against zero targets)
        mape = np.mean(np.abs((all_targets_np - all_preds_np) / (all_targets_np + 1e-8))) * 100

        # Median absolute error
        median_ae = np.median(np.abs(all_targets_np - all_preds_np))

        # Distribution statistics of predictions and targets
        pred_mean = all_preds_np.mean()
        pred_std = all_preds_np.std()
        true_mean = all_targets_np.mean()
        true_std = all_targets_np.std()

        # Error distribution statistics
        errors = all_preds_np - all_targets_np
        error_mean = errors.mean()
        error_std = errors.std()
        error_median = np.median(errors)

        # Quartiles of the absolute error
        q25_error = np.percentile(np.abs(errors), 25)
        q75_error = np.percentile(np.abs(errors), 75)

        # MAE restricted to high-expression targets (above the median)
        high_expr_mask = all_targets_np > np.median(all_targets_np)
        high_expr_mae = mean_absolute_error(all_targets_np[high_expr_mask], all_preds_np[high_expr_mask]) if np.sum(high_expr_mask) > 0 else float('nan')

        # MAE restricted to low-expression targets (at or below the median)
        low_expr_mask = all_targets_np <= np.median(all_targets_np)
        low_expr_mae = mean_absolute_error(all_targets_np[low_expr_mask], all_preds_np[low_expr_mask]) if np.sum(low_expr_mask) > 0 else float('nan')

    except Exception as e:
        # Fall back to NaN for EVERY reported value. Bug fix: the original
        # fallback omitted spearman_p / pearson_p / kendall_p, so the
        # logging below and the return statement raised NameError whenever
        # metric computation failed.
        mae = spearman_rho = r2 = pearson_r = kendall_tau = float('nan')
        spearman_p = pearson_p = kendall_p = float('nan')
        slope = intercept = mse = mape = rmse = median_ae = float('nan')
        pred_mean = pred_std = true_mean = true_std = float('nan')
        error_mean = error_std = error_median = q25_error = q75_error = float('nan')
        high_expr_mae = low_expr_mae = float('nan')
        logger.warning(f"{split_name}集指标计算失败: {e}")

    # Log the evaluation report, most important metrics first.
    logger.info(f"{split_name}集评估结果 (按重要性排序):")
    logger.info(f"  {'='*60}")

    # 1. Primary metrics
    logger.info(f"  🏆 主要指标:")
    logger.info(f"    1. MAE (平均绝对误差) = {mae:.6f} ← 主要指标")
    logger.info(f"    2. Spearman ρ (排序相关性) = {spearman_rho:.6f} (p = {spearman_p:.2e})")
    logger.info(f"    3. R² (决定系数) = {r2:.6f}")

    # 2. Correlation metrics
    logger.info(f"  📊 相关性指标:")
    logger.info(f"    4. Pearson r (线性相关性) = {pearson_r:.6f} (p = {pearson_p:.2e})")
    logger.info(f"    5. Kendall τ (排序一致性) = {kendall_tau:.6f} (p = {kendall_p:.2e})")

    # 3. Regression analysis
    logger.info(f"  📈 回归分析:")
    logger.info(f"    6. 回归斜率 = {slope:.6f}")
    logger.info(f"    7. 回归截距 = {intercept:.6f}")

    # 4. Error metrics
    logger.info(f"  ⚠️  误差指标:")
    logger.info(f"    8. MSE (均方误差) = {mse:.6f}")
    logger.info(f"    9. RMSE (均方根误差) = {rmse:.6f}")
    logger.info(f"    10. MAPE (平均绝对百分比误差) = {mape:.2f}%")
    logger.info(f"    11. 中位数绝对误差 = {median_ae:.6f}")

    # 5. Distribution statistics
    logger.info(f"  📋 分布统计:")
    logger.info(f"    真实值: 均值={true_mean:.4f}, 标准差={true_std:.4f}, 范围=[{all_targets_np.min():.4f}, {all_targets_np.max():.4f}]")
    logger.info(f"    预测值: 均值={pred_mean:.4f}, 标准差={pred_std:.4f}, 范围=[{all_preds_np.min():.4f}, {all_preds_np.max():.4f}]")

    # 6. Error distribution
    logger.info(f"  🔍 误差分布:")
    logger.info(f"    误差均值={error_mean:.6f}, 误差标准差={error_std:.6f}, 误差中位数={error_median:.6f}")
    logger.info(f"    25%分位数误差={q25_error:.6f}, 75%分位数误差={q75_error:.6f}")

    # 7. Per-region error
    logger.info(f"  🎯 分区域误差:")
    logger.info(f"    高表达区域MAE = {high_expr_mae:.6f}")
    logger.info(f"    低表达区域MAE = {low_expr_mae:.6f}")

    # 8. Sample information
    logger.info(f"  📊 样本信息:")
    logger.info(f"    样本数: {len(all_targets):,}")
    logger.info(f"    损失: {avg_loss:.6f}")
    logger.info(f"  {'='*60}")

    # MAE is returned in the second slot as the primary metric.
    return avg_loss, mae, slope, intercept, all_preds, all_targets, spearman_p

def train_experiment(experiment_name: str, experiment_config: dict, config: DictConfig):
    """Run one training experiment end to end.

    Builds the dataset/loaders described by *experiment_config*, trains a
    YeastModel for ``config.training.max_epochs`` epochs, checkpoints the
    best model by validation MAE, and (unless interrupted) evaluates the
    best checkpoint on the held-out test split and writes final reports.

    Args:
        experiment_name: registry key of the experiment (not used in the body).
        experiment_config: an EXPERIMENTS entry
            (keys: name / description / input_files / output_dir).
        config: Hydra config; assumes ``training``, ``scheduler``,
            ``experiment`` and ``model`` sections — TODO confirm schema.

    Returns:
        The validation loss recorded at the best-MAE epoch.
    """
    
    global current_model, current_test_loader, current_device, current_logger, current_output_dir
    
    # Create a timestamped output directory for this run.
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    output_dir = Path(OUTPUT_BASE_DIR) / f"{experiment_config['output_dir']}_{timestamp}"
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # Expose state to the signal/atexit handlers.
    current_output_dir = output_dir
    
    # Configure logging.
    # Remove existing handlers so repeated runs don't duplicate log lines.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(output_dir / 'train.log', mode='w', encoding='utf-8')
        ],
        force=True  # force reconfiguration even if logging was already set up
    )
    logger = logging.getLogger(__name__)
    current_logger = logger
    
    logger.info(f"开始实验: {experiment_config['name']}")
    logger.info(f"实验描述: {experiment_config['description']}")
    logger.info(f"输出目录: {output_dir}")
    
    # Dump the key hyperparameters for debugging.
    logger.info(f"=== 配置信息 ===")
    logger.info(f"max_epochs: {config.training.max_epochs}")
    logger.info(f"batch_size: {config.training.batch_size}")
    logger.info(f"learning_rate: {config.training.learning_rate}")
    logger.info(f"weight_decay: {config.training.weight_decay}")
    logger.info(f"clip_grad: {config.training.clip_grad}")
    logger.info(f"use_lora: {config.training.use_lora}")
    logger.info(f"=================")
    
    # Seed the global RNG for reproducibility.
    torch.manual_seed(config.experiment.seed)
    
    # Select the compute device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    current_device = device
    
    # Build the dataset: one replicate file or a concatenation of several.
    if len(experiment_config['input_files']) == 1:
        # Single-replicate dataset.
        data_path = INPUT_FILES[experiment_config['input_files'][0]]
        logger.info(f"使用单数据集: {data_path}")
        dataset = YeastPeakDataset(data_path)
    else:
        # Multi-replicate dataset.
        data_paths = [INPUT_FILES[f] for f in experiment_config['input_files']]
        logger.info(f"使用多数据集: {len(data_paths)} 个文件")
        for i, path in enumerate(data_paths):
            logger.info(f"  数据集 {i+1}: {path}")
        dataset = MultiDataset(data_paths)
    
    # Build the train/val/test loaders (70/15/15 split).
    train_loader, val_loader, test_loader = create_data_loaders(
        dataset, config, logger
    )
    current_test_loader = test_loader
    
    # Sanity-check label distributions across splits (useful when debugging
    # validation-vs-test performance gaps).
    logger.info("检查数据分布...")
    train_labels = []
    val_labels = []
    test_labels = []
    
    # Only sample a handful of batches from each split.
    for i, (batch_x, batch_y) in enumerate(train_loader):
        if i >= 10:  # first 10 batches only
            break
        train_labels.extend(batch_y.numpy().flatten())
    
    for i, (batch_x, batch_y) in enumerate(val_loader):
        if i >= 5:  # first 5 batches only
            break
        val_labels.extend(batch_y.numpy().flatten())
    
    for i, (batch_x, batch_y) in enumerate(test_loader):
        if i >= 5:  # first 5 batches only
            break
        test_labels.extend(batch_y.numpy().flatten())
    
    train_labels = np.array(train_labels)
    val_labels = np.array(val_labels)
    test_labels = np.array(test_labels)
    
    logger.info(f"训练集标签统计: 样本数={len(train_labels):,}, 范围=[{train_labels.min():.4f}, {train_labels.max():.4f}], 均值={train_labels.mean():.4f}, 标准差={train_labels.std():.4f}")
    logger.info(f"验证集标签统计: 样本数={len(val_labels):,}, 范围=[{val_labels.min():.4f}, {val_labels.max():.4f}], 均值={val_labels.mean():.4f}, 标准差={val_labels.std():.4f}")
    logger.info(f"测试集标签统计: 样本数={len(test_labels):,}, 范围=[{test_labels.min():.4f}, {test_labels.max():.4f}], 均值={test_labels.mean():.4f}, 标准差={test_labels.std():.4f}")

    # Initialize the model. Some Hydra layouts nest the model section as
    # config.model.model; unwrap it when present.
    model_cfg_dict = config.model
    if hasattr(model_cfg_dict, 'model'):
        model_cfg_dict = model_cfg_dict.model

    model_cfg = OmegaConf.to_container(model_cfg_dict, resolve=True)
    logger.info(f"模型配置: {model_cfg}")
    
    # Some configs wrap the actual model settings under a 'cfg' key.
    if 'cfg' in model_cfg:
        model_cfg = model_cfg['cfg']
    
    model = YeastModel(
        cfg=model_cfg,
        use_lora=config.training.use_lora,
        lora_rank=config.training.lora_rank,
        lora_alpha=config.training.lora_alpha,
        lora_layers=config.training.lora_layers,
    )
    
    # Multi-GPU support via DataParallel.
    if torch.cuda.device_count() > 1:
        logger.info(f"使用 {torch.cuda.device_count()} 个GPU训练")
        model = nn.DataParallel(model)
    model = model.to(device)
    current_model = model
    
    # Log parameter counts (total vs. trainable).
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"模型总参数: {total_params:,}")
    logger.info(f"可训练参数: {trainable_params:,}")
    
    # Optimizer.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=config.training.learning_rate,
        weight_decay=config.training.weight_decay
    )
    
    # Learning-rate scheduler, selected by config.scheduler.type.
    scheduler_type = config.scheduler.type
    if scheduler_type == 'cosine':
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=config.scheduler.cosine.T_max,
            eta_min=config.scheduler.cosine.eta_min
        )
    elif scheduler_type == 'cosine_warm_restarts':
        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer,
            T_0=config.scheduler.cosine_warm_restarts.T_0,
            T_mult=config.scheduler.cosine_warm_restarts.T_mult,
            eta_min=config.scheduler.cosine_warm_restarts.eta_min
        )
    elif scheduler_type == 'step':
        scheduler = optim.lr_scheduler.StepLR(
            optimizer,
            step_size=config.scheduler.step.step_size,
            gamma=config.scheduler.step.gamma
        )
    elif scheduler_type == 'exponential':
        scheduler = optim.lr_scheduler.ExponentialLR(
            optimizer,
            gamma=config.scheduler.exponential.gamma
        )
    else:
        raise ValueError(f"不支持的学习率调度器类型: {scheduler_type}")
    
    logger.info(f"使用学习率调度器: {scheduler_type}")
    
    # Training loop.
    logger.info("开始训练...")
    max_epochs = config.training.max_epochs
    best_val_loss = float('inf')
    best_val_mae = float('inf')  # best validation MAE so far (lower is better)
    
    # Per-epoch history used for the curve plots and final report.
    train_losses = []
    val_losses = []
    train_maes = []
    val_maes = []
    train_spearmans = []
    val_spearmans = []
    train_slopes = []
    val_slopes = []
    lr_list = []
    
    for epoch in range(max_epochs):
        # Bail out early if the signal handler flagged an interrupt.
        if interrupted:
            logger.warning("训练被中断，准备保存模型并执行测试...")
            break
            
        # --- Training phase ---
        model.train()
        total_train_loss = 0
        train_preds = []
        train_targets = []
        
        for batch_x, batch_y in tqdm(train_loader, desc=f'Epoch {epoch+1}/{max_epochs}'):
            if interrupted:
                break
                
            batch_x = {k: v.to(device) for k, v in batch_x.items()}
            batch_y = batch_y.to(device)
            
            optimizer.zero_grad()
            outputs = model(batch_x)
            loss = model.compute_loss(outputs, batch_y)
            loss.backward()
            
            # Optional gradient clipping.
            if config.training.clip_grad is not None:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.training.clip_grad)
            
            optimizer.step()
            
            total_train_loss += loss.item()
            
            # Collect training predictions for the epoch-level metrics.
            with torch.no_grad():
                pred = outputs.detach().cpu().numpy().flatten()
                target = batch_y.detach().cpu().numpy().flatten()
                train_preds.extend(pred.tolist())
                train_targets.extend(target.tolist())
        
        if interrupted:
            break
        
        # --- Training-set metrics for this epoch ---
        avg_train_loss = total_train_loss / len(train_loader)
        train_losses.append(avg_train_loss)
        
        try:
            # Training-set MAE (primary metric).
            train_mae = mean_absolute_error(np.array(train_targets), np.array(train_preds))
            train_maes.append(train_mae)
            
            # Training-set Spearman correlation.
            train_spearman, _ = spearmanr(np.array(train_targets), np.array(train_preds))
            train_spearmans.append(train_spearman)
            
            # Training-set regression slope/intercept.
            train_reg = linregress(np.array(train_targets), np.array(train_preds))
            train_slope = train_reg.slope
            train_intercept = train_reg.intercept
            train_slopes.append(train_slope)
        except:
            # NOTE(review): bare except silently swallows any metric failure
            # and records NaNs instead.
            train_mae = train_spearman = train_slope = train_intercept = float('nan')
            train_maes.append(train_mae)
            train_spearmans.append(train_spearman)
            train_slopes.append(train_slope)
        
        # --- Validation phase ---
        # NOTE(review): the second return value is the validation MAE.
        val_loss, val_mae, val_slope, val_intercept, val_preds, val_targets, val_p = evaluate_model(
            model, val_loader, device, logger, "validation"
        )
        val_losses.append(val_loss)
        val_maes.append(val_mae)
        # evaluate_model does not return Spearman rho, so recompute it here.
        try:
            val_spearman, _ = spearmanr(np.array(val_targets), np.array(val_preds))
        except:
            val_spearman = float('nan')
        val_spearmans.append(val_spearman)
        val_slopes.append(val_slope)
        
        # Advance the LR schedule and record the new rate.
        scheduler.step()
        current_lr = scheduler.get_last_lr()[0]
        lr_list.append(current_lr)
        
        # Checkpoint whenever validation MAE improves (lower is better).
        if val_mae < best_val_mae:
            best_val_mae = val_mae
            best_val_loss = val_loss  # also record the loss at this epoch
            
            # Save model + optimizer + scheduler state for resumption.
            torch.save({
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'epoch': epoch,
                'best_val_loss': best_val_loss,
                'best_val_mae': best_val_mae,
                'config': experiment_config
            }, output_dir / 'best_model.pth')
            
            logger.info(f'Epoch {epoch+1}: 保存最佳模型 (val_mae: {val_mae:.6f}, val_loss: {val_loss:.6f})')
        
        # One-line epoch summary with the primary metrics.
        logger.info(f'Epoch {epoch+1}: Train Loss={avg_train_loss:.4f}, Val Loss={val_loss:.4f}, '
                   f'Train MAE={train_mae:.6f}, Val MAE={val_mae:.6f}, '
                   f'Train Spearman={train_spearman:.4f}, Val Spearman={val_spearman:.4f}, '
                   f'Train slope={train_slope:.4f}, Val slope={val_slope:.4f}, '
                   f'Train intercept={train_intercept:.4f}, Val intercept={val_intercept:.4f}, '
                   f'LR={current_lr:.6f}')
        
        # Save scatter plots every 5 epochs (and after the first epoch).
        if (epoch + 1) % 5 == 0 or epoch == 0:
            save_scatter_plots(output_dir, epoch + 1, train_targets, train_preds, 
                             val_targets, val_preds, train_mae, val_mae, train_slope, val_slope, logger,
                             train_intercept, val_intercept)
        
        # Refresh the training-curve figure every epoch.
        save_training_curves(output_dir, train_losses, val_losses, train_maes, val_maes, 
                           train_spearmans, val_spearmans, train_slopes, val_slopes, lr_list, epoch + 1)
        
        # Epoch complete; continue with the next one.
    
    # On interrupt, skip testing here — cleanup_and_test handles it at exit.
    if interrupted:
        logger.info("训练被中断，测试将由cleanup_and_test函数执行")
        return best_val_loss
    
    # Normal completion: reload the best checkpoint before testing.
    logger.info("加载最佳模型进行测试...")
    try:
        checkpoint = torch.load(output_dir / 'best_model.pth', map_location=device, weights_only=False)
        model.load_state_dict(checkpoint['model_state_dict'])
        logger.info("成功加载最佳模型")
    except Exception as e:
        # Fall back to the final-epoch weights if the checkpoint can't load.
        logger.warning(f"加载最佳模型失败: {e}")
        logger.info("使用当前模型进行测试...")
    
    # Ensure eval mode before scoring.
    model.eval()
    
    # Test-split evaluation.
    # NOTE(review): ``test_r`` actually receives the test MAE.
    test_loss, test_r, test_slope, test_intercept, test_preds, test_targets, test_p = evaluate_model(
        model, test_loader, device, logger, "test"
    )
    
    # Write the final report/plots/predictions.
    save_final_results(output_dir, train_losses, val_losses, train_maes, val_maes,
                      test_loss, test_r, test_slope, test_intercept, test_preds, test_targets, experiment_config, test_p)
    
    logger.info(f"实验 {experiment_config['name']} 完成！")
    return best_val_loss

def save_scatter_plots(output_dir, epoch, train_targets, train_preds, val_targets, val_preds, 
                      train_mae, val_mae, train_slope, val_slope, logger, train_intercept=0, val_intercept=0):
    """Save side-by-side true-vs-predicted scatter plots for train and val.

    Writes ``scatter_epoch{epoch}.png`` into *output_dir*. The two panels
    were previously duplicated code; they now share ``_draw_scatter_panel``.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))

    _draw_scatter_panel(ax1, train_targets, train_preds, train_mae, train_slope, train_intercept,
                        point_color='blue', box_color='lightblue',
                        title=f'Training Set - Epoch {epoch}', label='Train Set')
    _draw_scatter_panel(ax2, val_targets, val_preds, val_mae, val_slope, val_intercept,
                        point_color='red', box_color='lightcoral',
                        title=f'Validation Set - Epoch {epoch}', label='Val Set')

    plt.tight_layout()
    scatter_path = output_dir / f'scatter_epoch{epoch}.png'
    plt.savefig(scatter_path, dpi=150, bbox_inches='tight')
    plt.close()

    logger.info(f"保存散点图到: scatter_epoch{epoch}.png")


def _draw_scatter_panel(ax, targets, preds, mae, slope, intercept, *,
                        point_color, box_color, title, label):
    """Render one true-vs-predicted panel: points, y=x line, fit line, stats box."""
    mse = mean_squared_error(targets, preds)
    ax.scatter(targets, preds, alpha=0.01, s=0.1, c=point_color)
    ax.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')

    # Overlay the regression line when a finite slope is available.
    if not np.isnan(slope):
        x_range = np.linspace(0, 13, 100)
        ax.plot(x_range, slope * x_range + intercept, 'g-', linewidth=2,
                label=f'y={slope:.3f}x+{intercept:.3f}')

    ax.set_xlabel('True Expression (log1p)')
    ax.set_ylabel('Predicted Expression (log1p)')
    ax.set_title(title)
    ax.set_xlim(0, 13)
    ax.set_ylim(0, 13)
    ax.legend(loc='lower right')
    ax.grid(True, alpha=0.3)

    # Metric summary box in the top-left corner of the axes.
    stats_text = (f'{label}:\nMAE = {mae:.4f}\nSlope = {slope:.4f}\n'
                  f'Intercept = {intercept:.4f}\nMSE = {mse:.4f}\nN = {len(targets):,}')
    ax.text(0.02, 0.98, stats_text, transform=ax.transAxes, 
            fontsize=10, verticalalignment='top', 
            bbox=dict(boxstyle='round', facecolor=box_color, alpha=0.8))

def save_training_curves(output_dir, train_losses, val_losses, train_maes, val_maes, 
                        train_spearmans, val_spearmans, train_slopes, val_slopes, lr_list, current_epoch):
    """Write the 2x2 per-epoch training-curve overview to ``training_curves.png``.

    Panels: loss, MAE (primary metric), Spearman correlation, and learning rate
    (log scale). ``train_slopes``, ``val_slopes`` and ``current_epoch`` are
    accepted for interface compatibility but are not plotted here.
    """

    def _epoch_axis(series):
        # X values are 1-based epoch numbers matching the series length.
        return range(1, len(series) + 1)

    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    (loss_ax, mae_ax), (rho_ax, lr_ax) = axes

    # Panel 1: train/validation loss
    loss_ax.plot(_epoch_axis(train_losses), train_losses, 'b-', linewidth=2, label='Train')
    loss_ax.plot(_epoch_axis(val_losses), val_losses, 'r-', linewidth=2, label='Validation')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss')
    loss_ax.set_title('Training and Validation Loss')
    loss_ax.legend(loc='upper right')
    loss_ax.grid(True, alpha=0.3)

    # Panel 2: train/validation MAE (primary selection metric)
    mae_ax.plot(_epoch_axis(train_maes), train_maes, 'g-', linewidth=2, label='Train')
    mae_ax.plot(_epoch_axis(val_maes), val_maes, 'purple', linewidth=2, label='Validation')
    mae_ax.set_xlabel('Epoch')
    mae_ax.set_ylabel('MAE (平均绝对误差)')
    mae_ax.set_title('Training and Validation MAE (主要指标)')
    mae_ax.legend(loc='upper right')
    mae_ax.grid(True, alpha=0.3)

    # Panel 3: train/validation Spearman correlation, fixed [0, 1] range
    rho_ax.plot(_epoch_axis(train_spearmans), train_spearmans, 'brown', linewidth=2, label='Train')
    rho_ax.plot(_epoch_axis(val_spearmans), val_spearmans, 'pink', linewidth=2, label='Validation')
    rho_ax.set_xlabel('Epoch')
    rho_ax.set_ylabel('Spearman ρ')
    rho_ax.set_title('Training and Validation Spearman Correlation')
    rho_ax.legend(loc='lower right')
    rho_ax.grid(True, alpha=0.3)
    rho_ax.set_ylim(0, 1)

    # Panel 4: learning-rate schedule on a log scale
    lr_ax.plot(_epoch_axis(lr_list), lr_list, 'orange', linewidth=2)
    lr_ax.set_xlabel('Epoch')
    lr_ax.set_ylabel('Learning Rate')
    lr_ax.set_title('Learning Rate Schedule')
    lr_ax.grid(True, alpha=0.3)
    lr_ax.set_yscale('log')

    plt.tight_layout()
    plt.savefig(output_dir / 'training_curves.png', dpi=150, bbox_inches='tight')
    plt.close()

def save_final_results(output_dir, train_losses, val_losses, train_maes, val_maes,
                      test_loss, test_r, test_slope, test_intercept, test_preds, test_targets,
                      experiment_config, test_p=None, train_slopes=None, val_slopes=None):
    """保存最终结果和图表 — save final evaluation artifacts.

    Writes, under ``output_dir``:
      * ``test_predictions.csv``  — per-sample predictions vs. targets
      * ``test_set_scatter.png``  — standalone test-set scatter plot
      * ``final_results.png``     — 2x2 overview (scatter / distributions / residuals / table)
      * ``training_summary.md``   — markdown training report

    Args:
        output_dir: pathlib.Path of the experiment output directory.
        train_losses, val_losses: per-epoch loss histories.
        train_maes, val_maes: per-epoch MAE histories (may contain NaN).
        test_loss, test_r, test_slope, test_intercept: scalar test metrics.
        test_preds, test_targets: 1-D sequences of predictions / ground truth.
        experiment_config: dict with 'name', 'description', 'input_files'.
        test_p: optional p-value of the test-set correlation.
        train_slopes, val_slopes: optional per-epoch regression-slope histories.
            Previously these were read as undefined names (NameError); they are
            now optional parameters with NaN placeholders when omitted.
    """
    # Derived test metrics. NOTE: test_mae was previously referenced in the
    # summary table without ever being computed, which raised NameError.
    test_mse = mean_squared_error(test_targets, test_preds)
    test_mae = mean_absolute_error(test_targets, test_preds)

    # NaN placeholders keep the report rendering when slopes were not tracked.
    if train_slopes is None:
        train_slopes = [float('nan')] * len(train_losses)
    if val_slopes is None:
        val_slopes = [float('nan')] * len(val_losses)

    # Save per-sample predictions.
    df_test = pd.DataFrame({'pred': test_preds, 'true': test_targets, 'split': 'test'})
    df_test.to_csv(output_dir / 'test_predictions.csv', index=False)

    # Shared stats text for both scatter plots.
    p_value_text = f'P-value = {test_p:.2e}' if test_p is not None else 'P-value = N/A'
    test_stats_text = f'Test Results:\nPearson r = {test_r:.4f}\n{p_value_text}\nSlope = {test_slope:.4f}\nIntercept = {test_intercept:.4f}\nMSE = {test_mse:.4f}\nN = {len(test_targets):,}'

    # --- Standalone test-set scatter plot ---
    plt.figure(figsize=(12, 10))
    plt.scatter(test_targets, test_preds, alpha=0.01, s=0.1, c='green')
    plt.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')

    if not np.isnan(test_slope):
        x_range = np.linspace(0, 13, 100)
        y_reg = test_slope * x_range + test_intercept
        plt.plot(x_range, y_reg, 'g-', linewidth=2, label=f'y={test_slope:.3f}x+{test_intercept:.3f}')

    plt.xlabel('True Expression (log1p)', fontsize=14)
    plt.ylabel('Predicted Expression (log1p)', fontsize=14)
    plt.title('Test Set - Final Evaluation', fontsize=16, fontweight='bold')
    plt.xlim(0, 13)
    plt.ylim(0, 13)
    plt.legend(loc='lower right', fontsize=12)
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes,
            fontsize=12, verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.9))
    plt.tight_layout()
    plt.savefig(output_dir / 'test_set_scatter.png', dpi=300, bbox_inches='tight')
    plt.close()

    # --- Combined 2x2 final-results figure ---
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 12))

    # 1. Test-set scatter (repeated, larger context)
    ax1.scatter(test_targets, test_preds, alpha=0.01, s=0.1, c='green')
    ax1.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')

    if not np.isnan(test_slope):
        x_range = np.linspace(0, 13, 100)
        y_reg = test_slope * x_range + test_intercept
        ax1.plot(x_range, y_reg, 'g-', linewidth=2, label=f'y={test_slope:.3f}x+{test_intercept:.3f}')

    ax1.set_xlabel('True Expression (log1p)', fontsize=12)
    ax1.set_ylabel('Predicted Expression (log1p)', fontsize=12)
    ax1.set_title('Test Set Performance', fontsize=14, fontweight='bold')
    ax1.set_xlim(0, 13)
    ax1.set_ylim(0, 13)
    ax1.legend(fontsize=11)
    ax1.grid(True, alpha=0.3)
    ax1.text(0.02, 0.98, test_stats_text, transform=ax1.transAxes,
            fontsize=11, verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.9))

    # 2. Distribution comparison: true labels vs. predictions
    ax2.hist(test_targets, bins=50, alpha=0.7, color='blue', label='True Values', density=True)
    ax2.hist(test_preds, bins=50, alpha=0.7, color='red', label='Predictions', density=True)
    ax2.set_xlabel('Expression (log1p)', fontsize=12)
    ax2.set_ylabel('Density', fontsize=12)
    ax2.set_title('Distribution Comparison', fontsize=14, fontweight='bold')
    ax2.legend(fontsize=11)
    ax2.grid(True, alpha=0.3)

    # 3. Residual analysis (pred - true)
    residuals = np.array(test_preds) - np.array(test_targets)
    ax3.scatter(test_targets, residuals, alpha=0.01, s=0.1, c='purple')
    ax3.axhline(y=0, color='r', linestyle='--', linewidth=2)
    ax3.set_xlabel('True Expression (log1p)', fontsize=12)
    ax3.set_ylabel('Residuals (Pred - True)', fontsize=12)
    ax3.set_title('Residual Analysis', fontsize=14, fontweight='bold')
    ax3.grid(True, alpha=0.3)

    residual_stats = f'Residual Stats:\nMean = {np.mean(residuals):.4f}\nStd = {np.std(residuals):.4f}\nMAE = {np.mean(np.abs(residuals)):.4f}'
    ax3.text(0.02, 0.98, residual_stats, transform=ax3.transAxes,
            fontsize=11, verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='lightcoral', alpha=0.9))

    # 4. Performance-summary table
    ax4.axis('off')

    # Best validation epoch. nanargmin keeps indices aligned with the epoch
    # axis; the old code argmin'ed a NaN-filtered list, so the reported epoch
    # was shifted whenever any epoch's MAE was NaN.
    val_mae_arr = np.asarray(val_maes, dtype=float)
    best_val_idx = int(np.nanargmin(val_mae_arr))
    best_val_epoch = best_val_idx + 1
    best_val_mae = float(val_mae_arr[best_val_idx])
    best_train_mae = float(np.nanmin(np.asarray(train_maes, dtype=float)))

    performance_data = [
        ['Metric', 'Train', 'Validation', 'Test'],
        ['Final Loss', f'{train_losses[-1]:.4f}', f'{val_losses[-1]:.4f}', f'{test_loss:.4f}'],
        ['Best MAE', f'{best_train_mae:.4f}', f'{best_val_mae:.4f}', f'{test_mae:.4f}'],
        ['Best Epoch', '-', f'{best_val_epoch}', '-'],
        ['Final Slope', f'{train_slopes[-1]:.4f}', f'{val_slopes[-1]:.4f}', f'{test_slope:.4f}'],
        ['MSE', '-', '-', f'{test_mse:.4f}'],
        ['Sample Count', '-', '-', f'{len(test_targets):,}']
    ]

    table = ax4.table(cellText=performance_data[1:], colLabels=performance_data[0],
                     cellLoc='center', loc='center', colWidths=[0.25, 0.25, 0.25, 0.25])
    table.auto_set_font_size(False)
    table.set_fontsize(11)
    table.scale(1, 2)

    # Table styling: green bold header, zebra-striped body rows.
    for i in range(len(performance_data)):
        for j in range(len(performance_data[0])):
            if i == 0:  # header row
                table[(i, j)].set_facecolor('#4CAF50')
                table[(i, j)].set_text_props(weight='bold', color='white')
            else:
                table[(i, j)].set_facecolor('#f0f0f0' if i % 2 == 0 else 'white')

    ax4.set_title('Performance Summary', fontsize=14, fontweight='bold', pad=20)

    plt.tight_layout()
    plt.savefig(output_dir / 'final_results.png', dpi=150, bbox_inches='tight')
    plt.close()

    # --- Markdown summary report ---
    summary_path = output_dir / 'training_summary.md'
    with open(summary_path, 'w', encoding='utf-8') as f:
        f.write(f"# 训练总结报告\n\n")
        f.write(f"## 实验信息\n")
        f.write(f"- **实验名称**: {experiment_config['name']}\n")
        f.write(f"- **实验描述**: {experiment_config['description']}\n")
        f.write(f"- **输入文件**: {', '.join(experiment_config['input_files'])}\n")
        f.write(f"- **完成时间**: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"- **输出目录**: {output_dir}\n\n")

        f.write(f"## 最终结果\n")
        f.write(f"- **测试集损失**: {test_loss:.6f}\n")
        f.write(f"- **测试集Pearson r**: {test_r:.6f}\n")
        f.write(f"- **测试集斜率**: {test_slope:.6f}\n")
        f.write(f"- **测试集截距**: {test_intercept:.6f}\n")
        f.write(f"- **测试集MSE**: {test_mse:.6f}\n")
        f.write(f"- **测试集样本数**: {len(test_targets):,}\n\n")

        f.write(f"## 训练过程\n")
        f.write(f"- **训练轮数**: {len(train_losses)}\n")
        f.write(f"- **最佳验证损失**: {min(val_losses):.6f}\n")
        f.write(f"- **最佳验证MAE**: {best_val_mae:.6f}\n")
        # best_val_idx indexes the original (unfiltered) epoch axis.
        f.write(f"- **最佳验证斜率**: {val_slopes[best_val_idx]:.6f}\n")
        f.write(f"- **最终训练损失**: {train_losses[-1]:.6f}\n")
        f.write(f"- **最终验证损失**: {val_losses[-1]:.6f}\n")
        f.write(f"- **最终验证MAE**: {val_maes[-1]:.6f}\n")
        f.write(f"- **最终验证斜率**: {val_slopes[-1]:.6f}\n\n")

        f.write(f"## 训练曲线数据\n")
        f.write(f"### 损失曲线\n")
        for i, (train_loss, val_loss) in enumerate(zip(train_losses, val_losses)):
            f.write(f"- Epoch {i+1}: Train={train_loss:.6f}, Val={val_loss:.6f}\n")

        f.write(f"\n### MAE曲线\n")
        for i, (train_mae, val_mae) in enumerate(zip(train_maes, val_maes)):
            f.write(f"- Epoch {i+1}: Train={train_mae:.6f}, Val={val_mae:.6f}\n")

        f.write(f"\n### 回归斜率曲线\n")
        for i, (train_slope, val_slope) in enumerate(zip(train_slopes, val_slopes)):
            f.write(f"- Epoch {i+1}: Train={train_slope:.6f}, Val={val_slope:.6f}\n")

        f.write(f"\n## 模型性能分析\n")
        f.write(f"- **过拟合检测**: {'是' if train_losses[-1] < val_losses[-1] * 0.8 else '否'}\n")
        # Guard against IndexError for single-epoch runs (train_losses[-2]).
        converged = len(train_losses) >= 2 and abs(train_losses[-1] - train_losses[-2]) < 0.001
        f.write(f"- **收敛状态**: {'已收敛' if converged else '未完全收敛'}\n")
        f.write(f"- **最佳性能**: 第{best_val_epoch}轮\n")

        f.write(f"\n## 生成的文件\n")
        f.write(f"- `training_summary.md`: 本总结报告\n")
        f.write(f"- `final_results.png`: 最终结果图表\n")
        f.write(f"- `test_set_scatter.png`: 测试集散点图\n")
        f.write(f"- `test_predictions.csv`: 测试集预测结果\n")
        f.write(f"- `best_model.pth`: 最佳模型权重\n")
        f.write(f"- `training_curves.png`: 训练曲线图\n")
        f.write(f"- `scatter_epoch*.png`: 散点图（每5轮）\n")
        f.write(f"- `train.log`: 训练日志\n")

@hydra.main(version_base=None, config_path="get_model/config", config_name="yeast_training")
def main(config: DictConfig):
    """Entry point: validate inputs, then run all (or one selected) experiments.

    Reads the module-level EXPERIMENTS / INPUT_FILES / OUTPUT_BASE_DIR registry,
    verifies every referenced input file exists, and runs each experiment via
    ``train_experiment``. A single experiment can be selected by passing its
    name as the first CLI argument. Returns a dict mapping experiment name to
    its status ('success' with best loss, or 'failed' with the error message).

    Raises:
        FileNotFoundError: if any registered input file is missing.
    """

    logger = logging.getLogger(__name__)

    logger.info("开始酵母基因表达预测模型训练实验")
    logger.info(f"配置路径: {config}")

    # Verify that every input file referenced by any experiment exists.
    missing_files = []
    for exp_name, exp_config in EXPERIMENTS.items():
        for file_key in exp_config['input_files']:
            file_path = INPUT_FILES[file_key]
            if not os.path.exists(file_path):
                missing_files.append(f"{file_key}: {file_path}")

    if missing_files:
        logger.error("以下输入文件不存在:")
        for file_info in missing_files:
            logger.error(f"  {file_info}")
        raise FileNotFoundError("输入文件缺失")

    # Create the base output directory.
    Path(OUTPUT_BASE_DIR).mkdir(parents=True, exist_ok=True)

    # Optionally restrict the run to a single experiment named on the CLI.
    # (sys is imported at module level; the previous function-local import
    # was redundant. NOTE(review): with hydra, sys.argv[1] is normally a
    # config override — this only matches when it equals an experiment name.)
    if len(sys.argv) > 1 and sys.argv[1] in EXPERIMENTS:
        selected_experiment = sys.argv[1]
        logger.info(f"运行指定实验: {selected_experiment}")
        experiments_to_run = {selected_experiment: EXPERIMENTS[selected_experiment]}
    else:
        logger.info("运行所有实验")
        experiments_to_run = EXPERIMENTS

    # Run the experiments sequentially, collecting per-experiment status.
    results = {}
    for exp_name, exp_config in experiments_to_run.items():
        logger.info(f"\n{'='*60}")
        logger.info(f"开始实验: {exp_name}")
        logger.info(f"{'='*60}")

        try:
            best_loss = train_experiment(exp_name, exp_config, config)
            results[exp_name] = {
                'status': 'success',
                'best_loss': best_loss
            }
            logger.info(f"实验 {exp_name} 完成，最佳损失: {best_loss:.6f}")
        except Exception as e:
            logger.error(f"实验 {exp_name} 失败: {str(e)}")
            results[exp_name] = {
                'status': 'failed',
                'error': str(e)
            }

            # If the failure was caused by an interrupt signal, do not start
            # the remaining experiments.
            if interrupted:
                logger.info("检测到中断信号，停止后续实验")
                break

    logger.info("\n所有实验完成！")
    return results

# Script entry point: hydra parses CLI overrides and injects the config
# into main() when the module is run directly.
if __name__ == "__main__":
    main()
     