"""
酵母基因表达预测模型训练脚本 - 单Peak版本 (Single Peak Version)

主要特性:
1. 单Peak训练: 每次训练处理单个peak，不考虑peak之间的相关性
2. 独立peak处理: 每个peak独立预测表达量
3. 简单高效: 训练速度快，适合大规模数据集
4. 完整的训练流程: 包含训练、验证、测试的完整流程

单Peak训练优势:
- 训练速度快，内存占用小
- 每个peak独立训练，简单直接
- 适合数据量大的情况
- 易于并行化处理

使用方法:
- 在config.py中设置训练参数
- 运行脚本进行训练
- 训练完成后使用测试脚本进行评估
"""

import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader, random_split
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import torch.optim as optim
import signal
import sys
import atexit
import time
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress, spearmanr
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from tqdm import tqdm
import warnings
import hydra
from omegaconf import DictConfig, OmegaConf
from torch.utils.tensorboard import SummaryWriter

warnings.filterwarnings("ignore")

# Shared module state used by the interrupt/cleanup machinery.
interrupted = False
training_completed = False  # set True once training finishes normally
current_model = None
current_test_loader = None
current_device = None
current_logger = None
current_output_dir = None

def signal_handler(signum, frame):
    """Flag an interrupt request and announce that cleanup/testing will run."""
    global interrupted, current_logger
    interrupted = True
    message = f"收到中断信号 {signum}，准备保存模型并执行测试..."
    if current_logger:
        current_logger.warning(message)
    else:
        print(message)

def cleanup_and_test():
    """Run a final test-set evaluation after an interrupted training run.

    Registered with atexit: if training finished normally this is a no-op.
    Otherwise it evaluates the current model on the test loader and writes
    predictions (CSV), a scatter plot (PNG) and a markdown report into the
    current output directory.  Failures are logged, never raised.
    """
    global current_model, current_test_loader, current_device, current_logger, current_output_dir, training_completed
    
    # Skip the interrupt-path evaluation when training completed normally.
    if training_completed:
        if current_logger:
            current_logger.info("训练已正常完成，跳过中断测试")
        return
    
    if current_model is not None and current_test_loader is not None and current_output_dir is not None:
        try:
            if current_logger:
                current_logger.info("开始执行中断后的测试...")
            
            # Make sure the model is in evaluation mode.
            current_model.eval()
            
            # Evaluate on the test set.
            # BUGFIX: evaluate_model returns (loss, mae, slope, intercept,
            # preds, targets, spearman_p); the previous unpacking mislabelled
            # the regression slope/intercept as Spearman/R².  Unpack correctly
            # and compute the real correlation metrics from the predictions.
            test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, test_p = evaluate_model(
                current_model, current_test_loader, current_device, current_logger, "test"
            )
            test_pearson, _ = pearsonr(test_targets, test_preds)
            test_spearman, _ = spearmanr(test_targets, test_preds)
            test_r2 = r2_score(test_targets, test_preds)
            
            # Persist raw predictions.
            df_test = pd.DataFrame({'pred': test_preds, 'true': test_targets, 'split': 'test'})
            df_test.to_csv(current_output_dir / 'interrupted_test_predictions.csv', index=False)
            
            # Test-set scatter plot.
            test_mse = mean_squared_error(test_targets, test_preds)
            plt.figure(figsize=(10, 8))
            plt.scatter(test_targets, test_preds, alpha=0.01, s=0.1, c='green')
            plt.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x')
            
            plt.xlabel('True expression (0-1 normalized)')
            plt.ylabel('Predicted expression (0-1 normalized)')
            plt.title('Test set - interrupted training evaluation (single-peak)')
            plt.xlim(0, 1)
            plt.ylim(0, 1)
            plt.legend(loc='lower right')
            plt.grid(True, alpha=0.3)
            
            # Pearson r shown first as the headline metric.
            test_stats_text = f'Test Set (Single Peak):\nPearson r = {test_pearson:.4f}\nSpearman ρ = {test_spearman:.4f}\nR² = {test_r2:.4f}\nMAE = {test_mae:.4f}\nMSE = {test_mse:.4f}\nN = {len(test_targets):,}'
            plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes, 
                    fontsize=12, verticalalignment='top', 
                    bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.8))
            
            plt.tight_layout()
            plt.savefig(current_output_dir / 'interrupted_test_evaluation.png', dpi=150, bbox_inches='tight')
            plt.close()
            
            # Markdown report for the interrupted run.
            report_path = current_output_dir / 'interrupted_test_report.md'
            with open(report_path, 'w', encoding='utf-8') as f:
                f.write(f"# 中断训练测试报告 (单Peak版本)\n\n")
                f.write(f"**测试时间**: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"**测试原因**: 训练被中断\n")
                f.write(f"**训练方法**: 单Peak训练\n\n")
                f.write(f"## 测试集结果\n")
                # Pearson r first, matching the plot.
                f.write(f"- **测试集Pearson r**: {test_pearson:.6f}\n")
                f.write(f"- **测试集Spearman ρ**: {test_spearman:.6f}\n")
                f.write(f"- **测试集R²**: {test_r2:.6f}\n")
                f.write(f"- **测试集MAE**: {test_mae:.6f}\n")
                f.write(f"- **测试集MSE**: {test_mse:.6f}\n")
                f.write(f"- **测试集样本数**: {len(test_targets):,}\n\n")
            
            if current_logger:
                current_logger.info(f"中断测试完成！结果已保存到: {current_output_dir}")
                current_logger.info(f"测试集结果: 损失={test_loss:.6f}, MAE={test_mae:.6f}, Spearman ρ={test_spearman:.6f}, 样本数={len(test_targets):,}")
            else:
                print(f"中断测试完成！测试集结果: 损失={test_loss:.6f}, MAE={test_mae:.6f}, Spearman ρ={test_spearman:.6f}, 样本数={len(test_targets):,}")
                
        except Exception as e:
            if current_logger:
                current_logger.error(f"中断测试失败: {e}")
            else:
                print(f"中断测试失败: {e}")
# Register interrupt handlers so Ctrl-C / SIGTERM (and normal interpreter
# exit) still produce a saved model evaluation via cleanup_and_test.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
atexit.register(cleanup_and_test)

# ========== Configuration is loaded from YAML ==========
# All config parameters are loaded from get_model/config/yeast_training.yaml,
# including data paths, experiment settings and training hyper-parameters.

# ========== 单Peak数据集类 ==========
class YeastPeakSingleDataset(Dataset):
    """
    Single-peak dataset: each item is one independent peak.

    Peaks are trained on independently; correlations between peaks are
    deliberately ignored.  Each item is a dict with 'motif_features' of
    shape [1, 357] (282 motif + 1 accessibility + 74 condition) and
    'labels' of shape [1], plus the originating sample/peak indices.
    """
    def __init__(self, data_path: str):
        # Load the .npz archive; the tensor lives under the 'data' key with
        # shape [samples, peaks, features], the last feature being the label.
        npz_file = np.load(data_path, mmap_mode='r')
        self.data = npz_file['data']
        
        logging.info(f"加载训练数据: {data_path}")
        logging.info(f"训练模式: 单Peak训练（每个peak独立处理）")
        
        num_samples, num_peaks, num_features = self.data.shape
        self.motif_dim = 282
        self.accessibility_dim = 1
        self.condition_dim = 74
        
        # Sanity-check the feature dimension (+1 for the trailing label).
        expected_features = self.motif_dim + self.accessibility_dim + self.condition_dim + 1  # +1 for label
        if num_features != expected_features:
            logging.warning(f"数据特征维度不匹配！期望={expected_features} (282 motif + 1 accessibility + 74 condition + 1 label), "
                          f"实际={num_features}")
        logging.info(f"特征维度: motif={self.motif_dim}, accessibility={self.accessibility_dim}, "
                    f"condition={self.condition_dim}, 总计={self.motif_dim + self.accessibility_dim + self.condition_dim} (不含标签)")
        
        # Collect all (sample, peak) pairs whose label is finite (not NaN/inf).
        # Vectorized with numpy instead of the previous Python double loop,
        # which performed O(samples*peaks) element-wise reads.
        labels = np.asarray(self.data[:, :, -1])
        self.valid_indices = [(int(s), int(p)) for s, p in np.argwhere(np.isfinite(labels))]
        
        self.total = len(self.valid_indices)
        
        logging.info(f"训练数据集: 样本={num_samples}, peaks/样本={num_peaks}, "
                    f"特征数={num_features}, 有效peaks总数={self.total:,}")

    def __len__(self):
        # Number of valid peaks across all samples.
        return self.total

    def __getitem__(self, idx: int):
        """Return one valid peak: features [1, 357], label [1], and its indices."""
        sample_idx, peak_idx = self.valid_indices[idx]
        
        # Single peak row: [features..., label].
        peak_data = self.data[sample_idx, peak_idx]
        
        # Features are the contiguous prefix (motif | accessibility | condition);
        # one slice is equivalent to concatenating the three sub-slices.
        feature_dim = self.motif_dim + self.accessibility_dim + self.condition_dim
        all_features = np.asarray(peak_data[:feature_dim])
        
        # The label is the last column.
        label = peak_data[-1]
        
        # Convert to tensors.
        features_tensor = torch.tensor(all_features, dtype=torch.float32).unsqueeze(0)  # [1, feature_dim]
        label_tensor = torch.tensor(label, dtype=torch.float32).unsqueeze(0)  # [1]
        
        return {
            'motif_features': features_tensor,  # [1, feature_dim] - matches the model's expected input
            'labels': label_tensor,              # [1]
            'sample_idx': sample_idx,
            'peak_idx': peak_idx
        }

class MultiSinglePeakDataset(Dataset):
    """Concatenation of several single-peak datasets behind one Dataset API."""
    
    def __init__(self, data_paths: list):
        self.datasets = []
        self.dataset_sizes = []
        self.total_size = 0
        
        # Load each file as its own single-peak dataset and tally sizes.
        for path in data_paths:
            ds = YeastPeakSingleDataset(path)
            self.datasets.append(ds)
            self.dataset_sizes.append(len(ds))
            self.total_size += len(ds)
            
        logging.info(f"多数据集单Peak加载完成，总Peak数: {self.total_size:,}")
        for i, (path, size) in enumerate(zip(data_paths, self.dataset_sizes)):
            logging.info(f"  数据集 {i+1}: {os.path.basename(path)} - {size:,} 个有效peaks")

    def __len__(self):
        return self.total_size

    def __getitem__(self, idx: int):
        # Walk the datasets, accumulating an offset until the global index
        # falls inside one of them.
        offset = 0
        chosen = 0
        for position, size in enumerate(self.dataset_sizes):
            if idx < offset + size:
                chosen = position
                break
            offset += size
        
        # Delegate to the owning dataset with a local index.
        return self.datasets[chosen][idx - offset]

def create_data_loaders(dataset, config, logger):
    """Split the dataset 70/15/15 and return (train, val, test) DataLoaders."""
    
    # Split sizes; the test split absorbs any rounding remainder.
    n_total = len(dataset)
    n_train = int(0.7 * n_total)
    n_val = int(0.15 * n_total)
    n_test = n_total - n_train - n_val
    
    logger.info(f"数据集划分: 总Peak数={n_total:,}, 训练集={n_train:,} (70%), "
                f"验证集={n_val:,} (15%), 测试集={n_test:,} (15%)")
    
    # Seed for a reproducible split.
    torch.manual_seed(config.experiment.seed)
    
    splits = random_split(
        dataset, 
        [n_train, n_val, n_test],
        generator=torch.Generator().manual_seed(config.experiment.seed)
    )
    
    def _make_loader(subset, shuffle):
        # All three loaders share every setting except shuffling.
        return DataLoader(
            subset,
            batch_size=config.training.batch_size,
            shuffle=shuffle,
            num_workers=config.training.num_workers,
            pin_memory=True
        )
    
    train_loader = _make_loader(splits[0], True)
    val_loader = _make_loader(splits[1], False)
    test_loader = _make_loader(splits[2], False)
    
    return train_loader, val_loader, test_loader

def evaluate_model(model, data_loader, device, logger, split_name="validation"):
    """Evaluate the model on one split (single-peak mode) and log a report.

    Metrics are logged with Pearson r first (the primary metric for this
    task), followed by MAE, R², rank correlations, regression fit, error
    distribution and expression-level stratified errors.

    Returns:
        tuple: (avg_loss, mae, slope, intercept, all_preds, all_targets,
        spearman_p).  NOTE: positions 2-3 are the regression slope and
        intercept, NOT Spearman/R² — callers must unpack accordingly.
    """
    model.eval()
    total_loss = 0
    all_preds = []
    all_targets = []
    
    with torch.no_grad():
        for batch_x in tqdm(data_loader, desc=f'评估 {split_name}'):
            # Single-peak batch layout.
            batch_labels = batch_x['labels'].to(device)  # [batch_size, 1]
            features_for_model = {'motif_features': batch_x['motif_features'].to(device)}  # [batch_size, 1, features]
            
            # Forward pass.
            outputs = model(features_for_model)  # [batch_size, 1, 1]
            
            # Loss.
            loss = model.compute_loss(outputs, batch_labels)
            total_loss += loss.item()
            
            # Collect predictions and ground truth on CPU.
            preds = outputs.squeeze(-1).detach().cpu().numpy()  # [batch_size, 1]
            targets = batch_labels.detach().cpu().numpy()  # [batch_size, 1]
            
            all_preds.extend(preds.flatten())
            all_targets.extend(targets.flatten())
    
    # Mean loss over batches.
    avg_loss = total_loss / len(data_loader) if len(data_loader) > 0 else 0
    
    all_preds_np = np.array(all_preds)
    all_targets_np = np.array(all_targets)
    
    # Guard: with no samples every metric (and the .min()/.max() calls in the
    # logging below) would crash; report NaNs instead.
    if all_targets_np.size == 0:
        logger.warning(f"{split_name}集没有有效样本，跳过指标计算")
        nan = float('nan')
        return avg_loss, nan, nan, nan, all_preds, all_targets, nan
    
    try:
        # ===== Primary metrics (ordered by importance) =====
        
        # 1. MAE - mean absolute error (headline accuracy metric).
        mae = mean_absolute_error(all_targets_np, all_preds_np)
        
        # 2. Spearman rank correlation.
        spearman_rho, spearman_p = spearmanr(all_targets_np, all_preds_np)
        
        # 3. R² - coefficient of determination.
        r2 = r2_score(all_targets_np, all_preds_np)
        
        # 4. Pearson linear correlation.
        pearson_r, pearson_p = pearsonr(all_targets_np, all_preds_np)
        
        # 5. Kendall's tau - rank agreement.
        from scipy.stats import kendalltau
        kendall_tau, kendall_p = kendalltau(all_targets_np, all_preds_np)
        
        # 6. Linear regression of predictions on targets (slope/intercept).
        reg = linregress(all_targets_np, all_preds_np)
        slope = reg.slope
        intercept = reg.intercept
        
        # 7. MSE.
        mse = mean_squared_error(all_targets_np, all_preds_np)
        
        # 8. MAPE (epsilon keeps zero targets from dividing by zero).
        mape = np.mean(np.abs((all_targets_np - all_preds_np) / (all_targets_np + 1e-8))) * 100
        
        # 9. RMSE and median absolute error.
        rmse = np.sqrt(mse)
        median_ae = np.median(np.abs(all_targets_np - all_preds_np))
        
        # Distribution statistics of predictions and targets.
        pred_mean = all_preds_np.mean()
        pred_std = all_preds_np.std()
        true_mean = all_targets_np.mean()
        true_std = all_targets_np.std()
        
        # Error distribution statistics.
        errors = all_preds_np - all_targets_np
        error_mean = errors.mean()
        error_std = errors.std()
        error_median = np.median(errors)
        
        # Quartiles of the absolute error.
        q25_error = np.percentile(np.abs(errors), 25)
        q75_error = np.percentile(np.abs(errors), 75)
        
        # MAE in the high-expression half (targets above the median).
        high_expr_mask = all_targets_np > np.median(all_targets_np)
        high_expr_mae = mean_absolute_error(all_targets_np[high_expr_mask], all_preds_np[high_expr_mask]) if np.sum(high_expr_mask) > 0 else float('nan')
        
        # MAE in the low-expression half (targets at or below the median).
        low_expr_mask = all_targets_np <= np.median(all_targets_np)
        low_expr_mae = mean_absolute_error(all_targets_np[low_expr_mask], all_preds_np[low_expr_mask]) if np.sum(low_expr_mask) > 0 else float('nan')
        
    except Exception as e:
        # BUGFIX: the p-values and `errors` were previously left unset here,
        # so the logging section below raised NameError instead of reporting
        # NaNs whenever metric computation failed.
        mae = spearman_rho = r2 = pearson_r = kendall_tau = float('nan')
        spearman_p = pearson_p = kendall_p = float('nan')
        slope = intercept = mse = mape = rmse = median_ae = float('nan')
        pred_mean = pred_std = true_mean = true_std = float('nan')
        error_mean = error_std = error_median = q25_error = q75_error = float('nan')
        high_expr_mae = low_expr_mae = float('nan')
        errors = np.array([float('nan')])  # keeps the error-range log line safe
        logger.warning(f"{split_name}集指标计算失败: {e}")
    
    # Log the full report, ordered by importance for the gene-prediction task.
    logger.info(f"{split_name}集评估结果 (基因预测任务，按重要性排序):")
    logger.info(f"  {'='*70}")
    
    # 1. Core prediction metrics (top 1-5).
    logger.info(f"  🧬 基因预测核心指标:")
    logger.info(f"    1. Pearson r (线性相关性) = {pearson_r:.6f} (p = {pearson_p:.2e}) ← 最重要")
    logger.info(f"    2. MAE (平均绝对误差) = {mae:.6f} ← 预测准确性")
    logger.info(f"    3. R² (决定系数) = {r2:.6f} ← 解释方差")
    logger.info(f"    4. Spearman ρ (排序相关性) = {spearman_rho:.6f} (p = {spearman_p:.2e}) ← 排序相关性")
    logger.info(f"    5. Kendall τ (排序一致性) = {kendall_tau:.6f} (p = {kendall_p:.2e}) ← 排序稳定性")
    
    # 2. Regression analysis.
    logger.info(f"  📈 回归分析 (基因表达预测):")
    logger.info(f"    6. 回归斜率 = {slope:.6f} ← 表达水平缩放")
    logger.info(f"    7. 回归截距 = {intercept:.6f} ← 基础表达水平")
    logger.info(f"    8. 回归R² = {slope**2:.6f} ← 线性拟合度")
    
    # 3. Error metrics.
    logger.info(f"  ⚠️  误差分析 (正态分布数据):")
    logger.info(f"    9. RMSE (均方根误差) = {rmse:.6f} ← 大误差惩罚")
    logger.info(f"    10. 中位数绝对误差 = {median_ae:.6f} ← 中值附近误差")
    logger.info(f"    11. MAPE (平均绝对百分比误差) = {mape:.2f}% ← 相对误差")
    logger.info(f"    12. MSE (均方误差) = {mse:.6f} ← 平方误差")
    
    # 4. Distribution statistics.
    logger.info(f"  📊 分布统计 (正态分布特征):")
    logger.info(f"    真实值: 均值={true_mean:.4f}, 标准差={true_std:.4f}, 中位数={np.median(all_targets_np):.4f}")
    logger.info(f"    预测值: 均值={pred_mean:.4f}, 标准差={pred_std:.4f}, 中位数={np.median(all_preds_np):.4f}")
    logger.info(f"    范围: 真实值=[{all_targets_np.min():.4f}, {all_targets_np.max():.4f}], 预测值=[{all_preds_np.min():.4f}, {all_preds_np.max():.4f}]")
    
    # 5. Error distribution.
    logger.info(f"  🔍 误差分布 (正态分布数据):")
    logger.info(f"    误差均值={error_mean:.6f}, 误差标准差={error_std:.6f}, 误差中位数={error_median:.6f}")
    logger.info(f"    25%分位数误差={q25_error:.6f}, 75%分位数误差={q75_error:.6f}")
    logger.info(f"    误差范围=[{errors.min():.6f}, {errors.max():.6f}]")
    
    # 6. Expression-level stratified analysis.
    logger.info(f"  🎯 表达水平分层分析 (基因表达特征):")
    logger.info(f"    高表达区域MAE = {high_expr_mae:.6f} (表达值 > {np.median(all_targets_np):.4f})")
    logger.info(f"    低表达区域MAE = {low_expr_mae:.6f} (表达值 ≤ {np.median(all_targets_np):.4f})")
    
    # 7. Sample info.
    logger.info(f"  📊 样本信息:")
    logger.info(f"    有效peak数: {len(all_targets):,}")
    logger.info(f"    损失: {avg_loss:.6f}")
    logger.info(f"  {'='*70}")
    
    # Return tuple (see docstring): slope/intercept at positions 2-3.
    return avg_loss, mae, slope, intercept, all_preds, all_targets, spearman_p

def train_experiment(experiment_name: str, experiment_config: dict, config: DictConfig):
    """Train one experiment end-to-end (single-peak mode).

    Builds the dataset/loaders, model, optimizer and scheduler from the
    config, runs the train/validation loop with early stopping keyed on
    validation Pearson r, evaluates on the held-out test split and saves
    all artifacts (checkpoint, plots, CSVs, TensorBoard logs).

    Args:
        experiment_name: Name of the experiment (not used inside the body).
        experiment_config: Per-experiment section (name, description,
            input_files, output_dir).
        config: Full Hydra config (data / training / model / experiment).

    Returns:
        dict with best_val_mae, test_mae, test_loss and output_dir (Path).
    """
    
    global current_model, current_test_loader, current_device, current_logger, current_output_dir
    
    # Create a timestamped output directory for this run.
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    output_dir = Path(config.data.output_base_dir) / f"{experiment_config.output_dir}_{timestamp}"
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # Publish state for the signal/atexit interrupt handlers.
    current_output_dir = output_dir
    
    # Configure logging.
    # Remove existing handlers first to avoid duplicated output on re-entry.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(output_dir / 'train_single_peak.log', mode='w', encoding='utf-8')
        ],
        force=True  # force reconfiguration
    )
    logger = logging.getLogger(__name__)
    current_logger = logger
    
    logger.info(f"开始实验: {experiment_config.name}")
    logger.info(f"实验描述: {experiment_config.description}")
    logger.info(f"输出目录: {output_dir}")
    logger.info(f"训练方法: 单Peak方法 (每个peak独立训练)")
    
    # Dump the key hyper-parameters for debugging.
    logger.info(f"=== 配置信息 ===")
    logger.info(f"max_epochs: {config.training.max_epochs}")
    logger.info(f"batch_size: {config.training.batch_size}")
    logger.info(f"learning_rate: {config.training.learning_rate}")
    logger.info(f"weight_decay: {config.training.weight_decay}")
    logger.info(f"clip_grad: {config.training.clip_grad}")
    logger.info(f"early_stopping_patience: {config.training.early_stopping_patience}")
    logger.info(f"early_stopping_min_delta: {config.training.early_stopping_min_delta}")
    logger.info(f"=================")
    
    # Seed RNGs for reproducibility.
    torch.manual_seed(config.experiment.seed)
    
    # Select GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    current_device = device
    
    # Build the dataset (single-peak mode).
    if len(experiment_config.input_files) == 1:
        # Single data file.
        data_path = config.data.input_files[experiment_config.input_files[0]]
        logger.info(f"使用单数据集: {data_path}")
        dataset = YeastPeakSingleDataset(data_path)
    else:
        # Several data files merged into one dataset.
        data_paths = [config.data.input_files[f] for f in experiment_config.input_files]
        logger.info(f"使用多数据集: {len(data_paths)} 个文件")
        for i, path in enumerate(data_paths):
            logger.info(f"  数据集 {i+1}: {path}")
        dataset = MultiSinglePeakDataset(data_paths)
    
    # Train/val/test loaders (70/15/15 split).
    train_loader, val_loader, test_loader = create_data_loaders(dataset, config, logger)
    current_test_loader = test_loader
    
    # Model.
    model = YeastModel(config.model.model)
    model = model.to(device)
    current_model = model
    
    # Optimizer.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=config.training.learning_rate,
        weight_decay=config.training.weight_decay
    )
    
    # Cosine-annealing LR schedule decaying to 1% of the base LR.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer, 
        T_max=config.training.max_epochs,
        eta_min=config.training.learning_rate * 0.01
    )
    
    # TensorBoard writer.
    tensorboard_dir = output_dir / 'tensorboard_logs'
    tensorboard_dir.mkdir(exist_ok=True)
    writer = SummaryWriter(tensorboard_dir)
    logger.info(f"TensorBoard日志目录: {tensorboard_dir}")
    
    # Trace the model graph into TensorBoard (best effort).
    try:
        # Example input for graph tracing - single-peak mode.
        # Feature dim: 282 motif + 1 accessibility + 74 condition = 357.
        sample_input = torch.randn(1, 1, 357).to(device)  # [batch_size, 1, features]
        sample_features = {'motif_features': sample_input}
        
        # Record the computation graph.
        writer.add_graph(model, sample_features)
        logger.info("模型结构已记录到TensorBoard")
    except Exception as e:
        logger.warning(f"记录模型结构失败: {e}")
    
    # Training loop - tuned for the gene-prediction task.
    best_val_pearson = -1.0  # validation Pearson r; higher is better
    best_val_mae = float('inf')
    train_losses = []
    val_losses = []
    train_maes = []  # NOTE(review): only ever receives epoch_train_mae, which stays 0 (see below)
    val_maes = []
    train_pearsons = []  # NOTE(review): never appended to anywhere in this function
    val_pearsons = []
    val_spearmans = []
    lr_history = []
    patience_counter = 0
    early_stopping_patience = config.training.get('early_stopping_patience', None)
    early_stopping_min_delta = config.training.get('early_stopping_min_delta', 0.001)
    
    logger.info(f"开始训练，共 {config.training.max_epochs} 个epoch")
    if early_stopping_patience:
        logger.info(f"早停耐心值: {early_stopping_patience}, 最小改善阈值: {early_stopping_min_delta}")
        logger.info(f"早停监控指标: Pearson相关系数 (r) - 基因预测任务重要指标")
    
    for epoch in range(config.training.max_epochs):
        if interrupted:
            logger.warning("训练被中断")
            break
            
        # ---- Training phase ----
        model.train()
        epoch_train_loss = 0
        epoch_train_mae = 0  # NOTE(review): never updated in the loop, so Train MAE is logged as 0
        train_batches = 0
        
        for batch_idx, batch_data in enumerate(tqdm(train_loader, desc=f'Epoch {epoch+1}/{config.training.max_epochs}')):
            if interrupted:
                break
                
            # Fetch batch - single-peak layout.
            batch_labels = batch_data['labels'].to(device)  # [batch_size, 1]
            features_for_model = {'motif_features': batch_data['motif_features'].to(device)}  # [batch_size, 1, features]
            
            # Forward pass.
            optimizer.zero_grad()
            outputs = model(features_for_model)  # [batch_size, 1, 1]
            
            # Loss and backward.
            loss = model.compute_loss(outputs, batch_labels)
            loss.backward()
            
            # Gradient clipping.
            if config.training.clip_grad > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), config.training.clip_grad)
            
            optimizer.step()
            
            epoch_train_loss += loss.item()
            train_batches += 1
        
        # Mean training loss for this epoch.
        if train_batches > 0:
            avg_train_loss = epoch_train_loss / train_batches
        else:
            avg_train_loss = 0
        
        # ---- Validation phase ----
        # NOTE: evaluate_model returns (loss, mae, slope, intercept, preds, targets, spearman_p).
        val_loss, val_mae, val_slope, val_intercept, val_preds, val_targets, val_p = evaluate_model(
            model, val_loader, device, logger, "validation"
        )
        
        # Correlation metrics on the validation predictions.
        from scipy.stats import spearmanr, pearsonr  # re-import shadows the module-level names; harmless
        val_pearson, _ = pearsonr(val_targets, val_preds)
        val_spearman, _ = spearmanr(val_targets, val_preds)
        val_r2 = r2_score(val_targets, val_preds)
        
        # TensorBoard scalars.
        writer.add_scalar('Loss/Train', avg_train_loss, epoch)
        writer.add_scalar('Loss/Validation', val_loss, epoch)
        writer.add_scalar('MAE/Train', epoch_train_mae, epoch)  # NOTE(review): always 0, see above
        writer.add_scalar('MAE/Validation', val_mae, epoch)
        current_lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('Learning_Rate', current_lr, epoch)
        lr_history.append(current_lr)
        writer.add_scalar('Pearson/Validation', val_pearson, epoch)  # Pearson correlation (r)
        writer.add_scalar('Spearman/Validation', val_spearman, epoch)  # Spearman correlation
        writer.add_scalar('R2/Validation', val_r2, epoch)  # R²
        
        # Scatter-plot cadence: every 2 epochs for the first 10, then every 10.
        if ((epoch + 1) <= 10 and (epoch + 1) % 2 == 0) or ((epoch + 1) > 10 and (epoch + 1) % 10 == 0):
            fig, ax = plt.subplots(figsize=(10, 8))
            # Plain scatter; small, semi-transparent points to cope with density.
            targets_np = np.array(val_targets)
            preds_np = np.array(val_preds)
            # Optional subsampling to cap extreme point counts.
            max_points = 100000
            if len(targets_np) > max_points:
                idx = np.random.choice(len(targets_np), max_points, replace=False)
                x_plot = targets_np[idx]
                y_plot = preds_np[idx]
            else:
                x_plot = targets_np
                y_plot = preds_np
            # "basic" style from plot.py: small points, semi-transparent, no edge.
            ax.scatter(x_plot, y_plot, s=8, alpha=0.25, c='#1f77b4', edgecolors='none')
            # Fixed axis range 0-6 (non-normalized data); earlier dynamic-range idea dropped.
            lo, hi = 0.0, 6.0
            # Ideal y=x reference line over the shared range.
            ax.plot([lo, hi], [lo, hi], 'r--', linewidth=2, label='y=x', zorder=10)
            # Regression line from evaluate_model's slope and intercept.
            try:
                x_line = np.linspace(lo, hi, 200)
                y_line = val_slope * x_line + val_intercept
                ax.plot(x_line, y_line, 'g-', linewidth=2, label=f'y={val_slope:.3f}x+{val_intercept:.3f}')
            except Exception:
                pass

            ax.set_xlabel('True expression', fontsize=12)
            ax.set_ylabel('Predicted expression', fontsize=12)
            ax.set_title(f'Validation predictions - Epoch {epoch+1}\nPearson r={val_pearson:.4f}, Spearman ρ={val_spearman:.4f}, R²={val_r2:.4f}', fontsize=14)
            ax.set_xlim(lo, hi)
            ax.set_ylim(lo, hi)
            ax.legend(loc='lower right', fontsize=10)
            ax.grid(True, alpha=0.3)
            
            # Statistics text box.
            stats_text = f'Epoch {epoch+1}\n' \
                        f'Pearson r = {val_pearson:.4f}\n' \
                        f'Spearman ρ = {val_spearman:.4f}\n' \
                        f'R² = {val_r2:.4f}\n' \
                        f'MAE = {val_mae:.4f}\n' \
                        f'N = {len(val_targets):,}'
            ax.text(0.02, 0.98, stats_text, transform=ax.transAxes,
                   fontsize=10, verticalalignment='top',
                   bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
            
            plt.tight_layout()
            # Write the figure to TensorBoard.
            writer.add_figure('Validation/Scatter', fig, epoch)
            writer.flush()
            
            # Also save to disk for direct inspection.
            scatter_dir = output_dir / 'validation_plots'
            scatter_dir.mkdir(parents=True, exist_ok=True)
            scatter_path = scatter_dir / f'val_scatter_epoch_{epoch+1:04d}.png'
            fig.savefig(scatter_path, dpi=160, bbox_inches='tight')
            logger.info(f"Saved validation scatter to: {scatter_path}")
            plt.close(fig)
        
        # Step the LR scheduler.
        scheduler.step()
        
        # Best-model checkpointing / early stopping, keyed on validation Pearson r.
        if val_pearson > best_val_pearson + early_stopping_min_delta:
            best_val_pearson = val_pearson
            best_val_mae = val_mae  # track the matching MAE as well
            patience_counter = 0
            
            # Make sure the output directory still exists.
            output_dir.mkdir(parents=True, exist_ok=True)
            
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_pearson': val_pearson,
                'val_spearman': val_spearman,
                'val_r2': val_r2,
                'val_mae': val_mae,
                'val_loss': val_loss,
                'config': config
            }, output_dir / 'best_model.pth')
            logger.info(f"✨ Epoch {epoch+1}: 保存最佳模型 (Pearson r: {val_pearson:.6f}, Spearman ρ: {val_spearman:.6f}, R²: {val_r2:.6f}, MAE: {val_mae:.6f})")
        else:
            patience_counter += 1
            if early_stopping_patience and patience_counter >= early_stopping_patience:
                logger.info(f"⛔ 早停触发！连续 {patience_counter} 轮Pearson r无改善，停止训练")
                logger.info(f"📊 最佳验证指标: Pearson r={best_val_pearson:.6f}, MAE={best_val_mae:.6f}")
                break
        
        # Record history.
        # NOTE(review): the early-stopping break above skips these appends for
        # the final epoch, so the history lists can end one entry short.
        train_losses.append(avg_train_loss)
        val_losses.append(val_loss)
        train_maes.append(epoch_train_mae)
        val_maes.append(val_mae)
        val_pearsons.append(val_pearson)
        val_spearmans.append(val_spearman)
        
        # Rich per-epoch summary log.
        logger.info(f"{'='*100}")
        logger.info(f"📈 Epoch {epoch+1}/{config.training.max_epochs} 总结:")
        logger.info(f"  🔹 损失: Train={avg_train_loss:.6f}, Val={val_loss:.6f}")
        logger.info(f"  🔹 MAE: Val={val_mae:.6f}")
        logger.info(f"  🔹 相关性: Pearson r={val_pearson:.6f}, Spearman ρ={val_spearman:.6f}")
        logger.info(f"  🔹 拟合度: R²={val_r2:.6f}, 斜率={val_slope:.6f}, 截距={val_intercept:.6f}")
        logger.info(f"  🔹 学习率: {optimizer.param_groups[0]['lr']:.6f}")
        logger.info(f"  🔹 早停计数: {patience_counter}/{early_stopping_patience or 'N/A'} {'✅ 最佳模型!' if patience_counter == 0 else ''}")
        logger.info(f"{'='*100}")
    
    # ---- Test phase ----
    logger.info("开始测试阶段...")
    test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, test_p = evaluate_model(
        model, test_loader, device, logger, "test"
    )
    
    # Save final artifacts (predictions, history, plots).
    save_final_results(output_dir, train_losses, val_losses, train_maes, val_maes,
                      test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, 
                      experiment_config, test_p, val_pearsons, val_spearmans, lr_history)
    
    # Close the TensorBoard writer.
    writer.close()
    logger.info(f"TensorBoard日志已保存到: {tensorboard_dir}")
    
    # Mark normal completion so the atexit hook skips the interrupt test.
    global training_completed
    training_completed = True
    
    logger.info("训练完成！")
    
    return {
        'best_val_mae': best_val_mae,
        'test_mae': test_mae,
        'test_loss': test_loss,
        'output_dir': output_dir
    }

def save_final_results(output_dir, train_losses, val_losses, train_maes, val_maes,
                      test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, 
                      experiment_config, test_p=None, val_pearsons=None, val_spearmans=None, lr_history=None):
    """保存最终结果 (persist all final artifacts: metric CSVs, plots, markdown report).

    Args:
        output_dir: pathlib.Path directory to write artifacts into (must exist).
        train_losses, val_losses, train_maes, val_maes: per-epoch history lists.
        test_loss, test_mae, test_slope, test_intercept: test-set loss, MAE and
            linear-regression fit (pred vs. true) computed by the caller.
        test_preds, test_targets: test-set predictions and ground truth;
            assumed 0-1 normalized — the scatter/hexbin plots use fixed [0, 1]
            axes and a y=x reference line (TODO confirm normalization upstream).
        experiment_config: object exposing ``.name`` and ``.description``.
        test_p, val_pearsons, val_spearmans, lr_history: optional extra metrics;
            included in the history CSV / plots only when provided.
    """
    # Convert once up front; the rest of the function indexes these with
    # boolean masks and does arithmetic on them repeatedly.
    preds = np.asarray(test_preds, dtype=float)
    targets = np.asarray(test_targets, dtype=float)
    
    # 计算测试集指标 (overall test metrics; pearsonr/spearmanr/r2_score come
    # from the file-level imports)
    test_pearson, _ = pearsonr(targets, preds)
    test_spearman, _ = spearmanr(targets, preds)
    test_r2 = r2_score(targets, preds)
    test_mse = mean_squared_error(targets, preds)
    test_rmse = np.sqrt(test_mse)
    
    # 计算额外统计指标 (median absolute error; MAPE with a small eps so
    # zero-valued targets do not divide by zero)
    test_median_ae = np.median(np.abs(targets - preds))
    test_mape = np.mean(np.abs((targets - preds) / (targets + 1e-8))) * 100
    
    # 分层分析: split at the median of the true values into high/low strata.
    median_val = np.median(targets)
    high_expr_mask = targets > median_val
    low_expr_mask = targets <= median_val
    
    def _stratum_metrics(mask):
        """MAE and Pearson r within one stratum; NaN when the stratum has
        fewer than 2 samples (pearsonr requires at least two points and an
        empty stratum would crash mean_absolute_error)."""
        if mask.sum() < 2:
            return float('nan'), float('nan')
        mae = mean_absolute_error(targets[mask], preds[mask])
        r, _ = pearsonr(targets[mask], preds[mask])
        return mae, r
    
    high_expr_mae, high_expr_r = _stratum_metrics(high_expr_mask)
    low_expr_mae, low_expr_r = _stratum_metrics(low_expr_mask)
    
    # 保存预测结果 (per-sample predictions)
    df_test = pd.DataFrame({'pred': preds, 'true': targets, 'split': 'test'})
    df_test.to_csv(output_dir / 'test_predictions.csv', index=False)
    
    # 保存训练历史 — optional metric columns are added only when available
    history_data = {
        'epoch': range(1, len(train_losses) + 1),
        'train_loss': train_losses,
        'val_loss': val_losses,
        'train_mae': train_maes,
        'val_mae': val_maes
    }
    if val_pearsons:
        history_data['val_pearson'] = val_pearsons
    if val_spearmans:
        history_data['val_spearman'] = val_spearmans
    if lr_history:
        history_data['lr'] = lr_history
    
    train_history = pd.DataFrame(history_data)
    train_history.to_csv(output_dir / 'training_history.csv', index=False)
    
    # 保存指标汇总 (long-format metric/value table)
    metrics_summary = pd.DataFrame({
        'metric': ['Pearson r', 'Spearman ρ', 'R²', 'MAE', 'MSE', 'RMSE', 'Median_AE', 'MAPE(%)', 
                   'Slope', 'Intercept', 'N_samples',
                   'High_Expr_MAE', 'Low_Expr_MAE', 'High_Expr_r', 'Low_Expr_r'],
        'value': [test_pearson, test_spearman, test_r2, test_mae, test_mse, test_rmse, test_median_ae, test_mape,
                  test_slope, test_intercept, len(targets),
                  high_expr_mae, low_expr_mae, high_expr_r, low_expr_r]
    })
    metrics_summary.to_csv(output_dir / 'test_metrics_summary.csv', index=False)
    
    # 生成测试集散点图 — hexbin density on the left, sampled scatter on the right
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 7))
    
    # 左图：Hexbin密度图 (mincnt=1 hides empty cells)
    hb = ax1.hexbin(targets, preds, gridsize=50, cmap='Blues', alpha=0.9, mincnt=1)
    ax1.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x', zorder=10)
    cb1 = plt.colorbar(hb, ax=ax1)
    cb1.set_label('Point density', fontsize=11)
    
    ax1.set_xlabel('True expression (0-1 normalized)', fontsize=12)
    ax1.set_ylabel('Predicted expression (0-1 normalized)', fontsize=12)
    ax1.set_title('Test predictions - density (single-peak training)', fontsize=14, fontweight='bold')
    ax1.set_xlim(0, 1)
    ax1.set_ylim(0, 1)
    ax1.legend(loc='lower right', fontsize=10)
    ax1.grid(True, alpha=0.3)
    
    # Summary-statistics text box in the top-left corner
    test_stats_text = f'Test set (single-peak):\n' \
                     f'Pearson r = {test_pearson:.4f}\n' \
                     f'Spearman ρ = {test_spearman:.4f}\n' \
                     f'R² = {test_r2:.4f}\n' \
                     f'MAE = {test_mae:.4f}\n' \
                     f'RMSE = {test_rmse:.4f}\n' \
                     f'Median AE = {test_median_ae:.4f}\n' \
                     f'N = {len(targets):,}'
    ax1.text(0.02, 0.98, test_stats_text, transform=ax1.transAxes, 
            fontsize=11, verticalalignment='top', 
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.85))
    
    # 右图：传统散点图 — random subsample for display only (unseeded, so the
    # sampled points vary between runs; metrics above are unaffected)
    sample_size = min(50000, len(targets))
    sample_indices = np.random.choice(len(targets), sample_size, replace=False)
    ax2.scatter(targets[sample_indices], preds[sample_indices], 
                alpha=0.3, s=2, c='blue', edgecolors='none')
    ax2.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x', zorder=10)
    
    ax2.set_xlabel('True expression (0-1 normalized)', fontsize=12)
    ax2.set_ylabel('Predicted expression (0-1 normalized)', fontsize=12)
    ax2.set_title(f'Test predictions - scatter (sample {sample_size:,})', fontsize=14, fontweight='bold')
    ax2.set_xlim(0, 1)
    ax2.set_ylim(0, 1)
    ax2.legend(loc='lower right', fontsize=10)
    ax2.grid(True, alpha=0.3)
    
    # 添加分层分析文本 (NaN strata print as 'nan')
    stratified_text = f'Stratified analysis:\n' \
                     f'High expr (>{median_val:.3f}):\n' \
                     f'  MAE={high_expr_mae:.4f}, r={high_expr_r:.4f}\n' \
                     f'Low expr (≤{median_val:.3f}):\n' \
                     f'  MAE={low_expr_mae:.4f}, r={low_expr_r:.4f}'
    ax2.text(0.98, 0.02, stratified_text, transform=ax2.transAxes,
            fontsize=10, verticalalignment='bottom', horizontalalignment='right',
            bbox=dict(boxstyle='round', facecolor='lightyellow', alpha=0.85))
    
    plt.tight_layout()
    plt.savefig(output_dir / 'test_evaluation.png', dpi=200, bbox_inches='tight')
    plt.close()
    
    # 生成训练历史图表 — 3x3 grid of diagnostic panels
    fig = plt.figure(figsize=(18, 12))
    gs = fig.add_gridspec(3, 3, hspace=0.3, wspace=0.3)
    
    # 损失曲线 (train vs. validation loss)
    ax1 = fig.add_subplot(gs[0, 0])
    ax1.plot(train_losses, label='训练损失', color='blue', linewidth=2)
    ax1.plot(val_losses, label='验证损失', color='red', linewidth=2)
    ax1.set_xlabel('Epoch', fontsize=11)
    ax1.set_ylabel('损失', fontsize=11)
    ax1.set_title('Training vs. validation loss', fontsize=12, fontweight='bold')
    ax1.legend(fontsize=10)
    ax1.grid(True, alpha=0.3)
    
    # MAE曲线
    ax2 = fig.add_subplot(gs[0, 1])
    ax2.plot(train_maes, label='训练MAE', color='blue', linewidth=2)
    ax2.plot(val_maes, label='验证MAE', color='red', linewidth=2)
    ax2.set_xlabel('Epoch', fontsize=11)
    ax2.set_ylabel('MAE', fontsize=11)
    ax2.set_title('MAE over epochs', fontsize=12, fontweight='bold')
    ax2.legend(fontsize=10)
    ax2.grid(True, alpha=0.3)
    
    # Pearson相关系数曲线
    if val_pearsons:
        ax3 = fig.add_subplot(gs[0, 2])
        ax3.plot(val_pearsons, label='验证Pearson r', color='green', linewidth=2)
        if val_spearmans:
            ax3.plot(val_spearmans, label='验证Spearman ρ', color='purple', linewidth=2, linestyle='--')
        # FIX: the target reference line must be drawn BEFORE legend() is
        # called, otherwise its label is silently dropped from the legend.
        ax3.axhline(y=0.9, color='orange', linestyle=':', alpha=0.5, label='目标: 0.9')
        ax3.set_xlabel('Epoch', fontsize=11)
        ax3.set_ylabel('相关系数', fontsize=11)
        ax3.set_title('Correlation over epochs', fontsize=12, fontweight='bold')
        ax3.legend(fontsize=10)
        ax3.grid(True, alpha=0.3)
        ax3.set_ylim([max(0, min(val_pearsons) - 0.05), 1.0])
    
    # 误差分布 (signed prediction errors)
    errors = preds - targets
    ax4 = fig.add_subplot(gs[1, 0])
    ax4.hist(errors, bins=60, alpha=0.7, color='skyblue', edgecolor='black', density=True)
    ax4.axvline(0, color='red', linestyle='--', linewidth=2, label='零误差')
    ax4.axvline(np.mean(errors), color='orange', linestyle='-', linewidth=2, label=f'均值: {np.mean(errors):.4f}')
    ax4.axvline(np.median(errors), color='green', linestyle='-.', linewidth=2, label=f'中位数: {np.median(errors):.4f}')
    ax4.set_xlabel('预测误差', fontsize=11)
    ax4.set_ylabel('密度', fontsize=11)
    ax4.set_title('Prediction error distribution (normalized)', fontsize=12, fontweight='bold')
    ax4.legend(fontsize=9)
    ax4.grid(True, alpha=0.3)
    
    # 预测密度图（测试集）
    ax5 = fig.add_subplot(gs[1, 1])
    hb = ax5.hexbin(targets, preds, gridsize=40, cmap='YlOrRd', alpha=0.85, mincnt=1)
    ax5.plot([0, 1], [0, 1], 'b--', linewidth=2, label='y=x')
    cb = plt.colorbar(hb, ax=ax5)
    cb.set_label('Density', fontsize=10)
    ax5.set_xlabel('True expression', fontsize=11)
    ax5.set_ylabel('Predicted expression', fontsize=11)
    ax5.set_title('Prediction density (test set)', fontsize=12, fontweight='bold')
    ax5.set_xlim(0, 1)
    ax5.set_ylim(0, 1)
    ax5.legend(fontsize=9)
    ax5.grid(True, alpha=0.3)
    
    # 残差图 (residual vs. true value)
    ax6 = fig.add_subplot(gs[1, 2])
    ax6.scatter(targets, errors, alpha=0.3, s=2, c='purple', edgecolors='none')
    ax6.axhline(0, color='red', linestyle='--', linewidth=2)
    ax6.set_xlabel('True expression', fontsize=11)
    ax6.set_ylabel('Residual (pred - true)', fontsize=11)
    ax6.set_title('Residuals', fontsize=12, fontweight='bold')
    ax6.grid(True, alpha=0.3)
    
    # QQ图（检查误差正态性; local import keeps the file-level imports minimal）
    ax7 = fig.add_subplot(gs[2, 0])
    from scipy import stats
    stats.probplot(errors, dist="norm", plot=ax7)
    ax7.set_title('Q-Q plot (residual normality)', fontsize=12, fontweight='bold')
    ax7.grid(True, alpha=0.3)
    
    # 累积误差分布 (empirical CDF of |error|)
    ax8 = fig.add_subplot(gs[2, 1])
    sorted_abs_errors = np.sort(np.abs(errors))
    cumulative = np.arange(1, len(sorted_abs_errors) + 1) / len(sorted_abs_errors)
    ax8.plot(sorted_abs_errors, cumulative, linewidth=2, color='darkblue')
    ax8.axhline(0.5, color='red', linestyle='--', alpha=0.5, label='50%分位点')
    ax8.axhline(0.9, color='orange', linestyle='--', alpha=0.5, label='90%分位点')
    ax8.set_xlabel('Absolute error', fontsize=11)
    ax8.set_ylabel('Cumulative probability', fontsize=11)
    ax8.set_title('Cumulative error distribution', fontsize=12, fontweight='bold')
    ax8.legend(fontsize=9)
    ax8.grid(True, alpha=0.3)
    
    # 表达值分布对比 (true vs. predicted marginal distributions)
    ax9 = fig.add_subplot(gs[2, 2])
    ax9.hist(targets, bins=50, alpha=0.6, color='blue', label='真实值', density=True)
    ax9.hist(preds, bins=50, alpha=0.6, color='red', label='预测值', density=True)
    ax9.set_xlabel('Expression', fontsize=11)
    ax9.set_ylabel('Density', fontsize=11)
    ax9.set_title('Distribution: true vs. predicted', fontsize=12, fontweight='bold')
    ax9.legend(fontsize=10)
    ax9.grid(True, alpha=0.3)
    
    plt.tight_layout()
    plt.savefig(output_dir / 'training_analysis.png', dpi=150, bbox_inches='tight')
    plt.close()
    
    # 生成训练报告 (markdown summary of everything computed above)
    report_path = output_dir / 'training_report.md'
    with open(report_path, 'w', encoding='utf-8') as f:
        f.write(f"# 🧬 单Peak训练报告（增强版）\n\n")
        f.write(f"**训练时间**: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"**实验名称**: {experiment_config.name}\n")
        f.write(f"**实验描述**: {experiment_config.description}\n")
        f.write(f"**训练方法**: 单Peak方法（每个peak独立训练）\n\n")
        
        f.write(f"---\n\n")
        f.write(f"## 📊 测试集核心指标\n\n")
        f.write(f"### 相关性指标（最重要）\n")
        f.write(f"- **Pearson相关系数 (r)**: {test_pearson:.6f} ⭐ 主要指标\n")
        f.write(f"- **Spearman相关系数 (ρ)**: {test_spearman:.6f}\n")
        f.write(f"- **决定系数 (R²)**: {test_r2:.6f}\n\n")
        
        f.write(f"### 误差指标\n")
        f.write(f"- **平均绝对误差 (MAE)**: {test_mae:.6f}\n")
        f.write(f"- **均方误差 (MSE)**: {test_mse:.6f}\n")
        f.write(f"- **均方根误差 (RMSE)**: {test_rmse:.6f}\n")
        f.write(f"- **中位数绝对误差**: {test_median_ae:.6f}\n")
        f.write(f"- **平均绝对百分比误差 (MAPE)**: {test_mape:.2f}%\n\n")
        
        f.write(f"### 回归分析\n")
        f.write(f"- **回归斜率**: {test_slope:.6f}\n")
        f.write(f"- **回归截距**: {test_intercept:.6f}\n")
        f.write(f"- **测试集损失**: {test_loss:.6f}\n")
        f.write(f"- **样本数**: {len(targets):,}\n\n")
        
        f.write(f"---\n\n")
        f.write(f"## 🎯 分层分析\n\n")
        f.write(f"**分层标准**: 以中位数 {median_val:.4f} 为界\n\n")
        f.write(f"### 高表达区域（> {median_val:.4f}）\n")
        f.write(f"- **MAE**: {high_expr_mae:.6f}\n")
        f.write(f"- **Pearson r**: {high_expr_r:.6f}\n")
        f.write(f"- **样本数**: {np.sum(high_expr_mask):,}\n\n")
        f.write(f"### 低表达区域（≤ {median_val:.4f}）\n")
        f.write(f"- **MAE**: {low_expr_mae:.6f}\n")
        f.write(f"- **Pearson r**: {low_expr_r:.6f}\n")
        f.write(f"- **样本数**: {np.sum(low_expr_mask):,}\n\n")
        
        f.write(f"---\n\n")
        f.write(f"## 📈 训练历史\n\n")
        f.write(f"- **总训练轮次**: {len(train_losses)}\n")
        f.write(f"- **最终训练损失**: {train_losses[-1]:.6f}\n")
        f.write(f"- **最终验证损失**: {val_losses[-1]:.6f}\n")
        f.write(f"- **最终训练MAE**: {train_maes[-1]:.6f}\n")
        f.write(f"- **最终验证MAE**: {val_maes[-1]:.6f}\n")
        if val_pearsons:
            f.write(f"- **最终验证Pearson r**: {val_pearsons[-1]:.6f}\n")
            f.write(f"- **最佳验证Pearson r**: {max(val_pearsons):.6f} (Epoch {val_pearsons.index(max(val_pearsons))+1})\n")
        if val_spearmans:
            f.write(f"- **最终验证Spearman ρ**: {val_spearmans[-1]:.6f}\n")
            f.write(f"- **最佳验证Spearman ρ**: {max(val_spearmans):.6f} (Epoch {val_spearmans.index(max(val_spearmans))+1})\n")
        f.write(f"\n")
        
        f.write(f"---\n\n")
        
        f.write(f"## 文件说明\n")
        f.write(f"- `best_model.pth`: 最佳模型权重\n")
        f.write(f"- `test_predictions.csv`: 测试集预测结果\n")
        f.write(f"- `test_evaluation.png`: 测试集散点图\n")
        f.write(f"- `training_analysis.png`: 训练分析图表（损失曲线、MAE曲线、误差分布、预测密度）\n")
        f.write(f"- `training_history.csv`: 训练历史数据\n")
        f.write(f"- `test_metrics_summary.csv`: 测试指标汇总\n")
        f.write(f"- `tensorboard_logs/`: TensorBoard日志目录\n")
        f.write(f"- `train_single_peak.log`: 训练日志\n")

@hydra.main(version_base=None, config_path="get_model/config", config_name="yeast_training")
def main(config: DictConfig):
    """主函数：运行ATAC单Peak训练 (entry point: run single-peak ATAC training).

    Args:
        config: hydra-resolved configuration; this function reads
            ``config.data.input_files`` / ``config.data.output_base_dir``,
            ``config.training_name`` / ``config.training_description`` and
            ``config.output_dir``.

    Raises:
        FileNotFoundError: if the ATAC1 input file does not exist.

    Returns:
        Whatever ``train_experiment`` returns (a result dict on success).
    """
    logger = logging.getLogger(__name__)
    logger.info("开始ATAC单Peak训练")
    logger.info(f"配置路径: {config}")
    
    # 从YAML配置中获取数据路径和训练配置
    input_files = config.data.input_files
    output_base_dir = config.data.output_base_dir
    training_name = config.training_name
    training_description = config.training_description
    output_dir = config.output_dir
    
    # 检查输入文件 — fail fast, and put the offending path into the exception
    # message itself (previously only the log line carried it, so a bare
    # traceback did not identify which file was missing).
    atac1_path = input_files.atac1
    if not os.path.exists(atac1_path):
        logger.error(f"ATAC1文件不存在: {atac1_path}")
        raise FileNotFoundError(f"ATAC1文件缺失: {atac1_path}")
    
    logger.info(f"使用数据文件: {atac1_path}")
    
    # 创建输出基础目录
    Path(output_base_dir).mkdir(parents=True, exist_ok=True)
    
    # 创建训练配置对象 — lightweight attribute bag consumed by
    # train_experiment / save_final_results (needs .name and .description).
    training_config = type('TrainingConfig', (), {
        'name': training_name,
        'description': training_description,
        'output_dir': output_dir,
        'input_files': ['atac1']
    })()
    
    # 运行训练 — log and re-raise so hydra still sees the failure exit.
    try:
        logger.info(f"开始训练: {training_name}")
        result = train_experiment('atac_training', training_config, config)
        logger.info("训练完成")
        return result
    except Exception as e:
        logger.error(f"训练失败: {e}")
        raise

# Standard script entry guard; hydra parses CLI overrides inside main().
if __name__ == "__main__":
    main()