"""
酵母基因表达预测模型训练脚本 - 滑动窗口版本 (Sliding Window Version)

主要特性:
1. 滑动窗口训练: 训练阶段也考虑peak之间的相关性，与测试阶段保持一致
2. 连续peak序列处理: 每次训练处理连续的peak序列，而不是单个peak
3. 空间相关性建模: 模型学习peak之间的空间依赖关系
4. 完整的训练流程: 包含训练、验证、测试的完整流程

滑动窗口训练优势:
- 训练和测试使用一致的数据处理方法
- 模型学习peak间的空间相关性和调控关系
- 提高模型对染色质结构的理解能力
- 减少训练-测试性能差异

使用方法:
- 在config.py中设置滑动窗口参数
- 运行脚本进行训练
- 训练完成后使用 test2.py 进行测试
"""

import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader, random_split
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import torch.optim as optim
import signal
import sys
import atexit
import time
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress, spearmanr
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from tqdm import tqdm
import warnings
import hydra
from omegaconf import DictConfig, OmegaConf
from torch.utils.tensorboard import SummaryWriter

warnings.filterwarnings("ignore")

# Module-level state shared with the signal/atexit handlers so that an
# interrupted run can still save the model and evaluate on the test split.
interrupted = False
training_completed = False  # set to True once training finishes normally
current_model = None
current_test_loader = None
current_device = None
current_logger = None
current_output_dir = None

def signal_handler(signum, frame):
    """Flag the run as interrupted when SIGINT/SIGTERM arrives.

    Only sets the `interrupted` flag and reports it; the training loop and the
    atexit hook are responsible for checkpointing and final evaluation.
    """
    global interrupted, current_logger
    interrupted = True
    message = f"收到中断信号 {signum}，准备保存模型并执行测试..."
    if current_logger:
        current_logger.warning(message)
    else:
        print(message)

def cleanup_and_test():
    """Run a final test-set evaluation when the process exits after an interrupt.

    Registered via atexit. Does nothing when training completed normally (the
    regular test phase already ran) or when the model / test loader / output
    directory were never initialized. All failures are logged rather than
    raised, since this executes during interpreter shutdown.
    """
    global current_model, current_test_loader, current_device, current_logger, current_output_dir, training_completed
    
    # Training finished normally: skip the interrupt-path evaluation.
    if training_completed:
        if current_logger:
            current_logger.info("训练已正常完成，跳过中断测试")
        return
    
    if current_model is not None and current_test_loader is not None and current_output_dir is not None:
        try:
            if current_logger:
                current_logger.info("开始执行中断后的测试...")
            
            # Make sure the model scores in eval mode.
            current_model.eval()
            
            # evaluate_model returns (loss, mae, slope, intercept, preds, targets, p).
            # BUGFIX: positions 3/4 were previously unpacked as Spearman/R²,
            # mislabeling the regression slope/intercept in the report below;
            # compute the real Spearman and R² from the predictions instead.
            test_loss, test_mae, _slope, _intercept, test_preds, test_targets, test_p = evaluate_model(
                current_model, current_test_loader, current_device, current_logger, "test"
            )
            test_spearman, _ = spearmanr(test_targets, test_preds)
            test_r2 = r2_score(test_targets, test_preds)
            
            # Persist raw predictions for later analysis.
            df_test = pd.DataFrame({'pred': test_preds, 'true': test_targets, 'split': 'test'})
            df_test.to_csv(current_output_dir / 'interrupted_test_predictions.csv', index=False)
            
            # Scatter plot of predicted vs. true expression on the test split.
            test_mse = mean_squared_error(test_targets, test_preds)
            plt.figure(figsize=(10, 8))
            plt.scatter(test_targets, test_preds, alpha=0.01, s=0.1, c='green')
            plt.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x')
            
            plt.xlabel('True Expression (Normalized 0-1)')
            plt.ylabel('Predicted Expression (Normalized 0-1)')
            plt.title('Test Set - Interrupted Training Evaluation (Sliding Window)')
            plt.xlim(0, 1)
            plt.ylim(0, 1)
            plt.legend(loc='lower right')
            plt.grid(True, alpha=0.3)
            
            test_stats_text = f'Test Set (Sliding Window):\nMAE = {test_mae:.4f}\nSpearman ρ = {test_spearman:.4f}\nR² = {test_r2:.4f}\nMSE = {test_mse:.4f}\nN = {len(test_targets):,}'
            plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes, 
                    fontsize=12, verticalalignment='top', 
                    bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.8))
            
            plt.tight_layout()
            plt.savefig(current_output_dir / 'interrupted_test_evaluation.png', dpi=150, bbox_inches='tight')
            plt.close()
            
            # Markdown report summarizing the interrupted-run test metrics.
            report_path = current_output_dir / 'interrupted_test_report.md'
            with open(report_path, 'w', encoding='utf-8') as f:
                f.write(f"# 中断训练测试报告 (滑动窗口版本)\n\n")
                f.write(f"**测试时间**: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"**测试原因**: 训练被中断\n")
                f.write(f"**训练方法**: 滑动窗口训练\n\n")
                f.write(f"## 测试集结果\n")
                f.write(f"- **测试集损失**: {test_loss:.6f}\n")
                f.write(f"- **测试集MAE**: {test_mae:.6f}\n")
                f.write(f"- **测试集Spearman ρ**: {test_spearman:.6f}\n")
                f.write(f"- **测试集R²**: {test_r2:.6f}\n")
                f.write(f"- **测试集MSE**: {test_mse:.6f}\n")
                f.write(f"- **测试集样本数**: {len(test_targets):,}\n\n")
            
            if current_logger:
                current_logger.info(f"中断测试完成！结果已保存到: {current_output_dir}")
                current_logger.info(f"测试集结果: 损失={test_loss:.6f}, MAE={test_mae:.6f}, Spearman ρ={test_spearman:.6f}, 样本数={len(test_targets):,}")
            else:
                print(f"中断测试完成！测试集结果: 损失={test_loss:.6f}, MAE={test_mae:.6f}, Spearman ρ={test_spearman:.6f}, 样本数={len(test_targets):,}")
                
        except Exception as e:
            if current_logger:
                current_logger.error(f"中断测试失败: {e}")
            else:
                print(f"中断测试失败: {e}")

# Register interrupt handlers and the atexit hook so an interrupted run still
# saves its model and produces a test report.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
atexit.register(cleanup_and_test)

# ========== Configuration is loaded from a YAML file ==========
# All parameters come from get_model/config/yeast_training.yaml:
# data paths, experiment settings, training hyperparameters, etc.

# ========== 滑动窗口数据集类 ==========
class YeastPeakSlidingWindowDataset(Dataset):
    """
    Sliding-window dataset that models correlations between peaks.
    Each item is a contiguous sequence of peaks rather than a single peak.
    """
    def __init__(self, data_path: str, window_config: dict):
        # Load the .npz archive.
        # NOTE(review): numpy ignores mmap_mode for .npz archives, so 'data'
        # is fully materialized on first access — confirm memory use is OK.
        npz_file = np.load(data_path, mmap_mode='r')
        self.data = npz_file['data']  # shape [num_samples, num_peaks, num_features] (unpacked below)
        self.window_size = window_config['window_size']
        self.stride = window_config['stride']
        self.min_valid_peaks = window_config['min_valid_peaks']
        
        logging.info(f"加载训练数据: {data_path}")
        logging.info(f"滑动窗口配置: 窗口大小={self.window_size}, 步长={self.stride}, 最少有效peak={self.min_valid_peaks}")
        
        num_samples, num_peaks, num_features = self.data.shape
        # Per-peak feature layout used by __getitem__ slicing.
        # NOTE(review): assumes num_features covers 283 motif + 1 accessibility
        # + 60 condition features with the label in the last column — verify
        # against the upstream data pipeline.
        self.motif_dim = 283
        self.accessibility_dim = 1
        self.condition_dim = 60
        
        # Precompute every (sample, start, end) window once, up front.
        self.window_indices = self._generate_sliding_windows(num_samples, num_peaks)
        self.total = len(self.window_indices)
        
        logging.info(f"训练数据集: 样本={num_samples}, peaks/样本={num_peaks}, "
                    f"特征数={num_features}, 滑动窗口数={self.total}")

    def _generate_sliding_windows(self, num_samples: int, num_peaks: int):
        """
        Generate the sliding-window index list.
        
        Parameters (from the window config):
        - window_size: number of consecutive peaks per window
        - stride: distance the window advances each step
        - min_valid_peaks: minimum number of valid peaks a window must contain
           - a valid peak is one whose label is neither NaN nor inf
           - this ensures each window has enough data for meaningful training
           - windows with too few valid peaks could hurt training quality
        
        Window generation strategy:
        1. Standard windows: full windows from start_idx to start_idx+window_size
        2. Trailing window: covers a possibly incomplete window at the end of the data
        3. Quality filter: keep only windows meeting the valid-peak threshold
        """
        window_indices = []
        
        for sample_idx in range(num_samples):
            # Generate sliding windows for each sample independently.
            
            # 1. Standard fixed-size windows advanced by `stride`.
            for start_idx in range(0, num_peaks - self.window_size + 1, self.stride):
                end_idx = start_idx + self.window_size
                
                # Count valid peaks in the window (label not NaN/inf).
                window_labels = self.data[sample_idx, start_idx:end_idx, -1]
                valid_peaks = np.sum(~(np.isnan(window_labels) | np.isinf(window_labels)))
                
                if valid_peaks >= self.min_valid_peaks:
                    window_indices.append({
                        'sample_idx': sample_idx,
                        'start_idx': start_idx,
                        'end_idx': end_idx,
                        'valid_peaks': valid_peaks,
                        'window_type': 'standard',
                        'actual_size': self.window_size
                    })
            
            # 2. Trailing window for peaks past the last standard window.
            # True for any window_size > 1, so a trailing window is attempted
            # for every sample.
            last_standard_end = num_peaks - self.window_size + 1
            if last_standard_end < num_peaks:
                # Smallest trailing window we accept: at least min_valid_peaks
                # and at least half a standard window.
                min_required_size = max(self.min_valid_peaks, self.window_size // 2)  # at least half the window size
                
                # Try trailing windows from largest to smallest; keep the first
                # one that has enough valid peaks.
                # NOTE(review): end_size == window_size can duplicate the last
                # standard window when (num_peaks - window_size) % stride == 0
                # — confirm whether that duplication is intended.
                for end_size in range(self.window_size, min_required_size - 1, -1):
                    start_idx = num_peaks - end_size
                    if start_idx >= 0:
                        window_labels = self.data[sample_idx, start_idx:num_peaks, -1]
                        valid_peaks = np.sum(~(np.isnan(window_labels) | np.isinf(window_labels)))
                        
                        if valid_peaks >= self.min_valid_peaks:
                            window_indices.append({
                                'sample_idx': sample_idx,
                                'start_idx': start_idx,
                                'end_idx': num_peaks,
                                'valid_peaks': valid_peaks,
                                'window_type': 'trailing',
                                'actual_size': end_size
                            })
                            break  # stop after the first acceptable trailing window
        
        return window_indices

    def __len__(self):
        return self.total

    def __getitem__(self, idx: int):
        """Return one window: features, labels, validity mask and metadata.

        Trailing windows shorter than window_size are padded to window_size
        (zeros for features, NaN for labels, False for the mask).
        """
        window_info = self.window_indices[idx]
        sample_idx = window_info['sample_idx']
        start_idx = window_info['start_idx']
        end_idx = window_info['end_idx']
        window_type = window_info['window_type']
        actual_size = window_info['actual_size']
        
        # All peaks belonging to this window.
        window_data = self.data[sample_idx, start_idx:end_idx]
        
        # Slice out the three feature groups per peak.
        motif_features = torch.tensor(window_data[:, :self.motif_dim], dtype=torch.float32)
        accessibility_features = torch.tensor(window_data[:, self.motif_dim:self.motif_dim+self.accessibility_dim], dtype=torch.float32)
        condition_features = torch.tensor(window_data[:, self.motif_dim+self.accessibility_dim:self.motif_dim+self.accessibility_dim+self.condition_dim], dtype=torch.float32)
        
        # Concatenate so every peak carries its full feature vector.
        all_features = torch.cat([motif_features, accessibility_features, condition_features], dim=-1)
        
        # The label is the last column of each peak.
        labels = torch.tensor(window_data[:, -1], dtype=torch.float32)
        
        # Mask marking peaks with usable (finite) labels.
        valid_mask = ~(torch.isnan(labels) | torch.isinf(labels))
        
        # Pad trailing windows up to the standard window size.
        if actual_size < self.window_size:
            # Number of padding positions required.
            padding_size = self.window_size - actual_size
            
            # Padding: zero features, NaN labels, False mask entries.
            padding_features = torch.zeros(padding_size, all_features.size(1), dtype=torch.float32)
            padding_labels = torch.full((padding_size,), float('nan'), dtype=torch.float32)
            padding_mask = torch.zeros(padding_size, dtype=torch.bool)
            
            # Append the padding along the peak dimension.
            all_features = torch.cat([all_features, padding_features], dim=0)
            labels = torch.cat([labels, padding_labels], dim=0)
            valid_mask = torch.cat([valid_mask, padding_mask], dim=0)
        
        return {
            'motif_features': all_features,  # [window_size, total_features]
            'labels': labels,                # [window_size]
            'valid_mask': valid_mask,        # [window_size]
            'sample_idx': sample_idx,
            'start_idx': start_idx,
            'end_idx': end_idx,
            'window_type': window_type,      # 'standard' or 'trailing'
            'actual_size': actual_size,      # real (unpadded) window size
            'window_info': window_info
        }

class MultiSlidingWindowDataset(Dataset):
    """Concatenation of multiple sliding-window datasets.

    Presents several YeastPeakSlidingWindowDataset instances as one flat
    dataset; a global index is mapped to (sub-dataset, local index).
    """
    
    def __init__(self, data_paths: list, window_config: dict):
        """Load each path as a YeastPeakSlidingWindowDataset and record sizes.

        Args:
            data_paths: list of .npz file paths, one per sub-dataset.
            window_config: sliding-window parameters forwarded to each dataset.
        """
        self.datasets = []
        self.dataset_sizes = []
        self.total_size = 0
        
        for path in data_paths:
            dataset = YeastPeakSlidingWindowDataset(path, window_config)
            self.datasets.append(dataset)
            self.dataset_sizes.append(len(dataset))
            self.total_size += len(dataset)
            
        logging.info(f"多数据集滑动窗口加载完成，总窗口数: {self.total_size}")
        for i, (path, size) in enumerate(zip(data_paths, self.dataset_sizes)):
            logging.info(f"  数据集 {i+1}: {os.path.basename(path)} - {size:,} 个窗口")

    def __len__(self):
        return self.total_size

    def __getitem__(self, idx: int):
        """Fetch the window at global index `idx` from the owning sub-dataset.

        Supports negative indexing and raises IndexError for out-of-range
        indices. (BUGFIX: previously idx=-1 returned the last window of the
        *first* sub-dataset, and an out-of-range index silently indexed a
        wrong sub-dataset instead of failing.)
        """
        if idx < 0:
            idx += self.total_size
        if not 0 <= idx < self.total_size:
            raise IndexError(f"index out of range for dataset of size {self.total_size}")
        
        # Walk the sub-datasets, shifting idx into local coordinates.
        for dataset, size in zip(self.datasets, self.dataset_sizes):
            if idx < size:
                return dataset[idx]
            idx -= size
        raise IndexError(idx)  # unreachable; defensive guard

def create_data_loaders(dataset, config, logger):
    """Split the dataset 70/15/15 and wrap each split in a DataLoader.

    The split is made reproducible by seeding both the global RNG and the
    generator handed to random_split with config.experiment.seed. Only the
    training loader shuffles.

    Returns:
        (train_loader, val_loader, test_loader)
    """
    # 70% train / 15% validation / remainder test (absorbs rounding).
    total_size = len(dataset)
    train_size = int(0.7 * total_size)
    val_size = int(0.15 * total_size)
    test_size = total_size - train_size - val_size
    
    logger.info(f"数据集划分: 总窗口数={total_size:,}, 训练集={train_size:,} (70%), "
                f"验证集={val_size:,} (15%), 测试集={test_size:,} (15%)")
    
    # Fixed seed so every run produces the same split.
    torch.manual_seed(config.experiment.seed)
    
    splits = random_split(
        dataset,
        [train_size, val_size, test_size],
        generator=torch.Generator().manual_seed(config.experiment.seed)
    )
    
    # Identical loader settings for all three splits except shuffling.
    shared_kwargs = dict(
        batch_size=config.training.batch_size,
        num_workers=config.training.num_workers,
        pin_memory=True,
    )
    train_loader, val_loader, test_loader = (
        DataLoader(split, shuffle=do_shuffle, **shared_kwargs)
        for split, do_shuffle in zip(splits, (True, False, False))
    )
    
    return train_loader, val_loader, test_loader

def evaluate_model(model, data_loader, device, logger, split_name="validation"):
    """Evaluate a sliding-window model on one data split.

    Runs the model over the loader, averages the model's own loss across
    valid peaks, collects per-peak predictions/targets, and logs a battery
    of regression metrics (MAE as the primary metric).

    Args:
        model: module with forward(dict)->tensor and compute_loss(pred, target).
        data_loader: iterable of sliding-window batch dicts.
        device: torch device to run on.
        logger: logger for the metric report.
        split_name: label used in progress bars and log lines.

    Returns:
        (avg_loss, mae, slope, intercept, all_preds, all_targets, spearman_p)
        where slope/intercept come from the true-vs-pred linear regression.
    """
    model.eval()
    total_loss = 0
    all_preds = []
    all_targets = []
    
    with torch.no_grad():
        for batch_x in tqdm(data_loader, desc=f'评估 {split_name}'):
            # Sliding-window batch layout: [batch_size, window_size, ...]
            batch_labels = batch_x['labels'].to(device)
            batch_valid_masks = batch_x['valid_mask'].to(device)
            features_for_model = {'motif_features': batch_x['motif_features'].to(device)}
            
            outputs = model(features_for_model)  # [batch_size, window_size, 1]
            
            # Average the loss over valid peaks only.
            # NOTE(review): kept as per-peak compute_loss calls (slow) to
            # preserve whatever reduction compute_loss applies internally.
            loss = 0
            valid_count = 0
            
            for i in range(outputs.size(0)):  # each sample in the batch
                for j in range(outputs.size(1)):  # each peak in the window
                    if batch_valid_masks[i, j]:
                        peak_loss = model.compute_loss(
                            outputs[i:i+1, j:j+1], 
                            batch_labels[i:i+1, j:j+1]
                        )
                        loss += peak_loss.item()
                        valid_count += 1
            
            if valid_count > 0:
                total_loss += loss / valid_count
                
                # Collect valid predictions/targets. Boolean-mask indexing
                # flattens row-major, matching the (i, j) loop order above.
                mask_cpu = batch_valid_masks.detach().cpu()
                preds_cpu = outputs.detach().cpu().reshape(mask_cpu.shape)
                targets_cpu = batch_labels.detach().cpu()
                all_preds.extend(preds_cpu[mask_cpu].tolist())
                all_targets.extend(targets_cpu[mask_cpu].tolist())
    
    # Mean per-batch loss.
    avg_loss = total_loss / len(data_loader) if len(data_loader) > 0 else 0
    
    all_preds_np = np.array(all_preds)
    all_targets_np = np.array(all_targets)
    
    try:
        # ===== metrics, most important first =====
        
        # 1. MAE - mean absolute error (primary metric)
        mae = mean_absolute_error(all_targets_np, all_preds_np)
        
        # 2. Spearman correlation - rank agreement
        spearman_rho, spearman_p = spearmanr(all_targets_np, all_preds_np)
        
        # 3. R² - coefficient of determination
        r2 = r2_score(all_targets_np, all_preds_np)
        
        # 4. Pearson correlation - linear association
        pearson_r, pearson_p = pearsonr(all_targets_np, all_preds_np)
        
        # 5. Kendall's tau - rank consistency
        from scipy.stats import kendalltau
        kendall_tau, kendall_p = kendalltau(all_targets_np, all_preds_np)
        
        # 6. Linear regression of predictions on targets
        reg = linregress(all_targets_np, all_preds_np)
        slope = reg.slope
        intercept = reg.intercept
        
        # 7. MSE / RMSE / median absolute error / MAPE
        mse = mean_squared_error(all_targets_np, all_preds_np)
        mape = np.mean(np.abs((all_targets_np - all_preds_np) / (all_targets_np + 1e-8))) * 100
        rmse = np.sqrt(mse)
        median_ae = np.median(np.abs(all_targets_np - all_preds_np))
        
        # 8. Distribution statistics (computed here so the log section below
        # never touches possibly-empty arrays directly)
        pred_mean = all_preds_np.mean()
        pred_std = all_preds_np.std()
        true_mean = all_targets_np.mean()
        true_std = all_targets_np.std()
        true_median = np.median(all_targets_np)
        pred_median = np.median(all_preds_np)
        true_min, true_max = all_targets_np.min(), all_targets_np.max()
        pred_min, pred_max = all_preds_np.min(), all_preds_np.max()
        
        # 9. Error distribution
        errors = all_preds_np - all_targets_np
        error_mean = errors.mean()
        error_std = errors.std()
        error_median = np.median(errors)
        error_min, error_max = errors.min(), errors.max()
        q25_error = np.percentile(np.abs(errors), 25)
        q75_error = np.percentile(np.abs(errors), 75)
        
        # 10. MAE stratified by expression level (above/below the median)
        high_expr_mask = all_targets_np > true_median
        high_expr_mae = mean_absolute_error(all_targets_np[high_expr_mask], all_preds_np[high_expr_mask]) if np.sum(high_expr_mask) > 0 else float('nan')
        
        low_expr_mask = all_targets_np <= true_median
        low_expr_mae = mean_absolute_error(all_targets_np[low_expr_mask], all_preds_np[low_expr_mask]) if np.sum(low_expr_mask) > 0 else float('nan')
        
    except Exception as e:
        # BUGFIX: the fallback previously left spearman_p/pearson_p/kendall_p,
        # the medians/ranges and `errors` undefined, so the logging below
        # raised NameError whenever metric computation failed (e.g. empty
        # split). Define every name the log section uses.
        nan = float('nan')
        mae = spearman_rho = r2 = pearson_r = kendall_tau = nan
        spearman_p = pearson_p = kendall_p = nan
        slope = intercept = mse = mape = rmse = median_ae = nan
        pred_mean = pred_std = true_mean = true_std = nan
        true_median = pred_median = nan
        true_min = true_max = pred_min = pred_max = nan
        error_mean = error_std = error_median = q25_error = q75_error = nan
        error_min = error_max = nan
        high_expr_mae = low_expr_mae = nan
        logger.warning(f"{split_name}集指标计算失败: {e}")
    
    # Report all metrics, ordered by importance for the gene-prediction task.
    logger.info(f"{split_name}集评估结果 (基因预测任务，按重要性排序):")
    logger.info(f"  {'='*70}")
    
    # 1. Core gene-prediction metrics
    logger.info(f"  🧬 基因预测核心指标:")
    logger.info(f"    1. Spearman ρ (排序相关性) = {spearman_rho:.6f} (p = {spearman_p:.2e}) ← 基因预测最重要")
    logger.info(f"    2. MAE (平均绝对误差) = {mae:.6f} ← 预测准确性")
    logger.info(f"    3. R² (决定系数) = {r2:.6f} ← 解释方差")
    logger.info(f"    4. Pearson r (线性相关性) = {pearson_r:.6f} (p = {pearson_p:.2e}) ← 线性关系")
    logger.info(f"    5. Kendall τ (排序一致性) = {kendall_tau:.6f} (p = {kendall_p:.2e}) ← 排序稳定性")
    
    # 2. Regression analysis
    logger.info(f"  📈 回归分析 (基因表达预测):")
    logger.info(f"    6. 回归斜率 = {slope:.6f} ← 表达水平缩放")
    logger.info(f"    7. 回归截距 = {intercept:.6f} ← 基础表达水平")
    logger.info(f"    8. 回归R² = {slope**2:.6f} ← 线性拟合度")
    
    # 3. Error analysis
    logger.info(f"  ⚠️  误差分析 (正态分布数据):")
    logger.info(f"    9. RMSE (均方根误差) = {rmse:.6f} ← 大误差惩罚")
    logger.info(f"    10. 中位数绝对误差 = {median_ae:.6f} ← 中值附近误差")
    logger.info(f"    11. MAPE (平均绝对百分比误差) = {mape:.2f}% ← 相对误差")
    logger.info(f"    12. MSE (均方误差) = {mse:.6f} ← 平方误差")
    
    # 4. Distribution statistics
    logger.info(f"  📊 分布统计 (正态分布特征):")
    logger.info(f"    真实值: 均值={true_mean:.4f}, 标准差={true_std:.4f}, 中位数={true_median:.4f}")
    logger.info(f"    预测值: 均值={pred_mean:.4f}, 标准差={pred_std:.4f}, 中位数={pred_median:.4f}")
    logger.info(f"    范围: 真实值=[{true_min:.4f}, {true_max:.4f}], 预测值=[{pred_min:.4f}, {pred_max:.4f}]")
    
    # 5. Error distribution
    logger.info(f"  🔍 误差分布 (正态分布数据):")
    logger.info(f"    误差均值={error_mean:.6f}, 误差标准差={error_std:.6f}, 误差中位数={error_median:.6f}")
    logger.info(f"    25%分位数误差={q25_error:.6f}, 75%分位数误差={q75_error:.6f}")
    logger.info(f"    误差范围=[{error_min:.6f}, {error_max:.6f}]")
    
    # 6. MAE stratified by expression level
    logger.info(f"  🎯 表达水平分层分析 (基因表达特征):")
    logger.info(f"    高表达区域MAE = {high_expr_mae:.6f} (表达值 > {true_median:.4f})")
    logger.info(f"    低表达区域MAE = {low_expr_mae:.6f} (表达值 ≤ {true_median:.4f})")
    
    # 7. Sample counts
    logger.info(f"  📊 样本信息:")
    logger.info(f"    有效peak数: {len(all_targets):,}")
    logger.info(f"    损失: {avg_loss:.6f}")
    logger.info(f"  {'='*70}")
    
    # MAE is the primary scalar metric; slope/intercept come from linregress.
    return avg_loss, mae, slope, intercept, all_preds, all_targets, spearman_p

def train_experiment(experiment_name: str, experiment_config: dict, config: DictConfig):
    """Train a single experiment with the sliding-window pipeline.

    Builds the dataset(s), model, optimizer and scheduler from the Hydra
    config, runs the train/validate loop with Spearman-based early stopping
    and checkpointing, evaluates on the held-out test split and writes all
    artifacts (best checkpoint, TensorBoard logs, CSVs) to a timestamped
    output directory.

    Args:
        experiment_name: experiment identifier (bookkeeping only).
        experiment_config: per-experiment config section
            (name / description / input_files / output_dir).
        config: full Hydra configuration.

    Returns:
        dict with best validation MAE, test MAE/loss and the output directory.
    """
    global current_model, current_test_loader, current_device, current_logger, current_output_dir, training_completed
    
    # Timestamped output directory so repeated runs never collide.
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    output_dir = Path(config.data.output_base_dir) / f"{experiment_config.output_dir}_{timestamp}"
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # Expose state to the interrupt/atexit handlers.
    current_output_dir = output_dir
    
    # Reset logging handlers to avoid duplicated lines across experiments.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(output_dir / 'train_sliding_window.log', mode='w', encoding='utf-8')
        ],
        force=True  # force reconfiguration even if logging was set up before
    )
    logger = logging.getLogger(__name__)
    current_logger = logger
    
    logger.info(f"开始实验: {experiment_config.name}")
    logger.info(f"实验描述: {experiment_config.description}")
    logger.info(f"输出目录: {output_dir}")
    logger.info(f"训练方法: 滑动窗口方法 (考虑peak之间的相关性)")
    
    # Echo key hyperparameters for debugging.
    logger.info(f"=== 配置信息 ===")
    logger.info(f"max_epochs: {config.training.max_epochs}")
    logger.info(f"batch_size: {config.training.batch_size}")
    logger.info(f"learning_rate: {config.training.learning_rate}")
    logger.info(f"weight_decay: {config.training.weight_decay}")
    logger.info(f"clip_grad: {config.training.clip_grad}")
    logger.info(f"early_stopping_patience: {config.training.early_stopping_patience}")
    logger.info(f"early_stopping_min_delta: {config.training.early_stopping_min_delta}")
    logger.info(f"=================")
    
    # Reproducibility.
    torch.manual_seed(config.experiment.seed)
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    current_device = device
    
    # Build the sliding-window dataset (single file or merged multi-file).
    if len(experiment_config.input_files) == 1:
        data_path = config.data.input_files[experiment_config.input_files[0]]
        logger.info(f"使用单数据集: {data_path}")
        dataset = YeastPeakSlidingWindowDataset(data_path, config.sliding_window)
    else:
        data_paths = [config.data.input_files[f] for f in experiment_config.input_files]
        logger.info(f"使用多数据集: {len(data_paths)} 个文件")
        for i, path in enumerate(data_paths):
            logger.info(f"  数据集 {i+1}: {path}")
        dataset = MultiSlidingWindowDataset(data_paths, config.sliding_window)
    
    # 70/15/15 split into loaders; keep the test loader for the atexit hook.
    train_loader, val_loader, test_loader = create_data_loaders(dataset, config, logger)
    current_test_loader = test_loader
    
    model = YeastModel(config.model.model)
    model = model.to(device)
    current_model = model
    
    optimizer = optim.AdamW(
        model.parameters(),
        lr=config.training.learning_rate,
        weight_decay=config.training.weight_decay
    )
    
    # Cosine decay down to 1% of the initial learning rate.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer, 
        T_max=config.training.max_epochs,
        eta_min=config.training.learning_rate * 0.01
    )
    
    tensorboard_dir = output_dir / 'tensorboard_logs'
    tensorboard_dir.mkdir(exist_ok=True)
    writer = SummaryWriter(tensorboard_dir)
    logger.info(f"TensorBoard日志目录: {tensorboard_dir}")
    
    # Trace the model graph with a dummy window.
    # NOTE(review): 344 = 283 motif + 1 accessibility + 60 condition features;
    # keep in sync with the dataset's feature layout.
    try:
        sample_input = torch.randn(1, config.sliding_window.window_size, 344).to(device)
        sample_features = {'motif_features': sample_input}
        writer.add_graph(model, sample_features)
        logger.info("模型结构已记录到TensorBoard")
    except Exception as e:
        logger.warning(f"记录模型结构失败: {e}")
    
    # Training state; early stopping monitors validation Spearman
    # (higher is better), with MAE tracked alongside for reporting.
    best_val_spearman = -1.0
    best_val_mae = float('inf')
    train_losses = []
    val_losses = []
    train_maes = []
    val_maes = []
    patience_counter = 0
    early_stopping_patience = config.training.get('early_stopping_patience', None)
    early_stopping_min_delta = config.training.get('early_stopping_min_delta', 0.001)
    
    logger.info(f"开始训练，共 {config.training.max_epochs} 个epoch")
    if early_stopping_patience:
        logger.info(f"早停耐心值: {early_stopping_patience}, 最小改善阈值: {early_stopping_min_delta}")
        logger.info(f"早停监控指标: Spearman相关系数 (基因预测任务最重要指标)")
    
    for epoch in range(config.training.max_epochs):
        if interrupted:
            logger.warning("训练被中断")
            break
            
        # ---- training phase ----
        model.train()
        epoch_train_loss = 0
        epoch_train_mae = 0
        train_batches = 0
        
        for batch_idx, batch_data in enumerate(tqdm(train_loader, desc=f'Epoch {epoch+1}/{config.training.max_epochs}')):
            if interrupted:
                break
                
            batch_labels = batch_data['labels'].to(device)
            batch_valid_masks = batch_data['valid_mask'].to(device)
            features_for_model = {'motif_features': batch_data['motif_features'].to(device)}
            
            optimizer.zero_grad()
            outputs = model(features_for_model)  # [batch_size, window_size, 1]
            
            # Average the loss over valid (finite-label) peaks only.
            total_loss = 0
            valid_count = 0
            
            for i in range(outputs.size(0)):  # each sample in the batch
                for j in range(outputs.size(1)):  # each peak in the window
                    if batch_valid_masks[i, j]:
                        peak_loss = model.compute_loss(
                            outputs[i:i+1, j:j+1], 
                            batch_labels[i:i+1, j:j+1]
                        )
                        total_loss += peak_loss
                        valid_count += 1
            
            if valid_count > 0:
                avg_loss = total_loss / valid_count
                avg_loss.backward()
                
                # Gradient clipping (disabled when clip_grad <= 0).
                if config.training.clip_grad > 0:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), config.training.clip_grad)
                
                optimizer.step()
                
                epoch_train_loss += avg_loss.item()
                # BUGFIX: training MAE was initialized but never accumulated,
                # so 'MAE/Train' and the saved history were always 0.
                with torch.no_grad():
                    preds_flat = outputs.reshape(batch_valid_masks.shape)
                    batch_mae = torch.abs(
                        preds_flat[batch_valid_masks] - batch_labels[batch_valid_masks]
                    ).mean().item()
                epoch_train_mae += batch_mae
                train_batches += 1
        
        # Mean per-batch training loss/MAE for this epoch.
        if train_batches > 0:
            avg_train_loss = epoch_train_loss / train_batches
            avg_train_mae = epoch_train_mae / train_batches
        else:
            avg_train_loss = 0
            avg_train_mae = 0
        
        # ---- validation phase ----
        val_loss, val_mae, val_slope, val_intercept, val_preds, val_targets, val_p = evaluate_model(
            model, val_loader, device, logger, "validation"
        )
        
        # Validation Spearman drives checkpointing and early stopping.
        # (spearmanr is imported at module level.)
        val_spearman, _ = spearmanr(val_targets, val_preds)
        
        writer.add_scalar('Loss/Train', avg_train_loss, epoch)
        writer.add_scalar('Loss/Validation', val_loss, epoch)
        writer.add_scalar('MAE/Train', avg_train_mae, epoch)
        writer.add_scalar('MAE/Validation', val_mae, epoch)
        writer.add_scalar('Learning_Rate', optimizer.param_groups[0]['lr'], epoch)
        writer.add_scalar('Spearman/Validation', val_spearman, epoch)
        
        # Snapshot a validation scatter plot every 5 epochs.
        if (epoch + 1) % 5 == 0:
            fig, ax = plt.subplots(figsize=(8, 6))
            ax.scatter(val_targets, val_preds, alpha=0.5, s=1)
            ax.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x')
            ax.set_xlabel('True Expression (Normalized 0-1)')
            ax.set_ylabel('Predicted Expression (Normalized 0-1)')
            ax.set_title(f'Validation Set - Epoch {epoch+1}')
            ax.set_xlim(0, 1)
            ax.set_ylim(0, 1)
            ax.legend()
            ax.grid(True, alpha=0.3)
            writer.add_figure('Validation/Scatter', fig, epoch)
            plt.close(fig)
        
        scheduler.step()
        
        # Checkpoint on Spearman improvement; otherwise count toward early stop.
        if val_spearman > best_val_spearman + early_stopping_min_delta:
            best_val_spearman = val_spearman
            best_val_mae = val_mae  # record MAE of the best-Spearman epoch
            patience_counter = 0
            
            # Re-create the directory defensively in case it was removed.
            output_dir.mkdir(parents=True, exist_ok=True)
            
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_spearman': val_spearman,
                'val_mae': val_mae,
                'val_loss': val_loss,
                'config': config
            }, output_dir / 'best_model.pth')
            logger.info(f"Epoch {epoch+1}: 保存最佳模型 (Spearman: {val_spearman:.6f}, MAE: {val_mae:.6f})")
        else:
            patience_counter += 1
            if early_stopping_patience and patience_counter >= early_stopping_patience:
                logger.info(f"早停触发！连续 {patience_counter} 轮Spearman无改善，停止训练")
                logger.info(f"最佳验证Spearman: {best_val_spearman:.6f}, MAE: {best_val_mae:.6f}")
                break
        
        # Per-epoch history for the final CSV/plots.
        train_losses.append(avg_train_loss)
        val_losses.append(val_loss)
        train_maes.append(avg_train_mae)
        val_maes.append(val_mae)
        
        logger.info(f"Epoch {epoch+1}/{config.training.max_epochs}: "
                   f"Train Loss={avg_train_loss:.6f}, Val Loss={val_loss:.6f}, "
                   f"Val MAE={val_mae:.6f}, Val Spearman={val_spearman:.6f}, "
                   f"LR={optimizer.param_groups[0]['lr']:.6f}, "
                   f"Patience: {patience_counter}/{early_stopping_patience or 'N/A'}")
    
    # ---- test phase ----
    logger.info("开始测试阶段...")
    test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, test_p = evaluate_model(
        model, test_loader, device, logger, "test"
    )
    
    # Persist history, predictions, metrics and plots.
    save_final_results(output_dir, train_losses, val_losses, train_maes, val_maes,
                      test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, 
                      experiment_config, test_p)
    
    writer.close()
    logger.info(f"TensorBoard日志已保存到: {tensorboard_dir}")
    
    # Mark normal completion so the atexit hook skips the interrupt test.
    training_completed = True
    
    logger.info("训练完成！")
    
    return {
        'best_val_mae': best_val_mae,
        'test_mae': test_mae,
        'test_loss': test_loss,
        'output_dir': output_dir
    }

def save_final_results(output_dir, train_losses, val_losses, train_maes, val_maes,
                      test_loss, test_mae, test_slope, test_intercept, test_preds, test_targets, 
                      experiment_config, test_p=None):
    """Persist all final training artifacts under ``output_dir``.

    Writes the prediction / history / metric-summary CSVs, the test-set
    scatter plot, the 2x2 training-analysis figure and a markdown report.

    Args:
        output_dir: ``pathlib.Path``-like directory (assumed to exist).
        train_losses, val_losses, train_maes, val_maes: per-epoch history lists.
        test_loss, test_mae, test_slope, test_intercept: scalar test metrics.
        test_preds, test_targets: 1-D sequences of predictions / ground truth.
        experiment_config: object exposing ``name`` and ``description`` attributes.
        test_p: accepted for caller compatibility; currently unused here.
    """
    # Derived test-set metrics. spearmanr is already imported at module
    # level, so the previous redundant local re-import was removed.
    test_spearman, _ = spearmanr(test_targets, test_preds)
    test_r2 = r2_score(test_targets, test_preds)
    test_mse = mean_squared_error(test_targets, test_preds)
    test_rmse = np.sqrt(test_mse)

    _write_result_csvs(output_dir, test_preds, test_targets,
                       train_losses, val_losses, train_maes, val_maes,
                       test_mae, test_mse, test_rmse, test_r2,
                       test_spearman, test_slope, test_intercept)
    _plot_test_scatter(output_dir, test_preds, test_targets,
                       test_mae, test_spearman, test_r2, test_mse)
    _plot_training_analysis(output_dir, train_losses, val_losses,
                            train_maes, val_maes, test_preds, test_targets)
    _write_training_report(output_dir, experiment_config,
                           test_loss, test_mae, test_spearman, test_r2,
                           test_slope, test_intercept, test_mse, len(test_targets),
                           train_losses, val_losses, train_maes, val_maes)


def _write_result_csvs(output_dir, test_preds, test_targets,
                       train_losses, val_losses, train_maes, val_maes,
                       test_mae, test_mse, test_rmse, test_r2,
                       test_spearman, test_slope, test_intercept):
    """Write predictions, per-epoch history and metric summary as CSV files."""
    # Test-set predictions.
    pd.DataFrame({'pred': test_preds, 'true': test_targets, 'split': 'test'}).to_csv(
        output_dir / 'test_predictions.csv', index=False)

    # Per-epoch training history.
    pd.DataFrame({
        'epoch': range(1, len(train_losses) + 1),
        'train_loss': train_losses,
        'val_loss': val_losses,
        'train_mae': train_maes,
        'val_mae': val_maes
    }).to_csv(output_dir / 'training_history.csv', index=False)

    # Metric summary table.
    pd.DataFrame({
        'metric': ['MAE', 'MSE', 'RMSE', 'R²', 'Spearman ρ', 'Slope', 'Intercept', 'N_samples'],
        'value': [test_mae, test_mse, test_rmse, test_r2, test_spearman,
                  test_slope, test_intercept, len(test_targets)]
    }).to_csv(output_dir / 'test_metrics_summary.csv', index=False)


def _plot_test_scatter(output_dir, test_preds, test_targets,
                       test_mae, test_spearman, test_r2, test_mse):
    """Save the test-set true-vs-predicted scatter plot as a PNG."""
    plt.figure(figsize=(10, 8))
    plt.scatter(test_targets, test_preds, alpha=0.01, s=0.1, c='green')
    plt.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x')

    plt.xlabel('True Expression (Normalized 0-1)')
    plt.ylabel('Predicted Expression (Normalized 0-1)')
    plt.title('Test Set - Sliding Window Training Results')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.legend(loc='lower right')
    plt.grid(True, alpha=0.3)

    test_stats_text = f'Test Set (Sliding Window):\nMAE = {test_mae:.4f}\nSpearman ρ = {test_spearman:.4f}\nR² = {test_r2:.4f}\nMSE = {test_mse:.4f}\nN = {len(test_targets):,}'
    plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes, 
            fontsize=12, verticalalignment='top', 
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.8))

    plt.tight_layout()
    plt.savefig(output_dir / 'test_evaluation.png', dpi=150, bbox_inches='tight')
    plt.close()


def _plot_training_analysis(output_dir, train_losses, val_losses,
                            train_maes, val_maes, test_preds, test_targets):
    """Save the 2x2 training-analysis figure (losses, MAE, errors, density)."""
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))

    # Loss curves.
    ax1.plot(train_losses, label='Train Loss', color='blue')
    ax1.plot(val_losses, label='Validation Loss', color='red')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Loss')
    ax1.set_title('Training and Validation Loss')
    ax1.legend()
    ax1.grid(True, alpha=0.3)

    # MAE curves.
    ax2.plot(train_maes, label='Train MAE', color='blue')
    ax2.plot(val_maes, label='Validation MAE', color='red')
    ax2.set_xlabel('Epoch')
    ax2.set_ylabel('MAE')
    ax2.set_title('Training and Validation MAE')
    ax2.legend()
    ax2.grid(True, alpha=0.3)

    # Prediction-error histogram.
    errors = np.array(test_preds) - np.array(test_targets)
    ax3.hist(errors, bins=50, alpha=0.7, color='skyblue', edgecolor='black')
    ax3.axvline(0, color='red', linestyle='--', linewidth=2, label='Zero Error')
    ax3.axvline(np.mean(errors), color='orange', linestyle='-', linewidth=2, label=f'Mean Error: {np.mean(errors):.4f}')
    ax3.set_xlabel('Prediction Error')
    ax3.set_ylabel('Frequency')
    ax3.set_title('Prediction Error Distribution')
    ax3.legend()
    ax3.grid(True, alpha=0.3)

    # Prediction density (hexbin).
    ax4.hexbin(test_targets, test_preds, gridsize=30, cmap='Blues', alpha=0.8)
    ax4.plot([0, 1], [0, 1], 'r--', linewidth=2, label='y=x')
    ax4.set_xlabel('True Expression (Normalized 0-1)')
    ax4.set_ylabel('Predicted Expression (Normalized 0-1)')
    ax4.set_title('Prediction Density Plot')
    ax4.set_xlim(0, 1)
    ax4.set_ylim(0, 1)
    ax4.legend()
    ax4.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(output_dir / 'training_analysis.png', dpi=150, bbox_inches='tight')
    plt.close()


def _write_training_report(output_dir, experiment_config,
                           test_loss, test_mae, test_spearman, test_r2,
                           test_slope, test_intercept, test_mse, n_samples,
                           train_losses, val_losses, train_maes, val_maes):
    """Write the markdown training report summarising metrics and artifacts."""
    report_path = output_dir / 'training_report.md'
    with open(report_path, 'w', encoding='utf-8') as f:
        f.write(f"# 滑动窗口训练报告\n\n")
        f.write(f"**训练时间**: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"**实验名称**: {experiment_config.name}\n")
        f.write(f"**实验描述**: {experiment_config.description}\n")
        f.write(f"**训练方法**: 滑动窗口方法\n\n")
        f.write(f"## 测试集结果\n")
        f.write(f"- **测试集损失**: {test_loss:.6f}\n")
        f.write(f"- **测试集MAE**: {test_mae:.6f}\n")
        f.write(f"- **测试集Spearman ρ**: {test_spearman:.6f}\n")
        f.write(f"- **测试集R²**: {test_r2:.6f}\n")
        f.write(f"- **测试集斜率**: {test_slope:.6f}\n")
        f.write(f"- **测试集截距**: {test_intercept:.6f}\n")
        f.write(f"- **测试集MSE**: {test_mse:.6f}\n")
        f.write(f"- **测试集样本数**: {n_samples:,}\n\n")
        f.write(f"## 训练历史\n")
        f.write(f"- **总训练轮次**: {len(train_losses)}\n")
        f.write(f"- **最终训练损失**: {train_losses[-1]:.6f}\n")
        f.write(f"- **最终验证损失**: {val_losses[-1]:.6f}\n")
        f.write(f"- **最终训练MAE**: {train_maes[-1]:.6f}\n")
        f.write(f"- **最终验证MAE**: {val_maes[-1]:.6f}\n\n")
        
        f.write(f"## 文件说明\n")
        f.write(f"- `best_model.pth`: 最佳模型权重\n")
        f.write(f"- `test_predictions.csv`: 测试集预测结果\n")
        f.write(f"- `test_evaluation.png`: 测试集散点图\n")
        f.write(f"- `training_analysis.png`: 训练分析图表（损失曲线、MAE曲线、误差分布、预测密度）\n")
        f.write(f"- `training_history.csv`: 训练历史数据\n")
        f.write(f"- `test_metrics_summary.csv`: 测试指标汇总\n")
        f.write(f"- `tensorboard_logs/`: TensorBoard日志目录\n")
        f.write(f"- `train_sliding_window.log`: 训练日志\n")

@hydra.main(version_base=None, config_path="get_model/config", config_name="yeast_training")
def main(config: DictConfig):
    """Entry point: run the ATAC sliding-window training experiment.

    Hydra injects ``config`` from ``get_model/config/yeast_training.yaml``.
    Validates the configured ATAC input file, prepares the output base
    directory, builds a lightweight training-config object and delegates
    to ``train_experiment``.

    Raises:
        FileNotFoundError: if the configured ATAC1 input file is missing.
    """
    logger = logging.getLogger(__name__)
    logger.info("开始ATAC滑动窗口训练")
    logger.info(f"配置路径: {config}")

    # Pull data paths and run metadata from the YAML config.
    input_files = config.data.input_files
    output_base_dir = config.data.output_base_dir
    training_name = config.training_name
    training_description = config.training_description
    output_dir = config.output_dir

    # Fail fast when the ATAC input is missing; include the offending path
    # in the exception itself so it is visible even without the log.
    atac1_path = input_files.atac1
    if not os.path.exists(atac1_path):
        logger.error(f"ATAC1文件不存在: {atac1_path}")
        raise FileNotFoundError(f"ATAC1文件缺失: {atac1_path}")

    logger.info(f"使用数据文件: {atac1_path}")

    # Ensure the base output directory exists.
    Path(output_base_dir).mkdir(parents=True, exist_ok=True)

    # A plain attribute container is clearer than the previous dynamic
    # type('TrainingConfig', (), {...})() construction; attribute access
    # by train_experiment is identical.
    from types import SimpleNamespace
    training_config = SimpleNamespace(
        name=training_name,
        description=training_description,
        output_dir=output_dir,
        input_files=['atac1'],
    )

    # Run the training; log and re-raise on failure so hydra records it.
    try:
        logger.info(f"开始训练: {training_name}")
        result = train_experiment('atac_training', training_config, config)
        logger.info("训练完成")
        return result
    except Exception as e:
        logger.error(f"训练失败: {e}")
        raise

# Script entry point: when run from the CLI, hydra parses arguments and
# injects the composed config into main().
if __name__ == "__main__":
    main()
