"""
酵母基因表达预测模型测试脚本 - 滑动窗口版本 (Sliding Window Version)

主要特性:
1. 滑动窗口方法: 考虑peak之间的相关性，每次处理连续的peak序列
2. 可配置的窗口参数: 窗口大小、滑动步长、最少有效peak数量
3. 完整的特征保存: 保存所有peak的原始特征用于后续分析

滑动窗口方法优势:
- 能够捕捉peak之间的空间相关性和调控关系
- 模拟真实的染色质结构，相邻peak可能共享调控机制
- 提高模型对空间信息的利用效率

使用方法:
- 在config.py中设置 USE_SLIDING_WINDOW = True 启用滑动窗口方法
- 调整 SLIDING_WINDOW_CONFIG 中的参数
- 运行脚本进行测试

注意: 此版本需要配合支持滑动窗口训练的模型使用，建议先使用 train_yeast_sliding_window.py 训练模型
"""

import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import warnings
import datetime

# 导入配置
from config import (
    MODEL_CONFIG, SLIDING_WINDOW_CONFIG, OUTPUT_ID_CONFIG,
    MOTIF_DIM, ACCESSIBILITY_DIM, CONDITION_DIM, TOTAL_FEATURES,
    MODEL_ARCH_CONFIG, BATCH_SIZE, NUM_WORKERS, PIN_MEMORY,
    get_output_dir, validate_config
)

# Silence third-party warnings so evaluation logs stay readable
warnings.filterwarnings("ignore")

# Timestamped output directory for logs, prediction CSVs and plots
OUTPUT_DIR = get_output_dir()
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

class YeastPeakSlidingWindowDataset(Dataset):
    """
    Sliding-window dataset that models correlations between neighboring peaks.

    Each item is a contiguous run of peaks (a window) rather than a single
    peak, so a downstream model can exploit spatial/regulatory relationships
    between adjacent peaks.
    """

    def __init__(self, data_path: str, window_config: dict):
        """
        Args:
            data_path: path to a numpy array of shape
                [num_samples, num_peaks, num_features]; the last feature
                column holds the per-peak expression label.
            window_config: dict with 'window_size', 'stride' and
                'min_valid_peaks' keys (see SLIDING_WINDOW_CONFIG).
        """
        # mmap keeps memory usage low for large test sets
        self.data = np.load(data_path, mmap_mode='r')
        self.window_size = window_config['window_size']
        self.stride = window_config['stride']
        self.min_valid_peaks = window_config['min_valid_peaks']

        logging.info(f"加载测试数据: {data_path}")
        logging.info(f"滑动窗口配置: 窗口大小={self.window_size}, 步长={self.stride}, 最少有效peak={self.min_valid_peaks}")

        num_samples, num_peaks, num_features = self.data.shape
        # Feature layout along the last axis: [motif | accessibility | condition | ... | label]
        self.motif_dim = MOTIF_DIM
        self.accessibility_dim = ACCESSIBILITY_DIM
        self.condition_dim = CONDITION_DIM

        # Pre-compute every window's (sample, start, end) record once
        self.window_indices = self._generate_sliding_windows(num_samples, num_peaks)
        self.total = len(self.window_indices)

        logging.info(f"测试数据集: 样本={num_samples}, peaks/样本={num_peaks}, "
                    f"特征数={num_features}, 滑动窗口数={self.total}")

    def _generate_sliding_windows(self, num_samples: int, num_peaks: int):
        """
        Generate sliding-window index records for every sample.

        Parameters (from window_config):
        - window_size: number of consecutive peaks per window
        - stride: distance the window advances each step
        - min_valid_peaks: minimum number of peaks whose label is finite
          (not NaN/inf) required for a window to be kept

        Strategy:
        1. Standard windows: [start, start + window_size) stepped by stride.
        2. Trailing window: added ONLY when the standard windows do not reach
           the end of the peak axis; the largest size down to
           max(min_valid_peaks, window_size // 2) that passes the validity
           filter is used.
        3. Quality filter: windows with fewer than min_valid_peaks finite
           labels are dropped.

        Bug fix vs. the previous version: the trailing window used to be
        appended whenever window_size > 1, which duplicated the last standard
        window when the stride already covered the end of the data. It is now
        only added when there are genuinely uncovered peaks.
        """
        window_indices = []

        for sample_idx in range(num_samples):
            # Rightmost peak index covered by the standard windows of this sample
            covered_end = 0

            # 1. Standard sliding windows
            for start_idx in range(0, num_peaks - self.window_size + 1, self.stride):
                end_idx = start_idx + self.window_size
                covered_end = end_idx

                # Count peaks with finite labels inside the window
                window_labels = self.data[sample_idx, start_idx:end_idx, -1]
                valid_peaks = int(np.sum(~(np.isnan(window_labels) | np.isinf(window_labels))))

                if valid_peaks >= self.min_valid_peaks:
                    window_indices.append({
                        'sample_idx': sample_idx,
                        'start_idx': start_idx,
                        'end_idx': end_idx,
                        'valid_peaks': valid_peaks,
                        'window_type': 'standard',
                        'actual_size': self.window_size
                    })

            # 2. Trailing window for peaks the stride left uncovered
            if covered_end < num_peaks:
                # Require at least half a window (or min_valid_peaks) of real peaks
                min_required_size = max(self.min_valid_peaks, self.window_size // 2)

                # Try the largest trailing window first, shrink until one is valid
                for end_size in range(self.window_size, min_required_size - 1, -1):
                    start_idx = num_peaks - end_size
                    if start_idx >= 0:
                        window_labels = self.data[sample_idx, start_idx:num_peaks, -1]
                        valid_peaks = int(np.sum(~(np.isnan(window_labels) | np.isinf(window_labels))))

                        if valid_peaks >= self.min_valid_peaks:
                            window_indices.append({
                                'sample_idx': sample_idx,
                                'start_idx': start_idx,
                                'end_idx': num_peaks,
                                'valid_peaks': valid_peaks,
                                'window_type': 'trailing',
                                'actual_size': end_size
                            })
                            break  # stop at the first (largest) acceptable size

        return window_indices

    def __len__(self):
        """Number of sliding windows across all samples."""
        return self.total

    def __getitem__(self, idx: int):
        """
        Return one window as a dict of tensors plus bookkeeping fields.

        Windows shorter than window_size (trailing windows at the end of the
        peak axis) are padded up to window_size — features with zeros, labels
        with NaN and the validity mask with False — so every item collates to
        the same shape.
        """
        window_info = self.window_indices[idx]
        sample_idx = window_info['sample_idx']
        start_idx = window_info['start_idx']
        end_idx = window_info['end_idx']
        window_type = window_info['window_type']
        actual_size = window_info['actual_size']

        # All peaks of this window: [actual_size, num_features]
        window_data = self.data[sample_idx, start_idx:end_idx]

        # Slice the feature groups out of the flat feature axis
        motif_features = torch.tensor(window_data[:, :self.motif_dim], dtype=torch.float32)
        accessibility_features = torch.tensor(window_data[:, self.motif_dim:self.motif_dim+self.accessibility_dim], dtype=torch.float32)
        condition_features = torch.tensor(window_data[:, self.motif_dim+self.accessibility_dim:self.motif_dim+self.accessibility_dim+self.condition_dim], dtype=torch.float32)

        # Full per-peak feature vector (motif + accessibility + condition)
        all_features = torch.cat([motif_features, accessibility_features, condition_features], dim=-1)

        # The label (expression value) is the last feature column
        labels = torch.tensor(window_data[:, -1], dtype=torch.float32)

        # True where the label is finite (usable for loss/metrics)
        valid_mask = ~(torch.isnan(labels) | torch.isinf(labels))

        if actual_size < self.window_size:
            # Pad undersized (trailing) windows up to the standard size
            padding_size = self.window_size - actual_size
            padding_features = torch.zeros(padding_size, all_features.size(1), dtype=torch.float32)
            padding_labels = torch.full((padding_size,), float('nan'), dtype=torch.float32)
            padding_mask = torch.zeros(padding_size, dtype=torch.bool)

            all_features = torch.cat([all_features, padding_features], dim=0)
            labels = torch.cat([labels, padding_labels], dim=0)
            valid_mask = torch.cat([valid_mask, padding_mask], dim=0)

        return {
            'motif_features': all_features,  # [window_size, total_features]
            'labels': labels,                # [window_size]
            'valid_mask': valid_mask,        # [window_size]
            'sample_idx': sample_idx,
            'start_idx': start_idx,
            'end_idx': end_idx,
            'window_type': window_type,      # 'standard' or 'trailing'
            'actual_size': actual_size,      # real (unpadded) window size
            'window_info': window_info
        }

def main():
    """Run sliding-window evaluation of a trained yeast expression model.

    Loads the checkpoint and test data configured in config.py, predicts
    expression for every valid peak in every sliding window, then logs a
    ranked set of metrics, saves a per-peak prediction CSV (with the original
    features attached) and a true-vs-predicted scatter plot to OUTPUT_DIR.
    """
    # Metric helpers used below; imported here to keep them close to use.
    from sklearn.metrics import mean_absolute_error, r2_score
    from scipy.stats import spearmanr, kendalltau

    # ---------------- logging ----------------
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(OUTPUT_DIR / 'test.log', mode='w', encoding='utf-8')
        ]
    )
    logger = logging.getLogger(__name__)

    logger.info("开始模型测试...")
    logger.info(f"模型路径: {MODEL_CONFIG['checkpoint_path']}")
    logger.info(f"测试数据: {MODEL_CONFIG['test_data_path']}")

    # ---------------- config & file sanity checks ----------------
    try:
        validate_config()
        logger.info("配置验证通过")
    except ValueError as e:
        logger.error(f"配置验证失败: {e}")
        raise

    if not os.path.exists(MODEL_CONFIG['checkpoint_path']):
        raise FileNotFoundError(f"模型文件不存在: {MODEL_CONFIG['checkpoint_path']}")
    if not os.path.exists(MODEL_CONFIG['test_data_path']):
        raise FileNotFoundError(f"测试数据文件不存在: {MODEL_CONFIG['test_data_path']}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # ---------------- data ----------------
    logger.info("加载测试数据...")
    test_dataset = YeastPeakSlidingWindowDataset(MODEL_CONFIG['test_data_path'], SLIDING_WINDOW_CONFIG)

    test_loader = DataLoader(
        test_dataset,
        batch_size=BATCH_SIZE,
        shuffle=False,
        num_workers=NUM_WORKERS,
        pin_memory=PIN_MEMORY
    )

    # ---------------- model ----------------
    logger.info("加载模型...")
    try:
        checkpoint = torch.load(MODEL_CONFIG['checkpoint_path'], map_location=device, weights_only=False)
        logger.info("成功加载checkpoint")

        model = YeastModel(
            cfg=MODEL_ARCH_CONFIG,
            use_lora=False,
            lora_rank=16,
            lora_alpha=32,
            lora_layers=4,
        )
        model.load_state_dict(checkpoint['model_state_dict'])
        logger.info("成功加载模型权重")
    except Exception as e:
        logger.error(f"加载模型失败: {e}")
        raise

    # Multi-GPU support
    if torch.cuda.device_count() > 1:
        logger.info(f"使用 {torch.cuda.device_count()} 个GPU")
        model = nn.DataParallel(model)
    model = model.to(device)
    model.eval()

    # Bug fix: nn.DataParallel does not forward custom methods such as
    # compute_loss, so resolve the loss function on the wrapped module.
    compute_loss = (model.module.compute_loss
                    if isinstance(model, nn.DataParallel)
                    else model.compute_loss)

    # ---------------- evaluation loop ----------------
    logger.info("开始测试...")
    total_loss = 0
    all_preds = []
    all_targets = []
    all_sample_indices = []
    all_peak_indices = []
    all_original_features = []

    with torch.no_grad():
        for batch_x in tqdm(test_loader, desc='评估测试集'):
            # Keep CPU copies of everything needed for bookkeeping
            batch_sample_indices = batch_x['sample_idx'].cpu().numpy().flatten()
            batch_start_indices = batch_x['start_idx'].cpu().numpy().flatten()
            batch_features = batch_x['motif_features'].cpu().numpy()  # [batch, window, total_features]
            batch_labels = batch_x['labels'].cpu().numpy()            # [batch, window]
            batch_valid_masks = batch_x['valid_mask'].cpu().numpy()   # [batch, window]

            # Move inputs to the device for inference
            features_for_model = {'motif_features': batch_x['motif_features'].to(device)}
            batch_labels_gpu = batch_x['labels'].to(device)
            batch_valid_masks_gpu = batch_x['valid_mask'].to(device)

            outputs = model(features_for_model)  # [batch, window, 1]

            # Loss averaged over valid (finite-label) peaks only
            loss = 0
            valid_count = 0
            for i in range(outputs.size(0)):
                for j in range(outputs.size(1)):
                    if batch_valid_masks_gpu[i, j]:
                        peak_loss = compute_loss(
                            outputs[i:i+1, j:j+1],
                            batch_labels_gpu[i:i+1, j:j+1]
                        )
                        loss += peak_loss.item()
                        valid_count += 1

            if valid_count > 0:
                total_loss += loss / valid_count

                # Collect per-peak predictions, targets and provenance
                for i in range(outputs.size(0)):
                    for j in range(outputs.size(1)):
                        if batch_valid_masks[i, j]:
                            all_preds.append(outputs[i, j].detach().cpu().numpy().item())
                            all_targets.append(batch_labels[i, j].item())
                            all_sample_indices.append(batch_sample_indices[i])
                            all_peak_indices.append(batch_start_indices[i] + j)  # absolute peak index
                            all_original_features.append(batch_features[i, j])

    avg_loss = total_loss / len(test_loader) if len(test_loader) > 0 else 0

    # ---------------- metrics ----------------
    all_preds_np = np.array(all_preds)
    all_targets_np = np.array(all_targets)

    try:
        # ===== primary metrics, ordered by importance =====
        mae = mean_absolute_error(all_targets_np, all_preds_np)
        spearman_rho, spearman_p = spearmanr(all_targets_np, all_preds_np)
        r2 = r2_score(all_targets_np, all_preds_np)
        r, p = pearsonr(all_targets_np, all_preds_np)
        kendall_tau, kendall_p = kendalltau(all_targets_np, all_preds_np)

        # Regression of predicted on true values
        reg = linregress(all_targets_np, all_preds_np)
        slope = reg.slope
        intercept = reg.intercept

        mse = mean_squared_error(all_targets_np, all_preds_np)
        # Epsilon avoids division by zero for zero-expression peaks
        mape = np.mean(np.abs((all_targets_np - all_preds_np) / (all_targets_np + 1e-8))) * 100
        rmse = np.sqrt(mse)
        median_ae = np.median(np.abs(all_targets_np - all_preds_np))

        # Distribution statistics of predictions and targets
        pred_mean = all_preds_np.mean()
        pred_std = all_preds_np.std()
        true_mean = all_targets_np.mean()
        true_std = all_targets_np.std()

        # Error distribution
        errors = all_preds_np - all_targets_np
        error_mean = errors.mean()
        error_std = errors.std()
        error_median = np.median(errors)
        q25_error = np.percentile(np.abs(errors), 25)
        q75_error = np.percentile(np.abs(errors), 75)

        # MAE split at the median expression level
        high_expr_mask = all_targets_np > np.median(all_targets_np)
        high_expr_mae = mean_absolute_error(all_targets_np[high_expr_mask], all_preds_np[high_expr_mask]) if np.sum(high_expr_mask) > 0 else float('nan')
        low_expr_mask = all_targets_np <= np.median(all_targets_np)
        low_expr_mae = mean_absolute_error(all_targets_np[low_expr_mask], all_preds_np[low_expr_mask]) if np.sum(low_expr_mask) > 0 else float('nan')

    except Exception as e:
        # Fall back to NaN so the report below can still be printed
        mae = spearman_rho = r2 = r = kendall_tau = float('nan')
        slope = intercept = mse = mape = rmse = median_ae = float('nan')
        pred_mean = pred_std = true_mean = true_std = float('nan')
        error_mean = error_std = error_median = q25_error = q75_error = float('nan')
        high_expr_mae = low_expr_mae = float('nan')
        spearman_p = kendall_p = p = float('nan')
        logger.warning(f"测试集指标计算失败: {e}")

    # min/max computed defensively so logging survives an empty test set
    t_min = all_targets_np.min() if all_targets_np.size else float('nan')
    t_max = all_targets_np.max() if all_targets_np.size else float('nan')
    pr_min = all_preds_np.min() if all_preds_np.size else float('nan')
    pr_max = all_preds_np.max() if all_preds_np.size else float('nan')

    # ---------------- metric report (ordered by importance) ----------------
    logger.info("测试集评估结果 (按重要性排序):")
    logger.info(f"  {'='*60}")

    logger.info("  🏆 主要指标:")
    logger.info(f"    1. MAE (平均绝对误差) = {mae:.6f} ← 主要指标")
    logger.info(f"    2. Spearman ρ (排序相关性) = {spearman_rho:.6f} (p = {spearman_p:.2e})")
    logger.info(f"    3. R² (决定系数) = {r2:.6f}")

    logger.info("  📊 相关性指标:")
    logger.info(f"    4. Pearson r (线性相关性) = {r:.6f} (p = {p:.2e})")
    logger.info(f"    5. Kendall τ (排序一致性) = {kendall_tau:.6f} (p = {kendall_p:.2e})")

    logger.info("  📈 回归分析:")
    logger.info(f"    6. 回归斜率 = {slope:.6f}")
    logger.info(f"    7. 回归截距 = {intercept:.6f}")

    logger.info("  ⚠️  误差指标:")
    logger.info(f"    8. MSE (均方误差) = {mse:.6f}")
    logger.info(f"    9. RMSE (均方根误差) = {rmse:.6f}")
    # Bug fix: this line previously printed MAE instead of MAPE
    logger.info(f"    10. MAPE (平均绝对百分比误差) = {mape:.2f}%")
    logger.info(f"    11. 中位数绝对误差 = {median_ae:.6f}")

    logger.info("  📋 分布统计:")
    logger.info(f"    真实值: 均值={true_mean:.4f}, 标准差={true_std:.4f}, 范围=[{t_min:.4f}, {t_max:.4f}]")
    logger.info(f"    预测值: 均值={pred_mean:.4f}, 标准差={pred_std:.4f}, 范围=[{pr_min:.4f}, {pr_max:.4f}]")

    logger.info("  🔍 误差分布:")
    logger.info(f"    误差均值={error_mean:.6f}, 误差标准差={error_std:.6f}, 误差中位数={error_median:.6f}")
    logger.info(f"    25%分位数误差={q25_error:.6f}, 75%分位数误差={q75_error:.6f}")

    logger.info("  🎯 分区域误差:")
    logger.info(f"    高表达区域MAE = {high_expr_mae:.6f}")
    logger.info(f"    低表达区域MAE = {low_expr_mae:.6f}")

    logger.info("  📊 样本信息:")
    logger.info(f"    样本数: {len(all_targets):,}")
    logger.info(f"    损失: {avg_loss:.6f}")
    logger.info(f"  {'='*60}")

    # ---------------- per-peak prediction CSV ----------------
    df_test = pd.DataFrame({
        'sample_idx': all_sample_indices,
        'peak_idx': all_peak_indices,
        'pred': all_preds,
        'true': all_targets
    })

    # Human-readable unique identifier per (sample, peak)
    df_test['unique_id'] = df_test.apply(
        lambda row: f"sample_{row['sample_idx']:04d}_peak_{row['peak_idx']:06d}",
        axis=1
    )

    # Group windows by sample once so the per-peak lookup below does not
    # rescan every window of every sample (was O(peaks × all windows)).
    windows_by_sample = {}
    for window_info in test_dataset.window_indices:
        windows_by_sample.setdefault(window_info['sample_idx'], []).append(window_info)

    # Attach the covering window's metadata to each peak
    window_info_list = []
    for sample_idx, peak_idx in zip(all_sample_indices, all_peak_indices):
        window_found = False
        for window_info in windows_by_sample.get(sample_idx, []):
            if window_info['start_idx'] <= peak_idx < window_info['end_idx']:
                # Position of the peak inside its window
                peak_in_window_pos = peak_idx - window_info['start_idx']

                window_info_list.append({
                    'window_id': f"sample_{sample_idx:04d}_window_{window_info['start_idx']:06d}_{window_info['end_idx']:06d}",
                    'window_type': window_info['window_type'],
                    'window_start': window_info['start_idx'],
                    'window_end': window_info['end_idx'],
                    'peak_in_window_pos': peak_in_window_pos,
                    'window_actual_size': window_info['actual_size']
                })
                window_found = True
                break

        if not window_found:
            # Defensive fallback: record the peak as a degenerate 1-peak window
            window_info_list.append({
                'window_id': f"sample_{sample_idx:04d}_peak_{peak_idx:06d}",
                'window_type': 'unknown',
                'window_start': peak_idx,
                'window_end': peak_idx + 1,
                'peak_in_window_pos': 0,
                'window_actual_size': 1
            })

    for key in ['window_id', 'window_type', 'window_start', 'window_end', 'peak_in_window_pos', 'window_actual_size']:
        df_test[key] = [info[key] for info in window_info_list]

    # Put the identifier columns first for readability
    id_columns = ['unique_id', 'sample_idx', 'peak_idx', 'window_id', 'window_type',
                  'window_start', 'window_end', 'peak_in_window_pos', 'window_actual_size']
    other_columns = [col for col in df_test.columns if col not in id_columns]
    df_test = df_test[id_columns + other_columns]

    # Attach the original input features so predictions can be re-analysed
    for i in range(MOTIF_DIM):
        df_test[f'motif_{i:03d}'] = [row[i] for row in all_original_features]

    df_test['accessibility'] = [row[MOTIF_DIM] for row in all_original_features]

    for i in range(CONDITION_DIM):
        df_test[f'condition_{i:02d}'] = [row[MOTIF_DIM + ACCESSIBILITY_DIM + i] for row in all_original_features]

    df_test.to_csv(OUTPUT_DIR / 'test_predictions.csv', index=False)
    logger.info(f"预测结果已保存到: {OUTPUT_DIR / 'test_predictions.csv'}")
    logger.info(f"CSV包含 {len(df_test.columns)} 列")
    logger.info(f"ID列: {', '.join(id_columns)}")
    logger.info(f"特征列: motif特征({MOTIF_DIM}列), accessibility特征({ACCESSIBILITY_DIM}列), condition特征({CONDITION_DIM}列)")

    logger.info(f"滑动窗口配置: 窗口大小={SLIDING_WINDOW_CONFIG['window_size']}, "
                f"步长={SLIDING_WINDOW_CONFIG['stride']}, 最少有效peak={SLIDING_WINDOW_CONFIG['min_valid_peaks']}")
    logger.info(f"总滑动窗口数: {len(test_dataset)}")

    # ---------------- scatter plot ----------------
    plt.figure(figsize=(12, 10))
    plt.scatter(all_targets, all_preds, alpha=0.3, s=2, c='blue', edgecolors='none')
    plt.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')

    if not np.isnan(slope):
        # Overlay the fitted regression line
        x_range = np.linspace(0, 13, 100)
        y_reg = slope * x_range + intercept
        plt.plot(x_range, y_reg, 'g-', linewidth=2, label=f'y={slope:.3f}x+{intercept:.3f}')

    plt.xlabel('True Expression (log1p)', fontsize=14)
    plt.ylabel('Predicted Expression (log1p)', fontsize=14)
    plt.title('Test Set - Model Evaluation', fontsize=16, fontweight='bold')
    plt.xlim(0, 13)
    plt.ylim(0, 13)
    plt.legend(loc='lower right', fontsize=12)
    plt.grid(True, alpha=0.3)

    # Bug fix: p is a float (possibly NaN), never None, so test NaN instead
    p_value_text = f'P-value = {p:.2e}' if not np.isnan(p) else 'P-value = N/A'
    test_stats_text = f'Test Results:\nMAE = {mae:.4f} (主要指标)\nSpearman ρ = {spearman_rho:.4f}\nR² = {r2:.4f}\nPearson r = {r:.4f}\n{p_value_text}\nSlope = {slope:.4f}\nIntercept = {intercept:.4f}\nMSE = {mse:.4f}\nN = {len(all_targets):,}'
    plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes,
            fontsize=12, verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.9))

    plt.tight_layout()
    plt.savefig(OUTPUT_DIR / 'test_set_scatter.png', dpi=300, bbox_inches='tight')
    plt.close()

    logger.info(f"测试完成！结果已保存到: {OUTPUT_DIR}")
    logger.info(f"测试结果: 损失={avg_loss:.6f}, MAE={mae:.6f} (主要指标), Spearman ρ={spearman_rho:.6f}, R²={r2:.6f}, 样本数={len(all_targets):,}")

    # ---------------- sliding-window statistics ----------------
    # Guard against an empty dataset to avoid ZeroDivisionError
    avg_valid_per_window = len(all_targets) / len(test_dataset) if len(test_dataset) > 0 else 0.0

    logger.info("=" * 60)
    logger.info("滑动窗口统计信息:")
    logger.info(f"  窗口大小: {SLIDING_WINDOW_CONFIG['window_size']}")
    logger.info(f"  滑动步长: {SLIDING_WINDOW_CONFIG['stride']}")
    logger.info(f"  最少有效peak: {SLIDING_WINDOW_CONFIG['min_valid_peaks']}")
    logger.info(f"  总滑动窗口数: {len(test_dataset)}")
    logger.info(f"  平均每个窗口有效peak数: {avg_valid_per_window:.1f}")
    logger.info(f"  总有效peak数: {len(all_targets):,}")

    # Tally windows by type and by actual size
    window_types = {}
    window_sizes = {}
    for window_info in test_dataset.window_indices:
        window_types[window_info['window_type']] = window_types.get(window_info['window_type'], 0) + 1
        window_sizes[window_info['actual_size']] = window_sizes.get(window_info['actual_size'], 0) + 1

    logger.info("  窗口类型分布:")
    for window_type, count in window_types.items():
        logger.info(f"    {window_type}: {count} 个")

    logger.info("  窗口大小分布:")
    for size, count in sorted(window_sizes.items()):
        logger.info(f"    {size} peaks: {count} 个窗口")

    logger.info("=" * 60)

# Script entry point: run the full sliding-window evaluation pipeline.
if __name__ == "__main__":
    main()
