import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import warnings
import datetime

warnings.filterwarnings("ignore")

# Configuration: path to the trained model checkpoint to evaluate.
CHECKPOINT_PATH = "/root/autodl-tmp/GetForYeast/output/single_atac_20250722_003254/best_model.pth"

# Overexpression (OE) dataset paths: dataset name -> .npy feature file.
OE_DATA_PATHS = {
    "ste12_plasmid_OE": "/root/autodl-tmp/GetForYeast/input/OE/ste12_plasmid_expression_promoter2_copy5.npy"
}

# Create a timestamped output directory so repeated runs never overwrite each other.
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
OUTPUT_DIR = Path(f"/root/autodl-tmp/GetForYeast/test_results_oe_{timestamp}")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

class YeastPeakDataset(Dataset):
    """Dataset of (peak-level feature vector, expression label) pairs.

    The backing .npy file has shape (num_samples, num_peaks, num_features).
    The last column is the regression label; the first
    motif_dim + accessibility_dim + condition_dim columns are the input
    features, laid out contiguously in that order. Peaks whose label is
    NaN or Inf are filtered out at construction time.
    """

    def __init__(self, data_path: str):
        # mmap keeps memory low for large files; rows are copied on access.
        self.data = np.load(data_path, mmap_mode='r')
        logging.info(f"加载测试数据: {data_path}")

        num_samples, num_peaks, num_features = self.data.shape
        self.motif_dim = 283
        self.accessibility_dim = 1
        self.condition_dim = 60
        # Total width of the contiguous input-feature slice.
        self.feature_dim = self.motif_dim + self.accessibility_dim + self.condition_dim

        # Filter out entries whose label (last column) is NaN or Inf.
        labels = self.data[:, :, -1]
        valid_mask = ~(np.isnan(labels) | np.isinf(labels))
        valid_indices = np.where(valid_mask)
        self.valid_indices = list(zip(valid_indices[0], valid_indices[1]))
        self.total = len(self.valid_indices)

        logging.info(f"测试数据集: 样本={num_samples}, peaks/样本={num_peaks}, "
                    f"特征数={num_features}, 有效样本={self.total}")

    def __len__(self):
        return self.total

    def __getitem__(self, idx: int):
        sample_idx, peak_idx = self.valid_indices[idx]
        # Copy out of the read-only mmap so torch.tensor gets a writable
        # array (avoids the non-writable-array UserWarning).
        row = np.array(self.data[sample_idx, peak_idx])

        # The motif / accessibility / condition groups are contiguous, so a
        # single slice is equivalent to slicing each group and concatenating.
        all_features = torch.tensor(row[:self.feature_dim], dtype=torch.float32).unsqueeze(0)

        # Label is the last column.
        label = torch.tensor(row[-1], dtype=torch.float32).unsqueeze(0)

        # Return the indices too so predictions can be traced back to peaks.
        return {'motif_features': all_features, 'sample_idx': sample_idx, 'peak_idx': peak_idx}, label

def test_single_dataset(model, device, data_path, dataset_name, logger):
    """Evaluate `model` on a single OE dataset and persist results.

    Runs inference over every valid peak, computes summary statistics
    (Pearson r, regression slope/intercept, MSE), writes a per-peak
    prediction CSV and a scatter plot into OUTPUT_DIR, and returns a
    metrics dict consumed by the summary report.
    """
    logger.info(f"加载测试数据: {data_path}")
    test_dataset = YeastPeakDataset(data_path)
    test_loader = DataLoader(
        test_dataset,
        batch_size=512,
        shuffle=False,
        num_workers=4,
        pin_memory=True
    )

    # nn.DataParallel only forwards __call__/forward; custom methods such as
    # compute_loss live on the wrapped module, so resolve them there.
    loss_model = model.module if isinstance(model, nn.DataParallel) else model

    logger.info("开始测试...")
    total_loss = 0
    all_preds = []
    all_targets = []
    all_sample_indices = []
    all_peak_indices = []

    with torch.no_grad():
        for batch_x, batch_y in tqdm(test_loader, desc=f'评估{dataset_name}'):
            batch_x = {k: v.to(device) for k, v in batch_x.items()}
            batch_y = batch_y.to(device)

            outputs = model(batch_x)
            loss = loss_model.compute_loss(outputs, batch_y)

            total_loss += loss.item()

            pred = outputs.detach().cpu().numpy().flatten()
            target = batch_y.detach().cpu().numpy().flatten()
            all_preds.extend(pred.tolist())
            all_targets.extend(target.tolist())

            # Track peak/sample indices so predictions stay traceable.
            sample_indices = batch_x['sample_idx'].detach().cpu().numpy().flatten()
            peak_indices = batch_x['peak_idx'].detach().cpu().numpy().flatten()
            all_sample_indices.extend(sample_indices.tolist())
            all_peak_indices.extend(peak_indices.tolist())

    avg_loss = total_loss / len(test_loader)

    # Summary statistics over the full prediction set.
    all_preds_np = np.array(all_preds)
    all_targets_np = np.array(all_targets)

    try:
        r, p = pearsonr(all_targets_np, all_preds_np)
        reg = linregress(all_targets_np, all_preds_np)
        slope = reg.slope
        intercept = reg.intercept
        mse = mean_squared_error(all_targets_np, all_preds_np)
    except Exception as e:
        # Degenerate inputs (e.g. constant targets) make these undefined.
        r, p, slope, intercept, mse = float('nan'), float('nan'), float('nan'), float('nan'), float('nan')
        logger.warning(f"相关性或回归计算失败: {e}")

    logger.info(f"{dataset_name} 测试结果: Loss={avg_loss:.4f}, Pearson r={r:.4f}, "
                f"Slope={slope:.4f}, Intercept={intercept:.4f}, MSE={mse:.4f}, N={len(all_targets):,}")

    # Report coverage of the sample/peak index space.
    unique_samples = len(set(all_sample_indices))
    unique_peaks = len(set(all_peak_indices))
    logger.info(f"{dataset_name} 数据统计: 唯一样本数={unique_samples}, 唯一peak数={unique_peaks}")
    logger.info(f"样本索引范围: {min(all_sample_indices)} - {max(all_sample_indices)}")
    logger.info(f"Peak索引范围: {min(all_peak_indices)} - {max(all_peak_indices)}")

    # Persist per-peak predictions.
    df_test = pd.DataFrame({
        'sample_idx': all_sample_indices,
        'peak_idx': all_peak_indices,
        'pred': all_preds,
        'true': all_targets
    })
    df_test.to_csv(OUTPUT_DIR / f'{dataset_name}_predictions.csv', index=False)

    # Scatter plot of predicted vs true expression.
    plt.figure(figsize=(12, 10))
    plt.scatter(all_targets, all_preds, alpha=0.6, s=50, c='blue', edgecolors='black', linewidth=0.5)
    plt.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')

    if not np.isnan(slope):
        x_range = np.linspace(0, 13, 100)
        y_reg = slope * x_range + intercept
        plt.plot(x_range, y_reg, 'g-', linewidth=2, label=f'y={slope:.3f}x+{intercept:.3f}')

    plt.xlabel('True Expression (log1p)', fontsize=14)
    plt.ylabel('Predicted Expression (log1p)', fontsize=14)
    plt.title(f'{dataset_name} - Model Evaluation', fontsize=16, fontweight='bold')
    plt.xlim(0, 13)
    plt.ylim(0, 13)
    plt.legend(loc='lower right', fontsize=12)
    plt.grid(True, alpha=0.3)

    # Stats box. pearsonr never returns None, so NaN (the failure sentinel
    # above) is the condition that should render as N/A.
    p_value_text = f'P-value = {p:.2e}' if not np.isnan(p) else 'P-value = N/A'
    test_stats_text = f'Test Results:\nPearson r = {r:.4f}\n{p_value_text}\nSlope = {slope:.4f}\nIntercept = {intercept:.4f}\nMSE = {mse:.4f}\nN = {len(all_targets):,}'
    plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes,
            fontsize=12, verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.9))

    plt.tight_layout()
    plt.savefig(OUTPUT_DIR / f'{dataset_name}_scatter.png', dpi=300, bbox_inches='tight')
    plt.close()

    return {
        'dataset_name': dataset_name,
        'loss': avg_loss,
        'pearson_r': r,
        'p_value': p,
        'slope': slope,
        'intercept': intercept,
        'mse': mse,
        'sample_count': len(all_targets),
        'unique_samples': unique_samples,
        'unique_peaks': unique_peaks
    }

def generate_summary_report(all_results, logger):
    """Write a CSV and console summary of per-dataset metrics, then trigger summary plots.

    `all_results` maps dataset name -> metrics dict produced by
    test_single_dataset. Metrics that failed to compute are stored as NaN
    and are rendered uniformly as 'N/A'.
    """
    logger.info(f"\n{'='*60}")
    logger.info("生成汇总报告...")

    if not all_results:
        logger.warning("没有成功测试的数据集")
        return

    def _fmt(value, spec):
        # Failed metric computations are stored as NaN (never None, since
        # pearsonr/linregress return floats); render both defensively as N/A.
        try:
            if value is None or np.isnan(value):
                return 'N/A'
        except TypeError:
            pass
        return format(value, spec)

    # Build the summary table rows.
    summary_data = []
    for dataset_name, result in all_results.items():
        summary_data.append({
            'Dataset': dataset_name,
            'Loss': _fmt(result['loss'], '.6f'),
            'Pearson r': _fmt(result['pearson_r'], '.6f'),
            'P-value': _fmt(result['p_value'], '.2e'),
            'Slope': _fmt(result['slope'], '.6f'),
            'Intercept': _fmt(result['intercept'], '.6f'),
            'MSE': _fmt(result['mse'], '.6f'),
            'Sample Count': f"{result['sample_count']:,}",
            'Unique Samples': f"{result['unique_samples']:,}",
            'Unique Peaks': f"{result['unique_peaks']:,}"
        })

    # Persist the summary table.
    df_summary = pd.DataFrame(summary_data)
    df_summary.to_csv(OUTPUT_DIR / 'summary_report.csv', index=False)

    # Console view of the table.
    logger.info("\n数据集测试结果汇总:")
    logger.info("=" * 120)
    logger.info(f"{'Dataset':<12} {'Loss':<10} {'Pearson r':<10} {'P-value':<10} {'Slope':<10} {'MSE':<10} {'Samples':<10}")
    logger.info("=" * 120)

    for row in summary_data:
        logger.info(f"{row['Dataset']:<12} {row['Loss']:<10} {row['Pearson r']:<10} {row['P-value']:<10} "
                   f"{row['Slope']:<10} {row['MSE']:<10} {row['Sample Count']:<10}")

    # Cross-dataset comparison figures.
    generate_summary_plots(all_results, logger)

    logger.info(f"\n汇总报告已保存到: {OUTPUT_DIR}")

def generate_summary_plots(all_results, logger):
    """Render cross-dataset comparison bar charts and a metric-value heatmap."""
    logger.info("生成汇总可视化图表...")

    names = list(all_results.keys())

    # 1. One bar-chart panel per metric, laid out on a 2x2 grid.
    fig, axes = plt.subplots(2, 2, figsize=(16, 12))
    panel_specs = [
        ('loss', 'Loss Comparison Across Datasets', 'Loss', 'skyblue'),
        ('pearson_r', 'Pearson Correlation Comparison', 'Pearson r', 'lightgreen'),
        ('mse', 'MSE Comparison Across Datasets', 'MSE', 'lightcoral'),
        ('sample_count', 'Sample Count Comparison', 'Sample Count', 'gold'),
    ]
    for ax, (key, title, ylabel, color) in zip(axes.flat, panel_specs):
        ax.bar(names, [all_results[name][key] for name in names], color=color, alpha=0.7)
        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.set_ylabel(ylabel)
        ax.tick_params(axis='x', rotation=45)

    plt.tight_layout()
    plt.savefig(OUTPUT_DIR / 'summary_comparison.png', dpi=300, bbox_inches='tight')
    plt.close()

    # 2. Heatmap of raw metric values (rows: metrics, columns: datasets).
    metric_keys = ['loss', 'pearson_r', 'mse', 'sample_count']
    metric_labels = ['Loss', 'Pearson r', 'MSE', 'Sample Count']

    metric_matrix = np.array(
        [[all_results[name][key] for name in names] for key in metric_keys]
    )

    plt.figure(figsize=(10, 6))
    image = plt.imshow(metric_matrix, cmap='RdYlBu_r', aspect='auto')
    plt.colorbar(image)

    plt.xticks(range(len(names)), names, rotation=45)
    plt.yticks(range(len(metric_keys)), metric_labels)
    plt.title('Performance Metrics Heatmap Across Datasets', fontsize=14, fontweight='bold')

    # Annotate each cell with its value; skip undefined (NaN) metrics.
    for row_idx in range(len(metric_keys)):
        for col_idx in range(len(names)):
            cell = metric_matrix[row_idx, col_idx]
            if not np.isnan(cell):
                plt.text(col_idx, row_idx, f'{cell:.3f}', ha='center', va='center', fontsize=10)

    plt.tight_layout()
    plt.savefig(OUTPUT_DIR / 'performance_heatmap.png', dpi=300, bbox_inches='tight')
    plt.close()

    logger.info("汇总可视化图表生成完成")

def main():
    """Entry point: load the trained model, evaluate every configured OE
    dataset, and write per-dataset and summary reports into OUTPUT_DIR."""
    # Log to both the console and a file in the run's output directory.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(OUTPUT_DIR / 'test.log', mode='w', encoding='utf-8')
        ]
    )
    logger = logging.getLogger(__name__)

    logger.info("开始OE数据集模型测试...")
    logger.info(f"模型路径: {CHECKPOINT_PATH}")
    logger.info(f"待测试数据集数量: {len(OE_DATA_PATHS)}")
    # Derive the dataset list from the config instead of a hard-coded string
    # (the previous message had gone stale against OE_DATA_PATHS).
    logger.info(f"包含数据集: {', '.join(OE_DATA_PATHS.keys())}")

    # Fail fast if the checkpoint is missing.
    if not os.path.exists(CHECKPOINT_PATH):
        raise FileNotFoundError(f"模型文件不存在: {CHECKPOINT_PATH}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    logger.info("加载模型...")
    try:
        # SECURITY: weights_only=False unpickles arbitrary objects; only load
        # checkpoints from trusted sources.
        checkpoint = torch.load(CHECKPOINT_PATH, map_location=device, weights_only=False)
        logger.info("成功加载checkpoint")

        # Model configuration mirroring the training setup (nested structure).
        model_cfg = {
            'region_embed': {
                'num_features': 344,  # motif(283) + accessibility(1) + condition(60) = 344
                'embed_dim': 768
            },
            'encoder': {
                'embed_dim': 768,
                'num_heads': 12,
                'num_layers': 12,
                'dropout': 0.1
            },
            'head_exp': {
                'embed_dim': 768,
                'output_dim': 1
            }
        }

        # Instantiate the model with LoRA adapters disabled (pure inference).
        model = YeastModel(
            cfg=model_cfg,
            use_lora=False,
            lora_rank=16,
            lora_alpha=32,
            lora_layers=4,
        )

        model.load_state_dict(checkpoint['model_state_dict'])
        logger.info("成功加载模型权重")

    except Exception as e:
        logger.error(f"加载模型失败: {e}")
        raise

    # Multi-GPU inference via DataParallel when more than one GPU is visible.
    if torch.cuda.device_count() > 1:
        logger.info(f"使用 {torch.cuda.device_count()} 个GPU")
        model = nn.DataParallel(model)
    model = model.to(device)

    # Evaluation mode: disables dropout etc.
    model.eval()

    # Accumulate per-dataset metric dicts for the summary report.
    all_results = {}

    for dataset_name, data_path in OE_DATA_PATHS.items():
        logger.info(f"\n{'='*60}")
        logger.info(f"开始测试数据集: {dataset_name}")
        logger.info(f"数据路径: {data_path}")

        # Skip missing data files rather than aborting the whole run.
        if not os.path.exists(data_path):
            logger.error(f"数据文件不存在: {data_path}")
            continue

        try:
            result = test_single_dataset(model, device, data_path, dataset_name, logger)
            all_results[dataset_name] = result

        except Exception as e:
            # A failure on one dataset should not stop the remaining ones.
            logger.error(f"测试数据集 {dataset_name} 时发生错误: {e}")
            continue

    generate_summary_report(all_results, logger)

    logger.info(f"\n所有测试完成！结果已保存到: {OUTPUT_DIR}")

# Script entry point: run the full OE evaluation pipeline.
if __name__ == "__main__":
    main()
