import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import warnings
import datetime

# Silence all library warnings (torch / pandas / matplotlib deprecation noise).
warnings.filterwarnings("ignore")

# Configuration: trained checkpoint and the 3-D test matrix
# (shape: samples x peaks x features, last column is the expression label —
# see YeastPeakDataset.__getitem__).
CHECKPOINT_PATH = "/root/autodl-tmp/GetForYeast/output/single_atac_20250722_003254/best_model.pth"
TEST_DATA_PATH = "/root/autodl-tmp/GetForYeast/4numpy/plasmid_3d_matrix_improved.npy"

# Feature-layout dimensions: each data row is
# [motif (283) | accessibility (1) | condition (60) | label (1)].
MOTIF_DIM = 283
ACCESSIBILITY_DIM = 1
CONDITION_DIM = 60
TOTAL_FEATURES = MOTIF_DIM + ACCESSIBILITY_DIM + CONDITION_DIM

# Model hyperparameters; must match the architecture the checkpoint was trained with.
MODEL_CONFIG = {
    'region_embed': {
        'num_features': TOTAL_FEATURES,  # motif(283) + accessibility(1) + condition(60) = 344
        'embed_dim': 768
    },
    'encoder': {
        'embed_dim': 768,
        'num_heads': 12,
        'num_layers': 12,
        'dropout': 0.1
    },
    'head_exp': {
        'embed_dim': 768,
        'output_dim': 1
    }
}

# Create a timestamped output directory so repeated runs never overwrite results.
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
OUTPUT_DIR = Path(f"/root/autodl-tmp/GetForYeast/test_results_single_{timestamp}")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

class YeastPeakDataset(Dataset):
    """Peak-level dataset backed by a memory-mapped 3-D numpy matrix.

    Every (sample, peak) position yields one row laid out as
    [motif | accessibility | condition | label], where the label occupies
    the final column.  Positions whose label is NaN or infinite are
    filtered out during construction.
    """

    def __init__(self, data_path: str):
        # mmap_mode keeps the potentially large matrix on disk instead of RAM.
        self.data = np.load(data_path, mmap_mode='r')
        logging.info(f"加载测试数据: {data_path}")

        num_samples, num_peaks, num_features = self.data.shape
        self.motif_dim = MOTIF_DIM
        self.accessibility_dim = ACCESSIBILITY_DIM
        self.condition_dim = CONDITION_DIM

        # Keep only the (sample, peak) positions whose label is finite
        # (isfinite == not NaN and not +/-inf).
        labels = self.data[:, :, -1]
        finite_mask = np.isfinite(labels)
        row_ids, col_ids = np.nonzero(finite_mask)
        self.valid_indices = list(zip(row_ids, col_ids))
        self.total = len(self.valid_indices)

        logging.info(f"测试数据集: 样本={num_samples}, peaks/样本={num_peaks}, "
                    f"特征数={num_features}, 有效样本={self.total}")

    def __len__(self):
        # Number of valid (finite-label) peak positions.
        return self.total

    def __getitem__(self, idx: int):
        sample_idx, peak_idx = self.valid_indices[idx]
        row = self.data[sample_idx, peak_idx]

        m = self.motif_dim
        a = self.accessibility_dim
        c = self.condition_dim

        # Slice each feature group out of the row, then stitch them back
        # together into a single (1, TOTAL_FEATURES) tensor.
        feature_parts = [
            torch.tensor(row[:m], dtype=torch.float32).unsqueeze(0),
            torch.tensor(row[m:m + a], dtype=torch.float32).unsqueeze(0),
            torch.tensor(row[m + a:m + a + c], dtype=torch.float32).unsqueeze(0),
        ]
        all_features = torch.cat(feature_parts, dim=-1)

        # The expression label lives in the final column of the row.
        label = torch.tensor(row[-1], dtype=torch.float32).unsqueeze(0)

        # Extra bookkeeping fields are returned so downstream code can
        # write a fully annotated prediction CSV.
        return {
            'motif_features': all_features,
            'sample_idx': sample_idx,
            'peak_idx': peak_idx,
            'original_row': row
        }, label

def main():
    """Evaluate a trained YeastModel checkpoint on the plasmid test matrix.

    Loads CHECKPOINT_PATH and TEST_DATA_PATH (module-level constants), runs
    inference over the whole test set, computes loss / Pearson r / linear
    regression / MSE, and writes a fully annotated prediction CSV plus a
    scatter plot into OUTPUT_DIR.

    Raises:
        FileNotFoundError: if the checkpoint or test data file is missing.
    """
    # Log to both the console and a file inside the timestamped output dir.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(OUTPUT_DIR / 'test.log', mode='w', encoding='utf-8')
        ]
    )
    logger = logging.getLogger(__name__)

    logger.info("开始模型测试...")
    logger.info(f"模型路径: {CHECKPOINT_PATH}")
    logger.info(f"测试数据: {TEST_DATA_PATH}")

    # Fail fast if either input file is missing.
    if not os.path.exists(CHECKPOINT_PATH):
        raise FileNotFoundError(f"模型文件不存在: {CHECKPOINT_PATH}")
    if not os.path.exists(TEST_DATA_PATH):
        raise FileNotFoundError(f"测试数据文件不存在: {TEST_DATA_PATH}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # Build the test loader; shuffle=False keeps predictions aligned with
    # the (sample_idx, peak_idx) bookkeeping columns.
    logger.info("加载测试数据...")
    test_dataset = YeastPeakDataset(TEST_DATA_PATH)
    test_loader = DataLoader(
        test_dataset,
        batch_size=512,
        shuffle=False,
        num_workers=4,
        pin_memory=True
    )

    logger.info("加载模型...")
    try:
        # weights_only=False: the checkpoint may contain pickled
        # non-tensor training state beyond the state_dict.
        checkpoint = torch.load(CHECKPOINT_PATH, map_location=device, weights_only=False)
        logger.info("成功加载checkpoint")

        model = YeastModel(
            cfg=MODEL_CONFIG,
            use_lora=False,
            lora_rank=16,
            lora_alpha=32,
            lora_layers=4,
        )

        model.load_state_dict(checkpoint['model_state_dict'])
        logger.info("成功加载模型权重")

    except Exception as e:
        logger.error(f"加载模型失败: {e}")
        raise

    # Keep a handle on the unwrapped model: nn.DataParallel only proxies
    # forward(), not custom methods, so calling model.compute_loss on the
    # wrapper would raise AttributeError on multi-GPU machines (bug fix).
    core_model = model
    if torch.cuda.device_count() > 1:
        logger.info(f"使用 {torch.cuda.device_count()} 个GPU")
        model = nn.DataParallel(model)
    model = model.to(device)

    # Evaluation mode: disables dropout etc.
    model.eval()

    logger.info("开始测试...")
    total_loss = 0.0
    all_preds = []
    all_targets = []
    all_sample_indices = []
    all_peak_indices = []
    all_original_features = []

    with torch.no_grad():
        for batch_x, batch_y in tqdm(test_loader, desc='评估测试集'):
            # Record bookkeeping info before anything moves to the GPU.
            batch_sample_indices = batch_x['sample_idx'].cpu().numpy().flatten()
            batch_peak_indices = batch_x['peak_idx'].cpu().numpy().flatten()
            batch_original_rows = batch_x['original_row'].cpu().numpy()

            # Only the concatenated feature tensor is needed for inference.
            features_for_model = {'motif_features': batch_x['motif_features'].to(device)}
            batch_y = batch_y.to(device)

            outputs = model(features_for_model)
            # Use the unwrapped model so this works with and without DataParallel.
            loss = core_model.compute_loss(outputs, batch_y)

            total_loss += loss.item()

            pred = outputs.detach().cpu().numpy().flatten()
            target = batch_y.detach().cpu().numpy().flatten()

            all_preds.extend(pred.tolist())
            all_targets.extend(target.tolist())
            all_sample_indices.extend(batch_sample_indices.tolist())
            all_peak_indices.extend(batch_peak_indices.tolist())
            all_original_features.extend(batch_original_rows.tolist())

    # Guard against an empty loader so we never divide by zero.
    avg_loss = total_loss / max(len(test_loader), 1)

    all_preds_np = np.array(all_preds)
    all_targets_np = np.array(all_targets)

    try:
        r, p = pearsonr(all_targets_np, all_preds_np)
        reg = linregress(all_targets_np, all_preds_np)
        slope = reg.slope
        intercept = reg.intercept
        mse = mean_squared_error(all_targets_np, all_preds_np)
    except Exception as e:
        r, p, slope, intercept, mse = float('nan'), float('nan'), float('nan'), float('nan'), float('nan')
        logger.warning(f"相关性或回归计算失败: {e}")

    logger.info(f"测试集评估结果: Loss={avg_loss:.4f}, Pearson r={r:.4f}, "
                f"Slope={slope:.4f}, Intercept={intercept:.4f}, MSE={mse:.4f}, N={len(all_targets):,}")

    # Build all 344 feature columns in one pass and concatenate once:
    # inserting columns one-by-one fragments the DataFrame (pandas
    # PerformanceWarning) and scales poorly with the number of columns.
    feature_matrix = np.asarray(all_original_features)
    if feature_matrix.size == 0:
        # Keep downstream column slicing valid even when no rows were produced.
        feature_matrix = feature_matrix.reshape(0, TOTAL_FEATURES + 1)

    feature_cols = {f'motif_{i:03d}': feature_matrix[:, i] for i in range(MOTIF_DIM)}
    feature_cols['accessibility'] = feature_matrix[:, MOTIF_DIM]
    for i in range(CONDITION_DIM):
        feature_cols[f'condition_{i:02d}'] = feature_matrix[:, MOTIF_DIM + ACCESSIBILITY_DIM + i]

    df_test = pd.concat(
        [
            pd.DataFrame({
                'sample_idx': all_sample_indices,
                'peak_idx': all_peak_indices,
                'pred': all_preds,
                'true': all_targets
            }),
            pd.DataFrame(feature_cols),
        ],
        axis=1,
    )

    df_test.to_csv(OUTPUT_DIR / 'test_predictions.csv', index=False)
    logger.info(f"预测结果已保存到: {OUTPUT_DIR / 'test_predictions.csv'}")
    logger.info(f"CSV包含 {len(df_test.columns)} 列: 样本索引、peak索引、预测值、真实值、motif特征({MOTIF_DIM}列)、accessibility特征({ACCESSIBILITY_DIM}列)、condition特征({CONDITION_DIM}列)")

    # Scatter plot of predictions vs. ground truth with y=x reference
    # and the fitted regression line.
    plt.figure(figsize=(12, 10))
    plt.scatter(all_targets, all_preds, alpha=0.3, s=2, c='blue', edgecolors='none')
    plt.plot([0, 13], [0, 13], 'r--', linewidth=2, label='y=x')

    if not np.isnan(slope):
        x_range = np.linspace(0, 13, 100)
        y_reg = slope * x_range + intercept
        plt.plot(x_range, y_reg, 'g-', linewidth=2, label=f'y={slope:.3f}x+{intercept:.3f}')

    plt.xlabel('True Expression (log1p)', fontsize=14)
    plt.ylabel('Predicted Expression (log1p)', fontsize=14)
    plt.title('Test Set - Model Evaluation', fontsize=16, fontweight='bold')
    plt.xlim(0, 13)
    plt.ylim(0, 13)
    plt.legend(loc='lower right', fontsize=12)
    plt.grid(True, alpha=0.3)

    # Annotate the plot with summary statistics.  pearsonr never returns
    # None, so check for NaN (bug fix: the original tested `p is not None`,
    # which was always True and could print "P-value = nan").
    p_value_text = f'P-value = {p:.2e}' if not np.isnan(p) else 'P-value = N/A'
    test_stats_text = f'Test Results:\nPearson r = {r:.4f}\n{p_value_text}\nSlope = {slope:.4f}\nIntercept = {intercept:.4f}\nMSE = {mse:.4f}\nN = {len(all_targets):,}'
    plt.text(0.02, 0.98, test_stats_text, transform=plt.gca().transAxes, 
            fontsize=12, verticalalignment='top', 
            bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.9))

    plt.tight_layout()
    plt.savefig(OUTPUT_DIR / 'test_set_scatter.png', dpi=300, bbox_inches='tight')
    plt.close()

    logger.info(f"测试完成！结果已保存到: {OUTPUT_DIR}")
    logger.info(f"测试结果: 损失={avg_loss:.6f}, Pearson r={r:.6f}, 样本数={len(all_targets):,}")

# Script entry point: run the full evaluation pipeline.
if __name__ == "__main__":
    main()
