import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from get_model.model.yeast_model import YeastModel
import logging
from pathlib import Path
import os
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr, linregress, spearmanr
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from tqdm import tqdm
import warnings
import argparse
import datetime

warnings.filterwarnings("ignore")


class SinglePeakDataset(Dataset):
    """Single-peak dataset (window_size=1).

    Intended for a set of non-contiguous peaks (e.g. different engineered
    versions of the same locus); each item feeds exactly one peak to the
    model as a length-1 sequence.

    Expected input .npy shape: [num_samples, num_peaks, num_features].
    When ``label_last`` is True the last column is the label (true
    expression) and the preceding columns are features:
    motif(283) + accessibility(1) + condition(60) = 344.
    """

    def __init__(self, data_path: str, motif_dim: int = 283, accessibility_dim: int = 1,
                 condition_dim: int = 60, label_last: bool = True,
                 force_input_dim: int = None, slice_strategy: str = 'head'):
        self.peak_ids = None
        self.data_path = data_path
        # Support both .npz and .npy, consistent with the sliding-window script.
        if data_path.endswith('.npz'):
            npz_file = np.load(data_path, mmap_mode='r')
            if 'data' not in npz_file:
                raise ValueError(".npz 文件缺少 'data' 键")
            self.data = npz_file['data']
            # peak_ids are required so outputs can be traced back to source peaks.
            if 'peak_ids' not in npz_file:
                raise ValueError(".npz 文件缺少 'peak_ids' 键，无法在输出中包含peak_id用于溯源")
            self.peak_ids = npz_file['peak_ids']
        else:
            self.data = np.load(data_path, mmap_mode='r')
        logging.info(f"加载单峰测试数据: {data_path}")

        num_samples, num_peaks, num_features = self.data.shape
        # Infer feature dimensionality (last column is the label when label_last).
        self.label_last = label_last
        self.feature_dim = num_features - 1 if label_last else num_features

        # Compatibility with older model input sizes (e.g. 344): optionally
        # crop features to a fixed width.
        self.force_input_dim = force_input_dim
        self.slice_strategy = slice_strategy  # 'head' or 'tail'

        # Validate the requested feature split against the data; on mismatch
        # fall back to treating all features as a single motif block.
        requested_sum = motif_dim + accessibility_dim + condition_dim
        if requested_sum != self.feature_dim:
            logging.warning(
                f"传入特征切分(#{requested_sum})与数据特征维度(#{self.feature_dim})不一致，"
                f"将使用自动模式：将全部 {self.feature_dim} 维作为单块特征(motif)，access/condition置0。"
            )
            self.motif_dim = self.feature_dim
            self.accessibility_dim = 0
            self.condition_dim = 0
        else:
            self.motif_dim = motif_dim
            self.accessibility_dim = accessibility_dim
            self.condition_dim = condition_dim

        # BUGFIX: resolve force_input_dim cropping ONCE here rather than inside
        # __getitem__. The original mutated self.*_dim on every item fetch,
        # which is both racy with DataLoader num_workers > 0 (each worker
        # mutates its own copy) and delayed the dimension error to iteration
        # time. Fail fast and keep __getitem__ read-only.
        self._slice_start = 0
        self._slice_stop = self.feature_dim
        if self.force_input_dim is not None and self.feature_dim != self.force_input_dim:
            if self.feature_dim < self.force_input_dim:
                # Padding missing features would change semantics; refuse.
                raise ValueError(
                    f"特征维度{self.feature_dim}小于要求的force_input_dim={self.force_input_dim}，无法兼容"
                )
            if self.slice_strategy == 'tail':
                self._slice_start = self.feature_dim - self.force_input_dim
            else:
                self._slice_stop = self.force_input_dim
            # After cropping, collapse the three-block split into a single block.
            self.motif_dim = self.force_input_dim
            self.accessibility_dim = 0
            self.condition_dim = 0

        # Drop entries whose label is NaN/Inf. BUGFIX: without labels
        # (label_last=False) every entry is kept — the original built an
        # all-NaN label matrix and therefore discarded the whole dataset.
        if self.label_last:
            labels = self.data[:, :, -1]
            valid_mask = ~(np.isnan(labels) | np.isinf(labels))
        else:
            valid_mask = np.ones((num_samples, num_peaks), dtype=bool)
        valid_indices = np.where(valid_mask)
        self.valid_indices = list(zip(valid_indices[0], valid_indices[1]))
        self.total = len(self.valid_indices)

        logging.info(
            f"单峰数据集: 样本={num_samples}, peaks/样本={num_peaks}, 总列数={num_features}, "
            f"特征维度(不含标签)={self.feature_dim}, 有效条目={self.total}"
        )

    def __len__(self):
        return self.total

    def __getitem__(self, idx: int):
        """Return ``({'motif_features': [1, D] float32 tensor, 'sample_idx',
        'peak_idx', 'peak_id'}, [1] float32 label)`` for the idx-th valid entry."""
        sample_idx, peak_idx = self.valid_indices[idx]
        row = self.data[sample_idx, peak_idx]

        # Feature slice (label excluded); any force_input_dim cropping was
        # resolved in __init__ into a single [start:stop) window. np.array
        # copies out of the memmap before handing the buffer to torch.
        feat_slice = np.array(row[self._slice_start:self._slice_stop], dtype=np.float32)

        # motif/accessibility/condition dims always sum to the slice length
        # (guaranteed by __init__), so concatenating the three blocks is
        # identical to taking the full slice.
        all_features = torch.from_numpy(feat_slice).unsqueeze(0)  # [1, D], window_size=1

        # Label: by default the last column of the row.
        label_val = float(row[-1]) if self.label_last else float('nan')
        label = torch.tensor(label_val, dtype=torch.float32).unsqueeze(0)

        item = {
            'motif_features': all_features,  # sequence length = 1
            'sample_idx': sample_idx,
            'peak_idx': peak_idx,
        }
        # peak_id is required downstream for traceability; empty string when
        # unavailable (plain .npy input carries no ids).
        if self.peak_ids is not None and 0 <= peak_idx < len(self.peak_ids):
            item['peak_id'] = str(self.peak_ids[peak_idx])
        else:
            item['peak_id'] = ''
        return item, label


def evaluate_single_peaks(model, data_loader, device, output_dir, logger):
    """Run single-peak (window_size=1) evaluation.

    Feeds each ``[batch, 1, D]`` feature tensor through ``model``, collects
    predictions and targets, writes ``single_peak_predictions.csv``, a
    scatter plot and ``single_peak_metrics.csv`` into ``output_dir``
    (a ``pathlib.Path``), and returns the metrics dict.
    """
    model.eval()
    total_loss = 0.0
    all_preds, all_targets = [], []
    all_sample_indices, all_peak_indices, all_peak_ids = [], [], []

    # BUGFIX: nn.DataParallel does not proxy custom methods, so calling
    # model.compute_loss directly raises AttributeError on multi-GPU runs
    # (main() wraps the model in DataParallel). Go through .module.
    loss_module = model.module if isinstance(model, nn.DataParallel) else model

    with torch.no_grad():
        for batch_x, batch_y in tqdm(data_loader, desc='评估单峰(WS=1)'):
            # Assemble input: feed [batch, 1, D] into the model.
            features_for_model = {'motif_features': batch_x['motif_features'].to(device)}
            batch_y = batch_y.to(device)

            outputs = model(features_for_model)  # expected shape: [batch, 1, 1] or [batch, 1]
            loss = loss_module.compute_loss(outputs, batch_y)
            total_loss += loss.item()

            # Flatten to 1-D vectors.
            pred = outputs.detach().cpu().numpy().reshape(-1)
            target = batch_y.detach().cpu().numpy().reshape(-1)
            all_preds.extend(pred.tolist())
            all_targets.extend(target.tolist())

            all_sample_indices.extend(batch_x['sample_idx'].detach().cpu().numpy().tolist())
            all_peak_indices.extend(batch_x['peak_idx'].detach().cpu().numpy().tolist())
            # peak_id may arrive as list[str] or a numpy array after collation;
            # both are iterable, so a single str() pass covers them.
            if 'peak_id' in batch_x:
                try:
                    all_peak_ids.extend([str(x) for x in batch_x['peak_id']])
                except Exception:
                    all_peak_ids.extend([''] * len(pred))

    avg_loss = total_loss / max(1, len(data_loader))

    # Metrics (NaN when too few points for the statistic to be defined).
    preds_np = np.array(all_preds)
    trues_np = np.array(all_targets)
    mae = mean_absolute_error(trues_np, preds_np) if len(trues_np) > 0 else float('nan')
    mse = mean_squared_error(trues_np, preds_np) if len(trues_np) > 0 else float('nan')
    rmse = float(np.sqrt(mse)) if np.isfinite(mse) else float('nan')
    r2 = r2_score(trues_np, preds_np) if len(trues_np) > 1 else float('nan')
    try:
        pearson_r, pearson_p = pearsonr(trues_np, preds_np)
    except Exception:
        pearson_r, pearson_p = float('nan'), float('nan')
    try:
        spearman_rho, spearman_p = spearmanr(trues_np, preds_np)
    except Exception:
        spearman_rho, spearman_p = float('nan'), float('nan')
    try:
        reg = linregress(trues_np, preds_np)
        slope, intercept = reg.slope, reg.intercept
    except Exception:
        slope, intercept = float('nan'), float('nan')

    logger.info(
        f"单峰(WS=1)评估: Loss={avg_loss:.6f}, MAE={mae:.6f}, Spearman={spearman_rho:.6f}, "
        f"Pearson r={pearson_r:.6f}, R²={r2:.6f}, RMSE={rmse:.6f}, N={len(trues_np):,}"
    )

    # BUGFIX: keep the peak_id column the same length as the other columns,
    # otherwise pd.DataFrame raises on a length mismatch when some batches
    # lacked peak_id.
    if len(all_peak_ids) < len(all_preds):
        all_peak_ids.extend([''] * (len(all_preds) - len(all_peak_ids)))

    # Save per-entry predictions; peak_id is a mandatory (first) column.
    df = pd.DataFrame({
        'peak_id': all_peak_ids,
        'sample_idx': all_sample_indices,
        'peak_idx': all_peak_indices,
        'prediction': all_preds,
        'target': all_targets,
        'error': (preds_np - trues_np).tolist(),
    })
    df.to_csv(output_dir / 'single_peak_predictions.csv', index=False)
    logger.info(f"预测结果已保存: {output_dir / 'single_peak_predictions.csv'}")

    # Simple true-vs-predicted scatter plot; skip when there is nothing to
    # plot (min()/max() on an empty array would raise).
    if len(trues_np) > 0:
        plt.figure(figsize=(10, 8))
        plt.scatter(trues_np, preds_np, alpha=0.3, s=6, c='tab:blue', edgecolors='none')
        plt.plot([trues_np.min(), trues_np.max()], [trues_np.min(), trues_np.max()], 'r--', label='y=x')
        plt.xlabel('True Expression')
        plt.ylabel('Predicted Expression')
        plt.title('Single-peak Evaluation (window_size=1)')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.tight_layout()
        plt.savefig(output_dir / 'single_peak_scatter.png', dpi=200, bbox_inches='tight')
        plt.close()

    # Save the aggregate metrics as a one-row CSV.
    metrics = {
        'loss': avg_loss,
        'mae': mae,
        'mse': mse,
        'rmse': rmse,
        'r2': r2,
        'pearson_r': pearson_r,
        'pearson_p': pearson_p,
        'spearman_rho': spearman_rho,
        'spearman_p': spearman_p,
        'slope': slope,
        'intercept': intercept,
        'n': len(trues_np),
    }
    pd.DataFrame([metrics]).to_csv(output_dir / 'single_peak_metrics.csv', index=False)
    logger.info(f"评估指标已保存: {output_dir / 'single_peak_metrics.csv'}")

    return metrics


def main():
    """CLI entry point: load a sliding-window-trained checkpoint and evaluate
    it on single-peak (window_size=1) data, writing results to an output dir."""
    parser = argparse.ArgumentParser(description='单峰(WS=1)评估脚本 - 复用滑窗训练模型')
    parser.add_argument('--data_path', required=True, help='单峰评估数据(.npy)，形状[num_samples, num_peaks, 344+1]')
    parser.add_argument('--checkpoint_path', required=True, help='滑窗训练得到的best_model.pth')
    parser.add_argument('--output_dir', default=None, help='输出目录')
    parser.add_argument('--batch_size', type=int, default=1024, help='评估batch size')
    parser.add_argument('--num_workers', type=int, default=4, help='DataLoader workers')
    parser.add_argument('--motif_dim', type=int, default=283)
    parser.add_argument('--accessibility_dim', type=int, default=1)
    parser.add_argument('--condition_dim', type=int, default=60)
    parser.add_argument('--force_input_dim', type=int, default=None, help='为兼容旧模型输入，可将特征裁剪到该维度(如344)；默认不裁剪')
    parser.add_argument('--slice_strategy', type=str, default='head', choices=['head', 'tail'],
                        help='当裁剪特征到force_input_dim时的策略：head=取前N维，tail=取后N维')

    args = parser.parse_args()

    # Output directory (timestamped default when not given).
    if args.output_dir is None:
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        output_dir = Path(f"/root/autodl-tmp/GetForYeast/test_results_single_ws1_{timestamp}")
    else:
        output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Log both to console and to a file inside the output directory.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(output_dir / 'single_peak_test.log', mode='w', encoding='utf-8')
        ]
    )
    logger = logging.getLogger(__name__)

    logger.info("开始单峰(WS=1)评估")
    logger.info(f"数据: {args.data_path}")
    logger.info(f"checkpoint: {args.checkpoint_path}")
    logger.info(f"输出目录: {output_dir}")

    # Device selection.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # Dataset & DataLoader (evaluation only, so no shuffling).
    dataset = SinglePeakDataset(
        data_path=args.data_path,
        motif_dim=args.motif_dim,
        accessibility_dim=args.accessibility_dim,
        condition_dim=args.condition_dim,
        force_input_dim=args.force_input_dim,
        slice_strategy=args.slice_strategy,
    )
    data_loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
    )

    # Build the model with the same backbone/embedding dims as sliding-window
    # training. Input dim: prefer force_input_dim, else the data's feature_dim.
    # BUGFIX: compare against None explicitly — a bare truthiness test would
    # also discard an explicit 0 (the dataset's own check on force_input_dim
    # uses `is not None` for the same reason).
    input_feature_dim = args.force_input_dim if args.force_input_dim is not None else dataset.feature_dim
    model_cfg = {
        'region_embed': {
            'num_features': input_feature_dim,
            'embed_dim': 768,
        },
        'encoder': {
            'embed_dim': 768,
            'num_heads': 12,
            'num_layers': 12,
            'dropout': 0.1,
        },
        'head_exp': {
            'embed_dim': 768,
            'output_dim': 1,
        },
    }

    # weights_only is only accepted by newer torch; fall back for older versions.
    try:
        checkpoint = torch.load(args.checkpoint_path, map_location=device, weights_only=False)
        logger.info("成功加载checkpoint")
    except TypeError:
        checkpoint = torch.load(args.checkpoint_path, map_location=device)
        logger.info("成功加载checkpoint (兼容模式)")

    model = YeastModel(cfg=model_cfg)
    try:
        model.load_state_dict(checkpoint['model_state_dict'])
    except Exception as e:
        # Fall back to a non-strict load: keep only parameters whose names and
        # shapes match the freshly-built model.
        logger.warning(f"严格加载失败，尝试非严格加载: {e}")
        state = model.state_dict()
        ckpt = checkpoint.get('model_state_dict', checkpoint)
        filtered = {k: v for k, v in ckpt.items() if k in state and state[k].shape == v.shape}
        state.update(filtered)
        model.load_state_dict(state)
        logger.info(f"已使用非严格方式加载 {len(filtered)} 个权重参数")

    # Multi-GPU wrapping (evaluate_single_peaks handles the wrapped case).
    if torch.cuda.device_count() > 1:
        logger.info(f"使用 {torch.cuda.device_count()} 张GPU")
        model = nn.DataParallel(model)
    model = model.to(device)
    model.eval()

    # Run the evaluation and report the headline numbers.
    metrics = evaluate_single_peaks(model, data_loader, device, output_dir, logger)

    logger.info("单峰(WS=1)评估完成！")
    logger.info(f"主要结果: MAE={metrics['mae']:.6f}, Spearman={metrics['spearman_rho']:.6f}, R²={metrics['r2']:.6f}")


if __name__ == "__main__":
    main()