# src/diagnostics.py

"""
模型诊断和可解释性分析
使用SHAP (SHapley Additive exPlanations) 进行特征重要性分析
"""

import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import torch
import numpy as np
import matplotlib.pyplot as plt
import shap
from tqdm import tqdm

from src.config_loader import load_config
from src.dataset import HRVDataset
from src.model import HRVHybridTransformer, ChannelIndependentTransformer


class ModelWrapper:
    """
    Adapts a (possibly multi-input) PyTorch model to the flat single-matrix
    interface that SHAP explainers expect.

    SHAP hands the wrapper a 2-D array of shape (batch, total_features); the
    wrapper splits/reshapes it back into the model's native inputs, runs the
    model without gradients, and returns a 2-D numpy array.
    """

    def __init__(self, model, device, use_multimodal=True, num_ts_features=None, window_size=None):
        """
        Args:
            model: trained PyTorch model (callable).
            device: device to run inference on.
            use_multimodal: True if each row is [flattened time series | tabular].
            num_ts_features: number of time-series channels.
            window_size: number of timesteps in one window.
        """
        self.model = model
        self.device = device
        self.use_multimodal = use_multimodal
        self.num_ts_features = num_ts_features
        self.window_size = window_size

        if use_multimodal:
            # Column count of the flattened time-series prefix of each row.
            self.ts_size = window_size * num_ts_features

    def __call__(self, x):
        """
        Args:
            x: concatenated input (batch_size, total_features), numpy or tensor.
               If use_multimodal=True: flattened time series first, tabular after.
               If use_multimodal=False: the whole row is the flattened series.

        Returns:
            numpy array of shape (batch_size, 1) or (batch_size, n_outputs).
        """
        if isinstance(x, np.ndarray):
            x = torch.as_tensor(x, dtype=torch.float32).to(self.device)

        n_rows = x.shape[0]

        with torch.no_grad():
            if self.use_multimodal:
                # Split the row back into its time-series and tabular parts.
                series = x[:, :self.ts_size].reshape(n_rows, self.window_size, self.num_ts_features)
                tabular = x[:, self.ts_size:]
                result = self.model(series, tabular)
            else:
                series = x.reshape(n_rows, self.window_size, self.num_ts_features)
                result = self.model(series)

        # Multi-task models return a tuple; keep only the first head (age).
        if isinstance(result, tuple):
            result = result[0]

        if isinstance(result, torch.Tensor):
            result = result.cpu().numpy()

        # SHAP requires a 2-D output.
        return result.reshape(-1, 1) if result.ndim == 1 else result


def prepare_data_for_shap(dataset, num_background=100, num_test=10, use_multimodal=True):
    """
    Prepare background and test matrices for SHAP analysis.

    Args:
        dataset: HRVDataset instance; indexing yields (ts, tabular, age, gender)
            when multimodal, otherwise (ts, age, gender).
        num_background: number of background samples (for KernelExplainer).
        num_test: number of samples to explain.
        use_multimodal: whether samples carry tabular features.

    Returns:
        background_data: (num_background, total_features)
        test_data: (num_test, total_features)
        feature_names: list of feature names, one per column

    Raises:
        ValueError: if the dataset has fewer than num_background + num_test samples.
    """
    print(f"\n准备SHAP数据...")
    print(f"  背景样本: {num_background}")
    print(f"  测试样本: {num_test}")

    total_samples = len(dataset)
    num_requested = num_background + num_test
    # np.random.choice(replace=False) raises an opaque error on overdraw;
    # fail early with a clear message instead.
    if num_requested > total_samples:
        raise ValueError(
            f"Requested {num_requested} samples (background={num_background}, "
            f"test={num_test}) but dataset only has {total_samples}"
        )

    # Draw disjoint background/test index sets.
    indices = np.random.choice(total_samples, size=num_requested, replace=False)
    background_indices = indices[:num_background]
    test_indices = indices[num_background:]

    def _flatten_sample(idx):
        # Flatten one dataset item into a single 1-D feature vector.
        if use_multimodal:
            ts, tabular, age, gender = dataset[idx]
            return np.concatenate([ts.numpy().flatten(), tabular.numpy()])
        ts, age, gender = dataset[idx]
        return ts.numpy().flatten()

    background_samples = [_flatten_sample(idx) for idx in tqdm(background_indices, desc="收集背景样本")]
    test_samples = [_flatten_sample(idx) for idx in tqdm(test_indices, desc="收集测试样本")]

    background_data = np.array(background_samples)
    test_data = np.array(test_samples)

    # Human-readable feature names: one per (timestep, channel) pair,
    # matching the row-major flatten order used above.
    feature_names = []
    cfg = dataset.cfg
    ts_feature_names = cfg['data']['features']
    window_size = cfg['preprocessing']['window_size']

    for t in range(window_size):
        for feat_name in ts_feature_names:
            feature_names.append(f"{feat_name}_t{t}")

    # Tabular block: four summary stats per channel, then gender.
    # NOTE(review): assumes HRVDataset builds tabular features in exactly
    # this order (mean/std/max/min per channel, gender last) — confirm.
    if use_multimodal:
        for feat_name in ts_feature_names:
            feature_names.append(f"{feat_name}_mean")
            feature_names.append(f"{feat_name}_std")
            feature_names.append(f"{feat_name}_max")
            feature_names.append(f"{feat_name}_min")
        feature_names.append("gender")

    print(f"\n数据准备完成:")
    print(f"  背景数据形状: {background_data.shape}")
    print(f"  测试数据形状: {test_data.shape}")
    print(f"  特征数量: {len(feature_names)}")

    return background_data, test_data, feature_names


def compute_shap_values(model_wrapper, background_data, test_data, method='kernel'):
    """
    Compute SHAP values for the given test samples.

    Args:
        model_wrapper: ModelWrapper instance (flat-matrix callable).
        background_data: background samples, (num_background, total_features).
        test_data: samples to explain, (num_test, total_features).
        method: 'kernel' or 'deep'.

    Returns:
        shap_values: SHAP value array (or list of arrays for multi-output models).
        explainer: the fitted SHAP explainer.

    Raises:
        ValueError: if method is neither 'kernel' nor 'deep'.
    """
    print(f"\n使用 {method.upper()} 方法计算SHAP值...")
    
    if method == 'kernel':
        # KernelExplainer — model-agnostic but slow; goes through the wrapper,
        # which handles reshaping/splitting of the flat input rows.
        explainer = shap.KernelExplainer(model_wrapper, background_data)
        shap_values = explainer.shap_values(test_data, nsamples=100)
        
    elif method == 'deep':
        # DeepExplainer — designed for deep nets, faster than kernel.
        # NOTE(review): this hands the RAW model (not the wrapper) flattened
        # 2-D tensors, but the model presumably expects reshaped time-series
        # (and tabular) inputs as done in ModelWrapper.__call__ — this path
        # likely fails for the hybrid/multimodal model; confirm before use.
        background_tensor = torch.FloatTensor(background_data).to(model_wrapper.device)
        explainer = shap.DeepExplainer(model_wrapper.model, background_tensor)
        test_tensor = torch.FloatTensor(test_data).to(model_wrapper.device)
        shap_values = explainer.shap_values(test_tensor)
    else:
        raise ValueError(f"Unknown method: {method}. Use 'kernel' or 'deep'.")
    
    print(f"SHAP值计算完成!")
    
    return shap_values, explainer


def _save_or_show(save_path, suffix, saved_msg):
    """
    Save the current matplotlib figure (inserting `suffix` before the file
    extension of save_path), or show it interactively when save_path is None.
    Closes the figure in either case.

    Args:
        save_path: base output path or None.
        suffix: string inserted before the extension (e.g. '_bar').
        saved_msg: format string with one '{}' placeholder for the path.
    """
    if save_path:
        # splitext is robust to paths not ending in '.png' — a bare
        # str.replace('.png', ...) would silently leave the path unchanged
        # and overwrite the same file with both plots.
        root, ext = os.path.splitext(save_path)
        out_path = f"{root}{suffix}{ext or '.png'}"
        plt.savefig(out_path, dpi=300, bbox_inches='tight')
        print(saved_msg.format(out_path))
    else:
        plt.show()
    plt.close()


def plot_shap_summary(shap_values, test_data, feature_names, save_path=None):
    """
    Render the two standard SHAP summary plots (bar chart + beeswarm).

    Args:
        shap_values: SHAP values for test_data.
        test_data: samples being explained, (n_samples, n_features).
        feature_names: one name per feature column.
        save_path: base output path; '_bar'/'_beeswarm' are inserted before
            the extension. If None, figures are shown instead of saved.
    """
    print("\n生成SHAP摘要图...")

    # Bar chart: mean |SHAP| per feature (global importance ranking).
    plt.figure(figsize=(12, 8))
    shap.summary_plot(shap_values, test_data, feature_names=feature_names, 
                     plot_type='bar', show=False, max_display=20)
    plt.title('SHAP Feature Importance (Top 20)', fontsize=16, fontweight='bold')
    plt.tight_layout()
    _save_or_show(save_path, '_bar', "  条形图已保存: {}")

    # Beeswarm: per-sample SHAP value distribution for each feature.
    plt.figure(figsize=(12, 10))
    shap.summary_plot(shap_values, test_data, feature_names=feature_names, 
                     show=False, max_display=20)
    plt.title('SHAP Summary Plot (Top 20)', fontsize=16, fontweight='bold')
    plt.tight_layout()
    _save_or_show(save_path, '_beeswarm', "  蜂群图已保存: {}")


def analyze_feature_importance(shap_values, feature_names, top_k=20):
    """
    Rank features by mean absolute SHAP value.

    Args:
        shap_values: SHAP value array (n_samples, n_features), or a list of
            such arrays (KernelExplainer returns a list for multi-output
            models; the first output — age prediction — is used).
        feature_names: one name per feature column.
        top_k: how many top features to print.

    Returns:
        importance_df: DataFrame with 'feature' and 'importance' columns,
            sorted by importance descending.
    """
    import pandas as pd
    
    # KernelExplainer may return a list (one array per model output);
    # keep only the first output to avoid crashing on np.abs(list).
    if isinstance(shap_values, list):
        shap_values = shap_values[0]

    # Global importance = mean |SHAP| over the explained samples.
    mean_abs_shap = np.abs(np.asarray(shap_values)).mean(axis=0)
    
    importance_df = pd.DataFrame({
        'feature': feature_names,
        'importance': mean_abs_shap
    })
    
    # Most important features first; fresh 0-based index for ranking output.
    importance_df = importance_df.sort_values('importance', ascending=False).reset_index(drop=True)
    
    print(f"\n前{top_k}个最重要的特征:")
    print("="*60)
    for i, row in importance_df.head(top_k).iterrows():
        print(f"{i+1:2d}. {row['feature']:30s}  重要性: {row['importance']:.6f}")
    
    return importance_df


def run_shap_analysis(checkpoint_path, split='val', num_background=100, num_test=10, 
                     save_dir=None, method='kernel'):
    """
    Run the full SHAP analysis pipeline: load config/data/model, compute SHAP
    values, plot summaries, and save a feature-importance CSV.

    Args:
        checkpoint_path: path to the model checkpoint (raw state_dict or a
            dict wrapping it under 'model_state_dict'/'state_dict').
        split: dataset split ('train', 'val', or 'test').
        num_background: number of background samples.
        num_test: number of samples to explain.
        save_dir: output directory (defaults to cfg output.results_dir or ./results).
        method: SHAP method ('kernel' or 'deep').

    Returns:
        (shap_values, explainer, importance_df)
    """
    print("="*70)
    print("SHAP 模型诊断和可解释性分析")
    print("="*70)
    
    # Load project configuration.
    cfg = load_config()
    
    # Pull run parameters, with sensible fallbacks for missing keys.
    task_type = cfg.get('task', {}).get('type', 'age')
    model_type = cfg.get('model', {}).get('type', 'hybrid')
    use_multimodal = cfg.get('multimodal', {}).get('enabled', True)
    device = torch.device(cfg.get('device', 'cuda' if torch.cuda.is_available() else 'cpu'))
    
    print(f"\n配置:")
    print(f"  任务类型: {task_type}")
    print(f"  模型类型: {model_type}")
    print(f"  多模态: {use_multimodal}")
    print(f"  设备: {device}")
    
    # Load dataset (augmentation off so explanations reflect real inputs).
    print(f"\n加载数据集 (split={split})...")
    dataset = HRVDataset(split=split, cfg=cfg, use_multimodal=use_multimodal, enable_augmentation=False)
    
    # Build the model architecture matching the config.
    print(f"\n加载模型: {checkpoint_path}")
    if model_type == 'hybrid':
        model = HRVHybridTransformer(cfg=cfg, task_type=task_type, use_multimodal=use_multimodal)
    else:
        model = ChannelIndependentTransformer(cfg=cfg, task_type=task_type)
    
    # Load weights. NOTE(review): torch.load unpickles arbitrary objects —
    # only load trusted checkpoints (weights_only=True would be safer on
    # torch versions that support it).
    checkpoint = torch.load(checkpoint_path, map_location=device)
    state_dict = checkpoint
    if isinstance(checkpoint, dict):
        # Support the common wrapped-checkpoint format in addition to a raw
        # state_dict ({'model_state_dict': ..., 'optimizer_state_dict': ...}).
        for key in ('model_state_dict', 'state_dict'):
            if key in checkpoint:
                state_dict = checkpoint[key]
                break
    model.load_state_dict(state_dict)
    model = model.to(device)
    model.eval()
    print("模型加载成功!")
    
    # Sample background/test matrices and matching feature names.
    background_data, test_data, feature_names = prepare_data_for_shap(
        dataset, num_background, num_test, use_multimodal
    )
    
    # Wrap the model into SHAP's flat single-matrix interface.
    num_ts_features = len(cfg['data']['features'])
    window_size = cfg['preprocessing']['window_size']
    
    model_wrapper = ModelWrapper(
        model, device, use_multimodal, 
        num_ts_features, window_size
    )
    
    # Compute SHAP values.
    shap_values, explainer = compute_shap_values(
        model_wrapper, background_data, test_data, method=method
    )
    
    # Resolve the output directory.
    if save_dir is None:
        save_dir = cfg.get('output', {}).get('results_dir', './results')
    os.makedirs(save_dir, exist_ok=True)
    
    save_path = os.path.join(save_dir, f'shap_analysis_{task_type}.png')
    
    # Plot summary figures.
    plot_shap_summary(shap_values, test_data, feature_names, save_path)
    
    # Rank and persist feature importance.
    importance_df = analyze_feature_importance(shap_values, feature_names, top_k=30)
    
    importance_path = os.path.join(save_dir, f'feature_importance_{task_type}.csv')
    importance_df.to_csv(importance_path, index=False)
    print(f"\n特征重要性已保存: {importance_path}")
    
    print("\n" + "="*70)
    print("SHAP分析完成!")
    print("="*70)
    
    return shap_values, explainer, importance_df


# ================== 主函数 ==================
if __name__ == "__main__":
    import argparse

    # Command-line entry point: parse options and launch the SHAP pipeline.
    cli = argparse.ArgumentParser(description='SHAP模型诊断')
    cli.add_argument('--checkpoint', type=str, required=True, help='模型checkpoint路径')
    cli.add_argument('--split', type=str, default='val', choices=['train', 'val', 'test'],
                     help='数据集划分')
    cli.add_argument('--num_background', type=int, default=100, help='背景样本数量')
    cli.add_argument('--num_test', type=int, default=10, help='测试样本数量')
    cli.add_argument('--save_dir', type=str, default=None, help='结果保存目录')
    cli.add_argument('--method', type=str, default='kernel', choices=['kernel', 'deep'],
                     help='SHAP方法')
    opts = cli.parse_args()

    run_shap_analysis(
        checkpoint_path=opts.checkpoint,
        split=opts.split,
        num_background=opts.num_background,
        num_test=opts.num_test,
        save_dir=opts.save_dir,
        method=opts.method
    )


