import pandas as pd
from torch.utils.data import DataLoader
from models.lwCETModel import lwCET
import torch
import torch.nn.functional as F
import os
from myutils.configUtils import get_configs
from myutils.dataset import SignalDataset
from utils import to_device


def compute_feature_importance_single(model, x_in_single, feature_single):
    """
    Compute per-sample feature importance via gradient-based saliency maps.

    For every sample in the batch, the gradient of the predicted class's
    logit with respect to both inputs is computed; the absolute gradient
    magnitude is used as the importance score of each input feature.

    Args:
        model: torch.nn.Module whose forward accepts ``(x_in, feature)``
            (``x_in`` with a channel dim) and returns logits of shape
            ``(batch, num_classes)``.
        x_in_single: signal tensor of shape ``(batch, signal_len)``; a
            channel dimension is inserted before the forward pass.
        feature_single: auxiliary feature tensor of shape ``(batch, n_feat)``.

    Returns:
        list[dict]: one entry per sample with keys ``sample_index``,
        ``predicted_class`` (int), ``confidence`` (float probability of the
        prediction), ``all_probabilities`` (per-class probabilities for this
        sample), ``x_in_importance`` and ``feature_importance`` (lists of
        ``(feature_index, score)`` pairs sorted by descending importance).
    """
    model.eval()

    # Insert the channel dim and make the inputs leaf tensors tracking grads.
    x_in = x_in_single.unsqueeze(1).clone().requires_grad_(True)
    features = feature_single.clone().requires_grad_(True)

    # Forward pass (autograd stays enabled even in eval mode).
    logits = model(x_in, features)
    probabilities = F.softmax(logits, dim=1)
    # Highest probability (confidence) and predicted class, per sample.
    confidence, predicted_class = torch.max(probabilities, 1)

    n_samples = len(x_in)
    results = []
    for i in range(n_samples):
        # Clear stale gradients so each sample's saliency is independent;
        # model.zero_grad() only clears parameter grads, not input grads.
        model.zero_grad()
        if x_in.grad is not None:
            x_in.grad.zero_()
        if features.grad is not None:
            features.grad.zero_()

        # Backprop the predicted class's logit. Keep the graph for all but
        # the last sample (uses the actual batch length rather than a global
        # batch_size, which broke for the final, smaller batch).
        logits[i, predicted_class[i].item()].backward(retain_graph=(i < n_samples - 1))

        # Absolute gradient magnitude = importance. squeeze(1) removes only
        # the channel dim so a batch of size 1 is handled correctly.
        x_in_grad = x_in.grad.abs().squeeze(1)
        feature_grad = features.grad.abs()

        # Convert this sample's gradients to (feature_index, score) pairs.
        x_in_importance = [(idx, g.item()) for idx, g in enumerate(x_in_grad[i])]
        feature_importance = [(idx, g.item()) for idx, g in enumerate(feature_grad[i])]

        # Sort by importance, largest first.
        x_in_importance_sorted = sorted(x_in_importance, key=lambda t: t[1], reverse=True)
        feature_importance_sorted = sorted(feature_importance, key=lambda t: t[1], reverse=True)

        # Concise per-sample report.
        print("=== 单样本预测分析报告 ===")
        print(f"sample_index: {i}")
        print(f"预测类别: {predicted_class[i]}")
        print(f"置信度: {confidence[i]:.4f}")
        print(f"主要决策依据特征:")
        print(f"x_in 分支前5重要特征: {[idx for idx, _ in x_in_importance_sorted[:5]]}")
        print(f"feature 分支前5重要特征: {[idx for idx, _ in feature_importance_sorted[:5]]}")

        results.append({
            'sample_index': i,
            # Per-sample scalars (previously the whole-batch tensors were
            # stored in every entry).
            'predicted_class': predicted_class[i].item(),
            'confidence': confidence[i].item(),  # probability of the prediction
            'all_probabilities': probabilities[i].tolist(),  # all class probabilities
            'x_in_importance': x_in_importance_sorted,
            'feature_importance': feature_importance_sorted
        })
    return results


if __name__ == "__main__":
    batch_size = 128
    # test_model_name = os.path.join("E:\\3_code_self\\ecg-detection\\experiments_logs\\Exp1\\lwCET_2025_09_29_22_54_03",
    #                                "checkpoint_best.pt")
    test_model_name = os.path.join("E:\\3_code_self\\ecg-detection\\arrhythmia-project",
                                   "checkpoint_best.pt")
    dataset_configs, hparams_class1 = get_configs()
    hparams = hparams_class1.train_params

    device = torch.device('cuda:0')
    # 创建模型实例
    model = lwCET(dataset_configs, hparams, add_fea=True)
    chkpoint = torch.load(test_model_name, map_location='cuda:0' if torch.cuda.is_available() else 'cpu')
    model.load_state_dict(chkpoint['model'])
    model = model.to(device)

    test_file = '../data/mit/bak/0929/train-250929.csv'
    test_data = pd.read_csv(test_file, header=None).values
    test_data = SignalDataset(test_data)
    test_dl = DataLoader(dataset=test_data,
                         batch_size=batch_size,
                         num_workers=2,
                         shuffle=False)
    for batches in test_dl:
        batches = to_device(batches, device)
        data = batches[0].float()
        feature1 = batches[1].float()
        labels = batches[2].long()
        # 执行分析
        analysis_result = compute_feature_importance_single(model, data, feature1)
