import pandas as pd
import numpy as np
import shap
import matplotlib.pyplot as plt
import argparse
import torch
torch.set_num_threads(1)
import re
import json

from src.utils.get import get_df, get_model, load_model
import config.config_ml as config_ml
import config.config_dl as config_dl
from src.data.dataset import ADHDDataset


# ===================== 0. CLI arguments =====================
_MODEL_CHOICES = ["transformer", "alex", "res", "mobile", "mlp", "lstm", "rnn",
                  "rf", "xgb", "svm", "linear", "gbr"]

parser = argparse.ArgumentParser()
parser.add_argument("--experiment", default="classification", type=str,
                    choices=["classification", "regression"])
parser.add_argument("--model", default="rf", type=str, choices=_MODEL_CHOICES)
parser.add_argument("--pipe", default="ml", type=str, choices=["ml", "dl"])
args = parser.parse_args()


# ===================== Pipeline-specific configuration =====================
# Every pipeline ("ml" / "dl") supplies the same set of callables so the
# main script can stay pipeline-agnostic.

def _ml_path(exp, model):
    # Location of the persisted sklearn pipeline.
    return f"results/experiments/{exp}/{model}/best/best_model_{model}.pkl"


def _ml_json_path(exp, model):
    return f"results/experiments/{exp}/{model}/best/hyperparameters.json"


def _ml_explainer(model, X):
    # Explain only the final estimator; X is already preprocessed.
    return shap.Explainer(model.model.named_steps["classifier"], X)


def _ml_process_X(model, X):
    # Run X through the fitted preprocessor and restore readable column
    # names by stripping the cat_/num_ prefixes sklearn adds.
    preprocessor = model.model.named_steps["preprocessor"]
    clean_names = [
        re.sub(r'^(cat|num)_*', '', raw)
        for raw in preprocessor.get_feature_names_out()
    ]
    return pd.DataFrame(preprocessor.transform(X),
                        columns=clean_names,
                        index=X.index)


def _ml_get_values(shap_values):
    # For binary classifiers SHAP returns (samples, features, classes);
    # keep the positive-class slice. Otherwise pass values through.
    vals = shap_values.values
    if vals.ndim == 3 and vals.shape[2] > 1:
        return vals[:, :, 1]
    return vals


def _dl_path(exp, model):
    return f"results/experiments/{exp}/{model}/best/best_model_{model}.pth"


def _dl_json_path(exp, model):
    return f"results/experiments/{exp}/{model}/best/hyperparameters.json"


def _dl_explainer(model, X):
    return shap.KernelExplainer(model, X)


def _dl_process_X(model, X):
    # DL keeps the raw features untouched.
    return X


def _dl_get_values(shap_values):
    # Older SHAP versions return one array per class; keep class 1.
    if isinstance(shap_values, list) and len(shap_values) > 1:
        return shap_values[1]
    return shap_values


CONFIG = {
    "ml": {
        "label_col": config_ml.LABEL_COL,
        "path": _ml_path,
        "json_path": _ml_json_path,
        "explainer": _ml_explainer,
        "process_X": _ml_process_X,
        "get_values": _ml_get_values,
    },
    "dl": {
        "label_col": config_dl.LABEL_COL,
        "path": _dl_path,
        "json_path": _dl_json_path,
        "explainer": _dl_explainer,
        "process_X": _dl_process_X,
        "get_values": _dl_get_values,
    },
}


# ===================== 1. Load data =====================
df = get_df(experiment=args.experiment, sensitive=False)

if args.pipe == "dl":
    # Only the "mobile", "mlp" and "transformer" DL models keep the id
    # columns; every other DL model trains without them.  The original
    # condition also branched on args.experiment, but argparse restricts
    # --experiment to {classification, regression}, so the whole boolean
    # reduced to a membership test on the model name.
    if args.model not in ("mobile", "mlp", "transformer"):
        df = df.drop(columns=["subject_id", "impute_id"], errors="ignore")

label_col = CONFIG[args.pipe]["label_col"]
X = df.drop(columns=[label_col])
y = df[label_col]


# ===================== 2. Subsample for SHAP =====================
# SHAP (KernelExplainer in particular) is expensive, so explain a small
# reproducible subset.  Cap n at the row count: DataFrame.sample raises
# ValueError when n > len(X) with replace=False, which would crash the
# script on tiny datasets.
n_background = min(10, len(X))
X_sample = X.sample(n=n_background, random_state=42)
y_sample = y.loc[X_sample.index]


# ===================== 3. Load the trained model =====================
if args.pipe == "ml":
    # ML: instantiate the wrapper class and restore the pickled pipeline.
    ModelClass = get_model(args.model, args.experiment, args.pipe)
    model = ModelClass(random_state=42)
    model.load(CONFIG[args.pipe]["path"](args.experiment, args.model))
else:
    # DL: rebuild the dataset (fit=True fits scaler/encoders used later
    # for the sample) and restore the network weights.
    dataset = ADHDDataset(df, task=args.experiment, fit=True)
    model_state_dict = torch.load(
        CONFIG[args.pipe]["path"](args.experiment, args.model),
        map_location=config_dl.DEVICE,
    )
    # Context manager closes the hyperparameter file deterministically
    # (the original json.load(open(...)) leaked the file handle).
    with open(CONFIG[args.pipe]["json_path"](args.experiment, args.model)) as fh:
        json_cfg = json.load(fh)
    model = load_model(dataset=dataset, model_state_dict=model_state_dict,
                       config_json=json_cfg, pipe=args.pipe)
    model.eval()  # inference mode: freezes dropout / batch-norm statistics


# ===================== 4. Prepare sample features =====================
if args.pipe == "ml":
    # Transform the sample through the fitted preprocessor and keep the
    # cleaned column names for plotting.
    X_sample_proc = CONFIG[args.pipe]["process_X"](model, X_sample)
    feature_names = list(X_sample_proc.columns)
else:
    # Re-encode the sampled rows with the scaler/encoders fitted on the
    # full dataset above (fit=False), so the sample is transformed
    # consistently with training.
    dataset_sample = ADHDDataset(
        df.loc[X_sample.index],
        label=label_col,
        task=args.experiment,
        fit=False,
        scaler=dataset.scaler,
        cat_encoders=dataset.cat_encoders
    )
    # NOTE(review): assumes dataset_sample[i] yields a (x_cat, x_cont)
    # pair per row — confirm against ADHDDataset.__getitem__.
    x_cat_list, x_cont_list = zip(*[dataset_sample[i] for i in range(len(dataset_sample))])
    # Coerce dtypes expected by the network's embedding / linear layers.
    x_cat_tensor = torch.stack(x_cat_list).to(torch.long)      # categorical features need long dtype
    x_cont_tensor = torch.stack(x_cont_list).to(torch.float32) # continuous features need float32 dtype

    X_sample_proc = [x_cat_tensor.to(config_dl.DEVICE),
                     x_cont_tensor.to(config_dl.DEVICE)]
    # Feature order must match the concatenation order used for SHAP
    # below: categorical columns first, then numeric.
    feature_names = list(dataset.cat_cols) + list(dataset.num_cols)

# ===================== 5. SHAP =====================
if args.pipe == "ml":
    # ML: explain the final estimator directly on preprocessed features.
    explainer = CONFIG[args.pipe]["explainer"](model, X_sample_proc)
    shap_values = explainer(X_sample_proc)
    shap_values_class = CONFIG[args.pipe]["get_values"](shap_values)
    plot_data = X_sample_proc
else:
    # DL pipeline
    # 1. Background data: concatenate categorical + continuous features
    #    into one flat float array (KernelExplainer wants a 2-D matrix).
    x_cat_np = X_sample_proc[0].cpu().numpy()
    x_cont_np = X_sample_proc[1].cpu().numpy()
    background_data = np.concatenate([x_cat_np, x_cont_np], axis=1)

    # 2. Prediction wrapper: split the flat array back into the
    #    (categorical, continuous) pair the network expects.
    def model_predict(x_array):
        """Predict from a flat numpy array; returns a numpy array of outputs."""
        x_tensor = torch.FloatTensor(x_array).to(config_dl.DEVICE)
        num_cat = X_sample_proc[0].shape[1]
        num_cont = X_sample_proc[1].shape[1]
        x_cat_tensor = x_tensor[:, :num_cat].long()
        x_cont_tensor = x_tensor[:, num_cat:num_cat + num_cont].float()
        with torch.no_grad():
            out = model((x_cat_tensor, x_cont_tensor))
            # Guard on ndim before reading shape[1]: a model returning a
            # 1-D tensor (single regression output) crashed the original
            # `out.shape[1]` lookup with an IndexError.
            if out.ndim > 1 and out.shape[1] > 1:  # multi-class logits
                return torch.softmax(out, dim=1).cpu().numpy()
            return out.cpu().numpy()

    # 3. Model-agnostic explainer over the wrapped predictor.
    explainer = shap.KernelExplainer(model_predict, background_data)

    # 4. Explain the same rows used as background (same format).
    test_data = background_data
    shap_values = explainer.shap_values(test_data)
    shap_values_class = CONFIG[args.pipe]["get_values"](shap_values)
    plot_data = pd.DataFrame(test_data, columns=feature_names)  # names for summary_plot

print("shap_values shape:", np.array(shap_values_class).shape)

# ===================== 6. Visualization =====================
import os

# Create the output directory up front so savefig cannot fail with
# FileNotFoundError on a fresh checkout (the saved path is unchanged).
_out_dir = "results/analysis/interpretability"
os.makedirs(_out_dir, exist_ok=True)

title = f"SHAP summary ({args.experiment}, {args.pipe}, {args.model})"
plt.figure(figsize=(12, min(50, len(feature_names)*0.5)))  # cap figure height
shap.summary_plot(
    shap_values_class,
    plot_data,
    feature_names=feature_names,
    max_display=len(feature_names),  # show every feature
    show=False
)
plt.title(title)
plt.tight_layout()
plt.savefig(f"{_out_dir}/shap_summary_{args.model}_{args.experiment}_{args.pipe}.png", dpi=300)
plt.close()
