# evaluate_dl.py
import argparse
import os
import datetime
import pandas as pd
import torch
torch.set_num_threads(1)
from torch.utils.data import DataLoader
from src.data.dataset import ADHDDataset
from src.utils import data_utils
from src.utils.save import save_metrics
from src.utils.visualize import plot_confusion_matrix, plot_predicted_vs_true
import config.config_dl as config
import config.config_subgroup as subgroup_config
from src.training.evaluate import validate_classification, validate_regression

def main():
    """Run per-subgroup evaluation of a trained model.

    Loads the imputed dataset, restores the best checkpoint for the chosen
    model/experiment, then for every subgroup variable in
    ``subgroup_config.SUBGROUPS`` evaluates the model on each subgroup value,
    saving metrics CSVs and plots under
    ``results_subgroup/<experiment>/<model>/<subgroup_key>/``.

    CLI args: --model (architecture key), --seed (torch RNG seed),
    --experiment ("classification" or "regression").
    """
    print("开始亚组分析验证", flush=True)

    # === Parse CLI arguments ===
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="mobile",
                        choices=["transformer", "alex", "res", "mobile", "mlp", "lstm", "rnn"])
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--experiment", type=str, default="classification",
                        choices=["classification", "regression"])
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    # === Load data ===
    df = pd.read_pickle(r'data/imputed/imputed_dataset_all_binned.pkl')
    if args.experiment == "classification":
        df[config.LABEL_COL] = data_utils.extract_binary_label(df, config.LABEL_COL)
    else:
        # Regression: drop rows carrying the -1 "missing label" sentinel.
        df = df[df[config.LABEL_COL] != -1]

    # Global per-column category counts, computed on the FULL dataframe so
    # transformer embedding sizes match training even when a subgroup slice
    # is missing some category levels.
    dataset = ADHDDataset(df, task=args.experiment, fit=True)
    cat_cols = [col for col in dataset.cat_cols if col not in ("age_group", "cesd_group")]
    global_categories = tuple(df[col].nunique() for col in cat_cols)
    print(f"Global categories: {global_categories}")

    # === Load model checkpoint ===
    model_path = f"results/{args.experiment}/{args.model}/best/best_model_{args.model}.pth"
    print(f"Loading model: {model_path}", flush=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    state_dict = torch.load(model_path, map_location=device)

    # BUGFIX: the original condition mixed `and`/`or` without parentheses, so
    # `args.model == "mobile"` (and `== "mlp"`) matched regardless of the
    # experiment. The intended grouping is the two (models, experiment) pairs:
    keep_id_cols = (
        (args.model in ("mobile", "transformer") and args.experiment == "classification")
        or (args.model in ("mlp", "transformer") and args.experiment == "regression")
    )

    # === Iterate over every subgroup variable ===
    for subgroup_key, subgroup_values in subgroup_config.SUBGROUPS.items():
        print(f"\n===== 当前亚组变量: {subgroup_key} =====")

        # One output directory per subgroup variable.
        output_dir = f'results_subgroup/{args.experiment}/{args.model}/{subgroup_key}'
        os.makedirs(output_dir, exist_ok=True)

        # === Save run metadata ===
        with open(f"{output_dir}/info.txt", "w") as f:
            f.write(f"Model: {args.model}\n")
            f.write(f"Seed: {args.seed}\n")
            f.write(f"Experiment: {args.experiment}\n")
            f.write(f"Subgroup Variable: {subgroup_key}\n")
            f.write(f"Model Path: {model_path}\n")

        # === Validate on each value of this subgroup variable ===
        for subgroup in subgroup_values:
            subgroup_df = df[df[subgroup_key] == subgroup]
            if not keep_id_cols:
                # These models do not consume the identifier columns.
                subgroup_df = subgroup_df.drop(columns=['subject_id', 'impute_id'],
                                               errors='ignore')
            if len(subgroup_df) == 0:
                print(f"Subgroup: {subgroup_key}={subgroup}, Test set is empty, skipping...")
                continue

            # Drop the grouping helper columns; they are not model features.
            subgroup_df = subgroup_df.drop(columns=["age_group", "cesd_group"], errors="ignore")

            # Build Dataset & DataLoader.
            # NOTE(review): fit=True re-fits the dataset's encoders on each
            # subgroup slice — confirm this matches the encoding used at
            # training time (global_categories only covers the transformer).
            dataset = ADHDDataset(subgroup_df, task=args.experiment, fit=True)
            loader = DataLoader(dataset, batch_size=config.BATCH_SIZE, num_workers=2, shuffle=False)

            # === Evaluate only (no training) ===
            if args.experiment == "classification":
                metrics, _, cm = validate_classification(
                    args, loader, dataset, model_state_dict=state_dict, categories=global_categories)
            else:
                metrics, _, labels, preds = validate_regression(
                    args, loader, dataset, model_state_dict=state_dict, categories=global_categories)

            # Record both a named column and a generic "subgroup" column so
            # downstream aggregation can rely on either key.
            metrics[subgroup_key] = subgroup
            metrics["subgroup"] = subgroup
            metrics["n_samples"] = len(subgroup_df)
            print(f"Subgroup: {subgroup_key}={subgroup}, Metrics: {metrics}")

            # Save metrics CSV.
            save_metrics(metrics, f"{output_dir}/metrics_{args.model}_{subgroup}.csv")

            # Visualization.
            if args.experiment == "classification":
                plot_confusion_matrix(cm, f"{output_dir}/confusion_matrix_{subgroup}.png")
            else:
                plot_predicted_vs_true(labels, preds, f"{output_dir}/predicted_vs_true_{subgroup}.png")

if __name__ == "__main__":
    main()
