import config.config as config
from src.data.dataset import ADHDDataset
from src.utils import data_utils
import pandas as pd
import os
import datetime


def get_model(model_name, experiment, pipeline="ml"):
    """Look up a model class in the configured model zoo.

    :param model_name: registered model key, e.g. "rf", "xgb", "transformer"
    :param experiment: task name, e.g. "classification" or "regression"
    :param pipeline: pipeline family, "ml" or "dl"
    :return: the class stored at MODEL_ZOO[pipeline][experiment][model_name]
    :raises ValueError: if the combination is not registered; the message
        lists the models available for the given pipeline/experiment
    """
    try:
        return config.MODEL_ZOO[pipeline][experiment][model_name]
    except KeyError as err:
        available_models = list(config.MODEL_ZOO.get(pipeline, {}).get(experiment, {}).keys())
        # Chain the original KeyError so the failing lookup level stays visible.
        raise ValueError(
            f"模型 {model_name} 不支持 {experiment} 任务 (pipeline={pipeline})。"
            f" 可用模型: {available_models}"
        ) from err

def get_df(experiment, sensitive=False):
    """Load the experiment dataframe and prepare its label column.

    :param experiment: "classification" (label is binarized) or any other
        task (rows whose label equals -1 are dropped)
    :param sensitive: load the sensitivity-analysis pickle instead of train
    :return: prepared pandas DataFrame
    """
    source_key = "sensitive" if sensitive else "train"
    df = pd.read_pickle(config.DATA[source_key])

    if experiment == "classification":
        df[config.LABEL_COL] = data_utils.extract_binary_label(df, config.LABEL_COL)
        return df

    # Non-classification tasks: discard rows with the -1 sentinel label.
    return df[df[config.LABEL_COL] != -1]

def get_output_dir(experiment, model, cv=False, sensitive=False, base_dict=None, sub_dir=None):
    """Build (and create) the output directory for an experiment run.

    :param experiment: experiment name, e.g. "classification" or "regression"
    :param model: model name, e.g. "rf", "xgb", "transformer"
    :param cv: whether 10-fold cross-validation is used
    :param sensitive: whether this is a sensitivity analysis
    :param base_dict: path-template dict; defaults to config.DIR_DICT,
        resolved at call time (not definition time) so the shared config
        dict is never baked in as a mutable default argument
    :param sub_dir: custom sub-directory name; if None, "10fold" is used
        when cv=True, otherwise a timestamp
    :return: tuple (output_dir, metrics CSV path)
    """
    # Local import: the module-level `import datetime` is later shadowed in
    # this file by `from datetime import datetime`; binding the class locally
    # makes this function independent of that import order.
    from datetime import datetime

    if base_dict is None:
        base_dict = config.DIR_DICT

    # Pick the path template.
    key = "sensitive" if sensitive else "experiment"
    base_dir = base_dict[key].format(experiment=experiment, model=model)

    # Sub-directory: fixed name for CV runs, timestamp otherwise.
    if sub_dir is None:
        sub_dir = "10fold" if cv else datetime.now().strftime("%Y%m%d_%H%M")

    output_dir = os.path.join(base_dir, sub_dir)
    os.makedirs(output_dir, exist_ok=True)

    csv_path = os.path.join(output_dir, f"metrics_{model}.csv")
    return output_dir, csv_path

from datetime import datetime

def get_split_gen(df, experiment, sensitive=False, seed=42, cv=True, label_col=config.LABEL_COL, group_col="subject_id"):
    """Build a train/test split generator from the project split config.

    Parameters:
        df: DataFrame to split
        experiment: "classification" or "regression"
        sensitive: whether this is a sensitivity analysis (selects the
            corresponding entry in config.SPLIT)
        seed: random seed, passed only to shuffling splitters
        cv: if False, only the first fold is yielded
        label_col: label column name
        group_col: grouping column name (used when the splitter needs groups)
    Returns:
        An iterable yielding (train_idx, test_idx) pairs.
    """
    split_cfg = config.SPLIT[sensitive][experiment]

    # Splitter class and its construction parameters come from config.
    splitter_cls = split_cfg["cv"]
    splitter_params = split_cfg["params"]
    use_groups = split_cfg["needs_groups"]

    # random_state is only meaningful (and only passed) when shuffling.
    if splitter_params.get("shuffle"):
        splitter = splitter_cls(random_state=seed, **splitter_params)
    else:
        splitter = splitter_cls(**splitter_params)

    # Stratify on the label for classification; regression gets no labels.
    y = df[label_col] if experiment == "classification" else None

    if use_groups:
        split_gen = splitter.split(df, y, groups=df[group_col])
    else:
        split_gen = splitter.split(df, y)

    # Single-split mode: keep only the first fold.
    if not cv:
        split_gen = [next(split_gen)]

    return split_gen



def load_model(dataset: ADHDDataset, model_state_dict=None, config_json=None, pipe="dl"):
    """Instantiate a model (classification or regression) from a JSON config
    and optionally load trained weights, using an existing dataset for the
    feature dimensions.

    Parameters:
        dataset: ADHDDataset exposing cat_cols, num_cols and df
        model_state_dict: trained parameters to load, or None
        config_json: dict with "common", the pipe section and "model_specific"
        pipe: "ml" or "dl" — selects the config section and the model zoo

    Returns:
        The initialized (and possibly weight-loaded) model on config.DEVICE.
    """
    cfg = config_json or {}
    common = cfg.get("common", {})
    pipe_cfg = cfg.get(pipe, {})
    name = common.get("Model")
    task = common.get("Experiment")
    specific = cfg.get("model_specific", {}).get(name, {})

    # Category cardinalities, excluding the derived grouping columns.
    kept_cat_cols = [c for c in dataset.cat_cols if c not in ("age_group", "cesd_group")]
    global_categories = tuple(dataset.df[c].nunique() for c in kept_cat_cols)

    # Shared constructor arguments for every architecture.
    kwargs = {
        "num_cat_features": len(dataset.cat_cols),
        "num_cont_features": len(dataset.num_cols),
        "embedding_dim": pipe_cfg.get("Embedding Dim"),
        "hidden_dim": pipe_cfg.get("Hidden Dim", 128),
        "dropout": pipe_cfg.get("Dropout"),
        "num_classes": 2 if task == "classification" else 1,
    }

    # Architecture-specific arguments.
    if name == "transformer":
        extra = {
            "heads": pipe_cfg.get("Heads"),
            "depth": pipe_cfg.get("Depth"),
            "categories": global_categories,
        }
    elif name == "mlp" and task == "regression":
        extra = {"hidden_dims": pipe_cfg.get("Hidden Dims MLP")}
    elif name == "lstm":
        extra = {
            "num_layers": pipe_cfg.get("Num Layers"),
            "bidirectional": pipe_cfg.get("Bidirectional"),
        }
    else:
        extra = {}

    # model_specific entries override the derived arguments.
    extra.update(specific)

    ModelClass = get_model(name, task, pipe)
    model = ModelClass(**kwargs, **extra).to(config.DEVICE)

    if model_state_dict is not None:
        print("Loading model weights...")
        state = model_state_dict
        if name == "mobile":
            # "mobile" checkpoints predate the embedding submodule; prefix
            # the affected keys so they map onto the current layout.
            state = {}
            for key, value in model_state_dict.items():
                if "cat_embeddings" in key or "cont_fc" in key:
                    key = "embedding." + key
                state[key] = value
        model.load_state_dict(state, strict=False)

    return model



