# ====================================================================================
# 文件: src/parser.py
# 描述: [V_Final] 新增 AGM 和 SRA-CL 参数解析
# ====================================================================================

import argparse
import os
from typing import Any, Dict
import torch
import yaml


def _dict_to_namespace(d):
    if not isinstance(d, dict):
        return d
    namespace = argparse.Namespace()
    for key, value in d.items():
        if isinstance(value, dict):
            setattr(namespace, key, _dict_to_namespace(value))
        elif isinstance(value, list):
            setattr(namespace, key, [_dict_to_namespace(item) if isinstance(item, dict) else item for item in value])
        else:
            setattr(namespace, key, value)
    return namespace


def _flatten_train_config(config):
    """Hoist the fields of ``config.train_config`` onto ``config`` itself.

    Scalar training fields are copied to the top level; ``fusion_weights``
    is flattened; nested sections (progressive_training, user_recon, agm,
    sracl) are exposed as plain dicts. AGM / SRA-CL default to disabled
    when their sections are absent.
    """
    if not hasattr(config, "train_config"):
        return
    train_cfg = config.train_config

    nested_keys = ("fusion_weights", "progressive_training", "user_recon", "agm", "sracl")
    # Promote every non-nested training field straight onto config.
    for name in vars(train_cfg):
        if name not in nested_keys:
            setattr(config, name, getattr(train_cfg, name))

    # Flatten the fusion-weight sub-namespace when present.
    fusion = getattr(train_cfg, "fusion_weights", None)
    if fusion:
        for name, value in vars(fusion).items():
            setattr(config, name, value)

    # Direct attributes on train_cfg deliberately win over fusion_weights
    # entries of the same name (copied after the fusion pass above).
    for name in ("alpha", "beta", "gamma", "bpr_loss_weight", "recon_loss_weight"):
        if hasattr(train_cfg, name):
            setattr(config, name, getattr(train_cfg, name))

    # Complex sub-sections are exposed as plain dicts under the same name.
    for name in ("progressive_training", "user_recon"):
        if hasattr(train_cfg, name):
            setattr(config, name, _namespace_to_dict(getattr(train_cfg, name)))

    # AGM and SRA-CL sections: dict form under *_config, disabled by default.
    for src, dst in (("agm", "agm_config"), ("sracl", "sracl_config")):
        if hasattr(train_cfg, src):
            setattr(config, dst, _namespace_to_dict(getattr(train_cfg, src)))
        else:
            setattr(config, dst, {"enable": False})


def _namespace_to_dict(ns: Any) -> Dict[str, Any]:
    if not hasattr(ns, "__dict__"):
        return ns
    result = {}
    for key, value in vars(ns).items():
        if hasattr(value, "__dict__"):
            result[key] = _namespace_to_dict(value)
        else:
            result[key] = value
    return result


def parse_args():
    """Parse CLI arguments, load the YAML config, and return a flattened Namespace.

    Returns:
        argparse.Namespace with the YAML content converted to nested
        Namespaces, CLI overrides applied, training section flattened,
        and ``device`` / ``top_k`` defaults resolved.

    Raises:
        FileNotFoundError: if ``--config_file`` does not exist.
        ValueError: if the YAML root is not a mapping.
        KeyError: if a required top-level section is missing.
    """
    parser = argparse.ArgumentParser(description="SDKR Training")
    parser.add_argument('--config_file', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--gpu_id', type=str, required=True)
    parser.add_argument('--seed', type=int, required=True)
    args = parser.parse_args()

    if not os.path.exists(args.config_file):
        raise FileNotFoundError(f"配置文件未找到: {args.config_file}")

    print(f"[Parser] 加载 YAML: {args.config_file}")
    # Explicit UTF-8: the YAML contains non-ASCII text and must not depend
    # on the platform's default locale encoding.
    with open(args.config_file, 'r', encoding='utf-8') as f:
        config_dict = yaml.safe_load(f)

    # Fail fast with a clear message instead of an AttributeError below.
    if not isinstance(config_dict, dict):
        raise ValueError(
            f"YAML root must be a mapping, got {type(config_dict).__name__}: {args.config_file}"
        )

    config = _dict_to_namespace(config_dict)

    # CLI arguments override / complement the YAML content.
    config.dataset = args.dataset
    config.gpu_id = args.gpu_id
    config.seed = args.seed
    config.config_file = args.config_file

    # Required top-level sections, aliased to the names the rest of the
    # codebase expects; missing sections raise a clear error.
    for section, alias in (("data", "data_config"),
                           ("preprocessing", "preproc_config"),
                           ("training", "train_config")):
        if not hasattr(config, section):
            raise KeyError(
                f"Missing required config section '{section}' in {args.config_file}"
            )
        setattr(config, alias, getattr(config, section))

    _flatten_train_config(config)

    # Resolve compute device; fall back to CPU when CUDA is unavailable.
    device_str = f"cuda:{config.gpu_id}" if torch.cuda.is_available() else "cpu"
    config.device = torch.device(device_str)

    # Default evaluation cutoffs when the YAML does not specify top_k.
    if not hasattr(config, "top_k"):
        config.top_k = [20, 50]

    return config
