import torch
import numpy as np
import os
import time
import argparse
import yaml
import json
import pickle
import scipy.sparse as sp
from scipy.sparse import linalg
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))

import torch.nn as nn
import torch

from src.utils.helper import get_dataloader, check_device, get_num_nodes, seed_everything, build_node_groups_from_csv, build_geo_groups_from_csv
from src.models.DeepPA import DeepPA
from src.trainers.deeppa_trainer import DeepPA_Trainer
from src.utils.graph_algo import load_graph_data
from src.utils.args import get_public_config, str_to_bool
from src.utils.fe import build_feature_transforms


def get_config():
    """Parse CLI arguments (public + DeepPA-specific), optionally override them
    from a YAML config file, derive the deterministic log directory and the
    dataset's node count, and apply global side effects (seeding, GPU
    visibility).

    Returns:
        tuple: ``(args, folder_name)`` where ``args`` is the fully populated
        argparse Namespace and ``folder_name`` is the hyperparameter-derived
        string embedded in ``args.log_dir``.
    """
    parser = get_public_config()

    # get private config
    parser.add_argument("--model_name", type=str, default="DeepPA", help="which model to train")
    parser.add_argument("--dropout", type=float, default=0.3)
    parser.add_argument("--filter_type", type=str, default="transition")
    parser.add_argument("--n_blocks", type=int, default=2)
    parser.add_argument("--n_hidden", type=int, default=64)
    parser.add_argument("--n_heads", type=int, default=2)
    parser.add_argument("--spatial_flag", type=str_to_bool, default=True, help="whether to use spatial transformer")
    parser.add_argument("--temporal_flag", type=str_to_bool, default=True, help="whether to use temporal transformer")
    parser.add_argument("--spatial_encoding", type=str_to_bool, default=True, help="whether to use spatial encoding")
    parser.add_argument("--temporal_encoding", type=str_to_bool, default=True, help="whether to use temporal encoding")
    parser.add_argument("--temporal_PE", type=str_to_bool, default=True, help="whether to use temporal PE")
    parser.add_argument("--GCO", type=str_to_bool, default=True, help="whether to use GCO")
    parser.add_argument("--CLUSTER", type=str_to_bool, default=False, help="whether to use CLUSTER")
    parser.add_argument("--GCO_Thre", type=float, default=1, help="The proportion of low frequency signals")
    parser.add_argument("--base_lr", type=float, default=1e-3)
    parser.add_argument("--lr_decay_ratio", type=float, default=0.5)
    # external config file support
    parser.add_argument("--config", type=str, default=None, help="Path to YAML config to override args")
    # override for num_nodes to skip shape detection
    parser.add_argument("--num_nodes_override", type=int, default=None, help="override node count to skip data shape detection")

    args = parser.parse_args()
    # Load YAML config if provided.
    # Shallow override: only keys that already exist on the parsed Namespace
    # are applied; unknown keys in the YAML are silently ignored.
    if args.config is not None and os.path.exists(args.config):
        with open(args.config, "r", encoding="utf-8") as f:
            cfg = yaml.safe_load(f) or {}
        for k, v in cfg.items():
            if hasattr(args, k):
                setattr(args, k, v)
    # Every 3rd epoch up to max_epochs — presumably the LR-decay milestones
    # consumed by the trainer together with lr_decay_ratio (TODO confirm in
    # DeepPA_Trainer).
    args.steps = list(range(3, args.max_epochs + 1, 3))
    print(args)

    # Build log folder name deterministically from the hyperparameters, so
    # runs with identical settings map to the same directory.
    parts = [
        args.n_hidden,
        args.n_blocks,
        args.n_heads,
        args.spatial_flag,
        args.temporal_flag,
        args.spatial_encoding,
        args.temporal_encoding,
        args.temporal_PE,
        args.aug,
        args.batch_size,
        args.base_lr,
        args.n_exp,
        args.GCO,
        # NOTE(review): temporal_encoding appears twice in this list (also
        # above); this entry was possibly meant to be args.CLUSTER. Kept as-is
        # because the folder name determines existing log/checkpoint paths.
        args.temporal_encoding,
        args.GCO_Thre,
    ]
    folder_name = "-".join(map(str, parts))
    args.log_dir = "./logs/{}/{}/{}/".format(args.dataset, args.model_name, folder_name)
    print(args.log_dir)
    # Use override if provided (non-zero), otherwise infer from dataset
    try:
        nn_override = getattr(args, "num_nodes_override", None)
        if nn_override is not None and int(nn_override) > 0:
            args.num_nodes = int(nn_override)
            print(f"Using overridden num_nodes={args.num_nodes}; skipping shape detection.")
        else:
            args.num_nodes = get_num_nodes(args.dataset, memmap_dir=getattr(args, "memmap_dir", None))
            print(f"Inferred num_nodes={args.num_nodes} from dataset '{args.dataset}'.")
    except Exception as e:
        # robust fallback: retry plain detection if the override path failed
        try:
            args.num_nodes = get_num_nodes(args.dataset, memmap_dir=getattr(args, "memmap_dir", None))
            print(f"Fallback inferred num_nodes={args.num_nodes} due to override error: {e}")
        except Exception:
            # final fallback for SINPA (hard-coded known node count)
            if str(args.dataset).upper() == "SINPA":
                args.num_nodes = 1687
                print("Fallback num_nodes=1687 for SINPA (override not provided and detection failed).")
            else:
                raise
    args.datapath = os.path.join("./data", args.dataset)
    # seed == 0 means "no fixed seed" here — runs stay nondeterministic
    if args.seed != 0:
        seed_everything(args.seed)
    # NOTE(review): set after `import torch`; this only restricts device
    # visibility if CUDA has not been initialized yet — confirm no earlier
    # torch.cuda call happens on import.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    return args, folder_name


def _print_device_info(device):
    """Print device/environment diagnostics so the user can confirm GPU usage."""
    try:
        print(f"Using device: {device}")
        if isinstance(device, torch.device) and device.type == "cuda":
            cur = torch.cuda.current_device()
            print(f"CUDA available: {torch.cuda.is_available()}, current: {cur}, name: {torch.cuda.get_device_name(cur)}")
            print(f"CUDA_VISIBLE_DEVICES={os.environ.get('CUDA_VISIBLE_DEVICES', '')}")
            print(f"FFT_FORCE_CPU={os.environ.get('FFT_FORCE_CPU', '0')} (set 1 to force CPU FFT)")
        else:
            print(f"CUDA available: {torch.cuda.is_available()}")
    except Exception as e:
        # diagnostics only — never abort the run over a probe failure
        print(f"Device info probe failed: {e}")


def _load_feature_schema(args):
    """Load the optional dynamic-feature schema YAML.

    Returns the parsed object, or None when the path is unset, missing, empty,
    or unparsable (failure is logged, not raised).
    """
    path = getattr(args, "feature_schema", None)
    if path and os.path.exists(path):
        try:
            with open(path, "r", encoding="utf-8") as f:
                return yaml.safe_load(f) or None
        except Exception as e:
            print(f"Failed to load feature_schema from '{path}': {e}")
    return None


def _build_model(args, device, schema_obj, num_nodes):
    """Construct a DeepPA model from args.

    ``num_nodes`` is an explicit parameter (instead of args.num_nodes) so the
    export path can rebuild a model sized for a differently-shaped example
    batch without touching args.
    """
    return DeepPA(
        dropout=args.dropout,
        spatial_flag=args.spatial_flag,
        temporal_flag=args.temporal_flag,
        spatial_encoding=args.spatial_encoding,
        temporal_encoding=args.temporal_encoding,
        temporal_PE=args.temporal_PE,
        GCO=args.GCO,
        CLUSTER=args.CLUSTER,
        n_hidden=args.n_hidden,
        end_channels=args.n_hidden * 8,
        n_blocks=args.n_blocks,
        name=args.model_name,
        dataset=args.dataset,
        device=device,
        num_nodes=num_nodes,
        seq_len=args.seq_len,
        horizon=args.horizon,
        input_dim=args.input_dim,
        output_dim=args.output_dim,
        GCO_Thre=args.GCO_Thre,
        fe_transforms=(build_feature_transforms(getattr(args, "fe_transforms", "")) if getattr(args, "fe_enable", False) else []),
        feature_schema=schema_obj,
    )


def _load_node_groups(args):
    """Resolve node-group definitions for grouped evaluation.

    Order of attempts: explicit groups file (JSON or YAML by extension, with a
    YAML retry on parse failure), then auto-building from known CSV locations.
    Returns None when nothing could be loaded (grouped metrics are skipped).
    """
    node_groups = None
    groups_file = getattr(args, "groups_file", None)
    if groups_file:
        try:
            if os.path.exists(groups_file):
                with open(groups_file, "r", encoding="utf-8") as f:
                    if groups_file.endswith((".yaml", ".yml")):
                        node_groups = yaml.safe_load(f) or None
                    else:
                        node_groups = json.load(f)
        except Exception:
            # extension lied or JSON parse failed — retry as YAML, best effort
            try:
                with open(groups_file, "r", encoding="utf-8") as f:
                    node_groups = yaml.safe_load(f) or None
            except Exception:
                node_groups = None
    # fallback: auto-build from CSV if no groups file provided or failed to load
    if node_groups is None:
        csv_candidates = [
            os.path.join("./aux_data", "lots_location.csv"),
            os.path.join(args.datapath, "lots_location.csv"),
        ]
        for csv_path in csv_candidates:
            if os.path.exists(csv_path):
                # try labeled CSV builder first, then geo-based grouping
                node_groups = build_node_groups_from_csv(csv_path, num_nodes=args.num_nodes)
                if node_groups is None:
                    node_groups = build_geo_groups_from_csv(csv_path, num_nodes=args.num_nodes)
                if node_groups is not None:
                    print(f"Auto-built node groups from '{csv_path}': {len(node_groups)} groups")
                    break
    if node_groups is None:
        print("group_eval=True but no groups loaded; grouped metrics will be skipped.")
    return node_groups


def _load_export_weights(trainer, args):
    """Restore model weights before export: resume checkpoint first, otherwise
    best-model weights; both failures are tolerated (export may proceed with
    current weights)."""
    resume_path = getattr(args, "resume_from", None)
    if resume_path and os.path.exists(resume_path):
        try:
            trainer.load_checkpoint(resume_path)
        except Exception as e:
            print(f"Resume from checkpoint failed: {e}")
    else:
        # Try loading best weights; fallback silently
        try:
            trainer.load_best_model(-1, trainer.save_path, args.n_exp)
        except Exception:
            pass


def _get_example_input(trainer, data):
    """Fetch one example batch (inputs only) for tracing/export.

    Prefers the test loader so export works on small-scale (e.g. 8-node)
    datasets; moved to the trainer's device.
    """
    loader = data.get("test_loader") or data.get("val_loader")
    example_X = next(iter(loader))[0]
    return trainer._check_device(example_X)


def _maybe_rebuild_for_nodes(trainer, args, schema_obj, example_X):
    """Return a model whose node count matches ``example_X``.

    If the trained model's num_nodes disagrees with the example batch (axis 2
    — assumes input layout (batch, time, nodes, features); TODO confirm),
    rebuild a fresh model sized for the data and best-effort load the best
    checkpoint into it (shape mismatches are skipped via strict=False).
    """
    actual_nodes = int(example_X.shape[2])
    try:
        current_nodes = int(getattr(trainer.model, "num_nodes", actual_nodes))
    except Exception:
        current_nodes = actual_nodes
    if current_nodes == actual_nodes:
        return trainer.model
    print(f"Adjusting model num_nodes from {current_nodes} to {actual_nodes} for export.")
    new_model = _build_model(
        args,
        getattr(args, "device", None) or trainer.device,
        schema_obj,
        actual_nodes,
    )
    try:
        best_path = os.path.join(args.log_dir, f"best_model_{args.n_exp}.pt")
        if os.path.exists(best_path):
            sd = torch.load(best_path, map_location=trainer.device)
            new_model.load_state_dict(sd, strict=False)
    except Exception as e:
        print(f"Load best weights into new model skipped: {e}")
    return new_model


def _export_artifacts(args, model, example_X):
    """Export inference artifacts (TorchScript and/or ONNX) for ``model``.

    Order matters: ONNX export runs BEFORE dynamic quantization on purpose.
    ``quantize_dynamic`` calls ``model.cpu()`` which moves the model in place;
    in the original inline code that caused a device mismatch when a
    subsequent ONNX trace fed it a GPU-resident ``example_X``.
    """
    os.makedirs(args.log_dir, exist_ok=True)
    # TorchScript export (trace with a concrete example batch)
    if getattr(args, "export_torchscript", False):
        model.eval()
        ts_path = os.path.join(args.log_dir, f"deeppa_ts_{args.n_exp}.pt")
        traced = torch.jit.trace(model, example_X)
        traced.save(ts_path)
        print(f"TorchScript saved to {ts_path}")
        # optional freeze/optimize for inference -> save to separate file
        if getattr(args, "ts_freeze", False):
            try:
                ts_frozen_path = os.path.join(args.log_dir, f"deeppa_ts_frozen_{args.n_exp}.pt")
                frozen = torch.jit.freeze(traced)
                frozen.save(ts_frozen_path)
                print(f"TorchScript (frozen) saved to {ts_frozen_path} (freeze-only)")
            except Exception as e:
                print(f"TorchScript freeze/optimize skipped: {e}")
    # ONNX export — before quantization so the model is still on its device
    if getattr(args, "export_onnx", False):
        try:
            onnx_path = os.path.join(args.log_dir, f"deeppa_{args.n_exp}.onnx")
            model.eval()
            torch.onnx.export(
                model,
                example_X,
                onnx_path,
                export_params=True,
                opset_version=17,
                input_names=["X"],
                output_names=["Y"],
                dynamic_axes={"X": {0: "batch", 2: "nodes"}, "Y": {0: "batch", 2: "nodes"}},
            )
            print(f"ONNX saved to {onnx_path}")
        except Exception as e:
            print(f"ONNX export failed: {e}")
    # Optional dynamic quantization (CPU) — moves the model to CPU in place,
    # so this stays last. Gated on export_torchscript as in the original.
    if getattr(args, "export_torchscript", False) and getattr(args, "quantize_dynamic", False):
        try:
            q_model = torch.quantization.quantize_dynamic(model.cpu().eval(), {nn.Linear}, dtype=torch.qint8)
            q_example = example_X.cpu()
            q_traced = torch.jit.trace(q_model, q_example)
            q_path = os.path.join(args.log_dir, f"deeppa_ts_quant_{args.n_exp}.pt")
            q_traced.save(q_path)
            print(f"Quantized TorchScript saved to {q_path}")
            if getattr(args, "ts_freeze", False):
                try:
                    q_frozen_path = os.path.join(args.log_dir, f"deeppa_ts_quant_frozen_{args.n_exp}.pt")
                    q_frozen = torch.jit.freeze(q_traced)
                    q_frozen.save(q_frozen_path)
                    print(f"Quantized TorchScript (frozen) saved to {q_frozen_path} (freeze-only)")
                except Exception as e:
                    print(f"Quantized TorchScript freeze/optimize skipped: {e}")
        except Exception as e:
            print(f"Dynamic quantization export failed: {e}")


def main():
    """Entry point: build model/dataloader/trainer from CLI config, then run
    export-only, train, or evaluation flows, optionally exporting inference
    artifacts at the end."""
    args, _folder_name = get_config()

    device = check_device()
    _print_device_info(device)

    # optionally load the dynamic-feature schema
    schema_obj = _load_feature_schema(args)

    model = _build_model(args, device, schema_obj, args.num_nodes)
    print("Model created.")

    print("Loading dataloader. This may take a while...")
    data = get_dataloader(
        args.datapath,
        args.batch_size,
        args.output_dim,
        mode=getattr(args, "mode", "train"),
        num_workers=getattr(args, "num_workers", 0),
        pin_memory=getattr(args, "pin_memory", False),
        prefetch_factor=getattr(args, "prefetch_factor", 2),
        persistent_workers=getattr(args, "persistent_workers", False),
        aug_enable=getattr(args, "aug_enable", False),
        aug_noise_std=getattr(args, "aug_noise_std", 0.0),
        aug_mask_prob=getattr(args, "aug_mask_prob", 0.0),
        memmap_eval=getattr(args, "memmap_eval", False),
        limit_eval_samples=getattr(args, "limit_eval_samples", None),
        dq_enable=getattr(args, "dq_enable", False),
        dq_config=getattr(args, "dq_config", None),
        memmap_dir=getattr(args, "memmap_dir", None),
    )

    # load node group definitions if grouped evaluation is enabled
    node_groups = _load_node_groups(args) if getattr(args, "group_eval", False) else None

    # Parse mc_alphas from CLI (comma-separated -> list[float])
    try:
        mc_alphas = [float(x) for x in str(args.mc_alphas).split(",") if str(x).strip()]
    except Exception:
        mc_alphas = [0.8, 0.9, 0.95]

    trainer = DeepPA_Trainer(
        model=model,
        adj_mat=None,
        filter_type=args.filter_type,
        data=data,
        aug=args.aug,
        base_lr=args.base_lr,
        steps=args.steps,
        lr_decay_ratio=args.lr_decay_ratio,
        log_dir=args.log_dir,
        n_exp=args.n_exp,
        wandb_flag=args.wandb,
        wandb_mode=args.wandb_mode,
        wandb_project=args.wandb_project,
        wandb_run_name=args.wandb_run_name,
        wandb_dir=args.wandb_dir,
        save_iter=args.save_iter,
        clip_grad_value=args.max_grad_norm,
        max_epochs=args.max_epochs,
        patience=args.patience,
        device=device,
        weight_decay=args.weight_decay,
        optimizer=args.optimizer,
        lr_scheduler=args.lr_scheduler,
        onecycle_pct_start=args.onecycle_pct_start,
        onecycle_anneal_strategy=args.onecycle_anneal_strategy,
        onecycle_max_lr_multiplier=args.onecycle_max_lr_multiplier,
        amp=args.amp,
        accum_steps=args.accum_steps,
        resume_from=args.resume_from,
        auto_resume=args.auto_resume,
        group_eval=args.group_eval,
        node_groups=node_groups,
        mc_eval=args.mc_eval,
        mc_samples=args.mc_samples,
        mc_alphas=mc_alphas,
        export_mc_artifacts=args.export_mc_artifacts,
    )
    print("trainer..")

    # Early export-only short-circuit to avoid train/test when no weights
    if getattr(args, "only_export", False):
        try:
            _load_export_weights(trainer, args)
            example_X = _get_example_input(trainer, data)
            # Rebuild the model if its num_nodes disagrees with the example
            # input (e.g. exporting on a small-scale dataset), avoiding
            # dimension conflicts during tracing.
            model_to_export = _maybe_rebuild_for_nodes(trainer, args, schema_obj, example_X)
            _export_artifacts(args, model_to_export, example_X)
            print("only_export=True: skipping train/test.")
        except Exception as e:
            print(f"Export step error: {e}")
        return

    # Normal train/test flows
    if args.mode == "train":
        print("began training..")
        trainer.train()
        trainer.test(-1, "test")
    else:
        trainer.test(-1, args.mode)
        if args.save_preds:
            trainer.save_preds(-1)

    # Export inference artifacts after train/test if requested (no early return)
    try:
        if getattr(args, "export_torchscript", False) or getattr(args, "export_onnx", False):
            _load_export_weights(trainer, args)
            example_X = _get_example_input(trainer, data)
            _export_artifacts(args, trainer.model, example_X)
    except Exception as e:
        print(f"Export step error: {e}")

if __name__ == "__main__":
    # Single intra-op CPU thread — presumably to avoid CPU oversubscription
    # alongside dataloader workers / GPU work (TODO confirm intent).
    torch.set_num_threads(1)
    main()
