import json
import os
from dataclasses import dataclass, asdict
from datetime import datetime
from typing import Optional

import torch


@dataclass
class TrainingConfig:
    """Hyperparameters and run management for the fusion-VRP training pipeline.

    Call ``prepare_run()`` once before training: it resolves the compute
    device, creates a timestamped run directory under ``base_output_dir``,
    and writes a JSON snapshot of this config there.
    """

    # ---- Model / encoder params ----
    pos_input_dim: int = 2          # node feature dimension (2-D coordinates)
    pos_d_model: int = 64
    pos_nhead: int = 8
    pos_num_layers: int = 12
    pos_max_nodes: int = 100        # node coordinate range / max nodes per graph

    actor_input_dim: int = 64
    actor_hidden_dim: int = 128
    critic_input_dim: int = 64
    critic_hidden_dim: int = 128
    stop_num: int = 3  # 1 means no stop node; otherwise index of stop node in fused graph

    # ---- Training hyperparameters ----
    # Known modes include 'pointer' and 'hierarchical' (earlier revisions also
    # used 'Transformer' / 'a2c'); the trainer dispatches on this value.
    training_mode: str = 'pointer'
    num_nodes: int = 20
    epochs: int = 50
    lr_actor: float = 3e-4
    lr_critic: float = 3e-4
    lr_pos: float = 1e-4
    # Slightly lowered entropy weight for faster convergence while still
    # retaining some exploration.
    entropy_coef: float = 5e-4

    # ---- Hierarchical training hyperparameters ----
    # Number of outer iterations when hierarchical (kept small for smoke tests).
    outer_epochs: int = 5
    # Graphs per outer iteration; larger values give more data per outer pass
    # and steadier gradients, at the cost of GPU/host memory.
    middle_batch_size: int = 2
    # Decoder update steps per group (learning intensity on the same group).
    inner_steps: int = 1
    # Weight for the geometry alignment loss on the encoder; kept moderate so
    # it does not over-constrain the RL objective.
    geom_loss_weight: float = 0.5
    rl_loss_weight: float = 1.0      # weight for decoder policy gradient loss (inner loop)
    max_groups_per_graph: int = 0    # 0 means use all groups; otherwise limit per graph
    normalize_group_length: bool = True  # normalize route length by group size in hierarchical mode
    closed_center: tuple = (50.0, 50.0)  # depot / center coordinate for closed tours
    # Distance threshold for the fusion stage (larger -> merging continues more
    # easily; 1.0 keeps the original default behavior).
    dist_threshold: float = 0.1

    # ---- Pointer-Net TSP on groups (demo-style) ----
    use_pointer_for_tsp: bool = False  # disable pointer net branch for smoke hierarchical test
    pointer_lr: float = 1e-3
    pointer_pos_lr: float = 1e-3
    # Learning rate; was previously mis-annotated as ``int``.
    pointer_criticdecoder_lr: float = 1e-3
    pointer_epochs: int = 5
    pointer_middle_batch_size: int = 16
    pointer_temp_start: float = 1.0
    pointer_temp_end: float = 0.3
    pointer_grad_clip: float = 1.0
    pointer_train_use_two_opt_reward: bool = False
    two_opt_enabled: bool = False
    two_opt_max_iter: int = 200
    pointer_eval_attempts: int = 16

    # ---- Exact TSP solver ----
    # For small groups (n <= exact_tsp_threshold) use a Held-Karp exact solver
    # during evaluation; optionally also as a baseline during training.
    exact_tsp_threshold: int = 10
    use_exact_baseline_during_training: bool = True

    # ---- Supervised pretraining (behavior cloning) for Transformer decoder ----
    # Exact TSP solutions are the targets, teaching the decoder to output
    # optimal tours for small groups.
    transformer_supervised_pretrain: bool = True
    transformer_supervised_epochs: int = 30
    transformer_supervised_batch_size: int = 16
    transformer_supervised_lr: float = 1e-3
    transformer_supervised_max_group_size: int = 10

    # ---- Imitation loss for mixed training (RL + imitation) ----
    transformer_imitation_weight_start: float = 1.0  # imitation-loss weight at start of RL training
    transformer_imitation_weight_end: float = 0.1    # weight at end
    transformer_imitation_max_group_size: int = 10   # max group size for imitation loss

    # ---- Device control ----
    use_gpu: bool = False  # force CPU for smoke tests to avoid CUDA issues
    device: str = 'cpu'    # resolved in prepare_run()

    # ---- Output / run management ----
    base_output_dir: Optional[str] = None  # if None, defaults to runs/ next to this file
    run_dir: Optional[str] = None          # populated by prepare_run()

    def prepare_run(self) -> str:
        """Resolve device, create a timestamped run directory and snapshot config.

        Returns:
            Path of the newly created run directory (also stored in ``run_dir``).
        """
        if self.base_output_dir is None:
            # Place runs/ at the fusion_vrp/ level, next to this file.
            self.base_output_dir = os.path.join(os.path.dirname(__file__), 'runs')
        os.makedirs(self.base_output_dir, exist_ok=True)
        # Resolve device; fall back to CPU when CUDA is unavailable.
        self.device = 'cuda' if (self.use_gpu and torch.cuda.is_available()) else 'cpu'
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        self.run_dir = os.path.join(self.base_output_dir, timestamp)
        os.makedirs(self.run_dir, exist_ok=True)
        # Save a JSON snapshot of the full config for reproducibility.
        with open(os.path.join(self.run_dir, 'config.json'), 'w', encoding='utf-8') as f:
            json.dump(asdict(self), f, ensure_ascii=False, indent=2)
        return self.run_dir

    def save_metrics(self, losses, path_lengths) -> None:
        """Write training metrics to ``metrics.json`` inside the run directory.

        Args:
            losses: sequence of loss values (may be empty).
            path_lengths: sequence of per-epoch average path lengths (may be empty).

        Raises:
            RuntimeError: if ``prepare_run()`` has not been called yet.
        """
        if not self.run_dir:
            raise RuntimeError('Run directory not initialized. Call prepare_run() first.')
        metrics = {
            'losses': losses,
            'epoch_avg_path_lengths': path_lengths,
            # Empty sequences are tolerated and recorded as None.
            'final_loss': losses[-1] if losses else None,
            'best_loss': min(losses) if losses else None,
            'best_path_length': min(path_lengths) if path_lengths else None,
            'device': self.device,
        }
        with open(os.path.join(self.run_dir, 'metrics.json'), 'w', encoding='utf-8') as f:
            json.dump(metrics, f, ensure_ascii=False, indent=2)