import os
import math
import logging
import json
import torch
from matplotlib import cm
import matplotlib.pyplot as plt
from fusion_vrp.config import TrainingConfig
from fusion_vrp.models import PositionEncoder, ActorDecoder, CriticEncoder
from fusion_vrp.core import inital_g
from fusion_vrp.training.pipeline import compute_closed_path_length  # import closed tour length


def _load_run_config(save_dir):
    """Try to load config.json from the parent directory of save_dir.
    Returns a dict or None if not found/failed.
    """
    try:
        run_dir = os.path.dirname(save_dir)
        cfg_path = os.path.join(run_dir, 'config.json')
        if os.path.exists(cfg_path):
            with open(cfg_path, 'r', encoding='utf-8') as f:
                return json.load(f)
    except Exception as e:
        logging.warning(f"Failed to load run config: {e}")
    return None


def _resolve_checkpoint_dir(initial_save_dir: str, cfg: dict) -> str:
    """Prefer the provided save_dir; if it doesn't contain checkpoints and cfg has run_dir,
    fall back to cfg['run_dir']/checkpoints. Returns the resolved directory.
    """
    # Quick check for presence of any ckpt file
    def has_ckpts(d):
        return (
            os.path.exists(os.path.join(d, 'pos_encoder.pth')) or
            os.path.exists(os.path.join(d, 'actor.pth')) or
            os.path.exists(os.path.join(d, 'critic.pth'))
        )

    if initial_save_dir and has_ckpts(initial_save_dir):
        return initial_save_dir

    if cfg and isinstance(cfg.get('run_dir'), str):
        candidate = os.path.join(cfg['run_dir'], 'checkpoints')
        if has_ckpts(candidate):
            logging.info(f"Checkpoints not found in {initial_save_dir}; using {candidate} from run config")
            return candidate

    return initial_save_dir


def validate_vrp(save_dir, stop_n, num_nodes=20, visualize_each_group=False, center_pos=(50.0, 50.0)):
    """Validation aligned with CLOSED tour training logic.

    - Uses center embedding (as depot) at index 0 (never selected)
    - Greedy decoding chooses N customer nodes
    - Computes closed tour length: center -> path -> center
    - Visualization draws full closed loop

    Args:
        save_dir: directory expected to hold pos_encoder.pth / actor.pth /
            critic.pth; may be re-resolved via the run config's run_dir.
        stop_n: forwarded to inital_g (presumably the stop count per group —
            TODO confirm against fusion_vrp.core.inital_g).
        num_nodes: graph size; overridden by the run config when left at
            the default of 20.
        visualize_each_group: when True, save one PNG per group route.
        center_pos: (x, y) depot coordinates; overridden by the run
            config's 'closed_center' when left at the default.
    """
    # Try to match model hyperparameters with training by reading run config
    cfg = _load_run_config(save_dir)
    device = torch.device('cpu')  # keep validation on CPU
    # Defaults mirror the training-time architecture; overridden below if
    # the run config recorded different hyperparameters.
    pos_kwargs = dict(input_dim=2, d_model=64, nhead=4, num_layers=4, max_nodes=100)
    actor_kwargs = dict(input_dim=64, hidden_dim=128)
    critic_kwargs = dict(input_dim=64, hidden_dim=128)

    if cfg is not None:
        pos_kwargs.update(
            d_model=cfg.get('pos_d_model', pos_kwargs['d_model']),
            nhead=cfg.get('pos_nhead', pos_kwargs['nhead']),
            num_layers=cfg.get('pos_num_layers', pos_kwargs['num_layers']),
            max_nodes=cfg.get('pos_max_nodes', pos_kwargs['max_nodes']),
        )
        actor_kwargs.update(
            input_dim=cfg.get('actor_input_dim', actor_kwargs['input_dim']),
            hidden_dim=cfg.get('actor_hidden_dim', actor_kwargs['hidden_dim']),
        )
        critic_kwargs.update(
            input_dim=cfg.get('critic_input_dim', critic_kwargs['input_dim']),
            hidden_dim=cfg.get('critic_hidden_dim', critic_kwargs['hidden_dim']),
        )
        # If not explicitly set by caller, use the training center for consistency
        if center_pos == (50.0, 50.0) and 'closed_center' in cfg:
            center_pos = tuple(cfg['closed_center'])
        # Match validation graph size to training if caller left default
        if num_nodes == 20 and 'num_nodes' in cfg:
            num_nodes = int(cfg['num_nodes'])

    # Resolve the actual checkpoints directory (may fall back to run_dir/checkpoints)
    save_dir = _resolve_checkpoint_dir(save_dir, cfg)
    pos_path = os.path.join(save_dir, 'pos_encoder.pth')
    actor_path = os.path.join(save_dir, 'actor.pth')
    critic_path = os.path.join(save_dir, 'critic.pth')

    # Instantiate models with matched hyperparameters
    pos_encoder = PositionEncoder(
        input_dim=2,
        d_model=pos_kwargs['d_model'],
        nhead=pos_kwargs['nhead'],
        num_layers=pos_kwargs['num_layers'],
        max_nodes=pos_kwargs['max_nodes'],
    ).to(device)
    actor = ActorDecoder(actor_kwargs['input_dim'], actor_kwargs['hidden_dim']).to(device)
    # NOTE(review): the critic is loaded below but never used during greedy
    # validation — presumably kept so missing-weight problems surface here.
    critic = CriticEncoder(critic_kwargs['input_dim'], critic_kwargs['hidden_dim']).to(device)

    # Load weights from resolved checkpoint directory; missing files fall
    # back to random weights with a warning rather than aborting.
    if os.path.exists(pos_path):
        pos_encoder.load_state_dict(torch.load(pos_path, map_location=device))
    else:
        logging.warning(f"pos_encoder weights not found at {pos_path}; using random weights")
    if os.path.exists(actor_path):
        actor.load_state_dict(torch.load(actor_path, map_location=device))
    else:
        logging.warning(f"actor weights not found at {actor_path}; using random weights")
    if os.path.exists(critic_path):
        critic.load_state_dict(torch.load(critic_path, map_location=device))
    else:
        logging.warning(f"critic weights not found at {critic_path}; using random weights")

    # Build the validation graph and its node grouping.
    g, groups_indices = inital_g(num_nodes, pos_encoder, stop_n)

    all_routes = []  # list of (route_original_indices, closed_length)
    node_positions = [tuple(g.ndata['pos'][nid].cpu().numpy()) for nid in range(g.num_nodes())]

    center_tensor = torch.tensor(center_pos, dtype=torch.float32, device=device)
    center_x, center_y = center_pos

    for gi, group in enumerate(groups_indices):
        # Group entries appear to be one-hot bitmask values (2**idx);
        # log2 recovers the node index — TODO confirm against inital_g.
        node_idxs = [int(math.log2(x)) for x in group]
        if not node_idxs:
            continue
        group_nodes = [{'pos': g.ndata['pos'][idx]} for idx in node_idxs]
        coords = torch.stack([n['pos'] for n in group_nodes]).to(device)  # [N,2]

        with torch.no_grad():
            node_emb = pos_encoder(coords)
            center_emb = pos_encoder(center_tensor.unsqueeze(0))  # [1,D]
            # Depot embedding occupies row 0; customer nodes follow.
            enc = torch.cat([center_emb, node_emb], dim=0)  # [N+1,D]

        N = node_emb.size(0)
        # Additive mask: -inf entries are unselectable in the actor's output.
        selected_mask = torch.zeros(N+1, device=device)
        selected_mask[0] = float('-inf')  # depot not selectable
        path_offset = []  # store indices >=1
        for _ in range(N):
            with torch.no_grad():
                probs = actor(enc, selected_mask)
            # Greedy decoding: always take the highest-probability node.
            action = int(torch.argmax(probs).item())
            if action == 0:
                # fallback to second best (depot must never be chosen)
                topk = torch.topk(probs, k=2).indices.tolist()
                action = topk[1] if topk[0] == 0 else topk[0]
            path_offset.append(action)
            selected_mask[action] = float('-inf')

        path_local = [p - 1 for p in path_offset]  # indices into group_nodes
        route_original = [node_idxs[i] for i in path_local]
        # Closed tour: center -> visited nodes in order -> center.
        closed_len = compute_closed_path_length(group_nodes, path_local, center_pos=center_tensor)
        all_routes.append((route_original, closed_len))

        if visualize_each_group:
            fig, ax = plt.subplots()
            xs = [float(g.ndata['pos'][idx][0].item()) for idx in node_idxs]
            ys = [float(g.ndata['pos'][idx][1].item()) for idx in node_idxs]
            ax.scatter(xs, ys, c='gray')
            route_x = [g.ndata['pos'][idx][0].item() for idx in route_original]
            route_y = [g.ndata['pos'][idx][1].item() for idx in route_original]
            ax.scatter([center_x], [center_y], c='red', marker='*', s=120, label='Center')
            # Prepend/append the center so the plotted loop is closed.
            ax.plot([center_x] + route_x + [center_x], [center_y] + route_y + [center_y], '-o', label=f'Group {gi} closed={closed_len:.2f}')
            for i, idx in enumerate(node_idxs):
                ax.annotate(str(idx), (xs[i], ys[i]))
            ax.set_title(f'Group {gi} closed length {closed_len:.2f}')
            ax.legend()
            fig.savefig(os.path.join(save_dir, f'group_{gi}_closed_route.png'))
            plt.close(fig)

    # Combined closed-tour visualization
    fig, ax = plt.subplots(figsize=(8, 8))
    xs_all = [p[0] for p in node_positions]
    ys_all = [p[1] for p in node_positions]
    ax.scatter(xs_all, ys_all, c='lightgray', s=30)
    ax.scatter([center_x], [center_y], c='red', marker='*', s=120, label='Center')
    # FIX: matplotlib.cm.get_cmap was deprecated in 3.7 and removed in 3.9;
    # plt.get_cmap remains available across versions.
    cmap = plt.get_cmap('tab10')
    for i, (route, plen) in enumerate(all_routes):
        # tab10 has 10 colors; cycle through them per group.
        color = cmap(i % 10)
        route_x = [g.ndata['pos'][idx][0].item() for idx in route]
        route_y = [g.ndata['pos'][idx][1].item() for idx in route]
        ax.plot([center_x] + route_x + [center_x], [center_y] + route_y + [center_y], '-o', color=color, label=f'G{i} {plen:.1f}')
        for idx in route:
            ax.annotate(str(idx), (g.ndata['pos'][idx][0].item(), g.ndata['pos'][idx][1].item()))
    ax.legend()
    ax.set_title('Closed Tours (Validation)')
    combined_path = os.path.join(save_dir, 'test_routes_closed.png')
    fig.savefig(combined_path)
    plt.close(fig)
    logging.info(f'Closed tours visualization saved to {combined_path}')
