import os
import math
from typing import List, Tuple

import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

from fusion_vrp.models import PointerNet
from fusion_vrp.core import inital_g


COORD_RANGE = 100.0  # assumed coordinate range consistent with graph generation


def normalize_coords(coords: torch.Tensor, coord_range: float = COORD_RANGE) -> torch.Tensor:
    """Map raw coordinates into the unit square by dividing by ``coord_range``."""
    return coords.div(coord_range)


def closed_length_with_depot(coords_aug: torch.Tensor, tour: torch.Tensor, depot_index: int = 0) -> torch.Tensor:
    """Length of the closed route depot -> tour[0] -> ... -> tour[-1] -> depot.

    Args:
        coords_aug: [N+1, 2] coordinates including the depot at ``depot_index``.
        tour: [N] long tensor of indices into ``coords_aug`` (assumed never to
            include the depot itself).
        depot_index: row of ``coords_aug`` holding the depot.

    Returns:
        Scalar tensor with the total Euclidean length; differentiable w.r.t.
        ``coords_aug``.

    Vectorized: builds the closed point sequence once and sums per-edge norms
    instead of looping edge-by-edge in Python. An empty ``tour`` now yields a
    length of 0 instead of raising an IndexError.
    """
    depot = coords_aug[depot_index].unsqueeze(0)            # [1, 2]
    path = torch.cat([depot, coords_aug[tour], depot], 0)   # [N+2, 2] closed sequence
    return torch.norm(path[1:] - path[:-1], dim=1).sum()


def two_opt_single(coords_np: np.ndarray, tour: np.ndarray, max_iter: int = 200) -> Tuple[np.ndarray, float]:
    """Classic 2-opt local search on a closed customer cycle (no depot).

    Args:
        coords_np: [N, 2] customer coordinates.
        tour: [N] permutation of customer indices giving the visiting order.
        max_iter: maximum number of full improvement sweeps.

    Returns:
        (improved tour, cycle length of the improved tour), where the length
        includes the closing edge tour[-1] -> tour[0].

    Fix vs. previous version: the first-edge index now runs to N-2
    (``range(1, N - 1)``) and the second-edge index up to N, so moves that
    touch the last customer or the cycle-closing edge are actually examined;
    before, those moves were unreachable even though the ``best[j % N]`` wrap
    was clearly intended to allow ``j == N``.
    """
    N = len(tour)
    # Full pairwise Euclidean distance matrix (O(N^2) memory, fine for group sizes here).
    diff_x = coords_np[:, 0][:, None] - coords_np[:, 0][None, :]
    diff_y = coords_np[:, 1][:, None] - coords_np[:, 1][None, :]
    dist_mat = np.sqrt(diff_x * diff_x + diff_y * diff_y)

    def length(t):
        # Cycle length: consecutive edges plus the closing edge t[-1] -> t[0].
        return sum(dist_mat[t[i], t[(i + 1) % N]] for i in range(N))

    best = tour.copy()
    best_len = length(best)
    improved = True
    it = 0
    while improved and it < max_iter:
        improved = False
        it += 1
        for i in range(1, N - 1):
            for j in range(i + 1, N + 1):
                if j - i == 1:
                    continue  # reversing a single node is a no-op
                # Removing edges (a,b) and (c,d); reversing best[i:j] replaces
                # them with (a,c) and (b,d).
                a, b = best[i - 1], best[i]
                c, d = best[j - 1], best[j % N]  # j == N wraps to the closing edge
                before = dist_mat[a, b] + dist_mat[c, d]
                after = dist_mat[a, c] + dist_mat[b, d]
                if after + 1e-9 < before:
                    best[i:j] = best[i:j][::-1]
                    best_len -= (before - after)  # incremental length update
                    improved = True
    return best, best_len


def linear_temp(epoch: int, total: int, start: float, end: float) -> float:
    """Linearly interpolate a temperature from ``start`` (epoch 1) to ``end`` (epoch ``total``)."""
    if total <= 1:
        # Degenerate schedule: a single epoch gets the final temperature.
        return end
    fraction = (epoch - 1) / (total - 1)
    return start + fraction * (end - start)


def pointer_train_on_groups(config, model: PointerNet, pos_encoder, criticDecoder):
    """Train PointerNet on VRP groups as closed tours from depot: center->customers->center.

    Uses REINFORCE with a greedy-rollout baseline; coordinates are normalized
    to [0, 1] by COORD_RANGE before being fed to the model.

    Args:
        config: run configuration; reads device, num_nodes, closed_center,
            run_dir and several optional ``pointer_*`` hyper-parameters.
        model: PointerNet actor, updated via REINFORCE.
        pos_encoder: node-position encoder passed to ``inital_g``. An optimizer
            is created for it, but its update step is currently commented out
            (see NOTE in step 4 below).
        criticDecoder: maps position embeddings to a scalar cost; trained
            supervised against the realized tour length.

    Returns:
        (loss_hist, len_hist): per-epoch mean REINFORCE loss and mean closed
        tour length of the *sampled* tours, scaled back to original units.
    """
    device = config.device
    model.to(device).train()
    # Separate Adam optimizers so each component can use its own learning rate.
    optim = torch.optim.Adam(model.parameters(), lr=getattr(config, 'pointer_lr', 1e-3))
    optim_posencoder = torch.optim.Adam(pos_encoder.parameters(), lr=getattr(config, 'pointer_pos_lr', 1e-3))
    optim_critic = torch.optim.Adam(criticDecoder.parameters(), lr=getattr(config, 'pointer_criticdecoder_lr', 1e-3))

    epochs = getattr(config, 'pointer_epochs', 100)
    temp_start = getattr(config, 'pointer_temp_start', 1.0)
    temp_end = getattr(config, 'pointer_temp_end', 0.3)
    mid_batch = getattr(config, 'pointer_middle_batch_size', 16)
    two_opt_reward = getattr(config, 'pointer_train_use_two_opt_reward', False)
    stop_num = getattr(config, 'stop_num', 3)
    # Depot ("center") position, normalized the same way as customer coords.
    center_xy = torch.tensor(config.closed_center, dtype=torch.float32, device=device)
    center_n = (center_xy / COORD_RANGE).to(device)

    loss_hist: List[float] = []
    len_hist: List[float] = []

    for epoch in range(1, epochs + 1):
        # Sampling temperature is annealed linearly from temp_start to temp_end.
        temp = linear_temp(epoch, epochs, temp_start, temp_end)
        losses = []
        lens = []
        for _ in range(mid_batch):
            # inital_g builds a fresh graph plus customer groupings each inner step.
            g, groups = inital_g(config.num_nodes, pos_encoder,stop_num)
            for group in groups:
                # Each group encodes node ids as powers of two; log2 recovers the id.
                node_ids = [int(math.log2(x)) for x in group]
                if not node_ids:
                    continue
                coords = torch.stack([g.ndata['pos'][i].float() for i in node_ids]).to(device)
                coords_n = normalize_coords(coords)  # [N,2]
                # NOTE: coords_n is normalized by COORD_RANGE (coords / COORD_RANGE).
                # closed_length_with_depot operates on coords_aug built from coords_n, so
                # the length returned by closed_length_with_depot is in normalized units (unitless in [0..sqrt(2)]),
                # therefore the code multiplies by COORD_RANGE when converting back to original coordinate units for logging.
                # compute coords with depot
                coords_aug = torch.cat([center_n.unsqueeze(0), coords_n], dim=0)  # [N+1,2], depot at 0

                # --- 1) run pointer model to sample route and compute true length (as before) ---
                tours, logp = model.decode_with_depot(coords_aug.unsqueeze(0), depot_index=0, greedy=False, temperature=temp)
                length = closed_length_with_depot(coords_aug, tours[0], depot_index=0)
                # Greedy rollout under no_grad serves as the REINFORCE baseline.
                with torch.no_grad():
                    greedy_tour, _ = model.decode_with_depot(coords_aug.unsqueeze(0), depot_index=0, greedy=True, temperature=max(temp * 0.5, 0.2))
                    baseline = closed_length_with_depot(coords_aug, greedy_tour[0], depot_index=0)

                if two_opt_reward:
                    # 2-opt on customers only (heuristic); compare closed length with depot afterwards
                    coords_np = coords_n.detach().cpu().numpy()
                    tour_np = tours[0].detach().cpu().numpy() - 1  # shift to [0..N-1] for customers-only indexing
                    # NOTE(review): clip guards against out-of-range indices, but if the
                    # decoded tour ever contains the depot (index 0) this silently maps
                    # it to customer 0 — confirm decode_with_depot never emits the depot.
                    tour_np = np.clip(tour_np, 0, coords_np.shape[0] - 1)
                    imp_tour, _ = two_opt_single(coords_np, tour_np, max_iter=getattr(config, 'two_opt_max_iter', 200))
                    imp_tour_tensor = torch.tensor((imp_tour + 1).tolist(), dtype=torch.long, device=device)  # back to [1..N]
                    best_len = closed_length_with_depot(coords_aug, imp_tour_tensor, depot_index=0)
                    length_eff = best_len
                else:
                    length_eff = length

                # --- 2) Pointer REINFORCE update (only update pointer model parameters) ---
                # adv = (length_eff - baseline).detach().unsqueeze(0)
                # adv = (adv - adv.mean()) / adv.std(unbiased=False).clamp_min(1e-6)

                raw_adv = (length_eff - baseline).detach().unsqueeze(0)  # shape [1]
                if raw_adv.numel() > 1:
                    # Standardize only when more than one sample is available.
                    adv = (raw_adv - raw_adv.mean()) / raw_adv.std(unbiased=False).clamp_min(1e-6)
                else:
                    adv = raw_adv  # keep raw advantage to preserve signal


                # Minimizing adv*logp lowers log-probability of tours worse than
                # the baseline (adv > 0) and raises it for better ones.
                loss = (adv * logp).mean()

                optim.zero_grad()
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), getattr(config, 'pointer_grad_clip', 1.0))
                optim.step()

                # start --- 3) Train criticDecoder supervised by true length ---
                # get position embeddings from pos_encoder (pos_encoder returns node-level embeddings)
                # coords_n is normalized coords [N,2]
                pos_emb = pos_encoder(coords_n)  # expected shape: [N, feat] — TODO confirm

                # critic predicts cost from embeddings; detach embeddings when training critic
                criterion = nn.MSELoss()
                pred_cost = criticDecoder(pos_emb.detach())
                # match shapes: pred_cost -> scalar or (1,), length_eff is scalar tensor
                target = length_eff.detach().unsqueeze(0)
                loss_crit = criterion(pred_cost.view_as(target), target)
                optim_critic.zero_grad()
                loss_crit.backward()
                optim_critic.step()

                # --- 4) Update pos_encoder by minimizing critic's predicted cost (allow grad through critic) ---
                pred_cost_for_actor = criticDecoder(pos_emb)
                # optional scaling factor for stability
                pos_loss_coef = getattr(config, 'pointer_pos_loss_coef', 1.0)
                loss_pos = pos_loss_coef * pred_cost_for_actor.mean()
                optim_posencoder.zero_grad()
                # loss_pos.backward()
                # optim_posencoder.step()
                # NOTE(review): backward/step above are commented out, so pos_encoder is
                # never updated here and loss_pos is computed but unused — confirm whether
                # this stage is intentionally disabled.

                # logging-----end
                losses.append(loss.item())
                # Logs the *sampled* tour length (not length_eff), rescaled to real units.
                lens.append((length * COORD_RANGE).item())  # scale back for logging
        loss_hist.append(float(np.mean(losses)) if losses else 0.0)
        len_hist.append(float(np.mean(lens)) if lens else 0.0)
        print(f"[Pointer VRP] Epoch {epoch}/{epochs} | Temp={temp:.2f} | Loss={loss_hist[-1]:.4f} | ClosedLen(avg)={len_hist[-1]:.2f}")

    # Persist only the actor weights; critic and pos_encoder are not checkpointed here.
    ckpt_dir = os.path.join(config.run_dir, 'checkpoints')
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(ckpt_dir, 'pointer_tsp.pth'))
    return loss_hist, len_hist


def pointer_eval_groups(config, model: PointerNet, pos_encoder, candidates_k: int = 16, apply_two_opt: bool = True):
    """Evaluate pointer model on VRP groups with closed tours from depot.

    For each group, decodes ``candidates_k`` candidate tours (the first greedy,
    the rest sampled), keeps the shortest, and optionally refines it with 2-opt.

    Args:
        config: run configuration (device, num_nodes, closed_center, two_opt_* options).
        model: trained PointerNet; put into eval mode here.
        pos_encoder: node-position encoder forwarded to ``inital_g``.
        candidates_k: number of decode attempts per group (min 1).
        apply_two_opt: whether to try 2-opt refinement (also gated by
            ``config.two_opt_enabled``).

    Returns:
        (g, groups, results) where results is a list of
        (route_original_node_ids, closed_length_in_real_units, goods_sum).
    """
    device = config.device
    model.to(device).eval()
    # Depot position normalized the same way as customer coordinates.
    center_xy = torch.tensor(config.closed_center, dtype=torch.float32, device=device)
    center_n = (center_xy / COORD_RANGE).to(device)

    # NOTE(review): unlike training, stop_num is not passed here — confirm
    # inital_g's default grouping matches the training setup.
    g, groups = inital_g(config.num_nodes, pos_encoder)
    results = []
    for group in groups:
        # Each group encodes node ids as powers of two; log2 recovers the id.
        node_ids = [int(math.log2(x)) for x in group]
        if not node_ids:
            continue
        coords = torch.stack([g.ndata['pos'][i].float() for i in node_ids]).to(device)
        coords_n = normalize_coords(coords)
        coords_aug = torch.cat([center_n.unsqueeze(0), coords_n], dim=0)
        best_len = float('inf')
        best_tour = None
        with torch.no_grad():
            # First attempt greedy, remaining attempts sampled; keep the shortest.
            for attempt in range(max(1, int(candidates_k))):
                greedy = (attempt == 0)
                tours, _ = model.decode_with_depot(coords_aug.unsqueeze(0), depot_index=0, greedy=greedy, temperature=getattr(config, 'pointer_temp_end', 0.3))
                length = closed_length_with_depot(coords_aug, tours[0], depot_index=0).item()
                if length < best_len:
                    best_len = length
                    best_tour = tours[0].cpu().numpy()
        final_len = best_len
        final_tour = best_tour
        if apply_two_opt and getattr(config, 'two_opt_enabled', True):
            coords_np = coords_n.detach().cpu().numpy()
            # Shift tour indices [1..N] to customers-only indexing [0..N-1].
            tour_np = (best_tour - 1).astype(int)
            tour_np = np.clip(tour_np, 0, coords_np.shape[0] - 1)
            improved_tour, _ = two_opt_single(coords_np, tour_np, getattr(config, 'two_opt_max_iter', 200))
            imp_tour_tensor = torch.tensor((improved_tour + 1).tolist(), dtype=torch.long, device=device)
            imp_len = closed_length_with_depot(coords_aug, imp_tour_tensor, depot_index=0).item()
            # Keep the 2-opt route only if it actually shortens the closed tour.
            if imp_len < final_len:
                final_len = imp_len
                final_tour = (improved_tour + 1).tolist()
        # Map tour positions (1-based, depot excluded) back to original node ids.
        route_original = [node_ids[i - 1] for i in final_tour]
        # compute total goods for this route (sum of g.ndata['goods'] for nodes in route)
        try:
            goods_sum = int(g.ndata['goods'][route_original].sum().item())
        except Exception:
            # fallback: compute by iterating
            goods_sum = int(sum(float(g.ndata['goods'][nid].item()) for nid in route_original))
        results.append((route_original, final_len * COORD_RANGE, goods_sum))
    return g, groups, results


def pointer_validate_and_plot(config, model: PointerNet, pos_encoder, candidates_k: int = 16, apply_two_opt: bool = True):
    """Run group evaluation and render every closed tour in one figure.

    Each route is drawn depot -> customers -> depot; all graph nodes appear in
    light gray for context. Returns (saved image path, results, graph).
    """
    g, groups, results = pointer_eval_groups(config, model, pos_encoder, candidates_k, apply_two_opt)
    depot_x, depot_y = config.closed_center
    fig, ax = plt.subplots(figsize=(8, 8))
    # Background layer: every node of the graph in gray, plus the depot marker.
    node_count = g.num_nodes()
    all_x = [float(g.ndata['pos'][idx][0].item()) for idx in range(node_count)]
    all_y = [float(g.ndata['pos'][idx][1].item()) for idx in range(node_count)]
    ax.scatter(all_x, all_y, c='lightgray', s=30)
    ax.scatter([depot_x], [depot_y], c='red', marker='*', s=120, label='Depot')
    # One polyline per group, closed through the depot at both ends.
    for gi, (route, plen, goods_sum) in enumerate(results):
        xs = [float(g.ndata['pos'][nid][0].item()) for nid in route]
        ys = [float(g.ndata['pos'][nid][1].item()) for nid in route]
        ax.plot([depot_x] + xs + [depot_x], [depot_y] + ys + [depot_y], '-o',
                label=f'G{gi} goods={goods_sum} len={plen:.1f}')
    ax.set_title('VRP Closed Tours by Pointer (Center -> Customers -> Center)')
    ax.legend()
    out_path = os.path.join(config.run_dir, 'pointerNet', 'pointer_groups.png')
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    fig.savefig(out_path)
    plt.close(fig)
    return out_path, results, g
