import math
import os
from typing import List, Tuple

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn import CrossEntropyLoss

from fusion_vrp.core import inital_g
from fusion_vrp.models import TransformerTSP
# Restored: `exact_tsp` is called throughout this module (train baseline,
# imitation targets, eval, supervised pretraining); without this import every
# call raised NameError and was silently swallowed.
from fusion_vrp.utils.exact_tsp import exact_tsp

# Side length of the square the raw node coordinates live in.
COORD_RANGE = 100.0


def normalize_coords(coords: torch.Tensor, coord_range: float = COORD_RANGE) -> torch.Tensor:
    """Scale raw coordinates down by *coord_range* (default: the global COORD_RANGE)."""
    return coords.div(coord_range)


def closed_length_with_depot(coords_aug: torch.Tensor, tour: torch.Tensor, depot_index: int = 0) -> torch.Tensor:
    """Length of the closed route depot -> tour[0] -> ... -> tour[-1] -> depot.

    Args:
        coords_aug: [M, 2] coordinates with the depot included as a row.
        tour: 1-D long tensor of indices into ``coords_aug`` (depot excluded).
        depot_index: row of ``coords_aug`` holding the depot (default 0).

    Returns:
        Scalar tensor; differentiable w.r.t. ``coords_aug``.

    Vectorized: builds the full depot-closed point sequence once and sums the
    consecutive segment norms, instead of a Python loop over edges.
    """
    depot = coords_aug[depot_index].unsqueeze(0)  # [1, 2]
    route = torch.cat([depot, coords_aug[tour], depot], dim=0)  # [T+2, 2]
    return torch.norm(route[1:] - route[:-1], dim=1).sum()


def two_opt_single(coords_np: np.ndarray, tour: np.ndarray, max_iter: int = 200) -> Tuple[np.ndarray, float]:
    """Refine a cyclic tour by repeated 2-opt segment reversals.

    Args:
        coords_np: [N, 2] (at least) array; only the first two columns are used.
        tour: permutation of node indices describing the cyclic tour.
        max_iter: cap on full improvement sweeps.

    Returns:
        (improved_tour, cyclic_length) — the tour array is a modified copy.
    """
    n = len(tour)
    # Pairwise Euclidean distance matrix, built once via broadcasting.
    xy = coords_np[:, :2]
    delta = xy[:, None, :] - xy[None, :, :]
    dmat = np.sqrt((delta * delta).sum(axis=-1))

    def cycle_len(t):
        return sum(dmat[t[k], t[(k + 1) % n]] for k in range(n))

    current = tour.copy()
    cur_len = cycle_len(current)
    sweep, changed = 0, True
    while changed and sweep < max_iter:
        changed = False
        sweep += 1
        for i in range(1, n - 2):
            for j in range(i + 1, n):
                if j == i + 1:
                    continue  # reversing a length-1 segment is a no-op
                p, q = current[i - 1], current[i]
                r, s = current[j - 1], current[j % n]
                old_cost = dmat[p, q] + dmat[r, s]
                new_cost = dmat[p, r] + dmat[q, s]
                if new_cost + 1e-9 < old_cost:
                    # Reverse the segment and patch the length incrementally.
                    current[i:j] = current[i:j][::-1]
                    cur_len -= old_cost - new_cost
                    changed = True
    return current, cur_len


def linear_temp(epoch: int, total: int, start: float, end: float) -> float:
    """Linear schedule: returns *start* at epoch 1 and *end* at epoch *total*."""
    if total <= 1:
        return end
    frac = (epoch - 1) / (total - 1)
    return (1.0 - frac) * start + frac * end


def transformer_train_on_groups(config, model: TransformerTSP, bert, criticDecoder):
    """Train TransformerTSP on VRP groups (isolated trainer separate from pointer trainer).

    Per group this runs:
      * REINFORCE on a sampled tour, using an exact-TSP baseline for small
        groups (when the solver is available) and a greedy decode otherwise;
      * an optional teacher-forced imitation (cross-entropy) loss toward the
        exact tour for small groups, annealed by ``imitation_weight``;
      * a critic update regressing predicted cost onto the achieved length,
        plus a position-encoder (`bert`) update driven by the critic.

    Returns ``(loss_hist, len_hist)``: per-epoch mean loss and mean closed
    tour length (in raw coordinate units).
    """
    device = config.device
    model.to(device).train()
    # Optionally run supervised pretraining (behavior cloning) to teach model to emit exact tours
    if getattr(config, 'transformer_supervised_pretrain', False):
        transformer_supervised_pretrain(config, model, bert)
        # ensure model still on device and in train mode
        model.to(device).train()
    # Renamed from `optim` so the `torch.optim as optim` module import is not shadowed.
    optimizer = torch.optim.Adam(model.parameters(), lr=getattr(config, 'transformer_lr', 1e-3))
    optim_posencoder = torch.optim.Adam(bert.parameters(), lr=getattr(config, 'transformer_pos_lr', 1e-3))
    optim_critic = torch.optim.Adam(criticDecoder.parameters(), lr=getattr(config, 'transformer_criticdecoder_lr', 1e-3))
    critic_criterion = nn.MSELoss()  # hoisted: no need to rebuild per group

    epochs = getattr(config, 'epochs', 100)
    temp_start = getattr(config, 'transformer_temp_start', 1.0)
    temp_end = getattr(config, 'transformer_temp_end', 0.3)
    mid_batch = getattr(config, 'transformer_middle_batch_size', 16)
    two_opt_reward = getattr(config, 'transformer_train_use_two_opt_reward', False)
    stop_num = getattr(config, 'stop_num', 3)
    center_xy = torch.tensor(config.closed_center, dtype=torch.float32, device=device)
    center_n = (center_xy / COORD_RANGE).to(device)

    # Imitation loss parameters
    imitation_weight_start = getattr(config, 'transformer_imitation_weight_start', 1.0)
    imitation_weight_end = getattr(config, 'transformer_imitation_weight_end', 0.1)
    imitation_max_group_size = getattr(config, 'transformer_imitation_max_group_size', 10)
    loss_fn = CrossEntropyLoss()

    loss_hist: List[float] = []
    len_hist: List[float] = []

    for epoch in range(1, epochs + 1):
        temp = linear_temp(epoch, epochs, temp_start, temp_end)
        imitation_weight = linear_temp(epoch, epochs, imitation_weight_start, imitation_weight_end)
        losses = []
        lens = []
        for _ in range(mid_batch):
            g, groups = inital_g(config.num_nodes, bert, stop_num)
            for group in groups:
                # Group members appear to be power-of-two encodings of node ids
                # (log2 recovers the id) — TODO confirm against `inital_g`.
                node_ids = [int(math.log2(x)) for x in group]
                if not node_ids:
                    continue
                coords = torch.stack([g.ndata['pos'][i].float() for i in node_ids]).to(device)
                coords_n = normalize_coords(coords)  # [N,2]
                coords_aug = torch.cat([center_n.unsqueeze(0), coords_n], dim=0)  # [N+1,2]

                tours, logp = model.decode_with_depot(coords_aug.unsqueeze(0), depot_index=0, greedy=False, temperature=temp)
                length = closed_length_with_depot(coords_aug, tours[0], depot_index=0)
                # Optionally use exact TSP as baseline for small groups to provide a stronger signal
                exact_threshold = int(getattr(config, 'exact_tsp_threshold', 10))
                use_exact_baseline = bool(getattr(config, 'use_exact_baseline_during_training', True))
                baseline = None
                with torch.no_grad():
                    if use_exact_baseline and coords_n.size(0) <= exact_threshold:
                        # NOTE(review): relies on `exact_tsp` being importable; any
                        # failure (including a missing import) silently falls back
                        # to the greedy baseline below.
                        try:
                            coords_np = coords_n.detach().cpu().numpy()
                            depot_np = center_n.detach().cpu().numpy()
                            _, exact_cost = exact_tsp(coords_np, depot=depot_np)
                            baseline = torch.tensor(float(exact_cost), dtype=length.dtype, device=device)
                        except Exception:
                            baseline = None
                    if baseline is None:
                        greedy_tour, _ = model.decode_with_depot(coords_aug.unsqueeze(0), depot_index=0, greedy=True, temperature=max(temp * 0.5, 0.2))
                        baseline = closed_length_with_depot(coords_aug, greedy_tour[0], depot_index=0)

                if two_opt_reward:
                    # Reward shaping: score the sampled tour by its 2-opt-improved length.
                    coords_np = coords_n.detach().cpu().numpy()
                    tour_np = tours[0].detach().cpu().numpy() - 1
                    tour_np = np.clip(tour_np, 0, coords_np.shape[0] - 1)
                    imp_tour, _ = two_opt_single(coords_np, tour_np, max_iter=getattr(config, 'two_opt_max_iter', 200))
                    imp_tour_tensor = torch.tensor((imp_tour + 1).tolist(), dtype=torch.long, device=device)
                    best_len = closed_length_with_depot(coords_aug, imp_tour_tensor, depot_index=0)
                    length_eff = best_len
                else:
                    length_eff = length

                raw_adv = (length_eff - baseline).detach().unsqueeze(0)
                if raw_adv.numel() > 1:
                    # Standardize advantages. With one sample per update this
                    # branch is currently never taken.
                    adv = (raw_adv - raw_adv.mean()) / raw_adv.std(unbiased=False).clamp_min(1e-6)
                else:
                    adv = raw_adv

                # REINFORCE: positive advantage (worse than baseline) pushes log-prob down.
                rl_loss = (adv * logp).mean()

                # Compute imitation loss for small groups
                imitation_loss = torch.tensor(0.0, device=device)
                if coords_n.size(0) <= imitation_max_group_size:
                    try:
                        coords_np = coords_n.detach().cpu().numpy()
                        depot_np = center_n.detach().cpu().numpy()
                        perm, _ = exact_tsp(coords_np, depot=depot_np)
                        num_cust = coords_n.size(0)
                        # Prepare encoding
                        enc = model.forward(coords_aug.unsqueeze(0).to(device))  # [1, L, d]
                        enc_cust = enc[:, 1:, :]
                        # Initial query: depot embedding
                        query = enc[:, 0, :]
                        selected_mask = torch.zeros((1, num_cust), dtype=torch.bool, device=device)
                        per_step_losses = []
                        for t in range(num_cust):
                            scores = model._score(enc_cust, query, mask=selected_mask.bool(), temperature=1.0)  # [1, num_cust]
                            target_idx = torch.tensor([int(perm[t])], dtype=torch.long, device=device)
                            step_loss = loss_fn(scores, target_idx)
                            per_step_losses.append(step_loss)
                            # Teacher forcing: update query to embedding of true chosen customer
                            chosen_idx = int(perm[t])
                            one_hot = torch.zeros_like(selected_mask)
                            one_hot[0, chosen_idx] = True
                            selected_mask = selected_mask | one_hot
                            query = enc_cust.gather(1, torch.tensor([[chosen_idx]], device=device).view(1, 1, 1).expand(1, 1, enc_cust.size(-1))).squeeze(1)
                        imitation_loss = torch.stack(per_step_losses).sum()
                    except Exception:
                        # Best-effort: a missing/failed exact solver just disables
                        # imitation for this group.
                        imitation_loss = torch.tensor(0.0, device=device)

                total_loss = rl_loss + imitation_weight * imitation_loss

                optimizer.zero_grad()
                total_loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), getattr(config, 'transformer_grad_clip', 1.0))
                optimizer.step()

                # Critic: regress predicted cost (from detached embeddings) onto
                # the achieved tour length.
                pos_emb = bert(coords_n)
                pred_cost = criticDecoder(pos_emb.detach())
                target = length_eff.detach().unsqueeze(0)
                loss_crit = critic_criterion(pred_cost.view_as(target), target)
                optim_critic.zero_grad()
                loss_crit.backward()
                optim_critic.step()

                # Position-encoder update. BUG FIX: the original computed this loss
                # and zeroed the gradients but never called backward()/step(), so
                # `bert` received no updates from this trainer. (This backward also
                # deposits grads in the critic; they are cleared by the critic's
                # zero_grad() on the next group before its own step.)
                pred_cost_for_actor = criticDecoder(pos_emb)
                pos_loss_coef = getattr(config, 'transformer_pos_loss_coef', 1.0)
                loss_pos = pos_loss_coef * pred_cost_for_actor.mean()
                optim_posencoder.zero_grad()
                loss_pos.backward()
                optim_posencoder.step()

                losses.append(total_loss.item())
                lens.append((length * COORD_RANGE).item())
        loss_hist.append(float(np.mean(losses)) if losses else 0.0)
        len_hist.append(float(np.mean(lens)) if lens else 0.0)
        print(f"[Transformer VRP] Epoch {epoch}/{epochs} | Temp={temp:.2f} | ImitWeight={imitation_weight:.2f} | Loss={loss_hist[-1]:.4f} | ClosedLen(avg)={len_hist[-1]:.2f}")

    ckpt_dir = os.path.join(config.run_dir, 'checkpoints')
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(ckpt_dir, 'transformer_tsp.pth'))
    return loss_hist, len_hist


def transformer_eval_groups(config, model: TransformerTSP, bert, candidates_k: int = 16, apply_two_opt: bool = True):
    """Evaluate the transformer on freshly sampled VRP groups.

    For each group this decodes a closed depot tour: small groups (at most
    ``config.exact_tsp_threshold`` customers) go to the exact solver when it is
    available; everything else takes the best of ``candidates_k`` decodes
    (first greedy, rest sampled) with optional 2-opt refinement.

    Returns:
        ``(g, groups, results)`` where each entry of ``results`` is
        ``(route_node_ids, tour_length_in_raw_coords, goods_sum)``.
    """
    device = config.device
    model.to(device).eval()
    center_xy = torch.tensor(config.closed_center, dtype=torch.float32, device=device)
    # Depot position normalized to the same scale as the customer coordinates.
    center_n = (center_xy / COORD_RANGE).to(device)

    g, groups = inital_g(config.num_nodes, bert)
    results = []
    for group in groups:
        # Group members appear to be power-of-two encodings of node ids
        # (log2 recovers the id) — TODO confirm against `inital_g`.
        node_ids = [int(math.log2(x)) for x in group]
        if not node_ids:
            continue
        coords = torch.stack([g.ndata['pos'][i].float() for i in node_ids]).to(device)
        coords_n = normalize_coords(coords)
        # coords_aug places the depot at index 0; decoder indices are offset by +1.
        coords_aug = torch.cat([center_n.unsqueeze(0), coords_n], dim=0)
        # Try exact solver for small groups when enabled
        exact_threshold = int(getattr(config, 'exact_tsp_threshold', 10))
        n_customers = coords_n.size(0)
        if n_customers <= exact_threshold:
            # NOTE(review): relies on `exact_tsp` being importable; any failure
            # (including a missing import) falls through to the decoder below.
            try:
                coords_np = coords_n.detach().cpu().numpy()
                depot_np = center_n.detach().cpu().numpy()
                perm, cost = exact_tsp(coords_np, depot=depot_np)
                # perm: customer indices 0..n-1 -> we convert to coords_aug indices by adding 1
                final_tour = [int(p + 1) for p in perm]
                final_len = float(cost)
                # skip candidate sampling and two-opt since this is exact
                route_original = [node_ids[i - 1] for i in final_tour]
                try:
                    goods_sum = int(g.ndata['goods'][route_original].sum().item())
                except Exception:
                    # Fallback when fancy-indexing the node data is unsupported.
                    goods_sum = int(sum(float(g.ndata['goods'][nid].item()) for nid in route_original))
                results.append((route_original, final_len * COORD_RANGE, goods_sum))
                continue
            except Exception:
                # fall back to approximate decoder if exact solver fails unexpectedly
                pass
        best_len = float('inf')
        best_tour = None
        with torch.no_grad():
            # Keep the best of `candidates_k` decodes; attempt 0 is greedy, the
            # rest sample at the final (low) training temperature.
            for attempt in range(max(1, int(candidates_k))):
                greedy = (attempt == 0)
                tours, _ = model.decode_with_depot(coords_aug.unsqueeze(0), depot_index=0, greedy=greedy, temperature=getattr(config, 'transformer_temp_end', 0.3))
                length = closed_length_with_depot(coords_aug, tours[0], depot_index=0).item()
                if length < best_len:
                    best_len = length
                    best_tour = tours[0].cpu().numpy()
        final_len = best_len
        final_tour = best_tour
        if apply_two_opt and getattr(config, 'two_opt_enabled', True):
            coords_np = coords_n.detach().cpu().numpy()
            # Shift decoder indices (1..N, depot at 0) down to customer indices 0..N-1.
            tour_np = (best_tour - 1).astype(int)
            tour_np = np.clip(tour_np, 0, coords_np.shape[0] - 1)
            improved_tour, _ = two_opt_single(coords_np, tour_np, getattr(config, 'two_opt_max_iter', 200))
            imp_tour_tensor = torch.tensor((improved_tour + 1).tolist(), dtype=torch.long, device=device)
            # Re-measure with the depot closure; keep the 2-opt tour only if shorter.
            imp_len = closed_length_with_depot(coords_aug, imp_tour_tensor, depot_index=0).item()
            if imp_len < final_len:
                final_len = imp_len
                final_tour = (improved_tour + 1).tolist()
        route_original = [node_ids[i - 1] for i in final_tour]
        try:
            goods_sum = int(g.ndata['goods'][route_original].sum().item())
        except Exception:
            goods_sum = int(sum(float(g.ndata['goods'][nid].item()) for nid in route_original))
        results.append((route_original, final_len * COORD_RANGE, goods_sum))
    return g, groups, results


def transformer_validate_and_plot(config, model: TransformerTSP, bert, candidates_k: int = 16, apply_two_opt: bool = True):
    """Evaluate grouped tours and render them as closed depot routes.

    Saves a PNG under ``<run_dir>/transformer/`` and returns
    ``(png_path, results, g)``.
    """
    g, groups, results = transformer_eval_groups(config, model, bert, candidates_k, apply_two_opt)
    cx, cy = config.closed_center
    fig, ax = plt.subplots(figsize=(8, 8))
    # Background layer: every node in light gray, depot as a red star.
    all_x, all_y = [], []
    for node in range(g.num_nodes()):
        all_x.append(float(g.ndata['pos'][node][0].item()))
        all_y.append(float(g.ndata['pos'][node][1].item()))
    ax.scatter(all_x, all_y, c='lightgray', s=30)
    ax.scatter([cx], [cy], c='red', marker='*', s=120, label='Depot')
    # One closed polyline per group: depot -> customers -> depot.
    for gi, (route, plen, goods_sum) in enumerate(results):
        xs = [float(g.ndata['pos'][nid][0].item()) for nid in route]
        ys = [float(g.ndata['pos'][nid][1].item()) for nid in route]
        ax.plot([cx] + xs + [cx], [cy] + ys + [cy], '-o', label=f'G{gi} goods={goods_sum} len={plen:.1f}')
    ax.set_title('VRP Closed Tours by Transformer (Center -> Customers -> Center)')
    ax.legend()
    out_path = os.path.join(config.run_dir, 'transformer', 'transformer_groups.png')
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    fig.savefig(out_path)
    plt.close(fig)
    return out_path, results, g


def transformer_supervised_pretrain(config, model: TransformerTSP, bert):
    """Behavior cloning pretraining for Transformer decoder using exact TSP targets.

    This function samples groups from `inital_g`, computes exact TSP permutations for
    small groups (<= transformer_supervised_max_group_size), and trains the transformer's
    decoder with teacher forcing using CrossEntropy loss on per-step scores.
    """
    device = config.device
    model.to(device).train()
    optim_bc = optim.Adam(model.parameters(), lr=getattr(config, 'transformer_supervised_lr', 1e-3))
    loss_fn = CrossEntropyLoss()
    epochs = int(getattr(config, 'transformer_supervised_epochs', 30))
    batch_size = int(getattr(config, 'transformer_supervised_batch_size', 16))
    max_group_size = int(getattr(config, 'transformer_supervised_max_group_size', 10))
    # BUG FIX: cap the number of graph draws per epoch. Without it the
    # accumulation loop below spins forever whenever no group ever yields a
    # sample (e.g. `exact_tsp` unavailable, or every group larger than
    # `max_group_size`).
    max_draws = int(getattr(config, 'transformer_supervised_max_draws', 8 * batch_size))

    center_xy = torch.tensor(config.closed_center, dtype=torch.float32, device=device)
    center_n = (center_xy / COORD_RANGE).to(device)

    for epoch in range(1, max(1, epochs) + 1):
        batch_losses = []
        samples = 0
        draws = 0
        # accumulate batch_size examples, each may come from different graphs/groups
        while samples < batch_size and draws < max_draws:
            draws += 1
            g, groups = inital_g(config.num_nodes, bert)
            for group in groups:
                # Group members appear to be power-of-two encodings of node ids
                # (log2 recovers the id) — TODO confirm against `inital_g`.
                node_ids = [int(math.log2(x)) for x in group]
                if not node_ids:
                    continue
                if len(node_ids) > max_group_size:
                    continue
                coords = torch.stack([g.ndata['pos'][i].float() for i in node_ids]).to(device)
                coords_n = normalize_coords(coords)
                coords_aug = torch.cat([center_n.unsqueeze(0), coords_n], dim=0)  # [N+1,2]

                # compute exact permutation on normalized coords
                coords_np = coords_n.detach().cpu().numpy()
                depot_np = center_n.detach().cpu().numpy()
                try:
                    perm, _ = exact_tsp(coords_np, depot=depot_np)
                except Exception:
                    # Best-effort: solver failure (or missing import) skips the group.
                    continue
                num_cust = coords_n.size(0)
                # prepare encoding
                enc = model.forward(coords_aug.unsqueeze(0).to(device))  # [1, L, d]
                enc_cust = enc[:, 1:, :]  # customer embeddings (depot row 0 excluded)
                # initial query: depot embedding
                query = enc[:, 0, :]
                selected_mask = torch.zeros((1, num_cust), dtype=torch.bool, device=device)

                per_example_losses = []
                for t in range(num_cust):
                    scores = model._score(enc_cust, query, mask=selected_mask, temperature=1.0)  # [1, num_cust]
                    target_idx = torch.tensor([int(perm[t])], dtype=torch.long, device=device)
                    per_example_losses.append(loss_fn(scores, target_idx))
                    # teacher forcing: move query and mask to the ground-truth customer
                    chosen_idx = int(perm[t])
                    selected_mask = selected_mask.clone()
                    selected_mask[0, chosen_idx] = True
                    # Direct indexing replaces the original gather/expand/squeeze
                    # dance; both yield the [1, d] embedding of the chosen customer.
                    query = enc_cust[:, chosen_idx, :]
                if per_example_losses:
                    total_loss = torch.stack(per_example_losses).sum()
                    batch_losses.append(total_loss)
                    samples += 1
                if samples >= batch_size:
                    break
        if batch_losses:
            batch_loss = torch.stack(batch_losses).mean()
            optim_bc.zero_grad()
            batch_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), getattr(config, 'transformer_grad_clip', 1.0))
            optim_bc.step()
            print(f"[Supervised Pretrain] Epoch {epoch}/{epochs} | BatchLoss={float(batch_loss):.6f}")
        else:
            print(f"[Supervised Pretrain] Epoch {epoch}/{epochs} | No eligible samples found")
    return
