import math
import logging
import os
from typing import List, Tuple

import torch
from torch.distributions import Categorical
import torch.nn.functional as F

from fusion_vrp.core import inital_g
from fusion_vrp.evaluation import validate_vrp


def pairwise_dist(x: torch.Tensor) -> torch.Tensor:
    """Return the [N,N] Euclidean distance matrix for a point set x of shape [N,D]."""
    # Expand ||a-b||^2 = ||a||^2 + ||b||^2 - 2<a,b>; clamp tiny negatives that
    # floating-point cancellation can introduce, then add eps before the sqrt
    # so the gradient at zero distance stays finite.
    norms = x.pow(2).sum(dim=1, keepdim=True)  # [N,1]
    gram = x.matmul(x.t())
    squared = torch.clamp(norms + norms.t() - 2 * gram, min=0.0)
    return torch.sqrt(squared + 1e-9)


def geometry_alignment_loss(emb: torch.Tensor, coords: torch.Tensor) -> torch.Tensor:
    """MSE between max-normalized pairwise distances in embedding space and in
    coordinate space.

    emb: [N,D] embedding (gradients flow through it).
    coords: [N,2] coordinates (treated as constants — computed under no_grad).
    """

    def _pdist(pts: torch.Tensor) -> torch.Tensor:
        # Same squared-expansion distance as pairwise_dist, inlined here.
        sq = (pts * pts).sum(dim=1, keepdim=True)
        d2 = torch.clamp(sq + sq.t() - 2 * pts.matmul(pts.t()), min=0.0)
        return torch.sqrt(d2 + 1e-9)

    if emb.size(0) < 2:
        # Degenerate group: a graph-connected zero so backward() still works.
        return emb.sum() * 0.0

    emb_dist = _pdist(emb)
    with torch.no_grad():
        geo_dist = _pdist(coords)
        # Normalize geometry distances, guarding against an all-zero matrix.
        geo_target = geo_dist / geo_dist.max().clamp_min(1e-6)
    # Detach the scale so gradients act on relative, not absolute, distances.
    emb_scale = emb_dist.max().detach().clamp_min(1e-6)
    return F.mse_loss(emb_dist / emb_scale, geo_target)


def compute_closed_length(center_pos: torch.Tensor, coords: torch.Tensor, order: List[int]) -> float:
    """Length of the closed route center -> order[0] -> ... -> order[-1] -> center."""
    if not order:
        return 0.0
    # Materialize the full stop sequence (center at both ends), then sum
    # consecutive-pair Euclidean distances.
    stops = [center_pos] + [coords[i] for i in order] + [center_pos]
    return sum(torch.norm(a - b).item() for a, b in zip(stops, stops[1:]))


def cycle_length(coords: torch.Tensor, order: List[int]) -> float:
    """Standard closed TSP tour length over coords in the given visiting order.

    Includes the wrap-around edge from the last node back to the first, so no
    extra center segment needs to be appended by the caller.
    """
    if not order:
        return 0.0
    n = len(order)
    total = 0.0
    for i, cur in enumerate(order):
        nxt = order[(i + 1) % n]  # modulo closes the cycle at the last step
        total += torch.norm(coords[cur] - coords[nxt]).item()
    return total


def decode_sample(actor, emb: torch.Tensor, entropy_coef: float) -> Tuple[List[int], torch.Tensor, torch.Tensor]:
    """Sample a full permutation of the N nodes in emb using the actor policy.

    actor: callable (emb, mask) -> probability vector over nodes; entries whose
        mask is -inf must receive probability 0.
    emb: [N,D] node embeddings.
    entropy_coef: unused here (the caller applies entropy weighting to the
        returned entropy); kept for interface compatibility.

    Returns (path, logp, entropy):
        path: sampled visiting order, length N.
        logp: sum over steps of log pi_t(a_t) (scalar tensor), for the policy
            gradient term -(A * logp).
        entropy: mean per-step policy entropy (scalar tensor), for the entropy
            bonus that encourages exploration.
    """
    N = emb.size(0)
    # Robustness fix: with N == 0 the original fell through to
    # torch.stack([]) on log_probs, which raises. Return an empty path and
    # graph-connected zero scalars instead.
    if N == 0:
        zero = emb.sum() * 0.0
        return [], zero, zero
    mask = torch.zeros(N, device=emb.device)
    path: List[int] = []
    log_probs = []
    entropies = []
    for _ in range(N):
        # Per-step distribution pi_t over the not-yet-selected nodes; already
        # chosen nodes are masked to probability 0 by the actor.
        probs = actor(emb, mask)
        dist = Categorical(probs)
        a = dist.sample()  # stochastic choice drives exploration
        path.append(int(a.item()))
        log_probs.append(dist.log_prob(a))  # log pi_t(a_t) for the policy loss
        entropies.append(dist.entropy())    # H(pi_t) for entropy regularization
        # Mask the chosen node so later steps cannot pick it again.
        mask[a] = float('-inf')
    # Trajectory log-probability = sum of per-step log-probs.
    logp = torch.stack(log_probs).sum()
    # Average per-step entropy as the trajectory's entropy scalar.
    entropy = torch.stack(entropies).mean()
    return path, logp, entropy


def decode_greedy(actor, emb: torch.Tensor) -> List[int]:
    """Greedy decode: repeatedly pick the highest-probability unvisited node."""
    num_nodes = emb.size(0)
    mask = torch.zeros(num_nodes, device=emb.device)
    tour: List[int] = []
    for _ in range(num_nodes):
        scores = actor(emb, mask)
        choice = int(scores.argmax().item())
        tour.append(choice)
        # Exclude the chosen node from all subsequent steps.
        mask[choice] = float('-inf')
    return tour


def hierarchical_train(config, actor, critic, pos_encoder):
    """Hierarchical training (modified to joint A2C):
    - Actor (PointerNet) is used for sampling but kept frozen.
    - Critic and PositionEncoder are trained jointly using policy & value losses.
    - Geometry alignment loss is included as an auxiliary term to stabilize encoder learning.

    Args:
        config: experiment configuration; this function reads device, stop_num,
            closed_center, outer_epochs, middle_batch_size, num_nodes,
            max_groups_per_graph, inner_steps, entropy_coef,
            normalize_group_length, rl_loss_weight, geom_loss_weight, lr_pos,
            lr_critic, and calls config.prepare_run().
        actor: pointer-style policy, called as actor(emb, mask); frozen here.
        critic: value network, called as critic(emb) -> value tensor.
        pos_encoder: encoder mapping [N,2] coordinates to [N,D] embeddings.

    Returns: (decoder_loss_history, avg_closed_length_history)

    NOTE(review): both history lists are re-created at the start of every
    stop_n stage, so only the LAST stage's values are returned; and if
    config.stop_num < 1 the stage loop never runs and the final return raises
    NameError (histories undefined) — confirm both are intended.
    """
    device = config.device
    stop_num = getattr(config, 'stop_num', 3)

    # One full training stage per stop count (1..stop_num).
    for stop_n in range(1, stop_num+1):
        # Freeze actor parameters (we still use it for forward to compute logp)
        actor.to(device).eval()
        for p in actor.parameters():
            p.requires_grad = False

        # Optimizer updates pos_encoder and critic only
        optimizer = torch.optim.Adam([
            {'params': pos_encoder.parameters(), 'lr': config.lr_pos},
            {'params': critic.parameters(), 'lr': config.lr_critic}
        ])

        decoder_loss_hist = []
        closed_len_hist = []
        # Depot/center position, shared by every group's closed tour.
        center_tensor = torch.tensor(config.closed_center, dtype=torch.float32, device=device)

        for outer in range(max(1, config.outer_epochs)):
            total_closed_lengths = []
            total_policy_loss = 0.0
            total_value_loss = 0.0
            total_geom_loss = 0.0
            group_count = 0

            # Each middle-batch iteration samples a fresh graph and grouping.
            for m in range(max(1, config.middle_batch_size)):
                # leftover commented-out args: , getattr(config, 'stop_num', None), getattr(config, 'dist_threshold', None)
                g, groups = inital_g(config.num_nodes, pos_encoder, stop_n)
                if config.max_groups_per_graph > 0:
                    groups = groups[:config.max_groups_per_graph]

                for group in groups:
                    # gather coords for this group
                    coords_list = []
                    for idx_mask in group:
                        # NOTE(review): assumes every group entry is a one-hot
                        # bitmask (exact power of two) encoding a node index —
                        # confirm against inital_g; any other value would map
                        # to the wrong node via log2.
                        idx = int(math.log2(idx_mask))
                        coords_list.append(g.ndata['pos'][idx].float())
                    if len(coords_list) == 0:
                        continue
                    coords = torch.stack(coords_list).to(device)  # [N,2]
                    center = center_tensor.unsqueeze(0)
                    coords_aug = torch.cat([coords, center], dim=0)  # [N+1,2]

                    # Encode WITHOUT detaching so gradients can flow from policy/value losses to encoder
                    emb = pos_encoder(coords_aug)  # [N+1, D]

                    # Inner RL updates: sample with frozen actor but update critic+encoder
                    for _ in range(max(1, config.inner_steps)):
                        path, logp, entropy = decode_sample(actor, emb, config.entropy_coef)
                        cyc_len = cycle_length(coords_aug, path)
                        # Optionally normalize tour length by node count so groups
                        # of different sizes yield comparable rewards.
                        norm_len = cyc_len / (len(path)) if config.normalize_group_length else cyc_len
                        # Reward is the negative (possibly normalized) tour length.
                        reward = -norm_len
                        reward_t = torch.tensor(reward, dtype=torch.float32, device=device)

                        # Critic value (scalar)
                        value = critic(emb).squeeze()

                        # Advantage (detach value for policy term to avoid backprop through critic twice)
                        advantage = (reward_t - value.detach())

                        # normalize advantage across inner steps is trivial here; across batch will be done later
                        T = max(1, len(path))
                        logp_mean = logp / T

                        # Policy gradient with entropy bonus; value regression to the reward.
                        policy_loss = -(advantage * logp_mean) - config.entropy_coef * entropy
                        value_loss = (value - reward_t).pow(2)

                        # Geometry loss as auxiliary (on current embedding)
                        geom_loss = geometry_alignment_loss(emb, coords_aug)

                        # Weighted total loss (rl_loss_weight controls policy/value term weight; geom_loss_weight for geometry)
                        total_loss = config.rl_loss_weight * (
                                    policy_loss + value_loss) + config.geom_loss_weight * geom_loss

                        optimizer.zero_grad()
                        total_loss.backward()
                        # clip grads for both encoder and critic
                        torch.nn.utils.clip_grad_norm_(pos_encoder.parameters(), 1.0)
                        torch.nn.utils.clip_grad_norm_(critic.parameters(), 1.0)
                        optimizer.step()

                        total_policy_loss += policy_loss.item() if isinstance(policy_loss, torch.Tensor) else float(
                            policy_loss)
                        total_value_loss += value_loss.item() if isinstance(value_loss, torch.Tensor) else float(
                            value_loss)
                        total_geom_loss += geom_loss.item() if isinstance(geom_loss, torch.Tensor) else float(geom_loss)

                    # after inner updates, compute greedy closed length for monitoring
                    with torch.no_grad():
                        greedy_path = decode_greedy(actor, emb)
                    greedy_closed = cycle_length(coords_aug, greedy_path)
                    total_closed_lengths.append(greedy_closed)
                    group_count += 1

            # Summary per outer epoch
            avg_closed = sum(total_closed_lengths) / len(total_closed_lengths) if total_closed_lengths else 0.0
            avg_policy = total_policy_loss / max(1, group_count)
            avg_value = total_value_loss / max(1, group_count)
            avg_geom = total_geom_loss / max(1, group_count)

            decoder_loss_hist.append(avg_policy)
            closed_len_hist.append(avg_closed)
            logging.info(
                f"[Hierarchical-A2C] Outer {outer + 1}/{config.outer_epochs} | groups={group_count} | ClosedLen(avg)={avg_closed:.2f} | PolicyLoss(avg)={avg_policy:.4f} | ValueLoss(avg)={avg_value:.4f} | GeomLoss(avg)={avg_geom:.4f}")

        # Validate the hierarchically trained stage: checkpoint all three
        # modules, then run VRP validation on the saved checkpoints.
        run_dir = config.prepare_run()
        ckpt_dir = os.path.join(run_dir, 'hierarchical')
        os.makedirs(ckpt_dir, exist_ok=True)
        torch.save(pos_encoder.state_dict(), os.path.join(ckpt_dir, 'pos_encoder.pth'))
        torch.save(actor.state_dict(), os.path.join(ckpt_dir, 'actor.pth'))
        torch.save(critic.state_dict(), os.path.join(ckpt_dir, 'critic.pth'))

        validate_vrp(ckpt_dir, stop_n, num_nodes=config.num_nodes, visualize_each_group=False)
    return decoder_loss_hist, closed_len_hist




