import math
import os
import logging
import torch
import torch.optim as optim
from torch.distributions import Categorical

from fusion_vrp.core.graph import inital_g


def compute_closed_path_length(group, path, center_pos=None):
    """Closed tour length: center -> first -> ... -> last -> center.

    group: list of {'pos': tensor-or-sequence}; path: indices into group
    (the center/depot is not part of ``path``). When ``center_pos`` is None
    only the node-to-node legs are summed (open route).
    """
    if not path:
        return 0.0

    def _as_tensor(value):
        # Coordinates may arrive as raw lists/tuples; promote them to tensors.
        if hasattr(value, 'shape'):
            return value
        return torch.tensor(value, dtype=torch.float32)

    # Build the full stop sequence; bracket with the depot if one was given.
    stops = [_as_tensor(group[i]['pos']) for i in path]
    if center_pos is not None:
        depot = _as_tensor(center_pos)
        stops = [depot] + stops + [depot]

    # Sum Euclidean leg lengths between consecutive stops.
    length = 0.0
    for a, b in zip(stops, stops[1:]):
        length += torch.norm(a - b).item()
    return length


def compute_path_length(group, path, center_pos=None):
    """Route length with optional depot legs at both ends.

    Replicates compute_path_length from original main.py (logic unchanged):
    sums consecutive node-to-node distances along ``path`` and, when
    ``center_pos`` is given, adds the center->first and last->center legs.
    """
    if not path:
        return 0.0

    def _coerce(p):
        # Accept either ready tensors or plain coordinate sequences.
        return p if hasattr(p, 'shape') else torch.tensor(p, dtype=torch.float32)

    stops = [_coerce(group[idx]['pos']) for idx in path]

    total = 0.0
    total += sum(
        float(torch.norm(stops[k] - stops[k + 1]).item())
        for k in range(len(stops) - 1)
    )

    if center_pos is not None:
        depot = _coerce(center_pos)
        total += float(torch.norm(depot - stops[0]).item())
        total += float(torch.norm(stops[-1] - depot).item())

    return total


def train_a2c(config, actor, critic, pos_encoder):
    """A2C training loop: closed-tour objective, batched loss aggregation,
    depot (center) embedding as context, and advantage normalization.

    Args:
        config: object exposing lr_actor, lr_critic, lr_pos, device, epochs,
            num_nodes and entropy_coef.
        actor: policy network; called as actor(enc, mask) -> action probabilities.
        critic: value network; called as critic(enc) -> scalar value estimate.
        pos_encoder: maps [K, 2] coordinates to [K, D] embeddings.

    Returns:
        Tuple (losses, epoch_avg_path_lengths), one entry per epoch.
    """
    optimizer_a = optim.Adam(actor.parameters(), lr=config.lr_actor)
    optimizer_c = optim.Adam(critic.parameters(), lr=config.lr_critic)
    optimizer_pos = optim.Adam(pos_encoder.parameters(), lr=config.lr_pos)

    losses = []
    epoch_avg_path_lengths = []
    device = config.device
    # Fixed depot coordinate. NOTE(review): group positions come from
    # g.ndata['pos'] and are assumed to live on the same device — confirm
    # when training with device != 'cpu'.
    center_coord = torch.tensor([50.0, 50.0], dtype=torch.float32, device=device)

    for epoch in range(config.epochs):
        g, groups_indices = inital_g(config.num_nodes, pos_encoder)

        # Decode group membership into per-group position dicts.
        group_nodes = []
        for group in groups_indices:
            nodes = []
            for idx_mask in group:
                # idx_mask is presumably a single-bit mask (power of two);
                # log2 recovers the node index — TODO confirm against inital_g.
                node_idx = int(math.log2(idx_mask))
                pos = g.ndata['pos'][node_idx]
                nodes.append({'pos': pos})
            if nodes:  # skip empty groups
                group_nodes.append(nodes)

        # Per-epoch accumulators for one batched update.
        batch_value_losses = []
        batch_entropies = []
        batch_logp_adv = []  # (sum of log-probs, raw advantage) per group
        closed_lengths = []

        for group_info in group_nodes:
            # Encode group nodes plus a depot embedding at index 0
            # (context only — never selectable).
            coords = torch.stack([n['pos'] for n in group_info]).to(device)
            node_emb = pos_encoder(coords)  # [N, D]
            depot_emb = pos_encoder(center_coord.unsqueeze(0))  # [1, D]
            enc = torch.cat([depot_emb, node_emb], dim=0)  # [N+1, D]

            N = node_emb.size(0)
            selected_mask = torch.zeros(N + 1, device=device)
            selected_mask[0] = float('-inf')  # depot not selectable
            path = []
            log_probs = []
            entropies = []
            for _ in range(N):
                probs = actor(enc, selected_mask)
                dist = Categorical(probs)
                action = dist.sample()
                if action.item() == 0:
                    # Depot sampled despite the mask (numerically rare):
                    # fall back to the best non-depot alternative.
                    with torch.no_grad():
                        topk = torch.topk(probs, k=2).indices.tolist()
                        alt = topk[1] if topk[0] == 0 else topk[0]
                    action = torch.tensor(alt, device=device)
                log_probs.append(dist.log_prob(action))
                entropies.append(dist.entropy())
                idx = action.item()
                path.append(idx - 1)  # shift past the depot slot
                selected_mask[idx] = float('-inf')

            # Closed tour length, normalized by stop count (+1 scales for
            # the depot legs).
            closed_len = compute_closed_path_length(group_info, path, center_pos=center_coord)
            norm_len = closed_len / (len(path) + 1)
            closed_lengths.append(closed_len)
            reward = -norm_len

            logp_sum = torch.stack(log_probs).sum()
            entropy_mean = torch.stack(entropies).mean()
            value = critic(enc).squeeze()  # scalar value estimate
            adv = reward - value.item()  # .item() detaches the policy advantage
            batch_logp_adv.append((logp_sum, adv))
            batch_entropies.append(entropy_mean)
            batch_value_losses.append((value - reward) ** 2)

        if not batch_logp_adv:
            # No non-empty groups this epoch; record placeholders and move on.
            epoch_avg_path_lengths.append(0.0)
            losses.append(0.0)
            continue

        # Advantage normalization. torch.std of a single element is NaN
        # (Bessel correction divides by n-1) and clamp_min does not repair
        # NaN, so for a one-group batch fall back to the raw advantage.
        adv_tensor = torch.tensor([adv for _, adv in batch_logp_adv],
                                  dtype=torch.float32, device=device)
        adv_mean = adv_tensor.mean()
        if adv_tensor.numel() > 1:
            adv_std = adv_tensor.std().clamp_min(1e-6)
            adv_norm = (adv_tensor - adv_mean) / adv_std
        else:
            adv_norm = adv_tensor

        policy_loss_terms = [
            -logp_sum * a_norm.detach()
            for (logp_sum, _), a_norm in zip(batch_logp_adv, adv_norm)
        ]
        policy_loss = torch.stack(policy_loss_terms).mean()
        value_loss = torch.stack(batch_value_losses).mean()
        entropy_term = torch.stack(batch_entropies).mean()
        total_loss = policy_loss + value_loss - config.entropy_coef * entropy_term

        optimizer_a.zero_grad()
        optimizer_c.zero_grad()
        optimizer_pos.zero_grad()
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(actor.parameters(), 1.0)
        torch.nn.utils.clip_grad_norm_(critic.parameters(), 1.0)
        torch.nn.utils.clip_grad_norm_(pos_encoder.parameters(), 1.0)
        optimizer_a.step()
        optimizer_c.step()
        optimizer_pos.step()

        avg_closed_len = sum(closed_lengths) / len(closed_lengths)
        losses.append(total_loss.item())
        epoch_avg_path_lengths.append(avg_closed_len)
        logging.info(
            f"Epoch {epoch+1}/{config.epochs} | Groups={len(group_nodes)} | ClosedLen(avg)={avg_closed_len:.2f} | "
            f"Policy={policy_loss.item():.4f} | Value={value_loss.item():.4f} | Ent={entropy_term.item():.4f} | AdvMean={adv_mean.item():.3f}"
        )

    return losses, epoch_avg_path_lengths


def plot_training_results(losses, path_lengths, save_dir):
    """Render training curves (loss and average path length) side by side
    and save them to save_dir/training_results.png."""
    import matplotlib.pyplot as plt

    plt.figure(figsize=(12, 6))

    # (subplot index, series, legend label, y-label, title, line color)
    panels = [
        (1, losses, 'Loss', 'Loss', 'Training Loss', None),
        (2, path_lengths, 'Path Length', 'Path Length', 'Average Path Length', 'orange'),
    ]
    for slot, series, label, ylab, title, color in panels:
        plt.subplot(1, 2, slot)
        xs = range(1, len(series) + 1)
        if color is None:
            plt.plot(xs, series, label=label)
        else:
            plt.plot(xs, series, label=label, color=color)
        plt.xlabel('Epoch')
        plt.ylabel(ylab)
        plt.title(title)
        plt.legend()

    os.makedirs(save_dir, exist_ok=True)
    out_path = os.path.join(save_dir, 'training_results.png')
    plt.tight_layout()
    plt.savefig(out_path)
    logging.info(f"训练结果图像已保存到 {out_path}")
    plt.close()
