import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.nn.utils.rnn import pad_sequence
import copy


# from .numerical_coat_net import NumericalCoATNet

class PPOAgent:
    """
    PPO agent for autoregressive (token-sequence) action spaces.

    A frozen deep-copy of the initial SFT model is kept as a reference
    (``self.sft_model``) for KL-divergence regularization; the current
    ``update()`` deliberately omits the KL penalty, but the reference model
    and ``kl_beta`` are retained for API compatibility.

    Rollouts are collected into ``self.buffer`` by the caller; ``update()``
    consumes the whole buffer using plain Monte-Carlo returns (``gamma`` and
    ``gae_lambda`` are stored but unused by this return estimator).
    """

    def __init__(self, model, device, lr=5e-5, update_epochs=4, clip_epsilon=0.2, gamma=0.99, gae_lambda=0.95, entropy_coef=0.01, kl_beta=0.2):
        """
        Args:
            model: actor-critic model exposing ``predict_value``,
                ``_encode_state``, ``tgt_tok_emb``, ``positional_encoder``,
                ``transformer_decoder`` and ``generator``.
            device: torch device all tensors are moved to.
            lr: AdamW learning rate.
            update_epochs: PPO epochs per call to ``update()``.
            clip_epsilon: PPO ratio clipping range.
            gamma: discount factor (kept for API compatibility; the
                Monte-Carlo return used here ignores it).
            gae_lambda: GAE lambda (kept for API compatibility; unused).
            entropy_coef: entropy-bonus coefficient.
            kl_beta: KL-penalty coefficient (currently unused by the loss).
        """
        self.model = model
        self.device = device
        self.update_epochs = update_epochs
        self.clip_epsilon = clip_epsilon
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.entropy_coef = entropy_coef

        # --- KL regularization reference ---
        self.kl_beta = kl_beta
        # Non-trainable snapshot of the initial SFT model.
        self.sft_model = copy.deepcopy(model).to(device)
        self.sft_model.eval()
        # [FIX] eval() alone does not stop gradient tracking; freeze the
        # reference model's parameters explicitly.
        for param in self.sft_model.parameters():
            param.requires_grad_(False)

        # [FIX] remember rollout settings so update() can reconstruct the
        # exact behavior policy.  Previously update() scored actions with
        # raw logits and a hard-coded start symbol of 1, while
        # select_action() sampled from temperature-scaled logits — so the
        # importance ratio was not 1 on the first epoch even with
        # unchanged parameters.  The defaults preserve the old behavior
        # if update() is ever called before any select_action().
        self._rollout_temperature = 1.0
        self._start_symbol = 1

        self.optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
        self.buffer = {
            "states": [], "actions": [], "log_probs": [],
            "rewards": [], "values": [], "dones": []
        }

    def clear_buffer(self):
        """Empty every rollout list in-place; the buffer keys are kept."""
        for key in self.buffer:
            self.buffer[key].clear()

    def select_action(self, obs_sequences, max_len, symbol_manager, temperature=1.5):
        """
        Sample one action trace per observation via autoregressive decoding.

        Args:
            obs_sequences: sequence of state tensors, stackable along dim 0.
            max_len: maximum number of tokens generated per trace.
            symbol_manager: provides the START / END / NO_OP token ids.
            temperature: softmax temperature used for sampling.

        Returns:
            (traces, log_probs, values): per-sample action-token tensors
            (terminating END stripped; an empty trace becomes a single
            NO_OP), matching per-token log-prob tensors, and the critic's
            value estimates for the input states.
        """
        self.model.eval()
        # Record the behavior-policy settings so update() can score the
        # collected actions under the same distribution.
        self._rollout_temperature = temperature
        self._start_symbol = symbol_manager.START

        batch_size = len(obs_sequences)
        states_tensor = torch.stack(list(obs_sequences)).to(self.device)
        generated_traces = [[] for _ in range(batch_size)]
        all_log_probs = [[] for _ in range(batch_size)]

        with torch.no_grad():
            initial_values = self.model.predict_value(states_tensor).squeeze(-1)
            memory = self.model._encode_state(states_tensor)
            decoder_input = torch.full((batch_size, 1), symbol_manager.START, dtype=torch.long, device=self.device)

            for _ in range(max_len):
                tgt_emb = self.model.tgt_tok_emb(decoder_input)
                pos_encoded_tgt = self.model.positional_encoder(tgt_emb)
                tgt_mask = nn.Transformer.generate_square_subsequent_mask(decoder_input.size(1)).to(self.device)
                decoder_output = self.model.transformer_decoder(pos_encoded_tgt, memory, tgt_mask=tgt_mask)
                last_token_output = decoder_output[:, -1, :]
                logits = self.model.generator(last_token_output)

                # Sample from the temperature-scaled categorical policy.
                dist = Categorical(logits=logits / temperature)
                next_token = dist.sample()
                log_prob = dist.log_prob(next_token)
                decoder_input = torch.cat([decoder_input, next_token.unsqueeze(1)], dim=1)

                for i in range(batch_size):
                    # Keep appending until END has been recorded for this
                    # sample; further sampled tokens are discarded.
                    if not (generated_traces[i] and generated_traces[i][-1] == symbol_manager.END):
                        generated_traces[i].append(next_token[i].item())
                        all_log_probs[i].append(log_prob[i])

                # Stop decoding once every trace has terminated with END.
                if all(trace and trace[-1] == symbol_manager.END for trace in generated_traces):
                    break

        self.model.train()

        final_traces = []
        final_log_probs = []

        for i in range(batch_size):
            trace = generated_traces[i]
            log_probs = all_log_probs[i]

            if not trace:
                final_traces.append(torch.tensor([], dtype=torch.long, device=self.device))
                final_log_probs.append(torch.tensor([], device=self.device))
                continue

            # Strip the terminating END token (and its log-prob).
            if trace[-1] == symbol_manager.END:
                action_trace = trace[:-1]
                action_log_probs = log_probs[:-1]
            else:
                action_trace = trace
                action_log_probs = log_probs

            # A trace consisting only of END becomes an explicit NO_OP
            # with zero log-prob so downstream padding stays well-formed.
            if not action_trace:
                action_trace = [symbol_manager.NO_OP]
                action_log_probs = [torch.tensor(0.0, device=self.device)]
            final_traces.append(torch.tensor(action_trace, dtype=torch.long, device=self.device))

            if action_log_probs:
                final_log_probs.append(torch.stack(action_log_probs))
            else:
                final_log_probs.append(torch.tensor([], device=self.device))

        return final_traces, final_log_probs, initial_values

    def update(self):
        """
        Run ``update_epochs`` epochs of clipped PPO on the buffered rollout.

        Returns:
            dict with the last epoch's ``actor_loss``, ``critic_loss``
            and mean ``entropy`` (Python floats).
        """
        # --- 1. Load rollout data ---
        states = torch.stack(self.buffer["states"]).to(self.device)
        rewards = torch.tensor(self.buffer["rewards"], dtype=torch.float32).to(self.device)
        old_values = torch.stack(self.buffer["values"]).to(self.device).detach()
        # Pad variable-length traces with 0; the action mask below assumes
        # 0 is never a real action token — TODO confirm against the
        # symbol manager's vocabulary.
        actions = pad_sequence(self.buffer["actions"], batch_first=True, padding_value=0).to(self.device)
        old_log_probs_padded = pad_sequence(self.buffer["log_probs"], batch_first=True, padding_value=0).to(self.device)
        old_log_probs = old_log_probs_padded.sum(dim=1).detach()

        # --- 2. Monte-Carlo returns and normalized advantages ---
        returns = rewards
        advantages = returns - old_values
        # [FIX] std() of a single element is NaN; only normalize when the
        # batch has more than one sample.
        if advantages.numel() > 1:
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # --- 3. PPO update loop ---
        for _ in range(self.update_epochs):
            new_values = self.model.predict_value(states).squeeze(-1)

            # [FIX] prepend the start symbol recorded during rollout
            # instead of a hard-coded 1.
            decoder_input = torch.cat([torch.full((actions.shape[0], 1), self._start_symbol, device=self.device), actions], dim=1)

            # Teacher-forced pass: position t of the decoder output scores
            # action token t.
            memory = self.model._encode_state(states)
            tgt_emb = self.model.tgt_tok_emb(decoder_input[:, :-1])
            pos_encoded_tgt = self.model.positional_encoder(tgt_emb)
            tgt_mask = nn.Transformer.generate_square_subsequent_mask(pos_encoded_tgt.size(1)).to(self.device)
            decoder_output = self.model.transformer_decoder(pos_encoded_tgt, memory, tgt_mask=tgt_mask)
            logits = self.model.generator(decoder_output)

            # [FIX] score actions under the same temperature-scaled policy
            # that generated them so the ratio is 1 before the first step.
            dist = Categorical(logits=logits / self._rollout_temperature)
            action_mask = (actions != 0)
            new_log_probs = (dist.log_prob(actions) * action_mask).sum(dim=1)
            entropy = (dist.entropy() * action_mask).sum(dim=1)

            # Clipped surrogate objective.
            ratio = torch.exp(new_log_probs - old_log_probs)
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
            actor_loss = -torch.min(surr1, surr2).mean()

            critic_loss = F.mse_loss(new_values, returns)

            # KL penalty intentionally omitted; kl_beta is currently unused.
            loss = actor_loss + 0.5 * critic_loss - self.entropy_coef * entropy.mean()

            self.optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
            self.optimizer.step()

        stats = {
            "actor_loss": actor_loss.item(),
            "critic_loss": critic_loss.item(),
            "entropy": entropy.mean().item(),
        }
        return stats

