# ppo.py
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from typing import Tuple, Dict
from collections import deque
from model import ActorCritic
import cv2
import os


class PPO:
    """Clipped-objective PPO trainer for a vectorized multi-robot environment.

    The environment exposes ``n_envs`` parallel copies; each copy contains
    ``num_robots`` agents that share a single ActorCritic policy.  Rollouts
    are flattened into a per-agent transition buffer, advantages are computed
    with GAE along each agent's own trajectory, and the policy is optimized
    with the clipped surrogate loss plus a value loss and an entropy bonus.
    """

    def __init__(
        self,
        env,
        lr: float = 3e-4,
        gamma: float = 0.99,
        lmbda: float = 0.95,
        eps_clip: float = 0.2,
        epochs: int = 4,
        batch_size: int = 64,
        value_coef: float = 0.5,
        entropy_coef: float = 0.01,
    ):
        """Create the trainer.

        Args:
            env: vectorized environment; must expose ``n_envs``, ``reset()``
                and a gymnasium-style 5-tuple ``step()``.
            lr: Adam learning rate.
            gamma: discount factor.
            lmbda: GAE lambda.
            eps_clip: PPO clipping range epsilon.
            epochs: optimization passes over each collected rollout.
            batch_size: minibatch size for each optimizer step.
            value_coef: weight of the critic (value) loss.
            entropy_coef: weight of the entropy bonus.
        """
        self.env = env
        self.gamma = gamma
        self.lmbda = lmbda
        self.eps_clip = eps_clip
        self.epochs = epochs
        self.batch_size = batch_size
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef

        self.num_robots = 6  # agents per environment copy
        self.vision_size = 128  # side length of the square vision observation
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.policy = ActorCritic(self.vision_size).to(self.device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
        self.writer = SummaryWriter()

        # Monotonic counters used as `global_step` for TensorBoard; without
        # an explicit step every add_scalar point lands on the same x value.
        self.rollout_count = 0
        self.update_count = 0

        # Flat list of per-agent transition dicts filled by collect_rollouts()
        # and consumed (then cleared) by update().
        self.memory = []

    def save(self, path: str):
        """Write the policy weights to `path`."""
        torch.save(self.policy.state_dict(), path)
        print(f"Model saved to {path}")

    def load(self, path: str):
        """Load policy weights from `path` if the file exists.

        `map_location` remaps a checkpoint saved on another device (e.g. a
        GPU checkpoint loaded on a CPU-only machine) onto `self.device`.
        """
        if os.path.exists(path):
            self.policy.load_state_dict(torch.load(path, map_location=self.device))
            print(f"Model loaded from {path}")
        else:
            print(f"No existing model found at {path}, starting fresh")

    def collect_rollouts(self, n_steps: int = 2048):
        """Gather ~`n_steps` total env steps of experience and compute GAE.

        Each parallel env step appends ``n_envs * num_robots`` per-agent
        transitions to ``self.memory``, so the buffer interleaves agents.
        Advantages/returns are therefore accumulated per agent slot with a
        stride of ``n_envs * num_robots`` — bootstrapping from the flat
        neighbor ``t + 1`` would mix different agents' trajectories.
        """
        obs = self.env.reset()
        for _ in range(
            n_steps // self.env.n_envs
        ):  # n_steps total env steps, since parallel
            vision, infos = obs["visions"], obs["infos"]
            actions, log_probs, values = self.policy.act(vision, infos)
            # Detach before leaving the autograd graph, then move to host.
            actions_np = actions.detach().cpu().numpy().reshape(self.env.n_envs, -1)

            next_obs, rewards, dones, _, _ = self.env.step(actions_np)

            # Store one transition per (env, agent) pair.
            for e in range(self.env.n_envs):
                for a in range(self.num_robots):
                    agent_info = {
                        "time": obs["infos"]["time"][e],
                        "hp": obs["infos"]["hp"][e, a],
                        "bullet": obs["infos"]["bullet"][e, a],
                        "attack_cooldown": obs["infos"]["attack_cooldown"][e, a],
                        "reborn_timer": obs["infos"]["reborn_timer"][e, a],
                        "score_red": obs["infos"]["scores"]["red"][e],
                        "score_blue": obs["infos"]["scores"]["blue"][e],
                    }
                    self.memory.append(
                        {
                            "vision": obs["visions"][e, a],
                            "info": agent_info,
                            # 3 action components per robot.
                            "action": actions_np[e, 3 * a : 3 * (a + 1)],
                            "log_prob": log_probs[e * self.num_robots + a].item(),
                            "value": values[e * self.num_robots + a].item(),
                            "reward": rewards[e, a],
                            "done": bool(dones[e, a]),
                        }
                    )

            obs = next_obs

        # ---- Generalized Advantage Estimation ------------------------------
        # Consecutive transitions of the SAME agent are `stride` apart in the
        # interleaved buffer, so run the backward GAE recursion once per slot.
        n = len(self.memory)
        stride = self.env.n_envs * self.num_robots
        advantages = np.zeros(n)
        returns = np.zeros(n)
        for slot in range(min(stride, n)):
            gae = 0.0
            for t in reversed(range(slot, n, stride)):
                # `done` marks that the state AFTER transition t is terminal,
                # so it masks the bootstrap value and the GAE carry-over.
                nonterminal = 1.0 - float(self.memory[t]["done"])
                nxt = t + stride
                # At the rollout boundary there is no value estimate to
                # bootstrap from; 0 truncates the return there.
                next_value = self.memory[nxt]["value"] if nxt < n else 0.0
                delta = (
                    self.memory[t]["reward"]
                    + self.gamma * next_value * nonterminal
                    - self.memory[t]["value"]
                )
                gae = delta + self.gamma * self.lmbda * nonterminal * gae
                advantages[t] = gae
                returns[t] = gae + self.memory[t]["value"]

        for i in range(n):
            self.memory[i]["advantage"] = advantages[i]
            self.memory[i]["return"] = returns[i]

        # Log the mean reward of the most recent env step (all agents).
        recent_rewards = [
            m["reward"] for m in self.memory[-self.env.n_envs * self.num_robots :]
        ]
        self.writer.add_scalar(
            "Loss/avg_reward", np.mean(recent_rewards), self.rollout_count
        )
        self.rollout_count += 1

    def update(self):
        """Run `epochs` passes of minibatch PPO updates over `self.memory`.

        Advantages are normalized per update; losses are averaged over all
        minibatches and logged once at the end.  The buffer is cleared
        afterwards (PPO is on-policy).
        """
        if len(self.memory) == 0:
            return

        # Prepare data
        visions = np.stack([m["vision"] for m in self.memory])
        actions = np.stack([m["action"] for m in self.memory])
        old_log_probs = np.array([m["log_prob"] for m in self.memory])
        advantages = np.array([m["advantage"] for m in self.memory])
        returns = np.array([m["return"] for m in self.memory])
        # Normalize advantages to zero mean / unit variance for stability.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # Re-assemble the per-key info arrays expected by policy.evaluate().
        infos = {}
        for key in [
            "time",
            "hp",
            "bullet",
            "attack_cooldown",
            "reborn_timer",
            "score_red",
            "score_blue",
        ]:
            infos[key] = np.array([m["info"][key] for m in self.memory])

        # Running sums of the loss terms for end-of-update logging.
        total_actor_loss = 0
        total_critic_loss = 0
        total_entropy_loss = 0
        total_loss = 0
        batch_count = 0

        n_samples = len(self.memory)
        for epoch in range(self.epochs):
            indices = np.random.permutation(n_samples)
            for start in range(0, n_samples, self.batch_size):
                end = start + self.batch_size
                batch_idx = indices[start:end]

                batch_visions = visions[batch_idx]

                batch_actions = torch.tensor(
                    actions[batch_idx], dtype=torch.float32
                ).to(self.device)
                batch_old_lp = torch.tensor(
                    old_log_probs[batch_idx], dtype=torch.float32
                ).to(self.device)
                batch_adv = torch.tensor(advantages[batch_idx], dtype=torch.float32).to(
                    self.device
                )
                batch_ret = torch.tensor(returns[batch_idx], dtype=torch.float32).to(
                    self.device
                )

                # Values in the infos dict must be converted to float32 too.
                batch_infos = {
                    k: torch.tensor(v[batch_idx], dtype=torch.float32).to(self.device)
                    for k, v in infos.items()
                }

                # Evaluate the current policy on the stored transitions.
                new_log_probs, values, entropy = self.policy.evaluate(
                    batch_visions, batch_infos, batch_actions
                )
                # Clipped surrogate objective (PPO).
                ratio = torch.exp(new_log_probs - batch_old_lp)
                surr1 = ratio * batch_adv
                surr2 = (
                    torch.clamp(ratio, 1 - self.eps_clip, 1 + self.eps_clip) * batch_adv
                )
                actor_loss = -torch.min(surr1, surr2).mean()
                critic_loss = nn.MSELoss()(values, batch_ret)
                # Negative entropy: minimizing it encourages exploration.
                entropy_loss = -entropy.mean()

                loss = (
                    actor_loss
                    + self.value_coef * critic_loss
                    + self.entropy_coef * entropy_loss
                )

                self.optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_norm_(self.policy.parameters(), 0.5)
                self.optimizer.step()

                # Accumulate loss values for averaging.
                total_actor_loss += actor_loss.item()
                total_critic_loss += critic_loss.item()
                total_entropy_loss += entropy_loss.item()
                total_loss += loss.item()
                batch_count += 1

        # Log the average losses after all epochs and batches complete.
        if batch_count > 0:
            self.writer.add_scalar(
                "Loss/actor", total_actor_loss / batch_count, self.update_count
            )
            self.writer.add_scalar(
                "Loss/critic", total_critic_loss / batch_count, self.update_count
            )
            self.writer.add_scalar(
                "Loss/entropy", total_entropy_loss / batch_count, self.update_count
            )
            self.writer.add_scalar(
                "Loss/total", total_loss / batch_count, self.update_count
            )
        self.update_count += 1

        # On-policy: discard the rollout once it has been consumed.
        self.memory = []
