# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class CNNVisionEncoder(nn.Module):
    """Two-stage convolutional encoder for channels-last RGB frames.

    Maps a batch of (B, H, W, 3) images with pixel values in [0, 255]
    to a (B, hidden_dim) feature embedding.
    """

    def __init__(self, vision_size: int = 600, hidden_dim: int = 128):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        # Each of the two pooling stages halves H and W, so the final
        # feature map is (vision_size // 4) on a side with 64 channels.
        spatial = vision_size // 4
        self.fc_vision = nn.Linear(64 * spatial * spatial, hidden_dim)

    def forward(self, frames):
        """Encode a (B, H, W, 3) image batch into (B, hidden_dim)."""
        # Channels-last -> channels-first, scaled from [0, 255] to [0, 1].
        pixels = frames.permute(0, 3, 1, 2).float() / 255.0
        stage1 = self.pool1(F.relu(self.conv1(pixels)))
        stage2 = self.pool2(F.relu(self.conv2(stage1)))
        # torch.flatten copies if needed, so the non-contiguous layout
        # left by permute() is handled safely.
        flat = torch.flatten(stage2, start_dim=1)
        return self.fc_vision(flat)


class ActorCritic(nn.Module):
    """Gaussian actor-critic over image observations plus scalar robot stats.

    The vision embedding is fused with 7 per-robot scalar features
    ([time, hp, bullet, cooldown, reborn, score_r, score_b]) before being
    fed to separate actor and critic MLP heads.  The policy is a diagonal
    Normal over 3 action dimensions with a state-independent log-std.
    """

    # Order in which the 7 scalar features are concatenated; must match the
    # `hidden_dim + 7` width of the fusion layer.
    _INFO_KEYS = (
        "time", "hp", "bullet", "attack_cooldown", "reborn_timer",
        "score_red", "score_blue",
    )

    def __init__(self, vision_size: int, hidden_dims: "list | None" = None):
        """Build encoder, fusion layer, and actor/critic heads.

        Args:
            vision_size: side length of the square input frames.
            hidden_dims: widths of the hidden MLP layers; defaults to
                [128, 128].  (Was a mutable default argument — fixed to the
                None-sentinel idiom; behavior is unchanged.)
        """
        super().__init__()
        if hidden_dims is None:
            hidden_dims = [128, 128]
        hidden_dim = hidden_dims[0]
        self.vision_encoder = CNNVisionEncoder(vision_size, hidden_dim)
        # Fuses the vision embedding with the 7 scalar features.
        self.fusion = nn.Linear(hidden_dim + 7, hidden_dim)

        # Build mirrored hidden stacks for actor and critic:
        # Linear+ReLU pairs through hidden_dims, then a final Linear back
        # to hidden_dim (its ReLU is applied in forward()).
        actor_layers, critic_layers = [], []
        for i in range(len(hidden_dims) - 1):
            actor_layers += [nn.Linear(hidden_dims[i], hidden_dims[i + 1]), nn.ReLU()]
            critic_layers += [nn.Linear(hidden_dims[i], hidden_dims[i + 1]), nn.ReLU()]
        actor_layers.append(nn.Linear(hidden_dims[-1], hidden_dim))
        critic_layers.append(nn.Linear(hidden_dims[-1], hidden_dim))
        self.actor_hidden = nn.Sequential(*actor_layers)
        self.critic_hidden = nn.Sequential(*critic_layers)

        self.actor_mean = nn.Linear(hidden_dim, 3)
        # State-independent log standard deviation for the Gaussian policy.
        self.actor_logstd = nn.Parameter(torch.zeros(3))
        self.critic = nn.Linear(hidden_dim, 1)
        self.vision_size = vision_size

    def _prepare_infos(self, infos, num_robots=6):
        """Flatten env infos into per-robot 1-D arrays of length n_envs*num_robots.

        Handles two layouts:
          * single env: ``infos["time"]`` is a Python scalar and per-robot
            fields are length-``num_robots`` sequences;
          * vectorized envs: ``infos["time"]`` is (n_envs,) and per-robot
            fields are (n_envs, num_robots) arrays.

        Returns a dict with env-level values (time, score_red, score_blue)
        broadcast to every robot, and per-robot fields flattened.
        """
        per_robot_keys = ("hp", "bullet", "attack_cooldown", "reborn_timer")
        # Env-level values and where to find them in the raw infos dict.
        shared = {
            "time": infos["time"],
            "score_red": infos["scores"]["red"],
            "score_blue": infos["scores"]["blue"],
        }
        flat_infos = {}
        if np.isscalar(infos.get("time", 0)):
            # Single environment: broadcast each env-level scalar per robot.
            for key, value in shared.items():
                flat_infos[key] = np.repeat(np.array([value]), num_robots).flatten()
            for key in per_robot_keys:
                flat_infos[key] = np.array([infos[key]]).flatten()  # (num_robots,)
        else:
            # Vectorized environments: tile (n_envs,) arrays across robots.
            for key, value in shared.items():
                flat_infos[key] = np.repeat(
                    value[:, np.newaxis], num_robots, axis=1
                ).flatten()
            for key in per_robot_keys:
                flat_infos[key] = infos[key].flatten()
        return flat_infos

    def forward(self, visions, infos):
        """Run policy and value heads on a flat batch of agents.

        Args:
            visions: (total, H, W, 3) image batch.
            infos: dict of length-``total`` 1-D arrays (see _prepare_infos).

        Returns:
            (Normal distribution over (total, 3) actions, (total,) values).
        """
        vis_features = self.vision_encoder(visions)  # (total, hidden_dim)
        device = vis_features.device

        # Stack the 7 scalar features, in _INFO_KEYS order, into (total, 7)
        # on the encoder's device.  as_tensor avoids copies where possible.
        agent_infos = torch.stack(
            [
                torch.as_tensor(infos[key], device=device).float()
                for key in self._INFO_KEYS
            ],
            dim=-1,
        )

        fused = torch.cat([vis_features, agent_infos], dim=-1)
        fused = F.relu(self.fusion(fused))

        # Actor head.
        actor_h = F.relu(self.actor_hidden(fused))
        mean = self.actor_mean(actor_h)
        std = torch.exp(self.actor_logstd)
        dist = torch.distributions.Normal(mean, std)

        # Critic head.
        critic_h = F.relu(self.critic_hidden(fused))
        values = self.critic(critic_h).squeeze(-1)

        return dist, values

    def act(self, visions, infos):
        """Sample actions for one rollout step.

        Args:
            visions: (n_envs, num_robots, H, W, 3) observations.
            infos: raw env info dict (scalar or vectorized layout).

        Returns:
            (actions, per-agent summed log-probs, values), flattened over
            envs and robots.
        """
        H, W, C = self.vision_size, self.vision_size, 3
        flat_visions = visions.reshape(-1, H, W, C)
        flat_infos = self._prepare_infos(infos)
        device = next(self.parameters()).device
        # as_tensor avoids a copy (and a UserWarning) when the input is
        # already a tensor; torch.tensor always copied.
        flat_visions_t = torch.as_tensor(flat_visions).to(device)
        dist, values = self.forward(flat_visions_t, flat_infos)
        # rsample keeps the reparameterized gradient path through the sample.
        actions = dist.rsample()
        log_probs = dist.log_prob(actions).sum(-1)
        return actions, log_probs, values

    def evaluate(self, visions, infos, actions):
        """Re-evaluate stored transitions for a policy-gradient update.

        Args:
            visions: (T, H, W, 3) observations.
            infos: dict of length-T 1-D arrays.
            actions: (T, 3) actions taken during rollout.

        Returns:
            (log_probs, values, entropy), each of shape (T,).
        """
        device = next(self.parameters()).device
        visions_t = torch.as_tensor(visions).to(device)
        dist, values = self.forward(visions_t, infos)
        log_probs = dist.log_prob(actions).sum(-1)
        entropy = dist.entropy().sum(-1)
        return log_probs, values, entropy
