import torch
from agent import MADDPGAgent
from replay_buffer import ReplayBuffer
import torch.nn.functional as F
class MADDPG:
    """Multi-Agent DDPG trainer (Lowe et al., 2017): decentralized actors,
    centralized critics.

    Each agent owns an actor that maps its local observation to an action,
    and a critic that scores the *joint* observation/action of all agents.
    All agents share a single replay buffer.
    """

    def __init__(self, num_agents, obs_dims, action_dims, actor_lr=1e-3, critic_lr=1e-3,
                 gamma=0.95, tau=0.01, buffer_capacity=100000):
        """Build one actor-critic pair per agent plus a shared replay buffer.

        Args:
            num_agents: number of agents.
            obs_dims: per-agent observation sizes (one entry per agent).
            action_dims: per-agent action sizes (one entry per agent).
            actor_lr, critic_lr: learning rates for each agent's networks.
            gamma: TD discount factor.
            tau: soft target-update coefficient.
            buffer_capacity: maximum transitions held in the replay buffer.

        Raises:
            ValueError: if obs_dims or action_dims length != num_agents
                (previously this surfaced later as an opaque IndexError).
        """
        if len(obs_dims) != num_agents or len(action_dims) != num_agents:
            raise ValueError("obs_dims and action_dims must each have num_agents entries")
        self.num_agents = num_agents
        self.agents = []
        self.gamma = gamma
        self.tau = tau
        self.buffer = ReplayBuffer(buffer_capacity, num_agents)
        # Centralized critics condition on the concatenation of every
        # agent's observation and action.
        total_obs_dim = sum(obs_dims)
        total_action_dim = sum(action_dims)

        for i in range(num_agents):
            agent = MADDPGAgent(agent_id=i,
                                obs_dim=obs_dims[i],
                                action_dim=action_dims[i],
                                total_obs_dim=total_obs_dim,
                                total_action_dim=total_action_dim,
                                actor_lr=actor_lr,
                                critic_lr=critic_lr,
                                gamma=gamma,
                                tau=tau)
            self.agents.append(agent)

    def select_action(self, obs_all):
        """Return a list with one numpy action per agent for a single step.

        Runs the actors under ``torch.no_grad()`` so rollouts never build an
        autograd graph, and moves the output to CPU before ``.numpy()`` so
        this also works when the actors live on a GPU (the old
        ``.detach().numpy()`` raised for CUDA tensors).
        """
        actions = []
        with torch.no_grad():
            for i, agent in enumerate(self.agents):
                obs_tensor = torch.as_tensor(obs_all[i], dtype=torch.float32).unsqueeze(0)
                action = agent.actor(obs_tensor).cpu().numpy()[0]
                actions.append(action)
        return actions

    def update(self, batch_size):
        """Run one MADDPG gradient step for every agent.

        No-op until the replay buffer holds at least ``batch_size``
        transitions. Per agent: (1) critic regression toward the TD target
        built from the target networks, (2) deterministic policy gradient
        through the centralized critic, (3) soft target-network update.
        """
        if len(self.buffer) < batch_size:
            return
        (obs_batch, actions_batch, rewards_batch, global_obs, global_actions,
         next_obs_batch, global_next_obs, dones_batch) = self.buffer.sample(batch_size)

        for i, agent in enumerate(self.agents):
            # ---- critic update ----
            # The TD target involves only the target networks, which are
            # never optimized directly, so build it under no_grad: no graph
            # is allocated and no explicit .detach() is needed on y.
            with torch.no_grad():
                next_actions = [other.target_actor(next_obs_batch[j])
                                for j, other in enumerate(self.agents)]
                global_next_actions = torch.cat(next_actions, dim=1)
                target_q = agent.target_critic(global_next_obs, global_next_actions)
                y = rewards_batch[i] + self.gamma * target_q * (1 - dones_batch[i])

            current_q = agent.critic(global_obs, global_actions)
            critic_loss = F.mse_loss(current_q, y)
            agent.critic_optimizer.zero_grad()
            critic_loss.backward()
            agent.critic_optimizer.step()

            # ---- actor update ----
            # Only agent i's action is recomputed through its current actor;
            # the other agents' actions come from the buffer, per the MADDPG
            # policy-gradient estimator.
            current_actions = [agent.actor(obs_batch[j]) if j == i else actions_batch[j]
                               for j in range(self.num_agents)]
            global_current_actions = torch.cat(current_actions, dim=1)
            # NOTE: this backward also accumulates gradients into the critic
            # parameters; they are discarded by the critic's zero_grad() on
            # the next call, so only the actor is updated here.
            actor_loss = -agent.critic(global_obs, global_current_actions).mean()
            agent.actor_optimizer.zero_grad()
            actor_loss.backward()
            agent.actor_optimizer.step()

            # ---- soft (tau) update of the target networks ----
            agent.update_target()
