import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import deque, namedtuple
import random


# 自定义多智能体环境
class MultiAgentEnv:
    """Toy cooperative environment: agents move on a 2-D plane toward a shared target.

    Each agent observes [own_x, own_y, target_x, target_y] and receives the
    negative Euclidean distance to the target as its reward.
    """

    def __init__(self, n_agents=2, world_size=5):
        self.n_agents = n_agents
        self.world_size = world_size
        self.agent_pos = None  # set by reset(); shape (n_agents, 2)
        # The shared goal sits at the center of the world.
        self.target_pos = np.array([world_size / 2, world_size / 2])

    def reset(self):
        """Place agents uniformly at random and return initial observations."""
        self.agent_pos = np.random.uniform(0, self.world_size, (self.n_agents, 2))
        return self._get_obs()

    def step(self, actions):
        """Apply one joint action; return (obs, rewards, done, info)."""
        # Per-axis displacement is limited to +/-0.5.
        self.agent_pos += np.clip(actions, -0.5, 0.5)

        dist_to_target = np.linalg.norm(self.agent_pos - self.target_pos, axis=1)
        rewards = -dist_to_target           # closer == higher reward, per agent
        done = (dist_to_target < 0.5).all()  # episode ends when ALL agents are close

        return self._get_obs(), rewards, done, {}

    def _get_obs(self):
        """Each agent observes its own position concatenated with the target's."""
        return np.array([np.concatenate([p, self.target_pos]) for p in self.agent_pos])


# 经验回放缓存
class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions with uniform random sampling."""

    def __init__(self, capacity):
        self.Transition = namedtuple(
            'Transition',
            ['states', 'actions', 'rewards', 'next_states', 'dones'],
        )
        # deque evicts the oldest transition once capacity is reached.
        self.buffer = deque(maxlen=capacity)

    def add(self, *args):
        """Store one transition (states, actions, rewards, next_states, dones)."""
        self.buffer.append(self.Transition(*args))

    def sample(self, batch_size):
        """Draw batch_size transitions without replacement.

        Returns a single Transition whose fields are tuples of length
        batch_size (field-major layout, convenient for stacking).
        """
        drawn = random.sample(self.buffer, batch_size)
        return self.Transition(*zip(*drawn))

    def __len__(self):
        return len(self.buffer)


# Actor网络
class Actor(nn.Module):
    """Policy network: maps one agent's observation to a tanh-bounded action."""

    def __init__(self, obs_dim, action_dim, hidden_dim=256):
        super().__init__()
        # Two hidden ReLU layers; Tanh squashes the output into (-1, 1).
        layers = [
            nn.Linear(obs_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, action_dim),
            nn.Tanh(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return actions in (-1, 1) for observation batch x."""
        return self.net(x)


# Critic网络
class Critic(nn.Module):
    """Centralized Q-network: scores the joint (all-agent) state-action pair."""

    def __init__(self, obs_dim, action_dim, n_agents, hidden_dim=256):
        super().__init__()
        # The input concatenates every agent's observation and action.
        joint_dim = n_agents * (obs_dim + action_dim)
        self.net = nn.Sequential(
            nn.Linear(joint_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, states, actions):
        """states/actions: lists of per-agent (batch, dim) tensors -> (batch, 1) Q-values."""
        joint = torch.cat(list(states) + list(actions), dim=1)
        return self.net(joint)


# MADDPG智能体
# MADDPG agent
class MADDPGAgent:
    """One MADDPG agent: decentralized actor, centralized critic.

    The critic is trained on all agents' observations and actions
    (centralized training); the actor acts from this agent's own
    observation only (decentralized execution).
    """

    def __init__(self, obs_dim, action_dim, n_agents, agent_idx, args):
        self.agent_idx = agent_idx
        self.n_agents = n_agents
        self.actor = Actor(obs_dim, action_dim)
        self.critic = Critic(obs_dim, action_dim, n_agents)
        self.target_actor = Actor(obs_dim, action_dim)
        self.target_critic = Critic(obs_dim, action_dim, n_agents)

        # Target networks start as exact copies of the online networks.
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.target_critic.load_state_dict(self.critic.state_dict())

        self.actor_optim = optim.Adam(self.actor.parameters(), lr=args.actor_lr)
        self.critic_optim = optim.Adam(self.critic.parameters(), lr=args.critic_lr)

        self.gamma = args.gamma  # discount factor
        self.tau = args.tau      # soft target-update rate
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.to_device()

    def to_device(self):
        """Move all four networks to the agent's device."""
        for net in [self.actor, self.critic, self.target_actor, self.target_critic]:
            net.to(self.device)

    def act(self, obs, noise_scale=0.1):
        """Select an exploratory action for a single observation.

        Args:
            obs: this agent's observation, shape (obs_dim,).
            noise_scale: std-dev of additive Gaussian exploration noise.

        Returns:
            numpy action clipped to [-1, 1], shape (action_dim,).
        """
        obs_t = torch.as_tensor(obs, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            action = self.actor(obs_t).cpu().numpy()
        noise = noise_scale * np.random.randn(*action.shape)
        return np.clip(action + noise, -1, 1)

    def _split_per_agent(self, field):
        """Stack a batch field to (batch, n_agents, dim) and split along agents.

        Returns a list of n_agents tensors, each (batch, dim), on self.device.
        """
        stacked = torch.as_tensor(np.array(field), dtype=torch.float32,
                                  device=self.device)
        return [stacked[:, i] for i in range(self.n_agents)]

    def update(self, batch, agents):
        """One gradient step on this agent's critic and actor.

        Args:
            batch: ReplayBuffer.Transition whose fields are tuples of per-step
                values — states/next_states: (n_agents, obs_dim) arrays,
                actions: (n_agents, action_dim) arrays, rewards: (n_agents,)
                arrays, dones: scalars.
            agents: list of all MADDPGAgent instances (provides target actors).

        Returns:
            (critic_loss, actor_loss) as Python floats.

        BUG FIX vs. previous version: batch fields were iterated over the
        BATCH axis as if it were the agent axis, and the TD target mixed
        shapes (batch,) with (batch, 1), broadcasting into a (batch, batch)
        matrix. Fields are now stacked and split per agent, and reward/done
        terms are unsqueezed to keep every tensor (batch, 1).
        """
        states = self._split_per_agent(batch.states)
        actions = self._split_per_agent(batch.actions)
        next_states = self._split_per_agent(batch.next_states)
        rewards = torch.as_tensor(np.array(batch.rewards), dtype=torch.float32,
                                  device=self.device)               # (batch, n_agents)
        dones = torch.as_tensor(np.array(batch.dones, dtype=np.float32),
                                device=self.device).unsqueeze(1)    # (batch, 1)

        # ---- Critic update: TD target from the target networks ----
        with torch.no_grad():
            target_actions = [agents[i].target_actor(ns)
                              for i, ns in enumerate(next_states)]
            target_q = self.target_critic(next_states, target_actions)  # (batch, 1)
            # unsqueeze keeps the reward column (batch, 1); without it the
            # row vector would broadcast target_q into (batch, batch).
            reward_i = rewards[:, self.agent_idx].unsqueeze(1)
            target_q = reward_i + (1.0 - dones) * self.gamma * target_q

        current_q = self.critic(states, actions)
        critic_loss = nn.functional.mse_loss(current_q, target_q)

        self.critic_optim.zero_grad()
        critic_loss.backward()
        self.critic_optim.step()

        # ---- Actor update: ascend Q w.r.t. this agent's action only ----
        # Other agents' actions are detached so their actors get no gradient.
        new_actions = [agents[i].actor(s) if i == self.agent_idx
                       else agents[i].actor(s).detach()
                       for i, s in enumerate(states)]
        actor_loss = -self.critic(states, new_actions).mean()

        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()

        # ---- Polyak (soft) update of the target networks ----
        self._soft_update(self.target_critic, self.critic)
        self._soft_update(self.target_actor, self.actor)

        return critic_loss.item(), actor_loss.item()

    def _soft_update(self, target, source):
        """target <- tau * source + (1 - tau) * target, parameter-wise."""
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.data.copy_(self.tau * s_param.data
                               + (1.0 - self.tau) * t_param.data)


# 训练参数
# Training hyper-parameters
class Args:
    """Hyper-parameter bundle for MADDPG training."""

    def __init__(self):
        # Optimization
        self.actor_lr = 1e-4        # policy learning rate
        self.critic_lr = 1e-3       # value learning rate (10x actor)
        # Discounting / target tracking
        self.gamma = 0.99           # reward discount factor
        self.tau = 0.005            # soft target-update rate
        # Replay
        self.buffer_size = 100000   # transition capacity
        self.batch_size = 256       # minibatch size per update
        # Schedule
        self.episodes = 2000        # training episodes
        self.max_steps = 200        # step cap per episode


# 主训练循环
# Main training loop
def train():
    """Run the MADDPG training loop on the toy multi-agent environment."""
    env = MultiAgentEnv(n_agents=2)
    args = Args()
    n_agents = env.n_agents

    # One agent per environment slot; obs is [own_xy, target_xy] -> dim 4.
    agents = [
        MADDPGAgent(obs_dim=4, action_dim=2, n_agents=n_agents,
                    agent_idx=idx, args=args)
        for idx in range(n_agents)
    ]
    buffer = ReplayBuffer(args.buffer_size)

    for episode in range(args.episodes):
        observations = env.reset()
        episode_rewards = np.zeros(n_agents)

        for _ in range(args.max_steps):
            joint_action = np.array(
                [ag.act(observations[idx], noise_scale=0.1)
                 for idx, ag in enumerate(agents)]
            )

            next_observations, step_rewards, done, _ = env.step(joint_action)
            buffer.add(observations, joint_action, step_rewards,
                       next_observations, done)
            episode_rewards += step_rewards

            # Learn only once enough transitions are stored for a full batch.
            if len(buffer) >= args.batch_size:
                sampled = buffer.sample(args.batch_size)
                for ag in agents:
                    ag.update(sampled, agents)

            observations = next_observations
            if done:
                break

        if episode % 100 == 0:
            print(f"Episode {episode}, Avg Reward: {episode_rewards.mean():.2f}")


# Entry-point guard: run training only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    train()