#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
基于 MADDPG 算法的多智能体强化学习示例代码
依赖环境：python==3.9, pytorch==1.12.1 (cpu), gym==0.26.2
"""

import gym
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from collections import deque

# -----------------------------
# A simple multi-agent environment
# -----------------------------
class MultiAgentSimpleEnv(gym.Env):
    """A simple multi-agent environment.

    Each of ``num_agents`` agents observes a real-valued vector of length
    ``obs_dim`` and emits a continuous action in [-1, 1]^``action_dim``.
    The reward is the negative squared action norm (encouraging small
    actions), and an episode ends after ``max_steps`` steps.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, num_agents=2, obs_dim=4, action_dim=2, max_steps=25,
                 noise_scale=0.1):
        """
        Args:
            num_agents: number of agents in the environment.
            obs_dim: length of each agent's observation vector.
            action_dim: length of each agent's action vector.
            max_steps: episode length in steps.
            noise_scale: std-dev of the Gaussian transition noise
                (generalizes the previously hard-coded 0.1; default keeps
                the old behavior).
        """
        super(MultiAgentSimpleEnv, self).__init__()
        self.num_agents = num_agents
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.max_steps = max_steps
        self.noise_scale = noise_scale
        self.current_step = 0

        # One observation space and one action space per agent.
        self.observation_space = [
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(obs_dim,), dtype=np.float32)
            for _ in range(num_agents)
        ]
        self.action_space = [
            gym.spaces.Box(low=-1.0, high=1.0, shape=(action_dim,), dtype=np.float32)
            for _ in range(num_agents)
        ]
        # Initialize per-agent states.
        self.state = [np.zeros(obs_dim, dtype=np.float32) for _ in range(num_agents)]

    def reset(self):
        """Reset the episode and return the list of initial per-agent observations."""
        self.current_step = 0
        self.state = [np.random.uniform(-1, 1, self.obs_dim).astype(np.float32)
                      for _ in range(self.num_agents)]
        return self.state

    def step(self, actions):
        """Advance the environment by one step.

        Args:
            actions: list of per-agent actions, one numpy array per agent.

        Returns:
            next_state: list of next per-agent observations.
            rewards: list of per-agent rewards.
            dones: list of per-agent done flags (shared termination).
            infos: empty dict.
        """
        self.current_step += 1
        next_state = []
        rewards = []
        for i in range(self.num_agents):
            # Dynamics: next state = current state + action (zero-padded) + noise.
            action = np.asarray(actions[i])
            # Pad the action with zeros when it is shorter than obs_dim.
            if len(action) < self.obs_dim:
                action = np.pad(action, (0, self.obs_dim - len(action)), 'constant')
            noise = np.random.randn(self.obs_dim) * self.noise_scale
            ns = self.state[i] + action + noise
            next_state.append(ns.astype(np.float32))
            # Reward design: penalize large actions.
            rewards.append(-np.sum(np.square(actions[i])))
        self.state = next_state
        # All agents share the same termination condition.
        done = self.current_step >= self.max_steps
        return next_state, rewards, [done] * self.num_agents, {}

    def render(self, mode='human'):
        pass

    def close(self):
        pass

# -----------------------------
# Actor and Critic networks
# -----------------------------
class Actor(nn.Module):
    """Deterministic policy network: maps one agent's observation to an action in [-1, 1]."""

    def __init__(self, obs_dim, action_dim, hidden_dim=64):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(obs_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        hidden = F.relu(self.fc2(F.relu(self.fc1(x))))
        # tanh squashes the output into [-1, 1], matching the action space bounds.
        return torch.tanh(self.fc3(hidden))

class Critic(nn.Module):
    """Centralized Q-network: scores a joint (global observation, global action) pair."""

    def __init__(self, total_obs_dim, total_action_dim, hidden_dim=64):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(total_obs_dim + total_action_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, 1)

    def forward(self, obs, actions):
        # Both inputs are (batch_size, dim); concatenate them into one joint vector.
        joint = torch.cat([obs, actions], dim=1)
        h = F.relu(self.fc1(joint))
        h = F.relu(self.fc2(h))
        return self.fc3(h)

# -----------------------------
# Per-agent MADDPG module
# -----------------------------
class MADDPGAgent:
    """One MADDPG agent: a decentralized actor plus a centralized critic,
    each paired with a slowly-tracking target network."""

    def __init__(self, agent_id, obs_dim, action_dim, total_obs_dim, total_action_dim,
                 actor_lr=1e-3, critic_lr=1e-3, gamma=0.95, tau=0.01):
        self.agent_id = agent_id
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.total_obs_dim = total_obs_dim
        self.total_action_dim = total_action_dim
        self.gamma = gamma
        self.tau = tau

        # Online networks and their target copies.
        self.actor = Actor(obs_dim, action_dim)
        self.target_actor = Actor(obs_dim, action_dim)
        self.critic = Critic(total_obs_dim, total_action_dim)
        self.target_critic = Critic(total_obs_dim, total_action_dim)

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr)

        # Start the targets as exact copies of the online networks.
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.target_critic.load_state_dict(self.critic.state_dict())

    def _soft_update(self, target_net, source_net):
        """Blend source parameters into the target: t <- tau * s + (1 - tau) * t."""
        for t_param, s_param in zip(target_net.parameters(), source_net.parameters()):
            t_param.data.copy_(self.tau * s_param.data + (1.0 - self.tau) * t_param.data)

    def update_target(self):
        """Soft-update both target networks toward their online counterparts."""
        self._soft_update(self.target_actor, self.actor)
        self._soft_update(self.target_critic, self.critic)

# -----------------------------
# Experience replay buffer
# -----------------------------
class ReplayBuffer:
    """Fixed-capacity experience replay shared by all agents."""

    def __init__(self, capacity, num_agents):
        self.capacity = capacity
        self.num_agents = num_agents
        self.buffer = deque(maxlen=capacity)

    def add(self, obs, actions, rewards, next_obs, dones):
        """Store one transition; every argument is a per-agent list."""
        self.buffer.append((obs, actions, rewards, next_obs, dones))

    def sample(self, batch_size):
        """Sample ``batch_size`` transitions uniformly at random.

        Returns per-agent tensors for observations, actions, rewards, next
        observations and done flags, plus the concatenated global
        observation / action / next-observation tensors.
        """
        batch = random.sample(self.buffer, batch_size)

        def stacked(field, agent):
            # Stack one field of one agent across the sampled batch.
            return torch.tensor(np.array([item[field][agent] for item in batch]),
                                dtype=torch.float32)

        agent_ids = range(self.num_agents)
        obs_batch = [stacked(0, i) for i in agent_ids]
        actions_batch = [stacked(1, i) for i in agent_ids]
        rewards_batch = [stacked(2, i).unsqueeze(1) for i in agent_ids]
        next_obs_batch = [stacked(3, i) for i in agent_ids]
        dones_batch = [stacked(4, i).unsqueeze(1) for i in agent_ids]

        # Concatenate per-agent tensors into global (joint) tensors.
        global_obs = torch.cat(obs_batch, dim=1)          # (batch_size, total_obs_dim)
        global_actions = torch.cat(actions_batch, dim=1)  # (batch_size, total_action_dim)
        global_next_obs = torch.cat(next_obs_batch, dim=1)
        return (obs_batch, actions_batch, rewards_batch, global_obs, global_actions,
                next_obs_batch, global_next_obs, dones_batch)

    def __len__(self):
        return len(self.buffer)

# -----------------------------
# Overall MADDPG module (manages multiple agents)
# -----------------------------
class MADDPG:
    """Coordinates all MADDPG agents: owns the shared replay buffer, selects
    per-agent actions, and performs the centralized-critic /
    decentralized-actor updates for every agent."""

    def __init__(self, num_agents, obs_dims, action_dims, actor_lr=1e-3, critic_lr=1e-3,
                 gamma=0.95, tau=0.01, buffer_capacity=100000):
        self.num_agents = num_agents
        self.agents = []
        self.gamma = gamma
        self.tau = tau
        self.buffer = ReplayBuffer(buffer_capacity, num_agents)
        total_obs_dim = sum(obs_dims)       # dimensionality of the global (joint) observation
        total_action_dim = sum(action_dims)   # dimensionality of the global (joint) action

        # Every critic sees the full joint observation/action, so all agents
        # share the same total dimensions.
        for i in range(num_agents):
            agent = MADDPGAgent(agent_id=i,
                                obs_dim=obs_dims[i],
                                action_dim=action_dims[i],
                                total_obs_dim=total_obs_dim,
                                total_action_dim=total_action_dim,
                                actor_lr=actor_lr,
                                critic_lr=critic_lr,
                                gamma=gamma,
                                tau=tau)
            self.agents.append(agent)

    def select_action(self, obs_all):
        """Select one action per agent from its own (local) observation.

        Args:
            obs_all: list of per-agent observations (numpy arrays).

        Returns:
            List of per-agent actions (numpy arrays).
        """
        actions = []
        for i, agent in enumerate(self.agents):
            obs_tensor = torch.tensor(obs_all[i], dtype=torch.float32).unsqueeze(0)  # add batch dimension
            action = agent.actor(obs_tensor).detach().numpy()[0]
            actions.append(action)
        return actions

    def update(self, batch_size):
        """Sample a batch from the replay buffer and update every agent's
        critic, actor, and target networks (in that order)."""
        if len(self.buffer) < batch_size:
            return
        # Sample a batch of transitions from the replay buffer.
        (obs_batch, actions_batch, rewards_batch, global_obs, global_actions,
         next_obs_batch, global_next_obs, dones_batch) = self.buffer.sample(batch_size)

        # Update each agent in turn.
        for i, agent in enumerate(self.agents):
            # ---- Critic update ----
            # Next joint action: every agent's TARGET actor acts on its own
            # next observation.
            next_actions = []
            for j, other_agent in enumerate(self.agents):
                next_action = other_agent.target_actor(next_obs_batch[j])
                next_actions.append(next_action)
            global_next_actions = torch.cat(next_actions, dim=1)
            # TD target: y = r + gamma * target_critic(next_state, next_actions) * (1 - done)
            target_Q = agent.target_critic(global_next_obs, global_next_actions)
            y = rewards_batch[i] + agent.gamma * target_Q * (1 - dones_batch[i])
            # Current Q estimate for the stored joint observation/action.
            current_Q = agent.critic(global_obs, global_actions)
            critic_loss = F.mse_loss(current_Q, y.detach())
            agent.critic_optimizer.zero_grad()
            critic_loss.backward()
            agent.critic_optimizer.step()

            # ---- Actor update ----
            # To update agent i's actor, substitute its CURRENT actor output
            # for its stored action; all other agents keep the actions stored
            # in the buffer (no gradient flows through those).
            current_actions = []
            for j, other_agent in enumerate(self.agents):
                if j == i:
                    current_action = other_agent.actor(obs_batch[j])
                else:
                    # Buffer tensors carry no grad, so only agent i's actor
                    # receives gradients from the actor loss.
                    current_action = actions_batch[j]
                current_actions.append(current_action)
            global_current_actions = torch.cat(current_actions, dim=1)
            # Actor loss: negative mean critic Q value (gradient ascent on Q).
            actor_loss = -agent.critic(global_obs, global_current_actions).mean()
            agent.actor_optimizer.zero_grad()
            actor_loss.backward()
            agent.actor_optimizer.step()

            # Soft-update the target networks toward the online networks.
            agent.update_target()

# -----------------------------
# Main training loop
# -----------------------------
def main():
    """Train MADDPG agents on the simple multi-agent environment."""
    # Hyper-parameters.
    num_agents = 2
    obs_dim = 4
    action_dim = 2
    num_episodes = 1000
    max_steps = 25
    batch_size = 128

    # Build the environment.
    env = MultiAgentSimpleEnv(num_agents=num_agents, obs_dim=obs_dim,
                              action_dim=action_dim, max_steps=max_steps)
    # Per-agent observation/action dimensions (identical for every agent here).
    obs_dims = [obs_dim] * num_agents
    action_dims = [action_dim] * num_agents
    maddpg = MADDPG(num_agents, obs_dims, action_dims,
                    actor_lr=1e-3, critic_lr=1e-3, gamma=0.95, tau=0.01,
                    buffer_capacity=100000)

    total_steps = 0
    for episode in range(num_episodes):
        # reset() returns the list of initial per-agent observations.
        obs = env.reset()
        episode_rewards = np.zeros(num_agents)
        for _ in range(max_steps):
            # Pick actions from the current observations and interact.
            actions = maddpg.select_action(obs)
            next_obs, rewards, dones, _info = env.step(actions)
            # Store the transition (each element is a per-agent list).
            maddpg.buffer.add(obs, actions, rewards, next_obs, dones)
            obs = next_obs
            episode_rewards += np.array(rewards)
            total_steps += 1
            # Attempt a network update at every step.
            maddpg.update(batch_size)
            if all(dones):
                break
        print(f"Episode: {episode}, Rewards: {episode_rewards}")

if __name__ == '__main__':
    # Fix the random seeds for reproducibility.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    main()