import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import time
# import random
# import gym
import rl_utils
# !git clone https://github.com/boyu-ai/multiagent-particle-envs.git --quiet
# https://github.com/openai/multiagent-particle-envs
# !pip install -e multiagent-particle-envs
import sys
# Forward slash works on every OS; the original backslash form ("..\m...")
# is kept literally by Python (with an invalid-escape warning) and only
# resolves as a path separator on Windows.
sys.path.append("../multiagent-particle-envs")
# multiagent-particle-envs relies on older gym internals, so gym must be
# pinned to a compatible version (see the pip command below)
# !pip install --upgrade gym==0.10.5 -q
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios


def make_env(scenario_name):
    """Build a MultiAgentEnv from one of the particle-env scenario scripts."""
    scn = scenarios.load(scenario_name + ".py").Scenario()
    world = scn.make_world()
    return MultiAgentEnv(world, scn.reset_world, scn.reward, scn.observation)


def onehot_from_logits(logits, eps=0.01):
    """Return epsilon-greedy one-hot actions for a batch of logits.

    With probability 1 - eps each row is the one-hot of its argmax
    (ties produce multiple ones, as in the reference implementation);
    with probability eps it is a uniformly random one-hot.
    """
    # greedy one-hot: 1 wherever the row attains its maximum
    argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float()
    # random one-hot rows, built directly on the logits' device
    # (the deprecated torch.autograd.Variable wrapper was a no-op)
    rand_idx = np.random.choice(logits.shape[1], size=logits.shape[0])
    rand_acs = torch.eye(logits.shape[1], device=logits.device)[rand_idx]
    # epsilon-greedy mix, one Bernoulli draw per row
    return torch.stack([argmax_acs[i] if r > eps else rand_acs[i]
                        for i, r in enumerate(torch.rand(logits.shape[0]))])


def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
    """Sample a tensor of the given shape from Gumbel(0, 1).

    eps guards both logs against log(0). The deprecated
    torch.autograd.Variable(..., requires_grad=False) wrapper was removed;
    a freshly created tensor already has requires_grad=False.
    """
    U = tens_type(*shape).uniform_()  # U ~ Uniform[0, 1)
    return -torch.log(-torch.log(U + eps) + eps)


def gumbel_softmax_sample(logits, temperature):
    """Draw one sample from the Gumbel-Softmax distribution over each row."""
    noise = sample_gumbel(logits.shape, tens_type=type(logits.data)).to(logits.device)
    perturbed = logits + noise
    return F.softmax(perturbed / temperature, dim=1)


def gumbel_softmax(logits, temperature=1.0):
    """Straight-through Gumbel-Softmax.

    Returns a hard one-hot action usable for environment interaction whose
    gradient is that of the soft Gumbel-Softmax sample, so backprop still
    reaches the actor.
    """
    y_soft = gumbel_softmax_sample(logits, temperature)
    y_hard = onehot_from_logits(y_soft)
    # forward value is y_hard; the detach trick routes gradients through y_soft
    return (y_hard.to(logits.device) - y_soft).detach() + y_soft


class TwoLayerFC(torch.nn.Module):
    """MLP with two hidden ReLU layers and a linear output layer."""

    def __init__(self, num_in, num_out, hidden_dim):
        super().__init__()
        self.fc1 = torch.nn.Linear(num_in, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = torch.nn.Linear(hidden_dim, num_out)

    def forward(self, x):
        h = F.relu(self.fc1(x))
        h = F.relu(self.fc2(h))
        return self.fc3(h)


class DDPG:
    """Single-agent DDPG learner used as a building block of MADDPG.

    The actor maps this agent's own observation to action logits; the critic
    scores the centralized input (all agents' observations and actions).
    """

    def __init__(self, state_dim, action_dim, critic_input_dim, hidden_dim, actor_lr, critic_lr, device):
        # actor network and its optimizer
        self.actor = TwoLayerFC(state_dim, action_dim, hidden_dim).to(device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        # target actor starts as an exact copy of the actor
        self.target_actor = TwoLayerFC(state_dim, action_dim, hidden_dim).to(device)
        self.target_actor.load_state_dict(self.actor.state_dict())

        # centralized critic (scalar Q-value) and its optimizer
        self.critic = TwoLayerFC(critic_input_dim, 1, hidden_dim).to(device)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        # target critic starts as an exact copy of the critic
        self.critic_target = TwoLayerFC(critic_input_dim, 1, hidden_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())

    def take_action(self, state, explore=False):
        """Return a numpy one-hot action; sample via Gumbel-Softmax when exploring."""
        logits = self.actor(state)
        if explore:
            act = gumbel_softmax(logits)
        else:
            act = onehot_from_logits(logits)
        return act.detach().cpu().numpy()[0]

    def soft_update(self, net, target_net, para_soft_update):
        """Polyak-average net's parameters into target_net in place."""
        tau = para_soft_update
        for tgt_param, src_param in zip(target_net.parameters(), net.parameters()):
            tgt_param.data.copy_(tgt_param.data * (1.0 - tau) + src_param.data * tau)



'''multi-agent DDPG
centralized training with decentralized execution (CTDE)
Lowe R, Wu Y I, Tamar A, et al. Multi-agent actor-critic for mixed cooperative-competitive environments [J]. Advances in neural information processing systems, 2017, 30.
'''
class MADDPG:
    """Multi-agent DDPG: centralized training with decentralized execution.

    One DDPG learner per agent; every critic is trained on the concatenation
    of all agents' observations and actions, while each actor only sees its
    own observation at execution time.
    """

    def __init__(self, env, device, actor_lr, critic_lr, hidden_dim, state_dims, action_dims, critic_input_dim, discount_factor, para_soft_update):
        # one learner per agent; all critics share the same centralized input size
        self.agents = [DDPG(state_dims[i], action_dims[i], critic_input_dim,
                            hidden_dim, actor_lr, critic_lr, device)
                       for i in range(len(env.agents))]
        self.discount_factor = discount_factor
        self.para_soft_update = para_soft_update
        self.critic_criterion = torch.nn.MSELoss()
        self.device = device

    def take_action(self, states, explore):
        """Return one numpy action per agent for a single environment step.

        Bug fix: iterate over self.agents instead of the module-level
        global ``env``, decoupling the class from script state.
        """
        states = [torch.tensor(np.array([states[i]]), dtype=torch.float, device=self.device)
                  for i in range(len(self.agents))]
        return [agent.take_action(state, explore)
                for agent, state in zip(self.agents, states)]

    def soft_update_target_of_all_agents(self):
        """Polyak-update every agent's target actor and target critic."""
        for agt in self.agents:
            agt.soft_update(agt.actor, agt.target_actor, self.para_soft_update)
            agt.soft_update(agt.critic, agt.critic_target, self.para_soft_update)

    def update(self, sample, agent_index):
        """One gradient step for agent ``agent_index``.

        sample: (obs, acts, rews, next_obs, dones) — each a list holding one
        batched tensor per agent.
        """
        obs_agts, act_agts, rew_agts, next_obs_agts, done_agts = sample
        agent_i = self.agents[agent_index]

        # --- centralized critic update ------------------------------------
        # each agent's target actor acts on its own next observation;
        # the results are discretized to one-hot target actions
        next_act_agts_target_actor = [
            onehot_from_logits(pi_target_actor(_next_obs))
            for pi_target_actor, _next_obs in zip(
                [agt.target_actor for agt in self.agents], next_obs_agts)]
        # target critic scores all next observations + all target actions
        next_q_values = agent_i.critic_target(
            torch.cat((*next_obs_agts, *next_act_agts_target_actor), dim=1))
        critic_target_value = rew_agts[agent_index].view(-1, 1) \
            + self.discount_factor * next_q_values * (1 - done_agts[agent_index].view(-1, 1))
        critic_value = agent_i.critic(torch.cat((*obs_agts, *act_agts), dim=1))
        # TD loss for the centralized action-value function, eq. (6) in the paper
        critic_loss = self.critic_criterion(critic_value, critic_target_value.detach())
        agent_i.critic_optimizer.zero_grad()
        critic_loss.backward()
        agent_i.critic_optimizer.step()

        # --- decentralized actor update -----------------------------------
        # this agent's action stays differentiable via Gumbel-Softmax
        agent_i_actor_out = agent_i.actor(obs_agts[agent_index])
        agent_i_act_vf_in = gumbel_softmax(agent_i_actor_out)
        all_actor_acs = []
        for i, (pi_actor, _obs) in enumerate(zip([agt.actor for agt in self.agents], obs_agts)):
            if i == agent_index:
                all_actor_acs.append(agent_i_act_vf_in)
            else:
                # other agents' actions are treated as fixed one-hots
                all_actor_acs.append(onehot_from_logits(pi_actor(_obs)))
        # maximize this agent's centralized Q over all observations/actions
        actor_loss = -agent_i.critic(torch.cat((*obs_agts, *all_actor_acs), dim=1)).mean()
        # small logits regularizer keeps the actor outputs bounded
        actor_loss += (agent_i_actor_out**2).mean() * 1e-3
        agent_i.actor_optimizer.zero_grad()
        actor_loss.backward()
        agent_i.actor_optimizer.step()
        










seedseed = 0  # global RNG seed for reproducibility
# random.seed(seedseed)
np.random.seed(seedseed)
torch.manual_seed(seedseed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

num_episodes = 5000
episode_length = 25  # maximum number of steps per episode
buffer_size = 100000
minimal_size = 4000  # minimum replay-buffer fill before updates begin
batch_size = 1024

hidden_dim = 64
actor_lr = 1e-2
critic_lr = 1e-2
discount_factor = 0.95
para_soft_update = 1e-2  # tau for Polyak soft target-network updates
update_interval = 100  # environment steps between gradient updates

env_id = "simple_adversary"
env = make_env(env_id)
replay_buffer = rl_utils.ReplayBuffer(buffer_size)

# read per-agent observation/action sizes from the environment's spaces
state_dims = []
action_dims = []
for action_space in env.action_space:
    action_dims.append(action_space.n)
for state_space in env.observation_space:
    state_dims.append(state_space.shape[0])
# the centralized critic sees every agent's observation and action concatenated
critic_input_dim = sum(state_dims) + sum(action_dims)

maddpg = MADDPG(env, device, actor_lr, critic_lr, hidden_dim, state_dims,
                action_dims, critic_input_dim, discount_factor, para_soft_update)


def evaluate(env_id, maddpg, n_episode=10, episode_length=25):
    """Evaluate the learned policies greedily (no exploration noise).

    Returns the per-agent return averaged over n_episode rollouts in a
    fresh environment instance.
    """
    eval_env = make_env(env_id)
    returns = np.zeros(len(eval_env.agents))
    for _ in range(n_episode):
        obs = eval_env.reset()
        for _ in range(episode_length):
            actions = maddpg.take_action(obs, explore=False)
            obs, rew, done, info = eval_env.step(actions)
            returns += np.array(rew) / n_episode
    return returns.tolist()


def data_batchs_to_agents(data_batchs):
    """Regroup a sampled batch from per-sample to per-agent layout.

    data_batchs shape: (batch_size, n_agents, data_len)
    returns:           n_agents FloatTensors, each (batch_size, data_len),
                       on the module-level training ``device``

    The original nested loops implemented a plain transpose of the outer two
    levels; zip(*...) does the same in one pass.
    """
    return [torch.FloatTensor(np.vstack(per_agent)).to(device)
            for per_agent in zip(*data_batchs)]


return_list = []  # evaluation returns, one list of per-agent returns per round
total_step = 0
time_start = time.perf_counter()  # wall-clock start for progress logging
for i_episode in range(num_episodes):
    obs_agts = env.reset()
    # ep_returns = np.zeros(len(env.agents))
    for e_i in range(episode_length):
        # act with exploration (Gumbel-Softmax sampling) during training
        act_agts = maddpg.take_action(obs_agts, explore=True)
        next_obs_agts, rew_agts, done_agts, _ = env.step(act_agts)
        replay_buffer.add(obs_agts, act_agts, rew_agts, next_obs_agts, done_agts)
        obs_agts = next_obs_agts
        
        total_step += 1
        # update only after the buffer has warmed up, every update_interval steps
        if replay_buffer.size() >= minimal_size and total_step % update_interval == 0:
            sample = replay_buffer.sample(batch_size)
            # regroup each field (obs/act/rew/next_obs/done) per agent
            sample = [data_batchs_to_agents(x) for x in sample]
            for agent_index in range(len(env.agents)):
                maddpg.update(sample, agent_index)
            maddpg.soft_update_target_of_all_agents()
            
    if (i_episode + 1) % 10 == 0:
        # periodic greedy evaluation (no exploration)
        ep_returns = evaluate(env_id, maddpg, n_episode=100)
        return_list.append(ep_returns)
        print(f"Episode: {i_episode+1}/{num_episodes}, returns: {ep_returns}, time consumption: {(time.perf_counter() - time_start):.2f}(s)")




return_array = np.array(return_list)  # shape: (n_evaluations, n_agents)
for i, agent_name in enumerate(["adversary_0", "agent_0", "agent_1"]):
    plt.figure()
    # NOTE(review): evaluation happens every 10 episodes in the training loop,
    # but the x-axis scales indices by 100 — confirm the intended spacing
    plt.plot(np.arange(return_array.shape[0]) * 100,rl_utils.moving_average(return_array[:, i], 9))
    plt.xlabel("Episodes")
    plt.ylabel("Returns")
    plt.title(f"{agent_name} by MADDPG")
# NOTE(review): no plt.show()/savefig here — figures only appear under
# interactive/notebook matplotlib backends