{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### A3C的核心思想\n",
    "\n",
    "A3C的核心思想是**利用多线程加速训练过程**，有一个全局网络global_net（由actor和critic组成）和若干个worker网络，每个worker运行一个独立的环境，当某一个worker的轨迹运行结束后则自身更新梯度并将梯度增量上传给global_net，然后再从global_net网络获取最新的网络参数。\n",
    "\n",
    "A3C的梯度更新是异步的，即所有线程自己更新自己的梯度，一旦更新完就上传给global_net，然后再获取global_net的最新网络参数。\n",
    "\n",
    "A3C流程图可参考：https://zhuanlan.zhihu.com/p/148492887。\n",
    "\n",
    "#### A3C的缺点\n",
    "\n",
    "A3C存在着致命的缺点。试想worker_i较其他worker更新较快，此时actor_worker_n和critic_worker_n的梯度方向朝着良好的方向收敛；其他更新的较慢的worker网络梯度更新方向还较为发散。这时worker_i的梯度和worker_other的梯度混合，会造成**本来良好的worker_i网络退化回去**。本来worker_i在使用更好的策略，结果其他worker用退化的策略“污染”了worker_i的策略。\n",
    "\n",
    "上述问题导致A3C在实践中表现并不好，经常难以收敛，所以往往使用其同步版本的A2C代替。若使用多线程A2C，则梯度更新是同步的，global_net会等到所有线程都更新完梯度统一更新梯度，让所有线程都使用最新的策略。\n",
    "\n",
    "代码参考自：https://github.com/Quantum-Cheese/DeepReinforcementLearning_Pytorch/tree/master/Actor_Critic/A3C"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### **网络模型**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from collections import namedtuple\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.distributions import Categorical\n",
    "from torch.distributions import Normal\n",
    "\n",
    "\n",
    "class ValueNetwork(nn.Module):\n",
    "\n",
    "    def __init__(self, input_dim, output_dim):\n",
    "        super(ValueNetwork, self).__init__()\n",
    "        self.fc1 = nn.Linear(input_dim, 256)\n",
    "        self.fc2 = nn.Linear(256, output_dim)\n",
    "\n",
    "    def forward(self, state):\n",
    "        value = F.relu(self.fc1(state))\n",
    "        value = self.fc2(value)\n",
    "\n",
    "        return value\n",
    "\n",
    "\n",
    "class ActorDiscrete(nn.Module):\n",
    "    \"\"\"\n",
    "    用于离散动作空间的策略网络\n",
    "    \"\"\"\n",
    "    def __init__(self,state_size,action_size):\n",
    "        super(ActorDiscrete, self).__init__()\n",
    "        self.seed = torch.manual_seed(0)\n",
    "        self.fc1 = nn.Linear(state_size, 128)\n",
    "        # self.fc2 = nn.Linear(64,128)\n",
    "        self.fc2= nn.Linear(128, action_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"\n",
    "        Build a network that maps state -> action probs.\n",
    "        \"\"\"\n",
    "\n",
    "        x=F.relu(self.fc1(x))\n",
    "        out = F.softmax(self.fc2(x),dim=1)\n",
    "        return out\n",
    "\n",
    "    def act(self,state):\n",
    "        \"\"\"\n",
    "        返回 action 和 action的概率\n",
    "        \"\"\"\n",
    "        # probs for each action (2d tensor)\n",
    "        probs = self.forward(state)\n",
    "        m = Categorical(probs)\n",
    "        action = m.sample()\n",
    "\n",
    "        # return action for current state, and the corresponding probability\n",
    "        return action.item(),probs[:,action.item()].item()\n",
    "\n",
    "\n",
    "class ActorContinous(nn.Module):\n",
    "    \"\"\"\n",
    "    用于连续动作空间的策略网络\n",
    "    \"\"\"\n",
    "    def __init__(self,state_size,action_size):\n",
    "        super(ActorContinous, self).__init__()\n",
    "        self.fc1 = nn.Linear(state_size, 128)\n",
    "        self.fc2 = nn.Linear(128,128)\n",
    "        self.mu_head = nn.Linear(128, action_size)\n",
    "        self.sigma_head = nn.Linear(128, action_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = F.relu(self.fc1(x))\n",
    "        x = F.relu(self.fc2(x))\n",
    "        mu = 2.0 * torch.tanh(self.mu_head(x))\n",
    "        sigma = F.softplus(self.sigma_head(x))\n",
    "        return (mu, sigma)\n",
    "\n",
    "    def act(self,state):\n",
    "        \"\"\"\n",
    "        返回 action 和 action 的 log prob\n",
    "        \"\"\"\n",
    "        with torch.no_grad():\n",
    "            (mu, sigma) = self.policy(state)  # 2d tensors\n",
    "        dist = Normal(mu, sigma)\n",
    "        action = dist.sample()\n",
    "        action_log_prob = dist.log_prob(action)\n",
    "\n",
    "        return action.numpy()[0], action_log_prob.numpy()[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### **智能体代码**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "import multiprocessing as mp\n",
    "from multiprocessing import Process\n",
    "from Actor_Critic.A3C.untils import ValueNetwork,ActorDiscrete,ActorContinous\n",
    "from Actor_Critic.A3C.worker import Worker\n",
    "\n",
    "GAMMA = 0.9  # discount factor for future rewards\n",
    "LR = 1e-4  # learning rate shared by the policy and value optimizers\n",
    "GLOBAL_MAX_EPISODE = 5000  # total episodes across all workers\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "\n",
    "class A3C():\n",
    "    \"\"\"Coordinator that owns the shared (global) networks and spawns worker processes.\"\"\"\n",
    "    def __init__(self,env,continuous,state_size,action_size):\n",
    "        self.max_episode=GLOBAL_MAX_EPISODE\n",
    "        self.global_episode = mp.Value('i', 0)  # episode counter shared across processes\n",
    "        self.global_epi_rew = mp.Value('d',0)  # moving-average episode reward, shared\n",
    "        self.rew_queue = mp.Queue()  # workers push rewards here; None signals completion\n",
    "        self.worker_num = mp.cpu_count()  # one worker per CPU core\n",
    "\n",
    "        # define the global networks\n",
    "        self.global_valueNet= ValueNetwork(state_size,1).to(device)\n",
    "        # Put the global parameters in shared memory so each worker process\n",
    "        # can copy from / push gradients to them.\n",
    "        self.global_valueNet.share_memory()\n",
    "\n",
    "        if continuous:\n",
    "            self.global_policyNet = ActorContinous(state_size, action_size).to(device)\n",
    "        else:\n",
    "            self.global_policyNet = ActorDiscrete(state_size, action_size).to(device)\n",
    "        self.global_policyNet.share_memory()\n",
    "\n",
    "        # global optimizers (workers step these with locally computed gradients)\n",
    "        self.global_optimizer_policy = optim.Adam(self.global_policyNet.parameters(), lr=LR)\n",
    "        self.global_optimizer_value = optim.Adam(self.global_valueNet.parameters(),lr=LR)\n",
    "\n",
    "        # define the workers\n",
    "        self.workers=[Worker(env,continuous,state_size,action_size,i,\n",
    "                             self.global_valueNet,self.global_optimizer_value,\n",
    "                             self.global_policyNet,self.global_optimizer_policy,\n",
    "                             self.global_episode,self.global_epi_rew,self.rew_queue,\n",
    "                             self.max_episode,GAMMA)\n",
    "                      for i in range(self.worker_num)]\n",
    "\n",
    "    def train_worker(self):\n",
    "        \"\"\"Start all workers; collect rewards until the first None sentinel, then join.\"\"\"\n",
    "        scores=[]\n",
    "        [w.start() for w in self.workers]\n",
    "        while True:\n",
    "            r = self.rew_queue.get()\n",
    "            if r is not None:\n",
    "                scores.append(r)\n",
    "            else:\n",
    "                break\n",
    "        [w.join() for w in self.workers]\n",
    "\n",
    "        return scores\n",
    "\n",
    "    def save_model(self):\n",
    "        \"\"\"Persist the global network weights to the working directory.\"\"\"\n",
    "        torch.save(self.global_valueNet.state_dict(), \"a3c_value_model.pth\")\n",
    "        torch.save(self.global_policyNet.state_dict(), \"a3c_policy_model.pth\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### **单线程代码**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import torch.multiprocessing as mp\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch.distributions import Normal\n",
    "from Actor_Critic.A3C.untils import ValueNetwork,ActorDiscrete,ActorContinous\n",
    "\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "\n",
    "class Worker(mp.Process):\n",
    "    \"\"\"A single A3C worker process.\n",
    "\n",
    "    Runs episodes in its own copy of the environment, accumulates a\n",
    "    trajectory, computes gradients on its local networks, pushes those\n",
    "    gradients to the shared global networks, then re-syncs from them.\n",
    "    \"\"\"\n",
    "    def __init__(self,env,continuous,state_size,action_size,id, global_valueNet,global_value_optimizer,\n",
    "                 global_policyNet,global_policy_optimizer,\n",
    "                 global_epi,global_epi_rew,rew_queue,\n",
    "                 max_epi,gamma):\n",
    "        super(Worker, self).__init__()\n",
    "        # define env for individual worker\n",
    "        # NOTE(review): the same env object is handed to every worker; each\n",
    "        # process only gets an independent copy once the process forks.\n",
    "        self.env = env\n",
    "        self.continuous = continuous\n",
    "        self.name = str(id)\n",
    "        self.env.seed(id)  # per-worker seed so trajectories differ\n",
    "        self.state_size = state_size\n",
    "        self.action_size = action_size\n",
    "        self.memory=[]  # current-episode trajectory: [state, action, reward, next_state, done]\n",
    "\n",
    "        # passing global settings to worker\n",
    "        self.global_valueNet,self.global_value_optimizer = global_valueNet,global_value_optimizer\n",
    "        self.global_policyNet,self.global_policy_optimizer = global_policyNet,global_policy_optimizer\n",
    "        self.global_epi,self.global_epi_rew = global_epi,global_epi_rew\n",
    "        self.rew_queue = rew_queue\n",
    "        self.max_epi = max_epi\n",
    "        # self.batch_size = batch_size\n",
    "        self.gamma = gamma\n",
    "\n",
    "        # define local net for individual worker\n",
    "        self.local_policyNet = ActorDiscrete(self.state_size,self.action_size).to(device)\n",
    "        if self.continuous:\n",
    "            self.local_policyNet = ActorContinous(self.state_size,self.action_size).to(device)\n",
    "        self.local_valueNet = ValueNetwork(self.state_size,1).to(device)\n",
    "\n",
    "    def sync_global(self):\n",
    "        \"\"\"Copy the latest global parameters into the local networks.\"\"\"\n",
    "        self.local_valueNet.load_state_dict(self.global_valueNet.state_dict())\n",
    "        self.local_policyNet.load_state_dict(self.global_policyNet.state_dict())\n",
    "\n",
    "    def calculate_loss(self):\n",
    "        \"\"\"Compute (value_loss, policy_loss) from the stored trajectory.\"\"\"\n",
    "        # get experiences from current trajectory\n",
    "        # NOTE(review): run() stores states that are already tensors, so\n",
    "        # torch.tensor(...) over them may fail/warn; also t[1] is the raw\n",
    "        # *action* stored in run(), not a log prob -- the prob returned by\n",
    "        # act() is discarded. Both look like latent bugs; confirm before use.\n",
    "        states = torch.tensor([t[0] for t in self.memory], dtype=torch.float)\n",
    "        log_probs = torch.tensor([t[1] for t in self.memory], dtype=torch.float)\n",
    "\n",
    "        # -- calculate discount future rewards for every time step\n",
    "        # Monte-Carlo return: ground_truth = r + gamma*(r' + gamma*(...)).\n",
    "        # This full-trajectory return is not the bootstrapped target usually\n",
    "        # used for the critic in actor-critic methods.\n",
    "        rewards = [t[2] for t in self.memory]\n",
    "        fur_Rewards = []\n",
    "        for i in range(len(rewards)):\n",
    "            discount = [self.gamma ** i for i in range(len(rewards) - i)]\n",
    "            f_rewards = rewards[i:]\n",
    "            fur_Rewards.append(sum(d * f for d, f in zip(discount, f_rewards)))\n",
    "        fur_Rewards = torch.tensor(fur_Rewards, dtype=torch.float).view(-1, 1)\n",
    "\n",
    "        # calculate loss for critic\n",
    "        V = self.local_valueNet(states)\n",
    "        value_loss = F.mse_loss(fur_Rewards, V)\n",
    "\n",
    "        # compute entropy for policy loss\n",
    "        # NOTE(review): unpacking (mu, sigma) only matches the continuous\n",
    "        # policy; ActorDiscrete.forward returns a single tensor and would\n",
    "        # raise here.\n",
    "        (mu, sigma) = self.local_policyNet(states)\n",
    "        dist = Normal(mu, sigma)\n",
    "        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(dist.scale)  # exploration\n",
    "\n",
    "        # calculate loss for actor: policy gradient weighted by the advantage,\n",
    "        # with an entropy bonus to encourage exploration\n",
    "        advantage = (fur_Rewards - V).detach()\n",
    "        policy_loss = -advantage * log_probs\n",
    "        policy_loss = (policy_loss - 0.005 * entropy).mean()\n",
    "\n",
    "        return value_loss,policy_loss\n",
    "\n",
    "    def update_global(self):\n",
    "        \"\"\"Push locally computed gradients into the global nets and step their optimizers.\"\"\"\n",
    "        value_loss, policy_loss = self.calculate_loss()\n",
    "\n",
    "        self.global_value_optimizer.zero_grad()\n",
    "        value_loss.backward()\n",
    "        # propagate local gradients to global parameters\n",
    "        for local_params, global_params in zip(self.local_valueNet.parameters(), self.global_valueNet.parameters()):\n",
    "            global_params._grad = local_params._grad\n",
    "        self.global_value_optimizer.step()\n",
    "\n",
    "        self.global_policy_optimizer.zero_grad()\n",
    "        policy_loss.backward()\n",
    "        # propagate local gradients to global parameters\n",
    "        for local_params, global_params in zip(self.local_policyNet.parameters(), self.global_policyNet.parameters()):\n",
    "            global_params._grad = local_params._grad\n",
    "        self.global_policy_optimizer.step()\n",
    "\n",
    "        self.memory=[]  # clear trajectory\n",
    "\n",
    "    def run(self):\n",
    "        \"\"\"Main loop: run episodes until the shared episode counter reaches max_epi.\"\"\"\n",
    "        while self.global_epi.value < self.max_epi:\n",
    "            state = self.env.reset()\n",
    "            total_reward=0\n",
    "            while True:\n",
    "                state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n",
    "                action, prob = self.local_policyNet.act(state)  # discrete: prob; continuous: log prob\n",
    "                next_state, reward, done, _ = self.env.step(action)\n",
    "                # NOTE(review): `prob` is not stored, yet calculate_loss() reads\n",
    "                # index 1 of these records as if it were a log prob.\n",
    "                self.memory.append([state,action,reward,next_state,done])\n",
    "                total_reward += reward\n",
    "                state = next_state\n",
    "\n",
    "                if done:\n",
    "                    # recoding global episode and episode reward\n",
    "                    with self.global_epi.get_lock():\n",
    "                        self.global_epi.value += 1\n",
    "                    with self.global_epi_rew.get_lock():\n",
    "                        if self.global_epi_rew.value == 0.:\n",
    "                            self.global_epi_rew.value = total_reward\n",
    "                        else:\n",
    "                            # Moving average reward\n",
    "                            self.global_epi_rew.value = self.global_epi_rew.value * 0.99 + total_reward * 0.01\n",
    "                    self.rew_queue.put(self.global_epi_rew.value)\n",
    "\n",
    "                    print(\"w{} | episode: {}\\t , episode reward:{:.4} \\t  \"\n",
    "                          .format(self.name,self.global_epi.value,self.global_epi_rew.value))\n",
    "                    break\n",
    "\n",
    "            # update and sync with the global net when finishing an episode\n",
    "            self.update_global()\n",
    "            self.sync_global()\n",
    "\n",
    "        self.rew_queue.put(None)  # sentinel: tell the coordinator this worker is done"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### **主程序代码**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from Actor_Critic.A3C.agent_a3c import A3C\n",
    "\n",
    "\n",
    "def get_env_prop(env_name, continuous):\n",
    "    \"\"\"Create a gym environment and return (env, state dim, action dim).\"\"\"\n",
    "    env = gym.make(env_name)\n",
    "    state_dim = env.observation_space.shape[0]\n",
    "    # Continuous spaces expose a shape; discrete spaces expose a count.\n",
    "    action_dim = env.action_space.shape[0] if continuous else env.action_space.n\n",
    "    return env, state_dim, action_dim\n",
    "\n",
    "\n",
    "def train_a3c(env_name,continuous):\n",
    "    \"\"\"Train an A3C agent on the named environment and return its episode scores.\"\"\"\n",
    "    env, state_size, action_size = get_env_prop(env_name, continuous)\n",
    "    agent = A3C(env, continuous, state_size, action_size)\n",
    "    return agent.train_worker()\n",
    "\n",
    "\n",
    "def train_agent_for_env(env_name,continuous):\n",
    "    \"\"\"Train A3C on the named environment.\n",
    "\n",
    "    Returns:\n",
    "        (agent, scores): the trained A3C coordinator and the list of episode rewards.\n",
    "    \"\"\"\n",
    "    # Consistency: reuse get_env_prop instead of duplicating the\n",
    "    # observation/action dimension logic inline.\n",
    "    env, state_dim, action_dim = get_env_prop(env_name, continuous)\n",
    "\n",
    "    agent = A3C(env, continuous,state_dim,action_dim)\n",
    "    scores = agent.train_worker()\n",
    "\n",
    "    return agent,scores\n",
    "\n",
    "\n",
    "def plot_scores(scores,filename):\n",
    "    \"\"\"Plot episode scores against episode number; save to `filename` and show.\"\"\"\n",
    "    # Fix: the original created an Axes but never used it, plotting through\n",
    "    # the pyplot state machine instead; draw explicitly on the Axes.\n",
    "    fig, ax = plt.subplots()\n",
    "    ax.plot(np.arange(1, len(scores) + 1), scores)\n",
    "    ax.set_ylabel('Score')\n",
    "    ax.set_xlabel('Episode #')\n",
    "    fig.savefig(filename)\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # train A3C on discrete env : CartPole\n",
    "    # BUG FIX: train_agent_for_env returns (agent, scores); the original\n",
    "    # passed the whole tuple to plot_scores, which would plot garbage.\n",
    "    agent, scores_cartPole = train_agent_for_env(\"CartPole-v0\",False)\n",
    "    plot_scores(scores_cartPole,\"cartPole_trainPlot.png\")\n",
    "\n",
    "    # train A3C on continuous env, e.g.:\n",
    "    # agent_mCar, scores_mCar = train_agent_for_env(\"MountainCarContinuous-v0\", True)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
