{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "a2c.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "qS89Jy2-00HL",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "import numpy as np \n",
        "import torch.nn.functional as F \n",
        "import torch.optim as optim\n",
        "from torch.distributions import Categorical\n",
        "import gym"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Cg2Z_G6UmDZ5",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def run(env, agent):\n",
        "    \"\"\"Run MAX_EPISODE training episodes on `env`, updating `agent` after each.\n",
        "\n",
        "    Each episode's transitions are gathered as [s, a, r, s', done] lists\n",
        "    and handed to agent.update() once the episode ends.\n",
        "    \"\"\"\n",
        "    for episode in range(MAX_EPISODE):\n",
        "        state = env.reset()\n",
        "        trajectory = []  # per-episode list of [s, a, r, s', done]\n",
        "        episode_reward = 0\n",
        "        step = 0\n",
        "        done = False\n",
        "        while step < MAX_STEPS and not done:\n",
        "            action = agent.get_action(state)\n",
        "            next_state, reward, done, _ = env.step(action)\n",
        "            trajectory.append([state, action, reward, next_state, done])\n",
        "            episode_reward += reward\n",
        "            state = next_state\n",
        "            step += 1\n",
        "        # Report progress every 10 episodes.\n",
        "        if episode % 10 == 0:\n",
        "            print(\"Episode \" + str(episode) + \": \" + str(episode_reward))\n",
        "        agent.update(trajectory)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wLHVlNXDmH9i",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "class TwoHeadNetwork(nn.Module):\n",
        "    \"\"\"Actor-critic network with separate policy and value heads over the same input.\"\"\"\n",
        "\n",
        "    def __init__(self, input_dim, output_dim):\n",
        "        super(TwoHeadNetwork, self).__init__()\n",
        "        # Policy head: state -> action logits.\n",
        "        self.policy1 = nn.Linear(input_dim, 256)\n",
        "        self.policy2 = nn.Linear(256, output_dim)\n",
        "        # Value head: state -> scalar state-value estimate.\n",
        "        self.value1 = nn.Linear(input_dim, 256)\n",
        "        self.value2 = nn.Linear(256, 1)\n",
        "\n",
        "    def forward(self, state):\n",
        "        \"\"\"Return (action logits, state value) for `state`.\"\"\"\n",
        "        logits = self.policy2(F.relu(self.policy1(state)))\n",
        "        value = self.value2(F.relu(self.value1(state)))\n",
        "        return logits, value\n",
        "\n",
        "\n",
        "class ValueNetwork(nn.Module):\n",
        "    \"\"\"Two-layer MLP mapping a state to a value estimate.\"\"\"\n",
        "\n",
        "    def __init__(self, input_dim, output_dim):\n",
        "        super(ValueNetwork, self).__init__()\n",
        "        self.fc1 = nn.Linear(input_dim, 256)\n",
        "        self.fc2 = nn.Linear(256, output_dim)\n",
        "\n",
        "    def forward(self, state):\n",
        "        \"\"\"Return the value estimate for `state`.\"\"\"\n",
        "        hidden = F.relu(self.fc1(state))\n",
        "        return self.fc2(hidden)\n",
        "    \n",
        "\n",
        "class PolicyNetwork(nn.Module):\n",
        "    \"\"\"Two-layer MLP mapping a state to unnormalized action logits.\"\"\"\n",
        "\n",
        "    def __init__(self, input_dim, output_dim):\n",
        "        super(PolicyNetwork, self).__init__()\n",
        "        self.fc1 = nn.Linear(input_dim, 256)\n",
        "        self.fc2 = nn.Linear(256, output_dim)\n",
        "\n",
        "    def forward(self, state):\n",
        "        \"\"\"Return unnormalized action logits for `state`.\"\"\"\n",
        "        hidden = F.relu(self.fc1(state))\n",
        "        return self.fc2(hidden)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2mvzHR62mPxU",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "class A2CAgent():\n",
        "    \"\"\"Advantage actor-critic agent using a shared two-head network.\n",
        "\n",
        "    Value targets are full-episode Monte Carlo returns; the policy\n",
        "    gradient is scaled by the detached advantage (target - value).\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, env, gamma, lr):\n",
        "        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "        \n",
        "        self.env = env\n",
        "        self.obs_dim = env.observation_space.shape[0]\n",
        "        self.action_dim = env.action_space.n\n",
        "        \n",
        "        self.gamma = gamma  # discount factor for returns\n",
        "        self.lr = lr\n",
        "        \n",
        "        # BUGFIX: move the model to self.device; input tensors are sent\n",
        "        # there, so a CPU-resident model crashed when CUDA was available.\n",
        "        self.model = TwoHeadNetwork(self.obs_dim, self.action_dim).to(self.device)\n",
        "        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n",
        "    \n",
        "    def get_action(self, state):\n",
        "        \"\"\"Sample an action index from the current policy for one state.\"\"\"\n",
        "        state = torch.FloatTensor(state).to(self.device)\n",
        "        logits, _ = self.model.forward(state)\n",
        "        dist = F.softmax(logits, dim=0)\n",
        "        probs = Categorical(dist)\n",
        "\n",
        "        return probs.sample().cpu().detach().item()\n",
        "    \n",
        "    def compute_loss(self, trajectory):\n",
        "        \"\"\"Return the combined actor-critic loss for one episode.\n",
        "\n",
        "        trajectory: list of [state, action, reward, next_state, done].\n",
        "        \"\"\"\n",
        "        states = torch.FloatTensor([sars[0] for sars in trajectory]).to(self.device)\n",
        "        actions = torch.LongTensor([sars[1] for sars in trajectory]).view(-1, 1).to(self.device)\n",
        "        \n",
        "        # Discounted returns G_t = r_t + gamma * G_{t+1}, via one reverse\n",
        "        # scan (O(n) instead of the original O(n^2) comprehension).\n",
        "        # BUGFIX: the old code added `rewards` to returns that already\n",
        "        # contained r_t, double-counting each immediate reward.\n",
        "        returns = []\n",
        "        running_return = 0.0\n",
        "        for transition in reversed(trajectory):\n",
        "            running_return = transition[2] + self.gamma * running_return\n",
        "            returns.append(running_return)\n",
        "        returns.reverse()\n",
        "        value_targets = torch.FloatTensor(returns).view(-1, 1).to(self.device)\n",
        "        \n",
        "        logits, values = self.model.forward(states)\n",
        "        probs = Categorical(F.softmax(logits, dim=1))\n",
        "        \n",
        "        # Critic loss: regress predicted values onto Monte Carlo returns\n",
        "        # (targets carry no graph, so no detach needed).\n",
        "        value_loss = F.mse_loss(values, value_targets)\n",
        "        \n",
        "        # Entropy bonus. BUGFIX: the old code computed\n",
        "        # -sum(dist.mean() * log(dist)), which is not the entropy\n",
        "        # -sum(p * log p); Categorical.entropy() is the correct form.\n",
        "        entropy = probs.entropy().sum()\n",
        "        \n",
        "        # Actor loss: policy gradient weighted by the detached advantage.\n",
        "        advantage = value_targets - values\n",
        "        policy_loss = -probs.log_prob(actions.view(-1)).view(-1, 1) * advantage.detach()\n",
        "        policy_loss = policy_loss.mean()\n",
        "        \n",
        "        total_loss = policy_loss + value_loss - 0.001 * entropy \n",
        "        return total_loss\n",
        "    \n",
        "    def update(self, trajectory):\n",
        "        \"\"\"Run one optimizer step on the loss from `trajectory`.\"\"\"\n",
        "        loss = self.compute_loss(trajectory)\n",
        "\n",
        "        self.optimizer.zero_grad()\n",
        "        loss.backward()\n",
        "        self.optimizer.step()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xwZeX_SVmSgZ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# unlike A2CAgent in a2c.py, here I separated value and policy network.\n",
        "class decoupled_A2CAgent():\n",
        "    \"\"\"A2C agent with separate value/policy networks and optimizers.\n",
        "\n",
        "    Same objective as A2CAgent, but the critic and actor are trained by\n",
        "    independent optimizer steps.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, env, gamma, lr):\n",
        "        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "        \n",
        "        self.env = env\n",
        "        self.obs_dim = env.observation_space.shape[0]\n",
        "        self.action_dim = env.action_space.n\n",
        "        \n",
        "        self.gamma = gamma  # discount factor for returns\n",
        "        self.lr = lr\n",
        "        \n",
        "        # BUGFIX: move both networks to self.device; input tensors are\n",
        "        # sent there, so CPU-resident networks crashed under CUDA.\n",
        "        self.value_network = ValueNetwork(self.obs_dim, 1).to(self.device)\n",
        "        self.policy_network = PolicyNetwork(self.obs_dim, self.action_dim).to(self.device)\n",
        "        \n",
        "        self.value_optimizer = optim.Adam(self.value_network.parameters(), lr=self.lr)\n",
        "        self.policy_optimizer = optim.Adam(self.policy_network.parameters(), lr=self.lr)\n",
        "    \n",
        "    def get_action(self, state):\n",
        "        \"\"\"Sample an action index from the current policy for one state.\"\"\"\n",
        "        state = torch.FloatTensor(state).to(self.device)\n",
        "        logits = self.policy_network.forward(state)\n",
        "        dist = F.softmax(logits, dim=0)\n",
        "        probs = Categorical(dist)\n",
        "\n",
        "        return probs.sample().cpu().detach().item()\n",
        "    \n",
        "    def compute_loss(self, trajectory):\n",
        "        \"\"\"Return (value_loss, policy_loss) for one episode.\n",
        "\n",
        "        trajectory: list of [state, action, reward, next_state, done].\n",
        "        \"\"\"\n",
        "        states = torch.FloatTensor([sars[0] for sars in trajectory]).to(self.device)\n",
        "        actions = torch.LongTensor([sars[1] for sars in trajectory]).view(-1, 1).to(self.device)\n",
        "        \n",
        "        # Discounted returns G_t = r_t + gamma * G_{t+1}, via one reverse\n",
        "        # scan (O(n) instead of the original O(n^2) comprehension).\n",
        "        # BUGFIX: the old code added `rewards` to returns that already\n",
        "        # contained r_t, double-counting each immediate reward.\n",
        "        returns = []\n",
        "        running_return = 0.0\n",
        "        for transition in reversed(trajectory):\n",
        "            running_return = transition[2] + self.gamma * running_return\n",
        "            returns.append(running_return)\n",
        "        returns.reverse()\n",
        "        value_targets = torch.FloatTensor(returns).view(-1, 1).to(self.device)\n",
        "        \n",
        "        # Critic loss: regress predicted values onto Monte Carlo returns\n",
        "        # (targets carry no graph, so no detach needed).\n",
        "        values = self.value_network.forward(states)\n",
        "        value_loss = F.mse_loss(values, value_targets)\n",
        "        \n",
        "        logits = self.policy_network.forward(states)\n",
        "        probs = Categorical(F.softmax(logits, dim=1))\n",
        "        \n",
        "        # Entropy bonus. BUGFIX: the old code computed\n",
        "        # -sum(dist.mean() * log(dist)), which is not the entropy\n",
        "        # -sum(p * log p); Categorical.entropy() is the correct form.\n",
        "        entropy = probs.entropy().sum()\n",
        "      \n",
        "        # Actor loss: policy gradient weighted by the detached advantage.\n",
        "        advantage = value_targets - values\n",
        "        policy_loss = -probs.log_prob(actions.view(-1)).view(-1, 1) * advantage.detach()\n",
        "        policy_loss = policy_loss.mean() - 0.001 * entropy\n",
        "        \n",
        "        return value_loss, policy_loss\n",
        "    \n",
        "    def update(self, trajectory):\n",
        "        \"\"\"Take one critic step and one actor step on this trajectory.\"\"\"\n",
        "        value_loss, policy_loss = self.compute_loss(trajectory)\n",
        "\n",
        "        self.value_optimizer.zero_grad()\n",
        "        value_loss.backward()\n",
        "        self.value_optimizer.step()\n",
        "\n",
        "        self.policy_optimizer.zero_grad()\n",
        "        policy_loss.backward()\n",
        "        self.policy_optimizer.step()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WYAQuxls03zP",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "env = gym.make(\"CartPole-v0\")\n",
        "\n",
        "# Training hyperparameters.\n",
        "lr = 1e-4\n",
        "gamma = 0.99\n",
        "\n",
        "MAX_EPISODE = 1500\n",
        "MAX_STEPS = 500\n",
        "\n",
        "# BUGFIX: `Agent` was undefined (NameError); use the A2CAgent defined\n",
        "# above. Swap in decoupled_A2CAgent to try the two-network variant.\n",
        "agent = A2CAgent(env, gamma, lr)\n",
        "\n",
        "run(env, agent)"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}