{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "is_executing": false
    }
   },
   "outputs": [],
   "source": [
    "import gym\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers\n",
    "\n",
    "# Reproducibility and training hyperparameters.\n",
    "seed = 42\n",
    "gamma = 0.99  # discount factor for future rewards\n",
    "max_steps_per_episode = 10000\n",
    "max_steps_pre_episode = max_steps_per_episode  # backward-compatible alias for the original misspelling\n",
    "env = gym.make(\"CartPole-v0\")\n",
    "env.seed(seed)\n",
    "# Smallest representable float32 increment; guards against division by zero\n",
    "# when normalizing discounted returns later in training.\n",
    "eps = np.finfo(np.float32).eps.item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "num_inputs = 4\n",
    "num_actions = 2\n",
    "num_hidden = 128\n",
    "\n",
    "# Shared trunk feeding two heads: a softmax policy over actions (actor)\n",
    "# and a scalar state-value estimate (critic).\n",
    "observation = layers.Input(shape=(num_inputs,))\n",
    "hidden = layers.Dense(num_hidden, activation='relu')(observation)\n",
    "actor_head = layers.Dense(num_actions, activation='softmax')(hidden)\n",
    "critic_head = layers.Dense(1)(hidden)\n",
    "\n",
    "model = keras.Model(inputs=observation, outputs=[actor_head, critic_head])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stdout",
     "text": [
      "running reward: 14.69 at episode 10\n",
      "running reward: 24.56 at episode 20\n",
      "running reward: 45.74 at episode 30\n",
      "running reward: 51.00 at episode 40\n",
      "running reward: 58.31 at episode 50\n",
      "running reward: 62.20 at episode 60\n",
      "running reward: 59.99 at episode 70\n",
      "running reward: 56.47 at episode 80\n",
      "running reward: 53.97 at episode 90\n",
      "running reward: 55.65 at episode 100\n",
      "running reward: 58.95 at episode 110\n",
      "running reward: 52.99 at episode 120\n",
      "running reward: 55.06 at episode 130\n",
      "running reward: 55.99 at episode 140\n",
      "running reward: 59.99 at episode 150\n",
      "running reward: 58.98 at episode 160\n",
      "running reward: 58.90 at episode 170\n",
      "running reward: 69.65 at episode 180\n",
      "running reward: 76.00 at episode 190\n",
      "running reward: 79.21 at episode 200\n",
      "running reward: 81.03 at episode 210\n",
      "running reward: 70.82 at episode 220\n",
      "running reward: 58.48 at episode 230\n",
      "running reward: 56.17 at episode 240\n",
      "running reward: 66.66 at episode 250\n",
      "running reward: 80.55 at episode 260\n",
      "running reward: 84.68 at episode 270\n",
      "running reward: 87.75 at episode 280\n",
      "running reward: 96.96 at episode 290\n",
      "running reward: 92.38 at episode 300\n",
      "running reward: 97.26 at episode 310\n",
      "running reward: 106.56 at episode 320\n",
      "running reward: 90.18 at episode 330\n",
      "running reward: 75.03 at episode 340\n",
      "running reward: 70.60 at episode 350\n",
      "running reward: 63.89 at episode 360\n",
      "running reward: 58.94 at episode 370\n",
      "running reward: 77.04 at episode 380\n",
      "running reward: 124.27 at episode 390\n",
      "running reward: 131.42 at episode 400\n",
      "running reward: 120.17 at episode 410\n",
      "running reward: 149.20 at episode 420\n",
      "running reward: 169.28 at episode 430\n",
      "running reward: 181.60 at episode 440\n",
      "running reward: 180.88 at episode 450\n",
      "running reward: 174.71 at episode 460\n",
      "running reward: 184.86 at episode 470\n",
      "running reward: 186.93 at episode 480\n",
      "running reward: 192.17 at episode 490\n",
      "running reward: 186.90 at episode 500\n",
      "running reward: 166.93 at episode 510\n",
      "running reward: 135.82 at episode 520\n",
      "running reward: 124.55 at episode 530\n",
      "running reward: 122.32 at episode 540\n",
      "running reward: 126.38 at episode 550\n",
      "running reward: 139.28 at episode 560\n",
      "running reward: 158.14 at episode 570\n",
      "running reward: 174.94 at episode 580\n",
      "running reward: 184.99 at episode 590\n",
      "running reward: 191.02 at episode 600\n",
      "running reward: 194.62 at episode 610\n",
      "Solved at episode 612!\n"
     ],
     "output_type": "stream"
    }
   ],
   "source": [
    "optimizer = keras.optimizers.Adam(learning_rate=0.01)\n",
    "huber_loss = keras.losses.Huber()\n",
    "\n",
    "# Per-episode buffers: log pi(a|s), V(s), and rewards, aligned by timestep.\n",
    "action_probs_history = []\n",
    "critic_value_history = []\n",
    "rewards_history = []\n",
    "running_reward = 0  # exponential moving average of episode rewards\n",
    "episode_count = 0\n",
    "\n",
    "while True:\n",
    "    state = env.reset()\n",
    "    episode_reward = 0\n",
    "    with tf.GradientTape() as tape:\n",
    "        # Roll out one episode under the current policy.\n",
    "        for timestep in range(1, max_steps_pre_episode):\n",
    "            state = tf.convert_to_tensor(state)\n",
    "            state = tf.expand_dims(state, 0)  # add batch dimension\n",
    "\n",
    "            # Forward pass: action distribution and state-value estimate.\n",
    "            action_probs, critic_value = model(state)\n",
    "            critic_value_history.append(critic_value[0, 0])\n",
    "\n",
    "            # Sample an action from the policy and store its log-probability.\n",
    "            action = np.random.choice(num_actions, p=np.squeeze(action_probs))\n",
    "            action_probs_history.append(tf.math.log(action_probs[0, action]))\n",
    "\n",
    "            state, reward, done, _ = env.step(action)\n",
    "            rewards_history.append(reward)\n",
    "            episode_reward += reward\n",
    "\n",
    "            if done:\n",
    "                break\n",
    "\n",
    "        # Smooth the episode reward with an exponential moving average (alpha = 0.05).\n",
    "        running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n",
    "\n",
    "        # Discounted returns G_t = r_t + gamma * G_{t+1}, computed back-to-front.\n",
    "        returns = []\n",
    "        discounted_sum = 0\n",
    "        for r in rewards_history[::-1]:\n",
    "            discounted_sum = r + gamma * discounted_sum\n",
    "            returns.insert(0, discounted_sum)\n",
    "\n",
    "        # Normalize returns to zero mean / unit variance; eps avoids division by zero.\n",
    "        returns = np.array(returns)\n",
    "        returns = (returns - np.mean(returns)) / (np.std(returns) + eps)\n",
    "        returns = returns.tolist()\n",
    "\n",
    "        # Actor loss: -log pi(a|s) * advantage; critic loss: Huber(V(s), G_t).\n",
    "        history = zip(action_probs_history, critic_value_history, returns)\n",
    "        actor_losses = []\n",
    "        critic_losses = []\n",
    "        for log_prob, value, ret in history:\n",
    "            diff = ret - value  # advantage estimate\n",
    "            actor_losses.append(-log_prob * diff)\n",
    "            critic_losses.append(\n",
    "                huber_loss(tf.expand_dims(value, 0), tf.expand_dims(ret, 0))\n",
    "            )\n",
    "\n",
    "        # Backpropagate the combined loss and update the network.\n",
    "        loss_value = sum(actor_losses) + sum(critic_losses)\n",
    "        grads = tape.gradient(loss_value, model.trainable_variables)\n",
    "        optimizer.apply_gradients(zip(grads, model.trainable_variables))\n",
    "\n",
    "        # Reset per-episode buffers.\n",
    "        action_probs_history.clear()\n",
    "        critic_value_history.clear()\n",
    "        rewards_history.clear()\n",
    "\n",
    "    # Log progress every 10 episodes.\n",
    "    episode_count += 1\n",
    "    if episode_count % 10 == 0:\n",
    "        template = \"running reward: {:.2f} at episode {}\"\n",
    "        print(template.format(running_reward, episode_count))\n",
    "\n",
    "    if running_reward > 195:  # CartPole-v0 is considered solved at 195\n",
    "        print(\"Solved at episode {}!\".format(episode_count))\n",
    "        break"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": false
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "conda-env-keras-py",
   "language": "python",
   "display_name": "Python [conda env:keras]"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "source": [],
    "metadata": {
     "collapsed": false
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}