{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "import random\n",
    "from collections import deque\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from keras import backend as K\n",
    "from keras import layers, models, optimizers\n",
    "import json\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ReplayBuffer(object):\n",
    "\n",
    "    def __init__(self, buffer_size):\n",
    "        self.buffer_size = buffer_size\n",
    "        self.num_experiences = 0\n",
    "        self.buffer = deque()\n",
    "\n",
    "    def getBatch(self, batch_size):\n",
    "        # Randomly sample batch_size examples\n",
    "        if self.num_experiences < batch_size:\n",
    "            return random.sample(self.buffer, self.num_experiences)\n",
    "        else:\n",
    "            return random.sample(self.buffer, batch_size)\n",
    "\n",
    "    def size(self):\n",
    "        return self.buffer_size\n",
    "\n",
    "    def add(self, state, action, reward, new_state, done):\n",
    "        experience = (state, action, reward, new_state, done)\n",
    "        if self.num_experiences < self.buffer_size:\n",
    "            self.buffer.append(experience)\n",
    "            self.num_experiences += 1\n",
    "        else:\n",
    "            self.buffer.popleft()\n",
    "            self.buffer.append(experience)\n",
    "\n",
    "    def count(self):\n",
    "        # if buffer is full, return buffer size\n",
    "        # otherwise, return experience counter\n",
    "        return self.num_experiences\n",
    "\n",
    "    def erase(self):\n",
    "        self.buffer = deque()\n",
    "        self.num_experiences = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class OUNoise:\n",
    "    \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n",
    "\n",
    "    def __init__(self, size, mu=None, theta=0.15, sigma=0.3):\n",
    "        \"\"\"Initialize parameters and noise process.\"\"\"\n",
    "        self.size = size\n",
    "        self.mu = mu if mu is not None else np.zeros(self.size)\n",
    "        self.theta = theta\n",
    "        self.sigma = sigma\n",
    "        self.state = np.ones(self.size) * self.mu\n",
    "        self.reset()\n",
    "\n",
    "    def reset(self):\n",
    "        \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\n",
    "        self.state = self.mu\n",
    "\n",
    "    def sample(self):\n",
    "        \"\"\"Update internal state and return it as a noise sample.\"\"\"\n",
    "        x = self.state\n",
    "        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))\n",
    "        self.state = x + dx\n",
    "        return self.state"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "HIDDEN1_UNITS = 300\n",
    "HIDDEN2_UNITS = 600\n",
    "\n",
    "class ActorNetwork(object):\n",
    "    \"\"\"DDPG actor (policy) network with a slowly tracking target copy.\n",
    "\n",
    "    NOTE(review): written against TF1-era APIs (tf.placeholder,\n",
    "    tf.train.AdamOptimizer, tf.initialize_all_variables) and legacy Keras\n",
    "    kwargs (Model(input=..., output=...)); it will not run unmodified on\n",
    "    TensorFlow 2.x / current Keras.\n",
    "    \"\"\"\n",
    "    def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):\n",
    "        self.sess = sess\n",
    "        self.BATCH_SIZE = BATCH_SIZE\n",
    "        self.TAU = TAU\n",
    "        self.LEARNING_RATE = LEARNING_RATE\n",
    "\n",
    "        K.set_session(sess)\n",
    "\n",
    "        #Now create the model\n",
    "        self.model , self.weights, self.state = self.create_actor_network(state_size, action_size)   \n",
    "        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size) \n",
    "        # Placeholder for dQ/da supplied by the critic (see train()).\n",
    "        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])\n",
    "        # Deterministic policy gradient: actor gradients weighted by -dQ/da,\n",
    "        # i.e. gradient ASCENT on the critic's Q estimate.\n",
    "        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)\n",
    "        grads = zip(self.params_grad, self.weights)\n",
    "        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)\n",
    "        # Deprecated TF1 spelling of tf.global_variables_initializer().\n",
    "        self.sess.run(tf.initialize_all_variables())\n",
    "\n",
    "    def train(self, states, action_grads):\n",
    "        \"\"\"One actor update step from a batch of states and critic dQ/da.\"\"\"\n",
    "        self.sess.run(self.optimize, feed_dict={\n",
    "            self.state: states,\n",
    "            self.action_gradient: action_grads\n",
    "        })\n",
    "\n",
    "    def target_train(self):\n",
    "        \"\"\"Soft (Polyak) update: target <- TAU*online + (1-TAU)*target.\"\"\"\n",
    "        actor_weights = self.model.get_weights()\n",
    "        actor_target_weights = self.target_model.get_weights()\n",
    "        for i in range(len(actor_weights)):\n",
    "            actor_target_weights[i] = self.TAU * actor_weights[i] + (1 - self.TAU)* actor_target_weights[i]\n",
    "        self.target_model.set_weights(actor_target_weights)\n",
    "\n",
    "    def create_actor_network(self, state_size,action_dim):\n",
    "        \"\"\"Build the policy net; return (model, trainable weights, state input).\"\"\"\n",
    "        print(\"Now we build the model\")\n",
    "        S = layers.Input(shape=[state_size])   \n",
    "        h0 = layers.Dense(HIDDEN1_UNITS, activation='relu')(S)\n",
    "        h1 = layers.Dense(HIDDEN2_UNITS, activation='relu')(h0)\n",
    "        # tanh bounds each action component to [-1, 1].\n",
    "        Steering = layers.Dense(action_dim,activation='tanh')(h1)#,init=lambda shape, name: RandomNormal(shape, scale=1e-4, name=name))(h1)  \n",
    "        #Acceleration = layers.Dense(1,activation='sigmoid',init=lambda shape, name: RandomNormal(shape, scale=1e-4, name=name))(h1)   \n",
    "        #Brake = layers.Dense(1,activation='sigmoid',init=lambda shape, name: RandomNormal(shape, scale=1e-4, name=name))(h1) \n",
    "        #V = merge([Steering,Acceleration,Brake],mode='concat')          \n",
    "        model = models.Model(input=S,output=Steering)\n",
    "        return model, model.trainable_weights, S\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "HIDDEN1_UNITS = 300\n",
    "HIDDEN2_UNITS = 600\n",
    "\n",
    "class CriticNetwork(object):\n",
    "    \"\"\"DDPG critic Q(s, a) network with a slowly tracking target copy.\n",
    "\n",
    "    NOTE(review): redefines HIDDEN1_UNITS/HIDDEN2_UNITS from the actor cell\n",
    "    (same values). Like the actor, this uses TF1-era APIs\n",
    "    (tf.initialize_all_variables) and legacy Keras kwargs\n",
    "    (Model(input=..., output=...)); it will not run unmodified on TF 2.x.\n",
    "    \"\"\"\n",
    "    def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):\n",
    "        self.sess = sess\n",
    "        self.BATCH_SIZE = BATCH_SIZE\n",
    "        self.TAU = TAU\n",
    "        self.LEARNING_RATE = LEARNING_RATE\n",
    "        self.action_size = action_size\n",
    "        \n",
    "        K.set_session(sess)\n",
    "\n",
    "        #Now create the model\n",
    "        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)  \n",
    "        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)  \n",
    "        self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update\n",
    "        # Deprecated TF1 spelling of tf.global_variables_initializer().\n",
    "        self.sess.run(tf.initialize_all_variables())\n",
    "\n",
    "    def gradients(self, states, actions):\n",
    "        \"\"\"Return dQ/da for a batch; consumed by the actor's policy update.\"\"\"\n",
    "        return self.sess.run(self.action_grads, feed_dict={\n",
    "            self.state: states,\n",
    "            self.action: actions\n",
    "        })[0]\n",
    "\n",
    "    def target_train(self):\n",
    "        \"\"\"Soft (Polyak) update: target <- TAU*online + (1-TAU)*target.\"\"\"\n",
    "        critic_weights = self.model.get_weights()\n",
    "        critic_target_weights = self.target_model.get_weights()\n",
    "        for i in range(len(critic_weights)):\n",
    "            critic_target_weights[i] = self.TAU * critic_weights[i] + (1 - self.TAU)* critic_target_weights[i]\n",
    "        self.target_model.set_weights(critic_target_weights)\n",
    "\n",
    "    def create_critic_network(self, state_size,action_dim):\n",
    "        \"\"\"Build Q(s, a); return (model, action input, state input).\"\"\"\n",
    "        print(\"Now we build the model\")\n",
    "        S = layers.Input(shape=[state_size])  \n",
    "        A = layers.Input(shape=[action_dim],name='action2')   \n",
    "        w1 = layers.Dense(HIDDEN1_UNITS, activation='relu')(S)\n",
    "        a1 = layers.Dense(HIDDEN2_UNITS, activation='linear')(A) \n",
    "        h1 = layers.Dense(HIDDEN2_UNITS, activation='linear')(w1)\n",
    "        # Action pathway is merged in after the first state-only layer.\n",
    "        h2 = layers.add([h1,a1])\n",
    "        h3 = layers.Dense(HIDDEN2_UNITS, activation='relu')(h2)\n",
    "        # NOTE(review): a Q-value head is usually Dense(1); this is only\n",
    "        # equivalent because action_dim == 1 here -- confirm before reusing\n",
    "        # with larger action spaces.\n",
    "        V = layers.Dense(action_dim,activation='linear')(h3)   \n",
    "        model = models.Model(input=[S,A],output=V)\n",
    "        adam = optimizers.Adam(lr=self.LEARNING_RATE)\n",
    "        model.compile(loss='mse', optimizer=adam)\n",
    "        return model, A, S "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "def playGame(train_indicator=0):    #1 means Train, 0 means simply Run\n",
    "    \"\"\"Run (or train) DDPG on MountainCarContinuous-v0.\n",
    "\n",
    "    train_indicator -- 1: train the networks and save weights periodically;\n",
    "                       0: only run the current policy (no noise, no updates).\n",
    "    \"\"\"\n",
    "    env = gym.make('MountainCarContinuous-v0')\n",
    "    env.reset()\n",
    "\n",
    "    ##########################\n",
    "    print(env.action_space.sample())\n",
    "    print(env.observation_space.sample())\n",
    "\n",
    "    state_dim = 2    # observation is [position, velocity]\n",
    "    action_dim = 1   # single continuous force action\n",
    "    # FIX: bounds come from the action space (observation_space was a bug).\n",
    "    action_low = env.action_space.low\n",
    "    action_high = env.action_space.high\n",
    "\n",
    "    ##########################\n",
    "    # Ornstein-Uhlenbeck noise process for exploration\n",
    "    exploration_mu = 0.1 * np.ones(action_dim)\n",
    "    print(exploration_mu)\n",
    "    exploration_theta = 0.15\n",
    "    exploration_sigma = 0.2\n",
    "    OU = OUNoise(action_dim, exploration_mu, exploration_theta, exploration_sigma)\n",
    "\n",
    "    BUFFER_SIZE = 100000\n",
    "    BATCH_SIZE = 32\n",
    "    GAMMA = 0.99\n",
    "    TAU = 0.001     #Target Network HyperParameters\n",
    "    LRA = 0.0001    #Learning rate for Actor\n",
    "    LRC = 0.001     #Learning rate for Critic\n",
    "\n",
    "    np.random.seed(1337)\n",
    "\n",
    "    EXPLORE = 100000.   # steps over which the noise scale decays to zero\n",
    "    episode_count = 2000\n",
    "    max_steps = 100000\n",
    "    step = 0\n",
    "    epsilon = 1         # linear decay; scales the exploration noise\n",
    "\n",
    "    #Tensorflow GPU optimization\n",
    "    config = tf.ConfigProto()\n",
    "    config.gpu_options.allow_growth = True\n",
    "    sess = tf.Session(config=config)\n",
    "    K.set_session(sess)\n",
    "\n",
    "    actor = ActorNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)\n",
    "    critic = CriticNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)\n",
    "    buff = ReplayBuffer(BUFFER_SIZE)    #Create replay buffer\n",
    "\n",
    "    #Now load the weight\n",
    "    print(\"Now we load the weight\")\n",
    "    try:\n",
    "        actor.model.load_weights(\"actormodel.h5\")\n",
    "        critic.model.load_weights(\"criticmodel.h5\")\n",
    "        actor.target_model.load_weights(\"actormodel.h5\")\n",
    "        critic.target_model.load_weights(\"criticmodel.h5\")\n",
    "        print(\"Weight load successfully\")\n",
    "    except (IOError, OSError):\n",
    "        # FIX: a bare except hid real errors; only missing/unreadable\n",
    "        # weight files should fall through to a cold start.\n",
    "        print(\"Cannot find the weight\")\n",
    "\n",
    "    print(\"TORCS Experiment Start.\")\n",
    "    for i in range(episode_count):\n",
    "\n",
    "        print(\"Episode : \" + str(i) + \" Replay Buffer \" + str(buff.count()))\n",
    "\n",
    "        ob = env.reset()\n",
    "        s_t = np.hstack(ob)\n",
    "        print(s_t)\n",
    "\n",
    "        total_reward = 0.\n",
    "        for j in range(max_steps):\n",
    "            env.render()\n",
    "            loss = 0\n",
    "            epsilon -= 1.0 / EXPLORE\n",
    "\n",
    "            # Deterministic action from the actor plus OU noise while training.\n",
    "            a_t_original = actor.model.predict(s_t.reshape(1, s_t.shape[0]))\n",
    "            noise_t = train_indicator * max(epsilon, 0) * OU.sample().reshape(1, action_dim)\n",
    "            a_t = a_t_original + noise_t\n",
    "\n",
    "            ob, r_t, done, info = env.step(a_t[0])\n",
    "            s_t1 = np.hstack(ob)\n",
    "\n",
    "            buff.add(s_t, a_t[0], r_t, s_t1, done)      #Add replay buffer\n",
    "\n",
    "            #Do the batch update\n",
    "            batch = buff.getBatch(BATCH_SIZE)\n",
    "            states = np.asarray([e[0] for e in batch])\n",
    "            actions = np.asarray([e[1] for e in batch])\n",
    "            rewards = np.asarray([e[2] for e in batch])\n",
    "            new_states = np.asarray([e[3] for e in batch])\n",
    "            dones = np.asarray([e[4] for e in batch])\n",
    "            # FIX: targets only need the (batch, action_dim) shape; start\n",
    "            # from zeros instead of stale action values.\n",
    "            y_t = np.zeros_like(actions)\n",
    "\n",
    "            # Bellman targets from the TARGET networks (stabilizes learning).\n",
    "            target_q_values = critic.target_model.predict([new_states, actor.target_model.predict(new_states)])\n",
    "\n",
    "            for k in range(len(batch)):\n",
    "                if dones[k]:\n",
    "                    y_t[k] = rewards[k]\n",
    "                else:\n",
    "                    y_t[k] = rewards[k] + GAMMA * target_q_values[k]\n",
    "\n",
    "            if (train_indicator):\n",
    "                loss += critic.model.train_on_batch([states, actions], y_t)\n",
    "                a_for_grad = actor.model.predict(states)\n",
    "                grads = critic.gradients(states, a_for_grad)\n",
    "                actor.train(states, grads)\n",
    "                actor.target_train()\n",
    "                critic.target_train()\n",
    "\n",
    "            total_reward += r_t\n",
    "            s_t = s_t1\n",
    "\n",
    "            print(\"Episode\", i, \"Step\", step, \"Action\", a_t, \"Reward\", r_t, \"Loss\", loss)\n",
    "\n",
    "            step += 1\n",
    "            if done:\n",
    "                break\n",
    "\n",
    "        # Checkpoint every third episode while training.\n",
    "        if np.mod(i, 3) == 0:\n",
    "            if (train_indicator):\n",
    "                print(\"Now we save model\")\n",
    "                actor.model.save_weights(\"actormodel.h5\", overwrite=True)\n",
    "                with open(\"actormodel.json\", \"w\") as outfile:\n",
    "                    json.dump(actor.model.to_json(), outfile)\n",
    "\n",
    "                critic.model.save_weights(\"criticmodel.h5\", overwrite=True)\n",
    "                with open(\"criticmodel.json\", \"w\") as outfile:\n",
    "                    json.dump(critic.model.to_json(), outfile)\n",
    "\n",
    "        print(\"TOTAL REWARD @ \" + str(i) +\"-th Episode  : Reward \" + str(total_reward))\n",
    "        print(\"Total Step: \" + str(step))\n",
    "        print(\"\")\n",
    "\n",
    "    # FIX: gym environments shut down with close(); env.end() does not\n",
    "    # exist and raised AttributeError here (TORCS leftover).\n",
    "    env.close()\n",
    "    print(\"Finish.\")\n",
    "\n",
    "playGame(1)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
