# Build the updated Q-values for the sampled future states
# Use the target model for stability
future_rewards = model_target.predict(state_next_sample)
# Q value = reward + discount factor * expected future reward
updated_q_values = rewards_sample + gamma * tf.reduce_max(
    future_rewards, axis=1
)

# If final frame set the last value to -1
updated_q_values = updated_q_values * (1 - done_sample) - done_sample

# Create a mask so we only calculate loss on the updated Q-values
masks = tf.one_hot(action_sample, num_actions)

with tf.GradientTape() as tape:
    # Train the model on the states and updated Q-values
    q_values = model(state_sample)
    # Apply the masks to the Q-values to get the Q-value for the action taken
    q_action = tf.reduce_sum(tf.multiply(q_values, masks), axis=1)
    # Calculate loss between new Q-value and old Q-value
    loss = loss_function(updated_q_values, q_action)

# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
if frame_count % update_target_network == 0:
    # Update the target network with new weights
    model_target.set_weights(model.get_weights())
    # Log details
    template = "running reward: {:.2f} at episode {}, frame count {}"
    print(template.format(running_reward, episode_count, frame_count))
# Limit the state and reward history
if len(rewards_history) > max_memory_length:
    del rewards_history[:1]
    del state_history[:1]
    del state_next_history[:1]
    del action_history[:1]
    del done_history[:1]

if done:
    break
# Update running reward to check condition for solving
episode_reward_history.append(episode_reward)
if len(episode_reward_history) > 100:
    del episode_reward_history[:1]
running_reward = np.mean(episode_reward_history)

episode_count += 1

if running_reward > 40:  # Condition to consider the task solved
    print("Solved at episode {}!".format(episode_count))
    break
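In equation form, the target assembled above (writing d for the done flag, 0 or 1) is

$$\hat{Q}(s, a) = \left(r + \gamma \max_{a'} Q_{\text{target}}(s', a')\right)(1 - d) - d$$

so non-terminal transitions use the standard Bellman target computed with the frozen target network, while terminal frames are pinned to a value of -1.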
Visualizations
Before any training: [animation]
In early stages of training: [animation]
In later stages of training: [animation]
Implementation of a Proximal Policy Optimization agent for the CartPole-v0 environment.
Introduction
This code example solves the CartPole-v0 environment using a Proximal Policy Optimization (PPO) agent.
CartPole-v0
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pole starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, when the cart moves more than 2.4 units from the center, or after 200 timesteps. The highest return we can get is therefore 200.
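To make the environment mechanics concrete, here is a minimal interaction loop using a random policy. It assumes the classic Gym step API (a 4-tuple return), which matches the gym version this example targets; the random action choice is purely illustrative.

import gym

env = gym.make("CartPole-v0")
observation = env.reset()
episode_return, done = 0, False
while not done:
    action = env.action_space.sample()  # random action, just to show the loop
    observation, reward, done, info = env.step(action)  # reward is +1 per timestep
    episode_return += reward
print("Episode return:", episode_return)  # at most 200 for CartPole-v0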
Proximal Policy Optimization
PPO is a policy gradient method and can be used for environments with either discrete or continuous action spaces. It trains a stochastic policy in an on-policy way, using the actor-critic method: the actor maps an observation to an action, while the critic estimates the expected return for a given observation. At each epoch, the agent first collects a set of trajectories by sampling from the latest version of the stochastic policy. Then the rewards-to-go and the advantage estimates are computed, in order to update the policy and fit the value function. The policy is updated via stochastic gradient ascent, while the value function is fitted via gradient descent. This procedure is repeated for many epochs until the environment is solved.
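The heart of the policy update is the clipped surrogate objective from the PPO paper. Below is a minimal sketch of that objective in TensorFlow; the function and argument names are illustrative rather than part of this example's code, and clip_ratio = 0.2 is just a common default.

import tensorflow as tf

def ppo_clip_objective(logp_new, logp_old, advantages, clip_ratio=0.2):
    # Probability ratio between the current policy and the one that collected the data
    ratio = tf.exp(logp_new - logp_old)
    # Clipping the ratio keeps each update close to the old policy
    clipped = tf.clip_by_value(ratio, 1 - clip_ratio, 1 + clip_ratio) * advantages
    # We maximize the objective, i.e. minimize its negation
    return -tf.reduce_mean(tf.minimum(ratio * advantages, clipped))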
Algorithm
PPO Original Paper
OpenAI Spinning Up docs - PPO
Note
This code example uses Keras and TensorFlow v2. It is based on the PPO Original Paper, OpenAI's Spinning Up docs for PPO, and OpenAI's Spinning Up implementation of PPO using TensorFlow v1.
OpenAI Spinning Up Github - PPO
Libraries
For this example the following libraries are used:
numpy for n-dimensional arrays
tensorflow and keras for building the deep RL PPO agent
gym for getting everything we need about the environment
scipy.signal for calculating the discounted cumulative sums of vectors (a sketch of this trick follows the imports below)
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import gym
import scipy.signal
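As a quick illustration of the scipy.signal trick mentioned above, a discounted cumulative sum can be computed with a linear filter run over the reversed sequence. This is a sketch in the spirit of the Spinning Up implementation; it relies on the scipy.signal import above, and the helper name is illustrative.

def discounted_cumulative_sums(x, discount):
    # Computes y[t] = x[t] + discount * x[t+1] + discount^2 * x[t+2] + ...
    # via the IIR filter y[n] = x[n] + discount * y[n-1], applied to the
    # reversed sequence and then reversed back.
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]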