        )
        # Clipped branch of the PPO surrogate objective
        min_advantage = tf.where(
            advantage_buffer > 0,
            (1 + clip_ratio) * advantage_buffer,
            (1 - clip_ratio) * advantage_buffer,
        )

        # Clipped surrogate objective (negated, since we minimize)
        policy_loss = -tf.reduce_mean(
            tf.minimum(ratio * advantage_buffer, min_advantage)
        )
    policy_grads = tape.gradient(policy_loss, actor.trainable_variables)
    policy_optimizer.apply_gradients(zip(policy_grads, actor.trainable_variables))

    # Estimate the KL divergence between the old and the updated policy,
    # used for early stopping of the policy updates
    kl = tf.reduce_mean(
        logprobability_buffer
        - logprobabilities(actor(observation_buffer), action_buffer)
    )
    kl = tf.reduce_sum(kl)
    return kl
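The loss above is PPO's clipped surrogate objective: with a positive advantage the probability ratio is effectively capped at 1 + clip_ratio, and with a negative advantage it is floored at 1 - clip_ratio, so a single update cannot push the policy too far from the policy that collected the data. The following standalone snippet (toy numbers chosen purely for illustration, not part of the training script) shows that clipping behaviour:

import tensorflow as tf

toy_clip_ratio = 0.2
toy_ratio = tf.constant([0.5, 1.5, 0.5, 1.5])        # new-policy / old-policy probability ratios
toy_advantage = tf.constant([1.0, 1.0, -1.0, -1.0])  # advantage estimates

# Same construction as in train_policy above
toy_min_advantage = tf.where(
    toy_advantage > 0,
    (1 + toy_clip_ratio) * toy_advantage,
    (1 - toy_clip_ratio) * toy_advantage,
)
print(tf.minimum(toy_ratio * toy_advantage, toy_min_advantage).numpy())
# prints approximately [ 0.5  1.2 -0.8 -1.5]: the ratio's effect is capped at 1.2
# for positive advantages and floored at 0.8 for negative advantages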
# Train the value function by regression on mean-squared error
@tf.function
def train_value_function(observation_buffer, return_buffer):
    with tf.GradientTape() as tape:  # Record operations for automatic differentiation.
        value_loss = tf.reduce_mean((return_buffer - critic(observation_buffer)) ** 2)
    value_grads = tape.gradient(value_loss, critic.trainable_variables)
    value_optimizer.apply_gradients(zip(value_grads, critic.trainable_variables))
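The regression targets return_buffer are the discounted rewards-to-go produced by the Buffer defined earlier in this example. As a reference for how such targets are formed, a discounted cumulative sum over a short reward sequence can be computed as below (a minimal sketch of the operation the Buffer uses for its return and advantage computations, not new training code):

import numpy as np
import scipy.signal

def discounted_cumulative_sums(x, discount):
    # Discounted cumulative sums of a vector (rewards-to-go, GAE advantage estimates)
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

rewards = np.array([1.0, 1.0, 1.0], dtype=np.float32)
print(discounted_cumulative_sums(rewards, 0.99))  # approximately [2.9701 1.99 1.]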
Hyperparameters
# Hyperparameters of the PPO algorithm
steps_per_epoch = 4000
epochs = 30
gamma = 0.99
clip_ratio = 0.2
policy_learning_rate = 3e-4
value_function_learning_rate = 1e-3
train_policy_iterations = 80
train_value_iterations = 80
lam = 0.97
target_kl = 0.01
hidden_sizes = (64, 64)
# True if you want to render the environment
render = False
Initializations
# Initialize the environment and get the dimensionality of the
# observation space and the number of possible actions
env = gym.make("CartPole-v0")
observation_dimensions = env.observation_space.shape[0]
num_actions = env.action_space.n
# Initialize the buffer
buffer = Buffer(observation_dimensions, steps_per_epoch)
# Initialize the actor and the critic as keras models
observation_input = keras.Input(shape=(observation_dimensions,), dtype=tf.float32)
logits = mlp(observation_input, list(hidden_sizes) + [num_actions], tf.tanh, None)
actor = keras.Model(inputs=observation_input, outputs=logits)
value = tf.squeeze(
    mlp(observation_input, list(hidden_sizes) + [1], tf.tanh, None), axis=1
)
critic = keras.Model(inputs=observation_input, outputs=value)
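Both networks are built with the mlp helper defined earlier in this example; a minimal sketch of what it is assumed to do (stack Dense layers with the given activations on top of an input tensor) is repeated here for reference:

def mlp(x, sizes, activation=tf.tanh, output_activation=None):
    # Build a feedforward network on top of the input tensor x
    for size in sizes[:-1]:
        x = keras.layers.Dense(units=size, activation=activation)(x)
    return keras.layers.Dense(units=sizes[-1], activation=output_activation)(x)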
# Initialize the policy and the value function optimizers
policy_optimizer = keras.optimizers.Adam(learning_rate=policy_learning_rate)
value_optimizer = keras.optimizers.Adam(learning_rate=value_function_learning_rate)
# Initialize the observation, episode return and episode length
observation, episode_return, episode_length = env.reset(), 0, 0
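The training loop below samples actions with sample_action and scores them with logprobabilities, both defined earlier in this example. Minimal sketches of what they are assumed to do (a categorical policy over the actor's logits) are repeated here for reference:

def logprobabilities(logits, a):
    # Log-probability of taking action a, given the logits over all actions
    logprobabilities_all = tf.nn.log_softmax(logits)
    return tf.reduce_sum(tf.one_hot(a, num_actions) * logprobabilities_all, axis=-1)

@tf.function
def sample_action(observation):
    # Sample an action from the categorical distribution defined by the actor's logits
    logits = actor(observation)
    action = tf.squeeze(tf.random.categorical(logits, 1), axis=1)
    return logits, action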
Train
# Iterate over the number of epochs
for epoch in range(epochs):
    # Initialize the sum of the returns, lengths and number of episodes for each epoch
    sum_return = 0
    sum_length = 0
    num_episodes = 0

    # Iterate over the steps of each epoch
    for t in range(steps_per_epoch):
        if render:
            env.render()

        # Get the logits, action, and take one step in the environment
        observation = observation.reshape(1, -1)
        logits, action = sample_action(observation)
        observation_new, reward, done, _ = env.step(action[0].numpy())
        episode_return += reward
        episode_length += 1

        # Get the value and log-probability of the action
        value_t = critic(observation)
        logprobability_t = logprobabilities(logits, action)

        # Store obs, act, rew, v_t, logp_pi_t
        buffer.store(observation, action, reward, value_t, logprobability_t)

        # Update the observation
        observation = observation_new

        # Finish the trajectory if a terminal state is reached