hf_public_repos/deep-rl-class/notebooks/unit2/unit2.ipynb
import os os.kill(os.getpid(), 9)# Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()import numpy as np import gymnasium as gym import random import imageio import os import tqdm import pickle5 as pickle from tqdm.notebook import tqdm# Create the FrozenLake-v1 environment using 4x4 map and non-slippery version and render_mode="rgb_array" env = gym.make() # TODO use the correct parametersenv = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=False, render_mode="rgb_array")# We create our environment with gym.make("<name_of_the_environment>")- `is_slippery=False`: The agent always moves in the intended direction due to the non-slippery nature of the frozen lake (deterministic). print("_____OBSERVATION SPACE_____ \n") print("Observation Space", env.observation_space) print("Sample observation", env.observation_space.sample()) # Get a random observationprint("\n _____ACTION SPACE_____ \n") print("Action Space Shape", env.action_space.n) print("Action Space Sample", env.action_space.sample()) # Take a random actionstate_space = print("There are ", state_space, " possible states") action_space = print("There are ", action_space, " possible actions")# Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros. np.zeros needs a tuple (a,b) def initialize_q_table(state_space, action_space): Qtable = return QtableQtable_frozenlake = initialize_q_table(state_space, action_space)state_space = env.observation_space.n print("There are ", state_space, " possible states") action_space = env.action_space.n print("There are ", action_space, " possible actions")# Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros def initialize_q_table(state_space, action_space): Qtable = np.zeros((state_space, action_space)) return QtableQtable_frozenlake = initialize_q_table(state_space, action_space)def greedy_policy(Qtable, state): # Exploitation: take the action with the highest state, action value action = return actiondef greedy_policy(Qtable, state): # Exploitation: take the action with the highest state, action value action = np.argmax(Qtable[state][:]) return actiondef epsilon_greedy_policy(Qtable, state, epsilon): # Randomly generate a number between 0 and 1 random_num = # if random_num > greater than epsilon --> exploitation if random_num > epsilon: # Take the action with the highest value given a state # np.argmax can be useful here action = # else --> exploration else: action = # Take a random action return actiondef epsilon_greedy_policy(Qtable, state, epsilon): # Randomly generate a number between 0 and 1 random_num = random.uniform(0,1) # if random_num > greater than epsilon --> exploitation if random_num > epsilon: # Take the action with the highest value given a state # np.argmax can be useful here action = greedy_policy(Qtable, state) # else --> exploration else: action = env.action_space.sample() return action# Training parameters n_training_episodes = 10000 # Total training episodes learning_rate = 0.7 # Learning rate # Evaluation parameters n_eval_episodes = 100 # Total number of test episodes # Environment parameters env_id = "FrozenLake-v1" # Name of the environment max_steps = 99 # Max steps per episode gamma = 0.95 # Discounting rate eval_seed = [] # The evaluation seed of the environment # Exploration parameters max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.05 # Minimum exploration 
probability decay_rate = 0.0005 # Exponential decay rate for exploration probdef train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable): for episode in tqdm(range(n_training_episodes)): # Reduce epsilon (because we need less and less exploration) epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode) # Reset the environment state, info = env.reset() step = 0 terminated = False truncated = False # repeat for step in range(max_steps): # Choose the action At using epsilon greedy policy action = # Take action At and observe Rt+1 and St+1 # Take the action (a) and observe the outcome state(s') and reward (r) new_state, reward, terminated, truncated, info = # Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] Qtable[state][action] = # If terminated or truncated finish the episode if terminated or truncated: break # Our next state is the new state state = new_state return Qtabledef train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable): for episode in tqdm(range(n_training_episodes)): # Reduce epsilon (because we need less and less exploration) epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode) # Reset the environment state, info = env.reset() step = 0 terminated = False truncated = False # repeat for step in range(max_steps): # Choose the action At using epsilon greedy policy action = epsilon_greedy_policy(Qtable, state, epsilon) # Take action At and observe Rt+1 and St+1 # Take the action (a) and observe the outcome state(s') and reward (r) new_state, reward, terminated, truncated, info = env.step(action) # Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] Qtable[state][action] = Qtable[state][action] + learning_rate * (reward + gamma * np.max(Qtable[new_state]) - Qtable[state][action]) # If terminated or truncated finish the episode if terminated or truncated: break # Our next state is the new state state = new_state return QtableQtable_frozenlake = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_frozenlake)Qtable_frozenlakedef evaluate_agent(env, max_steps, n_eval_episodes, Q, seed): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. 
:param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param Q: The Q-table :param seed: The evaluation seed array (for taxi-v3) """ episode_rewards = [] for episode in tqdm(range(n_eval_episodes)): if seed: state, info = env.reset(seed=seed[episode]) else: state, info = env.reset() step = 0 truncated = False terminated = False total_rewards_ep = 0 for step in range(max_steps): # Take the action (index) that have the maximum expected future reward given that state action = greedy_policy(Q, state) new_state, reward, terminated, truncated, info = env.step(action) total_rewards_ep += reward if terminated or truncated: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward# Evaluate our Agent mean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_frozenlake, eval_seed) print(f"Mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")from huggingface_hub import HfApi, snapshot_download from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import jsondef record_video(env, Qtable, out_directory, fps=1): """ Generate a replay video of the agent :param env :param Qtable: Qtable of our agent :param out_directory :param fps: how many frame per seconds (with taxi-v3 and frozenlake-v1 we use 1) """ images = [] terminated = False truncated = False state, info = env.reset(seed=random.randint(0,500)) img = env.render() images.append(img) while not terminated or truncated: # Take the action (index) that have the maximum expected future reward given that state action = np.argmax(Qtable[state][:]) state, reward, terminated, truncated, info = env.step(action) # We directly put next_state = state for recording logic img = env.render() images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps)def push_to_hub( repo_id, model, env, video_fps=1, local_repo_path="hub" ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. 
This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the Hub :param repo_id: repo_id: id of the model repository from the Hugging Face Hub :param env :param video_fps: how many frame per seconds to record our video replay (with taxi-v3 and frozenlake-v1 we use 1) :param local_repo_path: where the local repository is """ _, repo_name = repo_id.split("/") eval_env = env api = HfApi() # Step 1: Create the repo repo_url = api.create_repo( repo_id=repo_id, exist_ok=True, ) # Step 2: Download files repo_local_path = Path(snapshot_download(repo_id=repo_id)) # Step 3: Save the model if env.spec.kwargs.get("map_name"): model["map_name"] = env.spec.kwargs.get("map_name") if env.spec.kwargs.get("is_slippery", "") == False: model["slippery"] = False # Pickle the model with open((repo_local_path) / "q-learning.pkl", "wb") as f: pickle.dump(model, f) # Step 4: Evaluate the model and build JSON with evaluation metrics mean_reward, std_reward = evaluate_agent( eval_env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"] ) evaluate_data = { "env_id": model["env_id"], "mean_reward": mean_reward, "n_eval_episodes": model["n_eval_episodes"], "eval_datetime": datetime.datetime.now().isoformat() } # Write a JSON file called "results.json" that will contain the # evaluation results with open(repo_local_path / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 5: Create the model card env_name = model["env_id"] if env.spec.kwargs.get("map_name"): env_name += "-" + env.spec.kwargs.get("map_name") if env.spec.kwargs.get("is_slippery", "") == False: env_name += "-" + "no_slippery" metadata = {} metadata["tags"] = [env_name, "q-learning", "reinforcement-learning", "custom-implementation"] # Add metrics eval = metadata_eval_result( model_pretty_name=repo_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_name, dataset_id=env_name, ) # Merges both dictionaries metadata = {**metadata, **eval} model_card = f""" # **Q-Learning** Agent playing1 **{env_id}** This is a trained model of a **Q-Learning** agent playing **{env_id}** . ## Usage ```python model = load_from_hub(repo_id="{repo_id}", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ``` """ evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) readme_path = repo_local_path / "README.md" readme = "" print(readme_path.exists()) if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) # Step 6: Record a video video_path = repo_local_path / "replay.mp4" record_video(env, model["qtable"], video_path, video_fps) # Step 7. Push everything to the Hub api.upload_folder( repo_id=repo_id, folder_path=repo_local_path, path_in_repo=".", ) print("Your model is pushed to the Hub. 
You can view your model here: ", repo_url)from huggingface_hub import notebook_login notebook_login()model = { "env_id": env_id, "max_steps": max_steps, "n_training_episodes": n_training_episodes, "n_eval_episodes": n_eval_episodes, "eval_seed": eval_seed, "learning_rate": learning_rate, "gamma": gamma, "max_epsilon": max_epsilon, "min_epsilon": min_epsilon, "decay_rate": decay_rate, "qtable": Qtable_frozenlake }modelusername = "" # FILL THIS repo_name = "q-FrozenLake-v1-4x4-noSlippery" push_to_hub( repo_id=f"{username}/{repo_name}", model=model, env=env)env = gym.make("Taxi-v3", render_mode="rgb_array")state_space = env.observation_space.n print("There are ", state_space, " possible states")action_space = env.action_space.n print("There are ", action_space, " possible actions")# Create our Q table with state_size rows and action_size columns (500x6) Qtable_taxi = initialize_q_table(state_space, action_space) print(Qtable_taxi) print("Q-table shape: ", Qtable_taxi .shape)# Training parameters n_training_episodes = 25000 # Total training episodes learning_rate = 0.7 # Learning rate # Evaluation parameters n_eval_episodes = 100 # Total number of test episodes # DO NOT MODIFY EVAL_SEED eval_seed = [16,54,165,177,191,191,120,80,149,178,48,38,6,125,174,73,50,172,100,148,146,6,25,40,68,148,49,167,9,97,164,176,61,7,54,55, 161,131,184,51,170,12,120,113,95,126,51,98,36,135,54,82,45,95,89,59,95,124,9,113,58,85,51,134,121,169,105,21,30,11,50,65,12,43,82,145,152,97,106,55,31,85,38, 112,102,168,123,97,21,83,158,26,80,63,5,81,32,11,28,148] # Evaluation seed, this ensures that all classmates agents are trained on the same taxi starting position # Each seed has a specific starting state # Environment parameters env_id = "Taxi-v3" # Name of the environment max_steps = 99 # Max steps per episode gamma = 0.95 # Discounting rate # Exploration parameters max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.05 # Minimum exploration probability decay_rate = 0.005 # Exponential decay rate for exploration prob Qtable_taxi = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_taxi) Qtable_taximodel = { "env_id": env_id, "max_steps": max_steps, "n_training_episodes": n_training_episodes, "n_eval_episodes": n_eval_episodes, "eval_seed": eval_seed, "learning_rate": learning_rate, "gamma": gamma, "max_epsilon": max_epsilon, "min_epsilon": min_epsilon, "decay_rate": decay_rate, "qtable": Qtable_taxi }username = "" # FILL THIS repo_name = "" # FILL THIS push_to_hub( repo_id=f"{username}/{repo_name}", model=model, env=env)from urllib.error import HTTPError from huggingface_hub import hf_hub_download def load_from_hub(repo_id: str, filename: str) -> str: """ Download a model from Hugging Face Hub. 
:param repo_id: id of the model repository from the Hugging Face Hub :param filename: name of the model zip file from the repository """ # Get the model from the Hub, download and cache the model on your local disk pickle_model = hf_hub_download( repo_id=repo_id, filename=filename ) with open(pickle_model, 'rb') as f: downloaded_model_file = pickle.load(f) return downloaded_model_filemodel = load_from_hub(repo_id="ThomasSimonini/q-Taxi-v3", filename="q-learning.pkl") # Try to use another model print(model) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])model = load_from_hub(repo_id="ThomasSimonini/q-FrozenLake-v1-no-slippery", filename="q-learning.pkl") # Try to use another model env = gym.make(model["env_id"], is_slippery=False) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
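Before leaving this notebook, here is a small sketch (not part of the original notebook) of how you could watch the Q-table loaded just above act greedily for one episode, reusing the notebook's `greedy_policy` helper and the `model` dictionary returned by `load_from_hub`; the seed and step cap are illustrative choices:

```python
# Illustrative only: roll out one greedy episode with the loaded Q-table and print the return
env = gym.make(model["env_id"], is_slippery=False)   # same kwargs as the evaluation above
state, info = env.reset(seed=0)
total_reward = 0
for _ in range(100):                                  # step cap, mirroring max_steps above
    action = greedy_policy(model["qtable"], state)    # always exploit the learned values
    state, reward, terminated, truncated, info = env.step(action)
    total_reward += reward
    if terminated or truncated:
        break
print("Episode return:", total_reward)
```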
hf_public_repos/deep-rl-class/notebooks/unit5/unit5.ipynb
# Here, we create training-envs-executables and linux
!mkdir ./training-envs-executables
!mkdir ./training-envs-executables/linux

from huggingface_hub import notebook_login
notebook_login()
hf_public_repos/deep-rl-class/units/en/_toctree.yml
- title: Unit 0. Welcome to the course sections: - local: unit0/introduction title: Welcome to the course 🤗 - local: unit0/setup title: Setup - local: unit0/discord101 title: Discord 101 - title: Unit 1. Introduction to Deep Reinforcement Learning sections: - local: unit1/introduction title: Introduction - local: unit1/what-is-rl title: What is Reinforcement Learning? - local: unit1/rl-framework title: The Reinforcement Learning Framework - local: unit1/tasks title: The type of tasks - local: unit1/exp-exp-tradeoff title: The Exploration/ Exploitation tradeoff - local: unit1/two-methods title: The two main approaches for solving RL problems - local: unit1/deep-rl title: The “Deep” in Deep Reinforcement Learning - local: unit1/summary title: Summary - local: unit1/glossary title: Glossary - local: unit1/hands-on title: Hands-on - local: unit1/quiz title: Quiz - local: unit1/conclusion title: Conclusion - local: unit1/additional-readings title: Additional Readings - title: Bonus Unit 1. Introduction to Deep Reinforcement Learning with Huggy sections: - local: unitbonus1/introduction title: Introduction - local: unitbonus1/how-huggy-works title: How Huggy works? - local: unitbonus1/train title: Train Huggy - local: unitbonus1/play title: Play with Huggy - local: unitbonus1/conclusion title: Conclusion - title: Live 1. How the course work, Q&A, and playing with Huggy sections: - local: live1/live1 title: Live 1. How the course work, Q&A, and playing with Huggy 🐶 - title: Unit 2. Introduction to Q-Learning sections: - local: unit2/introduction title: Introduction - local: unit2/what-is-rl title: What is RL? A short recap - local: unit2/two-types-value-based-methods title: The two types of value-based methods - local: unit2/bellman-equation title: The Bellman Equation, simplify our value estimation - local: unit2/mc-vs-td title: Monte Carlo vs Temporal Difference Learning - local: unit2/mid-way-recap title: Mid-way Recap - local: unit2/mid-way-quiz title: Mid-way Quiz - local: unit2/q-learning title: Introducing Q-Learning - local: unit2/q-learning-example title: A Q-Learning example - local: unit2/q-learning-recap title: Q-Learning Recap - local: unit2/glossary title: Glossary - local: unit2/hands-on title: Hands-on - local: unit2/quiz2 title: Q-Learning Quiz - local: unit2/conclusion title: Conclusion - local: unit2/additional-readings title: Additional Readings - title: Unit 3. Deep Q-Learning with Atari Games sections: - local: unit3/introduction title: Introduction - local: unit3/from-q-to-dqn title: From Q-Learning to Deep Q-Learning - local: unit3/deep-q-network title: The Deep Q-Network (DQN) - local: unit3/deep-q-algorithm title: The Deep Q Algorithm - local: unit3/glossary title: Glossary - local: unit3/hands-on title: Hands-on - local: unit3/quiz title: Quiz - local: unit3/conclusion title: Conclusion - local: unit3/additional-readings title: Additional Readings - title: Bonus Unit 2. Automatic Hyperparameter Tuning with Optuna sections: - local: unitbonus2/introduction title: Introduction - local: unitbonus2/optuna title: Optuna - local: unitbonus2/hands-on title: Hands-on - title: Unit 4. Policy Gradient with PyTorch sections: - local: unit4/introduction title: Introduction - local: unit4/what-are-policy-based-methods title: What are the policy-based methods? 
- local: unit4/advantages-disadvantages title: The advantages and disadvantages of policy-gradient methods - local: unit4/policy-gradient title: Diving deeper into policy-gradient - local: unit4/pg-theorem title: (Optional) the Policy Gradient Theorem - local: unit4/hands-on title: Hands-on - local: unit4/quiz title: Quiz - local: unit4/conclusion title: Conclusion - local: unit4/additional-readings title: Additional Readings - title: Unit 5. Introduction to Unity ML-Agents sections: - local: unit5/introduction title: Introduction - local: unit5/how-mlagents-works title: How ML-Agents works? - local: unit5/snowball-target title: The SnowballTarget environment - local: unit5/pyramids title: The Pyramids environment - local: unit5/curiosity title: (Optional) What is curiosity in Deep Reinforcement Learning? - local: unit5/hands-on title: Hands-on - local: unit5/bonus title: Bonus. Learn to create your own environments with Unity and MLAgents - local: unit5/conclusion title: Conclusion - title: Unit 6. Actor Critic methods with Robotics environments sections: - local: unit6/introduction title: Introduction - local: unit6/variance-problem title: The Problem of Variance in Reinforce - local: unit6/advantage-actor-critic title: Advantage Actor Critic (A2C) - local: unit6/hands-on title: Advantage Actor Critic (A2C) using Robotics Simulations with PyBullet and Panda-Gym 🤖 - local: unit6/conclusion title: Conclusion - local: unit6/additional-readings title: Additional Readings - title: Unit 7. Introduction to Multi-Agents and AI vs AI sections: - local: unit7/introduction title: Introduction - local: unit7/introduction-to-marl title: An introduction to Multi-Agents Reinforcement Learning (MARL) - local: unit7/multi-agent-setting title: Designing Multi-Agents systems - local: unit7/self-play title: Self-Play - local: unit7/glossary title: Glossary - local: unit7/hands-on title: Let's train our soccer team to beat your classmates' teams (AI vs. AI) - local: unit7/conclusion title: Conclusion - local: unit7/additional-readings title: Additional Readings - title: Unit 8. Part 1 Proximal Policy Optimization (PPO) sections: - local: unit8/introduction title: Introduction - local: unit8/intuition-behind-ppo title: The intuition behind PPO - local: unit8/clipped-surrogate-objective title: Introducing the Clipped Surrogate Objective Function - local: unit8/visualize title: Visualize the Clipped Surrogate Objective Function - local: unit8/hands-on-cleanrl title: PPO with CleanRL - local: unit8/conclusion title: Conclusion - local: unit8/additional-readings title: Additional Readings - title: Unit 8. Part 2 Proximal Policy Optimization (PPO) with Doom sections: - local: unit8/introduction-sf title: Introduction - local: unit8/hands-on-sf title: PPO with Sample Factory and Doom - local: unit8/conclusion-sf title: Conclusion - title: Bonus Unit 3. Advanced Topics in Reinforcement Learning sections: - local: unitbonus3/introduction title: Introduction - local: unitbonus3/model-based title: Model-Based Reinforcement Learning - local: unitbonus3/offline-online title: Offline vs. 
Online Reinforcement Learning - local: unitbonus3/rlhf title: Reinforcement Learning from Human Feedback - local: unitbonus3/decision-transformers title: Decision Transformers and Offline RL - local: unitbonus3/language-models title: Language models in RL - local: unitbonus3/curriculum-learning title: (Automatic) Curriculum Learning for RL - local: unitbonus3/envs-to-try title: Interesting environments to try - local: unitbonus3/godotrl title: An Introduction to Godot RL - local: unitbonus3/rl-documentation title: Brief introduction to RL documentation - title: Certification and congratulations sections: - local: communication/conclusion title: Congratulations - local: communication/certification title: Get your certificate of completion
hf_public_repos/deep-rl-class/units/en/unit3/from-q-to-dqn.mdx
# From Q-Learning to Deep Q-Learning [[from-q-to-dqn]]

We learned that **Q-Learning is an algorithm we use to train our Q-Function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state.

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function.jpg" alt="Q-function"/>
</figure>

The **Q comes from "the Quality" of that action at that state.**

Internally, our Q-function is encoded by **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.**

The problem is that Q-Learning is a *tabular method*. This becomes an issue when the state and action spaces **are not small enough to be represented efficiently by arrays and tables**. In other words: it is **not scalable**.

Q-Learning worked well with small state space environments like:

- FrozenLake, where we had 16 states.
- Taxi-v3, where we had 500 states.

But think of what we're going to do today: we will train an agent to learn to play Space Invaders, a more complex game, using the frames as input.

As **[Nikita Melkozerov mentioned](https://twitter.com/meln1k), Atari environments** have an observation space with a shape of (210, 160, 3)*, containing values ranging from 0 to 255, which gives us \\(256^{210 \times 160 \times 3} = 256^{100800}\\) possible observations (for comparison, there are approximately \\(10^{80}\\) atoms in the observable universe).

* A single frame in Atari is an image of 210x160 pixels. Given that the images are in color (RGB), there are 3 channels. This is why the shape is (210, 160, 3). For each pixel, the value can go from 0 to 255.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari.jpg" alt="Atari State Space"/>

Therefore, the state space is gigantic; because of this, creating and updating a Q-table for that environment would not be efficient. In this case, the best idea is to approximate the Q-values with a parametrized Q-function \\(Q_{\theta}(s,a)\\). This neural network will approximate, given a state, the different Q-values for each possible action at that state. And that's exactly what Deep Q-Learning does.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/deep.jpg" alt="Deep Q Learning"/>

Now that we understand Deep Q-Learning, let's dive deeper into the Deep Q-Network.
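Before moving on, here is a minimal sketch of what "a parametrized Q-function" means in code. It assumes PyTorch (which the course does not require you to write yourself in this unit — RL-Baselines3-Zoo handles the network), and the class, layer sizes, and dummy state are illustrative only:

```python
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Approximates Q(s, a) for every action a, given a state s."""
    def __init__(self, state_dim: int, n_actions: int):
        super().__init__()
        # A small fully connected network: state in, one Q-value per action out
        self.net = nn.Sequential(
            nn.Linear(state_dim, 128),
            nn.ReLU(),
            nn.Linear(128, n_actions),
        )

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        return self.net(state)  # shape: (batch_size, n_actions)

# Greedy action selection: pick the action with the highest predicted Q-value
q_net = QNetwork(state_dim=8, n_actions=4)
state = torch.randn(1, 8)            # a dummy state, for illustration only
action = q_net(state).argmax(dim=1)  # same role as np.argmax over a Q-table row
```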
hf_public_repos/deep-rl-class/units/en/unit3/hands-on.mdx
# Hands-on [[hands-on]] <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit3/unit3.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> Now that you've studied the theory behind Deep Q-Learning, **you’re ready to train your Deep Q-Learning agent to play Atari Games**. We'll start with Space Invaders, but you'll be able to use any Atari game you want 🔥 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/> We're using the [RL-Baselines-3 Zoo integration](https://github.com/DLR-RM/rl-baselines3-zoo), a vanilla version of Deep Q-Learning with no extensions such as Double-DQN, Dueling-DQN, or Prioritized Experience Replay. Also, **if you want to learn to implement Deep Q-Learning by yourself after this hands-on**, you definitely should look at the CleanRL implementation: https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/dqn_atari.py To validate this hands-on for the certification process, you need to push your trained model to the Hub and **get a result of >= 200**. To find your result, go to the leaderboard and find your model, **the result = mean_reward - std of reward** **If you don't find your model, go to the bottom of the page and click on the refresh button.** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course **To start the hands-on click on Open In Colab button** 👇 : [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit3/unit3.ipynb) # Unit 3: Deep Q-Learning with Atari Games 👾 using RL Baselines3 Zoo <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 Thumbnail"> In this hands-on, **you'll train a Deep Q-Learning agent** playing Space Invaders using [RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo), a training framework based on [Stable-Baselines3](https://stable-baselines3.readthedocs.io/en/master/) that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results and recording videos. We're using the [RL-Baselines-3 Zoo integration, a vanilla version of Deep Q-Learning](https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html) with no extensions such as Double-DQN, Dueling-DQN, and Prioritized Experience Replay. ### 🎮 Environments: - [SpacesInvadersNoFrameskip-v4](https://gymnasium.farama.org/environments/atari/space_invaders/) You can see the difference between Space Invaders versions here 👉 https://gymnasium.farama.org/environments/atari/space_invaders/#variants ### 📚 RL-Library: - [RL-Baselines3-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo) ## Objectives of this hands-on 🏆 At the end of the hands-on, you will: - Be able to understand deeper **how RL Baselines3 Zoo works**. - Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. 
## Prerequisites 🏗️ Before diving into the hands-on, you need to: 🔲 📚 **[Study Deep Q-Learning by reading Unit 3](https://huggingface.co/deep-rl-course/unit3/introduction)** 🤗 We're constantly trying to improve our tutorials, so **if you find some issues in this hands-on**, please [open an issue on the Github Repo](https://github.com/huggingface/deep-rl-class/issues). # Let's train a Deep Q-Learning agent playing Atari' Space Invaders 👾 and upload it to the Hub. We strongly recommend students **to use Google Colab for the hands-on exercises instead of running them on their personal computers**. By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects of setting up your environments**. To validate this hands-on for the certification process, you need to push your trained model to the Hub and **get a result of >= 200**. To find your result, go to the leaderboard and find your model, **the result = mean_reward - std of reward** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> # Install RL-Baselines3 Zoo and its dependencies 📚 If you see `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.` **this is normal and it's not a critical error** there's a conflict of version. But the packages we need are installed. ```python # For now we install this update of RL-Baselines3 Zoo pip install git+https://github.com/DLR-RM/rl-baselines3-zoo@update/hf ``` IF AND ONLY IF THE VERSION ABOVE DOES NOT EXIST ANYMORE. UNCOMMENT AND INSTALL THE ONE BELOW ```python #pip install rl_zoo3==2.0.0a9 ``` ```bash apt-get install swig cmake ffmpeg ``` To be able to use Atari games in Gymnasium we need to install atari package. And accept-rom-license to download the rom files (games files). ```python !pip install gymnasium[atari] !pip install gymnasium[accept-rom-license] ``` ## Create a virtual display 🔽 During the hands-on, we'll need to generate a replay video. To do so, if you train it on a headless machine, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). Hence the following cell will install the librairies and create and run a virtual screen 🖥 ```bash apt install python-opengl apt install ffmpeg apt install xvfb pip3 install pyvirtualdisplay ``` ```python # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() ``` ## Train our Deep Q-Learning Agent to Play Space Invaders 👾 To train an agent with RL-Baselines3-Zoo, we just need to do two things: 1. Create a hyperparameter config file that will contain our training hyperparameters called `dqn.yml`. 
This is a template example: ``` SpaceInvadersNoFrameskip-v4: env_wrapper: - stable_baselines3.common.atari_wrappers.AtariWrapper frame_stack: 4 policy: 'CnnPolicy' n_timesteps: !!float 1e7 buffer_size: 100000 learning_rate: !!float 1e-4 batch_size: 32 learning_starts: 100000 target_update_interval: 1000 train_freq: 4 gradient_steps: 1 exploration_fraction: 0.1 exploration_final_eps: 0.01 # If True, you need to deactivate handle_timeout_termination # in the replay_buffer_kwargs optimize_memory_usage: False ``` Here we see that: - We use the `Atari Wrapper` that preprocess the input (Frame reduction ,grayscale, stack 4 frames) - We use `CnnPolicy`, since we use Convolutional layers to process the frames - We train it for 10 million `n_timesteps` - Memory (Experience Replay) size is 100000, aka the amount of experience steps you saved to train again your agent with. 💡 My advice is to **reduce the training timesteps to 1M,** which will take about 90 minutes on a P100. `!nvidia-smi` will tell you what GPU you're using. At 10 million steps, this will take about 9 hours. I recommend running this on your local computer (or somewhere else). Just click on: `File>Download`. In terms of hyperparameters optimization, my advice is to focus on these 3 hyperparameters: - `learning_rate` - `buffer_size (Experience Memory size)` - `batch_size` As a good practice, you need to **check the documentation to understand what each hyperparameters does**: https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html#parameters 2. We start the training and save the models on `logs` folder 📁 - Define the algorithm after `--algo`, where we save the model after `-f` and where the hyperparameter config is after `-c`. ```bash python -m rl_zoo3.train --algo ________ --env SpaceInvadersNoFrameskip-v4 -f _________ -c _________ ``` #### Solution ```bash python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -c dqn.yml ``` ## Let's evaluate our agent 👀 - RL-Baselines3-Zoo provides `enjoy.py`, a python script to evaluate our agent. In most RL libraries, we call the evaluation script `enjoy.py`. - Let's evaluate it for 5000 timesteps 🔥 ```bash python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 --no-render --n-timesteps _________ --folder logs/ ``` #### Solution ```bash python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 --no-render --n-timesteps 5000 --folder logs/ ``` ## Publish our trained model on the Hub 🚀 Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit3/space-invaders-model.gif" alt="Space Invaders model"> By using `rl_zoo3.push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the hub**. This way: - You can **showcase our work** 🔥 - You can **visualize your agent playing** 👀 - You can **share with the community an agent that others can use** 💾 - You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website. 
- Create a new token (https://huggingface.co/settings/tokens) **with write role**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">

- Copy the token
- Run the cell below and paste the token

```python
from huggingface_hub import notebook_login # To log in to our Hugging Face account so we can upload models to the Hub.
notebook_login()
!git config --global credential.helper store
```

If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`

3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥

Let's run `rl_zoo3.push_to_hub` to upload our trained agent to the Hub:

`--repo-name`: The name of the repo

`-orga`: Your Hugging Face username

`-f`: Where the trained model folder is (in our case `logs`)

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit3/select-id.png" alt="Select Id">

```bash
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 --repo-name _____________________ -orga _____________________ -f logs/
```

#### Solution

```bash
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 --repo-name dqn-SpaceInvadersNoFrameskip-v4 -orga ThomasSimonini -f logs/
```

### Congrats 🥳

You've just trained and uploaded your first Deep Q-Learning agent using RL-Baselines-3 Zoo. The script above should have displayed a link to a model repository such as https://huggingface.co/ThomasSimonini/dqn-SpaceInvadersNoFrameskip-v4. When you go to this link, you can:

- See a **video preview of your agent** on the right.
- Click "Files and versions" to see all the files in the repository.
- Click "Use in stable-baselines3" to get a code snippet that shows how to load the model.
- Read the model card (`README.md` file), which gives a description of the model and the hyperparameters you used.

Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent.

**Compare the results of your agents with your classmates'** using the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) 🏆

## Load a powerful trained model 🔥

The Stable-Baselines3 team uploaded **more than 150 trained Deep Reinforcement Learning agents to the Hub**. You can find them here: 👉 https://huggingface.co/sb3

Some examples:

- Asteroids: https://huggingface.co/sb3/dqn-AsteroidsNoFrameskip-v4
- Beam Rider: https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4
- Breakout: https://huggingface.co/sb3/dqn-BreakoutNoFrameskip-v4
- Road Runner: https://huggingface.co/sb3/dqn-RoadRunnerNoFrameskip-v4

Let's load an agent playing Beam Rider: https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4

1. We download the model using `rl_zoo3.load_from_hub` and place it in a new folder that we can call `rl_trained`:

```bash
# Download the model and save it into the rl_trained/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env BeamRiderNoFrameskip-v4 -orga sb3 -f rl_trained/
```

2. Let's evaluate it for 5000 timesteps:

```bash
python -m rl_zoo3.enjoy --algo dqn --env BeamRiderNoFrameskip-v4 -n 5000 -f rl_trained/ --no-render
```

Why not try to train your own **Deep Q-Learning agent playing BeamRiderNoFrameskip-v4? 🏆** If you want to try, check https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4#hyperparameters: **the model card lists the hyperparameters of the trained agent.**

But finding hyperparameters can be a daunting task. Fortunately, in the next unit we'll see how we can **use Optuna to optimize the hyperparameters 🔥.**

## Some additional challenges 🏆

The best way to learn **is to try things on your own**! You will find your agents in the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard). Can you get to the top?

Here's a list of environments you can try to train your agent with:

- BeamRiderNoFrameskip-v4
- BreakoutNoFrameskip-v4
- EnduroNoFrameskip-v4
- PongNoFrameskip-v4

Also, **if you want to learn to implement Deep Q-Learning by yourself**, you definitely should look at the CleanRL implementation: https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/dqn_atari.py

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/>

________________________________________________________________________

Congrats on finishing this chapter! If you still feel confused by all these elements... it's totally normal! **This was the same for me and for everyone who studied RL.** Take time to really **grasp the material before continuing and try the additional challenges**. It's important to master these elements and build solid foundations.

In the next unit, **we're going to learn about [Optuna](https://optuna.org/)**. One of the most critical tasks in Deep Reinforcement Learning is finding a good set of training hyperparameters, and Optuna is a library that helps you automate the search.

### This is a course built with you 👷🏿‍♀️

Finally, we want to improve and update the course iteratively with your feedback. If you have some, please fill out this form 👉 https://forms.gle/3HgA7bEHwAmmLfwh9

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the Github Repo](https://github.com/huggingface/deep-rl-class/issues).

See you in Bonus Unit 2! 🔥

### Keep Learning, Stay Awesome 🤗
hf_public_repos/deep-rl-class/units/en/unit3/quiz.mdx
# Quiz [[quiz]] The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: We mentioned Q Learning is a tabular method. What are tabular methods? <details> <summary>Solution</summary> *Tabular methods* is a type of problem in which the state and actions spaces are small enough to approximate value functions to be **represented as arrays and tables**. For instance, **Q-Learning is a tabular method** since we use a table to represent the state, and action value pairs. </details> ### Q2: Why can't we use a classical Q-Learning to solve an Atari Game? <Question choices={[ { text: "Atari environments are too fast for Q-Learning", explain: "" }, { text: "Atari environments have a big observation space. So creating an updating the Q-Table would not be efficient", explain: "", correct: true } ]} /> ### Q3: Why do we stack four frames together when we use frames as input in Deep Q-Learning? <details> <summary>Solution</summary> We stack frames together because it helps us **handle the problem of temporal limitation**: one frame is not enough to capture temporal information. For instance, in pong, our agent **will be unable to know the ball direction if it gets only one frame**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation.jpg" alt="Temporal limitation"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation-2.jpg" alt="Temporal limitation"/> </details> ### Q4: What are the two phases of Deep Q-Learning? <Question choices={[ { text: "Sampling", explain: "We perform actions and store the observed experiences tuples in a replay memory.", correct: true, }, { text: "Shuffling", explain: "", }, { text: "Reranking", explain: "", }, { text: "Training", explain: "We select the small batch of tuple randomly and learn from it using a gradient descent update step.", correct: true, } ]} /> ### Q5: Why do we create a replay memory in Deep Q-Learning? <details> <summary>Solution</summary> **1. Make more efficient use of the experiences during the training** Usually, in online reinforcement learning, the agent interacts in the environment, gets experiences (state, action, reward, and next state), learns from them (updates the neural network), and discards them. This is not efficient. But, with experience replay, **we create a replay buffer that saves experience samples that we can reuse during the training**. **2. Avoid forgetting previous experiences and reduce the correlation between experiences** The problem we get if we give sequential samples of experiences to our neural network is that it **tends to forget the previous experiences as it overwrites new experiences**. For instance, if we are in the first level and then the second, which is different, our agent can forget how to behave and play in the first level. </details> ### Q6: How do we use Double Deep Q-Learning? <details> <summary>Solution</summary> When we compute the Q target, we use two networks to decouple the action selection from the target Q value generation. We: - Use our *DQN network* to **select the best action to take for the next state** (the action with the highest Q value). - Use our *Target network* to calculate **the target Q value of taking that action at the next state**. 
</details>

Congrats on finishing this Quiz 🥳! If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
hf_public_repos/deep-rl-class/units/en/unit3/deep-q-network.mdx
# The Deep Q-Network (DQN) [[deep-q-network]] This is the architecture of our Deep Q-Learning network: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/deep-q-network.jpg" alt="Deep Q Network"/> As input, we take a **stack of 4 frames** passed through the network as a state and output a **vector of Q-values for each possible action at that state**. Then, like with Q-Learning, we just need to use our epsilon-greedy policy to select which action to take. When the Neural Network is initialized, **the Q-value estimation is terrible**. But during training, our Deep Q-Network agent will associate a situation with the appropriate action and **learn to play the game well**. ## Preprocessing the input and temporal limitation [[preprocessing]] We need to **preprocess the input**. It’s an essential step since we want to **reduce the complexity of our state to reduce the computation time needed for training**. To achieve this, we **reduce the state space to 84x84 and grayscale it**. We can do this since the colors in Atari environments don't add important information. This is a big improvement since we **reduce our three color channels (RGB) to 1**. We can also **crop a part of the screen in some games** if it does not contain important information. Then we stack four frames together. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/preprocessing.jpg" alt="Preprocessing"/> **Why do we stack four frames together?** We stack frames together because it helps us **handle the problem of temporal limitation**. Let’s take an example with the game of Pong. When you see this frame: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation.jpg" alt="Temporal Limitation"/> Can you tell me where the ball is going? No, because one frame is not enough to have a sense of motion! But what if I add three more frames? **Here you can see that the ball is going to the right**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation-2.jpg" alt="Temporal Limitation"/> That’s why, to capture temporal information, we stack four frames together. Then the stacked frames are processed by three convolutional layers. These layers **allow us to capture and exploit spatial relationships in images**. But also, because the frames are stacked together, **we can exploit some temporal properties across those frames**. If you don't know what convolutional layers are, don't worry. You can check out [Lesson 4 of this free Deep Learning Course by Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188) Finally, we have a couple of fully connected layers that output a Q-value for each possible action at that state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/deep-q-network.jpg" alt="Deep Q Network"/> So, we see that Deep Q-Learning uses a neural network to approximate, given a state, the different Q-values for each possible action at that state. Now let's study the Deep Q-Learning algorithm.
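Before that, here is a minimal sketch of the architecture just described. It assumes PyTorch and uses the layer sizes of the classic DQN Atari network (Mnih et al., 2015); the course itself does not ask you to implement this, and the names and the 6-action example are illustrative:

```python
import torch
import torch.nn as nn

class DQN(nn.Module):
    """Classic DQN CNN: a stack of 4 grayscale 84x84 frames in, one Q-value per action out."""
    def __init__(self, n_actions: int):
        super().__init__()
        self.network = nn.Sequential(
            nn.Conv2d(4, 32, kernel_size=8, stride=4),   # the 4 stacked frames are the input channels
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(64 * 7 * 7, 512),                   # 7x7 feature maps remain after the conv stack
            nn.ReLU(),
            nn.Linear(512, n_actions),                    # one Q-value per possible action
        )

    def forward(self, frames: torch.Tensor) -> torch.Tensor:
        # frames: (batch_size, 4, 84, 84), pixel values scaled to [0, 1]
        return self.network(frames)

q_net = DQN(n_actions=6)                  # e.g. 6 actions in Space Invaders
dummy_state = torch.rand(1, 4, 84, 84)    # a fake preprocessed frame stack
q_values = q_net(dummy_state)             # shape: (1, 6)
```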
hf_public_repos/deep-rl-class/units/en/unit3/introduction.mdx
# Deep Q-Learning [[deep-q-learning]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 thumbnail" width="100%">

In the last unit, we learned our first reinforcement learning algorithm: Q-Learning, **implemented it from scratch**, and trained it in two environments, FrozenLake-v1 ☃️ and Taxi-v3 🚕.

We got excellent results with this simple algorithm, but these environments were relatively simple because the **state space was discrete and small** (16 different states for FrozenLake-v1 and 500 for Taxi-v3). For comparison, the state space in Atari games can **contain \\(10^{9}\\) to \\(10^{11}\\) states**.

But as we'll see, producing and updating a **Q-table can become ineffective in large state space environments.**

So in this unit, **we'll study our first Deep Reinforcement Learning agent**: Deep Q-Learning. Instead of using a Q-table, Deep Q-Learning uses a Neural Network that takes a state and approximates Q-values for each action based on that state.

And **we'll train it to play Space Invaders and other Atari environments using [RL-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)**, a training framework for RL using Stable-Baselines that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results, and recording videos.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/>

So let’s get started! 🚀
hf_public_repos/deep-rl-class/units/en/unit3/deep-q-algorithm.mdx
# The Deep Q-Learning Algorithm [[deep-q-algorithm]] We learned that Deep Q-Learning **uses a deep neural network to approximate the different Q-values for each possible action at a state** (value-function estimation). The difference is that, during the training phase, instead of updating the Q-value of a state-action pair directly as we have done with Q-Learning: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-5.jpg" alt="Q Loss"/> in Deep Q-Learning, we create a **loss function that compares our Q-value prediction and the Q-target and uses gradient descent to update the weights of our Deep Q-Network to approximate our Q-values better**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/Q-target.jpg" alt="Q-target"/> The Deep Q-Learning training algorithm has *two phases*: - **Sampling**: we perform actions and **store the observed experience tuples in a replay memory**. - **Training**: Select a **small batch of tuples randomly and learn from this batch using a gradient descent update step**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/sampling-training.jpg" alt="Sampling Training"/> This is not the only difference compared with Q-Learning. Deep Q-Learning training **might suffer from instability**, mainly because of combining a non-linear Q-value function (Neural Network) and bootstrapping (when we update targets with existing estimates and not an actual complete return). To help us stabilize the training, we implement three different solutions: 1. *Experience Replay* to make more **efficient use of experiences**. 2. *Fixed Q-Target* **to stabilize the training**. 3. *Double Deep Q-Learning*, to **handle the problem of the overestimation of Q-values**. Let's go through them! ## Experience Replay to make more efficient use of experiences [[exp-replay]] Why do we create a replay memory? Experience Replay in Deep Q-Learning has two functions: 1. **Make more efficient use of the experiences during the training**. Usually, in online reinforcement learning, the agent interacts with the environment, gets experiences (state, action, reward, and next state), learns from them (updates the neural network), and discards them. This is not efficient. Experience replay helps by **using the experiences of the training more efficiently**. We use a replay buffer that saves experience samples **that we can reuse during the training.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/experience-replay.jpg" alt="Experience Replay"/> ⇒ This allows the agent to **learn from the same experiences multiple times**. 2. **Avoid forgetting previous experiences and reduce the correlation between experiences**. - The problem we get if we give sequential samples of experiences to our neural network is that it tends to forget **the previous experiences as it gets new experiences.** For instance, if the agent is in the first level and then in the second, which is different, it can forget how to behave and play in the first level. The solution is to create a Replay Buffer that stores experience tuples while interacting with the environment and then sample a small batch of tuples. This prevents **the network from only learning about what it has done immediately before.** Experience replay also has other benefits. 
By randomly sampling the experiences, we remove correlation in the observation sequences and avoid **action values from oscillating or diverging catastrophically.** In the Deep Q-Learning pseudocode, we **initialize a replay memory buffer D with capacity N** (N is a hyperparameter that you can define). We then store experiences in the memory and sample a batch of experiences to feed the Deep Q-Network during the training phase. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/experience-replay-pseudocode.jpg" alt="Experience Replay Pseudocode"/> ## Fixed Q-Target to stabilize the training [[fixed-q]] When we want to calculate the TD error (aka the loss), we calculate the **difference between the TD target (Q-Target) and the current Q-value (estimation of Q)**. But we **don’t have any idea of the real TD target**. We need to estimate it. Using the Bellman equation, we saw that the TD target is just the reward of taking that action at that state plus the discounted highest Q value for the next state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/Q-target.jpg" alt="Q-target"/> However, the problem is that we are using the same parameters (weights) for estimating the TD target **and** the Q-value. Consequently, there is a significant correlation between the TD target and the parameters we are changing. Therefore, at every step of training, **both our Q-values and the target values shift.** We’re getting closer to our target, but the target is also moving. It’s like chasing a moving target! This can lead to significant oscillation in training. It’s like if you were a cowboy (the Q estimation) and you wanted to catch a cow (the Q-target). Your goal is to get closer (reduce the error). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/qtarget-1.jpg" alt="Q-target"/> At each time step, you’re trying to approach the cow, which also moves at each time step (because you use the same parameters). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/qtarget-2.jpg" alt="Q-target"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/qtarget-3.jpg" alt="Q-target"/> This leads to a bizarre path of chasing (a significant oscillation in training). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/qtarget-4.jpg" alt="Q-target"/> Instead, what we see in the pseudo-code is that we: - Use a **separate network with fixed parameters** for estimating the TD Target - **Copy the parameters from our Deep Q-Network every C steps** to update the target network. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/fixed-q-target-pseudocode.jpg" alt="Fixed Q-target Pseudocode"/> ## Double DQN [[double-dqn]] Double DQNs, or Double Deep Q-Learning neural networks, were introduced [by Hado van Hasselt](https://papers.nips.cc/paper/3964-double-q-learning). 
This method **handles the problem of the overestimation of Q-values.**

To understand this problem, remember how we calculate the TD Target:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="TD target"/>

When calculating the TD target, we face a simple problem: how can we be sure that **the best action for the next state is the action with the highest Q-value?**

We know that the accuracy of Q-values depends on what actions we have tried **and** what neighboring states we have explored. Consequently, we don’t have enough information about the best action to take at the beginning of training. Therefore, taking the maximum Q-value (which is noisy) as the best action can lead to false positives. If non-optimal actions are regularly **given a higher Q-value than the optimal best action, learning will be complicated.**

The solution is: when we compute the Q target, we use two networks to decouple the action selection from the target Q-value generation. We:

- Use our **DQN network** to select the best action to take for the next state (the action with the highest Q-value).
- Use our **Target network** to calculate the target Q-value of taking that action at the next state.

Therefore, Double DQN helps us reduce the overestimation of Q-values and, as a consequence, helps us train faster and with more stable learning.

Since these three improvements to Deep Q-Learning, many more have been added, such as Prioritized Experience Replay and Dueling Deep Q-Learning. They’re outside the scope of this course, but if you’re interested, check the links in the reading list.
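To tie the Fixed Q-Target and Double DQN ideas together, here is a minimal sketch of how the Double DQN target can be computed with an online network and a target network. It assumes PyTorch and two identically shaped networks (for example the DQN sketch shown earlier); the function and variable names are illustrative, not the course's own implementation:

```python
import torch

@torch.no_grad()
def double_dqn_target(q_net, target_net, rewards, next_states, dones, gamma=0.99):
    # 1) Action SELECTION with the online network: argmax_a Q_online(s', a)
    best_actions = q_net(next_states).argmax(dim=1, keepdim=True)
    # 2) Action EVALUATION with the target network: Q_target(s', best_action)
    next_q = target_net(next_states).gather(1, best_actions).squeeze(1)
    # TD target: r + gamma * Q_target(...) for non-terminal transitions
    return rewards + gamma * next_q * (1.0 - dones)

# Fixed Q-Target: every C training steps, copy the online weights into the target network:
#   target_net.load_state_dict(q_net.state_dict())
# The loss then compares Q_online(s, a) for the batch against this target.
```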
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit3/glossary.mdx
# Glossary This is a community-created glossary. Contributions are welcome! - **Tabular Method:** Type of problem in which the state and action spaces are small enough for the value functions to be represented as arrays or tables. **Q-learning** is an example of a tabular method, since a table is used to represent the values for the different state-action pairs. - **Deep Q-Learning:** Method that trains a neural network to approximate, given a state, the different **Q-values** for each possible action at that state. It is used to solve problems where the observation space is too big to apply a tabular Q-Learning approach. - **Temporal Limitation** is a difficulty presented when the environment state is represented by frames. A frame by itself does not provide temporal information. In order to obtain temporal information, we need to **stack** a number of frames together. - **Phases of Deep Q-Learning:** - **Sampling:** Actions are performed, and observed experience tuples are stored in a **replay memory**. - **Training:** Batches of tuples are selected randomly and the neural network updates its weights using gradient descent. - **Solutions to stabilize Deep Q-Learning:** - **Experience Replay:** A replay memory is created to save experience samples that can be reused during training. This allows the agent to learn from the same experiences multiple times. It also helps the agent avoid forgetting previous experiences as it gets new ones. - **Random sampling** from the replay buffer removes correlation in the observation sequences and prevents action values from oscillating or diverging catastrophically. - **Fixed Q-Target:** In order to calculate the **Q-Target**, we need to estimate the discounted optimal **Q-value** of the next state using the Bellman equation. The problem is that the same network weights are used to calculate both the **Q-Target** and the **Q-value**. This means that every time we modify the **Q-value**, the **Q-Target** moves with it. To avoid this issue, a separate network with fixed parameters is used for estimating the Temporal Difference Target. The target network is updated by copying the parameters from our Deep Q-Network every **C steps**. - **Double DQN:** Method to handle **overestimation** of **Q-Values**. This solution uses two networks to decouple the action selection from the target **Value generation**: - **DQN Network** to select the best action to take for the next state (the action with the highest **Q-Value**) - **Target Network** to calculate the target **Q-Value** of taking that action at the next state. This approach reduces the overestimation of **Q-Values**, helps us train faster, and gives more stable learning. If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [Dario Paez](https://github.com/dario248)
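To make the **Experience Replay** entry above more concrete, here is a minimal Python sketch of a replay memory with capacity N. It is only an illustration, not the implementation used in the course notebooks.

```python
import random
from collections import deque

class ReplayBuffer:
    """Minimal replay memory: store experience tuples, sample random batches."""

    def __init__(self, capacity):
        # Once capacity is reached, the oldest experiences are dropped automatically
        self.memory = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        # Sampling phase: store the observed experience tuple
        self.memory.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        # Training phase: draw a random batch to break correlations between consecutive observations
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
```

Random sampling from such a buffer is what removes the correlation between consecutive observations and lets the agent reuse past experiences several times.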
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit3/conclusion.mdx
# Conclusion [[conclusion]] Congrats on finishing this chapter! There was a lot of information. And congrats on finishing the tutorial. You’ve just trained your first Deep Q-Learning agent and shared it on the Hub 🥳. Take time to really grasp the material before continuing. Don't hesitate to train your agent in other environments (Pong, Seaquest, QBert, Ms Pac Man). The **best way to learn is to try things on your own!** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/> In the next unit, **we're going to learn about Optuna**. One of the most critical tasks in Deep Reinforcement Learning is to find a good set of training hyperparameters. Optuna is a library that helps you to automate the search. Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit3/additional-readings.mdx
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. - [Foundations of Deep RL Series, L2 Deep Q-Learning by Pieter Abbeel](https://youtu.be/Psrhxy88zww) - [Playing Atari with Deep Reinforcement Learning](https://arxiv.org/abs/1312.5602) - [Double Deep Q-Learning](https://papers.nips.cc/paper/2010/hash/091d584fced301b442654dd8c23b3fc9-Abstract.html) - [Prioritized Experience Replay](https://arxiv.org/abs/1511.05952)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit0/discord101.mdx
# Discord 101 [[discord-101]] Hey there! My name is Huggy, the dog 🐕, and I'm looking forward to training with you during this RL Course! Although I don't know much about fetching sticks (yet), I know one or two things about Discord. So I wrote this guide to help you learn about it! <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/huggy-logo.jpg" alt="Huggy Logo"/> Discord is a free chat platform. If you've used Slack, **it's quite similar**. There is a Hugging Face Community Discord server with 36,000 members you can <a href="https://discord.gg/ydHrjt3WP5">join with a single click here</a>. So many humans to play with! Starting in Discord can be a bit intimidating, so let me take you through it. When you [sign up for our Discord server](http://hf.co/join/discord), you'll choose your interests. Make sure to **click "Reinforcement Learning"**. Then click next; you'll get to **introduce yourself in the `#introduce-yourself` channel**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/discord2.jpg" alt="Discord"/> ## So which channels are interesting to me? [[channels]] They are in the reinforcement learning lounge. **Don't forget to sign up for these channels** by clicking on 🤖 Reinforcement Learning in `role-assignment`. - `rl-announcements`: where we give the **latest information about the course**. - `rl-discussions`: where you can **discuss RL and share information**. - `rl-study-group`: where you can **ask questions and exchange ideas with your classmates**. - `rl-i-made-this`: where you can **share your projects and models**. The HF Community Server has a thriving community of human beings interested in many areas, so you can also learn from them. There are paper discussions, events, and many other things. Was this useful? Here are a couple of tips I can share with you: - There are **voice channels** you can use as well, although most people prefer text chat. - You can **use markdown style** for text chats, which is handy if you're writing code. Sadly this does not work as well for links. - You can open threads as well! It's a good idea when **it's a long conversation**. I hope this is useful! And if you have questions, just ask! See you later! Huggy 🐶
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit0/introduction.mdx
# Welcome to the 🤗 Deep Reinforcement Learning Course [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.jpg" alt="Deep RL Course thumbnail" width="100%"/> Welcome to the most fascinating topic in Artificial Intelligence: **Deep Reinforcement Learning**. This course will **teach you about Deep Reinforcement Learning from beginner to expert**. It’s completely free and open-source! In this introduction unit you’ll: - Learn more about the **course content**. - **Define the path** you’re going to take (either self-audit or certification process). - Learn more about the **AI vs. AI challenges** you're going to participate in. - Learn more **about us**. - **Create your Hugging Face account** (it’s free). - **Sign-up to our Discord server**, the place where you can chat with your classmates and us (the Hugging Face team). Let’s get started! ## What to expect? [[expect]] In this course, you will: - 📖 Study Deep Reinforcement Learning in **theory and practice.** - 🧑‍💻 Learn to **use famous Deep RL libraries** such as [Stable Baselines3](https://stable-baselines3.readthedocs.io/en/master/), [RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo), [Sample Factory](https://samplefactory.dev/) and [CleanRL](https://github.com/vwxyzjn/cleanrl). - 🤖 **Train agents in unique environments** such as [SnowballFight](https://huggingface.co/spaces/ThomasSimonini/SnowballFight), [Huggy the Doggo 🐶](https://huggingface.co/spaces/ThomasSimonini/Huggy), [VizDoom (Doom)](https://vizdoom.cs.put.edu.pl/) and classical ones such as [Space Invaders](https://gymnasium.farama.org/environments/atari/space_invaders/), [PyBullet](https://pybullet.org/wordpress/) and more. - 💾 Share your **trained agents with one line of code to the Hub** and also download powerful agents from the community. - 🏆 Participate in challenges where you will **evaluate your agents against other teams. You'll also get to play against the agents you'll train.** - 🎓 **Earn a certificate of completion** by completing 80% of the assignments. And more! At the end of this course, **you’ll get a solid foundation from the basics to the SOTA (state-of-the-art) of methods**. Don’t forget to **<a href="http://eepurl.com/ic5ZUD">sign up to the course</a>** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).** Sign up 👉 <a href="http://eepurl.com/ic5ZUD">here</a> ## What does the course look like? [[course-look-like]] The course is composed of: - *A theory part*: where you learn a **concept in theory**. - *A hands-on*: where you’ll learn **to use famous Deep RL libraries** to train your agents in unique environments. These hands-on will be **Google Colab notebooks with companion tutorial videos** if you prefer learning with video format! - *Challenges*: you'll get to put your agent to compete against other agents in different challenges. There will also be [a leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) for you to compare the agents' performance. ## What's the syllabus? 
[[syllabus]] This is the course's syllabus: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/syllabus1.jpg" alt="Syllabus Part 1" width="100%"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/syllabus2.jpg" alt="Syllabus Part 2" width="100%"/> ## Two paths: choose your own adventure [[two-paths]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/two-paths.jpg" alt="Two paths" width="100%"/> You can choose to follow this course either: - *To get a certificate of completion*: you need to complete 80% of the assignments before the end of September 2023. - *To get a certificate of honors*: you need to complete 100% of the assignments before the end of September 2023. - *As a simple audit*: you can participate in all challenges and do assignments if you want, but you have no deadlines. Both paths **are completely free**. Whatever path you choose, we advise you **to follow the recommended pace to enjoy the course and challenges with your fellow classmates.** You don't need to tell us which path you choose. **If you get more than 80% of the assignments done, you'll get a certificate.** ## The Certification Process [[certification-process]] The certification process is **completely free**: - *To get a certificate of completion*: you need to complete 80% of the assignments before the end of September 2023. - *To get a certificate of honors*: you need to complete 100% of the assignments before the end of September 2023. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/certification.jpg" alt="Course certification" width="100%"/> ## How to get most of the course? [[advice]] To get most of the course, we have some advice: 1. <a href="https://discord.gg/ydHrjt3WP5">Join study groups in Discord </a>: studying in groups is always easier. To do that, you need to join our discord server. If you're new to Discord, no worries! We have some tools that will help you learn about it. 2. **Do the quizzes and assignments**: the best way to learn is to do and test yourself. 3. **Define a schedule to stay in sync**: you can use our recommended pace schedule below or create yours. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/advice.jpg" alt="Course advice" width="100%"/> ## What tools do I need? [[tools]] You need only 3 things: - *A computer* with an internet connection. - *Google Colab (free version)*: most of our hands-on will use Google Colab, the **free version is enough.** - A *Hugging Face Account*: to push and load models. If you don’t have an account yet, you can create one **[here](https://hf.co/join)** (it’s free). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/tools.jpg" alt="Course tools needed" width="100%"/> ## What is the recommended pace? [[recommended-pace]] Each chapter in this course is designed **to be completed in 1 week, with approximately 3-4 hours of work per week**. However, you can take as much time as necessary to complete the course. If you want to dive into a topic more in-depth, we'll provide additional resources to help you achieve that. ## Who are we [[who-are-we]] About the author: - <a href="https://twitter.com/ThomasSimonini">Thomas Simonini</a> is a Developer Advocate at Hugging Face 🤗 specializing in Deep Reinforcement Learning. 
He founded the Deep Reinforcement Learning Course in 2018, which became one of the most used courses in Deep RL. About the team: - <a href="https://twitter.com/osanseviero">Omar Sanseviero</a> is a Machine Learning Engineer at Hugging Face where he works in the intersection of ML, Community and Open Source. Previously, Omar worked as a Software Engineer at Google in the teams of Assistant and TensorFlow Graphics. He is from Peru and likes llamas 🦙. - <a href="https://twitter.com/RisingSayak"> Sayak Paul</a> is a Developer Advocate Engineer at Hugging Face. He's interested in the area of representation learning (self-supervision, semi-supervision, model robustness). And he loves watching crime and action thrillers 🔪. ## When do the challenges start? [[challenges]] In this new version of the course, you have two types of challenges: - [A leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) to compare your agent's performance to other classmates'. - [AI vs. AI challenges](https://huggingface.co/learn/deep-rl-course/unit7/introduction?fw=pt) where you can train your agent and compete against other classmates' agents. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/challenges.jpg" alt="Challenges" width="100%"/> ## I found a bug, or I want to improve the course [[contribute]] Contributions are welcomed 🤗 - If you *found a bug 🐛 in a notebook*, please <a href="https://github.com/huggingface/deep-rl-class/issues">open an issue</a> and **describe the problem**. - If you *want to improve the course*, you can <a href="https://github.com/huggingface/deep-rl-class/pulls">open a Pull Request.</a> ## I still have questions [[questions]] Please ask your question in our <a href="https://discord.gg/ydHrjt3WP5">discord server #rl-discussions.</a>
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit0/setup.mdx
# Setup [[setup]] After all this information, it's time to get started. We're going to do two things: 1. **Create your Hugging Face account** if you don't have one already 2. **Sign up for Discord and introduce yourself** (don't be shy 🤗) ### Let's create your Hugging Face account (If it's not already done) create a Hugging Face account <a href="https://huggingface.co/join">here</a> ### Let's join our Discord server You can now sign up for our Discord server. This is the place where you **can chat with the community and with us, create and join study groups to grow with each other, and more** 👉🏻 Join our discord server <a href="https://discord.gg/ydHrjt3WP5">here.</a> When you join, remember to introduce yourself in #introduce-yourself and sign up for the reinforcement learning channels in #role-assignments. We have multiple RL-related channels: - `rl-announcements`: where we give the latest information about the course. - `rl-discussions`: where you can chat about RL and share information. - `rl-study-group`: where you can create and join study groups. - `rl-i-made-this`: where you can share your projects and models. If this is your first time using Discord, we wrote a Discord 101 guide with best practices. Check the next section. Congratulations! **You've just finished the onboarding**. You're now ready to start learning Deep Reinforcement Learning. Have fun! ### Keep Learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unitbonus1/play.mdx
# Play with Huggy [[play]] Now that you've trained Huggy and pushed him to the Hub, **you can play with him ❤️** This step is simple: - Open the Huggy game in your browser: https://huggingface.co/spaces/ThomasSimonini/Huggy - Click on Play with my Huggy model <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/load-huggy.jpg" alt="load-huggy" width="100%"> 1. In step 1, choose your model repository, which is the model id (in my case ThomasSimonini/ppo-Huggy). 2. In step 2, **choose which model you want to replay**: - I have multiple ones, since we saved a model every 500000 timesteps. - But since I want the most recent one, I choose `Huggy.onnx` 👉 It's good to **try with different model checkpoints to see the improvement of the agent.**
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unitbonus1/introduction.mdx
# Introduction [[introduction]] In this bonus unit, we'll reinforce what we learned in the first unit by teaching Huggy the Dog to fetch the stick and then [play with him directly in your browser](https://huggingface.co/spaces/ThomasSimonini/Huggy) 🐶 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit2/thumbnail.png" alt="Unit bonus 1 thumbnail" width="100%"> So let's get started 🚀
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unitbonus1/train.mdx
# Let's train and play with Huggy 🐶 [[train]] <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/bonus-unit1/bonus-unit1.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers. By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments. ## Let's train Huggy 🐶 **To start to train Huggy, click on Open In Colab button** 👇 : [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/bonus-unit1/bonus-unit1.ipynb) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit2/thumbnail.png" alt="Bonus Unit 1Thumbnail"> In this notebook, we'll reinforce what we learned in the first Unit by **teaching Huggy the Dog to fetch the stick and then play with it directly in your browser** ⬇️ Here is an example of what **you will achieve at the end of the unit.** ⬇️ (launch ▶ to see) ```python %%html <video controls autoplay><source src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy.mp4" type="video/mp4"></video> ``` ### The environment 🎮 - Huggy the Dog, an environment created by [Thomas Simonini](https://twitter.com/ThomasSimonini) based on [Puppo The Corgi](https://blog.unity.com/technology/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit) ### The library used 📚 - [MLAgents](https://github.com/Unity-Technologies/ml-agents) We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the Github Repo](https://github.com/huggingface/deep-rl-class/issues). ## Objectives of this notebook 🏆 At the end of the notebook, you will: - Understand **the state space, action space, and reward function used to train Huggy**. - **Train your own Huggy** to fetch the stick. - Be able to play **with your trained Huggy directly in your browser**. ## Prerequisites 🏗️ Before diving into the notebook, you need to: 🔲 📚 **Develop an understanding of the foundations of Reinforcement learning** (MC, TD, Rewards hypothesis...) by doing Unit 1 🔲 📚 **Read the introduction to Huggy** by doing Bonus Unit 1 ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> ## Clone the repository and install the dependencies 🔽 - We need to clone the repository, that contains ML-Agents. ```bash # Clone the repository (can take 3min) git clone --depth 1 https://github.com/Unity-Technologies/ml-agents ``` ```bash # Go inside the repository and install the package (can take 3min) %cd ml-agents pip3 install -e ./ml-agents-envs pip3 install -e ./ml-agents ``` ## Download and move the environment zip file in `./trained-envs-executables/linux/` - Our environment executable is in a zip file. 
- We need to download it and place it to `./trained-envs-executables/linux/` ```bash mkdir ./trained-envs-executables mkdir ./trained-envs-executables/linux ``` ```bash wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1zv3M95ZJTWHUVOWT6ckq_cm98nft8gdF' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1zv3M95ZJTWHUVOWT6ckq_cm98nft8gdF" -O ./trained-envs-executables/linux/Huggy.zip && rm -rf /tmp/cookies.txt ``` Download the file Huggy.zip from https://drive.google.com/uc?export=download&id=1zv3M95ZJTWHUVOWT6ckq_cm98nft8gdF using `wget`. Check out the full solution to download large files from GDrive [here](https://bcrf.biochem.wisc.edu/2021/02/05/download-google-drive-files-using-wget/) ```bash %%capture unzip -d ./trained-envs-executables/linux/ ./trained-envs-executables/linux/Huggy.zip ``` Make sure your file is accessible ```bash chmod -R 755 ./trained-envs-executables/linux/Huggy ``` ## Let's recap how this environment works ### The State Space: what Huggy perceives. Huggy doesn't "see" his environment. Instead, we provide him information about the environment: - The target (stick) position - The relative position between himself and the target - The orientation of his legs. Given all this information, Huggy **can decide which action to take next to fulfill his goal**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy.jpg" alt="Huggy" width="100%"> ### The Action Space: what moves Huggy can do <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-action.jpg" alt="Huggy action" width="100%"> **Joint motors drive huggy legs**. This means that to get the target, Huggy needs to **learn to rotate the joint motors of each of his legs correctly so he can move**. ### The Reward Function The reward function is designed so that **Huggy will fulfill his goal** : fetch the stick. Remember that one of the foundations of Reinforcement Learning is the *reward hypothesis*: a goal can be described as the **maximization of the expected cumulative reward**. Here, our goal is that Huggy **goes towards the stick but without spinning too much**. Hence, our reward function must translate this goal. Our reward function: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/reward.jpg" alt="Huggy reward function" width="100%"> - *Orientation bonus*: we **reward him for getting close to the target**. - *Time penalty*: a fixed-time penalty given at every action to **force him to get to the stick as fast as possible**. - *Rotation penalty*: we penalize Huggy if **he spins too much and turns too quickly**. - *Getting to the target reward*: we reward Huggy for **reaching the target**. ## Check the Huggy config file - In ML-Agents, you define the **training hyperparameters in config.yaml files.** - For the scope of this notebook, we're not going to modify the hyperparameters, but if you want to try as an experiment, Unity provides very [good documentation explaining each of them here](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Training-Configuration-File.md). - We need to create a config file for Huggy. 
- Go to `/content/ml-agents/config/ppo` - Create a new file called `Huggy.yaml` - Copy and paste the content below 🔽 ``` behaviors: Huggy: trainer_type: ppo hyperparameters: batch_size: 2048 buffer_size: 20480 learning_rate: 0.0003 beta: 0.005 epsilon: 0.2 lambd: 0.95 num_epoch: 3 learning_rate_schedule: linear network_settings: normalize: true hidden_units: 512 num_layers: 3 vis_encode_type: simple reward_signals: extrinsic: gamma: 0.995 strength: 1.0 checkpoint_interval: 200000 keep_checkpoints: 15 max_steps: 2e6 time_horizon: 1000 summary_freq: 50000 ``` - Don't forget to save the file! - **In the case you want to modify the hyperparameters**, in Google Colab notebook, you can click here to open the config.yaml: `/content/ml-agents/config/ppo/Huggy.yaml` We’re now ready to train our agent 🔥. ## Train our agent To train our agent, we just need to **launch mlagents-learn and select the executable containing the environment.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/mllearn.png" alt="ml learn function" width="100%"> With ML Agents, we run a training script. We define four parameters: 1. `mlagents-learn <config>`: the path where the hyperparameter config file is. 2. `--env`: where the environment executable is. 3. `--run_id`: the name you want to give to your training run id. 4. `--no-graphics`: to not launch the visualization during the training. Train the model and use the `--resume` flag to continue training in case of interruption. > It will fail first time when you use `--resume`, try running the block again to bypass the error. The training will take 30 to 45min depending on your machine (don't forget to **set up a GPU**), go take a ☕️ you deserve it 🤗. ```bash mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id="Huggy" --no-graphics ``` ## Push the agent to the 🤗 Hub - Now that we trained our agent, we’re **ready to push it to the Hub to be able to play with Huggy on your browser🔥.** To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and then get your token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> - Copy the token - Run the cell below and paste the token ```python from huggingface_hub import notebook_login notebook_login() ``` If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` Then, we simply need to run `mlagents-push-to-hf`. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/mlpush.png" alt="ml learn function" width="100%"> And we define 4 parameters: 1. `--run-id`: the name of the training run id. 2. `--local-dir`: where the agent was saved, it’s results/<run_id name>, so in my case results/First Training. 3. `--repo-id`: the name of the Hugging Face repo you want to create or update. It’s always <your huggingface username>/<the repo name> If the repo does not exist **it will be created automatically** 4. `--commit-message`: since HF repos are git repositories you need to give a commit message. 
```bash mlagents-push-to-hf --run-id="HuggyTraining" --local-dir="./results/Huggy" --repo-id="ThomasSimonini/ppo-Huggy" --commit-message="Huggy" ``` If everything worked you should see this at the end of the process (but with a different url 😆) : ``` Your model is pushed to the hub. You can view your model here: https://huggingface.co/ThomasSimonini/ppo-Huggy ``` It’s the link to your model repository. The repository contains a model card that explains how to use the model, your Tensorboard logs and your config file. **What’s awesome is that it’s a git repository, which means you can have different commits, update your repository with a new push, open Pull Requests, etc.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/modelcard.png" alt="ml learn function" width="100%"> But now comes the best part: **being able to play with Huggy online 👀.** ## Play with your Huggy 🐕 This step is the simplest: - Open the Huggy game in your browser: https://huggingface.co/spaces/ThomasSimonini/Huggy - Click on Play with my Huggy model <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/load-huggy.jpg" alt="load-huggy" width="100%"> 1. In step 1, choose your model repository, which is the model id (in my case ThomasSimonini/ppo-Huggy). 2. In step 2, **choose which model you want to replay**: - I have multiple ones, since we saved a model every 500000 timesteps. - But since I want the most recent one, I choose `Huggy.onnx` 👉 It's good **to try with different models steps to see the improvement of the agent.** Congrats on finishing this bonus unit! You can now sit and enjoy playing with your Huggy 🐶. And don't **forget to spread the love by sharing Huggy with your friends 🤗**. And if you share about it on social media, **please tag us @huggingface and me @simoninithomas** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-cover.jpeg" alt="Huggy cover" width="100%"> ## Keep Learning, Stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unitbonus1/how-huggy-works.mdx
# How Huggy works [[how-huggy-works]] Huggy is a Deep Reinforcement Learning environment made by Hugging Face and based on [Puppo the Corgi, a project by the Unity MLAgents team](https://blog.unity.com/technology/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit). This environment was created using the [Unity game engine](https://unity.com/) and [MLAgents](https://github.com/Unity-Technologies/ml-agents). ML-Agents is a toolkit for the game engine from Unity that allows us to **create environments using Unity or use pre-made environments to train our agents**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy.jpg" alt="Huggy" width="100%"> In this environment we aim to train Huggy to **fetch the stick we throw. This means he needs to move correctly toward the stick**. ## The State Space, what Huggy perceives. [[state-space]] Huggy doesn't "see" his environment. Instead, we provide him information about the environment: - The target (stick) position - The relative position between himself and the target - The orientation of his legs. Given all this information, Huggy can **use his policy to determine which action to take next to fulfill his goal**. ## The Action Space, what moves Huggy can perform [[action-space]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-action.jpg" alt="Huggy action" width="100%"> **Joint motors drive Huggy's legs**. This means that to get the target, Huggy needs to **learn to rotate the joint motors of each of his legs correctly so he can move**. ## The Reward Function [[reward-function]] The reward function is designed so that **Huggy will fulfill his goal**: fetch the stick. Remember that one of the foundations of Reinforcement Learning is the *reward hypothesis*: a goal can be described as the **maximization of the expected cumulative reward**. Here, our goal is that Huggy **goes towards the stick but without spinning too much**. Hence, our reward function must translate this goal. Our reward function: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/reward.jpg" alt="Huggy reward function" width="100%"> - *Orientation bonus*: we **reward him for getting close to the target**. - *Time penalty*: a fixed-time penalty given at every action to **force him to get to the stick as fast as possible**. - *Rotation penalty*: we penalize Huggy if **he spins too much and turns too quickly**. - *Getting to the target reward*: we reward Huggy for **reaching the target**. If you want to see what this reward function looks like mathematically, check [Puppo the Corgi presentation](https://blog.unity.com/technology/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit). ## Train Huggy Huggy aims **to learn to run correctly and as fast as possible toward the goal**. To do that, at every step and given the environment observation, he needs to decide how to rotate each joint motor of his legs to move correctly (not spinning too much) and towards the goal. 
The training loop looks like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-loop.jpg" alt="Huggy loop" width="100%"> The training environment looks like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/training-env.jpg" alt="Huggy training env" width="100%"> It's a place where a **stick is spawned randomly**. When Huggy reaches it, the stick gets spawned somewhere else. We built **multiple copies of the environment for the training**. This helps speed up the training by providing more diverse experiences. Now that you have the big picture of the environment, you're ready to train Huggy to fetch the stick. To do that, we're going to use [MLAgents](https://github.com/Unity-Technologies/ml-agents). Don't worry if you have never used it before. In this unit we'll use Google Colab to train Huggy, and then you'll be able to load your trained Huggy and play with him directly in the browser. In a future unit, we will study MLAgents more in-depth and see how it works. But for now, we keep things simple by just using the provided implementation.
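If it helps to see the reward design as code, here is a purely illustrative Python sketch of the *shape* of the reward function described above. The real reward is computed inside the Unity environment, so every name and coefficient below is a made-up assumption, not Huggy's actual implementation.

```python
# Illustrative only: the coefficients and signal names are invented for this sketch.
def huggy_reward(facing_target, reached_target, angular_speed,
                 orientation_coef=0.01, time_penalty=0.001,
                 rotation_coef=0.05, target_reward=1.0):
    reward = 0.0
    reward += orientation_coef * facing_target      # orientation bonus: reward getting closer / facing the stick
    reward -= time_penalty                          # fixed time penalty at every step: fetch the stick fast
    reward -= rotation_coef * abs(angular_speed)    # rotation penalty: don't spin or turn too quickly
    if reached_target:
        reward += target_reward                     # reward for reaching the stick
    return reward
```

The point of the sketch is simply that the four terms listed above are summed at every step, so Huggy is pushed toward the stick while being discouraged from wasting time or spinning.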
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unitbonus1/conclusion.mdx
# Conclusion [[conclusion]] Congrats on finishing this bonus unit! You can now sit and enjoy playing with your Huggy 🐶. And don't **forget to spread the love by sharing Huggy with your friends 🤗**. And if you share about it on social media, **please tag us @huggingface and me @simoninithomas** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-cover.jpeg" alt="Huggy cover" width="100%"> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit8/intuition-behind-ppo.mdx
# The intuition behind PPO [[the-intuition-behind-ppo]] The idea with Proximal Policy Optimization (PPO) is that we want to improve the training stability of the policy by limiting the change you make to the policy at each training epoch: **we want to avoid having too large of a policy update.** For two reasons: - We know empirically that smaller policy updates during training are **more likely to converge to an optimal solution.** - A too-big step in a policy update can result in falling “off the cliff” (getting a bad policy) **and taking a long time or even having no possibility to recover.** <figure class="image table text-center m-0 w-full"> <img class="center" src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/cliff.jpg" alt="Policy Update cliff"/> <figcaption>Taking smaller policy updates to improve the training stability</figcaption> <figcaption>Modified version from RL — Proximal Policy Optimization (PPO) <a href="https://jonathan-hui.medium.com/rl-proximal-policy-optimization-ppo-explained-77f014ec3f12">Explained by Jonathan Hui</a></figcaption> </figure> **So with PPO, we update the policy conservatively**. To do so, we need to measure how much the current policy changed compared to the former one using a ratio calculation between the current and former policy. And we clip this ratio in a range \\( [1 - \epsilon, 1 + \epsilon] \\), meaning that we **remove the incentive for the current policy to go too far from the old one (hence the proximal policy term).**
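To make this clipping idea concrete, here is a minimal PyTorch sketch of the clipped surrogate objective (an illustration with assumed tensor names, not the hands-on implementation): the probability ratio between the current and former policy is clamped to \\( [1 - \epsilon, 1 + \epsilon] \\) before we take the pessimistic minimum.

```python
import torch

# Illustrative sketch of PPO's clipped surrogate objective.
# `log_probs` come from the current policy, `old_log_probs` from the policy
# that collected the data, and `advantages` are the estimated advantages.
def clipped_surrogate_loss(log_probs, old_log_probs, advantages, clip_eps=0.2):
    # Ratio between current and former policy: r(theta) = pi_theta / pi_theta_old
    ratio = torch.exp(log_probs - old_log_probs)
    # Unclipped objective vs. objective with the ratio clamped to [1 - eps, 1 + eps]
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip_eps, 1.0 + clip_eps) * advantages
    # PPO maximizes the minimum of the two, so the loss to minimize is its negation
    return -torch.min(unclipped, clipped).mean()
```

Because we take the minimum of the clipped and unclipped terms, the policy gains nothing by pushing the ratio outside this range, which is exactly the conservative update described above.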
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit8/conclusion-sf.mdx
# Conclusion That's all for today. Congrats on finishing this Unit and the tutorial! ⭐️ Now that you've successfully trained your Doom agent, why not try deathmatch? Remember, that's a much more complex level than the one you've just trained on, **but it's a nice experiment and I advise you to try it.** If you do, don't hesitate to share your model in the `#rl-i-made-this` channel in our [discord server](https://www.hf.co/join/discord). This concludes the last unit, but we are not finished yet! 🤗 The following **bonus unit includes some of the most interesting, advanced, and cutting-edge work in Deep Reinforcement Learning**. See you next time 🔥 ## Keep Learning, Stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit8/hands-on-cleanrl.mdx
# Hands-on <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit8/unit8_part1.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> Now that we've studied the theory behind PPO, the best way to understand how it works **is to implement it from scratch.** Implementing an architecture from scratch is the best way to understand it, and it's a good habit. We have already done it for a value-based method with Q-Learning and a Policy-based method with Reinforce. So, to be able to code it, we're going to use two resources: - A tutorial made by [Costa Huang](https://github.com/vwxyzjn). Costa is behind [CleanRL](https://github.com/vwxyzjn/cleanrl), a Deep Reinforcement Learning library that provides high-quality single-file implementations with research-friendly features. - In addition to the tutorial, to go deeper, you can read the 37 core implementation details: [https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/](https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/) Then, to test its robustness, we're going to train it in: - [LunarLander-v2](https://www.gymlibrary.ml/environments/box2d/lunar_lander/) <figure class="image table text-center m-0 w-full"> <video alt="LunarLander" style="max-width: 70%; margin: auto;" autoplay loop autobuffer muted playsinline > <source src="assets/63_deep_rl_intro/lunarlander.mp4" type="video/mp4"> </video> </figure> And finally, we will push the trained model to the Hub to evaluate and visualize your agent playing. LunarLander-v2 is the first environment you used when you started this course. At that time, you didn't know how it worked, and now you can code it from scratch and train it. **How incredible is that 🤩.** <iframe src="https://giphy.com/embed/pynZagVcYxVUk" width="480" height="480" frameBorder="0" class="giphy-embed" allowFullScreen></iframe><p><a href="https://giphy.com/gifs/the-office-michael-heartbreak-pynZagVcYxVUk">via GIPHY</a></p> Let's get started! 🚀 The colab notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit8/unit8_part1.ipynb) # Unit 8: Proximal Policy Gradient (PPO) with PyTorch 🤖 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/thumbnail.png" alt="Unit 8"/> In this notebook, you'll learn to **code your PPO agent from scratch with PyTorch using the CleanRL implementation as a model**. To test its robustness, we're going to train it in: - [LunarLander-v2 🚀](https://www.gymlibrary.dev/environments/box2d/lunar_lander/) We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). ## Objectives of this notebook 🏆 At the end of the notebook, you will: - Be able to **code your PPO agent from scratch using PyTorch**. - Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥.
## Prerequisites 🏗️ Before diving into the notebook, you need to: 🔲 📚 Study [PPO by reading Unit 8](https://huggingface.co/deep-rl-course/unit8/introduction) 🤗 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push one model. We don't require a minimal result, but we **advise you to try different hyperparameter settings to get better results**. If you don't find your model, **go to the bottom of the page and click on the refresh button**. For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> ## Create a virtual display 🔽 During the notebook, we'll need to generate a replay video. To do so, in Colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). Hence the following cell will install the libraries and create and run a virtual screen 🖥 ```python apt install python-opengl apt install ffmpeg apt install xvfb pip install pyglet==1.5 pip install pyvirtualdisplay ``` ```python # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() ``` ## Install dependencies 🔽 For this exercise, we use `gym==0.22` because the video was recorded with Gym. ```python pip install gym==0.22 pip install imageio-ffmpeg pip install huggingface_hub pip install gym[box2d]==0.22 ``` ## Let's code PPO from scratch with Costa Huang's tutorial - For the core implementation of PPO we're going to use the excellent [Costa Huang](https://costa.sh/) tutorial. - In addition to the tutorial, to go deeper you can read the 37 core implementation details: https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/ 👉 The video tutorial: https://youtu.be/MEt6rrxH8W4 ```python from IPython.display import HTML HTML( '<iframe width="560" height="315" src="https://www.youtube.com/embed/MEt6rrxH8W4" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>' ) ``` ## Add the Hugging Face Integration 🤗 - In order to push our model to the Hub, we need to define a function `package_to_hub` - Add the dependencies we need to push our model to the Hub ```python from huggingface_hub import HfApi, upload_folder from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import tempfile import json import shutil import imageio from wasabi import Printer msg = Printer() ``` - Add a new argument in the `parse_args()` function to define the repo-id where we want to push the model. ```python # Adding HuggingFace argument parser.add_argument( "--repo-id", type=str, default="ThomasSimonini/ppo-CartPole-v1", help="id of the model repository from the Hugging Face Hub {username/repo_name}", ) ``` - Next, we add the methods needed to push the model to the Hub - These methods will: - `_evaluate_agent()`: evaluate the agent.
- `_generate_model_card()`: generate the model card of your agent. - `_record_video()`: record a video of your agent. ```python def package_to_hub( repo_id, model, hyperparameters, eval_env, video_fps=30, commit_message="Push agent to the Hub", token=None, logs=None, ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: trained model :param eval_env: environment used to evaluate the agent :param fps: number of fps for rendering the video :param commit_message: commit message :param logs: directory on local machine of tensorboard logs you'd like to upload """ msg.info( "This function will save, evaluate, generate a video of your agent, " "create a model card and push everything to the hub. " "It might take up to 1min. \n " "This is a work in progress: if you encounter a bug, please open an issue." ) # Step 1: Clone or create the repo repo_url = HfApi().create_repo( repo_id=repo_id, token=token, private=False, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = Path(tmpdirname) # Step 2: Save the model torch.save(model.state_dict(), tmpdirname / "model.pt") # Step 3: Evaluate the model and build JSON mean_reward, std_reward = _evaluate_agent(eval_env, 10, model) # First get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters.env_id, "mean_reward": mean_reward, "std_reward": std_reward, "n_evaluation_episodes": 10, "eval_datetime": eval_form_datetime, } # Write a JSON file with open(tmpdirname / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 4: Generate a video video_path = tmpdirname / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # Step 5: Generate the model card generated_model_card, metadata = _generate_model_card( "PPO", hyperparameters.env_id, mean_reward, std_reward, hyperparameters ) _save_model_card(tmpdirname, generated_model_card, metadata) # Step 6: Add logs if needed if logs: _add_logdir(tmpdirname, Path(logs)) msg.info(f"Pushing repo {repo_id} to the Hugging Face Hub") repo_url = upload_folder( repo_id=repo_id, folder_path=tmpdirname, path_in_repo="", commit_message=commit_message, token=token, ) msg.info(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") return repo_url def _evaluate_agent(env, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. 
:param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 while done is False: state = torch.Tensor(state).to(device) action, _, _, _ = policy.get_action_and_value(state) new_state, reward, done, info = env.step(action.cpu().numpy()) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward def record_video(env, policy, out_directory, fps=30): images = [] done = False state = env.reset() img = env.render(mode="rgb_array") images.append(img) while not done: state = torch.Tensor(state).to(device) # Take the action (index) that have the maximum expected future reward given that state action, _, _, _ = policy.get_action_and_value(state) state, reward, done, info = env.step( action.cpu().numpy() ) # We directly put next_state = state for recording logic img = env.render(mode="rgb_array") images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def _generate_model_card(model_name, env_id, mean_reward, std_reward, hyperparameters): """ Generate the model card for the Hub :param model_name: name of the model :env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent :hyperparameters: training arguments """ # Step 1: Select the tags metadata = generate_metadata(model_name, env_id, mean_reward, std_reward) # Transform the hyperparams namespace to string converted_dict = vars(hyperparameters) converted_str = str(converted_dict) converted_str = converted_str.split(", ") converted_str = "\n".join(converted_str) # Step 2: Generate the model card model_card = f""" # PPO Agent Playing {env_id} This is a trained model of a PPO agent playing {env_id}. # Hyperparameters """ return model_card, metadata def generate_metadata(model_name, env_id, mean_reward, std_reward): """ Define the tags for the model card :param model_name: name of the model :param env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent """ metadata = {} metadata["tags"] = [ env_id, "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course", ] # Add metrics eval = metadata_eval_result( model_pretty_name=model_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_id, dataset_id=env_id, ) # Merges both dictionaries metadata = {**metadata, **eval} return metadata def _save_model_card(local_path, generated_model_card, metadata): """Saves a model card for the repository. 
:param local_path: repository directory :param generated_model_card: model card generated by _generate_model_card() :param metadata: metadata """ readme_path = local_path / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = generated_model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) def _add_logdir(local_path: Path, logdir: Path): """Adds a logdir to the repository. :param local_path: repository directory :param logdir: logdir directory """ if logdir.exists() and logdir.is_dir(): # Add the logdir to the repository under new dir called logs repo_logdir = local_path / "logs" # Delete current logs if they exist if repo_logdir.exists(): shutil.rmtree(repo_logdir) # Copy logdir into repo logdir shutil.copytree(logdir, repo_logdir) ``` - Finally, we call this function at the end of the PPO training ```python # Create the evaluation environment eval_env = gym.make(args.env_id) package_to_hub( repo_id=args.repo_id, model=agent, # The model we want to save hyperparameters=args, eval_env=gym.make(args.env_id), logs=f"runs/{run_name}", ) ``` - Here's what the final ppo.py file looks like: ```python # docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/ppo/#ppopy import argparse import os import random import time from distutils.util import strtobool import gym import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.distributions.categorical import Categorical from torch.utils.tensorboard import SummaryWriter from huggingface_hub import HfApi, upload_folder from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import tempfile import json import shutil import imageio from wasabi import Printer msg = Printer() def parse_args(): # fmt: off parser = argparse.ArgumentParser() parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"), help="the name of this experiment") parser.add_argument("--seed", type=int, default=1, help="seed of the experiment") parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, `torch.backends.cudnn.deterministic=False`") parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, cuda will be enabled by default") parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="if toggled, this experiment will be tracked with Weights and Biases") parser.add_argument("--wandb-project-name", type=str, default="cleanRL", help="the wandb's project name") parser.add_argument("--wandb-entity", type=str, default=None, help="the entity (team) of wandb's project") parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="weather to capture videos of the agent performances (check out `videos` folder)") # Algorithm specific arguments parser.add_argument("--env-id", type=str, default="CartPole-v1", help="the id of the environment") parser.add_argument("--total-timesteps", type=int, default=50000, help="total timesteps of the experiments") parser.add_argument("--learning-rate", type=float, default=2.5e-4, help="the learning rate of the optimizer") parser.add_argument("--num-envs", type=int, default=4, help="the 
number of parallel game environments") parser.add_argument("--num-steps", type=int, default=128, help="the number of steps to run in each environment per policy rollout") parser.add_argument("--anneal-lr", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggle learning rate annealing for policy and value networks") parser.add_argument("--gae", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Use GAE for advantage computation") parser.add_argument("--gamma", type=float, default=0.99, help="the discount factor gamma") parser.add_argument("--gae-lambda", type=float, default=0.95, help="the lambda for the general advantage estimation") parser.add_argument("--num-minibatches", type=int, default=4, help="the number of mini-batches") parser.add_argument("--update-epochs", type=int, default=4, help="the K epochs to update the policy") parser.add_argument("--norm-adv", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggles advantages normalization") parser.add_argument("--clip-coef", type=float, default=0.2, help="the surrogate clipping coefficient") parser.add_argument("--clip-vloss", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggles whether or not to use a clipped loss for the value function, as per the paper.") parser.add_argument("--ent-coef", type=float, default=0.01, help="coefficient of the entropy") parser.add_argument("--vf-coef", type=float, default=0.5, help="coefficient of the value function") parser.add_argument("--max-grad-norm", type=float, default=0.5, help="the maximum norm for the gradient clipping") parser.add_argument("--target-kl", type=float, default=None, help="the target KL divergence threshold") # Adding HuggingFace argument parser.add_argument("--repo-id", type=str, default="ThomasSimonini/ppo-CartPole-v1", help="id of the model repository from the Hugging Face Hub {username/repo_name}") args = parser.parse_args() args.batch_size = int(args.num_envs * args.num_steps) args.minibatch_size = int(args.batch_size // args.num_minibatches) # fmt: on return args def package_to_hub( repo_id, model, hyperparameters, eval_env, video_fps=30, commit_message="Push agent to the Hub", token=None, logs=None, ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: trained model :param eval_env: environment used to evaluate the agent :param fps: number of fps for rendering the video :param commit_message: commit message :param logs: directory on local machine of tensorboard logs you'd like to upload """ msg.info( "This function will save, evaluate, generate a video of your agent, " "create a model card and push everything to the hub. " "It might take up to 1min. \n " "This is a work in progress: if you encounter a bug, please open an issue." 
) # Step 1: Clone or create the repo repo_url = HfApi().create_repo( repo_id=repo_id, token=token, private=False, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = Path(tmpdirname) # Step 2: Save the model torch.save(model.state_dict(), tmpdirname / "model.pt") # Step 3: Evaluate the model and build JSON mean_reward, std_reward = _evaluate_agent(eval_env, 10, model) # First get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters.env_id, "mean_reward": mean_reward, "std_reward": std_reward, "n_evaluation_episodes": 10, "eval_datetime": eval_form_datetime, } # Write a JSON file with open(tmpdirname / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 4: Generate a video video_path = tmpdirname / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # Step 5: Generate the model card generated_model_card, metadata = _generate_model_card( "PPO", hyperparameters.env_id, mean_reward, std_reward, hyperparameters ) _save_model_card(tmpdirname, generated_model_card, metadata) # Step 6: Add logs if needed if logs: _add_logdir(tmpdirname, Path(logs)) msg.info(f"Pushing repo {repo_id} to the Hugging Face Hub") repo_url = upload_folder( repo_id=repo_id, folder_path=tmpdirname, path_in_repo="", commit_message=commit_message, token=token, ) msg.info(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") return repo_url def _evaluate_agent(env, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. :param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 while done is False: state = torch.Tensor(state).to(device) action, _, _, _ = policy.get_action_and_value(state) new_state, reward, done, info = env.step(action.cpu().numpy()) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward def record_video(env, policy, out_directory, fps=30): images = [] done = False state = env.reset() img = env.render(mode="rgb_array") images.append(img) while not done: state = torch.Tensor(state).to(device) # Take the action (index) that have the maximum expected future reward given that state action, _, _, _ = policy.get_action_and_value(state) state, reward, done, info = env.step( action.cpu().numpy() ) # We directly put next_state = state for recording logic img = env.render(mode="rgb_array") images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def _generate_model_card(model_name, env_id, mean_reward, std_reward, hyperparameters): """ Generate the model card for the Hub :param model_name: name of the model :env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent :hyperparameters: training arguments """ # Step 1: Select the tags metadata = generate_metadata(model_name, env_id, mean_reward, std_reward) # Transform the hyperparams namespace to string converted_dict = vars(hyperparameters) converted_str = str(converted_dict) converted_str = converted_str.split(", ") converted_str = "\n".join(converted_str) # 
Step 2: Generate the model card model_card = f""" # PPO Agent Playing {env_id} This is a trained model of a PPO agent playing {env_id}. # Hyperparameters """ return model_card, metadata def generate_metadata(model_name, env_id, mean_reward, std_reward): """ Define the tags for the model card :param model_name: name of the model :param env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent """ metadata = {} metadata["tags"] = [ env_id, "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course", ] # Add metrics eval = metadata_eval_result( model_pretty_name=model_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_id, dataset_id=env_id, ) # Merges both dictionaries metadata = {**metadata, **eval} return metadata def _save_model_card(local_path, generated_model_card, metadata): """Saves a model card for the repository. :param local_path: repository directory :param generated_model_card: model card generated by _generate_model_card() :param metadata: metadata """ readme_path = local_path / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = generated_model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) def _add_logdir(local_path: Path, logdir: Path): """Adds a logdir to the repository. :param local_path: repository directory :param logdir: logdir directory """ if logdir.exists() and logdir.is_dir(): # Add the logdir to the repository under new dir called logs repo_logdir = local_path / "logs" # Delete current logs if they exist if repo_logdir.exists(): shutil.rmtree(repo_logdir) # Copy logdir into repo logdir shutil.copytree(logdir, repo_logdir) def make_env(env_id, seed, idx, capture_video, run_name): def thunk(): env = gym.make(env_id) env = gym.wrappers.RecordEpisodeStatistics(env) if capture_video: if idx == 0: env = gym.wrappers.RecordVideo(env, f"videos/{run_name}") env.seed(seed) env.action_space.seed(seed) env.observation_space.seed(seed) return env return thunk def layer_init(layer, std=np.sqrt(2), bias_const=0.0): torch.nn.init.orthogonal_(layer.weight, std) torch.nn.init.constant_(layer.bias, bias_const) return layer class Agent(nn.Module): def __init__(self, envs): super().__init__() self.critic = nn.Sequential( layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64)), nn.Tanh(), layer_init(nn.Linear(64, 64)), nn.Tanh(), layer_init(nn.Linear(64, 1), std=1.0), ) self.actor = nn.Sequential( layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64)), nn.Tanh(), layer_init(nn.Linear(64, 64)), nn.Tanh(), layer_init(nn.Linear(64, envs.single_action_space.n), std=0.01), ) def get_value(self, x): return self.critic(x) def get_action_and_value(self, x, action=None): logits = self.actor(x) probs = Categorical(logits=logits) if action is None: action = probs.sample() return action, probs.log_prob(action), probs.entropy(), self.critic(x) if __name__ == "__main__": args = parse_args() run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}" if args.track: import wandb wandb.init( project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, 
config=vars(args), name=run_name, monitor_gym=True, save_code=True, ) writer = SummaryWriter(f"runs/{run_name}") writer.add_text( "hyperparameters", "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])), ) # TRY NOT TO MODIFY: seeding random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = args.torch_deterministic device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") # env setup envs = gym.vector.SyncVectorEnv( [make_env(args.env_id, args.seed + i, i, args.capture_video, run_name) for i in range(args.num_envs)] ) assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported" agent = Agent(envs).to(device) optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5) # ALGO Logic: Storage setup obs = torch.zeros((args.num_steps, args.num_envs) + envs.single_observation_space.shape).to(device) actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device) logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device) rewards = torch.zeros((args.num_steps, args.num_envs)).to(device) dones = torch.zeros((args.num_steps, args.num_envs)).to(device) values = torch.zeros((args.num_steps, args.num_envs)).to(device) # TRY NOT TO MODIFY: start the game global_step = 0 start_time = time.time() next_obs = torch.Tensor(envs.reset()).to(device) next_done = torch.zeros(args.num_envs).to(device) num_updates = args.total_timesteps // args.batch_size for update in range(1, num_updates + 1): # Annealing the rate if instructed to do so. if args.anneal_lr: frac = 1.0 - (update - 1.0) / num_updates lrnow = frac * args.learning_rate optimizer.param_groups[0]["lr"] = lrnow for step in range(0, args.num_steps): global_step += 1 * args.num_envs obs[step] = next_obs dones[step] = next_done # ALGO LOGIC: action logic with torch.no_grad(): action, logprob, _, value = agent.get_action_and_value(next_obs) values[step] = value.flatten() actions[step] = action logprobs[step] = logprob # TRY NOT TO MODIFY: execute the game and log data. 
next_obs, reward, done, info = envs.step(action.cpu().numpy()) rewards[step] = torch.tensor(reward).to(device).view(-1) next_obs, next_done = torch.Tensor(next_obs).to(device), torch.Tensor(done).to(device) for item in info: if "episode" in item.keys(): print(f"global_step={global_step}, episodic_return={item['episode']['r']}") writer.add_scalar("charts/episodic_return", item["episode"]["r"], global_step) writer.add_scalar("charts/episodic_length", item["episode"]["l"], global_step) break # bootstrap value if not done with torch.no_grad(): next_value = agent.get_value(next_obs).reshape(1, -1) if args.gae: advantages = torch.zeros_like(rewards).to(device) lastgaelam = 0 for t in reversed(range(args.num_steps)): if t == args.num_steps - 1: nextnonterminal = 1.0 - next_done nextvalues = next_value else: nextnonterminal = 1.0 - dones[t + 1] nextvalues = values[t + 1] delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t] advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam returns = advantages + values else: returns = torch.zeros_like(rewards).to(device) for t in reversed(range(args.num_steps)): if t == args.num_steps - 1: nextnonterminal = 1.0 - next_done next_return = next_value else: nextnonterminal = 1.0 - dones[t + 1] next_return = returns[t + 1] returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return advantages = returns - values # flatten the batch b_obs = obs.reshape((-1,) + envs.single_observation_space.shape) b_logprobs = logprobs.reshape(-1) b_actions = actions.reshape((-1,) + envs.single_action_space.shape) b_advantages = advantages.reshape(-1) b_returns = returns.reshape(-1) b_values = values.reshape(-1) # Optimizing the policy and value network b_inds = np.arange(args.batch_size) clipfracs = [] for epoch in range(args.update_epochs): np.random.shuffle(b_inds) for start in range(0, args.batch_size, args.minibatch_size): end = start + args.minibatch_size mb_inds = b_inds[start:end] _, newlogprob, entropy, newvalue = agent.get_action_and_value( b_obs[mb_inds], b_actions.long()[mb_inds] ) logratio = newlogprob - b_logprobs[mb_inds] ratio = logratio.exp() with torch.no_grad(): # calculate approx_kl http://joschu.net/blog/kl-approx.html old_approx_kl = (-logratio).mean() approx_kl = ((ratio - 1) - logratio).mean() clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()] mb_advantages = b_advantages[mb_inds] if args.norm_adv: mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8) # Policy loss pg_loss1 = -mb_advantages * ratio pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef) pg_loss = torch.max(pg_loss1, pg_loss2).mean() # Value loss newvalue = newvalue.view(-1) if args.clip_vloss: v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2 v_clipped = b_values[mb_inds] + torch.clamp( newvalue - b_values[mb_inds], -args.clip_coef, args.clip_coef, ) v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2 v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped) v_loss = 0.5 * v_loss_max.mean() else: v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean() entropy_loss = entropy.mean() loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm) optimizer.step() if args.target_kl is not None: if approx_kl > args.target_kl: break y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy() var_y = np.var(y_true) 
explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y # TRY NOT TO MODIFY: record rewards for plotting purposes writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step) writer.add_scalar("losses/value_loss", v_loss.item(), global_step) writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step) writer.add_scalar("losses/entropy", entropy_loss.item(), global_step) writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step) writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step) writer.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step) writer.add_scalar("losses/explained_variance", explained_var, global_step) print("SPS:", int(global_step / (time.time() - start_time))) writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step) envs.close() writer.close() # Create the evaluation environment eval_env = gym.make(args.env_id) package_to_hub( repo_id=args.repo_id, model=agent, # The model we want to save hyperparameters=args, eval_env=gym.make(args.env_id), logs=f"runs/{run_name}", ) ``` To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and get your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> - Copy the token - Run the cell below and paste the token ```python from huggingface_hub import notebook_login notebook_login() !git config --global credential.helper store ``` If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` ## Let's start the training 🔥 ⚠️ ⚠️ ⚠️ Don't use **the same repo id with the one you used for the Unit 1** - Now that you've coded PPO from scratch and added the Hugging Face Integration, we're ready to start the training 🔥 - First, you need to copy all your code to a file you create called `ppo.py` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/step1.png" alt="PPO"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/step2.png" alt="PPO"/> - Now we just need to run this python script using `python <name-of-python-script>.py` with the additional parameters we defined using `argparse` - You should modify more hyperparameters otherwise the training will not be super stable. ```python !python ppo.py --env-id="LunarLander-v2" --repo-id="YOUR_REPO_ID" --total-timesteps=50000 ``` ## Some additional challenges 🏆 The best way to learn **is to try things on your own**! Why not try another environment? Or why not trying to modify the implementation to work with Gymnasium? See you in Unit 8, part 2 where we're going to train agents to play Doom 🔥 ## Keep learning, stay awesome 🤗
hf_public_repos/deep-rl-class/units/en/unit8/introduction.mdx
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/thumbnail.png" alt="Unit 8"/> In Unit 6, we learned about Advantage Actor Critic (A2C), a hybrid architecture combining value-based and policy-based methods that helps to stabilize the training by reducing the variance with: - *An Actor* that controls **how our agent behaves** (policy-based method). - *A Critic* that measures **how good the action taken is** (value-based method). Today we'll learn about Proximal Policy Optimization (PPO), an architecture that **improves our agent's training stability by avoiding policy updates that are too large**. To do that, we use a ratio that indicates the difference between our current and old policy and clip this ratio to a specific range \\( [1 - \epsilon, 1 + \epsilon] \\) . Doing this will ensure **that our policy update will not be too large and that the training is more stable.** This Unit is in two parts: - In this first part, you'll learn the theory behind PPO and code your PPO agent from scratch using the [CleanRL](https://github.com/vwxyzjn/cleanrl) implementation. To test its robustness you'll use LunarLander-v2. LunarLander-v2 **is the first environment you used when you started this course**. At that time, you didn't know how PPO worked, and now, **you can code it from scratch and train it. How incredible is that 🤩**. - In the second part, we'll get deeper into PPO optimization by using [Sample-Factory](https://samplefactory.dev/) and train an agent playing vizdoom (an open source version of Doom). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/environments.png" alt="Environment"/> <figcaption>These are the environments you're going to use to train your agents: VizDoom environments</figcaption> </figure> Sound exciting? Let's get started! 🚀
hf_public_repos/deep-rl-class/units/en/unit8/introduction-sf.mdx
# Introduction to PPO with Sample-Factory <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/thumbnail2.png" alt="thumbnail"/> In this second part of Unit 8, we'll get deeper into PPO optimization by using [Sample-Factory](https://samplefactory.dev/), an **asynchronous implementation of the PPO algorithm**, to train our agent to play [vizdoom](https://vizdoom.cs.put.edu.pl/) (an open source version of Doom). In the notebook, **you'll train your agent to play the Health Gathering level**, where the agent must collect health packs to avoid dying. After that, you can **train your agent to play more complex levels, such as Deathmatch**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/environments.png" alt="Environment"/> Sound exciting? Let's get started! 🚀 The hands-on is made by [Edward Beeching](https://twitter.com/edwardbeeching), a Machine Learning Research Scientist at Hugging Face. He worked on Godot Reinforcement Learning Agents, an open-source interface for developing environments and agents in the Godot Game Engine.
hf_public_repos/deep-rl-class/units/en/unit8/hands-on-sf.mdx
# Hands-on: advanced Deep Reinforcement Learning. Using Sample Factory to play Doom from pixels <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit8/unit8_part2.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> The colab notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit8/unit8_part2.ipynb) # Unit 8 Part 2: Advanced Deep Reinforcement Learning. Using Sample Factory to play Doom from pixels <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/thumbnail2.png" alt="Thumbnail"/> In this notebook, we will learn how to train a Deep Neural Network to collect objects in a 3D environment based on the game of Doom, a video of the resulting policy is shown below. We train this policy using [Sample Factory](https://www.samplefactory.dev/), an asynchronous implementation of the PPO algorithm. Please note the following points: * [Sample Factory](https://www.samplefactory.dev/) is an advanced RL framework and **only functions on Linux and Mac** (not Windows). * The framework performs best on a **GPU machine with many CPU cores**, where it can achieve speeds of 100k interactions per second. The resources available on a standard Colab notebook **limit the performance of this library**. So the speed in this setting **does not reflect the real-world performance**. * Benchmarks for Sample Factory are available in a number of settings, check out the [examples](https://github.com/alex-petrenko/sample-factory/tree/master/sf_examples) if you want to find out more. ```python from IPython.display import HTML HTML( """<video width="640" height="480" controls> <source src="https://huggingface.co/edbeeching/doom_health_gathering_supreme_3333/resolve/main/replay.mp4" type="video/mp4">Your browser does not support the video tag.</video>""" ) ``` To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push one model: - `doom_health_gathering_supreme` get a result of >= 5. To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward** If you don't find your model, **go to the bottom of the page and click on the refresh button** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> Before starting to train our agent, let's **study the library and environments we're going to use**. ## Sample Factory [Sample Factory](https://www.samplefactory.dev/) is one of the **fastest RL libraries focused on very efficient synchronous and asynchronous implementations of policy gradients (PPO)**. 
Sample Factory is thoroughly **tested, used by many researchers and practitioners**, and is actively maintained. Our implementation is known to **reach SOTA performance in a variety of domains while minimizing RL experiment training time and hardware requirements**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/samplefactoryenvs.png" alt="Sample factory"/> ### Key features - Highly optimized algorithm [architecture](https://www.samplefactory.dev/06-architecture/overview/) for maximum learning throughput - [Synchronous and asynchronous](https://www.samplefactory.dev/07-advanced-topics/sync-async/) training regimes - [Serial (single-process) mode](https://www.samplefactory.dev/07-advanced-topics/serial-mode/) for easy debugging - Optimal performance in both CPU-based and [GPU-accelerated environments](https://www.samplefactory.dev/09-environment-integrations/isaacgym/) - Single- & multi-agent training, self-play, supports [training multiple policies](https://www.samplefactory.dev/07-advanced-topics/multi-policy-training/) at once on one or many GPUs - Population-Based Training ([PBT](https://www.samplefactory.dev/07-advanced-topics/pbt/)) - Discrete, continuous, hybrid action spaces - Vector-based, image-based, dictionary observation spaces - Automatically creates a model architecture by parsing action/observation space specification. Supports [custom model architectures](https://www.samplefactory.dev/03-customization/custom-models/) - Designed to be imported into other projects, [custom environments](https://www.samplefactory.dev/03-customization/custom-environments/) are first-class citizens - Detailed [WandB and Tensorboard summaries](https://www.samplefactory.dev/05-monitoring/metrics-reference/), [custom metrics](https://www.samplefactory.dev/05-monitoring/custom-metrics/) - [HuggingFace 🤗 integration](https://www.samplefactory.dev/10-huggingface/huggingface/) (upload trained models and metrics to the Hub) - [Multiple](https://www.samplefactory.dev/09-environment-integrations/mujoco/) [example](https://www.samplefactory.dev/09-environment-integrations/atari/) [environment](https://www.samplefactory.dev/09-environment-integrations/vizdoom/) [integrations](https://www.samplefactory.dev/09-environment-integrations/dmlab/) with tuned parameters and trained models All of the above policies are available on the 🤗 hub. Search for the tag [sample-factory](https://huggingface.co/models?library=sample-factory&sort=downloads) ### How sample-factory works Sample-factory is one of the **most highly optimized RL implementations available to the community**. It works by **spawning multiple processes that run rollout workers, inference workers and a learner worker**. The *workers* **communicate through shared memory, which lowers the communication cost between processes**. The *rollout workers* interact with the environment and send observations to the *inference workers*. The *inferences workers* query a fixed version of the policy and **send actions back to the rollout worker**. After *k* steps the rollout works send a trajectory of experience to the learner worker, **which it uses to update the agent’s policy network**. 
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/samplefactory.png" alt="Sample factory"/>

### Actor Critic models in Sample-factory

Actor Critic models in Sample Factory are composed of three components:

- **Encoder** - Processes input observations (images, vectors) and maps them to a vector. This is the part of the model you will most likely want to customize.
- **Core** - Integrates vectors from one or more encoders, and can optionally include a single- or multi-layer LSTM/GRU in a memory-based agent.
- **Decoder** - Applies additional layers to the output of the model core before computing the policy and value outputs.

The library has been designed to automatically support any observation and action spaces. Users can easily add their custom models. You can find out more in the [documentation](https://www.samplefactory.dev/03-customization/custom-models/#actor-critic-models-in-sample-factory).

## ViZDoom

[ViZDoom](https://vizdoom.cs.put.edu.pl/) is an **open-source Python interface for the Doom Engine**.

The library was created in 2016 by Marek Wydmuch and Michal Kempka at the Institute of Computing Science, Poznan University of Technology, Poland.

The library enables the **training of agents directly from the screen pixels in a number of scenarios**, including team deathmatch, shown in the video below. Because the ViZDoom environment is based on a game that was created in the 90s, it can be run on modern hardware at accelerated speeds, **allowing us to learn complex AI behaviors fairly quickly**.

The library includes features such as:

- Multi-platform (Linux, macOS, Windows),
- API for Python and C++,
- [OpenAI Gym](https://www.gymlibrary.dev/) environment wrappers,
- Easy-to-create custom scenarios (visual editors, scripting language, and examples available),
- Async and sync single-player and multiplayer modes,
- Lightweight (few MBs) and fast (up to 7000 fps in sync mode, single-threaded),
- Customizable resolution and rendering parameters,
- Access to the depth buffer (3D vision),
- Automatic labeling of game objects visible in the frame,
- Access to the audio buffer,
- Access to the list of actors/objects and map geometry,
- Off-screen rendering and episode recording,
- Time scaling in async mode.

## We first need to install some dependencies that are required for the ViZDoom environment

Now that our Colab runtime is set up, we can start by installing the dependencies required to run ViZDoom on Linux.

If you are following along on your own Mac, you will want to follow the installation instructions on the [github page](https://github.com/Farama-Foundation/ViZDoom/blob/master/doc/Quickstart.md#-quickstart-for-macos-and-anaconda3-python-36).
```python # Install ViZDoom deps from # https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux apt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \ nasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \ libopenal-dev timidity libwildmidi-dev unzip ffmpeg # Boost libraries apt-get install libboost-all-dev # Lua binding dependencies apt-get install liblua5.1-dev ``` ## Then we can install Sample Factory and ViZDoom - This can take 7min ```bash pip install sample-factory pip install vizdoom ``` ## Setting up the Doom Environment in sample-factory ```python import functools from sample_factory.algo.utils.context import global_model_factory from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args from sample_factory.envs.env_utils import register_env from sample_factory.train import run_rl from sf_examples.vizdoom.doom.doom_model import make_vizdoom_encoder from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults from sf_examples.vizdoom.doom.doom_utils import DOOM_ENVS, make_doom_env_from_spec # Registers all the ViZDoom environments def register_vizdoom_envs(): for env_spec in DOOM_ENVS: make_env_func = functools.partial(make_doom_env_from_spec, env_spec) register_env(env_spec.name, make_env_func) # Sample Factory allows the registration of a custom Neural Network architecture # See https://github.com/alex-petrenko/sample-factory/blob/master/sf_examples/vizdoom/doom/doom_model.py for more details def register_vizdoom_models(): global_model_factory().register_encoder_factory(make_vizdoom_encoder) def register_vizdoom_components(): register_vizdoom_envs() register_vizdoom_models() # parse the command line args and create a config def parse_vizdoom_cfg(argv=None, evaluation=False): parser, _ = parse_sf_args(argv=argv, evaluation=evaluation) # parameters specific to Doom envs add_doom_env_args(parser) # override Doom default values for algo parameters doom_override_defaults(parser) # second parsing pass yields the final configuration final_cfg = parse_full_cfg(parser, argv) return final_cfg ``` Now that the setup if complete, we can train the agent. We have chosen here to learn a ViZDoom task called `Health Gathering Supreme`. ### The scenario: Health Gathering Supreme <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/Health-Gathering-Supreme.png" alt="Health-Gathering-Supreme"/> The objective of this scenario is to **teach the agent how to survive without knowing what makes it survive**. The Agent know only that **life is precious** and death is bad so **it must learn what prolongs its existence and that its health is connected with survival**. The map is a rectangle containing walls and with a green, acidic floor which **hurts the player periodically**. Initially there are some medkits spread uniformly over the map. A new medkit falls from the skies every now and then. **Medkits heal some portions of player's health** - to survive, the agent needs to pick them up. The episode finishes after the player's death or on timeout. Further configuration: - Living_reward = 1 - 3 available buttons: turn left, turn right, move forward - 1 available game variable: HEALTH - death penalty = 100 You can find out more about the scenarios available in ViZDoom [here](https://github.com/Farama-Foundation/ViZDoom/tree/master/scenarios). 
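As a side note, here is a minimal sketch of how a scenario configuration like the one above maps onto the raw ViZDoom Python API. This is separate from the Sample Factory wrappers we use in this notebook, and the scenario file name is an assumption based on the scenario files shipped with ViZDoom:

```python
# Minimal sketch using the raw ViZDoom Python API, not the Sample Factory pipeline used in this notebook.
# The .cfg file name below is assumed to match the scenarios shipped with ViZDoom.
import vizdoom as vzd

game = vzd.DoomGame()
game.load_config("health_gathering_supreme.cfg")  # assumed path to the scenario config

# The settings described above can also be applied programmatically:
game.set_living_reward(1)       # +1 reward per tic survived
game.set_death_penalty(100)     # -100 when the agent dies
game.set_available_buttons([vzd.Button.TURN_LEFT, vzd.Button.TURN_RIGHT, vzd.Button.MOVE_FORWARD])
game.set_available_game_variables([vzd.GameVariable.HEALTH])

game.init()
game.new_episode()
while not game.is_episode_finished():
    state = game.get_state()              # screen buffer + game variables (HEALTH)
    reward = game.make_action([0, 0, 1])  # dummy policy: always move forward
game.close()
```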
There are also a number of more complex scenarios that have been create for ViZDoom, such as the ones detailed on [this github page](https://github.com/edbeeching/3d_control_deep_rl). ## Training the agent - We're going to train the agent for 4000000 steps. It will take approximately 20min ```python ## Start the training, this should take around 15 minutes register_vizdoom_components() # The scenario we train on today is health gathering # other scenarios include "doom_basic", "doom_two_colors_easy", "doom_dm", "doom_dwango5", "doom_my_way_home", "doom_deadly_corridor", "doom_defend_the_center", "doom_defend_the_line" env = "doom_health_gathering_supreme" cfg = parse_vizdoom_cfg( argv=[f"--env={env}", "--num_workers=8", "--num_envs_per_worker=4", "--train_for_env_steps=4000000"] ) status = run_rl(cfg) ``` ## Let's take a look at the performance of the trained policy and output a video of the agent. ```python from sample_factory.enjoy import enjoy cfg = parse_vizdoom_cfg( argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10"], evaluation=True ) status = enjoy(cfg) ``` ## Now lets visualize the performance of the agent ```python from base64 import b64encode from IPython.display import HTML mp4 = open("/content/train_dir/default_experiment/replay.mp4", "rb").read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML( """ <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url ) ``` The agent has learned something, but its performance could be better. We would clearly need to train for longer. But let's upload this model to the Hub. ## Now lets upload your checkpoint and video to the Hugging Face Hub To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and get your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> - Copy the token - Run the cell below and paste the token If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` ```python from huggingface_hub import notebook_login notebook_login() !git config --global credential.helper store ``` ```python from sample_factory.enjoy import enjoy hf_username = "ThomasSimonini" # insert your HuggingFace username here cfg = parse_vizdoom_cfg( argv=[ f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10", "--max_num_frames=100000", "--push_to_hub", f"--hf_repository={hf_username}/rl_course_vizdoom_health_gathering_supreme", ], evaluation=True, ) status = enjoy(cfg) ``` ## Let's load another model This agent's performance was good, but we can do better! Let's download and visualize an agent trained for 10B timesteps from the hub. 
```bash #download the agent from the hub python -m sample_factory.huggingface.load_from_hub -r edbeeching/doom_health_gathering_supreme_2222 -d ./train_dir ``` ```bash ls train_dir/doom_health_gathering_supreme_2222 ``` ```python env = "doom_health_gathering_supreme" cfg = parse_vizdoom_cfg( argv=[ f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10", "--experiment=doom_health_gathering_supreme_2222", "--train_dir=train_dir", ], evaluation=True, ) status = enjoy(cfg) ``` ```python mp4 = open("/content/train_dir/doom_health_gathering_supreme_2222/replay.mp4", "rb").read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML( """ <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url ) ``` ## Some additional challenges 🏆: Doom Deathmatch Training an agent to play a Doom deathmatch **takes many hours on a more beefy machine than is available in Colab**. Fortunately, we have have **already trained an agent in this scenario and it is available in the 🤗 Hub!** Let’s download the model and visualize the agent’s performance. ```python # Download the agent from the hub python -m sample_factory.huggingface.load_from_hub -r edbeeching/doom_deathmatch_bots_2222 -d ./train_dir ``` Given the agent plays for a long time the video generation can take **10 minutes**. ```python from sample_factory.enjoy import enjoy register_vizdoom_components() env = "doom_deathmatch_bots" cfg = parse_vizdoom_cfg( argv=[ f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=1", "--experiment=doom_deathmatch_bots_2222", "--train_dir=train_dir", ], evaluation=True, ) status = enjoy(cfg) mp4 = open("/content/train_dir/doom_deathmatch_bots_2222/replay.mp4", "rb").read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML( """ <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url ) ``` You **can try to train your agent in this environment** using the code above, but not on colab. **Good luck 🤞** If you prefer an easier scenario, **why not try training in another ViZDoom scenario such as `doom_deadly_corridor` or `doom_defend_the_center`.** --- This concludes the last unit. But we are not finished yet! 🤗 The following **bonus section include some of the most interesting, advanced, and cutting edge work in Deep Reinforcement Learning**. ## Keep learning, stay awesome 🤗
hf_public_repos/deep-rl-class/units/en/unit8/clipped-surrogate-objective.mdx
# Introducing the Clipped Surrogate Objective Function ## Recap: The Policy Objective Function Let’s remember what the objective is to optimize in Reinforce: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/lpg.jpg" alt="Reinforce"/> The idea was that by taking a gradient ascent step on this function (equivalent to taking gradient descent of the negative of this function), we would **push our agent to take actions that lead to higher rewards and avoid harmful actions.** However, the problem comes from the step size: - Too small, **the training process was too slow** - Too high, **there was too much variability in the training** With PPO, the idea is to constrain our policy update with a new objective function called the *Clipped surrogate objective function* that **will constrain the policy change in a small range using a clip.** This new function **is designed to avoid destructively large weights updates** : <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ppo-surrogate.jpg" alt="PPO surrogate function"/> Let’s study each part to understand how it works. ## The Ratio Function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ratio1.jpg" alt="Ratio"/> This ratio is calculated as follows: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ratio2.jpg" alt="Ratio"/> It’s the probability of taking action \\( a_t \\) at state \\( s_t \\) in the current policy, divided by the same for the previous policy. As we can see, \\( r_t(\theta) \\) denotes the probability ratio between the current and old policy: - If \\( r_t(\theta) > 1 \\), the **action \\( a_t \\) at state \\( s_t \\) is more likely in the current policy than the old policy.** - If \\( r_t(\theta) \\) is between 0 and 1, the **action is less likely for the current policy than for the old one**. So this probability ratio is an **easy way to estimate the divergence between old and current policy.** ## The unclipped part of the Clipped Surrogate Objective function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/unclipped1.jpg" alt="PPO"/> This ratio **can replace the log probability we use in the policy objective function**. This gives us the left part of the new objective function: multiplying the ratio by the advantage. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/unclipped2.jpg" alt="PPO"/> <figcaption><a href="https://arxiv.org/pdf/1707.06347.pdf">Proximal Policy Optimization Algorithms</a></figcaption> </figure> However, without a constraint, if the action taken is much more probable in our current policy than in our former, **this would lead to a significant policy gradient step** and, therefore, an **excessive policy update.** ## The clipped Part of the Clipped Surrogate Objective function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/clipped.jpg" alt="PPO"/> Consequently, we need to constrain this objective function by penalizing changes that lead to a ratio far away from 1 (in the paper, the ratio can only vary from 0.8 to 1.2). 
**By clipping the ratio, we ensure that the policy update is not too large, because the current policy can't be too different from the old one.**

To do that, we have two solutions:

- *TRPO (Trust Region Policy Optimization)* uses KL divergence constraints outside the objective function to constrain the policy update. But this method **is complicated to implement and takes more computation time.**
- *PPO* clips the probability ratio directly in the objective function with its **Clipped surrogate objective function.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/clipped.jpg" alt="PPO"/>

This clipped part is a version where \\( r_t(\theta) \\) is clipped between \\( [1 - \epsilon, 1 + \epsilon] \\).

With the Clipped Surrogate Objective function, we have two probability ratios, one non-clipped and one clipped to a range of \\( [1 - \epsilon, 1 + \epsilon] \\). Epsilon is a hyperparameter that helps us to define this clip range (in the paper, \\( \epsilon = 0.2 \\)).

Then, we take the minimum of the clipped and non-clipped objective, **so the final objective is a lower bound (pessimistic bound) of the unclipped objective.**

Taking the minimum of the clipped and non-clipped objective means **we'll select either the clipped or the non-clipped objective based on the ratio and advantage situation**.
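To make this concrete, here is a short sketch in PyTorch of how the Clipped Surrogate Objective is typically computed from log-probabilities and advantages. It mirrors the policy-loss lines of the CleanRL-style `ppo.py` from the hands-on; the function and variable names are illustrative:

```python
import torch

def clipped_surrogate_loss(new_logprob, old_logprob, advantages, clip_coef=0.2):
    # r_t(theta) = pi_theta(a_t | s_t) / pi_theta_old(a_t | s_t), computed in log space for stability
    ratio = (new_logprob - old_logprob).exp()

    # Unclipped and clipped terms, negated because we minimize a loss with gradient descent
    pg_loss1 = -advantages * ratio
    pg_loss2 = -advantages * torch.clamp(ratio, 1 - clip_coef, 1 + clip_coef)

    # Taking the max of the negated terms == taking the min of the objectives:
    # the result is a pessimistic (lower) bound of the unclipped objective
    return torch.max(pg_loss1, pg_loss2).mean()
```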
hf_public_repos/deep-rl-class/units/en/unit8/visualize.mdx
# Visualize the Clipped Surrogate Objective Function Don't worry. **It's normal if this seems complex to handle right now**. But we're going to see what this Clipped Surrogate Objective Function looks like, and this will help you to visualize better what's going on. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/recap.jpg" alt="PPO"/> <figcaption><a href="https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf">Table from "Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick</a></figcaption> </figure> We have six different situations. Remember first that we take the minimum between the clipped and unclipped objectives. ## Case 1 and 2: the ratio is between the range In situations 1 and 2, **the clipping does not apply since the ratio is between the range** \\( [1 - \epsilon, 1 + \epsilon] \\) In situation 1, we have a positive advantage: the **action is better than the average** of all the actions in that state. Therefore, we should encourage our current policy to increase the probability of taking that action in that state. Since the ratio is between intervals, **we can increase our policy's probability of taking that action at that state.** In situation 2, we have a negative advantage: the action is worse than the average of all actions at that state. Therefore, we should discourage our current policy from taking that action in that state. Since the ratio is between intervals, **we can decrease the probability that our policy takes that action at that state.** ## Case 3 and 4: the ratio is below the range <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/recap.jpg" alt="PPO"/> <figcaption><a href="https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf">Table from "Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick</a></figcaption> </figure> If the probability ratio is lower than \\( [1 - \epsilon] \\), the probability of taking that action at that state is much lower than with the old policy. If, like in situation 3, the advantage estimate is positive (A>0), then **you want to increase the probability of taking that action at that state.** But if, like situation 4, the advantage estimate is negative, **we don't want to decrease further** the probability of taking that action at that state. Therefore, the gradient is = 0 (since we're on a flat line), so we don't update our weights. ## Case 5 and 6: the ratio is above the range <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/recap.jpg" alt="PPO"/> <figcaption><a href="https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf">Table from "Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick</a></figcaption> </figure> If the probability ratio is higher than \\( [1 + \epsilon] \\), the probability of taking that action at that state in the current policy is **much higher than in the former policy.** If, like in situation 5, the advantage is positive, **we don't want to get too greedy**. We already have a higher probability of taking that action at that state than the former policy. 
Therefore, the gradient is 0 (since we're on a flat line), so we don't update our weights.

If, like in situation 6, the advantage is negative, we want to decrease the probability of taking that action at that state.

So if we recap, **we only update the policy with the unclipped objective part**. When the minimum is the clipped objective part, we don't update our policy weights since the gradient will equal 0.

So we update our policy only if:

- Our ratio is in the range \\( [1 - \epsilon, 1 + \epsilon] \\)
- Our ratio is outside the range, but **the advantage leads to getting closer to the range**
    - Being below the range, but the advantage is > 0
    - Being above the range, but the advantage is < 0

**You might wonder why, when the minimum is the clipped ratio, the gradient is 0.** When the ratio is clipped, the derivative in this case will not be the derivative of \\( r_t(\theta) * A_t \\) but the derivative of either \\( (1 - \epsilon)* A_t\\) or the derivative of \\( (1 + \epsilon)* A_t\\), both of which are 0.

To summarize, thanks to this clipped surrogate objective, **we restrict the range in which the current policy can vary from the old one**, because we remove the incentive for the probability ratio to move outside of the interval: the clip forces the gradient to be zero. If the ratio is > \\( 1 + \epsilon \\) or < \\( 1 - \epsilon \\), the gradient will be equal to 0.

The final Clipped Surrogate Objective Loss for PPO Actor-Critic style looks like this; it's a combination of the Clipped Surrogate Objective function, the Value Loss Function and an Entropy bonus:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ppo-objective.jpg" alt="PPO objective"/>

That was quite complex. Take time to understand these situations by looking at the table and the graph. **You must understand why this makes sense.**

If you want to go deeper, the best resource is the article ["Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick, especially part 3.4](https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf).
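If it helps, you can also check the table numerically. The sketch below (plain PyTorch, with illustrative numbers and \\( \epsilon = 0.2 \\), i.e. the range [0.8, 1.2]) evaluates the clipped objective for one (ratio, advantage) pair per case and shows that whenever the clipped term is the minimum, the gradient with respect to the ratio is zero:

```python
import torch

eps = 0.2  # clip range [0.8, 1.2], as in the paper

cases = [
    (1.0,  1.0),  # case 1: ratio in range, A > 0 -> gradient flows
    (1.0, -1.0),  # case 2: ratio in range, A < 0 -> gradient flows
    (0.5,  1.0),  # case 3: ratio below range, A > 0 -> unclipped term is the min, gradient flows
    (0.5, -1.0),  # case 4: ratio below range, A < 0 -> clipped term is the min, gradient = 0
    (1.5,  1.0),  # case 5: ratio above range, A > 0 -> clipped term is the min, gradient = 0
    (1.5, -1.0),  # case 6: ratio above range, A < 0 -> unclipped term is the min, gradient flows
]

for r, advantage in cases:
    ratio = torch.tensor(r, requires_grad=True)
    objective = torch.min(ratio * advantage, torch.clamp(ratio, 1 - eps, 1 + eps) * advantage)
    objective.backward()
    print(f"ratio={r:.1f}, A={advantage:+.1f} -> objective={objective.item():+.2f}, grad={ratio.grad.item():+.2f}")
```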
hf_public_repos/deep-rl-class/units/en/unit8/conclusion.mdx
# Conclusion [[Conclusion]]

That’s all for today. Congrats on finishing this unit and the tutorial!

The best way to learn is to practice and try stuff. **Why not improve the implementation to handle frames as input?**

See you in the second part of this Unit 🔥

## Keep Learning, Stay awesome 🤗
hf_public_repos/deep-rl-class/units/en/unit8/additional-readings.mdx
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. ## PPO Explained - [Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization by Daniel Bick](https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf) - [What is the way to understand Proximal Policy Optimization Algorithm in RL?](https://stackoverflow.com/questions/46422845/what-is-the-way-to-understand-proximal-policy-optimization-algorithm-in-rl) - [Foundations of Deep RL Series, L4 TRPO and PPO by Pieter Abbeel](https://youtu.be/KjWF8VIMGiY) - [OpenAI PPO Blogpost](https://openai.com/blog/openai-baselines-ppo/) - [Spinning Up RL PPO](https://spinningup.openai.com/en/latest/algorithms/ppo.html) - [Paper Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347) ## PPO Implementation details - [The 37 Implementation Details of Proximal Policy Optimization](https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/) - [Part 1 of 3 — Proximal Policy Optimization Implementation: 11 Core Implementation Details](https://www.youtube.com/watch?v=MEt6rrxH8W4) ## Importance Sampling - [Importance Sampling Explained](https://youtu.be/C3p2wI4RAi8)
hf_public_repos/deep-rl-class/units/en/unit1/two-methods.mdx
# Two main approaches for solving RL problems [[two-methods]] <Tip> Now that we learned the RL framework, how do we solve the RL problem? </Tip> In other words, how do we build an RL agent that can **select the actions that maximize its expected cumulative reward?** ## The Policy π: the agent’s brain [[policy]] The Policy **π** is the **brain of our Agent**, it’s the function that tells us what **action to take given the state we are in.** So it **defines the agent’s behavior** at a given time. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_1.jpg" alt="Policy" /> <figcaption>Think of policy as the brain of our agent, the function that will tell us the action to take given a state</figcaption> </figure> This Policy **is the function we want to learn**, our goal is to find the optimal policy π\*, the policy that **maximizes expected return** when the agent acts according to it. We find this π\* **through training.** There are two approaches to train our agent to find this optimal policy π\*: - **Directly,** by teaching the agent to learn which **action to take,** given the current state: **Policy-Based Methods.** - Indirectly, **teach the agent to learn which state is more valuable** and then take the action that **leads to the more valuable states**: Value-Based Methods. ## Policy-Based Methods [[policy-based]] In Policy-Based methods, **we learn a policy function directly.** This function will define a mapping from each state to the best corresponding action. Alternatively, it could define **a probability distribution over the set of possible actions at that state.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_2.jpg" alt="Policy" /> <figcaption>As we can see here, the policy (deterministic) <b>directly indicates the action to take for each step.</b></figcaption> </figure> We have two types of policies: - *Deterministic*: a policy at a given state **will always return the same action.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_3.jpg" alt="Policy"/> <figcaption>action = policy(state)</figcaption> </figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_4.jpg" alt="Policy" width="100%"/> - *Stochastic*: outputs **a probability distribution over actions.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_5.jpg" alt="Policy"/> <figcaption>policy(actions | state) = probability distribution over the set of actions given the current state</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy-based.png" alt="Policy Based"/> <figcaption>Given an initial state, our stochastic policy will output probability distributions over the possible actions at that state.</figcaption> </figure> If we recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/pbm_1.jpg" alt="Pbm recap" width="100%" /> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/pbm_2.jpg" alt="Pbm recap" width="100%" /> ## Value-based methods [[value-based]] In value-based methods, instead of learning a policy function, we **learn a value function** that maps a state to the expected 
value **of being at that state.**

The value of a state is the **expected discounted return** the agent can get if it **starts in that state, and then acts according to our policy.**

“Act according to our policy” just means that our policy is **“going to the state with the highest value”.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/value_1.jpg" alt="Value based RL" width="100%" />

Here we see that our value function **defines a value for each possible state.**

<figure>
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/value_2.jpg" alt="Value based RL"/>
  <figcaption>Thanks to our value function, at each step our policy will select the state with the biggest value defined by the value function: -7, then -6, then -5 (and so on) to attain the goal.</figcaption>
</figure>

If we recap:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/vbm_1.jpg" alt="Vbm recap" width="100%" />
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/vbm_2.jpg" alt="Vbm recap" width="100%" />
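To make the distinction a bit more concrete, here is a rough sketch in Python (not code from the course; the table sizes and the `next_state_fn` transition helper are placeholders) of what each family learns:

```python
import numpy as np

n_states, n_actions = 16, 4

# Policy-based: we learn the policy itself.
# A stochastic policy maps each state to a probability distribution over actions;
# a deterministic policy always returns the same action for a given state.
policy_table = np.random.rand(n_states, n_actions)
policy_table /= policy_table.sum(axis=1, keepdims=True)  # each row sums to 1

def deterministic_policy(state):
    return int(np.argmax(policy_table[state]))

def stochastic_policy(state):
    return int(np.random.choice(n_actions, p=policy_table[state]))

# Value-based: we learn a value function V(s) instead, and the policy simply
# moves toward the reachable state with the highest value.
value_table = np.zeros(n_states)

def value_based_policy(state, next_state_fn):
    # next_state_fn(state, action) -> successor state; assumed known and deterministic here
    return int(np.argmax([value_table[next_state_fn(state, a)] for a in range(n_actions)]))
```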
hf_public_repos/deep-rl-class/units/en/unit1/what-is-rl.mdx
# What is Reinforcement Learning? [[what-is-reinforcement-learning]] To understand Reinforcement Learning, let’s start with the big picture. ## The big picture [[the-big-picture]] The idea behind Reinforcement Learning is that an agent (an AI) will learn from the environment by **interacting with it** (through trial and error) and **receiving rewards** (negative or positive) as feedback for performing actions. Learning from interactions with the environment **comes from our natural experiences.** For instance, imagine putting your little brother in front of a video game he never played, giving him a controller, and leaving him alone. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/Illustration_1.jpg" alt="Illustration_1" width="100%"> Your brother will interact with the environment (the video game) by pressing the right button (action). He got a coin, that’s a +1 reward. It’s positive, he just understood that in this game **he must get the coins.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/Illustration_2.jpg" alt="Illustration_2" width="100%"> But then, **he presses the right button again** and he touches an enemy. He just died, so that's a -1 reward. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/Illustration_3.jpg" alt="Illustration_3" width="100%"> By interacting with his environment through trial and error, your little brother understands that **he needs to get coins in this environment but avoid the enemies.** **Without any supervision**, the child will get better and better at playing the game. That’s how humans and animals learn, **through interaction.** Reinforcement Learning is just a **computational approach of learning from actions.** ### A formal definition [[a-formal-definition]] We can now make a formal definition: <Tip> Reinforcement learning is a framework for solving control tasks (also called decision problems) by building agents that learn from the environment by interacting with it through trial and error and receiving rewards (positive or negative) as unique feedback. </Tip> But how does Reinforcement Learning work?
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/hands-on.mdx
# Train your first Deep Reinforcement Learning Agent 🤖 [[hands-on]] <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit1/unit1.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> Now that you've studied the basics of Reinforcement Learning, you’re ready to train your first agent and share it with the community through the Hub 🔥: A Lunar Lander agent that will learn to land correctly on the Moon 🌕 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/lunarLander.gif" alt="LunarLander"> And finally, you'll **upload this trained agent to the Hugging Face Hub 🤗, a free, open platform where people can share ML models, datasets, and demos.** Thanks to our <a href="https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard">leaderboard</a>, you'll be able to compare your results with other classmates and exchange best practices to improve your agent's scores. Who will win the challenge for Unit 1 🏆? To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push your trained model to the Hub and **get a result of >= 200**. To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model; **the result = mean_reward - std of reward**. **If you don't find your model, go to the bottom of the page and click on the refresh button.** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course So let's get started! 🚀 **To start the hands-on, click on the Open In Colab button** 👇: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit1/unit1.ipynb) We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers. By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments. # Unit 1: Train your first Deep Reinforcement Learning Agent 🤖 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Unit 1 thumbnail" width="100%"> In this notebook, you'll train your **first Deep Reinforcement Learning agent**: a Lunar Lander agent that will learn to **land correctly on the Moon 🌕**. You'll use [Stable-Baselines3](https://stable-baselines3.readthedocs.io/en/master/), a Deep Reinforcement Learning library, to train the agent, share it with the community, and experiment with different configurations. ### The environment 🎮 - [LunarLander-v2](https://gymnasium.farama.org/environments/box2d/lunar_lander/) ### The library used 📚 - [Stable-Baselines3](https://stable-baselines3.readthedocs.io/en/master/) We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the Github Repo](https://github.com/huggingface/deep-rl-class/issues).
## Objectives of this notebook 🏆 At the end of the notebook, you will: - Be able to use **Gymnasium**, the environment library. - Be able to use **Stable-Baselines3**, the deep reinforcement learning library. - Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥. ## This notebook is from Deep Reinforcement Learning Course <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/deep-rl-course-illustration.jpg" alt="Deep RL Course illustration"/> In this free course, you will: - 📖 Study Deep Reinforcement Learning in **theory and practice**. - 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0. - 🤖 Train **agents in unique environments** - 🎓 **Earn a certificate of completion** by completing 80% of the assignments. And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course Don’t forget to **<a href="http://eepurl.com/ic5ZUD">sign up to the course</a>** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).** The best way to keep in touch and ask questions is **to join our discord server** to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 ## Prerequisites 🏗️ Before diving into the notebook, you need to: 🔲 📝 **[Read Unit 0](https://huggingface.co/deep-rl-course/unit0/introduction)** that gives you all the **information about the course and helps you to onboard** 🤗 🔲 📚 **Develop an understanding of the foundations of Reinforcement learning** (MC, TD, Rewards hypothesis...) by [reading Unit 1](https://huggingface.co/deep-rl-course/unit1/introduction). ## A small recap of Deep Reinforcement Learning 📚 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> Let's do a small recap on what we learned in the first Unit: - Reinforcement Learning is a **computational approach to learning from actions**. We build an agent that learns from the environment by **interacting with it through trial and error** and receiving rewards (negative or positive) as feedback. - The goal of any RL agent is to **maximize its expected cumulative reward** (also called expected return) because RL is based on the _reward hypothesis_, which is that all goals can be described as the maximization of an expected cumulative reward. - The RL process is a **loop that outputs a sequence of state, action, reward, and next state**. - To calculate the expected cumulative reward (expected return), **we discount the rewards**: the rewards that come sooner (at the beginning of the game) are more probable to happen since they are more predictable than the long-term future reward. - To solve an RL problem, you want to **find an optimal policy**; the policy is the "brain" of your AI that will tell us what action to take given a state. The optimal one is the one that gives you the actions that max the expected return. There are **two** ways to find your optimal policy: - By **training your policy directly**: policy-based methods. - By **training a value function** that tells us the expected return the agent will get at each state and use this function to define our policy: value-based methods. 
- Finally, we spoke about Deep RL because **we introduce deep neural networks to estimate the action to take (policy-based) or to estimate the value of a state (value-based) hence the name "deep."** # Let's train our first Deep Reinforcement Learning agent and upload it to the Hub 🚀 ## Get a certificate 🎓 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push your trained model to the Hub and **get a result of >= 200**. To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> ## Install dependencies and create a virtual screen 🔽 The first step is to install the dependencies, we’ll install multiple ones. - `gymnasium[box2d]`: Contains the LunarLander-v2 environment 🌛 - `stable-baselines3[extra]`: The deep reinforcement learning library. - `huggingface_sb3`: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub. To make things easier, we created a script to install all these dependencies. ```bash apt install swig cmake ``` ```bash pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit1/requirements-unit1.txt ``` During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). Hence the following cell will install virtual screen libraries and create and run a virtual screen 🖥 ```bash sudo apt-get update apt install python-opengl apt install ffmpeg apt install xvfb pip3 install pyvirtualdisplay ``` To make sure the new installed libraries are used, **sometimes it's required to restart the notebook runtime**. The next cell will force the **runtime to crash, so you'll need to connect again and run the code starting from here**. Thanks to this trick, **we will be able to run our virtual screen.** ```python import os os.kill(os.getpid(), 9) ``` ```python # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() ``` ## Import the packages 📦 One additional library we import is huggingface_hub **to be able to upload and download trained models from the hub**. The Hugging Face Hub 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations and other features that will allow you to easily collaborate with others. 
You can see here all the Deep reinforcement Learning models available here👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning&sort=downloads ```python import gymnasium from huggingface_sb3 import load_from_hub, package_to_hub from huggingface_hub import ( notebook_login, ) # To log to our Hugging Face account to be able to upload models to the Hub. from stable_baselines3 import PPO from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.monitor import Monitor ``` ## Understand Gymnasium and how it works 🤖 🏋 The library containing our environment is called Gymnasium. **You'll use Gymnasium a lot in Deep Reinforcement Learning.** Gymnasium is the **new version of Gym library** [maintained by the Farama Foundation](https://farama.org/). The Gymnasium library provides two things: - An interface that allows you to **create RL environments**. - A **collection of environments** (gym-control, atari, box2D...). Let's look at an example, but first let's recall the RL loop. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> At each step: - Our Agent receives a **state (S0)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state (S0),** the Agent takes an **action (A0)** — our Agent will move to the right. - The environment transitions to a **new** **state (S1)** — new frame. - The environment gives some **reward (R1)** to the Agent — we’re not dead *(Positive Reward +1)*. With Gymnasium: 1️⃣ We create our environment using `gymnasium.make()` 2️⃣ We reset the environment to its initial state with `observation = env.reset()` At each step: 3️⃣ Get an action using our model (in our example we take a random action) 4️⃣ Using `env.step(action)`, we perform this action in the environment and get - `observation`: The new state (st+1) - `reward`: The reward we get after executing the action - `terminated`: Indicates if the episode terminated (agent reach the terminal state) - `truncated`: Introduced with this new version, it indicates a timelimit or if an agent go out of bounds of the environment for instance. - `info`: A dictionary that provides additional information (depends on the environment). 
For more explanations check this 👉 https://gymnasium.farama.org/api/env/#gymnasium.Env.step If the episode is terminated: - We reset the environment to its initial state with `observation = env.reset()` **Let's look at an example!** Make sure to read the code ```python import gymnasium as gym # First, we create our environment called LunarLander-v2 env = gym.make("LunarLander-v2") # Then we reset this environment observation, info = env.reset() for _ in range(20): # Take a random action action = env.action_space.sample() print("Action taken:", action) # Do this action in the environment and get # next_state, reward, terminated, truncated and info observation, reward, terminated, truncated, info = env.step(action) # If the game is terminated (in our case we land, crashed) or truncated (timeout) if terminated or truncated: # Reset the environment print("Environment is reset") observation, info = env.reset() env.close() ``` ## Create the LunarLander environment 🌛 and understand how it works ### The environment 🎮 In this first tutorial, we’re going to train our agent, a [Lunar Lander](https://gymnasium.farama.org/environments/box2d/lunar_lander/), **to land correctly on the moon**. To do that, the agent needs to learn **to adapt its speed and position (horizontal, vertical, and angular) to land correctly.** --- 💡 A good habit when you start to use an environment is to check its documentation 👉 https://gymnasium.farama.org/environments/box2d/lunar_lander/ --- Let's see what the Environment looks like: ```python # We create our environment with gym.make("<name_of_the_environment>") env = gym.make("LunarLander-v2") env.reset() print("_____OBSERVATION SPACE_____ \n") print("Observation Space Shape", env.observation_space.shape) print("Sample observation", env.observation_space.sample()) # Get a random observation ``` We see with `Observation Space Shape (8,)` that the observation is a vector of size 8, where each value contains different information about the lander: - Horizontal pad coordinate (x) - Vertical pad coordinate (y) - Horizontal speed (x) - Vertical speed (y) - Angle - Angular speed - If the left leg contact point has touched the land - If the right leg contact point has touched the land ```python print("\n _____ACTION SPACE_____ \n") print("Action Space Shape", env.action_space.n) print("Action Space Sample", env.action_space.sample()) # Take a random action ``` The action space (the set of possible actions the agent can take) is discrete with 4 actions available 🎮: - Action 0: Do nothing, - Action 1: Fire left orientation engine, - Action 2: Fire the main engine, - Action 3: Fire right orientation engine. Reward function (the function that will gives a reward at each timestep) 💰: After every step a reward is granted. The total reward of an episode is the **sum of the rewards for all the steps within that episode**. For each step, the reward: - Is increased/decreased the closer/further the lander is to the landing pad. - Is increased/decreased the slower/faster the lander is moving. - Is decreased the more the lander is tilted (angle not horizontal). - Is increased by 10 points for each leg that is in contact with the ground. - Is decreased by 0.03 points each frame a side engine is firing. - Is decreased by 0.3 points each frame the main engine is firing. 
The episode receives an **additional reward of -100 or +100 points for crashing or landing safely respectively.** An episode is **considered a solution if it scores at least 200 points.** #### Vectorized Environment - We create a vectorized environment (a method for stacking multiple independent environments into a single environment) of 16 environments. This way, **we'll have more diverse experiences during the training.** ```python # Create the environment env = make_vec_env("LunarLander-v2", n_envs=16) ``` ## Create the Model 🤖 - We have studied our environment and we understood the problem: **being able to land the Lunar Lander on the Landing Pad correctly by controlling the left, right and main orientation engines**. Now let's build the algorithm we're going to use to solve this problem 🚀. - To do so, we're going to use our first Deep RL library, [Stable Baselines3 (SB3)](https://stable-baselines3.readthedocs.io/en/master/). - SB3 is a set of **reliable implementations of reinforcement learning algorithms in PyTorch**. --- 💡 A good habit when using a new library is to dive into the documentation first: https://stable-baselines3.readthedocs.io/en/master/ and then try some tutorials. ---- <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/sb3.png" alt="Stable Baselines3"> To solve this problem, we're going to use SB3 **PPO**. [PPO (aka Proximal Policy Optimization) is one of the SOTA (state of the art) Deep Reinforcement Learning algorithms that you'll study during this course](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html#example). PPO is a combination of: - *Value-based reinforcement learning method*: learning an action-value function that will tell us the **most valuable action to take given a state**. - *Policy-based reinforcement learning method*: learning a policy that will **give us a probability distribution over actions**. Stable-Baselines3 is easy to set up: 1️⃣ You **create your environment** (in our case it was done above) 2️⃣ You define the **model you want to use and instantiate this model** `model = PPO("MlpPolicy")` 3️⃣ You **train the agent** with `model.learn` and define the number of training timesteps ``` # Create environment env = gym.make('LunarLander-v2') # Instantiate the agent model = PPO('MlpPolicy', env, verbose=1) # Train the agent model.learn(total_timesteps=int(2e5)) ``` ```python # TODO: Define a PPO MlpPolicy architecture # We use MultiLayerPerceptron (MLPPolicy) because the input is a vector, # if we had frames as input we would use CnnPolicy model = ``` #### Solution ```python # SOLUTION # We added some parameters to accelerate the training model = PPO( policy="MlpPolicy", env=env, n_steps=1024, batch_size=64, n_epochs=4, gamma=0.999, gae_lambda=0.98, ent_coef=0.01, verbose=1, ) ``` ## Train the PPO agent 🏃 - Let's train our agent for 1,000,000 timesteps; don't forget to use the GPU on Colab. It will take approximately 20 minutes, but you can use fewer timesteps if you just want to try it out.
- During the training, take a ☕ break you deserved it 🤗 ```python # TODO: Train it for 1,000,000 timesteps # TODO: Specify file name for model and save the model to file model_name = "" ``` #### Solution ```python # SOLUTION # Train it for 1,000,000 timesteps model.learn(total_timesteps=1000000) # Save the model model_name = "ppo-LunarLander-v2" model.save(model_name) ``` ## Evaluate the agent 📈 - Remember to wrap the environment in a [Monitor](https://stable-baselines3.readthedocs.io/en/master/common/monitor.html). - Now that our Lunar Lander agent is trained 🚀, we need to **check its performance**. - Stable-Baselines3 provides a method to do that: `evaluate_policy`. - To fill that part you need to [check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/examples.html#basic-usage-training-saving-loading) - In the next step, we'll see **how to automatically evaluate and share your agent to compete in a leaderboard, but for now let's do it ourselves** 💡 When you evaluate your agent, you should not use your training environment but create an evaluation environment. ```python # TODO: Evaluate the agent # Create a new environment for evaluation eval_env = # Evaluate the model with 10 evaluation episodes and deterministic=True mean_reward, std_reward = # Print the results ``` #### Solution ```python # @title eval_env = Monitor(gym.make("LunarLander-v2")) mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True) print(f"mean_reward={mean_reward:.2f} +/- {std_reward}") ``` - In my case, I got a mean reward is `200.20 +/- 20.80` after training for 1 million steps, which means that our lunar lander agent is ready to land on the moon 🌛🥳. ## Publish our trained model on the Hub 🔥 Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code. 📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/main#hugging-face--x-stable-baselines3-v20 Here's an example of a Model Card (with Space Invaders): By using `package_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the hub**. This way: - You can **showcase our work** 🔥 - You can **visualize your agent playing** 👀 - You can **share with the community an agent that others can use** 💾 - You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account on Hugging Face ➡ https://huggingface.co/join 2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> - Copy the token - Run the cell below and paste the token ```python notebook_login() !git config --global credential.helper store ``` If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function Let's fill the `package_to_hub` function: - `model`: our trained model. 
- `model_name`: the name of the trained model that we defined in `model_save` - `model_architecture`: the model architecture we used, in our case PPO - `env_id`: the name of the environment, in our case `LunarLander-v2` - `eval_env`: the evaluation environment defined in eval_env - `repo_id`: the name of the Hugging Face Hub Repository that will be created/updated `(repo_id = {username}/{repo_name})` 💡 **A good name is `{username}/{model_architecture}-{env_id}` ** - `commit_message`: message of the commit ```python import gymnasium as gym from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.env_util import make_vec_env from huggingface_sb3 import package_to_hub ## TODO: Define a repo_id ## repo_id is the id of the model repository from the Hugging Face Hub (repo_id = {organization}/{repo_name} for instance ThomasSimonini/ppo-LunarLander-v2 repo_id = # TODO: Define the name of the environment env_id = # Create the evaluation env and set the render_mode="rgb_array" eval_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")]) # TODO: Define the model architecture we used model_architecture = "" ## TODO: Define the commit message commit_message = "" # method save, evaluate, generate a model card and record a replay video of your agent before pushing the repo to the hub package_to_hub(model=model, # Our trained model model_name=model_name, # The name of our trained model model_architecture=model_architecture, # The model architecture we used: in our case PPO env_id=env_id, # Name of the environment eval_env=eval_env, # Evaluation Environment repo_id=repo_id, # id of the model repository from the Hugging Face Hub (repo_id = {organization}/{repo_name} for instance ThomasSimonini/ppo-LunarLander-v2 commit_message=commit_message) ``` #### Solution ```python import gymnasium as gym from stable_baselines3 import PPO from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.env_util import make_vec_env from huggingface_sb3 import package_to_hub # PLACE the variables you've just defined two cells above # Define the name of the environment env_id = "LunarLander-v2" # TODO: Define the model architecture we used model_architecture = "PPO" ## Define a repo_id ## repo_id is the id of the model repository from the Hugging Face Hub (repo_id = {organization}/{repo_name} for instance ThomasSimonini/ppo-LunarLander-v2 ## CHANGE WITH YOUR REPO ID repo_id = "ThomasSimonini/ppo-LunarLander-v2" # Change with your repo id, you can't push with mine 😄 ## Define the commit message commit_message = "Upload PPO LunarLander-v2 trained agent" # Create the evaluation env and set the render_mode="rgb_array" eval_env = DummyVecEnv([lambda: Monitor(gym.make(env_id, render_mode="rgb_array"))]) # PLACE the package_to_hub function you've just filled here package_to_hub( model=model, # Our trained model model_name=model_name, # The name of our trained model model_architecture=model_architecture, # The model architecture we used: in our case PPO env_id=env_id, # Name of the environment eval_env=eval_env, # Evaluation Environment repo_id=repo_id, # id of the model repository from the Hugging Face Hub (repo_id = {organization}/{repo_name} for instance ThomasSimonini/ppo-LunarLander-v2 commit_message=commit_message, ) ``` Congrats 🥳 you've just trained and uploaded your first Deep Reinforcement Learning agent. The script above should have displayed a link to a model repository such as https://huggingface.co/osanseviero/test_sb3. 
When you go to this link, you can: * See a video preview of your agent at the right. * Click "Files and versions" to see all the files in the repository. * Click "Use in stable-baselines3" to get a code snippet that shows how to load the model. * See a model card (`README.md` file), which gives a description of the model. Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent. Compare the results of your LunarLander-v2 with your classmates using the leaderboard 🏆 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard ## Load a saved LunarLander model from the Hub 🤗 Thanks to [ironbar](https://github.com/ironbar) for the contribution. Loading a saved model from the Hub is really easy. You go to https://huggingface.co/models?library=stable-baselines3 to see the list of all the Stable-baselines3 saved models. 1. You select one and copy its repo_id <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit1/copy-id.png" alt="Copy-id"/> 2. Then we just need to use load_from_hub with: - The repo_id - The filename: the saved model inside the repo and its extension (*.zip) Because the model I downloaded from the Hub was trained with Gym (the former version of Gymnasium), we need to install Shimmy, an API conversion tool that will help us run the environment correctly. Shimmy Documentation: https://github.com/Farama-Foundation/Shimmy ```python !pip install shimmy ``` ```python from huggingface_sb3 import load_from_hub repo_id = "Classroom-workshop/assignment2-omar" # The repo_id filename = "ppo-LunarLander-v2.zip" # The model filename.zip # When the model was trained on Python 3.8 the pickle protocol is 5 # But Python 3.6, 3.7 use protocol 4 # In order to get compatibility we need to: # 1. Install pickle5 (we did it at the beginning of the colab) # 2. Create a custom empty object we pass as parameter to PPO.load() custom_objects = { "learning_rate": 0.0, "lr_schedule": lambda _: 0.0, "clip_range": lambda _: 0.0, } checkpoint = load_from_hub(repo_id, filename) model = PPO.load(checkpoint, custom_objects=custom_objects, print_system_info=True) ``` Let's evaluate this agent: ```python # @title eval_env = Monitor(gym.make("LunarLander-v2")) mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True) print(f"mean_reward={mean_reward:.2f} +/- {std_reward}") ``` ## Some additional challenges 🏆 The best way to learn **is to try things on your own**! As you saw, the current agent is not doing great. As a first suggestion, you can train for more steps. With 1,000,000 steps, we saw some great results! In the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) you will find your agents. Can you get to the top? Here are some ideas to do so: * Train more steps * Try different hyperparameters for `PPO`. You can see them at https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html#parameters. * Check the [Stable-Baselines3 documentation](https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html) and try another model such as DQN.
* **Push your newly trained model** on the Hub 🔥 **Compare the results of your LunarLander-v2 with your classmates** using the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) 🏆 Is moon landing too boring for you? Try to **change the environment**: why not use MountainCar-v0, CartPole-v1 or CarRacing-v0? Check how they work [using the gym documentation](https://www.gymlibrary.dev/) and have fun 🎉. ________________________________________________________________________ Congrats on finishing this chapter! That was the biggest one, **and there was a lot of information.** If you still feel confused by all these elements... it's totally normal! **This was the same for me and for all people who studied RL.** Take time to really **grasp the material before continuing and try the additional challenges**. It’s important to master these elements and have a solid foundation. Naturally, during the course, we’re going to dive deeper into these concepts, but **it’s better to have a good understanding of them now before diving into the next chapters.** Next time, in the bonus unit 1, you'll train Huggy the Dog to fetch the stick. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit1/huggy.jpg" alt="Huggy"/> ## Keep learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/summary.mdx
# Summary [[summary]] That was a lot of information! Let's summarize: - Reinforcement Learning is a computational approach of learning from actions. We build an agent that learns from the environment **by interacting with it through trial and error** and receiving rewards (negative or positive) as feedback. - The goal of any RL agent is to maximize its expected cumulative reward (also called expected return) because RL is based on the **reward hypothesis**, which is that **all goals can be described as the maximization of the expected cumulative reward.** - The RL process is a loop that outputs a sequence of **state, action, reward and next state.** - To calculate the expected cumulative reward (expected return), we discount the rewards: the rewards that come sooner (at the beginning of the game) **are more probable to happen since they are more predictable than the long term future reward.** - To solve an RL problem, you want to **find an optimal policy**. The policy is the “brain” of your agent, which will tell us **what action to take given a state.** The optimal policy is the one which **gives you the actions that maximize the expected return.** - There are two ways to find your optimal policy: 1. By training your policy directly: **policy-based methods.** 2. By training a value function that tells us the expected return the agent will get at each state and use this function to define our policy: **value-based methods.** - Finally, we speak about Deep RL because we introduce **deep neural networks to estimate the action to take (policy-based) or to estimate the value of a state (value-based)** hence the name “deep”.
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/rl-framework.mdx
# The Reinforcement Learning Framework [[the-reinforcement-learning-framework]] ## The RL Process [[the-rl-process]] <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> To understand the RL process, let’s imagine an agent learning to play a platform game: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. - The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/sars.jpg" alt="State, Action, Reward, Next State" width="100%"> The agent's goal is to _maximize_ its cumulative reward, **called the expected return.** ## The reward hypothesis: the central idea of Reinforcement Learning [[reward-hypothesis]] ⇒ Why is the goal of the agent to maximize the expected return? Because RL is based on the **reward hypothesis**, which is that all goals can be described as the **maximization of the expected return** (expected cumulative reward). That’s why in Reinforcement Learning, **to have the best behavior,** we aim to learn to take actions that **maximize the expected cumulative reward.** ## Markov Property [[markov-property]] In papers, you’ll see that the RL process is called a **Markov Decision Process** (MDP). We’ll talk again about the Markov Property in the following units. But if you need to remember something today about it, it's this: the Markov Property implies that our agent needs **only the current state to decide** what action to take and **not the history of all the states and actions** they took before. ## Observations/States Space [[obs-space]] Observations/States are the **information our agent gets from the environment.** In the case of a video game, it can be a frame (a screenshot). In the case of the trading agent, it can be the value of a certain stock, etc. There is a differentiation to make between *observation* and *state*, however: - *State s*: is **a complete description of the state of the world** (there is no hidden information). In a fully observed environment. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/chess.jpg" alt="Chess"> <figcaption>In chess game, we receive a state from the environment since we have access to the whole check board information.</figcaption> </figure> In a chess game, we have access to the whole board information, so we receive a state from the environment. In other words, the environment is fully observed. - *Observation o*: is a **partial description of the state.** In a partially observed environment. 
<figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>In Super Mario Bros, we only see the part of the level close to the player, so we receive an observation.</figcaption> </figure> In Super Mario Bros, we only see the part of the level close to the player, so we receive an observation. In Super Mario Bros, we are in a partially observed environment. We receive an observation **since we only see a part of the level.** <Tip> In this course, we use the term "state" to denote both state and observation, but we will make the distinction in implementations. </Tip> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/obs_space_recap.jpg" alt="Obs space recap" width="100%"> ## Action Space [[action-space]] The Action space is the set of **all possible actions in an environment.** The actions can come from a *discrete* or *continuous space*: - *Discrete space*: the number of possible actions is **finite**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>Again, in Super Mario Bros, we have only 5 possible actions: 4 directions and jumping</figcaption> </figure> In Super Mario Bros, we have a finite set of actions since we have only 4 directions and jump. - *Continuous space*: the number of possible actions is **infinite**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/self_driving_car.jpg" alt="Self Driving Car"> <figcaption>A Self Driving Car agent has an infinite number of possible actions since it can turn left 20°, 21,1°, 21,2°, honk, turn right 20°… </figcaption> </figure> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/action_space.jpg" alt="Action space recap" width="100%"> Taking this information into consideration is crucial because it will **have importance when choosing the RL algorithm in the future.** ## Rewards and the discounting [[rewards]] The reward is fundamental in RL because it’s **the only feedback** for the agent. Thanks to it, our agent knows **if the action taken was good or not.** The cumulative reward at each time step **t** can be written as: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_1.jpg" alt="Rewards"> <figcaption>The cumulative reward equals the sum of all rewards in the sequence. </figcaption> </figure> Which is equivalent to: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_2.jpg" alt="Rewards"> <figcaption>The cumulative reward = rt+1 (rt+k+1 = rt+0+1 = rt+1)+ rt+2 (rt+k+1 = rt+1+1 = rt+2) + ... </figcaption> </figure> However, in reality, **we can’t just add them like that.** The rewards that come sooner (at the beginning of the game) **are more likely to happen** since they are more predictable than the long-term future reward. Let’s say your agent is this tiny mouse that can move one tile each time step, and your opponent is the cat (that can move too). 
The mouse's goal is **to eat the maximum amount of cheese before being eaten by the cat.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_3.jpg" alt="Rewards" width="100%"> As we can see in the diagram, **it’s more probable to eat the cheese near us than the cheese close to the cat** (the closer we are to the cat, the more dangerous it is). Consequently, **the reward near the cat, even if it is bigger (more cheese), will be more discounted** since we’re not really sure we’ll be able to eat it. To discount the rewards, we proceed like this: 1. We define a discount rate called gamma. **It must be between 0 and 1.** Most of the time between **0.95 and 0.99**. - The larger the gamma, the smaller the discount. This means our agent **cares more about the long-term reward.** - On the other hand, the smaller the gamma, the bigger the discount. This means our **agent cares more about the short term reward (the nearest cheese).** 2. Then, each reward will be discounted by gamma to the exponent of the time step. As the time step increases, the cat gets closer to us, **so the future reward is less and less likely to happen.** Our discounted expected cumulative reward is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_4.jpg" alt="Rewards" width="100%">
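To connect the formula to code, here is a minimal sketch that computes a discounted return for an arbitrary reward sequence (the rewards and the gamma value below are made up for the example):

```python
# Minimal sketch: discounted return G = r1 + gamma*r2 + gamma^2*r3 + ...
# The reward sequence and gamma are illustrative values only.
gamma = 0.95
rewards = [1, 1, 1, 3, 10]  # small cheeses first, the big pile of cheese at the end

discounted_return = sum(gamma**k * r for k, r in enumerate(rewards))
print(discounted_return)  # later (riskier) rewards contribute less than their face value
```

Lowering gamma shrinks the contribution of the far-away cheese even further, which matches the intuition of caring more about the short-term reward.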
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/deep-rl.mdx
# The “Deep” in Reinforcement Learning [[deep-rl]] <Tip> What we've talked about so far is Reinforcement Learning. But where does the "Deep" come into play? </Tip> Deep Reinforcement Learning introduces **deep neural networks to solve Reinforcement Learning problems** — hence the name “deep”. For instance, in the next unit, we’ll learn about two value-based algorithms: Q-Learning (classic Reinforcement Learning) and then Deep Q-Learning. You’ll see the difference is that, in the first approach, **we use a traditional algorithm** to create a Q table that helps us find what action to take for each state. In the second approach, **we will use a Neural Network** (to approximate the Q value). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/deep.jpg" alt="Value based RL"/> <figcaption>Schema inspired by the Q learning notebook by Udacity </figcaption> </figure> If you are not familiar with Deep Learning you should definitely watch [the FastAI Practical Deep Learning for Coders](https://course.fast.ai) (Free).
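As a rough sketch of the difference (not the course's implementation; the sizes and shapes below are arbitrary), the classic approach stores one value per (state, action) pair in a table, while the deep approach replaces the table with a neural network that approximates those values:

```python
import numpy as np
import torch
import torch.nn as nn

# Classic Q-Learning: one cell per (state, action) pair.
# This only works for small, discrete state spaces.
n_states, n_actions = 16, 4          # e.g. a tiny grid world (illustrative sizes)
q_table = np.zeros((n_states, n_actions))
best_action = q_table[3].argmax()    # best known action for state 3: a simple lookup

# Deep Q-Learning: a neural network approximates Q(state) -> one value per action,
# which scales to large or continuous observation spaces.
q_network = nn.Sequential(
    nn.Linear(8, 64),                # assume an 8-dimensional observation vector
    nn.ReLU(),
    nn.Linear(64, n_actions),
)
observation = torch.randn(1, 8)      # a dummy observation
best_action_deep = q_network(observation).argmax(dim=1)
```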
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/quiz.mdx
# Quiz [[quiz]] The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: What is Reinforcement Learning? <details> <summary>Solution</summary> Reinforcement learning is a **framework for solving control tasks (also called decision problems)** by building agents that learn from the environment by interacting with it through trial and error and **receiving rewards (positive or negative) as unique feedback**. </details> ### Q2: Define the RL Loop <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rl-loop-ex.jpg" alt="Exercise RL Loop"/> At every step: - Our Agent receives ______ from the environment - Based on that ______ the Agent takes an ______ - Our Agent will move to the right - The Environment goes to a ______ - The Environment gives a ______ to the Agent <Question choices={[ { text: "an action a0, action a0, state s0, state s1, reward r1", explain: "At every step: Our Agent receives **state s0** from the environment. Based on that **state s0** the Agent takes an **action a0**. Our Agent will move to the right. The Environment goes to a **new state s1**. The Environment gives **a reward r1** to the Agent." }, { text: "state s0, state s0, action a0, new state s1, reward r1", explain: "", correct: true }, { text: "a state s0, state s0, action a0, state s1, action a1", explain: "At every step: Our Agent receives **state s0** from the environment. Based on that **state s0** the Agent takes an **action a0**. Our Agent will move to the right. The Environment goes to a **new state s1**. The Environment gives **a reward r1** to the Agent." } ]} /> ### Q3: What's the difference between a state and an observation? <Question choices={[ { text: "The state is a complete description of the state of the world (there is no hidden information)", explain: "", correct: true }, { text: "The state is a partial description of the state", explain: "" }, { text: "The observation is a complete description of the state of the world (there is no hidden information)", explain: "" }, { text: "The observation is a partial description of the state", explain: "", correct: true }, { text: "We receive a state when we play with chess environment", explain: "Since we have access to the whole checkboard information.", correct: true }, { text: "We receive an observation when we play with chess environment", explain: "Since we have access to the whole checkboard information." }, { text: "We receive a state when we play with Super Mario Bros", explain: "We only see a part of the level close to the player, so we receive an observation." }, { text: "We receive an observation when we play with Super Mario Bros", explain: "We only see a part of the level close to the player.", correct: true } ]} /> ### Q4: A task is an instance of a Reinforcement Learning problem. What are the two types of tasks? <Question choices={[ { text: "Episodic", explain: "In Episodic task, we have a starting point and an ending point (a terminal state). This creates an episode: a list of States, Actions, Rewards, and new States. 
For instance, think about Super Mario Bros: an episode begin at the launch of a new Mario Level and ending when you’re killed or you reached the end of the level.", correct: true }, { text: "Recursive", explain: "" }, { text: "Adversarial", explain: "" }, { text: "Continuing", explain: "Continuing tasks are tasks that continue forever (no terminal state). In this case, the agent must learn how to choose the best actions and simultaneously interact with the environment.", correct: true } ]} /> ### Q5: What is the exploration/exploitation tradeoff? <details> <summary>Solution</summary> In Reinforcement Learning, we need to **balance how much we explore the environment and how much we exploit what we know about the environment**. - *Exploration* is exploring the environment by **trying random actions in order to find more information about the environment**. - *Exploitation* is **exploiting known information to maximize the reward**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%"> </details> ### Q6: What is a policy? <details> <summary>Solution</summary> - The Policy π **is the brain of our Agent**. It’s the function that tells us what action to take given the state we are in. So it defines the agent’s behavior at a given time. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_1.jpg" alt="Policy"> </details> ### Q7: What are value-based methods? <details> <summary>Solution</summary> - Value-based methods is one of the main approaches for solving RL problems. - In Value-based methods, instead of training a policy function, **we train a value function that maps a state to the expected value of being at that state**. </details> ### Q8: What are policy-based methods? <details> <summary>Solution</summary> - In *Policy-Based Methods*, we learn a **policy function directly**. - This policy function will **map from each state to the best corresponding action at that state**. Or a **probability distribution over the set of possible actions at that state**. </details> Congrats on finishing this Quiz 🥳, if you missed some elements, take time to read again the chapter to reinforce (😏) your knowledge, but **do not worry**: during the course we'll go over again of these concepts, and you'll **reinforce your theoretical knowledge with hands-on**.
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx
# The Exploration/Exploitation trade-off [[exp-exp-tradeoff]] Finally, before looking at the different methods to solve Reinforcement Learning problems, we must cover one more very important topic: *the exploration/exploitation trade-off.* - *Exploration* is exploring the environment by trying random actions in order to **find more information about the environment.** - *Exploitation* is **exploiting known information to maximize the reward.** Remember, the goal of our RL agent is to maximize the expected cumulative reward. However, **we can fall into a common trap**. Let’s take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_1.jpg" alt="Exploration" width="100%"> In this game, our mouse can have an **infinite amount of small cheese** (+1 each). But at the top of the maze, there is a gigantic sum of cheese (+1000). However, if we only focus on exploitation, our agent will never reach the gigantic sum of cheese. Instead, it will only exploit **the nearest source of rewards,** even if this source is small (exploitation). But if our agent does a little bit of exploration, it can **discover the big reward** (the pile of big cheese). This is what we call the exploration/exploitation trade-off. We need to balance how much we **explore the environment** and how much we **exploit what we know about the environment.** Therefore, we must **define a rule that helps to handle this trade-off**. We’ll see the different ways to handle it in the future units. If it’s still confusing, **think of a real problem: the choice of picking a restaurant:** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_2.jpg" alt="Exploration"> <figcaption>Source: <a href="https://inst.eecs.berkeley.edu/~cs188/sp20/assets/lecture/lec15_6up.pdf"> Berkley AI Course</a> </figcaption> </figure> - *Exploitation*: You go to the same one that you know is good every day and **take the risk to miss another better restaurant.** - *Exploration*: Try restaurants you never went to before, with the risk of having a bad experience **but the probable opportunity of a fantastic experience.** To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">
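One common way to implement such a rule, shown here only as a sketch on a toy 3-armed bandit (three "restaurants" with made-up reward probabilities), is epsilon-greedy: with probability epsilon we explore a random action, otherwise we exploit the best action we know so far:

```python
import random

# Sketch of epsilon-greedy on a toy 3-armed bandit; the reward probabilities are illustrative.
true_reward_prob = [0.3, 0.5, 0.8]
estimated_value = [0.0, 0.0, 0.0]
counts = [0, 0, 0]
epsilon = 0.1

for _ in range(1000):
    if random.random() < epsilon:
        action = random.randrange(3)                              # explore: try a random restaurant
    else:
        action = max(range(3), key=lambda a: estimated_value[a])  # exploit: best known restaurant
    reward = 1.0 if random.random() < true_reward_prob[action] else 0.0
    counts[action] += 1
    # incremental average of the rewards observed for this action
    estimated_value[action] += (reward - estimated_value[action]) / counts[action]

print(estimated_value)  # the estimates approach the true probabilities; arm 2 is exploited most
```

Setting epsilon to 0 would be pure exploitation: the agent could lock onto the first restaurant that happened to give a reward and never discover the better one.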
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/introduction.mdx
# Introduction to Deep Reinforcement Learning [[introduction-to-deep-reinforcement-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Unit 1 thumbnail" width="100%"> Welcome to the most fascinating topic in Artificial Intelligence: **Deep Reinforcement Learning.** Deep RL is a type of Machine Learning where an agent learns **how to behave** in an environment **by performing actions** and **seeing the results.** In this first unit, **you'll learn the foundations of Deep Reinforcement Learning.** Then, you'll **train your Deep Reinforcement Learning agent, a lunar lander to land correctly on the Moon** using <a href="https://stable-baselines3.readthedocs.io/en/master/"> Stable-Baselines3 </a>, a Deep Reinforcement Learning library. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/lunarLander.gif" alt="LunarLander"> And finally, you'll **upload this trained agent to the Hugging Face Hub 🤗, a free, open platform where people can share ML models, datasets, and demos.** It's essential **to master these elements** before diving into implementing Deep Reinforcement Learning agents. The goal of this chapter is to give you solid foundations. After this unit, in a bonus unit, you'll be **able to train Huggy the Dog 🐶 to fetch the stick and play with him 🤗**. <video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/huggy.mp4" type="video/mp4" controls autoplay loop mute /> So let's get started! 🚀
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/tasks.mdx
# Types of tasks [[tasks]] A task is an **instance** of a Reinforcement Learning problem. We can have two types of tasks: **episodic** and **continuing**. ## Episodic task [[episodic-task]] In this case, we have a starting point and an ending point **(a terminal state). This creates an episode**: a list of States, Actions, Rewards, and new States. For instance, think about Super Mario Bros: an episode begins at the launch of a new Mario Level and ends **when you’re killed or you reach the end of the level.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>Beginning of a new episode. </figcaption> </figure> ## Continuing tasks [[continuing-tasks]] These are tasks that continue forever (**no terminal state**). In this case, the agent must **learn how to choose the best actions and simultaneously interact with the environment.** For instance, an agent that does automated stock trading. For this task, there is no starting point or terminal state. **The agent keeps running until we decide to stop it.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/stock.jpg" alt="Stock Market" width="100%"> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/tasks.jpg" alt="Tasks recap" width="100%">
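In code, the difference mostly shows up in when (or whether) the interaction loop resets. Here is a minimal sketch with Gymnasium, using CartPole-v1 only as an example of an episodic environment:

```python
import gymnasium as gym

# Episodic task: the loop runs until a terminal state is reached, then we reset for a new episode.
env = gym.make("CartPole-v1")   # an example of an episodic environment
observation, info = env.reset()

episodes_finished = 0
while episodes_finished < 3:
    action = env.action_space.sample()
    observation, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:          # end of the episode (terminal state or time limit)
        episodes_finished += 1
        observation, info = env.reset()

# A continuing task has no terminal state: the same loop simply never resets
# and runs until we decide to stop the agent.
env.close()
```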
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/glossary.mdx
# Glossary [[glossary]] This is a community-created glossary. Contributions are welcomed! ### Agent An agent learns to **make decisions by trial and error, with rewards and punishments from the surroundings**. ### Environment An environment is a simulated world **where an agent can learn by interacting with it**. ### Markov Property It implies that the action taken by our agent is **conditional solely on the present state and independent of the past states and actions**. ### Observations/State - **State**: Complete description of the state of the world. - **Observation**: Partial description of the state of the environment/world. ### Actions - **Discrete Actions**: Finite number of actions, such as left, right, up, and down. - **Continuous Actions**: Infinite possibility of actions; for example, in the case of self-driving cars, the driving scenario has an infinite possibility of actions occurring. ### Rewards and Discounting - **Rewards**: Fundamental factor in RL. Tells the agent whether the action taken is good/bad. - RL algorithms are focused on maximizing the **cumulative reward**. - **Reward Hypothesis**: RL problems can be formulated as a maximisation of (cumulative) return. - **Discounting** is performed because rewards obtained at the start are more likely to happen as they are more predictable than long-term rewards. ### Tasks - **Episodic**: Has a starting point and an ending point. - **Continuous**: Has a starting point but no ending point. ### Exploration v/s Exploitation Trade-Off - **Exploration**: It's all about exploring the environment by trying random actions and receiving feedback/returns/rewards from the environment. - **Exploitation**: It's about exploiting what we know about the environment to gain maximum rewards. - **Exploration-Exploitation Trade-Off**: It balances how much we want to **explore** the environment and how much we want to **exploit** what we know about the environment. ### Policy - **Policy**: It is called the agent's brain. It tells us what action to take, given the state. - **Optimal Policy**: Policy that **maximizes** the **expected return** when an agent acts according to it. It is learned through *training*. ### Policy-based Methods: - An approach to solving RL problems. - In this method, the Policy is learned directly. - Will map each state to the best corresponding action at that state. Or a probability distribution over the set of possible actions at that state. ### Value-based Methods: - Another approach to solving RL problems. - Here, instead of training a policy, we train a **value function** that maps each state to the expected value of being in that state. Contributions are welcomed 🤗 If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [@lucifermorningstar1305](https://github.com/lucifermorningstar1305) - [@daspartho](https://github.com/daspartho) - [@misza222](https://github.com/misza222)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/conclusion.mdx
# Conclusion [[conclusion]] Congrats on finishing this unit! **That was the biggest one**, and there was a lot of information. And congrats on finishing the tutorial. You’ve just trained your first Deep RL agents and shared them with the community! 🥳 It's **normal if you still feel confused by some of these elements**. This was the same for me and for all people who studied RL. **Take time to really grasp the material** before continuing. It’s important to master these elements and have a solid foundation before entering the fun part. Naturally, during the course, we’re going to use and explain these terms again, but it’s better to understand them before diving into the next units. In the next (bonus) unit, we’re going to reinforce what we just learned by **training Huggy the Dog to fetch a stick**. You will then be able to play with him 🤗. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy.jpg" alt="Huggy"/> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then, please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit1/additional-readings.mdx
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. ## Deep Reinforcement Learning [[deep-rl]] - [Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto Chapter 1, 2 and 3](http://incompleteideas.net/book/RLbook2020.pdf) - [Foundations of Deep RL Series, L1 MDPs, Exact Solution Methods, Max-ent RL by Pieter Abbeel](https://youtu.be/2GwBez0D20A) - [Spinning Up RL by OpenAI Part 1: Key concepts of RL](https://spinningup.openai.com/en/latest/spinningup/rl_intro.html) ## Gym [[gym]] - [Getting Started With OpenAI Gym: The Basic Building Blocks](https://blog.paperspace.com/getting-started-with-openai-gym/) - [Make your own Gym custom environment](https://www.gymlibrary.dev/content/environment_creation/)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/hands-on.mdx
# Hands-on Now that you've learned the basics of multi-agents, you're ready to train your first agents in a multi-agent system: **a 2vs2 soccer team that needs to beat the opponent team**. And you’re going to participate in AI vs. AI challenges where your trained agent will compete against other classmates’ **agents every day and be ranked on a new leaderboard.** To validate this hands-on for the certification process, you just need to push a trained model. There **is no minimum result to attain to validate it.** For more information about the certification process, check this section 👉 [https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process) This hands-on is different because, to get good results, **you need to train your agents for 4 to 8 hours**. And given the risk of timeout in Colab, we advise you to train on your computer. You don’t need a supercomputer: a simple laptop is good enough for this exercise. Let's get started! 🔥 ## What is AI vs. AI? AI vs. AI is an open-source tool we developed at Hugging Face to compete agents on the Hub against one another in a multi-agent setting. These models are then ranked in a leaderboard. The idea of this tool is to have a robust evaluation tool: **by evaluating your agent against a lot of others, you’ll get a good idea of the quality of your policy.** More precisely, AI vs. AI is three tools: - A *matchmaking process* defining the matches (which model against which) and running the matches using a background task in the Space. - A *leaderboard* getting the match history results and displaying the models’ ELO ratings: [https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos) - A *Space demo* to visualize your agents playing against others: [https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos](https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos) In addition to these three tools, your classmate cyllum created a 🤗 SoccerTwos Challenge Analytics where you can check the detailed match results of a model: [https://huggingface.co/spaces/cyllum/soccertwos-analytics](https://huggingface.co/spaces/cyllum/soccertwos-analytics) We [wrote a blog post to explain this AI vs. AI tool in detail](https://huggingface.co/blog/aivsai), but to give you the big picture, it works this way: - Every four hours, our algorithm **fetches all the available models for a given environment (in our case ML-Agents-SoccerTwos).** - It creates a **queue of matches with the matchmaking algorithm.** - We simulate the match in a Unity headless process and **gather the match result** (1 if the first model won, 0.5 if it’s a draw, 0 if the second model won) in a Dataset. - Then, when all matches from the matches queue are done, **we update the ELO score for each model and update the leaderboard.** ### Competition Rules This first AI vs. AI competition **is an experiment**: the goal is to improve the tool in the future with your feedback. So some **disruptions may happen during the challenge**. But don't worry: **all the results are saved in a dataset, so we can always restart the calculation correctly without losing information**. In order for your model to be correctly evaluated against others, you need to follow these rules: 1. **You can't change the observation space or action space of the agent.** By doing that your model will not work during evaluation. 2.
You **can't use a custom trainer for now;** you need to use the Unity MLAgents ones. 3. We provide executables to train your agents. You can also use the Unity Editor if you prefer**, but to avoid bugs, we advise that you use our executables**. What will make the difference during this challenge are **the hyperparameters you choose**. We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). ### Chat with your classmates, share advice and ask questions on Discord - We created a new channel called `ai-vs-ai-challenge` to exchange advice and ask questions. - If you haven't joined the Discord server yet, you can [join here](https://discord.gg/ydHrjt3WP5) ## Step 0: Install MLAgents and download the correct executable We advise you to use [conda](https://docs.conda.io/en/latest/) as a package manager and create a new environment. With conda, we create a new environment called rl with **Python 3.9**: ```bash conda create --name rl python=3.9 conda activate rl ``` To be able to train our agents correctly and push to the Hub, we need to install ML-Agents: ```bash git clone https://github.com/Unity-Technologies/ml-agents ``` When the cloning is done (it takes 2.63 GB), we go inside the repository and install the packages: ```bash cd ml-agents pip install -e ./ml-agents-envs pip install -e ./ml-agents ``` We also need to install PyTorch with: ```bash pip install torch ``` Finally, you need to install git-lfs: https://git-lfs.com/ Now that it’s installed, we need to add the environment training executable. Based on your operating system, you need to download one of them, unzip it, and place it in a new folder inside `ml-agents` that you call `training-envs-executables`. In the end, your executable should be in `ml-agents/training-envs-executables/SoccerTwos`. Windows: Download [this executable](https://drive.google.com/file/d/1sqFxbEdTMubjVktnV4C6ICjp89wLhUcP/view?usp=sharing) Linux (Ubuntu): Download [this executable](https://drive.google.com/file/d/1KuqBKYiXiIcU4kNMqEzhgypuFP5_45CL/view?usp=sharing) Mac: Download [this executable](https://drive.google.com/drive/folders/1h7YB0qwjoxxghApQdEUQmk95ZwIDxrPG?usp=share_link) ⚠ For Mac, you also need to run `xattr -cr training-envs-executables/SoccerTwos/SoccerTwos.app` to be able to run SoccerTwos. ## Step 1: Understand the environment The environment is called `SoccerTwos`. The Unity MLAgents Team made it.
You can find its documentation [here](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Learning-Environment-Examples.md#soccer-twos) The goal in this environment **is to get the ball into the opponent's goal while preventing the ball from entering your own goal.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents"> Unity MLAgents Team</a></figcaption> </figure> ### The reward function The reward function is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccerreward.png" alt="SoccerTwos Reward"/> ### The observation space The observation space is composed of vectors of size 336: - 11 ray-casts forward distributed over 120 degrees (264 state dimensions) - 3 ray-casts backward distributed over 90 degrees (72 state dimensions) - Both of these ray-casts can detect 6 objects: - Ball - Blue Goal - Purple Goal - Wall - Blue Agent - Purple Agent ### The action space The action space is three discrete branches: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/socceraction.png" alt="SoccerTwos Action"/> ## Step 2: Understand MA-POCA We know how to train agents to play against others: **we can use self-play.** This is a perfect technique for a 1vs1. But in our case we’re 2vs2, and each team has 2 agents. How then can we **train cooperative behavior for groups of agents?** As explained in the [Unity Blog](https://blog.unity.com/technology/ml-agents-v20-release-now-supports-training-complex-cooperative-behaviors), agents typically receive a reward as a group (+1 - penalty) when the team scores a goal. This implies that **every agent on the team is rewarded even if each agent didn’t contribute the same to the win**, which makes it difficult to learn what to do independently. The Unity MLAgents team developed the solution in a new multi-agent trainer called *MA-POCA (Multi-Agent POsthumous Credit Assignment)*. The idea is simple but powerful: a centralized critic **processes the states of all agents in the team to estimate how well each agent is doing**. Think of this critic as a coach. This allows each agent to **make decisions based only on what it perceives locally**, and **simultaneously evaluate how good its behavior is in the context of the whole group**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/mapoca.png" alt="MA POCA"/> <figcaption>This illustrates MA-POCA’s centralized learning and decentralized execution. Source: <a href="https://blog.unity.com/technology/ml-agents-plays-dodgeball">MLAgents Plays Dodgeball</a> </figcaption> </figure> The solution then is to use Self-Play with an MA-POCA trainer (called poca). The poca trainer will help us to train cooperative behavior and self-play to win against an opponent team. If you want to dive deeper into this MA-POCA algorithm, you need to read the paper they published [here](https://arxiv.org/pdf/2111.05992.pdf) and the sources we put on the additional readings section. ## Step 3: Define the config file We already learned in [Unit 5](https://huggingface.co/deep-rl-course/unit5/introduction) that in ML-Agents, you define **the training hyperparameters in `config.yaml` files.** There are multiple hyperparameters. 
To understand them better, you should read the explanations for each of them in **[the documentation](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Training-Configuration-File.md)** The config file we’re going to use here is in `./config/poca/SoccerTwos.yaml`. It looks like this: ```yaml behaviors: SoccerTwos: trainer_type: poca hyperparameters: batch_size: 2048 buffer_size: 20480 learning_rate: 0.0003 beta: 0.005 epsilon: 0.2 lambd: 0.95 num_epoch: 3 learning_rate_schedule: constant network_settings: normalize: false hidden_units: 512 num_layers: 2 vis_encode_type: simple reward_signals: extrinsic: gamma: 0.99 strength: 1.0 keep_checkpoints: 5 max_steps: 5000000 time_horizon: 1000 summary_freq: 10000 self_play: save_steps: 50000 team_change: 200000 swap_steps: 2000 window: 10 play_against_latest_model_ratio: 0.5 initial_elo: 1200.0 ``` Compared to Pyramids or SnowballTarget, we have new hyperparameters with a self-play part. How you modify them can be critical in getting good results. The advice I can give you here is to check the explanation and recommended value for each parameter (especially the self-play ones) against **[the documentation](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Training-Configuration-File.md).** Now that you’ve modified your config file, you’re ready to train your agents. ## Step 4: Start the training To train the agents, we need to **launch mlagents-learn and select the executable containing the environment.** We define four parameters: 1. `mlagents-learn <config>`: the path to the hyperparameter config file. 2. `--env`: where the environment executable is. 3. `--run-id`: the name you want to give to your training run id. 4. `--no-graphics`: to not launch the visualization during the training. Depending on your hardware, 5M timesteps (the recommended value, but you can also try 10M) will take 5 to 8 hours of training. You can continue using your computer in the meantime, but I advise deactivating the computer standby mode to prevent the training from being stopped. Depending on the executable you use (Windows, Ubuntu, Mac), the training command will look like this (your executable path can be different, so don’t hesitate to check before running): ```bash mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id="SoccerTwos" --no-graphics ``` The executable contains 8 copies of SoccerTwos. ⚠️ It’s normal if you don’t see a big increase in ELO score (and even a decrease below 1200) before 2M timesteps, since your agents will spend most of their time moving randomly on the field before being able to score goals. ⚠️ You can stop the training with Ctrl + C, but be careful to type this command only once to stop the training, since MLAgents needs to generate a final .onnx file before closing the run. ## Step 5: **Push the agent to the Hugging Face Hub** Now that we trained our agents, we’re **ready to push them to the Hub to be able to participate in the AI vs. AI challenge and visualize them playing in your browser 🔥.** To be able to share your model with the community, there are three more steps to follow: 1️⃣ (If it’s not already done) create an account on HF ➡ [https://huggingface.co/join](https://huggingface.co/join) 2️⃣ Sign in and store your authentication token from the Hugging Face website.
Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> Copy the token, run the following, and paste the token: ```bash huggingface-cli login ``` Then, we need to run `mlagents-push-to-hf`. We define four parameters: 1. `--run-id`: the name of the training run id. 2. `--local-dir`: where the agent was saved; it’s `results/<run_id name>`, so in my case `results/SoccerTwos`. 3. `--repo-id`: the name of the Hugging Face repo you want to create or update. It’s always `<your huggingface username>/<the repo name>`. If the repo does not exist, **it will be created automatically**. 4. `--commit-message`: since HF repos are git repositories, you need to give a commit message. In my case: ```bash mlagents-push-to-hf --run-id="SoccerTwos" --local-dir="./results/SoccerTwos" --repo-id="ThomasSimonini/poca-SoccerTwos" --commit-message="First Push" ``` ```bash mlagents-push-to-hf --run-id= # Add your run id --local-dir= # Your local dir --repo-id= # Your repo id --commit-message="First Push" ``` If everything worked, you should see this at the end of the process (but with a different URL 😆): Your model is pushed to the Hub. You can view your model here: https://huggingface.co/ThomasSimonini/poca-SoccerTwos It's the link to your model. It contains a model card that explains how to use it, your Tensorboard, and your config file. **What's awesome is that it's a git repository, which means you can have different commits, update your repository with a new push, etc.** ## Step 6: Verify that your model is ready for AI vs AI Challenge Now that your model is pushed to the Hub, **it’s going to be added automatically to the AI vs AI Challenge model pool.** It can take a little bit of time before your model is added to the leaderboard, given we do a run of matches every 4h. But to ensure that everything works perfectly, you need to check: 1. That you have this tag in your model: ML-Agents-SoccerTwos. This is the tag we use to select models to be added to the challenge pool. To do that, go to your model and check the tags <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/verify1.png" alt="Verify"/> If that's not the case, you just need to modify the README and add it (a minimal sketch of the metadata block is shown at the end of this page) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/verify2.png" alt="Verify"/> 2. That you have a `SoccerTwos.onnx` file <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/verify3.png" alt="Verify"/> We strongly suggest that you create a new model when you push to the Hub if you want to train it again or train a new version. ## Step 7: Visualize some matches in our demo Now that your model is part of the AI vs AI Challenge, **you can visualize how good it is compared to others**: https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos In order to do that, you just need to go to this demo: - Select your model as team blue (or team purple if you prefer) and another model to compete against. The best opponents to compare your model to are either whoever is on top of the leaderboard or the [baseline model](https://huggingface.co/unity/MLAgents-SoccerTwos) The matches you see live are not used in the calculation of your result **but they are a good way to visualize how good your agent is**.
And don't hesitate to share the best score your agent gets on Discord in the #rl-i-made-this channel 🔥
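For reference, here is a minimal sketch of what the metadata block at the top of your model's README (the model card) can look like. The exact set of tags generated by `mlagents-push-to-hf` may differ; the important part for the challenge pool is the `ML-Agents-SoccerTwos` tag:

```yaml
---
library_name: ml-agents
tags:
- SoccerTwos
- ML-Agents-SoccerTwos
- deep-reinforcement-learning
- reinforcement-learning
---
```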
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/self-play.mdx
# Self-Play: a classic technique to train competitive agents in adversarial games Now that we've studied the basics of multi-agents, we're ready to go deeper. As mentioned in the introduction, we're going **to train agents in an adversarial game with SoccerTwos, a 2vs2 game**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> ## What is Self-Play? Training agents correctly in an adversarial game can be **quite complex**. On the one hand, we need to figure out how to get a well-trained opponent to play against our training agent. And on the other hand, if we do find a very well-trained opponent, how will our agent improve its policy when that opponent is too strong? Think of a child who just started to learn soccer. Playing against a very good soccer player would be useless since it would be too hard to win, or even to get the ball from time to time. So the child would continuously lose without having time to learn a good policy. The best solution would be **to have an opponent that is on the same level as the agent and that upgrades its level as the agent upgrades its own**. Because if the opponent is too strong, we’ll learn nothing; if it is too weak, we’ll overlearn behaviors that are useless against a stronger opponent. This solution is called *self-play*. In self-play, **the agent uses former copies of itself (of its policy) as an opponent**. This way, the agent will play against an agent of the same level (challenging but not too much), have opportunities to gradually improve its policy, and then update its opponent as it becomes better. It’s a way to bootstrap an opponent and progressively increase the opponent's complexity. It’s the same way humans learn in competition: - We start to train against an opponent of a similar level - Then we learn from it, and when we acquire some skills, we can move further with stronger opponents. We do the same with self-play: - We **start with a copy of our agent as an opponent**; this way, this opponent is on a similar level. - We **learn from it** and, when we acquire some skills, we **update our opponent with a more recent copy of our training policy**. The theory behind self-play is not something new. It was already used by Arthur Samuel’s checkers player system in the fifties and by Gerald Tesauro’s TD-Gammon in 1995. If you want to learn more about the history of self-play, [check out this very good blogpost by Andrew Cohen](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents) ## Self-Play in MLAgents Self-Play is integrated into the MLAgents library and is managed by multiple hyperparameters that we’re going to study. But the main focus, as explained in the documentation, is the **tradeoff between the skill level and generality of the final policy and the stability of learning**. Training against a set of slowly changing or unchanging adversaries with low diversity **results in more stable training, but there is a risk of overfitting if the change is too slow.** So we need to control: - How **often we change opponents** with the `swap_steps` and `team_change` parameters. - The **number of opponents saved** with the `window` parameter.
A larger value of `window` means that an agent's pool of opponents will contain a larger diversity of behaviors, since it will contain policies from earlier in the training run. - The **probability of playing against the current self vs an opponent** sampled from the pool with `play_against_latest_model_ratio`. A larger value of `play_against_latest_model_ratio` indicates that an agent will be playing against the current opponent more often. - The **number of training steps before saving a new opponent** with the `save_steps` parameter. A larger value of `save_steps` will yield a set of opponents that cover a wider range of skill levels and possibly play styles, since the policy receives more training. To get more details about these hyperparameters, you definitely need [to check out this part of the documentation](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Training-Configuration-File.md#self-play) ## The ELO Score to evaluate our agent ### What is ELO Score? In adversarial games, tracking the **cumulative reward is not always a meaningful metric for tracking learning progress,** because this metric **depends on the skill of the opponent.** Instead, we’re using an ***ELO rating system*** (named after Arpad Elo) that calculates the **relative skill level** between 2 players from a given population in a zero-sum game. In a zero-sum game, one agent wins and the other agent loses. It’s a mathematical representation of a situation in which each participant’s gain or loss of utility **is exactly balanced by the gain or loss of the utility of the other participants.** We talk about zero-sum games because the sum of utility is equal to zero. This ELO (starting at a specific score: frequently 1200) can decrease initially but should increase progressively during the training. A player's Elo rating is **inferred from their wins, losses, and draws against other players.** It means that player ratings depend **on the ratings of their opponents and the results scored against them.** The Elo score quantifies the relative skill of a player in a zero-sum game. **We say relative because it depends on the performance of opponents.** The central idea is to think of the performance of a player **as a random variable that is normally distributed.** The difference in rating between 2 players serves as **the predictor of the outcome of a match.** If a player wins but its probability of winning was already high, it will only take a few points from its opponent, since the result only confirms that it is much stronger. After every game: - The winning player takes **points from the losing one.** - The number of points is determined **by the difference between the 2 players' ratings (hence relative).** - If the higher-rated player wins → few points will be taken from the lower-rated player. - If the lower-rated player wins → a lot of points will be taken from the higher-rated player. - If it’s a draw → the lower-rated player gains a few points from the higher. So if A and B have ratings \\(R_A\\) and \\(R_B\\), then the **expected scores are** given by: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo1.png" alt="ELO Score"/> Then, at the end of the game, we need to update the player’s actual Elo score. We use a linear adjustment **proportional to the amount by which the player over-performed or under-performed.** We also define a maximum adjustment per game, called the K-factor: - K=16 for masters. - K=32 for weaker players.
If Player A has an expected score of \\(E_{A}\\) but actually scored \\(S_{A}\\) points, then the player’s rating is updated using the formula: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo2.png" alt="ELO Score"/> ### Example If we take an example: Player A has a rating of 2600. Player B has a rating of 2300. - We first calculate the expected scores: \\(E_{A} = \frac{1}{1+10^{(2300-2600)/400}} = 0.849 \\) \\(E_{B} = \frac{1}{1+10^{(2600-2300)/400}} = 0.151 \\) - If the organizers determined that K=16 and A wins, the new ratings would be: \\(ELO_A = 2600 + 16*(1-0.849) = 2602 \\) \\(ELO_B = 2300 + 16*(0-0.151) = 2298 \\) - If the organizers determined that K=16 and B wins, the new ratings would be: \\(ELO_A = 2600 + 16*(0-0.849) = 2586 \\) \\(ELO_B = 2300 + 16*(1-0.151) = 2314 \\) ### The Advantages Using the ELO score has multiple advantages: - Points are **always balanced** (more points are exchanged when there is an unexpected outcome, but the sum is always the same). - It is a **self-correcting system**: if a player wins against a much weaker player, they will only win a few points. - It **works with team games**: we calculate the average rating for each team and use it in the Elo computation. ### The Disadvantages - ELO **does not take into account the individual contribution** of each player on the team. - Rating deflation: **a good rating requires skill over time to keep the same rating**. - **Ratings from different eras cannot be compared directly**.
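To make the update rule concrete, here is a minimal Python sketch of the expected-score and rating-update formulas above, reproducing the example numbers (the function names are illustrative, not part of any library):

```python
def expected_score(r_a, r_b):
    # Probability that a player rated r_a beats a player rated r_b under the Elo model
    return 1 / (1 + 10 ** ((r_b - r_a) / 400))


def elo_update(r_a, r_b, score_a, k=16):
    # score_a is 1 if A wins, 0.5 for a draw, 0 if A loses
    e_a = expected_score(r_a, r_b)
    e_b = expected_score(r_b, r_a)
    new_r_a = r_a + k * (score_a - e_a)
    new_r_b = r_b + k * ((1 - score_a) - e_b)
    return new_r_a, new_r_b


# Reproduces the example above: A (2600) beats B (2300) with K=16
print(elo_update(2600, 2300, 1))  # ≈ (2602.4, 2297.6)
```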
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/introduction.mdx
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.png" alt="Thumbnail"/> Since the beginning of this course, we learned to train agents in a *single-agent system* where our agent was alone in its environment: it was **not cooperating or collaborating with other agents**. This worked great, and the single-agent system is useful for many applications. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/> <figcaption> A patchwork of all the environments you’ve trained your agents on since the beginning of the course </figcaption> </figure> But, as humans, **we live in a multi-agent world**. Our intelligence comes from interaction with other agents. And so, our **goal is to create agents that can interact with other humans and other agents**. Consequently, we must study how to train deep reinforcement learning agents in a *multi-agents system* to build robust agents that can adapt, collaborate, or compete. So today we’re going to **learn the basics of the fascinating topic of multi-agents reinforcement learning (MARL)**. And the most exciting part is that, during this unit, you’re going to train your first agents in a multi-agents system: **a 2vs2 soccer team that needs to beat the opponent team**. And you’re going to participate in **AI vs. AI challenge** where your trained agent will compete against other classmates’ agents every day and be ranked on a [new leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> So let’s get started!
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/glossary.mdx
# Glossary This is a community-created glossary. Contributions are welcome! - **Multi-Agent Reinforcement Learning (MARL):** A subfield of reinforcement learning that deals with scenarios where multiple agents interact with each other and a shared environment. In MARL, the goal is to learn effective policies for each agent, considering the dynamic interactions and interdependencies among them. - **Cooperative Agents:** Agents that work together to maximize a common benefit. - **Competitive Agents:** Agents that compete against each other to maximize their own benefits while minimizing their opponents'. - **Mixed Agents:** Agents that exhibit both cooperative and competitive behaviors, where some agents need to cooperate to beat an opponent (or a group of opponents). - **Decentralized Learning:** An approach in MARL where each agent is trained independently without considering the actions or policies of other agents. The big drawback of this technique is that it makes the environment non-stationary and prevents reaching a global optimum. - **Centralized Learning:** A learning architecture in which a high-level process, the **experience buffer**, collects experiences from multiple agents to learn a (single) common policy and achieve a global reward. - **Non-Stationarity:** The condition in which the underlying Markov decision process in the environment changes over time due to the interactions and decisions made by other agents. It makes it difficult for algorithms to converge to a globally optimal solution since the environment is in a constant state of change. - **Self-Play:** A training technique where an agent plays against previous versions of its own policy to create a challenging, yet progressively improving, environment. - **Zero-Sum Game:** A type of game where the total reward is constant, meaning one player's gain is equivalent to another player's loss. - **ELO Score:** A rating system, named after Arpad Elo, commonly used in adversarial games to track relative skill levels between players. It determines numerical ratings of players based on their match outcomes against opponents. If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [Diego Carpintero](https://github.com/dcarpintero)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/introduction-to-marl.mdx
# An introduction to Multi-Agents Reinforcement Learning (MARL) ## From single agent to multiple agents In the first unit, we learned to train agents in a single-agent system, where our agent was alone in its environment: **it was not cooperating or collaborating with other agents**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/> <figcaption> A patchwork of all the environments you've trained your agents on since the beginning of the course </figcaption> </figure> When we do multi-agents reinforcement learning (MARL), we are in a situation where we have multiple agents **that share and interact in a common environment**. For instance, you can think of a warehouse where **multiple robots need to navigate to load and unload packages**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/warehouse.jpg" alt="Warehouse"/> <figcaption> [Image by upklyak](https://www.freepik.com/free-vector/robots-warehouse-interior-automated-machines_32117680.htm#query=warehouse robot&position=17&from_view=keyword) on Freepik </figcaption> </figure> Or a road with **several autonomous vehicles**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/selfdrivingcar.jpg" alt="Self driving cars"/> <figcaption> [Image by jcomp](https://www.freepik.com/free-vector/autonomous-smart-car-automatic-wireless-sensor-driving-road-around-car-autonomous-smart-car-goes-scans-roads-observe-distance-automatic-braking-system_26413332.htm#query=self driving cars highway&position=34&from_view=search&track=ais) on Freepik </figcaption> </figure> In these examples, we have **multiple agents interacting in the environment and with the other agents**. This implies defining a multi-agents system. But first, let's understand the different types of multi-agent environments. ## Different types of multi-agent environments Given that, in a multi-agent system, agents interact with other agents, we can have different types of environments: - *Cooperative environments*: where your agents need **to maximize the common benefits**. For instance, in a warehouse, **robots must collaborate to load and unload the packages efficiently (as fast as possible)**. - *Competitive/Adversarial environments*: in this case, your agent **wants to maximize its benefits by minimizing the opponent's**. For example, in a game of tennis, **each agent wants to beat the other agent**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/tennis.png" alt="Tennis"/> - *A mix of both adversarial and cooperative*: like in our SoccerTwos environment, two agents are part of a team (blue or purple): they need to cooperate with each other and beat the opponent team. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> So now we might wonder: how can we design these multi-agent systems? Said differently, **how can we train agents in a multi-agent setting**?
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/multi-agent-setting.mdx
# Designing Multi-Agents systems For this section, you're going to watch this excellent introduction to multi-agents made by <a href="https://www.youtube.com/channel/UCq0imsn84ShAe9PBOFnoIrg"> Brian Douglas </a>. <Youtube id="qgb0gyrpiGk" /> In this video, Brian talked about how to design multi-agent systems. He specifically took a multi-agent system of vacuum cleaners and asked: **how can they cooperate with each other**? We have two solutions to design this multi-agent reinforcement learning system (MARL). ## Decentralized system <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/decentralized.png" alt="Decentralized"/> <figcaption> Source: <a href="https://www.youtube.com/watch?v=qgb0gyrpiGk"> Introduction to Multi-Agent Reinforcement Learning </a> </figcaption> </figure> In decentralized learning, **each agent is trained independently from the others**. In the example given, each vacuum learns to clean as many places as it can **without caring about what other vacuums (agents) are doing**. The benefit is that **since no information is shared between agents, these vacuums can be designed and trained like we train single agents**. The idea here is that **our training agent will consider other agents as part of the environment dynamics**. Not as agents. However, the big drawback of this technique is that it will **make the environment non-stationary** since the underlying Markov decision process changes over time as other agents are also interacting in the environment. And this is problematic for many Reinforcement Learning algorithms **that can't reach a global optimum with a non-stationary environment**. ## Centralized approach <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/centralized.png" alt="Centralized"/> <figcaption> Source: <a href="https://www.youtube.com/watch?v=qgb0gyrpiGk"> Introduction to Multi-Agent Reinforcement Learning </a> </figcaption> </figure> In this architecture, **we have a high-level process that collects agents' experiences**: the experience buffer. And we'll use these experiences **to learn a common policy**. For instance, in the vacuum cleaner example, the observation will be: - The coverage map of the vacuums. - The position of all the vacuums. We use that collective experience **to train a policy that will move all three robots in the most beneficial way as a whole**. So each robot is learning from the common experience. We now have a stationary environment, since all the agents are treated as a larger entity and they know about changes in the other agents' policies (since they share the same policy). If we recap: - In a *decentralized approach*, we **treat all agents independently without considering the existence of the other agents.** - In this case, all agents **consider other agents as part of the environment**. - **The environment becomes non-stationary**, so there is no guarantee of convergence. - In a *centralized approach*: - A **single policy is learned from all the agents**. - The policy takes the present state of the environment as input and outputs joint actions. - The reward is global. A toy code sketch contrasting the two approaches is shown below.
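Here is a tiny, self-contained Python sketch of that contrast on a made-up one-step coordination problem (the environment, reward, and update rules are illustrative assumptions only, not how ML-Agents implements these ideas):

```python
import numpy as np

rng = np.random.default_rng(0)
n_actions = 3
epsilon, lr = 0.1, 0.1

def global_reward(a1, a2):
    # Hypothetical cooperative reward: both agents get +1 when they pick the same action
    return 1.0 if a1 == a2 else 0.0

# --- Decentralized: each agent keeps its own estimates and treats the other as "environment" ---
q1, q2 = np.zeros(n_actions), np.zeros(n_actions)
for _ in range(5000):
    a1 = rng.integers(n_actions) if rng.random() < epsilon else int(np.argmax(q1))
    a2 = rng.integers(n_actions) if rng.random() < epsilon else int(np.argmax(q2))
    r = global_reward(a1, a2)
    q1[a1] += lr * (r - q1[a1])  # agent 1's update ignores what agent 2 is doing
    q2[a2] += lr * (r - q2[a2])  # agent 2's update ignores what agent 1 is doing

# --- Centralized: one table over joint actions, trained on the same global reward ---
joint_q = np.zeros((n_actions, n_actions))
for _ in range(5000):
    if rng.random() < epsilon:
        joint = (rng.integers(n_actions), rng.integers(n_actions))
    else:
        joint = np.unravel_index(np.argmax(joint_q), joint_q.shape)
    r = global_reward(*joint)
    joint_q[joint] += lr * (r - joint_q[joint])

print("Decentralized value estimates:", np.round(q1, 2), np.round(q2, 2))
print("Centralized joint-action values:\n", np.round(joint_q, 2))
```

In the decentralized loop, each agent's update ignores the other agent's behavior, which is exactly why the problem looks non-stationary from its point of view; in the centralized loop, the shared table sees both actions at once and is trained on the global reward.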
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/conclusion.mdx
# Conclusion That’s all for today. Congrats on finishing this unit and the tutorial! The best way to learn is to practice and try stuff. **Why not train another agent with a different configuration?** And don’t hesitate from time to time to check the [leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos) See you in Unit 8 🔥 ## Keep Learning, Stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit7/additional-readings.mdx
# Additional Readings [[additional-readings]] ## An introduction to multi-agents - [Multi-agent reinforcement learning: An overview](https://www.dcsc.tudelft.nl/~bdeschutter/pub/rep/10_003.pdf) - [Multiagent Reinforcement Learning, Marc Lanctot](https://rlss.inria.fr/files/2019/07/RLSS_Multiagent.pdf) - [Example of a multi-agent environment](https://www.mathworks.com/help/reinforcement-learning/ug/train-3-agents-for-area-coverage.html?s_eid=PSM_15028) - [A list of different multi-agent environments](https://agents.inf.ed.ac.uk/blog/multiagent-learning-environments/) - [Multi-Agent Reinforcement Learning: Independent vs. Cooperative Agents](https://bit.ly/3nVK7My) - [Dealing with Non-Stationarity in Multi-Agent Deep Reinforcement Learning](https://bit.ly/3v7LxaT) ## Self-Play and MA-POCA - [Self Play Theory and with MLAgents](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents) - [Training complex behavior with MLAgents](https://blog.unity.com/technology/ml-agents-v20-release-now-supports-training-complex-cooperative-behaviors) - [MLAgents plays dodgeball](https://blog.unity.com/technology/ml-agents-plays-dodgeball) - [On the Use and Misuse of Absorbing States in Multi-agent Reinforcement Learning (MA-POCA)](https://arxiv.org/pdf/2111.05992.pdf)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/hands-on.mdx
# Hands on <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit4/unit4.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> Now that we've studied the theory behind Reinforce, **you’re ready to code your Reinforce agent with PyTorch**. And you'll test its robustness using CartPole-v1 and PixelCopter. You'll then be able to iterate and improve this implementation for more advanced environments. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/> </figure> To validate this hands-on for the certification process, you need to push your trained models to the Hub and: - Get a result of >= 350 for `CartPole-v1` - Get a result of >= 5 for `PixelCopter`. To find your result, go to the leaderboard and find your model; **the result = mean_reward - std of reward**. **If you don't see your model on the leaderboard, go to the bottom of the leaderboard page and click on the refresh button.** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course **To start the hands-on, click on the Open In Colab button** 👇 : [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit4/unit4.ipynb) We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers. By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments. # Unit 4: Code your first Deep Reinforcement Learning Algorithm with PyTorch: Reinforce. And test its robustness 💪 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/thumbnail.png" alt="thumbnail"/> In this notebook, you'll code your first Deep Reinforcement Learning algorithm from scratch: Reinforce (also called Monte Carlo Policy Gradient). Reinforce is a *Policy-based method*: a Deep Reinforcement Learning algorithm that tries **to optimize the policy directly without using an action-value function**. More precisely, Reinforce is a *Policy-gradient method*, a subclass of *Policy-based methods* that aims **to optimize the policy directly by estimating the weights of the optimal policy using gradient ascent**.
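As a quick reminder before coding (this is the standard Monte Carlo policy-gradient estimate from the Unit 4 theory, written here in the reward-to-go form that matches the loss we implement below): \\( \nabla_\theta J(\theta) \approx \sum_{t=0}^{T-1} \nabla_\theta \log \pi_\theta(a_t | s_t) G_t \\), where \\( G_t = r_{t+1} + \gamma r_{t+2} + \gamma^2 r_{t+3} + \dots \\) is the discounted return from timestep \\(t\\). In practice, we will minimize \\( -\sum_t \log \pi_\theta(a_t | s_t) G_t \\), which amounts to following this gradient with gradient ascent.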
To test its robustness, we're going to train it in 2 different simple environments: - CartPole-v1 - PixelcopterEnv ⬇️ Here is an example of what **you will achieve at the end of this notebook.** ⬇️ <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/> ### 🎮 Environments: - [CartPole-v1](https://www.gymlibrary.dev/environments/classic_control/cart_pole/) - [PixelCopter](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html) ### 📚 RL-Library: - Python - PyTorch We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). ## Objectives of this notebook 🏆 At the end of the notebook, you will: - Be able to **code a Reinforce algorithm from scratch using PyTorch.** - Be able to **test the robustness of your agent using simple environments.** - Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥. ## Prerequisites 🏗️ Before diving into the notebook, you need to: 🔲 📚 [Study Policy Gradients by reading Unit 4](https://huggingface.co/deep-rl-course/unit4/introduction) # Let's code the Reinforce algorithm from scratch 🔥 ## Some advice 💡 It's better to run this colab in a copy on your Google Drive, so that **if it times out** you still have the saved notebook on your Google Drive and do not need to fill everything in from scratch. To do that you can either do `Ctrl + S` or `File > Save a copy in Google Drive.` ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> ## Create a virtual display 🖥 During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). The following cell will install the libraries and create and run a virtual screen 🖥 ```python %%capture !apt install python-opengl !apt install ffmpeg !apt install xvfb !pip install pyvirtualdisplay !pip install pyglet==1.5.1 ``` ```python # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() ``` ## Install the dependencies 🔽 The first step is to install the dependencies. We’ll install multiple ones: - `gym` - `gym-games`: Extra gym environments made with PyGame. - `huggingface_hub`: The Hub works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations, and other features that will allow you to easily collaborate with others. You may be wondering why we install gym and not gymnasium, a more recent version of gym: **because the gym-games we are using have not yet been updated for gymnasium**. The differences you'll encounter here: - In `gym` we don't have `terminated` and `truncated` but only `done`.
- In `gym` using `env.step()` returns `state, reward, done, info` You can learn more about the differences between Gym and Gymnasium here 👉 https://gymnasium.farama.org/content/migration-guide/ You can see here all the Reinforce models available 👉 https://huggingface.co/models?other=reinforce And you can find all the Deep Reinforcement Learning models here 👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning ```bash !pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit4/requirements-unit4.txt ``` ## Import the packages 📦 In addition to importing the installed libraries, we also import: - `imageio`: A library that will help us to generate a replay video ```python import numpy as np from collections import deque import matplotlib.pyplot as plt %matplotlib inline # PyTorch import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.distributions import Categorical # Gym import gym import gym_pygame # Hugging Face Hub from huggingface_hub import notebook_login # To log to our Hugging Face account to be able to upload models to the Hub. import imageio ``` ## Check if we have a GPU - Let's check if we have a GPU - If it's the case you should see `device:cuda0` ```python device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") ``` ```python print(device) ``` We're now ready to implement our Reinforce algorithm 🔥 # First agent: Playing CartPole-v1 🤖 ## Create the CartPole environment and understand how it works ### [The environment 🎮](https://www.gymlibrary.dev/environments/classic_control/cart_pole/) ### Why do we use a simple environment like CartPole-v1? As explained in [Reinforcement Learning Tips and Tricks](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html), when you implement your agent from scratch, you need **to be sure that it works correctly and find bugs with easy environments before going deeper** as finding bugs will be much easier in simple environments. > Try to have some “sign of life” on toy problems > Validate the implementation by making it run on harder and harder envs (you can compare results against the RL zoo). You usually need to run hyperparameter optimization for that step. ### The CartPole-v1 environment > A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces in the left and right direction on the cart. So, we start with CartPole-v1. The goal is to push the cart left or right **so that the pole stays in the equilibrium.** The episode ends if: - The pole Angle is greater than ±12° - The Cart Position is greater than ±2.4 - The episode length is greater than 500 We get a reward 💰 of +1 every timestep that the Pole stays in the equilibrium. 
```python env_id = "CartPole-v1" # Create the env env = gym.make(env_id) # Create the evaluation env eval_env = gym.make(env_id) # Get the state space and action space s_size = env.observation_space.shape[0] a_size = env.action_space.n ``` ```python print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation ``` ```python print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action ``` ## Let's build the Reinforce Architecture This implementation is based on three implementations: - [PyTorch official Reinforcement Learning example](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py) - [Udacity Reinforce](https://github.com/udacity/deep-reinforcement-learning/blob/master/reinforce/REINFORCE.ipynb) - [Improvement of the integration by Chris1nexus](https://github.com/huggingface/deep-rl-class/pull/95) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/reinforce.png" alt="Reinforce"/> So we want: - Two fully connected layers (fc1 and fc2). - To use ReLU as activation function of fc1 - To use Softmax to output a probability distribution over actions ```python class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() # Create two fully connected layers def forward(self, x): # Define the forward pass # state goes to fc1 then we apply ReLU activation function # fc1 outputs goes to fc2 # We output the softmax def act(self, state): """ Given a state, take action """ state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = np.argmax(m) return action.item(), m.log_prob(action) ``` ### Solution ```python class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = np.argmax(m) return action.item(), m.log_prob(action) ``` I made a mistake, can you guess where? - To find out let's make a forward pass: ```python debug_policy = Policy(s_size, a_size, 64).to(device) debug_policy.act(env.reset()) ``` - Here we see that the error says `ValueError: The value argument to log_prob must be a Tensor` - It means that `action` in `m.log_prob(action)` must be a Tensor **but it's not.** - Do you know why? Check the act function and try to see why it does not work. Advice 💡: Something is wrong in this implementation. Remember that for the act function **we want to sample an action from the probability distribution over actions**. 
### (Real) Solution ```python class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action) ``` By using CartPole, it was easier to debug since **we know that the bug comes from our integration and not from our simple environment**. - Since **we want to sample an action from the probability distribution over actions**, we can't use `action = np.argmax(m)` since it will always output the action that has the highest probability. - We need to replace this with `action = m.sample()`, which will sample an action from the probability distribution P(.|s). ### Let's build the Reinforce Training Algorithm This is the Reinforce algorithm pseudocode: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/pg_pseudocode.png" alt="Policy gradient pseudocode"/> - When we calculate the return Gt (line 6), we see that we calculate the sum of discounted rewards **starting at timestep t**. - Why? Because our policy should only **reinforce actions on the basis of their consequences**: rewards obtained before taking an action are useless (since they were not caused by the action), **only the ones that come after the action matter**. - Before coding this you should read this section [don't let the past distract you](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#don-t-let-the-past-distract-you) that explains why we use the reward-to-go policy gradient. We use an interesting technique coded by [Chris1nexus](https://github.com/Chris1nexus) to **compute the return at each timestep efficiently**. The comments explain the procedure. Don't hesitate to also [check the PR explanation](https://github.com/huggingface/deep-rl-class/pull/95). But overall the idea is to **compute the return at each timestep efficiently**. The second question you may ask is **why do we minimize the loss**? Didn't we talk about Gradient Ascent, not Gradient Descent earlier? - We want to maximize our utility function $J(\theta)$, but in PyTorch and TensorFlow, it's better to **minimize an objective function.** - So let's say we want to reinforce action 3 at a certain timestep. Before training, the probability of this action is 0.25. - So we want to modify \\(\theta\\) such that \\(\pi_\theta(a_3|s; \theta) > 0.25 \\) - Because all probabilities must sum to 1, maximizing \\(\pi_\theta(a_3|s; \theta)\\) will **reduce the probabilities of the other actions.** - So we should tell PyTorch **to minimize \\(1 - \pi_\theta(a_3|s; \theta)\\).** - This loss function approaches 0 as \\(\pi_\theta(a_3|s; \theta)\\) nears 1.
- So we are encouraging the gradient to max \\(\pi_\theta(a_3|s; \theta)\\) ```python def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every): # Help us to calculate the score during the training scores_deque = deque(maxlen=100) scores = [] # Line 3 of pseudocode for i_episode in range(1, n_training_episodes+1): saved_log_probs = [] rewards = [] state = # TODO: reset the environment # Line 4 of pseudocode for t in range(max_t): action, log_prob = # TODO get the action saved_log_probs.append(log_prob) state, reward, done, _ = # TODO: take an env step rewards.append(reward) if done: break scores_deque.append(sum(rewards)) scores.append(sum(rewards)) # Line 6 of pseudocode: calculate the return returns = deque(maxlen=max_t) n_steps = len(rewards) # Compute the discounted returns at each timestep, # as the sum of the gamma-discounted return at time t (G_t) + the reward at time t # In O(N) time, where N is the number of time steps # (this definition of the discounted return G_t follows the definition of this quantity # shown at page 44 of Sutton&Barto 2017 2nd draft) # G_t = r_(t+1) + r_(t+2) + ... # Given this formulation, the returns at each timestep t can be computed # by re-using the computed future returns G_(t+1) to compute the current return G_t # G_t = r_(t+1) + gamma*G_(t+1) # G_(t-1) = r_t + gamma* G_t # (this follows a dynamic programming approach, with which we memorize solutions in order # to avoid computing them multiple times) # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft) # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ... ## Given the above, we calculate the returns at timestep t as: # gamma[t] * return[t] + reward[t] # ## We compute this starting from the last timestep to the first, in order ## to employ the formula presented above and avoid redundant computations that would be needed ## if we were to do it from first to last. ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps ## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1) ## a normal python list would instead require O(N) to do this. 
for t in range(n_steps)[::-1]: disc_return_t = (returns[0] if len(returns)>0 else 0) returns.appendleft( ) # TODO: complete here ## standardization of the returns is employed to make training more stable eps = np.finfo(np.float32).eps.item() ## eps is the smallest representable float, which is # added to the standard deviation of the returns to avoid numerical instabilities returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) # Line 7: policy_loss = [] for log_prob, disc_return in zip(saved_log_probs, returns): policy_loss.append(-log_prob * disc_return) policy_loss = torch.cat(policy_loss).sum() # Line 8: PyTorch prefers gradient descent optimizer.zero_grad() policy_loss.backward() optimizer.step() if i_episode % print_every == 0: print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) return scores ``` #### Solution ```python def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every): # Help us to calculate the score during the training scores_deque = deque(maxlen=100) scores = [] # Line 3 of pseudocode for i_episode in range(1, n_training_episodes + 1): saved_log_probs = [] rewards = [] state = env.reset() # Line 4 of pseudocode for t in range(max_t): action, log_prob = policy.act(state) saved_log_probs.append(log_prob) state, reward, done, _ = env.step(action) rewards.append(reward) if done: break scores_deque.append(sum(rewards)) scores.append(sum(rewards)) # Line 6 of pseudocode: calculate the return returns = deque(maxlen=max_t) n_steps = len(rewards) # Compute the discounted returns at each timestep, # as # the sum of the gamma-discounted return at time t (G_t) + the reward at time t # # In O(N) time, where N is the number of time steps # (this definition of the discounted return G_t follows the definition of this quantity # shown at page 44 of Sutton&Barto 2017 2nd draft) # G_t = r_(t+1) + r_(t+2) + ... # Given this formulation, the returns at each timestep t can be computed # by re-using the computed future returns G_(t+1) to compute the current return G_t # G_t = r_(t+1) + gamma*G_(t+1) # G_(t-1) = r_t + gamma* G_t # (this follows a dynamic programming approach, with which we memorize solutions in order # to avoid computing them multiple times) # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft) # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ... ## Given the above, we calculate the returns at timestep t as: # gamma[t] * return[t] + reward[t] # ## We compute this starting from the last timestep to the first, in order ## to employ the formula presented above and avoid redundant computations that would be needed ## if we were to do it from first to last. ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps ## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1) ## a normal python list would instead require O(N) to do this. 
for t in range(n_steps)[::-1]: disc_return_t = returns[0] if len(returns) > 0 else 0 returns.appendleft(gamma * disc_return_t + rewards[t]) ## standardization of the returns is employed to make training more stable eps = np.finfo(np.float32).eps.item() ## eps is the smallest representable float, which is # added to the standard deviation of the returns to avoid numerical instabilities returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) # Line 7: policy_loss = [] for log_prob, disc_return in zip(saved_log_probs, returns): policy_loss.append(-log_prob * disc_return) policy_loss = torch.cat(policy_loss).sum() # Line 8: PyTorch prefers gradient descent optimizer.zero_grad() policy_loss.backward() optimizer.step() if i_episode % print_every == 0: print("Episode {}\tAverage Score: {:.2f}".format(i_episode, np.mean(scores_deque))) return scores ``` ## Train it - We're now ready to train our agent. - But first, we define a variable containing all the training hyperparameters. - You can change the training parameters (and should 😉) ```python cartpole_hyperparameters = { "h_size": 16, "n_training_episodes": 1000, "n_evaluation_episodes": 10, "max_t": 1000, "gamma": 1.0, "lr": 1e-2, "env_id": env_id, "state_space": s_size, "action_space": a_size, } ``` ```python # Create policy and place it to the device cartpole_policy = Policy( cartpole_hyperparameters["state_space"], cartpole_hyperparameters["action_space"], cartpole_hyperparameters["h_size"], ).to(device) cartpole_optimizer = optim.Adam(cartpole_policy.parameters(), lr=cartpole_hyperparameters["lr"]) ``` ```python scores = reinforce( cartpole_policy, cartpole_optimizer, cartpole_hyperparameters["n_training_episodes"], cartpole_hyperparameters["max_t"], cartpole_hyperparameters["gamma"], 100, ) ``` ## Define evaluation method 📝 - Here we define the evaluation method that we're going to use to test our Reinforce agent. ```python def evaluate_agent(env, max_steps, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. :param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The Reinforce agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 for step in range(max_steps): action, _ = policy.act(state) new_state, reward, done, info = env.step(action) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward ``` ## Evaluate our agent 📈 ```python evaluate_agent( eval_env, cartpole_hyperparameters["max_t"], cartpole_hyperparameters["n_evaluation_episodes"], cartpole_policy ) ``` ### Publish our trained model on the Hub 🔥 Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code. 
Here's an example of a Model Card: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/modelcard.png"/> ### Push to the Hub #### Do not modify this code ```python from huggingface_hub import HfApi, snapshot_download from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import json import imageio import tempfile import os ``` ```python def record_video(env, policy, out_directory, fps=30): """ Generate a replay video of the agent :param env :param Qtable: Qtable of our agent :param out_directory :param fps: how many frame per seconds (with taxi-v3 and frozenlake-v1 we use 1) """ images = [] done = False state = env.reset() img = env.render(mode="rgb_array") images.append(img) while not done: # Take the action (index) that have the maximum expected future reward given that state action, _ = policy.act(state) state, reward, done, info = env.step(action) # We directly put next_state = state for recording logic img = env.render(mode="rgb_array") images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) ``` ```python def push_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30 ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the Hub :param repo_id: repo_id: id of the model repository from the Hugging Face Hub :param model: the pytorch model we want to save :param hyperparameters: training hyperparameters :param eval_env: evaluation environment :param video_fps: how many frame per seconds to record our video replay """ _, repo_name = repo_id.split("/") api = HfApi() # Step 1: Create the repo repo_url = api.create_repo( repo_id=repo_id, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: local_directory = Path(tmpdirname) # Step 2: Save the model torch.save(model, local_directory / "model.pt") # Step 3: Save the hyperparameters to JSON with open(local_directory / "hyperparameters.json", "w") as outfile: json.dump(hyperparameters, outfile) # Step 4: Evaluate the model and build JSON mean_reward, std_reward = evaluate_agent(eval_env, hyperparameters["max_t"], hyperparameters["n_evaluation_episodes"], model) # Get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters["env_id"], "mean_reward": mean_reward, "n_evaluation_episodes": hyperparameters["n_evaluation_episodes"], "eval_datetime": eval_form_datetime, } # Write a JSON file with open(local_directory / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 5: Create the model card env_name = hyperparameters["env_id"] metadata = {} metadata["tags"] = [ env_name, "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class" ] # Add metrics eval = metadata_eval_result( model_pretty_name=repo_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_name, dataset_id=env_name, ) # Merges both dictionaries metadata = {**metadata, **eval} model_card = f""" # **Reinforce** Agent playing **{env_id}** This is a trained model of a **Reinforce** agent playing **{env_id}** . 
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction """ readme_path = local_directory / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) # Step 6: Record a video video_path = local_directory / "replay.mp4" record_video(env, model, video_path, video_fps) # Step 7. Push everything to the Hub api.upload_folder( repo_id=repo_id, folder_path=local_directory, path_in_repo=".", ) print(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") ``` By using `push_to_hub`, **you evaluate, record a replay, generate a model card of your agent, and push it to the Hub**. This way: - You can **showcase our work** 🔥 - You can **visualize your agent playing** 👀 - You can **share an agent with the community that others can use** 💾 - You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> ```python notebook_login() ``` If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function ```python repo_id = "" # TODO Define your repo id {username/Reinforce-{model-id}} push_to_hub( repo_id, cartpole_policy, # The model we want to save cartpole_hyperparameters, # Hyperparameters eval_env, # Evaluation environment video_fps=30 ) ``` Now that we tested the robustness of our implementation, let's try a more complex environment: PixelCopter 🚁 ## Second agent: PixelCopter 🚁 ### Study the PixelCopter environment 👀 - [The Environment documentation](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html) ```python env_id = "Pixelcopter-PLE-v0" env = gym.make(env_id) eval_env = gym.make(env_id) s_size = env.observation_space.shape[0] a_size = env.action_space.n ``` ```python print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation ``` ```python print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action ``` The observation space (7) 👀: - player y position - player velocity - player distance to floor - player distance to ceiling - next block x distance to player - next blocks top y location - next blocks bottom y location The action space(2) 🎮: - Up (press accelerator) - Do nothing (don't press accelerator) The reward function 💰: - For each vertical block it passes, it gains a positive reward of +1. 
Each time a terminal state is reached it receives a negative reward of -1. ### Define the new Policy 🧠 - We need to have a deeper neural network since the environment is more complex ```python class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() # Define the three layers here def forward(self, x): # Define the forward process here return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action) ``` #### Solution ```python class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, h_size * 2) self.fc3 = nn.Linear(h_size * 2, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action) ``` ### Define the hyperparameters ⚙️ - Because this environment is more complex. - Especially for the hidden size, we need more neurons. ```python pixelcopter_hyperparameters = { "h_size": 64, "n_training_episodes": 50000, "n_evaluation_episodes": 10, "max_t": 10000, "gamma": 0.99, "lr": 1e-4, "env_id": env_id, "state_space": s_size, "action_space": a_size, } ``` ### Train it - We're now ready to train our agent 🔥. ```python # Create policy and place it to the device # torch.manual_seed(50) pixelcopter_policy = Policy( pixelcopter_hyperparameters["state_space"], pixelcopter_hyperparameters["action_space"], pixelcopter_hyperparameters["h_size"], ).to(device) pixelcopter_optimizer = optim.Adam(pixelcopter_policy.parameters(), lr=pixelcopter_hyperparameters["lr"]) ``` ```python scores = reinforce( pixelcopter_policy, pixelcopter_optimizer, pixelcopter_hyperparameters["n_training_episodes"], pixelcopter_hyperparameters["max_t"], pixelcopter_hyperparameters["gamma"], 1000, ) ``` ### Publish our trained model on the Hub 🔥 ```python repo_id = "" # TODO Define your repo id {username/Reinforce-{model-id}} push_to_hub( repo_id, pixelcopter_policy, # The model we want to save pixelcopter_hyperparameters, # Hyperparameters eval_env, # Evaluation environment video_fps=30 ) ``` ## Some additional challenges 🏆 The best way to learn **is to try things on your own**! As you saw, the current agent is not doing great. As a first suggestion, you can train for more steps. But also try to find better parameters. In the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) you will find your agents. Can you get to the top? Here are some ideas to climb up the leaderboard: * Train more steps * Try different hyperparameters by looking at what your classmates have done 👉 https://huggingface.co/models?other=reinforce * **Push your new trained model** on the Hub 🔥 * **Improving the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observation)? ________________________________________________________________________ **Congrats on finishing this unit**! There was a lot of information. And congrats on finishing the tutorial. 
You've just coded your first Deep Reinforcement Learning agent from scratch using PyTorch and shared it on the Hub 🥳. Don't hesitate to iterate on this unit **by improving the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observation)? In the next unit, **we're going to learn more about Unity MLAgents**, by training agents in Unity environments. This way, you will be ready to participate in the **AI vs AI challenges where you'll train your agents to compete against other agents in a snowball fight and a soccer game.** Sound fun? See you next time! Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) See you in Unit 5! 🔥 ### Keep Learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/advantages-disadvantages.mdx
# The advantages and disadvantages of policy-gradient methods At this point, you might ask, "but Deep Q-Learning is excellent! Why use policy-gradient methods?". To answer this question, let's study the **advantages and disadvantages of policy-gradient methods**. ## Advantages There are multiple advantages over value-based methods. Let's see some of them: ### The simplicity of integration We can estimate the policy directly without storing additional data (action values). ### Policy-gradient methods can learn a stochastic policy Policy-gradient methods can **learn a stochastic policy while value functions can't**. This has two consequences: 1. We **don't need to implement an exploration/exploitation trade-off by hand**. Since we output a probability distribution over actions, the agent explores **the state space without always taking the same trajectory.** 2. We also get rid of the problem of **perceptual aliasing**. Perceptual aliasing is when two states seem (or are) the same but need different actions. Let's take an example: we have an intelligent vacuum cleaner whose goal is to suck the dust and avoid killing the hamsters. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster1.jpg" alt="Hamster 1"/> </figure> Our vacuum cleaner can only perceive where the walls are. The problem is that the **two red (colored) states are aliased states because the agent perceives an upper and lower wall for each**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster2.jpg" alt="Hamster 1"/> </figure> Under a deterministic policy, the policy will either always move right when in a red state or always move left. **Either case will cause our agent to get stuck and never suck the dust**. Under a value-based Reinforcement learning algorithm, we learn a **quasi-deterministic policy** ("greedy epsilon strategy"). Consequently, our agent can **spend a lot of time before finding the dust**. On the other hand, an optimal stochastic policy **will randomly move left or right in red (colored) states**. Consequently, **it will not be stuck and will reach the goal state with a high probability**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster3.jpg" alt="Hamster 1"/> </figure> ### Policy-gradient methods are more effective in high-dimensional action spaces and continuous actions spaces The problem with Deep Q-learning is that their **predictions assign a score (maximum expected future reward) for each possible action**, at each time step, given the current state. But what if we have an infinite possibility of actions? For instance, with a self-driving car, at each state, you can have a (near) infinite choice of actions (turning the wheel at 15°, 17.2°, 19,4°, honking, etc.). **We'll need to output a Q-value for each possible action**! And **taking the max action of a continuous output is an optimization problem itself**! Instead, with policy-gradient methods, we output a **probability distribution over actions.** ### Policy-gradient methods have better convergence properties In value-based methods, we use an aggressive operator to **change the value function: we take the maximum over Q-estimates**. 
Consequently, the action probabilities may change dramatically for an arbitrarily small change in the estimated action values, if that change results in a different action having the maximal value. For instance, if during training the best action was left (with a Q-value of 0.22) and, after one training step, it becomes right (because the right Q-value becomes 0.23), we have dramatically changed the policy: it will now pick right most of the time instead of left. On the other hand, in policy-gradient methods, stochastic policy action preferences (the probability of taking each action) **change smoothly over time** (see the small numerical sketch at the end of this page). ## Disadvantages Naturally, policy-gradient methods also have some disadvantages: - **Frequently, policy-gradient methods converge to a local maximum instead of the global optimum.** - Policy-gradient methods train more slowly, **step by step: they can take longer to train (they are less efficient).** - Policy-gradient methods can have high variance. We'll see in the actor-critic unit why, and how we can solve this problem. 👉 If you want to go deeper into the advantages and disadvantages of policy-gradient methods, [you can check this video](https://youtu.be/y3oqOjHilio).
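To make the "abrupt vs. smooth" contrast above concrete, here is a tiny illustrative sketch (not part of the course code; the Q-values are made up) comparing a greedy argmax choice with a softmax over the same action preferences:

```python
import numpy as np

def softmax(x):
    z = np.asarray(x, dtype=float)
    z = z - z.max()  # numerical stability
    e = np.exp(z)
    return e / e.sum()

# Estimated action values before and after one small update (toy numbers)
q_before = np.array([0.22, 0.21])  # [left, right]
q_after = np.array([0.22, 0.23])

# Value-based greedy choice: the selected action flips completely
print("greedy before:", np.argmax(q_before))  # 0 (left)
print("greedy after :", np.argmax(q_after))   # 1 (right)

# Softmax over action preferences: probabilities move only slightly
print("softmax before:", softmax(q_before))   # ~[0.5025, 0.4975]
print("softmax after :", softmax(q_after))    # ~[0.4975, 0.5025]
```

A 0.01 change in the estimates flips the greedy action entirely, while the softmax probabilities barely move.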
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/quiz.mdx
# Quiz The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: What are the advantages of policy-gradient over value-based methods? (Check all that apply) <Question choices={[ { text: "Policy-gradient methods can learn a stochastic policy", explain: "", correct: true, }, { text: "Policy-gradient methods are more effective in high-dimensional action spaces and continuous actions spaces", explain: "", correct: true, }, { text: "Policy-gradient converges most of the time on a global maximum.", explain: "No, frequently, policy-gradient converges on a local maximum instead of a global optimum.", }, ]} /> ### Q2: What is the Policy Gradient Theorem? <details> <summary>Solution</summary> *The Policy Gradient Theorem* is a formula that will help us to reformulate the objective function into a differentiable function that does not involve the differentiation of the state distribution. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_gradient_theorem.png" alt="Policy Gradient"/> </details> ### Q3: What's the difference between policy-based methods and policy-gradient methods? (Check all that apply) <Question choices={[ { text: "Policy-based methods are a subset of policy-gradient methods.", explain: "", }, { text: "Policy-gradient methods are a subset of policy-based methods.", explain: "", correct: true, }, { text: "In Policy-based methods, we can optimize the parameter θ **indirectly** by maximizing the local approximation of the objective function with techniques like hill climbing, simulated annealing, or evolution strategies.", explain: "", correct: true, }, { text: "In Policy-gradient methods, we optimize the parameter θ **directly** by performing the gradient ascent on the performance of the objective function.", explain: "", correct: true, }, ]} /> ### Q4: Why do we use gradient ascent instead of gradient descent to optimize J(θ)? <Question choices={[ { text: "We want to minimize J(θ) and gradient ascent gives us the gives the direction of the steepest increase of J(θ)", explain: "", }, { text: "We want to maximize J(θ) and gradient ascent gives us the gives the direction of the steepest increase of J(θ)", explain: "", correct: true }, ]} /> Congrats on finishing this Quiz 🥳, if you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/introduction.mdx
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/thumbnail.png" alt="thumbnail"/> In the last unit, we learned about Deep Q-Learning. In this value-based deep reinforcement learning algorithm, we **used a deep neural network to approximate the different Q-values for each possible action at a state.** Since the beginning of the course, we have only studied value-based methods, **where we estimate a value function as an intermediate step towards finding an optimal policy.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" /> In value-based methods, the policy ** \(π\) only exists because of the action value estimates since the policy is just a function** (for instance, greedy-policy) that will select the action with the highest value given a state. With policy-based methods, we want to optimize the policy directly **without having an intermediate step of learning a value function.** So today, **we'll learn about policy-based methods and study a subset of these methods called policy gradient**. Then we'll implement our first policy gradient algorithm called Monte Carlo **Reinforce** from scratch using PyTorch. Then, we'll test its robustness using the CartPole-v1 and PixelCopter environments. You'll then be able to iterate and improve this implementation for more advanced environments. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/> </figure> Let's get started!
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/pg-theorem.mdx
# (Optional) the Policy Gradient Theorem In this optional section, we're **going to study how we differentiate the objective function that we will use to approximate the policy gradient**. Let's first recap our different formulas: 1. The Objective function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/expected_reward.png" alt="Return"/> 2. The probability of a trajectory (given that action comes from \\(\pi_\theta\\)): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/probability.png" alt="Probability"/> So we have: \\(\nabla_\theta J(\theta) = \nabla_\theta \sum_{\tau}P(\tau;\theta)R(\tau)\\) We can rewrite the gradient of the sum as the sum of the gradients: \\( = \sum_{\tau} \nabla_\theta P(\tau;\theta)R(\tau) \\) We then multiply every term in the sum by \\(\frac{P(\tau;\theta)}{P(\tau;\theta)}\\) (which is possible since it equals 1) \\( = \sum_{\tau} \frac{P(\tau;\theta)}{P(\tau;\theta)}\nabla_\theta P(\tau;\theta)R(\tau) \\) We can simplify this further since \\( \frac{P(\tau;\theta)}{P(\tau;\theta)}\nabla_\theta P(\tau;\theta) = P(\tau;\theta)\frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)} \\) \\(= \sum_{\tau} P(\tau;\theta) \frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)}R(\tau) \\) We can then use the *derivative log trick* (also called the *likelihood ratio trick* or *REINFORCE trick*), a simple rule in calculus that implies that \\( \nabla_x log f(x) = \frac{\nabla_x f(x)}{f(x)} \\) So given that we have \\(\frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)} \\), we transform it into \\(\nabla_\theta log P(\tau;\theta) \\) So this is our likelihood ratio policy gradient: \\( \nabla_\theta J(\theta) = \sum_{\tau} P(\tau;\theta) \nabla_\theta log P(\tau;\theta) R(\tau) \\) Thanks to this new formula, we can estimate the gradient using trajectory samples (in other words, we approximate the likelihood ratio policy gradient with a sample-based estimate). \\(\nabla_\theta J(\theta) = \frac{1}{m} \sum^{m}_{i=1} \nabla_\theta log P(\tau^{(i)};\theta)R(\tau^{(i)})\\) where each \\(\tau^{(i)}\\) is a sampled trajectory. But we still have some mathematical work to do: we need to simplify \\( \nabla_\theta log P(\tau;\theta) \\) We know that: \\(\nabla_\theta log P(\tau^{(i)};\theta)= \nabla_\theta log[ \mu(s_0) \prod_{t=0}^{H} P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)})]\\) Where \\(\mu(s_0)\\) is the initial state distribution and \\( P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) \\) is the state transition dynamics of the MDP. We know that the log of a product is equal to the sum of the logs: \\(\nabla_\theta log P(\tau^{(i)};\theta)= \nabla_\theta \left[log \mu(s_0) + \sum\limits_{t=0}^{H}log P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) + \sum\limits_{t=0}^{H}log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)})\right] \\) We also know that the gradient of the sum is equal to the sum of the gradients: \\( \nabla_\theta log P(\tau^{(i)};\theta)=\nabla_\theta log\mu(s_0) + \nabla_\theta \sum\limits_{t=0}^{H} log P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) + \nabla_\theta \sum\limits_{t=0}^{H} log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)}) \\) Since neither the initial state distribution nor the state transition dynamics of the MDP depend on \\(\theta\\), the derivatives of both terms are 0.
So we can remove them: Since \\(\nabla_\theta \sum_{t=0}^{H} log P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) = 0 \\) and \\( \nabla_\theta log \mu(s_0) = 0\\), we get: \\(\nabla_\theta log P(\tau^{(i)};\theta) = \nabla_\theta \sum_{t=0}^{H} log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)})\\) We can rewrite the gradient of the sum as the sum of gradients: \\( \nabla_\theta log P(\tau^{(i)};\theta)= \sum_{t=0}^{H} \nabla_\theta log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)}) \\) So, the final formula for estimating the policy gradient is: \\( \nabla_{\theta} J(\theta) = \hat{g} = \frac{1}{m} \sum^{m}_{i=1} \sum^{H}_{t=0} \nabla_\theta \log \pi_\theta(a^{(i)}_{t} | s_{t}^{(i)})R(\tau^{(i)}) \\)
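As a quick sanity check of the *derivative log trick* used above, here is a small, hedged PyTorch sketch (not part of the official course code; the tiny softmax "policy" is a made-up toy) that numerically verifies \\( \nabla_\theta \log \pi_\theta(a) = \frac{\nabla_\theta \pi_\theta(a)}{\pi_\theta(a)} \\):

```python
import torch

torch.manual_seed(0)

# A tiny softmax "policy" over 3 actions, parameterized directly by theta (toy example)
theta = torch.randn(3, requires_grad=True)

def pi(theta):
    return torch.softmax(theta, dim=0)

action = 1  # an arbitrary action index

# Left-hand side: gradient of log pi_theta(a) with respect to theta
log_prob = torch.log(pi(theta)[action])
grad_log = torch.autograd.grad(log_prob, theta)[0]

# Right-hand side: gradient of pi_theta(a), divided by pi_theta(a)
prob = pi(theta)[action]
grad_prob = torch.autograd.grad(prob, theta)[0]

print(torch.allclose(grad_log, grad_prob / prob.detach(), atol=1e-6))  # True
```

The two gradients agree up to numerical precision, which is exactly what lets us swap \\(\frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)}\\) for \\(\nabla_\theta \log P(\tau;\theta)\\) in the derivation.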
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/policy-gradient.mdx
# Diving deeper into policy-gradient methods ## Getting the big picture We just learned that policy-gradient methods aim to find parameters \\( \theta \\) that **maximize the expected return**. The idea is that we have a *parameterized stochastic policy*. In our case, a neural network outputs a probability distribution over actions. The probability of taking each action is also called the *action preference*. If we take the example of CartPole-v1: - As input, we have a state. - As output, we have a probability distribution over actions at that state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_based.png" alt="Policy based" /> Our goal with policy-gradient is to **control the probability distribution of actions** by tuning the policy such that **good actions (that maximize the return) are sampled more frequently in the future.** Each time the agent interacts with the environment, we tweak the parameters such that good actions will be sampled more likely in the future. But **how are we going to optimize the weights using the expected return**? The idea is that we're going to **let the agent interact during an episode**. And if we win the episode, we consider that each action taken was good and must be more sampled in the future since they lead to win. So for each state-action pair, we want to increase the \\(P(a|s)\\): the probability of taking that action at that state. Or decrease if we lost. The Policy-gradient algorithm (simplified) looks like this: <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/pg_bigpicture.jpg" alt="Policy Gradient Big Picture"/> </figure> Now that we got the big picture, let's dive deeper into policy-gradient methods. ## Diving deeper into policy-gradient methods We have our stochastic policy \\(\pi\\) which has a parameter \\(\theta\\). This \\(\pi\\), given a state, **outputs a probability distribution of actions**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/stochastic_policy.png" alt="Policy"/> </figure> Where \\(\pi_\theta(a_t|s_t)\\) is the probability of the agent selecting action \\(a_t\\) from state \\(s_t\\) given our policy. **But how do we know if our policy is good?** We need to have a way to measure it. To know that, we define a score/objective function called \\(J(\theta)\\). ### The objective function The *objective function* gives us the **performance of the agent** given a trajectory (state action sequence without considering reward (contrary to an episode)), and it outputs the *expected cumulative reward*. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/objective.jpg" alt="Return"/> Let's give some more details on this formula: - The *expected return* (also called expected cumulative reward), is the weighted average (where the weights are given by \\(P(\tau;\theta)\\) of all possible values that the return \\(R(\tau)\\) can take). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/expected_reward.png" alt="Return"/> - \\(R(\tau)\\) : Return from an arbitrary trajectory. To take this quantity and use it to calculate the expected return, we need to multiply it by the probability of each possible trajectory. 
- \\(P(\tau;\theta)\\) : Probability of each possible trajectory \\(\tau\\) (that probability depends on \\( \theta\\) since it defines the policy that it uses to select the actions of the trajectory which has an impact of the states visited). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/probability.png" alt="Probability"/> - \\(J(\theta)\\) : Expected return, we calculate it by summing for all trajectories, the probability of taking that trajectory given \\(\theta \\) multiplied by the return of this trajectory. Our objective then is to maximize the expected cumulative reward by finding the \\(\theta \\) that will output the best action probability distributions: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/max_objective.png" alt="Max objective"/> ## Gradient Ascent and the Policy-gradient Theorem Policy-gradient is an optimization problem: we want to find the values of \\(\theta\\) that maximize our objective function \\(J(\theta)\\), so we need to use **gradient-ascent**. It's the inverse of *gradient-descent* since it gives the direction of the steepest increase of \\(J(\theta)\\). (If you need a refresher on the difference between gradient descent and gradient ascent [check this](https://www.baeldung.com/cs/gradient-descent-vs-ascent) and [this](https://stats.stackexchange.com/questions/258721/gradient-ascent-vs-gradient-descent-in-logistic-regression)). Our update step for gradient-ascent is: \\( \theta \leftarrow \theta + \alpha * \nabla_\theta J(\theta) \\) We can repeatedly apply this update in the hopes that \\(\theta \\) converges to the value that maximizes \\(J(\theta)\\). However, there are two problems with computing the derivative of \\(J(\theta)\\): 1. We can't calculate the true gradient of the objective function since it requires calculating the probability of each possible trajectory, which is computationally super expensive. So we want to **calculate a gradient estimation with a sample-based estimate (collect some trajectories)**. 2. We have another problem that I explain in the next optional section. To differentiate this objective function, we need to differentiate the state distribution, called the Markov Decision Process dynamics. This is attached to the environment. It gives us the probability of the environment going into the next state, given the current state and the action taken by the agent. The problem is that we can't differentiate it because we might not know about it. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/probability.png" alt="Probability"/> Fortunately we're going to use a solution called the Policy Gradient Theorem that will help us to reformulate the objective function into a differentiable function that does not involve the differentiation of the state distribution. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_gradient_theorem.png" alt="Policy Gradient"/> If you want to understand how we derive this formula for approximating the gradient, check out the next (optional) section. 
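Before moving on, a quick practical note on the gradient-ascent update \\( \theta \leftarrow \theta + \alpha \nabla_\theta J(\theta) \\) introduced above: deep learning libraries implement gradient *descent*, so in practice we ascend \\(J(\theta)\\) by descending \\(-J(\theta)\\) (this is exactly what the hands-on does with its `-log_prob * disc_return` loss). Here is a tiny, hedged sketch with a made-up stand-in objective, just to show the mechanics:

```python
import torch

# Toy policy parameters (hypothetical example)
theta = torch.randn(4, requires_grad=True)
optimizer = torch.optim.SGD([theta], lr=1e-2)

# Stand-in for a differentiable, sample-based estimate of J(theta)
objective = -(theta ** 2).sum()   # this toy objective is maximal at theta = 0

loss = -objective                 # descending -J(theta) == ascending J(theta)
optimizer.zero_grad()
loss.backward()
optimizer.step()                  # theta <- theta + lr * grad_theta J(theta)
```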
## The Reinforce algorithm (Monte Carlo Reinforce) The Reinforce algorithm, also called Monte-Carlo policy-gradient, is a policy-gradient algorithm that **uses an estimated return from an entire episode to update the policy parameter** \\(\theta\\): In a loop: - Use the policy \\(\pi_\theta\\) to collect an episode \\(\tau\\) - Use the episode to estimate the gradient \\(\hat{g} = \nabla_\theta J(\theta)\\) <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_gradient_one.png" alt="Policy Gradient"/> </figure> - Update the weights of the policy: \\(\theta \leftarrow \theta + \alpha \hat{g}\\) We can interpret this update as follows: - \\(\nabla_\theta log \pi_\theta(a_t|s_t)\\) is the direction of **steepest increase of the (log) probability** of selecting action \\(a_t\\) from state \\(s_t\\). This tells us **how we should change the weights of the policy** if we want to increase/decrease the log probability of selecting action \\(a_t\\) at state \\(s_t\\). - \\(R(\tau)\\) is the scoring function: - If the return is high, it will **push up the probabilities** of the (state, action) combinations. - Otherwise, if the return is low, it will **push down the probabilities** of the (state, action) combinations. We can also **collect multiple episodes (trajectories)** to estimate the gradient: <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_gradient_multiple.png" alt="Policy Gradient"/> </figure> A minimal code sketch of this batched estimation is shown below.
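The following is a small, hedged sketch of one Reinforce update computed from several collected episodes. It assumes a `policy` with an `act(state)` method returning `(action, log_prob)` as in the hands-on notebook, and a classic Gym-style `env`; it uses the whole-trajectory return \\(R(\tau)\\) that matches the formula above (the hands-on notebook uses the per-timestep discounted-return variant instead). It is an illustration, not the course's reference implementation.

```python
import torch

def reinforce_update(policy, optimizer, env, n_episodes=4, gamma=0.99, max_t=1000):
    """One policy-gradient update estimated from a batch of episodes (illustrative sketch)."""
    batch_loss = []
    for _ in range(n_episodes):
        log_probs, rewards = [], []
        state = env.reset()
        for _ in range(max_t):
            action, log_prob = policy.act(state)       # sample a_t ~ pi_theta(.|s_t)
            state, reward, done, _ = env.step(action)  # classic Gym API, as in the hands-on
            log_probs.append(log_prob)
            rewards.append(reward)
            if done:
                break
        # Discounted return of the whole episode: R(tau) = r_1 + gamma*r_2 + ...
        R = sum(gamma ** t * r for t, r in enumerate(rewards))
        # -sum_t log pi(a_t|s_t) * R(tau): descending this loss ascends J(theta)
        batch_loss.append(-torch.cat(log_probs).sum() * R)

    loss = torch.stack(batch_loss).mean()  # average over the m collected trajectories
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```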
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/what-are-policy-based-methods.mdx
# What are the policy-based methods? The main goal of Reinforcement learning is to **find the optimal policy \\(\pi^{*}\\) that will maximize the expected cumulative reward**. Because Reinforcement Learning is based on the *reward hypothesis*: **all goals can be described as the maximization of the expected cumulative reward.** For instance, in a soccer game (where you're going to train the agents in two units), the goal is to win the game. We can describe this goal in reinforcement learning as **maximizing the number of goals scored** (when the ball crosses the goal line) into your opponent's soccer goals. And **minimizing the number of goals in your soccer goals**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/soccer.jpg" alt="Soccer" /> ## Value-based, Policy-based, and Actor-critic methods In the first unit, we saw two methods to find (or, most of the time, approximate) this optimal policy \\(\pi^{*}\\). - In *value-based methods*, we learn a value function. - The idea is that an optimal value function leads to an optimal policy \\(\pi^{*}\\). - Our objective is to **minimize the loss between the predicted and target value** to approximate the true action-value function. - We have a policy, but it's implicit since it **is generated directly from the value function**. For instance, in Q-Learning, we used an (epsilon-)greedy policy. - On the other hand, in *policy-based methods*, we directly learn to approximate \\(\pi^{*}\\) without having to learn a value function. - The idea is **to parameterize the policy**. For instance, using a neural network \\(\pi_\theta\\), this policy will output a probability distribution over actions (stochastic policy). - <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/stochastic_policy.png" alt="stochastic policy" /> - Our objective then is **to maximize the performance of the parameterized policy using gradient ascent**. - To do that, we control the parameter \\(\theta\\) that will affect the distribution of actions over a state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/policy_based.png" alt="Policy based" /> - Next time, we'll study the *actor-critic* method, which is a combination of value-based and policy-based methods. Consequently, thanks to policy-based methods, we can directly optimize our policy \\(\pi_\theta\\) to output a probability distribution over actions \\(\pi_\theta(a|s)\\) that leads to the best cumulative return. To do that, we define an objective function \\(J(\theta)\\), that is, the expected cumulative reward, and we **want to find the value \\(\theta\\) that maximizes this objective function**. ## The difference between policy-based and policy-gradient methods Policy-gradient methods, what we're going to study in this unit, is a subclass of policy-based methods. In policy-based methods, the optimization is most of the time *on-policy* since for each update, we only use data (trajectories) collected **by our most recent version of** \\(\pi_\theta\\). The difference between these two methods **lies on how we optimize the parameter** \\(\theta\\): - In *policy-based methods*, we search directly for the optimal policy. We can optimize the parameter \\(\theta\\) **indirectly** by maximizing the local approximation of the objective function with techniques like hill climbing, simulated annealing, or evolution strategies. 
- In *policy-gradient methods*, which are a subclass of policy-based methods, we search directly for the optimal policy. But we optimize the parameter \\(\theta\\) **directly** by performing gradient ascent on the performance of the objective function \\(J(\theta)\\). Before diving deeper into how policy-gradient methods work (the objective function, policy gradient theorem, gradient ascent, etc.), let's study the advantages and disadvantages of policy-based methods.
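Before moving on, here is a quick illustration of what "parameterizing the policy" can look like in code. It is a minimal, hedged sketch of a stochastic policy \\(\pi_\theta(a|s)\\) (a simplified version of the network used later in the hands-on; layer sizes and names are arbitrary choices for this example).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical

class StochasticPolicy(nn.Module):
    """pi_theta: maps a state to a probability distribution over actions."""
    def __init__(self, state_size, action_size, hidden_size=16):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        return F.softmax(self.fc2(x), dim=-1)   # action probabilities

# Sampling an action: exploration comes for free from the distribution itself
policy = StochasticPolicy(state_size=4, action_size=2)
state = torch.rand(1, 4)                        # a dummy state
dist = Categorical(policy(state))
action = dist.sample()
print(action.item(), dist.log_prob(action).item())
```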
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/conclusion.mdx
# Conclusion **Congrats on finishing this unit**! There was a lot of information. And congrats on finishing the tutorial. You've just coded your first Deep Reinforcement Learning agent from scratch using PyTorch and shared it on the Hub 🥳. Don't hesitate to iterate on this unit **by improving the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observation)? In the next unit, **we're going to learn more about Unity MLAgents**, by training agents in Unity environments. This way, you will be ready to participate in the **AI vs AI challenges where you'll train your agents to compete against other agents in a snowball fight and a soccer game.** Sound fun? See you next time! Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit4/additional-readings.mdx
# Additional Readings These are **optional readings** if you want to go deeper. ## Introduction to Policy Optimization - [Part 3: Intro to Policy Optimization - Spinning Up documentation](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html) ## Policy Gradient - [https://johnwlambert.github.io/policy-gradients/](https://johnwlambert.github.io/policy-gradients/) - [RL - Policy Gradient Explained](https://jonathan-hui.medium.com/rl-policy-gradients-explained-9b13b688b146) - [Chapter 13, Policy Gradient Methods; Reinforcement Learning, an introduction by Richard Sutton and Andrew G. Barto](http://incompleteideas.net/book/RLbook2020.pdf) ## Implementation - [PyTorch Reinforce implementation](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py) - [Implementations from DDPG to PPO](https://github.com/MrSyee/pg-is-all-you-need)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit6/hands-on.mdx
# Advantage Actor Critic (A2C) using Robotics Simulations with PyBullet and Panda-Gym 🤖 [[hands-on]] <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit6/unit6.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> Now that you've studied the theory behind Advantage Actor Critic (A2C), **you're ready to train your A2C agent** using Stable-Baselines3 in robotic environments. And train two robots: - A spider 🕷️ to learn to move. - A robotic arm 🦾 to move to the correct position. We're going to use two Robotics environments: - [PyBullet](https://github.com/bulletphysics/bullet3) - [panda-gym](https://github.com/qgallouedec/panda-gym) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/environments.gif" alt="Environments"/> To validate this hands-on for the certification process, you need to push your two trained models to the Hub and get the following results: - `AntBulletEnv-v0` get a result of >= 650. - `PandaReachDense-v2` get a result of >= -3.5. To find your result, [go to the leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward** **If you don't find your model, go to the bottom of the page and click on the refresh button.** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process **To start the hands-on click on Open In Colab button** 👇 : [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit6/unit6.ipynb) # Unit 6: Advantage Actor Critic (A2C) using Robotics Simulations with PyBullet and Panda-Gym 🤖 ### 🎮 Environments: - [PyBullet](https://github.com/bulletphysics/bullet3) - [Panda-Gym](https://github.com/qgallouedec/panda-gym) ### 📚 RL-Library: - [Stable-Baselines3](https://stable-baselines3.readthedocs.io/) We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). ## Objectives of this notebook 🏆 At the end of the notebook, you will: - Be able to use the environment librairies **PyBullet** and **Panda-Gym**. - Be able to **train robots using A2C**. - Understand why **we need to normalize the input**. - Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. ## Prerequisites 🏗️ Before diving into the notebook, you need to: 🔲 📚 Study [Actor-Critic methods by reading Unit 6](https://huggingface.co/deep-rl-course/unit6/introduction) 🤗 # Let's train our first robots 🤖 ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> ## Create a virtual display 🔽 During the notebook, we'll need to generate a replay video. 
To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). The following cell will install the librairies and create and run a virtual screen 🖥 ```python %%capture !apt install python-opengl !apt install ffmpeg !apt install xvfb !pip3 install pyvirtualdisplay ``` ```python # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() ``` ### Install dependencies 🔽 The first step is to install the dependencies, we’ll install multiple ones: - `pybullet`: Contains the walking robots environments. - `panda-gym`: Contains the robotics arm environments. - `stable-baselines3[extra]`: The SB3 deep reinforcement learning library. - `huggingface_sb3`: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub. - `huggingface_hub`: Library allowing anyone to work with the Hub repositories. ```bash !pip install stable-baselines3[extra]==1.8.0 !pip install huggingface_sb3 !pip install panda_gym==2.0.0 !pip install pyglet==1.5.1 ``` ## Import the packages 📦 ```python import pybullet_envs import panda_gym import gym import os from huggingface_sb3 import load_from_hub, package_to_hub from stable_baselines3 import A2C from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize from stable_baselines3.common.env_util import make_vec_env from huggingface_hub import notebook_login ``` ## Environment 1: AntBulletEnv-v0 🕸 ### Create the AntBulletEnv-v0 #### The environment 🎮 In this environment, the agent needs to use its different joints correctly in order to walk. You can find a detailled explanation of this environment here: https://hackmd.io/@jeffreymo/SJJrSJh5_#PyBullet ```python env_id = "AntBulletEnv-v0" # Create the env env = gym.make(env_id) # Get the state space and action space s_size = env.observation_space.shape[0] a_size = env.action_space ``` ```python print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation ``` The observation Space (from [Jeffrey Y Mo](https://hackmd.io/@jeffreymo/SJJrSJh5_#PyBullet)): The difference is that our observation space is 28 not 29. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/obs_space.png" alt="PyBullet Ant Obs space"/> ```python print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action ``` The action Space (from [Jeffrey Y Mo](https://hackmd.io/@jeffreymo/SJJrSJh5_#PyBullet)): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/action_space.png" alt="PyBullet Ant Obs space"/> ### Normalize observation and rewards A good practice in reinforcement learning is to [normalize input features](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html). For that purpose, there is a wrapper that will compute a running average and standard deviation of input features. 
We also normalize rewards with this same wrapper by adding `norm_reward = True` [You should check the documentation to fill this cell](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize) ```python env = make_vec_env(env_id, n_envs=4) # Adding this wrapper to normalize the observation and the reward env = # TODO: Add the wrapper ``` #### Solution ```python env = make_vec_env(env_id, n_envs=4) env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.0) ``` ### Create the A2C Model 🤖 In this case, because we have a vector of 28 values as input, we'll use an MLP (multi-layer perceptron) as policy. For more information about A2C implementation with StableBaselines3 check: https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html#notes To find the best parameters I checked the [official trained agents by Stable-Baselines3 team](https://huggingface.co/sb3). ```python model = # Create the A2C model and try to find the best parameters ``` #### Solution ```python model = A2C( policy="MlpPolicy", env=env, gae_lambda=0.9, gamma=0.99, learning_rate=0.00096, max_grad_norm=0.5, n_steps=8, vf_coef=0.4, ent_coef=0.0, policy_kwargs=dict(log_std_init=-2, ortho_init=False), normalize_advantage=False, use_rms_prop=True, use_sde=True, verbose=1, ) ``` ### Train the A2C agent 🏃 - Let's train our agent for 2,000,000 timesteps. Don't forget to use GPU on Colab. It will take approximately ~25-40min ```python model.learn(2_000_000) ``` ```python # Save the model and VecNormalize statistics when saving the agent model.save("a2c-AntBulletEnv-v0") env.save("vec_normalize.pkl") ``` ### Evaluate the agent 📈 - Now that our agent is trained, we need to **check its performance**. - Stable-Baselines3 provides a method to do that: `evaluate_policy` - In my case, I got a mean reward of `2371.90 +/- 16.50` ```python from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("AntBulletEnv-v0")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load("a2c-AntBulletEnv-v0") mean_reward, std_reward = evaluate_policy(model, env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") ``` ### Publish your trained model on the Hub 🔥 Now that we saw we got good results after the training, we can publish our trained model on the Hub with one line of code. 📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/main#hugging-face--x-stable-baselines3-v20 Here's an example of a Model Card (with a PyBullet environment): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/modelcardpybullet.png" alt="Model Card Pybullet"/> By using `package_to_hub`, as we already mentionned in the former units, **you evaluate, record a replay, generate a model card of your agent and push it to the hub**. 
This way: - You can **showcase our work** 🔥 - You can **visualize your agent playing** 👀 - You can **share an agent with the community that others can use** 💾 - You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and then you need to get your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> - Copy the token - Run the cell below and paste the token ```python notebook_login() !git config --global credential.helper store ``` If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function ```python package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # Change the username commit_message="Initial commit", ) ``` ## Take a coffee break ☕ - You already trained your first robot that learned to move congratutlations 🥳! - It's **time to take a break**. Don't hesitate to **save this notebook** `File > Save a copy to Drive` to work on this second part later. ## Environment 2: PandaReachDense-v2 🦾 The agent we're going to train is a robotic arm that needs to do controls (moving the arm and using the end-effector). In robotics, the *end-effector* is the device at the end of a robotic arm designed to interact with the environment. In `PandaReach`, the robot must place its end-effector at a target position (green ball). We're going to use the dense version of this environment. This means we'll get a *dense reward function* that **will provide a reward at each timestep** (the closer the agent is to completing the task, the higher the reward). This is in contrast to a *sparse reward function* where the environment **return a reward if and only if the task is completed**. Also, we're going to use the *End-effector displacement control*, which means the **action corresponds to the displacement of the end-effector**. We don't control the individual motion of each joint (joint control). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/robotics.jpg" alt="Robotics"/> This way **the training will be easier**. In `PandaReachDense-v2`, the robotic arm must place its end-effector at a target position (green ball). ```python import gym env_id = "PandaReachDense-v2" # Create the env env = gym.make(env_id) # Get the state space and action space s_size = env.observation_space.shape a_size = env.action_space ``` ```python print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation ``` The observation space **is a dictionary with 3 different elements**: - `achieved_goal`: (x,y,z) position of the goal. - `desired_goal`: (x,y,z) distance between the goal position and the current object position. 
- `observation`: position (x,y,z) and velocity of the end-effector (vx, vy, vz). Given it's a dictionary as observation, **we will need to use a MultiInputPolicy policy instead of MlpPolicy**. ```python print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action ``` The action space is a vector with 3 values: - Control x, y, z movement Now it's your turn: 1. Define the environment called "PandaReachDense-v2". 2. Make a vectorized environment. 3. Add a wrapper to normalize the observations and rewards. [Check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize) 4. Create the A2C Model (don't forget verbose=1 to print the training logs). 5. Train it for 1M Timesteps. 6. Save the model and VecNormalize statistics when saving the agent. 7. Evaluate your agent. 8. Publish your trained model on the Hub 🔥 with `package_to_hub`. ### Solution (fill the todo) ```python # 1 - 2 env_id = "PandaReachDense-v2" env = make_vec_env(env_id, n_envs=4) # 3 env = VecNormalize(env, norm_obs=True, norm_reward=False, clip_obs=10.0) # 4 model = A2C(policy="MultiInputPolicy", env=env, verbose=1) # 5 model.learn(1_000_000) ``` ```python # 6 model_name = "a2c-PandaReachDense-v2" model.save(model_name) env.save("vec_normalize.pkl") # 7 from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v2")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load(model_name) mean_reward, std_reward = evaluate_policy(model, env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") # 8 package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # TODO: Change the username commit_message="Initial commit", ) ``` ## Some additional challenges 🏆 The best way to learn **is to try things on your own**! Why not try `HalfCheetahBulletEnv-v0` for PyBullet and `PandaPickAndPlace-v1` for Panda-Gym? If you want to try more advanced tasks for panda-gym, you need to check what was done using **TQC or SAC** (a more sample-efficient algorithm suited for robotics tasks). In real robotics, you'll use a more sample-efficient algorithm for a simple reason: contrary to a simulation **if you move your robotic arm too much, you have a risk of breaking it**. PandaPickAndPlace-v1: https://huggingface.co/sb3/tqc-PandaPickAndPlace-v1 And don't hesitate to check panda-gym documentation here: https://panda-gym.readthedocs.io/en/latest/usage/train_with_sb3.html Here are some ideas to go further: * Train more steps * Try different hyperparameters by looking at what your classmates have done 👉 https://huggingface.co/models?other=https://huggingface.co/models?other=AntBulletEnv-v0 * **Push your new trained model** on the Hub 🔥 See you on Unit 7! 🔥 ## Keep learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit6/variance-problem.mdx
# The Problem of Variance in Reinforce [[the-problem-of-variance-in-reinforce]]

In Reinforce, we want to **increase the probability of actions in a trajectory proportionally to how high the return is**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/pg.jpg" alt="Reinforce"/>

- If the **return is high**, we will **push up** the probabilities of the (state, action) combinations.
- Otherwise, if the **return is low**, we will **push down** the probabilities of the (state, action) combinations.

This return \\(R(\tau)\\) is calculated using *Monte-Carlo sampling*. We collect a trajectory and calculate the discounted return, **and use this score to increase or decrease the probability of every action taken in that trajectory**. If the return is good, all actions will be “reinforced” by increasing their likelihood of being taken.

\\(R(\tau) = R_{t+1} + \gamma R_{t+2} + \gamma^2 R_{t+3} + ...\\)

The advantage of this method is that **it's unbiased**. Since we're not estimating the return, we use only the true return we obtain.

Given the stochasticity of the environment (random events during an episode) and the stochasticity of the policy, **trajectories can lead to different returns, which can lead to high variance**. Consequently, **the return starting from the same state can vary significantly across episodes**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/variance.jpg" alt="variance"/>

The solution is to mitigate the variance by **using a large number of trajectories, hoping that the variance introduced in any one trajectory will be reduced in aggregate and provide a "true" estimation of the return.**

However, increasing the batch size significantly **reduces sample efficiency**. So we need to find additional mechanisms to reduce the variance.

---
If you want to dive deeper into the question of variance and bias tradeoff in Deep Reinforcement Learning, you can check out these two articles:

- [Making Sense of the Bias / Variance Trade-off in (Deep) Reinforcement Learning](https://blog.mlreview.com/making-sense-of-the-bias-variance-trade-off-in-deep-reinforcement-learning-79cf1e83d565)
- [Bias-variance Tradeoff in Reinforcement Learning](https://www.endtoend.ai/blog/bias-variance-tradeoff-in-reinforcement-learning/)
---
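To see the variance discussed above concretely, here is a small illustrative sketch (not part of the original unit; the environment, the fixed random policy, and the number of episodes are arbitrary choices) that samples Monte-Carlo returns starting from the same initial state and prints how much they fluctuate:

```python
import gymnasium as gym
import numpy as np

# A stochastic environment and a fixed (here: random) policy are enough
# to see how much Monte-Carlo returns fluctuate from episode to episode.
env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=True)
gamma = 0.95
n_episodes = 500

returns = []
for _ in range(n_episodes):
    state, info = env.reset()
    discounted_return, discount = 0.0, 1.0
    terminated = truncated = False
    while not (terminated or truncated):
        action = env.action_space.sample()  # fixed random policy
        state, reward, terminated, truncated, info = env.step(action)
        discounted_return += discount * reward  # R(tau) = sum_k gamma^k * R_{t+k+1}
        discount *= gamma
    returns.append(discounted_return)

# The spread (std) is the variance problem Reinforce suffers from:
# a single sampled trajectory is an unbiased but very noisy estimate of the return.
print(f"Mean return: {np.mean(returns):.3f} +/- {np.std(returns):.3f}")
```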
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit6/introduction.mdx
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/thumbnail.png" alt="Thumbnail"/> In unit 4, we learned about our first Policy-Based algorithm called **Reinforce**. In Policy-Based methods, **we aim to optimize the policy directly without using a value function**. More precisely, Reinforce is part of a subclass of *Policy-Based Methods* called *Policy-Gradient methods*. This subclass optimizes the policy directly by **estimating the weights of the optimal policy using Gradient Ascent**. We saw that Reinforce worked well. However, because we use Monte-Carlo sampling to estimate return (we use an entire episode to calculate the return), **we have significant variance in policy gradient estimation**. Remember that the policy gradient estimation is **the direction of the steepest increase in return**. In other words, how to update our policy weights so that actions that lead to good returns have a higher probability of being taken. The Monte Carlo variance, which we will further study in this unit, **leads to slower training since we need a lot of samples to mitigate it**. So today we'll study **Actor-Critic methods**, a hybrid architecture combining value-based and Policy-Based methods that helps to stabilize the training by reducing the variance using: - *An Actor* that controls **how our agent behaves** (Policy-Based method) - *A Critic* that measures **how good the taken action is** (Value-Based method) We'll study one of these hybrid methods, Advantage Actor Critic (A2C), **and train our agent using Stable-Baselines3 in robotic environments**. We'll train two robots: - A spider 🕷️ to learn to move. - A robotic arm 🦾 to move to the correct position. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/environments.gif" alt="Environments"/> Sound exciting? Let's get started!
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit6/advantage-actor-critic.mdx
# Advantage Actor-Critic (A2C) [[advantage-actor-critic]] ## Reducing variance with Actor-Critic methods The solution to reducing the variance of the Reinforce algorithm and training our agent faster and better is to use a combination of Policy-Based and Value-Based methods: *the Actor-Critic method*. To understand the Actor-Critic, imagine you're playing a video game. You can play with a friend that will provide you with some feedback. You're the Actor and your friend is the Critic. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/ac.jpg" alt="Actor Critic"/> You don't know how to play at the beginning, **so you try some actions randomly**. The Critic observes your action and **provides feedback**. Learning from this feedback, **you'll update your policy and be better at playing that game.** On the other hand, your friend (Critic) will also update their way to provide feedback so it can be better next time. This is the idea behind Actor-Critic. We learn two function approximations: - *A policy* that **controls how our agent acts**: \\( \pi_{\theta}(s) \\) - *A value function* to assist the policy update by measuring how good the action taken is: \\( \hat{q}_{w}(s,a) \\) ## The Actor-Critic Process Now that we have seen the Actor Critic's big picture, let's dive deeper to understand how the Actor and Critic improve together during the training. As we saw, with Actor-Critic methods, there are two function approximations (two neural networks): - *Actor*, a **policy function** parameterized by theta: \\( \pi_{\theta}(s) \\) - *Critic*, a **value function** parameterized by w: \\( \hat{q}_{w}(s,a) \\) Let's see the training process to understand how the Actor and Critic are optimized: - At each timestep, t, we get the current state \\( S_t\\) from the environment and **pass it as input through our Actor and Critic**. - Our Policy takes the state and **outputs an action** \\( A_t \\). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step1.jpg" alt="Step 1 Actor Critic"/> - The Critic takes that action also as input and, using \\( S_t\\) and \\( A_t \\), **computes the value of taking that action at that state: the Q-value**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step2.jpg" alt="Step 2 Actor Critic"/> - The action \\( A_t\\) performed in the environment outputs a new state \\( S_{t+1}\\) and a reward \\( R_{t+1} \\) . <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step3.jpg" alt="Step 3 Actor Critic"/> - The Actor updates its policy parameters using the Q value. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step4.jpg" alt="Step 4 Actor Critic"/> - Thanks to its updated parameters, the Actor produces the next action to take at \\( A_{t+1} \\) given the new state \\( S_{t+1} \\). - The Critic then updates its value parameters. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step5.jpg" alt="Step 5 Actor Critic"/> ## Adding Advantage in Actor-Critic (A2C) We can stabilize learning further by **using the Advantage function as Critic instead of the Action value function**. 
The idea is that the Advantage function calculates the relative advantage of an action compared to the other actions possible at a state: **how taking that action at a state is better compared to the average value of the state**. It subtracts the mean value of the state from the state-action pair value:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/advantage1.jpg" alt="Advantage Function"/>

In other words, this function calculates **the extra reward we get if we take this action at that state compared to the mean reward we get at that state**.

The extra reward is what's beyond the expected value of that state.

- If A(s,a) > 0: our gradient is **pushed in that direction**.
- If A(s,a) < 0 (our action does worse than the average value of that state), **our gradient is pushed in the opposite direction**.

The problem with implementing this advantage function is that it requires two value functions, \\( Q(s,a)\\) and \\( V(s)\\). Fortunately, **we can use the TD error as a good estimator of the advantage function.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/advantage2.jpg" alt="Advantage Function"/>
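As a rough sketch of that last point (not part of the original unit), the advantage of a single transition can be approximated with the TD error, assuming some state-value estimator `value_fn` such as the Critic network:

```python
def td_error_advantage(value_fn, state, reward, next_state, gamma=0.99, done=False):
    """Approximate A(s, a) with the TD error: r + gamma * V(s') - V(s).

    `value_fn` is assumed to be any callable returning the estimated
    state value V(s); in A2C, this role is played by the Critic.
    """
    bootstrap = 0.0 if done else gamma * value_fn(next_state)
    return reward + bootstrap - value_fn(state)
```

In practice, A2C computes a quantity like this for each step of the rollout and uses it to weight the policy gradient.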
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit6/conclusion.mdx
# Conclusion [[conclusion]]

Congrats on finishing this unit and the tutorial. You've just trained your first virtual robots 🥳.

**Take time to grasp the material before continuing**. You can also look at the additional reading materials we provided in the *additional reading* section.

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

See you in the next unit!

### Keep learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit6/additional-readings.mdx
# Additional Readings [[additional-readings]] ## Bias-variance tradeoff in Reinforcement Learning If you want to dive deeper into the question of variance and bias tradeoff in Deep Reinforcement Learning, you can check out these two articles: - [Making Sense of the Bias / Variance Trade-off in (Deep) Reinforcement Learning](https://blog.mlreview.com/making-sense-of-the-bias-variance-trade-off-in-deep-reinforcement-learning-79cf1e83d565) - [Bias-variance Tradeoff in Reinforcement Learning](https://www.endtoend.ai/blog/bias-variance-tradeoff-in-reinforcement-learning/) ## Advantage Functions - [Advantage Functions, SpinningUp RL](https://spinningup.openai.com/en/latest/spinningup/rl_intro.html?highlight=advantage%20functio#advantage-functions) ## Actor Critic - [Foundations of Deep RL Series, L3 Policy Gradients and Advantage Estimation by Pieter Abbeel](https://www.youtube.com/watch?v=AKbX1Zvo7r8) - [A2C Paper: Asynchronous Methods for Deep Reinforcement Learning](https://arxiv.org/abs/1602.01783v2)
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit2/bellman-equation.mdx
# The Bellman Equation: simplify our value estimation [[bellman-equation]]

The Bellman equation **simplifies our state value or state-action value calculation.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman.jpg" alt="Bellman equation"/>

With what we have learned so far, we know that if we calculate \\(V(S_t)\\) (the value of a state), we need to calculate the return starting at that state and then follow the policy forever after. **(The policy we defined in the following example is a Greedy Policy; for simplification, we don't discount the reward).**

So to calculate \\(V(S_t)\\), we need to calculate the sum of the expected rewards. Hence:

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman2.jpg" alt="Bellman equation"/>
<figcaption>To calculate the value of State 1: the sum of rewards if the agent started in that state and then followed the greedy policy (taking the actions that lead to the best state values) for all the time steps.</figcaption>
</figure>

Then, to calculate the \\(V(S_{t+1})\\), we need to calculate the return starting at that state \\(S_{t+1}\\).

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman3.jpg" alt="Bellman equation"/>
<figcaption>To calculate the value of State 2: the sum of rewards <b>if the agent started in that state</b>, and then followed the <b>policy for all the time steps.</b></figcaption>
</figure>

So you may have noticed, we're repeating the computation of the value of different states, which can be tedious if you need to do it for each state value or state-action value.

Instead of calculating the expected return for each state or each state-action pair, **we can use the Bellman equation.** (Hint: if you know what Dynamic Programming is, this is very similar! If you don't know what it is, no worries!)

The Bellman equation is a recursive equation that works like this: instead of starting from the beginning for each state and calculating the return, we can consider the value of any state as:

**The immediate reward \\(R_{t+1}\\) + the discounted value of the state that follows ( \\(\gamma * V(S_{t+1})\\) ).**

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman4.jpg" alt="Bellman equation"/>
</figure>

If we go back to our example, we can say that the value of State 1 is equal to the expected cumulative return if we start at that state.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman2.jpg" alt="Bellman equation"/>

To calculate the value of State 1: the sum of rewards **if the agent started in that state 1** and then followed the **policy for all the time steps.**

This is equivalent to \\(V(S_{t})\\) = Immediate reward \\(R_{t+1}\\) + Discounted value of the next state \\(\gamma * V(S_{t+1})\\)

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman6.jpg" alt="Bellman equation"/>
<figcaption>For simplification, here we don’t discount, so gamma = 1.</figcaption>
</figure>

In the interest of simplicity, here we don't discount, so gamma = 1. But you'll study an example with gamma = 0.99 in the Q-Learning section of this unit.

- The value of \\(V(S_{t+1}) \\) = Immediate reward \\(R_{t+2}\\) + Discounted value of the next state ( \\(\gamma * V(S_{t+2})\\) ).
- And so on. To recap, the idea of the Bellman equation is that instead of calculating each value as the sum of the expected return, **which is a long process**, we calculate the value as **the sum of immediate reward + the discounted value of the state that follows.** Before going to the next section, think about the role of gamma in the Bellman equation. What happens if the value of gamma is very low (e.g. 0.1 or even 0)? What happens if the value is 1? What happens if the value is very high, such as a million?
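If you want to experiment with these questions numerically, here is a minimal sketch (not part of the original unit; the reward sequence is a made-up placeholder) that applies the Bellman recursion backwards over a fixed trajectory, so you can change gamma and see how the state values react:

```python
def bellman_values(rewards, gamma=1.0):
    """Compute V for each state of a simple deterministic trajectory.

    rewards[i] is the reward R_{t+1} received when leaving state i;
    the value of the terminal state is taken to be 0.
    """
    values = [0.0] * (len(rewards) + 1)
    # Work backwards: V(s) = R_{t+1} + gamma * V(s')
    for i in reversed(range(len(rewards))):
        values[i] = rewards[i] + gamma * values[i + 1]
    return values


# Placeholder rewards: a small reward now, a big reward at the end.
print(bellman_values([1, 0, 0, 0, 10], gamma=1.0))  # no discounting
print(bellman_values([1, 0, 0, 0, 10], gamma=0.1))  # strong discounting
```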
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit2/what-is-rl.mdx
# What is RL? A short recap [[what-is-rl]] In RL, we build an agent that can **make smart decisions**. For instance, an agent that **learns to play a video game.** Or a trading agent that **learns to maximize its benefits** by deciding on **what stocks to buy and when to sell.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/rl-process.jpg" alt="RL process"/> To make intelligent decisions, our agent will learn from the environment by **interacting with it through trial and error** and receiving rewards (positive or negative) **as unique feedback.** Its goal **is to maximize its expected cumulative reward** (because of the reward hypothesis). **The agent's decision-making process is called the policy π:** given a state, a policy will output an action or a probability distribution over actions. That is, given an observation of the environment, a policy will provide an action (or multiple probabilities for each action) that the agent should take. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/policy.jpg" alt="Policy"/> **Our goal is to find an optimal policy π* **, aka., a policy that leads to the best expected cumulative reward. And to find this optimal policy (hence solving the RL problem), there **are two main types of RL methods**: - *Policy-based methods*: **Train the policy directly** to learn which action to take given a state. - *Value-based methods*: **Train a value function** to learn **which state is more valuable** and use this value function **to take the action that leads to it.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches.jpg" alt="Two RL approaches"/> And in this unit, **we'll dive deeper into the value-based methods.**
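As a loose illustration of the difference (the states, actions, and values below are made up for the example), a policy can either be represented directly, or derived from a learned value function by acting greedily:

```python
import numpy as np

# Policy-based view: the policy itself is the learned object.
# Here it is just a lookup table mapping state -> action (made-up values).
policy = {0: "right", 1: "right", 2: "down"}
print(policy[0])

# Value-based view: we learn action values and *derive* the policy
# by acting greedily with respect to them.
q_values = np.array([
    [0.1, 0.7],  # state 0: estimated values of "left" and "right" (made-up)
    [0.4, 0.9],  # state 1
])
actions = ["left", "right"]
greedy_action = actions[int(np.argmax(q_values[0]))]
print(greedy_action)
```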
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit2/hands-on.mdx
# Hands-on [[hands-on]] <CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit2/unit2.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" /> Now that we studied the Q-Learning algorithm, let's implement it from scratch and train our Q-Learning agent in two environments: 1. [Frozen-Lake-v1 (non-slippery and slippery version)](https://gymnasium.farama.org/environments/toy_text/frozen_lake/) ☃️ : where our agent will need to **go from the starting state (S) to the goal state (G)** by walking only on frozen tiles (F) and avoiding holes (H). 2. [An autonomous taxi](https://gymnasium.farama.org/environments/toy_text/taxi/) 🚖 will need **to learn to navigate** a city to **transport its passengers from point A to point B.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/envs.gif" alt="Environments"/> Thanks to a [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard), you'll be able to compare your results with other classmates and exchange the best practices to improve your agent's scores. Who will win the challenge for Unit 2? To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push your trained Taxi model to the Hub and **get a result of >= 4.5**. To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course **To start the hands-on click on the Open In Colab button** 👇 : [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit2/unit2.ipynb) We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers. By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments. # Unit 2: Q-Learning with FrozenLake-v1 ⛄ and Taxi-v3 🚕 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/thumbnail.jpg" alt="Unit 2 Thumbnail"> In this notebook, **you'll code your first Reinforcement Learning agent from scratch** to play FrozenLake ❄️ using Q-Learning, share it with the community, and experiment with different configurations. 
⬇️ Here is an example of what **you will achieve in just a couple of minutes.** ⬇️

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/envs.gif" alt="Environments"/>

### 🎮 Environments:

- [FrozenLake-v1](https://gymnasium.farama.org/environments/toy_text/frozen_lake/)
- [Taxi-v3](https://gymnasium.farama.org/environments/toy_text/taxi/)

### 📚 RL-Library:

- Python and NumPy
- [Gymnasium](https://gymnasium.farama.org/)

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

## Objectives of this notebook 🏆

At the end of the notebook, you will:

- Be able to use **Gymnasium**, the environment library.
- Be able to code a Q-Learning agent from scratch.
- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥.

## This notebook is from the Deep Reinforcement Learning Course

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/deep-rl-course-illustration.jpg" alt="Deep RL Course illustration"/>

In this free course, you will:

- 📖 Study Deep Reinforcement Learning in **theory and practice**.
- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.
- 🤖 Train **agents in unique environments**

And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course

Don’t forget to **<a href="http://eepurl.com/ic5ZUD">sign up to the course</a>** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**

The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5

## Prerequisites 🏗️

Before diving into the notebook, you need to:

🔲 📚 **Study [Q-Learning by reading Unit 2](https://huggingface.co/deep-rl-course/unit2/introduction)** 🤗

## A small recap of Q-Learning

*Q-Learning* **is the RL algorithm that**:

- Trains a *Q-Function*, an **action-value function** encoded, in internal memory, by a *Q-table* **that contains all the state-action pair values.**
- Given a state and action, our Q-Function **will search the Q-table for the corresponding value.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q function" width="100%"/>

- When the training is done, **we have an optimal Q-Function, so an optimal Q-Table.**
- And if we **have an optimal Q-function**, we have an optimal policy, since we **know, for each state, the best action to take.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" width="100%"/>

But, in the beginning, our **Q-Table is useless since it gives arbitrary values for each state-action pair (most of the time we initialize the Q-Table to 0 values)**.
But, as we’ll explore the environment and update our Q-Table it will give us better and better approximations <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/q-learning.jpeg" alt="q-learning.jpeg" width="100%"/> This is the Q-Learning pseudocode: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-Learning" width="100%"/> # Let's code our first Reinforcement Learning algorithm 🚀 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push your trained Taxi model to the Hub and **get a result of >= 4.5**. To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward** For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process ## Install dependencies and create a virtual display 🔽 In the notebook, we'll need to generate a replay video. To do so, with Colab, **we need to have a virtual screen to render the environment** (and thus record the frames). Hence the following cell will install the libraries and create and run a virtual screen 🖥 We’ll install multiple ones: - `gymnasium`: Contains the FrozenLake-v1 ⛄ and Taxi-v3 🚕 environments. - `pygame`: Used for the FrozenLake-v1 and Taxi-v3 UI. - `numpy`: Used for handling our Q-table. The Hugging Face Hub 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations and other features that will allow you to easily collaborate with others. You can see here all the Deep RL models available (if they use Q Learning) here 👉 https://huggingface.co/models?other=q-learning ```bash pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit2/requirements-unit2.txt ``` ```bash sudo apt-get update apt install python-opengl ffmpeg xvfb pip3 install pyvirtualdisplay ``` To make sure the new installed libraries are used, **sometimes it's required to restart the notebook runtime**. The next cell will force the **runtime to crash, so you'll need to connect again and run the code starting from here**. Thanks to this trick, **we will be able to run our virtual screen.** ```python import os os.kill(os.getpid(), 9) ``` ```python # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start() ``` ## Import the packages 📦 In addition to the installed libraries, we also use: - `random`: To generate random numbers (that will be useful for epsilon-greedy policy). - `imageio`: To generate a replay video. 
```python import numpy as np import gymnasium as gym import random import imageio import os import tqdm import pickle5 as pickle from tqdm.notebook import tqdm ``` We're now ready to code our Q-Learning algorithm 🔥 # Part 1: Frozen Lake ⛄ (non slippery version) ## Create and understand [FrozenLake environment ⛄]((https://gymnasium.farama.org/environments/toy_text/frozen_lake/) --- 💡 A good habit when you start to use an environment is to check its documentation 👉 https://gymnasium.farama.org/environments/toy_text/frozen_lake/ --- We're going to train our Q-Learning agent **to navigate from the starting state (S) to the goal state (G) by walking only on frozen tiles (F) and avoid holes (H)**. We can have two sizes of environment: - `map_name="4x4"`: a 4x4 grid version - `map_name="8x8"`: a 8x8 grid version The environment has two modes: - `is_slippery=False`: The agent always moves **in the intended direction** due to the non-slippery nature of the frozen lake (deterministic). - `is_slippery=True`: The agent **may not always move in the intended direction** due to the slippery nature of the frozen lake (stochastic). For now let's keep it simple with the 4x4 map and non-slippery. We add a parameter called `render_mode` that specifies how the environment should be visualised. In our case because we **want to record a video of the environment at the end, we need to set render_mode to rgb_array**. As [explained in the documentation](https://gymnasium.farama.org/api/env/#gymnasium.Env.render) “rgb_array”: Return a single frame representing the current state of the environment. A frame is a np.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image. ```python # Create the FrozenLake-v1 environment using 4x4 map and non-slippery version and render_mode="rgb_array" env = gym.make() # TODO use the correct parameters ``` ### Solution ```python env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=False, render_mode="rgb_array") ``` You can create your own custom grid like this: ```python desc=["SFFF", "FHFH", "FFFH", "HFFG"] gym.make('FrozenLake-v1', desc=desc, is_slippery=True) ``` but we'll use the default environment for now. ### Let's see what the Environment looks like: ```python # We create our environment with gym.make("<name_of_the_environment>")- `is_slippery=False`: The agent always moves in the intended direction due to the non-slippery nature of the frozen lake (deterministic). print("_____OBSERVATION SPACE_____ \n") print("Observation Space", env.observation_space) print("Sample observation", env.observation_space.sample()) # Get a random observation ``` We see with `Observation Space Shape Discrete(16)` that the observation is an integer representing the **agent’s current position as current_row * nrows + current_col (where both the row and col start at 0)**. For example, the goal position in the 4x4 map can be calculated as follows: 3 * 4 + 3 = 15. The number of possible observations is dependent on the size of the map. 
**For example, the 4x4 map has 16 possible observations.** For instance, this is what state = 0 looks like: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/frozenlake.png" alt="FrozenLake"> ```python print("\n _____ACTION SPACE_____ \n") print("Action Space Shape", env.action_space.n) print("Action Space Sample", env.action_space.sample()) # Take a random action ``` The action space (the set of possible actions the agent can take) is discrete with 4 actions available 🎮: - 0: GO LEFT - 1: GO DOWN - 2: GO RIGHT - 3: GO UP Reward function 💰: - Reach goal: +1 - Reach hole: 0 - Reach frozen: 0 ## Create and Initialize the Q-table 🗄️ (👀 Step 1 of the pseudocode) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-Learning" width="100%"/> It's time to initialize our Q-table! To know how many rows (states) and columns (actions) to use, we need to know the action and observation space. We already know their values from before, but we'll want to obtain them programmatically so that our algorithm generalizes for different environments. Gym provides us a way to do that: `env.action_space.n` and `env.observation_space.n` ```python state_space = print("There are ", state_space, " possible states") action_space = print("There are ", action_space, " possible actions") ``` ```python # Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros. np.zeros needs a tuple (a,b) def initialize_q_table(state_space, action_space): Qtable = return Qtable ``` ```python Qtable_frozenlake = initialize_q_table(state_space, action_space) ``` ### Solution ```python state_space = env.observation_space.n print("There are ", state_space, " possible states") action_space = env.action_space.n print("There are ", action_space, " possible actions") ``` ```python # Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros def initialize_q_table(state_space, action_space): Qtable = np.zeros((state_space, action_space)) return Qtable ``` ```python Qtable_frozenlake = initialize_q_table(state_space, action_space) ``` ## Define the greedy policy 🤖 Remember we have two policies since Q-Learning is an **off-policy** algorithm. This means we're using a **different policy for acting and updating the value function**. - Epsilon-greedy policy (acting policy) - Greedy-policy (updating policy) The greedy policy will also be the final policy we'll have when the Q-learning agent completes training. The greedy policy is used to select an action using the Q-table. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="Q-Learning" width="100%"/> ```python def greedy_policy(Qtable, state): # Exploitation: take the action with the highest state, action value action = return action ``` #### Solution ```python def greedy_policy(Qtable, state): # Exploitation: take the action with the highest state, action value action = np.argmax(Qtable[state][:]) return action ``` ##Define the epsilon-greedy policy 🤖 Epsilon-greedy is the training policy that handles the exploration/exploitation trade-off. The idea with epsilon-greedy: - With *probability 1 - ɛ* : **we do exploitation** (i.e. our agent selects the action with the highest state-action pair value). - With *probability ɛ*: we do **exploration** (trying a random action). 
As the training continues, we progressively **reduce the epsilon value since we will need less and less exploration and more exploitation.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Q-Learning" width="100%"/> ```python def epsilon_greedy_policy(Qtable, state, epsilon): # Randomly generate a number between 0 and 1 random_num = # if random_num > greater than epsilon --> exploitation if random_num > epsilon: # Take the action with the highest value given a state # np.argmax can be useful here action = # else --> exploration else: action = # Take a random action return action ``` #### Solution ```python def epsilon_greedy_policy(Qtable, state, epsilon): # Randomly generate a number between 0 and 1 random_num = random.uniform(0, 1) # if random_num > greater than epsilon --> exploitation if random_num > epsilon: # Take the action with the highest value given a state # np.argmax can be useful here action = greedy_policy(Qtable, state) # else --> exploration else: action = env.action_space.sample() return action ``` ## Define the hyperparameters ⚙️ The exploration related hyperparamters are some of the most important ones. - We need to make sure that our agent **explores enough of the state space** to learn a good value approximation. To do that, we need to have progressive decay of the epsilon. - If you decrease epsilon too fast (too high decay_rate), **you take the risk that your agent will be stuck**, since your agent didn't explore enough of the state space and hence can't solve the problem. ```python # Training parameters n_training_episodes = 10000 # Total training episodes learning_rate = 0.7 # Learning rate # Evaluation parameters n_eval_episodes = 100 # Total number of test episodes # Environment parameters env_id = "FrozenLake-v1" # Name of the environment max_steps = 99 # Max steps per episode gamma = 0.95 # Discounting rate eval_seed = [] # The evaluation seed of the environment # Exploration parameters max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.05 # Minimum exploration probability decay_rate = 0.0005 # Exponential decay rate for exploration prob ``` ## Create the training loop method <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-Learning" width="100%"/> The training loop goes like this: ``` For episode in the total of training episodes: Reduce epsilon (since we need less and less exploration) Reset the environment For step in max timesteps: Choose the action At using epsilon greedy policy Take the action (a) and observe the outcome state(s') and reward (r) Update the Q-value Q(s,a) using Bellman equation Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] If done, finish the episode Our next state is the new state ``` ```python def train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable): for episode in tqdm(range(n_training_episodes)): # Reduce epsilon (because we need less and less exploration) epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode) # Reset the environment state, info = env.reset() step = 0 terminated = False truncated = False # repeat for step in range(max_steps): # Choose the action At using epsilon greedy policy action = # Take action At and observe Rt+1 and St+1 # Take the action (a) and observe the outcome state(s') and reward (r) new_state, reward, terminated, truncated, info = # Update Q(s,a):= Q(s,a) + lr 
[R(s,a) + gamma * max Q(s',a') - Q(s,a)] Qtable[state][action] = # If terminated or truncated finish the episode if terminated or truncated: break # Our next state is the new state state = new_state return Qtable ``` #### Solution ```python def train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable): for episode in tqdm(range(n_training_episodes)): # Reduce epsilon (because we need less and less exploration) epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode) # Reset the environment state, info = env.reset() step = 0 terminated = False truncated = False # repeat for step in range(max_steps): # Choose the action At using epsilon greedy policy action = epsilon_greedy_policy(Qtable, state, epsilon) # Take action At and observe Rt+1 and St+1 # Take the action (a) and observe the outcome state(s') and reward (r) new_state, reward, terminated, truncated, info = env.step(action) # Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] Qtable[state][action] = Qtable[state][action] + learning_rate * ( reward + gamma * np.max(Qtable[new_state]) - Qtable[state][action] ) # If terminated or truncated finish the episode if terminated or truncated: break # Our next state is the new state state = new_state return Qtable ``` ## Train the Q-Learning agent 🏃 ```python Qtable_frozenlake = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_frozenlake) ``` ## Let's see what our Q-Learning table looks like now 👀 ```python Qtable_frozenlake ``` ## The evaluation method 📝 - We defined the evaluation method that we're going to use to test our Q-Learning agent. ```python def evaluate_agent(env, max_steps, n_eval_episodes, Q, seed): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. :param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param Q: The Q-table :param seed: The evaluation seed array (for taxi-v3) """ episode_rewards = [] for episode in tqdm(range(n_eval_episodes)): if seed: state, info = env.reset(seed=seed[episode]) else: state, info = env.reset() step = 0 truncated = False terminated = False total_rewards_ep = 0 for step in range(max_steps): # Take the action (index) that have the maximum expected future reward given that state action = greedy_policy(Q, state) new_state, reward, terminated, truncated, info = env.step(action) total_rewards_ep += reward if terminated or truncated: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward ``` ## Evaluate our Q-Learning agent 📈 - Usually, you should have a mean reward of 1.0 - The **environment is relatively easy** since the state space is really small (16). What you can try to do is [to replace it with the slippery version](https://www.gymlibrary.dev/environments/toy_text/frozen_lake/), which introduces stochasticity, making the environment more complex. ```python # Evaluate our Agent mean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_frozenlake, eval_seed) print(f"Mean_reward={mean_reward:.2f} +/- {std_reward:.2f}") ``` ## Publish our trained model to the Hub 🔥 Now that we saw good results after the training, **we can publish our trained model to the Hub 🤗 with one line of code**. 
Here's an example of a Model Card: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/modelcard.png" alt="Model card" width="100%"/> Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent. #### Do not modify this code ```python from huggingface_hub import HfApi, snapshot_download from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import json ``` ```python def record_video(env, Qtable, out_directory, fps=1): """ Generate a replay video of the agent :param env :param Qtable: Qtable of our agent :param out_directory :param fps: how many frame per seconds (with taxi-v3 and frozenlake-v1 we use 1) """ images = [] terminated = False truncated = False state, info = env.reset(seed=random.randint(0, 500)) img = env.render() images.append(img) while not terminated or truncated: # Take the action (index) that have the maximum expected future reward given that state action = np.argmax(Qtable[state][:]) state, reward, terminated, truncated, info = env.step( action ) # We directly put next_state = state for recording logic img = env.render() images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) ``` ```python def push_to_hub(repo_id, model, env, video_fps=1, local_repo_path="hub"): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the Hub :param repo_id: repo_id: id of the model repository from the Hugging Face Hub :param env :param video_fps: how many frame per seconds to record our video replay (with taxi-v3 and frozenlake-v1 we use 1) :param local_repo_path: where the local repository is """ _, repo_name = repo_id.split("/") eval_env = env api = HfApi() # Step 1: Create the repo repo_url = api.create_repo( repo_id=repo_id, exist_ok=True, ) # Step 2: Download files repo_local_path = Path(snapshot_download(repo_id=repo_id)) # Step 3: Save the model if env.spec.kwargs.get("map_name"): model["map_name"] = env.spec.kwargs.get("map_name") if env.spec.kwargs.get("is_slippery", "") == False: model["slippery"] = False # Pickle the model with open((repo_local_path) / "q-learning.pkl", "wb") as f: pickle.dump(model, f) # Step 4: Evaluate the model and build JSON with evaluation metrics mean_reward, std_reward = evaluate_agent( eval_env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"] ) evaluate_data = { "env_id": model["env_id"], "mean_reward": mean_reward, "n_eval_episodes": model["n_eval_episodes"], "eval_datetime": datetime.datetime.now().isoformat(), } # Write a JSON file called "results.json" that will contain the # evaluation results with open(repo_local_path / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 5: Create the model card env_name = model["env_id"] if env.spec.kwargs.get("map_name"): env_name += "-" + env.spec.kwargs.get("map_name") if env.spec.kwargs.get("is_slippery", "") == False: env_name += "-" + "no_slippery" metadata = {} metadata["tags"] = [env_name, "q-learning", "reinforcement-learning", "custom-implementation"] # Add metrics eval = metadata_eval_result( model_pretty_name=repo_name, task_pretty_name="reinforcement-learning", 
task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_name, dataset_id=env_name, ) # Merges both dictionaries metadata = {**metadata, **eval} model_card = f""" # **Q-Learning** Agent playing1 **{env_id}** This is a trained model of a **Q-Learning** agent playing **{env_id}** . ## Usage model = load_from_hub(repo_id="{repo_id}", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) """ evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) readme_path = repo_local_path / "README.md" readme = "" print(readme_path.exists()) if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) # Step 6: Record a video video_path = repo_local_path / "replay.mp4" record_video(env, model["qtable"], video_path, video_fps) # Step 7. Push everything to the Hub api.upload_folder( repo_id=repo_id, folder_path=repo_local_path, path_in_repo=".", ) print("Your model is pushed to the Hub. You can view your model here: ", repo_url) ``` ### . By using `push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the Hub**. This way: - You can **showcase our work** 🔥 - You can **visualize your agent playing** 👀 - You can **share an agent with the community that others can use** 💾 - You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> ```python from huggingface_hub import notebook_login notebook_login() ``` If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `push_to_hub()` function - Let's create **the model dictionary that contains the hyperparameters and the Q_table**. ```python model = { "env_id": env_id, "max_steps": max_steps, "n_training_episodes": n_training_episodes, "n_eval_episodes": n_eval_episodes, "eval_seed": eval_seed, "learning_rate": learning_rate, "gamma": gamma, "max_epsilon": max_epsilon, "min_epsilon": min_epsilon, "decay_rate": decay_rate, "qtable": Qtable_frozenlake, } ``` Let's fill the `push_to_hub` function: - `repo_id`: the name of the Hugging Face Hub Repository that will be created/updated ` (repo_id = {username}/{repo_name})` 💡 A good `repo_id` is `{username}/q-{env_id}` - `model`: our model dictionary containing the hyperparameters and the Qtable. - `env`: the environment. 
- `commit_message`: message of the commit ```python model ``` ```python username = "" # FILL THIS repo_name = "q-FrozenLake-v1-4x4-noSlippery" push_to_hub(repo_id=f"{username}/{repo_name}", model=model, env=env) ``` Congrats 🥳 you've just implemented from scratch, trained, and uploaded your first Reinforcement Learning agent. FrozenLake-v1 no_slippery is very simple environment, let's try a harder one 🔥. # Part 2: Taxi-v3 🚖 ## Create and understand [Taxi-v3 🚕](https://gymnasium.farama.org/environments/toy_text/taxi/) --- 💡 A good habit when you start to use an environment is to check its documentation 👉 https://gymnasium.farama.org/environments/toy_text/taxi/ --- In `Taxi-v3` 🚕, there are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue). When the episode starts, **the taxi starts off at a random square** and the passenger is at a random location. The taxi drives to the passenger’s location, **picks up the passenger**, drives to the passenger’s destination (another one of the four specified locations), and then **drops off the passenger**. Once the passenger is dropped off, the episode ends. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/taxi.png" alt="Taxi"> ```python env = gym.make("Taxi-v3", render_mode="rgb_array") ``` There are **500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger** (including the case when the passenger is in the taxi), and **4 destination locations.** ```python state_space = env.observation_space.n print("There are ", state_space, " possible states") ``` ```python action_space = env.action_space.n print("There are ", action_space, " possible actions") ``` The action space (the set of possible actions the agent can take) is discrete with **6 actions available 🎮**: - 0: move south - 1: move north - 2: move east - 3: move west - 4: pickup passenger - 5: drop off passenger Reward function 💰: - -1 per step unless other reward is triggered. - +20 delivering passenger. - -10 executing “pickup” and “drop-off” actions illegally. 
```python # Create our Q table with state_size rows and action_size columns (500x6) Qtable_taxi = initialize_q_table(state_space, action_space) print(Qtable_taxi) print("Q-table shape: ", Qtable_taxi.shape) ``` ## Define the hyperparameters ⚙️ ⚠ DO NOT MODIFY EVAL_SEED: the eval_seed array **allows us to evaluate your agent with the same taxi starting positions for every classmate** ```python # Training parameters n_training_episodes = 25000 # Total training episodes learning_rate = 0.7 # Learning rate # Evaluation parameters n_eval_episodes = 100 # Total number of test episodes # DO NOT MODIFY EVAL_SEED eval_seed = [ 16, 54, 165, 177, 191, 191, 120, 80, 149, 178, 48, 38, 6, 125, 174, 73, 50, 172, 100, 148, 146, 6, 25, 40, 68, 148, 49, 167, 9, 97, 164, 176, 61, 7, 54, 55, 161, 131, 184, 51, 170, 12, 120, 113, 95, 126, 51, 98, 36, 135, 54, 82, 45, 95, 89, 59, 95, 124, 9, 113, 58, 85, 51, 134, 121, 169, 105, 21, 30, 11, 50, 65, 12, 43, 82, 145, 152, 97, 106, 55, 31, 85, 38, 112, 102, 168, 123, 97, 21, 83, 158, 26, 80, 63, 5, 81, 32, 11, 28, 148, ] # Evaluation seed, this ensures that all classmates agents are trained on the same taxi starting position # Each seed has a specific starting state # Environment parameters env_id = "Taxi-v3" # Name of the environment max_steps = 99 # Max steps per episode gamma = 0.95 # Discounting rate # Exploration parameters max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.05 # Minimum exploration probability decay_rate = 0.005 # Exponential decay rate for exploration prob ``` ## Train our Q-Learning agent 🏃 ```python Qtable_taxi = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_taxi) Qtable_taxi ``` ## Create a model dictionary 💾 and publish our trained model to the Hub 🔥 - We create a model dictionary that will contain all the training hyperparameters for reproducibility and the Q-Table. ```python model = { "env_id": env_id, "max_steps": max_steps, "n_training_episodes": n_training_episodes, "n_eval_episodes": n_eval_episodes, "eval_seed": eval_seed, "learning_rate": learning_rate, "gamma": gamma, "max_epsilon": max_epsilon, "min_epsilon": min_epsilon, "decay_rate": decay_rate, "qtable": Qtable_taxi, } ``` ```python username = "" # FILL THIS repo_name = "" # FILL THIS push_to_hub(repo_id=f"{username}/{repo_name}", model=model, env=env) ``` Now that it's on the Hub, you can compare the results of your Taxi-v3 with your classmates using the leaderboard 🏆 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/taxi-leaderboard.png" alt="Taxi Leaderboard"> # Part 3: Load from Hub 🔽 What's amazing with Hugging Face Hub 🤗 is that you can easily load powerful models from the community. Loading a saved model from the Hub is really easy: 1. You go https://huggingface.co/models?other=q-learning to see the list of all the q-learning saved models. 2. You select one and copy its repo_id <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/copy-id.png" alt="Copy id"> 3. Then we just need to use `load_from_hub` with: - The repo_id - The filename: the saved model inside the repo. #### Do not modify this code ```python from urllib.error import HTTPError from huggingface_hub import hf_hub_download def load_from_hub(repo_id: str, filename: str) -> str: """ Download a model from Hugging Face Hub. 
:param repo_id: id of the model repository from the Hugging Face Hub :param filename: name of the model zip file from the repository """ # Get the model from the Hub, download and cache the model on your local disk pickle_model = hf_hub_download(repo_id=repo_id, filename=filename) with open(pickle_model, "rb") as f: downloaded_model_file = pickle.load(f) return downloaded_model_file ``` ### . ```python model = load_from_hub(repo_id="ThomasSimonini/q-Taxi-v3", filename="q-learning.pkl") # Try to use another model print(model) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ``` ```python model = load_from_hub( repo_id="ThomasSimonini/q-FrozenLake-v1-no-slippery", filename="q-learning.pkl" ) # Try to use another model env = gym.make(model["env_id"], is_slippery=False) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ``` ## Some additional challenges 🏆 The best way to learn **is to try things on your own**! As you saw, the current agent is not doing great. As a first suggestion, you can train for more steps. With 1,000,000 steps, we saw some great results! In the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) you will find your agents. Can you get to the top? Here are some ideas to climb up the leaderboard: * Train more steps * Try different hyperparameters by looking at what your classmates have done. * **Push your new trained model** on the Hub 🔥 Are walking on ice and driving taxis too boring to you? Try to **change the environment**, why not use FrozenLake-v1 slippery version? Check how they work [using the gymnasium documentation](https://gymnasium.farama.org/) and have fun 🎉. _____________________________________________________________________ Congrats 🥳, you've just implemented, trained, and uploaded your first Reinforcement Learning agent. Understanding Q-Learning is an **important step to understanding value-based methods.** In the next Unit with Deep Q-Learning, we'll see that while creating and updating a Q-table was a good strategy — **however, it is not scalable.** For instance, imagine you create an agent that learns to play Doom. <img src="https://vizdoom.cs.put.edu.pl/user/pages/01.tutorial/basic.png" alt="Doom"/> Doom is a large environment with a huge state space (millions of different states). Creating and updating a Q-table for that environment would not be efficient. That's why we'll study Deep Q-Learning in the next unit, an algorithm **where we use a neural network that approximates, given a state, the different Q-values for each action.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/> See you in Unit 3! 🔥 ## Keep learning, stay awesome 🤗
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit2/mid-way-recap.mdx
# Mid-way Recap [[mid-way-recap]]

Before diving into Q-Learning, let's summarize what we've just learned.

We have two types of value-based functions:

- State-value function: outputs the expected return if **the agent starts at a given state and acts according to the policy forever after.**
- Action-value function: outputs the expected return if **the agent starts in a given state, takes a given action at that state** and then acts according to the policy forever after.
- In value-based methods, rather than learning the policy, **we define the policy by hand** and we learn a value function. If we have an optimal value function, we **will have an optimal policy.**

There are two types of methods to learn a value function:

- With *the Monte Carlo method*, we update the value function from a complete episode, and so we **use the actual discounted return of this episode.**
- With *the TD Learning method,* we update the value function from a step, replacing the unknown \\(G_t\\) with **an estimated return called the TD target.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/summary-learning-mtds.jpg" alt="Summary"/>
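As a compact way to remember the difference, here is an illustrative sketch (not part of the original course material) of the two tabular update rules for a state-value table `V`, where `G_t` is the full discounted return of a finished episode; the learning rate and gamma values are arbitrary defaults:

```python
def monte_carlo_update(V, state, G_t, lr=0.1):
    # Monte Carlo: wait until the episode ends, then move V(S_t)
    # toward the actual discounted return G_t of that episode.
    V[state] = V[state] + lr * (G_t - V[state])
    return V


def td_update(V, state, reward, next_state, gamma=0.99, lr=0.1):
    # TD(0): update after a single step, replacing the unknown G_t
    # with the TD target R_{t+1} + gamma * V(S_{t+1}).
    td_target = reward + gamma * V[next_state]
    V[state] = V[state] + lr * (td_target - V[state])
    return V
```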
0
hf_public_repos/deep-rl-class/units/en
hf_public_repos/deep-rl-class/units/en/unit2/introduction.mdx
# Introduction to Q-Learning [[introduction-q-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/thumbnail.jpg" alt="Unit 2 thumbnail" width="100%"> In the first unit of this class, we learned about Reinforcement Learning (RL), the RL process, and the different methods to solve an RL problem. We also **trained our first agents and uploaded them to the Hugging Face Hub.** In this unit, we're going to **dive deeper into one of the Reinforcement Learning methods: value-based methods** and study our first RL algorithm: **Q-Learning.** We'll also **implement our first RL agent from scratch**, a Q-Learning agent, and will train it in two environments: 1. Frozen-Lake-v1 (non-slippery version): where our agent will need to **go from the starting state (S) to the goal state (G)** by walking only on frozen tiles (F) and avoiding holes (H). 2. An autonomous taxi: where our agent will need **to learn to navigate** a city to **transport its passengers from point A to point B.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/envs.gif" alt="Environments"/> Concretely, we will: - Learn about **value-based methods**. - Learn about the **differences between Monte Carlo and Temporal Difference Learning**. - Study and implement **our first RL algorithm**: Q-Learning. This unit is **fundamental if you want to be able to work on Deep Q-Learning**: the first Deep RL algorithm that played Atari games and beat the human level on some of them (breakout, space invaders, etc). So let's get started! 🚀
# Second Quiz [[quiz2]] The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: What is Q-Learning? <Question choices={[ { text: "The algorithm we use to train our Q-function", explain: "", correct: true }, { text: "A value function", explain: "It's an action-value function since it determines the value of being at a particular state and taking a specific action at that state", }, { text: "An algorithm that determines the value of being at a particular state and taking a specific action at that state", explain: "", correct: true }, { text: "A table", explain: "Q-function is not a Q-table. The Q-function is the algorithm that will feed the Q-table." } ]} /> ### Q2: What is a Q-table? <Question choices={[ { text: "An algorithm we use in Q-Learning", explain: "", }, { text: "Q-table is the internal memory of our agent", explain: "", correct: true }, { text: "In Q-table each cell corresponds a state value", explain: "Each cell corresponds to a state-action value pair value. Not a state value.", } ]} /> ### Q3: Why if we have an optimal Q-function Q* we have an optimal policy? <details> <summary>Solution</summary> Because if we have an optimal Q-function, we have an optimal policy since we know for each state what is the best action to take. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="link value policy"/> </details> ### Q4: Can you explain what is Epsilon-Greedy Strategy? <details> <summary>Solution</summary> Epsilon Greedy Strategy is a policy that handles the exploration/exploitation trade-off. The idea is that we define epsilon ɛ = 1.0: - With *probability 1 — ɛ* : we do exploitation (aka our agent selects the action with the highest state-action pair value). - With *probability ɛ* : we do exploration (trying random action). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Epsilon Greedy"/> </details> ### Q5: How do we update the Q value of a state, action pair? <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-update-ex.jpg" alt="Q Update exercise"/> <details> <summary>Solution</summary> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-update-solution.jpg" alt="Q Update exercise"/> </details> ### Q6: What's the difference between on-policy and off-policy <details> <summary>Solution</summary> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="On/off policy"/> </details> Congrats on finishing this Quiz 🥳, if you missed some elements, take time to read again the chapter to reinforce (😏) your knowledge.
# Mid-way Quiz [[mid-way-quiz]] The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: What are the two main approaches to find optimal policy? <Question choices={[ { text: "Policy-based methods", explain: "With Policy-Based methods, we train the policy directly to learn which action to take given a state.", correct: true }, { text: "Random-based methods", explain: "" }, { text: "Value-based methods", explain: "With value-based methods, we train a value function to learn which state is more valuable and use this value function to take the action that leads to it.", correct: true }, { text: "Evolution-strategies methods", explain: "" } ]} /> ### Q2: What is the Bellman Equation? <details> <summary>Solution</summary> **The Bellman equation is a recursive equation** that works like this: instead of starting for each state from the beginning and calculating the return, we can consider the value of any state as: Rt+1 + gamma * V(St+1) The immediate reward + the discounted value of the state that follows </details> ### Q3: Define each part of the Bellman Equation <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman4-quiz.jpg" alt="Bellman equation quiz"/> <details> <summary>Solution</summary> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/bellman4.jpg" alt="Bellman equation solution"/> </details> ### Q4: What is the difference between Monte Carlo and Temporal Difference learning methods? <Question choices={[ { text: "With Monte Carlo methods, we update the value function from a complete episode", explain: "", correct: true }, { text: "With Monte Carlo methods, we update the value function from a step", explain: "" }, { text: "With TD learning methods, we update the value function from a complete episode", explain: "" }, { text: "With TD learning methods, we update the value function from a step", explain: "", correct: true }, ]} /> ### Q5: Define each part of Temporal Difference learning formula <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/td-ex.jpg" alt="TD Learning exercise"/> <details> <summary>Solution</summary> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="TD Exercise"/> </details> ### Q6: Define each part of Monte Carlo learning formula <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/mc-ex.jpg" alt="MC Learning exercise"/> <details> <summary>Solution</summary> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/monte-carlo-approach.jpg" alt="MC Exercise"/> </details> Congrats on finishing this Quiz 🥳, if you missed some elements, take time to read again the previous sections to reinforce (😏) your knowledge.
# Glossary [[glossary]] This is a community-created glossary. Contributions are welcomed! ### Strategies to find the optimal policy - **Policy-based methods.** The policy is usually trained with a neural network to select what action to take given a state. In this case it is the neural network which outputs the action that the agent should take instead of using a value function. Depending on the experience received by the environment, the neural network will be re-adjusted and will provide better actions. - **Value-based methods.** In this case, a value function is trained to output the value of a state or a state-action pair that will represent our policy. However, this value doesn't define what action the agent should take. In contrast, we need to specify the behavior of the agent given the output of the value function. For example, we could decide to adopt a policy to take the action that always leads to the biggest reward (Greedy Policy). In summary, the policy is a Greedy Policy (or whatever decision the user takes) that uses the values of the value-function to decide the actions to take. ### Among the value-based methods, we can find two main strategies - **The state-value function.** For each state, the state-value function is the expected return if the agent starts in that state and follows the policy until the end. - **The action-value function.** In contrast to the state-value function, the action-value calculates for each state and action pair the expected return if the agent starts in that state and takes an action. Then it follows the policy forever after. ### Epsilon-greedy strategy: - Common strategy used in reinforcement learning that involves balancing exploration and exploitation. - Chooses the action with the highest expected reward with a probability of 1-epsilon. - Chooses a random action with a probability of epsilon. - Epsilon is typically decreased over time to shift focus towards exploitation. ### Greedy strategy: - Involves always choosing the action that is expected to lead to the highest reward, based on the current knowledge of the environment. (Only exploitation) - Always chooses the action with the highest expected reward. - Does not include any exploration. - Can be disadvantageous in environments with uncertainty or unknown optimal actions. ### Off-policy vs on-policy algorithms - **Off-policy algorithms:** A different policy is used at training time and inference time - **On-policy algorithms:** The same policy is used during training and inference If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [Ramón Rueda](https://github.com/ramon-rd) - [Hasarindu Perera](https://github.com/hasarinduperera/) - [Arkady Arkhangorodsky](https://github.com/arkadyark/)
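One small addition to the epsilon-greedy entry above: the "decreased over time" part is often implemented as an exponential decay schedule. Here is a small illustrative sketch (the parameter values are only examples, not the course's reference values):

```python
import numpy as np

max_epsilon, min_epsilon, decay_rate = 1.0, 0.05, 0.0005

def epsilon_at(episode):
    """Exponentially decay epsilon from max_epsilon towards min_epsilon."""
    return min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode)

print(epsilon_at(0))        # 1.0     -> almost pure exploration at the start
print(epsilon_at(10_000))   # ~0.056  -> mostly exploitation later in training
```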
# Monte Carlo vs Temporal Difference Learning [[mc-vs-td]] The last thing we need to discuss before diving into Q-Learning is the two learning strategies. Remember that an RL agent **learns by interacting with its environment.** The idea is that **given the experience and the received reward, the agent will update its value function or policy.** Monte Carlo and Temporal Difference Learning are two different **strategies on how to train our value function or our policy function.** Both of them **use experience to solve the RL problem.** On one hand, Monte Carlo uses **an entire episode of experience before learning.** On the other hand, Temporal Difference uses **only a step ( \\(S_t, A_t, R_{t+1}, S_{t+1}\\) ) to learn.** We'll explain both of them **using a value-based method example.** ## Monte Carlo: learning at the end of the episode [[monte-carlo]] Monte Carlo waits until the end of the episode, calculates \\(G_t\\) (return) and uses it as **a target for updating \\(V(S_t)\\).** So it requires a **complete episode of interaction before updating our value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/monte-carlo-approach.jpg" alt="Monte Carlo"/> If we take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-2.jpg" alt="Monte Carlo"/> - We always start the episode **at the same starting point.** - **The agent takes actions using the policy**. For instance, using an Epsilon Greedy Strategy, a policy that alternates between exploration (random actions) and exploitation. - We get **the reward and the next state.** - We terminate the episode if the cat eats the mouse or if the mouse moves > 10 steps. - At the end of the episode, **we have a list of State, Actions, Rewards, and Next States tuples** For instance [[State tile 3 bottom, Go Left, +1, State tile 2 bottom], [State tile 2 bottom, Go Left, +0, State tile 1 bottom]...] - **The agent will sum the total rewards \\(G_t\\)** (to see how well it did). - It will then **update \\(V(s_t)\\) based on the formula** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3.jpg" alt="Monte Carlo"/> - Then **start a new game with this new knowledge** By running more and more episodes, **the agent will learn to play better and better.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3p.jpg" alt="Monte Carlo"/> For instance, if we train a state-value function using Monte Carlo: - We initialize our value function **so that it returns 0 value for each state** - Our learning rate (lr) is 0.1 and our discount rate is 1 (= no discount) - Our mouse **explores the environment and takes random actions** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4.jpg" alt="Monte Carlo"/> - The mouse made more than 10 steps, so the episode ends . <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4p.jpg" alt="Monte Carlo"/> - We have a list of state, action, rewards, next_state, **we need to calculate the return \\(G{t}\\)** - \\(G_t = R_{t+1} + R_{t+2} + R_{t+3} ...\\) - \\(G_t = R_{t+1} + R_{t+2} + R_{t+3}…\\) (for simplicity we don’t discount the rewards). 
- \\(G_t = 1 + 0 + 0 + 0+ 0 + 0 + 1 + 1 + 0 + 0\\) - \\(G_t= 3\\) - We can now update \\(V(S_0)\\): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5.jpg" alt="Monte Carlo"/> - New \\(V(S_0) = V(S_0) + lr * [G_t — V(S_0)]\\) - New \\(V(S_0) = 0 + 0.1 * [3 – 0]\\) - New \\(V(S_0) = 0.3\\) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5p.jpg" alt="Monte Carlo"/> ## Temporal Difference Learning: learning at each step [[td-learning]] **Temporal Difference, on the other hand, waits for only one interaction (one step) \\(S_{t+1}\\)** to form a TD target and update \\(V(S_t)\\) using \\(R_{t+1}\\) and \\( \gamma * V(S_{t+1})\\). The idea with **TD is to update the \\(V(S_t)\\) at each step.** But because we didn't experience an entire episode, we don't have \\(G_t\\) (expected return). Instead, **we estimate \\(G_t\\) by adding \\(R_{t+1}\\) and the discounted value of the next state.** This is called bootstrapping. It's called this **because TD bases its update in part on an existing estimate \\(V(S_{t+1})\\) and not a complete sample \\(G_t\\).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="Temporal Difference"/> This method is called TD(0) or **one-step TD (update the value function after any individual step).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1p.jpg" alt="Temporal Difference"/> If we take the same example, <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2.jpg" alt="Temporal Difference"/> - We initialize our value function so that it returns 0 value for each state. - Our learning rate (lr) is 0.1, and our discount rate is 1 (no discount). - Our mouse begins to explore the environment and takes a random action: **going to the left** - It gets a reward \\(R_{t+1} = 1\\) since **it eats a piece of cheese** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2p.jpg" alt="Temporal Difference"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3.jpg" alt="Temporal Difference"/> We can now update \\(V(S_0)\\): New \\(V(S_0) = V(S_0) + lr * [R_1 + \gamma * V(S_1) - V(S_0)]\\) New \\(V(S_0) = 0 + 0.1 * [1 + 1 * 0–0]\\) New \\(V(S_0) = 0.1\\) So we just updated our value function for State 0. Now we **continue to interact with this environment with our updated value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3p.jpg" alt="Temporal Difference"/> To summarize: - With *Monte Carlo*, we update the value function from a complete episode, and so we **use the actual accurate discounted return of this episode.** - With *TD Learning*, we update the value function from a step, and we replace \\(G_t\\), which we don't know, with **an estimated return called the TD target.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Summary.jpg" alt="Summary"/>
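As a sanity check, you can reproduce both numeric updates from the mouse example above in a few lines of Python (a minimal sketch using the exact values from the figures):

```python
lr, gamma = 0.1, 1.0   # learning rate 0.1, no discounting, as in the example

# Monte Carlo: V(S_0) is updated with the actual return of the whole episode
G_t = 1 + 0 + 0 + 0 + 0 + 0 + 1 + 1 + 0 + 0          # = 3
V_s0 = 0.0
V_s0 = V_s0 + lr * (G_t - V_s0)
print(V_s0)                                           # 0.3

# TD(0): V(S_0) is updated after the very first step (R_1 = 1, V(S_1) = 0)
V_s0, V_s1 = 0.0, 0.0
V_s0 = V_s0 + lr * (1 + gamma * V_s1 - V_s0)
print(V_s0)                                           # 0.1
```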
# Two types of value-based methods [[two-types-value-based-methods]] In value-based methods, **we learn a value function** that **maps a state to the expected value of being at that state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/vbm-1.jpg" alt="Value Based Methods"/> The value of a state is the **expected discounted return** the agent can get if it **starts at that state and then acts according to our policy.** <Tip> But what does it mean to act according to our policy? After all, we don't have a policy in value-based methods since we train a value function and not a policy. </Tip> Remember that the goal of an **RL agent is to have an optimal policy π\*.** To find the optimal policy, we learned about two different methods: - *Policy-based methods:* **Directly train the policy** to select what action to take given a state (or a probability distribution over actions at that state). In this case, we **don't have a value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches-2.jpg" alt="Two RL approaches"/> The policy takes a state as input and outputs what action to take at that state (deterministic policy: a policy that output one action given a state, contrary to stochastic policy that output a probability distribution over actions). And consequently, **we don't define by hand the behavior of our policy; it's the training that will define it.** - *Value-based methods:* **Indirectly, by training a value function** that outputs the value of a state or a state-action pair. Given this value function, our policy **will take an action.** Since the policy is not trained/learned, **we need to specify its behavior.** For instance, if we want a policy that, given the value function, will take actions that always lead to the biggest reward, **we'll create a Greedy Policy.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches-3.jpg" alt="Two RL approaches"/> <figcaption>Given a state, our action-value function (that we train) outputs the value of each action at that state. Then, our pre-defined Greedy Policy selects the action that will yield the highest value given a state or a state action pair.</figcaption> </figure> Consequently, whatever method you use to solve your problem, **you will have a policy**. In the case of value-based methods, you don't train the policy: your policy **is just a simple pre-specified function** (for instance, the Greedy Policy) that uses the values given by the value-function to select its actions. So the difference is: - In policy-based training, **the optimal policy (denoted π\*) is found by training the policy directly.** - In value-based training, **finding an optimal value function (denoted Q\* or V\*, we'll study the difference below) leads to having an optimal policy.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link between value and policy"/> In fact, most of the time, in value-based methods, you'll use **an Epsilon-Greedy Policy** that handles the exploration/exploitation trade-off; we'll talk about this when we talk about Q-Learning in the second part of this unit. 
As we mentioned above, we have two types of value-based functions: ## The state-value function [[state-value-function]] We write the state value function under a policy π like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/state-value-function-1.jpg" alt="State value function"/> For each state, the state-value function outputs the expected return if the agent **starts at that state** and then follows the policy forever afterward (for all future timesteps, if you prefer). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/state-value-function-2.jpg" alt="State value function"/> <figcaption>If we take the state with value -7: it's the expected return starting at that state and taking actions according to our policy (greedy policy), so right, right, right, down, down, right, right.</figcaption> </figure> ## The action-value function [[action-value-function]] In the action-value function, for each state and action pair, the action-value function **outputs the expected return** if the agent starts in that state, takes that action, and then follows the policy forever after. The value of taking action \\(a\\) in state \\(s\\) under a policy \\(π\\) is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/action-state-value-function-1.jpg" alt="Action State value function"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/action-state-value-function-2.jpg" alt="Action State value function"/> We see that the difference is: - For the state-value function, we calculate **the value of a state \\(S_t\\)** - For the action-value function, we calculate **the value of the state-action pair ( \\(S_t, A_t\\) ) hence the value of taking that action at that state.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-types.jpg" alt="Two types of value function"/> <figcaption> Note: We didn't fill all the state-action pairs for the example of Action-value function</figcaption> </figure> In either case, whichever value function we choose (state-value or action-value function), **the returned value is the expected return.** However, the problem is that **to calculate EACH value of a state or a state-action pair, we need to sum all the rewards an agent can get if it starts at that state.** This can be a computationally expensive process, and that's **where the Bellman equation comes in to help us.**
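Before moving on to the Bellman equation, here is what the two value functions look like in code: a state-value function needs one number per state, while an action-value function needs one number per state-action pair. A small illustrative sketch (the sizes are made up):

```python
import numpy as np

n_states, n_actions = 16, 4

V = np.zeros(n_states)               # state-value function: V[s]
Q = np.zeros((n_states, n_actions))  # action-value function: Q[s, a]

# If we act greedily with respect to Q, the value of a state is the value of its
# best action, so a state-value estimate can be read off the action-value table:
V_from_Q = Q.max(axis=1)             # V(s) = max_a Q(s, a) under the greedy policy
```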
# Introducing Q-Learning [[q-learning]] ## What is Q-Learning? [[what-is-q-learning]] Q-Learning is an **off-policy value-based method that uses a TD approach to train its action-value function:** - *Off-policy*: we'll talk about that at the end of this unit. - *Value-based method*: finds the optimal policy indirectly by training a value or action-value function that will tell us **the value of each state or each state-action pair.** - *TD approach:* **updates its action-value function at each step instead of at the end of the episode.** **Q-Learning is the algorithm we use to train our Q-function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function.jpg" alt="Q-function"/> <figcaption>Given a state and action, our Q Function outputs a state-action value (also called Q-value)</figcaption> </figure> The **Q comes from "the Quality" (the value) of that action at that state.** Let's recap the difference between value and reward: - The *value of a state*, or a *state-action pair* is the expected cumulative reward our agent gets if it starts at this state (or state-action pair) and then acts accordingly to its policy. - The *reward* is the **feedback I get from the environment** after performing an action at a state. Internally, our Q-function is encoded by **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.** Let's go through an example of a maze. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-1.jpg" alt="Maze example"/> The Q-table is initialized. That's why all values are = 0. This table **contains, for each state and action, the corresponding state-action values.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-2.jpg" alt="Maze example"/> Here we see that the **state-action value of the initial state and going up is 0:** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-3.jpg" alt="Maze example"/> So: the Q-function uses a Q-table **that has the value of each state-action pair.** Given a state and action, **our Q-function will search inside its Q-table to output the value.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q-function"/> </figure> If we recap, *Q-Learning* **is the RL algorithm that:** - Trains a *Q-function* (an **action-value function**), which internally is a **Q-table that contains all the state-action pair values.** - Given a state and action, our Q-function **will search its Q-table for the corresponding value.** - When the training is done, **we have an optimal Q-function, which means we have optimal Q-table.** - And if we **have an optimal Q-function**, we **have an optimal policy** since we **know the best action to take at each state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy"/> In the beginning, **our Q-table is useless since it gives arbitrary values for each state-action pair** (most of the time, we initialize the Q-table to 0). 
As the agent **explores the environment and we update the Q-table, it will give us a better and better approximation** to the optimal policy. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-1.jpg" alt="Q-learning"/> <figcaption>We see here that with the training, our Q-table is better since, thanks to it, we can know the value of each state-action pair.</figcaption> </figure> Now that we understand what Q-Learning, Q-functions, and Q-tables are, **let's dive deeper into the Q-Learning algorithm**. ## The Q-Learning algorithm [[q-learning-algo]] This is the Q-Learning pseudocode; let's study each part and **see how it works with a simple example before implementing it.** Don't be intimidated by it, it's simpler than it looks! We'll go over each step. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-learning"/> ### Step 1: We initialize the Q-table [[step1]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-3.jpg" alt="Q-learning"/> We need to initialize the Q-table for each state-action pair. **Most of the time, we initialize with values of 0.** ### Step 2: Choose an action using the epsilon-greedy strategy [[step2]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Q-learning"/> The epsilon-greedy strategy is a policy that handles the exploration/exploitation trade-off. The idea is that, with an initial value of ɛ = 1.0: - *With probability 1 — ɛ* : we do **exploitation** (aka our agent selects the action with the highest state-action pair value). - With probability ɛ: **we do exploration** (trying random action). At the beginning of the training, **the probability of doing exploration will be huge since ɛ is very high, so most of the time, we'll explore.** But as the training goes on, and consequently our **Q-table gets better and better in its estimations, we progressively reduce the epsilon value** since we will need less and less exploration and more exploitation. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-5.jpg" alt="Q-learning"/> ### Step 3: Perform action At, get reward Rt+1 and next state St+1 [[step3]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-6.jpg" alt="Q-learning"/> ### Step 4: Update Q(St, At) [[step4]] Remember that in TD Learning, we update our policy or value function (depending on the RL method we choose) **after one step of the interaction.** To produce our TD target, **we used the immediate reward \\(R_{t+1}\\) plus the discounted value of the next state**, computed by finding the action that maximizes the current Q-function at the next state. (We call that bootstrap). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-7.jpg" alt="Q-learning"/> Therefore, our \\(Q(S_t, A_t)\\) **update formula goes like this:** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-8.jpg" alt="Q-learning"/> This means that to update our \\(Q(S_t, A_t)\\): - We need \\(S_t, A_t, R_{t+1}, S_{t+1}\\). - To update our Q-value at a given state-action pair, we use the TD target. 
How do we form the TD target? 1. We obtain the reward after taking the action \\(R_{t+1}\\). 2. To get the **best state-action pair value** for the next state, we use a greedy policy to select the next best action. Note that this is not an epsilon-greedy policy, this will always take the action with the highest state-action value. Then when the update of this Q-value is done, we start in a new state and select our action **using a epsilon-greedy policy again.** **This is why we say that Q Learning is an off-policy algorithm.** ## Off-policy vs On-policy [[off-vs-on]] The difference is subtle: - *Off-policy*: using **a different policy for acting (inference) and updating (training).** For instance, with Q-Learning, the epsilon-greedy policy (acting policy), is different from the greedy policy that is **used to select the best next-state action value to update our Q-value (updating policy).** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-1.jpg" alt="Off-on policy"/> <figcaption>Acting Policy</figcaption> </figure> Is different from the policy we use during the training part: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-2.jpg" alt="Off-on policy"/> <figcaption>Updating policy</figcaption> </figure> - *On-policy:* using the **same policy for acting and updating.** For instance, with Sarsa, another value-based algorithm, **the epsilon-greedy policy selects the next state-action pair, not a greedy policy.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-3.jpg" alt="Off-on policy"/> <figcaption>Sarsa</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="Off-on policy"/> </figure>
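Putting Steps 2 to 4 into code also makes the off-policy point visible: the action is chosen with the epsilon-greedy (acting) policy, while the update bootstraps from the greedy maximum over next actions (updating policy). Here is a hedged sketch of one interaction step; it assumes a NumPy Q-table, an `epsilon_greedy_policy` helper like the one used in this unit's hands-on, and a Gymnasium-style `env.step()`:

```python
import numpy as np

def q_learning_step(env, Qtable, state, epsilon, learning_rate, gamma, epsilon_greedy_policy):
    """One Q-Learning interaction step (sketch).
    Acting policy: epsilon-greedy.  Updating policy: greedy (max over next actions)."""
    action = epsilon_greedy_policy(Qtable, state, epsilon)               # Step 2
    new_state, reward, terminated, truncated, info = env.step(action)    # Step 3

    # Step 4: Q(s,a) <- Q(s,a) + lr * [R_{t+1} + gamma * max_a' Q(s',a') - Q(s,a)]
    td_target = reward + gamma * np.max(Qtable[new_state])
    td_error = td_target - Qtable[state][action]
    Qtable[state][action] = Qtable[state][action] + learning_rate * td_error

    return new_state, terminated or truncated
```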
# A Q-Learning example [[q-learning-example]] To better understand Q-Learning, let's take a simple example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-Example-2.jpg" alt="Maze-Example"/> - You're a mouse in this tiny maze. You always **start at the same starting point.** - The goal is **to eat the big pile of cheese at the bottom right-hand corner** and avoid the poison. After all, who doesn't like cheese? - The episode ends if we eat the poison, **eat the big pile of cheese**, or if we take more than five steps. - The learning rate is 0.1 - The discount rate (gamma) is 0.99 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-1.jpg" alt="Maze-Example"/> The reward function goes like this: - **+0:** Going to a state with no cheese in it. - **+1:** Going to a state with a small cheese in it. - **+10:** Going to the state with the big pile of cheese. - **-10:** Going to the state with the poison and thus dying. - **+0** If we take more than five steps. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-2.jpg" alt="Maze-Example"/> To train our agent to have an optimal policy (so a policy that goes right, right, down), **we will use the Q-Learning algorithm**. ## Step 1: Initialize the Q-table [[step1]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-1.jpg" alt="Maze-Example"/> So, for now, **our Q-table is useless**; we need **to train our Q-function using the Q-Learning algorithm.** Let's do it for 2 training timesteps: Training timestep 1: ## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2]] Because epsilon is big (= 1.0), I take a random action. In this case, I go right. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-3.jpg" alt="Maze-Example"/> ## Step 3: Perform action At, get Rt+1 and St+1 [[step3]] By going right, I get a small cheese, so \\(R_{t+1} = 1\\) and I'm in a new state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-4.jpg" alt="Maze-Example"/> ## Step 4: Update Q(St, At) [[step4]] We can now update \\(Q(S_t, A_t)\\) using our formula. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-5.jpg" alt="Maze-Example"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-4.jpg" alt="Maze-Example"/> Training timestep 2: ## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2-2]] **I take a random action again, since epsilon=0.99 is big**. (Notice we decay epsilon a little bit because, as the training progress, we want less and less exploration). I took the action 'down'. 
**This is not a good action since it leads me to the poison.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-6.jpg" alt="Maze-Example"/> ## Step 3: Perform action At, get Rt+1 and St+1 [[step3-3]] Because I ate poison, **I get \\(R_{t+1} = -10\\), and I die.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-7.jpg" alt="Maze-Example"/> ## Step 4: Update Q(St, At) [[step4-4]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-8.jpg" alt="Maze-Example"/> Because we're dead, we start a new episode. But what we see here is that, **with two explorations steps, my agent became smarter.** As we continue exploring and exploiting the environment and updating Q-values using the TD target, the **Q-table will give us a better and better approximation. At the end of the training, we'll get an estimate of the optimal Q-function.**
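You can verify the two updates from this walkthrough in a couple of lines (a sketch using the learning rate 0.1 and discount rate 0.99 from the example; the maximum Q-value of the next state is 0 in both cases because the Q-table is still all zeros):

```python
lr, gamma = 0.1, 0.99

# Training timestep 1: go right, R_{t+1} = +1 (small cheese), max_a Q(S_{t+1}, a) = 0
q_right = 0.0
q_right = q_right + lr * (1 + gamma * 0.0 - q_right)
print(q_right)   # 0.1

# Training timestep 2: go down onto the poison, R_{t+1} = -10, episode terminates
q_down = 0.0
q_down = q_down + lr * (-10 + gamma * 0.0 - q_down)
print(q_down)    # -1.0
```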
# Q-Learning Recap [[q-learning-recap]] *Q-Learning* **is the RL algorithm that** : - Trains a *Q-function*, an **action-value function** encoded, in internal memory, by a *Q-table* **containing all the state-action pair values.** - Given a state and action, our Q-function **will search its Q-table for the corresponding value.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q function" width="100%"/> - When the training is done, **we have an optimal Q-function, or, equivalently, an optimal Q-table.** - And if we **have an optimal Q-function**, we have an optimal policy, since we **know, for each state, the best action to take.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" width="100%"/> But, in the beginning, our **Q-table is useless since it gives arbitrary values for each state-action pair (most of the time we initialize the Q-table to 0 values)**. But, as we explore the environment and update our Q-table it will give us a better and better approximation. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/q-learning.jpeg" alt="q-learning.jpeg" width="100%"/> This is the Q-Learning pseudocode: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-Learning" width="100%"/>
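To connect that pseudocode to the hands-on notebook, here is a compact, hedged sketch of a full training loop, wrapping the one-step update from the previous section into episodes. It assumes an `epsilon_greedy_policy` helper and a Gymnasium environment; the hyperparameter names mirror the ones used in this unit, but treat it as a sketch, not the notebook's reference implementation.

```python
import numpy as np

def train(env, Qtable, n_training_episodes, max_steps, learning_rate,
          gamma, min_epsilon, max_epsilon, decay_rate):
    """Minimal Q-Learning training loop (sketch, assuming epsilon_greedy_policy exists)."""
    for episode in range(n_training_episodes):
        # Decay epsilon: lots of exploration early on, mostly exploitation later
        epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode)
        state, info = env.reset()

        for step in range(max_steps):
            action = epsilon_greedy_policy(Qtable, state, epsilon)
            new_state, reward, terminated, truncated, info = env.step(action)

            # Q(s,a) <- Q(s,a) + lr * [R_{t+1} + gamma * max_a' Q(s',a') - Q(s,a)]
            Qtable[state][action] += learning_rate * (
                reward + gamma * np.max(Qtable[new_state]) - Qtable[state][action]
            )

            if terminated or truncated:
                break
            state = new_state

    return Qtable
```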
# Conclusion [[conclusion]] Congrats on finishing this chapter! There was a lot of information. And congrats on finishing the tutorials. You’ve just implemented your first RL agent from scratch and shared it on the Hub 🥳. Implementing from scratch when you study a new architecture **is important to understand how it works.** It's **normal if you still feel confused** by all these elements. **This was the same for me and for everyone who studies RL.** Take time to really grasp the material before continuing. In the next chapter, we’re going to dive deeper by studying our first Deep Reinforcement Learning algorithm based on Q-Learning: Deep Q-Learning. And you'll train a **DQN agent with <a href="https://github.com/DLR-RM/rl-baselines3-zoo">RL-Baselines3 Zoo</a> to play Atari Games**. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Atari environments"/> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. ## Monte Carlo and TD Learning [[mc-td]] To dive deeper into Monte Carlo and Temporal Difference Learning: - <a href="https://stats.stackexchange.com/questions/355820/why-do-temporal-difference-td-methods-have-lower-variance-than-monte-carlo-met">Why do temporal difference (TD) methods have lower variance than Monte Carlo methods?</a> - <a href="https://stats.stackexchange.com/questions/336974/when-are-monte-carlo-methods-preferred-over-temporal-difference-ones"> When are Monte Carlo methods preferred over temporal difference ones?</a> ## Q-Learning [[q-learning]] - <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto Chapter 5, 6 and 7</a> - <a href="https://youtu.be/Psrhxy88zww">Foundations of Deep RL Series, L2 Deep Q-Learning by Pieter Abbeel</a>
# The certification process The certification process is **completely free**: - To get a *certificate of completion*: you need **to pass 80% of the assignments** before the end of September 2023. - To get a *certificate of excellence*: you need **to pass 100% of the assignments** before the end of September 2023. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/certification.jpg" alt="Course certification" width="100%"/> When we say pass, **we mean that your model must be pushed to the Hub and get a result equal or above the minimal requirement**. To check your progression and which unit you passed/not passed: https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course Now that you're ready for the certification process, you need to: 1. Go here: https://huggingface.co/spaces/huggingface-projects/Deep-RL-Course-Certification/ 2. Type your *hugging face username*, your *first name*, *last name* 3. Click on "Generate my certificate". - If you passed 80% of the assignments, **congratulations** you've just got the certificate of completion. - If you passed 100% of the assignments, **congratulations** you've just got the excellence certificate. - If you are below 80%, don't be discouraged! Check which units you need to do again to get your certificate. 4. You can download your certificate in pdf format and png format. Don't hesitate to share your certificate on Twitter (tag me @ThomasSimonini and @huggingface) and on Linkedin.
# Congratulations <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/communication/thumbnail.png" alt="Thumbnail"/> **Congratulations on finishing this course!** With perseverance, hard work, and determination, **you've acquired a solid background in Deep Reinforcement Learning**. But finishing this course is **not the end of your journey**. It's just the beginning: don't hesitate to explore bonus unit 3, where we show you topics you may be interested in studying. And don't hesitate to **share what you're doing, and ask questions in the discord server** **Thank you** for being part of this course. **I hope you liked this course as much as I loved writing it**. Don't hesitate **to give us feedback on how we can improve the course** using [this form](https://forms.gle/BzKXWzLAGZESGNaE9) And don't forget **to check in the next section how you can get (if you pass) your certificate of completion ‎‍🎓.** One last thing, to keep in touch with the Reinforcement Learning Team and with me: - [Follow me on Twitter](https://twitter.com/thomassimonini) - [Follow Hugging Face Twitter account](https://twitter.com/huggingface) - [Join the Hugging Face Discord](https://www.hf.co/join/discord) ## Keep Learning, Stay Awesome 🤗 Thomas Simonini,
# Hands-on [[hands-on]] Now that you've learned to use Optuna, here are some ideas to apply what you've learned: 1️⃣ **Beat your LunarLander-v2 agent results**, by using Optuna to find a better set of hyperparameters. You can also try with another environment, such as MountainCar-v0 and CartPole-v1. 2️⃣ **Beat your SpaceInvaders agent results**. By doing this, you'll see how valuable and powerful Optuna can be in training better agents. Have fun! Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
# Introduction [[introduction]]

One of the most critical tasks in Deep Reinforcement Learning is to **find a good set of training hyperparameters**.

<img src="https://raw.githubusercontent.com/optuna/optuna/master/docs/image/optuna-logo.png" alt="Optuna Logo"/>

[Optuna](https://optuna.org/) is a library that helps you automate this search. In this Unit, we'll study a **little bit of the theory behind automatic hyperparameter tuning**. We'll first try to manually optimize the hyperparameters of the DQN studied in the last unit. We'll then **learn how to automate the search using Optuna**.
# Optuna Tutorial [[optuna]]

The content below comes from [Antonin Raffin's ICRA 2022 presentations](https://araffin.github.io/tools-for-robotic-rl-icra2022/); he's one of the founders of Stable-Baselines and RL-Baselines3-Zoo.

## The theory behind Hyperparameter tuning

<Youtube id="AidFTOdGNFQ" />

## Optuna Tutorial

<Youtube id="ihP7E76KGOI" />

The notebook 👉 [here](https://colab.research.google.com/github/araffin/tools-for-robotic-rl-icra2022/blob/main/notebooks/optuna_lab.ipynb)
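To give you a feel for the Optuna API before diving into the videos and notebook, here is a minimal hedged sketch of a hyperparameter search. The `train_and_evaluate` helper is hypothetical: you would replace it with your own training and evaluation code (for instance, around Stable-Baselines3) that returns the mean reward of the trained agent.

```python
import optuna

def objective(trial):
    # Sample candidate hyperparameters for this trial
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    gamma = trial.suggest_float("gamma", 0.90, 0.9999)
    n_steps = trial.suggest_categorical("n_steps", [128, 256, 512, 1024])

    # Hypothetical helper: train an agent with these values and return its mean reward
    mean_reward = train_and_evaluate(learning_rate=learning_rate, gamma=gamma, n_steps=n_steps)
    return mean_reward

study = optuna.create_study(direction="maximize")  # we want to maximize the mean reward
study.optimize(objective, n_trials=50)
print(study.best_params)
```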
# Offline vs. Online Reinforcement Learning Deep Reinforcement Learning (RL) is a framework **to build decision-making agents**. These agents aim to learn optimal behavior (policy) by interacting with the environment through **trial and error and receiving rewards as unique feedback**. The agent’s goal **is to maximize its cumulative reward**, called return. Because RL is based on the *reward hypothesis*: all goals can be described as the **maximization of the expected cumulative reward**. Deep Reinforcement Learning agents **learn with batches of experience**. The question is, how do they collect it?: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/offlinevsonlinerl.gif" alt="Unit bonus 3 thumbnail"> <figcaption>A comparison between Reinforcement Learning in an Online and Offline setting, figure taken from <a href="https://offline-rl.github.io/">this post</a></figcaption> </figure> - In *online reinforcement learning*, which is what we've learned during this course, the agent **gathers data directly**: it collects a batch of experience by **interacting with the environment**. Then, it uses this experience immediately (or via some replay buffer) to learn from it (update its policy). But this implies that either you **train your agent directly in the real world or have a simulator**. If you don’t have one, you need to build it, which can be very complex (how to reflect the complex reality of the real world in an environment?), expensive, and insecure (if the simulator has flaws that may provide a competitive advantage, the agent will exploit them). - On the other hand, in *offline reinforcement learning*, the agent only **uses data collected from other agents or human demonstrations**. It does **not interact with the environment**. The process is as follows: - **Create a dataset** using one or more policies and/or human interactions. - Run **offline RL on this dataset** to learn a policy This method has one drawback: the *counterfactual queries problem*. What do we do if our agent **decides to do something for which we don’t have the data?** For instance, turning right on an intersection but we don’t have this trajectory. There exist some solutions on this topic, but if you want to know more about offline reinforcement learning, you can [watch this video](https://www.youtube.com/watch?v=k08N5a0gG0A) ## Further reading For more information, we recommend you check out the following resources: - [Offline Reinforcement Learning, Talk by Sergei Levine](https://www.youtube.com/watch?v=qgZPZREor5I) - [Offline Reinforcement Learning: Tutorial, Review, and Perspectives on Open Problems](https://arxiv.org/abs/2005.01643) ## Author This section was written by <a href="https://twitter.com/ThomasSimonini"> Thomas Simonini</a>
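As a small appendix to the comparison above, here is a schematic sketch of where the experience comes from in each setting. Everything in it (`policy`, `update`, `fixed_dataset`, `n_steps`) is a placeholder meant to show the structure, not a working implementation:

```python
# Online RL: the agent generates its own experience by interacting with the environment.
state, info = env.reset()
for step in range(n_steps):
    action = policy(state)
    next_state, reward, terminated, truncated, info = env.step(action)
    update(policy, (state, action, reward, next_state))          # learn from fresh data
    state = next_state if not (terminated or truncated) else env.reset()[0]

# Offline RL: the agent never calls env.step(); it only learns from a fixed dataset
# that was collected beforehand by other policies or by humans.
for state, action, reward, next_state in fixed_dataset:
    update(policy, (state, action, reward, next_state))          # learn from logged data
```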
# Model Based Reinforcement Learning (MBRL)

Model-based reinforcement learning only differs from its model-free counterpart in learning a *dynamics model*, but that has substantial downstream effects on how the decisions are made.

The dynamics model usually models the environment transition dynamics, \\( s_{t+1} = f_\theta (s_t, a_t) \\), but things like inverse dynamics models (mapping from states to actions) or reward models (predicting rewards) can be used in this framework.

## Simple definition

- There is an agent that repeatedly tries to solve a problem, **accumulating state and action data**.
- With that data, the agent creates a structured learning tool, *a dynamics model*, to reason about the world.
- With the dynamics model, the agent **decides how to act by predicting the future**.
- With those actions, **the agent collects more data, improves said model, and hopefully improves future actions**.

## Academic definition

Model-based reinforcement learning (MBRL) follows the framework of an agent interacting in an environment, **learning a model of said environment**, and then **leveraging the model for control (making decisions)**.

Specifically, the agent acts in a Markov Decision Process (MDP) governed by a transition function \\( s_{t+1} = f (s_t , a_t) \\) and returns a reward at each step \\( r(s_t, a_t) \\). With a collected dataset \\( D := \\{ s_i, a_i, s_{i+1}, r_i \\} \\), the agent learns a model, \\( s_{t+1} = f_\theta (s_t , a_t) \\), **to minimize the negative log-likelihood of the transitions**.

We employ sample-based model-predictive control (MPC) using the learned dynamics model, which optimizes the expected reward over a finite, recursively predicted horizon, \\( \tau \\), from a set of actions sampled from a uniform distribution \\( U(a) \\) (see [paper](https://arxiv.org/pdf/2002.04523) or [paper](https://arxiv.org/pdf/2012.09156.pdf) or [paper](https://arxiv.org/pdf/2009.01221.pdf)).

## Further reading

For more information on MBRL, we recommend you check out the following resources:

- A [blog post on debugging MBRL](https://www.natolambert.com/writing/debugging-mbrl).
- A [recent review paper on MBRL](https://arxiv.org/abs/2006.16712).

## Author

This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
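As an appendix to the academic definition above, here is a rough sketch of the sample-based MPC loop it describes. The `dynamics_model` and `reward_fn` arguments stand in for a learned model \\( f_\theta \\) and a (known or learned) reward function; the horizon and number of candidates are arbitrary example values.

```python
import numpy as np

def plan_with_random_shooting(state, dynamics_model, reward_fn,
                              action_low, action_high,
                              horizon=15, n_candidates=1000):
    """Sample-based MPC (random shooting): sample action sequences uniformly,
    roll them out through the learned dynamics model, and execute only the
    first action of the best-scoring sequence before replanning."""
    action_low = np.asarray(action_low, dtype=np.float64)
    action_high = np.asarray(action_high, dtype=np.float64)
    action_dim = action_low.shape[0]

    # Candidate action sequences sampled from a uniform distribution U(a)
    candidates = np.random.uniform(action_low, action_high,
                                   size=(n_candidates, horizon, action_dim))
    returns = np.zeros(n_candidates)

    for i, actions in enumerate(candidates):
        s = state
        for a in actions:
            returns[i] += reward_fn(s, a)   # accumulate predicted reward
            s = dynamics_model(s, a)        # s_{t+1} = f_theta(s_t, a_t), the learned model

    best = int(np.argmax(returns))
    return candidates[best, 0]              # first action of the best sequence
```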
# Interesting Environments to try Here we provide a list of interesting environments you can try to train your agents on: ## MineRL <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/minerl.jpg" alt="MineRL"/> MineRL is a Python library that provides a Gym interface for interacting with the video game Minecraft, accompanied by datasets of human gameplay. Every year there are challenges with this library. Check the [website](https://minerl.io/) To start using this environment, check these resources: - [What is MineRL?](https://www.youtube.com/watch?v=z6PTrGifupU) - [First steps in MineRL](https://www.youtube.com/watch?v=8yIrWcyWGek) - [MineRL documentation and tutorials](https://minerl.readthedocs.io/en/latest/) ## DonkeyCar Simulator <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/donkeycar.jpg" alt="Donkey Car"/> Donkey is a Self Driving Car Platform for hobby remote control cars. This simulator version is built on the Unity game platform. It uses their internal physics and graphics and connects to a donkey Python process to use our trained model to control the simulated Donkey (car). To start using this environment, check these resources: - [DonkeyCar Simulator documentation](https://docs.donkeycar.com/guide/deep_learning/simulator/) - [Learn to Drive Smoothly (Antonin Raffin's tutorial) Part 1](https://www.youtube.com/watch?v=ngK33h00iBE) - [Learn to Drive Smoothly (Antonin Raffin's tutorial) Part 2](https://www.youtube.com/watch?v=DUqssFvcSOY) - [Learn to Drive Smoothly (Antonin Raffin's tutorial) Part 3](https://www.youtube.com/watch?v=v8j2bpcE4Rg) - Pretrained agents: - https://huggingface.co/araffin/tqc-donkey-mountain-track-v0 - https://huggingface.co/araffin/tqc-donkey-avc-sparkfun-v0 - https://huggingface.co/araffin/tqc-donkey-minimonaco-track-v0 ## Starcraft II <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/alphastar.jpg" alt="Alphastar"/> Starcraft II is a famous *real-time strategy game*. DeepMind has used this game for their Deep Reinforcement Learning research with [Alphastar](https://www.deepmind.com/blog/alphastar-mastering-the-real-time-strategy-game-starcraft-ii) To start using this environment, check these resources: - [Starcraft gym](http://starcraftgym.com/) - [A. I. Learns to Play Starcraft 2 (Reinforcement Learning) tutorial](https://www.youtube.com/watch?v=q59wap1ELQ4) ## Author This section was written by <a href="https://twitter.com/ThomasSimonini"> Thomas Simonini</a>
# Introduction

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/thumbnail.png" alt="Unit bonus 3 thumbnail"/>

Congratulations on finishing this course! **You now have a solid background in Deep Reinforcement Learning**.

But this course was just the beginning of your Deep Reinforcement Learning journey: there are so many areas still to discover.

In this optional unit, we **give you resources to explore multiple concepts and research topics in Reinforcement Learning**.

Unlike the other units, this unit is a collective work of multiple people from Hugging Face. We mention the author of each section.

Sound fun? Let's get started 🔥
# Language models in RL ## LMs encode useful knowledge for agents **Language models** (LMs) can exhibit impressive abilities when manipulating text such as question-answering or even step-by-step reasoning. Additionally, their training on massive text corpora allowed them to **encode various types of knowledge including abstract ones about the physical rules of our world** (for instance what is possible to do with an object, what happens when one rotates an object…). A natural question recently studied was whether such knowledge could benefit agents such as robots when trying to solve everyday tasks. And while these works showed interesting results, the proposed agents lacked any learning method. **This limitation prevents these agent from adapting to the environment (e.g. fixing wrong knowledge) or learning new skills.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/language.png" alt="Language"> <figcaption>Source: <a href="https://ai.googleblog.com/2022/08/towards-helpful-robots-grounding.html">Towards Helpful Robots: Grounding Language in Robotic Affordances</a></figcaption> </figure> ## LMs and RL There is therefore a potential synergy between LMs which can bring knowledge about the world, and RL which can align and correct this knowledge by interacting with an environment. It is especially interesting from a RL point-of-view as the RL field mostly relies on the **Tabula-rasa** setup where everything is learned from scratch by the agent leading to: 1) Sample inefficiency 2) Unexpected behaviors from humans’ eyes As a first attempt, the paper [“Grounding Large Language Models with Online Reinforcement Learning”](https://arxiv.org/abs/2302.02662v1) tackled the problem of **adapting or aligning a LM to a textual environment using PPO**. They showed that the knowledge encoded in the LM lead to a fast adaptation to the environment (opening avenues for sample efficient RL agents) but also that such knowledge allowed the LM to better generalize to new tasks once aligned. <video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/papier_v4.mp4" type="video/mp4" controls /> Another direction studied in [“Guiding Pretraining in Reinforcement Learning with Large Language Models”](https://arxiv.org/abs/2302.06692) was to keep the LM frozen but leverage its knowledge to **guide an RL agent’s exploration**. Such a method allows the RL agent to be guided towards human-meaningful and plausibly useful behaviors without requiring a human in the loop during training. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/language2.png" alt="Language"> <figcaption> Source: <a href="https://ai.googleblog.com/2022/08/towards-helpful-robots-grounding.html"> Towards Helpful Robots: Grounding Language in Robotic Affordances</a> </figcaption> </figure> Several limitations make these works still very preliminary such as the need to convert the agent's observation to text before giving it to a LM as well as the compute cost of interacting with very large LMs. 
## Further reading

For more information, we recommend you check out the following resources:

- [Google Research, 2022 & beyond: Robotics](https://ai.googleblog.com/2023/02/google-research-2022-beyond-robotics.html)
- [Pre-Trained Language Models for Interactive Decision-Making](https://arxiv.org/abs/2202.01771)
- [Grounding Large Language Models with Online Reinforcement Learning](https://arxiv.org/abs/2302.02662v1)
- [Guiding Pretraining in Reinforcement Learning with Large Language Models](https://arxiv.org/abs/2302.06692)

## Author

This section was written by <a href="https://twitter.com/ClementRomac"> Clément Romac </a>
hf_public_repos/deep-rl-class/units/en/unitbonus3/rlhf.mdx
# RLHF Reinforcement learning from human feedback (RLHF) is a **methodology for integrating human data labels into a RL-based optimization process**. It is motivated by the **challenge of modeling human preferences**. For many questions, even if you could try and write down an equation for one ideal, humans differ on their preferences. Updating models **based on measured data is an avenue to try and alleviate these inherently human ML problems**. ## Start Learning about RLHF To start learning about RLHF: 1. Read this introduction: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf). 2. Watch the recorded live we did some weeks ago, where Nathan covered the basics of Reinforcement Learning from Human Feedback (RLHF) and how this technology is being used to enable state-of-the-art ML tools like ChatGPT. Most of the talk is an overview of the interconnected ML models. It covers the basics of Natural Language Processing and RL and how RLHF is used on large language models. We then conclude with open questions in RLHF. <Youtube id="2MBJOuVq380" /> 3. Read other blogs on this topic, such as [Closed-API vs Open-source continues: RLHF, ChatGPT, data moats](https://robotic.substack.com/p/rlhf-chatgpt-data-moats). Let us know if there are more you like! ## Additional readings *Note, this is copied from the Illustrating RLHF blog post above*. Here is a list of the most prevalent papers on RLHF to date. The field was recently popularized with the emergence of DeepRL (around 2017) and has grown into a broader study of the applications of LLMs from many large technology companies. Here are some papers on RLHF that pre-date the LM focus: - [TAMER: Training an Agent Manually via Evaluative Reinforcement](https://www.cs.utexas.edu/~pstone/Papers/bib2html-links/ICDL08-knox.pdf) (Knox and Stone 2008): Proposed a learned agent where humans provided scores on the actions taken iteratively to learn a reward model. - [Interactive Learning from Policy-Dependent Human Feedback](http://proceedings.mlr.press/v70/macglashan17a/macglashan17a.pdf) (MacGlashan et al. 2017): Proposed an actor-critic algorithm, COACH, where human feedback (both positive and negative) is used to tune the advantage function. - [Deep Reinforcement Learning from Human Preferences](https://proceedings.neurips.cc/paper/2017/hash/d5e2c0adad503c91f91df240d0cd4e49-Abstract.html) (Christiano et al. 2017): RLHF applied on preferences between Atari trajectories. - [Deep TAMER: Interactive Agent Shaping in High-Dimensional State Spaces](https://ojs.aaai.org/index.php/AAAI/article/view/11485) (Warnell et al. 2018): Extends the TAMER framework where a deep neural network is used to model the reward prediction. And here is a snapshot of the growing set of papers that show RLHF's performance for LMs: - [Fine-Tuning Language Models from Human Preferences](https://arxiv.org/abs/1909.08593) (Zieglar et al. 2019): An early paper that studies the impact of reward learning on four specific tasks. - [Learning to summarize with human feedback](https://proceedings.neurips.cc/paper/2020/hash/1f89885d556929e98d3ef9b86448f951-Abstract.html) (Stiennon et al., 2020): RLHF applied to the task of summarizing text. Also, [Recursively Summarizing Books with Human Feedback](https://arxiv.org/abs/2109.10862) (OpenAI Alignment Team 2021), follow on work summarizing books. 
- [WebGPT: Browser-assisted question-answering with human feedback](https://arxiv.org/abs/2112.09332) (OpenAI, 2021): Using RLHF to train an agent to navigate the web.
- InstructGPT: [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155) (OpenAI Alignment Team 2022): RLHF applied to a general language model [[Blog post](https://openai.com/blog/instruction-following/) on InstructGPT].
- GopherCite: [Teaching language models to support answers with verified quotes](https://www.deepmind.com/publications/gophercite-teaching-language-models-to-support-answers-with-verified-quotes) (Menick et al. 2022): Train a LM with RLHF to return answers with specific citations.
- Sparrow: [Improving alignment of dialogue agents via targeted human judgements](https://arxiv.org/abs/2209.14375) (Glaese et al. 2022): Fine-tuning a dialogue agent with RLHF.
- [ChatGPT: Optimizing Language Models for Dialogue](https://openai.com/blog/chatgpt/) (OpenAI 2022): Training a LM with RLHF for suitable use as an all-purpose chat bot.
- [Scaling Laws for Reward Model Overoptimization](https://arxiv.org/abs/2210.10760) (Gao et al. 2022): Studies the scaling properties of the learned preference model in RLHF.
- [Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2204.05862) (Anthropic, 2022): A detailed documentation of training a LM assistant with RLHF.
- [Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned](https://arxiv.org/abs/2209.07858) (Ganguli et al. 2022): A detailed documentation of efforts to “discover, measure, and attempt to reduce [language models] potentially harmful outputs.”
- [Dynamic Planning in Open-Ended Dialogue using Reinforcement Learning](https://arxiv.org/abs/2208.02294) (Cohen et al. 2022): Using RL to enhance the conversational skill of an open-ended dialogue agent.
- [Is Reinforcement Learning (Not) for Natural Language Processing?: Benchmarks, Baselines, and Building Blocks for Natural Language Policy Optimization](https://arxiv.org/abs/2210.01241) (Ramamurthy and Ammanabrolu et al. 2022): Discusses the design space of open-source tools in RLHF and proposes a new algorithm, NLPO (Natural Language Policy Optimization), as an alternative to PPO.

## Author

This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
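As a small appendix to this reading list, here is a minimal PyTorch sketch of the pairwise preference loss (a Bradley-Terry style objective) that most of the papers above use to train a reward model from human comparisons. The tiny two-layer reward model and the random "embeddings" are placeholders for illustration; real systems put this scalar head on top of a full pretrained language model.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy reward model: maps a pooled text representation to a scalar "reward".
# In practice this head sits on top of a pretrained language model backbone.
reward_model = nn.Sequential(nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 1))
optimizer = torch.optim.Adam(reward_model.parameters(), lr=1e-4)

def preference_loss(chosen_emb: torch.Tensor, rejected_emb: torch.Tensor) -> torch.Tensor:
    # Pairwise ranking loss: maximize log sigmoid(r(chosen) - r(rejected)),
    # i.e. the model should score the human-preferred completion higher.
    r_chosen = reward_model(chosen_emb)
    r_rejected = reward_model(rejected_emb)
    return -F.logsigmoid(r_chosen - r_rejected).mean()

# Placeholder batch: 8 pairs of (preferred, dispreferred) completion embeddings.
chosen = torch.randn(8, 128)
rejected = torch.randn(8, 128)

loss = preference_loss(chosen, rejected)
loss.backward()
optimizer.step()
print(f"pairwise preference loss: {loss.item():.3f}")
```

The scalar reward learned this way is then what an RL algorithm such as PPO optimizes when fine-tuning the language model itself.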
hf_public_repos/deep-rl-class/units/en/unitbonus3/rl-documentation.mdx
# Brief introduction to RL documentation

In this advanced topic, we address the question: **how should we monitor and keep track of powerful reinforcement learning agents that we are training in the real world and interfacing with humans?**

As machine learning systems have increasingly impacted modern life, the **call for the documentation of these systems has grown**. Such documentation can cover aspects such as the training data used — where it is stored, when it was collected, who was involved, etc. — or the model optimization framework — the architecture, evaluation metrics, relevant papers, etc. — and more.

Today, model cards and datasheets are becoming increasingly available: for example, on the Hub (see the documentation [here](https://huggingface.co/docs/hub/model-cards)). If you click on a [popular model on the Hub](https://huggingface.co/models), you can learn about its creation process.

These model- and data-specific logs are designed to be completed when the model or dataset is created, which often leaves them un-updated when these models are later built into evolving systems.

## Motivating Reward Reports

Reinforcement learning systems are fundamentally designed to optimize based on measurements of reward and time. While the notion of a reward function can be mapped nicely onto many well-understood fields of supervised learning (via a loss function), our understanding of how machine learning systems evolve over time is limited.

To that end, the authors introduce [*Reward Reports for Reinforcement Learning*](https://www.notion.so/Brief-introduction-to-RL-documentation-b8cbda5a6f5242338e0756e6bef72af4) (the pithy naming is designed to mirror the popular papers *Model Cards for Model Reporting* and *Datasheets for Datasets*). The goal is to propose a type of documentation focused on the **human factors of reward** and **time-varying feedback systems**.

Building on the documentation frameworks for [model cards](https://arxiv.org/abs/1810.03993) and [datasheets](https://arxiv.org/abs/1803.09010) proposed by Mitchell et al. and Gebru et al., they argue for the need for Reward Reports for AI systems.

**Reward Reports** are living documents for proposed RL deployments that demarcate design choices.

However, many questions remain about the applicability of this framework to different RL applications, roadblocks to system interpretability, and the resonances between deployed supervised machine learning systems and the sequential decision-making utilized in RL.

At a minimum, Reward Reports are an opportunity for RL practitioners to deliberate on these questions and begin the work of deciding how to resolve them in practice.

## Capturing temporal behavior with documentation

The core piece of documentation specific to RL and feedback-driven ML systems is a *change-log*. The change-log records information from the designers (changed training parameters, data, etc.) along with changes noticed by users (harmful behavior, unexpected responses, etc.). The change-log is accompanied by update triggers that encourage monitoring these effects.

## Contributing

Some of the most impactful RL-driven systems are multi-stakeholder in nature and behind the closed doors of private corporations. These corporations are largely without regulation, so the burden of documentation falls on the public.

If you are interested in contributing, we are building Reward Reports for popular machine learning systems on a public record on [GitHub](https://github.com/RewardReports/reward-reports).
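As a concrete illustration of the change-log idea described above, here is a minimal Python sketch of what one tracked entry might look like. The fields are an assumption chosen for illustration, not a prescribed schema from the Reward Reports paper.

```python
from dataclasses import dataclass, field
from datetime import date

@dataclass
class ChangeLogEntry:
    """One illustrative entry in a Reward Report change-log."""
    when: date
    author: str
    change: str                    # what the designers changed (reward terms, data, hyperparameters, ...)
    observed_effects: list = field(default_factory=list)  # behavior changes noticed by users
    update_trigger: str = ""       # what prompted the entry (scheduled review, incident report, ...)

entry = ChangeLogEntry(
    when=date(2023, 3, 1),
    author="ml-team",
    change="Reduced the exploration bonus weight from 0.1 to 0.05",
    observed_effects=["fewer erratic recommendations reported by users"],
    update_trigger="monthly monitoring review",
)
print(entry)
```

Appending such entries over the lifetime of a deployment is what turns the report into a living document rather than a one-off artifact.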
For further reading, you can visit the Reward Reports [paper](https://arxiv.org/abs/2204.10817) or look at [an example report](https://github.com/RewardReports/reward-reports/tree/main/examples).

## Author

This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
hf_public_repos/deep-rl-class/units/en/unitbonus3/decision-transformers.mdx
# Decision Transformers

The Decision Transformer model was introduced by [“Decision Transformer: Reinforcement Learning via Sequence Modeling” by Chen L. et al](https://arxiv.org/abs/2106.01345). It abstracts Reinforcement Learning as a conditional sequence modeling problem.

The main idea is that, instead of training a policy using RL methods (such as fitting a value function) that tells us what action to take to maximize the return (cumulative reward), **we use a sequence modeling algorithm (Transformer) that, given a desired return, past states, and actions, will generate future actions to achieve this desired return**. It’s an autoregressive model conditioned on the desired return, past states, and actions to generate future actions that achieve the desired return.

This is a complete shift in the Reinforcement Learning paradigm, since we use generative trajectory modeling (modeling the joint distribution of the sequence of states, actions, and rewards) to replace conventional RL algorithms. This means that in Decision Transformers, we don’t maximize the return but rather generate a series of future actions that achieve the desired return.

The 🤗 Transformers team integrated the Decision Transformer, an Offline Reinforcement Learning method, into the library, as well as the Hugging Face Hub.

## Learn about Decision Transformers

To learn more about Decision Transformers, you should read the blog post we wrote about it: [Introducing Decision Transformers on Hugging Face](https://huggingface.co/blog/decision-transformers)

## Train your first Decision Transformers

Now that you understand how Decision Transformers work thanks to [Introducing Decision Transformers on Hugging Face](https://huggingface.co/blog/decision-transformers), you’re ready to learn to train your first Offline Decision Transformer model from scratch to make a half-cheetah run.

Start the tutorial here 👉 https://huggingface.co/blog/train-decision-transformers

## Further reading

For more information, we recommend that you check out the following resources:

- [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345)
- [Online Decision Transformer](https://arxiv.org/abs/2202.05607)

## Author

This section was written by <a href="https://twitter.com/edwardbeeching">Edward Beeching</a>
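As a small appendix to this page, here is a hedged sketch of querying a pretrained Decision Transformer through 🤗 Transformers. The checkpoint name, tensor shapes, and forward signature are assumptions based on the library's Decision Transformer integration and may differ slightly between versions; the blog posts above contain the complete training and evaluation loops.

```python
import torch
from transformers import DecisionTransformerModel

# A Hopper checkpoint trained offline on D4RL data (name assumed; adjust if needed).
model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
model.eval()

state_dim, act_dim = model.config.state_dim, model.config.act_dim

# A dummy context of length 1: one state, a zeroed action, the desired return-to-go and a timestep.
states = torch.randn(1, 1, state_dim)
actions = torch.zeros(1, 1, act_dim)
rewards = torch.zeros(1, 1)
returns_to_go = torch.tensor([[[3600.0]]])      # the "desired return" the model is conditioned on
timesteps = torch.zeros(1, 1, dtype=torch.long)
attention_mask = torch.ones(1, 1)

with torch.no_grad():
    output = model(
        states=states,
        actions=actions,
        rewards=rewards,
        returns_to_go=returns_to_go,
        timesteps=timesteps,
        attention_mask=attention_mask,
    )

print(output.action_preds[0, -1])  # the action proposed for the current state
```

Lowering or raising the conditioning return changes the behavior the model tries to reproduce, which is exactly the "generate actions to achieve a desired return" idea described above.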
hf_public_repos/deep-rl-class/units/en/unitbonus3/curriculum-learning.mdx
# (Automatic) Curriculum Learning for RL

While most of the RL methods seen in this course work well in practice, there are some cases where using them alone fails. This can happen, for instance, when:

- the task to learn is hard and requires an **incremental acquisition of skills** (for instance, when one wants to make a bipedal agent learn to go through hard obstacles, it must first learn to stand, then walk, then maybe jump…)
- there are variations in the environment (that affect the difficulty) and one wants their agent to be **robust** to them

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/bipedal.gif" alt="Bipedal"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/movable_creepers.gif" alt="Movable creepers"/>
<figcaption> <a href="https://developmentalsystems.org/TeachMyAgent/">TeachMyAgent</a> </figcaption>
</figure>

In such cases, it seems necessary to propose different tasks to our RL agent and organize them so that the agent progressively acquires skills. This approach is called **Curriculum Learning** and usually implies a hand-designed curriculum (or set of tasks organized in a specific order). In practice, one can, for instance, control the generation of the environment, the initial states, or use Self-Play and control the level of opponents proposed to the RL agent.

As designing such a curriculum is not always trivial, the field of **Automatic Curriculum Learning (ACL) proposes to design approaches that learn to create such an organization of tasks in order to maximize the RL agent’s performance**. Portelas et al. proposed to define ACL as:

> … a family of mechanisms that automatically adapt the distribution of training data by learning to adjust the selection of learning situations to the capabilities of RL agents.

As an example, OpenAI used **Domain Randomization** (they applied random variations on the environment) to make a robot hand solve Rubik’s Cubes.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/dr.jpg" alt="Dr"/>
<figcaption> <a href="https://openai.com/blog/solving-rubiks-cube/">OpenAI - Solving Rubik’s Cube with a Robot Hand</a></figcaption>
</figure>

Finally, you can play with the robustness of agents trained in the <a href="https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo">TeachMyAgent</a> benchmark by controlling environment variations or even drawing the terrain 👇

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/demo.png" alt="Demo"/>
<figcaption> <a href="https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo">https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo</a></figcaption>
</figure>

## Further reading

For more information, we recommend that you check out the following resources:

### Overview of the field

- [Automatic Curriculum Learning For Deep RL: A Short Survey](https://arxiv.org/pdf/2003.04664.pdf)
- [Curriculum for Reinforcement Learning](https://lilianweng.github.io/posts/2020-01-29-curriculum-rl/)

### Recent methods

- [Evolving Curricula with Regret-Based Environment Design](https://arxiv.org/abs/2203.01302)
- [Curriculum Reinforcement Learning via Constrained Optimal Transport](https://proceedings.mlr.press/v162/klink22a.html)
- [Prioritized Level Replay](https://arxiv.org/abs/2010.03934)

## Author

This section was written by <a href="https://twitter.com/ClementRomac"> Clément Romac </a>
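To give a flavour of what an ACL mechanism can look like in code, here is a minimal sketch of a "teacher" that samples task difficulty levels in proportion to the agent's absolute learning progress, one of the classic ACL heuristics. The bucketed levels and the crude progress estimate are simplifications for illustration, not a faithful implementation of any specific method listed above.

```python
import random
from collections import defaultdict, deque

class LearningProgressTeacher:
    """Samples task levels proportionally to |learning progress| (illustrative sketch)."""

    def __init__(self, levels, window=20, eps=0.1):
        self.levels = levels                                   # e.g. obstacle heights, terrain roughness...
        self.returns = defaultdict(lambda: deque(maxlen=window))
        self.eps = eps                                         # keep some uniform exploration over tasks

    def _progress(self, level):
        r = list(self.returns[level])
        if len(r) < 4:
            return 1.0                                         # unexplored levels look "promising"
        half = len(r) // 2
        return abs(sum(r[half:]) / (len(r) - half) - sum(r[:half]) / half)

    def sample_level(self):
        if random.random() < self.eps:
            return random.choice(self.levels)
        weights = [self._progress(level) + 1e-6 for level in self.levels]
        return random.choices(self.levels, weights=weights, k=1)[0]

    def update(self, level, episode_return):
        self.returns[level].append(episode_return)

# Usage: the training loop asks the teacher which task to train on next.
teacher = LearningProgressTeacher(levels=[0.0, 0.5, 1.0, 1.5, 2.0])
level = teacher.sample_level()
# ... run one episode on an environment parameterized by `level` ...
teacher.update(level, episode_return=12.3)
```

The RL algorithm itself is unchanged; the teacher only decides which environment parameters each training episode uses, which is the essence of ACL.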
hf_public_repos/deep-rl-class/units/en/unitbonus3/godotrl.mdx
# Godot RL Agents

[Godot RL Agents](https://github.com/edbeeching/godot_rl_agents) is an Open Source package that gives video game creators, AI researchers, and hobbyists the opportunity **to learn complex behaviors for their Non Player Characters or agents**.

The library provides:

- An interface between games created in the [Godot Engine](https://godotengine.org/) and Machine Learning algorithms running in Python
- Wrappers for four well-known RL frameworks: [StableBaselines3](https://stable-baselines3.readthedocs.io/en/master/), [CleanRL](https://docs.cleanrl.dev/), [Sample Factory](https://www.samplefactory.dev/) and [Ray RLLib](https://docs.ray.io/en/latest/rllib-algorithms.html)
- Support for memory-based agents with LSTM or attention based interfaces
- Support for *2D and 3D games*
- A suite of *AI sensors* to augment your agent's capacity to observe the game world

Godot and Godot RL Agents are **completely free and open source under a very permissive MIT license**. No strings attached, no royalties, nothing.

You can find out more about Godot RL agents on their [GitHub page](https://github.com/edbeeching/godot_rl_agents) or their AAAI-2022 Workshop [paper](https://arxiv.org/abs/2112.03636). The library's creator, [Ed Beeching](https://edbeeching.github.io/), is a Research Scientist here at Hugging Face.

Installation of the library is simple: `pip install godot-rl`

## Create a custom RL environment with Godot RL Agents

In this section, you will **learn how to create a custom environment in the Godot Game Engine** and then implement an AI controller that learns to play with Deep Reinforcement Learning.

The example game we create today is simple, **but shows off many of the features of the Godot Engine and the Godot RL Agents library**. You can then dive into the examples for more complex environments and behaviors.

The environment we will be building today is called Ring Pong, the game of Pong, but where the pitch is a ring and the paddle moves around the ring. The **objective is to keep the ball bouncing inside the ring**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ringpong.gif" alt="Ring Pong">

### Installing the Godot Game Engine

The [Godot game engine](https://godotengine.org/) is an open source tool for the **creation of video games, tools and user interfaces**.

Godot Engine is a feature-packed, cross-platform game engine designed to create 2D and 3D games from a unified interface. It provides a comprehensive set of common tools, so users **can focus on making games without having to reinvent the wheel**.

Games can be exported in one click to a number of platforms, including the major desktop platforms (Linux, macOS, Windows) as well as mobile (Android, iOS) and web-based (HTML5) platforms.

While we will guide you through the steps to implement your agent, you may wish to learn more about the Godot Game Engine. Their [documentation](https://docs.godotengine.org/en/latest/index.html) is thorough, and there are many tutorials on YouTube. We would also recommend [GDQuest](https://www.gdquest.com/), [KidsCanCode](https://kidscancode.org/godot_recipes/4.x/) and [Bramwell](https://www.youtube.com/channel/UCczi7Aq_dTKrQPF5ZV5J3gg) as sources of information.

In order to create games in Godot, **you must first download the editor**. Godot RL Agents supports the latest version of Godot, Godot 4.0.
It can be downloaded at the following links:

- [Windows](https://downloads.tuxfamily.org/godotengine/4.0.1/Godot_v4.0.1-stable_win64.exe.zip)
- [Mac](https://downloads.tuxfamily.org/godotengine/4.0.1/Godot_v4.0.1-stable_macos.universal.zip)
- [Linux](https://downloads.tuxfamily.org/godotengine/4.0.1/Godot_v4.0.1-stable_linux.x86_64.zip)

### Loading the starter project

We provide two versions of the codebase:

- [A starter project, to download and follow along for this tutorial](https://drive.google.com/file/d/1C7xd3TibJHlxFEJPBgBLpksgxrFZ3D8e/view?usp=share_link)
- [A final version of the project, for comparison and debugging.](https://drive.google.com/file/d/1k-b2Bu7uIA6poApbouX4c3sq98xqogpZ/view?usp=share_link)

To load the project, in the Godot Project Manager click **Import**, navigate to where the files are located and load the **project.godot** file.

If you press F5 or play in the editor, you should be able to play the game in human mode. There are several instances of the game running; this is because we want to speed up training our AI agent with many parallel environments.

### Installing the Godot RL Agents plugin

The Godot RL Agents plugin can be installed from the GitHub repo or with the Godot Asset Lib in the editor.

First click on the AssetLib and search for “rl”

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/godot1.png" alt="Godot">

Then click on Godot RL Agents, click Download and unselect the LICENSE and README.md files. Then click install.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/godot2.png" alt="Godot">

The Godot RL Agents plugin is now downloaded to your machine. Now click on Project → Project Settings and enable the addon:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/godot3.png" alt="Godot">

### Adding the AI controller

We now want to add an AI controller to our game. Open the player.tscn scene, on the left you should see a hierarchy of nodes that looks like this:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/godot4.png" alt="Godot">

Right click the **Player** node and click **Add Child Node.** There are many nodes listed here, search for AIController3D and create it.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/godot5.png" alt="Godot">

The AI Controller Node should have been added to the scene tree; next to it is a scroll. Click on it to open the script that is attached to the AIController. The Godot game engine uses a scripting language called GDScript, which is syntactically similar to Python. The script contains methods that need to be implemented in order to get our AI controller working.
```python #-- Methods that need implementing using the "extend script" option in Godot --# func get_obs() -> Dictionary: assert(false, "the get_obs method is not implemented when extending from ai_controller") return {"obs":[]} func get_reward() -> float: assert(false, "the get_reward method is not implemented when extending from ai_controller") return 0.0 func get_action_space() -> Dictionary: assert(false, "the get get_action_space method is not implemented when extending from ai_controller") return { "example_actions_continous" : { "size": 2, "action_type": "continuous" }, "example_actions_discrete" : { "size": 2, "action_type": "discrete" }, } func set_action(action) -> void: assert(false, "the get set_action method is not implemented when extending from ai_controller") # -----------------------------------------------------------------------------# ``` In order to implement these methods, we will need to create a class that inherits from AIController3D. This is easy to do in Godot, and is called “extending” a class. Right click the AIController3D Node and click “Extend Script” and call the new script `controller.gd`. You should now have an almost empty script file that looks like this: ```python extends AIController3D # Called when the node enters the scene tree for the first time. func _ready(): pass # Replace with function body. # Called every frame. 'delta' is the elapsed time since the previous frame. func _process(delta): pass ``` We will now implement the 4 missing methods, delete this code, and replace it with the following: ```python extends AIController3D # Stores the action sampled for the agent's policy, running in python var move_action : float = 0.0 func get_obs() -> Dictionary: # get the balls position and velocity in the paddle's frame of reference var ball_pos = to_local(_player.ball.global_position) var ball_vel = to_local(_player.ball.linear_velocity) var obs = [ball_pos.x, ball_pos.z, ball_vel.x/10.0, ball_vel.z/10.0] return {"obs":obs} func get_reward() -> float: return reward func get_action_space() -> Dictionary: return { "move_action" : { "size": 1, "action_type": "continuous" }, } func set_action(action) -> void: move_action = clamp(action["move_action"][0], -1.0, 1.0) ``` We have now defined the agent’s observation, which is the position and velocity of the ball in its local cooridinate space. We have also defined the action space of the agent, which is a single contuninous value ranging from -1 to +1. The next step is to update the Player’s script to use the actions from the AIController, edit the Player’s script by clicking on the scroll next to the player node, update the code in `Player.gd` to the following the following: ```python extends Node3D @export var rotation_speed = 3.0 @onready var ball = get_node("../Ball") @onready var ai_controller = $AIController3D func _ready(): ai_controller.init(self) func game_over(): ai_controller.done = true ai_controller.needs_reset = true func _physics_process(delta): if ai_controller.needs_reset: ai_controller.reset() ball.reset() return var movement : float if ai_controller.heuristic == "human": movement = Input.get_axis("rotate_anticlockwise", "rotate_clockwise") else: movement = ai_controller.move_action rotate_y(movement*delta*rotation_speed) func _on_area_3d_body_entered(body): ai_controller.reward += 1.0 ``` We now need to synchronize between the game running in Godot and the neural network being trained in Python. Godot RL agents provides a node that does just that. 
Open the train.tscn scene, right click on the root node, and click “Add child node”. Then, search for “sync” and add a Godot RL Agents Sync node. This node handles the communication between Python and Godot over TCP.

You can run training live in the editor by first launching the Python training with `gdrl`.

In this simple example, a reasonable policy is learned in several minutes. You may wish to speed up training. Click on the Sync node in the train scene and you will see there is a “Speed Up” property exposed in the editor:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/godot6.png" alt="Godot">

Try setting this property up to 8 to speed up training. This can be a great benefit on more complex environments, like the multi-player FPS we will learn about in the next chapter.

### There’s more!

We have only scratched the surface of what can be achieved with Godot RL Agents; the library includes custom sensors and cameras to enrich the information available to the agent. Take a look at the [examples](https://github.com/edbeeching/godot_rl_agents_examples) to find out more!

## Author

This section was written by <a href="https://twitter.com/edwardbeeching">Edward Beeching</a>
hf_public_repos/deep-rl-class/units/en/unit5/hands-on.mdx
# Hands-on

<CourseFloatingBanner classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit5/unit5.ipynb"} ]} askForHelpUrl="http://hf.co/join/discord" />

We learned what ML-Agents is and how it works. We also studied the two environments we're going to use. Now we're ready to train our agents!

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/envs.png" alt="Environments" />

To validate this hands-on for the certification process, you **just need to push your trained models to the Hub.** There are **no minimum results to attain** in order to validate this hands-on. But if you want to get nice results, you can try to reach the following:

- For [Pyramids](https://huggingface.co/spaces/unity/ML-Agents-Pyramids): Mean Reward = 1.75
- For [SnowballTarget](https://huggingface.co/spaces/ThomasSimonini/ML-Agents-SnowballTarget): Mean Reward = 15, or 30 targets shot in an episode.

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

**To start the hands-on, click on the Open In Colab button** 👇 :

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit5/unit5.ipynb)

We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers.

By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments.

# Unit 5: An Introduction to ML-Agents

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/thumbnail.png" alt="Thumbnail"/>

In this notebook, you'll learn about ML-Agents and train two agents.

- The first one will learn to **shoot snowballs onto spawning targets**.
- The second needs to press a button to spawn a pyramid, then navigate to the pyramid, knock it over, **and move to the gold brick at the top**. To do that, it will need to explore its environment, and we will use a technique called curiosity.

After that, you'll be able **to watch your agents playing directly on your browser**.

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

⬇️ Here is an example of what **you will achieve at the end of this unit.** ⬇️

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/pyramids.gif" alt="Pyramids"/>

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget.gif" alt="SnowballTarget"/>

### 🎮 Environments:

- [Pyramids](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md#pyramids)
- SnowballTarget

### 📚 RL-Library:

- [ML-Agents](https://github.com/Unity-Technologies/ml-agents)

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

## Objectives of this notebook 🏆

At the end of the notebook, you will:

- Understand how **ML-Agents** works and the environment library.
- Be able to **train agents in Unity Environments**.
## Prerequisites 🏗️ Before diving into the notebook, you need to: 🔲 📚 **Study [what ML-Agents is and how it works by reading Unit 5](https://huggingface.co/deep-rl-course/unit5/introduction)** 🤗 # Let's train our agents 🚀 ## Set the GPU 💪 - To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1"> - `Hardware Accelerator > GPU` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2"> ## Clone the repository and install the dependencies 🔽 - We need to clone the repository that **contains the experimental version of the library that allows you to push your trained agent to the Hub.** ```bash # Clone the repository git clone --depth 1 https://github.com/Unity-Technologies/ml-agents ``` ```bash # Go inside the repository and install the package cd ml-agents pip install -e ./ml-agents-envs pip install -e ./ml-agents ``` ## SnowballTarget ⛄ If you need a refresher on how this environment works check this section 👉 https://huggingface.co/deep-rl-course/unit5/snowball-target ### Download and move the environment zip file in `./training-envs-executables/linux/` - Our environment executable is in a zip file. - We need to download it and place it to `./training-envs-executables/linux/` - We use a linux executable because we use colab, and colab machines OS is Ubuntu (linux) ```bash # Here, we create training-envs-executables and linux mkdir ./training-envs-executables mkdir ./training-envs-executables/linux ``` Download the file SnowballTarget.zip from https://drive.google.com/file/d/1YHHLjyj6gaZ3Gemx1hQgqrPgSS2ZhmB5 using `wget`. Check out the full solution to download large files from GDrive [here](https://bcrf.biochem.wisc.edu/2021/02/05/download-google-drive-files-using-wget/) ```bash wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1YHHLjyj6gaZ3Gemx1hQgqrPgSS2ZhmB5' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1YHHLjyj6gaZ3Gemx1hQgqrPgSS2ZhmB5" -O ./training-envs-executables/linux/SnowballTarget.zip && rm -rf /tmp/cookies.txt ``` We unzip the executable.zip file ```bash unzip -d ./training-envs-executables/linux/ ./training-envs-executables/linux/SnowballTarget.zip ``` Make sure your file is accessible ```bash chmod -R 755 ./training-envs-executables/linux/SnowballTarget ``` ### Define the SnowballTarget config file - In ML-Agents, you define the **training hyperparameters in config.yaml files.** There are multiple hyperparameters. To understand them better, you should read the explanation for each one in [the documentation](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Training-Configuration-File.md) You need to create a `SnowballTarget.yaml` config file in ./content/ml-agents/config/ppo/ We'll give you a preliminary version of this config (to copy and paste into your `SnowballTarget.yaml file`), **but you should modify it**. 
```yaml behaviors: SnowballTarget: trainer_type: ppo summary_freq: 10000 keep_checkpoints: 10 checkpoint_interval: 50000 max_steps: 200000 time_horizon: 64 threaded: true hyperparameters: learning_rate: 0.0003 learning_rate_schedule: linear batch_size: 128 buffer_size: 2048 beta: 0.005 epsilon: 0.2 lambd: 0.95 num_epoch: 3 network_settings: normalize: false hidden_units: 256 num_layers: 2 vis_encode_type: simple reward_signals: extrinsic: gamma: 0.99 strength: 1.0 ``` <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballfight_config1.png" alt="Config SnowballTarget"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballfight_config2.png" alt="Config SnowballTarget"/> As an experiment, try to modify some other hyperparameters. Unity provides very [good documentation explaining each of them here](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Training-Configuration-File.md). Now that you've created the config file and understand what most hyperparameters do, we're ready to train our agent 🔥. ### Train the agent To train our agent, we need to **launch mlagents-learn and select the executable containing the environment.** We define four parameters: 1. `mlagents-learn <config>`: the path where the hyperparameter config file is. 2. `--env`: where the environment executable is. 3. `--run_id`: the name you want to give to your training run id. 4. `--no-graphics`: to not launch the visualization during the training. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/mlagentslearn.png" alt="MlAgents learn"/> Train the model and use the `--resume` flag to continue training in case of interruption. > It will fail the first time if and when you use `--resume`. Try rerunning the block to bypass the error. The training will take 10 to 35min depending on your config. Go take a ☕️ you deserve it 🤗. ```bash mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id="SnowballTarget1" --no-graphics ``` ### Push the agent to the Hugging Face Hub - Now that we've trained our agent, we’re **ready to push it to the Hub and visualize it playing on your browser🔥.** To be able to share your model with the community, there are three more steps to follow: 1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join 2️⃣ Sign in and store your authentication token from the Hugging Face website. - Create a new token (https://huggingface.co/settings/tokens) **with write role** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token"> - Copy the token - Run the cell below and paste the token ```python from huggingface_hub import notebook_login notebook_login() ``` If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` Then we need to run `mlagents-push-to-hf`. And we define four parameters: 1. `--run-id`: the name of the training run id. 2. `--local-dir`: where the agent was saved, it’s results/<run_id name>, so in my case results/First Training. 3. `--repo-id`: the name of the Hugging Face repo you want to create or update. It’s always <your huggingface username>/<the repo name> If the repo does not exist **it will be created automatically** 4. 
`--commit-message`: since HF repos are git repositories you need to give a commit message. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/mlagentspushtohub.png" alt="Push to Hub"/> For instance: `mlagents-push-to-hf --run-id="SnowballTarget1" --local-dir="./results/SnowballTarget1" --repo-id="ThomasSimonini/ppo-SnowballTarget" --commit-message="First Push"` ```python mlagents-push-to-hf --run-id= # Add your run id --local-dir= # Your local dir --repo-id= # Your repo id --commit-message= # Your commit message ``` If everything worked you should see this at the end of the process (but with a different url 😆) : ``` Your model is pushed to the hub. You can view your model here: https://huggingface.co/ThomasSimonini/ppo-SnowballTarget ``` It's the link to your model. It contains a model card that explains how to use it, your Tensorboard, and your config file. **What's awesome is that it's a git repository, which means you can have different commits, update your repository with a new push, etc.** But now comes the best: **being able to visualize your agent online 👀.** ### Watch your agent playing 👀 This step it's simple: 1. Remember your repo-id 2. Go here: https://huggingface.co/spaces/ThomasSimonini/ML-Agents-SnowballTarget 3. Launch the game and put it in full screen by clicking on the bottom right button <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget_load.png" alt="Snowballtarget load"/> 1. In step 1, choose your model repository, which is the model id (in my case ThomasSimonini/ppo-SnowballTarget). 2. In step 2, **choose what model you want to replay**: - I have multiple ones since we saved a model every 500000 timesteps. - But if I want the more recent I choose `SnowballTarget.onnx` 👉 It's nice to **try different model stages to see the improvement of the agent.** And don't hesitate to share the best score your agent gets on discord in the #rl-i-made-this channel 🔥 Now let's try a more challenging environment called Pyramids. ## Pyramids 🏆 ### Download and move the environment zip file in `./training-envs-executables/linux/` - Our environment executable is in a zip file. - We need to download it and place it into `./training-envs-executables/linux/` - We use a linux executable because we're using colab, and the colab machine's OS is Ubuntu (linux) Download the file Pyramids.zip from https://drive.google.com/uc?export=download&id=1UiFNdKlsH0NTu32xV-giYUEVKV4-vc7H using `wget`. Check out the full solution to download large files from GDrive [here](https://bcrf.biochem.wisc.edu/2021/02/05/download-google-drive-files-using-wget/) ```python !wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1UiFNdKlsH0NTu32xV-giYUEVKV4-vc7H' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1UiFNdKlsH0NTu32xV-giYUEVKV4-vc7H" -O ./training-envs-executables/linux/Pyramids.zip && rm -rf /tmp/cookies.txt ``` Unzip it ```python %%capture !unzip -d ./training-envs-executables/linux/ ./training-envs-executables/linux/Pyramids.zip ``` Make sure your file is accessible ```bash chmod -R 755 ./training-envs-executables/linux/Pyramids/Pyramids ``` ### Modify the PyramidsRND config file - Contrary to the first environment, which was a custom one, **Pyramids was made by the Unity team**. 
- So the PyramidsRND config file already exists and is in ./content/ml-agents/config/ppo/PyramidsRND.yaml
- You might ask why "RND" is in PyramidsRND. RND stands for *random network distillation*; it's a way to generate curiosity rewards. If you want to know more about that, we wrote an article explaining this technique: https://medium.com/data-from-the-trenches/curiosity-driven-learning-through-random-network-distillation-488ffd8e5938

For this training, we’ll modify one thing:

- The total training steps hyperparameter is too high since we can hit the benchmark (mean reward = 1.75) in only 1M training steps. 👉 To do that, we go to config/ppo/PyramidsRND.yaml, **and change max_steps to 1000000.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/pyramids-config.png" alt="Pyramids config"/>

As an experiment, you should also try to modify some other hyperparameters. Unity provides very [good documentation explaining each of them here](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Training-Configuration-File.md).

We’re now ready to train our agent 🔥.

### Train the agent

The training will take 30 to 45min depending on your machine, go take a ☕️ you deserve it 🤗.

```python
mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id="Pyramids Training" --no-graphics
```

### Push the agent to the Hugging Face Hub

- Now that we trained our agent, we’re **ready to push it to the Hub to be able to visualize it playing on your browser🔥.**

```python
mlagents-push-to-hf --run-id= # Add your run id --local-dir= # Your local dir --repo-id= # Your repo id --commit-message= # Your commit message
```

### Watch your agent playing 👀

👉 https://huggingface.co/spaces/unity/ML-Agents-Pyramids

### 🎁 Bonus: Why not train on another environment?

Now that you know how to train an agent using MLAgents, **why not try another environment?**

MLAgents provides 17 different environments and we’re building some custom ones. The best way to learn is to try things on your own, have fun.

![cover](https://miro.medium.com/max/1400/0*xERdThTRRM2k_U9f.png)

You can find the full list of the currently available environments on Hugging Face here 👉 https://github.com/huggingface/ml-agents#the-environments

For the demos to visualize your agent 👉 https://huggingface.co/unity

For now we have integrated:

- [Worm](https://huggingface.co/spaces/unity/ML-Agents-Worm) demo where you teach a **worm to crawl**.
- [Walker](https://huggingface.co/spaces/unity/ML-Agents-Walker) demo where you teach an agent **to walk towards a goal**.

That’s all for today. Congrats on finishing this tutorial!

The best way to learn is to practice and try stuff. Why not try another environment? ML-Agents has 18 different environments, but you can also create your own. Check the documentation and have fun!

See you on Unit 6 🔥,

## Keep Learning, Stay awesome 🤗
hf_public_repos/deep-rl-class/units/en/unit5/how-mlagents-works.mdx
# How do Unity ML-Agents work? [[how-mlagents-works]] Before training our agent, we need to understand **what ML-Agents is and how it works**. ## What is Unity ML-Agents? [[what-is-mlagents]] [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents) is a toolkit for the game engine Unity that **allows us to create environments using Unity or use pre-made environments to train our agents**. It’s developed by [Unity Technologies](https://unity.com/), the developers of Unity, one of the most famous Game Engines used by the creators of Firewatch, Cuphead, and Cities: Skylines. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/firewatch.jpeg" alt="Firewatch"/> <figcaption>Firewatch was made with Unity</figcaption> </figure> ## The six components [[six-components]] With Unity ML-Agents, you have six essential components: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/mlagents-1.png" alt="MLAgents"/> <figcaption>Source: <a href="https://unity-technologies.github.io/ml-agents/">Unity ML-Agents Documentation</a> </figcaption> </figure> - The first is the *Learning Environment*, which contains **the Unity scene (the environment) and the environment elements** (game characters). - The second is the *Python Low-level API*, which contains **the low-level Python interface for interacting and manipulating the environment**. It’s the API we use to launch the training. - Then, we have the *External Communicator* that **connects the Learning Environment (made with C#) with the low level Python API (Python)**. - The *Python trainers*: the **Reinforcement algorithms made with PyTorch (PPO, SAC…)**. - The *Gym wrapper*: to encapsulate the RL environment in a gym wrapper. - The *PettingZoo wrapper*: PettingZoo is the multi-agents version of the gym wrapper. ## Inside the Learning Component [[inside-learning-component]] Inside the Learning Component, we have **three important elements**: - The first is the *agent component*, the actor of the scene. We’ll **train the agent by optimizing its policy** (which will tell us what action to take in each state). The policy is called the *Brain*. - Finally, there is the *Academy*. This component **orchestrates agents and their decision-making processes**. Think of this Academy as a teacher who handles Python API requests. To better understand its role, let’s remember the RL process. This can be modeled as a loop that works like this: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> Now, let’s imagine an agent learning to play a platform game. The RL process looks like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. 
- The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** The goal of the agent is to **maximize the expected cumulative reward**. The Academy will be the one that will **send the order to our Agents and ensure that agents are in sync**: - Collect Observations - Select your action using your policy - Take the Action - Reset if you reached the max step or if you’re done. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/academy.png" alt="The MLAgents Academy" width="100%"> Now that we understand how ML-Agents works, **we’re ready to train our agents.**
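To make the role of the Python low-level API and the External Communicator more concrete, here is a hedged sketch of a random-action loop built on `mlagents_envs`. The executable path is a placeholder, and the exact method names may vary slightly between ML-Agents releases; treat it as an illustration of the communication loop rather than a drop-in script.

```python
from mlagents_envs.environment import UnityEnvironment

# Path to a compiled Unity environment (placeholder); with file_name=None it
# would instead connect to a scene running in the Unity Editor.
env = UnityEnvironment(file_name="./SnowballTarget/SnowballTarget", no_graphics=True)
env.reset()

# Each "behavior" corresponds to a Brain, i.e. a policy we want to train.
behavior_name = list(env.behavior_specs)[0]
spec = env.behavior_specs[behavior_name]

for _ in range(10):
    # The Academy sends back the agents that are waiting for a decision.
    decision_steps, terminal_steps = env.get_steps(behavior_name)

    # Sample random actions with the right shape for this behavior and send them.
    action = spec.action_spec.random_action(len(decision_steps))
    env.set_actions(behavior_name, action)

    # The External Communicator forwards the actions to the Unity scene and steps it.
    env.step()

env.close()
```

During real training, the Python trainers (PPO, SAC…) replace the random actions with actions sampled from the policy being learned.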
hf_public_repos/deep-rl-class/units/en/unit5/bonus.mdx
# Bonus: Learn to create your own environments with Unity and MLAgents

**You can create your own reinforcement learning environments with Unity and MLAgents**.

Using a game engine such as Unity can be intimidating at first, but here are the steps you can take to learn smoothly.

## Step 1: Know how to use Unity

- The best way to learn Unity is to do the ["Create with Code" course](https://learn.unity.com/course/create-with-code): it's a series of videos for beginners where **you will create 5 small games with Unity**.

## Step 2: Create the simplest environment with this tutorial

- Then, when you know how to use Unity, you can create your [first basic RL environment using this tutorial](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Learning-Environment-Create-New.md).

## Step 3: Iterate and create nice environments

- Now that you've created your first simple environment, you can iterate towards more complex ones using the [MLAgents documentation (especially the Designing Agents and Agent parts)](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/)
- In addition, you can take the free course ["Create a hummingbird environment"](https://learn.unity.com/course/ml-agents-hummingbirds) by [Adam Kelly](https://twitter.com/aktwelve)

Have fun! And if you create custom environments, don't hesitate to share them in the `#rl-i-made-this` Discord channel.
hf_public_repos/deep-rl-class/units/en/unit5/introduction.mdx
# An Introduction to Unity ML-Agents [[introduction-to-ml-agents]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/thumbnail.png" alt="thumbnail"/>

One of the challenges in Reinforcement Learning is **creating environments**. Fortunately for us, we can use game engines to do so.

These engines, such as [Unity](https://unity.com/), [Godot](https://godotengine.org/) or [Unreal Engine](https://www.unrealengine.com/), are programs made to create video games. They are perfectly suited for creating environments: they provide physics systems, 2D/3D rendering, and more.

One of them, [Unity](https://unity.com/), created the [Unity ML-Agents Toolkit](https://github.com/Unity-Technologies/ml-agents), a plugin based on the game engine Unity that allows us **to use the Unity Game Engine as an environment builder to train agents**. In the first bonus unit, this is what we used to train Huggy to catch a stick!

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/example-envs.png" alt="MLAgents environments"/>
<figcaption>Source: <a href="https://github.com/Unity-Technologies/ml-agents">ML-Agents documentation</a></figcaption>
</figure>

The Unity ML-Agents Toolkit provides many exceptional pre-made environments, from playing football (soccer) to learning to walk and jumping over big walls.

In this Unit, we'll learn to use ML-Agents, but **don't worry if you don't know how to use the Unity Game Engine**: you don't need to use it to train your agents.

So, today, we're going to train two agents:

- The first one will learn to **shoot snowballs onto a spawning target**.
- The second needs to **press a button to spawn a pyramid, then navigate to the pyramid, knock it over, and move to the gold brick at the top**. To do that, it will need to explore its environment, which will be done using a technique called curiosity.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/envs.png" alt="Environments" />

Then, after training, **you'll push the trained agents to the Hugging Face Hub**, and you'll be able to **visualize them playing directly on your browser without having to use the Unity Editor**.

Doing this Unit will **prepare you for the next challenge: AI vs. AI, where you will train agents in multi-agent environments and compete against your classmates' agents**.

Sound exciting? Let's get started!