cryptoque committed
Commit a44e472
1 Parent(s): 176db1e

Update README.md

Files changed (1)
  1. README.md +72 -3
README.md CHANGED
@@ -30,8 +30,77 @@ TODO: Add your code
 
 
 ```python
- from stable_baselines3 import ...
- from huggingface_sb3 import load_from_hub
+ import gymnasium as gym
+
+ from huggingface_sb3 import load_from_hub, package_to_hub
+ from huggingface_hub import notebook_login  # To log in to our Hugging Face account to be able to upload models to the Hub.
+
+ from stable_baselines3 import PPO
+ from stable_baselines3.common.env_util import make_vec_env
+ from stable_baselines3.common.evaluation import evaluate_policy
+ from stable_baselines3.common.monitor import Monitor
+
+ # Create the environment
+ env = make_vec_env('LunarLander-v2', n_envs=16)
+
+ model = PPO(
+     policy = 'MlpPolicy',
+     env = env,
+     n_steps = 1024,
+     batch_size = 64,
+     n_epochs = 4,
+     gamma = 0.999,
+     gae_lambda = 0.98,
+     ent_coef = 0.01,
+     verbose = 1)
+
+ # Train it for 1,000,000 timesteps
+ model.learn(total_timesteps=1000000)
+ # Save the model
+ model_name = "ppo-LunarLander-v2"
+ model.save(model_name)
+
+ # Evaluate the agent over 10 episodes
+ eval_env = Monitor(gym.make("LunarLander-v2"))
+ mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
+ print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")
+
+ notebook_login()
+ !git config --global credential.helper store
+
+ import gymnasium as gym
+
+ from stable_baselines3 import PPO
+ from stable_baselines3.common.vec_env import DummyVecEnv
+ from stable_baselines3.common.env_util import make_vec_env
+
+ from huggingface_sb3 import package_to_hub
+
+ # PLACE the variables you've just defined two cells above
+ # Define the name of the environment
+ env_id = "LunarLander-v2"
+
+ # TODO: Define the model architecture we used
+ model_architecture = "PPO"
+
+ ## Define a repo_id
+ ## repo_id is the id of the model repository on the Hugging Face Hub (repo_id = {organization}/{repo_name}), for instance ThomasSimonini/ppo-LunarLander-v2
+ ## CHANGE WITH YOUR REPO ID
+ repo_id = "ThomasSimonini/ppo-LunarLander-v2"  # Change with your repo id, you can't push with mine 😄
+
+ ## Define the commit message
+ commit_message = "Upload PPO LunarLander-v2 trained agent"
+
+ # Create the evaluation env and set render_mode="rgb_array"
+ eval_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")])
+
+ # PLACE the package_to_hub function you've just filled here
+ package_to_hub(model=model,  # Our trained model
+                model_name=model_name,  # The name of our trained model
+                model_architecture=model_architecture,  # The model architecture we used: in our case PPO
+                env_id=env_id,  # Name of the environment
+                eval_env=eval_env,  # Evaluation environment
+                repo_id=repo_id,  # id of the model repository on the Hugging Face Hub ({organization}/{repo_name}), for instance ThomasSimonini/ppo-LunarLander-v2
+                commit_message=commit_message)
 
- ...
 ```
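
The updated snippet trains, evaluates, and pushes the agent, but never uses the `load_from_hub` import it adds. A minimal sketch of pulling the uploaded checkpoint back from the Hub follows; the `repo_id` and `filename` here are assumptions and must match the repo and `model_name` used with `package_to_hub` above.

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Download the checkpoint from the Hub; repo_id/filename are assumed to
# match the repository and model_name pushed by package_to_hub above.
checkpoint = load_from_hub(
    repo_id="ThomasSimonini/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)

# Rebuild the SB3 model from the downloaded checkpoint
model = PPO.load(checkpoint)
```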