Hamze-Hammami commited on
Commit
8fc4679
1 Parent(s): 75166c9

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +81 -4
README.md CHANGED
@@ -25,13 +25,90 @@ model-index:
25
  This is a trained model of a **PPO** agent playing **LunarLander-v2**
26
  using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
27
 
28
- ## Usage (with Stable-baselines3)
29
- TODO: Add your code
30
 
31
 
32
  ```python
33
- from stable_baselines3 import ...
34
- from huggingface_sb3 import load_from_hub
 
 
 
 
 
 
 
 
 
 
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  ...
37
  ```
 
25
  This is a trained model of a **PPO** agent playing **LunarLander-v2**
26
  using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
27
 
28
+ ## Usage
29
+ The code was written using a Gym environment and the stable-baselines3 library
30
 
31
 
32
  ```python
33
+ !apt install swig cmake
34
+ !pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit1/requirements-unit1.txt
35
+ !sudo apt-get update
36
+ !apt install python3-opengl
37
+ !apt install ffmpeg
38
+ !apt install xvfb
39
+ !pip3 install pyvirtualdisplay
40
+ # restart colab
41
+ import os
42
+ os.kill(os.getpid(), 9)
43
+ #display
44
+ from pyvirtualdisplay import Display
45
 
46
+ virtual_display = Display(visible=0, size=(1400, 900))
47
+ virtual_display.start()
48
+ # import libraries
49
+ import gymnasium
50
+
51
+ from huggingface_sb3 import load_from_hub, package_to_hub
52
+ from huggingface_hub import (
53
+ notebook_login,
54
+ )
55
+
56
+ from stable_baselines3 import PPO
57
+ from stable_baselines3.common.env_util import make_vec_env
58
+ from stable_baselines3.common.evaluation import evaluate_policy
59
+ from stable_baselines3.common.monitor import Monitor
60
+
61
+ # Create environment
62
+ env = gym.make('LunarLander-v2')
63
+
64
+
65
+ model = PPO(
66
+ policy="MlpPolicy",
67
+ env=env,
68
+ n_steps=1024,
69
+ batch_size=64,
70
+ n_epochs=4,
71
+ gamma=0.999,
72
+ gae_lambda=0.98,
73
+ ent_coef=0.01,
74
+ verbose=1,
75
+ )
76
+ # Train the agent
77
+ model.learn(total_timesteps=1000000)
78
+
79
+ # Save the model
80
+ model_name = "ppo-LunarLander-v2"
81
+ model.save(model_name)
82
+
83
+ #evaluate model
84
+ eval_env = Monitor(gym.make("LunarLander-v2"))
85
+ mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
86
+ print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")
87
+
88
+ # create a video (for colab)
89
+ import gym
90
+ from stable_baselines3 import PPO
91
+ from IPython.display import Video, display
92
+ import os
93
+
94
+ env = gym.make('LunarLander-v2')
95
+
96
+ model_name = "ppo-LunarLander-v2"
97
+ model = PPO.load(model_name)
98
+
99
+ def record_video(env, model, video_length=500, prefix="ppo-lunarlander"):
100
+ env = gym.wrappers.RecordVideo(env, video_folder=prefix, episode_trigger=lambda x: x == 0)
101
+ obs = env.reset()
102
+ for _ in range(video_length):
103
+ action, _ = model.predict(obs)
104
+ obs, _, done, _ = env.step(action)
105
+ if done:
106
+ obs = env.reset()
107
+ env.close()
108
+
109
+ record_video(env, model, video_length=500, prefix="ppo-lunarlander")
110
+
111
+ video_path = "ppo-lunarlander/rl-video-episode-0.mp4"
112
+ display(Video(video_path))
113
  ...
114
  ```