MorganWKen
committed on
Commit
•
4c1fbbc
1
Parent(s):
45c9d09
Adding model
Browse files
README.md
CHANGED
@@ -30,8 +30,34 @@ TODO: Add your code
|
|
30 |
|
31 |
|
32 |
```python
|
33 |
-
from stable_baselines3 import ...
|
34 |
-
from huggingface_sb3 import load_from_hub
|
35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
...
|
37 |
```
|
|
|
30 |
|
31 |
|
32 |
```python
|
|
|
|
|
33 |
|
34 |
+
# https://stackoverflow.com/questions/72483775/stable-baselines3-ppo-how-to-change-clip-range-parameter-during-training
def lrsched():
    """Build a piecewise-constant learning-rate schedule for SB3.

    Returns a callable mapping ``progress`` (SB3's ``progress_remaining``,
    which decays from 1.0 at the start of training to 0.0 at the end) to a
    learning rate, so the rate steps down as training advances.
    """
    # (upper bound, rate) pairs, checked from the smallest bound upward;
    # the first bound that exceeds `progress` wins.
    steps = ((0.33, 0.0001), (0.66, 0.00025), (0.85, 0.0005))

    def reallr(progress):
        for bound, rate in steps:
            if progress < bound:
                return rate
        # progress >= 0.85: early training, highest rate
        return 0.004

    return reallr
|
46 |
+
# Train a PPO agent and evaluate it.
# NOTE(review): relies on `PPO`, `env`, `gym`, `Monitor`, and `evaluate_policy`
# being in scope (stable_baselines3 / gymnasium imports not shown in this chunk).
# `learning_rate=lrsched()` passes the schedule callable defined above, so the
# rate steps down with SB3's progress_remaining.
model = PPO('MlpPolicy', env, n_steps = 1024, batch_size = 64, n_epochs = 4, gamma = 0.999, gae_lambda = 0.98, ent_coef = 0.01, verbose=1, learning_rate=lrsched())

model.learn(total_timesteps=1000000)

# Persist the trained policy to disk under this name.
model_name = "ppo-LunarLander-v2"
model.save(model_name)

# Wrap a fresh env in Monitor so evaluate_policy can read episode rewards.
eval_env = Monitor(gym.make("LunarLander-v2"))

# Evaluate the model with 10 evaluation episodes and deterministic=True
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)

# Print the results
print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")

# mean_reward=245.30 +/- 50.161170246383584
|
62 |
...
|
63 |
```
|