Files changed (1)
  1. README.md +7 -6
README.md CHANGED
@@ -41,12 +41,12 @@ agent.load(path)
 ```python
 # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
 cfg_ppo = PPO_DEFAULT_CONFIG.copy()
-cfg_ppo["rollouts"] = 16 # memory_size
-cfg_ppo["learning_epochs"] = 5
-cfg_ppo["mini_batches"] = 4 # 16 * 8192 / 32768
+cfg_ppo["rollouts"] = 20 # memory_size
+cfg_ppo["learning_epochs"] = 10
+cfg_ppo["mini_batches"] = 4 # 16 * 8192 / 32768
 cfg_ppo["discount_factor"] = 0.99
-cfg_ppo["lambda"] = 0.95
-cfg_ppo["learning_rate"] = 5e-4
+cfg_ppo["lambda"] = 1
+cfg_ppo["learning_rate"] = 6e-4
 cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
 cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016}
 cfg_ppo["random_timesteps"] = 0
@@ -56,7 +56,7 @@ cfg_ppo["ratio_clip"] = 0.2
 cfg_ppo["value_clip"] = 0.2
 cfg_ppo["clip_predicted_values"] = True
 cfg_ppo["entropy_loss_scale"] = 0.0
-cfg_ppo["value_loss_scale"] = 2.0
+cfg_ppo["value_loss_scale"] = 2.5
 cfg_ppo["kl_threshold"] = 0
 cfg_ppo["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01
 cfg_ppo["state_preprocessor"] = RunningStandardScaler
@@ -66,4 +66,5 @@ cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
 # logging to TensorBoard and writing checkpoints
 cfg_ppo["experiment"]["write_interval"] = 800
 cfg_ppo["experiment"]["checkpoint_interval"] = 8000
+
 ```
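
For context, a minimal sketch of how a `cfg_ppo` dictionary like the one updated above is typically consumed in skrl. The `env`, `models`, `memory`, and `device` objects are assumed to be defined earlier in the README (they are not part of this diff), and only a few of the configured keys are repeated here for brevity.

```python
# Minimal sketch, assuming `env`, `models`, `memory`, and `device` are
# defined elsewhere in the README (not part of this diff).
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler

cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 20                # steps collected per environment before each update
cfg_ppo["learning_epochs"] = 10         # passes over the collected rollout per update
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016}
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}

# instantiate the agent with the configured hyperparameters
agent = PPO(models=models,
            memory=memory,
            cfg=cfg_ppo,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)
```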