AYMAN DAMOUN
committed on
Upload PPO LunarLander-v2 trained agent
- README.md +1 -1
- config.json +1 -1
- ppo-LunarLander-v2.zip +2 -2
- ppo-LunarLander-v2/data +17 -17
- ppo-LunarLander-v2/policy.optimizer.pth +1 -1
- ppo-LunarLander-v2/policy.pth +1 -1
- ppo-LunarLander-v2/system_info.txt +5 -4
- results.json +1 -1
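For context, an upload with this set of files (model archive, config.json, results.json, README metadata and system_info.txt) and this commit message is typically produced by the `package_to_hub` helper from the huggingface_sb3 package. A hypothetical sketch follows; the repo id, local model path and evaluation-env setup are assumptions, not recorded in this commit.

```python
# Hypothetical sketch (not part of this commit) of how such an upload is usually produced
# with the huggingface_sb3 helper; repo_id and the local model path are assumptions.
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
from huggingface_sb3 import package_to_hub

env_id = "LunarLander-v2"
model = PPO.load("ppo-LunarLander-v2")  # previously trained and saved locally (assumed path)

# Evaluation env used to fill results.json; render_mode="rgb_array" lets the helper record a replay.
eval_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")])

package_to_hub(
    model=model,
    model_name="ppo-LunarLander-v2",
    model_architecture="PPO",
    env_id=env_id,
    eval_env=eval_env,
    repo_id="<username>/ppo-LunarLander-v2",  # assumption: replace with the real repo id
    commit_message="Upload PPO LunarLander-v2 trained agent",
)
```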
README.md
CHANGED
@@ -16,7 +16,7 @@ model-index:
       type: LunarLander-v2
     metrics:
     - type: mean_reward
-      value:
+      value: 253.30 +/- 23.60
       name: mean_reward
       verified: false
 ---
config.json
CHANGED
@@ -1 +1 @@
-
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7f850bf5e950>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f850bf5e9e0>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f850bf5ea70>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f850bf5eb00>", "_build": "<function ActorCriticPolicy._build at 0x7f850bf5eb90>", "forward": "<function ActorCriticPolicy.forward at 0x7f850bf5ec20>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7f850bf5ecb0>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f850bf5ed40>", "_predict": "<function ActorCriticPolicy._predict at 0x7f850bf5edd0>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f850bf5ee60>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f850bf5eef0>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7f850bf5ef80>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7f850c3ad4c0>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1702758369816936594, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": null, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": 
null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVMwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQHBoy2H+IdmMAWyUTS8BjAF0lEdAfwJqVhTfi3V9lChoBkdAcecjH4oJA2gHTQkBaAhHQH8DeiN83Mp1fZQoaAZHQHFfNaQmu1ZoB00MAWgIR0B/BE4aP0ZndX2UKGgGR0BvMIFPi1iOaAdNQAFoCEdAfwVSJj2Ba3V9lChoBkdAcVEvegte2WgHTQQBaAhHQH8FqhcqvvB1fZQoaAZHQHGuE1VHWjJoB00dAWgIR0B/BoGqxTsIdX2UKGgGR0Bx6HSncclxaAdNAwFoCEdAfwdc580DU3V9lChoBkdAcdtzGPxQSGgHTVMBaAhHQH8ITP8hs691fZQoaAZHQHF4FW4mTkhoB00YAWgIR0B/CJxp+MIedX2UKGgGR0Bw+/QmeDnOaAdNPwFoCEdAfwuF2V3Ux3V9lChoBkdAcWtXcQAdXGgHTQUBaAhHQH8L6BAfMfR1fZQoaAZHQHDQ4JVsDW9oB00LAWgIR0B/Db+glF+edX2UKGgGR0BxpAPMB6rvaAdL/GgIR0B/DuIFeOXFdX2UKGgGR0By1Y5q/M4caAdNNgFoCEdAfw7zoUzsQnV9lChoBkdAbbRolD4QBmgHS/VoCEdAfw9A3kxREXV9lChoBkdAcDZpAlfJFWgHS+FoCEdAfxCiONo8IXV9lChoBkdAccre0ojOcGgHTSsBaAhHQH8S3jENvwV1fZQoaAZHQHDc4NEw35xoB00vAWgIR0B/E+B8QZn+dX2UKGgGR0BynC4J/oaDaAdNgQFoCEdAfxP81XNkfHV9lChoBkdAcpMSn+AEuGgHTTQBaAhHQH8VRtP557h1fZQoaAZHQHEyt5t3wCtoB00sAWgIR0B/FaGh24d7dX2UKGgGR0BxZmo0hvBKaAdNDQFoCEdAfxXIBzV+Z3V9lChoBkdAcJxvEjxCpmgHTRABaAhHQH8WMiB5HEx1fZQoaAZHQHJKmCEpRXRoB01DAWgIR0B/F3K8tf5UdX2UKGgGR0AugTYdyT6jaAdLyWgIR0B/F9O8CgbqdX2UKGgGR0BxlqiYb83uaAdL92gIR0B/GDZYgaFVdX2UKGgGR0BxdFrGipNsaAdNFwFoCEdAfxlJokAxSHV9lChoBkdAcgqEMLF4s2gHS/NoCEdAfxy7rLQokXV9lChoBkdAcbFPIn0CimgHTRgBaAhHQH8c3O8kD6p1fZQoaAZHQHBou2mYSg5oB00kAWgIR0B/HXVawD/3dX2UKGgGR0BwzXLq2SdOaAdNMwFoCEdAfx6aWX1J2HV9lChoBkdAbWCDgZTAFmgHTQkBaAhHQH8gJVjqfOF1fZQoaAZHQHHm/OdGy5ZoB0v8aAhHQH8ghG+bmU51fZQoaAZHQHAB2OAAhjhoB0v+aAhHQH8h9ELH+611fZQoaAZHQHKcmYBvJiloB00AAWgIR0B/IpiYsunNdX2UKGgGR0Bw+95D7ZWaaAdNNQFoCEdAfyNcNH6MznV9lChoBkdAcQgUn5SFXmgHS/9oCEdAfyReBg/kenV9lChoBkdAcaM8wYcebWgHTSsBaAhHQH8koXGff411fZQoaAZHQHEewGB4D9xoB00wAWgIR0B/JXp2U0N0dX2UKGgGR0Bx4AH3UQTVaAdNDgFoCEdAfyXj8DSw4nV9lChoBkdAbhfKNAC4jWgHTT0BaAhHQH8nz544ZMt1fZQoaAZHQHC4Cvkili1oB00oAWgIR0B/KFlJ6IFedX2UKGgGR0BV+PLTx5LRaAdN6ANoCEdAfyh+4b0e2nV9lChoBkdAcIVFuNxVAGgHTTkBaAhHQH9NIFqzqr11fZQoaAZHQHJC4jKPn0VoB00ZAWgIR0B/TTIp6QeWdX2UKGgGR0BxlsWO6unuaAdNEAFoCEdAf07b6xgRb3V9lChoBkdAclZyKvV3EGgHTQYBaAhHQH9P+aBqbjN1fZQoaAZHQHBBZ4KQaJhoB01yAWgIR0B/UHrD63y7dX2UKGgGR0Bw6F0uDjBEaAdNNQFoCEdAf1C2phnanXV9lChoBkdAcSCWGyon8mgHS+1oCEdAf1FBZZB9kXV9lChoBkdAcctC+De0omgHTQoBaAhHQH9RtVmz0H11fZQoaAZHQG/EAPmPo3doB02VAWgIR0B/UvssxwhodX2UKGgGR0BxS78VHnU2aAdNDgFoCEdAf1MyEL6UJXV9lChoBkdAcKrk/KQq7WgHTQQBaAhHQH9T6l54W1t1fZQoaAZHQHCZS2H+IdloB00WAWgIR0B/VE8lolD4dX2UKGgGR0Bvpuz8gpz+aAdNGAFoCEdAf1eiWE9MbnV9lChoBkdAcitzBhx5s2gHTSIBaAhHQH9X/6j32251fZQoaAZHQHASnTZxrBVoB000AWgIR0B/WFTn7pFDdX2UKGgGR0BR0Kp97WupaAdLyGgIR0B/WTakAPupdX2UKGgGR0BuarEzfrKOaAdNtQFoCEdAf1lxGDtgKHV9lChoBkdAcSbUkv9LpWgHS/doCEdAf1n5/9YOlXV9lChoBkdASaODYh+vyWgHS91oCEdAf1vsunMt9XV9lChoBkdActNOYYzi0mgHS+toCEdAf1vsSkCV8nV9lChoBkdAb5OvduYQa2gHTQMBaAhHQH9cq0hNdqt1fZQoaAZHQHA3XAmAskJoB004AWgIR0B/XP6Eal1sdX2UKGgGR0BzKow35vcaaAdNTgFoCEdAf2DdYW+GoXV9lChoBkdAcDASTyJ9A2gHTSMBaAhHQH9hJ9JBgNR1fZQoaAZHQHBLHI6r/85oB00UAWgIR0B/Yat8uzyCdX2UKGgGR0BxzzhS9/SZaAdNUgFoCEdAf2IUjcEeQ3V9lChoBkdAceU0xM36ymgHTT0BaAhHQH9iPsiSq2l1fZQoaAZHQHHVyXt0FKVoB01FAWgIR0B/Y5FTefqYdX2UKGgGR0By0rIeYD1XaAdNKAFoCEdAf2XMwDeTFHV9lChoBkdAcO0zD4xk/mgHTScBaAhHQH9mIISlFc91fZQoaAZHQHETQfuCwr1oB00ZAWgIR0B/ZrvPTodNdX2UKGgGR0Bu3RQ1rIo3aAdNNQFoCEdAf2cuejEehnV9lChoBkdAclbl8w5/9mgHTSMBaAhHQH9ncmWt2cJ1fZQoaAZHQHAU/Vy3kPtoB00gAWgIR0B/Z+FlCkXUdX2UKGgGR0Bx7I2cawUyaAdL+2gIR0B/aR9XtBv8dX2UKGgGR0BuEzsfJV81aAdNEwFoCEdAf2kuxrzoU3V9lChoBkdAcu9LNwBHTmg
HTR4BaAhHQH9qTP0I1Lt1fZQoaAZHQHLiYegctGxoB006AWgIR0B/atwn6VMVdX2UKGgGR0Bw6fPt2LYPaAdL4GgIR0B/bCBvrGBGdX2UKGgGR0BxPAtyxRl6aAdNCgFoCEdAf21c0tRNy3V9lChoBkdAb1C+NcW0q2gHTREBaAhHQH9t8SXdCVt1fZQoaAZHQHAye/gzguRoB00lAWgIR0B/b/7xd6cBdX2UKGgGR0BxZ0gOjIq9aAdNEQFoCEdAf3BwiJO32HV9lChoBkdAcKhFjNIK+mgHTQQBaAhHQH9yJCBwuNB1fZQoaAZHQHMW4HkcS5BoB0v4aAhHQH9zYDTz/ZN1fZQoaAZHQHMHE/KQq7RoB00UAWgIR0B/c3FMqSX/dX2UKGgGR0BwBKwr1/UfaAdNDQFoCEdAf3O5uZThpHV9lChoBkdAb8narWAf+2gHTSQBaAhHQH91huTA31l1fZQoaAZHQHC6s+mm+CdoB00OAWgIR0B/dvMjeKsNdX2UKGgGR0BtqwClrM1TaAdNQAFoCEdAf3gdP+GXX3V9lChoBkdAcnRWgvlEJGgHTREBaAhHQH94pQk5ZKZ1fZQoaAZHQHBuzIBBAwBoB00RAWgIR0B/eUVgx8D0dX2UKGgGR0BydoxxkupTaAdNGwFoCEdAf3stygf2b3V9lChoBkdAcElExIre7GgHTWUBaAhHQH97fqoqCpZ1fZQoaAZHQHAQ4RmK64FoB00MAWgIR0B/e8Sg5BC2dX2UKGgGR0Bw0hBiTdLyaAdNBgJoCEdAf3wYZEUj9nV9lChoBkdAcXp1bqyGBWgHTRwBaAhHQH986YJE6T51ZS4="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 248, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVdgIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWCAAAAAAAAAABAQEBAQEBAZRoFUsIhZRoGXSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBEoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaAtLCIWUaBl0lFKUjARoaWdolGgRKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgLSwiFlGgZdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV/QAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgLjAJpOJSJiIeUUpQoSwNoD05OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZROdWIu", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 1, "n_steps": 1024, "gamma": 0.999, "gae_lambda": 0.98, "ent_coef": 0.01, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 4, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVwAIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMVS9ob21lL2dlZWtheW1hbi8ubG9jYWwvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjFUvaG9tZS9nZWVrYXltYW4vLmxvY2FsL2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/yZmZmZmZmoWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVwAIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMVS9ob21lL2dlZWtheW1hbi8ubG9jYWwvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjFUvaG9tZS9nZWVrYXltYW4vLmxvY2FsL2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/M6kqMFUyYYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "system_info": {"OS": "Linux-6.2.0-37-generic-x86_64-with-glibc2.35 # 38~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Thu Nov 2 18:01:13 UTC 2", "Python": "3.10.12", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.1.2+cu121", "GPU Enabled": "True", "Numpy": "1.26.2", "Cloudpickle": "3.0.0", "Gymnasium": "0.28.1"}}
+
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x70b2c8701090>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x70b2c8701120>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x70b2c87011b0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x70b2c8701240>", "_build": "<function ActorCriticPolicy._build at 0x70b2c87012d0>", "forward": "<function ActorCriticPolicy.forward at 0x70b2c8701360>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x70b2c87013f0>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x70b2c8701480>", "_predict": "<function ActorCriticPolicy._predict at 0x70b2c8701510>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x70b2c87015a0>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x70b2c8701630>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x70b2c87016c0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x70b2c8704d40>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1710969282414281194, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": null, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": 
null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVQwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQGDu6Ei+tbOMAWyUTegDjAF0lEdAgg6ZB9kSVXV9lChoBkdAY124yXUpeGgHTegDaAhHQIIO5vYODrZ1fZQoaAZHQGM1HoouwotoB03oA2gIR0CCD3vmYBvKdX2UKGgGR0BhxYvL5h0AaAdN6ANoCEdAghZyOq//N3V9lChoBkdAZhVHsC1Z1WgHTegDaAhHQIIaoyIpH7R1fZQoaAZHQGKl61b7j1hoB03oA2gIR0CCGrO2y9mIdX2UKGgGR0AvP+BH09QoaAdNCQFoCEdAgh1jSofjj3V9lChoBkdAZFDq7iADrGgHTegDaAhHQIIjMAiml691fZQoaAZHQGVKeueSSvFoB03oA2gIR0CCKpGUfPondX2UKGgGR0BgXD+R5kbxaAdN6ANoCEdAgi960x/NJXV9lChoBkdAYFE/bCaZyGgHTegDaAhHQIIxeQbMott1fZQoaAZHQEb4O9WZJCloB00AAWgIR0CCM+UCaJAMdX2UKGgGR0BmOTsIE8q4aAdN6ANoCEdAgj3p3X7LuHV9lChoBkdAZF4DMeOn22gHTegDaAhHQII+6ZBsyi51fZQoaAZHQGDnrVWjoIRoB03oA2gIR0CCWKliz9jxdX2UKGgGR0BiK9diUgSwaAdN6ANoCEdAglm1PFefI3V9lChoBkdAZGtZbpu/DmgHTegDaAhHQIJaHQ4S6Dp1fZQoaAZHQGFPZ6MR6GBoB03oA2gIR0CCWk5WilBQdX2UKGgGR0BhtBGKAJ9iaAdN6ANoCEdAgly/3WWhRXV9lChoBkdAYNH/BFd9lWgHTegDaAhHQIJdB6IFeOZ1fZQoaAZHQGKeXXiBGx5oB03oA2gIR0CCZLc1O0swdX2UKGgGR0BhbFucc2itaAdN6ANoCEdAgmkkQwsXi3V9lChoBkdAYEE0WuX/pGgHTegDaAhHQIJpNVaOgg51fZQoaAZHQGO0E7OmixpoB03oA2gIR0CCa+Sq2jO+dX2UKGgGR0Bf61T3qRlpaAdN6ANoCEdAgndrf+CK8HV9lChoBkdAXS87MgU1ymgHTegDaAhHQIJ8AYaYNRZ1fZQoaAZHQGIY5iVjZthoB03oA2gIR0CCfctsenyedX2UKGgGR0BmIQgaFVT8aAdN6ANoCEdAgn/fxDst03V9lChoBkdAY3NaOgg5imgHTegDaAhHQIKIapgkTpR1fZQoaAZHQGGXupCKJl9oB03oA2gIR0CCiVclgMMJdX2UKGgGR0BwD3qB3A2yaAdNUANoCEdAgow1Zs9B8nV9lChoBkdAYb83EyckMWgHTegDaAhHQIKjXZyuIRB1fZQoaAZHQGTqsotthuxoB03oA2gIR0CCpDi5uqFRdX2UKGgGR0BkHjSThYNiaAdN6ANoCEdAgqSMFEAo5XV9lChoBkdAYkX/7zkIX2gHTegDaAhHQIKkse+23KB1fZQoaAZHQGa9nied07toB03oA2gIR0CCpvZaFEiMdX2UKGgGR0BiBQf0VafSaAdN6ANoCEdAgq7MUh3aBnV9lChoBkdAXkIOnVG0/mgHTegDaAhHQIKy9ORDCxh1fZQoaAZHQGShkkrwvxpoB03oA2gIR0CCswVVPva2dX2UKGgGR0BlrHKuB+WoaAdN6ANoCEdAgrWjye7L+3V9lChoBkdAWdNWYF7laWgHTegDaAhHQILBm0gKWs11fZQoaAZHQEoxLDAJswdoB0v+aAhHQILEpH/cWTJ1fZQoaAZHQGKW9p7CzkZoB03oA2gIR0CCxkMNtqHodX2UKGgGR0Bj/NFOO802aAdN6ANoCEdAgsgJTl1bJXV9lChoBkdAYRZXtjTa02gHTegDaAhHQILKDfR/mT11fZQoaAZHQGC+SS/0ulJoB03oA2gIR0CC0flA/s3RdX2UKGgGR0BlTcWVNYbLaAdN6ANoCEdAgtLFfiPyTnV9lChoBkdAYhONgjQiRmgHTegDaAhHQILVca86FM91fZQoaAZHQGcHv9cbBGhoB03oA2gIR0CC6EBnzxwydX2UKGgGR0BmewkE9t/GaAdN6ANoCEdAgukT7/GVA3V9lChoBkdAYJem4y44ImgHTegDaAhHQILpZHVf/m11fZQoaAZHQGHBVbzK9wpoB03oA2gIR0CC6YrYoRZmdX2UKGgGR0Blf3i704BFaAdN6ANoCEdAguvf0dzXBnV9lChoBkdAYVRcHGCI12gHTegDaAhHQILz42Ifr8l1fZQoaAZHQGQwPN/vv0BoB03oA2gIR0CC+CZIg/1QdX2UKGgGR0Bm4fyAhB7eaAdN6ANoCEdAgvg2vr4WUXV9lChoBkdAYz/1IRRMvmgHTegDaAhHQIMH3Kji4rl1fZQoaAZHQGffN96Tnq5oB03oA2gIR0CDCygOBlMAdX2UKGgGR0Bg+sornTy8aAdN6ANoCEdAgwz3sHB1tHV9lChoBkdAX+oJ6Y3Ns2gHTegDaAhHQIMO6ouPFNt1fZQoaAZHQGHZOIqLCN1oB03oA2gIR0CDETyYG+sYdX2UKGgGR0BjXdjmSyMUaAdN6ANoCEdAgxoJiiItUXV9lChoBkdAYXW9nK4hEGgHTegDaAhHQIMa6bnX/YJ1fZQoaAZHQGS17I1cdHVoB03oA2gIR0CDHaPtD2J0dX2UKGgGR0BfnSy2QXANaAdN6ANoCEdAgzHSmIj4YnV9lChoBkdAZXj5v99+gGgHTegDaAhHQIMyuH1vl2h1fZQoaAZHQGWH1EmY0EZoB03oA2gIR0CDMxR8+iaidX2UKGgGR0Bg5xqdpZfVaAdN6ANoCEdAgzM9/BnBcnV9lChoBkdAYIYj+rELpmgHTegDaAhHQIM1zcEeQuF1fZQoaAZHQHDJW+j/MntoB01BAmgIR0CDOOzkZJkHdX2UKGgGR0ByI7NiYsunaAdNvwFoCEdAgzm/ms/6f3V9lChoBkdAY/zd/rjYI2gHTegDaAhHQIM92CqZML51fZQoaAZHQF6I+T/yXldoB03oA2gIR0CDQho6jnFHdX2UKGgGR0BkeaqhlDneaAdN6ANoCEdAg0Iqtga3qnV9lChoBkdAZFv752yLRGgHTegDaAhHQINVpUedTYN1fZQoaAZHQGYZ2jGkvbpoB03oA2gIR0CDV31gYxcndX2UKGgGR0Bl9Bs9B8hLaAdN6ANoCEdAg1mPYvnKXHV9lChoBkdAb9x91loUSWgHTZYCaAhHQINa4eq7yx11fZQoaAZHQGJbWBreqJdoB03oA2gIR0CDZU7lq8DkdX2
UKGgGR0Bj+UNx2jfvaAdN6ANoCEdAg2ZA+Y+jd3V9lChoBkdAb1Tmwqy4WmgHTX4DaAhHQINpI5imVJN1fZQoaAZHQGBzzUiILw5oB03oA2gIR0CDaYM1jy4GdX2UKGgGR0Bhh6YsunMuaAdN6ANoCEdAg26xtP557nV9lChoBkdAZYV4593KS2gHTegDaAhHQIN+xb8m8dx1fZQoaAZHQGHlIHs1KoRoB03oA2gIR0CDgTyOJcgRdX2UKGgGR0BjTiwljVhDaAdN6ANoCEdAg4QJhnanJnV9lChoBkdAYwgvkili0GgHTegDaAhHQIOEyU3XI2h1fZQoaAZHQGbdjBMzuWtoB03oA2gIR0CDiFx7zCk5dX2UKGgGR0BkEjJMg2ZRaAdN6ANoCEdAg4vrjHXEqHV9lChoBkdAZStmEoOQQ2gHTegDaAhHQIOL+mrKeTV1fZQoaAZHQHDeMN2C/XZoB01eAmgIR0CDl6hzvJA/dX2UKGgGR0BiMmMn7YTTaAdN6ANoCEdAg5wBFEy+H3V9lChoBkdAZFXjlxOtXGgHTegDaAhHQIOddpsXSBt1fZQoaAZHQGhA1t4zJp5oB03oA2gIR0CDnwAJb+tKdX2UKGgGR0Bj58se4kNXaAdN6ANoCEdAg5/1t4zJp3V9lChoBkdAYMgl1KXfImgHTegDaAhHQIOoJ3HJcPh1fZQoaAZHQGINM7dSEUVoB03oA2gIR0CDqOJ+lTFVdX2UKGgGR0BlBaG8EmpmaAdN6ANoCEdAg6st0mtyP3V9lChoBkdAYAoLRa5f+mgHTegDaAhHQIOv8mv4dp91fZQoaAZHQGBaRzaK1ohoB03oA2gIR0CDsURmK64EdWUu"}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 248, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVdgIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWCAAAAAAAAAABAQEBAQEBAZRoFUsIhZRoGXSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBEoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaAtLCIWUaBl0lFKUjARoaWdolGgRKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgLSwiFlGgZdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV/QAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgLjAJpOJSJiIeUUpQoSwNoD05OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZROdWIu", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 1, "n_steps": 1024, "gamma": 0.999, "gae_lambda": 0.98, "ent_coef": 0.01, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 4, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWV3gIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMZC9ob21lL2dlZWtheW1hbi9hbmFjb25kYTMvZW52cy9ybF9lbnYvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjGQvaG9tZS9nZWVrYXltYW4vYW5hY29uZGEzL2VudnMvcmxfZW52L2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/yZmZmZmZmoWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWV3gIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMZC9ob21lL2dlZWtheW1hbi9hbmFjb25kYTMvZW52cy9ybF9lbnYvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjGQvaG9tZS9nZWVrYXltYW4vYW5hY29uZGEzL2VudnMvcmxfZW52L2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/M6kqMFUyYYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "system_info": {"OS": "Linux-6.5.0-26-generic-x86_64-with-glibc2.35 # 26~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Mar 12 10:22:43 UTC 2", "Python": "3.10.13", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.2.1+cu121", "GPU Enabled": "True", "Numpy": "1.25.2", "Cloudpickle": "3.0.0", "Gymnasium": "0.28.1", "OpenAI Gym": "0.26.2"}}
ppo-LunarLander-v2.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1e242019d6a031b4b4065e7a27baa4dc7b85dda3f5a1db1e3ceeb81c7e250eed
+size 147418
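The zip is the standard Stable-Baselines3 archive; it bundles the `data` JSON and the `policy.pth` / `policy.optimizer.pth` state dicts shown below. A minimal sketch of reloading it, either from a local copy or from the Hub via the separate huggingface_sb3 package (the repo id placeholder is an assumption):

```python
from stable_baselines3 import PPO

# From a local copy of the archive committed here:
model = PPO.load("ppo-LunarLander-v2.zip")

# Or download the same file from the Hub first (requires `pip install huggingface_sb3`):
# from huggingface_sb3 import load_from_hub
# checkpoint = load_from_hub(repo_id="<username>/ppo-LunarLander-v2",
#                            filename="ppo-LunarLander-v2.zip")
# model = PPO.load(checkpoint)
```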
ppo-LunarLander-v2/data
CHANGED
@@ -4,20 +4,20 @@
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
- "__init__": "<function ActorCriticPolicy.__init__ at
- "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at
- "reset_noise": "<function ActorCriticPolicy.reset_noise at
- "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at
- "_build": "<function ActorCriticPolicy._build at
- "forward": "<function ActorCriticPolicy.forward at
- "extract_features": "<function ActorCriticPolicy.extract_features at
- "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at
- "_predict": "<function ActorCriticPolicy._predict at
- "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at
- "get_distribution": "<function ActorCriticPolicy.get_distribution at
- "predict_values": "<function ActorCriticPolicy.predict_values at
"__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc._abc_data object at
},
"verbose": 1,
"policy_kwargs": {},
@@ -26,7 +26,7 @@
"_num_timesteps_at_start": 0,
"seed": null,
"action_noise": null,
- "start_time":
"learning_rate": 0.0003,
"tensorboard_log": null,
"_last_obs": null,
@@ -42,7 +42,7 @@
"_stats_window_size": 100,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
- ":serialized:": "
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",
@@ -84,13 +84,13 @@
"n_epochs": 4,
"clip_range": {
":type:": "<class 'function'>",
- ":serialized:": "
},
"clip_range_vf": null,
"normalize_advantage": true,
"target_kl": null,
"lr_schedule": {
":type:": "<class 'function'>",
- ":serialized:": "
}
}

":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+ "__init__": "<function ActorCriticPolicy.__init__ at 0x70b2c8701090>",
+ "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x70b2c8701120>",
+ "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x70b2c87011b0>",
+ "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x70b2c8701240>",
+ "_build": "<function ActorCriticPolicy._build at 0x70b2c87012d0>",
+ "forward": "<function ActorCriticPolicy.forward at 0x70b2c8701360>",
+ "extract_features": "<function ActorCriticPolicy.extract_features at 0x70b2c87013f0>",
+ "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x70b2c8701480>",
+ "_predict": "<function ActorCriticPolicy._predict at 0x70b2c8701510>",
+ "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x70b2c87015a0>",
+ "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x70b2c8701630>",
+ "predict_values": "<function ActorCriticPolicy.predict_values at 0x70b2c87016c0>",
"__abstractmethods__": "frozenset()",
+ "_abc_impl": "<_abc._abc_data object at 0x70b2c8704d40>"
},
"verbose": 1,
"policy_kwargs": {},
"_num_timesteps_at_start": 0,
"seed": null,
"action_noise": null,
+ "start_time": 1710969282414281194,
"learning_rate": 0.0003,
"tensorboard_log": null,
"_last_obs": null,

"_stats_window_size": 100,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
+
":serialized:": "gAWVQwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQGDu6Ei+tbOMAWyUTegDjAF0lEdAgg6ZB9kSVXV9lChoBkdAY124yXUpeGgHTegDaAhHQIIO5vYODrZ1fZQoaAZHQGM1HoouwotoB03oA2gIR0CCD3vmYBvKdX2UKGgGR0BhxYvL5h0AaAdN6ANoCEdAghZyOq//N3V9lChoBkdAZhVHsC1Z1WgHTegDaAhHQIIaoyIpH7R1fZQoaAZHQGKl61b7j1hoB03oA2gIR0CCGrO2y9mIdX2UKGgGR0AvP+BH09QoaAdNCQFoCEdAgh1jSofjj3V9lChoBkdAZFDq7iADrGgHTegDaAhHQIIjMAiml691fZQoaAZHQGVKeueSSvFoB03oA2gIR0CCKpGUfPondX2UKGgGR0BgXD+R5kbxaAdN6ANoCEdAgi960x/NJXV9lChoBkdAYFE/bCaZyGgHTegDaAhHQIIxeQbMott1fZQoaAZHQEb4O9WZJCloB00AAWgIR0CCM+UCaJAMdX2UKGgGR0BmOTsIE8q4aAdN6ANoCEdAgj3p3X7LuHV9lChoBkdAZF4DMeOn22gHTegDaAhHQII+6ZBsyi51fZQoaAZHQGDnrVWjoIRoB03oA2gIR0CCWKliz9jxdX2UKGgGR0BiK9diUgSwaAdN6ANoCEdAglm1PFefI3V9lChoBkdAZGtZbpu/DmgHTegDaAhHQIJaHQ4S6Dp1fZQoaAZHQGFPZ6MR6GBoB03oA2gIR0CCWk5WilBQdX2UKGgGR0BhtBGKAJ9iaAdN6ANoCEdAgly/3WWhRXV9lChoBkdAYNH/BFd9lWgHTegDaAhHQIJdB6IFeOZ1fZQoaAZHQGKeXXiBGx5oB03oA2gIR0CCZLc1O0swdX2UKGgGR0BhbFucc2itaAdN6ANoCEdAgmkkQwsXi3V9lChoBkdAYEE0WuX/pGgHTegDaAhHQIJpNVaOgg51fZQoaAZHQGO0E7OmixpoB03oA2gIR0CCa+Sq2jO+dX2UKGgGR0Bf61T3qRlpaAdN6ANoCEdAgndrf+CK8HV9lChoBkdAXS87MgU1ymgHTegDaAhHQIJ8AYaYNRZ1fZQoaAZHQGIY5iVjZthoB03oA2gIR0CCfctsenyedX2UKGgGR0BmIQgaFVT8aAdN6ANoCEdAgn/fxDst03V9lChoBkdAY3NaOgg5imgHTegDaAhHQIKIapgkTpR1fZQoaAZHQGGXupCKJl9oB03oA2gIR0CCiVclgMMJdX2UKGgGR0BwD3qB3A2yaAdNUANoCEdAgow1Zs9B8nV9lChoBkdAYb83EyckMWgHTegDaAhHQIKjXZyuIRB1fZQoaAZHQGTqsotthuxoB03oA2gIR0CCpDi5uqFRdX2UKGgGR0BkHjSThYNiaAdN6ANoCEdAgqSMFEAo5XV9lChoBkdAYkX/7zkIX2gHTegDaAhHQIKkse+23KB1fZQoaAZHQGa9nied07toB03oA2gIR0CCpvZaFEiMdX2UKGgGR0BiBQf0VafSaAdN6ANoCEdAgq7MUh3aBnV9lChoBkdAXkIOnVG0/mgHTegDaAhHQIKy9ORDCxh1fZQoaAZHQGShkkrwvxpoB03oA2gIR0CCswVVPva2dX2UKGgGR0BlrHKuB+WoaAdN6ANoCEdAgrWjye7L+3V9lChoBkdAWdNWYF7laWgHTegDaAhHQILBm0gKWs11fZQoaAZHQEoxLDAJswdoB0v+aAhHQILEpH/cWTJ1fZQoaAZHQGKW9p7CzkZoB03oA2gIR0CCxkMNtqHodX2UKGgGR0Bj/NFOO802aAdN6ANoCEdAgsgJTl1bJXV9lChoBkdAYRZXtjTa02gHTegDaAhHQILKDfR/mT11fZQoaAZHQGC+SS/0ulJoB03oA2gIR0CC0flA/s3RdX2UKGgGR0BlTcWVNYbLaAdN6ANoCEdAgtLFfiPyTnV9lChoBkdAYhONgjQiRmgHTegDaAhHQILVca86FM91fZQoaAZHQGcHv9cbBGhoB03oA2gIR0CC6EBnzxwydX2UKGgGR0BmewkE9t/GaAdN6ANoCEdAgukT7/GVA3V9lChoBkdAYJem4y44ImgHTegDaAhHQILpZHVf/m11fZQoaAZHQGHBVbzK9wpoB03oA2gIR0CC6YrYoRZmdX2UKGgGR0Blf3i704BFaAdN6ANoCEdAguvf0dzXBnV9lChoBkdAYVRcHGCI12gHTegDaAhHQILz42Ifr8l1fZQoaAZHQGQwPN/vv0BoB03oA2gIR0CC+CZIg/1QdX2UKGgGR0Bm4fyAhB7eaAdN6ANoCEdAgvg2vr4WUXV9lChoBkdAYz/1IRRMvmgHTegDaAhHQIMH3Kji4rl1fZQoaAZHQGffN96Tnq5oB03oA2gIR0CDCygOBlMAdX2UKGgGR0Bg+sornTy8aAdN6ANoCEdAgwz3sHB1tHV9lChoBkdAX+oJ6Y3Ns2gHTegDaAhHQIMO6ouPFNt1fZQoaAZHQGHZOIqLCN1oB03oA2gIR0CDETyYG+sYdX2UKGgGR0BjXdjmSyMUaAdN6ANoCEdAgxoJiiItUXV9lChoBkdAYXW9nK4hEGgHTegDaAhHQIMa6bnX/YJ1fZQoaAZHQGS17I1cdHVoB03oA2gIR0CDHaPtD2J0dX2UKGgGR0BfnSy2QXANaAdN6ANoCEdAgzHSmIj4YnV9lChoBkdAZXj5v99+gGgHTegDaAhHQIMyuH1vl2h1fZQoaAZHQGWH1EmY0EZoB03oA2gIR0CDMxR8+iaidX2UKGgGR0Bg5xqdpZfVaAdN6ANoCEdAgzM9/BnBcnV9lChoBkdAYIYj+rELpmgHTegDaAhHQIM1zcEeQuF1fZQoaAZHQHDJW+j/MntoB01BAmgIR0CDOOzkZJkHdX2UKGgGR0ByI7NiYsunaAdNvwFoCEdAgzm/ms/6f3V9lChoBkdAY/zd/rjYI2gHTegDaAhHQIM92CqZML51fZQoaAZHQF6I+T/yXldoB03oA2gIR0CDQho6jnFHdX2UKGgGR0BkeaqhlDneaAdN6ANoCEdAg0Iqtga3qnV9lChoBkdAZFv752yLRGgHTegDaAhHQINVpUedTYN1fZQoaAZHQGYZ2jGkvbpoB03oA2gIR0CDV31gYxcndX2UKGgGR0Bl9Bs9B8hLaAdN6ANoCEdAg1mPYvnKXHV9lChoBkdAb9x91loUSWgHTZYCaAhHQINa4eq7yx11fZQoaAZHQGJbWBreqJdoB03oA2gIR0CDZU7lq8DkdX2UKGgGR0Bj+UNx2jfvaAdN6ANoCEdAg2ZA+Y+jd3V9lChoBkdAb1Tmwqy4WmgHTX4DaAhHQINpI5imVJN1fZQoaAZHQGBzzUiILw5oB03oA2gIR0CDaYM1jy4GdX2UKGgGR0Bhh6YsunMuaAdN6ANoCEdAg26xtP557nV9lChoBkdAZYV4593KS2gHTegDaAhHQIN+xb8m8dx1fZ
QoaAZHQGHlIHs1KoRoB03oA2gIR0CDgTyOJcgRdX2UKGgGR0BjTiwljVhDaAdN6ANoCEdAg4QJhnanJnV9lChoBkdAYwgvkili0GgHTegDaAhHQIOEyU3XI2h1fZQoaAZHQGbdjBMzuWtoB03oA2gIR0CDiFx7zCk5dX2UKGgGR0BkEjJMg2ZRaAdN6ANoCEdAg4vrjHXEqHV9lChoBkdAZStmEoOQQ2gHTegDaAhHQIOL+mrKeTV1fZQoaAZHQHDeMN2C/XZoB01eAmgIR0CDl6hzvJA/dX2UKGgGR0BiMmMn7YTTaAdN6ANoCEdAg5wBFEy+H3V9lChoBkdAZFXjlxOtXGgHTegDaAhHQIOddpsXSBt1fZQoaAZHQGhA1t4zJp5oB03oA2gIR0CDnwAJb+tKdX2UKGgGR0Bj58se4kNXaAdN6ANoCEdAg5/1t4zJp3V9lChoBkdAYMgl1KXfImgHTegDaAhHQIOoJ3HJcPh1fZQoaAZHQGINM7dSEUVoB03oA2gIR0CDqOJ+lTFVdX2UKGgGR0BlBaG8EmpmaAdN6ANoCEdAg6st0mtyP3V9lChoBkdAYAoLRa5f+mgHTegDaAhHQIOv8mv4dp91fZQoaAZHQGBaRzaK1ohoB03oA2gIR0CDsURmK64EdWUu"
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",

"n_epochs": 4,
"clip_range": {
":type:": "<class 'function'>",
+
":serialized:": "gAWV3gIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMZC9ob21lL2dlZWtheW1hbi9hbmFjb25kYTMvZW52cy9ybF9lbnYvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjGQvaG9tZS9nZWVrYXltYW4vYW5hY29uZGEzL2VudnMvcmxfZW52L2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/yZmZmZmZmoWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
},
"clip_range_vf": null,
"normalize_advantage": true,
"target_kl": null,
"lr_schedule": {
":type:": "<class 'function'>",
+
":serialized:": "gAWV3gIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMZC9ob21lL2dlZWtheW1hbi9hbmFjb25kYTMvZW52cy9ybF9lbnYvbGliL3B5dGhvbjMuMTAvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjGQvaG9tZS9nZWVrYXltYW4vYW5hY29uZGEzL2VudnMvcmxfZW52L2xpYi9weXRob24zLjEwL3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/M6kqMFUyYYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
}
}
ppo-LunarLander-v2/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6606582260042af5aeff722d17399cf1511c12266a5736cba67da7afa5b67fee
 size 88490
ppo-LunarLander-v2/policy.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f67cce1acbf2c03374f570f00608700c6a93e2ab9a53d9486204e0d6e1c17e00
 size 43762
ppo-LunarLander-v2/system_info.txt
CHANGED
@@ -1,8 +1,9 @@
-- OS: Linux-6.
-- Python: 3.10.
+- OS: Linux-6.5.0-26-generic-x86_64-with-glibc2.35 # 26~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Mar 12 10:22:43 UTC 2
+- Python: 3.10.13
 - Stable-Baselines3: 2.0.0a5
-- PyTorch: 2.1
+- PyTorch: 2.2.1+cu121
 - GPU Enabled: True
-- Numpy: 1.
+- Numpy: 1.25.2
 - Cloudpickle: 3.0.0
 - Gymnasium: 0.28.1
+- OpenAI Gym: 0.26.2
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward":
+{"mean_reward": 253.3042284575924, "std_reward": 23.604301606464357, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-03-20T22:44:03.616619"}
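results.json records a deterministic evaluation over 10 episodes (mean reward 253.30, std 23.60). A minimal sketch of how such numbers are typically computed with Stable-Baselines3's `evaluate_policy`; the Monitor wrapper and the local model path are assumptions rather than details recorded in this commit.

```python
# Minimal sketch: evaluate the saved agent and write a results.json-style summary.
import json
from datetime import datetime

import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor

model = PPO.load("ppo-LunarLander-v2.zip")          # assumed local path to the archive above
eval_env = Monitor(gym.make("LunarLander-v2"))      # Monitor records per-episode rewards

mean_reward, std_reward = evaluate_policy(
    model, eval_env, n_eval_episodes=10, deterministic=True
)

with open("results.json", "w") as f:
    json.dump(
        {
            "mean_reward": mean_reward,
            "std_reward": std_reward,
            "is_deterministic": True,
            "n_eval_episodes": 10,
            "eval_datetime": datetime.now().isoformat(),
        },
        f,
    )
```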